diff --git a/.gitattributes b/.gitattributes index 87f697b11c58374bfc030fb9f0239f4f73a35a96..b547e80b1ce6626abce8617e13f7194b94b758db 100644 --- a/.gitattributes +++ b/.gitattributes @@ -78,3 +78,7 @@ MLPY/Lib/site-packages/PyWin32.chm filter=lfs diff=lfs merge=lfs -text MLPY/Lib/site-packages/google/protobuf/pyext/_message.cp39-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text MLPY/Lib/site-packages/grpc/_cython/cygrpc.cp39-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text MLPY/Lib/site-packages/h5py/hdf5.dll filter=lfs diff=lfs merge=lfs -text +MLPY/Lib/site-packages/numpy/.libs/libopenblas.XWYDX2IKJW2NMTWSFYNGFUWKQU3LYTCZ.gfortran-win_amd64.dll filter=lfs diff=lfs merge=lfs -text +MLPY/Lib/site-packages/numpy/core/_multiarray_umath.cp39-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text +MLPY/Lib/site-packages/numpy/core/_simd.cp39-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text +MLPY/Lib/site-packages/onnx/onnx_cpp2py_export.cp39-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text diff --git a/MLPY/Lib/site-packages/numpy-1.21.2.dist-info/INSTALLER b/MLPY/Lib/site-packages/numpy-1.21.2.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy-1.21.2.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/MLPY/Lib/site-packages/numpy-1.21.2.dist-info/LICENSE.txt b/MLPY/Lib/site-packages/numpy-1.21.2.dist-info/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..04738ae230b92fda8fe06d6e4d0627d6e02ce793 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy-1.21.2.dist-info/LICENSE.txt @@ -0,0 +1,938 @@ + +---- + +This binary distribution of NumPy also bundles the following software: + + +Name: OpenBLAS +Files: extra-dll\libopenb*.dll +Description: bundled as a dynamically linked library +Availability: https://github.com/xianyi/OpenBLAS/ +License: 3-clause BSD + Copyright (c) 2011-2014, The OpenBLAS Project + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + 3. Neither the name of the OpenBLAS project nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +Name: LAPACK +Files: extra-dll\libopenb*.dll +Description: bundled in OpenBLAS +Availability: https://github.com/xianyi/OpenBLAS/ +License: 3-clause BSD + Copyright (c) 1992-2013 The University of Tennessee and The University + of Tennessee Research Foundation. All rights + reserved. + Copyright (c) 2000-2013 The University of California Berkeley. All + rights reserved. + Copyright (c) 2006-2013 The University of Colorado Denver. All rights + reserved. + + $COPYRIGHT$ + + Additional copyrights may follow + + $HEADER$ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer listed + in this license in the documentation and/or other materials + provided with the distribution. + + - Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + The copyright holders provide no reassurances that the source code + provided does not infringe any patent, copyright, or any other + intellectual property rights of third parties. The copyright holders + disclaim any liability to any recipient for claims brought against + recipient by any third party for infringement of that parties + intellectual property rights. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +Name: GCC runtime library +Files: extra-dll\*.dll +Description: statically linked, in DLL files compiled with gfortran only +Availability: https://gcc.gnu.org/viewcvs/gcc/ +License: GPLv3 + runtime exception + Copyright (C) 2002-2017 Free Software Foundation, Inc. + + Libgfortran is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgfortran is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + <http://www.gnu.org/licenses/>. 
+ + +Name: Microsoft Visual C++ Runtime Files +Files: extra-dll\msvcp140.dll +License: MSVC + https://www.visualstudio.com/license-terms/distributable-code-microsoft-visual-studio-2015-rc-microsoft-visual-studio-2015-sdk-rc-includes-utilities-buildserver-files/#visual-c-runtime + + Subject to the License Terms for the software, you may copy and + distribute with your program any of the files within the following + folder and its subfolders except as noted below. You may not modify + these files. + + C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\redist + + You may not distribute the contents of the following folders: + + C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\redist\debug_nonredist + C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\redist\onecore\debug_nonredist + + Subject to the License Terms for the software, you may copy and + distribute the following files with your program in your program’s + application local folder or by deploying them into the Global + Assembly Cache (GAC): + + VC\atlmfc\lib\mfcmifc80.dll + VC\atlmfc\lib\amd64\mfcmifc80.dll + + +Name: Microsoft Visual C++ Runtime Files +Files: extra-dll\msvc*90.dll, extra-dll\Microsoft.VC90.CRT.manifest +License: MSVC + For your convenience, we have provided the following folders for + use when redistributing VC++ runtime files. Subject to the license + terms for the software, you may redistribute the folder + (unmodified) in the application local folder as a sub-folder with + no change to the folder name. You may also redistribute all the + files (*.dll and *.manifest) within a folder, listed below the + folder for your convenience, as an entire set. + + \VC\redist\x86\Microsoft.VC90.ATL\ + atl90.dll + Microsoft.VC90.ATL.manifest + \VC\redist\ia64\Microsoft.VC90.ATL\ + atl90.dll + Microsoft.VC90.ATL.manifest + \VC\redist\amd64\Microsoft.VC90.ATL\ + atl90.dll + Microsoft.VC90.ATL.manifest + \VC\redist\x86\Microsoft.VC90.CRT\ + msvcm90.dll + msvcp90.dll + msvcr90.dll + Microsoft.VC90.CRT.manifest + \VC\redist\ia64\Microsoft.VC90.CRT\ + msvcm90.dll + msvcp90.dll + msvcr90.dll + Microsoft.VC90.CRT.manifest + +---- + +Full text of license texts referred to above follows (that they are +listed below does not necessarily imply the conditions apply to the +present binary release): + +---- + +GCC RUNTIME LIBRARY EXCEPTION + +Version 3.1, 31 March 2009 + +Copyright (C) 2009 Free Software Foundation, Inc. <http://fsf.org/> + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +This GCC Runtime Library Exception ("Exception") is an additional +permission under section 7 of the GNU General Public License, version +3 ("GPLv3"). It applies to a given file (the "Runtime Library") that +bears a notice placed by the copyright holder of the file stating that +the file is governed by GPLv3 along with this Exception. + +When you use GCC to compile a program, GCC may combine portions of +certain GCC header files and runtime libraries with the compiled +program. The purpose of this Exception is to allow compilation of +non-GPL (including proprietary) programs to use, in this way, the +header files and runtime libraries covered by this Exception. + +0. Definitions. + +A file is an "Independent Module" if it either requires the Runtime +Library for execution after a Compilation Process, or makes use of an +interface provided by the Runtime Library, but is not otherwise based +on the Runtime Library. 
+ +"GCC" means a version of the GNU Compiler Collection, with or without +modifications, governed by version 3 (or a specified later version) of +the GNU General Public License (GPL) with the option of using any +subsequent versions published by the FSF. + +"GPL-compatible Software" is software whose conditions of propagation, +modification and use would permit combination with GCC in accord with +the license of GCC. + +"Target Code" refers to output from any compiler for a real or virtual +target processor architecture, in executable form or suitable for +input to an assembler, loader, linker and/or execution +phase. Notwithstanding that, Target Code does not include data in any +format that is used as a compiler intermediate representation, or used +for producing a compiler intermediate representation. + +The "Compilation Process" transforms code entirely represented in +non-intermediate languages designed for human-written code, and/or in +Java Virtual Machine byte code, into Target Code. Thus, for example, +use of source code generators and preprocessors need not be considered +part of the Compilation Process, since the Compilation Process can be +understood as starting with the output of the generators or +preprocessors. + +A Compilation Process is "Eligible" if it is done using GCC, alone or +with other GPL-compatible software, or if it is done without using any +work based on GCC. For example, using non-GPL-compatible Software to +optimize any GCC intermediate representations would not qualify as an +Eligible Compilation Process. + +1. Grant of Additional Permission. + +You have permission to propagate a work of Target Code formed by +combining the Runtime Library with Independent Modules, even if such +propagation would otherwise violate the terms of GPLv3, provided that +all Target Code was generated by Eligible Compilation Processes. You +may then convey such a combination under terms of your choice, +consistent with the licensing of the Independent Modules. + +2. No Weakening of GCC Copyleft. + +The availability of this Exception does not imply any general +presumption that third-party software is unaffected by the copyleft +requirements of the license of GCC. + +---- + + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. 
+ + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. 
+ + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. 
+ + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. 
+ + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. 
+ + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. 
(Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. 
+ + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. 
If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. 
Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<http://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<http://www.gnu.org/philosophy/why-not-lgpl.html>. diff --git a/MLPY/Lib/site-packages/numpy-1.21.2.dist-info/LICENSES_bundled.txt b/MLPY/Lib/site-packages/numpy-1.21.2.dist-info/LICENSES_bundled.txt new file mode 100644 index 0000000000000000000000000000000000000000..decdfc83822b9bd4c529f153ae7723937ddd135d --- /dev/null +++ b/MLPY/Lib/site-packages/numpy-1.21.2.dist-info/LICENSES_bundled.txt @@ -0,0 +1,22 @@ +The NumPy repository and source distributions bundle several libraries that are +compatibly licensed. We list these here. 
+ +Name: lapack-lite +Files: numpy/linalg/lapack_lite/* +License: BSD-3-Clause + For details, see numpy/linalg/lapack_lite/LICENSE.txt + +Name: tempita +Files: tools/npy_tempita/* +License: MIT + For details, see tools/npy_tempita/license.txt + +Name: dragon4 +Files: numpy/core/src/multiarray/dragon4.c +License: MIT + For license text, see numpy/core/src/multiarray/dragon4.c + +Name: libdivide +Files: numpy/core/include/numpy/libdivide/* +License: Zlib + For license text, see numpy/core/include/numpy/libdivide/LICENSE.txt diff --git a/MLPY/Lib/site-packages/numpy-1.21.2.dist-info/METADATA b/MLPY/Lib/site-packages/numpy-1.21.2.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..b122f9f1c3e1ba5f7340688ab408d0b56ca672ab --- /dev/null +++ b/MLPY/Lib/site-packages/numpy-1.21.2.dist-info/METADATA @@ -0,0 +1,57 @@ +Metadata-Version: 2.1 +Name: numpy +Version: 1.21.2 +Summary: NumPy is the fundamental package for array computing with Python. +Home-page: https://www.numpy.org +Author: Travis E. Oliphant et al. +Maintainer: NumPy Developers +Maintainer-email: numpy-discussion@python.org +License: BSD +Download-URL: https://pypi.python.org/pypi/numpy +Project-URL: Bug Tracker, https://github.com/numpy/numpy/issues +Project-URL: Documentation, https://numpy.org/doc/1.21 +Project-URL: Source Code, https://github.com/numpy/numpy +Platform: Windows +Platform: Linux +Platform: Solaris +Platform: Mac OS-X +Platform: Unix +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Science/Research +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Programming Language :: C +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Topic :: Software Development +Classifier: Topic :: Scientific/Engineering +Classifier: Typing :: Typed +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX +Classifier: Operating System :: Unix +Classifier: Operating System :: MacOS +Requires-Python: >=3.7,<3.11 + +It provides: + +- a powerful N-dimensional array object +- sophisticated (broadcasting) functions +- tools for integrating C/C++ and Fortran code +- useful linear algebra, Fourier transform, and random number capabilities +- and much more + +Besides its obvious scientific uses, NumPy can also be used as an efficient +multi-dimensional container of generic data. Arbitrary data-types can be +defined. This allows NumPy to seamlessly and speedily integrate with a wide +variety of databases. + +All NumPy wheels distributed on PyPI are BSD licensed. 
+ + + diff --git a/MLPY/Lib/site-packages/numpy-1.21.2.dist-info/RECORD b/MLPY/Lib/site-packages/numpy-1.21.2.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..73ccdb590cc7ca73d49145c5d83d900562213716 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy-1.21.2.dist-info/RECORD @@ -0,0 +1,1197 @@ +../../Scripts/f2py.exe,sha256=QpzJSRg5aQCGOxUSr1G71c8cvAgZcZm_W4li0MrXgrA,108388 +numpy-1.21.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +numpy-1.21.2.dist-info/LICENSE.txt,sha256=VDW_78UWZhzN_Z729vbkkr7MzLGzi7lj3OKbwZkxrP0,47238 +numpy-1.21.2.dist-info/LICENSES_bundled.txt,sha256=UWGZ0f1YKTYB3hrVHM26mHuBuGTJwRn50NMTMyInvPY,656 +numpy-1.21.2.dist-info/METADATA,sha256=mqpTLTAT2C8-7XTO_8arhEAR38RKSvkzFADd6vckaIc,2100 +numpy-1.21.2.dist-info/RECORD,, +numpy-1.21.2.dist-info/WHEEL,sha256=jr7ubY0Lkz_yXH9FfFe9PTtLhGOsf62dZkNvTYrJINE,100 +numpy-1.21.2.dist-info/entry_points.txt,sha256=cOAXaHCx7qU-bvE9TStTTZW46RN-s-i6Mz9smnWkL3g,49 +numpy-1.21.2.dist-info/top_level.txt,sha256=4J9lbBMLnAiyxatxh8iRKV5Entd_6-oqbO7pzJjMsPw,6 +numpy/.libs/libopenblas.XWYDX2IKJW2NMTWSFYNGFUWKQU3LYTCZ.gfortran-win_amd64.dll,sha256=G1ohGBXUGy0Io4xE_qn7iXe64klIUC7ZI2L9pVAP9s4,34522650 +numpy/LICENSE.txt,sha256=VDW_78UWZhzN_Z729vbkkr7MzLGzi7lj3OKbwZkxrP0,47238 +numpy/__config__.py,sha256=zS2E82p8Sv4opnPe4vDNCKEjKQiv_wyiMlI2D4cdpAs,3821 +numpy/__init__.cython-30.pxd,sha256=lewKbMGAQ7aV9kWpHtI9GbE_wkmfv1UK4m8VXdGI4PA,37291 +numpy/__init__.pxd,sha256=MztMQM3whXzKgSkVl0x2Cf-F6SxBWcSwsuBoLUNiGao,35624 +numpy/__init__.py,sha256=5T2Xz7dGNGM7l0lMNkNME9CQJ89e-GX0JwfOS7Llscc,16328 +numpy/__init__.pyi,sha256=R37NHUIqpTLfWibCFnPvkCG3_IxR1Gw1hylvnVlYXCs,135624 +numpy/__pycache__/__config__.cpython-39.pyc,, +numpy/__pycache__/__init__.cpython-39.pyc,, +numpy/__pycache__/_distributor_init.cpython-39.pyc,, +numpy/__pycache__/_globals.cpython-39.pyc,, +numpy/__pycache__/_pytesttester.cpython-39.pyc,, +numpy/__pycache__/_version.cpython-39.pyc,, +numpy/__pycache__/conftest.cpython-39.pyc,, +numpy/__pycache__/ctypeslib.cpython-39.pyc,, +numpy/__pycache__/dual.cpython-39.pyc,, +numpy/__pycache__/matlib.cpython-39.pyc,, +numpy/__pycache__/setup.cpython-39.pyc,, +numpy/__pycache__/version.cpython-39.pyc,, +numpy/_distributor_init.py,sha256=JOqSUAVCNcvuXYwzfs41kJFHvhLdrqRfp3ZYk5U71Pc,1247 +numpy/_globals.py,sha256=sTiboJDAf3mG1cmAJlWZu3Kzdm9JrUToOJ6QoP-vqEg,3040 +numpy/_pytesttester.py,sha256=HpPj-F89cLZUYhLzznsr2tw-mepAQZszu5yy_lQOoKs,6503 +numpy/_version.py,sha256=RmolP6-rOm6PCh_pRBDL3SDStmPsdYE-S2KjhT1acZE,519 +numpy/char.pyi,sha256=PFgfjizMaK59w4XkEIQMeYWbhRod756GCYLgP5rse7E,1774 +numpy/compat/__init__.py,sha256=kryOGy6TD3f9oEXy1sZZOxEMc50A7GtON1yf0nMPzr8,450 +numpy/compat/__pycache__/__init__.cpython-39.pyc,, +numpy/compat/__pycache__/_inspect.cpython-39.pyc,, +numpy/compat/__pycache__/py3k.cpython-39.pyc,, +numpy/compat/__pycache__/setup.cpython-39.pyc,, +numpy/compat/_inspect.py,sha256=4PWDVD-iE3lZGrBCWdiLMn2oSytssuFszubUkC0oruA,7638 +numpy/compat/py3k.py,sha256=AFZC_II-HOEGZT5eOBstBHY7Z2c6rX8RnlEV7FWJZh0,3624 +numpy/compat/setup.py,sha256=PmRas58NGR72H-7OsQj6kElSUeQHjN75qVh5jlQIJmc,345 +numpy/compat/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/compat/tests/__pycache__/__init__.cpython-39.pyc,, +numpy/compat/tests/__pycache__/test_compat.cpython-39.pyc,, +numpy/compat/tests/test_compat.py,sha256=6i0bPM1Nqw0n3wtMphMU7ul7fkQNwcuH2Xxc9vpnQy8,495 +numpy/conftest.py,sha256=yzmbZ_hizvOf-RT3PLYIYrvhNpj7L2CLBMoPiHMCNQ4,4150 
+numpy/core/__init__.py,sha256=TzvFzAPLiqmdQmLyxf1SnDJATWXhCMbXVvTi3tYNItU,5528 +numpy/core/__init__.pyi,sha256=nBhfHv0Vy8Cc37vQn3joQShGEvBy4fyo5aBahLSj5Xo,128 +numpy/core/__pycache__/__init__.cpython-39.pyc,, +numpy/core/__pycache__/_add_newdocs.cpython-39.pyc,, +numpy/core/__pycache__/_add_newdocs_scalars.cpython-39.pyc,, +numpy/core/__pycache__/_asarray.cpython-39.pyc,, +numpy/core/__pycache__/_dtype.cpython-39.pyc,, +numpy/core/__pycache__/_dtype_ctypes.cpython-39.pyc,, +numpy/core/__pycache__/_exceptions.cpython-39.pyc,, +numpy/core/__pycache__/_internal.cpython-39.pyc,, +numpy/core/__pycache__/_methods.cpython-39.pyc,, +numpy/core/__pycache__/_string_helpers.cpython-39.pyc,, +numpy/core/__pycache__/_type_aliases.cpython-39.pyc,, +numpy/core/__pycache__/_ufunc_config.cpython-39.pyc,, +numpy/core/__pycache__/arrayprint.cpython-39.pyc,, +numpy/core/__pycache__/cversions.cpython-39.pyc,, +numpy/core/__pycache__/defchararray.cpython-39.pyc,, +numpy/core/__pycache__/einsumfunc.cpython-39.pyc,, +numpy/core/__pycache__/fromnumeric.cpython-39.pyc,, +numpy/core/__pycache__/function_base.cpython-39.pyc,, +numpy/core/__pycache__/generate_numpy_api.cpython-39.pyc,, +numpy/core/__pycache__/getlimits.cpython-39.pyc,, +numpy/core/__pycache__/machar.cpython-39.pyc,, +numpy/core/__pycache__/memmap.cpython-39.pyc,, +numpy/core/__pycache__/multiarray.cpython-39.pyc,, +numpy/core/__pycache__/numeric.cpython-39.pyc,, +numpy/core/__pycache__/numerictypes.cpython-39.pyc,, +numpy/core/__pycache__/overrides.cpython-39.pyc,, +numpy/core/__pycache__/records.cpython-39.pyc,, +numpy/core/__pycache__/setup.cpython-39.pyc,, +numpy/core/__pycache__/setup_common.cpython-39.pyc,, +numpy/core/__pycache__/shape_base.cpython-39.pyc,, +numpy/core/__pycache__/umath.cpython-39.pyc,, +numpy/core/__pycache__/umath_tests.cpython-39.pyc,, +numpy/core/_add_newdocs.py,sha256=jU0tMuSxni2NwXMmOyNsMg6gMv5v0Gif5J87XwSeUPU,198440 +numpy/core/_add_newdocs_scalars.py,sha256=tC1YmO6WGTOJMvR-irSgF_Fpq4UGDDm3POentnYWSuY,9059 +numpy/core/_asarray.py,sha256=lhMS1tXvoy7rAPgtUbeQ8nFWU3g9xz1P6VuqEA-OOMw,4315 +numpy/core/_asarray.pyi,sha256=aR2kZJO55_gzPmUETLI2uty3kfC9SFJtfmI4PIlcuys,2021 +numpy/core/_dtype.py,sha256=rrmrfzsthRMpG42judFTK7s6FJLfuYhMp5NY3F4lAaU,10185 +numpy/core/_dtype_ctypes.py,sha256=O8tYBqU1QzCG1CXviBe6jrgHYnyIPqpci9GEy9lXO08,3790 +numpy/core/_exceptions.py,sha256=X4rTKm9yWMvCml2mUqh7yxmry185BEcXbpTcfM9_IIA,6327 +numpy/core/_internal.py,sha256=sDRPBFCWakoN0J_UXLm0DhhAN9W9hnVNjyU_Ajepqik,28283 +numpy/core/_internal.pyi,sha256=FCeGqjxQHL5OG0jaqSLK08vMjVOWLoCCrAPcTFR9htY,1408 +numpy/core/_methods.py,sha256=1z6nNrUKmjXvpB2x-ukmSw6BecVcnx-ef7O8GjnYZtQ,11084 +numpy/core/_multiarray_tests.cp39-win_amd64.pyd,sha256=gBRaJ-O9t5VrsHrn_mB_4e4KmUFRIPoD4E5aing2lYM,113664 +numpy/core/_multiarray_umath.cp39-win_amd64.pyd,sha256=vX2fo64inT_roKaSiGSoib0Y55F4vRdYMcixwViWuQk,2942976 +numpy/core/_operand_flag_tests.cp39-win_amd64.pyd,sha256=MXmvPY0qnSrX4cf-4yNY51DIiehAhhkti8DBahzl68M,13824 +numpy/core/_rational_tests.cp39-win_amd64.pyd,sha256=wXMFJt8p4LmRwr4s3L-sPT5bWwnbi0zS0XmKlX-y6bs,47616 +numpy/core/_simd.cp39-win_amd64.pyd,sha256=trXpWrvlC8J11hBAyU9xMW7da6VCd2EIA14TPtPMawM,1467904 +numpy/core/_string_helpers.py,sha256=xFVFp6go9I8O7PKRR2zwkOk2VJxlnXTFSYt4s7MwXGM,2955 +numpy/core/_struct_ufunc_tests.cp39-win_amd64.pyd,sha256=erGmhI8ztJMW-bBU9dqtoNYGVebGe9ULQ_rDOibrqqg,14848 +numpy/core/_type_aliases.py,sha256=sI0z6B9Ps_wEEKD-_r06JxrvBAv2xHzbSJrlTPbEQ8Q,8083 +numpy/core/_type_aliases.pyi,sha256=lhTyivGJcuyja-I-1rGx0Op_d_UdFt9kavPeCn_o49w,539 
+numpy/core/_ufunc_config.py,sha256=GZlilK-aOGwxjVo9h7R0CxAqRX2ZELvjiPIPgpumsRU,13833 +numpy/core/_ufunc_config.pyi,sha256=3AvHaqwY0JdSIk-vkagBwag-jh-5ZPdSnPiXAv-Fhf0,1293 +numpy/core/_umath_tests.cp39-win_amd64.pyd,sha256=O2sryi4SNUro0uPF75Y1LTs9Z6NSRkFXNxUDvO-ebJM,32256 +numpy/core/arrayprint.py,sha256=0Om_eXTSBT8yU2TdsubgyytEEiC0yJ4maM2tohvH794,63289 +numpy/core/arrayprint.pyi,sha256=9Vx2AQkkLuxpv7TV9_dCCFW8N9Lqkzaow5JgLABsIlo,4821 +numpy/core/cversions.py,sha256=FISv1d4R917Bi5xJjKKy8Lo6AlFkV00WvSoB7l3acA4,360 +numpy/core/defchararray.py,sha256=iLuW9thzYNqo3nXtOVnQvCv7lzHpBz5qOrkLkcehrqo,72530 +numpy/core/einsumfunc.py,sha256=8761GqpmyKXYn0tWiUSfwi8Orr6VI-TfWAqknjNj0Y4,52876 +numpy/core/einsumfunc.pyi,sha256=Hk-GVDfbzdBd_SJ1k2At5hCCDxhfm_QVvTj1DLjrZ4E,3847 +numpy/core/fromnumeric.py,sha256=j9CZm9Z6y3dZuqSI7nYcQMNay_jxBuAUrYf_64JOe8A,126566 +numpy/core/fromnumeric.pyi,sha256=4b2NEjhXTGarjW2l5Ih1RNRub-h5l6fcWJSjIih9LuE,8383 +numpy/core/function_base.py,sha256=zfEh8nDUBBu37nl-sNUA3VuaBLvr9z7tgE6ImJqNhmw,19548 +numpy/core/function_base.pyi,sha256=CeSAAa9DVTdFAfOFDxzJpadchRc4ZAkH_Ukzj2RpCf4,1528 +numpy/core/generate_numpy_api.py,sha256=5auy8-Wg0VUIwCepuDwAL11gL1P-oiZl7exPIi5BfbU,7348 +numpy/core/getlimits.py,sha256=ifS2q5tI2upbxC3ayqitKERnFT0LjA0rY9hRWPpzdj8,20338 +numpy/core/include/numpy/__multiarray_api.h,sha256=EtwVSuxB_NhjBe7zVjZl41kK5EFiUpSPm0Jy28iogwU,63572 +numpy/core/include/numpy/__ufunc_api.h,sha256=H7-TsFbz-CmyBFceTYhNcOm-GltjuAVLebBjctaRtA4,12925 +numpy/core/include/numpy/_neighborhood_iterator_imp.h,sha256=9QiCyQf-O1MEjBJQ7T4JUu_JyUyQhCBZHbwOr7sAlyk,1951 +numpy/core/include/numpy/_numpyconfig.h,sha256=2T66Co5gkAN9DD_kuhsoU9GsKfg5lIFuTi4GxtE71tw,891 +numpy/core/include/numpy/arrayobject.h,sha256=Xt8fnhPhTkbhB313Xj315N3fLi3uYBEPbqNrsF-MUXE,175 +numpy/core/include/numpy/arrayscalars.h,sha256=y624UltRg9QkKVAsdbxZ_nI2td9myIMr44TZk01P-3M,3912 +numpy/core/include/numpy/halffloat.h,sha256=AaeF7vnfAjeoIg5krxbhDn7j_T6CAQIx-HfMBYYmGiQ,1948 +numpy/core/include/numpy/libdivide/LICENSE.txt,sha256=1UR2FVi1EIZsIffootVxb8p24LmBF-O2uGMU23JE0VA,1039 +numpy/core/include/numpy/libdivide/libdivide.h,sha256=4NxM9ToNeN6RftGp2q_ak_cDUCI_WcDQ7oWVbkNuaqE,82106 +numpy/core/include/numpy/multiarray_api.txt,sha256=LAOUZ4QoaD48JHoIJp9FxJyFqDwC_r-7T4mClqxlgJ8,59356 +numpy/core/include/numpy/ndarrayobject.h,sha256=3GqcdPD2qgESrcH1O5oFlY04HC3aZ_VdN2tuLl65BrQ,10956 +numpy/core/include/numpy/ndarraytypes.h,sha256=miN_5fkuwXnZXdIwewuwxfqqrsCy53GUalvJK3OCqxM,72491 +numpy/core/include/numpy/noprefix.h,sha256=fg28OipEj4EaPsrNGWu4YNZoGK7Bxdm5o45pAO5HaSk,6998 +numpy/core/include/numpy/npy_1_7_deprecated_api.h,sha256=rVhu81d2cL3I3u7eMpWs5qKlYpxaq84VPK4gP0Yk1QM,4394 +numpy/core/include/numpy/npy_3kcompat.h,sha256=IZU8uXMfh6LHJNl64xddXoJZl8nxDQgDG2zG3DYQ6JY,16426 +numpy/core/include/numpy/npy_common.h,sha256=r0LvZPcRola6kBOa_pzCxXm1o0ClEeWWo2OXMrl6diU,39455 +numpy/core/include/numpy/npy_cpu.h,sha256=S_5Sw199_HxGT7Y8n7dQdkqCVEJ7TddjVC1uXt-_BEw,4554 +numpy/core/include/numpy/npy_endian.h,sha256=_vfSgtUccP2UrI6ag77Gj2mWbBnMKQfsPS2ggNwmJ2A,2714 +numpy/core/include/numpy/npy_interrupt.h,sha256=CjwMAXyJjxLbpbPFpyHrTgT1QDa2mrzQ-5EhGA-feqk,1923 +numpy/core/include/numpy/npy_math.h,sha256=6MDk64a5yBMnU2uN0susls1tHhclGWHFRv2iHUEA6PU,21385 +numpy/core/include/numpy/npy_no_deprecated_api.h,sha256=4-ALjOYp43ACG7z4zdabmQtFsxxKDaqYuQFTbElty1o,586 +numpy/core/include/numpy/npy_os.h,sha256=g6I_-QEotWP0p9g1v-GA_Qibg5HRAhQj46WsZ19fIuw,847 +numpy/core/include/numpy/numpyconfig.h,sha256=-k3VBsqEQEk0cOho-GSeoVsWVAh6v-jHcOTRFqejYKA,1494 
+numpy/core/include/numpy/old_defines.h,sha256=gxn96tn2uCpdL0yBFEO6H_JDkhikf8Bj-w3x8sOz5Go,6493 +numpy/core/include/numpy/oldnumeric.h,sha256=b2lR7L8Lajka8FQ3HrrkSnekPKYK0v4vsUduS8gWZqc,733 +numpy/core/include/numpy/random/bitgen.h,sha256=V0PTeCTsLhKTq_z0hM8VgDa7jTQUTPASD0MkMaGXrgk,409 +numpy/core/include/numpy/random/distributions.h,sha256=SXFAUz4rPImFXS67lnsgynRpTRJShCJLk_SlB-aYfN4,9980 +numpy/core/include/numpy/ufunc_api.txt,sha256=Qnbm8bUW9JXlOmCaX-cGztnUK-QKj0ecPPeNm6ZEgv0,7533 +numpy/core/include/numpy/ufuncobject.h,sha256=9sh0VXQuqjj-Ueie1MehGxKit64XgtkjDcg5Zjz0R_E,13224 +numpy/core/include/numpy/utils.h,sha256=LX_d9Fbe_BleyoYLy4wQgwrsF924qKLs7mAjejkdUCw,1161 +numpy/core/lib/npy-pkg-config/mlib.ini,sha256=mQBSOI6opCVmMZK4vwIhLe5J7YevO3PbaHsI0MlJGHs,151 +numpy/core/lib/npy-pkg-config/npymath.ini,sha256=5dwvhvbX3a_9toniEDvGPDGChbXIfFiLa36H4YOR-vw,380 +numpy/core/lib/npymath.lib,sha256=PUGTTF7mN2qsECMwBVtItrXtNIdJULXwhzz_VooPxpI,106590 +numpy/core/machar.py,sha256=AhzxGy1gnrp-M-ikLzi9nm-Rb1vuUZsmqmnud8qen_g,11157 +numpy/core/memmap.py,sha256=_XXONlK35hVQqljype2sNt43fNAhBtBlkJwnedqTrnw,12025 +numpy/core/multiarray.py,sha256=nRSfjpyN8fs6PuCcdXgLPyNfdRCIoXFQlfRBo96PJiA,56995 +numpy/core/numeric.py,sha256=iRPg7MPwkVsFzGgH7R7W-FW75BPFF6wPl3XSvrVJc2s,79264 +numpy/core/numeric.pyi,sha256=M_y6IIJWmJUOFADUK83jAux6JYMfCapmyPmh-O-DjHI,5113 +numpy/core/numerictypes.py,sha256=uBhxhFd_evulbPZgALDrpnv1iZxMquGJqyuI4YN-pkM,17989 +numpy/core/numerictypes.pyi,sha256=AYyIzznzldbwuT7zM7FkIFAakzCaB6Nj6TXoBdK4u4Q,3063 +numpy/core/overrides.py,sha256=-DSKEKRGxmd_jTuRltc4fYh8Et_gQukq-qKi1rvR_bo,8361 +numpy/core/records.py,sha256=JzuR5T8fcBMbem_2dLuEqsVng2eFSp8hFoi2WA17vKU,38548 +numpy/core/setup.py,sha256=bvBSRo458HV8ctqL4Pj45kyPPV0YSAtMvVChjdoupUA,45822 +numpy/core/setup_common.py,sha256=AZj8UVyEvh4KloEjAAY0OJZbHBB47LMqJ2wpX8yGFSM,20168 +numpy/core/shape_base.py,sha256=BjFK0upxzeVozimsDCdDZFegvU5OLQd7KuTpF6YkDyU,29901 +numpy/core/shape_base.pyi,sha256=CUD7TY78-TAOYzB4JY__tgJoeHkS_NYwZv3-GhmqOms,1108 +numpy/core/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/core/tests/__pycache__/__init__.cpython-39.pyc,, +numpy/core/tests/__pycache__/_locales.cpython-39.pyc,, +numpy/core/tests/__pycache__/test__exceptions.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_abc.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_api.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_argparse.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_array_coercion.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_arraymethod.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_arrayprint.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_casting_unittests.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_conversion_utils.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_cpu_dispatcher.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_cpu_features.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_cython.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_datetime.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_defchararray.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_deprecations.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_dtype.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_einsum.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_errstate.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_extint128.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_function_base.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_getlimits.cpython-39.pyc,, 
+numpy/core/tests/__pycache__/test_half.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_indexerrors.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_indexing.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_item_selection.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_longdouble.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_machar.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_mem_overlap.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_memmap.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_multiarray.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_nditer.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_numeric.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_numerictypes.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_overrides.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_print.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_protocols.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_records.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_regression.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_scalar_ctors.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_scalar_methods.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_scalarbuffer.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_scalarinherit.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_scalarmath.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_scalarprint.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_shape_base.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_simd.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_simd_module.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_ufunc.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_umath.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_umath_accuracy.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_umath_complex.cpython-39.pyc,, +numpy/core/tests/__pycache__/test_unicode.cpython-39.pyc,, +numpy/core/tests/_locales.py,sha256=PAV24baH5MIl_gH_VBi5glF-7kgifkK00WnAP0Hhc60,2266 +numpy/core/tests/data/astype_copy.pkl,sha256=lWSzCcvzRB_wpuRGj92spGIw-rNPFcd9hwJaRVvfWdk,716 +numpy/core/tests/data/recarray_from_file.fits,sha256=NA0kliz31FlLnYxv3ppzeruONqNYkuEvts5wzXEeIc4,8640 +numpy/core/tests/data/umath-validation-set-README.txt,sha256=GfrkmU_wTjpLkOftWDuGayEDdV3RPpN2GRVQX61VgWI,982 +numpy/core/tests/data/umath-validation-set-cos.csv,sha256=WO94BFik3VcliW2QBStw3Srpfd5Ykcxd-7phRf0be4Y,23898 +numpy/core/tests/data/umath-validation-set-exp.csv,sha256=mPhjF4KLe0bdwx38SJiNipD24ntLI_5aWc8h-V0UMgM,17903 +numpy/core/tests/data/umath-validation-set-log.csv,sha256=CDPky64PjaURWhqkHxkLElmMiI21v5ugGGyzhdfUbnI,11963 +numpy/core/tests/data/umath-validation-set-sin.csv,sha256=0nDRTZc1YG_zbDgpGYtMrf63nUTc8bzcCwEVWqkBqio,23705 +numpy/core/tests/examples/__pycache__/setup.cpython-39.pyc,, +numpy/core/tests/examples/checks.pyx,sha256=nZXyjisKEKm2vVXM42k-P4h0BGLoa42ABHIGENJegC4,618 +numpy/core/tests/examples/setup.py,sha256=JvbvFHHdkZx_eajlwMTtqHijLD-TZwXCZlRySAe-y94,521 +numpy/core/tests/test__exceptions.py,sha256=_jJh6Xunn9GVkkRab3PJ5xzRn5mCdb5RHxE77F5ds5I,2063 +numpy/core/tests/test_abc.py,sha256=KbyIH9nsGOnXQ8VtnwU6QyUgaFDC0XfEs476CnJ_-Wg,2382 +numpy/core/tests/test_api.py,sha256=5uS71J8O-DklNIMxVHWaY5oCQ1oUMdLxXKVHj2n7reo,22885 +numpy/core/tests/test_argparse.py,sha256=W4ys2ZU9B-LQwcuY16KNb3QExGVhxV32yJ1pG5-zF7w,2039 +numpy/core/tests/test_array_coercion.py,sha256=S_mxTzLCwj7m519WMXjWnBxBCPyCEwF7N7SgeYYfrrY,28656 +numpy/core/tests/test_arraymethod.py,sha256=GaMJgjjEYRCcFNTKjY_wV3afeInJjIp2LMUoOoNk3YM,2457 
+numpy/core/tests/test_arrayprint.py,sha256=QmBVZMTpPBL-1SY0Qsi96sNeRLQ0mUxtEpJh4ho_ZqM,38144 +numpy/core/tests/test_casting_unittests.py,sha256=gge5DLb6qECO-5jSslQX3W4E4aNMJv5DAnPn595KGZw,28514 +numpy/core/tests/test_conversion_utils.py,sha256=HB-6IpP4XD5tL_FM5Oat--gQ02MI_WG8BX0P5eyvUZg,6616 +numpy/core/tests/test_cpu_dispatcher.py,sha256=gEEzMPLo057xBh7GmwXwV4ikBC2X1P0XtHUlAkbppU0,1561 +numpy/core/tests/test_cpu_features.py,sha256=TwiyXB9_5DHrwyBMci-6NZJXbnMb3BgLl6gynAfUsXo,6948 +numpy/core/tests/test_cython.py,sha256=SkCZWMLnSnDUKJjWLfbUHYx5EEgcZ8mghpAJbYs5fX0,3663 +numpy/core/tests/test_datetime.py,sha256=0HQFG7VmiZIrUASr4IPRxezdOEGXSvwIuluYTZCRV1A,115035 +numpy/core/tests/test_defchararray.py,sha256=VhAlRKEJSrcWcrZbZruagw1170409taptPGauwxp-HM,25256 +numpy/core/tests/test_deprecations.py,sha256=Hr5dhz9umN-qqtMwpFngNEYPHL91_EMjUZuAuwxLjJ0,47348 +numpy/core/tests/test_dtype.py,sha256=VgvTfpQzJpHwsEd0bAZOnFxr8uXWJWppwiWpfCIonOA,61449 +numpy/core/tests/test_einsum.py,sha256=GsmB2gxsAu9NQ3gg3ASWoYaUhFui91uHig-B5CXRqT4,47891 +numpy/core/tests/test_errstate.py,sha256=kNCMfKs1xgXUrIRQM4qB8fEkVbREVE-p-yG2hsuHjd4,2125 +numpy/core/tests/test_extint128.py,sha256=b8vP_hDPltdHoxWBn2vAmOgjJe2NVW_vjnatdCOtxu8,5862 +numpy/core/tests/test_function_base.py,sha256=FjbJ8JMRcbBxllRXRZhxbE6nlfP1HL8I4kfyW8RKr0s,14820 +numpy/core/tests/test_getlimits.py,sha256=m2xfs_MhozzD88pqRoD00GlMbA-biaK0tEMmBW0vt9g,4418 +numpy/core/tests/test_half.py,sha256=7vZngTRJJ45h4XUFws9fpHI_gYKPq9pz_DKUhZf8lEk,24370 +numpy/core/tests/test_indexerrors.py,sha256=iJu4EorQks1MmiwTV-fda-vd4HfCEwv9_Ba_rVea7xw,5263 +numpy/core/tests/test_indexing.py,sha256=dPtZuD6jef8NAxDLlR1zmM4dxY3JxMLkJAyBcQCRd4Y,55383 +numpy/core/tests/test_item_selection.py,sha256=8AukBkttHRVkRBujEDDSW0AN1i6FvRDeJO5Cot9teFc,3665 +numpy/core/tests/test_longdouble.py,sha256=0xWqtJdc-MUfxxZutfuI9W9d-hv_LouFb3355QU0UHE,13410 +numpy/core/tests/test_machar.py,sha256=o4LNEPRU2p0fzok8AiO2Y9vIoZUfWQvqMN2_rJqLoKE,1096 +numpy/core/tests/test_mem_overlap.py,sha256=69RZVSFbEN3hGXhJB9PeRHiRMWLAPru2C-3xC2AAckc,30015 +numpy/core/tests/test_memmap.py,sha256=2h9gFYyNGaGjXkcmQ1uL5isIVgs-tcafkcsI2eoKnKY,7684 +numpy/core/tests/test_multiarray.py,sha256=U7m9FtFf9u70_eTGAUXNfg_VwhVfMnSxW2b07HRfNTI,345573 +numpy/core/tests/test_nditer.py,sha256=dfoy6y8uVVDho_vD1X-nBMHsrTYIzSjY1yT3YKd2UjU,131018 +numpy/core/tests/test_numeric.py,sha256=O9we-BUhPg3zrlEzNo6p4fOXkwQQpQVILb5Y0Y_U3Fo,138245 +numpy/core/tests/test_numerictypes.py,sha256=c-xwoz-oawJff09G0tri2Q9FJQ0NLkH3vf_zmwGnheg,21400 +numpy/core/tests/test_overrides.py,sha256=7eVH_G0MkDu7-kSu4HbnxCtE0OwNRmSqWmXCevGY438,20719 +numpy/core/tests/test_print.py,sha256=I3-R4iNbFjmVdl5xHPb9ezYb4zDfdXNfzVZt3Lz3bqU,6937 +numpy/core/tests/test_protocols.py,sha256=Etu0M6T-xFJjAyXy2EcCH48Tbe5VRnZCySjMy0RhbPY,1212 +numpy/core/tests/test_records.py,sha256=910AOqDvIhCDsrmCQk737UhxDnQU3GSB5a2-w0h9hWM,20782 +numpy/core/tests/test_regression.py,sha256=odZVEMgSLs3N2jAychCiZ4EkJ4a8a-ghOz1TfLAyfok,93670 +numpy/core/tests/test_scalar_ctors.py,sha256=ilVfUULT72ALjt1WH0B54winaYbWrRy62AewkvugJao,3803 +numpy/core/tests/test_scalar_methods.py,sha256=6KyraI9zWvF73mvywM0h0sCt8ZcOSWwx1usY3YXq_bY,4197 +numpy/core/tests/test_scalarbuffer.py,sha256=E7nYL8Hnbr3d_9-oxvGtkWte2q6ZeteJhmi0kOQ_u8k,5790 +numpy/core/tests/test_scalarinherit.py,sha256=xea-dgKGvMr-HjM9my-KpMN797sOegNhXni-62kw7m0,2504 +numpy/core/tests/test_scalarmath.py,sha256=0Fy3ZN6yvTMiZrwl0-lYd8kZsT1Jf4HLn704TjwOihw,33515 +numpy/core/tests/test_scalarprint.py,sha256=7i5RxwAELBIgg0da5ES1-R_tbSRVCAmFsocMrZDG9XY,19028 
+numpy/core/tests/test_shape_base.py,sha256=tndYVsnh6zATYP7q5PNgy_inq7OPWCP5z7ua6uZf3oQ,28010 +numpy/core/tests/test_simd.py,sha256=1f6sp1mAMHXg8gtzGAUbhSiAWcrFoV-94DioRG198yE,37157 +numpy/core/tests/test_simd_module.py,sha256=Bpbg_3YKO4Nu4Jm8p_QIlhrz_3tLIO6ketPhJmA_hZU,3855 +numpy/core/tests/test_ufunc.py,sha256=fmNH_oeF1Q2b3noce2fC8dRimZcAmAupGodzjAMGIN8,96728 +numpy/core/tests/test_umath.py,sha256=P_VXNnEtfJsIJE9ykJhcGG42_-Sim4QtjjoIcPuSi7w,144187 +numpy/core/tests/test_umath_accuracy.py,sha256=zuLNwNklp6MmFkKreLfIn6mZDnJp8yIMlgvKrpUWzKo,3174 +numpy/core/tests/test_umath_complex.py,sha256=-XgHM0_j0sOy90eIb6DjeiQzm5RrWkiNes6fdZdLGqw,23930 +numpy/core/tests/test_unicode.py,sha256=c6SB-PZaiNH7HvEZ2xIrfAMPmvuJmcTo79aBBkdCFvY,12915 +numpy/core/umath.py,sha256=IE9whDRUf3FOx3hdo6bGB0X_4OOJn_Wk6ajnbrc542A,2076 +numpy/core/umath_tests.py,sha256=IuFDModusxI6j5Qk-VWYHRZDIE806dzvju0qYlvwmfY,402 +numpy/ctypeslib.py,sha256=xV2uPf58Um8tWfW2KxudBqIJTOjYciQ00KzOpRxkoMQ,17760 +numpy/ctypeslib.pyi,sha256=2nfWcDbrrGvGfGMZsTGyDx6_ZXIHEQt2Z8ZDMTAhf4U,465 +numpy/distutils/__config__.py,sha256=zS2E82p8Sv4opnPe4vDNCKEjKQiv_wyiMlI2D4cdpAs,3821 +numpy/distutils/__init__.py,sha256=kuNMyZmAP8MtuKKOGG5pMz5wWcLVzHkU7wW7CTwzNxY,1610 +numpy/distutils/__init__.pyi,sha256=6KiQIH85pUXaIlow3KW06e1_ZJBocVY6lIGghNaW33A,123 +numpy/distutils/__pycache__/__config__.cpython-39.pyc,, +numpy/distutils/__pycache__/__init__.cpython-39.pyc,, +numpy/distutils/__pycache__/_shell_utils.cpython-39.pyc,, +numpy/distutils/__pycache__/ccompiler.cpython-39.pyc,, +numpy/distutils/__pycache__/ccompiler_opt.cpython-39.pyc,, +numpy/distutils/__pycache__/conv_template.cpython-39.pyc,, +numpy/distutils/__pycache__/core.cpython-39.pyc,, +numpy/distutils/__pycache__/cpuinfo.cpython-39.pyc,, +numpy/distutils/__pycache__/exec_command.cpython-39.pyc,, +numpy/distutils/__pycache__/extension.cpython-39.pyc,, +numpy/distutils/__pycache__/from_template.cpython-39.pyc,, +numpy/distutils/__pycache__/intelccompiler.cpython-39.pyc,, +numpy/distutils/__pycache__/lib2def.cpython-39.pyc,, +numpy/distutils/__pycache__/line_endings.cpython-39.pyc,, +numpy/distutils/__pycache__/log.cpython-39.pyc,, +numpy/distutils/__pycache__/mingw32ccompiler.cpython-39.pyc,, +numpy/distutils/__pycache__/misc_util.cpython-39.pyc,, +numpy/distutils/__pycache__/msvc9compiler.cpython-39.pyc,, +numpy/distutils/__pycache__/msvccompiler.cpython-39.pyc,, +numpy/distutils/__pycache__/npy_pkg_config.cpython-39.pyc,, +numpy/distutils/__pycache__/numpy_distribution.cpython-39.pyc,, +numpy/distutils/__pycache__/pathccompiler.cpython-39.pyc,, +numpy/distutils/__pycache__/setup.cpython-39.pyc,, +numpy/distutils/__pycache__/system_info.cpython-39.pyc,, +numpy/distutils/__pycache__/unixccompiler.cpython-39.pyc,, +numpy/distutils/_shell_utils.py,sha256=9pI0lXlRJxB22TPVBNUhWe7EnE-V6xIhMNQSR8LOw40,2704 +numpy/distutils/ccompiler.py,sha256=u-gCKJXW5tvgdYdGbqSWWaxb7Qb1thZeLYQ45Y2aJXw,28019 +numpy/distutils/ccompiler_opt.py,sha256=0yjnjvGAO8BMtrTohkmuKcjvLan20Xsq8w_N80C024Q,99371 +numpy/distutils/checks/cpu_asimd.c,sha256=sUn-v9fLpUUEtsrAfbGor76uYGhCINzXgkz0v3alZhQ,729 +numpy/distutils/checks/cpu_asimddp.c,sha256=7_HRp5jMJBSz02PcmAaH6m07zAbtcjMgkw7-4vhTZI4,395 +numpy/distutils/checks/cpu_asimdfhm.c,sha256=I7dTWSITAGwxHaLULZkh0nqnf-v9NF2kfQu7Tu90aB4,448 +numpy/distutils/checks/cpu_asimdhp.c,sha256=BD9NPwN4mcUZE_KydEQ0b7GtTkfEljl4u0KbrohLYcU,343 +numpy/distutils/checks/cpu_avx.c,sha256=69aCE28EArV-BmdFKhCA5djgNZAZtQg2zdea3VQD-co,799 
+numpy/distutils/checks/cpu_avx2.c,sha256=207hFoh4ojzMAPQ53ug_Y5qCFIgZ1e8SdI1-o2jzdB4,769 +numpy/distutils/checks/cpu_avx512_clx.c,sha256=CfPjudkRZ9_xygLVOySSEjoAfkjjfu4ipkWK4uCahbU,864 +numpy/distutils/checks/cpu_avx512_cnl.c,sha256=eKCPRk6p1B0bPAyOY0oWRKZMfa-c5g-skvJGGlG5I4Y,972 +numpy/distutils/checks/cpu_avx512_icl.c,sha256=Zt8XOXZL85Ds5HvZlAwUVilT6mGbPU44Iir44ul6y2Y,1030 +numpy/distutils/checks/cpu_avx512_knl.c,sha256=ikHHx87EfJ3-sjJoT5QeSIvmrHrO4oC9KJzCB-wp5BE,981 +numpy/distutils/checks/cpu_avx512_knm.c,sha256=iVdJnZ5HY59XhUv4GzwqYRwz2E_jWJnk1uSz97MvxY0,1162 +numpy/distutils/checks/cpu_avx512_skx.c,sha256=aOHpYdGPEx2FcnC7TKe9Nr7wQ0QWW20Uq3xRVSb4U90,1036 +numpy/distutils/checks/cpu_avx512cd.c,sha256=zIl7AJXfxqnquZyHQvUAGr9M-vt62TIlylhdlrg-qkE,779 +numpy/distutils/checks/cpu_avx512f.c,sha256=ibW0zon6XGYkdfnYETuPfREmE5OtO0HfuLTqXMsoqNA,775 +numpy/distutils/checks/cpu_f16c.c,sha256=QxxI3vimUAkJ4eJ83va2mZzTJOk3yROI05fVY07H5To,890 +numpy/distutils/checks/cpu_fma3.c,sha256=Cq0F_UpVJ4SYHcxXfaYoqHSYvWRJzZsB8IkOVl8K2ro,839 +numpy/distutils/checks/cpu_fma4.c,sha256=Xy0YfVpQDCiFOOrCWH-RMkv7ms5ZAbSauwm2xEOT94o,314 +numpy/distutils/checks/cpu_neon.c,sha256=3lc70da2_XbT1hA36YY7vGbapW3dDZlG8xoNXp2rMzY,387 +numpy/distutils/checks/cpu_neon_fp16.c,sha256=XeOsWVVTmFecSl_luw2lNBGKxHaB2TtL7YnyIv8W0OU,262 +numpy/distutils/checks/cpu_neon_vfpv4.c,sha256=oJ1NvRYfF7Yf6S_bZuaUntE0iOnkbmTNNVR_sbE4NV8,512 +numpy/distutils/checks/cpu_popcnt.c,sha256=Jkslm5DiuxbI-fBcCIgJjxjidm-Ps_yfAb_jJIZonE8,1081 +numpy/distutils/checks/cpu_sse.c,sha256=XitLZu_qxXDINNpbfcUAL7iduT1I63HjNgtyE72SCEo,706 +numpy/distutils/checks/cpu_sse2.c,sha256=OJpQzshqCS6Cp9X1I1yqh2ZPa0b2AoSmJn6HdApOzYk,717 +numpy/distutils/checks/cpu_sse3.c,sha256=AmZkvTpXcoCAfVckXgvwloutI5CTHkwHJD86pYsntgk,709 +numpy/distutils/checks/cpu_sse41.c,sha256=5GvpgxPcDL39iydUjKyS6WczOiXTs14KeXvlWVOr6LQ,695 +numpy/distutils/checks/cpu_sse42.c,sha256=8eYzhquuXjRRGp3isTX0cNUV3pXATEPc-J-CDYTgTaU,712 +numpy/distutils/checks/cpu_ssse3.c,sha256=QXWKRz5fGQv5bn282bJL4h_92-yqHFG_Gp5uLKvcA34,725 +numpy/distutils/checks/cpu_vsx.c,sha256=gxWpdnkMeoaBCzlU_j56brB38KFo4ItFsjyiyo3YrKk,499 +numpy/distutils/checks/cpu_vsx2.c,sha256=ycKoKXszrZkECYmonzKd7TgflpZyVc1Xq-gtJqyPKxs,276 +numpy/distutils/checks/cpu_vsx3.c,sha256=pNA4w2odwo-mUfSnKnXl5SVY1z2nOxPZZcNC-L2YX1w,263 +numpy/distutils/checks/cpu_xop.c,sha256=sPhOvyT-mdlbf6RlbZvMrslRwHnTFgP-HXLjueS7nwU,246 +numpy/distutils/checks/extra_avx512bw_mask.c,sha256=7IRO24mpcuXRhm3refGWP91sy0e6RmSkmUQCWyxy__0,654 +numpy/distutils/checks/extra_avx512dq_mask.c,sha256=jFtOKEtZl3iTpfbmFNB-u4DQNXXBST2toKCpxFIjEa0,520 +numpy/distutils/checks/extra_avx512f_reduce.c,sha256=hIcCLMm_aXPfrhzCsoFdQiryIrntPqfDxz0tNOR985w,1636 +numpy/distutils/checks/extra_vsx_asm.c,sha256=anSZskhKZImNk0lsSJJY_8GJQ0h3dDrkrmrGitlS7Fw,981 +numpy/distutils/checks/test_flags.c,sha256=7rgVefVOKOBaefG_6riau_tT2IqI4MFrbSMGNFnqUBQ,17 +numpy/distutils/command/__init__.py,sha256=DCxnKqTLrauOD3Fc8b7qg9U3gV2k9SADevE_Q3H78ng,1073 +numpy/distutils/command/__pycache__/__init__.cpython-39.pyc,, +numpy/distutils/command/__pycache__/autodist.cpython-39.pyc,, +numpy/distutils/command/__pycache__/bdist_rpm.cpython-39.pyc,, +numpy/distutils/command/__pycache__/build.cpython-39.pyc,, +numpy/distutils/command/__pycache__/build_clib.cpython-39.pyc,, +numpy/distutils/command/__pycache__/build_ext.cpython-39.pyc,, +numpy/distutils/command/__pycache__/build_py.cpython-39.pyc,, +numpy/distutils/command/__pycache__/build_scripts.cpython-39.pyc,, 
+numpy/distutils/command/__pycache__/build_src.cpython-39.pyc,, +numpy/distutils/command/__pycache__/config.cpython-39.pyc,, +numpy/distutils/command/__pycache__/config_compiler.cpython-39.pyc,, +numpy/distutils/command/__pycache__/develop.cpython-39.pyc,, +numpy/distutils/command/__pycache__/egg_info.cpython-39.pyc,, +numpy/distutils/command/__pycache__/install.cpython-39.pyc,, +numpy/distutils/command/__pycache__/install_clib.cpython-39.pyc,, +numpy/distutils/command/__pycache__/install_data.cpython-39.pyc,, +numpy/distutils/command/__pycache__/install_headers.cpython-39.pyc,, +numpy/distutils/command/__pycache__/sdist.cpython-39.pyc,, +numpy/distutils/command/autodist.py,sha256=i2ip0Zru8_AFx3lNQhlZfj6o_vg-RQ8yu1WNstcIYhE,3866 +numpy/distutils/command/bdist_rpm.py,sha256=9uZfOzdHV0_PRUD8exNNwafc0qUqUjHuTDxQcZXLIbg,731 +numpy/distutils/command/build.py,sha256=48vCxFz9tA0EWAtP9BiFTzpz6l91qXa0z3O3pC_QMis,2627 +numpy/distutils/command/build_clib.py,sha256=w6wO5j0qdMdv_AV8Z-5I54VNhbJiO7Rr5yI6NGiePNE,18953 +numpy/distutils/command/build_ext.py,sha256=nm_RiC4SebdrXAqQBtBh1jRmonVpvO2My6E4DYQw6mE,32276 +numpy/distutils/command/build_py.py,sha256=xBHZCtx91GqucanjIBETPeXmR-gyUKPDyr1iMx1ARWE,1175 +numpy/distutils/command/build_scripts.py,sha256=AEQLNmO2v5N-GXl4lwd8v_nHlrauBx9Y-UudDcdCs_A,1714 +numpy/distutils/command/build_src.py,sha256=6hPzM9yEdLAs2U06qke9CRmrjbSRhNQ7yVj6kCBDtOw,31945 +numpy/distutils/command/config.py,sha256=DVm5hmolBpTcLTiHQC_mf0nLndAqn2fc1GTo4ucHt3I,21241 +numpy/distutils/command/config_compiler.py,sha256=I-xAL3JxaGFfpR4lg7g0tDdA_t7zCt-D4JtOACCP_Ak,4495 +numpy/distutils/command/develop.py,sha256=5ro-Sudt8l58JpKvH9FauH6vIfYRv2ohHLz-9eHytbc,590 +numpy/distutils/command/egg_info.py,sha256=n6trbjRfD1qWc_hRtMFkOJsg82BCiLvdl-NeXyuceGc,946 +numpy/distutils/command/install.py,sha256=s_0Uf39tFoRLUBlkrRK4YlROZsLdkI-IsuiFFaiS3ls,3157 +numpy/distutils/command/install_clib.py,sha256=q3yrfJY9EBaxOIYUQoiu2-juNKLKAKKfXC0nrd4t6z0,1439 +numpy/distutils/command/install_data.py,sha256=r8EVbIaXyN3aOmRugT3kp_F4Z03PsVX2l_x4RjTOWU4,872 +numpy/distutils/command/install_headers.py,sha256=g5Ag2H3j3dz-qSwWegxiZSAnvAf0thYYFwfPVHf9rxc,944 +numpy/distutils/command/sdist.py,sha256=XQM39b-MMO08bfE3SJrrtDWwX0XVnzCZqfAoVuuaFuE,760 +numpy/distutils/conv_template.py,sha256=hL0DDy7tMJ-5I-63BmkWkoLNX2c5GiQdQhj-XNG3Tm8,9865 +numpy/distutils/core.py,sha256=baf9SXdCVV8fA0lhLpOV4yEEWQP4ZwMTeeWoXHMg9LE,8374 +numpy/distutils/cpuinfo.py,sha256=frzEtCIEsSbQzGmNUXdWctiFqRcNFeLurzbvyCh8thY,23340 +numpy/distutils/exec_command.py,sha256=xMR7Dou5VZp2cP23xNd2TlXqexppXzBUd1wLMEAD2is,10668 +numpy/distutils/extension.py,sha256=IgpefxqMOPe_jaKvKgnY2xgzG5lULQ8pg-kjv4R7Swo,3461 +numpy/distutils/fcompiler/__init__.py,sha256=wMMu0T3XyXZ099KsrIh0zc3-IJCJCwqAHiMAtUuC9Dk,41123 +numpy/distutils/fcompiler/__pycache__/__init__.cpython-39.pyc,, +numpy/distutils/fcompiler/__pycache__/absoft.cpython-39.pyc,, +numpy/distutils/fcompiler/__pycache__/compaq.cpython-39.pyc,, +numpy/distutils/fcompiler/__pycache__/environment.cpython-39.pyc,, +numpy/distutils/fcompiler/__pycache__/fujitsu.cpython-39.pyc,, +numpy/distutils/fcompiler/__pycache__/g95.cpython-39.pyc,, +numpy/distutils/fcompiler/__pycache__/gnu.cpython-39.pyc,, +numpy/distutils/fcompiler/__pycache__/hpux.cpython-39.pyc,, +numpy/distutils/fcompiler/__pycache__/ibm.cpython-39.pyc,, +numpy/distutils/fcompiler/__pycache__/intel.cpython-39.pyc,, +numpy/distutils/fcompiler/__pycache__/lahey.cpython-39.pyc,, +numpy/distutils/fcompiler/__pycache__/mips.cpython-39.pyc,, 
+numpy/distutils/fcompiler/__pycache__/nag.cpython-39.pyc,, +numpy/distutils/fcompiler/__pycache__/none.cpython-39.pyc,, +numpy/distutils/fcompiler/__pycache__/nv.cpython-39.pyc,, +numpy/distutils/fcompiler/__pycache__/pathf95.cpython-39.pyc,, +numpy/distutils/fcompiler/__pycache__/pg.cpython-39.pyc,, +numpy/distutils/fcompiler/__pycache__/sun.cpython-39.pyc,, +numpy/distutils/fcompiler/__pycache__/vast.cpython-39.pyc,, +numpy/distutils/fcompiler/absoft.py,sha256=4UKxvpWQIphSdi6vJb3qILML_zyM3K_m7ddhAMS5dBI,5655 +numpy/distutils/fcompiler/compaq.py,sha256=BOHh-p68DhuLXO3pVKDEt0qFGp1qec803BmsmHentKg,4023 +numpy/distutils/fcompiler/environment.py,sha256=PVS1al3wahDNnneNVSl1sQhMPfz2dUXaIDVJfy0wZBU,3168 +numpy/distutils/fcompiler/fujitsu.py,sha256=g4dTLDFfLRAzhYayIwyHGBw1Y36DKtPOCYfA823ldNA,1379 +numpy/distutils/fcompiler/g95.py,sha256=1TJe4IynWYqqYBy8gJ-nz8WQ_TaSbv8k2UzUIY5Erqc,1372 +numpy/distutils/fcompiler/gnu.py,sha256=8PiIoJb9iLDvEv48cBYp61LYAeAT9B6Iu55S6sHE2iU,20801 +numpy/distutils/fcompiler/hpux.py,sha256=SLbDOPYgiixqE32GgUrAJjpDLFy9g7E01vGNZCGv6Pc,1394 +numpy/distutils/fcompiler/ibm.py,sha256=HARaSCruJSuHbS3LruVESj2cndZUKTHiJZBn7NU4vo0,3636 +numpy/distutils/fcompiler/intel.py,sha256=PgECyk3Q1RSYz9wiSZ7RrY0W5DbR_NujRTZe66QcziY,6756 +numpy/distutils/fcompiler/lahey.py,sha256=EV3Zhwq-iowWAu4BFBPv_UGJ-IB-qxlxmi6WU1qHDOs,1372 +numpy/distutils/fcompiler/mips.py,sha256=mlUNgGrRSLnNhtxQXWVfC9l4_OP2GMvOkgbZQwBon0A,1768 +numpy/distutils/fcompiler/nag.py,sha256=EUhxDAW6W8j8F4410lcG7Z5gR_vI4DnpCVWVyVxYTWc,2622 +numpy/distutils/fcompiler/none.py,sha256=auMK2ou1WtJ20LeMbwCZJ3XofpT9A0YYbMVd-62Mi_E,786 +numpy/distutils/fcompiler/nv.py,sha256=ml2xco_01pGH9x23Qv-3yalFzTk-GK45BVJoDMlGod4,1627 +numpy/distutils/fcompiler/pathf95.py,sha256=ipbaZIO8sqPJ1lUppOurnboiTwRzIasWNAJvKmktvv4,1094 +numpy/distutils/fcompiler/pg.py,sha256=cVcSFM9oR0KmO5AIb4Odw9OGslW6zvDGP88n-uEwxvQ,3696 +numpy/distutils/fcompiler/sun.py,sha256=JMdFfKldTYlfW1DxV7nR09k5PZypKLWpP7wmQzmlnH0,1628 +numpy/distutils/fcompiler/vast.py,sha256=JUGP68JGOUOBS9WbXftE-qCVUD13fpLyPnhpHfTL5y0,1719 +numpy/distutils/from_template.py,sha256=BL-vypfI0GNJrTo-nKs445liTW2Qdfvrsu8RMjATL5A,8174 +numpy/distutils/intelccompiler.py,sha256=77BDCj7_6Nnf92ZDeFQgA6aDKJGkzDQ7u0nuQGw1v8g,4345 +numpy/distutils/lib2def.py,sha256=HQ7i5FUtBcFGNlSlN20lgVtiBAHQbGXxmYdvkaJTjLI,3760 +numpy/distutils/line_endings.py,sha256=hlI71r840mhfu8lmzdHPVZ4NFm-kJDDUMV3lETblVTY,2109 +numpy/distutils/log.py,sha256=9tqE2Tq55mugFj_pn-RwV1xFoejmk0yWvVQHBL1e-Gc,2652 +numpy/distutils/mingw/gfortran_vs2003_hack.c,sha256=FDTA53KYTIhil9ytvZlocOqghQVp9LacLHn1IurV0wI,83 +numpy/distutils/mingw32ccompiler.py,sha256=TGQV2CtwcHmilVkU4eeNqGWvoo85ak1k3XRTJpcR1-k,26083 +numpy/distutils/misc_util.py,sha256=SpNRYiuLNokeywd5mSVWdg1aa1oA3EvRF4QHmuVGEZQ,89046 +numpy/distutils/msvc9compiler.py,sha256=bCtCVJmGrBHPm9sOoxa3oSrdrEVCNQFEM5O5hdqX8Hc,2255 +numpy/distutils/msvccompiler.py,sha256=sGGkjB-iSQFEfsMfQY8ZJfPKs6vm2DY9Y_OKi0Fk__0,1986 +numpy/distutils/npy_pkg_config.py,sha256=4CMG6sN7HAFSmw2ljPAAN5f04wdnZECkFXZcKoX06f0,13409 +numpy/distutils/numpy_distribution.py,sha256=nrdp8rlyjEBBV1tzzi5cE-aYeXB5U3X8T5-G0akXSoY,651 +numpy/distutils/pathccompiler.py,sha256=a5CYDXilCaIC85v0fVh-wrb0fClv0A7mPS87aF1inUc,734 +numpy/distutils/setup.py,sha256=zd5_kD7uTEOTgC8Fe93g9A_5AOBEySWwr-2OxHsBBEc,651 +numpy/distutils/system_info.py,sha256=Fzbn7ERDeLJ95Q1OLGT6yMyR_h5zJCvLud6IlSiV2F4,112677 +numpy/distutils/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
+numpy/distutils/tests/__pycache__/__init__.cpython-39.pyc,, +numpy/distutils/tests/__pycache__/test_build_ext.cpython-39.pyc,, +numpy/distutils/tests/__pycache__/test_ccompiler_opt.cpython-39.pyc,, +numpy/distutils/tests/__pycache__/test_ccompiler_opt_conf.cpython-39.pyc,, +numpy/distutils/tests/__pycache__/test_exec_command.cpython-39.pyc,, +numpy/distutils/tests/__pycache__/test_fcompiler.cpython-39.pyc,, +numpy/distutils/tests/__pycache__/test_fcompiler_gnu.cpython-39.pyc,, +numpy/distutils/tests/__pycache__/test_fcompiler_intel.cpython-39.pyc,, +numpy/distutils/tests/__pycache__/test_fcompiler_nagfor.cpython-39.pyc,, +numpy/distutils/tests/__pycache__/test_from_template.cpython-39.pyc,, +numpy/distutils/tests/__pycache__/test_mingw32ccompiler.cpython-39.pyc,, +numpy/distutils/tests/__pycache__/test_misc_util.cpython-39.pyc,, +numpy/distutils/tests/__pycache__/test_npy_pkg_config.cpython-39.pyc,, +numpy/distutils/tests/__pycache__/test_shell_utils.cpython-39.pyc,, +numpy/distutils/tests/__pycache__/test_system_info.cpython-39.pyc,, +numpy/distutils/tests/test_build_ext.py,sha256=te4h-jzABgSF_efL2LC-Hwc3hlq8lfqg5JEeHGQgV3A,2736 +numpy/distutils/tests/test_ccompiler_opt.py,sha256=0xoa9cyQCFpxalcSSw6fCMsUoQ4wbhTm2wNjQYzvAVQ,28683 +numpy/distutils/tests/test_ccompiler_opt_conf.py,sha256=LmFqpGwqGFgUwLQdV5xJQhs_xB9gnL7_P-7oOOUqa78,6521 +numpy/distutils/tests/test_exec_command.py,sha256=Ltd4T3A_t3Oo_QSYjOkWKqPj-LttXEumEVKhLxVfZEU,7515 +numpy/distutils/tests/test_fcompiler.py,sha256=SS5HOLIg0eqkmZTRKeWq9_ahW2tmV9c9piwYfzcBPmc,1320 +numpy/distutils/tests/test_fcompiler_gnu.py,sha256=RlRHZbyazgKGY17NmdYSF3ehO0M0xXN4UkbsJzJz4i8,2191 +numpy/distutils/tests/test_fcompiler_intel.py,sha256=4cppjLugoa8P4bjzYdiPxmyCywmP9plXOkfsklhnYsQ,1088 +numpy/distutils/tests/test_fcompiler_nagfor.py,sha256=ntyr8f-67dNI0OF_l6-aeTwu9wW-vnxpheqrc4cXAUI,1124 +numpy/distutils/tests/test_from_template.py,sha256=ZzUSEPyZIG4Zak3-TFqmRGXHMp58aKTuLKb0t-5XpDg,1147 +numpy/distutils/tests/test_mingw32ccompiler.py,sha256=7X8V4hLMtsNj1pYoLkSSla04gJu66e87E_k-6ce3PrA,1651 +numpy/distutils/tests/test_misc_util.py,sha256=YKK2WrJqVJ5o71mWL5oP0l-EVQmqKlf3XU8y7co0KYc,3300 +numpy/distutils/tests/test_npy_pkg_config.py,sha256=1pQh-mApHjj0y9Ba2tqns79U8dsfDpJ9zcPdsa2qbps,2641 +numpy/distutils/tests/test_shell_utils.py,sha256=okNSfjFSAvY3XyBsyZrKXAtV9RBmb7vX9o4ZLJc28Ds,2030 +numpy/distutils/tests/test_system_info.py,sha256=ZM_38mX67sI218ULIzY6akiMduDuEVelQruSYGo_kRo,11079 +numpy/distutils/unixccompiler.py,sha256=A76d9fTRPfO76_e9484HX9_efUlgFULKcFMdTUNJQVQ,5537 +numpy/doc/__init__.py,sha256=llSbqjSXybPuXqt6WJFZhgYnscgYl4m1tUBy_LhfCE0,534 +numpy/doc/__pycache__/__init__.cpython-39.pyc,, +numpy/doc/__pycache__/constants.cpython-39.pyc,, +numpy/doc/__pycache__/ufuncs.cpython-39.pyc,, +numpy/doc/constants.py,sha256=OtjYHSs9p_DxLDqhlbauLMt-WWVrnewaHlUJCi3Qw_Y,9592 +numpy/doc/ufuncs.py,sha256=jL-idm49Qd8xNns12ZPp533jorDuDnUN2I96hbmFZoo,5497 +numpy/dual.py,sha256=RgoFIabqn8Hu9lSRakjO1plfDdYBJQq_8Gn__rE8TqQ,2297 +numpy/f2py/__init__.py,sha256=EBF5R57RqAxJiUUrzvKqpNuJXQbnOgScPeJdCz0sVI0,5964 +numpy/f2py/__init__.pyi,sha256=4Sn6hsjJjdP27yOuBVKwcjHw7vBv5U_Tr4r4VdrY26Y,310 +numpy/f2py/__main__.py,sha256=XSvcMI54qWQiicJ51nRxK8L8PGzuUy3c2NGTVg1tzyk,89 +numpy/f2py/__pycache__/__init__.cpython-39.pyc,, +numpy/f2py/__pycache__/__main__.cpython-39.pyc,, +numpy/f2py/__pycache__/__version__.cpython-39.pyc,, +numpy/f2py/__pycache__/auxfuncs.cpython-39.pyc,, +numpy/f2py/__pycache__/capi_maps.cpython-39.pyc,, +numpy/f2py/__pycache__/cb_rules.cpython-39.pyc,, 
+numpy/f2py/__pycache__/cfuncs.cpython-39.pyc,, +numpy/f2py/__pycache__/common_rules.cpython-39.pyc,, +numpy/f2py/__pycache__/crackfortran.cpython-39.pyc,, +numpy/f2py/__pycache__/diagnose.cpython-39.pyc,, +numpy/f2py/__pycache__/f2py2e.cpython-39.pyc,, +numpy/f2py/__pycache__/f2py_testing.cpython-39.pyc,, +numpy/f2py/__pycache__/f90mod_rules.cpython-39.pyc,, +numpy/f2py/__pycache__/func2subr.cpython-39.pyc,, +numpy/f2py/__pycache__/rules.cpython-39.pyc,, +numpy/f2py/__pycache__/setup.cpython-39.pyc,, +numpy/f2py/__pycache__/use_rules.cpython-39.pyc,, +numpy/f2py/__version__.py,sha256=TisKvgcg4vh5Fptw2GS1JB_3bAQsWZIKhclEX6ZcAho,35 +numpy/f2py/auxfuncs.py,sha256=d8xZkZ2Jom_FX4ALxQO4v2Uqsfi62rZEiqSH30AGpyA,22701 +numpy/f2py/capi_maps.py,sha256=pGERvcXuv3wpq2ab3inTTTdv20G1XZz2tMVn0_yVYDM,32247 +numpy/f2py/cb_rules.py,sha256=BYaZ0xIcL2tsKbHL3aak61vXY3EChSQbgmJSZKf4Lw4,24862 +numpy/f2py/cfuncs.py,sha256=YwYubDuH_FGwm_uRSexDZEeXyLmuKXWb7voaSmeve9o,48115 +numpy/f2py/common_rules.py,sha256=pRpoVr457G3JcTD_W4V0AmgQBzAIKcqPw4j_Hv3elF0,5070 +numpy/f2py/crackfortran.py,sha256=kymhjPC-dymart4TGzL7M6riM_4HiorYUFb5Au_apVc,135413 +numpy/f2py/diagnose.py,sha256=LmGu6iZKr9WHfMZFIIEobZvRJd9Ktdqx0nYekWk4N7g,5384 +numpy/f2py/f2py2e.py,sha256=an58iVb6nFOhUwT4xYN6GJcmabo-fNTxXtj7CkUICVI,25033 +numpy/f2py/f2py_testing.py,sha256=vybOK-G_b0KYFNNZdhOCUf5pZExEdWfcIueIKODu024,1503 +numpy/f2py/f90mod_rules.py,sha256=k-AxjnKtrMDFO4LYXSeEnYtlxFFtYysONHjTICuMka4,10081 +numpy/f2py/func2subr.py,sha256=-e2VG2t0YDEOO_jIhLZRFLg5uoIoPGtcjir-4BzdNCs,9655 +numpy/f2py/rules.py,sha256=_i7_ToAc5iR4W4hZrMrAJ0qDBUouEKfH4HqtiCMNa7A,60225 +numpy/f2py/setup.py,sha256=Qoa3e1-WsSeayGHlVqZ9x5eIEOBeMAOtqTP331STQAk,2533 +numpy/f2py/src/fortranobject.c,sha256=IeezP69rVbNBR7umIUirrEZyGThU5Yhx3fV2olh9TMs,37559 +numpy/f2py/src/fortranobject.h,sha256=kwkaaaO0nsg7I-I0HpVAlOy-eiuvhKbhSZrEUWJvBaU,4656 +numpy/f2py/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/f2py/tests/__pycache__/__init__.cpython-39.pyc,, +numpy/f2py/tests/__pycache__/test_abstract_interface.cpython-39.pyc,, +numpy/f2py/tests/__pycache__/test_array_from_pyobj.cpython-39.pyc,, +numpy/f2py/tests/__pycache__/test_assumed_shape.cpython-39.pyc,, +numpy/f2py/tests/__pycache__/test_block_docstring.cpython-39.pyc,, +numpy/f2py/tests/__pycache__/test_callback.cpython-39.pyc,, +numpy/f2py/tests/__pycache__/test_common.cpython-39.pyc,, +numpy/f2py/tests/__pycache__/test_compile_function.cpython-39.pyc,, +numpy/f2py/tests/__pycache__/test_crackfortran.cpython-39.pyc,, +numpy/f2py/tests/__pycache__/test_kind.cpython-39.pyc,, +numpy/f2py/tests/__pycache__/test_mixed.cpython-39.pyc,, +numpy/f2py/tests/__pycache__/test_module_doc.cpython-39.pyc,, +numpy/f2py/tests/__pycache__/test_parameter.cpython-39.pyc,, +numpy/f2py/tests/__pycache__/test_quoted_character.cpython-39.pyc,, +numpy/f2py/tests/__pycache__/test_regression.cpython-39.pyc,, +numpy/f2py/tests/__pycache__/test_return_character.cpython-39.pyc,, +numpy/f2py/tests/__pycache__/test_return_complex.cpython-39.pyc,, +numpy/f2py/tests/__pycache__/test_return_integer.cpython-39.pyc,, +numpy/f2py/tests/__pycache__/test_return_logical.cpython-39.pyc,, +numpy/f2py/tests/__pycache__/test_return_real.cpython-39.pyc,, +numpy/f2py/tests/__pycache__/test_semicolon_split.cpython-39.pyc,, +numpy/f2py/tests/__pycache__/test_size.cpython-39.pyc,, +numpy/f2py/tests/__pycache__/test_string.cpython-39.pyc,, +numpy/f2py/tests/__pycache__/util.cpython-39.pyc,, 
+numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c,sha256=56WWwGvAipTqHyshtz8jLLZ7gNHpzC5h_jCyIciFGLc,7511 +numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap,sha256=zfuOShmuotzcLIQDnVFaARwvM66iLrOYzpquIGDbiKU,30 +numpy/f2py/tests/src/assumed_shape/foo_free.f90,sha256=fqbSr7VlKfVrBulFgQtQA9fQf0mQvVbLi94e4FTST3k,494 +numpy/f2py/tests/src/assumed_shape/foo_mod.f90,sha256=9pbi88-uSNP5IwS49Kim982jDAuopo3tpEhg2SOU7no,540 +numpy/f2py/tests/src/assumed_shape/foo_use.f90,sha256=9Cl1sdrihB8cCSsjoQGmOO8VRv9ni8Fjr0Aku1UdEWM,288 +numpy/f2py/tests/src/assumed_shape/precision.f90,sha256=3L_F7n5ju9F0nxw95uBUaPeuiDOw6uHvB580eIj7bqI,134 +numpy/f2py/tests/src/common/block.f,sha256=tcGKa42S-6bfA6fybpM0Su_xjysEVustkEJoF51o_pE,235 +numpy/f2py/tests/src/kind/foo.f90,sha256=6_zq3OAWsuNJ5ftGTQAEynkHy-MnuLgBXmMIgbvL7yU,367 +numpy/f2py/tests/src/mixed/foo.f,sha256=Zgn0xDhhzfas3HrzgVSxIL1lGEF2mFRVohrvXN1thU0,90 +numpy/f2py/tests/src/mixed/foo_fixed.f90,sha256=6eEEYCH71gPp6lZ6e2afLrfS6F_fdP7GZDbgGJJ_6ns,187 +numpy/f2py/tests/src/mixed/foo_free.f90,sha256=UC6iVRcm0-aVXAILE5jZhivoGQbKU-prqv59HTbxUJA,147 +numpy/f2py/tests/src/module_data/mod.mod,sha256=EkjrU7NTZrOH68yKrz6C_eyJMSFSxGgC2yMQT9Zscek,412 +numpy/f2py/tests/src/module_data/module_data_docstring.f90,sha256=-asnMH7vZMwVIeMU2YiLWgYCUUUxZgPTpbAomgWByHs,236 +numpy/f2py/tests/src/parameter/constant_both.f90,sha256=L0rG6-ClvHx7Qsch46BUXRi_oIEL0uw5dpRHdOUQuv0,1996 +numpy/f2py/tests/src/parameter/constant_compound.f90,sha256=lAT76HcXGMgr1NfKof-RIX3W2P_ik1PPqkRdJ6EyBmM,484 +numpy/f2py/tests/src/parameter/constant_integer.f90,sha256=42jROArrG7vIag9wFa_Rr5DBnnNvGsrEUgpPU14vfIo,634 +numpy/f2py/tests/src/parameter/constant_non_compound.f90,sha256=u9MRf894Cw0MVlSOUbMSnFSHP4Icz7RBO21QfMkIl-Q,632 +numpy/f2py/tests/src/parameter/constant_real.f90,sha256=QoPgKiHWrwI7w5ctYZugXWzaQsqSfGMO7Jskbg4CLTc,633 +numpy/f2py/tests/src/regression/inout.f90,sha256=TlMxJjhjjiuLI--Tg2LshLnbfZpiKz37EpR_tPKKSx8,286 +numpy/f2py/tests/src/size/foo.f90,sha256=nK_767f1TtqVr-dMalNkXmcKbSbLCiabhRkxSDCzLz0,859 +numpy/f2py/tests/src/string/char.f90,sha256=X_soOEV8cKsVZefi3iLT7ilHljjvJJ_i9VEHWOt0T9Y,647 +numpy/f2py/tests/test_abstract_interface.py,sha256=GBxj3l7J02xQ4uE2t2LBK972OQ1Jl06tZgTbRN9FwTA,1883 +numpy/f2py/tests/test_array_from_pyobj.py,sha256=kUB0hCAGPEjC46GaAEV7vog9eeVLP4GDUUz2c6kRXkc,23406 +numpy/f2py/tests/test_assumed_shape.py,sha256=vQ2pligMnltD87SG0EektWnMbLE3Cgh7sd-58EQZEfA,1615 +numpy/f2py/tests/test_block_docstring.py,sha256=AFgfCimjB0gcmqfyHhTmfRVCrMZunkIueoZGrV9SbaA,650 +numpy/f2py/tests/test_callback.py,sha256=QFvuUSetf9OA5yD9ti5MhNitIJE7n9wZXM_wkVacz1U,8513 +numpy/f2py/tests/test_common.py,sha256=tRwTdz6aQ0RYREFC-T-SfEyq25wWYo5pknVAqvvvBjM,827 +numpy/f2py/tests/test_compile_function.py,sha256=d2zOi1oPKwqSMBILFSUuJeIX3BY6uAOBZnvYw5h3K54,4434 +numpy/f2py/tests/test_crackfortran.py,sha256=Ujax9P_Q9Sn9WFwR7v2vFJhh_kTSE6IroZ-Pk1vqOo8,4200 +numpy/f2py/tests/test_kind.py,sha256=eH_sM5X5wIwe3T9yVMH6DH8I4spsgRaeIork_FJG-8Q,1044 +numpy/f2py/tests/test_mixed.py,sha256=faYM1laPKwStbz-RY-6b9LCuj2kFojTPIR0wxsosXoI,946 +numpy/f2py/tests/test_module_doc.py,sha256=Qz_TonbInzgdY8KsnC2Y8mxmiqLWMruZKDPa0FPMbrE,980 +numpy/f2py/tests/test_parameter.py,sha256=S1K9K9Uj5dWjmWF8euBUMJdGilIqn1nNH2FftcS1INU,4026 +numpy/f2py/tests/test_quoted_character.py,sha256=81CR1ZIyKygz7noFJMcgaLdg18nY8zcFfrSN2L_U9xg,959 +numpy/f2py/tests/test_regression.py,sha256=UMVOFSIybDbxS7zs3aPh6dQ1jAOusWgUh8plI1cDvzQ,1865 +numpy/f2py/tests/test_return_character.py,sha256=bpuHEjBj53YpvsI_aLQOcSVdoGAEtgtmAK8kbHKdcgc,4064 
+numpy/f2py/tests/test_return_complex.py,sha256=jYcoM3pZj0opGXXF69EsGQ6HD5SPaRhBeGx3RhnJx8c,4778 +numpy/f2py/tests/test_return_integer.py,sha256=MpdNZLxeQwER4M4w5utHYe08gDzrjAWOq_z5wRO8hP8,4751 +numpy/f2py/tests/test_return_logical.py,sha256=uP9rWNyA3UARKwaQ6Fsc8grh72G5xnp9XXFSWLdPZQ8,5028 +numpy/f2py/tests/test_return_real.py,sha256=05OwUsgmI_G_pr197xUXgIgTNVOHygtY2kPqRiexeSY,5605 +numpy/f2py/tests/test_semicolon_split.py,sha256=Ap6S5tGL6R8I2i9iUlVxElW4IMNoz0LxIHK1hLIYZhQ,1577 +numpy/f2py/tests/test_size.py,sha256=ukbP0NlzqpfD6M_K58WK6IERSXc_6nHpyUqbYfEbk8M,1335 +numpy/f2py/tests/test_string.py,sha256=w2GiZmdwNAaWOBhak9IuUnnVktMhUq_Y0ajI0SX9YNY,632 +numpy/f2py/tests/util.py,sha256=QkIepB1CsYTTHl-F9Quvwg2uV5gBmpuGMn-VOVrJLvw,9947 +numpy/f2py/use_rules.py,sha256=ROzvjl0-GUOkT3kJS5KkYq8PsFxGjebA6uPq9-CyZEQ,3700 +numpy/fft/__init__.py,sha256=efF5_sqvdBfwIBHcEvkIb0a7YWZvR5oa4jh_aelUTpk,8387 +numpy/fft/__init__.pyi,sha256=ZLft_7Im6b85uM7JR-E-p2mQXx_QV6Hs8Mkq55zUcsw,815 +numpy/fft/__pycache__/__init__.cpython-39.pyc,, +numpy/fft/__pycache__/_pocketfft.cpython-39.pyc,, +numpy/fft/__pycache__/helper.cpython-39.pyc,, +numpy/fft/__pycache__/setup.cpython-39.pyc,, +numpy/fft/_pocketfft.py,sha256=Eig0b3LbIFwSRFVcB6XHbBpAbDFIjO5gijB5_cY8_1o,54321 +numpy/fft/_pocketfft_internal.cp39-win_amd64.pyd,sha256=qYZ-j1-z6mNFK16w6Lzdq2aqrSx4-K_BgfkAgwDUQ58,112640 +numpy/fft/helper.py,sha256=divqfKoOb0p-Zojqokhj3fQ5HzgWTigxDxZIQnN_TUQ,6375 +numpy/fft/setup.py,sha256=-aF3b5s_eL6Jl0rS_jMFtFPzbYyT55FQE2SgblAawf0,750 +numpy/fft/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/fft/tests/__pycache__/__init__.cpython-39.pyc,, +numpy/fft/tests/__pycache__/test_helper.cpython-39.pyc,, +numpy/fft/tests/__pycache__/test_pocketfft.cpython-39.pyc,, +numpy/fft/tests/test_helper.py,sha256=iopdl7-SHA5E1Ex13rYdceskgoDI5T8_Kg12LfqK1HA,6315 +numpy/fft/tests/test_pocketfft.py,sha256=GraHId7HW81sK62wFoF2y4nt2bL6MqkPjkG12OPmgNo,13135 +numpy/lib/__init__.py,sha256=D_APB98cfipadeVf9KXXCg4MRYwNl_JrjbrJ_Fityhc,1840 +numpy/lib/__init__.pyi,sha256=7GUq7KW43yGCXqYTQrjlG-tAknbcCwhYle1_aYC_D7k,5550 +numpy/lib/__pycache__/__init__.cpython-39.pyc,, +numpy/lib/__pycache__/_datasource.cpython-39.pyc,, +numpy/lib/__pycache__/_iotools.cpython-39.pyc,, +numpy/lib/__pycache__/_version.cpython-39.pyc,, +numpy/lib/__pycache__/arraypad.cpython-39.pyc,, +numpy/lib/__pycache__/arraysetops.cpython-39.pyc,, +numpy/lib/__pycache__/arrayterator.cpython-39.pyc,, +numpy/lib/__pycache__/format.cpython-39.pyc,, +numpy/lib/__pycache__/function_base.cpython-39.pyc,, +numpy/lib/__pycache__/histograms.cpython-39.pyc,, +numpy/lib/__pycache__/index_tricks.cpython-39.pyc,, +numpy/lib/__pycache__/mixins.cpython-39.pyc,, +numpy/lib/__pycache__/nanfunctions.cpython-39.pyc,, +numpy/lib/__pycache__/npyio.cpython-39.pyc,, +numpy/lib/__pycache__/polynomial.cpython-39.pyc,, +numpy/lib/__pycache__/recfunctions.cpython-39.pyc,, +numpy/lib/__pycache__/scimath.cpython-39.pyc,, +numpy/lib/__pycache__/setup.cpython-39.pyc,, +numpy/lib/__pycache__/shape_base.cpython-39.pyc,, +numpy/lib/__pycache__/stride_tricks.cpython-39.pyc,, +numpy/lib/__pycache__/twodim_base.cpython-39.pyc,, +numpy/lib/__pycache__/type_check.cpython-39.pyc,, +numpy/lib/__pycache__/ufunclike.cpython-39.pyc,, +numpy/lib/__pycache__/user_array.cpython-39.pyc,, +numpy/lib/__pycache__/utils.cpython-39.pyc,, +numpy/lib/_datasource.py,sha256=YoG5Pxly27YWu3asvyP0NM-37aaJpn5ggRGGDUSMb_Q,23372 +numpy/lib/_iotools.py,sha256=PYVfEUIITyUmIvWS8-0CSLegJjKgDysKwYmro1yVrZ8,31834 
+numpy/lib/_version.py,sha256=IjsC8gQRuFo_Ns0fSuF0BW7ndA3h9jQV5mClyWe9A8s,5010 +numpy/lib/_version.pyi,sha256=s4D_OzArROmCoR6bABfRb9dsL4AR3aZRA9f6s-GJ86U,720 +numpy/lib/arraypad.py,sha256=V4iW49CT3KyPHdVK-4Q2kjJqgEb-N35LusA6G-Q2LKU,32102 +numpy/lib/arraypad.pyi,sha256=RLtAByAYSFmjxQRnKNdQH62mc43wGU3XErU1XhqlWcU,101 +numpy/lib/arraysetops.py,sha256=1foGsO8myJ-SZ717DoOR0P-2mC1zpOVlKWoewUJN2Fk,27302 +numpy/lib/arraysetops.pyi,sha256=o_F0HewcHQUvE646-vyWi7Enyil9Qg59PrEcvq7Btt8,508 +numpy/lib/arrayterator.py,sha256=29pO5S0ciEZwt1402Q0-5cRbyKspV4tlPX1-m_D_Hgc,7282 +numpy/lib/arrayterator.pyi,sha256=g9PHQn19xAmf2yiFXkU45o1B-nODcqI1YocnP9_yxd4,1608 +numpy/lib/format.py,sha256=ypOTORtbtkVX4RCMtm5n42-PoOWeHBAO7djJB8gVCso,32367 +numpy/lib/format.pyi,sha256=jOb-vslp96sOS-c_Dm2wCfdlZMsgqPMTw03VSjjHdfM,907 +numpy/lib/function_base.py,sha256=k9eYMHMA14IikWwxGfu091DBBrd13_XKnwtaxbUk0II,166941 +numpy/lib/function_base.pyi,sha256=N2n2yxYqLKl3FJI5w41VJKSwOwqJNdutE8Ie_K79E94,2048 +numpy/lib/histograms.py,sha256=pCC7Ip_S_OynqkpC8epH1uYTv0-uqWyClvdESw5n8jI,41344 +numpy/lib/histograms.pyi,sha256=QhuO5aNp34_F3CnX6UlHxGcCYS1_6BCnXyqipCtbSJ8,287 +numpy/lib/index_tricks.py,sha256=IzC_xMinXX6lGpN6KYUHoEgSCfOS68ifknovhlhk1FY,31651 +numpy/lib/index_tricks.pyi,sha256=5oFNMKTBie2AVWHh_yJ1xKczMsPIDy85y75Ia3EKuTE,5222 +numpy/lib/mixins.py,sha256=j_Hfrfp6d6lJAVFlM9y2lu6HHsK_yDInyU74SJWsZDQ,7228 +numpy/lib/mixins.pyi,sha256=i8hQRpFQ9S3hxn345gy0rglF2MX1JV2G92kmEem6AMM,2215 +numpy/lib/nanfunctions.py,sha256=afrJSX1kBzNARHWTC8aRi0JslqTCbXrIeMeb-UDxdcE,60822 +numpy/lib/nanfunctions.pyi,sha256=uqVzsy8f90Q8r12Ds3PlRN1pRuMHFhWsurukYy3cAbI,1126 +numpy/lib/npyio.py,sha256=VF2FrmJh4rlUklDLKOr6TJI_x2aCkODMnUTUnywLMI0,91905 +numpy/lib/npyio.pyi,sha256=twUZovgZhpqp681jRTcnui40RaKBPHqSGTw1HRzEb64,2329 +numpy/lib/polynomial.py,sha256=L1KVbY5xF-tJrusWP17lxb4JkGq6J5ZwQdTzJveyhbQ,45257 +numpy/lib/polynomial.pyi,sha256=VnAB8JNkrMyahTaUzopkbHHBtzT96tkAkkrLmTYQj14,434 +numpy/lib/recfunctions.py,sha256=G_GYahtsPjRK7uknYmXMpm1p96eHPzguzWBVtldi_uw,58119 +numpy/lib/scimath.py,sha256=zn_g7YLekkk0MebyFyz20w_viaVsaV7woD6y-wIlSFA,15492 +numpy/lib/scimath.pyi,sha256=V3Qf4OCi6OZzAR2efTprM4fd8b5aipCu0wPJybDvHt8,225 +numpy/lib/setup.py,sha256=1s3corE4egZnDZ6I8au_mx7muRPgb3ZxL4Ko2SHt_2Q,417 +numpy/lib/shape_base.py,sha256=ECamk2gUCc4bqtPz2rU2dOBnTD3QjOy_MeIof08hgC0,39631 +numpy/lib/shape_base.pyi,sha256=dIoBEtLUf1HmG-drc2WeRqkoKlfKZoIEdxVRUQoAeRQ,750 +numpy/lib/stride_tricks.py,sha256=-hzSjUTw_0VXfzdHZ3afGQlP6UP3oHBdX0VcGE0PMgg,18389 +numpy/lib/stride_tricks.pyi,sha256=4d2vUerH3CiPQGTd9gK_60DnUb8lBb3dWcT6IjBqrsY,526 +numpy/lib/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/lib/tests/__pycache__/__init__.cpython-39.pyc,, +numpy/lib/tests/__pycache__/test__datasource.cpython-39.pyc,, +numpy/lib/tests/__pycache__/test__iotools.cpython-39.pyc,, +numpy/lib/tests/__pycache__/test__version.cpython-39.pyc,, +numpy/lib/tests/__pycache__/test_arraypad.cpython-39.pyc,, +numpy/lib/tests/__pycache__/test_arraysetops.cpython-39.pyc,, +numpy/lib/tests/__pycache__/test_arrayterator.cpython-39.pyc,, +numpy/lib/tests/__pycache__/test_financial_expired.cpython-39.pyc,, +numpy/lib/tests/__pycache__/test_format.cpython-39.pyc,, +numpy/lib/tests/__pycache__/test_function_base.cpython-39.pyc,, +numpy/lib/tests/__pycache__/test_histograms.cpython-39.pyc,, +numpy/lib/tests/__pycache__/test_index_tricks.cpython-39.pyc,, +numpy/lib/tests/__pycache__/test_io.cpython-39.pyc,, +numpy/lib/tests/__pycache__/test_mixins.cpython-39.pyc,, 
+numpy/lib/tests/__pycache__/test_nanfunctions.cpython-39.pyc,, +numpy/lib/tests/__pycache__/test_packbits.cpython-39.pyc,, +numpy/lib/tests/__pycache__/test_polynomial.cpython-39.pyc,, +numpy/lib/tests/__pycache__/test_recfunctions.cpython-39.pyc,, +numpy/lib/tests/__pycache__/test_regression.cpython-39.pyc,, +numpy/lib/tests/__pycache__/test_shape_base.cpython-39.pyc,, +numpy/lib/tests/__pycache__/test_stride_tricks.cpython-39.pyc,, +numpy/lib/tests/__pycache__/test_twodim_base.cpython-39.pyc,, +numpy/lib/tests/__pycache__/test_type_check.cpython-39.pyc,, +numpy/lib/tests/__pycache__/test_ufunclike.cpython-39.pyc,, +numpy/lib/tests/__pycache__/test_utils.cpython-39.pyc,, +numpy/lib/tests/data/py2-objarr.npy,sha256=F4cyUC-_TB9QSFLAo2c7c44rC6NUYIgrfGx9PqWPSKk,258 +numpy/lib/tests/data/py2-objarr.npz,sha256=xo13HBT0FbFZ2qvZz0LWGDb3SuQASSaXh7rKfVcJjx4,366 +numpy/lib/tests/data/py3-objarr.npy,sha256=pTTVh8ezp-lwAK3fkgvdKU8Arp5NMKznVD-M6Ex_uA0,341 +numpy/lib/tests/data/py3-objarr.npz,sha256=qQR0gS57e9ta16d_vCQjaaKM74gPdlwCPkp55P-qrdw,449 +numpy/lib/tests/data/python3.npy,sha256=X0ad3hAaLGXig9LtSHAo-BgOvLlFfPYMnZuVIxRmj-0,96 +numpy/lib/tests/data/win64python2.npy,sha256=agOcgHVYFJrV-nrRJDbGnUnF4ZTPYXuSeF-Mtg7GMpc,96 +numpy/lib/tests/test__datasource.py,sha256=OYm6qsPy7Q8yXV_5GfSMMNQKon4izf_T9ULaqjHLJA4,10837 +numpy/lib/tests/test__iotools.py,sha256=q44VFSi9VzWaf_dJ-MGBtYA7z7TFR0j0AF-rbzhLXoo,14096 +numpy/lib/tests/test__version.py,sha256=v2TOlH4f1Pmzxn1HWby3eBgLO9tGnhwH2LvBXlXtHP4,2063 +numpy/lib/tests/test_arraypad.py,sha256=ZoqM25xb5iI_K6ampX5cov2zFUwQZ8i0Xu3gY_ncdk0,55647 +numpy/lib/tests/test_arraysetops.py,sha256=zd0sx_8ermgx028uEAtpr-CpMbdO8Ox70MGsOrdEO-w,29196 +numpy/lib/tests/test_arrayterator.py,sha256=IRVmzxbr9idboJjOHKuX_8NQhMAKs7pD1xWqmU3ZERw,1337 +numpy/lib/tests/test_financial_expired.py,sha256=JfDHAoEDLPcKzcpzRsj8pijDSaFmQRWM6BiOjL4QFTs,371 +numpy/lib/tests/test_format.py,sha256=qHkj1XiN-t24ExibNg4a4ydxEpK5lic7sudOBtmsjb4,39199 +numpy/lib/tests/test_function_base.py,sha256=nV-9JDwnQcYxRW_nIlUXb0hoUBIhZHXZKXs2ivY-Dpc,139502 +numpy/lib/tests/test_histograms.py,sha256=_uKM8dtMWCwGtIgIwog3s3jyPb8w804TWqy9iOkYeSI,34510 +numpy/lib/tests/test_index_tricks.py,sha256=qw8TrxlqAm86SPWqLclufJkHxKhkjhvOXgRHjmVGdwk,19490 +numpy/lib/tests/test_io.py,sha256=ZEYwCEDx8SEa_EPhHjCRLlNsa-ONI1Cis_opkGs7V3Y,105599 +numpy/lib/tests/test_mixins.py,sha256=nIec_DZIDx7ONnlpq_Y2TLkIULAPvQ7LPqtMwEHuV4U,7246 +numpy/lib/tests/test_nanfunctions.py,sha256=ZAo5IEvXFEJE5s0RtiAMJTxTUc4D0g3nXCiCH6NNPNI,39578 +numpy/lib/tests/test_packbits.py,sha256=XpFIaL8mOWfzD3WQaxd6WrDFWh4Mc51EPXIOqxt3wS0,17922 +numpy/lib/tests/test_polynomial.py,sha256=fxIQBimaH0ZYXcWJYPBBtpWDpYzstABF_BBROu-vCfc,10995 +numpy/lib/tests/test_recfunctions.py,sha256=f9J49n_8ot0zG0Bw4XXFx3l1XN2VeAu9ROgn0dV4GLY,42134 +numpy/lib/tests/test_regression.py,sha256=RnyyMtL-Ds3GBuPl1j3QLBjqSBfhwkOozCY10Bk8v1A,8521 +numpy/lib/tests/test_shape_base.py,sha256=kC4k86CdiwUoqBs_KjAc_JekFJLVtlbanlPrXXXpWRU,25020 +numpy/lib/tests/test_stride_tricks.py,sha256=1zeBcvhltePbeE6SBoF4gomAYaZzwaHjvbWqcgI2JiY,23494 +numpy/lib/tests/test_twodim_base.py,sha256=USISKs6mDYgEl20k4_k899_pWSkFjZkPkafA_BUQxng,18890 +numpy/lib/tests/test_type_check.py,sha256=ffuA-ndaMsUb0IvPalBMwGkoukIP9OZVJXCuq299qB8,15597 +numpy/lib/tests/test_ufunclike.py,sha256=rpZTDbKPBVg5-byY1SKhkukyxB2Lvonw15ZARPE3mc0,3382 +numpy/lib/tests/test_utils.py,sha256=C5Y3YQQ6wIWNklf4Mfp-Sfx6RL60nvgjJF1ne_XAPzA,4734 +numpy/lib/twodim_base.py,sha256=QHOkarcWyqgRNuEtK7iH0QCkBldzpsVJCth6LkMWIDY,30000 
+numpy/lib/twodim_base.pyi,sha256=TNbUmaUavWGI0xIPHdPoXSzZhoF9Wyps5lUmoiVybfs,891 +numpy/lib/type_check.py,sha256=YSoY_-Dv0oz7XZXZ9--ZVsACp7a6k0jNzKoK7bYbn5w,21547 +numpy/lib/type_check.pyi,sha256=TPk5lgZopLUW4jqPoqpjGUyFlsP-Jy61XSWR5jKDSbE,480 +numpy/lib/ufunclike.py,sha256=snevUCqlqCQKAlzz5f8DdFXkAarEPafG8CHmzXMs-OI,8299 +numpy/lib/ufunclike.pyi,sha256=aN1DpYuDcQ0_JxUyMv93sG6AxNHnsKpCyggbV53rR_c,1377 +numpy/lib/user_array.py,sha256=5yqkyjCmUIASGNx2bt7_ZMWJQJszkbD1Kn06qqv7POA,8007 +numpy/lib/utils.py,sha256=lgLRpIorClD1JCB5x359ygcApBY_AR4o5_EpH54iwRI,34211 +numpy/lib/utils.pyi,sha256=5wHvhBqPVk9Guy3dNYGUwyI3dXQ6Qb59uTilS_5nMHM,2629 +numpy/linalg/__init__.py,sha256=EefgbxHkx8sLgXXLtpD8i8A_SCx-V4uz1Z_aQkmW6Ec,1893 +numpy/linalg/__init__.pyi,sha256=4Trq82fvQYtnl8UYf05xD7noJ2d1M-RLnMMOlQ-8qXk,722 +numpy/linalg/__pycache__/__init__.cpython-39.pyc,, +numpy/linalg/__pycache__/linalg.cpython-39.pyc,, +numpy/linalg/__pycache__/setup.cpython-39.pyc,, +numpy/linalg/_umath_linalg.cp39-win_amd64.pyd,sha256=aT-utkHdMdaouudfOf-CR7L3hnQIWvn0mP7VeB0_NEI,155648 +numpy/linalg/lapack_lite.cp39-win_amd64.pyd,sha256=9npBBw9Tw2yz_WzmPmpL6iGI0rLk-8eB0he2WdYygmg,22016 +numpy/linalg/linalg.py,sha256=-OW4XAym0jdfm2xT5MFumfHV7-r3hYwHNX4j6wWVn2E,92420 +numpy/linalg/setup.py,sha256=SLraNgsTKvTU99ypgjhhrrx3OVP0pYtpVvK_lOrBKmk,2951 +numpy/linalg/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/linalg/tests/__pycache__/__init__.cpython-39.pyc,, +numpy/linalg/tests/__pycache__/test_build.cpython-39.pyc,, +numpy/linalg/tests/__pycache__/test_deprecations.cpython-39.pyc,, +numpy/linalg/tests/__pycache__/test_linalg.cpython-39.pyc,, +numpy/linalg/tests/__pycache__/test_regression.cpython-39.pyc,, +numpy/linalg/tests/test_build.py,sha256=jh_94BhgTH15YQ5k8RQLhN2xeeeFKRmkK5Ab_xE8UYM,1683 +numpy/linalg/tests/test_deprecations.py,sha256=GaeE3JnQlJLoAfbY93LmgCFUlV5M8IFmQ7EhF4WbqwU,660 +numpy/linalg/tests/test_linalg.py,sha256=n9OrL9HbnIfTH8LeC01QjfmnfsIr7mOIipdIWb9_7Ps,76589 +numpy/linalg/tests/test_regression.py,sha256=T0iQkRUhOoxIHHro5kyxU7GFRhN3pZov6UaJGXxtvu0,5745 +numpy/ma/__init__.py,sha256=9i-au2uOZ_K9q2t9Ezc9nEAS74Y4TXQZMoP9601UitU,1458 +numpy/ma/__init__.pyi,sha256=xynJTJEqRvqALgPJW1wHvqsXejXADDAXoCgOXxRyfxk,6230 +numpy/ma/__pycache__/__init__.cpython-39.pyc,, +numpy/ma/__pycache__/bench.cpython-39.pyc,, +numpy/ma/__pycache__/core.cpython-39.pyc,, +numpy/ma/__pycache__/extras.cpython-39.pyc,, +numpy/ma/__pycache__/mrecords.cpython-39.pyc,, +numpy/ma/__pycache__/setup.cpython-39.pyc,, +numpy/ma/__pycache__/testutils.cpython-39.pyc,, +numpy/ma/__pycache__/timer_comparison.cpython-39.pyc,, +numpy/ma/bench.py,sha256=wOg52Iu6JHqHWzKS2N0LpdPPUJciEZAePgmZd_mMgpM,5014 +numpy/ma/core.py,sha256=44E3T85hj7JVtxQtq6XqBkJCJW6l8u93ngmHXlBhqxs,272520 +numpy/ma/core.pyi,sha256=pmPYXKnWGLnEfKlYPMpe4XT47oAN94XW203wPCRno74,14594 +numpy/ma/extras.py,sha256=UBULC-EfycB5SBSVISS1LsBsaAk8AqBUQSlMQbx4su0,60237 +numpy/ma/extras.pyi,sha256=cQzyeK_KloJ-jJ7M5n0Bs0ZbmIrlakiaJB1QCBSbBjs,2682 +numpy/ma/mrecords.py,sha256=8E37hDvjnenk0eL4p9zCe1Gnvk3nSisgfC_uRAOkp7o,27460 +numpy/ma/mrecords.pyi,sha256=awoJxi9wnBQAjkhuxdFXL6s7v0z7QhsAAXF8TboI7NI,1958 +numpy/ma/setup.py,sha256=DCi5FtZTlkhR3ByJH5Sp5B962bfcWpaOjA-y7ueyoog,430 +numpy/ma/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/ma/tests/__pycache__/__init__.cpython-39.pyc,, +numpy/ma/tests/__pycache__/test_core.cpython-39.pyc,, +numpy/ma/tests/__pycache__/test_deprecations.cpython-39.pyc,, +numpy/ma/tests/__pycache__/test_extras.cpython-39.pyc,, 
+numpy/ma/tests/__pycache__/test_mrecords.cpython-39.pyc,, +numpy/ma/tests/__pycache__/test_old_ma.cpython-39.pyc,, +numpy/ma/tests/__pycache__/test_regression.cpython-39.pyc,, +numpy/ma/tests/__pycache__/test_subclassing.cpython-39.pyc,, +numpy/ma/tests/test_core.py,sha256=GgPwt-Ha-V5-2hNiJhvTEnmoyVSmHEidPwmKU1eRup8,207373 +numpy/ma/tests/test_deprecations.py,sha256=Dv-fqcxKJ_tfxyk-DyK2rA5onOl-7YC06Pu9nkvECt0,2326 +numpy/ma/tests/test_extras.py,sha256=rNvSgz32IKuEBsMFqDVPsDRt16uhExz5AYz-MKNffBs,69500 +numpy/ma/tests/test_mrecords.py,sha256=d6_ClqAOTNUi5_mtnqMXKHvMaHXuIkWt3PRsyCw0ZBc,20376 +numpy/ma/tests/test_old_ma.py,sha256=v99zHnxhbwdxqnzmUCZFy5hbzAPkZG6UcA3ijmnuvlE,33123 +numpy/ma/tests/test_regression.py,sha256=Hi-p5QdcivsxUDSpTl3MGtAnmJCJPhhIj3c5nYI9rw8,3170 +numpy/ma/tests/test_subclassing.py,sha256=pVNZLKDrptdlP-xuzyPwnXfDBGnrZJdT9ml-o1nXkbw,12988 +numpy/ma/testutils.py,sha256=5nMEDNCI9e5KWPGQqFhi2_HPHJO5mZ_XQaMXwE4Fhl4,10527 +numpy/ma/timer_comparison.py,sha256=xhRDTkkqvVLvB5HeFKIQqGuicRerabKKX3VmBUGc4Zs,16101 +numpy/matlib.py,sha256=l-292Lvk_yUCK5Y_U9u1Xa8grW8Ss0uh23o8kj-Hhd8,10741 +numpy/matrixlib/__init__.py,sha256=OYwN1yrpX0doHcXpzdRm2moAUO8BCmgufnmd6DS43pI,228 +numpy/matrixlib/__init__.pyi,sha256=nxNLVudzbEgUYgknUScLKCA8F1SbRTxwEbb_dtPtU5c,197 +numpy/matrixlib/__pycache__/__init__.cpython-39.pyc,, +numpy/matrixlib/__pycache__/defmatrix.cpython-39.pyc,, +numpy/matrixlib/__pycache__/setup.cpython-39.pyc,, +numpy/matrixlib/defmatrix.py,sha256=zDXeDEu5DcW3R5ijl_MjGzp9bXM36uhWm3s0hbMTzJc,31780 +numpy/matrixlib/setup.py,sha256=DEY5vWe-ReFP31junY0nZ7HwDpRIVuHLUtiTXL_Kr3A,438 +numpy/matrixlib/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/matrixlib/tests/__pycache__/__init__.cpython-39.pyc,, +numpy/matrixlib/tests/__pycache__/test_defmatrix.cpython-39.pyc,, +numpy/matrixlib/tests/__pycache__/test_interaction.cpython-39.pyc,, +numpy/matrixlib/tests/__pycache__/test_masked_matrix.cpython-39.pyc,, +numpy/matrixlib/tests/__pycache__/test_matrix_linalg.cpython-39.pyc,, +numpy/matrixlib/tests/__pycache__/test_multiarray.cpython-39.pyc,, +numpy/matrixlib/tests/__pycache__/test_numeric.cpython-39.pyc,, +numpy/matrixlib/tests/__pycache__/test_regression.cpython-39.pyc,, +numpy/matrixlib/tests/test_defmatrix.py,sha256=nbY_HkwzoJbhYhACiEN-cZmR644sVJvMKWUcsANPayQ,15435 +numpy/matrixlib/tests/test_interaction.py,sha256=C1YtIubO6Qh8RR-XONzo8Mle4bu4SvwsvBnB0x0Gy4g,12229 +numpy/matrixlib/tests/test_masked_matrix.py,sha256=aKC-imVpBSyjnokr0Ntn-e47P1MXhKzOSnGz1ji9J_c,9156 +numpy/matrixlib/tests/test_matrix_linalg.py,sha256=9S9Zrk8PMLfEEo9wBx5LyrV_TbXhI6r-Hc5t594lQFY,2152 +numpy/matrixlib/tests/test_multiarray.py,sha256=E5jvWX9ypWYNHH7iqAW3xz3tMrEV-oNgjN3_oPzZzws,570 +numpy/matrixlib/tests/test_numeric.py,sha256=l-LFBKPoP3_O1iea23MmaACBLx_tSSdPcUBBRTiTbzk,458 +numpy/matrixlib/tests/test_regression.py,sha256=FgYV3hwkpO0qyshDzG7n1JfQ-kKwnSZnA68jJHS7TeM,958 +numpy/polynomial/__init__.py,sha256=kcy-0dPj0GuuolC0BMw1RK4q1ONfJomoxYK1IyvnYGk,6973 +numpy/polynomial/__init__.pyi,sha256=U_WScHno76_O-SGxkqYYKW53z1d7cTetIA0ZCgDtlXw,661 +numpy/polynomial/__pycache__/__init__.cpython-39.pyc,, +numpy/polynomial/__pycache__/_polybase.cpython-39.pyc,, +numpy/polynomial/__pycache__/chebyshev.cpython-39.pyc,, +numpy/polynomial/__pycache__/hermite.cpython-39.pyc,, +numpy/polynomial/__pycache__/hermite_e.cpython-39.pyc,, +numpy/polynomial/__pycache__/laguerre.cpython-39.pyc,, +numpy/polynomial/__pycache__/legendre.cpython-39.pyc,, +numpy/polynomial/__pycache__/polynomial.cpython-39.pyc,, 
+numpy/polynomial/__pycache__/polyutils.cpython-39.pyc,, +numpy/polynomial/__pycache__/setup.cpython-39.pyc,, +numpy/polynomial/_polybase.py,sha256=IW0oYWkR-iLD7U58B_FHCw9Xmt6wINvtuV5HIYt8VkU,37539 +numpy/polynomial/_polybase.pyi,sha256=8lKGg2lbtBYaTDGcVHrqCf_40ymaQOX9CcMcYGs2nrk,2322 +numpy/polynomial/chebyshev.py,sha256=B9V6WF0813DjBlo1FMUKSiZeY5gj4GJahpdi1-zq9ls,64454 +numpy/polynomial/chebyshev.pyi,sha256=AFySGYufyEi-55ATupi4Ar6mGupv9z-kdkh2PamuY4Q,1444 +numpy/polynomial/hermite.py,sha256=CBxqSkeOzslg7Yl01PyEgJqJBB-s8v3S2wAgVCvGgfw,53822 +numpy/polynomial/hermite.pyi,sha256=XmFHSOJFH_dYGC8DBdR_qSqbL7UB93HkL0_J6ssL4Rw,1269 +numpy/polynomial/hermite_e.py,sha256=vpl-FsCXPCN0UGUbtrdO0PiJvkztiZa24FynSudfUYY,53938 +numpy/polynomial/hermite_e.pyi,sha256=tAA3oJRpGKENC0-WZ0EcXsj9PYnKctfdEO6N7_lmCEE,1290 +numpy/polynomial/laguerre.py,sha256=W9-FSh4_F2yVXP_fTpHkxXZ2fPUpAVG5rQ7Ph02QeNU,52098 +numpy/polynomial/laguerre.pyi,sha256=HnKXke8KWmU8uT_buXEegy7ZL4i0tncx5FbPqHqziKI,1230 +numpy/polynomial/legendre.py,sha256=EZFA3YV5yYq50It9xXdxoI6X814plgVPJr47n1hWvjw,52822 +numpy/polynomial/legendre.pyi,sha256=H1batvjMKK8qboBEwg9FcUFqxzmDsgkVlbjXHYcLz1I,1230 +numpy/polynomial/polynomial.py,sha256=359SQ7XXWeNrlx8Q5EM9O_W3K31QsblHA6MFCMHkFfI,50108 +numpy/polynomial/polynomial.pyi,sha256=8RdEStWi3j8uLzmS4ImkfvV6kOnpW0PXsuNnKLh1pco,1179 +numpy/polynomial/polyutils.py,sha256=gNWRDjoDsiFX5GfjR00zKCwRiB8DL7AFsd7ztD_fQo8,22857 +numpy/polynomial/polyutils.pyi,sha256=X7eGY_XslBGLXdJlPR71HLq6vfViZkUEkk0JaP70cHU,264 +numpy/polynomial/setup.py,sha256=3MP1VT1AVy8_MdlhErP-lS0Rq5fActYCkxYeHJU88Vg,383 +numpy/polynomial/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/polynomial/tests/__pycache__/__init__.cpython-39.pyc,, +numpy/polynomial/tests/__pycache__/test_chebyshev.cpython-39.pyc,, +numpy/polynomial/tests/__pycache__/test_classes.cpython-39.pyc,, +numpy/polynomial/tests/__pycache__/test_hermite.cpython-39.pyc,, +numpy/polynomial/tests/__pycache__/test_hermite_e.cpython-39.pyc,, +numpy/polynomial/tests/__pycache__/test_laguerre.cpython-39.pyc,, +numpy/polynomial/tests/__pycache__/test_legendre.cpython-39.pyc,, +numpy/polynomial/tests/__pycache__/test_polynomial.cpython-39.pyc,, +numpy/polynomial/tests/__pycache__/test_polyutils.cpython-39.pyc,, +numpy/polynomial/tests/__pycache__/test_printing.cpython-39.pyc,, +numpy/polynomial/tests/test_chebyshev.py,sha256=PI2XwvGGqQKEB1RxbsYRgeTG0cunB_8Otd9SBJozq-8,21141 +numpy/polynomial/tests/test_classes.py,sha256=J6abNzhFKwaagzv_x53xN0whCfr5nMwqZ---Jrd9oxY,18931 +numpy/polynomial/tests/test_hermite.py,sha256=zGYN24ia2xx4IH16D6sfAxIipnZrGrIe7D8QMJZPw4Y,19132 +numpy/polynomial/tests/test_hermite_e.py,sha256=5ZBtGi2gkeldYVSh8xlQOLUDW6fcT4YdZiTrB6AaGJU,19467 +numpy/polynomial/tests/test_laguerre.py,sha256=hBgo8w_3iEQosX2CqjTkUstTiuTPLZmfQNQtyKudZLo,18048 +numpy/polynomial/tests/test_legendre.py,sha256=v3ajjp0sg1o7njoLhbPftxaIWaxpY0pBp1suImZqJMw,19241 +numpy/polynomial/tests/test_polynomial.py,sha256=i7CaTnxfuLkVZzByFTvOVQo7HeMdQpGxgHRmHCFMTlw,20838 +numpy/polynomial/tests/test_polyutils.py,sha256=AQZZzxBCymhT1MTv8zCF_NC-nP0d9ybMNebT2d8P3j0,3700 +numpy/polynomial/tests/test_printing.py,sha256=o0D4bNFYlFo0YpPYMB6RZwH5zEFpE6qjdeZ-j-D0YsI,16176 +numpy/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/random/__init__.pxd,sha256=g3EaMi3yfmnqT-KEWj0cp6SWIxVN9ChFjEYXGOfOifE,445 +numpy/random/__init__.py,sha256=W_hFzGsKVQfdh3-U15gzsOKKAk8uZgioDkxKyuou4WA,7721 +numpy/random/__init__.pyi,sha256=BCSrAYcFNvoDTjsvhEn9cKXlQDYMmw0hLvN_lyZfSeo,2063 
+numpy/random/__pycache__/__init__.cpython-39.pyc,, +numpy/random/__pycache__/_pickle.cpython-39.pyc,, +numpy/random/__pycache__/setup.cpython-39.pyc,, +numpy/random/_bounded_integers.cp39-win_amd64.pyd,sha256=DacQovJxVmX3exmtnDI_rqL27ka9XPVRmKe0op7jru8,245760 +numpy/random/_bounded_integers.pxd,sha256=ugYlh8FvGggHCjEaqgO4S_MeRcZg3mw40sDYEqx07QQ,1698 +numpy/random/_common.cp39-win_amd64.pyd,sha256=CiZctVOkQuAmPFD24W4nVhDHDRR-9nAYptNvn66eqFI,182272 +numpy/random/_common.pxd,sha256=azT8PZpof-sD2L_zkPv2RmuWNYKV4kQaEEBXZuBHxf4,4853 +numpy/random/_examples/cffi/__pycache__/extending.cpython-39.pyc,, +numpy/random/_examples/cffi/__pycache__/parse.cpython-39.pyc,, +numpy/random/_examples/cffi/extending.py,sha256=BgydYEYBb6hDghMF-KQFVc8ssUU1F5Dg-3GyeilT3Vg,920 +numpy/random/_examples/cffi/parse.py,sha256=Jb5SRj8As2P07PtoeuiZHZctaY0oAb065PXbG4-T8fI,1884 +numpy/random/_examples/cython/__pycache__/setup.cpython-39.pyc,, +numpy/random/_examples/cython/extending.pyx,sha256=_pKBslBsb8rGeFZkuQeAIhBdeIjDcX3roGqV_Jev7NE,2371 +numpy/random/_examples/cython/extending_distributions.pyx,sha256=1zrMvPbKi0RinyZ93Syyy4OXGEOzAAKHSzTmDtN09ZY,3987 +numpy/random/_examples/cython/setup.py,sha256=ehxuaXq6DAeABIQGbyrpod6yoshuZn5AJ4Td41p_mrQ,1435 +numpy/random/_examples/numba/__pycache__/extending.cpython-39.pyc,, +numpy/random/_examples/numba/__pycache__/extending_distributions.cpython-39.pyc,, +numpy/random/_examples/numba/extending.py,sha256=vnqUqQRvlAI-3VYDzIxSQDlb-smBAyj8fA1-M2IrOQw,2041 +numpy/random/_examples/numba/extending_distributions.py,sha256=tU62JEW13VyNuBPhSpDWqd9W9ammHJCLv61apg90lMc,2101 +numpy/random/_generator.cp39-win_amd64.pyd,sha256=ojayss_vC3LUDkwiV1t1GZziDo3IFW8D3hSEkRSTFY8,685568 +numpy/random/_generator.pyi,sha256=COgYUgtPcS9sSPVSkE2VgRrvFHTwsF3YfzMjcA68qkk,22859 +numpy/random/_mt19937.cp39-win_amd64.pyd,sha256=y6NYDMWnWo-fLZYlxB8GrdG8aVvmgs_rqMdG9svCSt8,79360 +numpy/random/_mt19937.pyi,sha256=7b7q-B-AMHAtDhzEIpCVLXCoZokeuD1RbRouMhfOgb8,878 +numpy/random/_pcg64.cp39-win_amd64.pyd,sha256=J94IN3PJXIdG-OhQ7JFa3YcbLvrT1jnvgTmmb_EvvsA,86016 +numpy/random/_pcg64.pyi,sha256=6-wrSr83hA2hitD74ZoAIT7HcdKmCDWdBjbr4Jti5gw,1270 +numpy/random/_philox.cp39-win_amd64.pyd,sha256=ljIpmvmbGBeOMw9ywnTYTstw-2bst_EYCVNusw4T54c,71680 +numpy/random/_philox.pyi,sha256=PlG4BJZ2O5QbPD3QwGTq-WjowZmJAAsACJW2ws4gW38,1158 +numpy/random/_pickle.py,sha256=nusjpu3S4BDxHQ08GcUtJgUiiAboZo3mCEMI-CQj3GY,2385 +numpy/random/_sfc64.cp39-win_amd64.pyd,sha256=uy6K25OT0ctCeL9PfRwtFfIfcorFh50Dk6eUDvlXpVM,53760 +numpy/random/_sfc64.pyi,sha256=p4uv8SkUVp6EN5KuTVdxGnzV7692zzKNH9M1vDJ7GPE,869 +numpy/random/bit_generator.cp39-win_amd64.pyd,sha256=8PoGNFuf8DWw8FG4V6vIOsmXeNKlY9tWs4RgL9M3UCg,151552 +numpy/random/bit_generator.pxd,sha256=LJpeB-EKeVV8_JO69sS33XJLZQ3DAhrUCNzs_ei7AoI,1042 +numpy/random/bit_generator.pyi,sha256=DV-Kz9cp78EkVTj58FPohrW89TFidcE8qfXzLc76HwE,3714 +numpy/random/c_distributions.pxd,sha256=N8O_29MDGDOyMqmSKoaoooo4OJRRerBwsBXLw7_4j54,6147 +numpy/random/lib/npyrandom.lib,sha256=oTNyb4nZ_TYWa8luv4wv6Vg7ejTwK5_Zbtwpz2R-Ltk,110216 +numpy/random/mtrand.cp39-win_amd64.pyd,sha256=BsVCS1MXMBaE8AvrjZsgVnIKfT6MMzGhzi_1E4Y39O8,583680 +numpy/random/mtrand.pyi,sha256=x7UFMpCpYsQHBdkNSEATuagc3HpD6tSwvfCeDSvuYpg,20696 +numpy/random/setup.py,sha256=8hWFT0MpVx_7XgVDGDTy2ix9LKLI6BDrgWyn1vqF3sg,6323 +numpy/random/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/random/tests/__pycache__/__init__.cpython-39.pyc,, +numpy/random/tests/__pycache__/test_direct.cpython-39.pyc,, 
+numpy/random/tests/__pycache__/test_extending.cpython-39.pyc,, +numpy/random/tests/__pycache__/test_generator_mt19937.cpython-39.pyc,, +numpy/random/tests/__pycache__/test_generator_mt19937_regressions.cpython-39.pyc,, +numpy/random/tests/__pycache__/test_random.cpython-39.pyc,, +numpy/random/tests/__pycache__/test_randomstate.cpython-39.pyc,, +numpy/random/tests/__pycache__/test_randomstate_regression.cpython-39.pyc,, +numpy/random/tests/__pycache__/test_regression.cpython-39.pyc,, +numpy/random/tests/__pycache__/test_seed_sequence.cpython-39.pyc,, +numpy/random/tests/__pycache__/test_smoke.cpython-39.pyc,, +numpy/random/tests/data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/random/tests/data/__pycache__/__init__.cpython-39.pyc,, +numpy/random/tests/data/mt19937-testset-1.csv,sha256=bA5uuOXgLpkAwJjfV8oUePg3-eyaH4-gKe8AMcl2Xn0,16845 +numpy/random/tests/data/mt19937-testset-2.csv,sha256=SnOL1nyRbblYlC254PBUSc37NguV5xN-0W_B32IxDGE,16826 +numpy/random/tests/data/pcg64-testset-1.csv,sha256=wHoS7fIR3hMEdta7MtJ8EpIWX-Bw1yfSaVxiC15vxVs,24840 +numpy/random/tests/data/pcg64-testset-2.csv,sha256=6vlnVuW_4i6LEsVn6b40HjcBWWjoX5lboSCBDpDrzFs,24846 +numpy/random/tests/data/pcg64dxsm-testset-1.csv,sha256=Fhha5-jrCmRk__rsvx6CbDFZ7EPc8BOPDTh-myZLkhM,24834 +numpy/random/tests/data/pcg64dxsm-testset-2.csv,sha256=mNYzkCh0NMt1VvTrN08BbkpAbfkFxztNcsofgeW_0ns,24840 +numpy/random/tests/data/philox-testset-1.csv,sha256=QvpTynWHQjqTz3P2MPvtMLdg2VnM6TGTpXgp-_LeJ5g,24853 +numpy/random/tests/data/philox-testset-2.csv,sha256=-BNO1OCYtDIjnN5Q-AsQezBCGmVJUIs3qAMyj8SNtsA,24839 +numpy/random/tests/data/sfc64-testset-1.csv,sha256=sgkemW0lbKJ2wh1sBj6CfmXwFYTqfAk152P0r8emO38,24841 +numpy/random/tests/data/sfc64-testset-2.csv,sha256=mkp21SG8eCqsfNyQZdmiV41-xKcsV8eutT7rVnVEG50,24834 +numpy/random/tests/test_direct.py,sha256=O9v2JANiV6UAA2rOpDmzWWqp1-MYjYQVU-URpFIB8pw,15926 +numpy/random/tests/test_extending.py,sha256=FSv8xJt8fd1-clX6rOwrMhn66PfMn4JJRbJH46ptwVo,3598 +numpy/random/tests/test_generator_mt19937.py,sha256=11hEngjmEDQkdxRydGxR3XZcev6QF9W6xcaOcEQ_dmk,112084 +numpy/random/tests/test_generator_mt19937_regressions.py,sha256=CU1ez3LTV-_acUgUU2mtnvmhnzkUM2cJ8ypCt7KEM_w,5816 +numpy/random/tests/test_random.py,sha256=UE-hRj9h10Zcg3S9-1jeOKAnTpQtThJNSG5R4uPoB9c,71499 +numpy/random/tests/test_randomstate.py,sha256=tnozs-Dhi-g5kIo5C5aK1Um-bbhwNRtnzlPiGZVHOXo,83538 +numpy/random/tests/test_randomstate_regression.py,sha256=t-4vAs8vZbrmIHzMcSpzsgelRVu5gjYq8ZqDUY0PfHc,7758 +numpy/random/tests/test_regression.py,sha256=yRzgK9Nz0kc5gZgzoNKJExc8voyXIU0GworGVFQ2-QA,5602 +numpy/random/tests/test_seed_sequence.py,sha256=zWUvhWDxBmTN2WteSFQeJ29W0-2k3ZUze_3YtL4Kgms,3391 +numpy/random/tests/test_smoke.py,sha256=KggwK88hgKJyDscYQaODUCS7jLEk1QE9JW-jJRmvxig,29001 +numpy/rec.pyi,sha256=XJVhZN1ay9rOKD2Os5OUqbI8CEjo662scC5M0jk7150,1036 +numpy/setup.py,sha256=fBEg6S2o6H9mmLBfVJOonPwEIN70qlxH5TQVdSb3yao,1012 +numpy/testing/__init__.py,sha256=s9TGtO8tH0kG8emfVLC1pBRUrgsb4ueWAwtIb6XQXLg,586 +numpy/testing/__init__.pyi,sha256=z9L-JD8byJ1vJxeDshQdaWGs6mKxyCOZRyFDkN857iM,3171 +numpy/testing/__pycache__/__init__.cpython-39.pyc,, +numpy/testing/__pycache__/print_coercion_tables.cpython-39.pyc,, +numpy/testing/__pycache__/setup.cpython-39.pyc,, +numpy/testing/__pycache__/utils.cpython-39.pyc,, +numpy/testing/_private/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/testing/_private/__pycache__/__init__.cpython-39.pyc,, +numpy/testing/_private/__pycache__/decorators.cpython-39.pyc,, 
+numpy/testing/_private/__pycache__/noseclasses.cpython-39.pyc,, +numpy/testing/_private/__pycache__/nosetester.cpython-39.pyc,, +numpy/testing/_private/__pycache__/parameterized.cpython-39.pyc,, +numpy/testing/_private/__pycache__/utils.cpython-39.pyc,, +numpy/testing/_private/decorators.py,sha256=lKeZylqjrMV3zA_3IjBHX64mHNZ2Qo9PNFBeIQyzns4,11732 +numpy/testing/_private/noseclasses.py,sha256=OqMSWVZEg5GGgz8bsoDWKa3oxvXYoqPst6U423s8yBM,14880 +numpy/testing/_private/nosetester.py,sha256=35S7suLWVzYBZ-zOt3ugthNZcMBCP7AjA-Z-4jqmtus,19980 +numpy/testing/_private/parameterized.py,sha256=fqtI6SFJnyvr1tKlnfhPrA-IWqOUTdQg7c17Pa_X1T0,16593 +numpy/testing/_private/utils.py,sha256=PLF6jnvdnqz4522w8m5rOyojb-9ot_YNMuUlrIDkj2Y,87774 +numpy/testing/print_coercion_tables.py,sha256=GnC-nt2Sw2D7TJOoIgK3b2Nj63r9VFqGzPStMfdOBgs,6367 +numpy/testing/setup.py,sha256=LRtPNvo1V4Eh_gCq0ScQXSxv3P428c4rAKOVj1JK3MI,685 +numpy/testing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/testing/tests/__pycache__/__init__.cpython-39.pyc,, +numpy/testing/tests/__pycache__/test_doctesting.cpython-39.pyc,, +numpy/testing/tests/__pycache__/test_utils.cpython-39.pyc,, +numpy/testing/tests/test_doctesting.py,sha256=wUauOPx75yuJgIHNWlPCpF0EUIGKDI-nzlImCwGeYo0,1404 +numpy/testing/tests/test_utils.py,sha256=R5p8INOqFFVRMDzJzViy8xRAAdKQ0-dvSLqirRxcgyc,57259 +numpy/testing/utils.py,sha256=LhIaw_3hG1jUW98rUrBhOxRi1SjOUgVIr5--kwp0XZc,1260 +numpy/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/tests/__pycache__/__init__.cpython-39.pyc,, +numpy/tests/__pycache__/test_ctypeslib.cpython-39.pyc,, +numpy/tests/__pycache__/test_matlib.cpython-39.pyc,, +numpy/tests/__pycache__/test_numpy_version.cpython-39.pyc,, +numpy/tests/__pycache__/test_public_api.cpython-39.pyc,, +numpy/tests/__pycache__/test_reloading.cpython-39.pyc,, +numpy/tests/__pycache__/test_scripts.cpython-39.pyc,, +numpy/tests/__pycache__/test_warnings.cpython-39.pyc,, +numpy/tests/test_ctypeslib.py,sha256=4hdwdxDArcTLHpinigBSLLGb_ppGtksPWRsvkRD0Z84,12535 +numpy/tests/test_matlib.py,sha256=TUaQmGoz9fvQQ8FrooTq-g9BFiViGWjoTIGQSUUF6-Y,1910 +numpy/tests/test_numpy_version.py,sha256=N6ithCsZfV6UW5lVVMIPZQ-Yht5WIFplUOTSLmwtUNw,1619 +numpy/tests/test_public_api.py,sha256=2_mlvtnnu4R7RlRlLRCJqlLArvyjM7RvbBElDBeJJy4,15493 +numpy/tests/test_reloading.py,sha256=xNE63lrdOg7Adbv2C2Kzlk4L1Nx70wYS3cvJt0aGWbY,2147 +numpy/tests/test_scripts.py,sha256=KNmMlcO0sube9TLF3McxJZwzM87lLBzXZpjA6qJQkq0,1619 +numpy/tests/test_warnings.py,sha256=IMFVROBQqYZPibnHmwepGqEUQoBlDtdC8DlRulbMAd8,2354 +numpy/typing/__init__.py,sha256=QPVDyFm7QHzBlvT-tCh6SdoMTavfNEnswANurU0OWik,11485 +numpy/typing/__pycache__/__init__.cpython-39.pyc,, +numpy/typing/__pycache__/_add_docstring.cpython-39.pyc,, +numpy/typing/__pycache__/_array_like.cpython-39.pyc,, +numpy/typing/__pycache__/_callable.cpython-39.pyc,, +numpy/typing/__pycache__/_char_codes.cpython-39.pyc,, +numpy/typing/__pycache__/_dtype_like.cpython-39.pyc,, +numpy/typing/__pycache__/_extended_precision.cpython-39.pyc,, +numpy/typing/__pycache__/_generic_alias.cpython-39.pyc,, +numpy/typing/__pycache__/_nbit.cpython-39.pyc,, +numpy/typing/__pycache__/_scalars.cpython-39.pyc,, +numpy/typing/__pycache__/_shape.cpython-39.pyc,, +numpy/typing/__pycache__/mypy_plugin.cpython-39.pyc,, +numpy/typing/__pycache__/setup.cpython-39.pyc,, +numpy/typing/_add_docstring.py,sha256=fD2AVeTezkg0lZGx6PyH-X6ITSX74jb2dsZRiYeRGJo,3958 +numpy/typing/_array_like.py,sha256=JDE6zaRpeD0EKulmARIHIj_9-wd96Hmq-uH_7A_eBd0,3607 
+numpy/typing/_callable.py,sha256=A_C3e9Ttsr8n6954iMjzrw_c-3Blo3oCsU94yBh67So,13101 +numpy/typing/_char_codes.py,sha256=bq0jid3ff1iOQ9wxLfbK4s_IB-4eDpvPMdrzPpSTwYg,7678 +numpy/typing/_dtype_like.py,sha256=57fPa0ROlif2gES1WMZrPKu3qA9-XKFwExZVzponEvg,6108 +numpy/typing/_extended_precision.py,sha256=o5nhQ8FwM8xHoDaTro9WBjMjwaHWVi_25AEX1YFbLLk,1158 +numpy/typing/_generic_alias.py,sha256=5RSv20ecwhc0BkMWPyp2QeBmfAsobvdyk5PfzuIsp1U,6542 +numpy/typing/_nbit.py,sha256=jo5eJF4zrw7QdDw1dIEKLIIPiLM-1tEn95YbDXrdSPs,361 +numpy/typing/_scalars.py,sha256=V8LKjaaK6eSHVGRKCZIO1MvpGHNjYYMNeHSEfavLgQQ,987 +numpy/typing/_shape.py,sha256=65_VFQudka_3Jl2ykdzglAUxjEi-NzS4lLB9rG1lKwg,395 +numpy/typing/_ufunc.pyi,sha256=mi2LEaFQ2cK5T5QgZFhwOFzLpdSNtWiSDMFRMJiflNE,11900 +numpy/typing/mypy_plugin.py,sha256=p6m-5RgxVoUvrv-Y0TDYlZYhOg_NR35pXSHQxwzTx9o,4539 +numpy/typing/setup.py,sha256=MdGTn_wxQKhOX-nkcJyDxlyz0tit-9sWiL2yQ32xYc0,421 +numpy/typing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/typing/tests/__pycache__/__init__.cpython-39.pyc,, +numpy/typing/tests/__pycache__/test_generic_alias.cpython-39.pyc,, +numpy/typing/tests/__pycache__/test_isfile.cpython-39.pyc,, +numpy/typing/tests/__pycache__/test_runtime.cpython-39.pyc,, +numpy/typing/tests/__pycache__/test_typing.cpython-39.pyc,, +numpy/typing/tests/__pycache__/test_typing_extensions.cpython-39.pyc,, +numpy/typing/tests/data/fail/__pycache__/arithmetic.cpython-39.pyc,, +numpy/typing/tests/data/fail/__pycache__/array_constructors.cpython-39.pyc,, +numpy/typing/tests/data/fail/__pycache__/array_like.cpython-39.pyc,, +numpy/typing/tests/data/fail/__pycache__/arrayprint.cpython-39.pyc,, +numpy/typing/tests/data/fail/__pycache__/arrayterator.cpython-39.pyc,, +numpy/typing/tests/data/fail/__pycache__/bitwise_ops.cpython-39.pyc,, +numpy/typing/tests/data/fail/__pycache__/comparisons.cpython-39.pyc,, +numpy/typing/tests/data/fail/__pycache__/constants.cpython-39.pyc,, +numpy/typing/tests/data/fail/__pycache__/datasource.cpython-39.pyc,, +numpy/typing/tests/data/fail/__pycache__/dtype.cpython-39.pyc,, +numpy/typing/tests/data/fail/__pycache__/einsumfunc.cpython-39.pyc,, +numpy/typing/tests/data/fail/__pycache__/flatiter.cpython-39.pyc,, +numpy/typing/tests/data/fail/__pycache__/fromnumeric.cpython-39.pyc,, +numpy/typing/tests/data/fail/__pycache__/index_tricks.cpython-39.pyc,, +numpy/typing/tests/data/fail/__pycache__/lib_utils.cpython-39.pyc,, +numpy/typing/tests/data/fail/__pycache__/lib_version.cpython-39.pyc,, +numpy/typing/tests/data/fail/__pycache__/modules.cpython-39.pyc,, +numpy/typing/tests/data/fail/__pycache__/ndarray.cpython-39.pyc,, +numpy/typing/tests/data/fail/__pycache__/ndarray_misc.cpython-39.pyc,, +numpy/typing/tests/data/fail/__pycache__/numerictypes.cpython-39.pyc,, +numpy/typing/tests/data/fail/__pycache__/random.cpython-39.pyc,, +numpy/typing/tests/data/fail/__pycache__/scalars.cpython-39.pyc,, +numpy/typing/tests/data/fail/__pycache__/ufunc_config.cpython-39.pyc,, +numpy/typing/tests/data/fail/__pycache__/ufunclike.cpython-39.pyc,, +numpy/typing/tests/data/fail/__pycache__/ufuncs.cpython-39.pyc,, +numpy/typing/tests/data/fail/__pycache__/warnings_and_errors.cpython-39.pyc,, +numpy/typing/tests/data/fail/arithmetic.py,sha256=oLOWO9jcEXOzKsK71Khtf51i0waK6IOjmJBMgXsNJ30,3915 +numpy/typing/tests/data/fail/array_constructors.py,sha256=kNP8SE2jaL8kA3CCc4QWHGaMaJDigJ-uMyzOL6Am0a8,1041 +numpy/typing/tests/data/fail/array_like.py,sha256=q8omx4DDVcQ4m6KfZ5tBo_NTkjB98twICguQp-4xJjQ,470 
+numpy/typing/tests/data/fail/arrayprint.py,sha256=JhbPEvHbcrKGlfSk8BLIIhDj8jRQIQhfuCE2NBmmk1A,535 +numpy/typing/tests/data/fail/arrayterator.py,sha256=u1pNRrRUM8Q0o131lqGjvTQRy8-eHeazSvRArhnAyOo,494 +numpy/typing/tests/data/fail/bitwise_ops.py,sha256=6cpzCLl1EH81UiJwo5Qs-HkvUByG9V5MNLpk8nVrLLo,534 +numpy/typing/tests/data/fail/comparisons.py,sha256=SahNYXQZHDnVylOn8VYh60j7O2f5jWOXk-sbR-P725I,991 +numpy/typing/tests/data/fail/constants.py,sha256=rm5lDmBIS2M3CYKKvrZuzW_jKwGknLgc2Ih8bpDrOPc,274 +numpy/typing/tests/data/fail/datasource.py,sha256=B05CkL35mBUyKmuvi5MF3vTkZRwc0SOJo_r0cKVBBuA,410 +numpy/typing/tests/data/fail/dtype.py,sha256=ltT4BFaX_KTVdRLw2dMg3_OiSNYjDSNrXsxby6eeLTw,354 +numpy/typing/tests/data/fail/einsumfunc.py,sha256=tQxUQsvtkJIw6PxFwzc_ilw3HVSrrW74CKI1wA_Bq6g,758 +numpy/typing/tests/data/fail/flatiter.py,sha256=YpW-SsU6ti1Pt2Adj-BgE0wQ9PAMYBylSN7y61Ujtfw,867 +numpy/typing/tests/data/fail/fromnumeric.py,sha256=B1sr7aafdkbfc9WzHqjUg8MOLk4pH6i4zNEUz5rV45A,6146 +numpy/typing/tests/data/fail/index_tricks.py,sha256=iQ4_ZMyKfS4q-9WXFQ9F5KL_JFUqFoOfhg0s-JvMx1E,499 +numpy/typing/tests/data/fail/lib_utils.py,sha256=UGzb3HzY1lUdiwLcT_myFfL-nWedrkhxDt9mt68eokQ,289 +numpy/typing/tests/data/fail/lib_version.py,sha256=JWtuTLcjkZpGfXshlFpJO5vINxawn9S-mxLGH0-7kcw,164 +numpy/typing/tests/data/fail/modules.py,sha256=Q-Ku1EsCmoi1E7aJ1ZK-u43T9jXVsx23bAUo93YyAvE,716 +numpy/typing/tests/data/fail/ndarray.py,sha256=2I4smD6MlUD23Xx8Rt1gCtjj_m-tI5JEma-Z0unrgas,416 +numpy/typing/tests/data/fail/ndarray_misc.py,sha256=Jdb7ombzu8KoyKQ8NSSw-uWuB2pDlo2eN7Bj0PCJ47s,1213 +numpy/typing/tests/data/fail/numerictypes.py,sha256=S6Xn_z1KoI8tSVSZ0aM7alvDfyAxNxEQuhMjXVW1Uws,397 +numpy/typing/tests/data/fail/random.py,sha256=RtCiLP7W36l5JPPWqdtr23CqJT5e1BwDm1ljWhWGa7k,2897 +numpy/typing/tests/data/fail/scalars.py,sha256=qS087V5ilbScDAk0YiY6pEZOM_5wb0YsXsIJBa7ZsgY,3095 +numpy/typing/tests/data/fail/ufunc_config.py,sha256=V5R7wY_P7KWn4IRxbLx42b1YF3wbIYzHEMr9BC41JHE,754 +numpy/typing/tests/data/fail/ufunclike.py,sha256=P541SlXJX7-aPTsOVw7WJNQpGYo8dqnjbtqezTlArxc,706 +numpy/typing/tests/data/fail/ufuncs.py,sha256=TiIj3qjjbAgNR0IahyYUGXDTA8AlSJLIKhDrfyzAHFw,1388 +numpy/typing/tests/data/fail/warnings_and_errors.py,sha256=UhM-s2DqkkD-Nd2O7_y_34_ygn4w4nEip6AXsQ1kQ4k,287 +numpy/typing/tests/data/misc/__pycache__/extended_precision.cpython-39.pyc,, +numpy/typing/tests/data/misc/extended_precision.py,sha256=-iSPpOqauIOyCLKYKr76J9dyKMTucyh61ZoABXEBWlk,364 +numpy/typing/tests/data/mypy.ini,sha256=17CYkY-Q5zscHyHfMCiApcjmAh1bUJ8XV2FckdHGI3c,149 +numpy/typing/tests/data/pass/__pycache__/arithmetic.cpython-39.pyc,, +numpy/typing/tests/data/pass/__pycache__/array_constructors.cpython-39.pyc,, +numpy/typing/tests/data/pass/__pycache__/array_like.cpython-39.pyc,, +numpy/typing/tests/data/pass/__pycache__/arrayprint.cpython-39.pyc,, +numpy/typing/tests/data/pass/__pycache__/arrayterator.cpython-39.pyc,, +numpy/typing/tests/data/pass/__pycache__/bitwise_ops.cpython-39.pyc,, +numpy/typing/tests/data/pass/__pycache__/comparisons.cpython-39.pyc,, +numpy/typing/tests/data/pass/__pycache__/dtype.cpython-39.pyc,, +numpy/typing/tests/data/pass/__pycache__/einsumfunc.cpython-39.pyc,, +numpy/typing/tests/data/pass/__pycache__/flatiter.cpython-39.pyc,, +numpy/typing/tests/data/pass/__pycache__/fromnumeric.cpython-39.pyc,, +numpy/typing/tests/data/pass/__pycache__/index_tricks.cpython-39.pyc,, +numpy/typing/tests/data/pass/__pycache__/lib_utils.cpython-39.pyc,, +numpy/typing/tests/data/pass/__pycache__/lib_version.cpython-39.pyc,, 
+numpy/typing/tests/data/pass/__pycache__/literal.cpython-39.pyc,, +numpy/typing/tests/data/pass/__pycache__/mod.cpython-39.pyc,, +numpy/typing/tests/data/pass/__pycache__/modules.cpython-39.pyc,, +numpy/typing/tests/data/pass/__pycache__/multiarray.cpython-39.pyc,, +numpy/typing/tests/data/pass/__pycache__/ndarray_conversion.cpython-39.pyc,, +numpy/typing/tests/data/pass/__pycache__/ndarray_misc.cpython-39.pyc,, +numpy/typing/tests/data/pass/__pycache__/ndarray_shape_manipulation.cpython-39.pyc,, +numpy/typing/tests/data/pass/__pycache__/numeric.cpython-39.pyc,, +numpy/typing/tests/data/pass/__pycache__/numerictypes.cpython-39.pyc,, +numpy/typing/tests/data/pass/__pycache__/random.cpython-39.pyc,, +numpy/typing/tests/data/pass/__pycache__/scalars.cpython-39.pyc,, +numpy/typing/tests/data/pass/__pycache__/simple.cpython-39.pyc,, +numpy/typing/tests/data/pass/__pycache__/simple_py3.cpython-39.pyc,, +numpy/typing/tests/data/pass/__pycache__/ufunc_config.cpython-39.pyc,, +numpy/typing/tests/data/pass/__pycache__/ufunclike.cpython-39.pyc,, +numpy/typing/tests/data/pass/__pycache__/ufuncs.cpython-39.pyc,, +numpy/typing/tests/data/pass/__pycache__/warnings_and_errors.cpython-39.pyc,, +numpy/typing/tests/data/pass/arithmetic.py,sha256=5pbn_WGO-E9ppaYKIbrUzIa5G0BwwBCudgQMm9gYfJo,8271 +numpy/typing/tests/data/pass/array_constructors.py,sha256=dd4LPu7RvPwUDwyqO69ZR_b1vKsCFWH6j-Yc70CxkXo,2602 +numpy/typing/tests/data/pass/array_like.py,sha256=GsNgenw6yNZhwjyJcpdtiT55oDEjMcnEla7yz-My9es,932 +numpy/typing/tests/data/pass/arrayprint.py,sha256=NTw1gJ9v3TDVwRov4zsg_27rI-ndKuG4mDidBWEKVyc,803 +numpy/typing/tests/data/pass/arrayterator.py,sha256=z4o0H08T7tbzzMWhu5ZXdVqbivjBicuFgRHBk_lpOck,420 +numpy/typing/tests/data/pass/bitwise_ops.py,sha256=zYz-ZNXpxjXDdo4fYWv_RQZq5af581dnaZrlu0-sSC8,1101 +numpy/typing/tests/data/pass/comparisons.py,sha256=phjskmrmZ0bHTGLyNvo3mDuHCGR6I1OOj6S8ks71wRQ,3293 +numpy/typing/tests/data/pass/dtype.py,sha256=7LeJ9EI_R2p15v-HmSRImS-wmWC1jwlgZykBuxBZCHg,1130 +numpy/typing/tests/data/pass/einsumfunc.py,sha256=2ldOBohgKlBAiQrF6dYRdR4-uiDV1ev4B1Kfzdqxg_Y,1412 +numpy/typing/tests/data/pass/flatiter.py,sha256=2xtMPvDgfhgjZIqiN3B3Wvy6Q9oBeo9uh4UkCAQNmwg,190 +numpy/typing/tests/data/pass/fromnumeric.py,sha256=eM6RUBjVInsShXlblqq07-I0QwoST8n6g8WWuPSgYtA,4002 +numpy/typing/tests/data/pass/index_tricks.py,sha256=Vn5iEuWlNdbr03zMEwAHvjBgI25-uCqRAJfUvRVWSp0,1556 +numpy/typing/tests/data/pass/lib_utils.py,sha256=kSeBv6kEs5O8g2h37q0ygU9WzQWDSA8zPBdhWuisHek,509 +numpy/typing/tests/data/pass/lib_version.py,sha256=TlLZK8sekCMm__WWo22FZfZc40zpczENf6y_TNjBpCw,317 +numpy/typing/tests/data/pass/literal.py,sha256=wtFjvftYilKJcuehgIWJTL0yXOO72gJKZBfrw_yjNMY,1344 +numpy/typing/tests/data/pass/mod.py,sha256=Intb9Ni_LlHhEka8kk81-601JCOl85jz_4_BQDA6iYI,1727 +numpy/typing/tests/data/pass/modules.py,sha256=r6OC-qSjkLsR3uMpZVBahowrJp42i2t3LS7gCnuRIYo,638 +numpy/typing/tests/data/pass/multiarray.py,sha256=5LonH6qk4Ps6WE8o0bNgDiFXRZIfW8fSbwFWeHkXcKw,571 +numpy/typing/tests/data/pass/ndarray_conversion.py,sha256=-1iJDSvdD86k3yJCrWf1nouQrRHStf4cheiZ5OHFE78,1720 +numpy/typing/tests/data/pass/ndarray_misc.py,sha256=0DqSoVWggfBJwP36X8zigaW4qPtu2ixvyTkXsUiQlV8,2901 +numpy/typing/tests/data/pass/ndarray_shape_manipulation.py,sha256=yaBK3hW5fe2VpvARkn_NMeF-JX-OajI8JiRWOA_Uk7Y,687 +numpy/typing/tests/data/pass/numeric.py,sha256=VCJ993mY9u8v4eNhJOJuOtc9Ics8AsAlAQtrsaTXth8,1567 +numpy/typing/tests/data/pass/numerictypes.py,sha256=puK_OSekotLPXAht2Xw6Tff-BaFaTTgikL6PG-uChdI,1020 
+numpy/typing/tests/data/pass/random.py,sha256=uyiEBA6XOi3ctjK_HO_1rR3sdDa-V8_VP5I0ftSUAaQ,63320 +numpy/typing/tests/data/pass/scalars.py,sha256=jDpxnSDTMimd-M8f4vEcFqya_QL_rnfR6r9Xe0ToH-s,3761 +numpy/typing/tests/data/pass/simple.py,sha256=ChlX7VN0SPpa877JwPRb-JpBUgpwhDHJuQLbRBY75FM,2855 +numpy/typing/tests/data/pass/simple_py3.py,sha256=OBpoDmf5u4bRblugokiOZzufESsEmoU03MqipERrjLg,102 +numpy/typing/tests/data/pass/ufunc_config.py,sha256=l1JiZe3VXYLzgvbuk42Mk6fd_nvLsc9aJ233W0_MLm0,1170 +numpy/typing/tests/data/pass/ufunclike.py,sha256=vhQJlqPAZegEyC882iIU0uCqoA7ijTOZP1Vij-MrgEI,1085 +numpy/typing/tests/data/pass/ufuncs.py,sha256=xGuFo9gOvT04Pj7iNOhRIAIj_NHSTykIFyGV2Oj_0yA,479 +numpy/typing/tests/data/pass/warnings_and_errors.py,sha256=CtuAoGmmjsiWw34jjPsFUMZDrmMsVssQ6KHBWsDk7Jw,179 +numpy/typing/tests/data/reveal/__pycache__/arithmetic.cpython-39.pyc,, +numpy/typing/tests/data/reveal/__pycache__/array_constructors.cpython-39.pyc,, +numpy/typing/tests/data/reveal/__pycache__/arrayprint.cpython-39.pyc,, +numpy/typing/tests/data/reveal/__pycache__/arrayterator.cpython-39.pyc,, +numpy/typing/tests/data/reveal/__pycache__/bitwise_ops.cpython-39.pyc,, +numpy/typing/tests/data/reveal/__pycache__/comparisons.cpython-39.pyc,, +numpy/typing/tests/data/reveal/__pycache__/constants.cpython-39.pyc,, +numpy/typing/tests/data/reveal/__pycache__/datasource.cpython-39.pyc,, +numpy/typing/tests/data/reveal/__pycache__/dtype.cpython-39.pyc,, +numpy/typing/tests/data/reveal/__pycache__/einsumfunc.cpython-39.pyc,, +numpy/typing/tests/data/reveal/__pycache__/flatiter.cpython-39.pyc,, +numpy/typing/tests/data/reveal/__pycache__/fromnumeric.cpython-39.pyc,, +numpy/typing/tests/data/reveal/__pycache__/index_tricks.cpython-39.pyc,, +numpy/typing/tests/data/reveal/__pycache__/lib_utils.cpython-39.pyc,, +numpy/typing/tests/data/reveal/__pycache__/lib_version.cpython-39.pyc,, +numpy/typing/tests/data/reveal/__pycache__/mod.cpython-39.pyc,, +numpy/typing/tests/data/reveal/__pycache__/modules.cpython-39.pyc,, +numpy/typing/tests/data/reveal/__pycache__/multiarray.cpython-39.pyc,, +numpy/typing/tests/data/reveal/__pycache__/nbit_base_example.cpython-39.pyc,, +numpy/typing/tests/data/reveal/__pycache__/ndarray_conversion.cpython-39.pyc,, +numpy/typing/tests/data/reveal/__pycache__/ndarray_misc.cpython-39.pyc,, +numpy/typing/tests/data/reveal/__pycache__/ndarray_shape_manipulation.cpython-39.pyc,, +numpy/typing/tests/data/reveal/__pycache__/nditer.cpython-39.pyc,, +numpy/typing/tests/data/reveal/__pycache__/numeric.cpython-39.pyc,, +numpy/typing/tests/data/reveal/__pycache__/numerictypes.cpython-39.pyc,, +numpy/typing/tests/data/reveal/__pycache__/random.cpython-39.pyc,, +numpy/typing/tests/data/reveal/__pycache__/scalars.cpython-39.pyc,, +numpy/typing/tests/data/reveal/__pycache__/ufunc_config.cpython-39.pyc,, +numpy/typing/tests/data/reveal/__pycache__/ufunclike.cpython-39.pyc,, +numpy/typing/tests/data/reveal/__pycache__/ufuncs.cpython-39.pyc,, +numpy/typing/tests/data/reveal/__pycache__/warnings_and_errors.cpython-39.pyc,, +numpy/typing/tests/data/reveal/arithmetic.py,sha256=Xim4Jb3kt39RtbV6NOnNXUDzDX887gfuvgx086LAmgc,25390 +numpy/typing/tests/data/reveal/array_constructors.py,sha256=DYwXZdw1C3Ot2MSY1DvDzQFJ6CqF2pSPLGKLfeDBhyk,4589 +numpy/typing/tests/data/reveal/arrayprint.py,sha256=o6DS6E7YIXGlWzaFxTAbBcdiSySg13iTbROsTzKAREA,678 +numpy/typing/tests/data/reveal/arrayterator.py,sha256=ATcxXnl_aYs5mQxccvgfKT5jUpEOwmF8swOfjXoqT2g,1244 
+numpy/typing/tests/data/reveal/bitwise_ops.py,sha256=AW_jmZNW1n4-pdPdbMySpA3L3qlvs5gGC1HGVayFG4w,3840 +numpy/typing/tests/data/reveal/comparisons.py,sha256=K_85kegbZelh_QNLGzsjNvQ3NMbcTOLYQow3Vv0429E,9654 +numpy/typing/tests/data/reveal/constants.py,sha256=KA1cM3BLqwq4cjoFt2-oOkOEyEj05kv1Zm_usnN7POI,1794 +numpy/typing/tests/data/reveal/datasource.py,sha256=K4XZlEsYAbbzpD3KkkECzLH1fjategKa4PCwF1N8o3g,578 +numpy/typing/tests/data/reveal/dtype.py,sha256=gFvfL4JCIFz-sU2FepCl1xclvAjnfpx5Y7Ltlx5hrR0,2821 +numpy/typing/tests/data/reveal/einsumfunc.py,sha256=clWTnKUPN2u6TvLsWDevkuYS7dgvrPbXz0wOIFkuOEQ,1949 +numpy/typing/tests/data/reveal/flatiter.py,sha256=pwbKiKj3B_aemfzwX6Hu0AHmCFvC9Tda639PSogVz4I,841 +numpy/typing/tests/data/reveal/fromnumeric.py,sha256=ZY8if_aSa9dRqUbaTCjqoy-XJtr_5eNAwlLLL2DYNZY,10395 +numpy/typing/tests/data/reveal/index_tricks.py,sha256=ZF0GDpvQNNnbXwheAU6zziAzlHR3xvog-aY5xiI2kSw,3769 +numpy/typing/tests/data/reveal/lib_utils.py,sha256=nxEEZ_LPmND57SYRxtNPXr3uvYm3XsaTcMgK2hj0z4Q,953 +numpy/typing/tests/data/reveal/lib_version.py,sha256=3uI1jeWbiyC2CEQ5-eqDkUZmb9871IywI6ZjetQUiUY,623 +numpy/typing/tests/data/reveal/mod.py,sha256=6owAApFyZXf1Wce9gfJBzXqjau2HaggLz_7E3p-KixU,6520 +numpy/typing/tests/data/reveal/modules.py,sha256=DWI9lkbJ3vvPlhrXViB_pPmaQHr_zcq_DPiiCbu_IkU,1986 +numpy/typing/tests/data/reveal/multiarray.py,sha256=1gL16uoTEbS7dpP-scXybNWP2gklogGMAzEEoQZVj64,1008 +numpy/typing/tests/data/reveal/nbit_base_example.py,sha256=0n7NdwejO71wVgEx3TDGm9T9I8TRfNWU0oZDvWpRcGk,496 +numpy/typing/tests/data/reveal/ndarray_conversion.py,sha256=mTbv2cNYU05dH7wxuPWFlhhMi4N0adi5ocF6MUyNCtc,1630 +numpy/typing/tests/data/reveal/ndarray_misc.py,sha256=Q1DXZ2nvcGyto6YSLxb1eeV1VC-YkfsaCS5C-pOazek,7100 +numpy/typing/tests/data/reveal/ndarray_shape_manipulation.py,sha256=hRbNwpFuiWKPZ5YcOlFeER72qLOJzHle_IZQ0wj-Rfk,1041 +numpy/typing/tests/data/reveal/nditer.py,sha256=oHrsKqqnHv1igD5i_RLzT7XpLoWC-iCW2sRIFW9ww7o,500 +numpy/typing/tests/data/reveal/numeric.py,sha256=jlQtLpBLiQM_EMNDvYL_W8e2UgRupzJ4xDkblAqicV4,3150 +numpy/typing/tests/data/reveal/numerictypes.py,sha256=aT2iqUOmWu1N2JmoyHPyHz-2eRH8dKoMNMrnKEfzU5M,1392 +numpy/typing/tests/data/reveal/random.py,sha256=mImxcWeV2f1s_EzLx9o3cikWr-IuF29xYLCxmZGYYP0,151111 +numpy/typing/tests/data/reveal/scalars.py,sha256=Oo1qE8NEms0RuDrXHaPRKTnTw_MP0PwY0mdnmqPz9Hs,5600 +numpy/typing/tests/data/reveal/ufunc_config.py,sha256=dcIwOjSa4B2DWffbRTCFsgRUxQJWkfeKlTAgIlcLDeQ,1416 +numpy/typing/tests/data/reveal/ufunclike.py,sha256=qcTXiridQjA8D_iEMbtntM63x2I6DeeNGbdWFOld6R4,1624 +numpy/typing/tests/data/reveal/ufuncs.py,sha256=i5oqMNB_CODJBPFJPRaN0_tiCB2NP-ww1AJ9w04FRM0,3065 +numpy/typing/tests/data/reveal/warnings_and_errors.py,sha256=vgKMl6HOqJqp0kijIx9bOVY1p_0yNxZqnNJQHl4Bw3U,438 +numpy/typing/tests/test_generic_alias.py,sha256=FoVGJeDJk8QmGjuyqI2myesnCECWP4gQ1vnjEYnSn0U,4583 +numpy/typing/tests/test_isfile.py,sha256=Q-BAJNKYJfNGM_Lsoh1KO2zaPpaSW8d0A3bLPItWJNU,889 +numpy/typing/tests/test_runtime.py,sha256=kxCPFSTl8WpP-DA8sJVcpETAIQHUtb_05ayjZdKeP8o,2766 +numpy/typing/tests/test_typing.py,sha256=7rZdCjdUI6JB1XP_5218DTzs8dE7O3Bf7oDYmJL4Zmg,12442 +numpy/typing/tests/test_typing_extensions.py,sha256=r1WI4YHU8ZtvG1SpcxkAXcm5sJvzxtvjETp1goRT2E4,1037 +numpy/version.py,sha256=9Vcn7bdDnrdNjgeHFQ3xETxJR8iklhVpHlvfAyT7GsY,383 diff --git a/MLPY/Lib/site-packages/numpy-1.21.2.dist-info/WHEEL b/MLPY/Lib/site-packages/numpy-1.21.2.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..d1267fcc974927e309e205f066155c34747bb9ab --- /dev/null 
+++ b/MLPY/Lib/site-packages/numpy-1.21.2.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.36.2) +Root-Is-Purelib: false +Tag: cp39-cp39-win_amd64 + diff --git a/MLPY/Lib/site-packages/numpy-1.21.2.dist-info/entry_points.txt b/MLPY/Lib/site-packages/numpy-1.21.2.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..1010c24b6e61237ed9193dbd50e279ee1f057b24 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy-1.21.2.dist-info/entry_points.txt @@ -0,0 +1,3 @@ +[console_scripts] +f2py = numpy.f2py.f2py2e:main + diff --git a/MLPY/Lib/site-packages/numpy-1.21.2.dist-info/top_level.txt b/MLPY/Lib/site-packages/numpy-1.21.2.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..24ce15ab7ead32f98c7ac3edcd34bb2010ff4326 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy-1.21.2.dist-info/top_level.txt @@ -0,0 +1 @@ +numpy diff --git a/MLPY/Lib/site-packages/numpy/.libs/libopenblas.XWYDX2IKJW2NMTWSFYNGFUWKQU3LYTCZ.gfortran-win_amd64.dll b/MLPY/Lib/site-packages/numpy/.libs/libopenblas.XWYDX2IKJW2NMTWSFYNGFUWKQU3LYTCZ.gfortran-win_amd64.dll new file mode 100644 index 0000000000000000000000000000000000000000..6f3a282b575ad9f86332f541313cdbd378ba7f3d --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/.libs/libopenblas.XWYDX2IKJW2NMTWSFYNGFUWKQU3LYTCZ.gfortran-win_amd64.dll @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b5a211815d41b2d08a38c44fea9fb8977bae24948502ed92362fda5500ff6ce +size 34522650 diff --git a/MLPY/Lib/site-packages/numpy/LICENSE.txt b/MLPY/Lib/site-packages/numpy/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..04738ae230b92fda8fe06d6e4d0627d6e02ce793 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/LICENSE.txt @@ -0,0 +1,938 @@ + +---- + +This binary distribution of NumPy also bundles the following software: + + +Name: OpenBLAS +Files: extra-dll\libopenb*.dll +Description: bundled as a dynamically linked library +Availability: https://github.com/xianyi/OpenBLAS/ +License: 3-clause BSD + Copyright (c) 2011-2014, The OpenBLAS Project + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + 3. Neither the name of the OpenBLAS project nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +Name: LAPACK +Files: extra-dll\libopenb*.dll +Description: bundled in OpenBLAS +Availability: https://github.com/xianyi/OpenBLAS/ +License 3-clause BSD + Copyright (c) 1992-2013 The University of Tennessee and The University + of Tennessee Research Foundation. All rights + reserved. + Copyright (c) 2000-2013 The University of California Berkeley. All + rights reserved. + Copyright (c) 2006-2013 The University of Colorado Denver. All rights + reserved. + + $COPYRIGHT$ + + Additional copyrights may follow + + $HEADER$ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer listed + in this license in the documentation and/or other materials + provided with the distribution. + + - Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + The copyright holders provide no reassurances that the source code + provided does not infringe any patent, copyright, or any other + intellectual property rights of third parties. The copyright holders + disclaim any liability to any recipient for claims brought against + recipient by any third party for infringement of that parties + intellectual property rights. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +Name: GCC runtime library +Files: extra-dll\*.dll +Description: statically linked, in DLL files compiled with gfortran only +Availability: https://gcc.gnu.org/viewcvs/gcc/ +License: GPLv3 + runtime exception + Copyright (C) 2002-2017 Free Software Foundation, Inc. + + Libgfortran is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. 
+ + Libgfortran is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + <http://www.gnu.org/licenses/>. + + +Name: Microsoft Visual C++ Runtime Files +Files: extra-dll\msvcp140.dll +License: MSVC + https://www.visualstudio.com/license-terms/distributable-code-microsoft-visual-studio-2015-rc-microsoft-visual-studio-2015-sdk-rc-includes-utilities-buildserver-files/#visual-c-runtime + + Subject to the License Terms for the software, you may copy and + distribute with your program any of the files within the followng + folder and its subfolders except as noted below. You may not modify + these files. + + C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\redist + + You may not distribute the contents of the following folders: + + C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\redist\debug_nonredist + C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\redist\onecore\debug_nonredist + + Subject to the License Terms for the software, you may copy and + distribute the following files with your program in your program’s + application local folder or by deploying them into the Global + Assembly Cache (GAC): + + VC\atlmfc\lib\mfcmifc80.dll + VC\atlmfc\lib\amd64\mfcmifc80.dll + + +Name: Microsoft Visual C++ Runtime Files +Files: extra-dll\msvc*90.dll, extra-dll\Microsoft.VC90.CRT.manifest +License: MSVC + For your convenience, we have provided the following folders for + use when redistributing VC++ runtime files. Subject to the license + terms for the software, you may redistribute the folder + (unmodified) in the application local folder as a sub-folder with + no change to the folder name. You may also redistribute all the + files (*.dll and *.manifest) within a folder, listed below the + folder for your convenience, as an entire set. + + \VC\redist\x86\Microsoft.VC90.ATL\ + atl90.dll + Microsoft.VC90.ATL.manifest + \VC\redist\ia64\Microsoft.VC90.ATL\ + atl90.dll + Microsoft.VC90.ATL.manifest + \VC\redist\amd64\Microsoft.VC90.ATL\ + atl90.dll + Microsoft.VC90.ATL.manifest + \VC\redist\x86\Microsoft.VC90.CRT\ + msvcm90.dll + msvcp90.dll + msvcr90.dll + Microsoft.VC90.CRT.manifest + \VC\redist\ia64\Microsoft.VC90.CRT\ + msvcm90.dll + msvcp90.dll + msvcr90.dll + Microsoft.VC90.CRT.manifest + +---- + +Full text of license texts referred to above follows (that they are +listed below does not necessarily imply the conditions apply to the +present binary release): + +---- + +GCC RUNTIME LIBRARY EXCEPTION + +Version 3.1, 31 March 2009 + +Copyright (C) 2009 Free Software Foundation, Inc. <http://fsf.org/> + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +This GCC Runtime Library Exception ("Exception") is an additional +permission under section 7 of the GNU General Public License, version +3 ("GPLv3"). It applies to a given file (the "Runtime Library") that +bears a notice placed by the copyright holder of the file stating that +the file is governed by GPLv3 along with this Exception.
+ +When you use GCC to compile a program, GCC may combine portions of +certain GCC header files and runtime libraries with the compiled +program. The purpose of this Exception is to allow compilation of +non-GPL (including proprietary) programs to use, in this way, the +header files and runtime libraries covered by this Exception. + +0. Definitions. + +A file is an "Independent Module" if it either requires the Runtime +Library for execution after a Compilation Process, or makes use of an +interface provided by the Runtime Library, but is not otherwise based +on the Runtime Library. + +"GCC" means a version of the GNU Compiler Collection, with or without +modifications, governed by version 3 (or a specified later version) of +the GNU General Public License (GPL) with the option of using any +subsequent versions published by the FSF. + +"GPL-compatible Software" is software whose conditions of propagation, +modification and use would permit combination with GCC in accord with +the license of GCC. + +"Target Code" refers to output from any compiler for a real or virtual +target processor architecture, in executable form or suitable for +input to an assembler, loader, linker and/or execution +phase. Notwithstanding that, Target Code does not include data in any +format that is used as a compiler intermediate representation, or used +for producing a compiler intermediate representation. + +The "Compilation Process" transforms code entirely represented in +non-intermediate languages designed for human-written code, and/or in +Java Virtual Machine byte code, into Target Code. Thus, for example, +use of source code generators and preprocessors need not be considered +part of the Compilation Process, since the Compilation Process can be +understood as starting with the output of the generators or +preprocessors. + +A Compilation Process is "Eligible" if it is done using GCC, alone or +with other GPL-compatible software, or if it is done without using any +work based on GCC. For example, using non-GPL-compatible Software to +optimize any GCC intermediate representations would not qualify as an +Eligible Compilation Process. + +1. Grant of Additional Permission. + +You have permission to propagate a work of Target Code formed by +combining the Runtime Library with Independent Modules, even if such +propagation would otherwise violate the terms of GPLv3, provided that +all Target Code was generated by Eligible Compilation Processes. You +may then convey such a combination under terms of your choice, +consistent with the licensing of the Independent Modules. + +2. No Weakening of GCC Copyleft. + +The availability of this Exception does not imply any general +presumption that third-party software is unaffected by the copyleft +requirements of the license of GCC. + +---- + + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users.
We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. 
The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. 
+ + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. 
This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). 
+ + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". 
+ + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. 
+ + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. 
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<https://www.gnu.org/philosophy/why-not-lgpl.html>.
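A minimal usage sketch for the generated numpy/__config__.py added below (illustrative only, not part of the diff; it assumes an installed NumPy build and uses only the get_info()/show() API that the file itself defines -- "blas_opt" is one of its *_info globals):

    import numpy as np
    import numpy.__config__ as npconfig

    # get_info("blas_opt") falls back to the module-level "blas_opt_info"
    # dict and returns {} when that backend was not detected at build time.
    blas = npconfig.get_info("blas_opt")
    print(blas.get("libraries", []))

    np.show_config()  # prints each *_info dict plus baseline/found SIMD features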
diff --git a/MLPY/Lib/site-packages/numpy/__config__.py b/MLPY/Lib/site-packages/numpy/__config__.py new file mode 100644 index 0000000000000000000000000000000000000000..d3923fc4b16ca1581c79b4283de3bd9e477fb6c8 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/__config__.py @@ -0,0 +1,98 @@ +# This file is generated by numpy's setup.py +# It contains system_info results at the time of building this package. +__all__ = ["get_info","show"] + + +import os +import sys + +extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs') + +if sys.platform == 'win32' and os.path.isdir(extra_dll_dir): + if sys.version_info >= (3, 8): + os.add_dll_directory(extra_dll_dir) + else: + os.environ.setdefault('PATH', '') + os.environ['PATH'] += os.pathsep + extra_dll_dir + +blas_mkl_info={} +blis_info={} +openblas_info={'library_dirs': ['D:\\a\\1\\s\\numpy\\build\\openblas_info'], 'libraries': ['openblas_info'], 'language': 'f77', 'define_macros': [('HAVE_CBLAS', None)]} +blas_opt_info={'library_dirs': ['D:\\a\\1\\s\\numpy\\build\\openblas_info'], 'libraries': ['openblas_info'], 'language': 'f77', 'define_macros': [('HAVE_CBLAS', None)]} +lapack_mkl_info={} +openblas_lapack_info={'library_dirs': ['D:\\a\\1\\s\\numpy\\build\\openblas_lapack_info'], 'libraries': ['openblas_lapack_info'], 'language': 'f77', 'define_macros': [('HAVE_CBLAS', None)]} +lapack_opt_info={'library_dirs': ['D:\\a\\1\\s\\numpy\\build\\openblas_lapack_info'], 'libraries': ['openblas_lapack_info'], 'language': 'f77', 'define_macros': [('HAVE_CBLAS', None)]} + +def get_info(name): + g = globals() + return g.get(name, g.get(name + "_info", {})) + +def show(): + """ + Show libraries in the system on which NumPy was built. + + Print information about various resources (libraries, library + directories, include directories, etc.) in the system on which + NumPy was built. + + See Also + -------- + get_include : Returns the directory containing NumPy C + header files. + + Notes + ----- + Classes specifying the information to be printed are defined + in the `numpy.distutils.system_info` module. + + Information may include: + + * ``language``: language used to write the libraries (mostly + C or f77) + * ``libraries``: names of libraries found in the system + * ``library_dirs``: directories containing the libraries + * ``include_dirs``: directories containing library header files + * ``src_dirs``: directories containing library source files + * ``define_macros``: preprocessor macros used by + ``distutils.setup`` + * ``baseline``: minimum CPU features required + * ``found``: dispatched features supported in the system + * ``not found``: dispatched features that are not supported + in the system + + Examples + -------- + >>> import numpy as np + >>> np.show_config() + blas_opt_info: + language = c + define_macros = [('HAVE_CBLAS', None)] + libraries = ['openblas', 'openblas'] + library_dirs = ['/usr/local/lib'] + """ + from numpy.core._multiarray_umath import ( + __cpu_features__, __cpu_baseline__, __cpu_dispatch__ + ) + for name,info_dict in globals().items(): + if name[0] == "_" or type(info_dict) is not type({}): continue + print(name + ":") + if not info_dict: + print(" NOT AVAILABLE") + for k,v in info_dict.items(): + v = str(v) + if k == "sources" and len(v) > 200: + v = v[:60] + " ...\n... 
" + v[-60:] + print(" %s = %s" % (k,v)) + + features_found, features_not_found = [], [] + for feature in __cpu_dispatch__: + if __cpu_features__[feature]: + features_found.append(feature) + else: + features_not_found.append(feature) + + print("Supported SIMD extensions in this NumPy install:") + print(" baseline = %s" % (','.join(__cpu_baseline__))) + print(" found = %s" % (','.join(features_found))) + print(" not found = %s" % (','.join(features_not_found))) + diff --git a/MLPY/Lib/site-packages/numpy/__init__.cython-30.pxd b/MLPY/Lib/site-packages/numpy/__init__.cython-30.pxd new file mode 100644 index 0000000000000000000000000000000000000000..4d9f4d106c7cac97d3f299c7402907a993c71918 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/__init__.cython-30.pxd @@ -0,0 +1,1053 @@ +# NumPy static imports for Cython >= 3.0 +# +# If any of the PyArray_* functions are called, import_array must be +# called first. This is done automatically by Cython 3.0+ if a call +# is not detected inside of the module. +# +# Author: Dag Sverre Seljebotn +# + +from cpython.ref cimport Py_INCREF +from cpython.object cimport PyObject, PyTypeObject, PyObject_TypeCheck +cimport libc.stdio as stdio + + +cdef extern from *: + # Leave a marker that the NumPy declarations came from NumPy itself and not from Cython. + # See https://github.com/cython/cython/issues/3573 + """ + /* Using NumPy API declarations from "numpy/__init__.cython-30.pxd" */ + """ + + +cdef extern from "Python.h": + ctypedef Py_ssize_t Py_intptr_t + +cdef extern from "numpy/arrayobject.h": + ctypedef Py_intptr_t npy_intp + ctypedef size_t npy_uintp + + cdef enum NPY_TYPES: + NPY_BOOL + NPY_BYTE + NPY_UBYTE + NPY_SHORT + NPY_USHORT + NPY_INT + NPY_UINT + NPY_LONG + NPY_ULONG + NPY_LONGLONG + NPY_ULONGLONG + NPY_FLOAT + NPY_DOUBLE + NPY_LONGDOUBLE + NPY_CFLOAT + NPY_CDOUBLE + NPY_CLONGDOUBLE + NPY_OBJECT + NPY_STRING + NPY_UNICODE + NPY_VOID + NPY_DATETIME + NPY_TIMEDELTA + NPY_NTYPES + NPY_NOTYPE + + NPY_INT8 + NPY_INT16 + NPY_INT32 + NPY_INT64 + NPY_INT128 + NPY_INT256 + NPY_UINT8 + NPY_UINT16 + NPY_UINT32 + NPY_UINT64 + NPY_UINT128 + NPY_UINT256 + NPY_FLOAT16 + NPY_FLOAT32 + NPY_FLOAT64 + NPY_FLOAT80 + NPY_FLOAT96 + NPY_FLOAT128 + NPY_FLOAT256 + NPY_COMPLEX32 + NPY_COMPLEX64 + NPY_COMPLEX128 + NPY_COMPLEX160 + NPY_COMPLEX192 + NPY_COMPLEX256 + NPY_COMPLEX512 + + NPY_INTP + + ctypedef enum NPY_ORDER: + NPY_ANYORDER + NPY_CORDER + NPY_FORTRANORDER + NPY_KEEPORDER + + ctypedef enum NPY_CASTING: + NPY_NO_CASTING + NPY_EQUIV_CASTING + NPY_SAFE_CASTING + NPY_SAME_KIND_CASTING + NPY_UNSAFE_CASTING + + ctypedef enum NPY_CLIPMODE: + NPY_CLIP + NPY_WRAP + NPY_RAISE + + ctypedef enum NPY_SCALARKIND: + NPY_NOSCALAR, + NPY_BOOL_SCALAR, + NPY_INTPOS_SCALAR, + NPY_INTNEG_SCALAR, + NPY_FLOAT_SCALAR, + NPY_COMPLEX_SCALAR, + NPY_OBJECT_SCALAR + + ctypedef enum NPY_SORTKIND: + NPY_QUICKSORT + NPY_HEAPSORT + NPY_MERGESORT + + ctypedef enum NPY_SEARCHSIDE: + NPY_SEARCHLEFT + NPY_SEARCHRIGHT + + enum: + # DEPRECATED since NumPy 1.7 ! Do not use in new code! 
+        NPY_C_CONTIGUOUS
+        NPY_F_CONTIGUOUS
+        NPY_CONTIGUOUS
+        NPY_FORTRAN
+        NPY_OWNDATA
+        NPY_FORCECAST
+        NPY_ENSURECOPY
+        NPY_ENSUREARRAY
+        NPY_ELEMENTSTRIDES
+        NPY_ALIGNED
+        NPY_NOTSWAPPED
+        NPY_WRITEABLE
+        NPY_UPDATEIFCOPY
+        NPY_ARR_HAS_DESCR
+
+        NPY_BEHAVED
+        NPY_BEHAVED_NS
+        NPY_CARRAY
+        NPY_CARRAY_RO
+        NPY_FARRAY
+        NPY_FARRAY_RO
+        NPY_DEFAULT
+
+        NPY_IN_ARRAY
+        NPY_OUT_ARRAY
+        NPY_INOUT_ARRAY
+        NPY_IN_FARRAY
+        NPY_OUT_FARRAY
+        NPY_INOUT_FARRAY
+
+        NPY_UPDATE_ALL
+
+    enum:
+        # Added in NumPy 1.7 to replace the deprecated enums above.
+        NPY_ARRAY_C_CONTIGUOUS
+        NPY_ARRAY_F_CONTIGUOUS
+        NPY_ARRAY_OWNDATA
+        NPY_ARRAY_FORCECAST
+        NPY_ARRAY_ENSURECOPY
+        NPY_ARRAY_ENSUREARRAY
+        NPY_ARRAY_ELEMENTSTRIDES
+        NPY_ARRAY_ALIGNED
+        NPY_ARRAY_NOTSWAPPED
+        NPY_ARRAY_WRITEABLE
+        NPY_ARRAY_UPDATEIFCOPY
+
+        NPY_ARRAY_BEHAVED
+        NPY_ARRAY_BEHAVED_NS
+        NPY_ARRAY_CARRAY
+        NPY_ARRAY_CARRAY_RO
+        NPY_ARRAY_FARRAY
+        NPY_ARRAY_FARRAY_RO
+        NPY_ARRAY_DEFAULT
+
+        NPY_ARRAY_IN_ARRAY
+        NPY_ARRAY_OUT_ARRAY
+        NPY_ARRAY_INOUT_ARRAY
+        NPY_ARRAY_IN_FARRAY
+        NPY_ARRAY_OUT_FARRAY
+        NPY_ARRAY_INOUT_FARRAY
+
+        NPY_ARRAY_UPDATE_ALL
+
+    cdef enum:
+        NPY_MAXDIMS
+
+    npy_intp NPY_MAX_ELSIZE
+
+    ctypedef void (*PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *)
+
+    ctypedef struct PyArray_ArrayDescr:
+        # shape is a tuple, but Cython doesn't support "tuple shape"
+        # inside a non-PyObject declaration, so we have to declare it
+        # as just a PyObject*.
+        PyObject* shape
+
+    ctypedef struct PyArray_Descr:
+        pass
+
+    ctypedef class numpy.dtype [object PyArray_Descr, check_size ignore]:
+        # Use PyDataType_* macros when possible, however there are no macros
+        # for accessing some of the fields, so some are defined.
+        cdef PyTypeObject* typeobj
+        cdef char kind
+        cdef char type
+        # Numpy sometimes mutates this without warning (e.g. it'll
+        # sometimes change "|" to "<" in shared dtype objects on
+        # little-endian machines). If this matters to you, use
+        # PyArray_IsNativeByteOrder(dtype.byteorder) instead of
+        # directly accessing this field.
+        cdef char byteorder
+        cdef char flags
+        cdef int type_num
+        cdef int itemsize "elsize"
+        cdef int alignment
+        cdef object fields
+        cdef tuple names
+        # Use PyDataType_HASSUBARRAY to test whether this field is
+        # valid (the pointer can be NULL). Most users should access
+        # this field via the inline helper method PyDataType_SHAPE.
+        cdef PyArray_ArrayDescr* subarray
+
+    ctypedef class numpy.flatiter [object PyArrayIterObject, check_size ignore]:
+        # Use through macros
+        pass
+
+    ctypedef class numpy.broadcast [object PyArrayMultiIterObject, check_size ignore]:
+        # Use through macros
+        pass
+
+    ctypedef struct PyArrayObject:
+        # For use in situations where ndarray can't replace PyArrayObject*,
+        # like PyArrayObject**.
+        pass
+
+    ctypedef class numpy.ndarray [object PyArrayObject, check_size ignore]:
+        cdef __cythonbufferdefaults__ = {"mode": "strided"}
+
+        # NOTE: no field declarations since direct access is deprecated since NumPy 1.7
+        # Instead, we use properties that map to the corresponding C-API functions.
+
+        @property
+        cdef inline PyObject* base(self) nogil:
+            """Returns a borrowed reference to the object owning the data/memory.
+            """
+            return PyArray_BASE(self)
+
+        @property
+        cdef inline dtype descr(self):
+            """Returns an owned reference to the dtype of the array.
+            """
+            return <dtype>PyArray_DESCR(self)
+
+        @property
+        cdef inline int ndim(self) nogil:
+            """Returns the number of dimensions in the array.
+ """ + return PyArray_NDIM(self) + + @property + cdef inline npy_intp *shape(self) nogil: + """Returns a pointer to the dimensions/shape of the array. + The number of elements matches the number of dimensions of the array (ndim). + Can return NULL for 0-dimensional arrays. + """ + return PyArray_DIMS(self) + + @property + cdef inline npy_intp *strides(self) nogil: + """Returns a pointer to the strides of the array. + The number of elements matches the number of dimensions of the array (ndim). + """ + return PyArray_STRIDES(self) + + @property + cdef inline npy_intp size(self) nogil: + """Returns the total size (in number of elements) of the array. + """ + return PyArray_SIZE(self) + + @property + cdef inline char* data(self) nogil: + """The pointer to the data buffer as a char*. + This is provided for legacy reasons to avoid direct struct field access. + For new code that needs this access, you probably want to cast the result + of `PyArray_DATA()` instead, which returns a 'void*'. + """ + return PyArray_BYTES(self) + + ctypedef unsigned char npy_bool + + ctypedef signed char npy_byte + ctypedef signed short npy_short + ctypedef signed int npy_int + ctypedef signed long npy_long + ctypedef signed long long npy_longlong + + ctypedef unsigned char npy_ubyte + ctypedef unsigned short npy_ushort + ctypedef unsigned int npy_uint + ctypedef unsigned long npy_ulong + ctypedef unsigned long long npy_ulonglong + + ctypedef float npy_float + ctypedef double npy_double + ctypedef long double npy_longdouble + + ctypedef signed char npy_int8 + ctypedef signed short npy_int16 + ctypedef signed int npy_int32 + ctypedef signed long long npy_int64 + ctypedef signed long long npy_int96 + ctypedef signed long long npy_int128 + + ctypedef unsigned char npy_uint8 + ctypedef unsigned short npy_uint16 + ctypedef unsigned int npy_uint32 + ctypedef unsigned long long npy_uint64 + ctypedef unsigned long long npy_uint96 + ctypedef unsigned long long npy_uint128 + + ctypedef float npy_float32 + ctypedef double npy_float64 + ctypedef long double npy_float80 + ctypedef long double npy_float96 + ctypedef long double npy_float128 + + ctypedef struct npy_cfloat: + float real + float imag + + ctypedef struct npy_cdouble: + double real + double imag + + ctypedef struct npy_clongdouble: + long double real + long double imag + + ctypedef struct npy_complex64: + float real + float imag + + ctypedef struct npy_complex128: + double real + double imag + + ctypedef struct npy_complex160: + long double real + long double imag + + ctypedef struct npy_complex192: + long double real + long double imag + + ctypedef struct npy_complex256: + long double real + long double imag + + ctypedef struct PyArray_Dims: + npy_intp *ptr + int len + + int _import_array() except -1 + # A second definition so _import_array isn't marked as used when we use it here. + # Do not use - subject to change any time. 
+ int __pyx_import_array "_import_array"() except -1 + + # + # Macros from ndarrayobject.h + # + bint PyArray_CHKFLAGS(ndarray m, int flags) nogil + bint PyArray_IS_C_CONTIGUOUS(ndarray arr) nogil + bint PyArray_IS_F_CONTIGUOUS(ndarray arr) nogil + bint PyArray_ISCONTIGUOUS(ndarray m) nogil + bint PyArray_ISWRITEABLE(ndarray m) nogil + bint PyArray_ISALIGNED(ndarray m) nogil + + int PyArray_NDIM(ndarray) nogil + bint PyArray_ISONESEGMENT(ndarray) nogil + bint PyArray_ISFORTRAN(ndarray) nogil + int PyArray_FORTRANIF(ndarray) nogil + + void* PyArray_DATA(ndarray) nogil + char* PyArray_BYTES(ndarray) nogil + + npy_intp* PyArray_DIMS(ndarray) nogil + npy_intp* PyArray_STRIDES(ndarray) nogil + npy_intp PyArray_DIM(ndarray, size_t) nogil + npy_intp PyArray_STRIDE(ndarray, size_t) nogil + + PyObject *PyArray_BASE(ndarray) nogil # returns borrowed reference! + PyArray_Descr *PyArray_DESCR(ndarray) nogil # returns borrowed reference to dtype! + PyArray_Descr *PyArray_DTYPE(ndarray) nogil # returns borrowed reference to dtype! NP 1.7+ alias for descr. + int PyArray_FLAGS(ndarray) nogil + void PyArray_CLEARFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 + void PyArray_ENABLEFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 + npy_intp PyArray_ITEMSIZE(ndarray) nogil + int PyArray_TYPE(ndarray arr) nogil + + object PyArray_GETITEM(ndarray arr, void *itemptr) + int PyArray_SETITEM(ndarray arr, void *itemptr, object obj) + + bint PyTypeNum_ISBOOL(int) nogil + bint PyTypeNum_ISUNSIGNED(int) nogil + bint PyTypeNum_ISSIGNED(int) nogil + bint PyTypeNum_ISINTEGER(int) nogil + bint PyTypeNum_ISFLOAT(int) nogil + bint PyTypeNum_ISNUMBER(int) nogil + bint PyTypeNum_ISSTRING(int) nogil + bint PyTypeNum_ISCOMPLEX(int) nogil + bint PyTypeNum_ISPYTHON(int) nogil + bint PyTypeNum_ISFLEXIBLE(int) nogil + bint PyTypeNum_ISUSERDEF(int) nogil + bint PyTypeNum_ISEXTENDED(int) nogil + bint PyTypeNum_ISOBJECT(int) nogil + + bint PyDataType_ISBOOL(dtype) nogil + bint PyDataType_ISUNSIGNED(dtype) nogil + bint PyDataType_ISSIGNED(dtype) nogil + bint PyDataType_ISINTEGER(dtype) nogil + bint PyDataType_ISFLOAT(dtype) nogil + bint PyDataType_ISNUMBER(dtype) nogil + bint PyDataType_ISSTRING(dtype) nogil + bint PyDataType_ISCOMPLEX(dtype) nogil + bint PyDataType_ISPYTHON(dtype) nogil + bint PyDataType_ISFLEXIBLE(dtype) nogil + bint PyDataType_ISUSERDEF(dtype) nogil + bint PyDataType_ISEXTENDED(dtype) nogil + bint PyDataType_ISOBJECT(dtype) nogil + bint PyDataType_HASFIELDS(dtype) nogil + bint PyDataType_HASSUBARRAY(dtype) nogil + + bint PyArray_ISBOOL(ndarray) nogil + bint PyArray_ISUNSIGNED(ndarray) nogil + bint PyArray_ISSIGNED(ndarray) nogil + bint PyArray_ISINTEGER(ndarray) nogil + bint PyArray_ISFLOAT(ndarray) nogil + bint PyArray_ISNUMBER(ndarray) nogil + bint PyArray_ISSTRING(ndarray) nogil + bint PyArray_ISCOMPLEX(ndarray) nogil + bint PyArray_ISPYTHON(ndarray) nogil + bint PyArray_ISFLEXIBLE(ndarray) nogil + bint PyArray_ISUSERDEF(ndarray) nogil + bint PyArray_ISEXTENDED(ndarray) nogil + bint PyArray_ISOBJECT(ndarray) nogil + bint PyArray_HASFIELDS(ndarray) nogil + + bint PyArray_ISVARIABLE(ndarray) nogil + + bint PyArray_SAFEALIGNEDCOPY(ndarray) nogil + bint PyArray_ISNBO(char) nogil # works on ndarray.byteorder + bint PyArray_IsNativeByteOrder(char) nogil # works on ndarray.byteorder + bint PyArray_ISNOTSWAPPED(ndarray) nogil + bint PyArray_ISBYTESWAPPED(ndarray) nogil + + bint PyArray_FLAGSWAP(ndarray, int) nogil + + bint PyArray_ISCARRAY(ndarray) nogil + bint PyArray_ISCARRAY_RO(ndarray) nogil + bint 
PyArray_ISFARRAY(ndarray) nogil + bint PyArray_ISFARRAY_RO(ndarray) nogil + bint PyArray_ISBEHAVED(ndarray) nogil + bint PyArray_ISBEHAVED_RO(ndarray) nogil + + + bint PyDataType_ISNOTSWAPPED(dtype) nogil + bint PyDataType_ISBYTESWAPPED(dtype) nogil + + bint PyArray_DescrCheck(object) + + bint PyArray_Check(object) + bint PyArray_CheckExact(object) + + # Cannot be supported due to out arg: + # bint PyArray_HasArrayInterfaceType(object, dtype, object, object&) + # bint PyArray_HasArrayInterface(op, out) + + + bint PyArray_IsZeroDim(object) + # Cannot be supported due to ## ## in macro: + # bint PyArray_IsScalar(object, verbatim work) + bint PyArray_CheckScalar(object) + bint PyArray_IsPythonNumber(object) + bint PyArray_IsPythonScalar(object) + bint PyArray_IsAnyScalar(object) + bint PyArray_CheckAnyScalar(object) + + ndarray PyArray_GETCONTIGUOUS(ndarray) + bint PyArray_SAMESHAPE(ndarray, ndarray) nogil + npy_intp PyArray_SIZE(ndarray) nogil + npy_intp PyArray_NBYTES(ndarray) nogil + + object PyArray_FROM_O(object) + object PyArray_FROM_OF(object m, int flags) + object PyArray_FROM_OT(object m, int type) + object PyArray_FROM_OTF(object m, int type, int flags) + object PyArray_FROMANY(object m, int type, int min, int max, int flags) + object PyArray_ZEROS(int nd, npy_intp* dims, int type, int fortran) + object PyArray_EMPTY(int nd, npy_intp* dims, int type, int fortran) + void PyArray_FILLWBYTE(object, int val) + npy_intp PyArray_REFCOUNT(object) + object PyArray_ContiguousFromAny(op, int, int min_depth, int max_depth) + unsigned char PyArray_EquivArrTypes(ndarray a1, ndarray a2) + bint PyArray_EquivByteorders(int b1, int b2) nogil + object PyArray_SimpleNew(int nd, npy_intp* dims, int typenum) + object PyArray_SimpleNewFromData(int nd, npy_intp* dims, int typenum, void* data) + #object PyArray_SimpleNewFromDescr(int nd, npy_intp* dims, dtype descr) + object PyArray_ToScalar(void* data, ndarray arr) + + void* PyArray_GETPTR1(ndarray m, npy_intp i) nogil + void* PyArray_GETPTR2(ndarray m, npy_intp i, npy_intp j) nogil + void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k) nogil + void* PyArray_GETPTR4(ndarray m, npy_intp i, npy_intp j, npy_intp k, npy_intp l) nogil + + void PyArray_XDECREF_ERR(ndarray) + # Cannot be supported due to out arg + # void PyArray_DESCR_REPLACE(descr) + + + object PyArray_Copy(ndarray) + object PyArray_FromObject(object op, int type, int min_depth, int max_depth) + object PyArray_ContiguousFromObject(object op, int type, int min_depth, int max_depth) + object PyArray_CopyFromObject(object op, int type, int min_depth, int max_depth) + + object PyArray_Cast(ndarray mp, int type_num) + object PyArray_Take(ndarray ap, object items, int axis) + object PyArray_Put(ndarray ap, object items, object values) + + void PyArray_ITER_RESET(flatiter it) nogil + void PyArray_ITER_NEXT(flatiter it) nogil + void PyArray_ITER_GOTO(flatiter it, npy_intp* destination) nogil + void PyArray_ITER_GOTO1D(flatiter it, npy_intp ind) nogil + void* PyArray_ITER_DATA(flatiter it) nogil + bint PyArray_ITER_NOTDONE(flatiter it) nogil + + void PyArray_MultiIter_RESET(broadcast multi) nogil + void PyArray_MultiIter_NEXT(broadcast multi) nogil + void PyArray_MultiIter_GOTO(broadcast multi, npy_intp dest) nogil + void PyArray_MultiIter_GOTO1D(broadcast multi, npy_intp ind) nogil + void* PyArray_MultiIter_DATA(broadcast multi, npy_intp i) nogil + void PyArray_MultiIter_NEXTi(broadcast multi, npy_intp i) nogil + bint PyArray_MultiIter_NOTDONE(broadcast multi) nogil + + # Functions from 
__multiarray_api.h + + # Functions taking dtype and returning object/ndarray are disabled + # for now as they steal dtype references. I'm conservative and disable + # more than is probably needed until it can be checked further. + int PyArray_SetNumericOps (object) + object PyArray_GetNumericOps () + int PyArray_INCREF (ndarray) + int PyArray_XDECREF (ndarray) + void PyArray_SetStringFunction (object, int) + dtype PyArray_DescrFromType (int) + object PyArray_TypeObjectFromType (int) + char * PyArray_Zero (ndarray) + char * PyArray_One (ndarray) + #object PyArray_CastToType (ndarray, dtype, int) + int PyArray_CastTo (ndarray, ndarray) + int PyArray_CastAnyTo (ndarray, ndarray) + int PyArray_CanCastSafely (int, int) + npy_bool PyArray_CanCastTo (dtype, dtype) + int PyArray_ObjectType (object, int) + dtype PyArray_DescrFromObject (object, dtype) + #ndarray* PyArray_ConvertToCommonType (object, int *) + dtype PyArray_DescrFromScalar (object) + dtype PyArray_DescrFromTypeObject (object) + npy_intp PyArray_Size (object) + #object PyArray_Scalar (void *, dtype, object) + #object PyArray_FromScalar (object, dtype) + void PyArray_ScalarAsCtype (object, void *) + #int PyArray_CastScalarToCtype (object, void *, dtype) + #int PyArray_CastScalarDirect (object, dtype, void *, int) + object PyArray_ScalarFromObject (object) + #PyArray_VectorUnaryFunc * PyArray_GetCastFunc (dtype, int) + object PyArray_FromDims (int, int *, int) + #object PyArray_FromDimsAndDataAndDescr (int, int *, dtype, char *) + #object PyArray_FromAny (object, dtype, int, int, int, object) + object PyArray_EnsureArray (object) + object PyArray_EnsureAnyArray (object) + #object PyArray_FromFile (stdio.FILE *, dtype, npy_intp, char *) + #object PyArray_FromString (char *, npy_intp, dtype, npy_intp, char *) + #object PyArray_FromBuffer (object, dtype, npy_intp, npy_intp) + #object PyArray_FromIter (object, dtype, npy_intp) + object PyArray_Return (ndarray) + #object PyArray_GetField (ndarray, dtype, int) + #int PyArray_SetField (ndarray, dtype, int, object) + object PyArray_Byteswap (ndarray, npy_bool) + object PyArray_Resize (ndarray, PyArray_Dims *, int, NPY_ORDER) + int PyArray_MoveInto (ndarray, ndarray) + int PyArray_CopyInto (ndarray, ndarray) + int PyArray_CopyAnyInto (ndarray, ndarray) + int PyArray_CopyObject (ndarray, object) + object PyArray_NewCopy (ndarray, NPY_ORDER) + object PyArray_ToList (ndarray) + object PyArray_ToString (ndarray, NPY_ORDER) + int PyArray_ToFile (ndarray, stdio.FILE *, char *, char *) + int PyArray_Dump (object, object, int) + object PyArray_Dumps (object, int) + int PyArray_ValidType (int) + void PyArray_UpdateFlags (ndarray, int) + object PyArray_New (type, int, npy_intp *, int, npy_intp *, void *, int, int, object) + #object PyArray_NewFromDescr (type, dtype, int, npy_intp *, npy_intp *, void *, int, object) + #dtype PyArray_DescrNew (dtype) + dtype PyArray_DescrNewFromType (int) + double PyArray_GetPriority (object, double) + object PyArray_IterNew (object) + object PyArray_MultiIterNew (int, ...) 
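    # A minimal sketch of how the iterator API above is commonly used from a
    # hypothetical .pyx module (illustrative only, assuming two float64
    # inputs; cnp.broadcast(a, b) constructs the PyArrayMultiIterObject
    # declared earlier in this file):
    #
    #     cimport numpy as cnp
    #     cnp.import_array()
    #
    #     def broadcast_sum(a, b):
    #         cdef cnp.broadcast it = cnp.broadcast(a, b)
    #         cdef double total = 0.0
    #         while cnp.PyArray_MultiIter_NOTDONE(it):
    #             total += (<double*>cnp.PyArray_MultiIter_DATA(it, 0))[0]
    #             total += (<double*>cnp.PyArray_MultiIter_DATA(it, 1))[0]
    #             cnp.PyArray_MultiIter_NEXT(it)
    #         return total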
+ + int PyArray_PyIntAsInt (object) + npy_intp PyArray_PyIntAsIntp (object) + int PyArray_Broadcast (broadcast) + void PyArray_FillObjectArray (ndarray, object) + int PyArray_FillWithScalar (ndarray, object) + npy_bool PyArray_CheckStrides (int, int, npy_intp, npy_intp, npy_intp *, npy_intp *) + dtype PyArray_DescrNewByteorder (dtype, char) + object PyArray_IterAllButAxis (object, int *) + #object PyArray_CheckFromAny (object, dtype, int, int, int, object) + #object PyArray_FromArray (ndarray, dtype, int) + object PyArray_FromInterface (object) + object PyArray_FromStructInterface (object) + #object PyArray_FromArrayAttr (object, dtype, object) + #NPY_SCALARKIND PyArray_ScalarKind (int, ndarray*) + int PyArray_CanCoerceScalar (int, int, NPY_SCALARKIND) + object PyArray_NewFlagsObject (object) + npy_bool PyArray_CanCastScalar (type, type) + #int PyArray_CompareUCS4 (npy_ucs4 *, npy_ucs4 *, register size_t) + int PyArray_RemoveSmallest (broadcast) + int PyArray_ElementStrides (object) + void PyArray_Item_INCREF (char *, dtype) + void PyArray_Item_XDECREF (char *, dtype) + object PyArray_FieldNames (object) + object PyArray_Transpose (ndarray, PyArray_Dims *) + object PyArray_TakeFrom (ndarray, object, int, ndarray, NPY_CLIPMODE) + object PyArray_PutTo (ndarray, object, object, NPY_CLIPMODE) + object PyArray_PutMask (ndarray, object, object) + object PyArray_Repeat (ndarray, object, int) + object PyArray_Choose (ndarray, object, ndarray, NPY_CLIPMODE) + int PyArray_Sort (ndarray, int, NPY_SORTKIND) + object PyArray_ArgSort (ndarray, int, NPY_SORTKIND) + object PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE, PyObject *) + object PyArray_ArgMax (ndarray, int, ndarray) + object PyArray_ArgMin (ndarray, int, ndarray) + object PyArray_Reshape (ndarray, object) + object PyArray_Newshape (ndarray, PyArray_Dims *, NPY_ORDER) + object PyArray_Squeeze (ndarray) + #object PyArray_View (ndarray, dtype, type) + object PyArray_SwapAxes (ndarray, int, int) + object PyArray_Max (ndarray, int, ndarray) + object PyArray_Min (ndarray, int, ndarray) + object PyArray_Ptp (ndarray, int, ndarray) + object PyArray_Mean (ndarray, int, int, ndarray) + object PyArray_Trace (ndarray, int, int, int, int, ndarray) + object PyArray_Diagonal (ndarray, int, int, int) + object PyArray_Clip (ndarray, object, object, ndarray) + object PyArray_Conjugate (ndarray, ndarray) + object PyArray_Nonzero (ndarray) + object PyArray_Std (ndarray, int, int, ndarray, int) + object PyArray_Sum (ndarray, int, int, ndarray) + object PyArray_CumSum (ndarray, int, int, ndarray) + object PyArray_Prod (ndarray, int, int, ndarray) + object PyArray_CumProd (ndarray, int, int, ndarray) + object PyArray_All (ndarray, int, ndarray) + object PyArray_Any (ndarray, int, ndarray) + object PyArray_Compress (ndarray, object, int, ndarray) + object PyArray_Flatten (ndarray, NPY_ORDER) + object PyArray_Ravel (ndarray, NPY_ORDER) + npy_intp PyArray_MultiplyList (npy_intp *, int) + int PyArray_MultiplyIntList (int *, int) + void * PyArray_GetPtr (ndarray, npy_intp*) + int PyArray_CompareLists (npy_intp *, npy_intp *, int) + #int PyArray_AsCArray (object*, void *, npy_intp *, int, dtype) + #int PyArray_As1D (object*, char **, int *, int) + #int PyArray_As2D (object*, char ***, int *, int *, int) + int PyArray_Free (object, void *) + #int PyArray_Converter (object, object*) + int PyArray_IntpFromSequence (object, npy_intp *, int) + object PyArray_Concatenate (object, int) + object PyArray_InnerProduct (object, object) + object PyArray_MatrixProduct (object, 
object) + object PyArray_CopyAndTranspose (object) + object PyArray_Correlate (object, object, int) + int PyArray_TypestrConvert (int, int) + #int PyArray_DescrConverter (object, dtype*) + #int PyArray_DescrConverter2 (object, dtype*) + int PyArray_IntpConverter (object, PyArray_Dims *) + #int PyArray_BufferConverter (object, chunk) + int PyArray_AxisConverter (object, int *) + int PyArray_BoolConverter (object, npy_bool *) + int PyArray_ByteorderConverter (object, char *) + int PyArray_OrderConverter (object, NPY_ORDER *) + unsigned char PyArray_EquivTypes (dtype, dtype) + #object PyArray_Zeros (int, npy_intp *, dtype, int) + #object PyArray_Empty (int, npy_intp *, dtype, int) + object PyArray_Where (object, object, object) + object PyArray_Arange (double, double, double, int) + #object PyArray_ArangeObj (object, object, object, dtype) + int PyArray_SortkindConverter (object, NPY_SORTKIND *) + object PyArray_LexSort (object, int) + object PyArray_Round (ndarray, int, ndarray) + unsigned char PyArray_EquivTypenums (int, int) + int PyArray_RegisterDataType (dtype) + int PyArray_RegisterCastFunc (dtype, int, PyArray_VectorUnaryFunc *) + int PyArray_RegisterCanCast (dtype, int, NPY_SCALARKIND) + #void PyArray_InitArrFuncs (PyArray_ArrFuncs *) + object PyArray_IntTupleFromIntp (int, npy_intp *) + int PyArray_TypeNumFromName (char *) + int PyArray_ClipmodeConverter (object, NPY_CLIPMODE *) + #int PyArray_OutputConverter (object, ndarray*) + object PyArray_BroadcastToShape (object, npy_intp *, int) + void _PyArray_SigintHandler (int) + void* _PyArray_GetSigintBuf () + #int PyArray_DescrAlignConverter (object, dtype*) + #int PyArray_DescrAlignConverter2 (object, dtype*) + int PyArray_SearchsideConverter (object, void *) + object PyArray_CheckAxis (ndarray, int *, int) + npy_intp PyArray_OverflowMultiplyList (npy_intp *, int) + int PyArray_CompareString (char *, char *, size_t) + int PyArray_SetBaseObject(ndarray, base) # NOTE: steals a reference to base! Use "set_array_base()" instead. + + +# Typedefs that matches the runtime dtype objects in +# the numpy module. + +# The ones that are commented out needs an IFDEF function +# in Cython to enable them only on the right systems. 
+
+ctypedef npy_int8 int8_t
+ctypedef npy_int16 int16_t
+ctypedef npy_int32 int32_t
+ctypedef npy_int64 int64_t
+#ctypedef npy_int96 int96_t
+#ctypedef npy_int128 int128_t
+
+ctypedef npy_uint8 uint8_t
+ctypedef npy_uint16 uint16_t
+ctypedef npy_uint32 uint32_t
+ctypedef npy_uint64 uint64_t
+#ctypedef npy_uint96 uint96_t
+#ctypedef npy_uint128 uint128_t
+
+ctypedef npy_float32 float32_t
+ctypedef npy_float64 float64_t
+#ctypedef npy_float80 float80_t
+#ctypedef npy_float128 float128_t
+
+ctypedef float complex complex64_t
+ctypedef double complex complex128_t
+
+# The int types are mapped a bit surprising --
+# numpy.int corresponds to 'l' and numpy.long to 'q'
+ctypedef npy_long int_t
+ctypedef npy_longlong long_t
+ctypedef npy_longlong longlong_t
+
+ctypedef npy_ulong uint_t
+ctypedef npy_ulonglong ulong_t
+ctypedef npy_ulonglong ulonglong_t
+
+ctypedef npy_intp intp_t
+ctypedef npy_uintp uintp_t
+
+ctypedef npy_double float_t
+ctypedef npy_double double_t
+ctypedef npy_longdouble longdouble_t
+
+ctypedef npy_cfloat cfloat_t
+ctypedef npy_cdouble cdouble_t
+ctypedef npy_clongdouble clongdouble_t
+
+ctypedef npy_cdouble complex_t
+
+cdef inline object PyArray_MultiIterNew1(a):
+    return PyArray_MultiIterNew(1, <void*>a)
+
+cdef inline object PyArray_MultiIterNew2(a, b):
+    return PyArray_MultiIterNew(2, <void*>a, <void*>b)
+
+cdef inline object PyArray_MultiIterNew3(a, b, c):
+    return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*>c)
+
+cdef inline object PyArray_MultiIterNew4(a, b, c, d):
+    return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*>d)
+
+cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
+    return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*>d, <void*>e)
+
+cdef inline tuple PyDataType_SHAPE(dtype d):
+    if PyDataType_HASSUBARRAY(d):
+        return <tuple>d.subarray.shape
+    else:
+        return ()
+
+
+cdef extern from "numpy/ndarrayobject.h":
+    PyTypeObject PyTimedeltaArrType_Type
+    PyTypeObject PyDatetimeArrType_Type
+    ctypedef int64_t npy_timedelta
+    ctypedef int64_t npy_datetime
+
+cdef extern from "numpy/ndarraytypes.h":
+    ctypedef struct PyArray_DatetimeMetaData:
+        NPY_DATETIMEUNIT base
+        int64_t num
+
+cdef extern from "numpy/arrayscalars.h":
+
+    # abstract types
+    ctypedef class numpy.generic [object PyObject]:
+        pass
+    ctypedef class numpy.number [object PyObject]:
+        pass
+    ctypedef class numpy.integer [object PyObject]:
+        pass
+    ctypedef class numpy.signedinteger [object PyObject]:
+        pass
+    ctypedef class numpy.unsignedinteger [object PyObject]:
+        pass
+    ctypedef class numpy.inexact [object PyObject]:
+        pass
+    ctypedef class numpy.floating [object PyObject]:
+        pass
+    ctypedef class numpy.complexfloating [object PyObject]:
+        pass
+    ctypedef class numpy.flexible [object PyObject]:
+        pass
+    ctypedef class numpy.character [object PyObject]:
+        pass
+
+    ctypedef struct PyDatetimeScalarObject:
+        # PyObject_HEAD
+        npy_datetime obval
+        PyArray_DatetimeMetaData obmeta
+
+    ctypedef struct PyTimedeltaScalarObject:
+        # PyObject_HEAD
+        npy_timedelta obval
+        PyArray_DatetimeMetaData obmeta
+
+    ctypedef enum NPY_DATETIMEUNIT:
+        NPY_FR_Y
+        NPY_FR_M
+        NPY_FR_W
+        NPY_FR_D
+        NPY_FR_B
+        NPY_FR_h
+        NPY_FR_m
+        NPY_FR_s
+        NPY_FR_ms
+        NPY_FR_us
+        NPY_FR_ns
+        NPY_FR_ps
+        NPY_FR_fs
+        NPY_FR_as
+
+
+#
+# ufunc API
+#
+
+cdef extern from "numpy/ufuncobject.h":
+
+    ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *, npy_intp *, void *)
+
+    ctypedef class numpy.ufunc [object PyUFuncObject, check_size ignore]:
+        cdef:
+            int nin, nout, nargs
+            int identity
+            PyUFuncGenericFunction *functions
+            void **data
+            int ntypes
+            int check_return
+            char *name
+            char *types
+            char *doc
+            void *ptr
+            PyObject *obj
+            PyObject *userloops
+
+    cdef enum:
+        PyUFunc_Zero
+        PyUFunc_One
+        PyUFunc_None
+        UFUNC_ERR_IGNORE
+        UFUNC_ERR_WARN
+        UFUNC_ERR_RAISE
+        UFUNC_ERR_CALL
+        UFUNC_ERR_PRINT
+        UFUNC_ERR_LOG
+        UFUNC_MASK_DIVIDEBYZERO
+        UFUNC_MASK_OVERFLOW
+        UFUNC_MASK_UNDERFLOW
+        UFUNC_MASK_INVALID
+        UFUNC_SHIFT_DIVIDEBYZERO
+        UFUNC_SHIFT_OVERFLOW
+        UFUNC_SHIFT_UNDERFLOW
+        UFUNC_SHIFT_INVALID
+        UFUNC_FPE_DIVIDEBYZERO
+        UFUNC_FPE_OVERFLOW
+        UFUNC_FPE_UNDERFLOW
+        UFUNC_FPE_INVALID
+        UFUNC_ERR_DEFAULT
+        UFUNC_ERR_DEFAULT2
+
+    object PyUFunc_FromFuncAndData(PyUFuncGenericFunction *,
+          void **, char *, int, int, int, int, char *, char *, int)
+    int PyUFunc_RegisterLoopForType(ufunc, int,
+                                    PyUFuncGenericFunction, int *, void *)
+    void PyUFunc_f_f_As_d_d \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_d_d \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_f_f \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_g_g \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_F_F_As_D_D \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_F_F \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_D_D \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_G_G \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_O_O \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_ff_f_As_dd_d \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_ff_f \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_dd_d \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_gg_g \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_FF_F_As_DD_D \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_DD_D \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_FF_F \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_GG_G \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_OO_O \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_O_O_method \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_OO_O_method \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_On_Om \
+         (char **, npy_intp *, npy_intp *, void *)
+    int PyUFunc_GetPyValues \
+        (char *, int *, int *, PyObject **)
+    int PyUFunc_checkfperr \
+        (int, PyObject *, int *)
+    void PyUFunc_clearfperr()
+    int PyUFunc_getfperr()
+    int PyUFunc_handlefperr \
+        (int, PyObject *, int, int *)
+    int PyUFunc_ReplaceLoopBySignature \
+        (ufunc, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *)
+    object PyUFunc_FromFuncAndDataAndSignature \
+             (PyUFuncGenericFunction *, void **, char *, int, int, int,
+              int, char *, char *, int, char *)
+
+    int _import_umath() except -1
+
+cdef inline void set_array_base(ndarray arr, object base):
+    Py_INCREF(base) # important to do this before stealing the reference below!
+    PyArray_SetBaseObject(arr, base)
+
+cdef inline object get_array_base(ndarray arr):
+    base = PyArray_BASE(arr)
+    if base is NULL:
+        return None
+    return <object>base
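# A minimal sketch of the base-ownership helpers above from a hypothetical
# .pyx module (illustrative only; it wraps a borrowed C buffer and keeps the
# buffer's Python owner alive -- PyArray_SetBaseObject steals the reference
# that set_array_base first INCREFs):
#
#     cimport numpy as cnp
#     cnp.import_array()
#
#     cdef cnp.ndarray wrap1d(double* data, cnp.npy_intp n, object owner):
#         arr = cnp.PyArray_SimpleNewFromData(1, &n, cnp.NPY_FLOAT64, data)
#         cnp.set_array_base(arr, owner)   # owner now outlives the view
#         return arr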
+# Versions of the import_* functions which are more suitable for
+# Cython code.
+cdef inline int import_array() except -1:
+    try:
+        __pyx_import_array()
+    except Exception:
+        raise ImportError("numpy.core.multiarray failed to import")
+
+cdef inline int import_umath() except -1:
+    try:
+        _import_umath()
+    except Exception:
+        raise ImportError("numpy.core.umath failed to import")
+
+cdef inline int import_ufunc() except -1:
+    try:
+        _import_umath()
+    except Exception:
+        raise ImportError("numpy.core.umath failed to import")
+
+
+cdef inline bint is_timedelta64_object(object obj):
+    """
+    Cython equivalent of `isinstance(obj, np.timedelta64)`
+
+    Parameters
+    ----------
+    obj : object
+
+    Returns
+    -------
+    bool
+    """
+    return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type)
+
+
+cdef inline bint is_datetime64_object(object obj):
+    """
+    Cython equivalent of `isinstance(obj, np.datetime64)`
+
+    Parameters
+    ----------
+    obj : object
+
+    Returns
+    -------
+    bool
+    """
+    return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type)
+
+
+cdef inline npy_datetime get_datetime64_value(object obj) nogil:
+    """
+    returns the int64 value underlying scalar numpy datetime64 object
+
+    Note that to interpret this as a datetime, the corresponding unit is
+    also needed. That can be found using `get_datetime64_unit`.
+    """
+    return (<PyDatetimeScalarObject*>obj).obval
+
+
+cdef inline npy_timedelta get_timedelta64_value(object obj) nogil:
+    """
+    returns the int64 value underlying scalar numpy timedelta64 object
+    """
+    return (<PyTimedeltaScalarObject*>obj).obval
+
+
+cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil:
+    """
+    returns the unit part of the dtype for a numpy datetime64 object.
+    """
+    return <NPY_DATETIMEUNIT>(<PyDatetimeScalarObject*>obj).obmeta.base
diff --git a/MLPY/Lib/site-packages/numpy/__init__.pxd b/MLPY/Lib/site-packages/numpy/__init__.pxd
new file mode 100644
index 0000000000000000000000000000000000000000..420ac684b72f63ec97b882dcf088e2802f4a8c7d
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/__init__.pxd
@@ -0,0 +1,1018 @@
+# NumPy static imports for Cython < 3.0
+#
+# If any of the PyArray_* functions are called, import_array must be
+# called first.
+# +# Author: Dag Sverre Seljebotn +# + +DEF _buffer_format_string_len = 255 + +cimport cpython.buffer as pybuf +from cpython.ref cimport Py_INCREF +from cpython.mem cimport PyObject_Malloc, PyObject_Free +from cpython.object cimport PyObject, PyTypeObject +from cpython.buffer cimport PyObject_GetBuffer +from cpython.type cimport type +cimport libc.stdio as stdio + +cdef extern from "Python.h": + ctypedef int Py_intptr_t + bint PyObject_TypeCheck(object obj, PyTypeObject* type) + +cdef extern from "numpy/arrayobject.h": + ctypedef Py_intptr_t npy_intp + ctypedef size_t npy_uintp + + cdef enum NPY_TYPES: + NPY_BOOL + NPY_BYTE + NPY_UBYTE + NPY_SHORT + NPY_USHORT + NPY_INT + NPY_UINT + NPY_LONG + NPY_ULONG + NPY_LONGLONG + NPY_ULONGLONG + NPY_FLOAT + NPY_DOUBLE + NPY_LONGDOUBLE + NPY_CFLOAT + NPY_CDOUBLE + NPY_CLONGDOUBLE + NPY_OBJECT + NPY_STRING + NPY_UNICODE + NPY_VOID + NPY_DATETIME + NPY_TIMEDELTA + NPY_NTYPES + NPY_NOTYPE + + NPY_INT8 + NPY_INT16 + NPY_INT32 + NPY_INT64 + NPY_INT128 + NPY_INT256 + NPY_UINT8 + NPY_UINT16 + NPY_UINT32 + NPY_UINT64 + NPY_UINT128 + NPY_UINT256 + NPY_FLOAT16 + NPY_FLOAT32 + NPY_FLOAT64 + NPY_FLOAT80 + NPY_FLOAT96 + NPY_FLOAT128 + NPY_FLOAT256 + NPY_COMPLEX32 + NPY_COMPLEX64 + NPY_COMPLEX128 + NPY_COMPLEX160 + NPY_COMPLEX192 + NPY_COMPLEX256 + NPY_COMPLEX512 + + NPY_INTP + + ctypedef enum NPY_ORDER: + NPY_ANYORDER + NPY_CORDER + NPY_FORTRANORDER + NPY_KEEPORDER + + ctypedef enum NPY_CASTING: + NPY_NO_CASTING + NPY_EQUIV_CASTING + NPY_SAFE_CASTING + NPY_SAME_KIND_CASTING + NPY_UNSAFE_CASTING + + ctypedef enum NPY_CLIPMODE: + NPY_CLIP + NPY_WRAP + NPY_RAISE + + ctypedef enum NPY_SCALARKIND: + NPY_NOSCALAR, + NPY_BOOL_SCALAR, + NPY_INTPOS_SCALAR, + NPY_INTNEG_SCALAR, + NPY_FLOAT_SCALAR, + NPY_COMPLEX_SCALAR, + NPY_OBJECT_SCALAR + + ctypedef enum NPY_SORTKIND: + NPY_QUICKSORT + NPY_HEAPSORT + NPY_MERGESORT + + ctypedef enum NPY_SEARCHSIDE: + NPY_SEARCHLEFT + NPY_SEARCHRIGHT + + enum: + # DEPRECATED since NumPy 1.7 ! Do not use in new code! + NPY_C_CONTIGUOUS + NPY_F_CONTIGUOUS + NPY_CONTIGUOUS + NPY_FORTRAN + NPY_OWNDATA + NPY_FORCECAST + NPY_ENSURECOPY + NPY_ENSUREARRAY + NPY_ELEMENTSTRIDES + NPY_ALIGNED + NPY_NOTSWAPPED + NPY_WRITEABLE + NPY_UPDATEIFCOPY + NPY_ARR_HAS_DESCR + + NPY_BEHAVED + NPY_BEHAVED_NS + NPY_CARRAY + NPY_CARRAY_RO + NPY_FARRAY + NPY_FARRAY_RO + NPY_DEFAULT + + NPY_IN_ARRAY + NPY_OUT_ARRAY + NPY_INOUT_ARRAY + NPY_IN_FARRAY + NPY_OUT_FARRAY + NPY_INOUT_FARRAY + + NPY_UPDATE_ALL + + enum: + # Added in NumPy 1.7 to replace the deprecated enums above. + NPY_ARRAY_C_CONTIGUOUS + NPY_ARRAY_F_CONTIGUOUS + NPY_ARRAY_OWNDATA + NPY_ARRAY_FORCECAST + NPY_ARRAY_ENSURECOPY + NPY_ARRAY_ENSUREARRAY + NPY_ARRAY_ELEMENTSTRIDES + NPY_ARRAY_ALIGNED + NPY_ARRAY_NOTSWAPPED + NPY_ARRAY_WRITEABLE + NPY_ARRAY_UPDATEIFCOPY + + NPY_ARRAY_BEHAVED + NPY_ARRAY_BEHAVED_NS + NPY_ARRAY_CARRAY + NPY_ARRAY_CARRAY_RO + NPY_ARRAY_FARRAY + NPY_ARRAY_FARRAY_RO + NPY_ARRAY_DEFAULT + + NPY_ARRAY_IN_ARRAY + NPY_ARRAY_OUT_ARRAY + NPY_ARRAY_INOUT_ARRAY + NPY_ARRAY_IN_FARRAY + NPY_ARRAY_OUT_FARRAY + NPY_ARRAY_INOUT_FARRAY + + NPY_ARRAY_UPDATE_ALL + + cdef enum: + NPY_MAXDIMS + + npy_intp NPY_MAX_ELSIZE + + ctypedef void (*PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *) + + ctypedef struct PyArray_ArrayDescr: + # shape is a tuple, but Cython doesn't support "tuple shape" + # inside a non-PyObject declaration, so we have to declare it + # as just a PyObject*. 
+ PyObject* shape + + ctypedef struct PyArray_Descr: + pass + + ctypedef class numpy.dtype [object PyArray_Descr, check_size ignore]: + # Use PyDataType_* macros when possible, however there are no macros + # for accessing some of the fields, so some are defined. + cdef PyTypeObject* typeobj + cdef char kind + cdef char type + # Numpy sometimes mutates this without warning (e.g. it'll + # sometimes change "|" to "<" in shared dtype objects on + # little-endian machines). If this matters to you, use + # PyArray_IsNativeByteOrder(dtype.byteorder) instead of + # directly accessing this field. + cdef char byteorder + cdef char flags + cdef int type_num + cdef int itemsize "elsize" + cdef int alignment + cdef object fields + cdef tuple names + # Use PyDataType_HASSUBARRAY to test whether this field is + # valid (the pointer can be NULL). Most users should access + # this field via the inline helper method PyDataType_SHAPE. + cdef PyArray_ArrayDescr* subarray + + ctypedef class numpy.flatiter [object PyArrayIterObject, check_size ignore]: + # Use through macros + pass + + ctypedef class numpy.broadcast [object PyArrayMultiIterObject, check_size ignore]: + cdef int numiter + cdef npy_intp size, index + cdef int nd + cdef npy_intp *dimensions + cdef void **iters + + ctypedef struct PyArrayObject: + # For use in situations where ndarray can't replace PyArrayObject*, + # like PyArrayObject**. + pass + + ctypedef class numpy.ndarray [object PyArrayObject, check_size ignore]: + cdef __cythonbufferdefaults__ = {"mode": "strided"} + + cdef: + # Only taking a few of the most commonly used and stable fields. + # One should use PyArray_* macros instead to access the C fields. + char *data + int ndim "nd" + npy_intp *shape "dimensions" + npy_intp *strides + dtype descr # deprecated since NumPy 1.7 ! + PyObject* base # NOT PUBLIC, DO NOT USE ! 
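+        # Illustrative note (not from the NumPy sources): prefer the
+        # PyArray_* macros declared below over these raw fields, e.g.
+        #
+        #     cdef double* buf = <double*>PyArray_DATA(arr)
+        #     cdef npy_intp n = PyArray_DIM(arr, 0)
+        #
+        # rather than touching arr.data or arr.shape directly.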
+ + + + ctypedef unsigned char npy_bool + + ctypedef signed char npy_byte + ctypedef signed short npy_short + ctypedef signed int npy_int + ctypedef signed long npy_long + ctypedef signed long long npy_longlong + + ctypedef unsigned char npy_ubyte + ctypedef unsigned short npy_ushort + ctypedef unsigned int npy_uint + ctypedef unsigned long npy_ulong + ctypedef unsigned long long npy_ulonglong + + ctypedef float npy_float + ctypedef double npy_double + ctypedef long double npy_longdouble + + ctypedef signed char npy_int8 + ctypedef signed short npy_int16 + ctypedef signed int npy_int32 + ctypedef signed long long npy_int64 + ctypedef signed long long npy_int96 + ctypedef signed long long npy_int128 + + ctypedef unsigned char npy_uint8 + ctypedef unsigned short npy_uint16 + ctypedef unsigned int npy_uint32 + ctypedef unsigned long long npy_uint64 + ctypedef unsigned long long npy_uint96 + ctypedef unsigned long long npy_uint128 + + ctypedef float npy_float32 + ctypedef double npy_float64 + ctypedef long double npy_float80 + ctypedef long double npy_float96 + ctypedef long double npy_float128 + + ctypedef struct npy_cfloat: + float real + float imag + + ctypedef struct npy_cdouble: + double real + double imag + + ctypedef struct npy_clongdouble: + long double real + long double imag + + ctypedef struct npy_complex64: + float real + float imag + + ctypedef struct npy_complex128: + double real + double imag + + ctypedef struct npy_complex160: + long double real + long double imag + + ctypedef struct npy_complex192: + long double real + long double imag + + ctypedef struct npy_complex256: + long double real + long double imag + + ctypedef struct PyArray_Dims: + npy_intp *ptr + int len + + int _import_array() except -1 + # A second definition so _import_array isn't marked as used when we use it here. + # Do not use - subject to change any time. + int __pyx_import_array "_import_array"() except -1 + + # + # Macros from ndarrayobject.h + # + bint PyArray_CHKFLAGS(ndarray m, int flags) nogil + bint PyArray_IS_C_CONTIGUOUS(ndarray arr) nogil + bint PyArray_IS_F_CONTIGUOUS(ndarray arr) nogil + bint PyArray_ISCONTIGUOUS(ndarray m) nogil + bint PyArray_ISWRITEABLE(ndarray m) nogil + bint PyArray_ISALIGNED(ndarray m) nogil + + int PyArray_NDIM(ndarray) nogil + bint PyArray_ISONESEGMENT(ndarray) nogil + bint PyArray_ISFORTRAN(ndarray) nogil + int PyArray_FORTRANIF(ndarray) nogil + + void* PyArray_DATA(ndarray) nogil + char* PyArray_BYTES(ndarray) nogil + + npy_intp* PyArray_DIMS(ndarray) nogil + npy_intp* PyArray_STRIDES(ndarray) nogil + npy_intp PyArray_DIM(ndarray, size_t) nogil + npy_intp PyArray_STRIDE(ndarray, size_t) nogil + + PyObject *PyArray_BASE(ndarray) nogil # returns borrowed reference! + PyArray_Descr *PyArray_DESCR(ndarray) nogil # returns borrowed reference to dtype! 
+ int PyArray_FLAGS(ndarray) nogil + npy_intp PyArray_ITEMSIZE(ndarray) nogil + int PyArray_TYPE(ndarray arr) nogil + + object PyArray_GETITEM(ndarray arr, void *itemptr) + int PyArray_SETITEM(ndarray arr, void *itemptr, object obj) + + bint PyTypeNum_ISBOOL(int) nogil + bint PyTypeNum_ISUNSIGNED(int) nogil + bint PyTypeNum_ISSIGNED(int) nogil + bint PyTypeNum_ISINTEGER(int) nogil + bint PyTypeNum_ISFLOAT(int) nogil + bint PyTypeNum_ISNUMBER(int) nogil + bint PyTypeNum_ISSTRING(int) nogil + bint PyTypeNum_ISCOMPLEX(int) nogil + bint PyTypeNum_ISPYTHON(int) nogil + bint PyTypeNum_ISFLEXIBLE(int) nogil + bint PyTypeNum_ISUSERDEF(int) nogil + bint PyTypeNum_ISEXTENDED(int) nogil + bint PyTypeNum_ISOBJECT(int) nogil + + bint PyDataType_ISBOOL(dtype) nogil + bint PyDataType_ISUNSIGNED(dtype) nogil + bint PyDataType_ISSIGNED(dtype) nogil + bint PyDataType_ISINTEGER(dtype) nogil + bint PyDataType_ISFLOAT(dtype) nogil + bint PyDataType_ISNUMBER(dtype) nogil + bint PyDataType_ISSTRING(dtype) nogil + bint PyDataType_ISCOMPLEX(dtype) nogil + bint PyDataType_ISPYTHON(dtype) nogil + bint PyDataType_ISFLEXIBLE(dtype) nogil + bint PyDataType_ISUSERDEF(dtype) nogil + bint PyDataType_ISEXTENDED(dtype) nogil + bint PyDataType_ISOBJECT(dtype) nogil + bint PyDataType_HASFIELDS(dtype) nogil + bint PyDataType_HASSUBARRAY(dtype) nogil + + bint PyArray_ISBOOL(ndarray) nogil + bint PyArray_ISUNSIGNED(ndarray) nogil + bint PyArray_ISSIGNED(ndarray) nogil + bint PyArray_ISINTEGER(ndarray) nogil + bint PyArray_ISFLOAT(ndarray) nogil + bint PyArray_ISNUMBER(ndarray) nogil + bint PyArray_ISSTRING(ndarray) nogil + bint PyArray_ISCOMPLEX(ndarray) nogil + bint PyArray_ISPYTHON(ndarray) nogil + bint PyArray_ISFLEXIBLE(ndarray) nogil + bint PyArray_ISUSERDEF(ndarray) nogil + bint PyArray_ISEXTENDED(ndarray) nogil + bint PyArray_ISOBJECT(ndarray) nogil + bint PyArray_HASFIELDS(ndarray) nogil + + bint PyArray_ISVARIABLE(ndarray) nogil + + bint PyArray_SAFEALIGNEDCOPY(ndarray) nogil + bint PyArray_ISNBO(char) nogil # works on ndarray.byteorder + bint PyArray_IsNativeByteOrder(char) nogil # works on ndarray.byteorder + bint PyArray_ISNOTSWAPPED(ndarray) nogil + bint PyArray_ISBYTESWAPPED(ndarray) nogil + + bint PyArray_FLAGSWAP(ndarray, int) nogil + + bint PyArray_ISCARRAY(ndarray) nogil + bint PyArray_ISCARRAY_RO(ndarray) nogil + bint PyArray_ISFARRAY(ndarray) nogil + bint PyArray_ISFARRAY_RO(ndarray) nogil + bint PyArray_ISBEHAVED(ndarray) nogil + bint PyArray_ISBEHAVED_RO(ndarray) nogil + + + bint PyDataType_ISNOTSWAPPED(dtype) nogil + bint PyDataType_ISBYTESWAPPED(dtype) nogil + + bint PyArray_DescrCheck(object) + + bint PyArray_Check(object) + bint PyArray_CheckExact(object) + + # Cannot be supported due to out arg: + # bint PyArray_HasArrayInterfaceType(object, dtype, object, object&) + # bint PyArray_HasArrayInterface(op, out) + + + bint PyArray_IsZeroDim(object) + # Cannot be supported due to ## ## in macro: + # bint PyArray_IsScalar(object, verbatim work) + bint PyArray_CheckScalar(object) + bint PyArray_IsPythonNumber(object) + bint PyArray_IsPythonScalar(object) + bint PyArray_IsAnyScalar(object) + bint PyArray_CheckAnyScalar(object) + + ndarray PyArray_GETCONTIGUOUS(ndarray) + bint PyArray_SAMESHAPE(ndarray, ndarray) nogil + npy_intp PyArray_SIZE(ndarray) nogil + npy_intp PyArray_NBYTES(ndarray) nogil + + object PyArray_FROM_O(object) + object PyArray_FROM_OF(object m, int flags) + object PyArray_FROM_OT(object m, int type) + object PyArray_FROM_OTF(object m, int type, int flags) + object PyArray_FROMANY(object m, 
int type, int min, int max, int flags) + object PyArray_ZEROS(int nd, npy_intp* dims, int type, int fortran) + object PyArray_EMPTY(int nd, npy_intp* dims, int type, int fortran) + void PyArray_FILLWBYTE(object, int val) + npy_intp PyArray_REFCOUNT(object) + object PyArray_ContiguousFromAny(op, int, int min_depth, int max_depth) + unsigned char PyArray_EquivArrTypes(ndarray a1, ndarray a2) + bint PyArray_EquivByteorders(int b1, int b2) nogil + object PyArray_SimpleNew(int nd, npy_intp* dims, int typenum) + object PyArray_SimpleNewFromData(int nd, npy_intp* dims, int typenum, void* data) + #object PyArray_SimpleNewFromDescr(int nd, npy_intp* dims, dtype descr) + object PyArray_ToScalar(void* data, ndarray arr) + + void* PyArray_GETPTR1(ndarray m, npy_intp i) nogil + void* PyArray_GETPTR2(ndarray m, npy_intp i, npy_intp j) nogil + void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k) nogil + void* PyArray_GETPTR4(ndarray m, npy_intp i, npy_intp j, npy_intp k, npy_intp l) nogil + + void PyArray_XDECREF_ERR(ndarray) + # Cannot be supported due to out arg + # void PyArray_DESCR_REPLACE(descr) + + + object PyArray_Copy(ndarray) + object PyArray_FromObject(object op, int type, int min_depth, int max_depth) + object PyArray_ContiguousFromObject(object op, int type, int min_depth, int max_depth) + object PyArray_CopyFromObject(object op, int type, int min_depth, int max_depth) + + object PyArray_Cast(ndarray mp, int type_num) + object PyArray_Take(ndarray ap, object items, int axis) + object PyArray_Put(ndarray ap, object items, object values) + + void PyArray_ITER_RESET(flatiter it) nogil + void PyArray_ITER_NEXT(flatiter it) nogil + void PyArray_ITER_GOTO(flatiter it, npy_intp* destination) nogil + void PyArray_ITER_GOTO1D(flatiter it, npy_intp ind) nogil + void* PyArray_ITER_DATA(flatiter it) nogil + bint PyArray_ITER_NOTDONE(flatiter it) nogil + + void PyArray_MultiIter_RESET(broadcast multi) nogil + void PyArray_MultiIter_NEXT(broadcast multi) nogil + void PyArray_MultiIter_GOTO(broadcast multi, npy_intp dest) nogil + void PyArray_MultiIter_GOTO1D(broadcast multi, npy_intp ind) nogil + void* PyArray_MultiIter_DATA(broadcast multi, npy_intp i) nogil + void PyArray_MultiIter_NEXTi(broadcast multi, npy_intp i) nogil + bint PyArray_MultiIter_NOTDONE(broadcast multi) nogil + + # Functions from __multiarray_api.h + + # Functions taking dtype and returning object/ndarray are disabled + # for now as they steal dtype references. I'm conservative and disable + # more than is probably needed until it can be checked further. 
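+    #
+    # Illustrative note (not from the NumPy sources): a "stealing" call
+    # consumes the caller's reference, so safe use from Cython pays an
+    # extra reference up front (the callable name here is hypothetical):
+    #
+    #     Py_INCREF(some_dtype)                  # reference the callee will consume
+    #     out = SomeDescrStealingCall(some_dtype)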
+ int PyArray_SetNumericOps (object) + object PyArray_GetNumericOps () + int PyArray_INCREF (ndarray) + int PyArray_XDECREF (ndarray) + void PyArray_SetStringFunction (object, int) + dtype PyArray_DescrFromType (int) + object PyArray_TypeObjectFromType (int) + char * PyArray_Zero (ndarray) + char * PyArray_One (ndarray) + #object PyArray_CastToType (ndarray, dtype, int) + int PyArray_CastTo (ndarray, ndarray) + int PyArray_CastAnyTo (ndarray, ndarray) + int PyArray_CanCastSafely (int, int) + npy_bool PyArray_CanCastTo (dtype, dtype) + int PyArray_ObjectType (object, int) + dtype PyArray_DescrFromObject (object, dtype) + #ndarray* PyArray_ConvertToCommonType (object, int *) + dtype PyArray_DescrFromScalar (object) + dtype PyArray_DescrFromTypeObject (object) + npy_intp PyArray_Size (object) + #object PyArray_Scalar (void *, dtype, object) + #object PyArray_FromScalar (object, dtype) + void PyArray_ScalarAsCtype (object, void *) + #int PyArray_CastScalarToCtype (object, void *, dtype) + #int PyArray_CastScalarDirect (object, dtype, void *, int) + object PyArray_ScalarFromObject (object) + #PyArray_VectorUnaryFunc * PyArray_GetCastFunc (dtype, int) + object PyArray_FromDims (int, int *, int) + #object PyArray_FromDimsAndDataAndDescr (int, int *, dtype, char *) + #object PyArray_FromAny (object, dtype, int, int, int, object) + object PyArray_EnsureArray (object) + object PyArray_EnsureAnyArray (object) + #object PyArray_FromFile (stdio.FILE *, dtype, npy_intp, char *) + #object PyArray_FromString (char *, npy_intp, dtype, npy_intp, char *) + #object PyArray_FromBuffer (object, dtype, npy_intp, npy_intp) + #object PyArray_FromIter (object, dtype, npy_intp) + object PyArray_Return (ndarray) + #object PyArray_GetField (ndarray, dtype, int) + #int PyArray_SetField (ndarray, dtype, int, object) + object PyArray_Byteswap (ndarray, npy_bool) + object PyArray_Resize (ndarray, PyArray_Dims *, int, NPY_ORDER) + int PyArray_MoveInto (ndarray, ndarray) + int PyArray_CopyInto (ndarray, ndarray) + int PyArray_CopyAnyInto (ndarray, ndarray) + int PyArray_CopyObject (ndarray, object) + object PyArray_NewCopy (ndarray, NPY_ORDER) + object PyArray_ToList (ndarray) + object PyArray_ToString (ndarray, NPY_ORDER) + int PyArray_ToFile (ndarray, stdio.FILE *, char *, char *) + int PyArray_Dump (object, object, int) + object PyArray_Dumps (object, int) + int PyArray_ValidType (int) + void PyArray_UpdateFlags (ndarray, int) + object PyArray_New (type, int, npy_intp *, int, npy_intp *, void *, int, int, object) + #object PyArray_NewFromDescr (type, dtype, int, npy_intp *, npy_intp *, void *, int, object) + #dtype PyArray_DescrNew (dtype) + dtype PyArray_DescrNewFromType (int) + double PyArray_GetPriority (object, double) + object PyArray_IterNew (object) + object PyArray_MultiIterNew (int, ...) 
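+    # Illustrative note (not from the NumPy sources): the varargs form of
+    # PyArray_MultiIterNew is awkward to call from Cython, which is why the
+    # fixed-arity inline wrappers PyArray_MultiIterNew1..5 are defined later
+    # in this file, e.g. `multi = PyArray_MultiIterNew2(a, b)` to broadcast
+    # two arrays together.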
+ + int PyArray_PyIntAsInt (object) + npy_intp PyArray_PyIntAsIntp (object) + int PyArray_Broadcast (broadcast) + void PyArray_FillObjectArray (ndarray, object) + int PyArray_FillWithScalar (ndarray, object) + npy_bool PyArray_CheckStrides (int, int, npy_intp, npy_intp, npy_intp *, npy_intp *) + dtype PyArray_DescrNewByteorder (dtype, char) + object PyArray_IterAllButAxis (object, int *) + #object PyArray_CheckFromAny (object, dtype, int, int, int, object) + #object PyArray_FromArray (ndarray, dtype, int) + object PyArray_FromInterface (object) + object PyArray_FromStructInterface (object) + #object PyArray_FromArrayAttr (object, dtype, object) + #NPY_SCALARKIND PyArray_ScalarKind (int, ndarray*) + int PyArray_CanCoerceScalar (int, int, NPY_SCALARKIND) + object PyArray_NewFlagsObject (object) + npy_bool PyArray_CanCastScalar (type, type) + #int PyArray_CompareUCS4 (npy_ucs4 *, npy_ucs4 *, register size_t) + int PyArray_RemoveSmallest (broadcast) + int PyArray_ElementStrides (object) + void PyArray_Item_INCREF (char *, dtype) + void PyArray_Item_XDECREF (char *, dtype) + object PyArray_FieldNames (object) + object PyArray_Transpose (ndarray, PyArray_Dims *) + object PyArray_TakeFrom (ndarray, object, int, ndarray, NPY_CLIPMODE) + object PyArray_PutTo (ndarray, object, object, NPY_CLIPMODE) + object PyArray_PutMask (ndarray, object, object) + object PyArray_Repeat (ndarray, object, int) + object PyArray_Choose (ndarray, object, ndarray, NPY_CLIPMODE) + int PyArray_Sort (ndarray, int, NPY_SORTKIND) + object PyArray_ArgSort (ndarray, int, NPY_SORTKIND) + object PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE, PyObject *) + object PyArray_ArgMax (ndarray, int, ndarray) + object PyArray_ArgMin (ndarray, int, ndarray) + object PyArray_Reshape (ndarray, object) + object PyArray_Newshape (ndarray, PyArray_Dims *, NPY_ORDER) + object PyArray_Squeeze (ndarray) + #object PyArray_View (ndarray, dtype, type) + object PyArray_SwapAxes (ndarray, int, int) + object PyArray_Max (ndarray, int, ndarray) + object PyArray_Min (ndarray, int, ndarray) + object PyArray_Ptp (ndarray, int, ndarray) + object PyArray_Mean (ndarray, int, int, ndarray) + object PyArray_Trace (ndarray, int, int, int, int, ndarray) + object PyArray_Diagonal (ndarray, int, int, int) + object PyArray_Clip (ndarray, object, object, ndarray) + object PyArray_Conjugate (ndarray, ndarray) + object PyArray_Nonzero (ndarray) + object PyArray_Std (ndarray, int, int, ndarray, int) + object PyArray_Sum (ndarray, int, int, ndarray) + object PyArray_CumSum (ndarray, int, int, ndarray) + object PyArray_Prod (ndarray, int, int, ndarray) + object PyArray_CumProd (ndarray, int, int, ndarray) + object PyArray_All (ndarray, int, ndarray) + object PyArray_Any (ndarray, int, ndarray) + object PyArray_Compress (ndarray, object, int, ndarray) + object PyArray_Flatten (ndarray, NPY_ORDER) + object PyArray_Ravel (ndarray, NPY_ORDER) + npy_intp PyArray_MultiplyList (npy_intp *, int) + int PyArray_MultiplyIntList (int *, int) + void * PyArray_GetPtr (ndarray, npy_intp*) + int PyArray_CompareLists (npy_intp *, npy_intp *, int) + #int PyArray_AsCArray (object*, void *, npy_intp *, int, dtype) + #int PyArray_As1D (object*, char **, int *, int) + #int PyArray_As2D (object*, char ***, int *, int *, int) + int PyArray_Free (object, void *) + #int PyArray_Converter (object, object*) + int PyArray_IntpFromSequence (object, npy_intp *, int) + object PyArray_Concatenate (object, int) + object PyArray_InnerProduct (object, object) + object PyArray_MatrixProduct (object, 
object) + object PyArray_CopyAndTranspose (object) + object PyArray_Correlate (object, object, int) + int PyArray_TypestrConvert (int, int) + #int PyArray_DescrConverter (object, dtype*) + #int PyArray_DescrConverter2 (object, dtype*) + int PyArray_IntpConverter (object, PyArray_Dims *) + #int PyArray_BufferConverter (object, chunk) + int PyArray_AxisConverter (object, int *) + int PyArray_BoolConverter (object, npy_bool *) + int PyArray_ByteorderConverter (object, char *) + int PyArray_OrderConverter (object, NPY_ORDER *) + unsigned char PyArray_EquivTypes (dtype, dtype) + #object PyArray_Zeros (int, npy_intp *, dtype, int) + #object PyArray_Empty (int, npy_intp *, dtype, int) + object PyArray_Where (object, object, object) + object PyArray_Arange (double, double, double, int) + #object PyArray_ArangeObj (object, object, object, dtype) + int PyArray_SortkindConverter (object, NPY_SORTKIND *) + object PyArray_LexSort (object, int) + object PyArray_Round (ndarray, int, ndarray) + unsigned char PyArray_EquivTypenums (int, int) + int PyArray_RegisterDataType (dtype) + int PyArray_RegisterCastFunc (dtype, int, PyArray_VectorUnaryFunc *) + int PyArray_RegisterCanCast (dtype, int, NPY_SCALARKIND) + #void PyArray_InitArrFuncs (PyArray_ArrFuncs *) + object PyArray_IntTupleFromIntp (int, npy_intp *) + int PyArray_TypeNumFromName (char *) + int PyArray_ClipmodeConverter (object, NPY_CLIPMODE *) + #int PyArray_OutputConverter (object, ndarray*) + object PyArray_BroadcastToShape (object, npy_intp *, int) + void _PyArray_SigintHandler (int) + void* _PyArray_GetSigintBuf () + #int PyArray_DescrAlignConverter (object, dtype*) + #int PyArray_DescrAlignConverter2 (object, dtype*) + int PyArray_SearchsideConverter (object, void *) + object PyArray_CheckAxis (ndarray, int *, int) + npy_intp PyArray_OverflowMultiplyList (npy_intp *, int) + int PyArray_CompareString (char *, char *, size_t) + int PyArray_SetBaseObject(ndarray, base) # NOTE: steals a reference to base! Use "set_array_base()" instead. + + +# Typedefs that matches the runtime dtype objects in +# the numpy module. + +# The ones that are commented out needs an IFDEF function +# in Cython to enable them only on the right systems. 
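+#
+# Illustrative note (not from the NumPy sources): these typedefs are what
+# buffer-typed arguments in user code resolve to, e.g.:
+#
+#     def scale(cnp.ndarray[cnp.float64_t, ndim=1] x, double k):
+#         cdef Py_ssize_t i
+#         for i in range(x.shape[0]):
+#             x[i] *= k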
+
+ctypedef npy_int8 int8_t
+ctypedef npy_int16 int16_t
+ctypedef npy_int32 int32_t
+ctypedef npy_int64 int64_t
+#ctypedef npy_int96 int96_t
+#ctypedef npy_int128 int128_t
+
+ctypedef npy_uint8 uint8_t
+ctypedef npy_uint16 uint16_t
+ctypedef npy_uint32 uint32_t
+ctypedef npy_uint64 uint64_t
+#ctypedef npy_uint96 uint96_t
+#ctypedef npy_uint128 uint128_t
+
+ctypedef npy_float32 float32_t
+ctypedef npy_float64 float64_t
+#ctypedef npy_float80 float80_t
+#ctypedef npy_float128 float128_t
+
+ctypedef float complex complex64_t
+ctypedef double complex complex128_t
+
+# The int types are mapped a bit surprisingly --
+# numpy.int corresponds to 'l' and numpy.long to 'q'
+ctypedef npy_long int_t
+ctypedef npy_longlong long_t
+ctypedef npy_longlong longlong_t
+
+ctypedef npy_ulong uint_t
+ctypedef npy_ulonglong ulong_t
+ctypedef npy_ulonglong ulonglong_t
+
+ctypedef npy_intp intp_t
+ctypedef npy_uintp uintp_t
+
+ctypedef npy_double float_t
+ctypedef npy_double double_t
+ctypedef npy_longdouble longdouble_t
+
+ctypedef npy_cfloat cfloat_t
+ctypedef npy_cdouble cdouble_t
+ctypedef npy_clongdouble clongdouble_t
+
+ctypedef npy_cdouble complex_t
+
+cdef inline object PyArray_MultiIterNew1(a):
+    return PyArray_MultiIterNew(1, <void*>a)
+
+cdef inline object PyArray_MultiIterNew2(a, b):
+    return PyArray_MultiIterNew(2, <void*>a, <void*>b)
+
+cdef inline object PyArray_MultiIterNew3(a, b, c):
+    return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*>c)
+
+cdef inline object PyArray_MultiIterNew4(a, b, c, d):
+    return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*>d)
+
+cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
+    return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*>d, <void*>e)
+
+cdef inline tuple PyDataType_SHAPE(dtype d):
+    if PyDataType_HASSUBARRAY(d):
+        return <tuple>d.subarray.shape
+    else:
+        return ()
+
+
+cdef extern from "numpy/ndarrayobject.h":
+    PyTypeObject PyTimedeltaArrType_Type
+    PyTypeObject PyDatetimeArrType_Type
+    ctypedef int64_t npy_timedelta
+    ctypedef int64_t npy_datetime
+
+cdef extern from "numpy/ndarraytypes.h":
+    ctypedef struct PyArray_DatetimeMetaData:
+        NPY_DATETIMEUNIT base
+        int64_t num
+
+cdef extern from "numpy/arrayscalars.h":
+
+    # abstract types
+    ctypedef class numpy.generic [object PyObject]:
+        pass
+    ctypedef class numpy.number [object PyObject]:
+        pass
+    ctypedef class numpy.integer [object PyObject]:
+        pass
+    ctypedef class numpy.signedinteger [object PyObject]:
+        pass
+    ctypedef class numpy.unsignedinteger [object PyObject]:
+        pass
+    ctypedef class numpy.inexact [object PyObject]:
+        pass
+    ctypedef class numpy.floating [object PyObject]:
+        pass
+    ctypedef class numpy.complexfloating [object PyObject]:
+        pass
+    ctypedef class numpy.flexible [object PyObject]:
+        pass
+    ctypedef class numpy.character [object PyObject]:
+        pass
+
+    ctypedef struct PyDatetimeScalarObject:
+        # PyObject_HEAD
+        npy_datetime obval
+        PyArray_DatetimeMetaData obmeta
+
+    ctypedef struct PyTimedeltaScalarObject:
+        # PyObject_HEAD
+        npy_timedelta obval
+        PyArray_DatetimeMetaData obmeta
+
+    ctypedef enum NPY_DATETIMEUNIT:
+        NPY_FR_Y
+        NPY_FR_M
+        NPY_FR_W
+        NPY_FR_D
+        NPY_FR_B
+        NPY_FR_h
+        NPY_FR_m
+        NPY_FR_s
+        NPY_FR_ms
+        NPY_FR_us
+        NPY_FR_ns
+        NPY_FR_ps
+        NPY_FR_fs
+        NPY_FR_as
+
+
+#
+# ufunc API
+#
+
+cdef extern from "numpy/ufuncobject.h":
+
+    ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *, npy_intp *, void *)
+
+    ctypedef class numpy.ufunc [object PyUFuncObject, check_size ignore]:
+        cdef:
+            int nin, nout, nargs
+            int identity
+            PyUFuncGenericFunction *functions
+            void **data
+            int ntypes
+            int check_return
+            char *name
+            char *types
+            char *doc
+        void *ptr
+        PyObject *obj
+        PyObject *userloops
+
+    cdef enum:
+        PyUFunc_Zero
+        PyUFunc_One
+        PyUFunc_None
+        UFUNC_ERR_IGNORE
+        UFUNC_ERR_WARN
+        UFUNC_ERR_RAISE
+        UFUNC_ERR_CALL
+        UFUNC_ERR_PRINT
+        UFUNC_ERR_LOG
+        UFUNC_MASK_DIVIDEBYZERO
+        UFUNC_MASK_OVERFLOW
+        UFUNC_MASK_UNDERFLOW
+        UFUNC_MASK_INVALID
+        UFUNC_SHIFT_DIVIDEBYZERO
+        UFUNC_SHIFT_OVERFLOW
+        UFUNC_SHIFT_UNDERFLOW
+        UFUNC_SHIFT_INVALID
+        UFUNC_FPE_DIVIDEBYZERO
+        UFUNC_FPE_OVERFLOW
+        UFUNC_FPE_UNDERFLOW
+        UFUNC_FPE_INVALID
+        UFUNC_ERR_DEFAULT
+        UFUNC_ERR_DEFAULT2
+
+    object PyUFunc_FromFuncAndData(PyUFuncGenericFunction *,
+          void **, char *, int, int, int, int, char *, char *, int)
+    int PyUFunc_RegisterLoopForType(ufunc, int,
+                                    PyUFuncGenericFunction, int *, void *)
+    void PyUFunc_f_f_As_d_d \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_d_d \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_f_f \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_g_g \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_F_F_As_D_D \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_F_F \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_D_D \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_G_G \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_O_O \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_ff_f_As_dd_d \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_ff_f \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_dd_d \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_gg_g \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_FF_F_As_DD_D \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_DD_D \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_FF_F \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_GG_G \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_OO_O \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_O_O_method \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_OO_O_method \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_On_Om \
+         (char **, npy_intp *, npy_intp *, void *)
+    int PyUFunc_GetPyValues \
+        (char *, int *, int *, PyObject **)
+    int PyUFunc_checkfperr \
+        (int, PyObject *, int *)
+    void PyUFunc_clearfperr()
+    int PyUFunc_getfperr()
+    int PyUFunc_handlefperr \
+        (int, PyObject *, int, int *)
+    int PyUFunc_ReplaceLoopBySignature \
+        (ufunc, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *)
+    object PyUFunc_FromFuncAndDataAndSignature \
+             (PyUFuncGenericFunction *, void **, char *, int, int, int,
+              int, char *, char *, int, char *)
+
+    int _import_umath() except -1
+
+cdef inline void set_array_base(ndarray arr, object base):
+    Py_INCREF(base) # important to do this before stealing the reference below!
+    PyArray_SetBaseObject(arr, base)
+
+cdef inline object get_array_base(ndarray arr):
+    base = PyArray_BASE(arr)
+    if base is NULL:
+        return None
+    return <object>base
+
+# Versions of the import_* functions which are more suitable for
+# Cython code.
+cdef inline int import_array() except -1:
+    try:
+        __pyx_import_array()
+    except Exception:
+        raise ImportError("numpy.core.multiarray failed to import")
+
+cdef inline int import_umath() except -1:
+    try:
+        _import_umath()
+    except Exception:
+        raise ImportError("numpy.core.umath failed to import")
+
+cdef inline int import_ufunc() except -1:
+    try:
+        _import_umath()
+    except Exception:
+        raise ImportError("numpy.core.umath failed to import")
+
+cdef extern from *:
+    # Leave a marker that the NumPy declarations came from this file
+    # See https://github.com/cython/cython/issues/3573
+    """
+    /* NumPy API declarations from "numpy/__init__.pxd" */
+    """
+
+
+cdef inline bint is_timedelta64_object(object obj):
+    """
+    Cython equivalent of `isinstance(obj, np.timedelta64)`
+
+    Parameters
+    ----------
+    obj : object
+
+    Returns
+    -------
+    bool
+    """
+    return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type)
+
+
+cdef inline bint is_datetime64_object(object obj):
+    """
+    Cython equivalent of `isinstance(obj, np.datetime64)`
+
+    Parameters
+    ----------
+    obj : object
+
+    Returns
+    -------
+    bool
+    """
+    return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type)
+
+
+cdef inline npy_datetime get_datetime64_value(object obj) nogil:
+    """
+    returns the int64 value underlying scalar numpy datetime64 object
+
+    Note that to interpret this as a datetime, the corresponding unit is
+    also needed. That can be found using `get_datetime64_unit`.
+    """
+    return (<PyDatetimeScalarObject*>obj).obval
+
+
+cdef inline npy_timedelta get_timedelta64_value(object obj) nogil:
+    """
+    returns the int64 value underlying scalar numpy timedelta64 object
+    """
+    return (<PyTimedeltaScalarObject*>obj).obval
+
+
+cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil:
+    """
+    returns the unit part of the dtype for a numpy datetime64 object.
+    """
+    return (<PyDatetimeScalarObject*>obj).obmeta.base
diff --git a/MLPY/Lib/site-packages/numpy/__init__.py b/MLPY/Lib/site-packages/numpy/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..dab3a145cfe6ea03dbd4e676be263af9eeb97e09
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/__init__.py
@@ -0,0 +1,429 @@
+"""
+NumPy
+=====
+
+Provides
+  1. An array object of arbitrary homogeneous items
+  2. Fast mathematical operations over arrays
+  3. Linear Algebra, Fourier Transforms, Random Number Generation
+
+How to use the documentation
+----------------------------
+Documentation is available in two forms: docstrings provided
+with the code, and a loose standing reference guide, available from
+`the NumPy homepage <https://numpy.org>`_.
+
+We recommend exploring the docstrings using
+`IPython <https://ipython.org>`_, an advanced Python shell with
+TAB-completion and introspection capabilities. See below for further
+instructions.
+
+The docstring examples assume that `numpy` has been imported as `np`::
+
+  >>> import numpy as np
+
+Code snippets are indicated by three greater-than signs::
+
+  >>> x = 42
+  >>> x = x + 1
+
+Use the built-in ``help`` function to view a function's docstring::
+
+  >>> help(np.sort)
+  ... # doctest: +SKIP
+
+For some objects, ``np.info(obj)`` may provide additional help. This is
+particularly true if you see the line "Help on ufunc object:" at the top
+of the help() page. Ufuncs are implemented in C, not Python, for speed.
+The native Python help() does not know how to view their help, but our
+np.info() function does.
+
+To search for documents containing a keyword, do::
+
+  >>> np.lookfor('keyword')
+  ... # doctest: +SKIP
+
+General-purpose documents like a glossary and help on the basic concepts
+of numpy are available under the ``doc`` sub-module::
+
+  >>> from numpy import doc
+  >>> help(doc)
+  ... # doctest: +SKIP
+
+Available subpackages
+---------------------
+doc
+    Topical documentation on broadcasting, indexing, etc.
+lib
+    Basic functions used by several sub-packages.
+random
+    Core Random Tools
+linalg
+    Core Linear Algebra Tools
+fft
+    Core FFT routines
+polynomial
+    Polynomial tools
+testing
+    NumPy testing tools
+f2py
+    Fortran to Python Interface Generator.
+distutils
+    Enhancements to distutils with support for
+    Fortran compilers and more.
+
+Utilities
+---------
+test
+    Run numpy unittests
+show_config
+    Show numpy build configuration
+dual
+    Overwrite certain functions with high-performance SciPy tools.
+    Note: `numpy.dual` is deprecated. Use the functions from NumPy or Scipy
+    directly instead of importing them from `numpy.dual`.
+matlib
+    Make everything matrices.
+__version__
+    NumPy version string
+
+Viewing documentation using IPython
+-----------------------------------
+Start IPython with the NumPy profile (``ipython -p numpy``), which will
+import `numpy` under the alias `np`. Then, use the ``cpaste`` command to
+paste examples into the shell. To see which functions are available in
+`numpy`, type ``np.<TAB>`` (where ``<TAB>`` refers to the TAB key), or use
+``np.*cos*?<ENTER>`` (where ``<ENTER>`` refers to the ENTER key) to narrow
+down the list. To view the docstring for a function, use
+``np.cos?<ENTER>`` (to view the docstring) and ``np.cos??<ENTER>`` (to view
+the source code).
+
+Copies vs. in-place operation
+-----------------------------
+Most of the functions in `numpy` return a copy of the array argument
+(e.g., `np.sort`). In-place versions of these functions are often
+available as array methods, i.e. ``x = np.array([1,2,3]); x.sort()``.
+Exceptions to this rule are documented.
+
+"""
+import sys
+import warnings
+
+from ._globals import (
+    ModuleDeprecationWarning, VisibleDeprecationWarning, _NoValue
+)
+
+# We first need to detect if we're being called as part of the numpy setup
+# procedure itself in a reliable manner.
+try:
+    __NUMPY_SETUP__
+except NameError:
+    __NUMPY_SETUP__ = False
+
+if __NUMPY_SETUP__:
+    sys.stderr.write('Running from numpy source directory.\n')
+else:
+    try:
+        from numpy.__config__ import show as show_config
+    except ImportError as e:
+        msg = """Error importing numpy: you should not try to import numpy from
+        its source directory; please exit the numpy source tree, and relaunch
+        your python interpreter from there."""
+        raise ImportError(msg) from e
+
+    __all__ = ['ModuleDeprecationWarning',
+               'VisibleDeprecationWarning']
+
+    # get the version using versioneer
+    from ._version import get_versions
+    vinfo = get_versions()
+    __version__ = vinfo.get("closest-tag", vinfo["version"])
+    __git_version__ = vinfo.get("full-revisionid")
+    del get_versions, vinfo
+
+    # mapping of {name: (value, deprecation_msg)}
+    __deprecated_attrs__ = {}
+
+    # Allow distributors to run custom init code
+    from . import _distributor_init
+
+    from . import core
+    from .core import *
+    from . import compat
+    from . import lib
+    # NOTE: to be revisited following future namespace cleanup.
+    # See gh-14454 and gh-15672 for discussion.
+    from .lib import *
+
+    from . import linalg
+    from . import fft
+    from . import polynomial
+    from . import random
+    from . import ctypeslib
+    from . import ma
+    from . import matrixlib as _mat
+    from .matrixlib import *
+
+    # Deprecations introduced in NumPy 1.20.0, 2020-06-06
+    import builtins as _builtins
+
+    _msg = (
+        "`np.{n}` is a deprecated alias for the builtin `{n}`. "
+        "To silence this warning, use `{n}` by itself. Doing this will not "
+        "modify any behavior and is safe. {extended_msg}\n"
+        "Deprecated in NumPy 1.20; for more details and guidance: "
+        "https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations")
+
+    _specific_msg = (
+        "If you specifically wanted the numpy scalar type, use `np.{}` here.")
+
+    _int_extended_msg = (
+        "When replacing `np.{}`, you may wish to use e.g. `np.int64` "
+        "or `np.int32` to specify the precision. If you wish to review "
+        "your current use, check the release note link for "
+        "additional information.")
+
+    _type_info = [
+        ("object", ""),  # The NumPy scalar only exists by name.
+        ("bool", _specific_msg.format("bool_")),
+        ("float", _specific_msg.format("float64")),
+        ("complex", _specific_msg.format("complex128")),
+        ("str", _specific_msg.format("str_")),
+        ("int", _int_extended_msg.format("int"))]
+
+    __deprecated_attrs__.update({
+        n: (getattr(_builtins, n), _msg.format(n=n, extended_msg=extended_msg))
+        for n, extended_msg in _type_info
+    })
+    # Numpy 1.20.0, 2020-10-19
+    __deprecated_attrs__["typeDict"] = (
+        core.numerictypes.typeDict,
+        "`np.typeDict` is a deprecated alias for `np.sctypeDict`."
+    )
+
+    _msg = (
+        "`np.{n}` is a deprecated alias for `np.compat.{n}`. "
+        "To silence this warning, use `np.compat.{n}` by itself. "
+        "In the likely event your code does not need to work on Python 2 "
+        "you can use the builtin `{n2}` for which `np.compat.{n}` is itself "
+        "an alias. Doing this will not modify any behaviour and is safe. "
+        "{extended_msg}\n"
+        "Deprecated in NumPy 1.20; for more details and guidance: "
+        "https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations")
+
+    __deprecated_attrs__["long"] = (
+        getattr(compat, "long"),
+        _msg.format(n="long", n2="int",
+                    extended_msg=_int_extended_msg.format("long")))
+
+    __deprecated_attrs__["unicode"] = (
+        getattr(compat, "unicode"),
+        _msg.format(n="unicode", n2="str",
+                    extended_msg=_specific_msg.format("str_")))
+
+    del _msg, _specific_msg, _int_extended_msg, _type_info, _builtins
+
+    from .core import round, abs, max, min
+    # now that numpy modules are imported, can initialize limits
+    core.getlimits._register_known_types()
+
+    __all__.extend(['__version__', 'show_config'])
+    __all__.extend(core.__all__)
+    __all__.extend(_mat.__all__)
+    __all__.extend(lib.__all__)
+    __all__.extend(['linalg', 'fft', 'random', 'ctypeslib', 'ma'])
+
+    # These are exported by np.core, but are replaced by the builtins below;
+    # remove them to ensure that we don't end up with `np.long == np.int_`,
+    # which would be a breaking change.
+    del long, unicode
+    __all__.remove('long')
+    __all__.remove('unicode')
+
+    # Remove things that are in the numpy.lib but not in the numpy namespace
+    # Note that there is a test (numpy/tests/test_public_api.py:test_numpy_namespace)
+    # that prevents adding more things to the main namespace by accident.
+    # The list below will grow until the `from .lib import *` fixme above is
+    # taken care of
+    __all__.remove('Arrayterator')
+    del Arrayterator
+
+    # These names were removed in NumPy 1.20. For at least one release,
+    # attempts to access these names in the numpy namespace will trigger
+    # a warning, and calling the function will raise an exception.
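+    #
+    # Illustrative sketch (not part of the NumPy sources): once the table
+    # below is built, attribute access goes through the module-level
+    # __getattr__ defined further down, so e.g. `np.fv` emits a
+    # DeprecationWarning and returns a stub whose call raises:
+    #
+    #     RuntimeError: In accordance with NEP 32, the function fv was removed ...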
+    _financial_names = ['fv', 'ipmt', 'irr', 'mirr', 'nper', 'npv', 'pmt',
+                        'ppmt', 'pv', 'rate']
+    __expired_functions__ = {
+        name: (f'In accordance with NEP 32, the function {name} was removed '
+               'from NumPy version 1.20. A replacement for this function '
+               'is available in the numpy_financial library: '
+               'https://pypi.org/project/numpy-financial')
+        for name in _financial_names}
+
+    # Filter out harmless Cython warnings
+    warnings.filterwarnings("ignore", message="numpy.dtype size changed")
+    warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
+    warnings.filterwarnings("ignore", message="numpy.ndarray size changed")
+
+    # oldnumeric and numarray were removed in 1.9. In case some packages import
+    # but do not use them, we define them here for backward compatibility.
+    oldnumeric = 'removed'
+    numarray = 'removed'
+
+    if sys.version_info[:2] >= (3, 7):
+        # module level getattr is only supported in 3.7 onwards
+        # https://www.python.org/dev/peps/pep-0562/
+        def __getattr__(attr):
+            # Warn for expired attributes, and return a dummy function
+            # that always raises an exception.
+            try:
+                msg = __expired_functions__[attr]
+            except KeyError:
+                pass
+            else:
+                warnings.warn(msg, DeprecationWarning, stacklevel=2)
+
+                def _expired(*args, **kwds):
+                    raise RuntimeError(msg)
+
+                return _expired
+
+            # Emit warnings for deprecated attributes
+            try:
+                val, msg = __deprecated_attrs__[attr]
+            except KeyError:
+                pass
+            else:
+                warnings.warn(msg, DeprecationWarning, stacklevel=2)
+                return val
+
+            # Importing Tester requires importing all of UnitTest, which is not
+            # a cheap import. Since it is mainly used in test suites, we lazily
+            # import it here to save on the order of 10 ms of import time for
+            # most users.
+            #
+            # The previous way Tester was imported also had a side effect of
+            # adding the full `numpy.testing` namespace
+            if attr == 'testing':
+                import numpy.testing as testing
+                return testing
+            elif attr == 'Tester':
+                from .testing import Tester
+                return Tester
+
+            raise AttributeError("module {!r} has no attribute "
+                                 "{!r}".format(__name__, attr))
+
+        def __dir__():
+            return list(globals().keys() | {'Tester', 'testing'})
+
+    else:
+        # We don't actually use this ourselves anymore, but I'm not 100% sure that
+        # no-one else in the world is using it (though I hope not)
+        from .testing import Tester
+
+        # We weren't able to emit a warning about these, so keep them around
+        globals().update({
+            k: v
+            for k, (v, msg) in __deprecated_attrs__.items()
+        })
+
+
+    # Pytest testing
+    from numpy._pytesttester import PytestTester
+    test = PytestTester(__name__)
+    del PytestTester
+
+
+    def _sanity_check():
+        """
+        Quick sanity checks for common bugs caused by environment.
+        There are some cases e.g. with wrong BLAS ABI that cause wrong
+        results under specific runtime conditions that are not necessarily
+        achieved during test suite runs, and it is useful to catch those early.
+
+        See https://github.com/numpy/numpy/issues/8577 and other
+        similar bug reports.
+
+        """
+        try:
+            x = ones(2, dtype=float32)
+            if not abs(x.dot(x) - 2.0) < 1e-5:
+                raise AssertionError()
+        except AssertionError:
+            msg = ("The current Numpy installation ({!r}) fails to "
+                   "pass simple sanity checks. This can be caused for example "
+                   "by incorrect BLAS library being linked in, or by mixing "
+                   "package managers (pip, conda, apt, ...). Search closed "
+                   "numpy issues for similar problems.")
+            raise RuntimeError(msg.format(__file__)) from None
+
+    _sanity_check()
+    del _sanity_check
+
+    def _mac_os_check():
+        """
+        Quick sanity check for macOS: look for Accelerate build bugs.
+        Testing numpy polyfit calls init_dgelsd (LAPACK).
+        """
+        try:
+            c = array([3., 2., 1.])
+            x = linspace(0, 2, 5)
+            y = polyval(c, x)
+            _ = polyfit(x, y, 2, cov=True)
+        except ValueError:
+            pass
+
+    import sys
+    if sys.platform == "darwin":
+        with warnings.catch_warnings(record=True) as w:
+            _mac_os_check()
+            # If the check emitted a warning, raise a runtime error that
+            # includes the warning category and message.
+            error_message = ""
+            if len(w) > 0:
+                error_message = "{}: {}".format(w[-1].category.__name__, str(w[-1].message))
+                msg = (
+                    "Polyfit sanity test emitted a warning, most likely due "
+                    "to using a buggy Accelerate backend. If you compiled "
+                    "yourself, more information is available at "
+                    "https://numpy.org/doc/stable/user/building.html#accelerated-blas-lapack-libraries "
+                    "Otherwise report this to the vendor "
+                    "that provided NumPy.\n{}\n".format(error_message))
+                raise RuntimeError(msg)
+    del _mac_os_check
+
+    # We usually use madvise hugepages support, but on some old kernels it
+    # is slow and thus better avoided.
+    # Specifically kernel version 4.6 had a bug fix which probably fixed this:
+    # https://github.com/torvalds/linux/commit/7cf91a98e607c2f935dbcc177d70011e95b8faff
+    import os
+    use_hugepage = os.environ.get("NUMPY_MADVISE_HUGEPAGE", None)
+    if sys.platform == "linux" and use_hugepage is None:
+        # If there is an issue with parsing the kernel version,
+        # set use_hugepage to 0. Usage of LooseVersion will handle
+        # the kernel version parsing better, but avoided since it
+        # will increase the import time. See: #16679 for related discussion.
+        try:
+            use_hugepage = 1
+            kernel_version = os.uname().release.split(".")[:2]
+            kernel_version = tuple(int(v) for v in kernel_version)
+            if kernel_version < (4, 6):
+                use_hugepage = 0
+        except ValueError:
+            use_hugepage = 0
+    elif use_hugepage is None:
+        # This is not Linux, so it should not matter, just enable anyway
+        use_hugepage = 1
+    else:
+        use_hugepage = int(use_hugepage)
+
+    # Note that this will currently only make a difference on Linux
+    core.multiarray._set_madvise_hugepage(use_hugepage)
+
+    # Give a warning if NumPy is reloaded or imported on a sub-interpreter.
+    # We do this from python, since the C-module may not be reloaded and
+    # it keeps things more tidily organized.
+ core.multiarray._multiarray_umath._reload_guard() + +from ._version import get_versions +__version__ = get_versions()['version'] +del get_versions diff --git a/MLPY/Lib/site-packages/numpy/__init__.pyi b/MLPY/Lib/site-packages/numpy/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..395d69c1d4bd123c6301817c87d8f4b260a968e5 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/__init__.pyi @@ -0,0 +1,3741 @@ +import builtins +import os +import sys +import mmap +import ctypes as ct +import array as _array +import datetime as dt +from abc import abstractmethod +from types import TracebackType +from contextlib import ContextDecorator + +from numpy.core._internal import _ctypes +from numpy.typing import ( + # Arrays + ArrayLike, + NDArray, + _SupportsArray, + _NestedSequence, + _RecursiveSequence, + _SupportsArray, + _ArrayLikeBool_co, + _ArrayLikeUInt_co, + _ArrayLikeInt_co, + _ArrayLikeFloat_co, + _ArrayLikeComplex_co, + _ArrayLikeNumber_co, + _ArrayLikeTD64_co, + _ArrayLikeDT64_co, + _ArrayLikeObject_co, + + # DTypes + DTypeLike, + _SupportsDType, + _VoidDTypeLike, + + # Shapes + _Shape, + _ShapeLike, + + # Scalars + _CharLike_co, + _BoolLike_co, + _IntLike_co, + _FloatLike_co, + _ComplexLike_co, + _TD64Like_co, + _NumberLike_co, + _ScalarLike_co, + + # `number` precision + NBitBase, + _256Bit, + _128Bit, + _96Bit, + _80Bit, + _64Bit, + _32Bit, + _16Bit, + _8Bit, + _NBitByte, + _NBitShort, + _NBitIntC, + _NBitIntP, + _NBitInt, + _NBitLongLong, + _NBitHalf, + _NBitSingle, + _NBitDouble, + _NBitLongDouble, + + # Character codes + _BoolCodes, + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _Float16Codes, + _Float32Codes, + _Float64Codes, + _Complex64Codes, + _Complex128Codes, + _ByteCodes, + _ShortCodes, + _IntCCodes, + _IntPCodes, + _IntCodes, + _LongLongCodes, + _UByteCodes, + _UShortCodes, + _UIntCCodes, + _UIntPCodes, + _UIntCodes, + _ULongLongCodes, + _HalfCodes, + _SingleCodes, + _DoubleCodes, + _LongDoubleCodes, + _CSingleCodes, + _CDoubleCodes, + _CLongDoubleCodes, + _DT64Codes, + _TD64Codes, + _StrCodes, + _BytesCodes, + _VoidCodes, + _ObjectCodes, + + # Ufuncs + _UFunc_Nin1_Nout1, + _UFunc_Nin2_Nout1, + _UFunc_Nin1_Nout2, + _UFunc_Nin2_Nout2, + _GUFunc_Nin2_Nout1, +) + +from numpy.typing._callable import ( + _BoolOp, + _BoolBitOp, + _BoolSub, + _BoolTrueDiv, + _BoolMod, + _BoolDivMod, + _TD64Div, + _IntTrueDiv, + _UnsignedIntOp, + _UnsignedIntBitOp, + _UnsignedIntMod, + _UnsignedIntDivMod, + _SignedIntOp, + _SignedIntBitOp, + _SignedIntMod, + _SignedIntDivMod, + _FloatOp, + _FloatMod, + _FloatDivMod, + _ComplexOp, + _NumberOp, + _ComparisonOp, +) + +# NOTE: Numpy's mypy plugin is used for removing the types unavailable +# to the specific platform +from numpy.typing._extended_precision import ( + uint128 as uint128, + uint256 as uint256, + int128 as int128, + int256 as int256, + float80 as float80, + float96 as float96, + float128 as float128, + float256 as float256, + complex160 as complex160, + complex192 as complex192, + complex256 as complex256, + complex512 as complex512, +) + +from typing import ( + Any, + ByteString, + Callable, + Container, + Callable, + Dict, + Generic, + IO, + Iterable, + Iterator, + List, + Mapping, + NoReturn, + Optional, + overload, + Sequence, + Sized, + SupportsComplex, + SupportsFloat, + SupportsInt, + Text, + Tuple, + Type, + TypeVar, + Union, +) + +if sys.version_info >= (3, 8): + from typing import Literal as L, Protocol, SupportsIndex, Final 
+else: + from typing_extensions import Literal as L, Protocol, SupportsIndex, Final + +# Ensures that the stubs are picked up +from numpy import ( + char as char, + ctypeslib as ctypeslib, + fft as fft, + lib as lib, + linalg as linalg, + ma as ma, + matrixlib as matrixlib, + polynomial as polynomial, + random as random, + rec as rec, + testing as testing, + version as version, +) + +from numpy.core.function_base import ( + linspace as linspace, + logspace as logspace, + geomspace as geomspace, +) + +from numpy.core.fromnumeric import ( + take as take, + reshape as reshape, + choose as choose, + repeat as repeat, + put as put, + swapaxes as swapaxes, + transpose as transpose, + partition as partition, + argpartition as argpartition, + sort as sort, + argsort as argsort, + argmax as argmax, + argmin as argmin, + searchsorted as searchsorted, + resize as resize, + squeeze as squeeze, + diagonal as diagonal, + trace as trace, + ravel as ravel, + nonzero as nonzero, + shape as shape, + compress as compress, + clip as clip, + sum as sum, + all as all, + any as any, + cumsum as cumsum, + ptp as ptp, + amax as amax, + amin as amin, + prod as prod, + cumprod as cumprod, + ndim as ndim, + size as size, + around as around, + mean as mean, + std as std, + var as var, +) + +from numpy.core._asarray import ( + asarray as asarray, + asanyarray as asanyarray, + ascontiguousarray as ascontiguousarray, + asfortranarray as asfortranarray, + require as require, +) + +from numpy.core._type_aliases import ( + sctypes as sctypes, + sctypeDict as sctypeDict, +) + +from numpy.core._ufunc_config import ( + seterr as seterr, + geterr as geterr, + setbufsize as setbufsize, + getbufsize as getbufsize, + seterrcall as seterrcall, + geterrcall as geterrcall, + _SupportsWrite, + _ErrKind, + _ErrFunc, + _ErrDictOptional, +) + +from numpy.core.arrayprint import ( + set_printoptions as set_printoptions, + get_printoptions as get_printoptions, + array2string as array2string, + format_float_scientific as format_float_scientific, + format_float_positional as format_float_positional, + array_repr as array_repr, + array_str as array_str, + set_string_function as set_string_function, + printoptions as printoptions, +) + +from numpy.core.einsumfunc import ( + einsum as einsum, + einsum_path as einsum_path, +) + +from numpy.core.numeric import ( + zeros_like as zeros_like, + ones as ones, + ones_like as ones_like, + empty_like as empty_like, + full as full, + full_like as full_like, + count_nonzero as count_nonzero, + isfortran as isfortran, + argwhere as argwhere, + flatnonzero as flatnonzero, + correlate as correlate, + convolve as convolve, + outer as outer, + tensordot as tensordot, + roll as roll, + rollaxis as rollaxis, + moveaxis as moveaxis, + cross as cross, + indices as indices, + fromfunction as fromfunction, + isscalar as isscalar, + binary_repr as binary_repr, + base_repr as base_repr, + identity as identity, + allclose as allclose, + isclose as isclose, + array_equal as array_equal, + array_equiv as array_equiv, +) + +from numpy.core.numerictypes import ( + maximum_sctype as maximum_sctype, + issctype as issctype, + obj2sctype as obj2sctype, + issubclass_ as issubclass_, + issubsctype as issubsctype, + issubdtype as issubdtype, + sctype2char as sctype2char, + find_common_type as find_common_type, + nbytes as nbytes, + cast as cast, + ScalarType as ScalarType, + typecodes as typecodes, +) + +from numpy.core.shape_base import ( + atleast_1d as atleast_1d, + atleast_2d as atleast_2d, + atleast_3d as atleast_3d, + block 
as block, + hstack as hstack, + stack as stack, + vstack as vstack, +) + +from numpy.lib import ( + emath as emath, +) + +from numpy.lib.arraypad import ( + pad as pad, +) + +from numpy.lib.arraysetops import ( + ediff1d as ediff1d, + intersect1d as intersect1d, + setxor1d as setxor1d, + union1d as union1d, + setdiff1d as setdiff1d, + unique as unique, + in1d as in1d, + isin as isin, +) + +from numpy.lib.arrayterator import ( + Arrayterator as Arrayterator, +) + +from numpy.lib.function_base import ( + select as select, + piecewise as piecewise, + trim_zeros as trim_zeros, + copy as copy, + iterable as iterable, + percentile as percentile, + diff as diff, + gradient as gradient, + angle as angle, + unwrap as unwrap, + sort_complex as sort_complex, + disp as disp, + flip as flip, + rot90 as rot90, + extract as extract, + place as place, + asarray_chkfinite as asarray_chkfinite, + average as average, + bincount as bincount, + digitize as digitize, + cov as cov, + corrcoef as corrcoef, + msort as msort, + median as median, + sinc as sinc, + hamming as hamming, + hanning as hanning, + bartlett as bartlett, + blackman as blackman, + kaiser as kaiser, + trapz as trapz, + i0 as i0, + add_newdoc as add_newdoc, + add_docstring as add_docstring, + meshgrid as meshgrid, + delete as delete, + insert as insert, + append as append, + interp as interp, + add_newdoc_ufunc as add_newdoc_ufunc, + quantile as quantile, +) + +from numpy.lib.index_tricks import ( + ravel_multi_index as ravel_multi_index, + unravel_index as unravel_index, + mgrid as mgrid, + ogrid as ogrid, + r_ as r_, + c_ as c_, + s_ as s_, + index_exp as index_exp, + ix_ as ix_, + fill_diagonal as fill_diagonal, + diag_indices as diag_indices, + diag_indices_from as diag_indices_from, +) + +from numpy.lib.nanfunctions import ( + nansum as nansum, + nanmax as nanmax, + nanmin as nanmin, + nanargmax as nanargmax, + nanargmin as nanargmin, + nanmean as nanmean, + nanmedian as nanmedian, + nanpercentile as nanpercentile, + nanvar as nanvar, + nanstd as nanstd, + nanprod as nanprod, + nancumsum as nancumsum, + nancumprod as nancumprod, + nanquantile as nanquantile, +) + +from numpy.lib.npyio import ( + savetxt as savetxt, + loadtxt as loadtxt, + genfromtxt as genfromtxt, + recfromtxt as recfromtxt, + recfromcsv as recfromcsv, + load as load, + loads as loads, + save as save, + savez as savez, + savez_compressed as savez_compressed, + packbits as packbits, + unpackbits as unpackbits, + fromregex as fromregex, +) + +from numpy.lib.polynomial import ( + poly as poly, + roots as roots, + polyint as polyint, + polyder as polyder, + polyadd as polyadd, + polysub as polysub, + polymul as polymul, + polydiv as polydiv, + polyval as polyval, + polyfit as polyfit, +) + +from numpy.lib.shape_base import ( + column_stack as column_stack, + row_stack as row_stack, + dstack as dstack, + array_split as array_split, + split as split, + hsplit as hsplit, + vsplit as vsplit, + dsplit as dsplit, + apply_over_axes as apply_over_axes, + expand_dims as expand_dims, + apply_along_axis as apply_along_axis, + kron as kron, + tile as tile, + get_array_wrap as get_array_wrap, + take_along_axis as take_along_axis, + put_along_axis as put_along_axis, +) + +from numpy.lib.stride_tricks import ( + broadcast_to as broadcast_to, + broadcast_arrays as broadcast_arrays, + broadcast_shapes as broadcast_shapes, +) + +from numpy.lib.twodim_base import ( + diag as diag, + diagflat as diagflat, + eye as eye, + fliplr as fliplr, + flipud as flipud, + tri as tri, + triu as triu, + tril 
as tril, + vander as vander, + histogram2d as histogram2d, + mask_indices as mask_indices, + tril_indices as tril_indices, + tril_indices_from as tril_indices_from, + triu_indices as triu_indices, + triu_indices_from as triu_indices_from, +) + +from numpy.lib.type_check import ( + mintypecode as mintypecode, + asfarray as asfarray, + real as real, + imag as imag, + iscomplex as iscomplex, + isreal as isreal, + iscomplexobj as iscomplexobj, + isrealobj as isrealobj, + nan_to_num as nan_to_num, + real_if_close as real_if_close, + typename as typename, + common_type as common_type, +) + +from numpy.lib.ufunclike import ( + fix as fix, + isposinf as isposinf, + isneginf as isneginf, +) + +from numpy.lib.utils import ( + issubclass_ as issubclass_, + issubsctype as issubsctype, + issubdtype as issubdtype, + deprecate as deprecate, + deprecate_with_doc as deprecate_with_doc, + get_include as get_include, + info as info, + source as source, + who as who, + lookfor as lookfor, + byte_bounds as byte_bounds, + safe_eval as safe_eval, +) + +__all__: List[str] +__path__: List[str] +__version__: str +__git_version__: str + +# TODO: Move placeholders to their respective module once +# their annotations are properly implemented +# +# Placeholders for classes +# TODO: Remove `__getattr__` once the classes are stubbed out +class MachAr: + def __init__( + self, + float_conv: Any = ..., + int_conv: Any = ..., + float_to_float: Any = ..., + float_to_str: Any = ..., + title: Any = ..., + ) -> None: ... + def __getattr__(self, key: str) -> Any: ... + +class busdaycalendar: + def __new__(cls, weekmask: Any = ..., holidays: Any = ...) -> Any: ... + def __getattr__(self, key: str) -> Any: ... + +class chararray(ndarray[_ShapeType, _DType_co]): + def __new__( + subtype, + shape: Any, + itemsize: Any = ..., + unicode: Any = ..., + buffer: Any = ..., + offset: Any = ..., + strides: Any = ..., + order: Any = ..., + ) -> Any: ... + def __array_finalize__(self, obj): ... + def argsort(self, axis=..., kind=..., order=...): ... + def capitalize(self): ... + def center(self, width, fillchar=...): ... + def count(self, sub, start=..., end=...): ... + def decode(self, encoding=..., errors=...): ... + def encode(self, encoding=..., errors=...): ... + def endswith(self, suffix, start=..., end=...): ... + def expandtabs(self, tabsize=...): ... + def find(self, sub, start=..., end=...): ... + def index(self, sub, start=..., end=...): ... + def isalnum(self): ... + def isalpha(self): ... + def isdigit(self): ... + def islower(self): ... + def isspace(self): ... + def istitle(self): ... + def isupper(self): ... + def join(self, seq): ... + def ljust(self, width, fillchar=...): ... + def lower(self): ... + def lstrip(self, chars=...): ... + def partition(self, sep): ... + def replace(self, old, new, count=...): ... + def rfind(self, sub, start=..., end=...): ... + def rindex(self, sub, start=..., end=...): ... + def rjust(self, width, fillchar=...): ... + def rpartition(self, sep): ... + def rsplit(self, sep=..., maxsplit=...): ... + def rstrip(self, chars=...): ... + def split(self, sep=..., maxsplit=...): ... + def splitlines(self, keepends=...): ... + def startswith(self, prefix, start=..., end=...): ... + def strip(self, chars=...): ... + def swapcase(self): ... + def title(self): ... + def translate(self, table, deletechars=...): ... + def upper(self): ... + def zfill(self, width): ... + def isnumeric(self): ... + def isdecimal(self): ... + +class finfo: + def __new__(cls, dtype: Any) -> Any: ... 
+ def __getattr__(self, key: str) -> Any: ... + +class format_parser: + def __init__( + self, + formats: Any, + names: Any, + titles: Any, + aligned: Any = ..., + byteorder: Any = ..., + ) -> None: ... + +class iinfo: + def __init__(self, int_type: Any) -> None: ... + def __getattr__(self, key: str) -> Any: ... + +class matrix(ndarray[_ShapeType, _DType_co]): + def __new__( + subtype, + data: Any, + dtype: Any = ..., + copy: Any = ..., + ) -> Any: ... + def __array_finalize__(self, obj): ... + def __getitem__(self, index): ... + def __mul__(self, other): ... + def __rmul__(self, other): ... + def __imul__(self, other): ... + def __pow__(self, other): ... + def __ipow__(self, other): ... + def __rpow__(self, other): ... + def tolist(self): ... + def sum(self, axis=..., dtype=..., out=...): ... + def squeeze(self, axis=...): ... + def flatten(self, order=...): ... + def mean(self, axis=..., dtype=..., out=...): ... + def std(self, axis=..., dtype=..., out=..., ddof=...): ... + def var(self, axis=..., dtype=..., out=..., ddof=...): ... + def prod(self, axis=..., dtype=..., out=...): ... + def any(self, axis=..., out=...): ... + def all(self, axis=..., out=...): ... + def max(self, axis=..., out=...): ... + def argmax(self, axis=..., out=...): ... + def min(self, axis=..., out=...): ... + def argmin(self, axis=..., out=...): ... + def ptp(self, axis=..., out=...): ... + def ravel(self, order=...): ... + @property + def T(self): ... + @property + def I(self): ... + @property + def A(self): ... + @property + def A1(self): ... + @property + def H(self): ... + def getT(self): ... + def getA(self): ... + def getA1(self): ... + def getH(self): ... + def getI(self): ... + +class memmap(ndarray[_ShapeType, _DType_co]): + def __new__( + subtype, + filename: Any, + dtype: Any = ..., + mode: Any = ..., + offset: Any = ..., + shape: Any = ..., + order: Any = ..., + ) -> Any: ... + def __getattr__(self, key: str) -> Any: ... + +class nditer: + def __new__( + cls, + op: Any, + flags: Any = ..., + op_flags: Any = ..., + op_dtypes: Any = ..., + order: Any = ..., + casting: Any = ..., + op_axes: Any = ..., + itershape: Any = ..., + buffersize: Any = ..., + ) -> Any: ... + def __getattr__(self, key: str) -> Any: ... + def __enter__(self) -> nditer: ... + def __exit__( + self, + exc_type: None | Type[BaseException], + exc_value: None | BaseException, + traceback: None | TracebackType, + ) -> None: ... + def __iter__(self) -> Iterator[Any]: ... + def __next__(self) -> Any: ... + def __len__(self) -> int: ... + def __copy__(self) -> nditer: ... + def __getitem__(self, index: SupportsIndex | slice) -> Any: ... + def __setitem__(self, index: SupportsIndex | slice, value: Any) -> None: ... + def __delitem__(self, key: SupportsIndex | slice) -> None: ... + + +class poly1d: + def __init__( + self, + c_or_r: Any, + r: Any = ..., + variable: Any = ..., + ) -> None: ... + def __call__(self, val: Any) -> Any: ... + __hash__: Any + @property + def coeffs(self): ... + @coeffs.setter + def coeffs(self, value): ... + @property + def c(self): ... + @c.setter + def c(self, value): ... + @property + def coef(self): ... + @coef.setter + def coef(self, value): ... + @property + def coefficients(self): ... + @coefficients.setter + def coefficients(self, value): ... + @property + def variable(self): ... + @property + def order(self): ... + @property + def o(self): ... + @property + def roots(self): ... + @property + def r(self): ... + def __array__(self, t=...): ... + def __len__(self): ... + def __neg__(self): ... 
+    def __pos__(self): ...
+    def __mul__(self, other): ...
+    def __rmul__(self, other): ...
+    def __add__(self, other): ...
+    def __radd__(self, other): ...
+    def __pow__(self, val): ...
+    def __sub__(self, other): ...
+    def __rsub__(self, other): ...
+    def __div__(self, other): ...
+    def __truediv__(self, other): ...
+    def __rdiv__(self, other): ...
+    def __rtruediv__(self, other): ...
+    def __eq__(self, other): ...
+    def __ne__(self, other): ...
+    def __getitem__(self, val): ...
+    def __setitem__(self, key, val): ...
+    def __iter__(self): ...
+    def integ(self, m=..., k=...): ...
+    def deriv(self, m=...): ...
+
+class recarray(ndarray[_ShapeType, _DType_co]):
+    def __new__(
+        subtype,
+        shape: Any,
+        dtype: Any = ...,
+        buf: Any = ...,
+        offset: Any = ...,
+        strides: Any = ...,
+        formats: Any = ...,
+        names: Any = ...,
+        titles: Any = ...,
+        byteorder: Any = ...,
+        aligned: Any = ...,
+        order: Any = ...,
+    ) -> Any: ...
+    def __array_finalize__(self, obj): ...
+    def __getattribute__(self, attr): ...
+    def __setattr__(self, attr, val): ...
+    def __getitem__(self, indx): ...
+    def field(self, attr, val=...): ...
+
+class record(void):
+    def __getattribute__(self, attr): ...
+    def __setattr__(self, attr, val): ...
+    def __getitem__(self, indx): ...
+    def pprint(self): ...
+
+class vectorize:
+    pyfunc: Any
+    cache: Any
+    signature: Any
+    otypes: Any
+    excluded: Any
+    __doc__: Any
+    def __init__(
+        self,
+        pyfunc,
+        otypes: Any = ...,
+        doc: Any = ...,
+        excluded: Any = ...,
+        cache: Any = ...,
+        signature: Any = ...,
+    ) -> None: ...
+    def __call__(self, *args: Any, **kwargs: Any) -> Any: ...
+
+# Placeholders for Python-based functions
+def asmatrix(data, dtype=...): ...
+def asscalar(a): ...
+def cumproduct(*args, **kwargs): ...
+def histogram(a, bins=..., range=..., normed=..., weights=..., density=...): ...
+def histogram_bin_edges(a, bins=..., range=..., weights=...): ...
+def histogramdd(sample, bins=..., range=..., normed=..., weights=..., density=...): ...
+def mat(data, dtype=...): ...
+def max(a, axis=..., out=..., keepdims=..., initial=..., where=...): ...
+def min(a, axis=..., out=..., keepdims=..., initial=..., where=...): ...
+def product(*args, **kwargs): ...
+def round(a, decimals=..., out=...): ...
+def round_(a, decimals=..., out=...): ...
+def show_config(): ...
+
+# Placeholders for C-based functions
+# TODO: Sort out which parameters are positional-only
+@overload
+def arange(stop, dtype=..., *, like=...): ...
+@overload
+def arange(start, stop, step=..., dtype=..., *, like=...): ...
+def busday_count(
+    begindates,
+    enddates,
+    weekmask=...,
+    holidays=...,
+    busdaycal=...,
+    out=...,
+): ...
+def busday_offset(
+    dates,
+    offsets,
+    roll=...,
+    weekmask=...,
+    holidays=...,
+    busdaycal=...,
+    out=...,
+): ...
+def can_cast(from_, to, casting=...): ...
+def compare_chararrays(a, b, cmp_op, rstrip): ...
+def concatenate(__a, axis=..., out=..., dtype=..., casting=...): ...
+def copyto(dst, src, casting=..., where=...): ...
+def datetime_as_string(arr, unit=..., timezone=..., casting=...): ...
+def datetime_data(__dtype): ...
+def dot(a, b, out=...): ...
+def frombuffer(buffer, dtype=..., count=..., offset=..., *, like=...): ...
+def fromfile(
+    file, dtype=..., count=..., sep=..., offset=..., *, like=...
+): ...
+def fromiter(iter, dtype, count=..., *, like=...): ...
+def frompyfunc(func, nin, nout, *, identity=...): ...
+def fromstring(string, dtype=..., count=..., sep=..., *, like=...): ...
+def geterrobj(): ...
+def inner(a, b): ...
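The two `arange` overloads above mirror its positional calling patterns; a minimal sketch, assuming numpy is installed and imported as `np`:

    import numpy as np

    np.arange(5)         # `stop`-only overload -> array([0, 1, 2, 3, 4])
    np.arange(2, 10, 3)  # `start, stop, step`  -> array([2, 5, 8])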
+def is_busday( + dates, weekmask=..., holidays=..., busdaycal=..., out=... +): ... +def lexsort(keys, axis=...): ... +def may_share_memory(a, b, max_work=...): ... +def min_scalar_type(a): ... +def nested_iters(*args, **kwargs): ... # TODO: Sort out parameters +def promote_types(type1, type2): ... +def putmask(a, mask, values): ... +def result_type(*arrays_and_dtypes): ... +def seterrobj(errobj): ... +def shares_memory(a, b, max_work=...): ... +def vdot(a, b): ... +@overload +def where(__condition): ... +@overload +def where(__condition, __x, __y): ... + +_NdArraySubClass = TypeVar("_NdArraySubClass", bound=ndarray) +_DTypeScalar_co = TypeVar("_DTypeScalar_co", covariant=True, bound=generic) +_ByteOrder = L["S", "<", ">", "=", "|", "L", "B", "N", "I"] + +class dtype(Generic[_DTypeScalar_co]): + names: Optional[Tuple[str, ...]] + # Overload for subclass of generic + @overload + def __new__( + cls, + dtype: Type[_DTypeScalar_co], + align: bool = ..., + copy: bool = ..., + ) -> dtype[_DTypeScalar_co]: ... + # Overloads for string aliases, Python types, and some assorted + # other special cases. Order is sometimes important because of the + # subtype relationships + # + # bool < int < float < complex < object + # + # so we have to make sure the overloads for the narrowest type is + # first. + # Builtin types + @overload + def __new__(cls, dtype: Type[bool], align: bool = ..., copy: bool = ...) -> dtype[bool_]: ... + @overload + def __new__(cls, dtype: Type[int], align: bool = ..., copy: bool = ...) -> dtype[int_]: ... + @overload + def __new__(cls, dtype: Optional[Type[float]], align: bool = ..., copy: bool = ...) -> dtype[float_]: ... + @overload + def __new__(cls, dtype: Type[complex], align: bool = ..., copy: bool = ...) -> dtype[complex_]: ... + @overload + def __new__(cls, dtype: Type[str], align: bool = ..., copy: bool = ...) -> dtype[str_]: ... + @overload + def __new__(cls, dtype: Type[bytes], align: bool = ..., copy: bool = ...) -> dtype[bytes_]: ... + + # `unsignedinteger` string-based representations and ctypes + @overload + def __new__(cls, dtype: _UInt8Codes | Type[ct.c_uint8], align: bool = ..., copy: bool = ...) -> dtype[uint8]: ... + @overload + def __new__(cls, dtype: _UInt16Codes | Type[ct.c_uint16], align: bool = ..., copy: bool = ...) -> dtype[uint16]: ... + @overload + def __new__(cls, dtype: _UInt32Codes | Type[ct.c_uint32], align: bool = ..., copy: bool = ...) -> dtype[uint32]: ... + @overload + def __new__(cls, dtype: _UInt64Codes | Type[ct.c_uint64], align: bool = ..., copy: bool = ...) -> dtype[uint64]: ... + @overload + def __new__(cls, dtype: _UByteCodes | Type[ct.c_ubyte], align: bool = ..., copy: bool = ...) -> dtype[ubyte]: ... + @overload + def __new__(cls, dtype: _UShortCodes | Type[ct.c_ushort], align: bool = ..., copy: bool = ...) -> dtype[ushort]: ... + @overload + def __new__(cls, dtype: _UIntCCodes | Type[ct.c_uint], align: bool = ..., copy: bool = ...) -> dtype[uintc]: ... + + # NOTE: We're assuming here that `uint_ptr_t == size_t`, + # an assumption that does not hold in rare cases (same for `ssize_t`) + @overload + def __new__(cls, dtype: _UIntPCodes | Type[ct.c_void_p] | Type[ct.c_size_t], align: bool = ..., copy: bool = ...) -> dtype[uintp]: ... + @overload + def __new__(cls, dtype: _UIntCodes | Type[ct.c_ulong], align: bool = ..., copy: bool = ...) -> dtype[uint]: ... + @overload + def __new__(cls, dtype: _ULongLongCodes | Type[ct.c_ulonglong], align: bool = ..., copy: bool = ...) -> dtype[ulonglong]: ... 
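Per the ordering note above, the narrowest builtin-type overload wins: `bool` subclasses `int`, and type checkers widen `int` to `float` to `complex`. A sketch of the overloads these calls select:

    import numpy as np

    np.dtype(bool)     # Type[bool] overload    -> dtype[bool_]
    np.dtype(int)      # Type[int] overload     -> dtype[int_]
    np.dtype(float)    # Optional[Type[float]]  -> dtype[float_]
    np.dtype(complex)  # Type[complex] overload -> dtype[complex_]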
+ + # `signedinteger` string-based representations and ctypes + @overload + def __new__(cls, dtype: _Int8Codes | Type[ct.c_int8], align: bool = ..., copy: bool = ...) -> dtype[int8]: ... + @overload + def __new__(cls, dtype: _Int16Codes | Type[ct.c_int16], align: bool = ..., copy: bool = ...) -> dtype[int16]: ... + @overload + def __new__(cls, dtype: _Int32Codes | Type[ct.c_int32], align: bool = ..., copy: bool = ...) -> dtype[int32]: ... + @overload + def __new__(cls, dtype: _Int64Codes | Type[ct.c_int64], align: bool = ..., copy: bool = ...) -> dtype[int64]: ... + @overload + def __new__(cls, dtype: _ByteCodes | Type[ct.c_byte], align: bool = ..., copy: bool = ...) -> dtype[byte]: ... + @overload + def __new__(cls, dtype: _ShortCodes | Type[ct.c_short], align: bool = ..., copy: bool = ...) -> dtype[short]: ... + @overload + def __new__(cls, dtype: _IntCCodes | Type[ct.c_int], align: bool = ..., copy: bool = ...) -> dtype[intc]: ... + @overload + def __new__(cls, dtype: _IntPCodes | Type[ct.c_ssize_t], align: bool = ..., copy: bool = ...) -> dtype[intp]: ... + @overload + def __new__(cls, dtype: _IntCodes | Type[ct.c_long], align: bool = ..., copy: bool = ...) -> dtype[int_]: ... + @overload + def __new__(cls, dtype: _LongLongCodes | Type[ct.c_longlong], align: bool = ..., copy: bool = ...) -> dtype[longlong]: ... + + # `floating` string-based representations and ctypes + @overload + def __new__(cls, dtype: _Float16Codes, align: bool = ..., copy: bool = ...) -> dtype[float16]: ... + @overload + def __new__(cls, dtype: _Float32Codes, align: bool = ..., copy: bool = ...) -> dtype[float32]: ... + @overload + def __new__(cls, dtype: _Float64Codes, align: bool = ..., copy: bool = ...) -> dtype[float64]: ... + @overload + def __new__(cls, dtype: _HalfCodes, align: bool = ..., copy: bool = ...) -> dtype[half]: ... + @overload + def __new__(cls, dtype: _SingleCodes | Type[ct.c_float], align: bool = ..., copy: bool = ...) -> dtype[single]: ... + @overload + def __new__(cls, dtype: _DoubleCodes | Type[ct.c_double], align: bool = ..., copy: bool = ...) -> dtype[double]: ... + @overload + def __new__(cls, dtype: _LongDoubleCodes | Type[ct.c_longdouble], align: bool = ..., copy: bool = ...) -> dtype[longdouble]: ... + + # `complexfloating` string-based representations + @overload + def __new__(cls, dtype: _Complex64Codes, align: bool = ..., copy: bool = ...) -> dtype[complex64]: ... + @overload + def __new__(cls, dtype: _Complex128Codes, align: bool = ..., copy: bool = ...) -> dtype[complex128]: ... + @overload + def __new__(cls, dtype: _CSingleCodes, align: bool = ..., copy: bool = ...) -> dtype[csingle]: ... + @overload + def __new__(cls, dtype: _CDoubleCodes, align: bool = ..., copy: bool = ...) -> dtype[cdouble]: ... + @overload + def __new__(cls, dtype: _CLongDoubleCodes, align: bool = ..., copy: bool = ...) -> dtype[clongdouble]: ... + + # Miscellaneous string-based representations and ctypes + @overload + def __new__(cls, dtype: _BoolCodes | Type[ct.c_bool], align: bool = ..., copy: bool = ...) -> dtype[bool_]: ... + @overload + def __new__(cls, dtype: _TD64Codes, align: bool = ..., copy: bool = ...) -> dtype[timedelta64]: ... + @overload + def __new__(cls, dtype: _DT64Codes, align: bool = ..., copy: bool = ...) -> dtype[datetime64]: ... + @overload + def __new__(cls, dtype: _StrCodes, align: bool = ..., copy: bool = ...) -> dtype[str_]: ... + @overload + def __new__(cls, dtype: _BytesCodes | Type[ct.c_char], align: bool = ..., copy: bool = ...) -> dtype[bytes_]: ... 
+ @overload + def __new__(cls, dtype: _VoidCodes, align: bool = ..., copy: bool = ...) -> dtype[void]: ... + @overload + def __new__(cls, dtype: _ObjectCodes | Type[ct.py_object], align: bool = ..., copy: bool = ...) -> dtype[object_]: ... + + # dtype of a dtype is the same dtype + @overload + def __new__( + cls, + dtype: dtype[_DTypeScalar_co], + align: bool = ..., + copy: bool = ..., + ) -> dtype[_DTypeScalar_co]: ... + @overload + def __new__( + cls, + dtype: _SupportsDType[dtype[_DTypeScalar_co]], + align: bool = ..., + copy: bool = ..., + ) -> dtype[_DTypeScalar_co]: ... + # Handle strings that can't be expressed as literals; i.e. s1, s2, ... + @overload + def __new__( + cls, + dtype: str, + align: bool = ..., + copy: bool = ..., + ) -> dtype[Any]: ... + # Catchall overload for void-likes + @overload + def __new__( + cls, + dtype: _VoidDTypeLike, + align: bool = ..., + copy: bool = ..., + ) -> dtype[void]: ... + # Catchall overload for object-likes + @overload + def __new__( + cls, + dtype: Type[object], + align: bool = ..., + copy: bool = ..., + ) -> dtype[object_]: ... + + @overload + def __getitem__(self: dtype[void], key: List[str]) -> dtype[void]: ... + @overload + def __getitem__(self: dtype[void], key: Union[str, int]) -> dtype[Any]: ... + + # NOTE: In the future 1-based multiplications will also yield `void` dtypes + @overload + def __mul__(self, value: L[0]) -> None: ... # type: ignore[misc] + @overload + def __mul__(self: _DType, value: L[1]) -> _DType: ... + @overload + def __mul__(self, value: int) -> dtype[void]: ... + + # NOTE: `__rmul__` seems to be broken when used in combination with + # literals as of mypy 0.800. Set the return-type to `Any` for now. + def __rmul__(self, value: int) -> Any: ... + + def __gt__(self, other: DTypeLike) -> bool: ... + def __ge__(self, other: DTypeLike) -> bool: ... + def __lt__(self, other: DTypeLike) -> bool: ... + def __le__(self, other: DTypeLike) -> bool: ... + @property + def alignment(self) -> int: ... + @property + def base(self: _DType) -> _DType: ... + @property + def byteorder(self) -> str: ... + @property + def char(self) -> str: ... + @property + def descr(self) -> List[Union[Tuple[str, str], Tuple[str, str, _Shape]]]: ... + @property + def fields( + self, + ) -> Optional[Mapping[str, Union[Tuple[dtype[Any], int], Tuple[dtype[Any], int, Any]]]]: ... + @property + def flags(self) -> int: ... + @property + def hasobject(self) -> bool: ... + @property + def isbuiltin(self) -> int: ... + @property + def isnative(self) -> bool: ... + @property + def isalignedstruct(self) -> bool: ... + @property + def itemsize(self) -> int: ... + @property + def kind(self) -> str: ... + @property + def metadata(self) -> Optional[Mapping[str, Any]]: ... + @property + def name(self) -> str: ... + @property + def names(self) -> Optional[Tuple[str, ...]]: ... + @property + def num(self) -> int: ... + @property + def shape(self) -> _Shape: ... + @property + def ndim(self) -> int: ... + @property + def subdtype(self: _DType) -> Optional[Tuple[_DType, _Shape]]: ... + def newbyteorder(self: _DType, __new_order: _ByteOrder = ...) -> _DType: ... + # Leave str and type for end to avoid having to use `builtins.str` + # everywhere. See https://github.com/python/mypy/issues/3775 + @property + def str(self) -> builtins.str: ... + @property + def type(self) -> Type[_DTypeScalar_co]: ... + +class _flagsobj: + aligned: bool + updateifcopy: bool + writeable: bool + writebackifcopy: bool + @property + def behaved(self) -> bool: ... 
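A short sketch of the `dtype.__mul__` and `newbyteorder` annotations above; the result types noted in the comments are read off the overloads rather than verified against any particular numpy release:

    import numpy as np

    dt = np.dtype("<i4")
    dt * 1                # `L[1]` overload: annotated to stay dtype[int32]
    dt * 2                # int overload: annotated as a (subarray) dtype[void]
    dt.newbyteorder(">")  # dtype('>i4'): same kind and itemsize, swapped order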
+ @property + def c_contiguous(self) -> bool: ... + @property + def carray(self) -> bool: ... + @property + def contiguous(self) -> bool: ... + @property + def f_contiguous(self) -> bool: ... + @property + def farray(self) -> bool: ... + @property + def fnc(self) -> bool: ... + @property + def forc(self) -> bool: ... + @property + def fortran(self) -> bool: ... + @property + def num(self) -> int: ... + @property + def owndata(self) -> bool: ... + def __getitem__(self, key: str) -> bool: ... + def __setitem__(self, key: str, value: bool) -> None: ... + +_ArrayLikeInt = Union[ + int, + integer, + Sequence[Union[int, integer]], + Sequence[Sequence[Any]], # TODO: wait for support for recursive types + ndarray +] + +_FlatIterSelf = TypeVar("_FlatIterSelf", bound=flatiter) + +class flatiter(Generic[_NdArraySubClass]): + @property + def base(self) -> _NdArraySubClass: ... + @property + def coords(self) -> _Shape: ... + @property + def index(self) -> int: ... + def copy(self) -> _NdArraySubClass: ... + def __iter__(self: _FlatIterSelf) -> _FlatIterSelf: ... + def __next__(self: flatiter[ndarray[Any, dtype[_ScalarType]]]) -> _ScalarType: ... + def __len__(self) -> int: ... + @overload + def __getitem__( + self: flatiter[ndarray[Any, dtype[_ScalarType]]], + key: Union[int, integer], + ) -> _ScalarType: ... + @overload + def __getitem__( + self, key: Union[_ArrayLikeInt, slice, ellipsis], + ) -> _NdArraySubClass: ... + @overload + def __array__(self: flatiter[ndarray[Any, _DType]], __dtype: None = ...) -> ndarray[Any, _DType]: ... + @overload + def __array__(self, __dtype: _DType) -> ndarray[Any, _DType]: ... + +_OrderKACF = Optional[L["K", "A", "C", "F"]] +_OrderACF = Optional[L["A", "C", "F"]] +_OrderCF = Optional[L["C", "F"]] + +_ModeKind = L["raise", "wrap", "clip"] +_PartitionKind = L["introselect"] +_SortKind = L["quicksort", "mergesort", "heapsort", "stable"] +_SortSide = L["left", "right"] + +_ArraySelf = TypeVar("_ArraySelf", bound=_ArrayOrScalarCommon) + +class _ArrayOrScalarCommon: + @property + def T(self: _ArraySelf) -> _ArraySelf: ... + @property + def data(self) -> memoryview: ... + @property + def flags(self) -> _flagsobj: ... + @property + def itemsize(self) -> int: ... + @property + def nbytes(self) -> int: ... + def __bool__(self) -> bool: ... + def __bytes__(self) -> bytes: ... + def __str__(self) -> str: ... + def __repr__(self) -> str: ... + def __copy__(self: _ArraySelf) -> _ArraySelf: ... + def __deepcopy__(self: _ArraySelf, __memo: Optional[dict] = ...) -> _ArraySelf: ... + def __eq__(self, other): ... + def __ne__(self, other): ... + def astype( + self: _ArraySelf, + dtype: DTypeLike, + order: _OrderKACF = ..., + casting: _Casting = ..., + subok: bool = ..., + copy: bool = ..., + ) -> _ArraySelf: ... + def copy(self: _ArraySelf, order: _OrderKACF = ...) -> _ArraySelf: ... + def dump(self, file: str) -> None: ... + def dumps(self) -> bytes: ... + def getfield( + self: _ArraySelf, dtype: DTypeLike, offset: int = ... + ) -> _ArraySelf: ... + def tobytes(self, order: _OrderKACF = ...) -> bytes: ... + # NOTE: `tostring()` is deprecated and therefore excluded + # def tostring(self, order=...): ... + def tofile( + self, fid: Union[IO[bytes], str, bytes, os.PathLike[Any]], sep: str = ..., format: str = ... + ) -> None: ... + # generics and 0d arrays return builtin scalars + def tolist(self) -> Any: ... + @overload + def view(self, type: Type[_NdArraySubClass]) -> _NdArraySubClass: ... + @overload + def view(self: _ArraySelf, dtype: DTypeLike = ...) -> _ArraySelf: ... 
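The `flatiter.__getitem__` overloads above discriminate on the key: an integer yields a scalar, while slices and integer array-likes yield a copy typed as the base array's class. A sketch:

    import numpy as np

    a = np.arange(6).reshape(2, 3)
    a.flat[4]    # integer key -> scalar element: 4
    a.flat[::2]  # slice key   -> new ndarray: array([0, 2, 4])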
+ @overload + def view( + self, dtype: DTypeLike, type: Type[_NdArraySubClass] + ) -> _NdArraySubClass: ... + + # TODO: Add proper signatures + def __getitem__(self, key) -> Any: ... + @property + def __array_interface__(self): ... + @property + def __array_priority__(self): ... + @property + def __array_struct__(self): ... + def __array_wrap__(array, context=...): ... + def __setstate__(self, __state): ... + # a `bool_` is returned when `keepdims=True` and `self` is a 0d array + + @overload + def all( + self, + axis: None = ..., + out: None = ..., + keepdims: L[False] = ..., + ) -> bool_: ... + @overload + def all( + self, + axis: Optional[_ShapeLike] = ..., + out: None = ..., + keepdims: bool = ..., + ) -> Any: ... + @overload + def all( + self, + axis: Optional[_ShapeLike] = ..., + out: _NdArraySubClass = ..., + keepdims: bool = ..., + ) -> _NdArraySubClass: ... + + @overload + def any( + self, + axis: None = ..., + out: None = ..., + keepdims: L[False] = ..., + ) -> bool_: ... + @overload + def any( + self, + axis: Optional[_ShapeLike] = ..., + out: None = ..., + keepdims: bool = ..., + ) -> Any: ... + @overload + def any( + self, + axis: Optional[_ShapeLike] = ..., + out: _NdArraySubClass = ..., + keepdims: bool = ..., + ) -> _NdArraySubClass: ... + + @overload + def argmax( + self, + axis: None = ..., + out: None = ..., + ) -> intp: ... + @overload + def argmax( + self, + axis: _ShapeLike = ..., + out: None = ..., + ) -> Any: ... + @overload + def argmax( + self, + axis: Optional[_ShapeLike] = ..., + out: _NdArraySubClass = ..., + ) -> _NdArraySubClass: ... + + @overload + def argmin( + self, + axis: None = ..., + out: None = ..., + ) -> intp: ... + @overload + def argmin( + self, + axis: _ShapeLike = ..., + out: None = ..., + ) -> Any: ... + @overload + def argmin( + self, + axis: Optional[_ShapeLike] = ..., + out: _NdArraySubClass = ..., + ) -> _NdArraySubClass: ... + + def argsort( + self, + axis: Optional[SupportsIndex] = ..., + kind: Optional[_SortKind] = ..., + order: Union[None, str, Sequence[str]] = ..., + ) -> ndarray: ... + + @overload + def choose( + self, + choices: ArrayLike, + out: None = ..., + mode: _ModeKind = ..., + ) -> ndarray: ... + @overload + def choose( + self, + choices: ArrayLike, + out: _NdArraySubClass = ..., + mode: _ModeKind = ..., + ) -> _NdArraySubClass: ... + + @overload + def clip( + self, + min: ArrayLike = ..., + max: Optional[ArrayLike] = ..., + out: None = ..., + **kwargs: Any, + ) -> ndarray: ... + @overload + def clip( + self, + min: None = ..., + max: ArrayLike = ..., + out: None = ..., + **kwargs: Any, + ) -> ndarray: ... + @overload + def clip( + self, + min: ArrayLike = ..., + max: Optional[ArrayLike] = ..., + out: _NdArraySubClass = ..., + **kwargs: Any, + ) -> _NdArraySubClass: ... + @overload + def clip( + self, + min: None = ..., + max: ArrayLike = ..., + out: _NdArraySubClass = ..., + **kwargs: Any, + ) -> _NdArraySubClass: ... + + @overload + def compress( + self, + a: ArrayLike, + axis: Optional[SupportsIndex] = ..., + out: None = ..., + ) -> ndarray: ... + @overload + def compress( + self, + a: ArrayLike, + axis: Optional[SupportsIndex] = ..., + out: _NdArraySubClass = ..., + ) -> _NdArraySubClass: ... + + def conj(self: _ArraySelf) -> _ArraySelf: ... + + def conjugate(self: _ArraySelf) -> _ArraySelf: ... + + @overload + def cumprod( + self, + axis: Optional[SupportsIndex] = ..., + dtype: DTypeLike = ..., + out: None = ..., + ) -> ndarray: ... 
+ @overload + def cumprod( + self, + axis: Optional[SupportsIndex] = ..., + dtype: DTypeLike = ..., + out: _NdArraySubClass = ..., + ) -> _NdArraySubClass: ... + + @overload + def cumsum( + self, + axis: Optional[SupportsIndex] = ..., + dtype: DTypeLike = ..., + out: None = ..., + ) -> ndarray: ... + @overload + def cumsum( + self, + axis: Optional[SupportsIndex] = ..., + dtype: DTypeLike = ..., + out: _NdArraySubClass = ..., + ) -> _NdArraySubClass: ... + + @overload + def max( + self, + axis: Optional[_ShapeLike] = ..., + out: None = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> Any: ... + @overload + def max( + self, + axis: Optional[_ShapeLike] = ..., + out: _NdArraySubClass = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> _NdArraySubClass: ... + + @overload + def mean( + self, + axis: Optional[_ShapeLike] = ..., + dtype: DTypeLike = ..., + out: None = ..., + keepdims: bool = ..., + ) -> Any: ... + @overload + def mean( + self, + axis: Optional[_ShapeLike] = ..., + dtype: DTypeLike = ..., + out: _NdArraySubClass = ..., + keepdims: bool = ..., + ) -> _NdArraySubClass: ... + + @overload + def min( + self, + axis: Optional[_ShapeLike] = ..., + out: None = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> Any: ... + @overload + def min( + self, + axis: Optional[_ShapeLike] = ..., + out: _NdArraySubClass = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> _NdArraySubClass: ... + + def newbyteorder( + self: _ArraySelf, + __new_order: _ByteOrder = ..., + ) -> _ArraySelf: ... + + @overload + def prod( + self, + axis: Optional[_ShapeLike] = ..., + dtype: DTypeLike = ..., + out: None = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> Any: ... + @overload + def prod( + self, + axis: Optional[_ShapeLike] = ..., + dtype: DTypeLike = ..., + out: _NdArraySubClass = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> _NdArraySubClass: ... + + @overload + def ptp( + self, + axis: Optional[_ShapeLike] = ..., + out: None = ..., + keepdims: bool = ..., + ) -> Any: ... + @overload + def ptp( + self, + axis: Optional[_ShapeLike] = ..., + out: _NdArraySubClass = ..., + keepdims: bool = ..., + ) -> _NdArraySubClass: ... + + @overload + def round( + self: _ArraySelf, + decimals: SupportsIndex = ..., + out: None = ..., + ) -> _ArraySelf: ... + @overload + def round( + self, + decimals: SupportsIndex = ..., + out: _NdArraySubClass = ..., + ) -> _NdArraySubClass: ... + + @overload + def std( + self, + axis: Optional[_ShapeLike] = ..., + dtype: DTypeLike = ..., + out: None = ..., + ddof: int = ..., + keepdims: bool = ..., + ) -> Any: ... + @overload + def std( + self, + axis: Optional[_ShapeLike] = ..., + dtype: DTypeLike = ..., + out: _NdArraySubClass = ..., + ddof: int = ..., + keepdims: bool = ..., + ) -> _NdArraySubClass: ... + + @overload + def sum( + self, + axis: Optional[_ShapeLike] = ..., + dtype: DTypeLike = ..., + out: None = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> Any: ... + @overload + def sum( + self, + axis: Optional[_ShapeLike] = ..., + dtype: DTypeLike = ..., + out: _NdArraySubClass = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> _NdArraySubClass: ... 
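The reduction overloads above all follow one pattern: omitting `axis` and `out` yields a scalar, passing an `axis` yields `Any` (an array or scalar at runtime), and passing an `out=` array echoes that array's own type. A minimal sketch:

    import numpy as np

    a = np.arange(6).reshape(2, 3)
    a.sum()                  # scalar: 15
    a.sum(axis=0)            # array([3, 5, 7])
    out = np.empty(3, dtype=a.dtype)
    a.sum(axis=0, out=out)   # returns `out` itself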
+ + @overload + def var( + self, + axis: Optional[_ShapeLike] = ..., + dtype: DTypeLike = ..., + out: None = ..., + ddof: int = ..., + keepdims: bool = ..., + ) -> Any: ... + @overload + def var( + self, + axis: Optional[_ShapeLike] = ..., + dtype: DTypeLike = ..., + out: _NdArraySubClass = ..., + ddof: int = ..., + keepdims: bool = ..., + ) -> _NdArraySubClass: ... + +_DType = TypeVar("_DType", bound=dtype[Any]) +_DType_co = TypeVar("_DType_co", covariant=True, bound=dtype[Any]) + +# TODO: Set the `bound` to something more suitable once we +# have proper shape support +_ShapeType = TypeVar("_ShapeType", bound=Any) +_NumberType = TypeVar("_NumberType", bound=number[Any]) +_BufferType = Union[ndarray, bytes, bytearray, memoryview] + +_T = TypeVar("_T") +_T_co = TypeVar("_T_co", covariant=True) +_2Tuple = Tuple[_T, _T] +_Casting = L["no", "equiv", "safe", "same_kind", "unsafe"] + +_ArrayUInt_co = NDArray[Union[bool_, unsignedinteger[Any]]] +_ArrayInt_co = NDArray[Union[bool_, integer[Any]]] +_ArrayFloat_co = NDArray[Union[bool_, integer[Any], floating[Any]]] +_ArrayComplex_co = NDArray[Union[bool_, integer[Any], floating[Any], complexfloating[Any, Any]]] +_ArrayNumber_co = NDArray[Union[bool_, number[Any]]] +_ArrayTD64_co = NDArray[Union[bool_, integer[Any], timedelta64]] + +class _SupportsItem(Protocol[_T_co]): + def item(self, __args: Any) -> _T_co: ... + +class _SupportsReal(Protocol[_T_co]): + @property + def real(self) -> _T_co: ... + +class _SupportsImag(Protocol[_T_co]): + @property + def imag(self) -> _T_co: ... + +class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): + @property + def base(self) -> Optional[ndarray]: ... + @property + def ndim(self) -> int: ... + @property + def size(self) -> int: ... + @property + def real( + self: NDArray[_SupportsReal[_ScalarType]], # type: ignore[type-var] + ) -> ndarray[_ShapeType, dtype[_ScalarType]]: ... + @real.setter + def real(self, value: ArrayLike) -> None: ... + @property + def imag( + self: NDArray[_SupportsImag[_ScalarType]], # type: ignore[type-var] + ) -> ndarray[_ShapeType, dtype[_ScalarType]]: ... + @imag.setter + def imag(self, value: ArrayLike) -> None: ... + def __new__( + cls: Type[_ArraySelf], + shape: _ShapeLike, + dtype: DTypeLike = ..., + buffer: _BufferType = ..., + offset: int = ..., + strides: _ShapeLike = ..., + order: _OrderKACF = ..., + ) -> _ArraySelf: ... + @overload + def __array__(self, __dtype: None = ...) -> ndarray[Any, _DType_co]: ... + @overload + def __array__(self, __dtype: _DType) -> ndarray[Any, _DType]: ... + @property + def ctypes(self) -> _ctypes[int]: ... + @property + def shape(self) -> _Shape: ... + @shape.setter + def shape(self, value: _ShapeLike) -> None: ... + @property + def strides(self) -> _Shape: ... + @strides.setter + def strides(self, value: _ShapeLike) -> None: ... + def byteswap(self: _ArraySelf, inplace: bool = ...) -> _ArraySelf: ... + def fill(self, value: Any) -> None: ... + @property + def flat(self: _NdArraySubClass) -> flatiter[_NdArraySubClass]: ... + + # Use the same output type as that of the underlying `generic` + @overload + def item( + self: ndarray[Any, dtype[_SupportsItem[_T]]], # type: ignore[type-var] + *args: SupportsIndex, + ) -> _T: ... + @overload + def item( + self: ndarray[Any, dtype[_SupportsItem[_T]]], # type: ignore[type-var] + __args: Tuple[SupportsIndex, ...], + ) -> _T: ... + + @overload + def itemset(self, __value: Any) -> None: ... + @overload + def itemset(self, __item: _ShapeLike, __value: Any) -> None: ... 
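Per the `_SupportsItem` protocol above, `item` is annotated to return the matching builtin scalar, and the two overloaded call forms behave alike. A sketch:

    import numpy as np

    a = np.array([[1.5, 2.5]])
    a.item(0, 1)    # separate indices -> 2.5, a builtin float
    a.item((0, 0))  # single tuple     -> 1.5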
+ + @overload + def resize(self, __new_shape: _ShapeLike, *, refcheck: bool = ...) -> None: ... + @overload + def resize(self, *new_shape: SupportsIndex, refcheck: bool = ...) -> None: ... + + def setflags( + self, write: bool = ..., align: bool = ..., uic: bool = ... + ) -> None: ... + + def squeeze( + self, + axis: Union[SupportsIndex, Tuple[SupportsIndex, ...]] = ..., + ) -> ndarray[Any, _DType_co]: ... + + def swapaxes( + self, + axis1: SupportsIndex, + axis2: SupportsIndex, + ) -> ndarray[Any, _DType_co]: ... + + @overload + def transpose(self: _ArraySelf, __axes: _ShapeLike) -> _ArraySelf: ... + @overload + def transpose(self: _ArraySelf, *axes: SupportsIndex) -> _ArraySelf: ... + + def argpartition( + self, + kth: _ArrayLikeInt_co, + axis: Optional[SupportsIndex] = ..., + kind: _PartitionKind = ..., + order: Union[None, str, Sequence[str]] = ..., + ) -> ndarray[Any, dtype[intp]]: ... + + def diagonal( + self, + offset: SupportsIndex = ..., + axis1: SupportsIndex = ..., + axis2: SupportsIndex = ..., + ) -> ndarray[Any, _DType_co]: ... + + # 1D + 1D returns a scalar; + # all other with at least 1 non-0D array return an ndarray. + @overload + def dot(self, b: _ScalarLike_co, out: None = ...) -> ndarray: ... + @overload + def dot(self, b: ArrayLike, out: None = ...) -> Any: ... # type: ignore[misc] + @overload + def dot(self, b: ArrayLike, out: _NdArraySubClass) -> _NdArraySubClass: ... + + # `nonzero()` is deprecated for 0d arrays/generics + def nonzero(self) -> Tuple[ndarray[Any, dtype[intp]], ...]: ... + + def partition( + self, + kth: _ArrayLikeInt_co, + axis: SupportsIndex = ..., + kind: _PartitionKind = ..., + order: Union[None, str, Sequence[str]] = ..., + ) -> None: ... + + # `put` is technically available to `generic`, + # but is pointless as `generic`s are immutable + def put( + self, + ind: _ArrayLikeInt_co, + v: ArrayLike, + mode: _ModeKind = ..., + ) -> None: ... + + @overload + def searchsorted( # type: ignore[misc] + self, # >= 1D array + v: _ScalarLike_co, # 0D array-like + side: _SortSide = ..., + sorter: Optional[_ArrayLikeInt_co] = ..., + ) -> intp: ... + @overload + def searchsorted( + self, # >= 1D array + v: ArrayLike, + side: _SortSide = ..., + sorter: Optional[_ArrayLikeInt_co] = ..., + ) -> ndarray[Any, dtype[intp]]: ... + + def setfield( + self, + val: ArrayLike, + dtype: DTypeLike, + offset: SupportsIndex = ..., + ) -> None: ... + + def sort( + self, + axis: SupportsIndex = ..., + kind: Optional[_SortKind] = ..., + order: Union[None, str, Sequence[str]] = ..., + ) -> None: ... + + @overload + def trace( + self, # >= 2D array + offset: SupportsIndex = ..., + axis1: SupportsIndex = ..., + axis2: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: None = ..., + ) -> Any: ... + @overload + def trace( + self, # >= 2D array + offset: SupportsIndex = ..., + axis1: SupportsIndex = ..., + axis2: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: _NdArraySubClass = ..., + ) -> _NdArraySubClass: ... + + @overload + def take( # type: ignore[misc] + self: ndarray[Any, dtype[_ScalarType]], + indices: _IntLike_co, + axis: Optional[SupportsIndex] = ..., + out: None = ..., + mode: _ModeKind = ..., + ) -> _ScalarType: ... + @overload + def take( # type: ignore[misc] + self, + indices: _ArrayLikeInt_co, + axis: Optional[SupportsIndex] = ..., + out: None = ..., + mode: _ModeKind = ..., + ) -> ndarray[Any, _DType_co]: ... 
+    @overload
+    def take(
+        self,
+        indices: _ArrayLikeInt_co,
+        axis: Optional[SupportsIndex] = ...,
+        out: _NdArraySubClass = ...,
+        mode: _ModeKind = ...,
+    ) -> _NdArraySubClass: ...
+
+    def repeat(
+        self,
+        repeats: _ArrayLikeInt_co,
+        axis: Optional[SupportsIndex] = ...,
+    ) -> ndarray[Any, _DType_co]: ...
+
+    def flatten(
+        self,
+        order: _OrderKACF = ...,
+    ) -> ndarray[Any, _DType_co]: ...
+
+    def ravel(
+        self,
+        order: _OrderKACF = ...,
+    ) -> ndarray[Any, _DType_co]: ...
+
+    @overload
+    def reshape(
+        self, __shape: _ShapeLike, *, order: _OrderACF = ...
+    ) -> ndarray[Any, _DType_co]: ...
+    @overload
+    def reshape(
+        self, *shape: SupportsIndex, order: _OrderACF = ...
+    ) -> ndarray[Any, _DType_co]: ...
+
+    # Dispatch to the underlying `generic` via protocols
+    def __int__(
+        self: ndarray[Any, dtype[SupportsInt]],  # type: ignore[type-var]
+    ) -> int: ...
+
+    def __float__(
+        self: ndarray[Any, dtype[SupportsFloat]],  # type: ignore[type-var]
+    ) -> float: ...
+
+    def __complex__(
+        self: ndarray[Any, dtype[SupportsComplex]],  # type: ignore[type-var]
+    ) -> complex: ...
+
+    def __index__(
+        self: ndarray[Any, dtype[SupportsIndex]],  # type: ignore[type-var]
+    ) -> int: ...
+
+    def __len__(self) -> int: ...
+    def __setitem__(self, key, value): ...
+    def __iter__(self) -> Any: ...
+    def __contains__(self, key) -> bool: ...
+
+    # The last overload is for catching recursive objects whose
+    # nesting is too deep.
+    # The first overload is for catching `bytes` (as they are a subtype of
+    # `Sequence[int]`) and `str`. As `str` is a recursive sequence of
+    # strings, it would pass through the final overload otherwise.
+
+    @overload
+    def __lt__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ...
+    @overload
+    def __lt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[bool_]: ...
+    @overload
+    def __lt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[bool_]: ...
+    @overload
+    def __lt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[bool_]: ...
+    @overload
+    def __lt__(self: NDArray[object_], other: Any) -> NDArray[bool_]: ...
+    @overload
+    def __lt__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[bool_]: ...
+    @overload
+    def __lt__(
+        self: NDArray[Union[number[Any], datetime64, timedelta64, bool_]],
+        other: _RecursiveSequence,
+    ) -> NDArray[bool_]: ...
+
+    @overload
+    def __le__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ...
+    @overload
+    def __le__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[bool_]: ...
+    @overload
+    def __le__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[bool_]: ...
+    @overload
+    def __le__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[bool_]: ...
+    @overload
+    def __le__(self: NDArray[object_], other: Any) -> NDArray[bool_]: ...
+    @overload
+    def __le__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[bool_]: ...
+    @overload
+    def __le__(
+        self: NDArray[Union[number[Any], datetime64, timedelta64, bool_]],
+        other: _RecursiveSequence,
+    ) -> NDArray[bool_]: ...
+
+    @overload
+    def __gt__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ...
+    @overload
+    def __gt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[bool_]: ...
+    @overload
+    def __gt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[bool_]: ...
+    @overload
+    def __gt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[bool_]: ...
+ @overload + def __gt__(self: NDArray[object_], other: Any) -> NDArray[bool_]: ... + @overload + def __gt__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[bool_]: ... + @overload + def __gt__( + self: NDArray[Union[number[Any], datetime64, timedelta64, bool_]], + other: _RecursiveSequence, + ) -> NDArray[bool_]: ... + + @overload + def __ge__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __ge__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[bool_]: ... + @overload + def __ge__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[bool_]: ... + @overload + def __ge__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[bool_]: ... + @overload + def __ge__(self: NDArray[object_], other: Any) -> NDArray[bool_]: ... + @overload + def __ge__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[bool_]: ... + @overload + def __ge__( + self: NDArray[Union[number[Any], datetime64, timedelta64, bool_]], + other: _RecursiveSequence, + ) -> NDArray[bool_]: ... + + # Unary ops + @overload + def __abs__(self: NDArray[bool_]) -> NDArray[bool_]: ... + @overload + def __abs__(self: NDArray[complexfloating[_NBit1, _NBit1]]) -> NDArray[floating[_NBit1]]: ... + @overload + def __abs__(self: NDArray[_NumberType]) -> NDArray[_NumberType]: ... + @overload + def __abs__(self: NDArray[timedelta64]) -> NDArray[timedelta64]: ... + @overload + def __abs__(self: NDArray[object_]) -> Any: ... + + @overload + def __invert__(self: NDArray[bool_]) -> NDArray[bool_]: ... + @overload + def __invert__(self: NDArray[_IntType]) -> NDArray[_IntType]: ... + @overload + def __invert__(self: NDArray[object_]) -> Any: ... + + @overload + def __pos__(self: NDArray[_NumberType]) -> NDArray[_NumberType]: ... + @overload + def __pos__(self: NDArray[timedelta64]) -> NDArray[timedelta64]: ... + @overload + def __pos__(self: NDArray[object_]) -> Any: ... + + @overload + def __neg__(self: NDArray[_NumberType]) -> NDArray[_NumberType]: ... + @overload + def __neg__(self: NDArray[timedelta64]) -> NDArray[timedelta64]: ... + @overload + def __neg__(self: NDArray[object_]) -> Any: ... + + # Binary ops + # NOTE: `ndarray` does not implement `__imatmul__` + @overload + def __matmul__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __matmul__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc] + @overload + def __matmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __matmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __matmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + @overload + def __matmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + @overload + def __matmul__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __matmul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __matmul__( + self: _ArrayNumber_co, + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __rmatmul__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __rmatmul__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... 
# type: ignore[misc] + @overload + def __rmatmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rmatmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rmatmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + @overload + def __rmatmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + @overload + def __rmatmul__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __rmatmul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __rmatmul__( + self: _ArrayNumber_co, + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __mod__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __mod__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + @overload + def __mod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __mod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __mod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + @overload + def __mod__(self: _ArrayTD64_co, other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> NDArray[timedelta64]: ... + @overload + def __mod__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __mod__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __mod__( + self: NDArray[Union[bool_, integer[Any], floating[Any], timedelta64]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __rmod__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __rmod__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + @overload + def __rmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + @overload + def __rmod__(self: _ArrayTD64_co, other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> NDArray[timedelta64]: ... + @overload + def __rmod__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __rmod__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __rmod__( + self: NDArray[Union[bool_, integer[Any], floating[Any], timedelta64]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __divmod__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __divmod__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> _2Tuple[NDArray[int8]]: ... # type: ignore[misc] + @overload + def __divmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _2Tuple[NDArray[unsignedinteger[Any]]]: ... # type: ignore[misc] + @overload + def __divmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _2Tuple[NDArray[signedinteger[Any]]]: ... 
# type: ignore[misc] + @overload + def __divmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _2Tuple[NDArray[floating[Any]]]: ... # type: ignore[misc] + @overload + def __divmod__(self: _ArrayTD64_co, other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> Tuple[NDArray[int64], NDArray[timedelta64]]: ... + @overload + def __divmod__( + self: NDArray[Union[bool_, integer[Any], floating[Any], timedelta64]], + other: _RecursiveSequence, + ) -> _2Tuple[Any]: ... + + @overload + def __rdivmod__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __rdivmod__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> _2Tuple[NDArray[int8]]: ... # type: ignore[misc] + @overload + def __rdivmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _2Tuple[NDArray[unsignedinteger[Any]]]: ... # type: ignore[misc] + @overload + def __rdivmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _2Tuple[NDArray[signedinteger[Any]]]: ... # type: ignore[misc] + @overload + def __rdivmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _2Tuple[NDArray[floating[Any]]]: ... # type: ignore[misc] + @overload + def __rdivmod__(self: _ArrayTD64_co, other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> Tuple[NDArray[int64], NDArray[timedelta64]]: ... + @overload + def __rdivmod__( + self: NDArray[Union[bool_, integer[Any], floating[Any], timedelta64]], + other: _RecursiveSequence, + ) -> _2Tuple[Any]: ... + + @overload + def __add__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __add__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc] + @overload + def __add__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __add__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __add__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + @overload + def __add__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + @overload + def __add__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... # type: ignore[misc] + @overload + def __add__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> NDArray[datetime64]: ... + @overload + def __add__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ... + @overload + def __add__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __add__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __add__( + self: NDArray[Union[bool_, number[Any], timedelta64, datetime64]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __radd__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __radd__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc] + @overload + def __radd__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __radd__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __radd__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... 
# type: ignore[misc] + @overload + def __radd__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + @overload + def __radd__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... # type: ignore[misc] + @overload + def __radd__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> NDArray[datetime64]: ... + @overload + def __radd__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ... + @overload + def __radd__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __radd__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __radd__( + self: NDArray[Union[bool_, number[Any], timedelta64, datetime64]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __sub__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __sub__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NoReturn: ... + @overload + def __sub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __sub__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __sub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + @overload + def __sub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + @overload + def __sub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... # type: ignore[misc] + @overload + def __sub__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ... + @overload + def __sub__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[timedelta64]: ... + @overload + def __sub__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __sub__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __sub__( + self: NDArray[Union[bool_, number[Any], timedelta64, datetime64]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __rsub__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __rsub__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NoReturn: ... + @overload + def __rsub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rsub__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rsub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + @overload + def __rsub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + @overload + def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... # type: ignore[misc] + @overload + def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> NDArray[datetime64]: ... # type: ignore[misc] + @overload + def __rsub__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[timedelta64]: ... + @overload + def __rsub__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __rsub__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... 
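A sketch of the datetime-related `__add__`/`__sub__` overloads above: adding a timedelta to a datetime stays `datetime64`, while differencing two datetimes drops to `timedelta64`:

    import numpy as np

    d = np.array(["2021-01-01", "2021-01-03"], dtype="datetime64[D]")
    t = np.array([1, 2], dtype="timedelta64[D]")
    d + t  # datetime64: ['2021-01-02', '2021-01-05']
    d - d  # timedelta64: [0, 0] (days)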
+ @overload + def __rsub__( + self: NDArray[Union[bool_, number[Any], timedelta64, datetime64]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __mul__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __mul__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc] + @overload + def __mul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __mul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __mul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + @overload + def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + @overload + def __mul__(self: _ArrayTD64_co, other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ... + @overload + def __mul__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... + @overload + def __mul__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __mul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __mul__( + self: NDArray[Union[bool_, number[Any], timedelta64]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __rmul__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __rmul__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc] + @overload + def __rmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + @overload + def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + @overload + def __rmul__(self: _ArrayTD64_co, other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ... + @overload + def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... + @overload + def __rmul__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __rmul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __rmul__( + self: NDArray[Union[bool_, number[Any], timedelta64]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __floordiv__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __floordiv__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + @overload + def __floordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __floordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __floordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + @overload + def __floordiv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... 
# type: ignore[misc] + @overload + def __floordiv__(self: NDArray[timedelta64], other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> NDArray[int64]: ... + @overload + def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ... + @overload + def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ... + @overload + def __floordiv__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __floordiv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __floordiv__( + self: NDArray[Union[bool_, number[Any], timedelta64]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __rfloordiv__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __rfloordiv__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + @overload + def __rfloordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rfloordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + @overload + def __rfloordiv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + @overload + def __rfloordiv__(self: NDArray[timedelta64], other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> NDArray[int64]: ... + @overload + def __rfloordiv__(self: NDArray[bool_], other: _ArrayLikeTD64_co) -> NoReturn: ... + @overload + def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... + @overload + def __rfloordiv__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __rfloordiv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __rfloordiv__( + self: NDArray[Union[bool_, number[Any], timedelta64]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __pow__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __pow__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + @overload + def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + @overload + def __pow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + @overload + def __pow__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __pow__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __pow__( + self: NDArray[Union[bool_, number[Any]]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __rpow__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __rpow__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + @overload + def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... 
# type: ignore[misc] + @overload + def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + @overload + def __rpow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + @overload + def __rpow__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __rpow__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __rpow__( + self: NDArray[Union[bool_, number[Any]]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __truediv__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __truediv__(self: _ArrayInt_co, other: _ArrayInt_co) -> NDArray[float64]: ... # type: ignore[misc] + @overload + def __truediv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + @overload + def __truediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + @overload + def __truediv__(self: NDArray[timedelta64], other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> NDArray[float64]: ... + @overload + def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ... + @overload + def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ... + @overload + def __truediv__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __truediv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __truediv__( + self: NDArray[Union[bool_, number[Any], timedelta64]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __rtruediv__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __rtruediv__(self: _ArrayInt_co, other: _ArrayInt_co) -> NDArray[float64]: ... # type: ignore[misc] + @overload + def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + @overload + def __rtruediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + @overload + def __rtruediv__(self: NDArray[timedelta64], other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> NDArray[float64]: ... + @overload + def __rtruediv__(self: NDArray[bool_], other: _ArrayLikeTD64_co) -> NoReturn: ... + @overload + def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... + @overload + def __rtruediv__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __rtruediv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __rtruediv__( + self: NDArray[Union[bool_, number[Any], timedelta64]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __lshift__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __lshift__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + @overload + def __lshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __lshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... 
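As the `__truediv__` overloads above encode, true division of integer-compatible arrays is annotated as `float64`, and dividing a timedelta array by another timedelta array likewise yields `float64`:

    import numpy as np

    np.array([1, 2]) / np.array([2, 2])  # array([0.5, 1. ]), dtype float64
    t = np.array([2, 4], dtype="timedelta64[s]")
    t / t                                # array([1., 2.]), dtype float64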
+ @overload + def __lshift__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __lshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __lshift__( + self: NDArray[Union[bool_, integer[Any]]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __rlshift__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __rlshift__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + @overload + def __rlshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rlshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... + @overload + def __rlshift__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __rlshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __rlshift__( + self: NDArray[Union[bool_, integer[Any]]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __rshift__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __rshift__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + @overload + def __rshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... + @overload + def __rshift__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __rshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __rshift__( + self: NDArray[Union[bool_, integer[Any]]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __rrshift__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __rrshift__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + @overload + def __rrshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rrshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... + @overload + def __rrshift__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __rrshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __rrshift__( + self: NDArray[Union[bool_, integer[Any]]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __and__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __and__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc] + @overload + def __and__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __and__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... + @overload + def __and__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __and__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __and__( + self: NDArray[Union[bool_, integer[Any]]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __rand__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __rand__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... 
# type: ignore[misc] + @overload + def __rand__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rand__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... + @overload + def __rand__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __rand__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __rand__( + self: NDArray[Union[bool_, integer[Any]]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __xor__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __xor__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc] + @overload + def __xor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __xor__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... + @overload + def __xor__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __xor__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __xor__( + self: NDArray[Union[bool_, integer[Any]]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __rxor__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __rxor__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc] + @overload + def __rxor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __rxor__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... + @overload + def __rxor__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __rxor__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __rxor__( + self: NDArray[Union[bool_, integer[Any]]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __or__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __or__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc] + @overload + def __or__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __or__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... + @overload + def __or__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __or__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __or__( + self: NDArray[Union[bool_, integer[Any]]], + other: _RecursiveSequence, + ) -> Any: ... + + @overload + def __ror__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __ror__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc] + @overload + def __ror__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + @overload + def __ror__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... + @overload + def __ror__(self: NDArray[object_], other: Any) -> Any: ... + @overload + def __ror__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + @overload + def __ror__( + self: NDArray[Union[bool_, integer[Any]]], + other: _RecursiveSequence, + ) -> Any: ... 
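The bitwise overloads above all follow one ladder: `str`/`bytes` operands are routed to a `NoReturn` overload so the type checker rejects them outright, boolean arrays stay `bool_` under `&`/`|`/`^` but promote under shifts, and `object_` arrays fall through to `Any`. A minimal sketch of how this resolves under mypy (not part of the vendored stubs; it assumes NumPy ~1.21 with these annotations, and the variable names are illustrative):

```python
import numpy as np
from numpy.typing import NDArray

mask: NDArray[np.bool_] = np.array([True, False, True])

both = mask & mask   # bool_-only operands stay boolean -> NDArray[bool_]
shifted = mask << 1  # a Python int operand matches the _ArrayLikeInt_co
                     # overload -> NDArray[signedinteger[Any]]

# A str operand matches the _NestedSequence[Union[str, bytes]] overload,
# whose NoReturn return type makes mypy flag the expression:
# bad = mask & ["a", "b", "c"]  # rejected by the type checker
```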
+ + # `np.generic` does not support inplace operations + @overload # type: ignore[misc] + def __iadd__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __iadd__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... + @overload + def __iadd__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[_NBit1]]: ... + @overload + def __iadd__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + @overload + def __iadd__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... + @overload + def __iadd__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... + @overload + def __iadd__(self: NDArray[timedelta64], other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... + @overload + def __iadd__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ... + @overload + def __iadd__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + @overload + def __iadd__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ... + + @overload # type: ignore[misc] + def __isub__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __isub__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[_NBit1]]: ... + @overload + def __isub__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + @overload + def __isub__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... + @overload + def __isub__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... + @overload + def __isub__(self: NDArray[timedelta64], other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... + @overload + def __isub__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ... + @overload + def __isub__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + @overload + def __isub__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ... + + @overload # type: ignore[misc] + def __imul__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __imul__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... + @overload + def __imul__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[_NBit1]]: ... + @overload + def __imul__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + @overload + def __imul__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... + @overload + def __imul__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... + @overload + def __imul__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ... + @overload + def __imul__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + @overload + def __imul__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ... + + @overload # type: ignore[misc] + def __itruediv__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... 
+ @overload + def __itruediv__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... + @overload + def __itruediv__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... + @overload + def __itruediv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ... + @overload + def __itruediv__(self: NDArray[timedelta64], other: _ArrayLikeInt_co) -> NDArray[timedelta64]: ... + @overload + def __itruediv__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + @overload + def __itruediv__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ... + + @overload # type: ignore[misc] + def __ifloordiv__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __ifloordiv__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[_NBit1]]: ... + @overload + def __ifloordiv__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + @overload + def __ifloordiv__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... + @overload + def __ifloordiv__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... + @overload + def __ifloordiv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ... + @overload + def __ifloordiv__(self: NDArray[timedelta64], other: _ArrayLikeInt_co) -> NDArray[timedelta64]: ... + @overload + def __ifloordiv__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + @overload + def __ifloordiv__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ... + + @overload # type: ignore[misc] + def __ipow__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __ipow__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[_NBit1]]: ... + @overload + def __ipow__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + @overload + def __ipow__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... + @overload + def __ipow__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... + @overload + def __ipow__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + @overload + def __ipow__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ... + + @overload # type: ignore[misc] + def __imod__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __imod__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[_NBit1]]: ... + @overload + def __imod__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + @overload + def __imod__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... + @overload + def __imod__(self: NDArray[timedelta64], other: _NestedSequence[_SupportsArray[dtype[timedelta64]]]) -> NDArray[timedelta64]: ... + @overload + def __imod__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + @overload + def __imod__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ... 
+ + @overload # type: ignore[misc] + def __ilshift__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __ilshift__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[_NBit1]]: ... + @overload + def __ilshift__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + @overload + def __ilshift__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + @overload + def __ilshift__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ... + + @overload # type: ignore[misc] + def __irshift__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __irshift__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[_NBit1]]: ... + @overload + def __irshift__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + @overload + def __irshift__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + @overload + def __irshift__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ... + + @overload # type: ignore[misc] + def __iand__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __iand__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... + @overload + def __iand__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[_NBit1]]: ... + @overload + def __iand__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + @overload + def __iand__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + @overload + def __iand__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ... + + @overload # type: ignore[misc] + def __ixor__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __ixor__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... + @overload + def __ixor__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[_NBit1]]: ... + @overload + def __ixor__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + @overload + def __ixor__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + @overload + def __ixor__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ... + + @overload # type: ignore[misc] + def __ior__(self: NDArray[Any], other: _NestedSequence[Union[str, bytes]]) -> NoReturn: ... + @overload + def __ior__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... + @overload + def __ior__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[_NBit1]]: ... + @overload + def __ior__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + @overload + def __ior__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + @overload + def __ior__(self: NDArray[_ScalarType], other: _RecursiveSequence) -> NDArray[_ScalarType]: ... + + # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` + @property + def dtype(self) -> _DType_co: ... 
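Note how every in-place overload binds the same `_NBit1` in both the `self` annotation and the return type: `a op= b` must cast its result back into `a`, so the array's precision never widens. A short sketch (again not part of the stubs; assumes NumPy ~1.21 and a mypy run; names are illustrative):

```python
import numpy as np
from numpy.typing import NDArray

a: NDArray[np.float32] = np.zeros(3, dtype=np.float32)
a += 1.0  # _ArrayLikeFloat_co operand; the type stays NDArray[float32]

# No __iadd__ overload pairs a floating array with a complex operand,
# mirroring the runtime same-kind casting failure:
# a += 1j  # rejected by mypy (and raises a casting TypeError at runtime)
```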
+ +# NOTE: while `np.generic` is not technically an instance of `ABCMeta`, +# the `@abstractmethod` decorator is herein used to (forcefully) deny +# the creation of `np.generic` instances. +# The `# type: ignore` comments are necessary to silence mypy errors regarding +# the missing `ABCMeta` metaclass. + +# See https://github.com/numpy/numpy-stubs/pull/80 for more details. + +_ScalarType = TypeVar("_ScalarType", bound=generic) +_NBit1 = TypeVar("_NBit1", bound=NBitBase) +_NBit2 = TypeVar("_NBit2", bound=NBitBase) + +class generic(_ArrayOrScalarCommon): + @abstractmethod + def __init__(self, *args: Any, **kwargs: Any) -> None: ... + @overload + def __array__(self: _ScalarType, __dtype: None = ...) -> ndarray[Any, dtype[_ScalarType]]: ... + @overload + def __array__(self, __dtype: _DType) -> ndarray[Any, _DType]: ... + @property + def base(self) -> None: ... + @property + def ndim(self) -> L[0]: ... + @property + def size(self) -> L[1]: ... + @property + def shape(self) -> Tuple[()]: ... + @property + def strides(self) -> Tuple[()]: ... + def byteswap(self: _ScalarType, inplace: L[False] = ...) -> _ScalarType: ... + @property + def flat(self: _ScalarType) -> flatiter[ndarray[Any, dtype[_ScalarType]]]: ... + def item( + self, + __args: Union[L[0], Tuple[()], Tuple[L[0]]] = ..., + ) -> Any: ... + + @overload + def take( # type: ignore[misc] + self: _ScalarType, + indices: _IntLike_co, + axis: Optional[SupportsIndex] = ..., + out: None = ..., + mode: _ModeKind = ..., + ) -> _ScalarType: ... + @overload + def take( # type: ignore[misc] + self: _ScalarType, + indices: _ArrayLikeInt_co, + axis: Optional[SupportsIndex] = ..., + out: None = ..., + mode: _ModeKind = ..., + ) -> ndarray[Any, dtype[_ScalarType]]: ... + @overload + def take( + self, + indices: _ArrayLikeInt_co, + axis: Optional[SupportsIndex] = ..., + out: _NdArraySubClass = ..., + mode: _ModeKind = ..., + ) -> _NdArraySubClass: ... + + def repeat( + self: _ScalarType, + repeats: _ArrayLikeInt_co, + axis: Optional[SupportsIndex] = ..., + ) -> ndarray[Any, dtype[_ScalarType]]: ... + + def flatten( + self: _ScalarType, + order: _OrderKACF = ..., + ) -> ndarray[Any, dtype[_ScalarType]]: ... + + def ravel( + self: _ScalarType, + order: _OrderKACF = ..., + ) -> ndarray[Any, dtype[_ScalarType]]: ... + + @overload + def reshape( + self: _ScalarType, __shape: _ShapeLike, *, order: _OrderACF = ... + ) -> ndarray[Any, dtype[_ScalarType]]: ... + @overload + def reshape( + self: _ScalarType, *shape: SupportsIndex, order: _OrderACF = ... + ) -> ndarray[Any, dtype[_ScalarType]]: ... + + def squeeze( + self: _ScalarType, axis: Union[L[0], Tuple[()]] = ... + ) -> _ScalarType: ... + def transpose(self: _ScalarType, __axes: Tuple[()] = ...) -> _ScalarType: ... + # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` + @property + def dtype(self: _ScalarType) -> dtype[_ScalarType]: ... + +class number(generic, Generic[_NBit1]): # type: ignore + @property + def real(self: _ArraySelf) -> _ArraySelf: ... + @property + def imag(self: _ArraySelf) -> _ArraySelf: ... + def __int__(self) -> int: ... + def __float__(self) -> float: ... + def __complex__(self) -> complex: ... + def __neg__(self: _ArraySelf) -> _ArraySelf: ... + def __pos__(self: _ArraySelf) -> _ArraySelf: ... + def __abs__(self: _ArraySelf) -> _ArraySelf: ... 
+ # Ensure that objects annotated as `number` support arithmetic operations + __add__: _NumberOp + __radd__: _NumberOp + __sub__: _NumberOp + __rsub__: _NumberOp + __mul__: _NumberOp + __rmul__: _NumberOp + __floordiv__: _NumberOp + __rfloordiv__: _NumberOp + __pow__: _NumberOp + __rpow__: _NumberOp + __truediv__: _NumberOp + __rtruediv__: _NumberOp + __lt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] + __le__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] + __gt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] + __ge__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] + +class bool_(generic): + def __init__(self, __value: object = ...) -> None: ... + def item( + self, + __args: Union[L[0], Tuple[()], Tuple[L[0]]] = ..., + ) -> bool: ... + def tolist(self) -> bool: ... + @property + def real(self: _ArraySelf) -> _ArraySelf: ... + @property + def imag(self: _ArraySelf) -> _ArraySelf: ... + def __int__(self) -> int: ... + def __float__(self) -> float: ... + def __complex__(self) -> complex: ... + def __abs__(self: _ArraySelf) -> _ArraySelf: ... + __add__: _BoolOp[bool_] + __radd__: _BoolOp[bool_] + __sub__: _BoolSub + __rsub__: _BoolSub + __mul__: _BoolOp[bool_] + __rmul__: _BoolOp[bool_] + __floordiv__: _BoolOp[int8] + __rfloordiv__: _BoolOp[int8] + __pow__: _BoolOp[int8] + __rpow__: _BoolOp[int8] + __truediv__: _BoolTrueDiv + __rtruediv__: _BoolTrueDiv + def __invert__(self) -> bool_: ... + __lshift__: _BoolBitOp[int8] + __rlshift__: _BoolBitOp[int8] + __rshift__: _BoolBitOp[int8] + __rrshift__: _BoolBitOp[int8] + __and__: _BoolBitOp[bool_] + __rand__: _BoolBitOp[bool_] + __xor__: _BoolBitOp[bool_] + __rxor__: _BoolBitOp[bool_] + __or__: _BoolBitOp[bool_] + __ror__: _BoolBitOp[bool_] + __mod__: _BoolMod + __rmod__: _BoolMod + __divmod__: _BoolDivMod + __rdivmod__: _BoolDivMod + __lt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] + __le__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] + __gt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] + __ge__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] + +bool8 = bool_ + +class object_(generic): + def __init__(self, __value: object = ...) -> None: ... + @property + def real(self: _ArraySelf) -> _ArraySelf: ... + @property + def imag(self: _ArraySelf) -> _ArraySelf: ... + # The 3 protocols below may or may not raise, + # depending on the underlying object + def __int__(self) -> int: ... + def __float__(self) -> float: ... + def __complex__(self) -> complex: ... + +object0 = object_ + +# The `datetime64` constructor requires an object with the three attributes below, +# and thus supports datetime duck typing +class _DatetimeScalar(Protocol): + @property + def day(self) -> int: ... + @property + def month(self) -> int: ... + @property + def year(self) -> int: ... + +# TODO: `item`/`tolist` returns either `dt.date`, `dt.datetime` or `int` +# depending on the unit +class datetime64(generic): + @overload + def __init__( + self, + __value: Union[None, datetime64, _CharLike_co, _DatetimeScalar] = ..., + __format: Union[_CharLike_co, Tuple[_CharLike_co, _IntLike_co]] = ..., + ) -> None: ... + @overload + def __init__( + self, + __value: int, + __format: Union[_CharLike_co, Tuple[_CharLike_co, _IntLike_co]] + ) -> None: ... + def __add__(self, other: _TD64Like_co) -> datetime64: ... + def __radd__(self, other: _TD64Like_co) -> datetime64: ... + @overload + def __sub__(self, other: datetime64) -> timedelta64: ... + @overload + def __sub__(self, other: _TD64Like_co) -> datetime64: ...
+ def __rsub__(self, other: datetime64) -> timedelta64: ... + __lt__: _ComparisonOp[datetime64, _ArrayLikeDT64_co] + __le__: _ComparisonOp[datetime64, _ArrayLikeDT64_co] + __gt__: _ComparisonOp[datetime64, _ArrayLikeDT64_co] + __ge__: _ComparisonOp[datetime64, _ArrayLikeDT64_co] + +# Support for `__index__` was added in python 3.8 (bpo-20092) +if sys.version_info >= (3, 8): + _IntValue = Union[SupportsInt, _CharLike_co, SupportsIndex] + _FloatValue = Union[None, _CharLike_co, SupportsFloat, SupportsIndex] + _ComplexValue = Union[ + None, + _CharLike_co, + SupportsFloat, + SupportsComplex, + SupportsIndex, + complex, # `complex` is not a subtype of `SupportsComplex` + ] +else: + _IntValue = Union[SupportsInt, _CharLike_co] + _FloatValue = Union[None, _CharLike_co, SupportsFloat] + _ComplexValue = Union[ + None, + _CharLike_co, + SupportsFloat, + SupportsComplex, + complex, + ] + +class integer(number[_NBit1]): # type: ignore + @property + def numerator(self: _ScalarType) -> _ScalarType: ... + @property + def denominator(self) -> L[1]: ... + @overload + def __round__(self, ndigits: None = ...) -> int: ... + @overload + def __round__(self: _ScalarType, ndigits: SupportsIndex) -> _ScalarType: ... + + # NOTE: `__index__` is technically defined in the bottom-most + # sub-classes (`int64`, `uint32`, etc) + def item( + self, + __args: Union[L[0], Tuple[()], Tuple[L[0]]] = ..., + ) -> int: ... + def tolist(self) -> int: ... + def __index__(self) -> int: ... + __truediv__: _IntTrueDiv[_NBit1] + __rtruediv__: _IntTrueDiv[_NBit1] + def __mod__(self, value: _IntLike_co) -> integer: ... + def __rmod__(self, value: _IntLike_co) -> integer: ... + def __invert__(self: _IntType) -> _IntType: ... + # Ensure that objects annotated as `integer` support bit-wise operations + def __lshift__(self, other: _IntLike_co) -> integer: ... + def __rlshift__(self, other: _IntLike_co) -> integer: ... + def __rshift__(self, other: _IntLike_co) -> integer: ... + def __rrshift__(self, other: _IntLike_co) -> integer: ... + def __and__(self, other: _IntLike_co) -> integer: ... + def __rand__(self, other: _IntLike_co) -> integer: ... + def __or__(self, other: _IntLike_co) -> integer: ... + def __ror__(self, other: _IntLike_co) -> integer: ... + def __xor__(self, other: _IntLike_co) -> integer: ... + def __rxor__(self, other: _IntLike_co) -> integer: ... + +class signedinteger(integer[_NBit1]): + def __init__(self, __value: _IntValue = ...) -> None: ... 
+ __add__: _SignedIntOp[_NBit1] + __radd__: _SignedIntOp[_NBit1] + __sub__: _SignedIntOp[_NBit1] + __rsub__: _SignedIntOp[_NBit1] + __mul__: _SignedIntOp[_NBit1] + __rmul__: _SignedIntOp[_NBit1] + __floordiv__: _SignedIntOp[_NBit1] + __rfloordiv__: _SignedIntOp[_NBit1] + __pow__: _SignedIntOp[_NBit1] + __rpow__: _SignedIntOp[_NBit1] + __lshift__: _SignedIntBitOp[_NBit1] + __rlshift__: _SignedIntBitOp[_NBit1] + __rshift__: _SignedIntBitOp[_NBit1] + __rrshift__: _SignedIntBitOp[_NBit1] + __and__: _SignedIntBitOp[_NBit1] + __rand__: _SignedIntBitOp[_NBit1] + __xor__: _SignedIntBitOp[_NBit1] + __rxor__: _SignedIntBitOp[_NBit1] + __or__: _SignedIntBitOp[_NBit1] + __ror__: _SignedIntBitOp[_NBit1] + __mod__: _SignedIntMod[_NBit1] + __rmod__: _SignedIntMod[_NBit1] + __divmod__: _SignedIntDivMod[_NBit1] + __rdivmod__: _SignedIntDivMod[_NBit1] + +int8 = signedinteger[_8Bit] +int16 = signedinteger[_16Bit] +int32 = signedinteger[_32Bit] +int64 = signedinteger[_64Bit] + +byte = signedinteger[_NBitByte] +short = signedinteger[_NBitShort] +intc = signedinteger[_NBitIntC] +intp = signedinteger[_NBitIntP] +int0 = signedinteger[_NBitIntP] +int_ = signedinteger[_NBitInt] +longlong = signedinteger[_NBitLongLong] + +# TODO: `item`/`tolist` returns either `dt.timedelta` or `int` +# depending on the unit +class timedelta64(generic): + def __init__( + self, + __value: Union[None, int, _CharLike_co, dt.timedelta, timedelta64] = ..., + __format: Union[_CharLike_co, Tuple[_CharLike_co, _IntLike_co]] = ..., + ) -> None: ... + @property + def numerator(self: _ScalarType) -> _ScalarType: ... + @property + def denominator(self) -> L[1]: ... + + # NOTE: Only a limited number of units support conversion + # to builtin scalar types: `Y`, `M`, `ns`, `ps`, `fs`, `as` + def __int__(self) -> int: ... + def __float__(self) -> float: ... + def __complex__(self) -> complex: ... + def __neg__(self: _ArraySelf) -> _ArraySelf: ... + def __pos__(self: _ArraySelf) -> _ArraySelf: ... + def __abs__(self: _ArraySelf) -> _ArraySelf: ... + def __add__(self, other: _TD64Like_co) -> timedelta64: ... + def __radd__(self, other: _TD64Like_co) -> timedelta64: ... + def __sub__(self, other: _TD64Like_co) -> timedelta64: ... + def __rsub__(self, other: _TD64Like_co) -> timedelta64: ... + def __mul__(self, other: _FloatLike_co) -> timedelta64: ... + def __rmul__(self, other: _FloatLike_co) -> timedelta64: ... + __truediv__: _TD64Div[float64] + __floordiv__: _TD64Div[int64] + def __rtruediv__(self, other: timedelta64) -> float64: ... + def __rfloordiv__(self, other: timedelta64) -> int64: ... + def __mod__(self, other: timedelta64) -> timedelta64: ... + def __rmod__(self, other: timedelta64) -> timedelta64: ... + def __divmod__(self, other: timedelta64) -> Tuple[int64, timedelta64]: ... + def __rdivmod__(self, other: timedelta64) -> Tuple[int64, timedelta64]: ... + __lt__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co] + __le__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co] + __gt__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co] + __ge__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co] + +class unsignedinteger(integer[_NBit1]): + # NOTE: `uint64 + signedinteger -> float64` + def __init__(self, __value: _IntValue = ...) -> None: ... 
+ __add__: _UnsignedIntOp[_NBit1] + __radd__: _UnsignedIntOp[_NBit1] + __sub__: _UnsignedIntOp[_NBit1] + __rsub__: _UnsignedIntOp[_NBit1] + __mul__: _UnsignedIntOp[_NBit1] + __rmul__: _UnsignedIntOp[_NBit1] + __floordiv__: _UnsignedIntOp[_NBit1] + __rfloordiv__: _UnsignedIntOp[_NBit1] + __pow__: _UnsignedIntOp[_NBit1] + __rpow__: _UnsignedIntOp[_NBit1] + __lshift__: _UnsignedIntBitOp[_NBit1] + __rlshift__: _UnsignedIntBitOp[_NBit1] + __rshift__: _UnsignedIntBitOp[_NBit1] + __rrshift__: _UnsignedIntBitOp[_NBit1] + __and__: _UnsignedIntBitOp[_NBit1] + __rand__: _UnsignedIntBitOp[_NBit1] + __xor__: _UnsignedIntBitOp[_NBit1] + __rxor__: _UnsignedIntBitOp[_NBit1] + __or__: _UnsignedIntBitOp[_NBit1] + __ror__: _UnsignedIntBitOp[_NBit1] + __mod__: _UnsignedIntMod[_NBit1] + __rmod__: _UnsignedIntMod[_NBit1] + __divmod__: _UnsignedIntDivMod[_NBit1] + __rdivmod__: _UnsignedIntDivMod[_NBit1] + +uint8 = unsignedinteger[_8Bit] +uint16 = unsignedinteger[_16Bit] +uint32 = unsignedinteger[_32Bit] +uint64 = unsignedinteger[_64Bit] + +ubyte = unsignedinteger[_NBitByte] +ushort = unsignedinteger[_NBitShort] +uintc = unsignedinteger[_NBitIntC] +uintp = unsignedinteger[_NBitIntP] +uint0 = unsignedinteger[_NBitIntP] +uint = unsignedinteger[_NBitInt] +ulonglong = unsignedinteger[_NBitLongLong] + +class inexact(number[_NBit1]): # type: ignore + def __getnewargs__(self: inexact[_64Bit]) -> Tuple[float, ...]: ... + +_IntType = TypeVar("_IntType", bound=integer) +_FloatType = TypeVar('_FloatType', bound=floating) + +class floating(inexact[_NBit1]): + def __init__(self, __value: _FloatValue = ...) -> None: ... + def item( + self, + __args: Union[L[0], Tuple[()], Tuple[L[0]]] = ..., + ) -> float: ... + def tolist(self) -> float: ... + def is_integer(self: float64) -> bool: ... + def hex(self: float64) -> str: ... + @classmethod + def fromhex(cls: Type[float64], __string: str) -> float64: ... + def as_integer_ratio(self) -> Tuple[int, int]: ... + if sys.version_info >= (3, 9): + def __ceil__(self: float64) -> int: ... + def __floor__(self: float64) -> int: ... + def __trunc__(self: float64) -> int: ... + def __getnewargs__(self: float64) -> Tuple[float]: ... + def __getformat__(self: float64, __typestr: L["double", "float"]) -> str: ... + @overload + def __round__(self, ndigits: None = ...) -> int: ... + @overload + def __round__(self: _ScalarType, ndigits: SupportsIndex) -> _ScalarType: ... + __add__: _FloatOp[_NBit1] + __radd__: _FloatOp[_NBit1] + __sub__: _FloatOp[_NBit1] + __rsub__: _FloatOp[_NBit1] + __mul__: _FloatOp[_NBit1] + __rmul__: _FloatOp[_NBit1] + __truediv__: _FloatOp[_NBit1] + __rtruediv__: _FloatOp[_NBit1] + __floordiv__: _FloatOp[_NBit1] + __rfloordiv__: _FloatOp[_NBit1] + __pow__: _FloatOp[_NBit1] + __rpow__: _FloatOp[_NBit1] + __mod__: _FloatMod[_NBit1] + __rmod__: _FloatMod[_NBit1] + __divmod__: _FloatDivMod[_NBit1] + __rdivmod__: _FloatDivMod[_NBit1] + +float16 = floating[_16Bit] +float32 = floating[_32Bit] +float64 = floating[_64Bit] + +half = floating[_NBitHalf] +single = floating[_NBitSingle] +double = floating[_NBitDouble] +float_ = floating[_NBitDouble] +longdouble = floating[_NBitLongDouble] +longfloat = floating[_NBitLongDouble] + +# The main reason for `complexfloating` having two typevars is cosmetic. +# It is used to clarify why `complex128`s precision is `_64Bit`, the latter +# describing the two 64 bit floats representing its real and imaginary component + +class complexfloating(inexact[_NBit1], Generic[_NBit1, _NBit2]): + def __init__(self, __value: _ComplexValue = ...) -> None: ... 
+ def item( + self, + __args: Union[L[0], Tuple[()], Tuple[L[0]]] = ..., + ) -> complex: ... + def tolist(self) -> complex: ... + @property + def real(self) -> floating[_NBit1]: ... # type: ignore[override] + @property + def imag(self) -> floating[_NBit2]: ... # type: ignore[override] + def __abs__(self) -> floating[_NBit1]: ... # type: ignore[override] + def __getnewargs__(self: complex128) -> Tuple[float, float]: ... + # NOTE: Deprecated + # def __round__(self, ndigits=...): ... + __add__: _ComplexOp[_NBit1] + __radd__: _ComplexOp[_NBit1] + __sub__: _ComplexOp[_NBit1] + __rsub__: _ComplexOp[_NBit1] + __mul__: _ComplexOp[_NBit1] + __rmul__: _ComplexOp[_NBit1] + __truediv__: _ComplexOp[_NBit1] + __rtruediv__: _ComplexOp[_NBit1] + __floordiv__: _ComplexOp[_NBit1] + __rfloordiv__: _ComplexOp[_NBit1] + __pow__: _ComplexOp[_NBit1] + __rpow__: _ComplexOp[_NBit1] + +complex64 = complexfloating[_32Bit, _32Bit] +complex128 = complexfloating[_64Bit, _64Bit] + +csingle = complexfloating[_NBitSingle, _NBitSingle] +singlecomplex = complexfloating[_NBitSingle, _NBitSingle] +cdouble = complexfloating[_NBitDouble, _NBitDouble] +complex_ = complexfloating[_NBitDouble, _NBitDouble] +cfloat = complexfloating[_NBitDouble, _NBitDouble] +clongdouble = complexfloating[_NBitLongDouble, _NBitLongDouble] +clongfloat = complexfloating[_NBitLongDouble, _NBitLongDouble] +longcomplex = complexfloating[_NBitLongDouble, _NBitLongDouble] + +class flexible(generic): ... # type: ignore + +# TODO: `item`/`tolist` returns either `bytes` or `tuple` +# depending on whether or not it's used as an opaque bytes sequence +# or a structure +class void(flexible): + def __init__(self, __value: Union[_IntLike_co, bytes]) -> None: ... + @property + def real(self: _ArraySelf) -> _ArraySelf: ... + @property + def imag(self: _ArraySelf) -> _ArraySelf: ... + def setfield( + self, val: ArrayLike, dtype: DTypeLike, offset: int = ... + ) -> None: ... + def __getitem__(self, key: SupportsIndex) -> Any: ... + def __setitem__(self, key: SupportsIndex, value: ArrayLike) -> None: ... + +void0 = void + +class character(flexible): # type: ignore + def __int__(self) -> int: ... + def __float__(self) -> float: ... + +# NOTE: Most `np.bytes_` / `np.str_` methods return their +# builtin `bytes` / `str` counterpart + +class bytes_(character, bytes): + @overload + def __init__(self, __value: object = ...) -> None: ... + @overload + def __init__( + self, __value: str, encoding: str = ..., errors: str = ... + ) -> None: ... + def item( + self, + __args: Union[L[0], Tuple[()], Tuple[L[0]]] = ..., + ) -> bytes: ... + def tolist(self) -> bytes: ... + +string_ = bytes_ +bytes0 = bytes_ + +class str_(character, str): + @overload + def __init__(self, __value: object = ...) -> None: ... + @overload + def __init__( + self, __value: bytes, encoding: str = ..., errors: str = ... + ) -> None: ... + def item( + self, + __args: Union[L[0], Tuple[()], Tuple[L[0]]] = ..., + ) -> str: ... + def tolist(self) -> str: ... + +unicode_ = str_ +str0 = str_ + +def array( + object: object, + dtype: DTypeLike = ..., + *, + copy: bool = ..., + order: _OrderKACF = ..., + subok: bool = ..., + ndmin: int = ..., + like: ArrayLike = ..., +) -> ndarray: ... +def zeros( + shape: _ShapeLike, + dtype: DTypeLike = ..., + order: _OrderCF = ..., + *, + like: ArrayLike = ..., +) -> ndarray: ... +def empty( + shape: _ShapeLike, + dtype: DTypeLike = ..., + order: _OrderCF = ..., + *, + like: ArrayLike = ..., +) -> ndarray: ... 
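The concrete scalar names are plain aliases of the precision-parametrized classes (`int8 = signedinteger[_8Bit]`, `float64 = floating[_64Bit]`, and so on), and `timedelta64` pins exact result types for its division operators. A sketch of those rules (not part of the stubs; assumes NumPy ~1.21 semantics; names are illustrative):

```python
import numpy as np

span = np.timedelta64(36, "h")
hour = np.timedelta64(1, "h")

ratio = span / hour        # __truediv__: _TD64Div[float64] -> float64 (36.0)
whole = span // hour       # __floordiv__: _TD64Div[int64]  -> int64   (36)
q, r = divmod(span, hour)  # -> Tuple[int64, timedelta64]
assert (q, r) == (36, np.timedelta64(0, "h"))
```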
+ +# +# Constants +# + +Inf: Final[float] +Infinity: Final[float] +NAN: Final[float] +NINF: Final[float] +NZERO: Final[float] +NaN: Final[float] +PINF: Final[float] +PZERO: Final[float] +e: Final[float] +euler_gamma: Final[float] +inf: Final[float] +infty: Final[float] +nan: Final[float] +pi: Final[float] +ALLOW_THREADS: Final[int] +BUFSIZE: Final[int] +CLIP: Final[int] +ERR_CALL: Final[int] +ERR_DEFAULT: Final[int] +ERR_IGNORE: Final[int] +ERR_LOG: Final[int] +ERR_PRINT: Final[int] +ERR_RAISE: Final[int] +ERR_WARN: Final[int] +FLOATING_POINT_SUPPORT: Final[int] +FPE_DIVIDEBYZERO: Final[int] +FPE_INVALID: Final[int] +FPE_OVERFLOW: Final[int] +FPE_UNDERFLOW: Final[int] +MAXDIMS: Final[int] +MAY_SHARE_BOUNDS: Final[int] +MAY_SHARE_EXACT: Final[int] +RAISE: Final[int] +SHIFT_DIVIDEBYZERO: Final[int] +SHIFT_INVALID: Final[int] +SHIFT_OVERFLOW: Final[int] +SHIFT_UNDERFLOW: Final[int] +UFUNC_BUFSIZE_DEFAULT: Final[int] +WRAP: Final[int] +tracemalloc_domain: Final[int] + +little_endian: Final[bool] +True_: Final[bool_] +False_: Final[bool_] + +UFUNC_PYVALS_NAME: Final[str] + +newaxis: None + +# See `npt._ufunc` for more concrete nin-/nout-specific stubs +class ufunc: + @property + def __name__(self) -> str: ... + @property + def __doc__(self) -> str: ... + __call__: Callable[..., Any] + @property + def nin(self) -> int: ... + @property + def nout(self) -> int: ... + @property + def nargs(self) -> int: ... + @property + def ntypes(self) -> int: ... + @property + def types(self) -> List[str]: ... + # Broad return type because it has to encompass things like + # + # >>> np.logical_and.identity is True + # True + # >>> np.add.identity is 0 + # True + # >>> np.sin.identity is None + # True + # + # and any user-defined ufuncs. + @property + def identity(self) -> Any: ... + # This is None for ufuncs and a string for gufuncs. + @property + def signature(self) -> Optional[str]: ... + # The next four methods will always exist, but they will just + # raise a ValueError for ufuncs that don't accept two input + # arguments and return one output argument. Because of that we + # can't type them very precisely. + reduce: Any + accumulate: Any + reduceat: Any + outer: Any + # Similarly, `at` won't be defined for ufuncs that return multiple + # outputs, so we can't type it very precisely.
+ at: Any + +# Parameters: `__name__`, `ntypes` and `identity` +absolute: _UFunc_Nin1_Nout1[L['absolute'], L[20], None] +add: _UFunc_Nin2_Nout1[L['add'], L[22], L[0]] +arccos: _UFunc_Nin1_Nout1[L['arccos'], L[8], None] +arccosh: _UFunc_Nin1_Nout1[L['arccosh'], L[8], None] +arcsin: _UFunc_Nin1_Nout1[L['arcsin'], L[8], None] +arcsinh: _UFunc_Nin1_Nout1[L['arcsinh'], L[8], None] +arctan2: _UFunc_Nin2_Nout1[L['arctan2'], L[5], None] +arctan: _UFunc_Nin1_Nout1[L['arctan'], L[8], None] +arctanh: _UFunc_Nin1_Nout1[L['arctanh'], L[8], None] +bitwise_and: _UFunc_Nin2_Nout1[L['bitwise_and'], L[12], L[-1]] +bitwise_not: _UFunc_Nin1_Nout1[L['invert'], L[12], None] +bitwise_or: _UFunc_Nin2_Nout1[L['bitwise_or'], L[12], L[0]] +bitwise_xor: _UFunc_Nin2_Nout1[L['bitwise_xor'], L[12], L[0]] +cbrt: _UFunc_Nin1_Nout1[L['cbrt'], L[5], None] +ceil: _UFunc_Nin1_Nout1[L['ceil'], L[7], None] +conj: _UFunc_Nin1_Nout1[L['conjugate'], L[18], None] +conjugate: _UFunc_Nin1_Nout1[L['conjugate'], L[18], None] +copysign: _UFunc_Nin2_Nout1[L['copysign'], L[4], None] +cos: _UFunc_Nin1_Nout1[L['cos'], L[9], None] +cosh: _UFunc_Nin1_Nout1[L['cosh'], L[8], None] +deg2rad: _UFunc_Nin1_Nout1[L['deg2rad'], L[5], None] +degrees: _UFunc_Nin1_Nout1[L['degrees'], L[5], None] +divide: _UFunc_Nin2_Nout1[L['true_divide'], L[11], None] +divmod: _UFunc_Nin2_Nout2[L['divmod'], L[15], None] +equal: _UFunc_Nin2_Nout1[L['equal'], L[23], None] +exp2: _UFunc_Nin1_Nout1[L['exp2'], L[8], None] +exp: _UFunc_Nin1_Nout1[L['exp'], L[10], None] +expm1: _UFunc_Nin1_Nout1[L['expm1'], L[8], None] +fabs: _UFunc_Nin1_Nout1[L['fabs'], L[5], None] +float_power: _UFunc_Nin2_Nout1[L['float_power'], L[4], None] +floor: _UFunc_Nin1_Nout1[L['floor'], L[7], None] +floor_divide: _UFunc_Nin2_Nout1[L['floor_divide'], L[21], None] +fmax: _UFunc_Nin2_Nout1[L['fmax'], L[21], None] +fmin: _UFunc_Nin2_Nout1[L['fmin'], L[21], None] +fmod: _UFunc_Nin2_Nout1[L['fmod'], L[15], None] +frexp: _UFunc_Nin1_Nout2[L['frexp'], L[4], None] +gcd: _UFunc_Nin2_Nout1[L['gcd'], L[11], L[0]] +greater: _UFunc_Nin2_Nout1[L['greater'], L[23], None] +greater_equal: _UFunc_Nin2_Nout1[L['greater_equal'], L[23], None] +heaviside: _UFunc_Nin2_Nout1[L['heaviside'], L[4], None] +hypot: _UFunc_Nin2_Nout1[L['hypot'], L[5], L[0]] +invert: _UFunc_Nin1_Nout1[L['invert'], L[12], None] +isfinite: _UFunc_Nin1_Nout1[L['isfinite'], L[20], None] +isinf: _UFunc_Nin1_Nout1[L['isinf'], L[20], None] +isnan: _UFunc_Nin1_Nout1[L['isnan'], L[20], None] +isnat: _UFunc_Nin1_Nout1[L['isnat'], L[2], None] +lcm: _UFunc_Nin2_Nout1[L['lcm'], L[11], None] +ldexp: _UFunc_Nin2_Nout1[L['ldexp'], L[8], None] +left_shift: _UFunc_Nin2_Nout1[L['left_shift'], L[11], None] +less: _UFunc_Nin2_Nout1[L['less'], L[23], None] +less_equal: _UFunc_Nin2_Nout1[L['less_equal'], L[23], None] +log10: _UFunc_Nin1_Nout1[L['log10'], L[8], None] +log1p: _UFunc_Nin1_Nout1[L['log1p'], L[8], None] +log2: _UFunc_Nin1_Nout1[L['log2'], L[8], None] +log: _UFunc_Nin1_Nout1[L['log'], L[10], None] +logaddexp2: _UFunc_Nin2_Nout1[L['logaddexp2'], L[4], float] +logaddexp: _UFunc_Nin2_Nout1[L['logaddexp'], L[4], float] +logical_and: _UFunc_Nin2_Nout1[L['logical_and'], L[20], L[True]] +logical_not: _UFunc_Nin1_Nout1[L['logical_not'], L[20], None] +logical_or: _UFunc_Nin2_Nout1[L['logical_or'], L[20], L[False]] +logical_xor: _UFunc_Nin2_Nout1[L['logical_xor'], L[19], L[False]] +matmul: _GUFunc_Nin2_Nout1[L['matmul'], L[19], None] +maximum: _UFunc_Nin2_Nout1[L['maximum'], L[21], None] +minimum: _UFunc_Nin2_Nout1[L['minimum'], L[21], None] +mod: 
_UFunc_Nin2_Nout1[L['remainder'], L[16], None] +modf: _UFunc_Nin1_Nout2[L['modf'], L[4], None] +multiply: _UFunc_Nin2_Nout1[L['multiply'], L[23], L[1]] +negative: _UFunc_Nin1_Nout1[L['negative'], L[19], None] +nextafter: _UFunc_Nin2_Nout1[L['nextafter'], L[4], None] +not_equal: _UFunc_Nin2_Nout1[L['not_equal'], L[23], None] +positive: _UFunc_Nin1_Nout1[L['positive'], L[19], None] +power: _UFunc_Nin2_Nout1[L['power'], L[18], None] +rad2deg: _UFunc_Nin1_Nout1[L['rad2deg'], L[5], None] +radians: _UFunc_Nin1_Nout1[L['radians'], L[5], None] +reciprocal: _UFunc_Nin1_Nout1[L['reciprocal'], L[18], None] +remainder: _UFunc_Nin2_Nout1[L['remainder'], L[16], None] +right_shift: _UFunc_Nin2_Nout1[L['right_shift'], L[11], None] +rint: _UFunc_Nin1_Nout1[L['rint'], L[10], None] +sign: _UFunc_Nin1_Nout1[L['sign'], L[19], None] +signbit: _UFunc_Nin1_Nout1[L['signbit'], L[4], None] +sin: _UFunc_Nin1_Nout1[L['sin'], L[9], None] +sinh: _UFunc_Nin1_Nout1[L['sinh'], L[8], None] +spacing: _UFunc_Nin1_Nout1[L['spacing'], L[4], None] +sqrt: _UFunc_Nin1_Nout1[L['sqrt'], L[10], None] +square: _UFunc_Nin1_Nout1[L['square'], L[18], None] +subtract: _UFunc_Nin2_Nout1[L['subtract'], L[21], None] +tan: _UFunc_Nin1_Nout1[L['tan'], L[8], None] +tanh: _UFunc_Nin1_Nout1[L['tanh'], L[8], None] +true_divide: _UFunc_Nin2_Nout1[L['true_divide'], L[11], None] +trunc: _UFunc_Nin1_Nout1[L['trunc'], L[7], None] + +abs = absolute + +# Warnings +class ModuleDeprecationWarning(DeprecationWarning): ... +class VisibleDeprecationWarning(UserWarning): ... +class ComplexWarning(RuntimeWarning): ... +class RankWarning(UserWarning): ... + +# Errors +class TooHardError(RuntimeError): ... + +class AxisError(ValueError, IndexError): + def __init__( + self, axis: int, ndim: Optional[int] = ..., msg_prefix: Optional[str] = ... + ) -> None: ... + +_CallType = TypeVar("_CallType", bound=Union[_ErrFunc, _SupportsWrite]) + +class errstate(Generic[_CallType], ContextDecorator): + call: _CallType + kwargs: _ErrDictOptional + + # Expand `**kwargs` into explicit keyword-only arguments + def __init__( + self, + *, + call: _CallType = ..., + all: Optional[_ErrKind] = ..., + divide: Optional[_ErrKind] = ..., + over: Optional[_ErrKind] = ..., + under: Optional[_ErrKind] = ..., + invalid: Optional[_ErrKind] = ..., + ) -> None: ... + def __enter__(self) -> None: ... + def __exit__( + self, + __exc_type: Optional[Type[BaseException]], + __exc_value: Optional[BaseException], + __traceback: Optional[TracebackType], + ) -> None: ... + +class ndenumerate(Generic[_ScalarType]): + iter: flatiter[NDArray[_ScalarType]] + @overload + def __new__( + cls, arr: _NestedSequence[_SupportsArray[dtype[_ScalarType]]], + ) -> ndenumerate[_ScalarType]: ... + @overload + def __new__(cls, arr: _NestedSequence[str]) -> ndenumerate[str_]: ... + @overload + def __new__(cls, arr: _NestedSequence[bytes]) -> ndenumerate[bytes_]: ... + @overload + def __new__(cls, arr: _NestedSequence[bool]) -> ndenumerate[bool_]: ... + @overload + def __new__(cls, arr: _NestedSequence[int]) -> ndenumerate[int_]: ... + @overload + def __new__(cls, arr: _NestedSequence[float]) -> ndenumerate[float_]: ... + @overload + def __new__(cls, arr: _NestedSequence[complex]) -> ndenumerate[complex_]: ... + @overload + def __new__(cls, arr: _RecursiveSequence) -> ndenumerate[Any]: ... + def __next__(self: ndenumerate[_ScalarType]) -> Tuple[_Shape, _ScalarType]: ... + def __iter__(self: _T) -> _T: ... + +class ndindex: + def __init__(self, *shape: SupportsIndex) -> None: ... + def __iter__(self: _T) -> _T: ... 
+ def __next__(self) -> _Shape: ... + +class DataSource: + def __init__( + self, + destpath: Union[None, str, os.PathLike[str]] = ..., + ) -> None: ... + def __del__(self) -> None: ... + def abspath(self, path: str) -> str: ... + def exists(self, path: str) -> bool: ... + + # Whether the file-object is opened in string or bytes mode (by default) + # depends on the file-extension of `path` + def open( + self, + path: str, + mode: str = ..., + encoding: Optional[str] = ..., + newline: Optional[str] = ..., + ) -> IO[Any]: ... + +# TODO: The type of each `__next__` and `iters` return-type depends +# on the length and dtype of `args`; we can't describe this behavior yet +# as we lack variadics (PEP 646). +class broadcast: + def __new__(cls, *args: ArrayLike) -> broadcast: ... + @property + def index(self) -> int: ... + @property + def iters(self) -> Tuple[flatiter[Any], ...]: ... + @property + def nd(self) -> int: ... + @property + def ndim(self) -> int: ... + @property + def numiter(self) -> int: ... + @property + def shape(self) -> _Shape: ... + @property + def size(self) -> int: ... + def __next__(self) -> Tuple[Any, ...]: ... + def __iter__(self: _T) -> _T: ... + def reset(self) -> None: ... diff --git a/MLPY/Lib/site-packages/numpy/__pycache__/__config__.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/__pycache__/__config__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9501cd924afbb0d160601bdcfa590d2fe12a85a Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/__pycache__/__config__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..60fa102be80f634a260ed1a16320fafd128db732 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/__pycache__/_distributor_init.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/__pycache__/_distributor_init.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7ad42af580bc27a006565eeb5d89050e63dd833 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/__pycache__/_distributor_init.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/__pycache__/_globals.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/__pycache__/_globals.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..881cf249af89078643401899ccef5efd1ae54132 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/__pycache__/_globals.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/__pycache__/_pytesttester.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/__pycache__/_pytesttester.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6825553c3bda75e9b86b565daf92c938651aa3b Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/__pycache__/_pytesttester.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/__pycache__/_version.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/__pycache__/_version.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f938463d1643334279782ae8a42d3a2af44ed6de Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/__pycache__/_version.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/__pycache__/conftest.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/__pycache__/conftest.cpython-39.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..947a0fcb3df021f55b78833f31c2106da8a9d4e4 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/__pycache__/conftest.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/__pycache__/ctypeslib.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/__pycache__/ctypeslib.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee4f2f55aea4ad001982df83adb701db21683807 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/__pycache__/ctypeslib.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/__pycache__/dual.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/__pycache__/dual.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df3032d2636ac0e650d279d82334236a7acf0216 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/__pycache__/dual.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/__pycache__/matlib.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/__pycache__/matlib.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..edf688284e11619398f9294570be29a559ef2ab7 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/__pycache__/matlib.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/__pycache__/setup.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/__pycache__/setup.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea967096b1a06b5459aeb0d8f5a8a0888be430a3 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/__pycache__/setup.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/__pycache__/version.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/__pycache__/version.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91193b50e26e107ae8fed822d6ba5d1706c124ae Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/__pycache__/version.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/_distributor_init.py b/MLPY/Lib/site-packages/numpy/_distributor_init.py new file mode 100644 index 0000000000000000000000000000000000000000..9b51979c78700aef2750a8780b65db9da017df21 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/_distributor_init.py @@ -0,0 +1,32 @@ + +''' +Helper to preload windows dlls to prevent dll not found errors. +Once a DLL is preloaded, its namespace is made available to any +subsequent DLL. This file originated in the numpy-wheels repo, +and is created as part of the scripts that build the wheel. +''' +import os +import glob +if os.name == 'nt': + # convention for storing / loading the DLL from + # numpy/.libs/, if present + try: + from ctypes import WinDLL + basedir = os.path.dirname(__file__) + except: + pass + else: + libs_dir = os.path.abspath(os.path.join(basedir, '.libs')) + DLL_filenames = [] + if os.path.isdir(libs_dir): + for filename in glob.glob(os.path.join(libs_dir, + '*openblas*dll')): + # NOTE: would it change behavior to load ALL + # DLLs at this path vs. the name restriction? 
+ WinDLL(os.path.abspath(filename)) + DLL_filenames.append(filename) + if len(DLL_filenames) > 1: + import warnings + warnings.warn("loaded more than 1 DLL from .libs:" + "\n%s" % "\n".join(DLL_filenames), + stacklevel=1) diff --git a/MLPY/Lib/site-packages/numpy/_globals.py b/MLPY/Lib/site-packages/numpy/_globals.py new file mode 100644 index 0000000000000000000000000000000000000000..a1ec4214eaa339cb3496f0cb9df9420dcef710d7 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/_globals.py @@ -0,0 +1,91 @@ +""" +Module defining global singleton classes. + +This module raises a RuntimeError if an attempt to reload it is made. In that +way the identities of the classes defined here are fixed and will remain so +even if numpy itself is reloaded. In particular, a function like the following +will still work correctly after numpy is reloaded:: + + def foo(arg=np._NoValue): + if arg is np._NoValue: + ... + +That was not the case when the singleton classes were defined in the numpy +``__init__.py`` file. See gh-7844 for a discussion of the reload problem that +motivated this module. + +""" +__ALL__ = [ + 'ModuleDeprecationWarning', 'VisibleDeprecationWarning', '_NoValue' + ] + + +# Disallow reloading this module so as to preserve the identities of the +# classes defined here. +if '_is_loaded' in globals(): + raise RuntimeError('Reloading numpy._globals is not allowed') +_is_loaded = True + + +class ModuleDeprecationWarning(DeprecationWarning): + """Module deprecation warning. + + The nose tester turns ordinary Deprecation warnings into test failures. + That makes it hard to deprecate whole modules, because they get + imported by default. So this is a special Deprecation warning that the + nose tester will let pass without making tests fail. + + """ + + +ModuleDeprecationWarning.__module__ = 'numpy' + + +class VisibleDeprecationWarning(UserWarning): + """Visible deprecation warning. + + By default, python will not show deprecation warnings, so this class + can be used when a very visible warning is helpful, for example because + the usage is most likely a user bug. + + """ + + +VisibleDeprecationWarning.__module__ = 'numpy' + + +class _NoValueType: + """Special keyword value. + + The instance of this class may be used as the default value assigned to a + keyword if no other obvious default (e.g., `None`) is suitable, + + Common reasons for using this keyword are: + + - A new keyword is added to a function, and that function forwards its + inputs to another function or method which can be defined outside of + NumPy. For example, ``np.std(x)`` calls ``x.std``, so when a ``keepdims`` + keyword was added that could only be forwarded if the user explicitly + specified ``keepdims``; downstream array libraries may not have added + the same keyword, so adding ``x.std(..., keepdims=keepdims)`` + unconditionally could have broken previously working code. + - A keyword is being deprecated, and a deprecation warning must only be + emitted when the keyword is used. 
+ + """ + __instance = None + def __new__(cls): + # ensure that only one instance exists + if not cls.__instance: + cls.__instance = super().__new__(cls) + return cls.__instance + + # needed for python 2 to preserve identity through a pickle + def __reduce__(self): + return (self.__class__, ()) + + def __repr__(self): + return "" + + +_NoValue = _NoValueType() diff --git a/MLPY/Lib/site-packages/numpy/_pytesttester.py b/MLPY/Lib/site-packages/numpy/_pytesttester.py new file mode 100644 index 0000000000000000000000000000000000000000..23193d74c513d6e1d00fb25bb0263cf4f6ebaa88 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/_pytesttester.py @@ -0,0 +1,201 @@ +""" +Pytest test running. + +This module implements the ``test()`` function for NumPy modules. The usual +boiler plate for doing that is to put the following in the module +``__init__.py`` file:: + + from numpy._pytesttester import PytestTester + test = PytestTester(__name__) + del PytestTester + + +Warnings filtering and other runtime settings should be dealt with in the +``pytest.ini`` file in the numpy repo root. The behavior of the test depends on +whether or not that file is found as follows: + +* ``pytest.ini`` is present (develop mode) + All warnings except those explicitly filtered out are raised as error. +* ``pytest.ini`` is absent (release mode) + DeprecationWarnings and PendingDeprecationWarnings are ignored, other + warnings are passed through. + +In practice, tests run from the numpy repo are run in develop mode. That +includes the standard ``python runtests.py`` invocation. + +This module is imported by every numpy subpackage, so lies at the top level to +simplify circular import issues. For the same reason, it contains no numpy +imports at module scope, instead importing numpy within function calls. +""" +import sys +import os + +__all__ = ['PytestTester'] + + + +def _show_numpy_info(): + import numpy as np + + print("NumPy version %s" % np.__version__) + relaxed_strides = np.ones((10, 1), order="C").flags.f_contiguous + print("NumPy relaxed strides checking option:", relaxed_strides) + info = np.lib.utils._opt_info() + print("NumPy CPU features: ", (info if info else 'nothing enabled')) + + + +class PytestTester: + """ + Pytest test runner. + + A test function is typically added to a package's __init__.py like so:: + + from numpy._pytesttester import PytestTester + test = PytestTester(__name__).test + del PytestTester + + Calling this test function finds and runs all tests associated with the + module and all its sub-modules. + + Attributes + ---------- + module_name : str + Full path to the package to test. + + Parameters + ---------- + module_name : module name + The name of the module to test. + + Notes + ----- + Unlike the previous ``nose``-based implementation, this class is not + publicly exposed as it performs some ``numpy``-specific warning + suppression. + + """ + def __init__(self, module_name): + self.module_name = module_name + + def __call__(self, label='fast', verbose=1, extra_argv=None, + doctests=False, coverage=False, durations=-1, tests=None): + """ + Run tests for module using pytest. + + Parameters + ---------- + label : {'fast', 'full'}, optional + Identifies the tests to run. When set to 'fast', tests decorated + with `pytest.mark.slow` are skipped, when 'full', the slow marker + is ignored. + verbose : int, optional + Verbosity value for test outputs, in the range 1-3. Default is 1. + extra_argv : list, optional + List with any extra arguments to pass to pytests. + doctests : bool, optional + .. 
+
+
+class PytestTester:
+    """
+    Pytest test runner.
+
+    A test function is typically added to a package's __init__.py like so::
+
+      from numpy._pytesttester import PytestTester
+      test = PytestTester(__name__).test
+      del PytestTester
+
+    Calling this test function finds and runs all tests associated with the
+    module and all its sub-modules.
+
+    Attributes
+    ----------
+    module_name : str
+        Full path to the package to test.
+
+    Parameters
+    ----------
+    module_name : module name
+        The name of the module to test.
+
+    Notes
+    -----
+    Unlike the previous ``nose``-based implementation, this class is not
+    publicly exposed as it performs some ``numpy``-specific warning
+    suppression.
+
+    """
+    def __init__(self, module_name):
+        self.module_name = module_name
+
+    def __call__(self, label='fast', verbose=1, extra_argv=None,
+                 doctests=False, coverage=False, durations=-1, tests=None):
+        """
+        Run tests for module using pytest.
+
+        Parameters
+        ----------
+        label : {'fast', 'full'}, optional
+            Identifies the tests to run. When set to 'fast', tests decorated
+            with `pytest.mark.slow` are skipped, when 'full', the slow marker
+            is ignored.
+        verbose : int, optional
+            Verbosity value for test outputs, in the range 1-3. Default is 1.
+        extra_argv : list, optional
+            List with any extra arguments to pass to pytest.
+        doctests : bool, optional
+            .. note:: Not supported
+        coverage : bool, optional
+            If True, report coverage of NumPy code. Default is False.
+            Requires installation of (pip) pytest-cov.
+        durations : int, optional
+            If < 0, do nothing. If 0, report the time of all tests. If > 0,
+            report the time of the slowest ``durations`` tests. Default is -1.
+        tests : test or list of tests
+            Tests to be executed with pytest '--pyargs'.
+
+        Returns
+        -------
+        result : bool
+            Return True on success, False otherwise.
+
+        Notes
+        -----
+        Each NumPy module exposes `test` in its namespace to run all tests for
+        it. For example, to run all tests for numpy.lib:
+
+        >>> np.lib.test() #doctest: +SKIP
+
+        Examples
+        --------
+        >>> result = np.lib.test() #doctest: +SKIP
+        ...
+        1023 passed, 2 skipped, 6 deselected, 1 xfailed in 10.39 seconds
+        >>> result
+        True
+
+        """
+        import pytest
+        import warnings
+
+        module = sys.modules[self.module_name]
+        module_path = os.path.abspath(module.__path__[0])
+
+        # setup the pytest arguments
+        pytest_args = ["-l"]
+
+        # offset verbosity. The "-q" cancels a "-v".
+        pytest_args += ["-q"]
+
+        # Filter out distutils cpu warnings (could be localized to
+        # distutils tests). ASV has problems with top level import,
+        # so fetch module for suppression here.
+        with warnings.catch_warnings():
+            warnings.simplefilter("always")
+            from numpy.distutils import cpuinfo
+
+        # Filter out annoying import messages. Want these in both develop and
+        # release mode.
+        pytest_args += [
+            "-W ignore:Not importing directory",
+            "-W ignore:numpy.dtype size changed",
+            "-W ignore:numpy.ufunc size changed",
+            "-W ignore::UserWarning:cpuinfo",
+            ]
+
+        # When testing matrices, ignore their PendingDeprecationWarnings
+        pytest_args += [
+            "-W ignore:the matrix subclass is not",
+            "-W ignore:Importing from numpy.matlib is",
+            ]
+
+        if doctests:
+            raise ValueError("Doctests not supported")
+
+        if extra_argv:
+            pytest_args += list(extra_argv)
+
+        if verbose > 1:
+            pytest_args += ["-" + "v"*(verbose - 1)]
+
+        if coverage:
+            pytest_args += ["--cov=" + module_path]
+
+        if label == "fast":
+            # not importing at the top level to avoid circular import of module
+            from numpy.testing import IS_PYPY
+            if IS_PYPY:
+                pytest_args += ["-m", "not slow and not slow_pypy"]
+            else:
+                pytest_args += ["-m", "not slow"]
+
+        elif label != "full":
+            pytest_args += ["-m", label]
+
+        if durations >= 0:
+            pytest_args += ["--durations=%s" % durations]
+
+        if tests is None:
+            tests = [self.module_name]
+
+        pytest_args += ["--pyargs"] + list(tests)
+
+        # run tests.
+        _show_numpy_info()
+
+        try:
+            code = pytest.main(pytest_args)
+        except SystemExit as exc:
+            code = exc.code
+
+        return code == 0
diff --git a/MLPY/Lib/site-packages/numpy/_version.py b/MLPY/Lib/site-packages/numpy/_version.py
new file mode 100644
index 0000000000000000000000000000000000000000..cab0a12a608ad777b2c4e8886c2b7a68e179b3cb
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/_version.py
@@ -0,0 +1,21 @@
+
+# This file was generated by 'versioneer.py' (0.19) from
+# revision-control system data, or from the parent directory name of an
+# unpacked source archive. Distribution tarballs contain a pre-generated copy
+# of this file.
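+#
+# Illustrative use of the record embedded below (not part of the generated
+# file itself):
+#
+#     >>> from numpy._version import get_versions
+#     >>> get_versions()['version']
+#     '1.21.2'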
+ +import json + +version_json = ''' +{ + "date": "2021-08-15T12:15:47-0600", + "dirty": false, + "error": null, + "full-revisionid": "2fe48d2d98a85c8ea3f3d5caffd952ea69e99335", + "version": "1.21.2" +} +''' # END VERSION_JSON + + +def get_versions(): + return json.loads(version_json) diff --git a/MLPY/Lib/site-packages/numpy/char.pyi b/MLPY/Lib/site-packages/numpy/char.pyi new file mode 100644 index 0000000000000000000000000000000000000000..2bd3d8d420710bafa04132511a97b6dbd308d4bc --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/char.pyi @@ -0,0 +1,59 @@ +from typing import Any, List + +from numpy import ( + chararray as chararray, +) + +__all__: List[str] + +def equal(x1, x2): ... +def not_equal(x1, x2): ... +def greater_equal(x1, x2): ... +def less_equal(x1, x2): ... +def greater(x1, x2): ... +def less(x1, x2): ... +def str_len(a): ... +def add(x1, x2): ... +def multiply(a, i): ... +def mod(a, values): ... +def capitalize(a): ... +def center(a, width, fillchar=...): ... +def count(a, sub, start=..., end=...): ... +def decode(a, encoding=..., errors=...): ... +def encode(a, encoding=..., errors=...): ... +def endswith(a, suffix, start=..., end=...): ... +def expandtabs(a, tabsize=...): ... +def find(a, sub, start=..., end=...): ... +def index(a, sub, start=..., end=...): ... +def isalnum(a): ... +def isalpha(a): ... +def isdigit(a): ... +def islower(a): ... +def isspace(a): ... +def istitle(a): ... +def isupper(a): ... +def join(sep, seq): ... +def ljust(a, width, fillchar=...): ... +def lower(a): ... +def lstrip(a, chars=...): ... +def partition(a, sep): ... +def replace(a, old, new, count=...): ... +def rfind(a, sub, start=..., end=...): ... +def rindex(a, sub, start=..., end=...): ... +def rjust(a, width, fillchar=...): ... +def rpartition(a, sep): ... +def rsplit(a, sep=..., maxsplit=...): ... +def rstrip(a, chars=...): ... +def split(a, sep=..., maxsplit=...): ... +def splitlines(a, keepends=...): ... +def startswith(a, prefix, start=..., end=...): ... +def strip(a, chars=...): ... +def swapcase(a): ... +def title(a): ... +def translate(a, table, deletechars=...): ... +def upper(a): ... +def zfill(a, width): ... +def isnumeric(a): ... +def isdecimal(a): ... +def array(obj, itemsize=..., copy=..., unicode=..., order=...): ... +def asarray(obj, itemsize=..., unicode=..., order=...): ... diff --git a/MLPY/Lib/site-packages/numpy/compat/__init__.py b/MLPY/Lib/site-packages/numpy/compat/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0f02af49420beb2c18c0bd61b69e1796c56f9145 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/compat/__init__.py @@ -0,0 +1,18 @@ +""" +Compatibility module. + +This module contains duplicated code from Python itself or 3rd party +extensions, which may be included for the following reasons: + + * compatibility + * we may only need a small subset of the copied library/module + +""" +from . import _inspect +from . 
import py3k +from ._inspect import getargspec, formatargspec +from .py3k import * + +__all__ = [] +__all__.extend(_inspect.__all__) +__all__.extend(py3k.__all__) diff --git a/MLPY/Lib/site-packages/numpy/compat/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/compat/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10363d6385aebbdd6f1d6ed1f46a5b12adba073a Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/compat/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/compat/__pycache__/_inspect.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/compat/__pycache__/_inspect.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..847508b9d5394442e9d913023f580c303d39153c Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/compat/__pycache__/_inspect.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/compat/__pycache__/py3k.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/compat/__pycache__/py3k.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..48cb78812cebc255221bc44140407bd0dee5e7ec Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/compat/__pycache__/py3k.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/compat/__pycache__/setup.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/compat/__pycache__/setup.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dbc0bc57caea3c488af66620d0acd4bfe564c94a Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/compat/__pycache__/setup.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/compat/_inspect.py b/MLPY/Lib/site-packages/numpy/compat/_inspect.py new file mode 100644 index 0000000000000000000000000000000000000000..f8f68a676b3b4140c3a29f764b242a9c3d92fd97 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/compat/_inspect.py @@ -0,0 +1,191 @@ +"""Subset of inspect module from upstream python + +We use this instead of upstream because upstream inspect is slow to import, and +significantly contributes to numpy import times. Importing this copy has almost +no overhead. + +""" +import types + +__all__ = ['getargspec', 'formatargspec'] + +# ----------------------------------------------------------- type-checking +def ismethod(object): + """Return true if the object is an instance method. + + Instance method objects provide these attributes: + __doc__ documentation string + __name__ name with which this method was defined + im_class class object in which this method belongs + im_func function object containing implementation of method + im_self instance to which this method is bound, or None + + """ + return isinstance(object, types.MethodType) + +def isfunction(object): + """Return true if the object is a user-defined function. + + Function objects provide these attributes: + __doc__ documentation string + __name__ name with which this function was defined + func_code code object containing compiled function bytecode + func_defaults tuple of any default values for arguments + func_doc (same as __doc__) + func_globals global namespace in which this function was defined + func_name (same as __name__) + + """ + return isinstance(object, types.FunctionType) + +def iscode(object): + """Return true if the object is a code object. 
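+
+    For example (illustrative)::
+
+        >>> def f():
+        ...     pass
+        >>> iscode(f.__code__)
+        True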
+ + Code objects provide these attributes: + co_argcount number of arguments (not including * or ** args) + co_code string of raw compiled bytecode + co_consts tuple of constants used in the bytecode + co_filename name of file in which this code object was created + co_firstlineno number of first line in Python source code + co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg + co_lnotab encoded mapping of line numbers to bytecode indices + co_name name with which this code object was defined + co_names tuple of names of local variables + co_nlocals number of local variables + co_stacksize virtual machine stack space required + co_varnames tuple of names of arguments and local variables + + """ + return isinstance(object, types.CodeType) + +# ------------------------------------------------ argument list extraction +# These constants are from Python's compile.h. +CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 1, 2, 4, 8 + +def getargs(co): + """Get information about the arguments accepted by a code object. + + Three things are returned: (args, varargs, varkw), where 'args' is + a list of argument names (possibly containing nested lists), and + 'varargs' and 'varkw' are the names of the * and ** arguments or None. + + """ + + if not iscode(co): + raise TypeError('arg is not a code object') + + nargs = co.co_argcount + names = co.co_varnames + args = list(names[:nargs]) + + # The following acrobatics are for anonymous (tuple) arguments. + # Which we do not need to support, so remove to avoid importing + # the dis module. + for i in range(nargs): + if args[i][:1] in ['', '.']: + raise TypeError("tuple function arguments are not supported") + varargs = None + if co.co_flags & CO_VARARGS: + varargs = co.co_varnames[nargs] + nargs = nargs + 1 + varkw = None + if co.co_flags & CO_VARKEYWORDS: + varkw = co.co_varnames[nargs] + return args, varargs, varkw + +def getargspec(func): + """Get the names and default values of a function's arguments. + + A tuple of four things is returned: (args, varargs, varkw, defaults). + 'args' is a list of the argument names (it may contain nested lists). + 'varargs' and 'varkw' are the names of the * and ** arguments or None. + 'defaults' is an n-tuple of the default values of the last n arguments. + + """ + + if ismethod(func): + func = func.__func__ + if not isfunction(func): + raise TypeError('arg is not a Python function') + args, varargs, varkw = getargs(func.__code__) + return args, varargs, varkw, func.__defaults__ + +def getargvalues(frame): + """Get information about arguments passed into a particular frame. + + A tuple of four things is returned: (args, varargs, varkw, locals). + 'args' is a list of the argument names (it may contain nested lists). + 'varargs' and 'varkw' are the names of the * and ** arguments or None. + 'locals' is the locals dictionary of the given frame. + + """ + args, varargs, varkw = getargs(frame.f_code) + return args, varargs, varkw, frame.f_locals + +def joinseq(seq): + if len(seq) == 1: + return '(' + seq[0] + ',)' + else: + return '(' + ', '.join(seq) + ')' + +def strseq(object, convert, join=joinseq): + """Recursively walk a sequence, stringifying each element. 
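+
+    For example (illustrative)::
+
+        >>> strseq([1, (2, 3)], str)
+        '(1, (2, 3))'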
+ + """ + if type(object) in [list, tuple]: + return join([strseq(_o, convert, join) for _o in object]) + else: + return convert(object) + +def formatargspec(args, varargs=None, varkw=None, defaults=None, + formatarg=str, + formatvarargs=lambda name: '*' + name, + formatvarkw=lambda name: '**' + name, + formatvalue=lambda value: '=' + repr(value), + join=joinseq): + """Format an argument spec from the 4 values returned by getargspec. + + The first four arguments are (args, varargs, varkw, defaults). The + other four arguments are the corresponding optional formatting functions + that are called to turn names and values into strings. The ninth + argument is an optional function to format the sequence of arguments. + + """ + specs = [] + if defaults: + firstdefault = len(args) - len(defaults) + for i in range(len(args)): + spec = strseq(args[i], formatarg, join) + if defaults and i >= firstdefault: + spec = spec + formatvalue(defaults[i - firstdefault]) + specs.append(spec) + if varargs is not None: + specs.append(formatvarargs(varargs)) + if varkw is not None: + specs.append(formatvarkw(varkw)) + return '(' + ', '.join(specs) + ')' + +def formatargvalues(args, varargs, varkw, locals, + formatarg=str, + formatvarargs=lambda name: '*' + name, + formatvarkw=lambda name: '**' + name, + formatvalue=lambda value: '=' + repr(value), + join=joinseq): + """Format an argument spec from the 4 values returned by getargvalues. + + The first four arguments are (args, varargs, varkw, locals). The + next four arguments are the corresponding optional formatting functions + that are called to turn names and values into strings. The ninth + argument is an optional function to format the sequence of arguments. + + """ + def convert(name, locals=locals, + formatarg=formatarg, formatvalue=formatvalue): + return formatarg(name) + formatvalue(locals[name]) + specs = [strseq(arg, convert, join) for arg in args] + + if varargs: + specs.append(formatvarargs(varargs) + formatvalue(locals[varargs])) + if varkw: + specs.append(formatvarkw(varkw) + formatvalue(locals[varkw])) + return '(' + ', '.join(specs) + ')' diff --git a/MLPY/Lib/site-packages/numpy/compat/py3k.py b/MLPY/Lib/site-packages/numpy/compat/py3k.py new file mode 100644 index 0000000000000000000000000000000000000000..2b5991e4b826725930e45e8c7802c61739fcf360 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/compat/py3k.py @@ -0,0 +1,139 @@ +""" +Python 3.X compatibility tools. + +While this file was originally intended for Python 2 -> 3 transition, +it is now used to create a compatibility layer between different +minor versions of Python 3. + +While the active version of numpy may not support a given version of python, we +allow downstream libraries to continue to use these shims for forward +compatibility with numpy while they transition their code to newer versions of +Python. 
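+
+For example, the byte/str helpers defined below behave as follows
+(illustrative)::
+
+    >>> from numpy.compat import asbytes, asunicode
+    >>> asbytes('abc')
+    b'abc'
+    >>> asunicode(b'abc')
+    'abc'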
+""" +__all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar', + 'unicode', 'asunicode', 'asbytes_nested', 'asunicode_nested', + 'asstr', 'open_latin1', 'long', 'basestring', 'sixu', + 'integer_types', 'is_pathlib_path', 'npy_load_module', 'Path', + 'pickle', 'contextlib_nullcontext', 'os_fspath', 'os_PathLike'] + +import sys +import os +from pathlib import Path +import io + +import abc +from abc import ABC as abc_ABC + +try: + import pickle5 as pickle +except ImportError: + import pickle + +long = int +integer_types = (int,) +basestring = str +unicode = str +bytes = bytes + +def asunicode(s): + if isinstance(s, bytes): + return s.decode('latin1') + return str(s) + +def asbytes(s): + if isinstance(s, bytes): + return s + return str(s).encode('latin1') + +def asstr(s): + if isinstance(s, bytes): + return s.decode('latin1') + return str(s) + +def isfileobj(f): + return isinstance(f, (io.FileIO, io.BufferedReader, io.BufferedWriter)) + +def open_latin1(filename, mode='r'): + return open(filename, mode=mode, encoding='iso-8859-1') + +def sixu(s): + return s + +strchar = 'U' + +def getexception(): + return sys.exc_info()[1] + +def asbytes_nested(x): + if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)): + return [asbytes_nested(y) for y in x] + else: + return asbytes(x) + +def asunicode_nested(x): + if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)): + return [asunicode_nested(y) for y in x] + else: + return asunicode(x) + +def is_pathlib_path(obj): + """ + Check whether obj is a `pathlib.Path` object. + + Prefer using ``isinstance(obj, os.PathLike)`` instead of this function. + """ + return isinstance(obj, Path) + +# from Python 3.7 +class contextlib_nullcontext: + """Context manager that does no additional processing. + + Used as a stand-in for a normal context manager, when a particular + block of code is only sometimes used with a normal context manager: + + cm = optional_cm if condition else nullcontext() + with cm: + # Perform operation, using optional_cm if condition is True + + .. note:: + Prefer using `contextlib.nullcontext` instead of this context manager. + """ + + def __init__(self, enter_result=None): + self.enter_result = enter_result + + def __enter__(self): + return self.enter_result + + def __exit__(self, *excinfo): + pass + + +def npy_load_module(name, fn, info=None): + """ + Load a module. + + .. versionadded:: 1.11.2 + + Parameters + ---------- + name : str + Full module name. + fn : str + Path to module file. + info : tuple, optional + Only here for backward compatibility with Python 2.*. 
+ + Returns + ------- + mod : module + + """ + # Explicitly lazy import this to avoid paying the cost + # of importing importlib at startup + from importlib.machinery import SourceFileLoader + return SourceFileLoader(name, fn).load_module() + + +os_fspath = os.fspath +os_PathLike = os.PathLike diff --git a/MLPY/Lib/site-packages/numpy/compat/setup.py b/MLPY/Lib/site-packages/numpy/compat/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..4c285180a295a7ad87f25dcba17d9097c675bc1c --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/compat/setup.py @@ -0,0 +1,10 @@ +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + + config = Configuration('compat', parent_package, top_path) + config.add_subpackage('tests') + return config + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(configuration=configuration) diff --git a/MLPY/Lib/site-packages/numpy/compat/tests/__init__.py b/MLPY/Lib/site-packages/numpy/compat/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/numpy/compat/tests/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/compat/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..57d0b1f568f4c9cfc09654466dbd17d3f35c22c2 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/compat/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/compat/tests/__pycache__/test_compat.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/compat/tests/__pycache__/test_compat.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12fbcc67900dffe1432fc04066dfba0697ac8ce4 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/compat/tests/__pycache__/test_compat.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/compat/tests/test_compat.py b/MLPY/Lib/site-packages/numpy/compat/tests/test_compat.py new file mode 100644 index 0000000000000000000000000000000000000000..33c9dda5d4bd6eeb3a903aa39832e0cf8fb510fe --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/compat/tests/test_compat.py @@ -0,0 +1,19 @@ +from os.path import join + +from numpy.compat import isfileobj +from numpy.testing import assert_ +from numpy.testing import tempdir + + +def test_isfileobj(): + with tempdir(prefix="numpy_test_compat_") as folder: + filename = join(folder, 'a.bin') + + with open(filename, 'wb') as f: + assert_(isfileobj(f)) + + with open(filename, 'ab') as f: + assert_(isfileobj(f)) + + with open(filename, 'rb') as f: + assert_(isfileobj(f)) diff --git a/MLPY/Lib/site-packages/numpy/conftest.py b/MLPY/Lib/site-packages/numpy/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..7ce6a9fa805a1e699bb68a81f9045d701244ae2b --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/conftest.py @@ -0,0 +1,119 @@ +""" +Pytest configuration and fixtures for the Numpy test suite. +""" +import os +import tempfile + +import hypothesis +import pytest +import numpy + +from numpy.core._multiarray_tests import get_fpu_mode + + +_old_fpu_mode = None +_collect_results = {} + +# Use a known and persistent tmpdir for hypothesis' caches, which +# can be automatically cleared by the OS or user. 
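+# (Concretely, the call below points hypothesis at a ".hypothesis"
+# directory under tempfile.gettempdir().)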
+hypothesis.configuration.set_hypothesis_home_dir(
+    os.path.join(tempfile.gettempdir(), ".hypothesis")
+)
+
+# We register two custom profiles for Numpy - for details see
+# https://hypothesis.readthedocs.io/en/latest/settings.html
+# The first is designed for our own CI runs; the latter also
+# forces determinism and is designed for use via np.test()
+hypothesis.settings.register_profile(
+    name="numpy-profile", deadline=None, print_blob=True,
+)
+hypothesis.settings.register_profile(
+    name="np.test() profile",
+    deadline=None, print_blob=True, database=None, derandomize=True,
+    suppress_health_check=hypothesis.HealthCheck.all(),
+)
+# Note that the default profile is chosen based on the presence
+# of pytest.ini, but can be overridden by passing the
+# --hypothesis-profile=NAME argument to pytest.
+_pytest_ini = os.path.join(os.path.dirname(__file__), "..", "pytest.ini")
+hypothesis.settings.load_profile(
+    "numpy-profile" if os.path.isfile(_pytest_ini) else "np.test() profile"
+)
+
+
+def pytest_configure(config):
+    config.addinivalue_line("markers",
+        "valgrind_error: Tests that are known to error under valgrind.")
+    config.addinivalue_line("markers",
+        "leaks_references: Tests that are known to leak references.")
+    config.addinivalue_line("markers",
+        "slow: Tests that are very slow.")
+    config.addinivalue_line("markers",
+        "slow_pypy: Tests that are very slow on pypy.")
+
+
+def pytest_addoption(parser):
+    parser.addoption("--available-memory", action="store", default=None,
+                     help=("Set amount of memory available for running the "
+                           "test suite. This can result in tests requiring "
+                           "especially large amounts of memory to be skipped. "
+                           "Equivalent to setting environment variable "
+                           "NPY_AVAILABLE_MEM. Default: determined "
+                           "automatically."))
+
+
+def pytest_sessionstart(session):
+    available_mem = session.config.getoption('available_memory')
+    if available_mem is not None:
+        os.environ['NPY_AVAILABLE_MEM'] = available_mem
+
+
+#FIXME when yield tests are gone.
+@pytest.hookimpl()
+def pytest_itemcollected(item):
+    """
+    Check FPU precision mode was not changed during test collection.
+
+    The clumsy way we do it here is mainly necessary because numpy
+    still uses yield tests, which can execute code at test collection
+    time.
+    """
+    global _old_fpu_mode
+
+    mode = get_fpu_mode()
+
+    if _old_fpu_mode is None:
+        _old_fpu_mode = mode
+    elif mode != _old_fpu_mode:
+        _collect_results[item] = (_old_fpu_mode, mode)
+        _old_fpu_mode = mode
+
+
+@pytest.fixture(scope="function", autouse=True)
+def check_fpu_mode(request):
+    """
+    Check FPU precision mode was not changed during the test.
+ """ + old_mode = get_fpu_mode() + yield + new_mode = get_fpu_mode() + + if old_mode != new_mode: + raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}" + " during the test".format(old_mode, new_mode)) + + collect_result = _collect_results.get(request.node) + if collect_result is not None: + old_mode, new_mode = collect_result + raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}" + " when collecting the test".format(old_mode, + new_mode)) + + +@pytest.fixture(autouse=True) +def add_np(doctest_namespace): + doctest_namespace['np'] = numpy + +@pytest.fixture(autouse=True) +def env_setup(monkeypatch): + monkeypatch.setenv('PYTHONHASHSEED', '0') diff --git a/MLPY/Lib/site-packages/numpy/core/__init__.py b/MLPY/Lib/site-packages/numpy/core/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5962f745a990f3a3a91515f403c4c916f14ede70 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/__init__.py @@ -0,0 +1,166 @@ +""" +Contains the core of NumPy: ndarray, ufuncs, dtypes, etc. + +Please note that this module is private. All functions and objects +are available in the main ``numpy`` namespace - use that instead. + +""" + +from numpy.version import version as __version__ + +import os + +# disables OpenBLAS affinity setting of the main thread that limits +# python threads or processes to one core +env_added = [] +for envkey in ['OPENBLAS_MAIN_FREE', 'GOTOBLAS_MAIN_FREE']: + if envkey not in os.environ: + os.environ[envkey] = '1' + env_added.append(envkey) + +try: + from . import multiarray +except ImportError as exc: + import sys + msg = """ + +IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE! + +Importing the numpy C-extensions failed. This error can happen for +many reasons, often due to issues with your setup or how NumPy was +installed. + +We have compiled some common reasons and troubleshooting tips at: + + https://numpy.org/devdocs/user/troubleshooting-importerror.html + +Please note and check the following: + + * The Python version is: Python%d.%d from "%s" + * The NumPy version is: "%s" + +and make sure that they are the versions you expect. +Please carefully study the documentation linked above for further help. + +Original error was: %s +""" % (sys.version_info[0], sys.version_info[1], sys.executable, + __version__, exc) + raise ImportError(msg) +finally: + for envkey in env_added: + del os.environ[envkey] +del envkey +del env_added +del os + +from . import umath + +# Check that multiarray,umath are pure python modules wrapping +# _multiarray_umath and not either of the old c-extension modules +if not (hasattr(multiarray, '_multiarray_umath') and + hasattr(umath, '_multiarray_umath')): + import sys + path = sys.modules['numpy'].__path__ + msg = ("Something is wrong with the numpy installation. " + "While importing we detected an older version of " + "numpy in {}. One method of fixing this is to repeatedly uninstall " + "numpy until none is found, then reinstall this version.") + raise ImportError(msg.format(path)) + +from . import numerictypes as nt +multiarray.set_typeDict(nt.sctypeDict) +from . import numeric +from .numeric import * +from . import fromnumeric +from .fromnumeric import * +from . import defchararray as char +from . import records as rec +from .records import record, recarray, format_parser +from .memmap import * +from .defchararray import chararray +from . import function_base +from .function_base import * +from . import machar +from .machar import * +from . 
import getlimits +from .getlimits import * +from . import shape_base +from .shape_base import * +from . import einsumfunc +from .einsumfunc import * +del nt + +from .fromnumeric import amax as max, amin as min, round_ as round +from .numeric import absolute as abs + +# do this after everything else, to minimize the chance of this misleadingly +# appearing in an import-time traceback +from . import _add_newdocs +from . import _add_newdocs_scalars +# add these for module-freeze analysis (like PyInstaller) +from . import _dtype_ctypes +from . import _internal +from . import _dtype +from . import _methods + +__all__ = ['char', 'rec', 'memmap'] +__all__ += numeric.__all__ +__all__ += fromnumeric.__all__ +__all__ += ['record', 'recarray', 'format_parser'] +__all__ += ['chararray'] +__all__ += function_base.__all__ +__all__ += machar.__all__ +__all__ += getlimits.__all__ +__all__ += shape_base.__all__ +__all__ += einsumfunc.__all__ + +# We used to use `np.core._ufunc_reconstruct` to unpickle. This is unnecessary, +# but old pickles saved before 1.20 will be using it, and there is no reason +# to break loading them. +def _ufunc_reconstruct(module, name): + # The `fromlist` kwarg is required to ensure that `mod` points to the + # inner-most module rather than the parent package when module name is + # nested. This makes it possible to pickle non-toplevel ufuncs such as + # scipy.special.expit for instance. + mod = __import__(module, fromlist=[name]) + return getattr(mod, name) + + +def _ufunc_reduce(func): + # Report the `__name__`. pickle will try to find the module. Note that + # pickle supports for this `__name__` to be a `__qualname__`. It may + # make sense to add a `__qualname__` to ufuncs, to allow this more + # explicitly (Numba has ufuncs as attributes). + # See also: https://github.com/dask/distributed/issues/3450 + return func.__name__ + + +def _DType_reconstruct(scalar_type): + # This is a work-around to pickle type(np.dtype(np.float64)), etc. + # and it should eventually be replaced with a better solution, e.g. when + # DTypes become HeapTypes. + return type(dtype(scalar_type)) + + +def _DType_reduce(DType): + # To pickle a DType without having to add top-level names, pickle the + # scalar type for now (and assume that reconstruction will be possible). + if DType is dtype: + return "dtype" # must pickle `np.dtype` as a singleton. 
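+    # Otherwise (e.g. type(np.dtype(np.float64))), fall through and pickle
+    # the DType via its scalar type, reconstructed by _DType_reconstruct.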
+ scalar_type = DType.type # pickle the scalar type for reconstruction + return _DType_reconstruct, (scalar_type,) + + +import copyreg + +copyreg.pickle(ufunc, _ufunc_reduce) +copyreg.pickle(type(dtype), _DType_reduce, _DType_reconstruct) + +# Unclutter namespace (must keep _*_reconstruct for unpickling) +del copyreg +del _ufunc_reduce +del _DType_reduce + +from numpy._pytesttester import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/MLPY/Lib/site-packages/numpy/core/__init__.pyi b/MLPY/Lib/site-packages/numpy/core/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..72f24f32cf0d05de5858668b5dd915ab16d4e477 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/__init__.pyi @@ -0,0 +1,2 @@ +# NOTE: The `np.core` namespace is deliberately kept empty due to it +# being private (despite the lack of leading underscore) diff --git a/MLPY/Lib/site-packages/numpy/core/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e207d15e8edcf36504fbe671b54c1003bd718655 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/__pycache__/_add_newdocs.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/__pycache__/_add_newdocs.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..88c332f6e79e04180fcc3c79a001a71f3ce54dfe Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/__pycache__/_add_newdocs.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/__pycache__/_add_newdocs_scalars.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/__pycache__/_add_newdocs_scalars.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01e39d1a662f94fd3c09aa9aa685b5750abb9c97 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/__pycache__/_add_newdocs_scalars.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/__pycache__/_asarray.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/__pycache__/_asarray.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..13d5d2050dd7c4e834c20a1e28c2d58e94c37497 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/__pycache__/_asarray.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/__pycache__/_dtype.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/__pycache__/_dtype.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..267a07ea542476aa31d68f2cdd22cb410c9183d6 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/__pycache__/_dtype.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/__pycache__/_dtype_ctypes.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/__pycache__/_dtype_ctypes.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f6a4d6e9b10bc61c77a21596c006119c5b5c6d50 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/__pycache__/_dtype_ctypes.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/__pycache__/_exceptions.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/__pycache__/_exceptions.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a9fa98858072bc0b26386b7b4d9ac660a5383b3 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/__pycache__/_exceptions.cpython-39.pyc differ diff 
--git a/MLPY/Lib/site-packages/numpy/core/__pycache__/_internal.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/__pycache__/_internal.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..002115bd61b2057be3e3ed42ff07477719c4d842 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/__pycache__/_internal.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/__pycache__/_methods.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/__pycache__/_methods.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..37937ce37d83fbd77d32b0b68548ca0347745ecc Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/__pycache__/_methods.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/__pycache__/_string_helpers.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/__pycache__/_string_helpers.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc1ee4d64f127847d47d1274520dc7c4530254da Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/__pycache__/_string_helpers.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/__pycache__/_type_aliases.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/__pycache__/_type_aliases.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1bf68f3299b151ac397acdc4eb20458f0cccd3c3 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/__pycache__/_type_aliases.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/__pycache__/_ufunc_config.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/__pycache__/_ufunc_config.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a9769c6c79506beee326706fb5ad2833a510158 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/__pycache__/_ufunc_config.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/__pycache__/arrayprint.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/__pycache__/arrayprint.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c921f808f2e8dc8f087c5b51c05155fba55db1d Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/__pycache__/arrayprint.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/__pycache__/cversions.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/__pycache__/cversions.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c25c98e439e90f5a532572027f8cebb39ca05c5 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/__pycache__/cversions.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/__pycache__/defchararray.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/__pycache__/defchararray.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a3a0a28d1e9c959dfef03475c7375f2ba362ed5 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/__pycache__/defchararray.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/__pycache__/einsumfunc.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/__pycache__/einsumfunc.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..59d5150dd43aa80b3458732111bbd3f89618dc83 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/__pycache__/einsumfunc.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/__pycache__/fromnumeric.cpython-39.pyc 
b/MLPY/Lib/site-packages/numpy/core/__pycache__/fromnumeric.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d5f97220bfc7f1b46151578faa7c2fa92c0dbcf Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/__pycache__/fromnumeric.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/__pycache__/function_base.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/__pycache__/function_base.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05ac0bcfbde4b11d27cee66e93ec2ab58be9bdfc Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/__pycache__/function_base.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/__pycache__/generate_numpy_api.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/__pycache__/generate_numpy_api.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96ab06a0297207f9ff8d8c417b4a6d6ce3d43b52 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/__pycache__/generate_numpy_api.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/__pycache__/getlimits.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/__pycache__/getlimits.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14370c0ac24dd0e2f031b8179c49adce4748d45d Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/__pycache__/getlimits.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/__pycache__/machar.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/__pycache__/machar.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2fc41f8a407502256bfd58c094daf82b82526e73 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/__pycache__/machar.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/__pycache__/memmap.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/__pycache__/memmap.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..32ae818ff37fe09631f19857eae51e45ba6f1dfa Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/__pycache__/memmap.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/__pycache__/multiarray.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/__pycache__/multiarray.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a3b48ed89cc814a6ce2dd4aff7cf424874ce786 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/__pycache__/multiarray.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/__pycache__/numeric.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/__pycache__/numeric.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6464ac99d2da5d9cbaf7885a04f11bba2e68c0da Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/__pycache__/numeric.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/__pycache__/numerictypes.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/__pycache__/numerictypes.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7f4505d6754cc025d4b2456d6af449b7f2223d0 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/__pycache__/numerictypes.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/__pycache__/overrides.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/__pycache__/overrides.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..ae3a5c108a5d41daf723adeed8b8d509f168db6b Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/__pycache__/overrides.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/__pycache__/records.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/__pycache__/records.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e05132617532622b802b06541899114e00a3893 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/__pycache__/records.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/__pycache__/setup.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/__pycache__/setup.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b732fc61731d346aa6deaac7eeb9d7d11ce5f0ad Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/__pycache__/setup.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/__pycache__/setup_common.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/__pycache__/setup_common.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b81d93bdec45e3252876a25f380cef4984a6b342 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/__pycache__/setup_common.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/__pycache__/shape_base.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/__pycache__/shape_base.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..88f2ba1d236b46b4bb63f5d068fceac00d548b58 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/__pycache__/shape_base.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/__pycache__/umath.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/__pycache__/umath.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b8aa21e78fc567b56a1d26947d831f9c822dd70 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/__pycache__/umath.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/__pycache__/umath_tests.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/__pycache__/umath_tests.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d2c2ef8b9bffbf7723b911a66dd0b4d284183ddb Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/__pycache__/umath_tests.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/_add_newdocs.py b/MLPY/Lib/site-packages/numpy/core/_add_newdocs.py new file mode 100644 index 0000000000000000000000000000000000000000..b0557829e78d9c094bc091b7abc73dffdaa2510f --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/_add_newdocs.py @@ -0,0 +1,6530 @@ +""" +This is only meant to add docs to objects defined in C-extension modules. +The purpose is to allow easier editing of the docstrings without +requiring a re-compile. + +NOTE: Many of the methods of ndarray have corresponding functions. + If you update these docstrings, please keep also the ones in + core/fromnumeric.py, core/defmatrix.py up-to-date. + +""" + +from numpy.core.function_base import add_newdoc +from numpy.core.overrides import array_function_like_doc + +############################################################################### +# +# flatiter +# +# flatiter needs a toplevel description +# +############################################################################### + +add_newdoc('numpy.core', 'flatiter', + """ + Flat iterator object to iterate over arrays. 
+
+    A `flatiter` iterator is returned by ``x.flat`` for any array `x`.
+    It allows iterating over the array as if it were a 1-D array,
+    either in a for-loop or by calling its `next` method.
+
+    Iteration is done in row-major, C-style order (the last
+    index varying the fastest). The iterator can also be indexed using
+    basic slicing or advanced indexing.
+
+    See Also
+    --------
+    ndarray.flat : Return a flat iterator over an array.
+    ndarray.flatten : Returns a flattened copy of an array.
+
+    Notes
+    -----
+    A `flatiter` iterator cannot be constructed directly from Python code
+    by calling the `flatiter` constructor.
+
+    Examples
+    --------
+    >>> x = np.arange(6).reshape(2, 3)
+    >>> fl = x.flat
+    >>> type(fl)
+    <class 'numpy.flatiter'>
+
+    >>> for item in fl:
+    ...     print(item)
+    ...
+    0
+    1
+    2
+    3
+    4
+    5
+
+    >>> fl[2:4]
+    array([2, 3])
+
+    """)
+
+# flatiter attributes
+
+add_newdoc('numpy.core', 'flatiter', ('base',
+    """
+    A reference to the array that is iterated over.
+
+    Examples
+    --------
+    >>> x = np.arange(5)
+    >>> fl = x.flat
+    >>> fl.base is x
+    True
+
+    """))
+
+
+
+add_newdoc('numpy.core', 'flatiter', ('coords',
+    """
+    An N-dimensional tuple of current coordinates.
+
+    Examples
+    --------
+    >>> x = np.arange(6).reshape(2, 3)
+    >>> fl = x.flat
+    >>> fl.coords
+    (0, 0)
+    >>> next(fl)
+    0
+    >>> fl.coords
+    (0, 1)
+
+    """))
+
+
+
+add_newdoc('numpy.core', 'flatiter', ('index',
+    """
+    Current flat index into the array.
+
+    Examples
+    --------
+    >>> x = np.arange(6).reshape(2, 3)
+    >>> fl = x.flat
+    >>> fl.index
+    0
+    >>> next(fl)
+    0
+    >>> fl.index
+    1
+
+    """))
+
+# flatiter functions
+
+add_newdoc('numpy.core', 'flatiter', ('__array__',
+    """
+    __array__(type=None)
+
+    Get array from iterator.
+
+    """))
+
+
+add_newdoc('numpy.core', 'flatiter', ('copy',
+    """
+    copy()
+
+    Get a copy of the iterator as a 1-D array.
+
+    Examples
+    --------
+    >>> x = np.arange(6).reshape(2, 3)
+    >>> x
+    array([[0, 1, 2],
+           [3, 4, 5]])
+    >>> fl = x.flat
+    >>> fl.copy()
+    array([0, 1, 2, 3, 4, 5])
+
+    """))
+
+
+###############################################################################
+#
+# nditer
+#
+###############################################################################
+
+add_newdoc('numpy.core', 'nditer',
+    """
+    nditer(op, flags=None, op_flags=None, op_dtypes=None, order='K', casting='safe', op_axes=None, itershape=None, buffersize=0)
+
+    Efficient multi-dimensional iterator object to iterate over arrays.
+    To get started using this object, see the
+    :ref:`introductory guide to array iteration <arrays.nditer>`.
+
+    Parameters
+    ----------
+    op : ndarray or sequence of array_like
+        The array(s) to iterate over.
+
+    flags : sequence of str, optional
+        Flags to control the behavior of the iterator.
+
+        * ``buffered`` enables buffering when required.
+        * ``c_index`` causes a C-order index to be tracked.
+        * ``f_index`` causes a Fortran-order index to be tracked.
+        * ``multi_index`` causes a multi-index, or a tuple of indices
+          with one per iteration dimension, to be tracked.
+        * ``common_dtype`` causes all the operands to be converted to
+          a common data type, with copying or buffering as necessary.
+        * ``copy_if_overlap`` causes the iterator to determine if read
+          operands have overlap with write operands, and make temporary
+          copies as necessary to avoid overlap. False positives (needless
+          copying) are possible in some cases.
+        * ``delay_bufalloc`` delays allocation of the buffers until
+          a reset() call is made. Allows ``allocate`` operands to
+          be initialized before their values are copied into the buffers.
+ * ``external_loop`` causes the ``values`` given to be + one-dimensional arrays with multiple values instead of + zero-dimensional arrays. + * ``grow_inner`` allows the ``value`` array sizes to be made + larger than the buffer size when both ``buffered`` and + ``external_loop`` is used. + * ``ranged`` allows the iterator to be restricted to a sub-range + of the iterindex values. + * ``refs_ok`` enables iteration of reference types, such as + object arrays. + * ``reduce_ok`` enables iteration of ``readwrite`` operands + which are broadcasted, also known as reduction operands. + * ``zerosize_ok`` allows `itersize` to be zero. + op_flags : list of list of str, optional + This is a list of flags for each operand. At minimum, one of + ``readonly``, ``readwrite``, or ``writeonly`` must be specified. + + * ``readonly`` indicates the operand will only be read from. + * ``readwrite`` indicates the operand will be read from and written to. + * ``writeonly`` indicates the operand will only be written to. + * ``no_broadcast`` prevents the operand from being broadcasted. + * ``contig`` forces the operand data to be contiguous. + * ``aligned`` forces the operand data to be aligned. + * ``nbo`` forces the operand data to be in native byte order. + * ``copy`` allows a temporary read-only copy if required. + * ``updateifcopy`` allows a temporary read-write copy if required. + * ``allocate`` causes the array to be allocated if it is None + in the ``op`` parameter. + * ``no_subtype`` prevents an ``allocate`` operand from using a subtype. + * ``arraymask`` indicates that this operand is the mask to use + for selecting elements when writing to operands with the + 'writemasked' flag set. The iterator does not enforce this, + but when writing from a buffer back to the array, it only + copies those elements indicated by this mask. + * ``writemasked`` indicates that only elements where the chosen + ``arraymask`` operand is True will be written to. + * ``overlap_assume_elementwise`` can be used to mark operands that are + accessed only in the iterator order, to allow less conservative + copying when ``copy_if_overlap`` is present. + op_dtypes : dtype or tuple of dtype(s), optional + The required data type(s) of the operands. If copying or buffering + is enabled, the data will be converted to/from their original types. + order : {'C', 'F', 'A', 'K'}, optional + Controls the iteration order. 'C' means C order, 'F' means + Fortran order, 'A' means 'F' order if all the arrays are Fortran + contiguous, 'C' order otherwise, and 'K' means as close to the + order the array elements appear in memory as possible. This also + affects the element memory order of ``allocate`` operands, as they + are allocated to be compatible with iteration order. + Default is 'K'. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur when making a copy + or buffering. Setting this to 'unsafe' is not recommended, + as it can adversely affect accumulations. + + * 'no' means the data types should not be cast at all. + * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. + op_axes : list of list of ints, optional + If provided, is a list of ints or None for each operands. 
+ The list of axes for an operand is a mapping from the dimensions + of the iterator to the dimensions of the operand. A value of + -1 can be placed for entries, causing that dimension to be + treated as `newaxis`. + itershape : tuple of ints, optional + The desired shape of the iterator. This allows ``allocate`` operands + with a dimension mapped by op_axes not corresponding to a dimension + of a different operand to get a value not equal to 1 for that + dimension. + buffersize : int, optional + When buffering is enabled, controls the size of the temporary + buffers. Set to 0 for the default value. + + Attributes + ---------- + dtypes : tuple of dtype(s) + The data types of the values provided in `value`. This may be + different from the operand data types if buffering is enabled. + Valid only before the iterator is closed. + finished : bool + Whether the iteration over the operands is finished or not. + has_delayed_bufalloc : bool + If True, the iterator was created with the ``delay_bufalloc`` flag, + and no reset() function was called on it yet. + has_index : bool + If True, the iterator was created with either the ``c_index`` or + the ``f_index`` flag, and the property `index` can be used to + retrieve it. + has_multi_index : bool + If True, the iterator was created with the ``multi_index`` flag, + and the property `multi_index` can be used to retrieve it. + index + When the ``c_index`` or ``f_index`` flag was used, this property + provides access to the index. Raises a ValueError if accessed + and ``has_index`` is False. + iterationneedsapi : bool + Whether iteration requires access to the Python API, for example + if one of the operands is an object array. + iterindex : int + An index which matches the order of iteration. + itersize : int + Size of the iterator. + itviews + Structured view(s) of `operands` in memory, matching the reordered + and optimized iterator access pattern. Valid only before the iterator + is closed. + multi_index + When the ``multi_index`` flag was used, this property + provides access to the index. Raises a ValueError if accessed + accessed and ``has_multi_index`` is False. + ndim : int + The dimensions of the iterator. + nop : int + The number of iterator operands. + operands : tuple of operand(s) + The array(s) to be iterated over. Valid only before the iterator is + closed. + shape : tuple of ints + Shape tuple, the shape of the iterator. + value + Value of ``operands`` at current iteration. Normally, this is a + tuple of array scalars, but if the flag ``external_loop`` is used, + it is a tuple of one dimensional arrays. + + Notes + ----- + `nditer` supersedes `flatiter`. The iterator implementation behind + `nditer` is also exposed by the NumPy C API. + + The Python exposure supplies two iteration interfaces, one which follows + the Python iterator protocol, and another which mirrors the C-style + do-while pattern. The native Python approach is better in most cases, but + if you need the coordinates or index of an iterator, use the C-style pattern. + + Examples + -------- + Here is how we might write an ``iter_add`` function, using the + Python iterator protocol: + + >>> def iter_add_py(x, y, out=None): + ... addop = np.add + ... it = np.nditer([x, y, out], [], + ... [['readonly'], ['readonly'], ['writeonly','allocate']]) + ... with it: + ... for (a, b, c) in it: + ... addop(a, b, out=c) + ... return it.operands[2] + + Here is the same function, but following the C-style pattern: + + >>> def iter_add(x, y, out=None): + ... addop = np.add + ... 
it = np.nditer([x, y, out], [], + ... [['readonly'], ['readonly'], ['writeonly','allocate']]) + ... with it: + ... while not it.finished: + ... addop(it[0], it[1], out=it[2]) + ... it.iternext() + ... return it.operands[2] + + Here is an example outer product function: + + >>> def outer_it(x, y, out=None): + ... mulop = np.multiply + ... it = np.nditer([x, y, out], ['external_loop'], + ... [['readonly'], ['readonly'], ['writeonly', 'allocate']], + ... op_axes=[list(range(x.ndim)) + [-1] * y.ndim, + ... [-1] * x.ndim + list(range(y.ndim)), + ... None]) + ... with it: + ... for (a, b, c) in it: + ... mulop(a, b, out=c) + ... return it.operands[2] + + >>> a = np.arange(2)+1 + >>> b = np.arange(3)+1 + >>> outer_it(a,b) + array([[1, 2, 3], + [2, 4, 6]]) + + Here is an example function which operates like a "lambda" ufunc: + + >>> def luf(lamdaexpr, *args, **kwargs): + ... '''luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)''' + ... nargs = len(args) + ... op = (kwargs.get('out',None),) + args + ... it = np.nditer(op, ['buffered','external_loop'], + ... [['writeonly','allocate','no_broadcast']] + + ... [['readonly','nbo','aligned']]*nargs, + ... order=kwargs.get('order','K'), + ... casting=kwargs.get('casting','safe'), + ... buffersize=kwargs.get('buffersize',0)) + ... while not it.finished: + ... it[0] = lamdaexpr(*it[1:]) + ... it.iternext() + ... return it.operands[0] + + >>> a = np.arange(5) + >>> b = np.ones(5) + >>> luf(lambda i,j:i*i + j/2, a, b) + array([ 0.5, 1.5, 4.5, 9.5, 16.5]) + + If operand flags `"writeonly"` or `"readwrite"` are used the + operands may be views into the original data with the + `WRITEBACKIFCOPY` flag. In this case `nditer` must be used as a + context manager or the `nditer.close` method must be called before + using the result. The temporary data will be written back to the + original data when the `__exit__` function is called but not before: + + >>> a = np.arange(6, dtype='i4')[::-2] + >>> with np.nditer(a, [], + ... [['writeonly', 'updateifcopy']], + ... casting='unsafe', + ... op_dtypes=[np.dtype('f4')]) as i: + ... x = i.operands[0] + ... x[:] = [-1, -2, -3] + ... # a still unchanged here + >>> a, x + (array([-1, -2, -3], dtype=int32), array([-1., -2., -3.], dtype=float32)) + + It is important to note that once the iterator is exited, dangling + references (like `x` in the example) may or may not share data with + the original data `a`. If writeback semantics were active, i.e. if + `x.base.flags.writebackifcopy` is `True`, then exiting the iterator + will sever the connection between `x` and `a`, writing to `x` will + no longer write to `a`. If writeback semantics are not active, then + `x.data` will still point at some part of `a.data`, and writing to + one will affect the other. + + Context management and the `close` method appeared in version 1.15.0. + + """) + +# nditer methods + +add_newdoc('numpy.core', 'nditer', ('copy', + """ + copy() + + Get a copy of the iterator in its current state. + + Examples + -------- + >>> x = np.arange(10) + >>> y = x + 1 + >>> it = np.nditer([x, y]) + >>> next(it) + (array(0), array(1)) + >>> it2 = it.copy() + >>> next(it2) + (array(1), array(2)) + + """)) + +add_newdoc('numpy.core', 'nditer', ('operands', + """ + operands[`Slice`] + + The array(s) to be iterated over. Valid only before the iterator is closed. + """)) + +add_newdoc('numpy.core', 'nditer', ('debug_print', + """ + debug_print() + + Print the current state of the `nditer` instance and debug info to stdout. 
+
+    """))
+
+add_newdoc('numpy.core', 'nditer', ('enable_external_loop',
+    """
+    enable_external_loop()
+
+    When the "external_loop" flag was not used during construction, but
+    is desired, this modifies the iterator to behave as if the flag
+    was specified.
+
+    """))
+
+add_newdoc('numpy.core', 'nditer', ('iternext',
+    """
+    iternext()
+
+    Check whether iterations are left, and perform a single internal iteration
+    without returning the result. Used in the C-style do-while pattern.
+    For an example, see `nditer`.
+
+    Returns
+    -------
+    iternext : bool
+        Whether or not there are iterations left.
+
+    """))
+
+add_newdoc('numpy.core', 'nditer', ('remove_axis',
+    """
+    remove_axis(i)
+
+    Removes axis `i` from the iterator. Requires that the flag "multi_index"
+    be enabled.
+
+    """))
+
+add_newdoc('numpy.core', 'nditer', ('remove_multi_index',
+    """
+    remove_multi_index()
+
+    When the "multi_index" flag was specified, this removes it, allowing
+    the internal iteration structure to be optimized further.
+
+    """))
+
+add_newdoc('numpy.core', 'nditer', ('reset',
+    """
+    reset()
+
+    Reset the iterator to its initial state.
+
+    """))
+
+add_newdoc('numpy.core', 'nested_iters',
+    """
+    Create nditers for use in nested loops.
+
+    Create a tuple of `nditer` objects which iterate in nested loops over
+    different axes of the op argument. The first iterator is used in the
+    outermost loop, the last in the innermost loop. Advancing one will change
+    the subsequent iterators to point at its new element.
+
+    Parameters
+    ----------
+    op : ndarray or sequence of array_like
+        The array(s) to iterate over.
+
+    axes : list of list of int
+        Each item is used as an "op_axes" argument to an nditer.
+
+    flags, op_flags, op_dtypes, order, casting, buffersize (optional)
+        See `nditer` parameters of the same name.
+
+    Returns
+    -------
+    iters : tuple of nditer
+        An nditer for each item in `axes`, outermost first.
+
+    See Also
+    --------
+    nditer
+
+    Examples
+    --------
+
+    Basic usage. Note how ``y`` is the "flattened" version of
+    ``[a[:, 0, :], a[:, 1, :], a[:, 2, :]]`` since we specified
+    the first iter's axes as ``[1]``:
+
+    >>> a = np.arange(12).reshape(2, 3, 2)
+    >>> i, j = np.nested_iters(a, [[1], [0, 2]], flags=["multi_index"])
+    >>> for x in i:
+    ...      print(i.multi_index)
+    ...      for y in j:
+    ...          print('', j.multi_index, y)
+    (0,)
+     (0, 0) 0
+     (0, 1) 1
+     (1, 0) 6
+     (1, 1) 7
+    (1,)
+     (0, 0) 2
+     (0, 1) 3
+     (1, 0) 8
+     (1, 1) 9
+    (2,)
+     (0, 0) 4
+     (0, 1) 5
+     (1, 0) 10
+     (1, 1) 11
+
+    """)
+
+add_newdoc('numpy.core', 'nditer', ('close',
+    """
+    close()
+
+    Resolve all writeback semantics in writeable operands.
+
+    .. versionadded:: 1.15.0
+
+    See Also
+    --------
+
+    :ref:`nditer-context-manager`
+
+    """))
+
+
+###############################################################################
+#
+# broadcast
+#
+###############################################################################
+
+add_newdoc('numpy.core', 'broadcast',
+    """
+    Produce an object that mimics broadcasting.
+
+    Parameters
+    ----------
+    in1, in2, ... : array_like
+        Input parameters.
+
+    Returns
+    -------
+    b : broadcast object
+        Broadcast the input parameters against one another, and
+        return an object that encapsulates the result.
+        Amongst others, it has ``shape`` and ``nd`` properties, and
+        may be used as an iterator.
+ + See Also + -------- + broadcast_arrays + broadcast_to + broadcast_shapes + + Examples + -------- + + Manually adding two vectors, using broadcasting: + + >>> x = np.array([[1], [2], [3]]) + >>> y = np.array([4, 5, 6]) + >>> b = np.broadcast(x, y) + + >>> out = np.empty(b.shape) + >>> out.flat = [u+v for (u,v) in b] + >>> out + array([[5., 6., 7.], + [6., 7., 8.], + [7., 8., 9.]]) + + Compare against built-in broadcasting: + + >>> x + y + array([[5, 6, 7], + [6, 7, 8], + [7, 8, 9]]) + + """) + +# attributes + +add_newdoc('numpy.core', 'broadcast', ('index', + """ + current index in broadcasted result + + Examples + -------- + >>> x = np.array([[1], [2], [3]]) + >>> y = np.array([4, 5, 6]) + >>> b = np.broadcast(x, y) + >>> b.index + 0 + >>> next(b), next(b), next(b) + ((1, 4), (1, 5), (1, 6)) + >>> b.index + 3 + + """)) + +add_newdoc('numpy.core', 'broadcast', ('iters', + """ + tuple of iterators along ``self``'s "components." + + Returns a tuple of `numpy.flatiter` objects, one for each "component" + of ``self``. + + See Also + -------- + numpy.flatiter + + Examples + -------- + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> row, col = b.iters + >>> next(row), next(col) + (1, 4) + + """)) + +add_newdoc('numpy.core', 'broadcast', ('ndim', + """ + Number of dimensions of broadcasted result. Alias for `nd`. + + .. versionadded:: 1.12.0 + + Examples + -------- + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.ndim + 2 + + """)) + +add_newdoc('numpy.core', 'broadcast', ('nd', + """ + Number of dimensions of broadcasted result. For code intended for NumPy + 1.12.0 and later the more consistent `ndim` is preferred. + + Examples + -------- + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.nd + 2 + + """)) + +add_newdoc('numpy.core', 'broadcast', ('numiter', + """ + Number of iterators possessed by the broadcasted result. + + Examples + -------- + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.numiter + 2 + + """)) + +add_newdoc('numpy.core', 'broadcast', ('shape', + """ + Shape of broadcasted result. + + Examples + -------- + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.shape + (3, 3) + + """)) + +add_newdoc('numpy.core', 'broadcast', ('size', + """ + Total size of broadcasted result. + + Examples + -------- + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.size + 9 + + """)) + +add_newdoc('numpy.core', 'broadcast', ('reset', + """ + reset() + + Reset the broadcasted result's iterator(s). + + Parameters + ---------- + None + + Returns + ------- + None + + Examples + -------- + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.index + 0 + >>> next(b), next(b), next(b) + ((1, 4), (2, 4), (3, 4)) + >>> b.index + 3 + >>> b.reset() + >>> b.index + 0 + + """)) + +############################################################################### +# +# numpy functions +# +############################################################################### + +add_newdoc('numpy.core.multiarray', 'array', + """ + array(object, dtype=None, *, copy=True, order='K', subok=False, ndmin=0, + like=None) + + Create an array. 
+ + Parameters + ---------- + object : array_like + An array, any object exposing the array interface, an object whose + __array__ method returns an array, or any (nested) sequence. + dtype : data-type, optional + The desired data-type for the array. If not given, then the type will + be determined as the minimum type required to hold the objects in the + sequence. + copy : bool, optional + If true (default), then the object is copied. Otherwise, a copy will + only be made if __array__ returns a copy, if obj is a nested sequence, + or if a copy is needed to satisfy any of the other requirements + (`dtype`, `order`, etc.). + order : {'K', 'A', 'C', 'F'}, optional + Specify the memory layout of the array. If object is not an array, the + newly created array will be in C order (row major) unless 'F' is + specified, in which case it will be in Fortran order (column major). + If object is an array the following holds. + + ===== ========= =================================================== + order no copy copy=True + ===== ========= =================================================== + 'K' unchanged F & C order preserved, otherwise most similar order + 'A' unchanged F order if input is F and not C, otherwise C order + 'C' C order C order + 'F' F order F order + ===== ========= =================================================== + + When ``copy=False`` and a copy is made for other reasons, the result is + the same as if ``copy=True``, with some exceptions for 'A', see the + Notes section. The default order is 'K'. + subok : bool, optional + If True, then sub-classes will be passed-through, otherwise + the returned array will be forced to be a base-class array (default). + ndmin : int, optional + Specifies the minimum number of dimensions that the resulting + array should have. Ones will be pre-pended to the shape as + needed to meet this requirement. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + An array object satisfying the specified requirements. + + See Also + -------- + empty_like : Return an empty array with shape and type of input. + ones_like : Return an array of ones with shape and type of input. + zeros_like : Return an array of zeros with shape and type of input. + full_like : Return a new array with shape of input filled with value. + empty : Return a new uninitialized array. + ones : Return a new array setting values to one. + zeros : Return a new array setting values to zero. + full : Return a new array of given shape filled with value. + + + Notes + ----- + When order is 'A' and `object` is an array in neither 'C' nor 'F' order, + and a copy is forced by a change in dtype, then the order of the result is + not necessarily 'C' as expected. This is likely a bug. 
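+
+    As a quick illustration of the ``copy`` parameter (an illustrative
+    aside): no copy is made when the input is already an ndarray that
+    satisfies all of the requirements, so the input is returned as-is:
+
+    >>> a = np.array([1, 2, 3])
+    >>> np.array(a, copy=False) is a
+    True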
+
+    Examples
+    --------
+    >>> np.array([1, 2, 3])
+    array([1, 2, 3])
+
+    Upcasting:
+
+    >>> np.array([1, 2, 3.0])
+    array([ 1., 2., 3.])
+
+    More than one dimension:
+
+    >>> np.array([[1, 2], [3, 4]])
+    array([[1, 2],
+           [3, 4]])
+
+    Minimum dimensions 2:
+
+    >>> np.array([1, 2, 3], ndmin=2)
+    array([[1, 2, 3]])
+
+    Type provided:
+
+    >>> np.array([1, 2, 3], dtype=complex)
+    array([ 1.+0.j, 2.+0.j, 3.+0.j])
+
+    Data-type consisting of more than one element:
+
+    >>> x = np.array([(1,2),(3,4)], dtype=[('a','<i4'),('b','<i4')])
+    >>> x['a']
+    array([1, 3])
+
+    Creating an array from sub-classes:
+
+    >>> np.array(np.mat('1 2; 3 4'))
+    array([[1, 2],
+           [3, 4]])
+
+    >>> np.array(np.mat('1 2; 3 4'), subok=True)
+    matrix([[1, 2],
+            [3, 4]])
+
+    """.replace(
+        "${ARRAY_FUNCTION_LIKE}",
+        array_function_like_doc,
+    ))
+
+add_newdoc('numpy.core.multiarray', 'asarray',
+    """
+    asarray(a, dtype=None, order=None, *, like=None)
+
+    Convert the input to an array.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data, in any form that can be converted to an array. This
+        includes lists, lists of tuples, tuples, tuples of tuples, tuples
+        of lists and ndarrays.
+    dtype : data-type, optional
+        By default, the data-type is inferred from the input data.
+    order : {'C', 'F', 'A', 'K'}, optional
+        Memory layout. 'A' and 'K' depend on the order of input array a.
+        'C' row-major (C-style),
+        'F' column-major (Fortran-style) memory representation.
+        'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise
+        'K' (keep) preserve input order
+        Defaults to 'C'.
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    out : ndarray
+        Array interpretation of `a`. No copy is performed if the input
+        is already an ndarray with matching dtype and order. If `a` is a
+        subclass of ndarray, a base class ndarray is returned.
+
+    See Also
+    --------
+    asanyarray : Similar function which passes through subclasses.
+    ascontiguousarray : Convert input to a contiguous array.
+    asfarray : Convert input to a floating point ndarray.
+    asfortranarray : Convert input to an ndarray with column-major
+                     memory order.
+    asarray_chkfinite : Similar function which checks input for NaNs and Infs.
+    fromiter : Create an array from an iterator.
+    fromfunction : Construct an array by executing a function on grid
+                   positions.
+
+    Examples
+    --------
+    Convert a list into an array:
+
+    >>> a = [1, 2]
+    >>> np.asarray(a)
+    array([1, 2])
+
+    Existing arrays are not copied:
+
+    >>> a = np.array([1, 2])
+    >>> np.asarray(a) is a
+    True
+
+    If `dtype` is set, array is copied only if dtype does not match:
+
+    >>> a = np.array([1, 2], dtype=np.float32)
+    >>> np.asarray(a, dtype=np.float32) is a
+    True
+    >>> np.asarray(a, dtype=np.float64) is a
+    False
+
+    Contrary to `asanyarray`, ndarray subclasses are not passed through:
+
+    >>> issubclass(np.recarray, np.ndarray)
+    True
+    >>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray)
+    >>> np.asarray(a) is a
+    False
+    >>> np.asanyarray(a) is a
+    True
+
+    """.replace(
+        "${ARRAY_FUNCTION_LIKE}",
+        array_function_like_doc,
+    ))
+
+add_newdoc('numpy.core.multiarray', 'asanyarray',
+    """
+    asanyarray(a, dtype=None, order=None, *, like=None)
+
+    Convert the input to an ndarray, but pass ndarray subclasses through.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data, in any form that can be converted to an array. This
+        includes scalars, lists, lists of tuples, tuples, tuples of tuples,
+        tuples of lists, and ndarrays.
+ dtype : data-type, optional + By default, the data-type is inferred from the input data. + order : {'C', 'F', 'A', 'K'}, optional + Memory layout. 'A' and 'K' depend on the order of input array a. + 'C' row-major (C-style), + 'F' column-major (Fortran-style) memory representation. + 'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise + 'K' (keep) preserve input order + Defaults to 'C'. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray or an ndarray subclass + Array interpretation of `a`. If `a` is an ndarray or a subclass + of ndarray, it is returned as-is and no copy is performed. + + See Also + -------- + asarray : Similar function which always returns ndarrays. + ascontiguousarray : Convert input to a contiguous array. + asfarray : Convert input to a floating point ndarray. + asfortranarray : Convert input to an ndarray with column-major + memory order. + asarray_chkfinite : Similar function which checks input for NaNs and + Infs. + fromiter : Create an array from an iterator. + fromfunction : Construct an array by executing a function on grid + positions. + + Examples + -------- + Convert a list into an array: + + >>> a = [1, 2] + >>> np.asanyarray(a) + array([1, 2]) + + Instances of `ndarray` subclasses are passed through as-is: + + >>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray) + >>> np.asanyarray(a) is a + True + + """.replace( + "${ARRAY_FUNCTION_LIKE}", + array_function_like_doc, + )) + +add_newdoc('numpy.core.multiarray', 'ascontiguousarray', + """ + ascontiguousarray(a, dtype=None, *, like=None) + + Return a contiguous array (ndim >= 1) in memory (C order). + + Parameters + ---------- + a : array_like + Input array. + dtype : str or dtype object, optional + Data-type of returned array. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + Contiguous array of same shape and content as `a`, with type `dtype` + if specified. + + See Also + -------- + asfortranarray : Convert input to an ndarray with column-major + memory order. + require : Return an ndarray that satisfies requirements. + ndarray.flags : Information about the memory layout of the array. + + Examples + -------- + >>> x = np.arange(6).reshape(2,3) + >>> np.ascontiguousarray(x, dtype=np.float32) + array([[0., 1., 2.], + [3., 4., 5.]], dtype=float32) + >>> x.flags['C_CONTIGUOUS'] + True + + Note: This function returns an array with at least one-dimension (1-d) + so it will not preserve 0-d arrays. + + """.replace( + "${ARRAY_FUNCTION_LIKE}", + array_function_like_doc, + )) + +add_newdoc('numpy.core.multiarray', 'asfortranarray', + """ + asfortranarray(a, dtype=None, *, like=None) + + Return an array (ndim >= 1) laid out in Fortran order in memory. + + Parameters + ---------- + a : array_like + Input array. + dtype : str or dtype object, optional + By default, the data-type is inferred from the input data. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + The input `a` in Fortran, or column-major, order. + + See Also + -------- + ascontiguousarray : Convert input to a contiguous (C order) array. + asanyarray : Convert input to an ndarray with either row or + column-major memory order. + require : Return an ndarray that satisfies requirements. + ndarray.flags : Information about the memory layout of the array. 
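+
+    Notes
+    -----
+    No copy is made when the input is already an ndarray in Fortran order
+    with a matching dtype; the input is then returned unchanged (a small
+    illustration):
+
+    >>> x = np.ones((2, 3), order='F')
+    >>> np.asfortranarray(x) is x
+    True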
+
+    Examples
+    --------
+    >>> x = np.arange(6).reshape(2,3)
+    >>> y = np.asfortranarray(x)
+    >>> x.flags['F_CONTIGUOUS']
+    False
+    >>> y.flags['F_CONTIGUOUS']
+    True
+
+    Note: This function returns an array with at least one-dimension (1-d)
+    so it will not preserve 0-d arrays.
+
+    """.replace(
+        "${ARRAY_FUNCTION_LIKE}",
+        array_function_like_doc,
+    ))
+
+add_newdoc('numpy.core.multiarray', 'empty',
+    """
+    empty(shape, dtype=float, order='C', *, like=None)
+
+    Return a new array of given shape and type, without initializing entries.
+
+    Parameters
+    ----------
+    shape : int or tuple of int
+        Shape of the empty array, e.g., ``(2, 3)`` or ``2``.
+    dtype : data-type, optional
+        Desired output data-type for the array, e.g., `numpy.int8`. Default is
+        `numpy.float64`.
+    order : {'C', 'F'}, optional, default: 'C'
+        Whether to store multi-dimensional data in row-major
+        (C-style) or column-major (Fortran-style) order in
+        memory.
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    out : ndarray
+        Array of uninitialized (arbitrary) data of the given shape, dtype, and
+        order. Object arrays will be initialized to None.
+
+    See Also
+    --------
+    empty_like : Return an empty array with shape and type of input.
+    ones : Return a new array setting values to one.
+    zeros : Return a new array setting values to zero.
+    full : Return a new array of given shape filled with value.
+
+
+    Notes
+    -----
+    `empty`, unlike `zeros`, does not set the array values to zero,
+    and may therefore be marginally faster. On the other hand, it requires
+    the user to manually set all the values in the array, and should be
+    used with caution.
+
+    Examples
+    --------
+    >>> np.empty([2, 2])
+    array([[ -9.74499359e+001,   6.69583040e-309],
+           [  2.13182611e-314,   3.06959433e-309]])         #uninitialized
+
+    >>> np.empty([2, 2], dtype=int)
+    array([[-1073741821, -1067949133],
+           [  496041986,    19249760]])                     #uninitialized
+
+    """.replace(
+        "${ARRAY_FUNCTION_LIKE}",
+        array_function_like_doc,
+    ))
+
+add_newdoc('numpy.core.multiarray', 'scalar',
+    """
+    scalar(dtype, obj)
+
+    Return a new scalar array of the given type initialized with obj.
+
+    This function is meant mainly for pickle support. `dtype` must be a
+    valid data-type descriptor. If `dtype` corresponds to an object
+    descriptor, then `obj` can be any object, otherwise `obj` must be a
+    string. If `obj` is not given, it will be interpreted as None for object
+    type and as zeros for all other types.
+
+    """)
+
+add_newdoc('numpy.core.multiarray', 'zeros',
+    """
+    zeros(shape, dtype=float, order='C', *, like=None)
+
+    Return a new array of given shape and type, filled with zeros.
+
+    Parameters
+    ----------
+    shape : int or tuple of ints
+        Shape of the new array, e.g., ``(2, 3)`` or ``2``.
+    dtype : data-type, optional
+        The desired data-type for the array, e.g., `numpy.int8`. Default is
+        `numpy.float64`.
+    order : {'C', 'F'}, optional, default: 'C'
+        Whether to store multi-dimensional data in row-major
+        (C-style) or column-major (Fortran-style) order in
+        memory.
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    out : ndarray
+        Array of zeros with the given shape, dtype, and order.
+
+    See Also
+    --------
+    zeros_like : Return an array of zeros with shape and type of input.
+    empty : Return a new uninitialized array.
+    ones : Return a new array setting values to one.
+    full : Return a new array of given shape filled with value.
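+
+    Notes
+    -----
+    Unlike `empty`, the returned memory is guaranteed to be initialized;
+    every element compares equal to zero (a quick check):
+
+    >>> np.count_nonzero(np.zeros((100, 100)))
+    0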
+
+    Examples
+    --------
+    >>> np.zeros(5)
+    array([ 0., 0., 0., 0., 0.])
+
+    >>> np.zeros((5,), dtype=int)
+    array([0, 0, 0, 0, 0])
+
+    >>> np.zeros((2, 1))
+    array([[ 0.],
+           [ 0.]])
+
+    >>> s = (2,2)
+    >>> np.zeros(s)
+    array([[ 0.,  0.],
+           [ 0.,  0.]])
+
+    >>> np.zeros((2,), dtype=[('x', 'i4'), ('y', 'i4')]) # custom dtype
+    array([(0, 0), (0, 0)],
+          dtype=[('x', '<i4'), ('y', '<i4')])
+
+    """.replace(
+        "${ARRAY_FUNCTION_LIKE}",
+        array_function_like_doc,
+    ))
+
+add_newdoc('numpy.core.multiarray', 'fromstring',
+    """
+    fromstring(string, dtype=float, count=-1, sep='', *, like=None)
+
+    A new 1-D array initialized from text data in a string.
+
+    Parameters
+    ----------
+    string : str
+        A string containing the data.
+    dtype : data-type, optional
+        The data type of the array; default: float. For binary input data,
+        the data must be in exactly this format. Most builtin numeric types
+        are supported and extension types may be supported.
+
+        .. versionadded:: 1.18.0
+            Complex dtypes.
+
+    count : int, optional
+        Read this number of `dtype` elements from the data. If this is
+        negative (the default), the count will be determined from the
+        length of the data.
+    sep : str, optional
+        The string separating numbers in the data; extra whitespace between
+        elements is also ignored.
+
+        .. deprecated:: 1.14
+            Passing ``sep=''``, the default, is deprecated since it will
+            trigger the deprecated binary mode of this function. This mode
+            interprets `string` as binary bytes, rather than ASCII text with
+            decimal numbers, an operation which is better spelt
+            ``frombuffer(string, dtype, count)``.
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    arr : ndarray
+        The constructed array.
+
+    Raises
+    ------
+    ValueError
+        If the string is not the correct size to satisfy the requested
+        `dtype` and `count`.
+
+    See Also
+    --------
+    frombuffer, fromfile, fromiter
+
+    Examples
+    --------
+    >>> np.fromstring('1 2', dtype=int, sep=' ')
+    array([1, 2])
+    >>> np.fromstring('1, 2', dtype=int, sep=',')
+    array([1, 2])
+
+    """.replace(
+        "${ARRAY_FUNCTION_LIKE}",
+        array_function_like_doc,
+    ))
+
+add_newdoc('numpy.core.multiarray', 'compare_chararrays',
+    """
+    compare_chararrays(a, b, cmp_op, rstrip)
+
+    Performs element-wise comparison of two string arrays using the
+    comparison operator specified by `cmp_op`.
+
+    Parameters
+    ----------
+    a, b : array_like
+        Arrays to be compared.
+    cmp_op : {"<", "<=", "==", ">=", ">", "!="}
+        Type of comparison.
+    rstrip : bool
+        If True, the spaces at the end of strings are removed before the
+        comparison.
+
+    Returns
+    -------
+    out : ndarray
+        The output array of type bool with the same shape as `a` and `b`.
+
+    Raises
+    ------
+    ValueError
+        If `cmp_op` is not valid.
+    TypeError
+        If at least one of `a` or `b` is a non-string array.
+
+    Examples
+    --------
+    >>> a = np.array(["a", "b", "cde"])
+    >>> b = np.array(["a", "a", "dec"])
+    >>> np.compare_chararrays(a, b, ">", True)
+    array([False,  True, False])
+
+    """)
+
+add_newdoc('numpy.core.multiarray', 'fromiter',
+    """
+    fromiter(iter, dtype, count=-1, *, like=None)
+
+    Create a new 1-dimensional array from an iterable object.
+
+    Parameters
+    ----------
+    iter : iterable object
+        An iterable object providing data for the array.
+    dtype : data-type
+        The data-type of the returned array.
+    count : int, optional
+        The number of items to read from *iterable*. The default is -1,
+        which means all data is read.
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    out : ndarray
+        The output array.
+
+    Notes
+    -----
+    Specify `count` to improve performance. It allows ``fromiter`` to
+    pre-allocate the output array, instead of resizing it on demand.
+
+    Examples
+    --------
+    >>> iterable = (x*x for x in range(5))
+    >>> np.fromiter(iterable, float)
+    array([ 0., 1., 4., 9., 16.])
+
+    """.replace(
+        "${ARRAY_FUNCTION_LIKE}",
+        array_function_like_doc,
+    ))
+
+add_newdoc('numpy.core.multiarray', 'fromfile',
+    """
+    fromfile(file, dtype=float, count=-1, sep='', offset=0, *, like=None)
+
+    Construct an array from data in a text or binary file.
+
+    A highly efficient way of reading binary data with a known data-type,
+    as well as parsing simply formatted text files. Data written using the
+    `tofile` method can be read using this function.
+
+    Parameters
+    ----------
+    file : file or str or Path
+        Open file object or filename.
+
+        .. versionchanged:: 1.17.0
+            `pathlib.Path` objects are now accepted.
+
+    dtype : data-type
+        Data type of the returned array.
+        For binary files, it is used to determine the size and byte-order
+        of the items in the file.
+        Most builtin numeric types are supported and extension types may be supported.
+
+        .. versionadded:: 1.18.0
+            Complex dtypes.
+
+    count : int
+        Number of items to read. ``-1`` means all items (i.e., the complete
+        file).
+    sep : str
+        Separator between items if file is a text file.
+        Empty ("") separator means the file should be treated as binary.
+        Spaces (" ") in the separator match zero or more whitespace characters.
+        A separator consisting only of spaces must match at least one
+        whitespace.
+    offset : int
+        The offset (in bytes) from the file's current position. Defaults to 0.
+        Only permitted for binary files.
+
+        .. versionadded:: 1.17.0
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    See also
+    --------
+    load, save
+    ndarray.tofile
+    loadtxt : More flexible way of loading data from a text file.
+
+    Notes
+    -----
+    Do not rely on the combination of `tofile` and `fromfile` for
+    data storage, as the binary files generated are not platform
+    independent. In particular, no byte-order or data-type information is
+    saved. Data can be stored in the platform independent ``.npy`` format
+    using `save` and `load` instead.
+
+    Examples
+    --------
+    Construct an ndarray:
+
+    >>> dt = np.dtype([('time', [('min', np.int64), ('sec', np.int64)]),
+    ...                ('temp', float)])
+    >>> x = np.zeros((1,), dtype=dt)
+    >>> x['time']['min'] = 10; x['temp'] = 98.25
+    >>> x
+    array([((10, 0), 98.25)],
+          dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
+
+    Save the raw data to disk:
+
+    >>> import tempfile
+    >>> fname = tempfile.mkstemp()[1]
+    >>> x.tofile(fname)
+
+    Read the raw data from disk:
+
+    >>> np.fromfile(fname, dtype=dt)
+    array([((10, 0), 98.25)],
+          dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
+
+    The recommended way to store and load data:
+
+    >>> np.save(fname, x)
+    >>> np.load(fname + '.npy')
+    array([((10, 0), 98.25)],
+          dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
+
+    """.replace(
+        "${ARRAY_FUNCTION_LIKE}",
+        array_function_like_doc,
+    ))
+
+add_newdoc('numpy.core.multiarray', 'frombuffer',
+    """
+    frombuffer(buffer, dtype=float, count=-1, offset=0, *, like=None)
+
+    Interpret a buffer as a 1-dimensional array.
+
+    Parameters
+    ----------
+    buffer : buffer_like
+        An object that exposes the buffer interface.
+    dtype : data-type, optional
+        Data-type of the returned array; default: float.
+    count : int, optional
+        Number of items to read. ``-1`` means all data in the buffer.
+    offset : int, optional
+        Start reading the buffer from this offset (in bytes); default: 0.
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    Notes
+    -----
+    If the buffer has data that is not in machine byte-order, this should
+    be specified as part of the data-type, e.g.::
+
+      >>> dt = np.dtype(int)
+      >>> dt = dt.newbyteorder('>')
+      >>> np.frombuffer(buf, dtype=dt) # doctest: +SKIP
+
+    The data of the resulting array will not be byteswapped, but will be
+    interpreted correctly.
+
+    Examples
+    --------
+    >>> s = b'hello world'
+    >>> np.frombuffer(s, dtype='S1', count=5, offset=6)
+    array([b'w', b'o', b'r', b'l', b'd'], dtype='|S1')
+
+    >>> np.frombuffer(b'\\x01\\x02', dtype=np.uint8)
+    array([1, 2], dtype=uint8)
+    >>> np.frombuffer(b'\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3)
+    array([1, 2, 3], dtype=uint8)
+
+    """.replace(
+        "${ARRAY_FUNCTION_LIKE}",
+        array_function_like_doc,
+    ))
+
+add_newdoc('numpy.core', 'fastCopyAndTranspose',
+    """_fastCopyAndTranspose(a)""")
+
+add_newdoc('numpy.core.multiarray', 'correlate',
+    """cross_correlate(a,v, mode=0)""")
+
+add_newdoc('numpy.core.multiarray', 'arange',
+    """
+    arange([start,] stop[, step,], dtype=None, *, like=None)
+
+    Return evenly spaced values within a given interval.
+
+    Values are generated within the half-open interval ``[start, stop)``
+    (in other words, the interval including `start` but excluding `stop`).
+    For integer arguments the function is equivalent to the Python built-in
+    `range` function, but returns an ndarray rather than a list.
+
+    When using a non-integer step, such as 0.1, the results will often not
+    be consistent. It is better to use `numpy.linspace` for these cases.
+
+    Parameters
+    ----------
+    start : integer or real, optional
+        Start of interval. The interval includes this value. The default
+        start value is 0.
+    stop : integer or real
+        End of interval. The interval does not include this value, except
+        in some cases where `step` is not an integer and floating point
+        round-off affects the length of `out`.
+    step : integer or real, optional
+        Spacing between values. For any output `out`, this is the distance
+        between two adjacent values, ``out[i+1] - out[i]``. The default
+        step size is 1. If `step` is specified as a positional argument,
+        `start` must also be given.
+    dtype : dtype
+        The type of the output array. If `dtype` is not given, infer the data
+        type from the other input arguments.
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    arange : ndarray
+        Array of evenly spaced values.
+
+        For floating point arguments, the length of the result is
+        ``ceil((stop - start)/step)``. Because of floating point overflow,
+        this rule may result in the last element of `out` being greater
+        than `stop`.
+
+    See Also
+    --------
+    numpy.linspace : Evenly spaced numbers with careful handling of endpoints.
+    numpy.ogrid: Arrays of evenly spaced numbers in N-dimensions.
+    numpy.mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions.
+
+    Examples
+    --------
+    >>> np.arange(3)
+    array([0, 1, 2])
+    >>> np.arange(3.0)
+    array([ 0., 1., 2.])
+    >>> np.arange(3,7)
+    array([3, 4, 5, 6])
+    >>> np.arange(3,7,2)
+    array([3, 5])
+
+    """.replace(
+        "${ARRAY_FUNCTION_LIKE}",
+        array_function_like_doc,
+    ))
+
+add_newdoc('numpy.core.multiarray', '_get_ndarray_c_version',
+    """_get_ndarray_c_version()
+
+    Return the compile time NPY_VERSION (formerly called NDARRAY_VERSION) number.
+
+    """)
+
+add_newdoc('numpy.core.multiarray', '_reconstruct',
+    """_reconstruct(subtype, shape, dtype)
+
+    Construct an empty array. Used by Pickles.
+
+    """)
+
+
+add_newdoc('numpy.core.multiarray', 'set_string_function',
+    """
+    set_string_function(f, repr=1)
+
+    Internal method to set a function to be used when pretty printing arrays.
+
+    """)
+
+add_newdoc('numpy.core.multiarray', 'set_numeric_ops',
+    """
+    set_numeric_ops(op1=func1, op2=func2, ...)
+
+    Set numerical operators for array objects.
+
+    .. deprecated:: 1.16
+
+        For the general case, use :c:func:`PyUFunc_ReplaceLoopBySignature`.
+        For ndarray subclasses, define the ``__array_ufunc__`` method and
+        override the relevant ufunc.
+
+    Parameters
+    ----------
+    op1, op2, ... : callable
+        Each ``op = func`` pair describes an operator to be replaced.
+        For example, ``add = lambda x, y: np.add(x, y) % 5`` would replace
+        addition by modulus 5 addition.
+
+    Returns
+    -------
+    saved_ops : list of callables
+        A list of all operators, stored before making replacements.
+
+    Notes
+    -----
+    .. WARNING::
+        Use with care! Incorrect usage may lead to memory errors.
+
+    A function replacing an operator cannot make use of that operator.
+    For example, when replacing add, you may not use ``+``. Instead,
+    directly call ufuncs.
+
+    Examples
+    --------
+    >>> def add_mod5(x, y):
+    ...     return np.add(x, y) % 5
+    ...
+    >>> old_funcs = np.set_numeric_ops(add=add_mod5)
+
+    >>> x = np.arange(12).reshape((3, 4))
+    >>> x + x   # wraps around, since addition is now modulo 5
+    array([[0, 2, 4, 1],
+           [3, 0, 2, 4],
+           [1, 3, 0, 2]])
+
+    >>> ignore = np.set_numeric_ops(**old_funcs) # restore operators
+
+    """)
+
+add_newdoc('numpy.core.multiarray', 'promote_types',
+    """
+    promote_types(type1, type2)
+
+    Returns the data type with the smallest size and smallest scalar
+    kind to which both ``type1`` and ``type2`` may be safely cast.
+    The returned data type is always in native byte order.
+
+    This function is symmetric, but rarely associative.
+
+    Parameters
+    ----------
+    type1 : dtype or dtype specifier
+        First data type.
+    type2 : dtype or dtype specifier
+        Second data type.
+
+    Returns
+    -------
+    out : dtype
+        The promoted data type.
+
+    Notes
+    -----
+    .. versionadded:: 1.6.0
+
+    Starting in NumPy 1.9, the `promote_types` function returns a valid string
+    length when given an integer or float dtype as one argument and a string
+    dtype as another argument. Previously it always returned the input string
+    dtype, even if it wasn't long enough to store the max integer/float value
+    converted to a string.
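+
+    For example, a 32-bit integer rendered as a string can need up to 11
+    characters (``'-2147483648'``), which is why ``np.promote_types('i4', 'S8')``
+    in the examples below gives ``dtype('S11')``. The native-byte-order
+    guarantee can also be checked directly:
+
+    >>> np.promote_types('>i4', '>f4').isnative
+    True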
+
+    See Also
+    --------
+    result_type, dtype, can_cast
+
+    Examples
+    --------
+    >>> np.promote_types('f4', 'f8')
+    dtype('float64')
+
+    >>> np.promote_types('i8', 'f4')
+    dtype('float64')
+
+    >>> np.promote_types('>i8', '<i8')
+    dtype('int64')
+
+    >>> np.promote_types('i4', 'S8')
+    dtype('S11')
+
+    An example of a non-associative case:
+
+    >>> p = np.promote_types
+    >>> p('S', p('i1', 'u1'))
+    dtype('S6')
+    >>> p(p('S', 'i1'), 'u1')
+    dtype('S4')
+
+    """)
+
+add_newdoc('numpy.core.multiarray', 'c_einsum',
+    """
+    c_einsum(subscripts, *operands, out=None, dtype=None, order='K',
+           casting='safe')
+
+    *This documentation shadows that of the native Python implementation of the `einsum` function,
+    except all references and examples related to the `optimize` argument (v 0.12.0) have been removed.*
+
+    Evaluates the Einstein summation convention on the operands.
+
+    Using the Einstein summation convention, many common multi-dimensional,
+    linear algebraic array operations can be represented in a simple fashion.
+    In *implicit* mode `einsum` computes these values.
+
+    In *explicit* mode, `einsum` provides further flexibility to compute
+    other array operations that might not be considered classical Einstein
+    summation operations, by disabling, or forcing summation over specified
+    subscript labels.
+
+    See the notes and examples for clarification.
+
+    Parameters
+    ----------
+    subscripts : str
+        Specifies the subscripts for summation as comma separated list of
+        subscript labels. An implicit (classical Einstein summation)
+        calculation is performed unless the explicit indicator '->' is
+        included as well as subscript labels of the precise output form.
+    operands : list of array_like
+        These are the arrays for the operation.
+    out : ndarray, optional
+        If provided, the calculation is done into this array.
+    dtype : {data-type, None}, optional
+        If provided, forces the calculation to use the data type specified.
+        Note that you may have to also give a more liberal `casting`
+        parameter to allow the conversions. Default is None.
+    order : {'C', 'F', 'A', 'K'}, optional
+        Controls the memory layout of the output. 'C' means it should
+        be C contiguous. 'F' means it should be Fortran contiguous,
+        'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
+        'K' means it should be as close to the layout of the inputs as
+        is possible, including arbitrarily permuted axes.
+        Default is 'K'.
+    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+        Controls what kind of data casting may occur. Setting this to
+        'unsafe' is not recommended, as it can adversely affect accumulations.
+
+        * 'no' means the data types should not be cast at all.
+        * 'equiv' means only byte-order changes are allowed.
+        * 'safe' means only casts which can preserve values are allowed.
+        * 'same_kind' means only safe casts or casts within a kind,
+          like float64 to float32, are allowed.
+        * 'unsafe' means any data conversions may be done.
+
+        Default is 'safe'.
+    optimize : {False, True, 'greedy', 'optimal'}, optional
+        Controls if intermediate optimization should occur. No optimization
+        will occur if False and True will default to the 'greedy' algorithm.
+        Also accepts an explicit contraction list from the ``np.einsum_path``
+        function. See ``np.einsum_path`` for more details. Defaults to False.
+
+    Returns
+    -------
+    output : ndarray
+        The calculation based on the Einstein summation convention.
+
+    See Also
+    --------
+    einsum_path, dot, inner, outer, tensordot, linalg.multi_dot
+
+    Notes
+    -----
+    .. versionadded:: 1.6.0
+
+    The Einstein summation convention can be used to compute
+    many multi-dimensional, linear algebraic array operations. `einsum`
+    provides a succinct way of representing these.
+
+    A non-exhaustive list of these operations,
+    which can be computed by `einsum`, is shown below along with examples:
+
+    * Trace of an array, :py:func:`numpy.trace`.
+    * Return a diagonal, :py:func:`numpy.diag`.
+    * Array axis summations, :py:func:`numpy.sum`.
+    * Transpositions and permutations, :py:func:`numpy.transpose`.
+    * Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`.
+    * Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`.
+    * Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`.
+    * Tensor contractions, :py:func:`numpy.tensordot`.
+    * Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`.
+
+    The subscripts string is a comma-separated list of subscript labels,
+    where each label refers to a dimension of the corresponding operand.
+    Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
+    is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label
+    appears only once, it is not summed, so ``np.einsum('i', a)`` produces a
+    view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``
+    describes traditional matrix multiplication and is equivalent to
+    :py:func:`np.matmul(a,b) <numpy.matmul>`. Repeated subscript labels in one
+    operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent
+    to :py:func:`np.trace(a) <numpy.trace>`.
+
+    In *implicit mode*, the chosen subscripts are important
+    since the axes of the output are reordered alphabetically. This
+    means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
+    ``np.einsum('ji', a)`` takes its transpose. Additionally,
+    ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while
+    ``np.einsum('ij,jh', a, b)`` returns the transpose of the
+    multiplication since subscript 'h' precedes subscript 'i'.
+
+    In *explicit mode* the output can be directly controlled by
+    specifying output subscript labels. This requires the
+    identifier '->' as well as the list of output subscript labels.
+    This feature increases the flexibility of the function since
+    summing can be disabled or forced when required. The call
+    ``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <numpy.sum>`,
+    and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <numpy.diag>`.
+    The difference is that `einsum` does not allow broadcasting by default.
+    Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
+    order of the output subscript labels and therefore returns matrix
+    multiplication, unlike the example above in implicit mode.
+
+    To enable and control broadcasting, use an ellipsis. Default
+    NumPy-style broadcasting is done by adding an ellipsis
+    to the left of each term, like ``np.einsum('...ii->...i', a)``.
+    To take the trace along the first and last axes,
+    you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
+    product with the left-most indices instead of rightmost, one can do
+    ``np.einsum('ij...,jk...->ik...', a, b)``.
+
+    When there is only one operand, no axes are summed, and no output
+    parameter is provided, a view into the operand is returned instead
+    of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
+    produces a view (changed in version 1.10.0).
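+
+    A quick check of that view behaviour (an illustrative aside):
+
+    >>> a = np.arange(9).reshape(3, 3)
+    >>> d = np.einsum('ii->i', a)
+    >>> d.base is a
+    True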
+
+    `einsum` also provides an alternative way to provide the subscripts
+    and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``.
+    If the output shape is not provided in this format `einsum` will be
+    calculated in implicit mode, otherwise it will be performed explicitly.
+    The examples below have corresponding `einsum` calls with the two
+    parameter methods.
+
+    .. versionadded:: 1.10.0
+
+    Views returned from einsum are now writeable whenever the input array
+    is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
+    have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>`
+    and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
+    of a 2D array.
+
+    Examples
+    --------
+    >>> a = np.arange(25).reshape(5,5)
+    >>> b = np.arange(5)
+    >>> c = np.arange(6).reshape(2,3)
+
+    Trace of a matrix:
+
+    >>> np.einsum('ii', a)
+    60
+    >>> np.einsum(a, [0,0])
+    60
+    >>> np.trace(a)
+    60
+
+    Extract the diagonal (requires explicit form):
+
+    >>> np.einsum('ii->i', a)
+    array([ 0,  6, 12, 18, 24])
+    >>> np.einsum(a, [0,0], [0])
+    array([ 0,  6, 12, 18, 24])
+    >>> np.diag(a)
+    array([ 0,  6, 12, 18, 24])
+
+    Sum over an axis (requires explicit form):
+
+    >>> np.einsum('ij->i', a)
+    array([ 10,  35,  60,  85, 110])
+    >>> np.einsum(a, [0,1], [0])
+    array([ 10,  35,  60,  85, 110])
+    >>> np.sum(a, axis=1)
+    array([ 10,  35,  60,  85, 110])
+
+    For higher dimensional arrays summing a single axis can be done with ellipsis:
+
+    >>> np.einsum('...j->...', a)
+    array([ 10,  35,  60,  85, 110])
+    >>> np.einsum(a, [Ellipsis,1], [Ellipsis])
+    array([ 10,  35,  60,  85, 110])
+
+    Compute a matrix transpose, or reorder any number of axes:
+
+    >>> np.einsum('ji', c)
+    array([[0, 3],
+           [1, 4],
+           [2, 5]])
+    >>> np.einsum('ij->ji', c)
+    array([[0, 3],
+           [1, 4],
+           [2, 5]])
+    >>> np.einsum(c, [1,0])
+    array([[0, 3],
+           [1, 4],
+           [2, 5]])
+    >>> np.transpose(c)
+    array([[0, 3],
+           [1, 4],
+           [2, 5]])
+
+    Vector inner products:
+
+    >>> np.einsum('i,i', b, b)
+    30
+    >>> np.einsum(b, [0], b, [0])
+    30
+    >>> np.inner(b,b)
+    30
+
+    Matrix vector multiplication:
+
+    >>> np.einsum('ij,j', a, b)
+    array([ 30,  80, 130, 180, 230])
+    >>> np.einsum(a, [0,1], b, [1])
+    array([ 30,  80, 130, 180, 230])
+    >>> np.dot(a, b)
+    array([ 30,  80, 130, 180, 230])
+    >>> np.einsum('...j,j', a, b)
+    array([ 30,  80, 130, 180, 230])
+
+    Broadcasting and scalar multiplication:
+
+    >>> np.einsum('..., ...', 3, c)
+    array([[ 0,  3,  6],
+           [ 9, 12, 15]])
+    >>> np.einsum(',ij', 3, c)
+    array([[ 0,  3,  6],
+           [ 9, 12, 15]])
+    >>> np.einsum(3, [Ellipsis], c, [Ellipsis])
+    array([[ 0,  3,  6],
+           [ 9, 12, 15]])
+    >>> np.multiply(3, c)
+    array([[ 0,  3,  6],
+           [ 9, 12, 15]])
+
+    Vector outer product:
+
+    >>> np.einsum('i,j', np.arange(2)+1, b)
+    array([[0, 1, 2, 3, 4],
+           [0, 2, 4, 6, 8]])
+    >>> np.einsum(np.arange(2)+1, [0], b, [1])
+    array([[0, 1, 2, 3, 4],
+           [0, 2, 4, 6, 8]])
+    >>> np.outer(np.arange(2)+1, b)
+    array([[0, 1, 2, 3, 4],
+           [0, 2, 4, 6, 8]])
+
+    Tensor contraction:
+
+    >>> a = np.arange(60.).reshape(3,4,5)
+    >>> b = np.arange(24.).reshape(4,3,2)
+    >>> np.einsum('ijk,jil->kl', a, b)
+    array([[ 4400.,  4730.],
+           [ 4532.,  4874.],
+           [ 4664.,  5018.],
+           [ 4796.,  5162.],
+           [ 4928.,  5306.]])
+    >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
+    array([[ 4400.,  4730.],
+           [ 4532.,  4874.],
+           [ 4664.,  5018.],
+           [ 4796.,  5162.],
+           [ 4928.,  5306.]])
+    >>> np.tensordot(a,b, axes=([1,0],[0,1]))
+    array([[ 4400.,  4730.],
+           [ 4532.,  4874.],
+           [ 4664.,  5018.],
+           [ 4796.,  5162.],
+           [ 4928.,  5306.]])
+
+    Writeable returned arrays (since version
1.10.0): + + >>> a = np.zeros((3, 3)) + >>> np.einsum('ii->i', a)[:] = 1 + >>> a + array([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]]) + + Example of ellipsis use: + + >>> a = np.arange(6).reshape((3,2)) + >>> b = np.arange(12).reshape((4,3)) + >>> np.einsum('ki,jk->ij', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + >>> np.einsum('ki,...k->i...', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + >>> np.einsum('k...,jk', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + + """) + + +############################################################################## +# +# Documentation for ndarray attributes and methods +# +############################################################################## + + +############################################################################## +# +# ndarray object +# +############################################################################## + + +add_newdoc('numpy.core.multiarray', 'ndarray', + """ + ndarray(shape, dtype=float, buffer=None, offset=0, + strides=None, order=None) + + An array object represents a multidimensional, homogeneous array + of fixed-size items. An associated data-type object describes the + format of each element in the array (its byte-order, how many bytes it + occupies in memory, whether it is an integer, a floating point number, + or something else, etc.) + + Arrays should be constructed using `array`, `zeros` or `empty` (refer + to the See Also section below). The parameters given here refer to + a low-level method (`ndarray(...)`) for instantiating an array. + + For more information, refer to the `numpy` module and examine the + methods and attributes of an array. + + Parameters + ---------- + (for the __new__ method; see Notes below) + + shape : tuple of ints + Shape of created array. + dtype : data-type, optional + Any object that can be interpreted as a numpy data type. + buffer : object exposing buffer interface, optional + Used to fill the array with data. + offset : int, optional + Offset of array data in buffer. + strides : tuple of ints, optional + Strides of data in memory. + order : {'C', 'F'}, optional + Row-major (C-style) or column-major (Fortran-style) order. + + Attributes + ---------- + T : ndarray + Transpose of the array. + data : buffer + The array's elements, in memory. + dtype : dtype object + Describes the format of the elements in the array. + flags : dict + Dictionary containing information related to memory use, e.g., + 'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc. + flat : numpy.flatiter object + Flattened version of the array as an iterator. The iterator + allows assignments, e.g., ``x.flat = 3`` (See `ndarray.flat` for + assignment examples; TODO). + imag : ndarray + Imaginary part of the array. + real : ndarray + Real part of the array. + size : int + Number of elements in the array. + itemsize : int + The memory use of each array element in bytes. + nbytes : int + The total number of bytes required to store the array data, + i.e., ``itemsize * size``. + ndim : int + The array's number of dimensions. + shape : tuple of ints + Shape of the array. + strides : tuple of ints + The step-size required to move from one element to the next in + memory. For example, a contiguous ``(3, 4)`` array of type + ``int16`` in C-order has strides ``(8, 2)``. This implies that + to move from element to element in memory requires jumps of 2 bytes. + To move from row-to-row, one needs to jump 8 bytes at a time + (``2 * 4``). 
+
+    ctypes : ctypes object
+        Class containing properties of the array needed for interaction
+        with ctypes.
+    base : ndarray
+        If the array is a view into another array, that array is its `base`
+        (unless that array is also a view). The `base` array is where the
+        array data is actually stored.
+
+    See Also
+    --------
+    array : Construct an array.
+    zeros : Create an array, each element of which is zero.
+    empty : Create an array, but leave its allocated memory unchanged (i.e.,
+            it contains "garbage").
+    dtype : Create a data-type.
+    numpy.typing.NDArray : A :term:`generic <generic type>` version
+                           of ndarray.
+
+    Notes
+    -----
+    There are two modes of creating an array using ``__new__``:
+
+    1. If `buffer` is None, then only `shape`, `dtype`, and `order`
+       are used.
+    2. If `buffer` is an object exposing the buffer interface, then
+       all keywords are interpreted.
+
+    No ``__init__`` method is needed because the array is fully initialized
+    after the ``__new__`` method.
+
+    Examples
+    --------
+    These examples illustrate the low-level `ndarray` constructor. Refer
+    to the `See Also` section above for easier ways of constructing an
+    ndarray.
+
+    First mode, `buffer` is None:
+
+    >>> np.ndarray(shape=(2,2), dtype=float, order='F')
+    array([[0.0e+000, 0.0e+000], # random
+           [     nan, 2.5e-323]])
+
+    Second mode:
+
+    >>> np.ndarray((2,), buffer=np.array([1,2,3]),
+    ...            offset=np.int_().itemsize,
+    ...            dtype=int) # offset = 1*itemsize, i.e. skip first element
+    array([2, 3])
+
+    """)
+
+
+##############################################################################
+#
+# ndarray attributes
+#
+##############################################################################
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_interface__',
+    """Array protocol: Python side."""))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__',
+    """None."""))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_priority__',
+    """Array priority."""))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__',
+    """Array protocol: C-struct side."""))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('base',
+    """
+    Base object if memory is from some other object.
+
+    Examples
+    --------
+    The base of an array that owns its memory is None:
+
+    >>> x = np.array([1,2,3,4])
+    >>> x.base is None
+    True
+
+    Slicing creates a view, whose memory is shared with x:
+
+    >>> y = x[2:]
+    >>> y.base is x
+    True
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes',
+    """
+    An object to simplify the interaction of the array with the ctypes
+    module.
+
+    This attribute creates an object that makes it easier to use arrays
+    when calling shared libraries with the ctypes module. The returned
+    object has, among others, data, shape, and strides attributes (see
+    Notes below) which themselves return ctypes objects that can be used
+    as arguments to a shared library.
+
+    Parameters
+    ----------
+    None
+
+    Returns
+    -------
+    c : Python object
+        Possessing attributes data, shape, strides, etc.
+
+    See Also
+    --------
+    numpy.ctypeslib
+
+    Notes
+    -----
+    Below are the public attributes of this object which were documented
+    in "Guide to NumPy" (we have omitted undocumented public attributes,
+    as well as documented private attributes):
+
+    .. autoattribute:: numpy.core._internal._ctypes.data
+        :noindex:
+
+    .. autoattribute:: numpy.core._internal._ctypes.shape
+        :noindex:
+
+    .. autoattribute:: numpy.core._internal._ctypes.strides
+        :noindex:
+
+    .. automethod:: numpy.core._internal._ctypes.data_as
+        :noindex:
+
+    .. automethod:: numpy.core._internal._ctypes.shape_as
+        :noindex:
+
+    .. automethod:: numpy.core._internal._ctypes.strides_as
+        :noindex:
+
+    If the ctypes module is not available, then the ctypes attribute
+    of array objects still returns something useful, but ctypes objects
+    are not returned and errors may be raised instead. In particular,
+    the object will still have the ``as_parameter`` attribute which will
+    return an integer equal to the data attribute.
+
+    Examples
+    --------
+    >>> import ctypes
+    >>> x = np.array([[0, 1], [2, 3]], dtype=np.int32)
+    >>> x
+    array([[0, 1],
+           [2, 3]], dtype=int32)
+    >>> x.ctypes.data
+    31962608 # may vary
+    >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32))
+    <__main__.LP_c_uint object at 0x7ff2fc1fc200> # may vary
+    >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32)).contents
+    c_uint(0)
+    >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint64)).contents
+    c_ulong(4294967296)
+    >>> x.ctypes.shape
+    <numpy.core._internal.c_longlong_Array_2 object at 0x7ff2fc1fce60> # may vary
+    >>> x.ctypes.strides
+    <numpy.core._internal.c_longlong_Array_2 object at 0x7ff2fc1ff320> # may vary
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('data',
+    """Python buffer object pointing to the start of the array's data."""))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype',
+    """
+    Data-type of the array's elements.
+
+    Parameters
+    ----------
+    None
+
+    Returns
+    -------
+    d : numpy dtype object
+
+    See Also
+    --------
+    numpy.dtype
+
+    Examples
+    --------
+    >>> x
+    array([[0, 1],
+           [2, 3]])
+    >>> x.dtype
+    dtype('int32')
+    >>> type(x.dtype)
+    <class 'numpy.dtype[int32]'>
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('imag',
+    """
+    The imaginary part of the array.
+
+    Examples
+    --------
+    >>> x = np.sqrt([1+0j, 0+1j])
+    >>> x.imag
+    array([ 0.        ,  0.70710678])
+    >>> x.imag.dtype
+    dtype('float64')
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('itemsize',
+    """
+    Length of one array element in bytes.
+
+    Examples
+    --------
+    >>> x = np.array([1,2,3], dtype=np.float64)
+    >>> x.itemsize
+    8
+    >>> x = np.array([1,2,3], dtype=np.complex128)
+    >>> x.itemsize
+    16
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('flags',
+    """
+    Information about the memory layout of the array.
+
+    Attributes
+    ----------
+    C_CONTIGUOUS (C)
+        The data is in a single, C-style contiguous segment.
+    F_CONTIGUOUS (F)
+        The data is in a single, Fortran-style contiguous segment.
+    OWNDATA (O)
+        The array owns the memory it uses or borrows it from another object.
+    WRITEABLE (W)
+        The data area can be written to. Setting this to False locks
+        the data, making it read-only. A view (slice, etc.) inherits WRITEABLE
+        from its base array at creation time, but a view of a writeable
+        array may be subsequently locked while the base array remains writeable.
+        (The opposite is not true, in that a view of a locked array may not
+        be made writeable. However, currently, locking a base object does not
+        lock any views that already reference it, so under that circumstance it
+        is possible to alter the contents of a locked array via a previously
+        created writeable view onto it.) Attempting to change a non-writeable
+        array raises a RuntimeError exception.
+    ALIGNED (A)
+        The data and all elements are aligned appropriately for the hardware.
+    WRITEBACKIFCOPY (X)
+        This array is a copy of some other array. The C-API function
+        PyArray_ResolveWritebackIfCopy must be called before deallocating
+        this array, at which point the base array will be updated with the
+        contents of this array.
+    UPDATEIFCOPY (U)
+        (Deprecated, use WRITEBACKIFCOPY) This array is a copy of some other array.
+        When this array is
+        deallocated, the base array will be updated with the contents of
+        this array.
+    FNC
+        F_CONTIGUOUS and not C_CONTIGUOUS.
+    FORC
+        F_CONTIGUOUS or C_CONTIGUOUS (one-segment test).
+    BEHAVED (B)
+        ALIGNED and WRITEABLE.
+    CARRAY (CA)
+        BEHAVED and C_CONTIGUOUS.
+    FARRAY (FA)
+        BEHAVED and F_CONTIGUOUS and not C_CONTIGUOUS.
+
+    Notes
+    -----
+    The `flags` object can be accessed dictionary-like (as in ``a.flags['WRITEABLE']``),
+    or by using lowercased attribute names (as in ``a.flags.writeable``). Short flag
+    names are only supported in dictionary access.
+
+    Only the WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED flags can be
+    changed by the user, via direct assignment to the attribute or dictionary
+    entry, or by calling `ndarray.setflags`.
+
+    The array flags cannot be set arbitrarily:
+
+    - UPDATEIFCOPY can only be set ``False``.
+    - WRITEBACKIFCOPY can only be set ``False``.
+    - ALIGNED can only be set ``True`` if the data is truly aligned.
+    - WRITEABLE can only be set ``True`` if the array owns its own memory
+      or the ultimate owner of the memory exposes a writeable buffer
+      interface or is a string.
+
+    Arrays can be both C-style and Fortran-style contiguous simultaneously.
+    This is clear for 1-dimensional arrays, but can also be true for higher
+    dimensional arrays.
+
+    Even for contiguous arrays a stride for a given dimension
+    ``arr.strides[dim]`` may be *arbitrary* if ``arr.shape[dim] == 1``
+    or the array has no elements.
+    It does *not* generally hold that ``self.strides[-1] == self.itemsize``
+    for C-style contiguous arrays or ``self.strides[0] == self.itemsize`` for
+    Fortran-style contiguous arrays.
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('flat',
+    """
+    A 1-D iterator over the array.
+
+    This is a `numpy.flatiter` instance, which acts similarly to, but is not
+    a subclass of, Python's built-in iterator object.
+
+    See Also
+    --------
+    flatten : Return a copy of the array collapsed into one dimension.
+
+    flatiter
+
+    Examples
+    --------
+    >>> x = np.arange(1, 7).reshape(2, 3)
+    >>> x
+    array([[1, 2, 3],
+           [4, 5, 6]])
+    >>> x.flat[3]
+    4
+    >>> x.T
+    array([[1, 4],
+           [2, 5],
+           [3, 6]])
+    >>> x.T.flat[3]
+    5
+    >>> type(x.flat)
+    <class 'numpy.flatiter'>
+
+    An assignment example:
+
+    >>> x.flat = 3; x
+    array([[3, 3, 3],
+           [3, 3, 3]])
+    >>> x.flat[[1,4]] = 1; x
+    array([[3, 1, 3],
+           [3, 1, 3]])
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('nbytes',
+    """
+    Total bytes consumed by the elements of the array.
+
+    Notes
+    -----
+    Does not include memory consumed by non-element attributes of the
+    array object.
+
+    Examples
+    --------
+    >>> x = np.zeros((3,5,2), dtype=np.complex128)
+    >>> x.nbytes
+    480
+    >>> np.prod(x.shape) * x.itemsize
+    480
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('ndim',
+    """
+    Number of array dimensions.
+
+    Examples
+    --------
+    >>> x = np.array([1, 2, 3])
+    >>> x.ndim
+    1
+    >>> y = np.zeros((2, 3, 4))
+    >>> y.ndim
+    3
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('real',
+    """
+    The real part of the array.
+
+    Examples
+    --------
+    >>> x = np.sqrt([1+0j, 0+1j])
+    >>> x.real
+    array([ 1.        ,  0.70710678])
+    >>> x.real.dtype
+    dtype('float64')
+
+    See Also
+    --------
+    numpy.real : equivalent function
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('shape',
+    """
+    Tuple of array dimensions.
+
+    The shape property is usually used to get the current shape of an array,
+    but may also be used to reshape the array in-place by assigning a tuple of
+    array dimensions to it. As with `numpy.reshape`, one of the new shape
+    dimensions can be -1, in which case its value is inferred from the size of
+    the array and the remaining dimensions. Reshaping an array in-place will
+    fail if a copy is required.
+
+    Examples
+    --------
+    >>> x = np.array([1, 2, 3, 4])
+    >>> x.shape
+    (4,)
+    >>> y = np.zeros((2, 3, 4))
+    >>> y.shape
+    (2, 3, 4)
+    >>> y.shape = (3, 8)
+    >>> y
+    array([[ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
+           [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
+           [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.]])
+    >>> y.shape = (3, 6)
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+    ValueError: total size of new array must be unchanged
+    >>> np.zeros((4,2))[::2].shape = (-1,)
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+    AttributeError: Incompatible shape for in-place modification. Use
+    `.reshape()` to make a copy with the desired shape.
+
+    See Also
+    --------
+    numpy.reshape : similar function
+    ndarray.reshape : similar method
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('size',
+    """
+    Number of elements in the array.
+
+    Equal to ``np.prod(a.shape)``, i.e., the product of the array's
+    dimensions.
+
+    Notes
+    -----
+    `a.size` returns a standard arbitrary precision Python integer. This
+    may not be the case with other methods of obtaining the same value
+    (like the suggested ``np.prod(a.shape)``, which returns an instance
+    of ``np.int_``), and may be relevant if the value is used further in
+    calculations that may overflow a fixed size integer type.
+
+    Examples
+    --------
+    >>> x = np.zeros((3, 5, 2), dtype=np.complex128)
+    >>> x.size
+    30
+    >>> np.prod(x.shape)
+    30
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('strides',
+    """
+    Tuple of bytes to step in each dimension when traversing an array.
+
+    The byte offset of element ``(i[0], i[1], ..., i[n])`` in an array `a`
+    is::
+
+        offset = sum(np.array(i) * a.strides)
+
+    A more detailed explanation of strides can be found in the
+    "ndarray.rst" file in the NumPy reference guide.
+
+    Notes
+    -----
+    Imagine an array of 32-bit integers (each 4 bytes)::
+
+      x = np.array([[0, 1, 2, 3, 4],
+                    [5, 6, 7, 8, 9]], dtype=np.int32)
+
+    This array is stored in memory as 40 bytes, one after the other
+    (known as a contiguous block of memory). The strides of an array tell
+    us how many bytes we have to skip in memory to move to the next position
+    along a certain axis. For example, we have to skip 4 bytes (1 value) to
+    move to the next column, but 20 bytes (5 values) to get to the same
+    position in the next row. As such, the strides for the array `x` will be
+    ``(20, 4)``.
+
+    See Also
+    --------
+    numpy.lib.stride_tricks.as_strided
+
+    Examples
+    --------
+    >>> y = np.reshape(np.arange(2*3*4), (2,3,4))
+    >>> y
+    array([[[ 0,  1,  2,  3],
+            [ 4,  5,  6,  7],
+            [ 8,  9, 10, 11]],
+           [[12, 13, 14, 15],
+            [16, 17, 18, 19],
+            [20, 21, 22, 23]]])
+    >>> y.strides
+    (48, 16, 4)
+    >>> y[1,1,1]
+    17
+    >>> offset=sum(y.strides * np.array((1,1,1)))
+    >>> offset/y.itemsize
+    17
+
+    >>> x = np.reshape(np.arange(5*6*7*8), (5,6,7,8)).transpose(2,3,1,0)
+    >>> x.strides
+    (32, 4, 224, 1344)
+    >>> i = np.array([3,5,2,2])
+    >>> offset = sum(i * x.strides)
+    >>> x[3,5,2,2]
+    813
+    >>> offset / x.itemsize
+    813
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('T',
+    """
+    The transposed array.
+
+    Same as ``self.transpose()``.
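+
+    For an n-dimensional array, ``.T`` reverses the order of the axes,
+    e.g.:
+
+    >>> np.arange(24).reshape(2, 3, 4).T.shape
+    (4, 3, 2)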
+ + Examples + -------- + >>> x = np.array([[1.,2.],[3.,4.]]) + >>> x + array([[ 1., 2.], + [ 3., 4.]]) + >>> x.T + array([[ 1., 3.], + [ 2., 4.]]) + >>> x = np.array([1.,2.,3.,4.]) + >>> x + array([ 1., 2., 3., 4.]) + >>> x.T + array([ 1., 2., 3., 4.]) + + See Also + -------- + transpose + + """)) + + +############################################################################## +# +# ndarray methods +# +############################################################################## + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__', + """ a.__array__([dtype], /) -> reference if type unchanged, copy otherwise. + + Returns either a new reference to self if dtype is not given or a new array + of provided data type if dtype is different from the current dtype of the + array. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_prepare__', + """a.__array_prepare__(obj) -> Object of same type as ndarray object obj. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_wrap__', + """a.__array_wrap__(obj) -> Object of same type as ndarray object a. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__', + """a.__copy__() + + Used if :func:`copy.copy` is called on an array. Returns a copy of the array. + + Equivalent to ``a.copy(order='K')``. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__', + """a.__deepcopy__(memo, /) -> Deep copy of array. + + Used if :func:`copy.deepcopy` is called on an array. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__reduce__', + """a.__reduce__() + + For pickling. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__', + """a.__setstate__(state, /) + + For unpickling. + + The `state` argument must be a sequence that contains the following + elements: + + Parameters + ---------- + version : int + optional pickle version. If omitted defaults to 0. + shape : tuple + dtype : data-type + isFortran : bool + rawdata : string or list + a binary string with the data (or a list if 'a' is an object array) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('all', + """ + a.all(axis=None, out=None, keepdims=False, *, where=True) + + Returns True if all elements evaluate to True. + + Refer to `numpy.all` for full documentation. + + See Also + -------- + numpy.all : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('any', + """ + a.any(axis=None, out=None, keepdims=False, *, where=True) + + Returns True if any of the elements of `a` evaluate to True. + + Refer to `numpy.any` for full documentation. + + See Also + -------- + numpy.any : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('argmax', + """ + a.argmax(axis=None, out=None) + + Return indices of the maximum values along the given axis. + + Refer to `numpy.argmax` for full documentation. + + See Also + -------- + numpy.argmax : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin', + """ + a.argmin(axis=None, out=None) + + Return indices of the minimum values along the given axis. + + Refer to `numpy.argmin` for detailed documentation. + + See Also + -------- + numpy.argmin : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort', + """ + a.argsort(axis=-1, kind=None, order=None) + + Returns the indices that would sort this array. + + Refer to `numpy.argsort` for full documentation. 
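+
+ For instance, the returned indices sort the array when used as a fancy
+ index (a minimal doctest-style sketch):
+
+ >>> a = np.array([3, 1, 2])
+ >>> idx = a.argsort()
+ >>> idx
+ array([1, 2, 0])
+ >>> a[idx]
+ array([1, 2, 3])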
+ + See Also + -------- + numpy.argsort : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('argpartition', + """ + a.argpartition(kth, axis=-1, kind='introselect', order=None) + + Returns the indices that would partition this array. + + Refer to `numpy.argpartition` for full documentation. + + .. versionadded:: 1.8.0 + + See Also + -------- + numpy.argpartition : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('astype', + """ + a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True) + + Copy of the array, cast to a specified type. + + Parameters + ---------- + dtype : str or dtype + Typecode or data-type to which the array is cast. + order : {'C', 'F', 'A', 'K'}, optional + Controls the memory layout order of the result. + 'C' means C order, 'F' means Fortran order, 'A' + means 'F' order if all the arrays are Fortran contiguous, + 'C' order otherwise, and 'K' means as close to the + order the array elements appear in memory as possible. + Default is 'K'. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. Defaults to 'unsafe' + for backwards compatibility. + + * 'no' means the data types should not be cast at all. + * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. + subok : bool, optional + If True, then sub-classes will be passed-through (default), otherwise + the returned array will be forced to be a base-class array. + copy : bool, optional + By default, astype always returns a newly allocated array. If this + is set to false, and the `dtype`, `order`, and `subok` + requirements are satisfied, the input array is returned instead + of a copy. + + Returns + ------- + arr_t : ndarray + Unless `copy` is False and the other conditions for returning the input + array are satisfied (see description for `copy` input parameter), `arr_t` + is a new array of the same shape as the input array, with dtype, order + given by `dtype`, `order`. + + Notes + ----- + .. versionchanged:: 1.17.0 + Casting between a simple data type and a structured one is possible only + for "unsafe" casting. Casting to multiple fields is allowed, but + casting from multiple fields is not. + + .. versionchanged:: 1.9.0 + Casting from numeric to string types in 'safe' casting mode requires + that the string dtype length is long enough to store the max + integer/float value converted. + + Raises + ------ + ComplexWarning + When casting from complex to float or int. To avoid this, + one should use ``a.real.astype(t)``. + + Examples + -------- + >>> x = np.array([1, 2, 2.5]) + >>> x + array([1. , 2. , 2.5]) + + >>> x.astype(int) + array([1, 2, 2]) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap', + """ + a.byteswap(inplace=False) + + Swap the bytes of the array elements + + Toggle between low-endian and big-endian data representation by + returning a byteswapped array, optionally swapped in-place. + Arrays of byte-strings are not swapped. The real and imaginary + parts of a complex number are swapped individually. + + Parameters + ---------- + inplace : bool, optional + If ``True``, swap bytes in-place, default is ``False``. + + Returns + ------- + out : ndarray + The byteswapped array. If `inplace` is ``True``, this is + a view to self. 
+ + Examples + -------- + >>> A = np.array([1, 256, 8755], dtype=np.int16) + >>> list(map(hex, A)) + ['0x1', '0x100', '0x2233'] + >>> A.byteswap(inplace=True) + array([ 256, 1, 13090], dtype=int16) + >>> list(map(hex, A)) + ['0x100', '0x1', '0x3322'] + + Arrays of byte-strings are not swapped + + >>> A = np.array([b'ceg', b'fac']) + >>> A.byteswap() + array([b'ceg', b'fac'], dtype='|S3') + + ``A.newbyteorder().byteswap()`` produces an array with the same values + but different representation in memory + + >>> A = np.array([1, 2, 3]) + >>> A.view(np.uint8) + array([1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, + 0, 0], dtype=uint8) + >>> A.newbyteorder().byteswap(inplace=True) + array([1, 2, 3]) + >>> A.view(np.uint8) + array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, + 0, 3], dtype=uint8) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('choose', + """ + a.choose(choices, out=None, mode='raise') + + Use an index array to construct a new array from a set of choices. + + Refer to `numpy.choose` for full documentation. + + See Also + -------- + numpy.choose : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('clip', + """ + a.clip(min=None, max=None, out=None, **kwargs) + + Return an array whose values are limited to ``[min, max]``. + One of max or min must be given. + + Refer to `numpy.clip` for full documentation. + + See Also + -------- + numpy.clip : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('compress', + """ + a.compress(condition, axis=None, out=None) + + Return selected slices of this array along given axis. + + Refer to `numpy.compress` for full documentation. + + See Also + -------- + numpy.compress : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('conj', + """ + a.conj() + + Complex-conjugate all elements. + + Refer to `numpy.conjugate` for full documentation. + + See Also + -------- + numpy.conjugate : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('conjugate', + """ + a.conjugate() + + Return the complex conjugate, element-wise. + + Refer to `numpy.conjugate` for full documentation. + + See Also + -------- + numpy.conjugate : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('copy', + """ + a.copy(order='C') + + Return a copy of the array. + + Parameters + ---------- + order : {'C', 'F', 'A', 'K'}, optional + Controls the memory layout of the copy. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, + 'C' otherwise. 'K' means match the layout of `a` as closely + as possible. (Note that this function and :func:`numpy.copy` are very + similar but have different default values for their order= + arguments, and this function always passes sub-classes through.) + + See also + -------- + numpy.copy : Similar function with different default behavior + numpy.copyto + + Notes + ----- + This function is the preferred method for creating an array copy. The + function :func:`numpy.copy` is similar, but it defaults to using order 'K', + and will not pass sub-classes through by default. 
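+
+ A quick way to see the default-order difference (a sketch using a
+ Fortran-ordered input):
+
+ >>> a = np.ones((2, 3), order='F')
+ >>> a.copy().flags['C_CONTIGUOUS'] # ndarray.copy defaults to order='C'
+ True
+ >>> np.copy(a).flags['F_CONTIGUOUS'] # numpy.copy defaults to order='K'
+ True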
+ + Examples + -------- + >>> x = np.array([[1,2,3],[4,5,6]], order='F') + + >>> y = x.copy() + + >>> x.fill(0) + + >>> x + array([[0, 0, 0], + [0, 0, 0]]) + + >>> y + array([[1, 2, 3], + [4, 5, 6]]) + + >>> y.flags['C_CONTIGUOUS'] + True + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('cumprod', + """ + a.cumprod(axis=None, dtype=None, out=None) + + Return the cumulative product of the elements along the given axis. + + Refer to `numpy.cumprod` for full documentation. + + See Also + -------- + numpy.cumprod : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('cumsum', + """ + a.cumsum(axis=None, dtype=None, out=None) + + Return the cumulative sum of the elements along the given axis. + + Refer to `numpy.cumsum` for full documentation. + + See Also + -------- + numpy.cumsum : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('diagonal', + """ + a.diagonal(offset=0, axis1=0, axis2=1) + + Return specified diagonals. In NumPy 1.9 the returned array is a + read-only view instead of a copy as in previous NumPy versions. In + a future version the read-only restriction will be removed. + + Refer to :func:`numpy.diagonal` for full documentation. + + See Also + -------- + numpy.diagonal : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('dot', + """ + a.dot(b, out=None) + + Dot product of two arrays. + + Refer to `numpy.dot` for full documentation. + + See Also + -------- + numpy.dot : equivalent function + + Examples + -------- + >>> a = np.eye(2) + >>> b = np.ones((2, 2)) * 2 + >>> a.dot(b) + array([[2., 2.], + [2., 2.]]) + + This array method can be conveniently chained: + + >>> a.dot(b).dot(b) + array([[8., 8.], + [8., 8.]]) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('dump', + """a.dump(file) + + Dump a pickle of the array to the specified file. + The array can be read back with pickle.load or numpy.load. + + Parameters + ---------- + file : str or Path + A string naming the dump file. + + .. versionchanged:: 1.17.0 + `pathlib.Path` objects are now accepted. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('dumps', + """ + a.dumps() + + Returns the pickle of the array as a string. + pickle.loads or numpy.loads will convert the string back to an array. + + Parameters + ---------- + None + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('fill', + """ + a.fill(value) + + Fill the array with a scalar value. + + Parameters + ---------- + value : scalar + All elements of `a` will be assigned this value. + + Examples + -------- + >>> a = np.array([1, 2]) + >>> a.fill(0) + >>> a + array([0, 0]) + >>> a = np.empty(2) + >>> a.fill(1) + >>> a + array([1., 1.]) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('flatten', + """ + a.flatten(order='C') + + Return a copy of the array collapsed into one dimension. + + Parameters + ---------- + order : {'C', 'F', 'A', 'K'}, optional + 'C' means to flatten in row-major (C-style) order. + 'F' means to flatten in column-major (Fortran- + style) order. 'A' means to flatten in column-major + order if `a` is Fortran *contiguous* in memory, + row-major order otherwise. 'K' means to flatten + `a` in the order the elements occur in memory. + The default is 'C'. + + Returns + ------- + y : ndarray + A copy of the input array, flattened to one dimension. + + See Also + -------- + ravel : Return a flattened array. + flat : A 1-D flat iterator over the array. 
+ + Examples + -------- + >>> a = np.array([[1,2], [3,4]]) + >>> a.flatten() + array([1, 2, 3, 4]) + >>> a.flatten('F') + array([1, 3, 2, 4]) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('getfield', + """ + a.getfield(dtype, offset=0) + + Returns a field of the given array as a certain type. + + A field is a view of the array data with a given data-type. The values in + the view are determined by the given type and the offset into the current + array in bytes. The offset needs to be such that the view dtype fits in the + array dtype; for example an array of dtype complex128 has 16-byte elements. + If taking a view with a 32-bit integer (4 bytes), the offset needs to be + between 0 and 12 bytes. + + Parameters + ---------- + dtype : str or dtype + The data type of the view. The dtype size of the view can not be larger + than that of the array itself. + offset : int + Number of bytes to skip before beginning the element view. + + Examples + -------- + >>> x = np.diag([1.+1.j]*2) + >>> x[1, 1] = 2 + 4.j + >>> x + array([[1.+1.j, 0.+0.j], + [0.+0.j, 2.+4.j]]) + >>> x.getfield(np.float64) + array([[1., 0.], + [0., 2.]]) + + By choosing an offset of 8 bytes we can select the complex part of the + array for our view: + + >>> x.getfield(np.float64, offset=8) + array([[1., 0.], + [0., 4.]]) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('item', + """ + a.item(*args) + + Copy an element of an array to a standard Python scalar and return it. + + Parameters + ---------- + \\*args : Arguments (variable number and type) + + * none: in this case, the method only works for arrays + with one element (`a.size == 1`), which element is + copied into a standard Python scalar object and returned. + + * int_type: this argument is interpreted as a flat index into + the array, specifying which element to copy and return. + + * tuple of int_types: functions as does a single int_type argument, + except that the argument is interpreted as an nd-index into the + array. + + Returns + ------- + z : Standard Python scalar object + A copy of the specified element of the array as a suitable + Python scalar + + Notes + ----- + When the data type of `a` is longdouble or clongdouble, item() returns + a scalar array object because there is no available Python scalar that + would not lose information. Void arrays return a buffer object for item(), + unless fields are defined, in which case a tuple is returned. + + `item` is very similar to a[args], except, instead of an array scalar, + a standard Python scalar is returned. This can be useful for speeding up + access to elements of the array and doing arithmetic on elements of the + array using Python's optimized math. + + Examples + -------- + >>> np.random.seed(123) + >>> x = np.random.randint(9, size=(3, 3)) + >>> x + array([[2, 2, 6], + [1, 3, 6], + [1, 0, 1]]) + >>> x.item(3) + 1 + >>> x.item(7) + 0 + >>> x.item((0, 1)) + 2 + >>> x.item((2, 2)) + 1 + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('itemset', + """ + a.itemset(*args) + + Insert scalar into an array (scalar is cast to array's dtype, if possible) + + There must be at least 1 argument, and define the last argument + as *item*. Then, ``a.itemset(*args)`` is equivalent to but faster + than ``a[args] = item``. The item should be a scalar value and `args` + must select a single item in the array `a`. + + Parameters + ---------- + \\*args : Arguments + If one argument: a scalar, only used in case `a` is of size 1. 
+ If two arguments: the last argument is the value to be set
+ and must be a scalar, the first argument specifies a single array
+ element location. It is either an int or a tuple.
+
+ Notes
+ -----
+ Compared to indexing syntax, `itemset` provides some speed increase
+ for placing a scalar into a particular location in an `ndarray`,
+ if you must do this. However, generally this is discouraged:
+ among other problems, it complicates the appearance of the code.
+ Also, when using `itemset` (and `item`) inside a loop, be sure
+ to assign the methods to a local variable to avoid the attribute
+ look-up at each loop iteration.
+
+ Examples
+ --------
+ >>> np.random.seed(123)
+ >>> x = np.random.randint(9, size=(3, 3))
+ >>> x
+ array([[2, 2, 6],
+ [1, 3, 6],
+ [1, 0, 1]])
+ >>> x.itemset(4, 0)
+ >>> x.itemset((2, 2), 9)
+ >>> x
+ array([[2, 2, 6],
+ [1, 0, 6],
+ [1, 0, 9]])
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('max',
+ """
+ a.max(axis=None, out=None, keepdims=False, initial=<no value>, where=True)
+
+ Return the maximum along a given axis.
+
+ Refer to `numpy.amax` for full documentation.
+
+ See Also
+ --------
+ numpy.amax : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('mean',
+ """
+ a.mean(axis=None, dtype=None, out=None, keepdims=False, *, where=True)
+
+ Returns the average of the array elements along given axis.
+
+ Refer to `numpy.mean` for full documentation.
+
+ See Also
+ --------
+ numpy.mean : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('min',
+ """
+ a.min(axis=None, out=None, keepdims=False, initial=<no value>, where=True)
+
+ Return the minimum along a given axis.
+
+ Refer to `numpy.amin` for full documentation.
+
+ See Also
+ --------
+ numpy.amin : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder',
+ """
+ arr.newbyteorder(new_order='S', /)
+
+ Return the array with the same data viewed with a different byte order.
+
+ Equivalent to::
+
+ arr.view(arr.dtype.newbyteorder(new_order))
+
+ Changes are also made in all fields and sub-arrays of the array data
+ type.
+
+ Parameters
+ ----------
+ new_order : string, optional
+ Byte order to force; a value from the byte order specifications
+ below. `new_order` codes can be any of:
+
+ * 'S' - swap dtype from current to opposite endian
+ * {'<', 'little'} - little endian
+ * {'>', 'big'} - big endian
+ * '=' - native order, equivalent to `sys.byteorder`
+ * {'|', 'I'} - ignore (no change to byte order)
+
+ The default value ('S') results in swapping the current
+ byte order.
+
+ Returns
+ -------
+ new_arr : array
+ New array object with the dtype reflecting given change to the
+ byte order.
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero',
+ """
+ a.nonzero()
+
+ Return the indices of the elements that are non-zero.
+
+ Refer to `numpy.nonzero` for full documentation.
+
+ See Also
+ --------
+ numpy.nonzero : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('prod',
+ """
+ a.prod(axis=None, dtype=None, out=None, keepdims=False, initial=1, where=True)
+
+ Return the product of the array elements over the given axis.
+
+ Refer to `numpy.prod` for full documentation.
+
+ See Also
+ --------
+ numpy.prod : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('ptp',
+ """
+ a.ptp(axis=None, out=None, keepdims=False)
+
+ Peak to peak (maximum - minimum) value along a given axis.
+ + Refer to `numpy.ptp` for full documentation. + + See Also + -------- + numpy.ptp : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('put', + """ + a.put(indices, values, mode='raise') + + Set ``a.flat[n] = values[n]`` for all `n` in indices. + + Refer to `numpy.put` for full documentation. + + See Also + -------- + numpy.put : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel', + """ + a.ravel([order]) + + Return a flattened array. + + Refer to `numpy.ravel` for full documentation. + + See Also + -------- + numpy.ravel : equivalent function + + ndarray.flat : a flat iterator on the array. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('repeat', + """ + a.repeat(repeats, axis=None) + + Repeat elements of an array. + + Refer to `numpy.repeat` for full documentation. + + See Also + -------- + numpy.repeat : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('reshape', + """ + a.reshape(shape, order='C') + + Returns an array containing the same data with a new shape. + + Refer to `numpy.reshape` for full documentation. + + See Also + -------- + numpy.reshape : equivalent function + + Notes + ----- + Unlike the free function `numpy.reshape`, this method on `ndarray` allows + the elements of the shape parameter to be passed in as separate arguments. + For example, ``a.reshape(10, 11)`` is equivalent to + ``a.reshape((10, 11))``. + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('resize', + """ + a.resize(new_shape, refcheck=True) + + Change shape and size of array in-place. + + Parameters + ---------- + new_shape : tuple of ints, or `n` ints + Shape of resized array. + refcheck : bool, optional + If False, reference count will not be checked. Default is True. + + Returns + ------- + None + + Raises + ------ + ValueError + If `a` does not own its own data or references or views to it exist, + and the data memory must be changed. + PyPy only: will always raise if the data memory must be changed, since + there is no reliable way to determine if references or views to it + exist. + + SystemError + If the `order` keyword argument is specified. This behaviour is a + bug in NumPy. + + See Also + -------- + resize : Return a new array with the specified shape. + + Notes + ----- + This reallocates space for the data area if necessary. + + Only contiguous arrays (data elements consecutive in memory) can be + resized. + + The purpose of the reference count check is to make sure you + do not use this array as a buffer for another Python object and then + reallocate the memory. However, reference counts can increase in + other ways so if you are sure that you have not shared the memory + for this array with another Python object, then you may safely set + `refcheck` to False. + + Examples + -------- + Shrinking an array: array is flattened (in the order that the data are + stored in memory), resized, and reshaped: + + >>> a = np.array([[0, 1], [2, 3]], order='C') + >>> a.resize((2, 1)) + >>> a + array([[0], + [1]]) + + >>> a = np.array([[0, 1], [2, 3]], order='F') + >>> a.resize((2, 1)) + >>> a + array([[0], + [2]]) + + Enlarging an array: as above, but missing entries are filled with zeros: + + >>> b = np.array([[0, 1], [2, 3]]) + >>> b.resize(2, 3) # new_shape parameter doesn't have to be a tuple + >>> b + array([[0, 1, 2], + [3, 0, 0]]) + + Referencing an array prevents resizing... + + >>> c = a + >>> a.resize((1, 1)) + Traceback (most recent call last): + ... 
+ ValueError: cannot resize an array that references or is referenced ... + + Unless `refcheck` is False: + + >>> a.resize((1, 1), refcheck=False) + >>> a + array([[0]]) + >>> c + array([[0]]) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('round', + """ + a.round(decimals=0, out=None) + + Return `a` with each element rounded to the given number of decimals. + + Refer to `numpy.around` for full documentation. + + See Also + -------- + numpy.around : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('searchsorted', + """ + a.searchsorted(v, side='left', sorter=None) + + Find indices where elements of v should be inserted in a to maintain order. + + For full documentation, see `numpy.searchsorted` + + See Also + -------- + numpy.searchsorted : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('setfield', + """ + a.setfield(val, dtype, offset=0) + + Put a value into a specified place in a field defined by a data-type. + + Place `val` into `a`'s field defined by `dtype` and beginning `offset` + bytes into the field. + + Parameters + ---------- + val : object + Value to be placed in field. + dtype : dtype object + Data-type of the field in which to place `val`. + offset : int, optional + The number of bytes into the field at which to place `val`. + + Returns + ------- + None + + See Also + -------- + getfield + + Examples + -------- + >>> x = np.eye(3) + >>> x.getfield(np.float64) + array([[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]]) + >>> x.setfield(3, np.int32) + >>> x.getfield(np.int32) + array([[3, 3, 3], + [3, 3, 3], + [3, 3, 3]], dtype=int32) + >>> x + array([[1.0e+000, 1.5e-323, 1.5e-323], + [1.5e-323, 1.0e+000, 1.5e-323], + [1.5e-323, 1.5e-323, 1.0e+000]]) + >>> x.setfield(np.eye(3), np.int32) + >>> x + array([[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]]) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags', + """ + a.setflags(write=None, align=None, uic=None) + + Set array flags WRITEABLE, ALIGNED, (WRITEBACKIFCOPY and UPDATEIFCOPY), + respectively. + + These Boolean-valued flags affect how numpy interprets the memory + area used by `a` (see Notes below). The ALIGNED flag can only + be set to True if the data is actually aligned according to the type. + The WRITEBACKIFCOPY and (deprecated) UPDATEIFCOPY flags can never be set + to True. The flag WRITEABLE can only be set to True if the array owns its + own memory, or the ultimate owner of the memory exposes a writeable buffer + interface, or is a string. (The exception for string is made so that + unpickling can be done without copying memory.) + + Parameters + ---------- + write : bool, optional + Describes whether or not `a` can be written to. + align : bool, optional + Describes whether or not `a` is aligned properly for its type. + uic : bool, optional + Describes whether or not `a` is a copy of another "base" array. + + Notes + ----- + Array flags provide information about how the memory area used + for the array is to be interpreted. There are 7 Boolean flags + in use, only four of which can be changed by the user: + WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED. + + WRITEABLE (W) the data area can be written to; + + ALIGNED (A) the data and strides are aligned appropriately for the hardware + (as determined by the compiler); + + UPDATEIFCOPY (U) (deprecated), replaced by WRITEBACKIFCOPY; + + WRITEBACKIFCOPY (X) this array is a copy of some other array (referenced + by .base). 
When the C-API function PyArray_ResolveWritebackIfCopy is
+ called, the base array will be updated with the contents of this array.
+
+ All flags can be accessed using the single (upper case) letter as well
+ as the full name.
+
+ Examples
+ --------
+ >>> y = np.array([[3, 1, 7],
+ ... [2, 0, 0],
+ ... [8, 5, 9]])
+ >>> y
+ array([[3, 1, 7],
+ [2, 0, 0],
+ [8, 5, 9]])
+ >>> y.flags
+ C_CONTIGUOUS : True
+ F_CONTIGUOUS : False
+ OWNDATA : True
+ WRITEABLE : True
+ ALIGNED : True
+ WRITEBACKIFCOPY : False
+ UPDATEIFCOPY : False
+ >>> y.setflags(write=0, align=0)
+ >>> y.flags
+ C_CONTIGUOUS : True
+ F_CONTIGUOUS : False
+ OWNDATA : True
+ WRITEABLE : False
+ ALIGNED : False
+ WRITEBACKIFCOPY : False
+ UPDATEIFCOPY : False
+ >>> y.setflags(uic=1)
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ ValueError: cannot set WRITEBACKIFCOPY flag to True
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('sort',
+ """
+ a.sort(axis=-1, kind=None, order=None)
+
+ Sort an array in-place. Refer to `numpy.sort` for full documentation.
+
+ Parameters
+ ----------
+ axis : int, optional
+ Axis along which to sort. Default is -1, which means sort along the
+ last axis.
+ kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
+ Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
+ and 'mergesort' use timsort under the covers and, in general, the
+ actual implementation will vary with datatype. The 'mergesort' option
+ is retained for backwards compatibility.
+
+ .. versionchanged:: 1.15.0
+ The 'stable' option was added.
+
+ order : str or list of str, optional
+ When `a` is an array with fields defined, this argument specifies
+ which fields to compare first, second, etc. A single field can
+ be specified as a string, and not all fields need be specified,
+ but unspecified fields will still be used, in the order in which
+ they come up in the dtype, to break ties.
+
+ See Also
+ --------
+ numpy.sort : Return a sorted copy of an array.
+ numpy.argsort : Indirect sort.
+ numpy.lexsort : Indirect stable sort on multiple keys.
+ numpy.searchsorted : Find elements in sorted array.
+ numpy.partition: Partial sort.
+
+ Notes
+ -----
+ See `numpy.sort` for notes on the different sorting algorithms.
+
+ Examples
+ --------
+ >>> a = np.array([[1,4], [3,1]])
+ >>> a.sort(axis=1)
+ >>> a
+ array([[1, 4],
+ [1, 3]])
+ >>> a.sort(axis=0)
+ >>> a
+ array([[1, 3],
+ [1, 4]])
+
+ Use the `order` keyword to specify a field to use when sorting a
+ structured array:
+
+ >>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)])
+ >>> a.sort(order='y')
+ >>> a
+ array([(b'c', 1), (b'a', 2)],
+ dtype=[('x', 'S1'), ('y', '<i8')])
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('partition',
+ """
+ a.partition(kth, axis=-1, kind='introselect', order=None)
+
+ Rearranges the elements in the array in such a way that the value of the
+ element in kth position is in the position it would be in a sorted array.
+ All elements smaller than the kth element are moved before this element and
+ all equal or greater are moved behind it. The ordering of the elements in
+ the two partitions is undefined.
+
+ .. versionadded:: 1.8.0
+
+ Parameters
+ ----------
+ kth : int or sequence of ints
+ Element index to partition by. The kth element value will be in its
+ final sorted position and all smaller elements will be moved before it
+ and all equal or greater elements behind it.
+ The order of all elements in the partitions is undefined.
+ If provided with a sequence of kth it will partition all elements
+ indexed by kth of them into their sorted position at once.
+ axis : int, optional
+ Axis along which to sort. Default is -1, which means sort along the
+ last axis.
+ kind : {'introselect'}, optional
+ Selection algorithm. Default is 'introselect'.
+ order : str or list of str, optional
+ When `a` is an array with fields defined, this argument specifies
+ which fields to compare first, second, etc. A single field can
+ be specified as a string, and not all fields need to be specified,
+ but unspecified fields will still be used, in the order in which
+ they come up in the dtype, to break ties.
+
+ See Also
+ --------
+ numpy.partition : Return a partitioned copy of an array.
+ argpartition : Indirect partition.
+ sort : Full sort.
+
+ Notes
+ -----
+ See ``np.partition`` for notes on the different algorithms.
+
+ Examples
+ --------
+ >>> a = np.array([3, 4, 2, 1])
+ >>> a.partition(3)
+ >>> a
+ array([2, 1, 3, 4])
+
+ >>> a.partition((1, 3))
+ >>> a
+ array([1, 2, 3, 4])
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('squeeze',
+ """
+ a.squeeze(axis=None)
+
+ Remove axes of length one from `a`.
+
+ Refer to `numpy.squeeze` for full documentation.
+
+ See Also
+ --------
+ numpy.squeeze : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('std',
+ """
+ a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True)
+
+ Returns the standard deviation of the array elements along given axis.
+
+ Refer to `numpy.std` for full documentation.
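+
+ For instance, ``ddof`` selects between the population estimate
+ (``ddof=0``, the default) and the sample estimate (``ddof=1``); a
+ minimal sketch:
+
+ >>> a = np.array([1., 2., 3., 4.])
+ >>> a.std() # population std: sqrt(1.25)
+ 1.1180339887498949
+ >>> a.std(ddof=1) # sample std: sqrt(5/3)
+ 1.2909944487358056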
+
+ See Also
+ --------
+ numpy.std : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('sum',
+ """
+ a.sum(axis=None, dtype=None, out=None, keepdims=False, initial=0, where=True)
+
+ Return the sum of the array elements over the given axis.
+
+ Refer to `numpy.sum` for full documentation.
+
+ See Also
+ --------
+ numpy.sum : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('swapaxes',
+ """
+ a.swapaxes(axis1, axis2)
+
+ Return a view of the array with `axis1` and `axis2` interchanged.
+
+ Refer to `numpy.swapaxes` for full documentation.
+
+ See Also
+ --------
+ numpy.swapaxes : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('take',
+ """
+ a.take(indices, axis=None, out=None, mode='raise')
+
+ Return an array formed from the elements of `a` at the given indices.
+
+ Refer to `numpy.take` for full documentation.
+
+ See Also
+ --------
+ numpy.take : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile',
+ """
+ a.tofile(fid, sep="", format="%s")
+
+ Write array to a file as text or binary (default).
+
+ Data is always written in 'C' order, independent of the order of `a`.
+ The data produced by this method can be recovered using the function
+ fromfile().
+
+ Parameters
+ ----------
+ fid : file or str or Path
+ An open file object, or a string containing a filename.
+
+ .. versionchanged:: 1.17.0
+ `pathlib.Path` objects are now accepted.
+
+ sep : str
+ Separator between array items for text output.
+ If "" (empty), a binary file is written, equivalent to
+ ``file.write(a.tobytes())``.
+ format : str
+ Format string for text file output.
+ Each entry in the array is formatted to text by first converting
+ it to the closest Python type, and then using "format" % item.
+
+ Notes
+ -----
+ This is a convenience function for quick storage of array data.
+ Information on endianness and precision is lost, so this method is not a
+ good choice for files intended to archive data or transport data between
+ machines with different endianness. Some of these problems can be overcome
+ by outputting the data as text files, at the expense of speed and file
+ size.
+
+ When fid is a file object, array contents are directly written to the
+ file, bypassing the file object's ``write`` method. As a result, tofile
+ cannot be used with file objects supporting compression (e.g., GzipFile)
+ or file-like objects that do not support ``fileno()`` (e.g., BytesIO).
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist',
+ """
+ a.tolist()
+
+ Return the array as an ``a.ndim``-levels deep nested list of Python scalars.
+
+ Return a copy of the array data as a (nested) Python list.
+ Data items are converted to the nearest compatible builtin Python type, via
+ the `~numpy.ndarray.item` function.
+
+ If ``a.ndim`` is 0, then since the depth of the nested list is 0, it will
+ not be a list at all, but a simple Python scalar.
+
+ Parameters
+ ----------
+ none
+
+ Returns
+ -------
+ y : object, or list of object, or list of list of object, or ...
+ The possibly nested list of array elements.
+
+ Notes
+ -----
+ The array may be recreated via ``a = np.array(a.tolist())``, although this
+ may sometimes lose precision.
+
+ Examples
+ --------
+ For a 1D array, ``a.tolist()`` is almost the same as ``list(a)``,
+ except that ``tolist`` changes numpy scalars to Python scalars:
+
+ >>> a = np.uint32([1, 2])
+ >>> a_list = list(a)
+ >>> a_list
+ [1, 2]
+ >>> type(a_list[0])
+ <class 'numpy.uint32'>
+ >>> a_tolist = a.tolist()
+ >>> a_tolist
+ [1, 2]
+ >>> type(a_tolist[0])
+ <class 'int'>
+
+ Additionally, for a 2D array, ``tolist`` applies recursively:
+
+ >>> a = np.array([[1, 2], [3, 4]])
+ >>> list(a)
+ [array([1, 2]), array([3, 4])]
+ >>> a.tolist()
+ [[1, 2], [3, 4]]
+
+ The base case for this recursion is a 0D array:
+
+ >>> a = np.array(1)
+ >>> list(a)
+ Traceback (most recent call last):
+ ...
+ TypeError: iteration over a 0-d array
+ >>> a.tolist()
+ 1
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('tobytes', """
+ a.tobytes(order='C')
+
+ Construct Python bytes containing the raw data bytes in the array.
+
+ Constructs Python bytes showing a copy of the raw contents of
+ data memory. The bytes object is produced in C-order by default.
+ This behavior is controlled by the ``order`` parameter.
+
+ .. versionadded:: 1.9.0
+
+ Parameters
+ ----------
+ order : {'C', 'F', 'A'}, optional
+ Controls the memory layout of the bytes object. 'C' means C-order,
+ 'F' means F-order, 'A' (short for *Any*) means 'F' if `a` is
+ Fortran contiguous, 'C' otherwise. Default is 'C'.
+
+ Returns
+ -------
+ s : bytes
+ Python bytes exhibiting a copy of `a`'s raw data.
+
+ Examples
+ --------
+ >>> x = np.array([[0, 1], [2, 3]], dtype='<u2')
+ >>> x.tobytes()
+ b'\\x00\\x00\\x01\\x00\\x02\\x00\\x03\\x00'
+ >>> x.tobytes('C') == x.tobytes()
+ True
+ >>> x.tobytes('F')
+ b'\\x00\\x00\\x02\\x00\\x01\\x00\\x03\\x00'
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('tostring', r"""
+ a.tostring(order='C')
+
+ A compatibility alias for `tobytes`, with exactly the same behavior.
+
+ Despite its name, it returns `bytes` not `str`\ s.
+
+ .. deprecated:: 1.19.0
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('trace',
+ """
+ a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None)
+
+ Return the sum along diagonals of the array.
+
+ Refer to `numpy.trace` for full documentation.
+
+ See Also
+ --------
+ numpy.trace : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose',
+ """
+ a.transpose(*axes)
+
+ Returns a view of the array with axes transposed.
+
+ For a 1-D array this has no effect, as a transposed vector is simply the
+ same vector. To convert a 1-D array into a 2D column vector, an additional
+ dimension must be added. `np.atleast_2d(a).T` achieves this, as does
+ `a[:, np.newaxis]`.
+ For a 2-D array, this is a standard matrix transpose.
+ For an n-D array, if axes are given, their order indicates how the
+ axes are permuted (see Examples). If axes are not provided and
+ ``a.shape = (i[0], i[1], ... i[n-2], i[n-1])``, then
+ ``a.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0])``.
+
+ Parameters
+ ----------
+ axes : None, tuple of ints, or `n` ints
+
+ * None or no argument: reverses the order of the axes.
+
+ * tuple of ints: `i` in the `j`-th place in the tuple means `a`'s
+ `i`-th axis becomes `a.transpose()`'s `j`-th axis.
+
+ * `n` ints: same as an n-tuple of the same ints (this form is
+ intended simply as a "convenience" alternative to the tuple form)
+
+ Returns
+ -------
+ out : ndarray
+ View of `a`, with axes suitably permuted.
+
+ See Also
+ --------
+ transpose : Equivalent function
+ ndarray.T : Array property returning the array transposed.
+ ndarray.reshape : Give a new shape to an array without changing its data. + + Examples + -------- + >>> a = np.array([[1, 2], [3, 4]]) + >>> a + array([[1, 2], + [3, 4]]) + >>> a.transpose() + array([[1, 3], + [2, 4]]) + >>> a.transpose((1, 0)) + array([[1, 3], + [2, 4]]) + >>> a.transpose(1, 0) + array([[1, 3], + [2, 4]]) + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('var', + """ + a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True) + + Returns the variance of the array elements, along given axis. + + Refer to `numpy.var` for full documentation. + + See Also + -------- + numpy.var : equivalent function + + """)) + + +add_newdoc('numpy.core.multiarray', 'ndarray', ('view', + """ + a.view([dtype][, type]) + + New view of array with the same data. + + .. note:: + Passing None for ``dtype`` is different from omitting the parameter, + since the former invokes ``dtype(None)`` which is an alias for + ``dtype('float_')``. + + Parameters + ---------- + dtype : data-type or ndarray sub-class, optional + Data-type descriptor of the returned view, e.g., float32 or int16. + Omitting it results in the view having the same data-type as `a`. + This argument can also be specified as an ndarray sub-class, which + then specifies the type of the returned object (this is equivalent to + setting the ``type`` parameter). + type : Python type, optional + Type of the returned view, e.g., ndarray or matrix. Again, omission + of the parameter results in type preservation. + + Notes + ----- + ``a.view()`` is used two different ways: + + ``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view + of the array's memory with a different data-type. This can cause a + reinterpretation of the bytes of memory. + + ``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just + returns an instance of `ndarray_subclass` that looks at the same array + (same shape, dtype, etc.) This does not cause a reinterpretation of the + memory. + + For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of + bytes per entry than the previous dtype (for example, converting a + regular array to a structured array), then the behavior of the view + cannot be predicted just from the superficial appearance of ``a`` (shown + by ``print(a)``). It also depends on exactly how ``a`` is stored in + memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus + defined as a slice or transpose, etc., the view may give different + results. 
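+
+ As a minimal sketch of that point (assuming a little-endian machine), a
+ size-changing view of a C-contiguous array reinterprets adjacent pairs
+ of ``int16`` values as single ``int32`` values:
+
+ >>> a = np.arange(4, dtype=np.int16) # C-contiguous: [0, 1, 2, 3]
+ >>> a.view(np.int32) # bytes 00 00 01 00 | 02 00 03 00
+ array([ 65536, 196610])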
+
+
+ Examples
+ --------
+ >>> x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)])
+
+ Viewing array data using a different type and dtype:
+
+ >>> y = x.view(dtype=np.int16, type=np.matrix)
+ >>> y
+ matrix([[513]], dtype=int16)
+ >>> print(type(y))
+ <class 'numpy.matrix'>
+
+ Creating a view on a structured array so it can be used in calculations
+
+ >>> x = np.array([(1, 2),(3,4)], dtype=[('a', np.int8), ('b', np.int8)])
+ >>> xv = x.view(dtype=np.int8).reshape(-1,2)
+ >>> xv
+ array([[1, 2],
+ [3, 4]], dtype=int8)
+ >>> xv.mean(0)
+ array([2., 3.])
+
+ Making changes to the view changes the underlying array
+
+ >>> xv[0,1] = 20
+ >>> x
+ array([(1, 20), (3, 4)], dtype=[('a', 'i1'), ('b', 'i1')])
+
+ Using a view to convert an array to a recarray:
+
+ >>> z = x.view(np.recarray)
+ >>> z.a
+ array([1, 3], dtype=int8)
+
+ Views share data:
+
+ >>> x[0] = (9, 10)
+ >>> z[0]
+ (9, 10)
+
+ Views that change the dtype size (bytes per entry) should normally be
+ avoided on arrays defined by slices, transposes, fortran-ordering, etc.:
+
+ >>> x = np.array([[1,2,3],[4,5,6]], dtype=np.int16)
+ >>> y = x[:, 0:2]
+ >>> y
+ array([[1, 2],
+ [4, 5]], dtype=int16)
+ >>> y.view(dtype=[('width', np.int16), ('length', np.int16)])
+ Traceback (most recent call last):
+ ...
+ ValueError: To change to a dtype of a different size, the array must be C-contiguous
+ >>> z = y.copy()
+ >>> z.view(dtype=[('width', np.int16), ('length', np.int16)])
+ array([[(1, 2)],
+ [(4, 5)]], dtype=[('width', '<i2'), ('length', '<i2')])
+
+ """))
+
+
+##############################################################################
+#
+# umath functions
+#
+##############################################################################
+
+add_newdoc('numpy.core.umath', 'frompyfunc',
+ """
+ frompyfunc(func, /, nin, nout, *[, identity])
+
+ Takes an arbitrary Python function and returns a NumPy ufunc.
+
+ Can be used, for example, to add broadcasting to a built-in Python
+ function (see Examples section).
+
+ Parameters
+ ----------
+ func : Python function object
+ An arbitrary Python function.
+ nin : int
+ The number of input arguments.
+ nout : int
+ The number of objects returned by `func`.
+ identity : object, optional
+ The value to use for the `~numpy.ufunc.identity` attribute of the
+ resulting ufunc. If specified, this is equivalent to setting the
+ underlying C ``identity`` field to ``PyUFunc_IdentityValue``.
+ If omitted, the identity is set to ``PyUFunc_None``. Note that this
+ is _not_ equivalent to setting the identity to ``None``, which
+ implies the operation is reorderable.
+
+ Returns
+ -------
+ out : ufunc
+ Returns a NumPy universal function (``ufunc``) object.
+
+ See Also
+ --------
+ vectorize : Evaluates pyfunc over input arrays using broadcasting rules of numpy.
+
+ Notes
+ -----
+ The returned ufunc always returns PyObject arrays.
+
+ Examples
+ --------
+ Use frompyfunc to add broadcasting to the Python function ``oct``:
+
+ >>> oct_array = np.frompyfunc(oct, 1, 1)
+ >>> oct_array(np.array((10, 30, 100)))
+ array(['0o12', '0o36', '0o144'], dtype=object)
+ >>> np.array((oct(10), oct(30), oct(100))) # for comparison
+ array(['0o12', '0o36', '0o144'], dtype='<U5')
+
+ """)
+
+add_newdoc('numpy.core.umath', 'geterrobj',
+ """
+ geterrobj()
+
+ Return the current object that defines floating-point error handling.
+
+ The error object contains all information that defines the error handling
+ behavior in NumPy. `geterrobj` is used internally by the other
+ functions that get and set error handling behavior (`geterr`, `seterr`,
+ `geterrcall`, `seterrcall`).
+
+ Returns
+ -------
+ errobj : list
+ The error object, a list containing three elements:
+ [internal numpy buffer size, error mask, error callback function].
+
+ The error mask is a single integer that holds the treatment information
+ on all four floating point errors. The information for each error type
+ is contained in three bits of the integer. If we print it in base 8, we
+ can see what treatment is set for "invalid", "under", "over", and
+ "divide" (in that order). The printed string can be interpreted with
+
+ * 0 : 'ignore'
+ * 1 : 'warn'
+ * 2 : 'raise'
+ * 3 : 'call'
+ * 4 : 'print'
+ * 5 : 'log'
+
+ See Also
+ --------
+ seterrobj, seterr, geterr, seterrcall, geterrcall
+ getbufsize, setbufsize
+
+ Notes
+ -----
+ For complete documentation of the types of floating-point exceptions and
+ treatment options, see `seterr`.
+
+ Examples
+ --------
+ >>> np.geterrobj() # first get the defaults
+ [8192, 521, None]
+
+ >>> def err_handler(type, flag):
+ ... print("Floating point error (%s), with flag %s" % (type, flag))
+ ...
+ >>> old_bufsize = np.setbufsize(20000)
+ >>> old_err = np.seterr(divide='raise')
+ >>> old_handler = np.seterrcall(err_handler)
+ >>> np.geterrobj()
+ [8192, 521, <function err_handler at 0x...>]
+
+ >>> old_err = np.seterr(all='ignore')
+ >>> np.base_repr(np.geterrobj()[1], 8)
+ '0'
+ >>> old_err = np.seterr(divide='warn', over='log', under='call',
+ ... invalid='print')
+ >>> np.base_repr(np.geterrobj()[1], 8)
+ '4351'
+
+ """)
+
+add_newdoc('numpy.core.umath', 'seterrobj',
+ """
+ seterrobj(errobj)
+
+ Set the object that defines floating-point error handling.
+
+ The error object contains all information that defines the error handling
+ behavior in NumPy. `seterrobj` is used internally by the other
+ functions that set error handling behavior (`seterr`, `seterrcall`).
+
+ Parameters
+ ----------
+ errobj : list
+ The error object, a list containing three elements:
+ [internal numpy buffer size, error mask, error callback function].
+
+ The error mask is a single integer that holds the treatment information
+ on all four floating point errors. The information for each error type
+ is contained in three bits of the integer. If we print it in base 8, we
+ can see what treatment is set for "invalid", "under", "over", and
+ "divide" (in that order). The printed string can be interpreted with
+
+ * 0 : 'ignore'
+ * 1 : 'warn'
+ * 2 : 'raise'
+ * 3 : 'call'
+ * 4 : 'print'
+ * 5 : 'log'
+
+ See Also
+ --------
+ geterrobj, seterr, geterr, seterrcall, geterrcall
+ getbufsize, setbufsize
+
+ Notes
+ -----
+ For complete documentation of the types of floating-point exceptions and
+ treatment options, see `seterr`.
+
+ Examples
+ --------
+ >>> old_errobj = np.geterrobj() # first get the defaults
+ >>> old_errobj
+ [8192, 521, None]
+
+ >>> def err_handler(type, flag):
+ ... print("Floating point error (%s), with flag %s" % (type, flag))
+ ...
+ >>> new_errobj = [20000, 12, err_handler]
+ >>> np.seterrobj(new_errobj)
+ >>> np.base_repr(12, 8) # int for divide=4 ('print') and over=1 ('warn')
+ '14'
+ >>> np.geterr()
+ {'over': 'warn', 'divide': 'print', 'invalid': 'ignore', 'under': 'ignore'}
+ >>> np.geterrcall() is err_handler
+ True
+
+ """)
+
+
+##############################################################################
+#
+# compiled_base functions
+#
+##############################################################################
+
+add_newdoc('numpy.core.multiarray', 'add_docstring',
+ """
+ add_docstring(obj, docstring)
+
+ Add a docstring to a built-in obj if possible.
+ If the obj already has a docstring, raise a RuntimeError.
+ If this routine does not know how to add a docstring to the object,
+ raise a TypeError.
+ """)
+
+add_newdoc('numpy.core.umath', '_add_newdoc_ufunc',
+ """
+ add_ufunc_docstring(ufunc, new_docstring)
+
+ Replace the docstring for a ufunc with new_docstring.
+ This method will only work if the current docstring for
+ the ufunc is NULL. (At the C level, i.e. when ufunc->doc is NULL.)
+
+ Parameters
+ ----------
+ ufunc : numpy.ufunc
+ A ufunc whose current doc is NULL.
+ new_docstring : string
+ The new docstring for the ufunc.
+
+ Notes
+ -----
+ This method allocates memory for new_docstring on
+ the heap. Technically this creates a memory leak, since this
+ memory will not be reclaimed until the end of the program
+ even if the ufunc itself is removed. However this will only
+ be a problem if the user is repeatedly creating ufuncs with
+ no documentation, adding documentation via add_newdoc_ufunc,
+ and then throwing away the ufunc.
+ """)
+
+add_newdoc('numpy.core.multiarray', '_set_madvise_hugepage',
+ """
+ _set_madvise_hugepage(enabled: bool) -> bool
+
+ Set or unset use of ``madvise (2)`` MADV_HUGEPAGE support when
+ allocating the array data. Returns the previously set value.
+ See `global_state` for more information.
+ """)
+
+add_newdoc('numpy.core._multiarray_tests', 'format_float_OSprintf_g',
+ """
+ format_float_OSprintf_g(val, precision)
+
+ Print a floating point scalar using the system's printf function,
+ equivalent to:
+
+ printf("%.*g", precision, val);
+
+ for half/float/double, or replacing 'g' by 'Lg' for longdouble. This
+ method is designed to help cross-validate the format_float_* methods.
+
+ Parameters
+ ----------
+ val : python float or numpy floating scalar
+ Value to format.
+
+ precision : non-negative integer, optional
+ Precision given to printf.
+
+ Returns
+ -------
+ rep : string
+ The string representation of the floating point value
+
+ See Also
+ --------
+ format_float_scientific
+ format_float_positional
+ """)
+
+
+##############################################################################
+#
+# Documentation for ufunc attributes and methods
+#
+##############################################################################
+
+
+##############################################################################
+#
+# ufunc object
+#
+##############################################################################
+
+add_newdoc('numpy.core', 'ufunc',
+ """
+ Functions that operate element by element on whole arrays.
+
+ To see the documentation for a specific ufunc, use `info`. For
+ example, ``np.info(np.sin)``.
Because ufuncs are written in C + (for speed) and linked into Python with NumPy's ufunc facility, + Python's help() function finds this page whenever help() is called + on a ufunc. + + A detailed explanation of ufuncs can be found in the docs for :ref:`ufuncs`. + + **Calling ufuncs:** ``op(*x[, out], where=True, **kwargs)`` + + Apply `op` to the arguments `*x` elementwise, broadcasting the arguments. + + The broadcasting rules are: + + * Dimensions of length 1 may be prepended to either array. + * Arrays may be repeated along dimensions of length 1. + + Parameters + ---------- + *x : array_like + Input arrays. + out : ndarray, None, or tuple of ndarray and None, optional + Alternate array object(s) in which to put the result; if provided, it + must have a shape that the inputs broadcast to. A tuple of arrays + (possible only as a keyword argument) must have length equal to the + number of outputs; use None for uninitialized outputs to be + allocated by the ufunc. + where : array_like, optional + This condition is broadcast over the input. At locations where the + condition is True, the `out` array will be set to the ufunc result. + Elsewhere, the `out` array will retain its original value. + Note that if an uninitialized `out` array is created via the default + ``out=None``, locations within it where the condition is False will + remain uninitialized. + **kwargs + For other keyword-only arguments, see the :ref:`ufunc docs `. + + Returns + ------- + r : ndarray or tuple of ndarray + `r` will have the shape that the arrays in `x` broadcast to; if `out` is + provided, it will be returned. If not, `r` will be allocated and + may contain uninitialized values. If the function has more than one + output, then the result will be a tuple of arrays. + + """) + + +############################################################################## +# +# ufunc attributes +# +############################################################################## + +add_newdoc('numpy.core', 'ufunc', ('identity', + """ + The identity value. + + Data attribute containing the identity element for the ufunc, if it has one. + If it does not, the attribute value is None. + + Examples + -------- + >>> np.add.identity + 0 + >>> np.multiply.identity + 1 + >>> np.power.identity + 1 + >>> print(np.exp.identity) + None + """)) + +add_newdoc('numpy.core', 'ufunc', ('nargs', + """ + The number of arguments. + + Data attribute containing the number of arguments the ufunc takes, including + optional ones. + + Notes + ----- + Typically this value will be one more than what you might expect because all + ufuncs take the optional "out" argument. + + Examples + -------- + >>> np.add.nargs + 3 + >>> np.multiply.nargs + 3 + >>> np.power.nargs + 3 + >>> np.exp.nargs + 2 + """)) + +add_newdoc('numpy.core', 'ufunc', ('nin', + """ + The number of inputs. + + Data attribute containing the number of arguments the ufunc treats as input. + + Examples + -------- + >>> np.add.nin + 2 + >>> np.multiply.nin + 2 + >>> np.power.nin + 2 + >>> np.exp.nin + 1 + """)) + +add_newdoc('numpy.core', 'ufunc', ('nout', + """ + The number of outputs. + + Data attribute containing the number of arguments the ufunc treats as output. + + Notes + ----- + Since all ufuncs can take output arguments, this will always be (at least) 1. + + Examples + -------- + >>> np.add.nout + 1 + >>> np.multiply.nout + 1 + >>> np.power.nout + 1 + >>> np.exp.nout + 1 + + """)) + +add_newdoc('numpy.core', 'ufunc', ('ntypes', + """ + The number of types. 
+
+ The number of numerical NumPy types - of which there are 18 total - on which
+ the ufunc can operate.
+
+ See Also
+ --------
+ numpy.ufunc.types
+
+ Examples
+ --------
+ >>> np.add.ntypes
+ 18
+ >>> np.multiply.ntypes
+ 18
+ >>> np.power.ntypes
+ 17
+ >>> np.exp.ntypes
+ 7
+ >>> np.remainder.ntypes
+ 14
+
+ """))
+
+add_newdoc('numpy.core', 'ufunc', ('types',
+ """
+ Returns a list with types grouped input->output.
+
+ Data attribute listing the data-type "Domain-Range" groupings the ufunc can
+ deliver. The data-types are given using the character codes.
+
+ See Also
+ --------
+ numpy.ufunc.ntypes
+
+ Examples
+ --------
+ >>> np.add.types
+ ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
+ 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
+ 'GG->G', 'OO->O']
+
+ >>> np.multiply.types
+ ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
+ 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
+ 'GG->G', 'OO->O']
+
+ >>> np.power.types
+ ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
+ 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G',
+ 'OO->O']
+
+ >>> np.exp.types
+ ['f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O']
+
+ >>> np.remainder.types
+ ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
+ 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'OO->O']
+
+ """))
+
+add_newdoc('numpy.core', 'ufunc', ('signature',
+ """
+ Definition of the core elements a generalized ufunc operates on.
+
+ The signature determines how the dimensions of each input/output array
+ are split into core and loop dimensions:
+
+ 1. Each dimension in the signature is matched to a dimension of the
+ corresponding passed-in array, starting from the end of the shape tuple.
+ 2. Core dimensions assigned to the same label in the signature must have
+ exactly matching sizes, no broadcasting is performed.
+ 3. The core dimensions are removed from all inputs and the remaining
+ dimensions are broadcast together, defining the loop dimensions.
+
+ Notes
+ -----
+ Generalized ufuncs are used internally in many linalg functions, and in
+ the testing suite; the examples below are taken from these.
+ For ufuncs that operate on scalars, the signature is None, which is
+ equivalent to '()' for every argument.
+
+ Examples
+ --------
+ >>> np.core.umath_tests.matrix_multiply.signature
+ '(m,n),(n,p)->(m,p)'
+ >>> np.linalg._umath_linalg.det.signature
+ '(m,m)->()'
+ >>> np.add.signature is None
+ True # equivalent to '(),()->()'
+ """))
+
+##############################################################################
+#
+# ufunc methods
+#
+##############################################################################
+
+add_newdoc('numpy.core', 'ufunc', ('reduce',
+ """
+ reduce(array, axis=0, dtype=None, out=None, keepdims=False, initial=<no value>, where=True)
+
+ Reduces `array`'s dimension by one, by applying ufunc along one axis.
+
+ Let :math:`array.shape = (N_0, ..., N_i, ..., N_{M-1})`. Then
+ :math:`ufunc.reduce(array, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` =
+ the result of iterating `j` over :math:`range(N_i)`, cumulatively applying
+ ufunc to each :math:`array[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`.
+ For a one-dimensional array, reduce produces results equivalent to:
+ ::
+
+ r = op.identity # op = ufunc
+ for i in range(len(A)):
+ r = op(r, A[i])
+ return r
+
+ For example, add.reduce() is equivalent to sum().
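+
+ A quick check of that equivalence (a minimal sketch):
+
+ >>> A = np.array([2, 3, 5])
+ >>> np.add.reduce(A) == np.sum(A)
+ True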
+
+ Parameters
+ ----------
+ array : array_like
+ The array to act on.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which a reduction is performed.
+ The default (`axis` = 0) is to perform a reduction over the first
+ dimension of the input array. `axis` may be negative, in
+ which case it counts from the last to the first axis.
+
+ .. versionadded:: 1.7.0
+
+ If this is None, a reduction is performed over all the axes.
+ If this is a tuple of ints, a reduction is performed on multiple
+ axes, instead of a single axis or all the axes as before.
+
+ For operations which are either not commutative or not associative,
+ doing a reduction over multiple axes is not well-defined. The
+ ufuncs do not currently raise an exception in this case, but will
+ likely do so in the future.
+ dtype : data-type code, optional
+ The type used to represent the intermediate results. Defaults
+ to the data-type of the output array if this is provided, or
+ the data-type of the input array if no output array is provided.
+ out : ndarray, None, or tuple of ndarray and None, optional
+ A location into which the result is stored. If not provided or None,
+ a freshly-allocated array is returned. For consistency with
+ ``ufunc.__call__``, if given as a keyword, this may be wrapped in a
+ 1-element tuple.
+
+ .. versionchanged:: 1.13.0
+ Tuples are allowed for keyword argument.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the original `array`.
+
+ .. versionadded:: 1.7.0
+ initial : scalar, optional
+ The value with which to start the reduction.
+ If the ufunc has no identity or the dtype is object, this defaults
+ to None - otherwise it defaults to ufunc.identity.
+ If ``None`` is given, the first element of the reduction is used,
+ and an error is thrown if the reduction is empty.
+
+ .. versionadded:: 1.15.0
+
+ where : array_like of bool, optional
+ A boolean array which is broadcasted to match the dimensions
+ of `array`, and selects elements to include in the reduction. Note
+ that for ufuncs like ``minimum`` that do not have an identity
+ defined, one has to pass in also ``initial``.
+
+ .. versionadded:: 1.17.0
+
+ Returns
+ -------
+ r : ndarray
+ The reduced array. If `out` was supplied, `r` is a reference to it.
+
+ Examples
+ --------
+ >>> np.multiply.reduce([2,3,5])
+ 30
+
+ A multi-dimensional array example:
+
+ >>> X = np.arange(8).reshape((2,2,2))
+ >>> X
+ array([[[0, 1],
+ [2, 3]],
+ [[4, 5],
+ [6, 7]]])
+ >>> np.add.reduce(X, 0)
+ array([[ 4, 6],
+ [ 8, 10]])
+ >>> np.add.reduce(X) # confirm: default axis value is 0
+ array([[ 4, 6],
+ [ 8, 10]])
+ >>> np.add.reduce(X, 1)
+ array([[ 2, 4],
+ [10, 12]])
+ >>> np.add.reduce(X, 2)
+ array([[ 1, 5],
+ [ 9, 13]])
+
+ You can use the ``initial`` keyword argument to initialize the reduction
+ with a different value, and ``where`` to select specific elements to include:
+
+ >>> np.add.reduce([10], initial=5)
+ 15
+ >>> np.add.reduce(np.ones((2, 2, 2)), axis=(0, 2), initial=10)
+ array([14., 14.])
+ >>> a = np.array([10., np.nan, 10])
+ >>> np.add.reduce(a, where=~np.isnan(a))
+ 20.0
+
+ Allows reductions of empty arrays where they would normally fail, i.e.
+ for ufuncs without an identity.
+
+    >>> np.minimum.reduce([], initial=np.inf)
+    inf
+    >>> np.minimum.reduce([[1., 2.], [3., 4.]], initial=10., where=[True, False])
+    array([ 1., 10.])
+    >>> np.minimum.reduce([])
+    Traceback (most recent call last):
+        ...
+    ValueError: zero-size array to reduction operation minimum which has no identity
+    """))
+
+add_newdoc('numpy.core', 'ufunc', ('accumulate',
+    """
+    accumulate(array, axis=0, dtype=None, out=None)
+
+    Accumulate the result of applying the operator to all elements.
+
+    For a one-dimensional array, accumulate produces results equivalent to::
+
+      r = np.empty(len(A))
+      t = op.identity        # op = the ufunc being applied to A's elements
+      for i in range(len(A)):
+          t = op(t, A[i])
+          r[i] = t
+      return r
+
+    For example, add.accumulate() is equivalent to np.cumsum().
+
+    For a multi-dimensional array, accumulate is applied along only one
+    axis (axis zero by default; see Examples below) so repeated use is
+    necessary if one wants to accumulate over multiple axes.
+
+    Parameters
+    ----------
+    array : array_like
+        The array to act on.
+    axis : int, optional
+        The axis along which to apply the accumulation; default is zero.
+    dtype : data-type code, optional
+        The data-type used to represent the intermediate results. Defaults
+        to the data-type of the output array if such is provided, or the
+        data-type of the input array if no output array is provided.
+    out : ndarray, None, or tuple of ndarray and None, optional
+        A location into which the result is stored. If not provided or None,
+        a freshly-allocated array is returned. For consistency with
+        ``ufunc.__call__``, if given as a keyword, this may be wrapped in a
+        1-element tuple.
+
+        .. versionchanged:: 1.13.0
+           Tuples are allowed for keyword argument.
+
+    Returns
+    -------
+    r : ndarray
+        The accumulated values. If `out` was supplied, `r` is a reference to
+        `out`.
+
+    Examples
+    --------
+    1-D array examples:
+
+    >>> np.add.accumulate([2, 3, 5])
+    array([ 2,  5, 10])
+    >>> np.multiply.accumulate([2, 3, 5])
+    array([ 2,  6, 30])
+
+    2-D array examples:
+
+    >>> I = np.eye(2)
+    >>> I
+    array([[1., 0.],
+           [0., 1.]])
+
+    Accumulate along axis 0 (rows), down columns:
+
+    >>> np.add.accumulate(I, 0)
+    array([[1., 0.],
+           [1., 1.]])
+    >>> np.add.accumulate(I) # no axis specified = axis zero
+    array([[1., 0.],
+           [1., 1.]])
+
+    Accumulate along axis 1 (columns), through rows:
+
+    >>> np.add.accumulate(I, 1)
+    array([[1., 1.],
+           [0., 1.]])
+
+    """))
+
+add_newdoc('numpy.core', 'ufunc', ('reduceat',
+    """
+    reduceat(array, indices, axis=0, dtype=None, out=None)
+
+    Performs a (local) reduce with specified slices over a single axis.
+
+    For i in ``range(len(indices))``, `reduceat` computes
+    ``ufunc.reduce(array[indices[i]:indices[i+1]])``, which becomes the i-th
+    generalized "row" parallel to `axis` in the final result (i.e., in a
+    2-D array, for example, if `axis = 0`, it becomes the i-th row, but if
+    `axis = 1`, it becomes the i-th column). There are three exceptions to this:
+
+    * when ``i = len(indices) - 1`` (so for the last index),
+      ``indices[i+1] = array.shape[axis]``.
+    * if ``indices[i] >= indices[i + 1]``, the i-th generalized "row" is
+      simply ``array[indices[i]]``.
+    * if ``indices[i] >= len(array)`` or ``indices[i] < 0``, an error is raised.
+
+    The shape of the output depends on the size of `indices`, and may be
+    larger than `array` (this happens if ``len(indices) > array.shape[axis]``).
+
+    Parameters
+    ----------
+    array : array_like
+        The array to act on.
+    indices : array_like
+        Paired indices, comma separated (not colon), specifying slices to
+        reduce.
+    axis : int, optional
+        The axis along which to apply the reduceat.
+    dtype : data-type code, optional
+        The type used to represent the intermediate results. Defaults
+        to the data type of the output array if this is provided, or
+        the data type of the input array if no output array is provided.
+    out : ndarray, None, or tuple of ndarray and None, optional
+        A location into which the result is stored. If not provided or None,
+        a freshly-allocated array is returned. For consistency with
+        ``ufunc.__call__``, if given as a keyword, this may be wrapped in a
+        1-element tuple.
+
+        .. versionchanged:: 1.13.0
+           Tuples are allowed for keyword argument.
+
+    Returns
+    -------
+    r : ndarray
+        The reduced values. If `out` was supplied, `r` is a reference to
+        `out`.
+
+    Notes
+    -----
+    A descriptive example:
+
+    If `array` is 1-D, the function `ufunc.accumulate(array)` is the same as
+    ``ufunc.reduceat(array, indices)[::2]`` where `indices` is
+    ``range(len(array) - 1)`` with a zero placed
+    in every other element:
+    ``indices = zeros(2 * len(array) - 1)``,
+    ``indices[1::2] = range(1, len(array))``.
+
+    Don't be fooled by this attribute's name: `reduceat(array)` is not
+    necessarily smaller than `array`.
+
+    Examples
+    --------
+    To take the running sum of four successive values:
+
+    >>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2]
+    array([ 6, 10, 14, 18])
+
+    A 2-D example:
+
+    >>> x = np.linspace(0, 15, 16).reshape(4,4)
+    >>> x
+    array([[ 0.,  1.,  2.,  3.],
+           [ 4.,  5.,  6.,  7.],
+           [ 8.,  9., 10., 11.],
+           [12., 13., 14., 15.]])
+
+    ::
+
+     # reduce such that the result has the following five rows:
+     # [row1 + row2 + row3]
+     # [row4]
+     # [row2]
+     # [row3]
+     # [row1 + row2 + row3 + row4]
+
+    >>> np.add.reduceat(x, [0, 3, 1, 2, 0])
+    array([[12., 15., 18., 21.],
+           [12., 13., 14., 15.],
+           [ 4.,  5.,  6.,  7.],
+           [ 8.,  9., 10., 11.],
+           [24., 28., 32., 36.]])
+
+    ::
+
+     # reduce such that result has the following two columns:
+     # [col1 * col2 * col3, col4]
+
+    >>> np.multiply.reduceat(x, [0, 3], 1)
+    array([[   0.,    3.],
+           [ 120.,    7.],
+           [ 720.,   11.],
+           [2184.,   15.]])
+
+    """))
+
+add_newdoc('numpy.core', 'ufunc', ('outer',
+    r"""
+    outer(A, B, /, **kwargs)
+
+    Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`.
+
+    Let ``M = A.ndim``, ``N = B.ndim``. Then the result, `C`, of
+    ``op.outer(A, B)`` is an array of dimension M + N such that:
+
+    .. math:: C[i_0, ..., i_{M-1}, j_0, ..., j_{N-1}] =
+       op(A[i_0, ..., i_{M-1}], B[j_0, ..., j_{N-1}])
+
+    For `A` and `B` one-dimensional, this is equivalent to::
+
+      r = empty(len(A),len(B))
+      for i in range(len(A)):
+          for j in range(len(B)):
+              r[i,j] = op(A[i], B[j])  # op = ufunc in question
+
+    Parameters
+    ----------
+    A : array_like
+        First array
+    B : array_like
+        Second array
+    kwargs : any
+        Arguments to pass on to the ufunc. Typically `dtype` or `out`.
+        See `ufunc` for a comprehensive overview of all available arguments.
+
+    Returns
+    -------
+    r : ndarray
+        Output array
+
+    See Also
+    --------
+    numpy.outer : A less powerful version of ``np.multiply.outer``
+                  that `ravel`\ s all inputs to 1D. This exists
+                  primarily for compatibility with old code.
+
+    tensordot : ``np.tensordot(a, b, axes=((), ()))`` and
+                ``np.multiply.outer(a, b)`` behave the same for all
+                dimensions of a and b.
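+
+    Notes
+    -----
+    For 1-D inputs the same result can be obtained with broadcasting; a quick
+    equivalence check::
+
+        >>> a, b = np.array([1, 2, 3]), np.array([4, 5, 6])
+        >>> np.array_equal(np.multiply.outer(a, b), a[:, None] * b)
+        True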
+ + Examples + -------- + >>> np.multiply.outer([1, 2, 3], [4, 5, 6]) + array([[ 4, 5, 6], + [ 8, 10, 12], + [12, 15, 18]]) + + A multi-dimensional example: + + >>> A = np.array([[1, 2, 3], [4, 5, 6]]) + >>> A.shape + (2, 3) + >>> B = np.array([[1, 2, 3, 4]]) + >>> B.shape + (1, 4) + >>> C = np.multiply.outer(A, B) + >>> C.shape; C + (2, 3, 1, 4) + array([[[[ 1, 2, 3, 4]], + [[ 2, 4, 6, 8]], + [[ 3, 6, 9, 12]]], + [[[ 4, 8, 12, 16]], + [[ 5, 10, 15, 20]], + [[ 6, 12, 18, 24]]]]) + + """)) + +add_newdoc('numpy.core', 'ufunc', ('at', + """ + at(a, indices, b=None, /) + + Performs unbuffered in place operation on operand 'a' for elements + specified by 'indices'. For addition ufunc, this method is equivalent to + ``a[indices] += b``, except that results are accumulated for elements that + are indexed more than once. For example, ``a[[0,0]] += 1`` will only + increment the first element once because of buffering, whereas + ``add.at(a, [0,0], 1)`` will increment the first element twice. + + .. versionadded:: 1.8.0 + + Parameters + ---------- + a : array_like + The array to perform in place operation on. + indices : array_like or tuple + Array like index object or slice object for indexing into first + operand. If first operand has multiple dimensions, indices can be a + tuple of array like index objects or slice objects. + b : array_like + Second operand for ufuncs requiring two operands. Operand must be + broadcastable over first operand after indexing or slicing. + + Examples + -------- + Set items 0 and 1 to their negative values: + + >>> a = np.array([1, 2, 3, 4]) + >>> np.negative.at(a, [0, 1]) + >>> a + array([-1, -2, 3, 4]) + + Increment items 0 and 1, and increment item 2 twice: + + >>> a = np.array([1, 2, 3, 4]) + >>> np.add.at(a, [0, 1, 2, 2], 1) + >>> a + array([2, 3, 5, 4]) + + Add items 0 and 1 in first array to second array, + and store results in first array: + + >>> a = np.array([1, 2, 3, 4]) + >>> b = np.array([1, 2]) + >>> np.add.at(a, [0, 1], b) + >>> a + array([2, 4, 3, 4]) + + """)) + +############################################################################## +# +# Documentation for dtype attributes and methods +# +############################################################################## + +############################################################################## +# +# dtype object +# +############################################################################## + +add_newdoc('numpy.core.multiarray', 'dtype', + """ + dtype(dtype, align=False, copy=False) + + Create a data type object. + + A numpy array is homogeneous, and contains elements described by a + dtype object. A dtype object can be constructed from different + combinations of fundamental numeric types. + + Parameters + ---------- + dtype + Object to be converted to a data type object. + align : bool, optional + Add padding to the fields to match what a C compiler would output + for a similar C-struct. Can be ``True`` only if `obj` is a dictionary + or a comma-separated string. If a struct dtype is being created, + this also sets a sticky alignment flag ``isalignedstruct``. + copy : bool, optional + Make a new copy of the data-type object. If ``False``, the result + may just be a reference to a built-in data-type object. 
+
+    See also
+    --------
+    result_type
+
+    Examples
+    --------
+    Using array-scalar type:
+
+    >>> np.dtype(np.int16)
+    dtype('int16')
+
+    Structured type, one field name 'f1', containing int16:
+
+    >>> np.dtype([('f1', np.int16)])
+    dtype([('f1', '<i2')])
+
+    Structured type, one field named 'f1', in itself containing a structured
+    type with one field:
+
+    >>> np.dtype([('f1', [('f1', np.int16)])])
+    dtype([('f1', [('f1', '<i2')])])
+
+    Structured type, two fields: the first field contains an unsigned int, the
+    second an int32:
+
+    >>> np.dtype([('f1', np.uint64), ('f2', np.int32)])
+    dtype([('f1', '<u8'), ('f2', '<i4')])
+
+    Using array-protocol type strings:
+
+    >>> np.dtype([('a','f8'),('b','S10')])
+    dtype([('a', '<f8'), ('b', 'S10')])
+
+    Using comma-separated field formats.  The shape is (2,3):
+
+    >>> np.dtype("i4, (2,3)f8")
+    dtype([('f0', '<i4'), ('f1', '<f8', (2, 3))])
+
+    Using tuples.  ``int`` is a fixed type, 3 the field's shape.  ``void``
+    is a flexible type, here of size 10:
+
+    >>> np.dtype([('hello',(np.int64,3)),('world',np.void,10)])
+    dtype([('hello', '<i8', (3,)), ('world', 'V10')])
+
+    Subdivide ``int16`` into 2 ``int8``'s, called x and y.  0 and 1 are
+    the offsets in bytes:
+
+    >>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)}))
+    dtype((numpy.int16, [('x', 'i1'), ('y', 'i1')]))
+
+    Using dictionaries.  Two fields named 'gender' and 'age':
+
+    >>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]})
+    dtype([('gender', 'S1'), ('age', 'u1')])
+
+    Offsets in bytes, here 0 and 25:
+
+    >>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)})
+    dtype([('surname', 'S25'), ('age', 'u1')])
+
+    """)
+
+##############################################################################
+#
+# dtype attributes
+#
+##############################################################################
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('alignment',
+    """
+    The required alignment (bytes) of this data-type according to the compiler.
+
+    More information is available in the C-API section of the manual.
+
+    Examples
+    --------
+
+    >>> x = np.dtype('i4')
+    >>> x.alignment
+    4
+
+    >>> x = np.dtype(float)
+    >>> x.alignment
+    8
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder',
+    """
+    A character indicating the byte-order of this data-type object.
+
+    One of:
+
+    ===  ==============
+    '='  native
+    '<'  little-endian
+    '>'  big-endian
+    '|'  not applicable
+    ===  ==============
+
+    All built-in data-type objects have byteorder either '=' or '|'.
+
+    Examples
+    --------
+
+    >>> dt = np.dtype('i2')
+    >>> dt.byteorder
+    '='
+    >>> # endian is not relevant for 8 bit numbers
+    >>> np.dtype('i1').byteorder
+    '|'
+    >>> # or ASCII strings
+    >>> np.dtype('S2').byteorder
+    '|'
+    >>> # Even if specific code is given, and it is native
+    >>> # '=' is the byteorder
+    >>> import sys
+    >>> sys_is_le = sys.byteorder == 'little'
+    >>> native_code = sys_is_le and '<' or '>'
+    >>> swapped_code = sys_is_le and '>' or '<'
+    >>> dt = np.dtype(native_code + 'i2')
+    >>> dt.byteorder
+    '='
+    >>> # Swapped code shows up as itself
+    >>> dt = np.dtype(swapped_code + 'i2')
+    >>> dt.byteorder == swapped_code
+    True
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('char',
+    """A unique character code for each of the 21 different built-in types.
+
+    Examples
+    --------
+
+    >>> x = np.dtype(float)
+    >>> x.char
+    'd'
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('descr',
+    """
+    `__array_interface__` description of the data-type.
+
+    The format is that required by the 'descr' key in the
+    `__array_interface__` attribute.
+
+    Warning: This attribute exists specifically for `__array_interface__`,
+    and passing it directly to `np.dtype` will not accurately reconstruct
+    some dtypes (e.g., scalar and subarray dtypes).
+
+    Examples
+    --------
+
+    >>> x = np.dtype(float)
+    >>> x.descr
+    [('', '<f8')]
+
+    >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
+    >>> dt.descr
+    [('name', '<U16'), ('grades', '<f8', (2,))]
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('fields',
+    """
+    Dictionary of named fields defined for this data type, or ``None``.
+
+    The dictionary is indexed by keys that are the names of the fields.
+    Each entry in the dictionary is a tuple fully describing the field::
+
+      (dtype, offset[, title])
+
+    Offset is limited to C int, which is signed and usually 32 bits.
+    If present, the optional title can be any object (if it is a string
+    or unicode then it will also be a key in the fields dictionary,
+    otherwise it's meta-data). Notice also that the first two elements
+    of the tuple can be passed directly as arguments to the
+    ``ndarray.getfield`` and ``ndarray.setfield`` methods.
+
+    See Also
+    --------
+    ndarray.getfield, ndarray.setfield
+
+    Examples
+    --------
+    >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
+    >>> print(dt.fields)
+    {'grades': (dtype(('float64',(2,))), 64), 'name': (dtype('<U16'), 0)}
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('flags',
+    """
+    Bit-flags describing how this data type is to be interpreted.
+
+    Bit-masks are in `numpy.core.multiarray` as the constants
+    `ITEM_HASOBJECT`, `LIST_PICKLE`, `ITEM_IS_POINTER`, `NEEDS_INIT`,
+    `NEEDS_PYAPI`, `USE_GETITEM`, `USE_SETITEM`. A full explanation
+    of these flags is in C-API documentation; they are largely useful
+    for user-defined data-types.
+
+    The following example demonstrates that operations on this particular
+    dtype require Python C-API.
+
+    Examples
+    --------
+
+    >>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)])
+    >>> x.flags
+    16
+    >>> np.core.multiarray.NEEDS_PYAPI
+    16
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('hasobject',
+    """
+    Boolean indicating whether this dtype contains any reference-counted
+    objects in any fields or sub-dtypes.
+
+    Recall that what is actually in the ndarray memory representing
+    the Python object is the memory address of that object (a pointer).
+    Special handling may be required, and this attribute is useful for
+    distinguishing data types that may contain arbitrary Python objects
+    and data-types that won't.
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('isbuiltin',
+    """
+    Integer indicating how this dtype relates to the built-in dtypes.
+
+    Read-only.
+
+    =  ========================================================================
+    0  if this is a structured array type, with fields
+    1  if this is a dtype compiled into numpy (such as ints, floats etc)
+    2  if the dtype is for a user-defined numpy type
+       A user-defined type uses the numpy C-API machinery to extend
+       numpy to handle a new array type. See
+       :ref:`user.user-defined-data-types` in the NumPy manual.
+    =  ========================================================================
+
+    Examples
+    --------
+    >>> dt = np.dtype('i2')
+    >>> dt.isbuiltin
+    1
+    >>> dt = np.dtype('f8')
+    >>> dt.isbuiltin
+    1
+    >>> dt = np.dtype([('field1', 'f8')])
+    >>> dt.isbuiltin
+    0
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('isnative',
+    """
+    Boolean indicating whether the byte order of this dtype is native
+    to the platform.
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('isalignedstruct',
+    """
+    Boolean indicating whether the dtype is a struct which maintains
+    field alignment. This flag is sticky, so when combining multiple
+    structs together, it is preserved and produces new dtypes which
+    are also aligned.
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('itemsize',
+    """
+    The element size of this data-type object.
+
+    For 18 of the 21 types this number is fixed by the data-type.
+    For the flexible data-types, this number can be anything.
+
+    Examples
+    --------
+
+    >>> arr = np.array([[1, 2], [3, 4]])
+    >>> arr.dtype
+    dtype('int64')
+    >>> arr.itemsize
+    8
+
+    >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
+    >>> dt.itemsize
+    80
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('kind',
+    """
+    A character code (one of 'biufcmMOSUV') identifying the general kind of data.
+ + = ====================== + b boolean + i signed integer + u unsigned integer + f floating-point + c complex floating-point + m timedelta + M datetime + O object + S (byte-)string + U Unicode + V void + = ====================== + + Examples + -------- + + >>> dt = np.dtype('i4') + >>> dt.kind + 'i' + >>> dt = np.dtype('f8') + >>> dt.kind + 'f' + >>> dt = np.dtype([('field1', 'f8')]) + >>> dt.kind + 'V' + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('metadata', + """ + Either ``None`` or a readonly dictionary of metadata (mappingproxy). + + The metadata field can be set using any dictionary at data-type + creation. NumPy currently has no uniform approach to propagating + metadata; although some array operations preserve it, there is no + guarantee that others will. + + .. warning:: + + Although used in certain projects, this feature was long undocumented + and is not well supported. Some aspects of metadata propagation + are expected to change in the future. + + Examples + -------- + + >>> dt = np.dtype(float, metadata={"key": "value"}) + >>> dt.metadata["key"] + 'value' + >>> arr = np.array([1, 2, 3], dtype=dt) + >>> arr.dtype.metadata + mappingproxy({'key': 'value'}) + + Adding arrays with identical datatypes currently preserves the metadata: + + >>> (arr + arr).dtype.metadata + mappingproxy({'key': 'value'}) + + But if the arrays have different dtype metadata, the metadata may be + dropped: + + >>> dt2 = np.dtype(float, metadata={"key2": "value2"}) + >>> arr2 = np.array([3, 2, 1], dtype=dt2) + >>> (arr + arr2).dtype.metadata is None + True # The metadata field is cleared so None is returned + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('name', + """ + A bit-width name for this data-type. + + Un-sized flexible data-type objects do not have this attribute. + + Examples + -------- + + >>> x = np.dtype(float) + >>> x.name + 'float64' + >>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)]) + >>> x.name + 'void640' + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('names', + """ + Ordered list of field names, or ``None`` if there are no fields. + + The names are ordered according to increasing byte offset. This can be + used, for example, to walk through all of the named fields in offset order. + + Examples + -------- + >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) + >>> dt.names + ('name', 'grades') + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('num', + """ + A unique number for each of the 21 different built-in types. + + These are roughly ordered from least-to-most precision. + + Examples + -------- + + >>> dt = np.dtype(str) + >>> dt.num + 19 + + >>> dt = np.dtype(float) + >>> dt.num + 12 + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('shape', + """ + Shape tuple of the sub-array if this data type describes a sub-array, + and ``()`` otherwise. + + Examples + -------- + + >>> dt = np.dtype(('i4', 4)) + >>> dt.shape + (4,) + + >>> dt = np.dtype(('i4', (2, 3))) + >>> dt.shape + (2, 3) + + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('ndim', + """ + Number of dimensions of the sub-array if this data type describes a + sub-array, and ``0`` otherwise. + + .. 
versionadded:: 1.13.0
+
+    Examples
+    --------
+    >>> x = np.dtype(float)
+    >>> x.ndim
+    0
+
+    >>> x = np.dtype((float, 8))
+    >>> x.ndim
+    1
+
+    >>> x = np.dtype(('i4', (3, 4)))
+    >>> x.ndim
+    2
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('str',
+    """The array-protocol typestring of this data-type object."""))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('subdtype',
+    """
+    Tuple ``(item_dtype, shape)`` if this `dtype` describes a sub-array, and
+    None otherwise.
+
+    The *shape* is the fixed shape of the sub-array described by this
+    data type, and *item_dtype* the data type of the array.
+
+    If a field whose dtype object has this attribute is retrieved,
+    then the extra dimensions implied by *shape* are tacked on to
+    the end of the retrieved array.
+
+    See Also
+    --------
+    dtype.base
+
+    Examples
+    --------
+    >>> x = np.dtype('8f')
+    >>> x.subdtype
+    (dtype('float32'), (8,))
+
+    >>> x = np.dtype('i2')
+    >>> x.subdtype
+    >>>
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('base',
+    """
+    Returns dtype for the base element of the subarrays,
+    regardless of their dimension or shape.
+
+    See Also
+    --------
+    dtype.subdtype
+
+    Examples
+    --------
+    >>> x = np.dtype('8f')
+    >>> x.base
+    dtype('float32')
+
+    >>> x = np.dtype('i2')
+    >>> x.base
+    dtype('int16')
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('type',
+    """The type object used to instantiate a scalar of this data-type."""))
+
+##############################################################################
+#
+# dtype methods
+#
+##############################################################################
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('newbyteorder',
+    """
+    newbyteorder(new_order='S', /)
+
+    Return a new dtype with a different byte order.
+
+    Changes are also made in all fields and sub-arrays of the data type.
+
+    Parameters
+    ----------
+    new_order : string, optional
+        Byte order to force; a value from the byte order specifications
+        below.  The default value ('S') results in swapping the current
+        byte order.  `new_order` codes can be any of:
+
+        * 'S' - swap dtype from current to opposite endian
+        * {'<', 'little'} - little endian
+        * {'>', 'big'} - big endian
+        * '=' - native order
+        * {'|', 'I'} - ignore (no change to byte order)
+
+    Returns
+    -------
+    new_dtype : dtype
+        New dtype object with the given change to the byte order.
+
+    Notes
+    -----
+    Changes are also made in all fields and sub-arrays of the data type.
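+
+    A structured dtype illustrates that recursive behavior (field dtypes are
+    swapped along with the parent; little-endian input shown for
+    concreteness)::
+
+        >>> dt = np.dtype([('a', '<i4'), ('b', '<f8')])
+        >>> dt.newbyteorder('>')
+        dtype([('a', '>i4'), ('b', '>f8')])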
+
+    Examples
+    --------
+    >>> import sys
+    >>> sys_is_le = sys.byteorder == 'little'
+    >>> native_code = sys_is_le and '<' or '>'
+    >>> swapped_code = sys_is_le and '>' or '<'
+    >>> native_dt = np.dtype(native_code+'i2')
+    >>> swapped_dt = np.dtype(swapped_code+'i2')
+    >>> native_dt.newbyteorder('S') == swapped_dt
+    True
+    >>> native_dt.newbyteorder() == swapped_dt
+    True
+    >>> native_dt == swapped_dt.newbyteorder('S')
+    True
+    >>> native_dt == swapped_dt.newbyteorder('=')
+    True
+    >>> native_dt == swapped_dt.newbyteorder('N')
+    True
+    >>> native_dt == native_dt.newbyteorder('|')
+    True
+    >>> np.dtype('<i2') == native_dt.newbyteorder('<')
+    True
+    >>> np.dtype('<i2') == native_dt.newbyteorder('L')
+    True
+    >>> np.dtype('>i2') == native_dt.newbyteorder('>')
+    True
+    >>> np.dtype('>i2') == native_dt.newbyteorder('B')
+    True
+
+    """))
+
+
+##############################################################################
+#
+# Datetime-related Methods
+#
+##############################################################################
+
+add_newdoc('numpy.core.multiarray', 'busdaycalendar',
+    """
+    busdaycalendar(weekmask='1111100', holidays=None)
+
+    A business day calendar object that efficiently stores information
+    defining valid days for the busday family of functions.
+
+    The default valid days are Monday through Friday ("business days").
+    A busdaycalendar object can be specified with any set of weekly
+    valid days, plus an optional set of "holiday" dates that will always
+    be invalid.
+
+    Once a busdaycalendar object is created, the weekmask and holidays
+    cannot be modified.
+
+    .. versionadded:: 1.7.0
+
+    Parameters
+    ----------
+    weekmask : str or array_like of bool, optional
+        A seven-element array indicating which of Monday through Sunday are
+        valid days. May be specified as a length-seven list or array, like
+        [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
+        like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
+        weekdays, optionally separated by white space. Valid abbreviations
+        are: Mon Tue Wed Thu Fri Sat Sun
+    holidays : array_like of datetime64[D], optional
+        An array of dates to consider as invalid dates, no matter which
+        weekday they fall upon.  Holiday dates may be specified in any
+        order, and NaT (not-a-time) dates are ignored.  This list is
+        saved in a normalized form that is suited for fast calculations
+        of valid days.
+
+    Returns
+    -------
+    out : busdaycalendar
+        A business day calendar object containing the specified
+        weekmask and holidays values.
+
+    See Also
+    --------
+    is_busday : Returns a boolean array indicating valid days.
+    busday_offset : Applies an offset counted in valid days.
+    busday_count : Counts how many valid days are in a half-open date range.
+
+    Attributes
+    ----------
+    Note: once a busdaycalendar object is created, you cannot modify the
+    weekmask or holidays.  The attributes return copies of internal data.
+    weekmask : (copy) seven-element array of bool
+    holidays : (copy) sorted array of datetime64[D]
+
+    Examples
+    --------
+    >>> # Some important days in July
+    ... bdd = np.busdaycalendar(
+    ...             holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
+    >>> # Default is Monday to Friday weekdays
+    ... bdd.weekmask
+    array([ True,  True,  True,  True,  True, False, False])
+    >>> # Any holidays already on the weekend are removed
+    ...
bdd.holidays
+    array(['2011-07-01', '2011-07-04'], dtype='datetime64[D]')
+    """)
+
+add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('weekmask',
+    """A copy of the seven-element boolean mask indicating valid days."""))
+
+add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('holidays',
+    """A copy of the holiday array indicating additional invalid days."""))
+
+add_newdoc('numpy.core.multiarray', 'normalize_axis_index',
+    """
+    normalize_axis_index(axis, ndim, msg_prefix=None)
+
+    Normalizes an axis index, `axis`, such that it is a valid positive index
+    into the shape of an array with `ndim` dimensions. Raises an AxisError
+    with an appropriate message if this is not possible.
+
+    Used internally by all axis-checking logic.
+
+    .. versionadded:: 1.13.0
+
+    Parameters
+    ----------
+    axis : int
+        The un-normalized index of the axis. Can be negative
+    ndim : int
+        The number of dimensions of the array that `axis` should be normalized
+        against
+    msg_prefix : str
+        A prefix to put before the message, typically the name of the argument
+
+    Returns
+    -------
+    normalized_axis : int
+        The normalized axis index, such that `0 <= normalized_axis < ndim`
+
+    Raises
+    ------
+    AxisError
+        If the axis index is invalid, when `-ndim <= axis < ndim` is false.
+
+    Examples
+    --------
+    >>> normalize_axis_index(0, ndim=3)
+    0
+    >>> normalize_axis_index(1, ndim=3)
+    1
+    >>> normalize_axis_index(-1, ndim=3)
+    2
+
+    >>> normalize_axis_index(3, ndim=3)
+    Traceback (most recent call last):
+    ...
+    AxisError: axis 3 is out of bounds for array of dimension 3
+    >>> normalize_axis_index(-4, ndim=3, msg_prefix='axes_arg')
+    Traceback (most recent call last):
+    ...
+    AxisError: axes_arg: axis -4 is out of bounds for array of dimension 3
+    """)
+
+add_newdoc('numpy.core.multiarray', 'datetime_data',
+    """
+    datetime_data(dtype, /)
+
+    Get information about the step size of a date or time type.
+
+    The returned tuple can be passed as the second argument of `numpy.datetime64` and
+    `numpy.timedelta64`.
+
+    Parameters
+    ----------
+    dtype : dtype
+        The dtype object, which must be a `datetime64` or `timedelta64` type.
+
+    Returns
+    -------
+    unit : str
+        The :ref:`datetime unit <arrays.dtypes.dateunits>` on which this dtype
+        is based.
+    count : int
+        The number of base units in a step.
+
+    Examples
+    --------
+    >>> dt_25s = np.dtype('timedelta64[25s]')
+    >>> np.datetime_data(dt_25s)
+    ('s', 25)
+    >>> np.array(10, dt_25s).astype('timedelta64[s]')
+    array(250, dtype='timedelta64[s]')
+
+    The result can be used to construct a datetime that uses the same units
+    as a timedelta
+
+    >>> np.datetime64('2010', np.datetime_data(dt_25s))
+    numpy.datetime64('2010-01-01T00:00:00','25s')
+    """)
+
+
+##############################################################################
+#
+# Documentation for `generic` attributes and methods
+#
+##############################################################################
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+    """
+    Base class for numpy scalar types.
+
+    Class from which most (all?) numpy scalar types are derived.  For
+    consistency, exposes the same API as `ndarray`, despite many
+    consequent attributes being either "get-only," or completely irrelevant.
+    This is the class from which it is strongly suggested users should derive
+    custom scalar types.
+
+    """)
+
+# Attributes
+
+def refer_to_array_attribute(attr, method=True):
+    docstring = """
+    Scalar {} identical to the corresponding array attribute.
+
+    Please see `ndarray.{}`.
+ """ + + return attr, docstring.format("method" if method else "attribute", attr) + + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('T', method=False)) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('base', method=False)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('data', + """Pointer to start of data.""")) + +add_newdoc('numpy.core.numerictypes', 'generic', ('dtype', + """Get array data-descriptor.""")) + +add_newdoc('numpy.core.numerictypes', 'generic', ('flags', + """The integer value of flags.""")) + +add_newdoc('numpy.core.numerictypes', 'generic', ('flat', + """A 1-D view of the scalar.""")) + +add_newdoc('numpy.core.numerictypes', 'generic', ('imag', + """The imaginary part of the scalar.""")) + +add_newdoc('numpy.core.numerictypes', 'generic', ('itemsize', + """The length of one element in bytes.""")) + +add_newdoc('numpy.core.numerictypes', 'generic', ('nbytes', + """The length of the scalar in bytes.""")) + +add_newdoc('numpy.core.numerictypes', 'generic', ('ndim', + """The number of array dimensions.""")) + +add_newdoc('numpy.core.numerictypes', 'generic', ('real', + """The real part of the scalar.""")) + +add_newdoc('numpy.core.numerictypes', 'generic', ('shape', + """Tuple of array dimensions.""")) + +add_newdoc('numpy.core.numerictypes', 'generic', ('size', + """The number of elements in the gentype.""")) + +add_newdoc('numpy.core.numerictypes', 'generic', ('strides', + """Tuple of bytes steps in each dimension.""")) + +# Methods + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('all')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('any')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('argmax')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('argmin')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('argsort')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('astype')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('byteswap')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('choose')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('clip')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('compress')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('conjugate')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('copy')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('cumprod')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('cumsum')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('diagonal')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('dump')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('dumps')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('fill')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('flatten')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('getfield')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('item')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('itemset')) + 
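+
+# For illustration, refer_to_array_attribute('max') produces the pair
+# ('max', <docstring>) where the docstring reads "Scalar method identical
+# to the corresponding array attribute. Please see `ndarray.max`."; the
+# add_newdoc calls below attach these generated docstrings to np.generic.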
+add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('max')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('mean')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('min')) + +add_newdoc('numpy.core.numerictypes', 'generic', ('newbyteorder', + """ + newbyteorder(new_order='S', /) + + Return a new `dtype` with a different byte order. + + Changes are also made in all fields and sub-arrays of the data type. + + The `new_order` code can be any from the following: + + * 'S' - swap dtype from current to opposite endian + * {'<', 'little'} - little endian + * {'>', 'big'} - big endian + * '=' - native order + * {'|', 'I'} - ignore (no change to byte order) + + Parameters + ---------- + new_order : str, optional + Byte order to force; a value from the byte order specifications + above. The default value ('S') results in swapping the current + byte order. + + + Returns + ------- + new_dtype : dtype + New `dtype` object with the given change to the byte order. + + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('nonzero')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('prod')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('ptp')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('put')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('ravel')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('repeat')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('reshape')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('resize')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('round')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('searchsorted')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('setfield')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('setflags')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('sort')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('squeeze')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('std')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('sum')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('swapaxes')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('take')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('tofile')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('tolist')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('tostring')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('trace')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('transpose')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('var')) + +add_newdoc('numpy.core.numerictypes', 'generic', + refer_to_array_attribute('view')) + + +############################################################################## +# +# Documentation for scalar type abstract base classes in type hierarchy +# +############################################################################## + + 
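+
+# For orientation, the abstract classes documented below sit in the scalar
+# type hierarchy roughly as follows (a sketch, not exhaustive):
+#
+#   generic
+#   |-- number
+#   |   |-- integer
+#   |   |   |-- signedinteger
+#   |   |   `-- unsignedinteger
+#   |   `-- inexact
+#   |       |-- floating
+#   |       `-- complexfloating
+#   `-- flexible
+#       |-- character
+#       `-- void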
+add_newdoc('numpy.core.numerictypes', 'number', + """ + Abstract base class of all numeric scalar types. + + """) + +add_newdoc('numpy.core.numerictypes', 'integer', + """ + Abstract base class of all integer scalar types. + + """) + +add_newdoc('numpy.core.numerictypes', 'signedinteger', + """ + Abstract base class of all signed integer scalar types. + + """) + +add_newdoc('numpy.core.numerictypes', 'unsignedinteger', + """ + Abstract base class of all unsigned integer scalar types. + + """) + +add_newdoc('numpy.core.numerictypes', 'inexact', + """ + Abstract base class of all numeric scalar types with a (potentially) + inexact representation of the values in its range, such as + floating-point numbers. + + """) + +add_newdoc('numpy.core.numerictypes', 'floating', + """ + Abstract base class of all floating-point scalar types. + + """) + +add_newdoc('numpy.core.numerictypes', 'complexfloating', + """ + Abstract base class of all complex number scalar types that are made up of + floating-point numbers. + + """) + +add_newdoc('numpy.core.numerictypes', 'flexible', + """ + Abstract base class of all scalar types without predefined length. + The actual size of these types depends on the specific `np.dtype` + instantiation. + + """) + +add_newdoc('numpy.core.numerictypes', 'character', + """ + Abstract base class of all character string scalar types. + + """) diff --git a/MLPY/Lib/site-packages/numpy/core/_add_newdocs_scalars.py b/MLPY/Lib/site-packages/numpy/core/_add_newdocs_scalars.py new file mode 100644 index 0000000000000000000000000000000000000000..d9e00e762e876400f135c87beb4e1054455e0792 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/_add_newdocs_scalars.py @@ -0,0 +1,259 @@ +""" +This file is separate from ``_add_newdocs.py`` so that it can be mocked out by +our sphinx ``conf.py`` during doc builds, where we want to avoid showing +platform-dependent information. 
+""" +from numpy.core import dtype +from numpy.core import numerictypes as _numerictypes +from numpy.core.function_base import add_newdoc +import platform + +############################################################################## +# +# Documentation for concrete scalar classes +# +############################################################################## + +def numeric_type_aliases(aliases): + def type_aliases_gen(): + for alias, doc in aliases: + try: + alias_type = getattr(_numerictypes, alias) + except AttributeError: + # The set of aliases that actually exist varies between platforms + pass + else: + yield (alias_type, alias, doc) + return list(type_aliases_gen()) + + +possible_aliases = numeric_type_aliases([ + ('int8', '8-bit signed integer (``-128`` to ``127``)'), + ('int16', '16-bit signed integer (``-32_768`` to ``32_767``)'), + ('int32', '32-bit signed integer (``-2_147_483_648`` to ``2_147_483_647``)'), + ('int64', '64-bit signed integer (``-9_223_372_036_854_775_808`` to ``9_223_372_036_854_775_807``)'), + ('intp', 'Signed integer large enough to fit pointer, compatible with C ``intptr_t``'), + ('uint8', '8-bit unsigned integer (``0`` to ``255``)'), + ('uint16', '16-bit unsigned integer (``0`` to ``65_535``)'), + ('uint32', '32-bit unsigned integer (``0`` to ``4_294_967_295``)'), + ('uint64', '64-bit unsigned integer (``0`` to ``18_446_744_073_709_551_615``)'), + ('uintp', 'Unsigned integer large enough to fit pointer, compatible with C ``uintptr_t``'), + ('float16', '16-bit-precision floating-point number type: sign bit, 5 bits exponent, 10 bits mantissa'), + ('float32', '32-bit-precision floating-point number type: sign bit, 8 bits exponent, 23 bits mantissa'), + ('float64', '64-bit precision floating-point number type: sign bit, 11 bits exponent, 52 bits mantissa'), + ('float96', '96-bit extended-precision floating-point number type'), + ('float128', '128-bit extended-precision floating-point number type'), + ('complex64', 'Complex number type composed of 2 32-bit-precision floating-point numbers'), + ('complex128', 'Complex number type composed of 2 64-bit-precision floating-point numbers'), + ('complex192', 'Complex number type composed of 2 96-bit extended-precision floating-point numbers'), + ('complex256', 'Complex number type composed of 2 128-bit extended-precision floating-point numbers'), + ]) + + + + +def add_newdoc_for_scalar_type(obj, fixed_aliases, doc): + # note: `:field: value` is rST syntax which renders as field lists. + o = getattr(_numerictypes, obj) + + character_code = dtype(o).char + canonical_name_doc = "" if obj == o.__name__ else ":Canonical name: `numpy.{}`\n ".format(obj) + alias_doc = ''.join(":Alias: `numpy.{}`\n ".format(alias) for alias in fixed_aliases) + alias_doc += ''.join(":Alias on this platform ({} {}): `numpy.{}`: {}.\n ".format(platform.system(), platform.machine(), alias, doc) + for (alias_type, alias, doc) in possible_aliases if alias_type is o) + docstring = """ + {doc} + + :Character code: ``'{character_code}'`` + {canonical_name_doc}{alias_doc} + """.format(doc=doc.strip(), character_code=character_code, + canonical_name_doc=canonical_name_doc, alias_doc=alias_doc) + + add_newdoc('numpy.core.numerictypes', obj, docstring) + + +add_newdoc_for_scalar_type('bool_', ['bool8'], + """ + Boolean type (True or False), stored as a byte. + + .. warning:: + + The :class:`bool_` type is not a subclass of the :class:`int_` type + (the :class:`bool_` is not even a number type). 
This is different
+       than Python's default implementation of :class:`bool` as a
+       sub-class of :class:`int`.
+    """)
+
+add_newdoc_for_scalar_type('byte', [],
+    """
+    Signed integer type, compatible with C ``char``.
+    """)
+
+add_newdoc_for_scalar_type('short', [],
+    """
+    Signed integer type, compatible with C ``short``.
+    """)
+
+add_newdoc_for_scalar_type('intc', [],
+    """
+    Signed integer type, compatible with C ``int``.
+    """)
+
+add_newdoc_for_scalar_type('int_', [],
+    """
+    Signed integer type, compatible with Python `int` and C ``long``.
+    """)
+
+add_newdoc_for_scalar_type('longlong', [],
+    """
+    Signed integer type, compatible with C ``long long``.
+    """)
+
+add_newdoc_for_scalar_type('ubyte', [],
+    """
+    Unsigned integer type, compatible with C ``unsigned char``.
+    """)
+
+add_newdoc_for_scalar_type('ushort', [],
+    """
+    Unsigned integer type, compatible with C ``unsigned short``.
+    """)
+
+add_newdoc_for_scalar_type('uintc', [],
+    """
+    Unsigned integer type, compatible with C ``unsigned int``.
+    """)
+
+add_newdoc_for_scalar_type('uint', [],
+    """
+    Unsigned integer type, compatible with C ``unsigned long``.
+    """)
+
+add_newdoc_for_scalar_type('ulonglong', [],
+    """
+    Unsigned integer type, compatible with C ``unsigned long long``.
+    """)
+
+add_newdoc_for_scalar_type('half', [],
+    """
+    Half-precision floating-point number type.
+    """)
+
+add_newdoc_for_scalar_type('single', [],
+    """
+    Single-precision floating-point number type, compatible with C ``float``.
+    """)
+
+add_newdoc_for_scalar_type('double', ['float_'],
+    """
+    Double-precision floating-point number type, compatible with Python `float`
+    and C ``double``.
+    """)
+
+add_newdoc_for_scalar_type('longdouble', ['longfloat'],
+    """
+    Extended-precision floating-point number type, compatible with C
+    ``long double`` but not necessarily with IEEE 754 quadruple-precision.
+    """)
+
+add_newdoc_for_scalar_type('csingle', ['singlecomplex'],
+    """
+    Complex number type composed of two single-precision floating-point
+    numbers.
+    """)
+
+add_newdoc_for_scalar_type('cdouble', ['cfloat', 'complex_'],
+    """
+    Complex number type composed of two double-precision floating-point
+    numbers, compatible with Python `complex`.
+    """)
+
+add_newdoc_for_scalar_type('clongdouble', ['clongfloat', 'longcomplex'],
+    """
+    Complex number type composed of two extended-precision floating-point
+    numbers.
+    """)
+
+add_newdoc_for_scalar_type('object_', [],
+    """
+    Any Python object.
+    """)
+
+add_newdoc_for_scalar_type('str_', ['unicode_'],
+    r"""
+    A unicode string.
+
+    When used in arrays, this type strips trailing null codepoints.
+
+    Unlike the builtin `str`, this supports the :ref:`python:bufferobjects`, exposing its
+    contents as UCS4:
+
+    >>> m = memoryview(np.str_("abc"))
+    >>> m.format
+    '3w'
+    >>> m.tobytes()
+    b'a\x00\x00\x00b\x00\x00\x00c\x00\x00\x00'
+    """)
+
+add_newdoc_for_scalar_type('bytes_', ['string_'],
+    r"""
+    A byte string.
+
+    When used in arrays, this type strips trailing null bytes.
+    """)
+
+add_newdoc_for_scalar_type('void', [],
+    r"""
+    Either an opaque sequence of bytes, or a structure.
+
+    >>> np.void(b'abcd')
+    void(b'\x61\x62\x63\x64')
+
+    Structured `void` scalars can only be constructed via extraction from
+    :ref:`structured_arrays`:
+
+    >>> arr = np.array((1, 2), dtype=[('x', np.int8), ('y', np.int8)])
+    >>> arr[()]
+    (1, 2)  # looks like a tuple, but is `np.void`
+    """)
+
+add_newdoc_for_scalar_type('datetime64', [],
+    """
+    If created from a 64-bit integer, it represents an offset from
+    ``1970-01-01T00:00:00``.
+ If created from string, the string can be in ISO 8601 date + or datetime format. + + >>> np.datetime64(10, 'Y') + numpy.datetime64('1980') + >>> np.datetime64('1980', 'Y') + numpy.datetime64('1980') + >>> np.datetime64(10, 'D') + numpy.datetime64('1970-01-11') + + See :ref:`arrays.datetime` for more information. + """) + +add_newdoc_for_scalar_type('timedelta64', [], + """ + A timedelta stored as a 64-bit integer. + + See :ref:`arrays.datetime` for more information. + """) + +# TODO: work out how to put this on the base class, np.floating +for float_name in ('half', 'single', 'double', 'longdouble'): + add_newdoc('numpy.core.numerictypes', float_name, ('as_integer_ratio', + """ + {ftype}.as_integer_ratio() -> (int, int) + + Return a pair of integers, whose ratio is exactly equal to the original + floating point number, and with a positive denominator. + Raise `OverflowError` on infinities and a `ValueError` on NaNs. + + >>> np.{ftype}(10.0).as_integer_ratio() + (10, 1) + >>> np.{ftype}(0.0).as_integer_ratio() + (0, 1) + >>> np.{ftype}(-.25).as_integer_ratio() + (-1, 4) + """.format(ftype=float_name))) diff --git a/MLPY/Lib/site-packages/numpy/core/_asarray.py b/MLPY/Lib/site-packages/numpy/core/_asarray.py new file mode 100644 index 0000000000000000000000000000000000000000..0d34f368635f10f182c5171986a67f9d9c356cce --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/_asarray.py @@ -0,0 +1,140 @@ +""" +Functions in the ``as*array`` family that promote array-likes into arrays. + +`require` fits this category despite its name not matching this pattern. +""" +from .overrides import ( + array_function_dispatch, + set_array_function_like_doc, + set_module, +) +from .multiarray import array, asanyarray + + +__all__ = ["require"] + + + +def _require_dispatcher(a, dtype=None, requirements=None, *, like=None): + return (like,) + + +@set_array_function_like_doc +@set_module('numpy') +def require(a, dtype=None, requirements=None, *, like=None): + """ + Return an ndarray of the provided type that satisfies requirements. + + This function is useful to be sure that an array with the correct flags + is returned for passing to compiled code (perhaps through ctypes). + + Parameters + ---------- + a : array_like + The object to be converted to a type-and-requirement-satisfying array. + dtype : data-type + The required data-type. If None preserve the current dtype. If your + application requires the data to be in native byteorder, include + a byteorder specification as a part of the dtype specification. + requirements : str or list of str + The requirements list can be any of the following + + * 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array + * 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array + * 'ALIGNED' ('A') - ensure a data-type aligned array + * 'WRITEABLE' ('W') - ensure a writable array + * 'OWNDATA' ('O') - ensure an array that owns its own data + * 'ENSUREARRAY', ('E') - ensure a base array, instead of a subclass + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + Array with specified requirements and type if given. + + See Also + -------- + asarray : Convert input to an ndarray. + asanyarray : Convert to an ndarray, but pass through ndarray subclasses. + ascontiguousarray : Convert input to a contiguous array. + asfortranarray : Convert input to an ndarray with column-major + memory order. + ndarray.flags : Information about the memory layout of the array. 
+ + Notes + ----- + The returned array will be guaranteed to have the listed requirements + by making a copy if needed. + + Examples + -------- + >>> x = np.arange(6).reshape(2,3) + >>> x.flags + C_CONTIGUOUS : True + F_CONTIGUOUS : False + OWNDATA : False + WRITEABLE : True + ALIGNED : True + WRITEBACKIFCOPY : False + UPDATEIFCOPY : False + + >>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F']) + >>> y.flags + C_CONTIGUOUS : False + F_CONTIGUOUS : True + OWNDATA : True + WRITEABLE : True + ALIGNED : True + WRITEBACKIFCOPY : False + UPDATEIFCOPY : False + + """ + if like is not None: + return _require_with_like( + a, + dtype=dtype, + requirements=requirements, + like=like, + ) + + possible_flags = {'C': 'C', 'C_CONTIGUOUS': 'C', 'CONTIGUOUS': 'C', + 'F': 'F', 'F_CONTIGUOUS': 'F', 'FORTRAN': 'F', + 'A': 'A', 'ALIGNED': 'A', + 'W': 'W', 'WRITEABLE': 'W', + 'O': 'O', 'OWNDATA': 'O', + 'E': 'E', 'ENSUREARRAY': 'E'} + if not requirements: + return asanyarray(a, dtype=dtype) + else: + requirements = {possible_flags[x.upper()] for x in requirements} + + if 'E' in requirements: + requirements.remove('E') + subok = False + else: + subok = True + + order = 'A' + if requirements >= {'C', 'F'}: + raise ValueError('Cannot specify both "C" and "F" order') + elif 'F' in requirements: + order = 'F' + requirements.remove('F') + elif 'C' in requirements: + order = 'C' + requirements.remove('C') + + arr = array(a, dtype=dtype, order=order, copy=False, subok=subok) + + for prop in requirements: + if not arr.flags[prop]: + arr = arr.copy(order) + break + return arr + + +_require_with_like = array_function_dispatch( + _require_dispatcher +)(require) diff --git a/MLPY/Lib/site-packages/numpy/core/_asarray.pyi b/MLPY/Lib/site-packages/numpy/core/_asarray.pyi new file mode 100644 index 0000000000000000000000000000000000000000..9d1a158301a0d52d6bde683263948f4232e5106c --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/_asarray.pyi @@ -0,0 +1,81 @@ +import sys +from typing import TypeVar, Union, Iterable, overload + +from numpy import ndarray, _OrderKACF +from numpy.typing import ArrayLike, DTypeLike + +if sys.version_info >= (3, 8): + from typing import Literal +else: + from typing_extensions import Literal + +_ArrayType = TypeVar("_ArrayType", bound=ndarray) + +# TODO: The following functions are now defined in C, so should be defined +# in a (not yet existing) `multiarray.pyi`. +# (with the exception of `require`) + +def asarray( + a: object, + dtype: DTypeLike = ..., + order: _OrderKACF = ..., + *, + like: ArrayLike = ... +) -> ndarray: ... +@overload +def asanyarray( + a: _ArrayType, + dtype: None = ..., + order: _OrderKACF = ..., + *, + like: ArrayLike = ... +) -> _ArrayType: ... +@overload +def asanyarray( + a: object, + dtype: DTypeLike = ..., + order: _OrderKACF = ..., + *, + like: ArrayLike = ... +) -> ndarray: ... +def ascontiguousarray( + a: object, dtype: DTypeLike = ..., *, like: ArrayLike = ... +) -> ndarray: ... +def asfortranarray( + a: object, dtype: DTypeLike = ..., *, like: ArrayLike = ... +) -> ndarray: ... + +_Requirements = Literal[ + "C", "C_CONTIGUOUS", "CONTIGUOUS", + "F", "F_CONTIGUOUS", "FORTRAN", + "A", "ALIGNED", + "W", "WRITEABLE", + "O", "OWNDATA" +] +_E = Literal["E", "ENSUREARRAY"] +_RequirementsWithE = Union[_Requirements, _E] + +@overload +def require( + a: _ArrayType, + dtype: None = ..., + requirements: Union[None, _Requirements, Iterable[_Requirements]] = ..., + *, + like: ArrayLike = ... +) -> _ArrayType: ... 
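+# Note on overload order (a sketch of the intent): an ndarray subclass passed
+# with dtype=None and no 'E'/'ENSUREARRAY' requirement keeps its type, so the
+# first overload preserves _ArrayType; requirement sets containing 'E' force a
+# base ndarray and are handled by the overloads below.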
+@overload +def require( + a: object, + dtype: DTypeLike = ..., + requirements: Union[_E, Iterable[_RequirementsWithE]] = ..., + *, + like: ArrayLike = ... +) -> ndarray: ... +@overload +def require( + a: object, + dtype: DTypeLike = ..., + requirements: Union[None, _Requirements, Iterable[_Requirements]] = ..., + *, + like: ArrayLike = ... +) -> ndarray: ... diff --git a/MLPY/Lib/site-packages/numpy/core/_dtype.py b/MLPY/Lib/site-packages/numpy/core/_dtype.py new file mode 100644 index 0000000000000000000000000000000000000000..c4f710d91ee3aa410c5d80987dc7ca0b19267232 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/_dtype.py @@ -0,0 +1,342 @@ +""" +A place for code to be called from the implementation of np.dtype + +String handling is much easier to do correctly in python. +""" +import numpy as np + + +_kind_to_stem = { + 'u': 'uint', + 'i': 'int', + 'c': 'complex', + 'f': 'float', + 'b': 'bool', + 'V': 'void', + 'O': 'object', + 'M': 'datetime', + 'm': 'timedelta', + 'S': 'bytes', + 'U': 'str', +} + + +def _kind_name(dtype): + try: + return _kind_to_stem[dtype.kind] + except KeyError as e: + raise RuntimeError( + "internal dtype error, unknown kind {!r}" + .format(dtype.kind) + ) from None + + +def __str__(dtype): + if dtype.fields is not None: + return _struct_str(dtype, include_align=True) + elif dtype.subdtype: + return _subarray_str(dtype) + elif issubclass(dtype.type, np.flexible) or not dtype.isnative: + return dtype.str + else: + return dtype.name + + +def __repr__(dtype): + arg_str = _construction_repr(dtype, include_align=False) + if dtype.isalignedstruct: + arg_str = arg_str + ", align=True" + return "dtype({})".format(arg_str) + + +def _unpack_field(dtype, offset, title=None): + """ + Helper function to normalize the items in dtype.fields. + + Call as: + + dtype, offset, title = _unpack_field(*dtype.fields[name]) + """ + return dtype, offset, title + + +def _isunsized(dtype): + # PyDataType_ISUNSIZED + return dtype.itemsize == 0 + + +def _construction_repr(dtype, include_align=False, short=False): + """ + Creates a string repr of the dtype, excluding the 'dtype()' part + surrounding the object. This object may be a string, a list, or + a dict depending on the nature of the dtype. This + is the object passed as the first parameter to the dtype + constructor, and if no additional constructor parameters are + given, will reproduce the exact memory layout. + + Parameters + ---------- + short : bool + If true, this creates a shorter repr using 'kind' and 'itemsize', instead + of the longer type name. + + include_align : bool + If true, this includes the 'align=True' parameter + inside the struct dtype construction dict when needed. Use this flag + if you want a proper repr string without the 'dtype()' part around it. + + If false, this does not preserve the + 'align=True' parameter or sticky NPY_ALIGNED_STRUCT flag for + struct arrays like the regular repr does, because the 'align' + flag is not part of first dtype constructor parameter. This + mode is intended for a full 'repr', where the 'align=True' is + provided as the second parameter. 
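+
+    For instance (the byte-order character shown assumes a little-endian
+    platform)::
+
+        >>> _construction_repr(np.dtype(np.float64))
+        "'float64'"
+        >>> _construction_repr(np.dtype(np.float64), short=True)
+        "'<f8'"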
+    """
+    if dtype.fields is not None:
+        return _struct_str(dtype, include_align=include_align)
+    elif dtype.subdtype:
+        return _subarray_str(dtype)
+    else:
+        return _scalar_str(dtype, short=short)
+
+
+def _scalar_str(dtype, short):
+    byteorder = _byte_order_str(dtype)
+
+    if dtype.type == np.bool_:
+        if short:
+            return "'?'"
+        else:
+            return "'bool'"
+
+    elif dtype.type == np.object_:
+        # The object reference may be different sizes on different
+        # platforms, so it should never include the itemsize here.
+        return "'O'"
+
+    elif dtype.type == np.string_:
+        if _isunsized(dtype):
+            return "'S'"
+        else:
+            return "'S%d'" % dtype.itemsize
+
+    elif dtype.type == np.unicode_:
+        if _isunsized(dtype):
+            return "'%sU'" % byteorder
+        else:
+            return "'%sU%d'" % (byteorder, dtype.itemsize / 4)
+
+    # unlike the other types, subclasses of void are preserved - but
+    # historically the repr does not actually reveal the subclass
+    elif issubclass(dtype.type, np.void):
+        if _isunsized(dtype):
+            return "'V'"
+        else:
+            return "'V%d'" % dtype.itemsize
+
+    elif dtype.type == np.datetime64:
+        return "'%sM8%s'" % (byteorder, _datetime_metadata_str(dtype))
+
+    elif dtype.type == np.timedelta64:
+        return "'%sm8%s'" % (byteorder, _datetime_metadata_str(dtype))
+
+    elif np.issubdtype(dtype, np.number):
+        # Short repr with endianness, like '<f8'
+        if short or dtype.byteorder not in ('=', '|'):
+            return "'%s%s%d'" % (byteorder, dtype.kind, dtype.itemsize)
+
+        # Longer repr, like 'float64'
+        else:
+            return "'%s%d'" % (_kind_name(dtype), 8*dtype.itemsize)
+
+    elif dtype.isbuiltin == 2:
+        return dtype.type.__name__
+
+    else:
+        raise RuntimeError(
+            "Internal error: NumPy dtype unrecognized type number")
+
+
+def _byte_order_str(dtype):
+    """ Normalize byteorder to '<' or '>' """
+    # hack to obtain the native and swapped byte order characters
+    swapped = np.dtype(int).newbyteorder('S')
+    native = swapped.newbyteorder('S')
+
+    byteorder = dtype.byteorder
+    if byteorder == '=':
+        return native.byteorder
+    if byteorder == 'S':
+        # TODO: this path can never be reached
+        return swapped.byteorder
+    elif byteorder == '|':
+        return ''
+    else:
+        return byteorder
+
+
+def _datetime_metadata_str(dtype):
+    # TODO: this duplicates the C metastr_to_unicode functionality
+    unit, count = np.datetime_data(dtype)
+    if unit == 'generic':
+        return ''
+    elif count == 1:
+        return '[{}]'.format(unit)
+    else:
+        return '[{}{}]'.format(count, unit)
+
+
+def _struct_dict_str(dtype, includealignedflag):
+    # unpack the fields dictionary into lists
+    names = dtype.names
+    fld_dtypes = []
+    offsets = []
+    titles = []
+    for name in names:
+        fld_dtype, offset, title = _unpack_field(*dtype.fields[name])
+        fld_dtypes.append(fld_dtype)
+        offsets.append(offset)
+        titles.append(title)
+
+    # Build up a string to make the dictionary
+
+    # First, the names
+    ret = "{'names':["
+    ret += ",".join(repr(name) for name in names)
+
+    # Second, the formats
+    ret += "], 'formats':["
+    ret += ",".join(
+        _construction_repr(fld_dtype, short=True) for fld_dtype in fld_dtypes)
+
+    # Third, the offsets
+    ret += "], 'offsets':["
+    ret += ",".join("%d" % offset for offset in offsets)
+
+    # Fourth, the titles
+    if any(title is not None for title in titles):
+        ret += "], 'titles':["
+        ret += ",".join(repr(title) for title in titles)
+
+    # Fifth, the itemsize
+    ret += "], 'itemsize':%d" % dtype.itemsize
+
+    if (includealignedflag and dtype.isalignedstruct):
+        # Finally, the aligned flag
+        ret += ", 'aligned':True}"
+    else:
+        ret += "}"
+
+    return ret
+
+
+def _is_packed(dtype):
+    """
+    Checks whether the structured data type in 'dtype'
+    has a simple layout, where all the fields are in order,
+    and follow each other with no alignment padding.
+
+    When this returns true, the dtype can be reconstructed
+    from a list of the field names and dtypes with no additional
+    dtype parameters.
+
+    Duplicates the C `is_dtype_struct_simple_unaligned_layout` function.
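+
+    For example (the aligned variant pads the second field to offset 4)::
+
+        >>> _is_packed(np.dtype([('a', 'i1'), ('b', 'i4')]))
+        True
+        >>> _is_packed(np.dtype([('a', 'i1'), ('b', 'i4')], align=True))
+        False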
+ """ + total_offset = 0 + for name in dtype.names: + fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name]) + if fld_offset != total_offset: + return False + total_offset += fld_dtype.itemsize + if total_offset != dtype.itemsize: + return False + return True + + +def _struct_list_str(dtype): + items = [] + for name in dtype.names: + fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name]) + + item = "(" + if title is not None: + item += "({!r}, {!r}), ".format(title, name) + else: + item += "{!r}, ".format(name) + # Special case subarray handling here + if fld_dtype.subdtype is not None: + base, shape = fld_dtype.subdtype + item += "{}, {}".format( + _construction_repr(base, short=True), + shape + ) + else: + item += _construction_repr(fld_dtype, short=True) + + item += ")" + items.append(item) + + return "[" + ", ".join(items) + "]" + + +def _struct_str(dtype, include_align): + # The list str representation can't include the 'align=' flag, + # so if it is requested and the struct has the aligned flag set, + # we must use the dict str instead. + if not (include_align and dtype.isalignedstruct) and _is_packed(dtype): + sub = _struct_list_str(dtype) + + else: + sub = _struct_dict_str(dtype, include_align) + + # If the data type isn't the default, void, show it + if dtype.type != np.void: + return "({t.__module__}.{t.__name__}, {f})".format(t=dtype.type, f=sub) + else: + return sub + + +def _subarray_str(dtype): + base, shape = dtype.subdtype + return "({}, {})".format( + _construction_repr(base, short=True), + shape + ) + + +def _name_includes_bit_suffix(dtype): + if dtype.type == np.object_: + # pointer size varies by system, best to omit it + return False + elif dtype.type == np.bool_: + # implied + return False + elif np.issubdtype(dtype, np.flexible) and _isunsized(dtype): + # unspecified + return False + else: + return True + + +def _name_get(dtype): + # provides dtype.name.__get__, documented as returning a "bit name" + + if dtype.isbuiltin == 2: + # user dtypes don't promise to do anything special + return dtype.type.__name__ + + if issubclass(dtype.type, np.void): + # historically, void subclasses preserve their name, eg `record64` + name = dtype.type.__name__ + else: + name = _kind_name(dtype) + + # append bit counts + if _name_includes_bit_suffix(dtype): + name += "{}".format(dtype.itemsize * 8) + + # append metadata to datetimes + if dtype.type in (np.datetime64, np.timedelta64): + name += _datetime_metadata_str(dtype) + + return name diff --git a/MLPY/Lib/site-packages/numpy/core/_dtype_ctypes.py b/MLPY/Lib/site-packages/numpy/core/_dtype_ctypes.py new file mode 100644 index 0000000000000000000000000000000000000000..bd51fd766134d8286afb142d68f746a1888f7768 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/_dtype_ctypes.py @@ -0,0 +1,117 @@ +""" +Conversion from ctypes to dtype. 
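+
+For example, a small made-up struct (`Point` is only an illustration here)
+maps to an aligned structured dtype::
+
+    import ctypes
+
+    class Point(ctypes.Structure):
+        _fields_ = [('x', ctypes.c_int32), ('y', ctypes.c_int32)]
+
+    dtype_from_ctypes_type(Point)
+    # -> dtype([('x', '<i4'), ('y', '<i4')], align=True) on little-endian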
+ +In an ideal world, we could achieve this through the PEP3118 buffer protocol, +something like:: + + def dtype_from_ctypes_type(t): + # needed to ensure that the shape of `t` is within memoryview.format + class DummyStruct(ctypes.Structure): + _fields_ = [('a', t)] + + # empty to avoid memory allocation + ctype_0 = (DummyStruct * 0)() + mv = memoryview(ctype_0) + + # convert the struct, and slice back out the field + return _dtype_from_pep3118(mv.format)['a'] + +Unfortunately, this fails because: + +* ctypes cannot handle length-0 arrays with PEP3118 (bpo-32782) +* PEP3118 cannot represent unions, but both numpy and ctypes can +* ctypes cannot handle big-endian structs with PEP3118 (bpo-32780) +""" + +# We delay-import ctypes for distributions that do not include it. +# While this module is not used unless the user passes in ctypes +# members, it is eagerly imported from numpy/core/__init__.py. +import numpy as np + + +def _from_ctypes_array(t): + return np.dtype((dtype_from_ctypes_type(t._type_), (t._length_,))) + + +def _from_ctypes_structure(t): + for item in t._fields_: + if len(item) > 2: + raise TypeError( + "ctypes bitfields have no dtype equivalent") + + if hasattr(t, "_pack_"): + import ctypes + formats = [] + offsets = [] + names = [] + current_offset = 0 + for fname, ftyp in t._fields_: + names.append(fname) + formats.append(dtype_from_ctypes_type(ftyp)) + # Each type has a default offset, this is platform dependent for some types. + effective_pack = min(t._pack_, ctypes.alignment(ftyp)) + current_offset = ((current_offset + effective_pack - 1) // effective_pack) * effective_pack + offsets.append(current_offset) + current_offset += ctypes.sizeof(ftyp) + + return np.dtype(dict( + formats=formats, + offsets=offsets, + names=names, + itemsize=ctypes.sizeof(t))) + else: + fields = [] + for fname, ftyp in t._fields_: + fields.append((fname, dtype_from_ctypes_type(ftyp))) + + # by default, ctypes structs are aligned + return np.dtype(fields, align=True) + + +def _from_ctypes_scalar(t): + """ + Return the dtype type with endianness included if it's the case + """ + if getattr(t, '__ctype_be__', None) is t: + return np.dtype('>' + t._type_) + elif getattr(t, '__ctype_le__', None) is t: + return np.dtype('<' + t._type_) + else: + return np.dtype(t._type_) + + +def _from_ctypes_union(t): + import ctypes + formats = [] + offsets = [] + names = [] + for fname, ftyp in t._fields_: + names.append(fname) + formats.append(dtype_from_ctypes_type(ftyp)) + offsets.append(0) # Union fields are offset to 0 + + return np.dtype(dict( + formats=formats, + offsets=offsets, + names=names, + itemsize=ctypes.sizeof(t))) + + +def dtype_from_ctypes_type(t): + """ + Construct a dtype object from a ctypes type + """ + import _ctypes + if issubclass(t, _ctypes.Array): + return _from_ctypes_array(t) + elif issubclass(t, _ctypes._Pointer): + raise TypeError("ctypes pointers have no dtype equivalent") + elif issubclass(t, _ctypes.Structure): + return _from_ctypes_structure(t) + elif issubclass(t, _ctypes.Union): + return _from_ctypes_union(t) + elif isinstance(getattr(t, '_type_', None), str): + return _from_ctypes_scalar(t) + else: + raise NotImplementedError( + "Unknown ctypes type {}".format(t.__name__)) diff --git a/MLPY/Lib/site-packages/numpy/core/_exceptions.py b/MLPY/Lib/site-packages/numpy/core/_exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..0cfdf7c90c17e68e8a6102999fddae8f6f8451ae --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/_exceptions.py @@ -0,0 +1,197 @@ 
+""" +Various richly-typed exceptions, that also help us deal with string formatting +in python where it's easier. + +By putting the formatting in `__str__`, we also avoid paying the cost for +users who silence the exceptions. +""" +from numpy.core.overrides import set_module + +def _unpack_tuple(tup): + if len(tup) == 1: + return tup[0] + else: + return tup + + +def _display_as_base(cls): + """ + A decorator that makes an exception class look like its base. + + We use this to hide subclasses that are implementation details - the user + should catch the base type, which is what the traceback will show them. + + Classes decorated with this decorator are subject to removal without a + deprecation warning. + """ + assert issubclass(cls, Exception) + cls.__name__ = cls.__base__.__name__ + return cls + + +class UFuncTypeError(TypeError): + """ Base class for all ufunc exceptions """ + def __init__(self, ufunc): + self.ufunc = ufunc + + +@_display_as_base +class _UFuncBinaryResolutionError(UFuncTypeError): + """ Thrown when a binary resolution fails """ + def __init__(self, ufunc, dtypes): + super().__init__(ufunc) + self.dtypes = tuple(dtypes) + assert len(self.dtypes) == 2 + + def __str__(self): + return ( + "ufunc {!r} cannot use operands with types {!r} and {!r}" + ).format( + self.ufunc.__name__, *self.dtypes + ) + + +@_display_as_base +class _UFuncNoLoopError(UFuncTypeError): + """ Thrown when a ufunc loop cannot be found """ + def __init__(self, ufunc, dtypes): + super().__init__(ufunc) + self.dtypes = tuple(dtypes) + + def __str__(self): + return ( + "ufunc {!r} did not contain a loop with signature matching types " + "{!r} -> {!r}" + ).format( + self.ufunc.__name__, + _unpack_tuple(self.dtypes[:self.ufunc.nin]), + _unpack_tuple(self.dtypes[self.ufunc.nin:]) + ) + + +@_display_as_base +class _UFuncCastingError(UFuncTypeError): + def __init__(self, ufunc, casting, from_, to): + super().__init__(ufunc) + self.casting = casting + self.from_ = from_ + self.to = to + + +@_display_as_base +class _UFuncInputCastingError(_UFuncCastingError): + """ Thrown when a ufunc input cannot be casted """ + def __init__(self, ufunc, casting, from_, to, i): + super().__init__(ufunc, casting, from_, to) + self.in_i = i + + def __str__(self): + # only show the number if more than one input exists + i_str = "{} ".format(self.in_i) if self.ufunc.nin != 1 else "" + return ( + "Cannot cast ufunc {!r} input {}from {!r} to {!r} with casting " + "rule {!r}" + ).format( + self.ufunc.__name__, i_str, self.from_, self.to, self.casting + ) + + +@_display_as_base +class _UFuncOutputCastingError(_UFuncCastingError): + """ Thrown when a ufunc output cannot be casted """ + def __init__(self, ufunc, casting, from_, to, i): + super().__init__(ufunc, casting, from_, to) + self.out_i = i + + def __str__(self): + # only show the number if more than one output exists + i_str = "{} ".format(self.out_i) if self.ufunc.nout != 1 else "" + return ( + "Cannot cast ufunc {!r} output {}from {!r} to {!r} with casting " + "rule {!r}" + ).format( + self.ufunc.__name__, i_str, self.from_, self.to, self.casting + ) + + +# Exception used in shares_memory() +@set_module('numpy') +class TooHardError(RuntimeError): + pass + + +@set_module('numpy') +class AxisError(ValueError, IndexError): + """ Axis supplied was invalid. 
""" + def __init__(self, axis, ndim=None, msg_prefix=None): + # single-argument form just delegates to base class + if ndim is None and msg_prefix is None: + msg = axis + + # do the string formatting here, to save work in the C code + else: + msg = ("axis {} is out of bounds for array of dimension {}" + .format(axis, ndim)) + if msg_prefix is not None: + msg = "{}: {}".format(msg_prefix, msg) + + super().__init__(msg) + + +@_display_as_base +class _ArrayMemoryError(MemoryError): + """ Thrown when an array cannot be allocated""" + def __init__(self, shape, dtype): + self.shape = shape + self.dtype = dtype + + @property + def _total_size(self): + num_bytes = self.dtype.itemsize + for dim in self.shape: + num_bytes *= dim + return num_bytes + + @staticmethod + def _size_to_string(num_bytes): + """ Convert a number of bytes into a binary size string """ + + # https://en.wikipedia.org/wiki/Binary_prefix + LOG2_STEP = 10 + STEP = 1024 + units = ['bytes', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB'] + + unit_i = max(num_bytes.bit_length() - 1, 1) // LOG2_STEP + unit_val = 1 << (unit_i * LOG2_STEP) + n_units = num_bytes / unit_val + del unit_val + + # ensure we pick a unit that is correct after rounding + if round(n_units) == STEP: + unit_i += 1 + n_units /= STEP + + # deal with sizes so large that we don't have units for them + if unit_i >= len(units): + new_unit_i = len(units) - 1 + n_units *= 1 << ((unit_i - new_unit_i) * LOG2_STEP) + unit_i = new_unit_i + + unit_name = units[unit_i] + # format with a sensible number of digits + if unit_i == 0: + # no decimal point on bytes + return '{:.0f} {}'.format(n_units, unit_name) + elif round(n_units) < 1000: + # 3 significant figures, if none are dropped to the left of the . + return '{:#.3g} {}'.format(n_units, unit_name) + else: + # just give all the digits otherwise + return '{:#.0f} {}'.format(n_units, unit_name) + + def __str__(self): + size_str = self._size_to_string(self._total_size) + return ( + "Unable to allocate {} for an array with shape {} and data type {}" + .format(size_str, self.shape, self.dtype) + ) diff --git a/MLPY/Lib/site-packages/numpy/core/_internal.py b/MLPY/Lib/site-packages/numpy/core/_internal.py new file mode 100644 index 0000000000000000000000000000000000000000..cc16b207cd263901da81749413e7157719e964fe --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/_internal.py @@ -0,0 +1,910 @@ +""" +A place for internal code + +Some things are more easily handled Python. 
+
+"""
+import ast
+import re
+import sys
+import platform
+import warnings
+
+from .multiarray import dtype, array, ndarray
+try:
+    import ctypes
+except ImportError:
+    ctypes = None
+
+IS_PYPY = platform.python_implementation() == 'PyPy'
+
+if sys.byteorder == 'little':
+    _nbo = '<'
+else:
+    _nbo = '>'
+
+def _makenames_list(adict, align):
+    allfields = []
+
+    for fname, obj in adict.items():
+        n = len(obj)
+        if not isinstance(obj, tuple) or n not in (2, 3):
+            raise ValueError("entry not a 2- or 3- tuple")
+        if n > 2 and obj[2] == fname:
+            continue
+        num = int(obj[1])
+        if num < 0:
+            raise ValueError("invalid offset.")
+        format = dtype(obj[0], align=align)
+        if n > 2:
+            title = obj[2]
+        else:
+            title = None
+        allfields.append((fname, format, num, title))
+    # sort by offsets
+    allfields.sort(key=lambda x: x[2])
+    names = [x[0] for x in allfields]
+    formats = [x[1] for x in allfields]
+    offsets = [x[2] for x in allfields]
+    titles = [x[3] for x in allfields]
+
+    return names, formats, offsets, titles
+
+# Called in PyArray_DescrConverter function when
+# a dictionary without "names" and "formats"
+# fields is used as a data-type descriptor.
+def _usefields(adict, align):
+    try:
+        names = adict[-1]
+    except KeyError:
+        names = None
+    if names is None:
+        names, formats, offsets, titles = _makenames_list(adict, align)
+    else:
+        formats = []
+        offsets = []
+        titles = []
+        for name in names:
+            res = adict[name]
+            formats.append(res[0])
+            offsets.append(res[1])
+            if len(res) > 2:
+                titles.append(res[2])
+            else:
+                titles.append(None)
+
+    return dtype({"names": names,
+                  "formats": formats,
+                  "offsets": offsets,
+                  "titles": titles}, align)
+
+
+# construct an array_protocol descriptor list
+# from the fields attribute of a descriptor
+# This calls itself recursively but should eventually hit
+# a descriptor that has no fields and then return
+# a simple typestring
+
+def _array_descr(descriptor):
+    fields = descriptor.fields
+    if fields is None:
+        subdtype = descriptor.subdtype
+        if subdtype is None:
+            if descriptor.metadata is None:
+                return descriptor.str
+            else:
+                new = descriptor.metadata.copy()
+                if new:
+                    return (descriptor.str, new)
+                else:
+                    return descriptor.str
+        else:
+            return (_array_descr(subdtype[0]), subdtype[1])
+
+    names = descriptor.names
+    ordered_fields = [fields[x] + (x,) for x in names]
+    result = []
+    offset = 0
+    for field in ordered_fields:
+        if field[1] > offset:
+            num = field[1] - offset
+            result.append(('', f'|V{num}'))
+            offset += num
+        elif field[1] < offset:
+            raise ValueError(
+                "dtype.descr is not defined for types with overlapping or "
+                "out-of-order fields")
+        if len(field) > 3:
+            name = (field[2], field[3])
+        else:
+            name = field[2]
+        if field[0].subdtype:
+            tup = (name, _array_descr(field[0].subdtype[0]),
+                   field[0].subdtype[1])
+        else:
+            tup = (name, _array_descr(field[0]))
+        offset += field[0].itemsize
+        result.append(tup)
+
+    if descriptor.itemsize > offset:
+        num = descriptor.itemsize - offset
+        result.append(('', f'|V{num}'))
+
+    return result
+
+# Build a new array from the information in a pickle.
+# Note that the name numpy.core._internal._reconstruct is embedded in
+# pickles of ndarrays made with NumPy before release 1.0
+# so don't remove the name here, or you'll
+# break backward compatibility.
+def _reconstruct(subtype, shape, dtype):
+    return ndarray.__new__(subtype, shape, dtype)
+
+
+# format_re was originally from numarray by J. Todd Miller
+
+format_re = re.compile(r'(?P<order1>[<>|=]?)'
+                       r'(?P<repeats> *[(]?[ ,0-9]*[)]? *)'
+                       r'(?P<order2>[<>|=]?)'
+                       r'(?P<dtype>[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)')
+sep_re = re.compile(r'\s*,\s*')
+space_re = re.compile(r'\s+$')
+
+# astr is a string (perhaps comma separated)
+
+_convorder = {'=': _nbo}
+
+def _commastring(astr):
+    startindex = 0
+    result = []
+    while startindex < len(astr):
+        mo = format_re.match(astr, pos=startindex)
+        try:
+            (order1, repeats, order2, dtype) = mo.groups()
+        except (TypeError, AttributeError):
+            raise ValueError(
+                f'format number {len(result)+1} of "{astr}" is not recognized'
+                ) from None
+        startindex = mo.end()
+        # Separator or ending padding
+        if startindex < len(astr):
+            if space_re.match(astr, pos=startindex):
+                startindex = len(astr)
+            else:
+                mo = sep_re.match(astr, pos=startindex)
+                if not mo:
+                    raise ValueError(
+                        'format number %d of "%s" is not recognized' %
+                        (len(result)+1, astr))
+                startindex = mo.end()
+
+        if order2 == '':
+            order = order1
+        elif order1 == '':
+            order = order2
+        else:
+            order1 = _convorder.get(order1, order1)
+            order2 = _convorder.get(order2, order2)
+            if (order1 != order2):
+                raise ValueError(
+                    'inconsistent byte-order specification %s and %s' %
+                    (order1, order2))
+            order = order1
+
+        if order in ('|', '=', _nbo):
+            order = ''
+        dtype = order + dtype
+        if (repeats == ''):
+            newitem = dtype
+        else:
+            newitem = (dtype, ast.literal_eval(repeats))
+        result.append(newitem)
+
+    return result
+
+class dummy_ctype:
+    def __init__(self, cls):
+        self._cls = cls
+    def __mul__(self, other):
+        return self
+    def __call__(self, *other):
+        return self._cls(other)
+    def __eq__(self, other):
+        return self._cls == other._cls
+    def __ne__(self, other):
+        return self._cls != other._cls
+
+def _getintp_ctype():
+    val = _getintp_ctype.cache
+    if val is not None:
+        return val
+    if ctypes is None:
+        import numpy as np
+        val = dummy_ctype(np.intp)
+    else:
+        char = dtype('p').char
+        if char == 'i':
+            val = ctypes.c_int
+        elif char == 'l':
+            val = ctypes.c_long
+        elif char == 'q':
+            val = ctypes.c_longlong
+        else:
+            val = ctypes.c_long
+    _getintp_ctype.cache = val
+    return val
+_getintp_ctype.cache = None
+
+# Used for .ctypes attribute of ndarray
+
+class _missing_ctypes:
+    def cast(self, num, obj):
+        return num.value
+
+    class c_void_p:
+        def __init__(self, ptr):
+            self.value = ptr
+
+
+class _ctypes:
+    def __init__(self, array, ptr=None):
+        self._arr = array
+
+        if ctypes:
+            self._ctypes = ctypes
+            self._data = self._ctypes.c_void_p(ptr)
+        else:
+            # fake a pointer-like object that holds onto the reference
+            self._ctypes = _missing_ctypes()
+            self._data = self._ctypes.c_void_p(ptr)
+            self._data._objects = array
+
+        if self._arr.ndim == 0:
+            self._zerod = True
+        else:
+            self._zerod = False
+
+    def data_as(self, obj):
+        """
+        Return the data pointer cast to a particular c-types object.
+        For example, calling ``self._as_parameter_`` is equivalent to
+        ``self.data_as(ctypes.c_void_p)``. Perhaps you want to use the data as a
+        pointer to a ctypes array of floating-point data:
+        ``self.data_as(ctypes.POINTER(ctypes.c_double))``.
+
+        The returned pointer will keep a reference to the array.
+        """
+        # _ctypes.cast function causes a circular reference of self._data in
+        # self._data._objects. Attributes of self._data cannot be released
+        # until gc.collect is called. Make a copy of the pointer first then let
+        # it hold the array reference.
This is a workaround to circumvent the + # CPython bug https://bugs.python.org/issue12836 + ptr = self._ctypes.cast(self._data, obj) + ptr._arr = self._arr + return ptr + + def shape_as(self, obj): + """ + Return the shape tuple as an array of some other c-types + type. For example: ``self.shape_as(ctypes.c_short)``. + """ + if self._zerod: + return None + return (obj*self._arr.ndim)(*self._arr.shape) + + def strides_as(self, obj): + """ + Return the strides tuple as an array of some other + c-types type. For example: ``self.strides_as(ctypes.c_longlong)``. + """ + if self._zerod: + return None + return (obj*self._arr.ndim)(*self._arr.strides) + + @property + def data(self): + """ + A pointer to the memory area of the array as a Python integer. + This memory area may contain data that is not aligned, or not in correct + byte-order. The memory area may not even be writeable. The array + flags and data-type of this array should be respected when passing this + attribute to arbitrary C-code to avoid trouble that can include Python + crashing. User Beware! The value of this attribute is exactly the same + as ``self._array_interface_['data'][0]``. + + Note that unlike ``data_as``, a reference will not be kept to the array: + code like ``ctypes.c_void_p((a + b).ctypes.data)`` will result in a + pointer to a deallocated array, and should be spelt + ``(a + b).ctypes.data_as(ctypes.c_void_p)`` + """ + return self._data.value + + @property + def shape(self): + """ + (c_intp*self.ndim): A ctypes array of length self.ndim where + the basetype is the C-integer corresponding to ``dtype('p')`` on this + platform. This base-type could be `ctypes.c_int`, `ctypes.c_long`, or + `ctypes.c_longlong` depending on the platform. + The c_intp type is defined accordingly in `numpy.ctypeslib`. + The ctypes array contains the shape of the underlying array. + """ + return self.shape_as(_getintp_ctype()) + + @property + def strides(self): + """ + (c_intp*self.ndim): A ctypes array of length self.ndim where + the basetype is the same as for the shape attribute. This ctypes array + contains the strides information from the underlying array. This strides + information is important for showing how many bytes must be jumped to + get to the next element in the array. + """ + return self.strides_as(_getintp_ctype()) + + @property + def _as_parameter_(self): + """ + Overrides the ctypes semi-magic method + + Enables `c_func(some_array.ctypes)` + """ + return self.data_as(ctypes.c_void_p) + + # Numpy 1.21.0, 2021-05-18 + + def get_data(self): + """Deprecated getter for the `_ctypes.data` property. + + .. deprecated:: 1.21 + """ + warnings.warn('"get_data" is deprecated. Use "data" instead', + DeprecationWarning, stacklevel=2) + return self.data + + def get_shape(self): + """Deprecated getter for the `_ctypes.shape` property. + + .. deprecated:: 1.21 + """ + warnings.warn('"get_shape" is deprecated. Use "shape" instead', + DeprecationWarning, stacklevel=2) + return self.shape + + def get_strides(self): + """Deprecated getter for the `_ctypes.strides` property. + + .. deprecated:: 1.21 + """ + warnings.warn('"get_strides" is deprecated. Use "strides" instead', + DeprecationWarning, stacklevel=2) + return self.strides + + def get_as_parameter(self): + """Deprecated getter for the `_ctypes._as_parameter_` property. + + .. deprecated:: 1.21 + """ + warnings.warn( + '"get_as_parameter" is deprecated. 
Use "_as_parameter_" instead', + DeprecationWarning, stacklevel=2, + ) + return self._as_parameter_ + + +def _newnames(datatype, order): + """ + Given a datatype and an order object, return a new names tuple, with the + order indicated + """ + oldnames = datatype.names + nameslist = list(oldnames) + if isinstance(order, str): + order = [order] + seen = set() + if isinstance(order, (list, tuple)): + for name in order: + try: + nameslist.remove(name) + except ValueError: + if name in seen: + raise ValueError(f"duplicate field name: {name}") from None + else: + raise ValueError(f"unknown field name: {name}") from None + seen.add(name) + return tuple(list(order) + nameslist) + raise ValueError(f"unsupported order value: {order}") + +def _copy_fields(ary): + """Return copy of structured array with padding between fields removed. + + Parameters + ---------- + ary : ndarray + Structured array from which to remove padding bytes + + Returns + ------- + ary_copy : ndarray + Copy of ary with padding bytes removed + """ + dt = ary.dtype + copy_dtype = {'names': dt.names, + 'formats': [dt.fields[name][0] for name in dt.names]} + return array(ary, dtype=copy_dtype, copy=True) + +def _getfield_is_safe(oldtype, newtype, offset): + """ Checks safety of getfield for object arrays. + + As in _view_is_safe, we need to check that memory containing objects is not + reinterpreted as a non-object datatype and vice versa. + + Parameters + ---------- + oldtype : data-type + Data type of the original ndarray. + newtype : data-type + Data type of the field being accessed by ndarray.getfield + offset : int + Offset of the field being accessed by ndarray.getfield + + Raises + ------ + TypeError + If the field access is invalid + + """ + if newtype.hasobject or oldtype.hasobject: + if offset == 0 and newtype == oldtype: + return + if oldtype.names is not None: + for name in oldtype.names: + if (oldtype.fields[name][1] == offset and + oldtype.fields[name][0] == newtype): + return + raise TypeError("Cannot get/set field of an object array") + return + +def _view_is_safe(oldtype, newtype): + """ Checks safety of a view involving object arrays, for example when + doing:: + + np.zeros(10, dtype=oldtype).view(newtype) + + Parameters + ---------- + oldtype : data-type + Data type of original ndarray + newtype : data-type + Data type of the view + + Raises + ------ + TypeError + If the new type is incompatible with the old type. + + """ + + # if the types are equivalent, there is no problem. 
+ # for example: dtype((np.record, 'i4,i4')) == dtype((np.void, 'i4,i4')) + if oldtype == newtype: + return + + if newtype.hasobject or oldtype.hasobject: + raise TypeError("Cannot change data-type for object array.") + return + +# Given a string containing a PEP 3118 format specifier, +# construct a NumPy dtype + +_pep3118_native_map = { + '?': '?', + 'c': 'S1', + 'b': 'b', + 'B': 'B', + 'h': 'h', + 'H': 'H', + 'i': 'i', + 'I': 'I', + 'l': 'l', + 'L': 'L', + 'q': 'q', + 'Q': 'Q', + 'e': 'e', + 'f': 'f', + 'd': 'd', + 'g': 'g', + 'Zf': 'F', + 'Zd': 'D', + 'Zg': 'G', + 's': 'S', + 'w': 'U', + 'O': 'O', + 'x': 'V', # padding +} +_pep3118_native_typechars = ''.join(_pep3118_native_map.keys()) + +_pep3118_standard_map = { + '?': '?', + 'c': 'S1', + 'b': 'b', + 'B': 'B', + 'h': 'i2', + 'H': 'u2', + 'i': 'i4', + 'I': 'u4', + 'l': 'i4', + 'L': 'u4', + 'q': 'i8', + 'Q': 'u8', + 'e': 'f2', + 'f': 'f', + 'd': 'd', + 'Zf': 'F', + 'Zd': 'D', + 's': 'S', + 'w': 'U', + 'O': 'O', + 'x': 'V', # padding +} +_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys()) + +_pep3118_unsupported_map = { + 'u': 'UCS-2 strings', + '&': 'pointers', + 't': 'bitfields', + 'X': 'function pointers', +} + +class _Stream: + def __init__(self, s): + self.s = s + self.byteorder = '@' + + def advance(self, n): + res = self.s[:n] + self.s = self.s[n:] + return res + + def consume(self, c): + if self.s[:len(c)] == c: + self.advance(len(c)) + return True + return False + + def consume_until(self, c): + if callable(c): + i = 0 + while i < len(self.s) and not c(self.s[i]): + i = i + 1 + return self.advance(i) + else: + i = self.s.index(c) + res = self.advance(i) + self.advance(len(c)) + return res + + @property + def next(self): + return self.s[0] + + def __bool__(self): + return bool(self.s) + + +def _dtype_from_pep3118(spec): + stream = _Stream(spec) + dtype, align = __dtype_from_pep3118(stream, is_subdtype=False) + return dtype + +def __dtype_from_pep3118(stream, is_subdtype): + field_spec = dict( + names=[], + formats=[], + offsets=[], + itemsize=0 + ) + offset = 0 + common_alignment = 1 + is_padding = False + + # Parse spec + while stream: + value = None + + # End of structure, bail out to upper level + if stream.consume('}'): + break + + # Sub-arrays (1) + shape = None + if stream.consume('('): + shape = stream.consume_until(')') + shape = tuple(map(int, shape.split(','))) + + # Byte order + if stream.next in ('@', '=', '<', '>', '^', '!'): + byteorder = stream.advance(1) + if byteorder == '!': + byteorder = '>' + stream.byteorder = byteorder + + # Byte order characters also control native vs. 
standard type sizes + if stream.byteorder in ('@', '^'): + type_map = _pep3118_native_map + type_map_chars = _pep3118_native_typechars + else: + type_map = _pep3118_standard_map + type_map_chars = _pep3118_standard_typechars + + # Item sizes + itemsize_str = stream.consume_until(lambda c: not c.isdigit()) + if itemsize_str: + itemsize = int(itemsize_str) + else: + itemsize = 1 + + # Data types + is_padding = False + + if stream.consume('T{'): + value, align = __dtype_from_pep3118( + stream, is_subdtype=True) + elif stream.next in type_map_chars: + if stream.next == 'Z': + typechar = stream.advance(2) + else: + typechar = stream.advance(1) + + is_padding = (typechar == 'x') + dtypechar = type_map[typechar] + if dtypechar in 'USV': + dtypechar += '%d' % itemsize + itemsize = 1 + numpy_byteorder = {'@': '=', '^': '='}.get( + stream.byteorder, stream.byteorder) + value = dtype(numpy_byteorder + dtypechar) + align = value.alignment + elif stream.next in _pep3118_unsupported_map: + desc = _pep3118_unsupported_map[stream.next] + raise NotImplementedError( + "Unrepresentable PEP 3118 data type {!r} ({})" + .format(stream.next, desc)) + else: + raise ValueError("Unknown PEP 3118 data type specifier %r" % stream.s) + + # + # Native alignment may require padding + # + # Here we assume that the presence of a '@' character implicitly implies + # that the start of the array is *already* aligned. + # + extra_offset = 0 + if stream.byteorder == '@': + start_padding = (-offset) % align + intra_padding = (-value.itemsize) % align + + offset += start_padding + + if intra_padding != 0: + if itemsize > 1 or (shape is not None and _prod(shape) > 1): + # Inject internal padding to the end of the sub-item + value = _add_trailing_padding(value, intra_padding) + else: + # We can postpone the injection of internal padding, + # as the item appears at most once + extra_offset += intra_padding + + # Update common alignment + common_alignment = _lcm(align, common_alignment) + + # Convert itemsize to sub-array + if itemsize != 1: + value = dtype((value, (itemsize,))) + + # Sub-arrays (2) + if shape is not None: + value = dtype((value, shape)) + + # Field name + if stream.consume(':'): + name = stream.consume_until(':') + else: + name = None + + if not (is_padding and name is None): + if name is not None and name in field_spec['names']: + raise RuntimeError(f"Duplicate field name '{name}' in PEP3118 format") + field_spec['names'].append(name) + field_spec['formats'].append(value) + field_spec['offsets'].append(offset) + + offset += value.itemsize + offset += extra_offset + + field_spec['itemsize'] = offset + + # extra final padding for aligned types + if stream.byteorder == '@': + field_spec['itemsize'] += (-offset) % common_alignment + + # Check if this was a simple 1-item type, and unwrap it + if (field_spec['names'] == [None] + and field_spec['offsets'][0] == 0 + and field_spec['itemsize'] == field_spec['formats'][0].itemsize + and not is_subdtype): + ret = field_spec['formats'][0] + else: + _fix_names(field_spec) + ret = dtype(field_spec) + + # Finished + return ret, common_alignment + +def _fix_names(field_spec): + """ Replace names which are None with the next unused f%d name """ + names = field_spec['names'] + for i, name in enumerate(names): + if name is not None: + continue + + j = 0 + while True: + name = f'f{j}' + if name not in names: + break + j = j + 1 + names[i] = name + +def _add_trailing_padding(value, padding): + """Inject the specified number of padding bytes at the end of a dtype""" + if value.fields 
is None: + field_spec = dict( + names=['f0'], + formats=[value], + offsets=[0], + itemsize=value.itemsize + ) + else: + fields = value.fields + names = value.names + field_spec = dict( + names=names, + formats=[fields[name][0] for name in names], + offsets=[fields[name][1] for name in names], + itemsize=value.itemsize + ) + + field_spec['itemsize'] += padding + return dtype(field_spec) + +def _prod(a): + p = 1 + for x in a: + p *= x + return p + +def _gcd(a, b): + """Calculate the greatest common divisor of a and b""" + while b: + a, b = b, a % b + return a + +def _lcm(a, b): + return a // _gcd(a, b) * b + +def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs): + """ Format the error message for when __array_ufunc__ gives up. """ + args_string = ', '.join(['{!r}'.format(arg) for arg in inputs] + + ['{}={!r}'.format(k, v) + for k, v in kwargs.items()]) + args = inputs + kwargs.get('out', ()) + types_string = ', '.join(repr(type(arg).__name__) for arg in args) + return ('operand type(s) all returned NotImplemented from ' + '__array_ufunc__({!r}, {!r}, {}): {}' + .format(ufunc, method, args_string, types_string)) + + +def array_function_errmsg_formatter(public_api, types): + """ Format the error message for when __array_ufunc__ gives up. """ + func_name = '{}.{}'.format(public_api.__module__, public_api.__name__) + return ("no implementation found for '{}' on types that implement " + '__array_function__: {}'.format(func_name, list(types))) + + +def _ufunc_doc_signature_formatter(ufunc): + """ + Builds a signature string which resembles PEP 457 + + This is used to construct the first line of the docstring + """ + + # input arguments are simple + if ufunc.nin == 1: + in_args = 'x' + else: + in_args = ', '.join(f'x{i+1}' for i in range(ufunc.nin)) + + # output arguments are both keyword or positional + if ufunc.nout == 0: + out_args = ', /, out=()' + elif ufunc.nout == 1: + out_args = ', /, out=None' + else: + out_args = '[, {positional}], / [, out={default}]'.format( + positional=', '.join( + 'out{}'.format(i+1) for i in range(ufunc.nout)), + default=repr((None,)*ufunc.nout) + ) + + # keyword only args depend on whether this is a gufunc + kwargs = ( + ", casting='same_kind'" + ", order='K'" + ", dtype=None" + ", subok=True" + ) + + # NOTE: gufuncs may or may not support the `axis` parameter + if ufunc.signature is None: + kwargs = f", where=True{kwargs}[, signature, extobj]" + else: + kwargs += "[, signature, extobj, axes, axis]" + + # join all the parts together + return '{name}({in_args}{out_args}, *{kwargs})'.format( + name=ufunc.__name__, + in_args=in_args, + out_args=out_args, + kwargs=kwargs + ) + + +def npy_ctypes_check(cls): + # determine if a class comes from ctypes, in order to work around + # a bug in the buffer protocol for those objects, bpo-10746 + try: + # ctypes class are new-style, so have an __mro__. This probably fails + # for ctypes classes with multiple inheritance. + if IS_PYPY: + # (..., _ctypes.basics._CData, Bufferable, object) + ctype_base = cls.__mro__[-3] + else: + # # (..., _ctypes._CData, object) + ctype_base = cls.__mro__[-2] + # right now, they're part of the _ctypes module + return '_ctypes' in ctype_base.__module__ + except Exception: + return False + + +class recursive: + ''' + A decorator class for recursive nested functions. 
+ Naive recursive nested functions hold a reference to themselves: + + def outer(*args): + def stringify_leaky(arg0, *arg1): + if len(arg1) > 0: + return stringify_leaky(*arg1) # <- HERE + return str(arg0) + stringify_leaky(*args) + + This design pattern creates a reference cycle that is difficult for a + garbage collector to resolve. The decorator class prevents the + cycle by passing the nested function in as an argument `self`: + + def outer(*args): + @recursive + def stringify(self, arg0, *arg1): + if len(arg1) > 0: + return self(*arg1) + return str(arg0) + stringify(*args) + + ''' + def __init__(self, func): + self.func = func + def __call__(self, *args, **kwargs): + return self.func(self, *args, **kwargs) + diff --git a/MLPY/Lib/site-packages/numpy/core/_internal.pyi b/MLPY/Lib/site-packages/numpy/core/_internal.pyi new file mode 100644 index 0000000000000000000000000000000000000000..8998b003a1e7ff237affeac65106b4fb2f0ba5c6 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/_internal.pyi @@ -0,0 +1,35 @@ +from typing import Any, TypeVar, Type, overload, Optional, Generic +import ctypes as ct + +from numpy import ndarray + +_CastT = TypeVar("_CastT", bound=ct._CanCastTo) # Copied from `ctypes.cast` +_CT = TypeVar("_CT", bound=ct._CData) +_PT = TypeVar("_PT", bound=Optional[int]) + +# TODO: Let the likes of `shape_as` and `strides_as` return `None` +# for 0D arrays once we've got shape-support + +class _ctypes(Generic[_PT]): + @overload + def __new__(cls, array: ndarray[Any, Any], ptr: None = ...) -> _ctypes[None]: ... + @overload + def __new__(cls, array: ndarray[Any, Any], ptr: _PT) -> _ctypes[_PT]: ... + + # NOTE: In practice `shape` and `strides` return one of the concrete + # platform dependant array-types (`c_int`, `c_long` or `c_longlong`) + # corresponding to C's `int_ptr_t`, as determined by `_getintp_ctype` + # TODO: Hook this in to the mypy plugin so that a more appropiate + # `ctypes._SimpleCData[int]` sub-type can be returned + @property + def data(self) -> _PT: ... + @property + def shape(self) -> ct.Array[ct.c_int64]: ... + @property + def strides(self) -> ct.Array[ct.c_int64]: ... + @property + def _as_parameter_(self) -> ct.c_void_p: ... + + def data_as(self, obj: Type[_CastT]) -> _CastT: ... + def shape_as(self, obj: Type[_CT]) -> ct.Array[_CT]: ... + def strides_as(self, obj: Type[_CT]) -> ct.Array[_CT]: ... diff --git a/MLPY/Lib/site-packages/numpy/core/_methods.py b/MLPY/Lib/site-packages/numpy/core/_methods.py new file mode 100644 index 0000000000000000000000000000000000000000..efd1fb850a6d450117b783137598664aa641003c --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/_methods.py @@ -0,0 +1,290 @@ +""" +Array methods which are called by both the C-code for the method +and the Python code for the NumPy-namespace function + +""" +import warnings +from contextlib import nullcontext + +from numpy.core import multiarray as mu +from numpy.core import umath as um +from numpy.core.multiarray import asanyarray +from numpy.core import numerictypes as nt +from numpy.core import _exceptions +from numpy._globals import _NoValue +from numpy.compat import pickle, os_fspath + +# save those O(100) nanoseconds! 
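+# Binding each ufunc's ``reduce`` method to a module-level name once at
+# import time skips two attribute lookups per call on these hot paths;
+# e.g. ``umr_sum(a, axis, dtype, out, keepdims, initial, where)`` is exactly
+# ``um.add.reduce(a, axis, dtype, out, keepdims, initial, where)``.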
+umr_maximum = um.maximum.reduce +umr_minimum = um.minimum.reduce +umr_sum = um.add.reduce +umr_prod = um.multiply.reduce +umr_any = um.logical_or.reduce +umr_all = um.logical_and.reduce + +# Complex types to -> (2,)float view for fast-path computation in _var() +_complex_to_float = { + nt.dtype(nt.csingle) : nt.dtype(nt.single), + nt.dtype(nt.cdouble) : nt.dtype(nt.double), +} +# Special case for windows: ensure double takes precedence +if nt.dtype(nt.longdouble) != nt.dtype(nt.double): + _complex_to_float.update({ + nt.dtype(nt.clongdouble) : nt.dtype(nt.longdouble), + }) + +# avoid keyword arguments to speed up parsing, saves about 15%-20% for very +# small reductions +def _amax(a, axis=None, out=None, keepdims=False, + initial=_NoValue, where=True): + return umr_maximum(a, axis, None, out, keepdims, initial, where) + +def _amin(a, axis=None, out=None, keepdims=False, + initial=_NoValue, where=True): + return umr_minimum(a, axis, None, out, keepdims, initial, where) + +def _sum(a, axis=None, dtype=None, out=None, keepdims=False, + initial=_NoValue, where=True): + return umr_sum(a, axis, dtype, out, keepdims, initial, where) + +def _prod(a, axis=None, dtype=None, out=None, keepdims=False, + initial=_NoValue, where=True): + return umr_prod(a, axis, dtype, out, keepdims, initial, where) + +def _any(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True): + # Parsing keyword arguments is currently fairly slow, so avoid it for now + if where is True: + return umr_any(a, axis, dtype, out, keepdims) + return umr_any(a, axis, dtype, out, keepdims, where=where) + +def _all(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True): + # Parsing keyword arguments is currently fairly slow, so avoid it for now + if where is True: + return umr_all(a, axis, dtype, out, keepdims) + return umr_all(a, axis, dtype, out, keepdims, where=where) + +def _count_reduce_items(arr, axis, keepdims=False, where=True): + # fast-path for the default case + if where is True: + # no boolean mask given, calculate items according to axis + if axis is None: + axis = tuple(range(arr.ndim)) + elif not isinstance(axis, tuple): + axis = (axis,) + items = nt.intp(1) + for ax in axis: + items *= arr.shape[mu.normalize_axis_index(ax, arr.ndim)] + else: + # TODO: Optimize case when `where` is broadcast along a non-reduction + # axis and full sum is more excessive than needed. + + # guarded to protect circular imports + from numpy.lib.stride_tricks import broadcast_to + # count True values in (potentially broadcasted) boolean mask + items = umr_sum(broadcast_to(where, arr.shape), axis, nt.intp, None, + keepdims) + return items + +# Numpy 1.17.0, 2019-02-24 +# Various clip behavior deprecations, marked with _clip_dep as a prefix. + +def _clip_dep_is_scalar_nan(a): + # guarded to protect circular imports + from numpy.core.fromnumeric import ndim + if ndim(a) != 0: + return False + try: + return um.isnan(a) + except TypeError: + return False + +def _clip_dep_is_byte_swapped(a): + if isinstance(a, mu.ndarray): + return not a.dtype.isnative + return False + +def _clip_dep_invoke_with_casting(ufunc, *args, out=None, casting=None, **kwargs): + # normal path + if casting is not None: + return ufunc(*args, out=out, casting=casting, **kwargs) + + # try to deal with broken casting rules + try: + return ufunc(*args, out=out, **kwargs) + except _exceptions._UFuncOutputCastingError as e: + # Numpy 1.17.0, 2019-02-24 + warnings.warn( + "Converting the output of clip from {!r} to {!r} is deprecated. 
" + "Pass `casting=\"unsafe\"` explicitly to silence this warning, or " + "correct the type of the variables.".format(e.from_, e.to), + DeprecationWarning, + stacklevel=2 + ) + return ufunc(*args, out=out, casting="unsafe", **kwargs) + +def _clip(a, min=None, max=None, out=None, *, casting=None, **kwargs): + if min is None and max is None: + raise ValueError("One of max or min must be given") + + # Numpy 1.17.0, 2019-02-24 + # This deprecation probably incurs a substantial slowdown for small arrays, + # it will be good to get rid of it. + if not _clip_dep_is_byte_swapped(a) and not _clip_dep_is_byte_swapped(out): + using_deprecated_nan = False + if _clip_dep_is_scalar_nan(min): + min = -float('inf') + using_deprecated_nan = True + if _clip_dep_is_scalar_nan(max): + max = float('inf') + using_deprecated_nan = True + if using_deprecated_nan: + warnings.warn( + "Passing `np.nan` to mean no clipping in np.clip has always " + "been unreliable, and is now deprecated. " + "In future, this will always return nan, like it already does " + "when min or max are arrays that contain nan. " + "To skip a bound, pass either None or an np.inf of an " + "appropriate sign.", + DeprecationWarning, + stacklevel=2 + ) + + if min is None: + return _clip_dep_invoke_with_casting( + um.minimum, a, max, out=out, casting=casting, **kwargs) + elif max is None: + return _clip_dep_invoke_with_casting( + um.maximum, a, min, out=out, casting=casting, **kwargs) + else: + return _clip_dep_invoke_with_casting( + um.clip, a, min, max, out=out, casting=casting, **kwargs) + +def _mean(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True): + arr = asanyarray(a) + + is_float16_result = False + + rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where) + if rcount == 0 if where is True else umr_any(rcount == 0, axis=None): + warnings.warn("Mean of empty slice.", RuntimeWarning, stacklevel=2) + + # Cast bool, unsigned int, and int to float64 by default + if dtype is None: + if issubclass(arr.dtype.type, (nt.integer, nt.bool_)): + dtype = mu.dtype('f8') + elif issubclass(arr.dtype.type, nt.float16): + dtype = mu.dtype('f4') + is_float16_result = True + + ret = umr_sum(arr, axis, dtype, out, keepdims, where=where) + if isinstance(ret, mu.ndarray): + ret = um.true_divide( + ret, rcount, out=ret, casting='unsafe', subok=False) + if is_float16_result and out is None: + ret = arr.dtype.type(ret) + elif hasattr(ret, 'dtype'): + if is_float16_result: + ret = arr.dtype.type(ret / rcount) + else: + ret = ret.dtype.type(ret / rcount) + else: + ret = ret / rcount + + return ret + +def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, + where=True): + arr = asanyarray(a) + + rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where) + # Make this warning show up on top. + if ddof >= rcount if where is True else umr_any(ddof >= rcount, axis=None): + warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning, + stacklevel=2) + + # Cast bool, unsigned int, and int to float64 by default + if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool_)): + dtype = mu.dtype('f8') + + # Compute the mean. + # Note that if dtype is not of inexact type then arraymean will + # not be either. + arrmean = umr_sum(arr, axis, dtype, keepdims=True, where=where) + # The shape of rcount has to match arrmean to not change the shape of out + # in broadcasting. Otherwise, it cannot be stored back to arrmean. 
+ if rcount.ndim == 0: + # fast-path for default case when where is True + div = rcount + else: + # matching rcount to arrmean when where is specified as array + div = rcount.reshape(arrmean.shape) + if isinstance(arrmean, mu.ndarray): + arrmean = um.true_divide(arrmean, div, out=arrmean, casting='unsafe', + subok=False) + else: + arrmean = arrmean.dtype.type(arrmean / rcount) + + # Compute sum of squared deviations from mean + # Note that x may not be inexact and that we need it to be an array, + # not a scalar. + x = asanyarray(arr - arrmean) + + if issubclass(arr.dtype.type, (nt.floating, nt.integer)): + x = um.multiply(x, x, out=x) + # Fast-paths for built-in complex types + elif x.dtype in _complex_to_float: + xv = x.view(dtype=(_complex_to_float[x.dtype], (2,))) + um.multiply(xv, xv, out=xv) + x = um.add(xv[..., 0], xv[..., 1], out=x.real).real + # Most general case; includes handling object arrays containing imaginary + # numbers and complex types with non-native byteorder + else: + x = um.multiply(x, um.conjugate(x), out=x).real + + ret = umr_sum(x, axis, dtype, out, keepdims=keepdims, where=where) + + # Compute degrees of freedom and make sure it is not negative. + rcount = um.maximum(rcount - ddof, 0) + + # divide by degrees of freedom + if isinstance(ret, mu.ndarray): + ret = um.true_divide( + ret, rcount, out=ret, casting='unsafe', subok=False) + elif hasattr(ret, 'dtype'): + ret = ret.dtype.type(ret / rcount) + else: + ret = ret / rcount + + return ret + +def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, + where=True): + ret = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof, + keepdims=keepdims, where=where) + + if isinstance(ret, mu.ndarray): + ret = um.sqrt(ret, out=ret) + elif hasattr(ret, 'dtype'): + ret = ret.dtype.type(um.sqrt(ret)) + else: + ret = um.sqrt(ret) + + return ret + +def _ptp(a, axis=None, out=None, keepdims=False): + return um.subtract( + umr_maximum(a, axis, None, out, keepdims), + umr_minimum(a, axis, None, None, keepdims), + out + ) + +def _dump(self, file, protocol=2): + if hasattr(file, 'write'): + ctx = nullcontext(file) + else: + ctx = open(os_fspath(file), "wb") + with ctx as f: + pickle.dump(self, f, protocol=protocol) + +def _dumps(self, protocol=2): + return pickle.dumps(self, protocol=protocol) diff --git a/MLPY/Lib/site-packages/numpy/core/_multiarray_tests.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/numpy/core/_multiarray_tests.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..4ad37f94f4abb9190f2c42f6eb9047890dc55872 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/_multiarray_tests.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/numpy/core/_multiarray_umath.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/numpy/core/_multiarray_umath.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..caa90a2f5161b0bc85a5fcb6dc0d3ea17f0c7c69 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/_multiarray_umath.cp39-win_amd64.pyd @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bd7d9fa3ae229d3feba0a6928864a889bd18e79178bd175831c8b1c15896b909 +size 2942976 diff --git a/MLPY/Lib/site-packages/numpy/core/_operand_flag_tests.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/numpy/core/_operand_flag_tests.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..e7d06717f912ce5f7fe947755adaa258344821a5 Binary files /dev/null and 
b/MLPY/Lib/site-packages/numpy/core/_operand_flag_tests.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/numpy/core/_rational_tests.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/numpy/core/_rational_tests.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..94222b378b2c6a051ac67c1979233bc57a1bf2da Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/_rational_tests.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/numpy/core/_simd.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/numpy/core/_simd.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..f9526324a20b5c680876f0e0fcafa2c407c70077 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/_simd.cp39-win_amd64.pyd @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b6b5e95abbe50bc275d61040c94f71316edd6ba542776108035e133ed3cc6b03 +size 1467904 diff --git a/MLPY/Lib/site-packages/numpy/core/_string_helpers.py b/MLPY/Lib/site-packages/numpy/core/_string_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..459e17cb8828512919223f75bb219340198f3bdf --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/_string_helpers.py @@ -0,0 +1,100 @@ +""" +String-handling utilities to avoid locale-dependence. + +Used primarily to generate type name aliases. +""" +# "import string" is costly to import! +# Construct the translation tables directly +# "A" = chr(65), "a" = chr(97) +_all_chars = [chr(_m) for _m in range(256)] +_ascii_upper = _all_chars[65:65+26] +_ascii_lower = _all_chars[97:97+26] +LOWER_TABLE = "".join(_all_chars[:65] + _ascii_lower + _all_chars[65+26:]) +UPPER_TABLE = "".join(_all_chars[:97] + _ascii_upper + _all_chars[97+26:]) + + +def english_lower(s): + """ Apply English case rules to convert ASCII strings to all lower case. + + This is an internal utility function to replace calls to str.lower() such + that we can avoid changing behavior with changing locales. In particular, + Turkish has distinct dotted and dotless variants of the Latin letter "I" in + both lowercase and uppercase. Thus, "I".lower() != "i" in a "tr" locale. + + Parameters + ---------- + s : str + + Returns + ------- + lowered : str + + Examples + -------- + >>> from numpy.core.numerictypes import english_lower + >>> english_lower('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_') + 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz0123456789_' + >>> english_lower('') + '' + """ + lowered = s.translate(LOWER_TABLE) + return lowered + + +def english_upper(s): + """ Apply English case rules to convert ASCII strings to all upper case. + + This is an internal utility function to replace calls to str.upper() such + that we can avoid changing behavior with changing locales. In particular, + Turkish has distinct dotted and dotless variants of the Latin letter "I" in + both lowercase and uppercase. Thus, "i".upper() != "I" in a "tr" locale. + + Parameters + ---------- + s : str + + Returns + ------- + uppered : str + + Examples + -------- + >>> from numpy.core.numerictypes import english_upper + >>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_') + 'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_' + >>> english_upper('') + '' + """ + uppered = s.translate(UPPER_TABLE) + return uppered + + +def english_capitalize(s): + """ Apply English case rules to convert the first character of an ASCII + string to upper case. 
+ + This is an internal utility function to replace calls to str.capitalize() + such that we can avoid changing behavior with changing locales. + + Parameters + ---------- + s : str + + Returns + ------- + capitalized : str + + Examples + -------- + >>> from numpy.core.numerictypes import english_capitalize + >>> english_capitalize('int8') + 'Int8' + >>> english_capitalize('Int8') + 'Int8' + >>> english_capitalize('') + '' + """ + if s: + return english_upper(s[0]) + s[1:] + else: + return s diff --git a/MLPY/Lib/site-packages/numpy/core/_struct_ufunc_tests.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/numpy/core/_struct_ufunc_tests.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..c6be6b742878ed0fa06c5760c8b80105f15fe21c Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/_struct_ufunc_tests.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/numpy/core/_type_aliases.py b/MLPY/Lib/site-packages/numpy/core/_type_aliases.py new file mode 100644 index 0000000000000000000000000000000000000000..bc8fd570dd12ef51c503ea8e02d78a9cb8179901 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/_type_aliases.py @@ -0,0 +1,244 @@ +""" +Due to compatibility, numpy has a very large number of different naming +conventions for the scalar types (those subclassing from `numpy.generic`). +This file produces a convoluted set of dictionaries mapping names to types, +and sometimes other mappings too. + +.. data:: allTypes + A dictionary of names to types that will be exposed as attributes through + ``np.core.numerictypes.*`` + +.. data:: sctypeDict + Similar to `allTypes`, but maps a broader set of aliases to their types. + +.. data:: sctypes + A dictionary keyed by a "type group" string, providing a list of types + under that group. 
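+
+   For example (illustrative; exact membership is platform dependent)::
+
+       >>> from numpy.core.numerictypes import sctypes
+       >>> sctypes['int']
+       [<class 'numpy.int8'>, <class 'numpy.int16'>, <class 'numpy.int32'>, <class 'numpy.int64'>]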
+ +""" + +from numpy.compat import unicode +from numpy.core._string_helpers import english_lower +from numpy.core.multiarray import typeinfo, dtype +from numpy.core._dtype import _kind_name + + +sctypeDict = {} # Contains all leaf-node scalar types with aliases +allTypes = {} # Collect the types we will add to the module + + +# separate the actual type info from the abstract base classes +_abstract_types = {} +_concrete_typeinfo = {} +for k, v in typeinfo.items(): + # make all the keys lowercase too + k = english_lower(k) + if isinstance(v, type): + _abstract_types[k] = v + else: + _concrete_typeinfo[k] = v + +_concrete_types = {v.type for k, v in _concrete_typeinfo.items()} + + +def _bits_of(obj): + try: + info = next(v for v in _concrete_typeinfo.values() if v.type is obj) + except StopIteration: + if obj in _abstract_types.values(): + msg = "Cannot count the bits of an abstract type" + raise ValueError(msg) from None + + # some third-party type - make a best-guess + return dtype(obj).itemsize * 8 + else: + return info.bits + + +def bitname(obj): + """Return a bit-width name for a given type object""" + bits = _bits_of(obj) + dt = dtype(obj) + char = dt.kind + base = _kind_name(dt) + + if base == 'object': + bits = 0 + + if bits != 0: + char = "%s%d" % (char, bits // 8) + + return base, bits, char + + +def _add_types(): + for name, info in _concrete_typeinfo.items(): + # define C-name and insert typenum and typechar references also + allTypes[name] = info.type + sctypeDict[name] = info.type + sctypeDict[info.char] = info.type + sctypeDict[info.num] = info.type + + for name, cls in _abstract_types.items(): + allTypes[name] = cls +_add_types() + +# This is the priority order used to assign the bit-sized NPY_INTxx names, which +# must match the order in npy_common.h in order for NPY_INTxx and np.intxx to be +# consistent. +# If two C types have the same size, then the earliest one in this list is used +# as the sized name. +_int_ctypes = ['long', 'longlong', 'int', 'short', 'byte'] +_uint_ctypes = list('u' + t for t in _int_ctypes) + +def _add_aliases(): + for name, info in _concrete_typeinfo.items(): + # these are handled by _add_integer_aliases + if name in _int_ctypes or name in _uint_ctypes: + continue + + # insert bit-width version for this class (if relevant) + base, bit, char = bitname(info.type) + + myname = "%s%d" % (base, bit) + + # ensure that (c)longdouble does not overwrite the aliases assigned to + # (c)double + if name in ('longdouble', 'clongdouble') and myname in allTypes: + continue + + allTypes[myname] = info.type + + # add mapping for both the bit name and the numarray name + sctypeDict[myname] = info.type + + # add forward, reverse, and string mapping to numarray + sctypeDict[char] = info.type + + # Add deprecated numeric-style type aliases manually, at some point + # we may want to deprecate the lower case "bytes0" version as well. + for name in ["Bytes0", "Datetime64", "Str0", "Uint32", "Uint64"]: + if english_lower(name) not in allTypes: + # Only one of Uint32 or Uint64, aliases of `np.uintp`, was (and is) defined, note that this + # is not UInt32/UInt64 (capital i), which is removed. 
+ continue + allTypes[name] = allTypes[english_lower(name)] + sctypeDict[name] = sctypeDict[english_lower(name)] + +_add_aliases() + +def _add_integer_aliases(): + seen_bits = set() + for i_ctype, u_ctype in zip(_int_ctypes, _uint_ctypes): + i_info = _concrete_typeinfo[i_ctype] + u_info = _concrete_typeinfo[u_ctype] + bits = i_info.bits # same for both + + for info, charname, intname in [ + (i_info,'i%d' % (bits//8,), 'int%d' % bits), + (u_info,'u%d' % (bits//8,), 'uint%d' % bits)]: + if bits not in seen_bits: + # sometimes two different types have the same number of bits + # if so, the one iterated over first takes precedence + allTypes[intname] = info.type + sctypeDict[intname] = info.type + sctypeDict[charname] = info.type + + seen_bits.add(bits) + +_add_integer_aliases() + +# We use these later +void = allTypes['void'] + +# +# Rework the Python names (so that float and complex and int are consistent +# with Python usage) +# +def _set_up_aliases(): + type_pairs = [('complex_', 'cdouble'), + ('int0', 'intp'), + ('uint0', 'uintp'), + ('single', 'float'), + ('csingle', 'cfloat'), + ('singlecomplex', 'cfloat'), + ('float_', 'double'), + ('intc', 'int'), + ('uintc', 'uint'), + ('int_', 'long'), + ('uint', 'ulong'), + ('cfloat', 'cdouble'), + ('longfloat', 'longdouble'), + ('clongfloat', 'clongdouble'), + ('longcomplex', 'clongdouble'), + ('bool_', 'bool'), + ('bytes_', 'string'), + ('string_', 'string'), + ('str_', 'unicode'), + ('unicode_', 'unicode'), + ('object_', 'object')] + for alias, t in type_pairs: + allTypes[alias] = allTypes[t] + sctypeDict[alias] = sctypeDict[t] + # Remove aliases overriding python types and modules + to_remove = ['ulong', 'object', 'int', 'float', + 'complex', 'bool', 'string', 'datetime', 'timedelta', + 'bytes', 'str'] + + for t in to_remove: + try: + del allTypes[t] + del sctypeDict[t] + except KeyError: + pass +_set_up_aliases() + + +sctypes = {'int': [], + 'uint':[], + 'float':[], + 'complex':[], + 'others':[bool, object, bytes, unicode, void]} + +def _add_array_type(typename, bits): + try: + t = allTypes['%s%d' % (typename, bits)] + except KeyError: + pass + else: + sctypes[typename].append(t) + +def _set_array_types(): + ibytes = [1, 2, 4, 8, 16, 32, 64] + fbytes = [2, 4, 8, 10, 12, 16, 32, 64] + for bytes in ibytes: + bits = 8*bytes + _add_array_type('int', bits) + _add_array_type('uint', bits) + for bytes in fbytes: + bits = 8*bytes + _add_array_type('float', bits) + _add_array_type('complex', 2*bits) + _gi = dtype('p') + if _gi.type not in sctypes['int']: + indx = 0 + sz = _gi.itemsize + _lst = sctypes['int'] + while (indx < len(_lst) and sz >= _lst[indx](0).itemsize): + indx += 1 + sctypes['int'].insert(indx, _gi.type) + sctypes['uint'].insert(indx, dtype('P').type) +_set_array_types() + + +# Add additional strings to the sctypeDict +_toadd = ['int', 'float', 'complex', 'bool', 'object', + 'str', 'bytes', ('a', 'bytes_')] + +for name in _toadd: + if isinstance(name, tuple): + sctypeDict[name[0]] = allTypes[name[1]] + else: + sctypeDict[name] = allTypes['%s_' % name] + +del _toadd, name diff --git a/MLPY/Lib/site-packages/numpy/core/_type_aliases.pyi b/MLPY/Lib/site-packages/numpy/core/_type_aliases.pyi new file mode 100644 index 0000000000000000000000000000000000000000..254bca3067205c2741f406c6b4b7aca46ee933ec --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/_type_aliases.pyi @@ -0,0 +1,19 @@ +import sys +from typing import Dict, Union, Type, List + +from numpy import generic, signedinteger, unsignedinteger, floating, complexfloating + +if 
sys.version_info >= (3, 8): + from typing import TypedDict +else: + from typing_extensions import TypedDict + +class _SCTypes(TypedDict): + int: List[Type[signedinteger]] + uint: List[Type[unsignedinteger]] + float: List[Type[floating]] + complex: List[Type[complexfloating]] + others: List[type] + +sctypeDict: Dict[Union[int, str], Type[generic]] +sctypes: _SCTypes diff --git a/MLPY/Lib/site-packages/numpy/core/_ufunc_config.py b/MLPY/Lib/site-packages/numpy/core/_ufunc_config.py new file mode 100644 index 0000000000000000000000000000000000000000..ae287643df36da7fb46bf4f8f4c16349c5ab3a50 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/_ufunc_config.py @@ -0,0 +1,446 @@ +""" +Functions for changing global ufunc configuration + +This provides helpers which wrap `umath.geterrobj` and `umath.seterrobj` +""" +import collections.abc +import contextlib + +from .overrides import set_module +from .umath import ( + UFUNC_BUFSIZE_DEFAULT, + ERR_IGNORE, ERR_WARN, ERR_RAISE, ERR_CALL, ERR_PRINT, ERR_LOG, ERR_DEFAULT, + SHIFT_DIVIDEBYZERO, SHIFT_OVERFLOW, SHIFT_UNDERFLOW, SHIFT_INVALID, +) +from . import umath + +__all__ = [ + "seterr", "geterr", "setbufsize", "getbufsize", "seterrcall", "geterrcall", + "errstate", +] + +_errdict = {"ignore": ERR_IGNORE, + "warn": ERR_WARN, + "raise": ERR_RAISE, + "call": ERR_CALL, + "print": ERR_PRINT, + "log": ERR_LOG} + +_errdict_rev = {value: key for key, value in _errdict.items()} + + +@set_module('numpy') +def seterr(all=None, divide=None, over=None, under=None, invalid=None): + """ + Set how floating-point errors are handled. + + Note that operations on integer scalar types (such as `int16`) are + handled like floating point, and are affected by these settings. + + Parameters + ---------- + all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional + Set treatment for all types of floating-point errors at once: + + - ignore: Take no action when the exception occurs. + - warn: Print a `RuntimeWarning` (via the Python `warnings` module). + - raise: Raise a `FloatingPointError`. + - call: Call a function specified using the `seterrcall` function. + - print: Print a warning directly to ``stdout``. + - log: Record error in a Log object specified by `seterrcall`. + + The default is not to change the current behavior. + divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional + Treatment for division by zero. + over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional + Treatment for floating-point overflow. + under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional + Treatment for floating-point underflow. + invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional + Treatment for invalid floating-point operation. + + Returns + ------- + old_settings : dict + Dictionary containing the old settings. + + See also + -------- + seterrcall : Set a callback function for the 'call' mode. + geterr, geterrcall, errstate + + Notes + ----- + The floating-point exceptions are defined in the IEEE 754 standard [1]_: + + - Division by zero: infinite result obtained from finite numbers. + - Overflow: result too large to be expressed. + - Underflow: result so close to zero that some precision + was lost. + - Invalid operation: result is not an expressible number, typically + indicates that a NaN was produced. + + .. 
[1] https://en.wikipedia.org/wiki/IEEE_754 + + Examples + -------- + >>> old_settings = np.seterr(all='ignore') #seterr to known value + >>> np.seterr(over='raise') + {'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'} + >>> np.seterr(**old_settings) # reset to default + {'divide': 'ignore', 'over': 'raise', 'under': 'ignore', 'invalid': 'ignore'} + + >>> np.int16(32000) * np.int16(3) + 30464 + >>> old_settings = np.seterr(all='warn', over='raise') + >>> np.int16(32000) * np.int16(3) + Traceback (most recent call last): + File "<stdin>", line 1, in <module> + FloatingPointError: overflow encountered in short_scalars + + >>> old_settings = np.seterr(all='print') + >>> np.geterr() + {'divide': 'print', 'over': 'print', 'under': 'print', 'invalid': 'print'} + >>> np.int16(32000) * np.int16(3) + 30464 + + """ + + pyvals = umath.geterrobj() + old = geterr() + + if divide is None: + divide = all or old['divide'] + if over is None: + over = all or old['over'] + if under is None: + under = all or old['under'] + if invalid is None: + invalid = all or old['invalid'] + + maskvalue = ((_errdict[divide] << SHIFT_DIVIDEBYZERO) + + (_errdict[over] << SHIFT_OVERFLOW) + + (_errdict[under] << SHIFT_UNDERFLOW) + + (_errdict[invalid] << SHIFT_INVALID)) + + pyvals[1] = maskvalue + umath.seterrobj(pyvals) + return old + + +@set_module('numpy') +def geterr(): + """ + Get the current way of handling floating-point errors. + + Returns + ------- + res : dict + A dictionary with keys "divide", "over", "under", and "invalid", + whose values are from the strings "ignore", "print", "log", "warn", + "raise", and "call". The keys represent possible floating-point + exceptions, and the values define how these exceptions are handled. + + See Also + -------- + geterrcall, seterr, seterrcall + + Notes + ----- + For complete documentation of the types of floating-point exceptions and + treatment options, see `seterr`. + + Examples + -------- + >>> np.geterr() + {'divide': 'warn', 'over': 'warn', 'under': 'ignore', 'invalid': 'warn'} + >>> np.arange(3.) / np.arange(3.) + array([nan, 1., 1.]) + + >>> oldsettings = np.seterr(all='warn', over='raise') + >>> np.geterr() + {'divide': 'warn', 'over': 'raise', 'under': 'warn', 'invalid': 'warn'} + >>> np.arange(3.) / np.arange(3.) + array([nan, 1., 1.]) + + """ + maskvalue = umath.geterrobj()[1] + mask = 7 + res = {} + val = (maskvalue >> SHIFT_DIVIDEBYZERO) & mask + res['divide'] = _errdict_rev[val] + val = (maskvalue >> SHIFT_OVERFLOW) & mask + res['over'] = _errdict_rev[val] + val = (maskvalue >> SHIFT_UNDERFLOW) & mask + res['under'] = _errdict_rev[val] + val = (maskvalue >> SHIFT_INVALID) & mask + res['invalid'] = _errdict_rev[val] + return res + + +@set_module('numpy') +def setbufsize(size): + """ + Set the size of the buffer used in ufuncs. + + Parameters + ---------- + size : int + Size of buffer. + + """ + if size > 10e6: + raise ValueError("Buffer size, %s, is too big." % size) + if size < 5: + raise ValueError("Buffer size, %s, is too small." % size) + if size % 16 != 0: + raise ValueError("Buffer size, %s, is not a multiple of 16." % size) + + pyvals = umath.geterrobj() + old = getbufsize() + pyvals[0] = size + umath.seterrobj(pyvals) + return old + + +@set_module('numpy') +def getbufsize(): + """ + Return the size of the buffer used in ufuncs. + + Returns + ------- + getbufsize : int + Size of ufunc buffer in bytes.
+ + """ + return umath.geterrobj()[0] + + +@set_module('numpy') +def seterrcall(func): + """ + Set the floating-point error callback function or log object. + + There are two ways to capture floating-point error messages. The first + is to set the error-handler to 'call', using `seterr`. Then, set + the function to call using this function. + + The second is to set the error-handler to 'log', using `seterr`. + Floating-point errors then trigger a call to the 'write' method of + the provided object. + + Parameters + ---------- + func : callable f(err, flag) or object with write method + Function to call upon floating-point errors ('call'-mode) or + object whose 'write' method is used to log such message ('log'-mode). + + The call function takes two arguments. The first is a string describing + the type of error (such as "divide by zero", "overflow", "underflow", + or "invalid value"), and the second is the status flag. The flag is a + byte, whose four least-significant bits indicate the type of error, one + of "divide", "over", "under", "invalid":: + + [0 0 0 0 divide over under invalid] + + In other words, ``flags = divide + 2*over + 4*under + 8*invalid``. + + If an object is provided, its write method should take one argument, + a string. + + Returns + ------- + h : callable, log instance or None + The old error handler. + + See Also + -------- + seterr, geterr, geterrcall + + Examples + -------- + Callback upon error: + + >>> def err_handler(type, flag): + ... print("Floating point error (%s), with flag %s" % (type, flag)) + ... + + >>> saved_handler = np.seterrcall(err_handler) + >>> save_err = np.seterr(all='call') + + >>> np.array([1, 2, 3]) / 0.0 + Floating point error (divide by zero), with flag 1 + array([inf, inf, inf]) + + >>> np.seterrcall(saved_handler) + + >>> np.seterr(**save_err) + {'divide': 'call', 'over': 'call', 'under': 'call', 'invalid': 'call'} + + Log error message: + + >>> class Log: + ... def write(self, msg): + ... print("LOG: %s" % msg) + ... + + >>> log = Log() + >>> saved_handler = np.seterrcall(log) + >>> save_err = np.seterr(all='log') + + >>> np.array([1, 2, 3]) / 0.0 + LOG: Warning: divide by zero encountered in true_divide + array([inf, inf, inf]) + + >>> np.seterrcall(saved_handler) + + >>> np.seterr(**save_err) + {'divide': 'log', 'over': 'log', 'under': 'log', 'invalid': 'log'} + + """ + if func is not None and not isinstance(func, collections.abc.Callable): + if (not hasattr(func, 'write') or + not isinstance(func.write, collections.abc.Callable)): + raise ValueError("Only callable can be used as callback") + pyvals = umath.geterrobj() + old = geterrcall() + pyvals[2] = func + umath.seterrobj(pyvals) + return old + + +@set_module('numpy') +def geterrcall(): + """ + Return the current callback function used on floating-point errors. + + When the error handling for a floating-point error (one of "divide", + "over", "under", or "invalid") is set to 'call' or 'log', the function + that is called or the log instance that is written to is returned by + `geterrcall`. This function or log instance has been set with + `seterrcall`. + + Returns + ------- + errobj : callable, log instance or None + The current error handler. If no handler was set through `seterrcall`, + ``None`` is returned. + + See Also + -------- + seterrcall, seterr, geterr + + Notes + ----- + For complete documentation of the types of floating-point exceptions and + treatment options, see `seterr`. 
+ + Examples + -------- + >>> np.geterrcall() # we did not yet set a handler, returns None + + >>> oldsettings = np.seterr(all='call') + >>> def err_handler(type, flag): + ... print("Floating point error (%s), with flag %s" % (type, flag)) + >>> oldhandler = np.seterrcall(err_handler) + >>> np.array([1, 2, 3]) / 0.0 + Floating point error (divide by zero), with flag 1 + array([inf, inf, inf]) + + >>> cur_handler = np.geterrcall() + >>> cur_handler is err_handler + True + + """ + return umath.geterrobj()[2] + + +class _unspecified: + pass + + +_Unspecified = _unspecified() + + +@set_module('numpy') +class errstate(contextlib.ContextDecorator): + """ + errstate(**kwargs) + + Context manager for floating-point error handling. + + Using an instance of `errstate` as a context manager allows statements in + that context to execute with a known error handling behavior. Upon entering + the context the error handling is set with `seterr` and `seterrcall`, and + upon exiting it is reset to what it was before. + + .. versionchanged:: 1.17.0 + `errstate` is also usable as a function decorator, saving + a level of indentation if an entire function is wrapped. + See :py:class:`contextlib.ContextDecorator` for more information. + + Parameters + ---------- + kwargs : {divide, over, under, invalid} + Keyword arguments. The valid keywords are the possible floating-point + exceptions. Each keyword should have a string value that defines the + treatment for the particular error. Possible values are + {'ignore', 'warn', 'raise', 'call', 'print', 'log'}. + + See Also + -------- + seterr, geterr, seterrcall, geterrcall + + Notes + ----- + For complete documentation of the types of floating-point exceptions and + treatment options, see `seterr`. + + Examples + -------- + >>> olderr = np.seterr(all='ignore') # Set error handling to known state. + + >>> np.arange(3) / 0. + array([nan, inf, inf]) + >>> with np.errstate(divide='warn'): + ... np.arange(3) / 0. + array([nan, inf, inf]) + + >>> np.sqrt(-1) + nan + >>> with np.errstate(invalid='raise'): + ... 
np.sqrt(-1) + Traceback (most recent call last): + File "<stdin>", line 2, in <module> + FloatingPointError: invalid value encountered in sqrt + + Outside the context the error handling behavior has not changed: + + >>> np.geterr() + {'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'} + + """ + + def __init__(self, *, call=_Unspecified, **kwargs): + self.call = call + self.kwargs = kwargs + + def __enter__(self): + self.oldstate = seterr(**self.kwargs) + if self.call is not _Unspecified: + self.oldcall = seterrcall(self.call) + + def __exit__(self, *exc_info): + seterr(**self.oldstate) + if self.call is not _Unspecified: + seterrcall(self.oldcall) + + +def _setdef(): + defval = [UFUNC_BUFSIZE_DEFAULT, ERR_DEFAULT, None] + umath.seterrobj(defval) + + +# set the default values +_setdef() diff --git a/MLPY/Lib/site-packages/numpy/core/_ufunc_config.pyi b/MLPY/Lib/site-packages/numpy/core/_ufunc_config.pyi new file mode 100644 index 0000000000000000000000000000000000000000..48ac70877b63c72d10218a69b9a33e9d90cf1d59 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/_ufunc_config.pyi @@ -0,0 +1,43 @@ +import sys +from typing import Optional, Union, Callable, Any + +if sys.version_info >= (3, 8): + from typing import Literal, Protocol, TypedDict +else: + from typing_extensions import Literal, Protocol, TypedDict + +_ErrKind = Literal["ignore", "warn", "raise", "call", "print", "log"] +_ErrFunc = Callable[[str, int], Any] + +class _SupportsWrite(Protocol): + def write(self, __msg: str) -> Any: ... + +class _ErrDict(TypedDict): + divide: _ErrKind + over: _ErrKind + under: _ErrKind + invalid: _ErrKind + +class _ErrDictOptional(TypedDict, total=False): + all: Optional[_ErrKind] + divide: Optional[_ErrKind] + over: Optional[_ErrKind] + under: Optional[_ErrKind] + invalid: Optional[_ErrKind] + +def seterr( + all: Optional[_ErrKind] = ..., + divide: Optional[_ErrKind] = ..., + over: Optional[_ErrKind] = ..., + under: Optional[_ErrKind] = ..., + invalid: Optional[_ErrKind] = ..., +) -> _ErrDict: ... +def geterr() -> _ErrDict: ... +def setbufsize(size: int) -> int: ... +def getbufsize() -> int: ... +def seterrcall( + func: Union[None, _ErrFunc, _SupportsWrite] +) -> Union[None, _ErrFunc, _SupportsWrite]: ... +def geterrcall() -> Union[None, _ErrFunc, _SupportsWrite]: ...
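
Taken together, `seterr`/`geterr`, `setbufsize`, and `seterrcall` above all read and write one shared `[bufsize, errmask, errobj]` list through `umath.geterrobj()`/`umath.seterrobj()`, and `errstate` simply saves and restores that state around a block. A minimal usage sketch, relying only on the public names defined and stubbed in these two files (the array values are invented for illustration):

```python
import numpy as np

# errstate saves the error mask on entry and restores it on exit, so the
# 'raise' policy below is scoped to this block only.
def safe_ratio(a, b):
    with np.errstate(divide="raise", invalid="raise"):
        return a / b

old = np.seterr(all="warn")   # returns the previous per-category settings
print(np.geterr())            # {'divide': 'warn', 'over': 'warn', ...}

try:
    safe_ratio(np.arange(3.0), np.zeros(3))
except FloatingPointError as exc:
    print("caught:", exc)     # zero denominators now raise instead of warn

np.seterr(**old)              # restore whatever was in effect before
```
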
+ +# See `numpy/__init__.pyi` for the `errstate` class diff --git a/MLPY/Lib/site-packages/numpy/core/_umath_tests.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/numpy/core/_umath_tests.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..3dc36a17339eed0b0932914ce9e938ab84ff355d Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/_umath_tests.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/numpy/core/arrayprint.py b/MLPY/Lib/site-packages/numpy/core/arrayprint.py new file mode 100644 index 0000000000000000000000000000000000000000..26ebb21a7ba548e09737d7334cf1bf1208ba9c0a --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/arrayprint.py @@ -0,0 +1,1664 @@ +"""Array printing function + +$Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $ + +""" +__all__ = ["array2string", "array_str", "array_repr", "set_string_function", + "set_printoptions", "get_printoptions", "printoptions", + "format_float_positional", "format_float_scientific"] +__docformat__ = 'restructuredtext' + +# +# Written by Konrad Hinsen +# last revision: 1996-3-13 +# modified by Jim Hugunin 1997-3-3 for repr's and str's (and other details) +# and by Perry Greenfield 2000-4-1 for numarray +# and by Travis Oliphant 2005-8-22 for numpy + + +# Note: Both scalartypes.c.src and arrayprint.py implement strs for numpy +# scalars but for different purposes. scalartypes.c.src has str/reprs for when +# the scalar is printed on its own, while arrayprint.py has strs for when +# scalars are printed inside an ndarray. Only the latter strs are currently +# user-customizable. + +import functools +import numbers +try: + from _thread import get_ident +except ImportError: + from _dummy_thread import get_ident + +import numpy as np +from . import numerictypes as _nt +from .umath import absolute, isinf, isfinite, isnat +from . 
import multiarray +from .multiarray import (array, dragon4_positional, dragon4_scientific, + datetime_as_string, datetime_data, ndarray, + set_legacy_print_mode) +from .fromnumeric import any +from .numeric import concatenate, asarray, errstate +from .numerictypes import (longlong, intc, int_, float_, complex_, bool_, + flexible) +from .overrides import array_function_dispatch, set_module +import operator +import warnings +import contextlib + +_format_options = { + 'edgeitems': 3, # repr N leading and trailing items of each dimension + 'threshold': 1000, # total items > triggers array summarization + 'floatmode': 'maxprec', + 'precision': 8, # precision of floating point representations + 'suppress': False, # suppress printing small floating values in exp format + 'linewidth': 75, + 'nanstr': 'nan', + 'infstr': 'inf', + 'sign': '-', + 'formatter': None, + 'legacy': False} + +def _make_options_dict(precision=None, threshold=None, edgeitems=None, + linewidth=None, suppress=None, nanstr=None, infstr=None, + sign=None, formatter=None, floatmode=None, legacy=None): + """ make a dictionary out of the non-None arguments, plus sanity checks """ + + options = {k: v for k, v in locals().items() if v is not None} + + if suppress is not None: + options['suppress'] = bool(suppress) + + modes = ['fixed', 'unique', 'maxprec', 'maxprec_equal'] + if floatmode not in modes + [None]: + raise ValueError("floatmode option must be one of " + + ", ".join('"{}"'.format(m) for m in modes)) + + if sign not in [None, '-', '+', ' ']: + raise ValueError("sign option must be one of ' ', '+', or '-'") + + if legacy not in [None, False, '1.13']: + warnings.warn("legacy printing option can currently only be '1.13' or " + "`False`", stacklevel=3) + + if threshold is not None: + # forbid the bad threshold arg suggested by stack overflow, gh-12351 + if not isinstance(threshold, numbers.Number): + raise TypeError("threshold must be numeric") + if np.isnan(threshold): + raise ValueError("threshold must be non-NAN, try " + "sys.maxsize for untruncated representation") + + if precision is not None: + # forbid the bad precision arg as suggested by issue #18254 + try: + options['precision'] = operator.index(precision) + except TypeError as e: + raise TypeError('precision must be an integer') from e + + return options + + +@set_module('numpy') +def set_printoptions(precision=None, threshold=None, edgeitems=None, + linewidth=None, suppress=None, nanstr=None, infstr=None, + formatter=None, sign=None, floatmode=None, *, legacy=None): + """ + Set printing options. + + These options determine the way floating point numbers, arrays and + other NumPy objects are displayed. + + Parameters + ---------- + precision : int or None, optional + Number of digits of precision for floating point output (default 8). + May be None if `floatmode` is not `fixed`, to print as many digits as + necessary to uniquely specify the value. + threshold : int, optional + Total number of array elements which trigger summarization + rather than full repr (default 1000). + To always use the full repr without summarization, pass `sys.maxsize`. + edgeitems : int, optional + Number of array items in summary at beginning and end of + each dimension (default 3). + linewidth : int, optional + The number of characters per line for the purpose of inserting + line breaks (default 75). + suppress : bool, optional + If True, always print floating point numbers using fixed point + notation, in which case numbers equal to zero in the current precision + will print as zero. 
If False, then scientific notation is used when + absolute value of the smallest number is < 1e-4 or the ratio of the + maximum absolute value to the minimum is > 1e3. The default is False. + nanstr : str, optional + String representation of floating point not-a-number (default nan). + infstr : str, optional + String representation of floating point infinity (default inf). + sign : string, either '-', '+', or ' ', optional + Controls printing of the sign of floating-point types. If '+', always + print the sign of positive values. If ' ', always prints a space + (whitespace character) in the sign position of positive values. If + '-', omit the sign character of positive values. (default '-') + formatter : dict of callables, optional + If not None, the keys should indicate the type(s) that the respective + formatting function applies to. Callables should return a string. + Types that are not specified (by their corresponding keys) are handled + by the default formatters. Individual types for which a formatter + can be set are: + + - 'bool' + - 'int' + - 'timedelta' : a `numpy.timedelta64` + - 'datetime' : a `numpy.datetime64` + - 'float' + - 'longfloat' : 128-bit floats + - 'complexfloat' + - 'longcomplexfloat' : composed of two 128-bit floats + - 'numpystr' : types `numpy.string_` and `numpy.unicode_` + - 'object' : `np.object_` arrays + + Other keys that can be used to set a group of types at once are: + + - 'all' : sets all types + - 'int_kind' : sets 'int' + - 'float_kind' : sets 'float' and 'longfloat' + - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat' + - 'str_kind' : sets 'numpystr' + floatmode : str, optional + Controls the interpretation of the `precision` option for + floating-point types. Can take the following values + (default maxprec_equal): + + * 'fixed': Always print exactly `precision` fractional digits, + even if this would print more or fewer digits than + necessary to specify the value uniquely. + * 'unique': Print the minimum number of fractional digits necessary + to represent each value uniquely. Different elements may + have a different number of digits. The value of the + `precision` option is ignored. + * 'maxprec': Print at most `precision` fractional digits, but if + an element can be uniquely represented with fewer digits + only print it with that many. + * 'maxprec_equal': Print at most `precision` fractional digits, + but if every element in the array can be uniquely + represented with an equal number of fewer digits, use that + many digits for all elements. + legacy : string or `False`, optional + If set to the string `'1.13'` enables 1.13 legacy printing mode. This + approximates numpy 1.13 print output by including a space in the sign + position of floats and different behavior for 0d arrays. If set to + `False`, disables legacy mode. Unrecognized strings will be ignored + with a warning for forward compatibility. + + .. versionadded:: 1.14.0 + + See Also + -------- + get_printoptions, printoptions, set_string_function, array2string + + Notes + ----- + `formatter` is always reset with a call to `set_printoptions`. + + Use `printoptions` as a context manager to set the values temporarily. 
+ + Examples + -------- + Floating point precision can be set: + + >>> np.set_printoptions(precision=4) + >>> np.array([1.123456789]) + [1.1235] + + Long arrays can be summarised: + + >>> np.set_printoptions(threshold=5) + >>> np.arange(10) + array([0, 1, 2, ..., 7, 8, 9]) + + Small results can be suppressed: + + >>> eps = np.finfo(float).eps + >>> x = np.arange(4.) + >>> x**2 - (x + eps)**2 + array([-4.9304e-32, -4.4409e-16, 0.0000e+00, 0.0000e+00]) + >>> np.set_printoptions(suppress=True) + >>> x**2 - (x + eps)**2 + array([-0., -0., 0., 0.]) + + A custom formatter can be used to display array elements as desired: + + >>> np.set_printoptions(formatter={'all':lambda x: 'int: '+str(-x)}) + >>> x = np.arange(3) + >>> x + array([int: 0, int: -1, int: -2]) + >>> np.set_printoptions() # formatter gets reset + >>> x + array([0, 1, 2]) + + To put back the default options, you can use: + + >>> np.set_printoptions(edgeitems=3, infstr='inf', + ... linewidth=75, nanstr='nan', precision=8, + ... suppress=False, threshold=1000, formatter=None) + + Also to temporarily override options, use `printoptions` as a context manager: + + >>> with np.printoptions(precision=2, suppress=True, threshold=5): + ... np.linspace(0, 10, 10) + array([ 0. , 1.11, 2.22, ..., 7.78, 8.89, 10. ]) + + """ + opt = _make_options_dict(precision, threshold, edgeitems, linewidth, + suppress, nanstr, infstr, sign, formatter, + floatmode, legacy) + # formatter is always reset + opt['formatter'] = formatter + _format_options.update(opt) + + # set the C variable for legacy mode + if _format_options['legacy'] == '1.13': + set_legacy_print_mode(113) + # reset the sign option in legacy mode to avoid confusion + _format_options['sign'] = '-' + elif _format_options['legacy'] is False: + set_legacy_print_mode(0) + + +@set_module('numpy') +def get_printoptions(): + """ + Return the current print options. + + Returns + ------- + print_opts : dict + Dictionary of current print options with keys + + - precision : int + - threshold : int + - edgeitems : int + - linewidth : int + - suppress : bool + - nanstr : str + - infstr : str + - formatter : dict of callables + - sign : str + + For a full description of these options, see `set_printoptions`. + + See Also + -------- + set_printoptions, printoptions, set_string_function + + """ + return _format_options.copy() + + +@set_module('numpy') +@contextlib.contextmanager +def printoptions(*args, **kwargs): + """Context manager for setting print options. + + Set print options for the scope of the `with` block, and restore the old + options at the end. See `set_printoptions` for the full description of + available options. + + Examples + -------- + + >>> from numpy.testing import assert_equal + >>> with np.printoptions(precision=2): + ... np.array([2.0]) / 3 + array([0.67]) + + The `as`-clause of the `with`-statement gives the current print options: + + >>> with np.printoptions(precision=2) as opts: + ... assert_equal(opts, np.get_printoptions()) + + See Also + -------- + set_printoptions, get_printoptions + + """ + opts = np.get_printoptions() + try: + np.set_printoptions(*args, **kwargs) + yield np.get_printoptions() + finally: + np.set_printoptions(**opts) + + +def _leading_trailing(a, edgeitems, index=()): + """ + Keep only the N-D corners (leading and trailing edges) of an array. + + Should be passed a base-class ndarray, since it makes no guarantees about + preserving subclasses. 
+ """ + axis = len(index) + if axis == a.ndim: + return a[index] + + if a.shape[axis] > 2*edgeitems: + return concatenate(( + _leading_trailing(a, edgeitems, index + np.index_exp[ :edgeitems]), + _leading_trailing(a, edgeitems, index + np.index_exp[-edgeitems:]) + ), axis=axis) + else: + return _leading_trailing(a, edgeitems, index + np.index_exp[:]) + + +def _object_format(o): + """ Object arrays containing lists should be printed unambiguously """ + if type(o) is list: + fmt = 'list({!r})' + else: + fmt = '{!r}' + return fmt.format(o) + +def repr_format(x): + return repr(x) + +def str_format(x): + return str(x) + +def _get_formatdict(data, *, precision, floatmode, suppress, sign, legacy, + formatter, **kwargs): + # note: extra arguments in kwargs are ignored + + # wrapped in lambdas to avoid taking a code path with the wrong type of data + formatdict = { + 'bool': lambda: BoolFormat(data), + 'int': lambda: IntegerFormat(data), + 'float': lambda: FloatingFormat( + data, precision, floatmode, suppress, sign, legacy=legacy), + 'longfloat': lambda: FloatingFormat( + data, precision, floatmode, suppress, sign, legacy=legacy), + 'complexfloat': lambda: ComplexFloatingFormat( + data, precision, floatmode, suppress, sign, legacy=legacy), + 'longcomplexfloat': lambda: ComplexFloatingFormat( + data, precision, floatmode, suppress, sign, legacy=legacy), + 'datetime': lambda: DatetimeFormat(data, legacy=legacy), + 'timedelta': lambda: TimedeltaFormat(data), + 'object': lambda: _object_format, + 'void': lambda: str_format, + 'numpystr': lambda: repr_format} + + # we need to wrap values in `formatter` in a lambda, so that the interface + # is the same as the above values. + def indirect(x): + return lambda: x + + if formatter is not None: + fkeys = [k for k in formatter.keys() if formatter[k] is not None] + if 'all' in fkeys: + for key in formatdict.keys(): + formatdict[key] = indirect(formatter['all']) + if 'int_kind' in fkeys: + for key in ['int']: + formatdict[key] = indirect(formatter['int_kind']) + if 'float_kind' in fkeys: + for key in ['float', 'longfloat']: + formatdict[key] = indirect(formatter['float_kind']) + if 'complex_kind' in fkeys: + for key in ['complexfloat', 'longcomplexfloat']: + formatdict[key] = indirect(formatter['complex_kind']) + if 'str_kind' in fkeys: + formatdict['numpystr'] = indirect(formatter['str_kind']) + for key in formatdict.keys(): + if key in fkeys: + formatdict[key] = indirect(formatter[key]) + + return formatdict + +def _get_format_function(data, **options): + """ + find the right formatting function for the dtype_ + """ + dtype_ = data.dtype + dtypeobj = dtype_.type + formatdict = _get_formatdict(data, **options) + if issubclass(dtypeobj, _nt.bool_): + return formatdict['bool']() + elif issubclass(dtypeobj, _nt.integer): + if issubclass(dtypeobj, _nt.timedelta64): + return formatdict['timedelta']() + else: + return formatdict['int']() + elif issubclass(dtypeobj, _nt.floating): + if issubclass(dtypeobj, _nt.longfloat): + return formatdict['longfloat']() + else: + return formatdict['float']() + elif issubclass(dtypeobj, _nt.complexfloating): + if issubclass(dtypeobj, _nt.clongfloat): + return formatdict['longcomplexfloat']() + else: + return formatdict['complexfloat']() + elif issubclass(dtypeobj, (_nt.unicode_, _nt.string_)): + return formatdict['numpystr']() + elif issubclass(dtypeobj, _nt.datetime64): + return formatdict['datetime']() + elif issubclass(dtypeobj, _nt.object_): + return formatdict['object']() + elif issubclass(dtypeobj, _nt.void): + if 
dtype_.names is not None: + return StructuredVoidFormat.from_data(data, **options) + else: + return formatdict['void']() + else: + return formatdict['numpystr']() + + +def _recursive_guard(fillvalue='...'): + """ + Like the python 3.2 reprlib.recursive_repr, but forwards *args and **kwargs + + Decorates a function such that if it calls itself with the same first + argument, it returns `fillvalue` instead of recursing. + + Largely copied from reprlib.recursive_repr + """ + + def decorating_function(f): + repr_running = set() + + @functools.wraps(f) + def wrapper(self, *args, **kwargs): + key = id(self), get_ident() + if key in repr_running: + return fillvalue + repr_running.add(key) + try: + return f(self, *args, **kwargs) + finally: + repr_running.discard(key) + + return wrapper + + return decorating_function + + +# gracefully handle recursive calls, when object arrays contain themselves +@_recursive_guard() +def _array2string(a, options, separator=' ', prefix=""): + # The formatter __init__s in _get_format_function cannot deal with + # subclasses yet, and we also need to avoid recursion issues in + # _formatArray with subclasses which return 0d arrays in place of scalars + data = asarray(a) + if a.shape == (): + a = data + + if a.size > options['threshold']: + summary_insert = "..." + data = _leading_trailing(data, options['edgeitems']) + else: + summary_insert = "" + + # find the right formatting function for the array + format_function = _get_format_function(data, **options) + + # skip over "[" + next_line_prefix = " " + # skip over array( + next_line_prefix += " "*len(prefix) + + lst = _formatArray(a, format_function, options['linewidth'], + next_line_prefix, separator, options['edgeitems'], + summary_insert, options['legacy']) + return lst + + +def _array2string_dispatcher( + a, max_line_width=None, precision=None, + suppress_small=None, separator=None, prefix=None, + style=None, formatter=None, threshold=None, + edgeitems=None, sign=None, floatmode=None, suffix=None, + *, legacy=None): + return (a,) + + +@array_function_dispatch(_array2string_dispatcher, module='numpy') +def array2string(a, max_line_width=None, precision=None, + suppress_small=None, separator=' ', prefix="", + style=np._NoValue, formatter=None, threshold=None, + edgeitems=None, sign=None, floatmode=None, suffix="", + *, legacy=None): + """ + Return a string representation of an array. + + Parameters + ---------- + a : ndarray + Input array. + max_line_width : int, optional + Inserts newlines if text is longer than `max_line_width`. + Defaults to ``numpy.get_printoptions()['linewidth']``. + precision : int or None, optional + Floating point precision. + Defaults to ``numpy.get_printoptions()['precision']``. + suppress_small : bool, optional + Represent numbers "very close" to zero as zero; default is False. + Very close is defined by precision: if the precision is 8, e.g., + numbers smaller (in absolute value) than 5e-9 are represented as + zero. + Defaults to ``numpy.get_printoptions()['suppress']``. + separator : str, optional + Inserted between elements. + prefix : str, optional + suffix : str, optional + The length of the prefix and suffix strings are used to respectively + align and wrap the output. An array is typically printed as:: + + prefix + array2string(a) + suffix + + The output is left-padded by the length of the prefix string, and + wrapping is forced at the column ``max_line_width - len(suffix)``. + It should be noted that the content of prefix and suffix strings are + not included in the output. 
+ style : _NoValue, optional + Has no effect, do not use. + + .. deprecated:: 1.14.0 + formatter : dict of callables, optional + If not None, the keys should indicate the type(s) that the respective + formatting function applies to. Callables should return a string. + Types that are not specified (by their corresponding keys) are handled + by the default formatters. Individual types for which a formatter + can be set are: + + - 'bool' + - 'int' + - 'timedelta' : a `numpy.timedelta64` + - 'datetime' : a `numpy.datetime64` + - 'float' + - 'longfloat' : 128-bit floats + - 'complexfloat' + - 'longcomplexfloat' : composed of two 128-bit floats + - 'void' : type `numpy.void` + - 'numpystr' : types `numpy.string_` and `numpy.unicode_` + + Other keys that can be used to set a group of types at once are: + + - 'all' : sets all types + - 'int_kind' : sets 'int' + - 'float_kind' : sets 'float' and 'longfloat' + - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat' + - 'str_kind' : sets 'numpystr' + threshold : int, optional + Total number of array elements which trigger summarization + rather than full repr. + Defaults to ``numpy.get_printoptions()['threshold']``. + edgeitems : int, optional + Number of array items in summary at beginning and end of + each dimension. + Defaults to ``numpy.get_printoptions()['edgeitems']``. + sign : string, either '-', '+', or ' ', optional + Controls printing of the sign of floating-point types. If '+', always + print the sign of positive values. If ' ', always prints a space + (whitespace character) in the sign position of positive values. If + '-', omit the sign character of positive values. + Defaults to ``numpy.get_printoptions()['sign']``. + floatmode : str, optional + Controls the interpretation of the `precision` option for + floating-point types. + Defaults to ``numpy.get_printoptions()['floatmode']``. + Can take the following values: + + - 'fixed': Always print exactly `precision` fractional digits, + even if this would print more or fewer digits than + necessary to specify the value uniquely. + - 'unique': Print the minimum number of fractional digits necessary + to represent each value uniquely. Different elements may + have a different number of digits. The value of the + `precision` option is ignored. + - 'maxprec': Print at most `precision` fractional digits, but if + an element can be uniquely represented with fewer digits + only print it with that many. + - 'maxprec_equal': Print at most `precision` fractional digits, + but if every element in the array can be uniquely + represented with an equal number of fewer digits, use that + many digits for all elements. + legacy : string or `False`, optional + If set to the string `'1.13'` enables 1.13 legacy printing mode. This + approximates numpy 1.13 print output by including a space in the sign + position of floats and different behavior for 0d arrays. If set to + `False`, disables legacy mode. Unrecognized strings will be ignored + with a warning for forward compatibility. + + .. versionadded:: 1.14.0 + + Returns + ------- + array_str : str + String representation of the array. + + Raises + ------ + TypeError + if a callable in `formatter` does not return a string. + + See Also + -------- + array_str, array_repr, set_printoptions, get_printoptions + + Notes + ----- + If a formatter is specified for a certain type, the `precision` keyword is + ignored for that type. 
+ + This is a very flexible function; `array_repr` and `array_str` are using + `array2string` internally so keywords with the same name should work + identically in all three functions. + + Examples + -------- + >>> x = np.array([1e-16,1,2,3]) + >>> np.array2string(x, precision=2, separator=',', + ... suppress_small=True) + '[0.,1.,2.,3.]' + + >>> x = np.arange(3.) + >>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) + '[0.00 1.00 2.00]' + + >>> x = np.arange(3) + >>> np.array2string(x, formatter={'int':lambda x: hex(x)}) + '[0x0 0x1 0x2]' + + """ + + overrides = _make_options_dict(precision, threshold, edgeitems, + max_line_width, suppress_small, None, None, + sign, formatter, floatmode, legacy) + options = _format_options.copy() + options.update(overrides) + + if options['legacy'] == '1.13': + if style is np._NoValue: + style = repr + + if a.shape == () and a.dtype.names is None: + return style(a.item()) + elif style is not np._NoValue: + # Deprecation 11-9-2017 v1.14 + warnings.warn("'style' argument is deprecated and no longer functional" + " except in 1.13 'legacy' mode", + DeprecationWarning, stacklevel=3) + + if options['legacy'] != '1.13': + options['linewidth'] -= len(suffix) + + # treat as a null array if any of shape elements == 0 + if a.size == 0: + return "[]" + + return _array2string(a, options, separator, prefix) + + +def _extendLine(s, line, word, line_width, next_line_prefix, legacy): + needs_wrap = len(line) + len(word) > line_width + if legacy != '1.13': + # don't wrap lines if it won't help + if len(line) <= len(next_line_prefix): + needs_wrap = False + + if needs_wrap: + s += line.rstrip() + "\n" + line = next_line_prefix + line += word + return s, line + + +def _extendLine_pretty(s, line, word, line_width, next_line_prefix, legacy): + """ + Extends line with nicely formatted (possibly multi-line) string ``word``. + """ + words = word.splitlines() + if len(words) == 1 or legacy == '1.13': + return _extendLine(s, line, word, line_width, next_line_prefix, legacy) + + max_word_length = max(len(word) for word in words) + if (len(line) + max_word_length > line_width and + len(line) > len(next_line_prefix)): + s += line.rstrip() + '\n' + line = next_line_prefix + words[0] + indent = next_line_prefix + else: + indent = len(line)*' ' + line += words[0] + + for word in words[1::]: + s += line.rstrip() + '\n' + line = indent + word + + suffix_length = max_word_length - len(words[-1]) + line += suffix_length*' ' + + return s, line + +def _formatArray(a, format_function, line_width, next_line_prefix, + separator, edge_items, summary_insert, legacy): + """formatArray is designed for two modes of operation: + + 1. Full output + + 2. Summarized output + + """ + def recurser(index, hanging_indent, curr_width): + """ + By using this local function, we don't need to recurse with all the + arguments. 
Since this function is not created recursively, the cost is + not significant + """ + axis = len(index) + axes_left = a.ndim - axis + + if axes_left == 0: + return format_function(a[index]) + + # when recursing, add a space to align with the [ added, and reduce the + # length of the line by 1 + next_hanging_indent = hanging_indent + ' ' + if legacy == '1.13': + next_width = curr_width + else: + next_width = curr_width - len(']') + + a_len = a.shape[axis] + show_summary = summary_insert and 2*edge_items < a_len + if show_summary: + leading_items = edge_items + trailing_items = edge_items + else: + leading_items = 0 + trailing_items = a_len + + # stringify the array with the hanging indent on the first line too + s = '' + + # last axis (rows) - wrap elements if they would not fit on one line + if axes_left == 1: + # the length up until the beginning of the separator / bracket + if legacy == '1.13': + elem_width = curr_width - len(separator.rstrip()) + else: + elem_width = curr_width - max(len(separator.rstrip()), len(']')) + + line = hanging_indent + for i in range(leading_items): + word = recurser(index + (i,), next_hanging_indent, next_width) + s, line = _extendLine_pretty( + s, line, word, elem_width, hanging_indent, legacy) + line += separator + + if show_summary: + s, line = _extendLine( + s, line, summary_insert, elem_width, hanging_indent, legacy) + if legacy == '1.13': + line += ", " + else: + line += separator + + for i in range(trailing_items, 1, -1): + word = recurser(index + (-i,), next_hanging_indent, next_width) + s, line = _extendLine_pretty( + s, line, word, elem_width, hanging_indent, legacy) + line += separator + + if legacy == '1.13': + # width of the separator is not considered on 1.13 + elem_width = curr_width + word = recurser(index + (-1,), next_hanging_indent, next_width) + s, line = _extendLine_pretty( + s, line, word, elem_width, hanging_indent, legacy) + + s += line + + # other axes - insert newlines between rows + else: + s = '' + line_sep = separator.rstrip() + '\n'*(axes_left - 1) + + for i in range(leading_items): + nested = recurser(index + (i,), next_hanging_indent, next_width) + s += hanging_indent + nested + line_sep + + if show_summary: + if legacy == '1.13': + # trailing space, fixed nbr of newlines, and fixed separator + s += hanging_indent + summary_insert + ", \n" + else: + s += hanging_indent + summary_insert + line_sep + + for i in range(trailing_items, 1, -1): + nested = recurser(index + (-i,), next_hanging_indent, + next_width) + s += hanging_indent + nested + line_sep + + nested = recurser(index + (-1,), next_hanging_indent, next_width) + s += hanging_indent + nested + + # remove the hanging indent, and wrap in [] + s = '[' + s[len(hanging_indent):] + ']' + return s + + try: + # invoke the recursive part with an initial index and prefix + return recurser(index=(), + hanging_indent=next_line_prefix, + curr_width=line_width) + finally: + # recursive closures have a cyclic reference to themselves, which + # requires gc to collect (gh-10620). 
To avoid this problem, for + # performance and PyPy friendliness, we break the cycle: + recurser = None + +def _none_or_positive_arg(x, name): + if x is None: + return -1 + if x < 0: + raise ValueError("{} must be >= 0".format(name)) + return x + +class FloatingFormat: + """ Formatter for subtypes of np.floating """ + def __init__(self, data, precision, floatmode, suppress_small, sign=False, + *, legacy=None): + # for backcompatibility, accept bools + if isinstance(sign, bool): + sign = '+' if sign else '-' + + self._legacy = legacy + if self._legacy == '1.13': + # when not 0d, legacy does not support '-' + if data.shape != () and sign == '-': + sign = ' ' + + self.floatmode = floatmode + if floatmode == 'unique': + self.precision = None + else: + self.precision = precision + + self.precision = _none_or_positive_arg(self.precision, 'precision') + + self.suppress_small = suppress_small + self.sign = sign + self.exp_format = False + self.large_exponent = False + + self.fillFormat(data) + + def fillFormat(self, data): + # only the finite values are used to compute the number of digits + finite_vals = data[isfinite(data)] + + # choose exponential mode based on the non-zero finite values: + abs_non_zero = absolute(finite_vals[finite_vals != 0]) + if len(abs_non_zero) != 0: + max_val = np.max(abs_non_zero) + min_val = np.min(abs_non_zero) + with errstate(over='ignore'): # division can overflow + if max_val >= 1.e8 or (not self.suppress_small and + (min_val < 0.0001 or max_val/min_val > 1000.)): + self.exp_format = True + + # do a first pass of printing all the numbers, to determine sizes + if len(finite_vals) == 0: + self.pad_left = 0 + self.pad_right = 0 + self.trim = '.' + self.exp_size = -1 + self.unique = True + self.min_digits = None + elif self.exp_format: + trim, unique = '.', True + if self.floatmode == 'fixed' or self._legacy == '1.13': + trim, unique = 'k', False + strs = (dragon4_scientific(x, precision=self.precision, + unique=unique, trim=trim, sign=self.sign == '+') + for x in finite_vals) + frac_strs, _, exp_strs = zip(*(s.partition('e') for s in strs)) + int_part, frac_part = zip(*(s.split('.') for s in frac_strs)) + self.exp_size = max(len(s) for s in exp_strs) - 1 + + self.trim = 'k' + self.precision = max(len(s) for s in frac_part) + self.min_digits = self.precision + self.unique = unique + + # for back-compat with np 1.13, use 2 spaces & sign and full prec + if self._legacy == '1.13': + self.pad_left = 3 + else: + # this should be only 1 or 2. Can be calculated from sign. + self.pad_left = max(len(s) for s in int_part) + # pad_right is only needed for nan length calculation + self.pad_right = self.exp_size + 2 + self.precision + else: + trim, unique = '.', True + if self.floatmode == 'fixed': + trim, unique = 'k', False + strs = (dragon4_positional(x, precision=self.precision, + fractional=True, + unique=unique, trim=trim, + sign=self.sign == '+') + for x in finite_vals) + int_part, frac_part = zip(*(s.split('.') for s in strs)) + if self._legacy == '1.13': + self.pad_left = 1 + max(len(s.lstrip('-+')) for s in int_part) + else: + self.pad_left = max(len(s) for s in int_part) + self.pad_right = max(len(s) for s in frac_part) + self.exp_size = -1 + self.unique = unique + + if self.floatmode in ['fixed', 'maxprec_equal']: + self.precision = self.min_digits = self.pad_right + self.trim = 'k' + else: + self.trim = '.' 
+ self.min_digits = 0 + + if self._legacy != '1.13': + # account for sign = ' ' by adding one to pad_left + if self.sign == ' ' and not any(np.signbit(finite_vals)): + self.pad_left += 1 + + # if there are non-finite values, may need to increase pad_left + if data.size != finite_vals.size: + neginf = self.sign != '-' or any(data[isinf(data)] < 0) + nanlen = len(_format_options['nanstr']) + inflen = len(_format_options['infstr']) + neginf + offset = self.pad_right + 1 # +1 for decimal pt + self.pad_left = max(self.pad_left, nanlen - offset, inflen - offset) + + def __call__(self, x): + if not np.isfinite(x): + with errstate(invalid='ignore'): + if np.isnan(x): + sign = '+' if self.sign == '+' else '' + ret = sign + _format_options['nanstr'] + else: # isinf + sign = '-' if x < 0 else '+' if self.sign == '+' else '' + ret = sign + _format_options['infstr'] + return ' '*(self.pad_left + self.pad_right + 1 - len(ret)) + ret + + if self.exp_format: + return dragon4_scientific(x, + precision=self.precision, + min_digits=self.min_digits, + unique=self.unique, + trim=self.trim, + sign=self.sign == '+', + pad_left=self.pad_left, + exp_digits=self.exp_size) + else: + return dragon4_positional(x, + precision=self.precision, + min_digits=self.min_digits, + unique=self.unique, + fractional=True, + trim=self.trim, + sign=self.sign == '+', + pad_left=self.pad_left, + pad_right=self.pad_right) + + +@set_module('numpy') +def format_float_scientific(x, precision=None, unique=True, trim='k', + sign=False, pad_left=None, exp_digits=None, + min_digits=None): + """ + Format a floating-point scalar as a decimal string in scientific notation. + + Provides control over rounding, trimming and padding. Uses and assumes + IEEE unbiased rounding. Uses the "Dragon4" algorithm. + + Parameters + ---------- + x : python float or numpy floating scalar + Value to format. + precision : non-negative integer or None, optional + Maximum number of digits to print. May be None if `unique` is + `True`, but must be an integer if unique is `False`. + unique : boolean, optional + If `True`, use a digit-generation strategy which gives the shortest + representation which uniquely identifies the floating-point number from + other values of the same type, by judicious rounding. If `precision` + is given fewer digits than necessary can be printed. If `min_digits` + is given more can be printed, in which cases the last digit is rounded + with unbiased rounding. + If `False`, digits are generated as if printing an infinite-precision + value and stopping after `precision` digits, rounding the remaining + value with unbiased rounding + trim : one of 'k', '.', '0', '-', optional + Controls post-processing trimming of trailing digits, as follows: + + * 'k' : keep trailing zeros, keep decimal point (no trimming) + * '.' : trim all trailing zeros, leave decimal point + * '0' : trim all but the zero before the decimal point. Insert the + zero if it is missing. + * '-' : trim trailing zeros and any trailing decimal point + sign : boolean, optional + Whether to show the sign for positive values. + pad_left : non-negative integer, optional + Pad the left side of the string with whitespace until at least that + many characters are to the left of the decimal point. + exp_digits : non-negative integer, optional + Pad the exponent with zeros until it contains at least this many digits. + If omitted, the exponent will be at least 2 digits. + min_digits : non-negative integer or None, optional + Minimum number of digits to print. 
This only has an effect for + `unique=True`. In that case more digits than necessary to uniquely + identify the value may be printed and rounded unbiased. + + .. versionadded:: 1.21.0 + + Returns + ------- + rep : string + The string representation of the floating point value + + See Also + -------- + format_float_positional + + Examples + -------- + >>> np.format_float_scientific(np.float32(np.pi)) + '3.1415927e+00' + >>> s = np.float32(1.23e24) + >>> np.format_float_scientific(s, unique=False, precision=15) + '1.230000071797338e+24' + >>> np.format_float_scientific(s, exp_digits=4) + '1.23e+0024' + """ + precision = _none_or_positive_arg(precision, 'precision') + pad_left = _none_or_positive_arg(pad_left, 'pad_left') + exp_digits = _none_or_positive_arg(exp_digits, 'exp_digits') + min_digits = _none_or_positive_arg(min_digits, 'min_digits') + if min_digits > 0 and precision > 0 and min_digits > precision: + raise ValueError("min_digits must be less than or equal to precision") + return dragon4_scientific(x, precision=precision, unique=unique, + trim=trim, sign=sign, pad_left=pad_left, + exp_digits=exp_digits, min_digits=min_digits) + + +@set_module('numpy') +def format_float_positional(x, precision=None, unique=True, + fractional=True, trim='k', sign=False, + pad_left=None, pad_right=None, min_digits=None): + """ + Format a floating-point scalar as a decimal string in positional notation. + + Provides control over rounding, trimming and padding. Uses and assumes + IEEE unbiased rounding. Uses the "Dragon4" algorithm. + + Parameters + ---------- + x : python float or numpy floating scalar + Value to format. + precision : non-negative integer or None, optional + Maximum number of digits to print. May be None if `unique` is + `True`, but must be an integer if unique is `False`. + unique : boolean, optional + If `True`, use a digit-generation strategy which gives the shortest + representation which uniquely identifies the floating-point number from + other values of the same type, by judicious rounding. If `precision` + is given fewer digits than necessary can be printed, or if `min_digits` + is given more can be printed, in which cases the last digit is rounded + with unbiased rounding. + If `False`, digits are generated as if printing an infinite-precision + value and stopping after `precision` digits, rounding the remaining + value with unbiased rounding + fractional : boolean, optional + If `True`, the cutoffs of `precision` and `min_digits` refer to the + total number of digits after the decimal point, including leading + zeros. + If `False`, `precision` and `min_digits` refer to the total number of + significant digits, before or after the decimal point, ignoring leading + zeros. + trim : one of 'k', '.', '0', '-', optional + Controls post-processing trimming of trailing digits, as follows: + + * 'k' : keep trailing zeros, keep decimal point (no trimming) + * '.' : trim all trailing zeros, leave decimal point + * '0' : trim all but the zero before the decimal point. Insert the + zero if it is missing. + * '-' : trim trailing zeros and any trailing decimal point + sign : boolean, optional + Whether to show the sign for positive values. + pad_left : non-negative integer, optional + Pad the left side of the string with whitespace until at least that + many characters are to the left of the decimal point. + pad_right : non-negative integer, optional + Pad the right side of the string with whitespace until at least that + many characters are to the right of the decimal point.
+ min_digits : non-negative integer or None, optional + Minimum number of digits to print. Only has an effect if `unique=True` + in which case additional digits past those necessary to uniquely + identify the value may be printed, rounding the last additional digit. + + .. versionadded:: 1.21.0 + + Returns + ------- + rep : string + The string representation of the floating point value + + See Also + -------- + format_float_scientific + + Examples + -------- + >>> np.format_float_positional(np.float32(np.pi)) + '3.1415927' + >>> np.format_float_positional(np.float16(np.pi)) + '3.14' + >>> np.format_float_positional(np.float16(0.3)) + '0.3' + >>> np.format_float_positional(np.float16(0.3), unique=False, precision=10) + '0.3000488281' + """ + precision = _none_or_positive_arg(precision, 'precision') + pad_left = _none_or_positive_arg(pad_left, 'pad_left') + pad_right = _none_or_positive_arg(pad_right, 'pad_right') + min_digits = _none_or_positive_arg(min_digits, 'min_digits') + if not fractional and precision == 0: + raise ValueError("precision must be greater than 0 if " + "fractional=False") + if min_digits > 0 and precision > 0 and min_digits > precision: + raise ValueError("min_digits must be less than or equal to precision") + return dragon4_positional(x, precision=precision, unique=unique, + fractional=fractional, trim=trim, + sign=sign, pad_left=pad_left, + pad_right=pad_right, min_digits=min_digits) + + +class IntegerFormat: + def __init__(self, data): + if data.size > 0: + max_str_len = max(len(str(np.max(data))), + len(str(np.min(data)))) + else: + max_str_len = 0 + self.format = '%{}d'.format(max_str_len) + + def __call__(self, x): + return self.format % x + + +class BoolFormat: + def __init__(self, data, **kwargs): + # add an extra space so " True" and "False" have the same length and + # array elements align nicely when printed, except in 0d arrays + self.truestr = ' True' if data.shape != () else 'True' + + def __call__(self, x): + return self.truestr if x else "False" + + +class ComplexFloatingFormat: + """ Formatter for subtypes of np.complexfloating """ + def __init__(self, x, precision, floatmode, suppress_small, + sign=False, *, legacy=None): + # for backcompatibility, accept bools + if isinstance(sign, bool): + sign = '+' if sign else '-' + + floatmode_real = floatmode_imag = floatmode + if legacy == '1.13': + floatmode_real = 'maxprec_equal' + floatmode_imag = 'maxprec' + + self.real_format = FloatingFormat( + x.real, precision, floatmode_real, suppress_small, + sign=sign, legacy=legacy + ) + self.imag_format = FloatingFormat( + x.imag, precision, floatmode_imag, suppress_small, + sign='+', legacy=legacy + ) + + def __call__(self, x): + r = self.real_format(x.real) + i = self.imag_format(x.imag) + + # add the 'j' before the terminal whitespace in i + sp = len(i.rstrip()) + i = i[:sp] + 'j' + i[sp:] + + return r + i + + +class _TimelikeFormat: + def __init__(self, data): + non_nat = data[~isnat(data)] + if len(non_nat) > 0: + # Max str length of non-NaT elements + max_str_len = max(len(self._format_non_nat(np.max(non_nat))), + len(self._format_non_nat(np.min(non_nat)))) + else: + max_str_len = 0 + if len(non_nat) < data.size: + # data contains a NaT + max_str_len = max(max_str_len, 5) + self._format = '%{}s'.format(max_str_len) + self._nat = "'NaT'".rjust(max_str_len) + + def _format_non_nat(self, x): + # override in subclass + raise NotImplementedError + + def __call__(self, x): + if isnat(x): + return self._nat + else: + return self._format % self._format_non_nat(x) + +
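
`format_float_positional` and `format_float_scientific` above are the public entry points to the same Dragon4 machinery that `FloatingFormat` uses internally. A short sketch of how `unique`, `precision`, and `trim` interact, using only the calls documented above (the digit strings shown assume standard IEEE-754 doubles):

```python
import numpy as np

x = np.float64(0.1)

# unique=True (the default): the shortest string that round-trips to x
print(np.format_float_positional(x))                  # '0.1'

# unique=False: emit exactly `precision` fractional digits of the
# infinite-precision expansion, exposing the binary approximation of 0.1
print(np.format_float_positional(x, unique=False, precision=20))
# '0.10000000000000000555'

# trim='-' drops trailing zeros and a trailing decimal point
print(np.format_float_positional(np.float64(2.0), trim='-'))   # '2'

# scientific notation, with the exponent zero-padded to three digits
print(np.format_float_scientific(x, exp_digits=3))    # '1.e-001'
```
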
+class DatetimeFormat(_TimelikeFormat): + def __init__(self, x, unit=None, timezone=None, casting='same_kind', + legacy=False): + # Get the unit from the dtype + if unit is None: + if x.dtype.kind == 'M': + unit = datetime_data(x.dtype)[0] + else: + unit = 's' + + if timezone is None: + timezone = 'naive' + self.timezone = timezone + self.unit = unit + self.casting = casting + self.legacy = legacy + + # must be called after the above are configured + super().__init__(x) + + def __call__(self, x): + if self.legacy == '1.13': + return self._format_non_nat(x) + return super().__call__(x) + + def _format_non_nat(self, x): + return "'%s'" % datetime_as_string(x, + unit=self.unit, + timezone=self.timezone, + casting=self.casting) + + +class TimedeltaFormat(_TimelikeFormat): + def _format_non_nat(self, x): + return str(x.astype('i8')) + + +class SubArrayFormat: + def __init__(self, format_function): + self.format_function = format_function + + def __call__(self, arr): + if arr.ndim <= 1: + return "[" + ", ".join(self.format_function(a) for a in arr) + "]" + return "[" + ", ".join(self.__call__(a) for a in arr) + "]" + + +class StructuredVoidFormat: + """ + Formatter for structured np.void objects. + + This does not work on structured alias types like np.dtype(('i4', 'i2,i2')), + as alias scalars lose their field information, and the implementation + relies upon np.void.__getitem__. + """ + def __init__(self, format_functions): + self.format_functions = format_functions + + @classmethod + def from_data(cls, data, **options): + """ + This is a second way to initialize StructuredVoidFormat, using the raw data + as input. Added to avoid changing the signature of __init__. + """ + format_functions = [] + for field_name in data.dtype.names: + format_function = _get_format_function(data[field_name], **options) + if data.dtype[field_name].shape != (): + format_function = SubArrayFormat(format_function) + format_functions.append(format_function) + return cls(format_functions) + + def __call__(self, x): + str_fields = [ + format_function(field) + for field, format_function in zip(x, self.format_functions) + ] + if len(str_fields) == 1: + return "({},)".format(str_fields[0]) + else: + return "({})".format(", ".join(str_fields)) + + +def _void_scalar_repr(x): + """ + Implements the repr for structured-void scalars. It is called from the + scalartypes.c.src code, and is placed here because it uses the elementwise + formatters defined above. + """ + return StructuredVoidFormat.from_data(array(x), **_format_options)(x) + + +_typelessdata = [int_, float_, complex_, bool_] +if issubclass(intc, int): + _typelessdata.append(intc) +if issubclass(longlong, int): + _typelessdata.append(longlong) + + +def dtype_is_implied(dtype): + """ + Determine if the given dtype is implied by the representation of its values. + + Parameters + ---------- + dtype : dtype + Data type + + Returns + ------- + implied : bool + True if the dtype is implied by the representation of its values. 
+ + Examples + -------- + >>> np.core.arrayprint.dtype_is_implied(int) + True + >>> np.array([1, 2, 3], int) + array([1, 2, 3]) + >>> np.core.arrayprint.dtype_is_implied(np.int8) + False + >>> np.array([1, 2, 3], np.int8) + array([1, 2, 3], dtype=int8) + """ + dtype = np.dtype(dtype) + if _format_options['legacy'] == '1.13' and dtype.type == bool_: + return False + + # not just void types can be structured, and names are not part of the repr + if dtype.names is not None: + return False + + return dtype.type in _typelessdata + + +def dtype_short_repr(dtype): + """ + Convert a dtype to a short form which evaluates to the same dtype. + + The intent is roughly that the following holds + + >>> from numpy import * + >>> dt = np.int64([1, 2]).dtype + >>> assert eval(dtype_short_repr(dt)) == dt + """ + if dtype.names is not None: + # structured dtypes give a list or tuple repr + return str(dtype) + elif issubclass(dtype.type, flexible): + # handle these separately so they don't give garbage like str256 + return "'%s'" % str(dtype) + + typename = dtype.name + # quote typenames which can't be represented as python variable names + if typename and not (typename[0].isalpha() and typename.isalnum()): + typename = repr(typename) + + return typename + + +def _array_repr_implementation( + arr, max_line_width=None, precision=None, suppress_small=None, + array2string=array2string): + """Internal version of array_repr() that allows overriding array2string.""" + if max_line_width is None: + max_line_width = _format_options['linewidth'] + + if type(arr) is not ndarray: + class_name = type(arr).__name__ + else: + class_name = "array" + + skipdtype = dtype_is_implied(arr.dtype) and arr.size > 0 + + prefix = class_name + "(" + suffix = ")" if skipdtype else "," + + if (_format_options['legacy'] == '1.13' and + arr.shape == () and not arr.dtype.names): + lst = repr(arr.item()) + elif arr.size > 0 or arr.shape == (0,): + lst = array2string(arr, max_line_width, precision, suppress_small, + ', ', prefix, suffix=suffix) + else: # show zero-length shape unless it is (0,) + lst = "[], shape=%s" % (repr(arr.shape),) + + arr_str = prefix + lst + suffix + + if skipdtype: + return arr_str + + dtype_str = "dtype={})".format(dtype_short_repr(arr.dtype)) + + # compute whether we should put dtype on a new line: Do so if adding the + # dtype would extend the last line past max_line_width. + # Note: This line gives the correct result even when rfind returns -1. + last_line_len = len(arr_str) - (arr_str.rfind('\n') + 1) + spacer = " " + if _format_options['legacy'] == '1.13': + if issubclass(arr.dtype.type, flexible): + spacer = '\n' + ' '*len(class_name + "(") + elif last_line_len + len(dtype_str) + 1 > max_line_width: + spacer = '\n' + ' '*len(class_name + "(") + + return arr_str + spacer + dtype_str + + +def _array_repr_dispatcher( + arr, max_line_width=None, precision=None, suppress_small=None): + return (arr,) + + +@array_function_dispatch(_array_repr_dispatcher, module='numpy') +def array_repr(arr, max_line_width=None, precision=None, suppress_small=None): + """ + Return the string representation of an array. + + Parameters + ---------- + arr : ndarray + Input array. + max_line_width : int, optional + Inserts newlines if text is longer than `max_line_width`. + Defaults to ``numpy.get_printoptions()['linewidth']``. + precision : int, optional + Floating point precision. + Defaults to ``numpy.get_printoptions()['precision']``. + suppress_small : bool, optional + Represent numbers "very close" to zero as zero; default is False. 
+ Very close is defined by precision: if the precision is 8, e.g., + numbers smaller (in absolute value) than 5e-9 are represented as + zero. + Defaults to ``numpy.get_printoptions()['suppress']``. + + Returns + ------- + string : str + The string representation of an array. + + See Also + -------- + array_str, array2string, set_printoptions + + Examples + -------- + >>> np.array_repr(np.array([1,2])) + 'array([1, 2])' + >>> np.array_repr(np.ma.array([0.])) + 'MaskedArray([0.])' + >>> np.array_repr(np.array([], np.int32)) + 'array([], dtype=int32)' + + >>> x = np.array([1e-6, 4e-7, 2, 3]) + >>> np.array_repr(x, precision=6, suppress_small=True) + 'array([0.000001, 0. , 2. , 3. ])' + + """ + return _array_repr_implementation( + arr, max_line_width, precision, suppress_small) + + +@_recursive_guard() +def _guarded_repr_or_str(v): + if isinstance(v, bytes): + return repr(v) + return str(v) + + +def _array_str_implementation( + a, max_line_width=None, precision=None, suppress_small=None, + array2string=array2string): + """Internal version of array_str() that allows overriding array2string.""" + if (_format_options['legacy'] == '1.13' and + a.shape == () and not a.dtype.names): + return str(a.item()) + + # the str of 0d arrays is a special case: It should appear like a scalar, + # so floats are not truncated by `precision`, and strings are not wrapped + # in quotes. So we return the str of the scalar value. + if a.shape == (): + # obtain a scalar and call str on it, avoiding problems for subclasses + # for which indexing with () returns a 0d instead of a scalar by using + # ndarray's getindex. Also guard against recursive 0d object arrays. + return _guarded_repr_or_str(np.ndarray.__getitem__(a, ())) + + return array2string(a, max_line_width, precision, suppress_small, ' ', "") + + +def _array_str_dispatcher( + a, max_line_width=None, precision=None, suppress_small=None): + return (a,) + + +@array_function_dispatch(_array_str_dispatcher, module='numpy') +def array_str(a, max_line_width=None, precision=None, suppress_small=None): + """ + Return a string representation of the data in an array. + + The data in the array is returned as a single string. This function is + similar to `array_repr`, the difference being that `array_repr` also + returns information on the kind of array and its data type. + + Parameters + ---------- + a : ndarray + Input array. + max_line_width : int, optional + Inserts newlines if text is longer than `max_line_width`. + Defaults to ``numpy.get_printoptions()['linewidth']``. + precision : int, optional + Floating point precision. + Defaults to ``numpy.get_printoptions()['precision']``. + suppress_small : bool, optional + Represent numbers "very close" to zero as zero; default is False. + Very close is defined by precision: if the precision is 8, e.g., + numbers smaller (in absolute value) than 5e-9 are represented as + zero. + Defaults to ``numpy.get_printoptions()['suppress']``. 
+ + See Also + -------- + array2string, array_repr, set_printoptions + + Examples + -------- + >>> np.array_str(np.arange(3)) + '[0 1 2]' + + """ + return _array_str_implementation( + a, max_line_width, precision, suppress_small) + + +# needed if __array_function__ is disabled +_array2string_impl = getattr(array2string, '__wrapped__', array2string) +_default_array_str = functools.partial(_array_str_implementation, + array2string=_array2string_impl) +_default_array_repr = functools.partial(_array_repr_implementation, + array2string=_array2string_impl) + + +def set_string_function(f, repr=True): + """ + Set a Python function to be used when pretty printing arrays. + + Parameters + ---------- + f : function or None + Function to be used to pretty print arrays. The function should expect + a single array argument and return a string of the representation of + the array. If None, the function is reset to the default NumPy function + to print arrays. + repr : bool, optional + If True (default), the function for pretty printing (``__repr__``) + is set, if False the function that returns the default string + representation (``__str__``) is set. + + See Also + -------- + set_printoptions, get_printoptions + + Examples + -------- + >>> def pprint(arr): + ... return 'HA! - What are you going to do now?' + ... + >>> np.set_string_function(pprint) + >>> a = np.arange(10) + >>> a + HA! - What are you going to do now? + >>> _ = a + >>> # [0 1 2 3 4 5 6 7 8 9] + + We can reset the function to the default: + + >>> np.set_string_function(None) + >>> a + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + + `repr` affects either pretty printing or normal string representation. + Note that ``__repr__`` is still affected by setting ``__str__`` + because the width of each array element in the returned string becomes + equal to the length of the result of ``__str__()``. 
+
+    >>> x = np.arange(4)
+    >>> np.set_string_function(lambda x:'random', repr=False)
+    >>> x.__str__()
+    'random'
+    >>> x.__repr__()
+    'array([0, 1, 2, 3])'
+
+    """
+    if f is None:
+        if repr:
+            return multiarray.set_string_function(_default_array_repr, 1)
+        else:
+            return multiarray.set_string_function(_default_array_str, 0)
+    else:
+        return multiarray.set_string_function(f, repr)
diff --git a/MLPY/Lib/site-packages/numpy/core/arrayprint.pyi b/MLPY/Lib/site-packages/numpy/core/arrayprint.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..4cc774af6d416330079d1bbdafb5311a4d669e69
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/core/arrayprint.pyi
@@ -0,0 +1,147 @@
+import sys
+from types import TracebackType
+from typing import Any, Optional, Callable, Union, Type
+
+# Using a private class is by no means ideal, but it is simply a consequence
+# of a `contextlib.contextmanager` returning an instance of the
+# aforementioned class
+from contextlib import _GeneratorContextManager
+
+from numpy import (
+    ndarray,
+    generic,
+    bool_,
+    integer,
+    timedelta64,
+    datetime64,
+    floating,
+    complexfloating,
+    void,
+    str_,
+    bytes_,
+    longdouble,
+    clongdouble,
+)
+from numpy.typing import ArrayLike, _CharLike_co, _FloatLike_co
+
+if sys.version_info > (3, 8):
+    from typing import Literal, TypedDict, SupportsIndex
+else:
+    from typing_extensions import Literal, TypedDict, SupportsIndex
+
+_FloatMode = Literal["fixed", "unique", "maxprec", "maxprec_equal"]
+
+class _FormatDict(TypedDict, total=False):
+    bool: Callable[[bool_], str]
+    int: Callable[[integer[Any]], str]
+    timedelta: Callable[[timedelta64], str]
+    datetime: Callable[[datetime64], str]
+    float: Callable[[floating[Any]], str]
+    longfloat: Callable[[longdouble], str]
+    complexfloat: Callable[[complexfloating[Any, Any]], str]
+    longcomplexfloat: Callable[[clongdouble], str]
+    void: Callable[[void], str]
+    numpystr: Callable[[_CharLike_co], str]
+    object: Callable[[object], str]
+    all: Callable[[object], str]
+    int_kind: Callable[[integer[Any]], str]
+    float_kind: Callable[[floating[Any]], str]
+    complex_kind: Callable[[complexfloating[Any, Any]], str]
+    str_kind: Callable[[_CharLike_co], str]
+
+class _FormatOptions(TypedDict):
+    precision: int
+    threshold: int
+    edgeitems: int
+    linewidth: int
+    suppress: bool
+    nanstr: str
+    infstr: str
+    formatter: Optional[_FormatDict]
+    sign: Literal["-", "+", " "]
+    floatmode: _FloatMode
+    legacy: Literal[False, "1.13"]
+
+def set_printoptions(
+    precision: Optional[SupportsIndex] = ...,
+    threshold: Optional[int] = ...,
+    edgeitems: Optional[int] = ...,
+    linewidth: Optional[int] = ...,
+    suppress: Optional[bool] = ...,
+    nanstr: Optional[str] = ...,
+    infstr: Optional[str] = ...,
+    formatter: Optional[_FormatDict] = ...,
+    sign: Optional[Literal["-", "+", " "]] = ...,
+    floatmode: Optional[_FloatMode] = ...,
+    *,
+    legacy: Optional[Literal[False, "1.13"]] = ...
+) -> None: ...
+def get_printoptions() -> _FormatOptions: ...
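As a usage note for the stubs above, `printoptions` (typed at the end of this stub file) is the context-manager counterpart of `set_printoptions`: it applies the same keyword options temporarily and restores the previous `_FormatOptions` on exit. A doctest-style sketch (editorial, not part of the stub; it assumes `import numpy as np` and NumPy's documented default precision of 8):

>>> import numpy as np
>>> with np.printoptions(precision=2, suppress=True):
...     print(np.array([1.23456789, 1e-9]))
[1.23 0.  ]
>>> np.get_printoptions()['precision']  # options restored on exit
8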
+def array2string(
+    a: ndarray[Any, Any],
+    max_line_width: Optional[int] = ...,
+    precision: Optional[SupportsIndex] = ...,
+    suppress_small: Optional[bool] = ...,
+    separator: str = ...,
+    prefix: str = ...,
+    # NOTE: With the `style` argument being deprecated,
+    # all arguments between `formatter` and `suffix` are de facto
+    # keyword-only arguments
+    *,
+    formatter: Optional[_FormatDict] = ...,
+    threshold: Optional[int] = ...,
+    edgeitems: Optional[int] = ...,
+    sign: Optional[Literal["-", "+", " "]] = ...,
+    floatmode: Optional[_FloatMode] = ...,
+    suffix: str = ...,
+    legacy: Optional[Literal[False, "1.13"]] = ...,
+) -> str: ...
+def format_float_scientific(
+    x: _FloatLike_co,
+    precision: Optional[int] = ...,
+    unique: bool = ...,
+    trim: Literal["k", ".", "0", "-"] = ...,
+    sign: bool = ...,
+    pad_left: Optional[int] = ...,
+    exp_digits: Optional[int] = ...,
+    min_digits: Optional[int] = ...,
+) -> str: ...
+def format_float_positional(
+    x: _FloatLike_co,
+    precision: Optional[int] = ...,
+    unique: bool = ...,
+    fractional: bool = ...,
+    trim: Literal["k", ".", "0", "-"] = ...,
+    sign: bool = ...,
+    pad_left: Optional[int] = ...,
+    pad_right: Optional[int] = ...,
+    min_digits: Optional[int] = ...,
+) -> str: ...
+def array_repr(
+    arr: ndarray[Any, Any],
+    max_line_width: Optional[int] = ...,
+    precision: Optional[SupportsIndex] = ...,
+    suppress_small: Optional[bool] = ...,
+) -> str: ...
+def array_str(
+    a: ndarray[Any, Any],
+    max_line_width: Optional[int] = ...,
+    precision: Optional[SupportsIndex] = ...,
+    suppress_small: Optional[bool] = ...,
+) -> str: ...
+def set_string_function(
+    f: Optional[Callable[[ndarray[Any, Any]], str]], repr: bool = ...
+) -> None: ...
+def printoptions(
+    precision: Optional[SupportsIndex] = ...,
+    threshold: Optional[int] = ...,
+    edgeitems: Optional[int] = ...,
+    linewidth: Optional[int] = ...,
+    suppress: Optional[bool] = ...,
+    nanstr: Optional[str] = ...,
+    infstr: Optional[str] = ...,
+    formatter: Optional[_FormatDict] = ...,
+    sign: Optional[Literal["-", "+", " "]] = ...,
+    floatmode: Optional[_FloatMode] = ...,
+    *,
+    legacy: Optional[Literal[False, "1.13"]] = ...
+) -> _GeneratorContextManager[_FormatOptions]: ...
diff --git a/MLPY/Lib/site-packages/numpy/core/cversions.py b/MLPY/Lib/site-packages/numpy/core/cversions.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef75a6f4f35cd29569c2d806927695f600066066
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/core/cversions.py
@@ -0,0 +1,13 @@
+"""Simple script to compute the api hash of the current API.
+
+The API hash is defined by numpy_api_order and ufunc_api_order.
+
+"""
+from os.path import dirname
+
+from code_generators.genapi import fullapi_hash
+from code_generators.numpy_api import full_api
+
+if __name__ == '__main__':
+    curdir = dirname(__file__)
+    print(fullapi_hash(full_api))
diff --git a/MLPY/Lib/site-packages/numpy/core/defchararray.py b/MLPY/Lib/site-packages/numpy/core/defchararray.py
new file mode 100644
index 0000000000000000000000000000000000000000..7c3b0a627392a00d9b707c0d92903b6f4bad63ce
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/core/defchararray.py
@@ -0,0 +1,2795 @@
+"""
+This module contains a set of functions for vectorized string
+operations and methods.
+
+.. note::
+   The `chararray` class exists for backwards compatibility with
+   Numarray; it is not recommended for new development.
Starting from numpy + 1.4, if one needs arrays of strings, it is recommended to use arrays of + `dtype` `object_`, `string_` or `unicode_`, and use the free functions + in the `numpy.char` module for fast vectorized string operations. + +Some methods will only be available if the corresponding string method is +available in your version of Python. + +The preferred alias for `defchararray` is `numpy.char`. + +""" +import functools +import sys +from .numerictypes import ( + string_, unicode_, integer, int_, object_, bool_, character) +from .numeric import ndarray, compare_chararrays +from .numeric import array as narray +from numpy.core.multiarray import _vec_string +from numpy.core.overrides import set_module +from numpy.core import overrides +from numpy.compat import asbytes +import numpy + +__all__ = [ + 'equal', 'not_equal', 'greater_equal', 'less_equal', + 'greater', 'less', 'str_len', 'add', 'multiply', 'mod', 'capitalize', + 'center', 'count', 'decode', 'encode', 'endswith', 'expandtabs', + 'find', 'index', 'isalnum', 'isalpha', 'isdigit', 'islower', 'isspace', + 'istitle', 'isupper', 'join', 'ljust', 'lower', 'lstrip', 'partition', + 'replace', 'rfind', 'rindex', 'rjust', 'rpartition', 'rsplit', + 'rstrip', 'split', 'splitlines', 'startswith', 'strip', 'swapcase', + 'title', 'translate', 'upper', 'zfill', 'isnumeric', 'isdecimal', + 'array', 'asarray' + ] + + +_globalvar = 0 + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy.char') + + +def _use_unicode(*args): + """ + Helper function for determining the output type of some string + operations. + + For an operation on two ndarrays, if at least one is unicode, the + result should be unicode. + """ + for x in args: + if (isinstance(x, str) or + issubclass(numpy.asarray(x).dtype.type, unicode_)): + return unicode_ + return string_ + +def _to_string_or_unicode_array(result): + """ + Helper function to cast a result back into a string or unicode array + if an object array must be used as an intermediary. + """ + return numpy.asarray(result.tolist()) + +def _clean_args(*args): + """ + Helper function for delegating arguments to Python string + functions. + + Many of the Python string operations that have optional arguments + do not use 'None' to indicate a default value. In these cases, + we need to remove all None arguments, and those following them. + """ + newargs = [] + for chk in args: + if chk is None: + break + newargs.append(chk) + return newargs + +def _get_num_chars(a): + """ + Helper function that returns the number of characters per field in + a string or unicode array. This is to abstract out the fact that + for a unicode array this is itemsize / 4. + """ + if issubclass(a.dtype.type, unicode_): + return a.itemsize // 4 + return a.itemsize + + +def _binary_op_dispatcher(x1, x2): + return (x1, x2) + + +@array_function_dispatch(_binary_op_dispatcher) +def equal(x1, x2): + """ + Return (x1 == x2) element-wise. + + Unlike `numpy.equal`, this comparison is performed by first + stripping whitespace characters from the end of the string. This + behavior is provided for backward-compatibility with numarray. + + Parameters + ---------- + x1, x2 : array_like of str or unicode + Input arrays of the same shape. + + Returns + ------- + out : ndarray + Output array of bools. 
+
+    See Also
+    --------
+    not_equal, greater_equal, less_equal, greater, less
+    """
+    return compare_chararrays(x1, x2, '==', True)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def not_equal(x1, x2):
+    """
+    Return (x1 != x2) element-wise.
+
+    Unlike `numpy.not_equal`, this comparison is performed by first
+    stripping whitespace characters from the end of the string. This
+    behavior is provided for backward-compatibility with numarray.
+
+    Parameters
+    ----------
+    x1, x2 : array_like of str or unicode
+        Input arrays of the same shape.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools.
+
+    See Also
+    --------
+    equal, greater_equal, less_equal, greater, less
+    """
+    return compare_chararrays(x1, x2, '!=', True)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def greater_equal(x1, x2):
+    """
+    Return (x1 >= x2) element-wise.
+
+    Unlike `numpy.greater_equal`, this comparison is performed by
+    first stripping whitespace characters from the end of the string.
+    This behavior is provided for backward-compatibility with
+    numarray.
+
+    Parameters
+    ----------
+    x1, x2 : array_like of str or unicode
+        Input arrays of the same shape.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools.
+
+    See Also
+    --------
+    equal, not_equal, less_equal, greater, less
+    """
+    return compare_chararrays(x1, x2, '>=', True)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def less_equal(x1, x2):
+    """
+    Return (x1 <= x2) element-wise.
+
+    Unlike `numpy.less_equal`, this comparison is performed by first
+    stripping whitespace characters from the end of the string. This
+    behavior is provided for backward-compatibility with numarray.
+
+    Parameters
+    ----------
+    x1, x2 : array_like of str or unicode
+        Input arrays of the same shape.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools.
+
+    See Also
+    --------
+    equal, not_equal, greater_equal, greater, less
+    """
+    return compare_chararrays(x1, x2, '<=', True)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def greater(x1, x2):
+    """
+    Return (x1 > x2) element-wise.
+
+    Unlike `numpy.greater`, this comparison is performed by first
+    stripping whitespace characters from the end of the string. This
+    behavior is provided for backward-compatibility with numarray.
+
+    Parameters
+    ----------
+    x1, x2 : array_like of str or unicode
+        Input arrays of the same shape.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools.
+
+    See Also
+    --------
+    equal, not_equal, greater_equal, less_equal, less
+    """
+    return compare_chararrays(x1, x2, '>', True)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def less(x1, x2):
+    """
+    Return (x1 < x2) element-wise.
+
+    Unlike `numpy.less`, this comparison is performed by first
+    stripping whitespace characters from the end of the string. This
+    behavior is provided for backward-compatibility with numarray.
+
+    Parameters
+    ----------
+    x1, x2 : array_like of str or unicode
+        Input arrays of the same shape.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools.
+
+    See Also
+    --------
+    equal, not_equal, greater_equal, less_equal, greater
+    """
+    return compare_chararrays(x1, x2, '<', True)
+
+
+def _unary_op_dispatcher(a):
+    return (a,)
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def str_len(a):
+    """
+    Return len(a) element-wise.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    Returns
+    -------
+    out : ndarray
+        Output array of integers
+
+    See Also
+    --------
+    builtins.len
+    """
+    # Note: __len__, etc.
currently return ints, which are not C-integers. + # Generally intp would be expected for lengths, although int is sufficient + # due to the dtype itemsize limitation. + return _vec_string(a, int_, '__len__') + + +@array_function_dispatch(_binary_op_dispatcher) +def add(x1, x2): + """ + Return element-wise string concatenation for two arrays of str or unicode. + + Arrays `x1` and `x2` must have the same shape. + + Parameters + ---------- + x1 : array_like of str or unicode + Input array. + x2 : array_like of str or unicode + Input array. + + Returns + ------- + add : ndarray + Output array of `string_` or `unicode_`, depending on input types + of the same shape as `x1` and `x2`. + + """ + arr1 = numpy.asarray(x1) + arr2 = numpy.asarray(x2) + out_size = _get_num_chars(arr1) + _get_num_chars(arr2) + dtype = _use_unicode(arr1, arr2) + return _vec_string(arr1, (dtype, out_size), '__add__', (arr2,)) + + +def _multiply_dispatcher(a, i): + return (a,) + + +@array_function_dispatch(_multiply_dispatcher) +def multiply(a, i): + """ + Return (a * i), that is string multiple concatenation, + element-wise. + + Values in `i` of less than 0 are treated as 0 (which yields an + empty string). + + Parameters + ---------- + a : array_like of str or unicode + + i : array_like of ints + + Returns + ------- + out : ndarray + Output array of str or unicode, depending on input types + + """ + a_arr = numpy.asarray(a) + i_arr = numpy.asarray(i) + if not issubclass(i_arr.dtype.type, integer): + raise ValueError("Can only multiply by integers") + out_size = _get_num_chars(a_arr) * max(int(i_arr.max()), 0) + return _vec_string( + a_arr, (a_arr.dtype.type, out_size), '__mul__', (i_arr,)) + + +def _mod_dispatcher(a, values): + return (a, values) + + +@array_function_dispatch(_mod_dispatcher) +def mod(a, values): + """ + Return (a % i), that is pre-Python 2.6 string formatting + (interpolation), element-wise for a pair of array_likes of str + or unicode. + + Parameters + ---------- + a : array_like of str or unicode + + values : array_like of values + These values will be element-wise interpolated into the string. + + Returns + ------- + out : ndarray + Output array of str or unicode, depending on input types + + See Also + -------- + str.__mod__ + + """ + return _to_string_or_unicode_array( + _vec_string(a, object_, '__mod__', (values,))) + + +@array_function_dispatch(_unary_op_dispatcher) +def capitalize(a): + """ + Return a copy of `a` with only the first character of each element + capitalized. + + Calls `str.capitalize` element-wise. + + For 8-bit strings, this method is locale-dependent. + + Parameters + ---------- + a : array_like of str or unicode + Input array of strings to capitalize. + + Returns + ------- + out : ndarray + Output array of str or unicode, depending on input + types + + See Also + -------- + str.capitalize + + Examples + -------- + >>> c = np.array(['a1b2','1b2a','b2a1','2a1b'],'S4'); c + array(['a1b2', '1b2a', 'b2a1', '2a1b'], + dtype='|S4') + >>> np.char.capitalize(c) + array(['A1b2', '1b2a', 'B2a1', '2a1b'], + dtype='|S4') + + """ + a_arr = numpy.asarray(a) + return _vec_string(a_arr, a_arr.dtype, 'capitalize') + + +def _center_dispatcher(a, width, fillchar=None): + return (a,) + + +@array_function_dispatch(_center_dispatcher) +def center(a, width, fillchar=' '): + """ + Return a copy of `a` with its elements centered in a string of + length `width`. + + Calls `str.center` element-wise. 
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    width : int
+        The length of the resulting strings
+    fillchar : str or unicode, optional
+        The padding character to use (default is space).
+
+    Returns
+    -------
+    out : ndarray
+        Output array of str or unicode, depending on input
+        types
+
+    See Also
+    --------
+    str.center
+
+    """
+    a_arr = numpy.asarray(a)
+    width_arr = numpy.asarray(width)
+    size = int(numpy.max(width_arr.flat))
+    if numpy.issubdtype(a_arr.dtype, numpy.string_):
+        fillchar = asbytes(fillchar)
+    return _vec_string(
+        a_arr, (a_arr.dtype.type, size), 'center', (width_arr, fillchar))
+
+
+def _count_dispatcher(a, sub, start=None, end=None):
+    return (a,)
+
+
+@array_function_dispatch(_count_dispatcher)
+def count(a, sub, start=0, end=None):
+    """
+    Returns an array with the number of non-overlapping occurrences of
+    substring `sub` in the range [`start`, `end`].
+
+    Calls `str.count` element-wise.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    sub : str or unicode
+        The substring to search for.
+
+    start, end : int, optional
+        Optional arguments `start` and `end` are interpreted as slice
+        notation to specify the range in which to count.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of ints.
+
+    See Also
+    --------
+    str.count
+
+    Examples
+    --------
+    >>> c = np.array(['aAaAaA', '  aA  ', 'abBABba'])
+    >>> c
+    array(['aAaAaA', '  aA  ', 'abBABba'], dtype='<U7')
+    >>> np.char.count(c, 'A')
+    array([3, 1, 1])
+    >>> np.char.count(c, 'aA')
+    array([3, 1, 0])
+    >>> np.char.count(c, 'A', start=1, end=4)
+    array([2, 1, 1])
+    >>> np.char.count(c, 'A', start=1, end=3)
+    array([1, 0, 0])
+
+    """
+    return _vec_string(a, int_, 'count', [sub, start] + _clean_args(end))
+
+
+def _code_dispatcher(a, encoding=None, errors=None):
+    return (a,)
+
+
+@array_function_dispatch(_code_dispatcher)
+def decode(a, encoding=None, errors=None):
+    """
+    Calls `str.decode` element-wise.
+
+    The set of available codecs comes from the Python standard library,
+    and may be extended at runtime. For more information, see the
+    :mod:`codecs` module.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    encoding : str, optional
+        The name of an encoding
+
+    errors : str, optional
+        Specifies how to handle encoding errors
+
+    Returns
+    -------
+    out : ndarray
+
+    See Also
+    --------
+    str.decode
+
+    Notes
+    -----
+    The type of the result will depend on the encoding specified.
+
+    Examples
+    --------
+    >>> c = np.array(['aAaAaA', '  aA  ', 'abBABba'])
+    >>> c
+    array(['aAaAaA', '  aA  ', 'abBABba'], dtype='<U7')
+    >>> np.char.encode(c, encoding='cp037')
+    array(['\\x81\\xc1\\x81\\xc1\\x81\\xc1', '@@\\x81\\xc1@@',
+        '\\x81\\x82\\xc2\\xc1\\xc2\\x82\\x81'],
+        dtype='|S7')
+
+    """
+    return _to_string_or_unicode_array(
+        _vec_string(a, object_, 'decode', _clean_args(encoding, errors)))
+
+
+@array_function_dispatch(_code_dispatcher)
+def encode(a, encoding=None, errors=None):
+    """
+    Calls `str.encode` element-wise.
+
+    The set of available codecs comes from the Python standard library,
+    and may be extended at runtime. For more information, see the codecs
+    module.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    encoding : str, optional
+        The name of an encoding
+
+    errors : str, optional
+        Specifies how to handle encoding errors
+
+    Returns
+    -------
+    out : ndarray
+
+    See Also
+    --------
+    str.encode
+
+    Notes
+    -----
+    The type of the result will depend on the encoding specified.
+
+    """
+    return _to_string_or_unicode_array(
+        _vec_string(a, object_, 'encode', _clean_args(encoding, errors)))
+
+
+def _endswith_dispatcher(a, suffix, start=None, end=None):
+    return (a,)
+
+
+@array_function_dispatch(_endswith_dispatcher)
+def endswith(a, suffix, start=0, end=None):
+    """
+    Returns a boolean array which is `True` where the string element
+    in `a` ends with `suffix`, otherwise `False`.
+
+    Calls `str.endswith` element-wise.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    suffix : str
+
+    start, end : int, optional
+        With optional `start`, test beginning at that position. With
+        optional `end`, stop comparing at that position.
+
+    Returns
+    -------
+    out : ndarray
+        Outputs an array of bools.
+
+    See Also
+    --------
+    str.endswith
+
+    Examples
+    --------
+    >>> s = np.array(['foo', 'bar'])
+    >>> s[0] = 'foo'
+    >>> s[1] = 'bar'
+    >>> s
+    array(['foo', 'bar'], dtype='<U3')
+    >>> np.char.endswith(s, 'ar')
+    array([False,  True])
+    >>> np.char.endswith(s, 'a', start=1, end=2)
+    array([False,  True])
+
+    """
+    return _vec_string(
+        a, bool_, 'endswith', [suffix, start] + _clean_args(end))
+
+
+def _expandtabs_dispatcher(a, tabsize=None):
+    return (a,)
+
+
+@array_function_dispatch(_expandtabs_dispatcher)
+def expandtabs(a, tabsize=8):
+    """
+    Return a copy of each string element where all tab characters are
+    replaced by one or more spaces.
+
+    Calls `str.expandtabs` element-wise.
+
+    Return a copy of each string element where all tab characters are
+    replaced by one or more spaces, depending on the current column
+    and the given `tabsize`. The column number is reset to zero after
+    each newline occurring in the string. This doesn't understand other
+    non-printing characters or escape sequences.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+        Input array
+    tabsize : int, optional
+        Replace tabs with `tabsize` number of spaces. If not given defaults
+        to 8 spaces.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of str or unicode, depending on input type
+
+    See Also
+    --------
+    str.expandtabs
+
+    """
+    return _to_string_or_unicode_array(
+        _vec_string(a, object_, 'expandtabs', (tabsize,)))
+
+
+@array_function_dispatch(_count_dispatcher)
+def find(a, sub, start=0, end=None):
+    """
+    For each element, return the lowest index in the string where
+    substring `sub` is found.
+
+    Calls `str.find` element-wise.
+
+    For each element, return the lowest index in the string where
+    substring `sub` is found, such that `sub` is contained in the
+    range [`start`, `end`].
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    sub : str or unicode
+
+    start, end : int, optional
+        Optional arguments `start` and `end` are interpreted as in
+        slice notation.
+
+    Returns
+    -------
+    out : ndarray or int
+        Output array of ints. Returns -1 if `sub` is not found.
+
+    See Also
+    --------
+    str.find
+
+    """
+    return _vec_string(
+        a, int_, 'find', [sub, start] + _clean_args(end))
+
+
+@array_function_dispatch(_count_dispatcher)
+def index(a, sub, start=0, end=None):
+    """
+    Like `find`, but raises `ValueError` when the substring is not found.
+
+    Calls `str.index` element-wise.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    sub : str or unicode
+
+    start, end : int, optional
+
+    Returns
+    -------
+    out : ndarray
+        Output array of ints.
+
+    See Also
+    --------
+    find, str.find
+
+    """
+    return _vec_string(
+        a, int_, 'index', [sub, start] + _clean_args(end))
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def isalnum(a):
+    """
+    Returns true for each element if all characters in the string are
+    alphanumeric and there is at least one character, false otherwise.
+
+    Calls `str.isalnum` element-wise.
+
+    For 8-bit strings, this method is locale-dependent.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools
+
+    See Also
+    --------
+    str.isalnum
+    """
+    return _vec_string(a, bool_, 'isalnum')
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def isalpha(a):
+    """
+    Returns true for each element if all characters in the string are
+    alphabetic and there is at least one character, false otherwise.
+
+    Calls `str.isalpha` element-wise.
+
+    For 8-bit strings, this method is locale-dependent.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools
+
+    See Also
+    --------
+    str.isalpha
+    """
+    return _vec_string(a, bool_, 'isalpha')
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def isdigit(a):
+    """
+    Returns true for each element if all characters in the string are
+    digits and there is at least one character, false otherwise.
+
+    Calls `str.isdigit` element-wise.
+
+    For 8-bit strings, this method is locale-dependent.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools
+
+    See Also
+    --------
+    str.isdigit
+    """
+    return _vec_string(a, bool_, 'isdigit')
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def islower(a):
+    """
+    Returns true for each element if all cased characters in the
+    string are lowercase and there is at least one cased character,
+    false otherwise.
+
+    Calls `str.islower` element-wise.
+
+    For 8-bit strings, this method is locale-dependent.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools
+
+    See Also
+    --------
+    str.islower
+    """
+    return _vec_string(a, bool_, 'islower')
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def isspace(a):
+    """
+    Returns true for each element if there are only whitespace
+    characters in the string and there is at least one character,
+    false otherwise.
+
+    Calls `str.isspace` element-wise.
+
+    For 8-bit strings, this method is locale-dependent.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools
+
+    See Also
+    --------
+    str.isspace
+    """
+    return _vec_string(a, bool_, 'isspace')
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def istitle(a):
+    """
+    Returns true for each element if the element is a titlecased
+    string and there is at least one character, false otherwise.
+
+    Call `str.istitle` element-wise.
+
+    For 8-bit strings, this method is locale-dependent.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools
+
+    See Also
+    --------
+    str.istitle
+    """
+    return _vec_string(a, bool_, 'istitle')
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def isupper(a):
+    """
+    Returns true for each element if all cased characters in the
+    string are uppercase and there is at least one character, false
+    otherwise.
+
+    Call `str.isupper` element-wise.
+
+    For 8-bit strings, this method is locale-dependent.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools
+
+    See Also
+    --------
+    str.isupper
+    """
+    return _vec_string(a, bool_, 'isupper')
+
+
+def _join_dispatcher(sep, seq):
+    return (sep, seq)
+
+
+@array_function_dispatch(_join_dispatcher)
+def join(sep, seq):
+    """
+    Return a string which is the concatenation of the strings in the
+    sequence `seq`.
+
+    Calls `str.join` element-wise.
+
+    Parameters
+    ----------
+    sep : array_like of str or unicode
+    seq : array_like of str or unicode
+
+    Returns
+    -------
+    out : ndarray
+        Output array of str or unicode, depending on input types
+
+    See Also
+    --------
+    str.join
+    """
+    return _to_string_or_unicode_array(
+        _vec_string(sep, object_, 'join', (seq,)))
+
+
+def _just_dispatcher(a, width, fillchar=None):
+    return (a,)
+
+
+@array_function_dispatch(_just_dispatcher)
+def ljust(a, width, fillchar=' '):
+    """
+    Return an array with the elements of `a` left-justified in a
+    string of length `width`.
+
+    Calls `str.ljust` element-wise.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    width : int
+        The length of the resulting strings
+    fillchar : str or unicode, optional
+        The character to use for padding
+
+    Returns
+    -------
+    out : ndarray
+        Output array of str or unicode, depending on input type
+
+    See Also
+    --------
+    str.ljust
+
+    """
+    a_arr = numpy.asarray(a)
+    width_arr = numpy.asarray(width)
+    size = int(numpy.max(width_arr.flat))
+    if numpy.issubdtype(a_arr.dtype, numpy.string_):
+        fillchar = asbytes(fillchar)
+    return _vec_string(
+        a_arr, (a_arr.dtype.type, size), 'ljust', (width_arr, fillchar))
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def lower(a):
+    """
+    Return an array with the elements converted to lowercase.
+
+    Call `str.lower` element-wise.
+
+    For 8-bit strings, this method is locale-dependent.
+
+    Parameters
+    ----------
+    a : array_like, {str, unicode}
+        Input array.
+
+    Returns
+    -------
+    out : ndarray, {str, unicode}
+        Output array of str or unicode, depending on input type
+
+    See Also
+    --------
+    str.lower
+
+    Examples
+    --------
+    >>> c = np.array(['A1B C', '1BCA', 'BCA1']); c
+    array(['A1B C', '1BCA', 'BCA1'], dtype='<U5')
+    >>> np.char.lower(c)
+    array(['a1b c', '1bca', 'bca1'], dtype='<U5')
+
+    """
+    a_arr = numpy.asarray(a)
+    return _vec_string(a_arr, a_arr.dtype, 'lower')
+
+
+def _strip_dispatcher(a, chars=None):
+    return (a,)
+
+
+@array_function_dispatch(_strip_dispatcher)
+def lstrip(a, chars=None):
+    """
+    For each element in `a`, return a copy with the leading characters
+    removed.
+
+    Calls `str.lstrip` element-wise.
+
+    Parameters
+    ----------
+    a : array-like, {str, unicode}
+        Input array.
+
+    chars : {str, unicode}, optional
+        The `chars` argument is a string specifying the set of
+        characters to be removed. If omitted or None, the `chars`
+        argument defaults to removing whitespace. The `chars` argument
+        is not a prefix; rather, all combinations of its values are
+        stripped.
+
+    Returns
+    -------
+    out : ndarray, {str, unicode}
+        Output array of str or unicode, depending on input type
+
+    See Also
+    --------
+    str.lstrip
+
+    Examples
+    --------
+    >>> c = np.array(['aAaAaA', '  aA  ', 'abBABba'])
+    >>> c
+    array(['aAaAaA', '  aA  ', 'abBABba'], dtype='<U7')
+    >>> np.char.lstrip(c, 'a')
+    array(['AaAaA', '  aA  ', 'bBABba'], dtype='<U7')
+    >>> np.char.lstrip(c, 'A') # leaves c unchanged
+    array(['aAaAaA', '  aA  ', 'abBABba'], dtype='<U7')
+    >>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, '')).all()
+    ... # XXX: is this a regression? This used to return True
+    ... # np.char.lstrip(c,'') does not modify c at all.
+    False
+    >>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, None)).all()
+    True
+
+    """
+    a_arr = numpy.asarray(a)
+    return _vec_string(a_arr, a_arr.dtype, 'lstrip', (chars,))
+
+
+def _partition_dispatcher(a, sep):
+    return (a,)
+
+
+@array_function_dispatch(_partition_dispatcher)
+def partition(a, sep):
+    """
+    Partition each element in `a` around `sep`.
+
+    Calls `str.partition` element-wise.
+
+    For each element in `a`, split the element as the first
+    occurrence of `sep`, and return 3 strings containing the part
+    before the separator, the separator itself, and the part after
+    the separator. If the separator is not found, return 3 strings
+    containing the string itself, followed by two empty strings.
+ + Parameters + ---------- + a : array_like, {str, unicode} + Input array + sep : {str, unicode} + Separator to split each string element in `a`. + + Returns + ------- + out : ndarray, {str, unicode} + Output array of str or unicode, depending on input type. + The output array will have an extra dimension with 3 + elements per input element. + + See Also + -------- + str.partition + + """ + return _to_string_or_unicode_array( + _vec_string(a, object_, 'partition', (sep,))) + + +def _replace_dispatcher(a, old, new, count=None): + return (a,) + + +@array_function_dispatch(_replace_dispatcher) +def replace(a, old, new, count=None): + """ + For each element in `a`, return a copy of the string with all + occurrences of substring `old` replaced by `new`. + + Calls `str.replace` element-wise. + + Parameters + ---------- + a : array-like of str or unicode + + old, new : str or unicode + + count : int, optional + If the optional argument `count` is given, only the first + `count` occurrences are replaced. + + Returns + ------- + out : ndarray + Output array of str or unicode, depending on input type + + See Also + -------- + str.replace + + """ + return _to_string_or_unicode_array( + _vec_string( + a, object_, 'replace', [old, new] + _clean_args(count))) + + +@array_function_dispatch(_count_dispatcher) +def rfind(a, sub, start=0, end=None): + """ + For each element in `a`, return the highest index in the string + where substring `sub` is found, such that `sub` is contained + within [`start`, `end`]. + + Calls `str.rfind` element-wise. + + Parameters + ---------- + a : array-like of str or unicode + + sub : str or unicode + + start, end : int, optional + Optional arguments `start` and `end` are interpreted as in + slice notation. + + Returns + ------- + out : ndarray + Output array of ints. Return -1 on failure. + + See Also + -------- + str.rfind + + """ + return _vec_string( + a, int_, 'rfind', [sub, start] + _clean_args(end)) + + +@array_function_dispatch(_count_dispatcher) +def rindex(a, sub, start=0, end=None): + """ + Like `rfind`, but raises `ValueError` when the substring `sub` is + not found. + + Calls `str.rindex` element-wise. + + Parameters + ---------- + a : array-like of str or unicode + + sub : str or unicode + + start, end : int, optional + + Returns + ------- + out : ndarray + Output array of ints. + + See Also + -------- + rfind, str.rindex + + """ + return _vec_string( + a, int_, 'rindex', [sub, start] + _clean_args(end)) + + +@array_function_dispatch(_just_dispatcher) +def rjust(a, width, fillchar=' '): + """ + Return an array with the elements of `a` right-justified in a + string of length `width`. + + Calls `str.rjust` element-wise. + + Parameters + ---------- + a : array_like of str or unicode + + width : int + The length of the resulting strings + fillchar : str or unicode, optional + The character to use for padding + + Returns + ------- + out : ndarray + Output array of str or unicode, depending on input type + + See Also + -------- + str.rjust + + """ + a_arr = numpy.asarray(a) + width_arr = numpy.asarray(width) + size = int(numpy.max(width_arr.flat)) + if numpy.issubdtype(a_arr.dtype, numpy.string_): + fillchar = asbytes(fillchar) + return _vec_string( + a_arr, (a_arr.dtype.type, size), 'rjust', (width_arr, fillchar)) + + +@array_function_dispatch(_partition_dispatcher) +def rpartition(a, sep): + """ + Partition (split) each element around the right-most separator. + + Calls `str.rpartition` element-wise. 
+ + For each element in `a`, split the element as the last + occurrence of `sep`, and return 3 strings containing the part + before the separator, the separator itself, and the part after + the separator. If the separator is not found, return 3 strings + containing the string itself, followed by two empty strings. + + Parameters + ---------- + a : array_like of str or unicode + Input array + sep : str or unicode + Right-most separator to split each element in array. + + Returns + ------- + out : ndarray + Output array of string or unicode, depending on input + type. The output array will have an extra dimension with + 3 elements per input element. + + See Also + -------- + str.rpartition + + """ + return _to_string_or_unicode_array( + _vec_string(a, object_, 'rpartition', (sep,))) + + +def _split_dispatcher(a, sep=None, maxsplit=None): + return (a,) + + +@array_function_dispatch(_split_dispatcher) +def rsplit(a, sep=None, maxsplit=None): + """ + For each element in `a`, return a list of the words in the + string, using `sep` as the delimiter string. + + Calls `str.rsplit` element-wise. + + Except for splitting from the right, `rsplit` + behaves like `split`. + + Parameters + ---------- + a : array_like of str or unicode + + sep : str or unicode, optional + If `sep` is not specified or None, any whitespace string + is a separator. + maxsplit : int, optional + If `maxsplit` is given, at most `maxsplit` splits are done, + the rightmost ones. + + Returns + ------- + out : ndarray + Array of list objects + + See Also + -------- + str.rsplit, split + + """ + # This will return an array of lists of different sizes, so we + # leave it as an object array + return _vec_string( + a, object_, 'rsplit', [sep] + _clean_args(maxsplit)) + + +def _strip_dispatcher(a, chars=None): + return (a,) + + +@array_function_dispatch(_strip_dispatcher) +def rstrip(a, chars=None): + """ + For each element in `a`, return a copy with the trailing + characters removed. + + Calls `str.rstrip` element-wise. + + Parameters + ---------- + a : array-like of str or unicode + + chars : str or unicode, optional + The `chars` argument is a string specifying the set of + characters to be removed. If omitted or None, the `chars` + argument defaults to removing whitespace. The `chars` argument + is not a suffix; rather, all combinations of its values are + stripped. + + Returns + ------- + out : ndarray + Output array of str or unicode, depending on input type + + See Also + -------- + str.rstrip + + Examples + -------- + >>> c = np.array(['aAaAaA', 'abBABba'], dtype='S7'); c + array(['aAaAaA', 'abBABba'], + dtype='|S7') + >>> np.char.rstrip(c, b'a') + array(['aAaAaA', 'abBABb'], + dtype='|S7') + >>> np.char.rstrip(c, b'A') + array(['aAaAa', 'abBABba'], + dtype='|S7') + + """ + a_arr = numpy.asarray(a) + return _vec_string(a_arr, a_arr.dtype, 'rstrip', (chars,)) + + +@array_function_dispatch(_split_dispatcher) +def split(a, sep=None, maxsplit=None): + """ + For each element in `a`, return a list of the words in the + string, using `sep` as the delimiter string. + + Calls `str.split` element-wise. + + Parameters + ---------- + a : array_like of str or unicode + + sep : str or unicode, optional + If `sep` is not specified or None, any whitespace string is a + separator. + + maxsplit : int, optional + If `maxsplit` is given, at most `maxsplit` splits are done. 
+
+    Returns
+    -------
+    out : ndarray
+        Array of list objects
+
+    See Also
+    --------
+    str.split, rsplit
+
+    """
+    # This will return an array of lists of different sizes, so we
+    # leave it as an object array
+    return _vec_string(
+        a, object_, 'split', [sep] + _clean_args(maxsplit))
+
+
+def _splitlines_dispatcher(a, keepends=None):
+    return (a,)
+
+
+@array_function_dispatch(_splitlines_dispatcher)
+def splitlines(a, keepends=None):
+    """
+    For each element in `a`, return a list of the lines in the
+    element, breaking at line boundaries.
+
+    Calls `str.splitlines` element-wise.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    keepends : bool, optional
+        Line breaks are not included in the resulting list unless
+        keepends is given and true.
+
+    Returns
+    -------
+    out : ndarray
+        Array of list objects
+
+    See Also
+    --------
+    str.splitlines
+
+    """
+    return _vec_string(
+        a, object_, 'splitlines', _clean_args(keepends))
+
+
+def _startswith_dispatcher(a, prefix, start=None, end=None):
+    return (a,)
+
+
+@array_function_dispatch(_startswith_dispatcher)
+def startswith(a, prefix, start=0, end=None):
+    """
+    Returns a boolean array which is `True` where the string element
+    in `a` starts with `prefix`, otherwise `False`.
+
+    Calls `str.startswith` element-wise.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    prefix : str
+
+    start, end : int, optional
+        With optional `start`, test beginning at that position. With
+        optional `end`, stop comparing at that position.
+
+    Returns
+    -------
+    out : ndarray
+        Array of booleans
+
+    See Also
+    --------
+    str.startswith
+
+    """
+    return _vec_string(
+        a, bool_, 'startswith', [prefix, start] + _clean_args(end))
+
+
+@array_function_dispatch(_strip_dispatcher)
+def strip(a, chars=None):
+    """
+    For each element in `a`, return a copy with the leading and
+    trailing characters removed.
+
+    Calls `str.strip` element-wise.
+
+    Parameters
+    ----------
+    a : array-like of str or unicode
+
+    chars : str or unicode, optional
+        The `chars` argument is a string specifying the set of
+        characters to be removed. If omitted or None, the `chars`
+        argument defaults to removing whitespace. The `chars` argument
+        is not a prefix or suffix; rather, all combinations of its
+        values are stripped.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of str or unicode, depending on input type
+
+    See Also
+    --------
+    str.strip
+
+    Examples
+    --------
+    >>> c = np.array(['aAaAaA', '  aA  ', 'abBABba'])
+    >>> c
+    array(['aAaAaA', '  aA  ', 'abBABba'], dtype='<U7')
+    >>> np.char.strip(c)
+    array(['aAaAaA', 'aA', 'abBABba'], dtype='<U7')
+    >>> np.char.strip(c, 'a') # 'a' unstripped from c[1] because whitespace leads
+    array(['AaAaA', '  aA  ', 'bBABb'], dtype='<U7')
+    >>> np.char.strip(c, 'A') # 'A' unstripped from c[1] because (unprinted) ws trails
+    array(['aAaAa', '  aA  ', 'abBABba'], dtype='<U7')
+
+    """
+    a_arr = numpy.asarray(a)
+    return _vec_string(a_arr, a_arr.dtype, 'strip', (chars,))
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def swapcase(a):
+    """
+    Return element-wise a copy of the string with
+    uppercase characters converted to lowercase and vice versa.
+
+    Calls `str.swapcase` element-wise.
+
+    For 8-bit strings, this method is locale-dependent.
+
+    Parameters
+    ----------
+    a : array_like, {str, unicode}
+        Input array.
+
+    Returns
+    -------
+    out : ndarray, {str, unicode}
+        Output array of str or unicode, depending on input type
+
+    See Also
+    --------
+    str.swapcase
+
+    Examples
+    --------
+    >>> c=np.array(['a1B c','1b Ca','b Ca1','cA1b'],'S5'); c
+    array(['a1B c', '1b Ca', 'b Ca1', 'cA1b'],
+        dtype='|S5')
+    >>> np.char.swapcase(c)
+    array(['A1b C', '1B cA', 'B cA1', 'Ca1B'],
+        dtype='|S5')
+
+    """
+    a_arr = numpy.asarray(a)
+    return _vec_string(a_arr, a_arr.dtype, 'swapcase')
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def title(a):
+    """
+    Return element-wise title cased version of string or unicode.
+
+    Title case words start with uppercase characters, all remaining cased
+    characters are lowercase.
+
+    Calls `str.title` element-wise.
+
+    For 8-bit strings, this method is locale-dependent.
+
+    Parameters
+    ----------
+    a : array_like, {str, unicode}
+        Input array.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of str or unicode, depending on input type
+
+    See Also
+    --------
+    str.title
+
+    Examples
+    --------
+    >>> c=np.array(['a1b c','1b ca','b ca1','ca1b'],'S5'); c
+    array(['a1b c', '1b ca', 'b ca1', 'ca1b'],
+        dtype='|S5')
+    >>> np.char.title(c)
+    array(['A1B C', '1B Ca', 'B Ca1', 'Ca1B'],
+        dtype='|S5')
+
+    """
+    a_arr = numpy.asarray(a)
+    return _vec_string(a_arr, a_arr.dtype, 'title')
+
+
+def _translate_dispatcher(a, table, deletechars=None):
+    return (a,)
+
+
+@array_function_dispatch(_translate_dispatcher)
+def translate(a, table, deletechars=None):
+    """
+    For each element in `a`, return a copy of the string where all
+    characters occurring in the optional argument `deletechars` are
+    removed, and the remaining characters have been mapped through the
+    given translation table.
+
+    Calls `str.translate` element-wise.
+
+    Parameters
+    ----------
+    a : array-like of str or unicode
+
+    table : str of length 256
+
+    deletechars : str
+
+    Returns
+    -------
+    out : ndarray
+        Output array of str or unicode, depending on input type
+
+    See Also
+    --------
+    str.translate
+
+    """
+    a_arr = numpy.asarray(a)
+    if issubclass(a_arr.dtype.type, unicode_):
+        return _vec_string(
+            a_arr, a_arr.dtype, 'translate', (table,))
+    else:
+        return _vec_string(
+            a_arr, a_arr.dtype, 'translate', [table] + _clean_args(deletechars))
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def upper(a):
+    """
+    Return an array with the elements converted to uppercase.
+
+    Calls `str.upper` element-wise.
+
+    For 8-bit strings, this method is locale-dependent.
+
+    Parameters
+    ----------
+    a : array_like, {str, unicode}
+        Input array.
+
+    Returns
+    -------
+    out : ndarray, {str, unicode}
+        Output array of str or unicode, depending on input type
+
+    See Also
+    --------
+    str.upper
+
+    Examples
+    --------
+    >>> c = np.array(['a1b c', '1bca', 'bca1']); c
+    array(['a1b c', '1bca', 'bca1'], dtype='<U5')
+    >>> np.char.upper(c)
+    array(['A1B C', '1BCA', 'BCA1'], dtype='<U5')
+
+    """
+    a_arr = numpy.asarray(a)
+    return _vec_string(a_arr, a_arr.dtype, 'upper')
+
+
+def _zfill_dispatcher(a, width):
+    return (a,)
+
+
+@array_function_dispatch(_zfill_dispatcher)
+def zfill(a, width):
+    """
+    Return the numeric string left-filled with zeros
+
+    Calls `str.zfill` element-wise.
+
+    Parameters
+    ----------
+    a : array_like, {str, unicode}
+        Input array.
+    width : int
+        Width of string to left-fill elements in `a`.
+
+    Returns
+    -------
+    out : ndarray, {str, unicode}
+        Output array of str or unicode, depending on input type
+
+    See Also
+    --------
+    str.zfill
+
+    """
+    a_arr = numpy.asarray(a)
+    width_arr = numpy.asarray(width)
+    size = int(numpy.max(width_arr.flat))
+    return _vec_string(
+        a_arr, (a_arr.dtype.type, size), 'zfill', (width_arr,))
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def isnumeric(a):
+    """
+    For each element, return True if there are only numeric
+    characters in the element.
+
+    Calls `unicode.isnumeric` element-wise.
+
+    Numeric characters include digit characters, and all characters
+    that have the Unicode numeric value property, e.g. ``U+2155,
+    VULGAR FRACTION ONE FIFTH``.
+
+    Parameters
+    ----------
+    a : array_like, unicode
+        Input array.
+
+    Returns
+    -------
+    out : ndarray, bool
+        Array of booleans of same shape as `a`.
+
+    See Also
+    --------
+    unicode.isnumeric
+
+    """
+    if _use_unicode(a) != unicode_:
+        raise TypeError("isnumeric is only available for Unicode strings and arrays")
+    return _vec_string(a, bool_, 'isnumeric')
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def isdecimal(a):
+    """
+    For each element, return True if there are only decimal
+    characters in the element.
+
+    Calls `unicode.isdecimal` element-wise.
+
+    Decimal characters include digit characters, and all characters
+    that can be used to form decimal-radix numbers,
+    e.g. ``U+0660, ARABIC-INDIC DIGIT ZERO``.
+
+    Parameters
+    ----------
+    a : array_like, unicode
+        Input array.
+
+    Returns
+    -------
+    out : ndarray, bool
+        Array of booleans identical in shape to `a`.
+
+    See Also
+    --------
+    unicode.isdecimal
+
+    """
+    if _use_unicode(a) != unicode_:
+        raise TypeError("isdecimal is only available for Unicode strings and arrays")
+    return _vec_string(a, bool_, 'isdecimal')
+
+
+@set_module('numpy')
+class chararray(ndarray):
+    """
+    chararray(shape, itemsize=1, unicode=False, buffer=None, offset=0,
+              strides=None, order=None)
+
+    Provides a convenient view on arrays of string and unicode values.
+
+    .. note::
+       The `chararray` class exists for backwards compatibility with
+       Numarray; it is not recommended for new development. Starting from numpy
+       1.4, if one needs arrays of strings, it is recommended to use arrays of
+       `dtype` `object_`, `string_` or `unicode_`, and use the free functions
+       in the `numpy.char` module for fast vectorized string operations.
+
+    Versus a regular NumPy array of type `str` or `unicode`, this
+    class adds the following functionality:
+
+      1) values automatically have whitespace removed from the end
+         when indexed
+
+      2) comparison operators automatically remove whitespace from the
+         end when comparing values
+
+      3) vectorized string operations are provided as methods
+         (e.g. `.endswith`) and infix operators (e.g. ``"+", "*", "%"``)
+
+    chararrays should be created using `numpy.char.array` or
+    `numpy.char.asarray`, rather than this constructor directly.
+
+    This constructor creates the array, using `buffer` (with `offset`
+    and `strides`) if it is not ``None``. If `buffer` is ``None``, then
+    constructs a new array with `strides` in "C order", unless both
+    ``len(shape) >= 2`` and ``order='F'``, in which case `strides`
+    is in "Fortran order".
+
+    Methods
+    -------
+    astype
+    argsort
+    copy
+    count
+    decode
+    dump
+    dumps
+    encode
+    endswith
+    expandtabs
+    fill
+    find
+    flatten
+    getfield
+    index
+    isalnum
+    isalpha
+    isdecimal
+    isdigit
+    islower
+    isnumeric
+    isspace
+    istitle
+    isupper
+    item
+    join
+    ljust
+    lower
+    lstrip
+    nonzero
+    put
+    ravel
+    repeat
+    replace
+    reshape
+    resize
+    rfind
+    rindex
+    rjust
+    rsplit
+    rstrip
+    searchsorted
+    setfield
+    setflags
+    sort
+    split
+    splitlines
+    squeeze
+    startswith
+    strip
+    swapaxes
+    swapcase
+    take
+    title
+    tofile
+    tolist
+    tostring
+    translate
+    transpose
+    upper
+    view
+    zfill
+
+    Parameters
+    ----------
+    shape : tuple
+        Shape of the array.
+    itemsize : int, optional
+        Length of each array element, in number of characters. Default is 1.
+    unicode : bool, optional
+        Are the array elements of type unicode (True) or string (False).
+        Default is False.
+    buffer : object exposing the buffer interface or str, optional
+        Memory address of the start of the array data. Default is None,
+        in which case a new array is created.
+    offset : int, optional
+        Fixed stride displacement from the beginning of an axis.
+        Default is 0. Needs to be >=0.
+    strides : array_like of ints, optional
+        Strides for the array (see `ndarray.strides` for full description).
+        Default is None.
+ order : {'C', 'F'}, optional + The order in which the array data is stored in memory: 'C' -> + "row major" order (the default), 'F' -> "column major" + (Fortran) order. + + Examples + -------- + >>> charar = np.chararray((3, 3)) + >>> charar[:] = 'a' + >>> charar + chararray([[b'a', b'a', b'a'], + [b'a', b'a', b'a'], + [b'a', b'a', b'a']], dtype='|S1') + + >>> charar = np.chararray(charar.shape, itemsize=5) + >>> charar[:] = 'abc' + >>> charar + chararray([[b'abc', b'abc', b'abc'], + [b'abc', b'abc', b'abc'], + [b'abc', b'abc', b'abc']], dtype='|S5') + + """ + def __new__(subtype, shape, itemsize=1, unicode=False, buffer=None, + offset=0, strides=None, order='C'): + global _globalvar + + if unicode: + dtype = unicode_ + else: + dtype = string_ + + # force itemsize to be a Python int, since using NumPy integer + # types results in itemsize.itemsize being used as the size of + # strings in the new array. + itemsize = int(itemsize) + + if isinstance(buffer, str): + # unicode objects do not have the buffer interface + filler = buffer + buffer = None + else: + filler = None + + _globalvar = 1 + if buffer is None: + self = ndarray.__new__(subtype, shape, (dtype, itemsize), + order=order) + else: + self = ndarray.__new__(subtype, shape, (dtype, itemsize), + buffer=buffer, + offset=offset, strides=strides, + order=order) + if filler is not None: + self[...] = filler + _globalvar = 0 + return self + + def __array_finalize__(self, obj): + # The b is a special case because it is used for reconstructing. + if not _globalvar and self.dtype.char not in 'SUbc': + raise ValueError("Can only create a chararray from string data.") + + def __getitem__(self, obj): + val = ndarray.__getitem__(self, obj) + + if isinstance(val, character): + temp = val.rstrip() + if len(temp) == 0: + val = '' + else: + val = temp + + return val + + # IMPLEMENTATION NOTE: Most of the methods of this class are + # direct delegations to the free functions in this module. + # However, those that return an array of strings should instead + # return a chararray, so some extra wrapping is required. + + def __eq__(self, other): + """ + Return (self == other) element-wise. + + See Also + -------- + equal + """ + return equal(self, other) + + def __ne__(self, other): + """ + Return (self != other) element-wise. + + See Also + -------- + not_equal + """ + return not_equal(self, other) + + def __ge__(self, other): + """ + Return (self >= other) element-wise. + + See Also + -------- + greater_equal + """ + return greater_equal(self, other) + + def __le__(self, other): + """ + Return (self <= other) element-wise. + + See Also + -------- + less_equal + """ + return less_equal(self, other) + + def __gt__(self, other): + """ + Return (self > other) element-wise. + + See Also + -------- + greater + """ + return greater(self, other) + + def __lt__(self, other): + """ + Return (self < other) element-wise. + + See Also + -------- + less + """ + return less(self, other) + + def __add__(self, other): + """ + Return (self + other), that is string concatenation, + element-wise for a pair of array_likes of str or unicode. + + See Also + -------- + add + """ + return asarray(add(self, other)) + + def __radd__(self, other): + """ + Return (other + self), that is string concatenation, + element-wise for a pair of array_likes of `string_` or `unicode_`. + + See Also + -------- + add + """ + return asarray(add(numpy.asarray(other), self)) + + def __mul__(self, i): + """ + Return (self * i), that is string multiple concatenation, + element-wise. 
+ + See Also + -------- + multiply + """ + return asarray(multiply(self, i)) + + def __rmul__(self, i): + """ + Return (self * i), that is string multiple concatenation, + element-wise. + + See Also + -------- + multiply + """ + return asarray(multiply(self, i)) + + def __mod__(self, i): + """ + Return (self % i), that is pre-Python 2.6 string formatting + (interpolation), element-wise for a pair of array_likes of `string_` + or `unicode_`. + + See Also + -------- + mod + """ + return asarray(mod(self, i)) + + def __rmod__(self, other): + return NotImplemented + + def argsort(self, axis=-1, kind=None, order=None): + """ + Return the indices that sort the array lexicographically. + + For full documentation see `numpy.argsort`, for which this method is + in fact merely a "thin wrapper." + + Examples + -------- + >>> c = np.array(['a1b c', '1b ca', 'b ca1', 'Ca1b'], 'S5') + >>> c = c.view(np.chararray); c + chararray(['a1b c', '1b ca', 'b ca1', 'Ca1b'], + dtype='|S5') + >>> c[c.argsort()] + chararray(['1b ca', 'Ca1b', 'a1b c', 'b ca1'], + dtype='|S5') + + """ + return self.__array__().argsort(axis, kind, order) + argsort.__doc__ = ndarray.argsort.__doc__ + + def capitalize(self): + """ + Return a copy of `self` with only the first character of each element + capitalized. + + See Also + -------- + char.capitalize + + """ + return asarray(capitalize(self)) + + def center(self, width, fillchar=' '): + """ + Return a copy of `self` with its elements centered in a + string of length `width`. + + See Also + -------- + center + """ + return asarray(center(self, width, fillchar)) + + def count(self, sub, start=0, end=None): + """ + Returns an array with the number of non-overlapping occurrences of + substring `sub` in the range [`start`, `end`]. + + See Also + -------- + char.count + + """ + return count(self, sub, start, end) + + def decode(self, encoding=None, errors=None): + """ + Calls `str.decode` element-wise. + + See Also + -------- + char.decode + + """ + return decode(self, encoding, errors) + + def encode(self, encoding=None, errors=None): + """ + Calls `str.encode` element-wise. + + See Also + -------- + char.encode + + """ + return encode(self, encoding, errors) + + def endswith(self, suffix, start=0, end=None): + """ + Returns a boolean array which is `True` where the string element + in `self` ends with `suffix`, otherwise `False`. + + See Also + -------- + char.endswith + + """ + return endswith(self, suffix, start, end) + + def expandtabs(self, tabsize=8): + """ + Return a copy of each string element where all tab characters are + replaced by one or more spaces. + + See Also + -------- + char.expandtabs + + """ + return asarray(expandtabs(self, tabsize)) + + def find(self, sub, start=0, end=None): + """ + For each element, return the lowest index in the string where + substring `sub` is found. + + See Also + -------- + char.find + + """ + return find(self, sub, start, end) + + def index(self, sub, start=0, end=None): + """ + Like `find`, but raises `ValueError` when the substring is not found. + + See Also + -------- + char.index + + """ + return index(self, sub, start, end) + + def isalnum(self): + """ + Returns true for each element if all characters in the string + are alphanumeric and there is at least one character, false + otherwise. + + See Also + -------- + char.isalnum + + """ + return isalnum(self) + + def isalpha(self): + """ + Returns true for each element if all characters in the string + are alphabetic and there is at least one character, false + otherwise. 
+ + See Also + -------- + char.isalpha + + """ + return isalpha(self) + + def isdigit(self): + """ + Returns true for each element if all characters in the string are + digits and there is at least one character, false otherwise. + + See Also + -------- + char.isdigit + + """ + return isdigit(self) + + def islower(self): + """ + Returns true for each element if all cased characters in the + string are lowercase and there is at least one cased character, + false otherwise. + + See Also + -------- + char.islower + + """ + return islower(self) + + def isspace(self): + """ + Returns true for each element if there are only whitespace + characters in the string and there is at least one character, + false otherwise. + + See Also + -------- + char.isspace + + """ + return isspace(self) + + def istitle(self): + """ + Returns true for each element if the element is a titlecased + string and there is at least one character, false otherwise. + + See Also + -------- + char.istitle + + """ + return istitle(self) + + def isupper(self): + """ + Returns true for each element if all cased characters in the + string are uppercase and there is at least one character, false + otherwise. + + See Also + -------- + char.isupper + + """ + return isupper(self) + + def join(self, seq): + """ + Return a string which is the concatenation of the strings in the + sequence `seq`. + + See Also + -------- + char.join + + """ + return join(self, seq) + + def ljust(self, width, fillchar=' '): + """ + Return an array with the elements of `self` left-justified in a + string of length `width`. + + See Also + -------- + char.ljust + + """ + return asarray(ljust(self, width, fillchar)) + + def lower(self): + """ + Return an array with the elements of `self` converted to + lowercase. + + See Also + -------- + char.lower + + """ + return asarray(lower(self)) + + def lstrip(self, chars=None): + """ + For each element in `self`, return a copy with the leading characters + removed. + + See Also + -------- + char.lstrip + + """ + return asarray(lstrip(self, chars)) + + def partition(self, sep): + """ + Partition each element in `self` around `sep`. + + See Also + -------- + partition + """ + return asarray(partition(self, sep)) + + def replace(self, old, new, count=None): + """ + For each element in `self`, return a copy of the string with all + occurrences of substring `old` replaced by `new`. + + See Also + -------- + char.replace + + """ + return asarray(replace(self, old, new, count)) + + def rfind(self, sub, start=0, end=None): + """ + For each element in `self`, return the highest index in the string + where substring `sub` is found, such that `sub` is contained + within [`start`, `end`]. + + See Also + -------- + char.rfind + + """ + return rfind(self, sub, start, end) + + def rindex(self, sub, start=0, end=None): + """ + Like `rfind`, but raises `ValueError` when the substring `sub` is + not found. + + See Also + -------- + char.rindex + + """ + return rindex(self, sub, start, end) + + def rjust(self, width, fillchar=' '): + """ + Return an array with the elements of `self` + right-justified in a string of length `width`. + + See Also + -------- + char.rjust + + """ + return asarray(rjust(self, width, fillchar)) + + def rpartition(self, sep): + """ + Partition each element in `self` around `sep`. 
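+        Like `partition`, but the search for `sep` starts from the end
+        of each element.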
+ + See Also + -------- + rpartition + """ + return asarray(rpartition(self, sep)) + + def rsplit(self, sep=None, maxsplit=None): + """ + For each element in `self`, return a list of the words in + the string, using `sep` as the delimiter string. + + See Also + -------- + char.rsplit + + """ + return rsplit(self, sep, maxsplit) + + def rstrip(self, chars=None): + """ + For each element in `self`, return a copy with the trailing + characters removed. + + See Also + -------- + char.rstrip + + """ + return asarray(rstrip(self, chars)) + + def split(self, sep=None, maxsplit=None): + """ + For each element in `self`, return a list of the words in the + string, using `sep` as the delimiter string. + + See Also + -------- + char.split + + """ + return split(self, sep, maxsplit) + + def splitlines(self, keepends=None): + """ + For each element in `self`, return a list of the lines in the + element, breaking at line boundaries. + + See Also + -------- + char.splitlines + + """ + return splitlines(self, keepends) + + def startswith(self, prefix, start=0, end=None): + """ + Returns a boolean array which is `True` where the string element + in `self` starts with `prefix`, otherwise `False`. + + See Also + -------- + char.startswith + + """ + return startswith(self, prefix, start, end) + + def strip(self, chars=None): + """ + For each element in `self`, return a copy with the leading and + trailing characters removed. + + See Also + -------- + char.strip + + """ + return asarray(strip(self, chars)) + + def swapcase(self): + """ + For each element in `self`, return a copy of the string with + uppercase characters converted to lowercase and vice versa. + + See Also + -------- + char.swapcase + + """ + return asarray(swapcase(self)) + + def title(self): + """ + For each element in `self`, return a titlecased version of the + string: words start with uppercase characters, all remaining cased + characters are lowercase. + + See Also + -------- + char.title + + """ + return asarray(title(self)) + + def translate(self, table, deletechars=None): + """ + For each element in `self`, return a copy of the string where + all characters occurring in the optional argument + `deletechars` are removed, and the remaining characters have + been mapped through the given translation table. + + See Also + -------- + char.translate + + """ + return asarray(translate(self, table, deletechars)) + + def upper(self): + """ + Return an array with the elements of `self` converted to + uppercase. + + See Also + -------- + char.upper + + """ + return asarray(upper(self)) + + def zfill(self, width): + """ + Return the numeric string left-filled with zeros in a string of + length `width`. + + See Also + -------- + char.zfill + + """ + return asarray(zfill(self, width)) + + def isnumeric(self): + """ + For each element in `self`, return True if there are only + numeric characters in the element. + + See Also + -------- + char.isnumeric + + """ + return isnumeric(self) + + def isdecimal(self): + """ + For each element in `self`, return True if there are only + decimal characters in the element. + + See Also + -------- + char.isdecimal + + """ + return isdecimal(self) + + +def array(obj, itemsize=None, copy=True, unicode=None, order=None): + """ + Create a `chararray`. + + .. note:: + This class is provided for numarray backward-compatibility. 
+       New code (not concerned with numarray compatibility) should use
+       arrays of type `string_` or `unicode_` and use the free functions
+       in :mod:`numpy.char <numpy.core.defchararray>` for fast
+       vectorized string operations instead.
+
+    Versus a regular NumPy array of type `str` or `unicode`, this
+    class adds the following functionality:
+
+      1) values automatically have whitespace removed from the end
+         when indexed
+
+      2) comparison operators automatically remove whitespace from the
+         end when comparing values
+
+      3) vectorized string operations are provided as methods
+         (e.g. `str.endswith`) and infix operators (e.g. ``+, *, %``)
+
+    Parameters
+    ----------
+    obj : array of str or unicode-like
+
+    itemsize : int, optional
+        `itemsize` is the number of characters per scalar in the
+        resulting array. If `itemsize` is None, and `obj` is an
+        object array or a Python list, the `itemsize` will be
+        automatically determined. If `itemsize` is provided and `obj`
+        is of type str or unicode, then the `obj` string will be
+        chunked into `itemsize` pieces.
+
+    copy : bool, optional
+        If true (default), then the object is copied. Otherwise, a copy
+        will only be made if __array__ returns a copy, if obj is a
+        nested sequence, or if a copy is needed to satisfy any of the other
+        requirements (`itemsize`, unicode, `order`, etc.).
+
+    unicode : bool, optional
+        When true, the resulting `chararray` can contain Unicode
+        characters, when false only 8-bit characters. If unicode is
+        None and `obj` is one of the following:
+
+          - a `chararray`,
+          - an ndarray of type `str` or `unicode`
+          - a Python str or unicode object,
+
+        then the unicode setting of the output array will be
+        automatically determined.
+
+    order : {'C', 'F', 'A'}, optional
+        Specify the order of the array. If order is 'C' (default), then the
+        array will be in C-contiguous order (last-index varies the
+        fastest). If order is 'F', then the returned array
+        will be in Fortran-contiguous order (first-index varies the
+        fastest). If order is 'A', then the returned array may
+        be in any order (either C-, Fortran-contiguous, or even
+        discontiguous).
+    """
+    if isinstance(obj, (bytes, str)):
+        if unicode is None:
+            if isinstance(obj, str):
+                unicode = True
+            else:
+                unicode = False
+
+        if itemsize is None:
+            itemsize = len(obj)
+        shape = len(obj) // itemsize
+
+        return chararray(shape, itemsize=itemsize, unicode=unicode,
+                         buffer=obj, order=order)
+
+    if isinstance(obj, (list, tuple)):
+        obj = numpy.asarray(obj)
+
+    if isinstance(obj, ndarray) and issubclass(obj.dtype.type, character):
+        # If we just have a vanilla chararray, create a chararray
+        # view around it.
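+        # (Taking the view shares the existing string buffer; whether the
+        # data is actually copied is decided by the `copy`/`itemsize`
+        # checks below.)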
+        if not isinstance(obj, chararray):
+            obj = obj.view(chararray)
+
+        if itemsize is None:
+            itemsize = obj.itemsize
+            # itemsize is in 8-bit chars, so for Unicode, we need
+            # to divide by the size of a single Unicode character,
+            # which for NumPy is always 4
+            if issubclass(obj.dtype.type, unicode_):
+                itemsize //= 4
+
+        if unicode is None:
+            if issubclass(obj.dtype.type, unicode_):
+                unicode = True
+            else:
+                unicode = False
+
+        if unicode:
+            dtype = unicode_
+        else:
+            dtype = string_
+
+        if order is not None:
+            obj = numpy.asarray(obj, order=order)
+        if (copy or
+                (itemsize != obj.itemsize) or
+                (not unicode and isinstance(obj, unicode_)) or
+                (unicode and isinstance(obj, string_))):
+            obj = obj.astype((dtype, int(itemsize)))
+        return obj
+
+    if isinstance(obj, ndarray) and issubclass(obj.dtype.type, object):
+        if itemsize is None:
+            # Since no itemsize was specified, convert the input array to
+            # a list so the ndarray constructor will automatically
+            # determine the itemsize for us.
+            obj = obj.tolist()
+            # Fall through to the default case
+
+    if unicode:
+        dtype = unicode_
+    else:
+        dtype = string_
+
+    if itemsize is None:
+        val = narray(obj, dtype=dtype, order=order, subok=True)
+    else:
+        val = narray(obj, dtype=(dtype, itemsize), order=order, subok=True)
+    return val.view(chararray)
+
+
+def asarray(obj, itemsize=None, unicode=None, order=None):
+    """
+    Convert the input to a `chararray`, copying the data only if
+    necessary.
+
+    Versus a regular NumPy array of type `str` or `unicode`, this
+    class adds the following functionality:
+
+      1) values automatically have whitespace removed from the end
+         when indexed
+
+      2) comparison operators automatically remove whitespace from the
+         end when comparing values
+
+      3) vectorized string operations are provided as methods
+         (e.g. `str.endswith`) and infix operators (e.g. ``+``, ``*``, ``%``)
+
+    Parameters
+    ----------
+    obj : array of str or unicode-like
+
+    itemsize : int, optional
+        `itemsize` is the number of characters per scalar in the
+        resulting array. If `itemsize` is None, and `obj` is an
+        object array or a Python list, the `itemsize` will be
+        automatically determined. If `itemsize` is provided and `obj`
+        is of type str or unicode, then the `obj` string will be
+        chunked into `itemsize` pieces.
+
+    unicode : bool, optional
+        When true, the resulting `chararray` can contain Unicode
+        characters, when false only 8-bit characters. If unicode is
+        None and `obj` is one of the following:
+
+          - a `chararray`,
+          - an ndarray of type `str` or `unicode`
+          - a Python str or unicode object,
+
+        then the unicode setting of the output array will be
+        automatically determined.
+
+    order : {'C', 'F'}, optional
+        Specify the order of the array. If order is 'C' (default), then the
+        array will be in C-contiguous order (last-index varies the
+        fastest). If order is 'F', then the returned array
+        will be in Fortran-contiguous order (first-index varies the
+        fastest).
+    """
+    return array(obj, itemsize, copy=False,
+                 unicode=unicode, order=order)
diff --git a/MLPY/Lib/site-packages/numpy/core/einsumfunc.py b/MLPY/Lib/site-packages/numpy/core/einsumfunc.py
new file mode 100644
index 0000000000000000000000000000000000000000..041f4dd673d79ebaeba3fed8c2729f87d6657d9a
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/core/einsumfunc.py
@@ -0,0 +1,1431 @@
+"""
+Implementation of optimized einsum.
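+
+The public entry points are `einsum` and `einsum_path`; the helpers in
+this module estimate contraction costs, search for a contraction order
+('greedy' or 'optimal'), and decide when a pairwise contraction can be
+dispatched to `tensordot` (BLAS) rather than `c_einsum`.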
+ +""" +import itertools +import operator + +from numpy.core.multiarray import c_einsum +from numpy.core.numeric import asanyarray, tensordot +from numpy.core.overrides import array_function_dispatch + +__all__ = ['einsum', 'einsum_path'] + +einsum_symbols = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' +einsum_symbols_set = set(einsum_symbols) + + +def _flop_count(idx_contraction, inner, num_terms, size_dictionary): + """ + Computes the number of FLOPS in the contraction. + + Parameters + ---------- + idx_contraction : iterable + The indices involved in the contraction + inner : bool + Does this contraction require an inner product? + num_terms : int + The number of terms in a contraction + size_dictionary : dict + The size of each of the indices in idx_contraction + + Returns + ------- + flop_count : int + The total number of FLOPS required for the contraction. + + Examples + -------- + + >>> _flop_count('abc', False, 1, {'a': 2, 'b':3, 'c':5}) + 30 + + >>> _flop_count('abc', True, 2, {'a': 2, 'b':3, 'c':5}) + 60 + + """ + + overall_size = _compute_size_by_dict(idx_contraction, size_dictionary) + op_factor = max(1, num_terms - 1) + if inner: + op_factor += 1 + + return overall_size * op_factor + +def _compute_size_by_dict(indices, idx_dict): + """ + Computes the product of the elements in indices based on the dictionary + idx_dict. + + Parameters + ---------- + indices : iterable + Indices to base the product on. + idx_dict : dictionary + Dictionary of index sizes + + Returns + ------- + ret : int + The resulting product. + + Examples + -------- + >>> _compute_size_by_dict('abbc', {'a': 2, 'b':3, 'c':5}) + 90 + + """ + ret = 1 + for i in indices: + ret *= idx_dict[i] + return ret + + +def _find_contraction(positions, input_sets, output_set): + """ + Finds the contraction for a given set of input and output sets. + + Parameters + ---------- + positions : iterable + Integer positions of terms used in the contraction. 
+ input_sets : list + List of sets that represent the lhs side of the einsum subscript + output_set : set + Set that represents the rhs side of the overall einsum subscript + + Returns + ------- + new_result : set + The indices of the resulting contraction + remaining : list + List of sets that have not been contracted, the new set is appended to + the end of this list + idx_removed : set + Indices removed from the entire contraction + idx_contraction : set + The indices used in the current contraction + + Examples + -------- + + # A simple dot product test case + >>> pos = (0, 1) + >>> isets = [set('ab'), set('bc')] + >>> oset = set('ac') + >>> _find_contraction(pos, isets, oset) + ({'a', 'c'}, [{'a', 'c'}], {'b'}, {'a', 'b', 'c'}) + + # A more complex case with additional terms in the contraction + >>> pos = (0, 2) + >>> isets = [set('abd'), set('ac'), set('bdc')] + >>> oset = set('ac') + >>> _find_contraction(pos, isets, oset) + ({'a', 'c'}, [{'a', 'c'}, {'a', 'c'}], {'b', 'd'}, {'a', 'b', 'c', 'd'}) + """ + + idx_contract = set() + idx_remain = output_set.copy() + remaining = [] + for ind, value in enumerate(input_sets): + if ind in positions: + idx_contract |= value + else: + remaining.append(value) + idx_remain |= value + + new_result = idx_remain & idx_contract + idx_removed = (idx_contract - new_result) + remaining.append(new_result) + + return (new_result, remaining, idx_removed, idx_contract) + + +def _optimal_path(input_sets, output_set, idx_dict, memory_limit): + """ + Computes all possible pair contractions, sieves the results based + on ``memory_limit`` and returns the lowest cost path. This algorithm + scales factorial with respect to the elements in the list ``input_sets``. + + Parameters + ---------- + input_sets : list + List of sets that represent the lhs side of the einsum subscript + output_set : set + Set that represents the rhs side of the overall einsum subscript + idx_dict : dictionary + Dictionary of index sizes + memory_limit : int + The maximum number of elements in a temporary array + + Returns + ------- + path : list + The optimal contraction order within the memory limit constraint. 
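+        Each element of the list is a tuple of the term positions contracted
+        at that step; intermediates larger than ``memory_limit`` are never
+        considered.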
+ + Examples + -------- + >>> isets = [set('abd'), set('ac'), set('bdc')] + >>> oset = set() + >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4} + >>> _optimal_path(isets, oset, idx_sizes, 5000) + [(0, 2), (0, 1)] + """ + + full_results = [(0, [], input_sets)] + for iteration in range(len(input_sets) - 1): + iter_results = [] + + # Compute all unique pairs + for curr in full_results: + cost, positions, remaining = curr + for con in itertools.combinations(range(len(input_sets) - iteration), 2): + + # Find the contraction + cont = _find_contraction(con, remaining, output_set) + new_result, new_input_sets, idx_removed, idx_contract = cont + + # Sieve the results based on memory_limit + new_size = _compute_size_by_dict(new_result, idx_dict) + if new_size > memory_limit: + continue + + # Build (total_cost, positions, indices_remaining) + total_cost = cost + _flop_count(idx_contract, idx_removed, len(con), idx_dict) + new_pos = positions + [con] + iter_results.append((total_cost, new_pos, new_input_sets)) + + # Update combinatorial list, if we did not find anything return best + # path + remaining contractions + if iter_results: + full_results = iter_results + else: + path = min(full_results, key=lambda x: x[0])[1] + path += [tuple(range(len(input_sets) - iteration))] + return path + + # If we have not found anything return single einsum contraction + if len(full_results) == 0: + return [tuple(range(len(input_sets)))] + + path = min(full_results, key=lambda x: x[0])[1] + return path + +def _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost, naive_cost): + """Compute the cost (removed size + flops) and resultant indices for + performing the contraction specified by ``positions``. + + Parameters + ---------- + positions : tuple of int + The locations of the proposed tensors to contract. + input_sets : list of sets + The indices found on each tensors. + output_set : set + The output indices of the expression. + idx_dict : dict + Mapping of each index to its size. + memory_limit : int + The total allowed size for an intermediary tensor. + path_cost : int + The contraction cost so far. + naive_cost : int + The cost of the unoptimized expression. + + Returns + ------- + cost : (int, int) + A tuple containing the size of any indices removed, and the flop cost. + positions : tuple of int + The locations of the proposed tensors to contract. + new_input_sets : list of sets + The resulting new list of indices if this proposed contraction is performed. 
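+
+    Notes
+    -----
+    Returns ``None`` when the candidate is sieved out, either because the
+    intermediate would exceed ``memory_limit`` or because the accumulated
+    ``path_cost`` would exceed ``naive_cost``.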
+ + """ + + # Find the contraction + contract = _find_contraction(positions, input_sets, output_set) + idx_result, new_input_sets, idx_removed, idx_contract = contract + + # Sieve the results based on memory_limit + new_size = _compute_size_by_dict(idx_result, idx_dict) + if new_size > memory_limit: + return None + + # Build sort tuple + old_sizes = (_compute_size_by_dict(input_sets[p], idx_dict) for p in positions) + removed_size = sum(old_sizes) - new_size + + # NB: removed_size used to be just the size of any removed indices i.e.: + # helpers.compute_size_by_dict(idx_removed, idx_dict) + cost = _flop_count(idx_contract, idx_removed, len(positions), idx_dict) + sort = (-removed_size, cost) + + # Sieve based on total cost as well + if (path_cost + cost) > naive_cost: + return None + + # Add contraction to possible choices + return [sort, positions, new_input_sets] + + +def _update_other_results(results, best): + """Update the positions and provisional input_sets of ``results`` based on + performing the contraction result ``best``. Remove any involving the tensors + contracted. + + Parameters + ---------- + results : list + List of contraction results produced by ``_parse_possible_contraction``. + best : list + The best contraction of ``results`` i.e. the one that will be performed. + + Returns + ------- + mod_results : list + The list of modified results, updated with outcome of ``best`` contraction. + """ + + best_con = best[1] + bx, by = best_con + mod_results = [] + + for cost, (x, y), con_sets in results: + + # Ignore results involving tensors just contracted + if x in best_con or y in best_con: + continue + + # Update the input_sets + del con_sets[by - int(by > x) - int(by > y)] + del con_sets[bx - int(bx > x) - int(bx > y)] + con_sets.insert(-1, best[2][-1]) + + # Update the position indices + mod_con = x - int(x > bx) - int(x > by), y - int(y > bx) - int(y > by) + mod_results.append((cost, mod_con, con_sets)) + + return mod_results + +def _greedy_path(input_sets, output_set, idx_dict, memory_limit): + """ + Finds the path by contracting the best pair until the input list is + exhausted. The best pair is found by minimizing the tuple + ``(-prod(indices_removed), cost)``. What this amounts to is prioritizing + matrix multiplication or inner product operations, then Hadamard like + operations, and finally outer operations. Outer products are limited by + ``memory_limit``. This algorithm scales cubically with respect to the + number of elements in the list ``input_sets``. + + Parameters + ---------- + input_sets : list + List of sets that represent the lhs side of the einsum subscript + output_set : set + Set that represents the rhs side of the overall einsum subscript + idx_dict : dictionary + Dictionary of index sizes + memory_limit : int + The maximum number of elements in a temporary array + + Returns + ------- + path : list + The greedy contraction order within the memory limit constraint. 
+
+    Examples
+    --------
+    >>> isets = [set('abd'), set('ac'), set('bdc')]
+    >>> oset = set()
+    >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
+    >>> _greedy_path(isets, oset, idx_sizes, 5000)
+    [(0, 2), (0, 1)]
+    """
+
+    # Handle trivial cases that leaked through
+    if len(input_sets) == 1:
+        return [(0,)]
+    elif len(input_sets) == 2:
+        return [(0, 1)]
+
+    # Build up a naive cost
+    contract = _find_contraction(range(len(input_sets)), input_sets, output_set)
+    idx_result, new_input_sets, idx_removed, idx_contract = contract
+    naive_cost = _flop_count(idx_contract, idx_removed, len(input_sets), idx_dict)
+
+    # Initially iterate over all pairs
+    comb_iter = itertools.combinations(range(len(input_sets)), 2)
+    known_contractions = []
+
+    path_cost = 0
+    path = []
+
+    for iteration in range(len(input_sets) - 1):
+
+        # Iterate over all pairs on the first step, only previously found pairs on subsequent steps
+        for positions in comb_iter:
+
+            # Always initially ignore outer products
+            if input_sets[positions[0]].isdisjoint(input_sets[positions[1]]):
+                continue
+
+            result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost,
+                                                 naive_cost)
+            if result is not None:
+                known_contractions.append(result)
+
+        # If we do not have an inner contraction, rescan pairs including outer products
+        if len(known_contractions) == 0:
+
+            # Then check the outer products
+            for positions in itertools.combinations(range(len(input_sets)), 2):
+                result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit,
+                                                     path_cost, naive_cost)
+                if result is not None:
+                    known_contractions.append(result)
+
+            # If we still did not find any remaining contractions, default back to einsum-like behavior
+            if len(known_contractions) == 0:
+                path.append(tuple(range(len(input_sets))))
+                break
+
+        # Pick the contraction with the smallest sort tuple
+        best = min(known_contractions, key=lambda x: x[0])
+
+        # Now propagate as many unused contractions as possible to the next iteration
+        known_contractions = _update_other_results(known_contractions, best)
+
+        # The next iteration only computes contractions with the new tensor;
+        # all other contractions have been accounted for
+        input_sets = best[2]
+        new_tensor_pos = len(input_sets) - 1
+        comb_iter = ((i, new_tensor_pos) for i in range(new_tensor_pos))
+
+        # Update path and total cost
+        path.append(best[1])
+        path_cost += best[0][1]
+
+    return path
+
+
+def _can_dot(inputs, result, idx_removed):
+    """
+    Checks whether we can use a BLAS (np.tensordot) call and whether it is
+    beneficial to do so.
+
+    Parameters
+    ----------
+    inputs : list of str
+        Specifies the subscripts for summation.
+    result : str
+        Resulting summation.
+    idx_removed : set
+        Indices that are removed in the summation
+
+
+    Returns
+    -------
+    type : bool
+        Returns true if BLAS should and can be used, else False
+
+    Notes
+    -----
+    If the operation is BLAS level 1 or 2 and the data is not already
+    aligned, we default back to einsum, as the memory movement needed to
+    copy is more costly than the operation itself.
+ + + Examples + -------- + + # Standard GEMM operation + >>> _can_dot(['ij', 'jk'], 'ik', set('j')) + True + + # Can use the standard BLAS, but requires odd data movement + >>> _can_dot(['ijj', 'jk'], 'ik', set('j')) + False + + # DDOT where the memory is not aligned + >>> _can_dot(['ijk', 'ikj'], '', set('ijk')) + False + + """ + + # All `dot` calls remove indices + if len(idx_removed) == 0: + return False + + # BLAS can only handle two operands + if len(inputs) != 2: + return False + + input_left, input_right = inputs + + for c in set(input_left + input_right): + # can't deal with repeated indices on same input or more than 2 total + nl, nr = input_left.count(c), input_right.count(c) + if (nl > 1) or (nr > 1) or (nl + nr > 2): + return False + + # can't do implicit summation or dimension collapse e.g. + # "ab,bc->c" (implicitly sum over 'a') + # "ab,ca->ca" (take diagonal of 'a') + if nl + nr - 1 == int(c in result): + return False + + # Build a few temporaries + set_left = set(input_left) + set_right = set(input_right) + keep_left = set_left - idx_removed + keep_right = set_right - idx_removed + rs = len(idx_removed) + + # At this point we are a DOT, GEMV, or GEMM operation + + # Handle inner products + + # DDOT with aligned data + if input_left == input_right: + return True + + # DDOT without aligned data (better to use einsum) + if set_left == set_right: + return False + + # Handle the 4 possible (aligned) GEMV or GEMM cases + + # GEMM or GEMV no transpose + if input_left[-rs:] == input_right[:rs]: + return True + + # GEMM or GEMV transpose both + if input_left[:rs] == input_right[-rs:]: + return True + + # GEMM or GEMV transpose right + if input_left[-rs:] == input_right[-rs:]: + return True + + # GEMM or GEMV transpose left + if input_left[:rs] == input_right[:rs]: + return True + + # Einsum is faster than GEMV if we have to copy data + if not keep_left or not keep_right: + return False + + # We are a matrix-matrix product, but we need to copy data + return True + + +def _parse_einsum_input(operands): + """ + A reproduction of einsum c side einsum parsing in python. + + Returns + ------- + input_strings : str + Parsed input strings + output_string : str + Parsed output string + operands : list of array_like + The operands to use in the numpy contraction + + Examples + -------- + The operand list is simplified to reduce printing: + + >>> np.random.seed(123) + >>> a = np.random.rand(4, 4) + >>> b = np.random.rand(4, 4, 4) + >>> _parse_einsum_input(('...a,...a->...', a, b)) + ('za,xza', 'xz', [a, b]) # may vary + + >>> _parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0])) + ('za,xza', 'xz', [a, b]) # may vary + """ + + if len(operands) == 0: + raise ValueError("No input operands") + + if isinstance(operands[0], str): + subscripts = operands[0].replace(" ", "") + operands = [asanyarray(v) for v in operands[1:]] + + # Ensure all characters are valid + for s in subscripts: + if s in '.,->': + continue + if s not in einsum_symbols: + raise ValueError("Character %s is not a valid symbol." % s) + + else: + tmp_operands = list(operands) + operand_list = [] + subscript_list = [] + for p in range(len(operands) // 2): + operand_list.append(tmp_operands.pop(0)) + subscript_list.append(tmp_operands.pop(0)) + + output_list = tmp_operands[-1] if len(tmp_operands) else None + operands = [asanyarray(v) for v in operand_list] + subscripts = "" + last = len(subscript_list) - 1 + for num, sub in enumerate(subscript_list): + for s in sub: + if s is Ellipsis: + subscripts += "..." 
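+                # Any non-Ellipsis entry must be an integer axis label; it
+                # is mapped onto a subscript letter from `einsum_symbols`.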
+ else: + try: + s = operator.index(s) + except TypeError as e: + raise TypeError("For this input type lists must contain " + "either int or Ellipsis") from e + subscripts += einsum_symbols[s] + if num != last: + subscripts += "," + + if output_list is not None: + subscripts += "->" + for s in output_list: + if s is Ellipsis: + subscripts += "..." + else: + try: + s = operator.index(s) + except TypeError as e: + raise TypeError("For this input type lists must contain " + "either int or Ellipsis") from e + subscripts += einsum_symbols[s] + # Check for proper "->" + if ("-" in subscripts) or (">" in subscripts): + invalid = (subscripts.count("-") > 1) or (subscripts.count(">") > 1) + if invalid or (subscripts.count("->") != 1): + raise ValueError("Subscripts can only contain one '->'.") + + # Parse ellipses + if "." in subscripts: + used = subscripts.replace(".", "").replace(",", "").replace("->", "") + unused = list(einsum_symbols_set - set(used)) + ellipse_inds = "".join(unused) + longest = 0 + + if "->" in subscripts: + input_tmp, output_sub = subscripts.split("->") + split_subscripts = input_tmp.split(",") + out_sub = True + else: + split_subscripts = subscripts.split(',') + out_sub = False + + for num, sub in enumerate(split_subscripts): + if "." in sub: + if (sub.count(".") != 3) or (sub.count("...") != 1): + raise ValueError("Invalid Ellipses.") + + # Take into account numerical values + if operands[num].shape == (): + ellipse_count = 0 + else: + ellipse_count = max(operands[num].ndim, 1) + ellipse_count -= (len(sub) - 3) + + if ellipse_count > longest: + longest = ellipse_count + + if ellipse_count < 0: + raise ValueError("Ellipses lengths do not match.") + elif ellipse_count == 0: + split_subscripts[num] = sub.replace('...', '') + else: + rep_inds = ellipse_inds[-ellipse_count:] + split_subscripts[num] = sub.replace('...', rep_inds) + + subscripts = ",".join(split_subscripts) + if longest == 0: + out_ellipse = "" + else: + out_ellipse = ellipse_inds[-longest:] + + if out_sub: + subscripts += "->" + output_sub.replace("...", out_ellipse) + else: + # Special care for outputless ellipses + output_subscript = "" + tmp_subscripts = subscripts.replace(",", "") + for s in sorted(set(tmp_subscripts)): + if s not in (einsum_symbols): + raise ValueError("Character %s is not a valid symbol." % s) + if tmp_subscripts.count(s) == 1: + output_subscript += s + normal_inds = ''.join(sorted(set(output_subscript) - + set(out_ellipse))) + + subscripts += "->" + out_ellipse + normal_inds + + # Build output string if does not exist + if "->" in subscripts: + input_subscripts, output_subscript = subscripts.split("->") + else: + input_subscripts = subscripts + # Build output subscripts + tmp_subscripts = subscripts.replace(",", "") + output_subscript = "" + for s in sorted(set(tmp_subscripts)): + if s not in einsum_symbols: + raise ValueError("Character %s is not a valid symbol." 
+                                 % s)
+            if tmp_subscripts.count(s) == 1:
+                output_subscript += s
+
+    # Make sure output subscripts are in the input
+    for char in output_subscript:
+        if char not in input_subscripts:
+            raise ValueError("Output character %s did not appear in the input"
+                             % char)
+
+    # Make sure the number of operands matches the number of terms
+    if len(input_subscripts.split(',')) != len(operands):
+        raise ValueError("Number of einsum subscripts must be equal to the "
+                         "number of operands.")
+
+    return (input_subscripts, output_subscript, operands)
+
+
+def _einsum_path_dispatcher(*operands, optimize=None, einsum_call=None):
+    # NOTE: technically, we should only dispatch on array-like arguments, not
+    # subscripts (given as strings). But separating operands into
+    # arrays/subscripts is a little tricky/slow (given einsum's two supported
+    # signatures), so as a practical shortcut we dispatch on everything.
+    # Strings will be ignored for dispatching since they don't define
+    # __array_function__.
+    return operands
+
+
+@array_function_dispatch(_einsum_path_dispatcher, module='numpy')
+def einsum_path(*operands, optimize='greedy', einsum_call=False):
+    """
+    einsum_path(subscripts, *operands, optimize='greedy')
+
+    Evaluates the lowest cost contraction order for an einsum expression by
+    considering the creation of intermediate arrays.
+
+    Parameters
+    ----------
+    subscripts : str
+        Specifies the subscripts for summation.
+    *operands : list of array_like
+        These are the arrays for the operation.
+    optimize : {bool, list, tuple, 'greedy', 'optimal'}
+        Choose the type of path. If a tuple is provided, the second argument is
+        assumed to be the maximum intermediate size created. If only a single
+        argument is provided the largest input or output array size is used
+        as a maximum intermediate size.
+
+        * if a list is given that starts with ``einsum_path``, uses this as the
+          contraction path
+        * if False no optimization is taken
+        * if True defaults to the 'greedy' algorithm
+        * 'optimal' An algorithm that combinatorially explores all possible
+          ways of contracting the listed tensors and chooses the least costly
+          path. Scales exponentially with the number of terms in the
+          contraction.
+        * 'greedy' An algorithm that chooses the best pair contraction
+          at each step. Effectively, this algorithm searches the largest inner,
+          Hadamard, and then outer products at each step. Scales cubically with
+          the number of terms in the contraction. Equivalent to the 'optimal'
+          path for most contractions.
+
+        Default is 'greedy'.
+
+    Returns
+    -------
+    path : list of tuples
+        A list representation of the einsum path.
+    string_repr : str
+        A printable representation of the einsum path.
+
+    Notes
+    -----
+    The resulting path indicates which terms of the input contraction should be
+    contracted first; the result of this contraction is then appended to the
+    end of the contraction list. This list can then be iterated over until all
+    intermediate contractions are complete.
+
+    See Also
+    --------
+    einsum, linalg.multi_dot
+
+    Examples
+    --------
+
+    We can begin with a chain dot example. In this case, it is optimal to
+    contract the ``b`` and ``c`` tensors first as represented by the first
+    element of the path ``(1, 2)``. The resulting tensor is added to the end
+    of the contraction and the remaining contraction ``(0, 1)`` is then
+    completed.
+ + >>> np.random.seed(123) + >>> a = np.random.rand(2, 2) + >>> b = np.random.rand(2, 5) + >>> c = np.random.rand(5, 2) + >>> path_info = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy') + >>> print(path_info[0]) + ['einsum_path', (1, 2), (0, 1)] + >>> print(path_info[1]) + Complete contraction: ij,jk,kl->il # may vary + Naive scaling: 4 + Optimized scaling: 3 + Naive FLOP count: 1.600e+02 + Optimized FLOP count: 5.600e+01 + Theoretical speedup: 2.857 + Largest intermediate: 4.000e+00 elements + ------------------------------------------------------------------------- + scaling current remaining + ------------------------------------------------------------------------- + 3 kl,jk->jl ij,jl->il + 3 jl,ij->il il->il + + + A more complex index transformation example. + + >>> I = np.random.rand(10, 10, 10, 10) + >>> C = np.random.rand(10, 10) + >>> path_info = np.einsum_path('ea,fb,abcd,gc,hd->efgh', C, C, I, C, C, + ... optimize='greedy') + + >>> print(path_info[0]) + ['einsum_path', (0, 2), (0, 3), (0, 2), (0, 1)] + >>> print(path_info[1]) + Complete contraction: ea,fb,abcd,gc,hd->efgh # may vary + Naive scaling: 8 + Optimized scaling: 5 + Naive FLOP count: 8.000e+08 + Optimized FLOP count: 8.000e+05 + Theoretical speedup: 1000.000 + Largest intermediate: 1.000e+04 elements + -------------------------------------------------------------------------- + scaling current remaining + -------------------------------------------------------------------------- + 5 abcd,ea->bcde fb,gc,hd,bcde->efgh + 5 bcde,fb->cdef gc,hd,cdef->efgh + 5 cdef,gc->defg hd,defg->efgh + 5 defg,hd->efgh efgh->efgh + """ + + # Figure out what the path really is + path_type = optimize + if path_type is True: + path_type = 'greedy' + if path_type is None: + path_type = False + + memory_limit = None + + # No optimization or a named path algorithm + if (path_type is False) or isinstance(path_type, str): + pass + + # Given an explicit path + elif len(path_type) and (path_type[0] == 'einsum_path'): + pass + + # Path tuple with memory limit + elif ((len(path_type) == 2) and isinstance(path_type[0], str) and + isinstance(path_type[1], (int, float))): + memory_limit = int(path_type[1]) + path_type = path_type[0] + + else: + raise TypeError("Did not understand the path: %s" % str(path_type)) + + # Hidden option, only einsum should call this + einsum_call_arg = einsum_call + + # Python side parsing + input_subscripts, output_subscript, operands = _parse_einsum_input(operands) + + # Build a few useful list and sets + input_list = input_subscripts.split(',') + input_sets = [set(x) for x in input_list] + output_set = set(output_subscript) + indices = set(input_subscripts.replace(',', '')) + + # Get length of each unique dimension and ensure all dimensions are correct + dimension_dict = {} + broadcast_indices = [[] for x in range(len(input_list))] + for tnum, term in enumerate(input_list): + sh = operands[tnum].shape + if len(sh) != len(term): + raise ValueError("Einstein sum subscript %s does not contain the " + "correct number of indices for operand %d." 
+                             % (term, tnum))
+        for cnum, char in enumerate(term):
+            dim = sh[cnum]
+
+            # Build out broadcast indices
+            if dim == 1:
+                broadcast_indices[tnum].append(char)
+
+            if char in dimension_dict.keys():
+                # For broadcasting cases we always want the largest dim size
+                if dimension_dict[char] == 1:
+                    dimension_dict[char] = dim
+                elif dim not in (1, dimension_dict[char]):
+                    raise ValueError("Size of label '%s' for operand %d (%d) "
+                                     "does not match previous terms (%d)."
+                                     % (char, tnum, dimension_dict[char], dim))
+            else:
+                dimension_dict[char] = dim
+
+    # Convert broadcast inds to sets
+    broadcast_indices = [set(x) for x in broadcast_indices]
+
+    # Compute size of each input array plus the output array
+    size_list = [_compute_size_by_dict(term, dimension_dict)
+                 for term in input_list + [output_subscript]]
+    max_size = max(size_list)
+
+    if memory_limit is None:
+        memory_arg = max_size
+    else:
+        memory_arg = memory_limit
+
+    # Compute naive cost
+    # This isn't quite right, need to look into exactly how einsum does this
+    inner_product = (sum(len(x) for x in input_sets) - len(indices)) > 0
+    naive_cost = _flop_count(indices, inner_product, len(input_list), dimension_dict)
+
+    # Compute the path
+    if (path_type is False) or (len(input_list) in [1, 2]) or (indices == output_set):
+        # Nothing to be optimized, leave it to einsum
+        path = [tuple(range(len(input_list)))]
+    elif path_type == "greedy":
+        path = _greedy_path(input_sets, output_set, dimension_dict, memory_arg)
+    elif path_type == "optimal":
+        path = _optimal_path(input_sets, output_set, dimension_dict, memory_arg)
+    elif path_type[0] == 'einsum_path':
+        path = path_type[1:]
+    else:
+        raise KeyError("Path name %s not found" % path_type)
+
+    cost_list, scale_list, size_list, contraction_list = [], [], [], []
+
+    # Build contraction tuple (positions, gemm, einsum_str, remaining)
+    for cnum, contract_inds in enumerate(path):
+        # Make sure we remove inds from right to left
+        contract_inds = tuple(sorted(list(contract_inds), reverse=True))
+
+        contract = _find_contraction(contract_inds, input_sets, output_set)
+        out_inds, input_sets, idx_removed, idx_contract = contract
+
+        cost = _flop_count(idx_contract, idx_removed, len(contract_inds), dimension_dict)
+        cost_list.append(cost)
+        scale_list.append(len(idx_contract))
+        size_list.append(_compute_size_by_dict(out_inds, dimension_dict))
+
+        bcast = set()
+        tmp_inputs = []
+        for x in contract_inds:
+            tmp_inputs.append(input_list.pop(x))
+            bcast |= broadcast_indices.pop(x)
+
+        new_bcast_inds = bcast - idx_removed
+
+        # If we're broadcasting, nix blas
+        if not len(idx_removed & bcast):
+            do_blas = _can_dot(tmp_inputs, out_inds, idx_removed)
+        else:
+            do_blas = False
+
+        # Last contraction
+        if (cnum - len(path)) == -1:
+            idx_result = output_subscript
+        else:
+            sort_result = [(dimension_dict[ind], ind) for ind in out_inds]
+            idx_result = "".join([x[1] for x in sorted(sort_result)])
+
+        input_list.append(idx_result)
+        broadcast_indices.append(new_bcast_inds)
+        einsum_str = ",".join(tmp_inputs) + "->" + idx_result
+
+        contraction = (contract_inds, idx_removed, einsum_str, input_list[:], do_blas)
+        contraction_list.append(contraction)
+
+    opt_cost = sum(cost_list) + 1
+
+    if einsum_call_arg:
+        return (operands, contraction_list)
+
+    # Return the path along with a nice string representation
+    overall_contraction = input_subscripts + "->" + output_subscript
+    header = ("scaling", "current", "remaining")
+
+    speedup = naive_cost / opt_cost
+    max_i = max(size_list)
+
+    path_print  = "  Complete contraction:  %s\n" % overall_contraction
+    path_print += "         Naive scaling:  %d\n" % len(indices)
+    path_print += "     Optimized scaling:  %d\n" % max(scale_list)
+    path_print += "      Naive FLOP count:  %.3e\n" % naive_cost
+    path_print += "  Optimized FLOP count:  %.3e\n" % opt_cost
+    path_print += "   Theoretical speedup:  %3.3f\n" % speedup
+    path_print += "  Largest intermediate:  %.3e elements\n" % max_i
+    path_print += "-" * 74 + "\n"
+    path_print += "%6s %24s %40s\n" % header
+    path_print += "-" * 74
+
+    for n, contraction in enumerate(contraction_list):
+        inds, idx_rm, einsum_str, remaining, blas = contraction
+        remaining_str = ",".join(remaining) + "->" + output_subscript
+        path_run = (scale_list[n], einsum_str, remaining_str)
+        path_print += "\n%4d    %24s %40s" % path_run
+
+    path = ['einsum_path'] + path
+    return (path, path_print)
+
+
+def _einsum_dispatcher(*operands, out=None, optimize=None, **kwargs):
+    # Arguably we dispatch on more arguments than we really should; see note
+    # in _einsum_path_dispatcher for why.
+    yield from operands
+    yield out
+
+
+# Rewrite einsum to handle different cases
+@array_function_dispatch(_einsum_dispatcher, module='numpy')
+def einsum(*operands, out=None, optimize=False, **kwargs):
+    """
+    einsum(subscripts, *operands, out=None, dtype=None, order='K',
+           casting='safe', optimize=False)
+
+    Evaluates the Einstein summation convention on the operands.
+
+    Using the Einstein summation convention, many common multi-dimensional,
+    linear algebraic array operations can be represented in a simple fashion.
+    In *implicit* mode `einsum` computes these values.
+
+    In *explicit* mode, `einsum` provides further flexibility to compute
+    other array operations that might not be considered classical Einstein
+    summation operations, by disabling, or forcing summation over specified
+    subscript labels.
+
+    See the notes and examples for clarification.
+
+    Parameters
+    ----------
+    subscripts : str
+        Specifies the subscripts for summation as comma separated list of
+        subscript labels. An implicit (classical Einstein summation)
+        calculation is performed unless the explicit indicator '->' is
+        included as well as subscript labels of the precise output form.
+    operands : list of array_like
+        These are the arrays for the operation.
+    out : ndarray, optional
+        If provided, the calculation is done into this array.
+    dtype : {data-type, None}, optional
+        If provided, forces the calculation to use the data type specified.
+        Note that you may have to also give a more liberal `casting`
+        parameter to allow the conversions. Default is None.
+    order : {'C', 'F', 'A', 'K'}, optional
+        Controls the memory layout of the output. 'C' means it should
+        be C contiguous. 'F' means it should be Fortran contiguous,
+        'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
+        'K' means it should be as close to the layout of the inputs as
+        is possible, including arbitrarily permuted axes.
+        Default is 'K'.
+    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+        Controls what kind of data casting may occur. Setting this to
+        'unsafe' is not recommended, as it can adversely affect accumulations.
+
+        * 'no' means the data types should not be cast at all.
+        * 'equiv' means only byte-order changes are allowed.
+        * 'safe' means only casts which can preserve values are allowed.
+        * 'same_kind' means only safe casts or casts within a kind,
+          like float64 to float32, are allowed.
+        * 'unsafe' means any data conversions may be done.
+
+        Default is 'safe'.
+    optimize : {False, True, 'greedy', 'optimal'}, optional
+        Controls if intermediate optimization should occur. No optimization
+        will occur if False and True will default to the 'greedy' algorithm.
+        Also accepts an explicit contraction list from the ``np.einsum_path``
+        function. See ``np.einsum_path`` for more details. Defaults to False.
+
+    Returns
+    -------
+    output : ndarray
+        The calculation based on the Einstein summation convention.
+
+    See Also
+    --------
+    einsum_path, dot, inner, outer, tensordot, linalg.multi_dot
+    einops :
+        similar verbose interface is provided by the
+        `einops <https://github.com/arogozhnikov/einops>`_ package to cover
+        additional operations: transpose, reshape/flatten, repeat/tile,
+        squeeze/unsqueeze and reductions.
+    opt_einsum :
+        `opt_einsum <https://optimized-einsum.readthedocs.io/>`_
+        optimizes contraction order for einsum-like expressions
+        in backend-agnostic manner.
+
+    Notes
+    -----
+    .. versionadded:: 1.6.0
+
+    The Einstein summation convention can be used to compute
+    many multi-dimensional, linear algebraic array operations. `einsum`
+    provides a succinct way of representing these.
+
+    A non-exhaustive list of these operations,
+    which can be computed by `einsum`, is shown below along with examples:
+
+    * Trace of an array, :py:func:`numpy.trace`.
+    * Return a diagonal, :py:func:`numpy.diag`.
+    * Array axis summations, :py:func:`numpy.sum`.
+    * Transpositions and permutations, :py:func:`numpy.transpose`.
+    * Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`.
+    * Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`.
+    * Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`.
+    * Tensor contractions, :py:func:`numpy.tensordot`.
+    * Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`.
+
+    The subscripts string is a comma-separated list of subscript labels,
+    where each label refers to a dimension of the corresponding operand.
+    Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
+    is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label
+    appears only once, it is not summed, so ``np.einsum('i', a)`` produces a
+    view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``
+    describes traditional matrix multiplication and is equivalent to
+    :py:func:`np.matmul(a,b) <numpy.matmul>`. Repeated subscript labels in one
+    operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent
+    to :py:func:`np.trace(a) <numpy.trace>`.
+
+    In *implicit mode*, the chosen subscripts are important
+    since the axes of the output are reordered alphabetically. This
+    means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
+    ``np.einsum('ji', a)`` takes its transpose. Additionally,
+    ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while
+    ``np.einsum('ij,jh', a, b)`` returns the transpose of the
+    multiplication since subscript 'h' precedes subscript 'i'.
+
+    In *explicit mode* the output can be directly controlled by
+    specifying output subscript labels. This requires the
+    identifier '->' as well as the list of output subscript labels.
+    This feature increases the flexibility of the function since
+    summing can be disabled or forced when required. The call
+    ``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <numpy.sum>`,
+    and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <numpy.diag>`.
+    The difference is that `einsum` does not allow broadcasting by default.
+    Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
+    order of the output subscript labels and therefore returns matrix
+    multiplication, unlike the example above in implicit mode.
+
+    To enable and control broadcasting, use an ellipsis. Default
+    NumPy-style broadcasting is done by adding an ellipsis
+    to the left of each term, like ``np.einsum('...ii->...i', a)``.
+    To take the trace along the first and last axes,
+    you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
+    product with the left-most indices instead of rightmost, one can do
+    ``np.einsum('ij...,jk...->ik...', a, b)``.
+
+    When there is only one operand, no axes are summed, and no output
+    parameter is provided, a view into the operand is returned instead
+    of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
+    produces a view (changed in version 1.10.0).
+
+    `einsum` also provides an alternative way to provide the subscripts
+    and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``.
+    If the output shape is not provided in this format `einsum` will be
+    calculated in implicit mode, otherwise it will be performed explicitly.
+    The examples below have corresponding `einsum` calls with the two
+    parameter methods.
+
+    .. versionadded:: 1.10.0
+
+    Views returned from einsum are now writeable whenever the input array
+    is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
+    have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>`
+    and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
+    of a 2D array.
+
+    .. versionadded:: 1.12.0
+
+    Added the ``optimize`` argument which will optimize the contraction order
+    of an einsum expression. For a contraction with three or more operands this
+    can greatly increase the computational efficiency at the cost of a larger
+    memory footprint during computation.
+
+    Typically a 'greedy' algorithm is applied which empirical tests have shown
+    returns the optimal path in the majority of cases. In some cases 'optimal'
+    will return the superlative path through a more expensive, exhaustive search.
+    For iterative calculations it may be advisable to calculate the optimal path
+    once and reuse that path by supplying it as an argument. An example is given
+    below.
+
+    See :py:func:`numpy.einsum_path` for more details.
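+
+    The ``optimize`` argument also accepts the list returned as the first
+    element of ``np.einsum_path`` (a list whose first entry is the string
+    ``'einsum_path'``), so a precomputed path can be reused; the last
+    example below does exactly this.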
+ + Examples + -------- + >>> a = np.arange(25).reshape(5,5) + >>> b = np.arange(5) + >>> c = np.arange(6).reshape(2,3) + + Trace of a matrix: + + >>> np.einsum('ii', a) + 60 + >>> np.einsum(a, [0,0]) + 60 + >>> np.trace(a) + 60 + + Extract the diagonal (requires explicit form): + + >>> np.einsum('ii->i', a) + array([ 0, 6, 12, 18, 24]) + >>> np.einsum(a, [0,0], [0]) + array([ 0, 6, 12, 18, 24]) + >>> np.diag(a) + array([ 0, 6, 12, 18, 24]) + + Sum over an axis (requires explicit form): + + >>> np.einsum('ij->i', a) + array([ 10, 35, 60, 85, 110]) + >>> np.einsum(a, [0,1], [0]) + array([ 10, 35, 60, 85, 110]) + >>> np.sum(a, axis=1) + array([ 10, 35, 60, 85, 110]) + + For higher dimensional arrays summing a single axis can be done with ellipsis: + + >>> np.einsum('...j->...', a) + array([ 10, 35, 60, 85, 110]) + >>> np.einsum(a, [Ellipsis,1], [Ellipsis]) + array([ 10, 35, 60, 85, 110]) + + Compute a matrix transpose, or reorder any number of axes: + + >>> np.einsum('ji', c) + array([[0, 3], + [1, 4], + [2, 5]]) + >>> np.einsum('ij->ji', c) + array([[0, 3], + [1, 4], + [2, 5]]) + >>> np.einsum(c, [1,0]) + array([[0, 3], + [1, 4], + [2, 5]]) + >>> np.transpose(c) + array([[0, 3], + [1, 4], + [2, 5]]) + + Vector inner products: + + >>> np.einsum('i,i', b, b) + 30 + >>> np.einsum(b, [0], b, [0]) + 30 + >>> np.inner(b,b) + 30 + + Matrix vector multiplication: + + >>> np.einsum('ij,j', a, b) + array([ 30, 80, 130, 180, 230]) + >>> np.einsum(a, [0,1], b, [1]) + array([ 30, 80, 130, 180, 230]) + >>> np.dot(a, b) + array([ 30, 80, 130, 180, 230]) + >>> np.einsum('...j,j', a, b) + array([ 30, 80, 130, 180, 230]) + + Broadcasting and scalar multiplication: + + >>> np.einsum('..., ...', 3, c) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + >>> np.einsum(',ij', 3, c) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + >>> np.einsum(3, [Ellipsis], c, [Ellipsis]) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + >>> np.multiply(3, c) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + + Vector outer product: + + >>> np.einsum('i,j', np.arange(2)+1, b) + array([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + >>> np.einsum(np.arange(2)+1, [0], b, [1]) + array([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + >>> np.outer(np.arange(2)+1, b) + array([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + + Tensor contraction: + + >>> a = np.arange(60.).reshape(3,4,5) + >>> b = np.arange(24.).reshape(4,3,2) + >>> np.einsum('ijk,jil->kl', a, b) + array([[4400., 4730.], + [4532., 4874.], + [4664., 5018.], + [4796., 5162.], + [4928., 5306.]]) + >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3]) + array([[4400., 4730.], + [4532., 4874.], + [4664., 5018.], + [4796., 5162.], + [4928., 5306.]]) + >>> np.tensordot(a,b, axes=([1,0],[0,1])) + array([[4400., 4730.], + [4532., 4874.], + [4664., 5018.], + [4796., 5162.], + [4928., 5306.]]) + + Writeable returned arrays (since version 1.10.0): + + >>> a = np.zeros((3, 3)) + >>> np.einsum('ii->i', a)[:] = 1 + >>> a + array([[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]]) + + Example of ellipsis use: + + >>> a = np.arange(6).reshape((3,2)) + >>> b = np.arange(12).reshape((4,3)) + >>> np.einsum('ki,jk->ij', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + >>> np.einsum('ki,...k->i...', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + >>> np.einsum('k...,jk', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + + Chained array operations. 
For more complicated contractions, speed ups + might be achieved by repeatedly computing a 'greedy' path or pre-computing the + 'optimal' path and repeatedly applying it, using an + `einsum_path` insertion (since version 1.12.0). Performance improvements can be + particularly significant with larger arrays: + + >>> a = np.ones(64).reshape(2,4,8) + + Basic `einsum`: ~1520ms (benchmarked on 3.1GHz Intel i5.) + + >>> for iteration in range(500): + ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a) + + Sub-optimal `einsum` (due to repeated path calculation time): ~330ms + + >>> for iteration in range(500): + ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal') + + Greedy `einsum` (faster optimal path approximation): ~160ms + + >>> for iteration in range(500): + ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='greedy') + + Optimal `einsum` (best usage pattern in some use cases): ~110ms + + >>> path = np.einsum_path('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')[0] + >>> for iteration in range(500): + ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=path) + + """ + # Special handling if out is specified + specified_out = out is not None + + # If no optimization, run pure einsum + if optimize is False: + if specified_out: + kwargs['out'] = out + return c_einsum(*operands, **kwargs) + + # Check the kwargs to avoid a more cryptic error later, without having to + # repeat default values here + valid_einsum_kwargs = ['dtype', 'order', 'casting'] + unknown_kwargs = [k for (k, v) in kwargs.items() if + k not in valid_einsum_kwargs] + if len(unknown_kwargs): + raise TypeError("Did not understand the following kwargs: %s" + % unknown_kwargs) + + # Build the contraction list and operand + operands, contraction_list = einsum_path(*operands, optimize=optimize, + einsum_call=True) + + # Handle order kwarg for output array, c_einsum allows mixed case + output_order = kwargs.pop('order', 'K') + if output_order.upper() == 'A': + if all(arr.flags.f_contiguous for arr in operands): + output_order = 'F' + else: + output_order = 'C' + + # Start contraction loop + for num, contraction in enumerate(contraction_list): + inds, idx_rm, einsum_str, remaining, blas = contraction + tmp_operands = [operands.pop(x) for x in inds] + + # Do we need to deal with the output? + handle_out = specified_out and ((num + 1) == len(contraction_list)) + + # Call tensordot if still possible + if blas: + # Checks have already been handled + input_str, results_index = einsum_str.split('->') + input_left, input_right = input_str.split(',') + + tensor_result = input_left + input_right + for s in idx_rm: + tensor_result = tensor_result.replace(s, "") + + # Find indices to contract over + left_pos, right_pos = [], [] + for s in sorted(idx_rm): + left_pos.append(input_left.find(s)) + right_pos.append(input_right.find(s)) + + # Contract! 
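+            # For each contracted label (taken in sorted order), ``left_pos``
+            # and ``right_pos`` hold its axis position in the left and right
+            # operands; these lists form the ``axes`` pairs for tensordot below.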
+            new_view = tensordot(*tmp_operands, axes=(tuple(left_pos), tuple(right_pos)))
+
+            # Build a new view if needed
+            if (tensor_result != results_index) or handle_out:
+                if handle_out:
+                    kwargs["out"] = out
+                new_view = c_einsum(tensor_result + '->' + results_index, new_view, **kwargs)
+
+        # Call einsum
+        else:
+            # If out was specified
+            if handle_out:
+                kwargs["out"] = out
+
+            # Do the contraction
+            new_view = c_einsum(einsum_str, *tmp_operands, **kwargs)
+
+        # Append new items and dereference what we can
+        operands.append(new_view)
+        del tmp_operands, new_view
+
+    if specified_out:
+        return out
+    else:
+        return asanyarray(operands[0], order=output_order)
diff --git a/MLPY/Lib/site-packages/numpy/core/einsumfunc.pyi b/MLPY/Lib/site-packages/numpy/core/einsumfunc.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..4708da916507a21cfc4cf119e77c1bf226ccabe1
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/core/einsumfunc.pyi
@@ -0,0 +1,142 @@
+import sys
+from typing import List, TypeVar, Optional, Any, overload, Union, Tuple, Sequence
+
+from numpy import (
+    ndarray,
+    dtype,
+    bool_,
+    unsignedinteger,
+    signedinteger,
+    floating,
+    complexfloating,
+    number,
+    _OrderKACF,
+)
+from numpy.typing import (
+    _ArrayLikeBool_co,
+    _ArrayLikeUInt_co,
+    _ArrayLikeInt_co,
+    _ArrayLikeFloat_co,
+    _ArrayLikeComplex_co,
+    _DTypeLikeBool,
+    _DTypeLikeUInt,
+    _DTypeLikeInt,
+    _DTypeLikeFloat,
+    _DTypeLikeComplex,
+    _DTypeLikeComplex_co,
+)
+
+if sys.version_info >= (3, 8):
+    from typing import Literal
+else:
+    from typing_extensions import Literal
+
+_ArrayType = TypeVar(
+    "_ArrayType",
+    bound=ndarray[Any, dtype[Union[bool_, number[Any]]]],
+)
+
+_OptimizeKind = Union[
+    None, bool, Literal["greedy", "optimal"], Sequence[Any]
+]
+_CastingSafe = Literal["no", "equiv", "safe", "same_kind"]
+_CastingUnsafe = Literal["unsafe"]
+
+__all__: List[str]
+
+# TODO: Properly handle the `casting`-based combinatorics
+# TODO: We need to evaluate the content of `__subscripts` in order
+# to identify whether an array or a scalar is returned. At a cursory
+# glance this seems like something that can quite easily be done with
+# a mypy plugin.
+# Something like `is_scalar = bool(__subscripts.partition("->")[-1])`
+@overload
+def einsum(
+    __subscripts: str,
+    *operands: _ArrayLikeBool_co,
+    out: None = ...,
+    dtype: Optional[_DTypeLikeBool] = ...,
+    order: _OrderKACF = ...,
+    casting: _CastingSafe = ...,
+    optimize: _OptimizeKind = ...,
+) -> Any: ...
+@overload
+def einsum(
+    __subscripts: str,
+    *operands: _ArrayLikeUInt_co,
+    out: None = ...,
+    dtype: Optional[_DTypeLikeUInt] = ...,
+    order: _OrderKACF = ...,
+    casting: _CastingSafe = ...,
+    optimize: _OptimizeKind = ...,
+) -> Any: ...
+@overload
+def einsum(
+    __subscripts: str,
+    *operands: _ArrayLikeInt_co,
+    out: None = ...,
+    dtype: Optional[_DTypeLikeInt] = ...,
+    order: _OrderKACF = ...,
+    casting: _CastingSafe = ...,
+    optimize: _OptimizeKind = ...,
+) -> Any: ...
+@overload
+def einsum(
+    __subscripts: str,
+    *operands: _ArrayLikeFloat_co,
+    out: None = ...,
+    dtype: Optional[_DTypeLikeFloat] = ...,
+    order: _OrderKACF = ...,
+    casting: _CastingSafe = ...,
+    optimize: _OptimizeKind = ...,
+) -> Any: ...
+@overload
+def einsum(
+    __subscripts: str,
+    *operands: _ArrayLikeComplex_co,
+    out: None = ...,
+    dtype: Optional[_DTypeLikeComplex] = ...,
+    order: _OrderKACF = ...,
+    casting: _CastingSafe = ...,
+    optimize: _OptimizeKind = ...,
+) -> Any: ...
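+# NOTE: ``casting='unsafe'`` permits any operand/dtype combination, so the
+# unsafe-casting overloads below take ``*operands: Any`` rather than pairing
+# each array-like kind with a matching dtype as the overloads above do.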
+@overload +def einsum( + __subscripts: str, + *operands: Any, + casting: _CastingUnsafe, + dtype: Optional[_DTypeLikeComplex_co] = ..., + out: None = ..., + order: _OrderKACF = ..., + optimize: _OptimizeKind = ..., +) -> Any: ... +@overload +def einsum( + __subscripts: str, + *operands: _ArrayLikeComplex_co, + out: _ArrayType, + dtype: Optional[_DTypeLikeComplex_co] = ..., + order: _OrderKACF = ..., + casting: _CastingSafe = ..., + optimize: _OptimizeKind = ..., +) -> _ArrayType: ... +@overload +def einsum( + __subscripts: str, + *operands: Any, + out: _ArrayType, + casting: _CastingUnsafe, + dtype: Optional[_DTypeLikeComplex_co] = ..., + order: _OrderKACF = ..., + optimize: _OptimizeKind = ..., +) -> _ArrayType: ... + +# NOTE: `einsum_call` is a hidden kwarg unavailable for public use. +# It is therefore excluded from the signatures below. +# NOTE: In practice the list consists of a `str` (first element) +# and a variable number of integer tuples. +def einsum_path( + __subscripts: str, + *operands: _ArrayLikeComplex_co, + optimize: _OptimizeKind = ..., +) -> Tuple[List[Any], str]: ... diff --git a/MLPY/Lib/site-packages/numpy/core/fromnumeric.py b/MLPY/Lib/site-packages/numpy/core/fromnumeric.py new file mode 100644 index 0000000000000000000000000000000000000000..74abf8f02ae758f4613ed95263dbae1d5ab784ee --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/fromnumeric.py @@ -0,0 +1,3789 @@ +"""Module containing non-deprecated functions borrowed from Numeric. + +""" +import functools +import types +import warnings + +import numpy as np +from . import multiarray as mu +from . import overrides +from . import umath as um +from . import numerictypes as nt +from .multiarray import asarray, array, asanyarray, concatenate +from . import _methods + +_dt_ = nt.sctype2char + +# functions that are methods +__all__ = [ + 'alen', 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax', + 'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip', + 'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean', + 'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put', + 'ravel', 'repeat', 'reshape', 'resize', 'round_', + 'searchsorted', 'shape', 'size', 'sometrue', 'sort', 'squeeze', + 'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var', +] + +_gentype = types.GeneratorType +# save away Python sum +_sum_ = sum + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +# functions that are now methods +def _wrapit(obj, method, *args, **kwds): + try: + wrap = obj.__array_wrap__ + except AttributeError: + wrap = None + result = getattr(asarray(obj), method)(*args, **kwds) + if wrap: + if not isinstance(result, mu.ndarray): + result = asarray(result) + result = wrap(result) + return result + + +def _wrapfunc(obj, method, *args, **kwds): + bound = getattr(obj, method, None) + if bound is None: + return _wrapit(obj, method, *args, **kwds) + + try: + return bound(*args, **kwds) + except TypeError: + # A TypeError occurs if the object does have such a method in its + # class, but its signature is not identical to that of NumPy's. This + # situation has occurred in the case of a downstream library like + # 'pandas'. + # + # Call _wrapit from within the except clause to ensure a potential + # exception has a traceback chain. 
+        return _wrapit(obj, method, *args, **kwds)
+
+
+def _wrapreduction(obj, ufunc, method, axis, dtype, out, **kwargs):
+    passkwargs = {k: v for k, v in kwargs.items()
+                  if v is not np._NoValue}
+
+    if type(obj) is not mu.ndarray:
+        try:
+            reduction = getattr(obj, method)
+        except AttributeError:
+            pass
+        else:
+            # This branch is needed for reductions like any which don't
+            # support a dtype.
+            if dtype is not None:
+                return reduction(axis=axis, dtype=dtype, out=out, **passkwargs)
+            else:
+                return reduction(axis=axis, out=out, **passkwargs)
+
+    return ufunc.reduce(obj, axis, dtype, out, **passkwargs)
+
+
+def _take_dispatcher(a, indices, axis=None, out=None, mode=None):
+    return (a, out)
+
+
+@array_function_dispatch(_take_dispatcher)
+def take(a, indices, axis=None, out=None, mode='raise'):
+    """
+    Take elements from an array along an axis.
+
+    When axis is not None, this function does the same thing as "fancy"
+    indexing (indexing arrays using arrays); however, it can be easier to use
+    if you need elements along a given axis. A call such as
+    ``np.take(arr, indices, axis=3)`` is equivalent to
+    ``arr[:,:,:,indices,...]``.
+
+    Explained without fancy indexing, this is equivalent to the following use
+    of `ndindex`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of
+    indices::
+
+        Ni, Nk = a.shape[:axis], a.shape[axis+1:]
+        Nj = indices.shape
+        for ii in ndindex(Ni):
+            for jj in ndindex(Nj):
+                for kk in ndindex(Nk):
+                    out[ii + jj + kk] = a[ii + (indices[jj],) + kk]
+
+    Parameters
+    ----------
+    a : array_like (Ni..., M, Nk...)
+        The source array.
+    indices : array_like (Nj...)
+        The indices of the values to extract.
+
+        .. versionadded:: 1.8.0
+
+        Also allow scalars for indices.
+    axis : int, optional
+        The axis over which to select values. By default, the flattened
+        input array is used.
+    out : ndarray, optional (Ni..., Nj..., Nk...)
+        If provided, the result will be placed in this array. It should
+        be of the appropriate shape and dtype. Note that `out` is always
+        buffered if `mode='raise'`; use other modes for better performance.
+    mode : {'raise', 'wrap', 'clip'}, optional
+        Specifies how out-of-bounds indices will behave.
+
+        * 'raise' -- raise an error (default)
+        * 'wrap' -- wrap around
+        * 'clip' -- clip to the range
+
+        'clip' mode means that all indices that are too large are replaced
+        by the index that addresses the last element along that axis. Note
+        that this disables indexing with negative numbers.
+
+    Returns
+    -------
+    out : ndarray (Ni..., Nj..., Nk...)
+        The returned array has the same type as `a`.
+
+    See Also
+    --------
+    compress : Take elements using a boolean mask
+    ndarray.take : equivalent method
+    take_along_axis : Take elements by matching the array and the index arrays
+
+    Notes
+    -----
+
+    By eliminating the inner loop in the description above, and using `s_` to
+    build simple slice objects, `take` can be expressed in terms of applying
+    fancy indexing to each 1-d slice::
+
+        Ni, Nk = a.shape[:axis], a.shape[axis+1:]
+        for ii in ndindex(Ni):
+            for kk in ndindex(Nk):
+                out[ii + s_[...,] + kk] = a[ii + s_[:,] + kk][indices]
+
+    For this reason, it is equivalent to (but faster than) the following use
+    of `apply_along_axis`::
+
+        out = np.apply_along_axis(lambda a_1d: a_1d[indices], axis, a)
+
+    Examples
+    --------
+    >>> a = [4, 3, 5, 7, 6, 8]
+    >>> indices = [0, 1, 4]
+    >>> np.take(a, indices)
+    array([4, 3, 6])
+
+    In this example if `a` is an ndarray, "fancy" indexing can be used.
+ + >>> a = np.array(a) + >>> a[indices] + array([4, 3, 6]) + + If `indices` is not one dimensional, the output also has these dimensions. + + >>> np.take(a, [[0, 1], [2, 3]]) + array([[4, 3], + [5, 7]]) + """ + return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode) + + +def _reshape_dispatcher(a, newshape, order=None): + return (a,) + + +# not deprecated --- copy if necessary, view otherwise +@array_function_dispatch(_reshape_dispatcher) +def reshape(a, newshape, order='C'): + """ + Gives a new shape to an array without changing its data. + + Parameters + ---------- + a : array_like + Array to be reshaped. + newshape : int or tuple of ints + The new shape should be compatible with the original shape. If + an integer, then the result will be a 1-D array of that length. + One shape dimension can be -1. In this case, the value is + inferred from the length of the array and remaining dimensions. + order : {'C', 'F', 'A'}, optional + Read the elements of `a` using this index order, and place the + elements into the reshaped array using this index order. 'C' + means to read / write the elements using C-like index order, + with the last axis index changing fastest, back to the first + axis index changing slowest. 'F' means to read / write the + elements using Fortran-like index order, with the first index + changing fastest, and the last index changing slowest. Note that + the 'C' and 'F' options take no account of the memory layout of + the underlying array, and only refer to the order of indexing. + 'A' means to read / write the elements in Fortran-like index + order if `a` is Fortran *contiguous* in memory, C-like order + otherwise. + + Returns + ------- + reshaped_array : ndarray + This will be a new view object if possible; otherwise, it will + be a copy. Note there is no guarantee of the *memory layout* (C- or + Fortran- contiguous) of the returned array. + + See Also + -------- + ndarray.reshape : Equivalent method. + + Notes + ----- + It is not always possible to change the shape of an array without + copying the data. If you want an error to be raised when the data is copied, + you should assign the new shape to the shape attribute of the array:: + + >>> a = np.zeros((10, 2)) + + # A transpose makes the array non-contiguous + >>> b = a.T + + # Taking a view makes it possible to modify the shape without modifying + # the initial object. + >>> c = b.view() + >>> c.shape = (20) + Traceback (most recent call last): + ... + AttributeError: Incompatible shape for in-place modification. Use + `.reshape()` to make a copy with the desired shape. + + The `order` keyword gives the index ordering both for *fetching* the values + from `a`, and then *placing* the values into the output array. + For example, let's say you have an array: + + >>> a = np.arange(6).reshape((3, 2)) + >>> a + array([[0, 1], + [2, 3], + [4, 5]]) + + You can think of reshaping as first raveling the array (using the given + index order), then inserting the elements from the raveled array into the + new array using the same kind of index ordering as was used for the + raveling. 
+ + >>> np.reshape(a, (2, 3)) # C-like index ordering + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.reshape(np.ravel(a), (2, 3)) # equivalent to C ravel then C reshape + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.reshape(a, (2, 3), order='F') # Fortran-like index ordering + array([[0, 4, 3], + [2, 1, 5]]) + >>> np.reshape(np.ravel(a, order='F'), (2, 3), order='F') + array([[0, 4, 3], + [2, 1, 5]]) + + Examples + -------- + >>> a = np.array([[1,2,3], [4,5,6]]) + >>> np.reshape(a, 6) + array([1, 2, 3, 4, 5, 6]) + >>> np.reshape(a, 6, order='F') + array([1, 4, 2, 5, 3, 6]) + + >>> np.reshape(a, (3,-1)) # the unspecified value is inferred to be 2 + array([[1, 2], + [3, 4], + [5, 6]]) + """ + return _wrapfunc(a, 'reshape', newshape, order=order) + + +def _choose_dispatcher(a, choices, out=None, mode=None): + yield a + yield from choices + yield out + + +@array_function_dispatch(_choose_dispatcher) +def choose(a, choices, out=None, mode='raise'): + """ + Construct an array from an index array and a list of arrays to choose from. + + First of all, if confused or uncertain, definitely look at the Examples - + in its full generality, this function is less simple than it might + seem from the following code description (below ndi = + `numpy.lib.index_tricks`): + + ``np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)])``. + + But this omits some subtleties. Here is a fully general summary: + + Given an "index" array (`a`) of integers and a sequence of ``n`` arrays + (`choices`), `a` and each choice array are first broadcast, as necessary, + to arrays of a common shape; calling these *Ba* and *Bchoices[i], i = + 0,...,n-1* we have that, necessarily, ``Ba.shape == Bchoices[i].shape`` + for each ``i``. Then, a new array with shape ``Ba.shape`` is created as + follows: + + * if ``mode='raise'`` (the default), then, first of all, each element of + ``a`` (and thus ``Ba``) must be in the range ``[0, n-1]``; now, suppose + that ``i`` (in that range) is the value at the ``(j0, j1, ..., jm)`` + position in ``Ba`` - then the value at the same position in the new array + is the value in ``Bchoices[i]`` at that same position; + + * if ``mode='wrap'``, values in `a` (and thus `Ba`) may be any (signed) + integer; modular arithmetic is used to map integers outside the range + `[0, n-1]` back into that range; and then the new array is constructed + as above; + + * if ``mode='clip'``, values in `a` (and thus ``Ba``) may be any (signed) + integer; negative integers are mapped to 0; values greater than ``n-1`` + are mapped to ``n-1``; and then the new array is constructed as above. + + Parameters + ---------- + a : int array + This array must contain integers in ``[0, n-1]``, where ``n`` is the + number of choices, unless ``mode=wrap`` or ``mode=clip``, in which + cases any integers are permissible. + choices : sequence of arrays + Choice arrays. `a` and all of the choices must be broadcastable to the + same shape. If `choices` is itself an array (not recommended), then + its outermost dimension (i.e., the one corresponding to + ``choices.shape[0]``) is taken as defining the "sequence". + out : array, optional + If provided, the result will be inserted into this array. It should + be of the appropriate shape and dtype. Note that `out` is always + buffered if ``mode='raise'``; use other modes for better performance. 
+ mode : {'raise' (default), 'wrap', 'clip'}, optional + Specifies how indices outside ``[0, n-1]`` will be treated: + + * 'raise' : an exception is raised + * 'wrap' : value becomes value mod ``n`` + * 'clip' : values < 0 are mapped to 0, values > n-1 are mapped to n-1 + + Returns + ------- + merged_array : array + The merged result. + + Raises + ------ + ValueError: shape mismatch + If `a` and each choice array are not all broadcastable to the same + shape. + + See Also + -------- + ndarray.choose : equivalent method + numpy.take_along_axis : Preferable if `choices` is an array + + Notes + ----- + To reduce the chance of misinterpretation, even though the following + "abuse" is nominally supported, `choices` should neither be, nor be + thought of as, a single array, i.e., the outermost sequence-like container + should be either a list or a tuple. + + Examples + -------- + + >>> choices = [[0, 1, 2, 3], [10, 11, 12, 13], + ... [20, 21, 22, 23], [30, 31, 32, 33]] + >>> np.choose([2, 3, 1, 0], choices + ... # the first element of the result will be the first element of the + ... # third (2+1) "array" in choices, namely, 20; the second element + ... # will be the second element of the fourth (3+1) choice array, i.e., + ... # 31, etc. + ... ) + array([20, 31, 12, 3]) + >>> np.choose([2, 4, 1, 0], choices, mode='clip') # 4 goes to 3 (4-1) + array([20, 31, 12, 3]) + >>> # because there are 4 choice arrays + >>> np.choose([2, 4, 1, 0], choices, mode='wrap') # 4 goes to (4 mod 4) + array([20, 1, 12, 3]) + >>> # i.e., 0 + + A couple examples illustrating how choose broadcasts: + + >>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]] + >>> choices = [-10, 10] + >>> np.choose(a, choices) + array([[ 10, -10, 10], + [-10, 10, -10], + [ 10, -10, 10]]) + + >>> # With thanks to Anne Archibald + >>> a = np.array([0, 1]).reshape((2,1,1)) + >>> c1 = np.array([1, 2, 3]).reshape((1,3,1)) + >>> c2 = np.array([-1, -2, -3, -4, -5]).reshape((1,1,5)) + >>> np.choose(a, (c1, c2)) # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2 + array([[[ 1, 1, 1, 1, 1], + [ 2, 2, 2, 2, 2], + [ 3, 3, 3, 3, 3]], + [[-1, -2, -3, -4, -5], + [-1, -2, -3, -4, -5], + [-1, -2, -3, -4, -5]]]) + + """ + return _wrapfunc(a, 'choose', choices, out=out, mode=mode) + + +def _repeat_dispatcher(a, repeats, axis=None): + return (a,) + + +@array_function_dispatch(_repeat_dispatcher) +def repeat(a, repeats, axis=None): + """ + Repeat elements of an array. + + Parameters + ---------- + a : array_like + Input array. + repeats : int or array of ints + The number of repetitions for each element. `repeats` is broadcasted + to fit the shape of the given axis. + axis : int, optional + The axis along which to repeat values. By default, use the + flattened input array, and return a flat output array. + + Returns + ------- + repeated_array : ndarray + Output array which has the same shape as `a`, except along + the given axis. + + See Also + -------- + tile : Tile an array. + unique : Find the unique elements of an array. 
+ + Examples + -------- + >>> np.repeat(3, 4) + array([3, 3, 3, 3]) + >>> x = np.array([[1,2],[3,4]]) + >>> np.repeat(x, 2) + array([1, 1, 2, 2, 3, 3, 4, 4]) + >>> np.repeat(x, 3, axis=1) + array([[1, 1, 1, 2, 2, 2], + [3, 3, 3, 4, 4, 4]]) + >>> np.repeat(x, [1, 2], axis=0) + array([[1, 2], + [3, 4], + [3, 4]]) + + """ + return _wrapfunc(a, 'repeat', repeats, axis=axis) + + +def _put_dispatcher(a, ind, v, mode=None): + return (a, ind, v) + + +@array_function_dispatch(_put_dispatcher) +def put(a, ind, v, mode='raise'): + """ + Replaces specified elements of an array with given values. + + The indexing works on the flattened target array. `put` is roughly + equivalent to: + + :: + + a.flat[ind] = v + + Parameters + ---------- + a : ndarray + Target array. + ind : array_like + Target indices, interpreted as integers. + v : array_like + Values to place in `a` at target indices. If `v` is shorter than + `ind` it will be repeated as necessary. + mode : {'raise', 'wrap', 'clip'}, optional + Specifies how out-of-bounds indices will behave. + + * 'raise' -- raise an error (default) + * 'wrap' -- wrap around + * 'clip' -- clip to the range + + 'clip' mode means that all indices that are too large are replaced + by the index that addresses the last element along that axis. Note + that this disables indexing with negative numbers. In 'raise' mode, + if an exception occurs the target array may still be modified. + + See Also + -------- + putmask, place + put_along_axis : Put elements by matching the array and the index arrays + + Examples + -------- + >>> a = np.arange(5) + >>> np.put(a, [0, 2], [-44, -55]) + >>> a + array([-44, 1, -55, 3, 4]) + + >>> a = np.arange(5) + >>> np.put(a, 22, -5, mode='clip') + >>> a + array([ 0, 1, 2, 3, -5]) + + """ + try: + put = a.put + except AttributeError as e: + raise TypeError("argument 1 must be numpy.ndarray, " + "not {name}".format(name=type(a).__name__)) from e + + return put(ind, v, mode=mode) + + +def _swapaxes_dispatcher(a, axis1, axis2): + return (a,) + + +@array_function_dispatch(_swapaxes_dispatcher) +def swapaxes(a, axis1, axis2): + """ + Interchange two axes of an array. + + Parameters + ---------- + a : array_like + Input array. + axis1 : int + First axis. + axis2 : int + Second axis. + + Returns + ------- + a_swapped : ndarray + For NumPy >= 1.10.0, if `a` is an ndarray, then a view of `a` is + returned; otherwise a new array is created. For earlier NumPy + versions a view of `a` is returned only if the order of the + axes is changed, otherwise the input array is returned. + + Examples + -------- + >>> x = np.array([[1,2,3]]) + >>> np.swapaxes(x,0,1) + array([[1], + [2], + [3]]) + + >>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]]) + >>> x + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + + >>> np.swapaxes(x,0,2) + array([[[0, 4], + [2, 6]], + [[1, 5], + [3, 7]]]) + + """ + return _wrapfunc(a, 'swapaxes', axis1, axis2) + + +def _transpose_dispatcher(a, axes=None): + return (a,) + + +@array_function_dispatch(_transpose_dispatcher) +def transpose(a, axes=None): + """ + Reverse or permute the axes of an array; returns the modified array. + + For an array a with two axes, transpose(a) gives the matrix transpose. + + Refer to `numpy.ndarray.transpose` for full documentation. + + Parameters + ---------- + a : array_like + Input array. + axes : tuple or list of ints, optional + If specified, it must be a tuple or list which contains a permutation of + [0,1,..,N-1] where N is the number of axes of a. 
The i'th axis of the + returned array will correspond to the axis numbered ``axes[i]`` of the + input. If not specified, defaults to ``range(a.ndim)[::-1]``, which + reverses the order of the axes. + + Returns + ------- + p : ndarray + `a` with its axes permuted. A view is returned whenever + possible. + + See Also + -------- + ndarray.transpose : Equivalent method + moveaxis + argsort + + Notes + ----- + Use `transpose(a, argsort(axes))` to invert the transposition of tensors + when using the `axes` keyword argument. + + Transposing a 1-D array returns an unchanged view of the original array. + + Examples + -------- + >>> x = np.arange(4).reshape((2,2)) + >>> x + array([[0, 1], + [2, 3]]) + + >>> np.transpose(x) + array([[0, 2], + [1, 3]]) + + >>> x = np.ones((1, 2, 3)) + >>> np.transpose(x, (1, 0, 2)).shape + (2, 1, 3) + + >>> x = np.ones((2, 3, 4, 5)) + >>> np.transpose(x).shape + (5, 4, 3, 2) + + """ + return _wrapfunc(a, 'transpose', axes) + + +def _partition_dispatcher(a, kth, axis=None, kind=None, order=None): + return (a,) + + +@array_function_dispatch(_partition_dispatcher) +def partition(a, kth, axis=-1, kind='introselect', order=None): + """ + Return a partitioned copy of an array. + + Creates a copy of the array with its elements rearranged in such a + way that the value of the element in k-th position is in the + position it would be in a sorted array. All elements smaller than + the k-th element are moved before this element and all equal or + greater are moved behind it. The ordering of the elements in the two + partitions is undefined. + + .. versionadded:: 1.8.0 + + Parameters + ---------- + a : array_like + Array to be sorted. + kth : int or sequence of ints + Element index to partition by. The k-th value of the element + will be in its final sorted position and all smaller elements + will be moved before it and all equal or greater elements behind + it. The order of all elements in the partitions is undefined. If + provided with a sequence of k-th it will partition all elements + indexed by k-th of them into their sorted position at once. + axis : int or None, optional + Axis along which to sort. If None, the array is flattened before + sorting. The default is -1, which sorts along the last axis. + kind : {'introselect'}, optional + Selection algorithm. Default is 'introselect'. + order : str or list of str, optional + When `a` is an array with fields defined, this argument + specifies which fields to compare first, second, etc. A single + field can be specified as a string. Not all fields need be + specified, but unspecified fields will still be used, in the + order in which they come up in the dtype, to break ties. + + Returns + ------- + partitioned_array : ndarray + Array of the same type and shape as `a`. + + See Also + -------- + ndarray.partition : Method to sort an array in-place. + argpartition : Indirect partition. + sort : Full sorting + + Notes + ----- + The various selection algorithms are characterized by their average + speed, worst case performance, work space size, and whether they are + stable. A stable sort keeps items with the same key in the same + relative order. 
The available algorithms have the following
+    properties:
+
+    ================= ======= ============= ============ =======
+       kind            speed   worst case    work space  stable
+    ================= ======= ============= ============ =======
+    'introselect'        1        O(n)           0         no
+    ================= ======= ============= ============ =======
+
+    All the partition algorithms make temporary copies of the data when
+    partitioning along any but the last axis. Consequently,
+    partitioning along the last axis is faster and uses less space than
+    partitioning along any other axis.
+
+    The sort order for complex numbers is lexicographic. If both the
+    real and imaginary parts are non-nan then the order is determined by
+    the real parts except when they are equal, in which case the order
+    is determined by the imaginary parts.
+
+    Examples
+    --------
+    >>> a = np.array([3, 4, 2, 1])
+    >>> np.partition(a, 3)
+    array([2, 1, 3, 4])
+
+    >>> np.partition(a, (1, 3))
+    array([1, 2, 3, 4])
+
+    """
+    if axis is None:
+        # flatten returns (1, N) for np.matrix, so always use the last axis
+        a = asanyarray(a).flatten()
+        axis = -1
+    else:
+        a = asanyarray(a).copy(order="K")
+    a.partition(kth, axis=axis, kind=kind, order=order)
+    return a
+
+
+def _argpartition_dispatcher(a, kth, axis=None, kind=None, order=None):
+    return (a,)
+
+
+@array_function_dispatch(_argpartition_dispatcher)
+def argpartition(a, kth, axis=-1, kind='introselect', order=None):
+    """
+    Perform an indirect partition along the given axis using the
+    algorithm specified by the `kind` keyword. It returns an array of
+    indices of the same shape as `a` that index data along the given
+    axis in partitioned order.
+
+    .. versionadded:: 1.8.0
+
+    Parameters
+    ----------
+    a : array_like
+        Array to sort.
+    kth : int or sequence of ints
+        Element index to partition by. The k-th element will be in its
+        final sorted position and all smaller elements will be moved
+        before it and all larger elements behind it. The order of all
+        elements in the partitions is undefined. If provided with a
+        sequence of k-th it will partition all of them into their sorted
+        position at once.
+    axis : int or None, optional
+        Axis along which to sort. The default is -1 (the last axis). If
+        None, the flattened array is used.
+    kind : {'introselect'}, optional
+        Selection algorithm. Default is 'introselect'.
+    order : str or list of str, optional
+        When `a` is an array with fields defined, this argument
+        specifies which fields to compare first, second, etc. A single
+        field can be specified as a string, and not all fields need be
+        specified, but unspecified fields will still be used, in the
+        order in which they come up in the dtype, to break ties.
+
+    Returns
+    -------
+    index_array : ndarray, int
+        Array of indices that partition `a` along the specified axis.
+        If `a` is one-dimensional, ``a[index_array]`` yields a partitioned `a`.
+        More generally, ``np.take_along_axis(a, index_array, axis=axis)``
+        always yields the partitioned `a`, irrespective of dimensionality.
+
+    See Also
+    --------
+    partition : Describes partition algorithms used.
+    ndarray.partition : Inplace partition.
+    argsort : Full indirect sort.
+    take_along_axis : Apply ``index_array`` from argpartition
+                      to an array as if by calling partition.
+
+    Notes
+    -----
+    See `partition` for notes on the different selection algorithms.
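+
+    A common use is selecting the ``k`` smallest values without paying for
+    a full sort; a minimal sketch:
+
+    >>> x = np.array([7, 1, 5, 3, 9])
+    >>> k = 2
+    >>> np.sort(x[np.argpartition(x, k)[:k]])
+    array([1, 3])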
+ + Examples + -------- + One dimensional array: + + >>> x = np.array([3, 4, 2, 1]) + >>> x[np.argpartition(x, 3)] + array([2, 1, 3, 4]) + >>> x[np.argpartition(x, (1, 3))] + array([1, 2, 3, 4]) + + >>> x = [3, 4, 2, 1] + >>> np.array(x)[np.argpartition(x, 3)] + array([2, 1, 3, 4]) + + Multi-dimensional array: + + >>> x = np.array([[3, 4, 2], [1, 3, 1]]) + >>> index_array = np.argpartition(x, kth=1, axis=-1) + >>> np.take_along_axis(x, index_array, axis=-1) # same as np.partition(x, kth=1) + array([[2, 3, 4], + [1, 1, 3]]) + + """ + return _wrapfunc(a, 'argpartition', kth, axis=axis, kind=kind, order=order) + + +def _sort_dispatcher(a, axis=None, kind=None, order=None): + return (a,) + + +@array_function_dispatch(_sort_dispatcher) +def sort(a, axis=-1, kind=None, order=None): + """ + Return a sorted copy of an array. + + Parameters + ---------- + a : array_like + Array to be sorted. + axis : int or None, optional + Axis along which to sort. If None, the array is flattened before + sorting. The default is -1, which sorts along the last axis. + kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional + Sorting algorithm. The default is 'quicksort'. Note that both 'stable' + and 'mergesort' use timsort or radix sort under the covers and, in general, + the actual implementation will vary with data type. The 'mergesort' option + is retained for backwards compatibility. + + .. versionchanged:: 1.15.0. + The 'stable' option was added. + + order : str or list of str, optional + When `a` is an array with fields defined, this argument specifies + which fields to compare first, second, etc. A single field can + be specified as a string, and not all fields need be specified, + but unspecified fields will still be used, in the order in which + they come up in the dtype, to break ties. + + Returns + ------- + sorted_array : ndarray + Array of the same type and shape as `a`. + + See Also + -------- + ndarray.sort : Method to sort an array in-place. + argsort : Indirect sort. + lexsort : Indirect stable sort on multiple keys. + searchsorted : Find elements in a sorted array. + partition : Partial sort. + + Notes + ----- + The various sorting algorithms are characterized by their average speed, + worst case performance, work space size, and whether they are stable. A + stable sort keeps items with the same key in the same relative + order. The four algorithms implemented in NumPy have the following + properties: + + =========== ======= ============= ============ ======== + kind speed worst case work space stable + =========== ======= ============= ============ ======== + 'quicksort' 1 O(n^2) 0 no + 'heapsort' 3 O(n*log(n)) 0 no + 'mergesort' 2 O(n*log(n)) ~n/2 yes + 'timsort' 2 O(n*log(n)) ~n/2 yes + =========== ======= ============= ============ ======== + + .. note:: The datatype determines which of 'mergesort' or 'timsort' + is actually used, even if 'mergesort' is specified. User selection + at a finer scale is not currently available. + + All the sort algorithms make temporary copies of the data when + sorting along any but the last axis. Consequently, sorting along + the last axis is faster and uses less space than sorting along + any other axis. + + The sort order for complex numbers is lexicographic. If both the real + and imaginary parts are non-nan then the order is determined by the + real parts except when they are equal, in which case the order is + determined by the imaginary parts. 
+
+    Previous to numpy 1.4.0 sorting real and complex arrays containing nan
+    values led to undefined behaviour. In numpy versions >= 1.4.0 nan
+    values are sorted to the end. The extended sort order is:
+
+      * Real: [R, nan]
+      * Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]
+
+    where R is a non-nan real value. Complex values with the same nan
+    placements are sorted according to the non-nan part if it exists.
+    Non-nan values are sorted as before.
+
+    .. versionadded:: 1.12.0
+
+    quicksort has been changed to
+    `introsort <https://en.wikipedia.org/wiki/Introsort>`_.
+    When sorting does not make enough progress it switches to
+    `heapsort <https://en.wikipedia.org/wiki/Heapsort>`_.
+    This implementation makes quicksort O(n*log(n)) in the worst case.
+
+    'stable' automatically chooses the best stable sorting algorithm
+    for the data type being sorted.
+    It, along with 'mergesort' is currently mapped to
+    `timsort <https://en.wikipedia.org/wiki/Timsort>`_
+    or `radix sort <https://en.wikipedia.org/wiki/Radix_sort>`_
+    depending on the data type.
+    API forward compatibility currently limits the
+    ability to select the implementation and it is hardwired for the different
+    data types.
+
+    .. versionadded:: 1.17.0
+
+    Timsort is added for better performance on already or nearly
+    sorted data. On random data timsort is almost identical to
+    mergesort. It is now used for stable sort while quicksort is still the
+    default sort if none is chosen. For timsort details, refer to
+    `CPython listsort.txt <https://github.com/python/cpython/blob/3.7/Objects/listsort.txt>`_.
+    'mergesort' and 'stable' are mapped to radix sort for integer data types.
+    Radix sort is an O(n) sort instead of O(n log n).
+
+    .. versionchanged:: 1.18.0
+
+    NaT now sorts to the end of arrays for consistency with NaN.
+
+    Examples
+    --------
+    >>> a = np.array([[1,4],[3,1]])
+    >>> np.sort(a)                # sort along the last axis
+    array([[1, 4],
+           [1, 3]])
+    >>> np.sort(a, axis=None)     # sort the flattened array
+    array([1, 1, 3, 4])
+    >>> np.sort(a, axis=0)        # sort along the first axis
+    array([[1, 1],
+           [3, 4]])
+
+    Use the `order` keyword to specify a field to use when sorting a
+    structured array:
+
+    >>> dtype = [('name', 'S10'), ('height', float), ('age', int)]
+    >>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38),
+    ...           ('Galahad', 1.7, 38)]
+    >>> a = np.array(values, dtype=dtype)       # create a structured array
+    >>> np.sort(a, order='height')                        # doctest: +SKIP
+    array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
+           ('Lancelot', 1.8999999999999999, 38)],
+          dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
+
+    Sort by age, then height if ages are equal:
+
+    >>> np.sort(a, order=['age', 'height'])               # doctest: +SKIP
+    array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38),
+           ('Arthur', 1.8, 41)],
+          dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
+
+    """
+    if axis is None:
+        # flatten returns (1, N) for np.matrix, so always use the last axis
+        a = asanyarray(a).flatten()
+        axis = -1
+    else:
+        a = asanyarray(a).copy(order="K")
+    a.sort(axis=axis, kind=kind, order=order)
+    return a
+
+
+def _argsort_dispatcher(a, axis=None, kind=None, order=None):
+    return (a,)
+
+
+@array_function_dispatch(_argsort_dispatcher)
+def argsort(a, axis=-1, kind=None, order=None):
+    """
+    Returns the indices that would sort an array.
+
+    Perform an indirect sort along the given axis using the algorithm
+    specified by the `kind` keyword. It returns an array of indices of the
+    same shape as `a` that index data along the given axis in sorted order.
+
+    Parameters
+    ----------
+    a : array_like
+        Array to sort.
+    axis : int or None, optional
+        Axis along which to sort. The default is -1 (the last axis). If
+        None, the flattened array is used.
+    kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
+        Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
+        and 'mergesort' use timsort under the covers and, in general, the
+        actual implementation will vary with data type. The 'mergesort' option
+        is retained for backwards compatibility.
+
+        .. versionchanged:: 1.15.0
+           The 'stable' option was added.
+    order : str or list of str, optional
+        When `a` is an array with fields defined, this argument specifies
+        which fields to compare first, second, etc. A single field can
+        be specified as a string, and not all fields need be specified,
+        but unspecified fields will still be used, in the order in which
+        they come up in the dtype, to break ties.
+
+    Returns
+    -------
+    index_array : ndarray, int
+        Array of indices that sort `a` along the specified `axis`.
+        If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.
+        More generally, ``np.take_along_axis(a, index_array, axis=axis)``
+        always yields the sorted `a`, irrespective of dimensionality.
+
+    See Also
+    --------
+    sort : Describes sorting algorithms used.
+    lexsort : Indirect stable sort with multiple keys.
+    ndarray.sort : Inplace sort.
+    argpartition : Indirect partial sort.
+    take_along_axis : Apply ``index_array`` from argsort
+                      to an array as if by calling sort.
+
+    Notes
+    -----
+    See `sort` for notes on the different sorting algorithms.
+
+    As of NumPy 1.4.0 `argsort` works with real/complex arrays containing
+    nan values. The enhanced sort order is documented in `sort`.
+
+    Examples
+    --------
+    One dimensional array:
+
+    >>> x = np.array([3, 1, 2])
+    >>> np.argsort(x)
+    array([1, 2, 0])
+
+    Two-dimensional array:
+
+    >>> x = np.array([[0, 3], [2, 2]])
+    >>> x
+    array([[0, 3],
+           [2, 2]])
+
+    >>> ind = np.argsort(x, axis=0)  # sorts along first axis (down)
+    >>> ind
+    array([[0, 1],
+           [1, 0]])
+    >>> np.take_along_axis(x, ind, axis=0)  # same as np.sort(x, axis=0)
+    array([[0, 2],
+           [2, 3]])
+
+    >>> ind = np.argsort(x, axis=1)  # sorts along last axis (across)
+    >>> ind
+    array([[0, 1],
+           [0, 1]])
+    >>> np.take_along_axis(x, ind, axis=1)  # same as np.sort(x, axis=1)
+    array([[0, 3],
+           [2, 2]])
+
+    Indices of the sorted elements of a N-dimensional array:
+
+    >>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape)
+    >>> ind
+    (array([0, 1, 1, 0]), array([0, 0, 1, 1]))
+    >>> x[ind]  # same as np.sort(x, axis=None)
+    array([0, 2, 2, 3])
+
+    Sorting with keys:
+
+    >>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])
+    >>> x
+    array([(1, 0), (0, 1)],
+          dtype=[('x', '<i4'), ('y', '<i4')])
+
+    >>> np.argsort(x, order=('x','y'))
+    array([1, 0])
+
+    >>> np.argsort(x, order=('y','x'))
+    array([0, 1])
+
+    """
+    return _wrapfunc(a, 'argsort', axis=axis, kind=kind, order=order)
+
+
+def _argmax_dispatcher(a, axis=None, out=None):
+    return (a, out)
+
+
+@array_function_dispatch(_argmax_dispatcher)
+def argmax(a, axis=None, out=None):
+    """
+    Returns the indices of the maximum values along an axis.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    axis : int, optional
+        By default, the index is into the flattened array, otherwise
+        along the specified axis.
+    out : array, optional
+        If provided, the result will be inserted into this array. It should
+        be of the appropriate shape and dtype.
+
+    Returns
+    -------
+    index_array : ndarray of ints
+        Array of indices into the array. It has the same shape as `a.shape`
+        with the dimension along `axis` removed.
+
+    See Also
+    --------
+    ndarray.argmax, argmin
+    amax : The maximum value along a given axis.
+    unravel_index : Convert a flat index into an index tuple.
+    take_along_axis : Apply ``np.expand_dims(index_array, axis)``
+                      from argmax to an array as if by calling max.
+
+    Notes
+    -----
+    In case of multiple occurrences of the maximum values, the indices
+    corresponding to the first occurrence are returned.
+
+    Examples
+    --------
+    >>> a = np.arange(6).reshape(2,3) + 10
+    >>> a
+    array([[10, 11, 12],
+           [13, 14, 15]])
+    >>> np.argmax(a)
+    5
+    >>> np.argmax(a, axis=0)
+    array([1, 1, 1])
+    >>> np.argmax(a, axis=1)
+    array([2, 2])
+
+    Indices of the maximal elements of a N-dimensional array:
+
+    >>> ind = np.unravel_index(np.argmax(a, axis=None), a.shape)
+    >>> ind
+    (1, 2)
+    >>> a[ind]
+    15
+
+    >>> b = np.arange(6)
+    >>> b[1] = 5
+    >>> b
+    array([0, 5, 2, 3, 4, 5])
+    >>> np.argmax(b)  # Only the first occurrence is returned.
+    1
+
+    >>> x = np.array([[4,2,3], [1,0,3]])
+    >>> index_array = np.argmax(x, axis=-1)
+    >>> # Same as np.max(x, axis=-1, keepdims=True)
+    >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1)
+    array([[4],
+           [3]])
+    >>> # Same as np.max(x, axis=-1)
+    >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1)
+    array([4, 3])
+
+    """
+    return _wrapfunc(a, 'argmax', axis=axis, out=out)
+
+
+def _argmin_dispatcher(a, axis=None, out=None):
+    return (a, out)
+
+
+@array_function_dispatch(_argmin_dispatcher)
+def argmin(a, axis=None, out=None):
+    """
+    Returns the indices of the minimum values along an axis.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    axis : int, optional
+        By default, the index is into the flattened array, otherwise
+        along the specified axis.
+    out : array, optional
+        If provided, the result will be inserted into this array. It should
+        be of the appropriate shape and dtype.
+
+    Returns
+    -------
+    index_array : ndarray of ints
+        Array of indices into the array. It has the same shape as `a.shape`
+        with the dimension along `axis` removed.
+
+    See Also
+    --------
+    ndarray.argmin, argmax
+    amin : The minimum value along a given axis.
+    unravel_index : Convert a flat index into an index tuple.
+    take_along_axis : Apply ``np.expand_dims(index_array, axis)``
+                      from argmin to an array as if by calling min.
+
+    Notes
+    -----
+    In case of multiple occurrences of the minimum values, the indices
+    corresponding to the first occurrence are returned.
+
+    Examples
+    --------
+    >>> a = np.arange(6).reshape(2,3) + 10
+    >>> a
+    array([[10, 11, 12],
+           [13, 14, 15]])
+    >>> np.argmin(a)
+    0
+    >>> np.argmin(a, axis=0)
+    array([0, 0, 0])
+    >>> np.argmin(a, axis=1)
+    array([0, 0])
+
+    Indices of the minimum elements of a N-dimensional array:
+
+    >>> ind = np.unravel_index(np.argmin(a, axis=None), a.shape)
+    >>> ind
+    (0, 0)
+    >>> a[ind]
+    10
+
+    >>> b = np.arange(6) + 10
+    >>> b[4] = 10
+    >>> b
+    array([10, 11, 12, 13, 10, 15])
+    >>> np.argmin(b)  # Only the first occurrence is returned.
+    0
+
+    >>> x = np.array([[4,2,3], [1,0,3]])
+    >>> index_array = np.argmin(x, axis=-1)
+    >>> # Same as np.min(x, axis=-1, keepdims=True)
+    >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1)
+    array([[2],
+           [0]])
+    >>> # Same as np.min(x, axis=-1)
+    >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1)
+    array([2, 0])
+
+    """
+    return _wrapfunc(a, 'argmin', axis=axis, out=out)
+
+
+def _searchsorted_dispatcher(a, v, side=None, sorter=None):
+    return (a, v, sorter)
+
+
+@array_function_dispatch(_searchsorted_dispatcher)
+def searchsorted(a, v, side='left', sorter=None):
+    """
+    Find indices where elements should be inserted to maintain order.
+
+    Find the indices into a sorted array `a` such that, if the
+    corresponding elements in `v` were inserted before the indices, the
+    order of `a` would be preserved.
+
+    Assuming that `a` is sorted:
+
+    ======  ============================
+    `side`  returned index `i` satisfies
+    ======  ============================
+    left    ``a[i-1] < v <= a[i]``
+    right   ``a[i-1] <= v < a[i]``
+    ======  ============================
+
+    Parameters
+    ----------
+    a : 1-D array_like
+        Input array. If `sorter` is None, then it must be sorted in
+        ascending order, otherwise `sorter` must be an array of indices
+        that sort it.
+    v : array_like
+        Values to insert into `a`.
+    side : {'left', 'right'}, optional
+        If 'left', the index of the first suitable location found is given.
+ If 'right', return the last such index. If there is no suitable + index, return either 0 or N (where N is the length of `a`). + sorter : 1-D array_like, optional + Optional array of integer indices that sort array a into ascending + order. They are typically the result of argsort. + + .. versionadded:: 1.7.0 + + Returns + ------- + indices : array of ints + Array of insertion points with the same shape as `v`. + + See Also + -------- + sort : Return a sorted copy of an array. + histogram : Produce histogram from 1-D data. + + Notes + ----- + Binary search is used to find the required insertion points. + + As of NumPy 1.4.0 `searchsorted` works with real/complex arrays containing + `nan` values. The enhanced sort order is documented in `sort`. + + This function uses the same algorithm as the builtin python `bisect.bisect_left` + (``side='left'``) and `bisect.bisect_right` (``side='right'``) functions, + which is also vectorized in the `v` argument. + + Examples + -------- + >>> np.searchsorted([1,2,3,4,5], 3) + 2 + >>> np.searchsorted([1,2,3,4,5], 3, side='right') + 3 + >>> np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3]) + array([0, 5, 1, 2]) + + """ + return _wrapfunc(a, 'searchsorted', v, side=side, sorter=sorter) + + +def _resize_dispatcher(a, new_shape): + return (a,) + + +@array_function_dispatch(_resize_dispatcher) +def resize(a, new_shape): + """ + Return a new array with the specified shape. + + If the new array is larger than the original array, then the new + array is filled with repeated copies of `a`. Note that this behavior + is different from a.resize(new_shape) which fills with zeros instead + of repeated copies of `a`. + + Parameters + ---------- + a : array_like + Array to be resized. + + new_shape : int or tuple of int + Shape of resized array. + + Returns + ------- + reshaped_array : ndarray + The new array is formed from the data in the old array, repeated + if necessary to fill out the required number of elements. The + data are repeated iterating over the array in C-order. + + See Also + -------- + np.reshape : Reshape an array without changing the total size. + np.pad : Enlarge and pad an array. + np.repeat : Repeat elements of an array. + ndarray.resize : resize an array in-place. + + Notes + ----- + When the total size of the array does not change `~numpy.reshape` should + be used. In most other cases either indexing (to reduce the size) + or padding (to increase the size) may be a more appropriate solution. + + Warning: This functionality does **not** consider axes separately, + i.e. it does not apply interpolation/extrapolation. + It fills the return array with the required number of elements, iterating + over `a` in C-order, disregarding axes (and cycling back from the start if + the new shape is larger). This functionality is therefore not suitable to + resize images, or data where each axis represents a separate and distinct + entity. + + Examples + -------- + >>> a=np.array([[0,1],[2,3]]) + >>> np.resize(a,(2,3)) + array([[0, 1, 2], + [3, 0, 1]]) + >>> np.resize(a,(1,4)) + array([[0, 1, 2, 3]]) + >>> np.resize(a,(2,4)) + array([[0, 1, 2, 3], + [0, 1, 2, 3]]) + + """ + if isinstance(new_shape, (int, nt.integer)): + new_shape = (new_shape,) + + a = ravel(a) + + new_size = 1 + for dim_length in new_shape: + new_size *= dim_length + if dim_length < 0: + raise ValueError('all elements of `new_shape` must be non-negative') + + if a.size == 0 or new_size == 0: + # First case must zero fill. The second would have repeats == 0. 
+ return np.zeros_like(a, shape=new_shape) + + repeats = -(-new_size // a.size) # ceil division + a = concatenate((a,) * repeats)[:new_size] + + return reshape(a, new_shape) + + +def _squeeze_dispatcher(a, axis=None): + return (a,) + + +@array_function_dispatch(_squeeze_dispatcher) +def squeeze(a, axis=None): + """ + Remove axes of length one from `a`. + + Parameters + ---------- + a : array_like + Input data. + axis : None or int or tuple of ints, optional + .. versionadded:: 1.7.0 + + Selects a subset of the entries of length one in the + shape. If an axis is selected with shape entry greater than + one, an error is raised. + + Returns + ------- + squeezed : ndarray + The input array, but with all or a subset of the + dimensions of length 1 removed. This is always `a` itself + or a view into `a`. Note that if all axes are squeezed, + the result is a 0d array and not a scalar. + + Raises + ------ + ValueError + If `axis` is not None, and an axis being squeezed is not of length 1 + + See Also + -------- + expand_dims : The inverse operation, adding entries of length one + reshape : Insert, remove, and combine dimensions, and resize existing ones + + Examples + -------- + >>> x = np.array([[[0], [1], [2]]]) + >>> x.shape + (1, 3, 1) + >>> np.squeeze(x).shape + (3,) + >>> np.squeeze(x, axis=0).shape + (3, 1) + >>> np.squeeze(x, axis=1).shape + Traceback (most recent call last): + ... + ValueError: cannot select an axis to squeeze out which has size not equal to one + >>> np.squeeze(x, axis=2).shape + (1, 3) + >>> x = np.array([[1234]]) + >>> x.shape + (1, 1) + >>> np.squeeze(x) + array(1234) # 0d array + >>> np.squeeze(x).shape + () + >>> np.squeeze(x)[()] + 1234 + + """ + try: + squeeze = a.squeeze + except AttributeError: + return _wrapit(a, 'squeeze', axis=axis) + if axis is None: + return squeeze() + else: + return squeeze(axis=axis) + + +def _diagonal_dispatcher(a, offset=None, axis1=None, axis2=None): + return (a,) + + +@array_function_dispatch(_diagonal_dispatcher) +def diagonal(a, offset=0, axis1=0, axis2=1): + """ + Return specified diagonals. + + If `a` is 2-D, returns the diagonal of `a` with the given offset, + i.e., the collection of elements of the form ``a[i, i+offset]``. If + `a` has more than two dimensions, then the axes specified by `axis1` + and `axis2` are used to determine the 2-D sub-array whose diagonal is + returned. The shape of the resulting array can be determined by + removing `axis1` and `axis2` and appending an index to the right equal + to the size of the resulting diagonals. + + In versions of NumPy prior to 1.7, this function always returned a new, + independent array containing a copy of the values in the diagonal. + + In NumPy 1.7 and 1.8, it continues to return a copy of the diagonal, + but depending on this fact is deprecated. Writing to the resulting + array continues to work as it used to, but a FutureWarning is issued. + + Starting in NumPy 1.9 it returns a read-only view on the original array. + Attempting to write to the resulting array will produce an error. + + In some future release, it will return a read/write view and writing to + the returned array will alter your original array. The returned array + will have the same type as the input array. + + If you don't write to the array returned by this function, then you can + just ignore all of the above. + + If you depend on the current behavior, then we suggest copying the + returned array explicitly, i.e., use ``np.diagonal(a).copy()`` instead + of just ``np.diagonal(a)``. 
This will work with both past and future + versions of NumPy. + + Parameters + ---------- + a : array_like + Array from which the diagonals are taken. + offset : int, optional + Offset of the diagonal from the main diagonal. Can be positive or + negative. Defaults to main diagonal (0). + axis1 : int, optional + Axis to be used as the first axis of the 2-D sub-arrays from which + the diagonals should be taken. Defaults to first axis (0). + axis2 : int, optional + Axis to be used as the second axis of the 2-D sub-arrays from + which the diagonals should be taken. Defaults to second axis (1). + + Returns + ------- + array_of_diagonals : ndarray + If `a` is 2-D, then a 1-D array containing the diagonal and of the + same type as `a` is returned unless `a` is a `matrix`, in which case + a 1-D array rather than a (2-D) `matrix` is returned in order to + maintain backward compatibility. + + If ``a.ndim > 2``, then the dimensions specified by `axis1` and `axis2` + are removed, and a new axis inserted at the end corresponding to the + diagonal. + + Raises + ------ + ValueError + If the dimension of `a` is less than 2. + + See Also + -------- + diag : MATLAB work-a-like for 1-D and 2-D arrays. + diagflat : Create diagonal arrays. + trace : Sum along diagonals. + + Examples + -------- + >>> a = np.arange(4).reshape(2,2) + >>> a + array([[0, 1], + [2, 3]]) + >>> a.diagonal() + array([0, 3]) + >>> a.diagonal(1) + array([1]) + + A 3-D example: + + >>> a = np.arange(8).reshape(2,2,2); a + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + >>> a.diagonal(0, # Main diagonals of two arrays created by skipping + ... 0, # across the outer(left)-most axis last and + ... 1) # the "middle" (row) axis first. + array([[0, 6], + [1, 7]]) + + The sub-arrays whose main diagonals we just obtained; note that each + corresponds to fixing the right-most (column) axis, and that the + diagonals are "packed" in rows. + + >>> a[:,:,0] # main diagonal is [0 6] + array([[0, 2], + [4, 6]]) + >>> a[:,:,1] # main diagonal is [1 7] + array([[1, 3], + [5, 7]]) + + The anti-diagonal can be obtained by reversing the order of elements + using either `numpy.flipud` or `numpy.fliplr`. + + >>> a = np.arange(9).reshape(3, 3) + >>> a + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> np.fliplr(a).diagonal() # Horizontal flip + array([2, 4, 6]) + >>> np.flipud(a).diagonal() # Vertical flip + array([6, 4, 2]) + + Note that the order in which the diagonal is retrieved varies depending + on the flip function. + """ + if isinstance(a, np.matrix): + # Make diagonal of matrix 1-D to preserve backward compatibility. + return asarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2) + else: + return asanyarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2) + + +def _trace_dispatcher( + a, offset=None, axis1=None, axis2=None, dtype=None, out=None): + return (a, out) + + +@array_function_dispatch(_trace_dispatcher) +def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None): + """ + Return the sum along diagonals of the array. + + If `a` is 2-D, the sum along its diagonal with the given offset + is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i. + + If `a` has more than two dimensions, then the axes specified by axis1 and + axis2 are used to determine the 2-D sub-arrays whose traces are returned. + The shape of the resulting array is the same as that of `a` with `axis1` + and `axis2` removed. + + Parameters + ---------- + a : array_like + Input array, from which the diagonals are taken. 
+ offset : int, optional + Offset of the diagonal from the main diagonal. Can be both positive + and negative. Defaults to 0. + axis1, axis2 : int, optional + Axes to be used as the first and second axis of the 2-D sub-arrays + from which the diagonals should be taken. Defaults are the first two + axes of `a`. + dtype : dtype, optional + Determines the data-type of the returned array and of the accumulator + where the elements are summed. If dtype has the value None and `a` is + of integer type of precision less than the default integer + precision, then the default integer precision is used. Otherwise, + the precision is the same as that of `a`. + out : ndarray, optional + Array into which the output is placed. Its type is preserved and + it must be of the right shape to hold the output. + + Returns + ------- + sum_along_diagonals : ndarray + If `a` is 2-D, the sum along the diagonal is returned. If `a` has + larger dimensions, then an array of sums along diagonals is returned. + + See Also + -------- + diag, diagonal, diagflat + + Examples + -------- + >>> np.trace(np.eye(3)) + 3.0 + >>> a = np.arange(8).reshape((2,2,2)) + >>> np.trace(a) + array([6, 8]) + + >>> a = np.arange(24).reshape((2,2,2,3)) + >>> np.trace(a).shape + (2, 3) + + """ + if isinstance(a, np.matrix): + # Get trace of matrix via an array to preserve backward compatibility. + return asarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out) + else: + return asanyarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out) + + +def _ravel_dispatcher(a, order=None): + return (a,) + + +@array_function_dispatch(_ravel_dispatcher) +def ravel(a, order='C'): + """Return a contiguous flattened array. + + A 1-D array, containing the elements of the input, is returned. A copy is + made only if needed. + + As of NumPy 1.10, the returned array will have the same type as the input + array. (for example, a masked array will be returned for a masked array + input) + + Parameters + ---------- + a : array_like + Input array. The elements in `a` are read in the order specified by + `order`, and packed as a 1-D array. + order : {'C','F', 'A', 'K'}, optional + + The elements of `a` are read using this index order. 'C' means + to index the elements in row-major, C-style order, + with the last axis index changing fastest, back to the first + axis index changing slowest. 'F' means to index the elements + in column-major, Fortran-style order, with the + first index changing fastest, and the last index changing + slowest. Note that the 'C' and 'F' options take no account of + the memory layout of the underlying array, and only refer to + the order of axis indexing. 'A' means to read the elements in + Fortran-like index order if `a` is Fortran *contiguous* in + memory, C-like order otherwise. 'K' means to read the + elements in the order they occur in memory, except for + reversing the data when strides are negative. By default, 'C' + index order is used. + + Returns + ------- + y : array_like + y is an array of the same subtype as `a`, with shape ``(a.size,)``. + Note that matrices are special cased for backward compatibility, if `a` + is a matrix, then y is a 1-D ndarray. + + See Also + -------- + ndarray.flat : 1-D iterator over an array. + ndarray.flatten : 1-D array copy of the elements of an array + in row-major order. + ndarray.reshape : Change the shape of an array without changing its data. 
+ + Notes + ----- + In row-major, C-style order, in two dimensions, the row index + varies the slowest, and the column index the quickest. This can + be generalized to multiple dimensions, where row-major order + implies that the index along the first axis varies slowest, and + the index along the last quickest. The opposite holds for + column-major, Fortran-style index ordering. + + When a view is desired in as many cases as possible, ``arr.reshape(-1)`` + may be preferable. + + Examples + -------- + It is equivalent to ``reshape(-1, order=order)``. + + >>> x = np.array([[1, 2, 3], [4, 5, 6]]) + >>> np.ravel(x) + array([1, 2, 3, 4, 5, 6]) + + >>> x.reshape(-1) + array([1, 2, 3, 4, 5, 6]) + + >>> np.ravel(x, order='F') + array([1, 4, 2, 5, 3, 6]) + + When ``order`` is 'A', it will preserve the array's 'C' or 'F' ordering: + + >>> np.ravel(x.T) + array([1, 4, 2, 5, 3, 6]) + >>> np.ravel(x.T, order='A') + array([1, 2, 3, 4, 5, 6]) + + When ``order`` is 'K', it will preserve orderings that are neither 'C' + nor 'F', but won't reverse axes: + + >>> a = np.arange(3)[::-1]; a + array([2, 1, 0]) + >>> a.ravel(order='C') + array([2, 1, 0]) + >>> a.ravel(order='K') + array([2, 1, 0]) + + >>> a = np.arange(12).reshape(2,3,2).swapaxes(1,2); a + array([[[ 0, 2, 4], + [ 1, 3, 5]], + [[ 6, 8, 10], + [ 7, 9, 11]]]) + >>> a.ravel(order='C') + array([ 0, 2, 4, 1, 3, 5, 6, 8, 10, 7, 9, 11]) + >>> a.ravel(order='K') + array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + + """ + if isinstance(a, np.matrix): + return asarray(a).ravel(order=order) + else: + return asanyarray(a).ravel(order=order) + + + def _nonzero_dispatcher(a): + return (a,) + + + @array_function_dispatch(_nonzero_dispatcher) + def nonzero(a): + """ + Return the indices of the elements that are non-zero. + + Returns a tuple of arrays, one for each dimension of `a`, + containing the indices of the non-zero elements in that + dimension. The values in `a` are always tested and returned in + row-major, C-style order. + + To group the indices by element, rather than dimension, use `argwhere`, + which returns a row for each non-zero element. + + .. note:: + + When called on a zero-d array or scalar, ``nonzero(a)`` is treated + as ``nonzero(atleast_1d(a))``. + + .. deprecated:: 1.17.0 + + Use `atleast_1d` explicitly if this behavior is deliberate. + + Parameters + ---------- + a : array_like + Input array. + + Returns + ------- + tuple_of_arrays : tuple + Indices of elements that are non-zero. + + See Also + -------- + flatnonzero : + Return indices that are non-zero in the flattened version of the input + array. + ndarray.nonzero : + Equivalent ndarray method. + count_nonzero : + Counts the number of non-zero elements in the input array. + + Notes + ----- + While the nonzero values can be obtained with ``a[nonzero(a)]``, it is + recommended to use ``a[a.astype(bool)]`` or ``a[a != 0]`` instead, which + will correctly handle 0-d arrays. + + Examples + -------- + >>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]]) + >>> x + array([[3, 0, 0], + [0, 4, 0], + [5, 6, 0]]) + >>> np.nonzero(x) + (array([0, 1, 2, 2]), array([0, 1, 0, 1])) + + >>> x[np.nonzero(x)] + array([3, 4, 5, 6]) + >>> np.transpose(np.nonzero(x)) + array([[0, 0], + [1, 1], + [2, 0], + [2, 1]]) + + A common use for ``nonzero`` is to find the indices of an array where + a condition is True. Given an array `a`, the condition ``a > 3`` is a + boolean array and, since False is interpreted as 0, ``np.nonzero(a > 3)`` + yields the indices of `a` where the condition is true.
+ + >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + >>> a > 3 + array([[False, False, False], + [ True, True, True], + [ True, True, True]]) + >>> np.nonzero(a > 3) + (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) + + Using this result to index `a` is equivalent to using the mask directly: + + >>> a[np.nonzero(a > 3)] + array([4, 5, 6, 7, 8, 9]) + >>> a[a > 3] # prefer this spelling + array([4, 5, 6, 7, 8, 9]) + + ``nonzero`` can also be called as a method of the array. + + >>> (a > 3).nonzero() + (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) + + """ + return _wrapfunc(a, 'nonzero') + + +def _shape_dispatcher(a): + return (a,) + + +@array_function_dispatch(_shape_dispatcher) +def shape(a): + """ + Return the shape of an array. + + Parameters + ---------- + a : array_like + Input array. + + Returns + ------- + shape : tuple of ints + The elements of the shape tuple give the lengths of the + corresponding array dimensions. + + See Also + -------- + len + ndarray.shape : Equivalent array method. + + Examples + -------- + >>> np.shape(np.eye(3)) + (3, 3) + >>> np.shape([[1, 2]]) + (1, 2) + >>> np.shape([0]) + (1,) + >>> np.shape(0) + () + + >>> a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) + >>> np.shape(a) + (2,) + >>> a.shape + (2,) + + """ + try: + result = a.shape + except AttributeError: + result = asarray(a).shape + return result + + +def _compress_dispatcher(condition, a, axis=None, out=None): + return (condition, a, out) + + +@array_function_dispatch(_compress_dispatcher) +def compress(condition, a, axis=None, out=None): + """ + Return selected slices of an array along given axis. + + When working along a given axis, a slice along that axis is returned in + `output` for each index where `condition` evaluates to True. When + working on a 1-D array, `compress` is equivalent to `extract`. + + Parameters + ---------- + condition : 1-D array of bools + Array that selects which entries to return. If len(condition) + is less than the size of `a` along the given axis, then output is + truncated to the length of the condition array. + a : array_like + Array from which to extract a part. + axis : int, optional + Axis along which to take slices. If None (default), work on the + flattened array. + out : ndarray, optional + Output array. Its type is preserved and it must be of the right + shape to hold the output. + + Returns + ------- + compressed_array : ndarray + A copy of `a` without the slices along axis for which `condition` + is false. + + See Also + -------- + take, choose, diag, diagonal, select + ndarray.compress : Equivalent method in ndarray + extract : Equivalent method when working on 1-D arrays + :ref:`ufuncs-output-type` + + Examples + -------- + >>> a = np.array([[1, 2], [3, 4], [5, 6]]) + >>> a + array([[1, 2], + [3, 4], + [5, 6]]) + >>> np.compress([0, 1], a, axis=0) + array([[3, 4]]) + >>> np.compress([False, True, True], a, axis=0) + array([[3, 4], + [5, 6]]) + >>> np.compress([False, True], a, axis=1) + array([[2], + [4], + [6]]) + + Working on the flattened array does not return slices along an axis but + selects elements. + + >>> np.compress([False, True], a) + array([2]) + + """ + return _wrapfunc(a, 'compress', condition, axis=axis, out=out) + + +def _clip_dispatcher(a, a_min, a_max, out=None, **kwargs): + return (a, a_min, a_max) + + +@array_function_dispatch(_clip_dispatcher) +def clip(a, a_min, a_max, out=None, **kwargs): + """ + Clip (limit) the values in an array. 
+ + Given an interval, values outside the interval are clipped to + the interval edges. For example, if an interval of ``[0, 1]`` + is specified, values smaller than 0 become 0, and values larger + than 1 become 1. + + Equivalent to but faster than ``np.minimum(a_max, np.maximum(a, a_min))``. + + No check is performed to ensure ``a_min < a_max``. + + Parameters + ---------- + a : array_like + Array containing elements to clip. + a_min, a_max : array_like or None + Minimum and maximum value. If ``None``, clipping is not performed on + the corresponding edge. Only one of `a_min` and `a_max` may be + ``None``. Both are broadcast against `a`. + out : ndarray, optional + The results will be placed in this array. It may be the input + array for in-place clipping. `out` must be of the right shape + to hold the output. Its type is preserved. + **kwargs + For other keyword-only arguments, see the + :ref:`ufunc docs <ufuncs.kwargs>`. + + .. versionadded:: 1.17.0 + + Returns + ------- + clipped_array : ndarray + An array with the elements of `a`, but where values + < `a_min` are replaced with `a_min`, and those > `a_max` + with `a_max`. + + See Also + -------- + :ref:`ufuncs-output-type` + + Notes + ----- + When `a_min` is greater than `a_max`, `clip` returns an + array in which all values are equal to `a_max`, + as shown in the second example. + + Examples + -------- + >>> a = np.arange(10) + >>> a + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> np.clip(a, 1, 8) + array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8]) + >>> np.clip(a, 8, 1) + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1]) + >>> np.clip(a, 3, 6, out=a) + array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6]) + >>> a + array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6]) + >>> a = np.arange(10) + >>> a + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> np.clip(a, [3, 4, 1, 1, 1, 4, 4, 4, 4, 4], 8) + array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8]) + + """ + return _wrapfunc(a, 'clip', a_min, a_max, out=out, **kwargs) + + + def _sum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, + initial=None, where=None): + return (a, out) + + + @array_function_dispatch(_sum_dispatcher) + def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, + initial=np._NoValue, where=np._NoValue): + """ + Sum of array elements over a given axis. + + Parameters + ---------- + a : array_like + Elements to sum. + axis : None or int or tuple of ints, optional + Axis or axes along which a sum is performed. The default, + axis=None, will sum all of the elements of the input array. If + axis is negative it counts from the last to the first axis. + + .. versionadded:: 1.7.0 + + If axis is a tuple of ints, a sum is performed on all of the axes + specified in the tuple instead of a single axis or all the axes as + before. + dtype : dtype, optional + The type of the returned array and of the accumulator in which the + elements are summed. The dtype of `a` is used by default unless `a` + has an integer dtype of less precision than the default platform + integer. In that case, if `a` is signed then the platform integer + is used while if `a` is unsigned then an unsigned integer of the + same precision as the platform integer is used. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output, but the type of the output + values will be cast if necessary. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array.
+ + If the default value is passed, then `keepdims` will not be + passed through to the `sum` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + initial : scalar, optional + Starting value for the sum. See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.15.0 + + where : array_like of bool, optional + Elements to include in the sum. See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.17.0 + + Returns + ------- + sum_along_axis : ndarray + An array with the same shape as `a`, with the specified + axis removed. If `a` is a 0-d array, or if `axis` is None, a scalar + is returned. If an output array is specified, a reference to + `out` is returned. + + See Also + -------- + ndarray.sum : Equivalent method. + + add.reduce : Equivalent functionality of `add`. + + cumsum : Cumulative sum of array elements. + + trapz : Integration of array values using the composite trapezoidal rule. + + mean, average + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + The sum of an empty array is the neutral element 0: + + >>> np.sum([]) + 0.0 + + For floating point numbers the numerical precision of sum (and + ``np.add.reduce``) is in general limited by directly adding each number + individually to the result causing rounding errors in every step. + However, often numpy will use a numerically better approach (partial + pairwise summation) leading to improved precision in many use-cases. + This improved precision is always provided when no ``axis`` is given. + When ``axis`` is given, it will depend on which axis is summed. + Technically, to provide the best speed possible, the improved precision + is only used when the summation is along the fast axis in memory. + Note that the exact precision may vary depending on other parameters. + In contrast to NumPy, Python's ``math.fsum`` function uses a slower but + more precise approach to summation. + Especially when summing a large number of lower precision floating point + numbers, such as ``float32``, numerical errors can become significant. + In such cases it can be advisable to use `dtype="float64"` to use a higher + precision for the output. + + Examples + -------- + >>> np.sum([0.5, 1.5]) + 2.0 + >>> np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32) + 1 + >>> np.sum([[0, 1], [0, 5]]) + 6 + >>> np.sum([[0, 1], [0, 5]], axis=0) + array([0, 6]) + >>> np.sum([[0, 1], [0, 5]], axis=1) + array([1, 5]) + >>> np.sum([[0, 1], [np.nan, 5]], where=[False, True], axis=1) + array([1., 5.]) + + If the accumulator is too small, overflow occurs: + + >>> np.ones(128, dtype=np.int8).sum(dtype=np.int8) + -128 + + You can also start the sum with a value other than zero: + + >>> np.sum([10], initial=5) + 15 + """ + if isinstance(a, _gentype): + # 2018-02-25, 1.15.0 + warnings.warn( + "Calling np.sum(generator) is deprecated, and in the future will give a different result. " + "Use np.sum(np.fromiter(generator)) or the python sum builtin instead.", + DeprecationWarning, stacklevel=3) + + res = _sum_(a) + if out is not None: + out[...] 
= res + return out + return res + + return _wrapreduction(a, np.add, 'sum', axis, dtype, out, keepdims=keepdims, + initial=initial, where=where) + + + def _any_dispatcher(a, axis=None, out=None, keepdims=None, *, + where=np._NoValue): + return (a, where, out) + + + @array_function_dispatch(_any_dispatcher) + def any(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue): + """ + Test whether any array element along a given axis evaluates to True. + + Returns a single boolean unless `axis` is not ``None``. + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + axis : None or int or tuple of ints, optional + Axis or axes along which a logical OR reduction is performed. + The default (``axis=None``) is to perform a logical OR over all + the dimensions of the input array. `axis` may be negative, in + which case it counts from the last to the first axis. + + .. versionadded:: 1.7.0 + + If this is a tuple of ints, a reduction is performed on multiple + axes, instead of a single axis or all the axes as before. + out : ndarray, optional + Alternate output array in which to place the result. It must have + the same shape as the expected output and its type is preserved + (e.g., if it is of type float, then it will remain so, returning + 1.0 for True and 0.0 for False, regardless of the type of `a`). + See :ref:`ufuncs-output-type` for more details. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `any` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + where : array_like of bool, optional + Elements to include in checking for any `True` values. + See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.20.0 + + Returns + ------- + any : bool or ndarray + A new boolean or `ndarray` is returned unless `out` is specified, + in which case a reference to `out` is returned. + + See Also + -------- + ndarray.any : equivalent method + + all : Test whether all elements along a given axis evaluate to True. + + Notes + ----- + Not a Number (NaN), positive infinity and negative infinity evaluate + to `True` because these are not equal to zero. + + Examples + -------- + >>> np.any([[True, False], [True, True]]) + True + + >>> np.any([[True, False], [False, False]], axis=0) + array([ True, False]) + + >>> np.any([-1, 0, 5]) + True + + >>> np.any(np.nan) + True + + >>> np.any([[True, False], [False, False]], where=[[False], [True]]) + False + + >>> o = np.array(False) + >>> z = np.any([-1, 4, 5], out=o) + >>> z, o + (array(True), array(True)) + >>> # Check now that z is a reference to o + >>> z is o + True + >>> id(z), id(o) # identity of z and o # doctest: +SKIP + (191614240, 191614240) + + """ + return _wrapreduction(a, np.logical_or, 'any', axis, None, out, + keepdims=keepdims, where=where) + + + def _all_dispatcher(a, axis=None, out=None, keepdims=None, *, + where=None): + return (a, where, out) + + + @array_function_dispatch(_all_dispatcher) + def all(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue): + """ + Test whether all array elements along a given axis evaluate to True.
+ + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + axis : None or int or tuple of ints, optional + Axis or axes along which a logical AND reduction is performed. + The default (``axis=None``) is to perform a logical AND over all + the dimensions of the input array. `axis` may be negative, in + which case it counts from the last to the first axis. + + .. versionadded:: 1.7.0 + + If this is a tuple of ints, a reduction is performed on multiple + axes, instead of a single axis or all the axes as before. + out : ndarray, optional + Alternate output array in which to place the result. + It must have the same shape as the expected output and its + type is preserved (e.g., if ``dtype(out)`` is float, the result + will consist of 0.0's and 1.0's). See :ref:`ufuncs-output-type` for more + details. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `all` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + where : array_like of bool, optional + Elements to include in checking for all `True` values. + See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.20.0 + + Returns + ------- + all : ndarray, bool + A new boolean or array is returned unless `out` is specified, + in which case a reference to `out` is returned. + + See Also + -------- + ndarray.all : equivalent method + + any : Test whether any element along a given axis evaluates to True. + + Notes + ----- + Not a Number (NaN), positive infinity and negative infinity + evaluate to `True` because these are not equal to zero. + + Examples + -------- + >>> np.all([[True,False],[True,True]]) + False + + >>> np.all([[True,False],[True,True]], axis=0) + array([ True, False]) + + >>> np.all([-1, 4, 5]) + True + + >>> np.all([1.0, np.nan]) + True + + >>> np.all([[True, True], [False, True]], where=[[True], [False]]) + True + + >>> o=np.array(False) + >>> z=np.all([-1, 4, 5], out=o) + >>> id(z), id(o), z + (28293632, 28293632, array(True)) # may vary + + """ + return _wrapreduction(a, np.logical_and, 'all', axis, None, out, + keepdims=keepdims, where=where) + + +def _cumsum_dispatcher(a, axis=None, dtype=None, out=None): + return (a, out) + + +@array_function_dispatch(_cumsum_dispatcher) +def cumsum(a, axis=None, dtype=None, out=None): + """ + Return the cumulative sum of the elements along a given axis. + + Parameters + ---------- + a : array_like + Input array. + axis : int, optional + Axis along which the cumulative sum is computed. The default + (None) is to compute the cumsum over the flattened array. + dtype : dtype, optional + Type of the returned array and of the accumulator in which the + elements are summed. If `dtype` is not specified, it defaults + to the dtype of `a`, unless `a` has an integer dtype with a + precision less than that of the default platform integer. In + that case, the default platform integer is used. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type will be cast if necessary. See :ref:`ufuncs-output-type` for + more details. + + Returns + ------- + cumsum_along_axis : ndarray. 
+ A new array holding the result is returned unless `out` is + specified, in which case a reference to `out` is returned. The + result has the same size as `a`, and the same shape as `a` if + `axis` is not None or `a` is a 1-d array. + + See Also + -------- + sum : Sum array elements. + trapz : Integration of array values using the composite trapezoidal rule. + diff : Calculate the n-th discrete difference along given axis. + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + ``cumsum(a)[-1]`` may not be equal to ``sum(a)`` for floating-point + values since ``sum`` may use a pairwise summation routine, reducing + the roundoff-error. See `sum` for more information. + + Examples + -------- + >>> a = np.array([[1,2,3], [4,5,6]]) + >>> a + array([[1, 2, 3], + [4, 5, 6]]) + >>> np.cumsum(a) + array([ 1, 3, 6, 10, 15, 21]) + >>> np.cumsum(a, dtype=float) # specifies type of output value(s) + array([ 1., 3., 6., 10., 15., 21.]) + + >>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns + array([[1, 2, 3], + [5, 7, 9]]) + >>> np.cumsum(a,axis=1) # sum over columns for each of the 2 rows + array([[ 1, 3, 6], + [ 4, 9, 15]]) + + ``cumsum(b)[-1]`` may not be equal to ``sum(b)`` + + >>> b = np.array([1, 2e-9, 3e-9] * 1000000) + >>> b.cumsum()[-1] + 1000000.0050045159 + >>> b.sum() + 1000000.0050000029 + + """ + return _wrapfunc(a, 'cumsum', axis=axis, dtype=dtype, out=out) + + +def _ptp_dispatcher(a, axis=None, out=None, keepdims=None): + return (a, out) + + +@array_function_dispatch(_ptp_dispatcher) +def ptp(a, axis=None, out=None, keepdims=np._NoValue): + """ + Range of values (maximum - minimum) along an axis. + + The name of the function comes from the acronym for 'peak to peak'. + + .. warning:: + `ptp` preserves the data type of the array. This means the + return value for an input of signed integers with n bits + (e.g. `np.int8`, `np.int16`, etc) is also a signed integer + with n bits. In that case, peak-to-peak values greater than + ``2**(n-1)-1`` will be returned as negative values. An example + with a work-around is shown below. + + Parameters + ---------- + a : array_like + Input values. + axis : None or int or tuple of ints, optional + Axis along which to find the peaks. By default, flatten the + array. `axis` may be negative, in + which case it counts from the last to the first axis. + + .. versionadded:: 1.15.0 + + If this is a tuple of ints, a reduction is performed on multiple + axes, instead of a single axis or all the axes as before. + out : array_like + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output, + but the type of the output values will be cast if necessary. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `ptp` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + Returns + ------- + ptp : ndarray + A new array holding the result, unless `out` was + specified, in which case a reference to `out` is returned. + + Examples + -------- + >>> x = np.array([[4, 9, 2, 10], + ... 
[6, 9, 7, 12]]) + + >>> np.ptp(x, axis=1) + array([8, 6]) + + >>> np.ptp(x, axis=0) + array([2, 0, 5, 2]) + + >>> np.ptp(x) + 10 + + This example shows that a negative value can be returned when + the input is an array of signed integers. + + >>> y = np.array([[1, 127], + ... [0, 127], + ... [-1, 127], + ... [-2, 127]], dtype=np.int8) + >>> np.ptp(y, axis=1) + array([ 126, 127, -128, -127], dtype=int8) + + A work-around is to use the `view()` method to view the result as + unsigned integers with the same bit width: + + >>> np.ptp(y, axis=1).view(np.uint8) + array([126, 127, 128, 129], dtype=uint8) + + """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + if type(a) is not mu.ndarray: + try: + ptp = a.ptp + except AttributeError: + pass + else: + return ptp(axis=axis, out=out, **kwargs) + return _methods._ptp(a, axis=axis, out=out, **kwargs) + + +def _amax_dispatcher(a, axis=None, out=None, keepdims=None, initial=None, + where=None): + return (a, out) + + +@array_function_dispatch(_amax_dispatcher) +def amax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, + where=np._NoValue): + """ + Return the maximum of an array or maximum along an axis. + + Parameters + ---------- + a : array_like + Input data. + axis : None or int or tuple of ints, optional + Axis or axes along which to operate. By default, flattened input is + used. + + .. versionadded:: 1.7.0 + + If this is a tuple of ints, the maximum is selected over multiple axes, + instead of a single axis or all the axes as before. + out : ndarray, optional + Alternative output array in which to place the result. Must + be of the same shape and buffer length as the expected output. + See :ref:`ufuncs-output-type` for more details. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `amax` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + initial : scalar, optional + The minimum value of an output element. Must be present to allow + computation on empty slice. See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.15.0 + + where : array_like of bool, optional + Elements to compare for the maximum. See `~numpy.ufunc.reduce` + for details. + + .. versionadded:: 1.17.0 + + Returns + ------- + amax : ndarray or scalar + Maximum of `a`. If `axis` is None, the result is a scalar value. + If `axis` is given, the result is an array of dimension + ``a.ndim - 1``. + + See Also + -------- + amin : + The minimum value of an array along a given axis, propagating any NaNs. + nanmax : + The maximum value of an array along a given axis, ignoring any NaNs. + maximum : + Element-wise maximum of two arrays, propagating any NaNs. + fmax : + Element-wise maximum of two arrays, ignoring any NaNs. + argmax : + Return the indices of the maximum values. + + nanmin, minimum, fmin + + Notes + ----- + NaN values are propagated, that is if at least one item is NaN, the + corresponding max value will be NaN as well. To ignore NaN values + (MATLAB behavior), please use nanmax. + + Don't use `amax` for element-wise comparison of 2 arrays; when + ``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than + ``amax(a, axis=0)``. 
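+ + For example, with a small two-row array (sample values chosen arbitrarily for illustration), the element-wise form gives the same values: + + >>> a = np.array([[1, 5], [4, 2]]) + >>> np.maximum(a[0], a[1]) # matches np.amax(a, axis=0) + array([4, 5])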
+ + Examples + -------- + >>> a = np.arange(4).reshape((2,2)) + >>> a + array([[0, 1], + [2, 3]]) + >>> np.amax(a) # Maximum of the flattened array + 3 + >>> np.amax(a, axis=0) # Maxima along the first axis + array([2, 3]) + >>> np.amax(a, axis=1) # Maxima along the second axis + array([1, 3]) + >>> np.amax(a, where=[False, True], initial=-1, axis=0) + array([-1, 3]) + >>> b = np.arange(5, dtype=float) + >>> b[2] = np.NaN + >>> np.amax(b) + nan + >>> np.amax(b, where=~np.isnan(b), initial=-1) + 4.0 + >>> np.nanmax(b) + 4.0 + + You can use an initial value to compute the maximum of an empty slice, or + to initialize it to a different value: + + >>> np.max([[-50], [10]], axis=-1, initial=0) + array([ 0, 10]) + + Notice that the initial value is used as one of the elements for which the + maximum is determined, unlike the ``default`` argument of Python's ``max`` + function, which is only used for empty iterables. + + >>> np.max([5], initial=6) + 6 + >>> max([5], default=6) + 5 + """ + return _wrapreduction(a, np.maximum, 'max', axis, None, out, + keepdims=keepdims, initial=initial, where=where) + + + def _amin_dispatcher(a, axis=None, out=None, keepdims=None, initial=None, + where=None): + return (a, out) + + + @array_function_dispatch(_amin_dispatcher) + def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, + where=np._NoValue): + """ + Return the minimum of an array or minimum along an axis. + + Parameters + ---------- + a : array_like + Input data. + axis : None or int or tuple of ints, optional + Axis or axes along which to operate. By default, flattened input is + used. + + .. versionadded:: 1.7.0 + + If this is a tuple of ints, the minimum is selected over multiple axes, + instead of a single axis or all the axes as before. + out : ndarray, optional + Alternative output array in which to place the result. Must + be of the same shape and buffer length as the expected output. + See :ref:`ufuncs-output-type` for more details. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `amin` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + initial : scalar, optional + The maximum value of an output element. Must be present to allow + computation on empty slice. See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.15.0 + + where : array_like of bool, optional + Elements to compare for the minimum. See `~numpy.ufunc.reduce` + for details. + + .. versionadded:: 1.17.0 + + Returns + ------- + amin : ndarray or scalar + Minimum of `a`. If `axis` is None, the result is a scalar value. + If `axis` is given, the result is an array of dimension + ``a.ndim - 1``. + + See Also + -------- + amax : + The maximum value of an array along a given axis, propagating any NaNs. + nanmin : + The minimum value of an array along a given axis, ignoring any NaNs. + minimum : + Element-wise minimum of two arrays, propagating any NaNs. + fmin : + Element-wise minimum of two arrays, ignoring any NaNs. + argmin : + Return the indices of the minimum values. + + nanmax, maximum, fmax + + Notes + ----- + NaN values are propagated, that is if at least one item is NaN, the + corresponding min value will be NaN as well.
To ignore NaN values + (MATLAB behavior), please use nanmin. + + Don't use `amin` for element-wise comparison of 2 arrays; when + ``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than + ``amin(a, axis=0)``. + + Examples + -------- + >>> a = np.arange(4).reshape((2,2)) + >>> a + array([[0, 1], + [2, 3]]) + >>> np.amin(a) # Minimum of the flattened array + 0 + >>> np.amin(a, axis=0) # Minima along the first axis + array([0, 1]) + >>> np.amin(a, axis=1) # Minima along the second axis + array([0, 2]) + >>> np.amin(a, where=[False, True], initial=10, axis=0) + array([10, 1]) + + >>> b = np.arange(5, dtype=float) + >>> b[2] = np.NaN + >>> np.amin(b) + nan + >>> np.amin(b, where=~np.isnan(b), initial=10) + 0.0 + >>> np.nanmin(b) + 0.0 + + >>> np.min([[-50], [10]], axis=-1, initial=0) + array([-50, 0]) + + Notice that the initial value is used as one of the elements for which the + minimum is determined, unlike the ``default`` argument of Python's ``min`` + function, which is only used for empty iterables. + + >>> np.min([6], initial=5) + 5 + >>> min([6], default=5) + 6 + """ + return _wrapreduction(a, np.minimum, 'min', axis, None, out, + keepdims=keepdims, initial=initial, where=where) + + + def _alen_dispatcher(a): + return (a,) + + + @array_function_dispatch(_alen_dispatcher) + def alen(a): + """ + Return the length of the first dimension of the input array. + + .. deprecated:: 1.18 + `numpy.alen` is deprecated, use `len` instead. + + Parameters + ---------- + a : array_like + Input array. + + Returns + ------- + alen : int + Length of the first dimension of `a`. + + See Also + -------- + shape, size + + Examples + -------- + >>> a = np.zeros((7,4,5)) + >>> a.shape[0] + 7 + >>> np.alen(a) + 7 + + """ + # NumPy 1.18.0, 2019-08-02 + warnings.warn( + "`np.alen` is deprecated, use `len` instead", + DeprecationWarning, stacklevel=2) + try: + return len(a) + except TypeError: + return len(array(a, ndmin=1)) + + + def _prod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, + initial=None, where=None): + return (a, out) + + + @array_function_dispatch(_prod_dispatcher) + def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, + initial=np._NoValue, where=np._NoValue): + """ + Return the product of array elements over a given axis. + + Parameters + ---------- + a : array_like + Input data. + axis : None or int or tuple of ints, optional + Axis or axes along which a product is performed. The default, + axis=None, will calculate the product of all the elements in the + input array. If axis is negative it counts from the last to the + first axis. + + .. versionadded:: 1.7.0 + + If axis is a tuple of ints, a product is performed on all of the + axes specified in the tuple instead of a single axis or all the + axes as before. + dtype : dtype, optional + The type of the returned array, as well as of the accumulator in + which the elements are multiplied. The dtype of `a` is used by + default unless `a` has an integer dtype of less precision than the + default platform integer. In that case, if `a` is signed then the + platform integer is used while if `a` is unsigned then an unsigned + integer of the same precision as the platform integer is used. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output, but the type of the output + values will be cast if necessary.
+ keepdims : bool, optional + If this is set to True, the axes which are reduced are left in the + result as dimensions with size one. With this option, the result + will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `prod` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + initial : scalar, optional + The starting value for this product. See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.15.0 + + where : array_like of bool, optional + Elements to include in the product. See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.17.0 + + Returns + ------- + product_along_axis : ndarray, see `dtype` parameter above. + An array shaped as `a` but with the specified axis removed. + Returns a reference to `out` if specified. + + See Also + -------- + ndarray.prod : equivalent method + :ref:`ufuncs-output-type` + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. That means that, on a 32-bit platform: + + >>> x = np.array([536870910, 536870910, 536870910, 536870910]) + >>> np.prod(x) + 16 # may vary + + The product of an empty array is the neutral element 1: + + >>> np.prod([]) + 1.0 + + Examples + -------- + By default, calculate the product of all elements: + + >>> np.prod([1.,2.]) + 2.0 + + Even when the input array is two-dimensional: + + >>> np.prod([[1.,2.],[3.,4.]]) + 24.0 + + But we can also specify the axis over which to multiply: + + >>> np.prod([[1.,2.],[3.,4.]], axis=1) + array([ 2., 12.]) + + Or select specific elements to include: + + >>> np.prod([1., np.nan, 3.], where=[True, False, True]) + 3.0 + + If the type of `x` is unsigned, then the output type is + the unsigned platform integer: + + >>> x = np.array([1, 2, 3], dtype=np.uint8) + >>> np.prod(x).dtype == np.uint + True + + If `x` is of a signed integer type, then the output type + is the default platform integer: + + >>> x = np.array([1, 2, 3], dtype=np.int8) + >>> np.prod(x).dtype == int + True + + You can also start the product with a value other than one: + + >>> np.prod([1, 2], initial=5) + 10 + """ + return _wrapreduction(a, np.multiply, 'prod', axis, dtype, out, + keepdims=keepdims, initial=initial, where=where) + + +def _cumprod_dispatcher(a, axis=None, dtype=None, out=None): + return (a, out) + + +@array_function_dispatch(_cumprod_dispatcher) +def cumprod(a, axis=None, dtype=None, out=None): + """ + Return the cumulative product of elements along a given axis. + + Parameters + ---------- + a : array_like + Input array. + axis : int, optional + Axis along which the cumulative product is computed. By default + the input is flattened. + dtype : dtype, optional + Type of the returned array, as well as of the accumulator in which + the elements are multiplied. If *dtype* is not specified, it + defaults to the dtype of `a`, unless `a` has an integer dtype with + a precision less than that of the default platform integer. In + that case, the default platform integer is used instead. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type of the resulting values will be cast if necessary. 
+ + Returns + ------- + cumprod : ndarray + A new array holding the result is returned unless `out` is + specified, in which case a reference to out is returned. + + See Also + -------- + :ref:`ufuncs-output-type` + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + Examples + -------- + >>> a = np.array([1,2,3]) + >>> np.cumprod(a) # intermediate results 1, 1*2 + ... # total product 1*2*3 = 6 + array([1, 2, 6]) + >>> a = np.array([[1, 2, 3], [4, 5, 6]]) + >>> np.cumprod(a, dtype=float) # specify type of output + array([ 1., 2., 6., 24., 120., 720.]) + + The cumulative product for each column (i.e., over the rows) of `a`: + + >>> np.cumprod(a, axis=0) + array([[ 1, 2, 3], + [ 4, 10, 18]]) + + The cumulative product for each row (i.e. over the columns) of `a`: + + >>> np.cumprod(a,axis=1) + array([[ 1, 2, 6], + [ 4, 20, 120]]) + + """ + return _wrapfunc(a, 'cumprod', axis=axis, dtype=dtype, out=out) + + +def _ndim_dispatcher(a): + return (a,) + + +@array_function_dispatch(_ndim_dispatcher) +def ndim(a): + """ + Return the number of dimensions of an array. + + Parameters + ---------- + a : array_like + Input array. If it is not already an ndarray, a conversion is + attempted. + + Returns + ------- + number_of_dimensions : int + The number of dimensions in `a`. Scalars are zero-dimensional. + + See Also + -------- + ndarray.ndim : equivalent method + shape : dimensions of array + ndarray.shape : dimensions of array + + Examples + -------- + >>> np.ndim([[1,2,3],[4,5,6]]) + 2 + >>> np.ndim(np.array([[1,2,3],[4,5,6]])) + 2 + >>> np.ndim(1) + 0 + + """ + try: + return a.ndim + except AttributeError: + return asarray(a).ndim + + +def _size_dispatcher(a, axis=None): + return (a,) + + +@array_function_dispatch(_size_dispatcher) +def size(a, axis=None): + """ + Return the number of elements along a given axis. + + Parameters + ---------- + a : array_like + Input data. + axis : int, optional + Axis along which the elements are counted. By default, give + the total number of elements. + + Returns + ------- + element_count : int + Number of elements along the specified axis. + + See Also + -------- + shape : dimensions of array + ndarray.shape : dimensions of array + ndarray.size : number of elements in array + + Examples + -------- + >>> a = np.array([[1,2,3],[4,5,6]]) + >>> np.size(a) + 6 + >>> np.size(a,1) + 3 + >>> np.size(a,0) + 2 + + """ + if axis is None: + try: + return a.size + except AttributeError: + return asarray(a).size + else: + try: + return a.shape[axis] + except AttributeError: + return asarray(a).shape[axis] + + +def _around_dispatcher(a, decimals=None, out=None): + return (a, out) + + +@array_function_dispatch(_around_dispatcher) +def around(a, decimals=0, out=None): + """ + Evenly round to the given number of decimals. + + Parameters + ---------- + a : array_like + Input data. + decimals : int, optional + Number of decimal places to round to (default: 0). If + decimals is negative, it specifies the number of positions to + the left of the decimal point. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output, but the type of the output + values will be cast if necessary. See :ref:`ufuncs-output-type` for more + details. + + Returns + ------- + rounded_array : ndarray + An array of the same type as `a`, containing the rounded values. + Unless `out` was specified, a new array is created. A reference to + the result is returned. 
+ + The real and imaginary parts of complex numbers are rounded + separately. The result of rounding a float is a float. + + See Also + -------- + ndarray.round : equivalent method + + ceil, fix, floor, rint, trunc + + + Notes + ----- + For values exactly halfway between rounded decimal values, NumPy + rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0, + -0.5 and 0.5 round to 0.0, etc. + + ``np.around`` uses a fast but sometimes inexact algorithm to round + floating-point datatypes. For positive `decimals` it is equivalent to + ``np.true_divide(np.rint(a * 10**decimals), 10**decimals)``, which has + error due to the inexact representation of decimal fractions in the IEEE + floating point standard [1]_ and errors introduced when scaling by powers + of ten. For instance, note the extra "1" in the following: + + >>> np.round(56294995342131.5, 3) + 56294995342131.51 + + If your goal is to print such values with a fixed number of decimals, it is + preferable to use numpy's float printing routines to limit the number of + printed decimals: + + >>> np.format_float_positional(56294995342131.5, precision=3) + '56294995342131.5' + + The float printing routines use an accurate but much more computationally + demanding algorithm to compute the number of digits after the decimal + point. + + Alternatively, Python's builtin `round` function uses a more accurate + but slower algorithm for 64-bit floating point values: + + >>> round(56294995342131.5, 3) + 56294995342131.5 + >>> np.round(16.055, 2), round(16.055, 2) # equals 16.0549999999999997 + (16.06, 16.05) + + + References + ---------- + .. [1] "Lecture Notes on the Status of IEEE 754", William Kahan, + https://people.eecs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF + .. [2] "How Futile are Mindless Assessments of + Roundoff in Floating-Point Computation?", William Kahan, + https://people.eecs.berkeley.edu/~wkahan/Mindless.pdf + + Examples + -------- + >>> np.around([0.37, 1.64]) + array([0., 2.]) + >>> np.around([0.37, 1.64], decimals=1) + array([0.4, 1.6]) + >>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value + array([0., 2., 2., 4., 4.]) + >>> np.around([1,2,3,11], decimals=1) # ndarray of ints is returned + array([ 1, 2, 3, 11]) + >>> np.around([1,2,3,11], decimals=-1) + array([ 0, 0, 0, 10]) + + """ + return _wrapfunc(a, 'round', decimals=decimals, out=out) + + +def _mean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, *, + where=None): + return (a, where, out) + + +@array_function_dispatch(_mean_dispatcher) +def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, *, + where=np._NoValue): + """ + Compute the arithmetic mean along the specified axis. + + Returns the average of the array elements. The average is taken over + the flattened array by default, otherwise over the specified axis. + `float64` intermediate and return values are used for integer inputs. + + Parameters + ---------- + a : array_like + Array containing numbers whose mean is desired. If `a` is not an + array, a conversion is attempted. + axis : None or int or tuple of ints, optional + Axis or axes along which the means are computed. The default is to + compute the mean of the flattened array. + + .. versionadded:: 1.7.0 + + If this is a tuple of ints, a mean is performed over multiple axes, + instead of a single axis or all the axes as before. + dtype : data-type, optional + Type to use in computing the mean. 
For integer inputs, the default + is `float64`; for floating point inputs, it is the same as the + input dtype. + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``; if provided, it must have the same shape as the + expected output, but the type will be cast if necessary. + See :ref:`ufuncs-output-type` for more details. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `mean` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + where : array_like of bool, optional + Elements to include in the mean. See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.20.0 + + Returns + ------- + m : ndarray, see dtype parameter above + If `out=None`, returns a new array containing the mean values, + otherwise a reference to the output array is returned. + + See Also + -------- + average : Weighted average + std, var, nanmean, nanstd, nanvar + + Notes + ----- + The arithmetic mean is the sum of the elements along the axis divided + by the number of elements. + + Note that for floating-point input, the mean is computed using the + same precision the input has. Depending on the input data, this can + cause the results to be inaccurate, especially for `float32` (see + example below). Specifying a higher-precision accumulator using the + `dtype` keyword can alleviate this issue. + + By default, `float16` results are computed using `float32` intermediates + for extra precision. + + Examples + -------- + >>> a = np.array([[1, 2], [3, 4]]) + >>> np.mean(a) + 2.5 + >>> np.mean(a, axis=0) + array([2., 3.]) + >>> np.mean(a, axis=1) + array([1.5, 3.5]) + + In single precision, `mean` can be inaccurate: + + >>> a = np.zeros((2, 512*512), dtype=np.float32) + >>> a[0, :] = 1.0 + >>> a[1, :] = 0.1 + >>> np.mean(a) + 0.54999924 + + Computing the mean in float64 is more accurate: + + >>> np.mean(a, dtype=np.float64) + 0.55000000074505806 # may vary + + Specifying a where argument: + + >>> a = np.array([[5, 9, 13], [14, 10, 12], [11, 15, 19]]) + >>> np.mean(a) + 12.0 + >>> np.mean(a, where=[[True], [False], [False]]) + 9.0 + + """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + if where is not np._NoValue: + kwargs['where'] = where + if type(a) is not mu.ndarray: + try: + mean = a.mean + except AttributeError: + pass + else: + return mean(axis=axis, dtype=dtype, out=out, **kwargs) + + return _methods._mean(a, axis=axis, dtype=dtype, + out=out, **kwargs) + + + def _std_dispatcher(a, axis=None, dtype=None, out=None, ddof=None, + keepdims=None, *, where=None): + return (a, where, out) + + + @array_function_dispatch(_std_dispatcher) + def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, + where=np._NoValue): + """ + Compute the standard deviation along the specified axis. + + Returns the standard deviation, a measure of the spread of a distribution, + of the array elements. The standard deviation is computed for the + flattened array by default, otherwise over the specified axis. + + Parameters + ---------- + a : array_like + Calculate the standard deviation of these values.
+ axis : None or int or tuple of ints, optional + Axis or axes along which the standard deviation is computed. The + default is to compute the standard deviation of the flattened array. + + .. versionadded:: 1.7.0 + + If this is a tuple of ints, a standard deviation is performed over + multiple axes, instead of a single axis or all the axes as before. + dtype : dtype, optional + Type to use in computing the standard deviation. For arrays of + integer type the default is float64, for arrays of float types it is + the same as the array type. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output but the type (of the calculated + values) will be cast if necessary. + ddof : int, optional + Means Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of elements. + By default `ddof` is zero. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `std` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + where : array_like of bool, optional + Elements to include in the standard deviation. + See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.20.0 + + Returns + ------- + standard_deviation : ndarray, see dtype parameter above. + If `out` is None, return a new array containing the standard deviation, + otherwise return a reference to the output array. + + See Also + -------- + var, mean, nanmean, nanstd, nanvar + :ref:`ufuncs-output-type` + + Notes + ----- + The standard deviation is the square root of the average of the squared + deviations from the mean, i.e., ``std = sqrt(mean(x))``, where + ``x = abs(a - a.mean())**2``. + + The average squared deviation is typically calculated as ``x.sum() / N``, + where ``N = len(x)``. If, however, `ddof` is specified, the divisor + ``N - ddof`` is used instead. In standard statistical practice, ``ddof=1`` + provides an unbiased estimator of the variance of the infinite population. + ``ddof=0`` provides a maximum likelihood estimate of the variance for + normally distributed variables. The standard deviation computed in this + function is the square root of the estimated variance, so even with + ``ddof=1``, it will not be an unbiased estimate of the standard deviation + per se. + + Note that, for complex numbers, `std` takes the absolute + value before squaring, so that the result is always real and nonnegative. + + For floating-point input, the *std* is computed using the same + precision the input has. Depending on the input data, this can cause + the results to be inaccurate, especially for float32 (see example below). + Specifying a higher-accuracy accumulator using the `dtype` keyword can + alleviate this issue. 
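+ + As a brief illustration of the ``ddof`` divisor described above (sample values chosen arbitrarily): + + >>> x = np.array([1., 2., 3., 4.]) + >>> np.std(x) # divisor N = 4 + 1.1180339887498949 # may vary + >>> np.std(x, ddof=1) # divisor N - 1 = 3 + 1.2909944487358056 # may vary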
+ + Examples + -------- + >>> a = np.array([[1, 2], [3, 4]]) + >>> np.std(a) + 1.1180339887498949 # may vary + >>> np.std(a, axis=0) + array([1., 1.]) + >>> np.std(a, axis=1) + array([0.5, 0.5]) + + In single precision, std() can be inaccurate: + + >>> a = np.zeros((2, 512*512), dtype=np.float32) + >>> a[0, :] = 1.0 + >>> a[1, :] = 0.1 + >>> np.std(a) + 0.45000005 + + Computing the standard deviation in float64 is more accurate: + + >>> np.std(a, dtype=np.float64) + 0.44999999925494177 # may vary + + Specifying a where argument: + + >>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]]) + >>> np.std(a) + 2.614064523559687 # may vary + >>> np.std(a, where=[[True], [True], [False]]) + 2.0 + + """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + if where is not np._NoValue: + kwargs['where'] = where + if type(a) is not mu.ndarray: + try: + std = a.std + except AttributeError: + pass + else: + return std(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs) + + return _methods._std(a, axis=axis, dtype=dtype, out=out, ddof=ddof, + **kwargs) + + +def _var_dispatcher(a, axis=None, dtype=None, out=None, ddof=None, + keepdims=None, *, where=None): + return (a, where, out) + + +@array_function_dispatch(_var_dispatcher) +def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, + where=np._NoValue): + """ + Compute the variance along the specified axis. + + Returns the variance of the array elements, a measure of the spread of a + distribution. The variance is computed for the flattened array by + default, otherwise over the specified axis. + + Parameters + ---------- + a : array_like + Array containing numbers whose variance is desired. If `a` is not an + array, a conversion is attempted. + axis : None or int or tuple of ints, optional + Axis or axes along which the variance is computed. The default is to + compute the variance of the flattened array. + + .. versionadded:: 1.7.0 + + If this is a tuple of ints, a variance is performed over multiple axes, + instead of a single axis or all the axes as before. + dtype : data-type, optional + Type to use in computing the variance. For arrays of integer type + the default is `float64`; for arrays of float types it is the same as + the array type. + out : ndarray, optional + Alternate output array in which to place the result. It must have + the same shape as the expected output, but the type is cast if + necessary. + ddof : int, optional + "Delta Degrees of Freedom": the divisor used in the calculation is + ``N - ddof``, where ``N`` represents the number of elements. By + default `ddof` is zero. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `var` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + where : array_like of bool, optional + Elements to include in the variance. See `~numpy.ufunc.reduce` for + details. + + .. versionadded:: 1.20.0 + + Returns + ------- + variance : ndarray, see dtype parameter above + If ``out=None``, returns a new array containing the variance; + otherwise, a reference to the output array is returned. 
+ + See Also + -------- + std, mean, nanmean, nanstd, nanvar + :ref:`ufuncs-output-type` + + Notes + ----- + The variance is the average of the squared deviations from the mean, + i.e., ``var = mean(x)``, where ``x = abs(a - a.mean())**2``. + + The mean is typically calculated as ``x.sum() / N``, where ``N = len(x)``. + If, however, `ddof` is specified, the divisor ``N - ddof`` is used + instead. In standard statistical practice, ``ddof=1`` provides an + unbiased estimator of the variance of a hypothetical infinite population. + ``ddof=0`` provides a maximum likelihood estimate of the variance for + normally distributed variables. + + Note that for complex numbers, the absolute value is taken before + squaring, so that the result is always real and nonnegative. + + For floating-point input, the variance is computed using the same + precision the input has. Depending on the input data, this can cause + the results to be inaccurate, especially for `float32` (see example + below). Specifying a higher-accuracy accumulator using the ``dtype`` + keyword can alleviate this issue. + + Examples + -------- + >>> a = np.array([[1, 2], [3, 4]]) + >>> np.var(a) + 1.25 + >>> np.var(a, axis=0) + array([1., 1.]) + >>> np.var(a, axis=1) + array([0.25, 0.25]) + + In single precision, var() can be inaccurate: + + >>> a = np.zeros((2, 512*512), dtype=np.float32) + >>> a[0, :] = 1.0 + >>> a[1, :] = 0.1 + >>> np.var(a) + 0.20250003 + + Computing the variance in float64 is more accurate: + + >>> np.var(a, dtype=np.float64) + 0.20249999932944759 # may vary + >>> ((1-0.55)**2 + (0.1-0.55)**2)/2 + 0.2025 + + Specifying a where argument: + + >>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]]) + >>> np.var(a) + 6.833333333333333 # may vary + >>> np.var(a, where=[[True], [True], [False]]) + 4.0 + + """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + if where is not np._NoValue: + kwargs['where'] = where + + if type(a) is not mu.ndarray: + try: + var = a.var + + except AttributeError: + pass + else: + return var(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs) + + return _methods._var(a, axis=axis, dtype=dtype, out=out, ddof=ddof, + **kwargs) + + +# Aliases of other functions. These have their own definitions only so that +# they can have unique docstrings. + +@array_function_dispatch(_around_dispatcher) +def round_(a, decimals=0, out=None): + """ + Round an array to the given number of decimals. + + See Also + -------- + around : equivalent function; see for details. + """ + return around(a, decimals=decimals, out=out) + + +@array_function_dispatch(_prod_dispatcher, verify=False) +def product(*args, **kwargs): + """ + Return the product of array elements over a given axis. + + See Also + -------- + prod : equivalent function; see for details. + """ + return prod(*args, **kwargs) + + +@array_function_dispatch(_cumprod_dispatcher, verify=False) +def cumproduct(*args, **kwargs): + """ + Return the cumulative product over the given axis. + + See Also + -------- + cumprod : equivalent function; see for details. + """ + return cumprod(*args, **kwargs) + + +@array_function_dispatch(_any_dispatcher, verify=False) +def sometrue(*args, **kwargs): + """ + Check whether some values are true. + + Refer to `any` for full documentation. + + See Also + -------- + any : equivalent function; see for details. 
+ """ + return any(*args, **kwargs) + + +@array_function_dispatch(_all_dispatcher, verify=False) +def alltrue(*args, **kwargs): + """ + Check if all elements of input array are true. + + See Also + -------- + numpy.all : Equivalent function; see for details. + """ + return all(*args, **kwargs) diff --git a/MLPY/Lib/site-packages/numpy/core/fromnumeric.pyi b/MLPY/Lib/site-packages/numpy/core/fromnumeric.pyi new file mode 100644 index 0000000000000000000000000000000000000000..71f9ea2a1096ccff72482bdf39400f0256892618 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/fromnumeric.pyi @@ -0,0 +1,361 @@ +import sys +import datetime as dt +from typing import Optional, Union, Sequence, Tuple, Any, overload, TypeVar + +from numpy import ( + ndarray, + number, + integer, + intp, + bool_, + generic, + _OrderKACF, + _OrderACF, + _ModeKind, + _PartitionKind, + _SortKind, + _SortSide, +) +from numpy.typing import ( + DTypeLike, + ArrayLike, + _ShapeLike, + _Shape, + _ArrayLikeBool_co, + _ArrayLikeInt_co, + _NumberLike_co, +) + +if sys.version_info >= (3, 8): + from typing import Literal +else: + from typing_extensions import Literal + +# Various annotations for scalars + +# While dt.datetime and dt.timedelta are not technically part of NumPy, +# they are one of the rare few builtin scalars which serve as valid return types. +# See https://github.com/numpy/numpy-stubs/pull/67#discussion_r412604113. +_ScalarNumpy = Union[generic, dt.datetime, dt.timedelta] +_ScalarBuiltin = Union[str, bytes, dt.date, dt.timedelta, bool, int, float, complex] +_Scalar = Union[_ScalarBuiltin, _ScalarNumpy] + +# Integers and booleans can generally be used interchangeably +_ScalarGeneric = TypeVar("_ScalarGeneric", bound=generic) + +_Number = TypeVar("_Number", bound=number) + +# The signature of take() follows a common theme with its overloads: +# 1. A generic comes in; the same generic comes out +# 2. A scalar comes in; a generic comes out +# 3. An array-like object comes in; some keyword ensures that a generic comes out +# 4. An array-like object comes in; an ndarray or generic comes out +def take( + a: ArrayLike, + indices: _ArrayLikeInt_co, + axis: Optional[int] = ..., + out: Optional[ndarray] = ..., + mode: _ModeKind = ..., +) -> Any: ... + +def reshape( + a: ArrayLike, + newshape: _ShapeLike, + order: _OrderACF = ..., +) -> ndarray: ... + +def choose( + a: _ArrayLikeInt_co, + choices: ArrayLike, + out: Optional[ndarray] = ..., + mode: _ModeKind = ..., +) -> Any: ... + +def repeat( + a: ArrayLike, + repeats: _ArrayLikeInt_co, + axis: Optional[int] = ..., +) -> ndarray: ... + +def put( + a: ndarray, + ind: _ArrayLikeInt_co, + v: ArrayLike, + mode: _ModeKind = ..., +) -> None: ... + +def swapaxes( + a: ArrayLike, + axis1: int, + axis2: int, +) -> ndarray: ... + +def transpose( + a: ArrayLike, + axes: Union[None, Sequence[int], ndarray] = ... +) -> ndarray: ... + +def partition( + a: ArrayLike, + kth: _ArrayLikeInt_co, + axis: Optional[int] = ..., + kind: _PartitionKind = ..., + order: Union[None, str, Sequence[str]] = ..., +) -> ndarray: ... + +def argpartition( + a: ArrayLike, + kth: _ArrayLikeInt_co, + axis: Optional[int] = ..., + kind: _PartitionKind = ..., + order: Union[None, str, Sequence[str]] = ..., +) -> Any: ... + +def sort( + a: ArrayLike, + axis: Optional[int] = ..., + kind: Optional[_SortKind] = ..., + order: Union[None, str, Sequence[str]] = ..., +) -> ndarray: ... 
+ +def argsort( + a: ArrayLike, + axis: Optional[int] = ..., + kind: Optional[_SortKind] = ..., + order: Union[None, str, Sequence[str]] = ..., +) -> ndarray: ... + +@overload +def argmax( + a: ArrayLike, + axis: None = ..., + out: Optional[ndarray] = ..., +) -> intp: ... +@overload +def argmax( + a: ArrayLike, + axis: Optional[int] = ..., + out: Optional[ndarray] = ..., +) -> Any: ... + +@overload +def argmin( + a: ArrayLike, + axis: None = ..., + out: Optional[ndarray] = ..., +) -> intp: ... +@overload +def argmin( + a: ArrayLike, + axis: Optional[int] = ..., + out: Optional[ndarray] = ..., +) -> Any: ... + +@overload +def searchsorted( + a: ArrayLike, + v: _Scalar, + side: _SortSide = ..., + sorter: Optional[_ArrayLikeInt_co] = ..., # 1D int array +) -> intp: ... +@overload +def searchsorted( + a: ArrayLike, + v: ArrayLike, + side: _SortSide = ..., + sorter: Optional[_ArrayLikeInt_co] = ..., # 1D int array +) -> ndarray: ... + +def resize( + a: ArrayLike, + new_shape: _ShapeLike, +) -> ndarray: ... + +@overload +def squeeze( + a: _ScalarGeneric, + axis: Optional[_ShapeLike] = ..., +) -> _ScalarGeneric: ... +@overload +def squeeze( + a: ArrayLike, + axis: Optional[_ShapeLike] = ..., +) -> ndarray: ... + +def diagonal( + a: ArrayLike, + offset: int = ..., + axis1: int = ..., + axis2: int = ..., # >= 2D array +) -> ndarray: ... + +def trace( + a: ArrayLike, # >= 2D array + offset: int = ..., + axis1: int = ..., + axis2: int = ..., + dtype: DTypeLike = ..., + out: Optional[ndarray] = ..., +) -> Any: ... + +def ravel(a: ArrayLike, order: _OrderKACF = ...) -> ndarray: ... + +def nonzero(a: ArrayLike) -> Tuple[ndarray, ...]: ... + +def shape(a: ArrayLike) -> _Shape: ... + +def compress( + condition: ArrayLike, # 1D bool array + a: ArrayLike, + axis: Optional[int] = ..., + out: Optional[ndarray] = ..., +) -> ndarray: ... + +@overload +def clip( + a: ArrayLike, + a_min: ArrayLike, + a_max: Optional[ArrayLike], + out: Optional[ndarray] = ..., + **kwargs: Any, +) -> Any: ... +@overload +def clip( + a: ArrayLike, + a_min: None, + a_max: ArrayLike, + out: Optional[ndarray] = ..., + **kwargs: Any, +) -> Any: ... + +def sum( + a: ArrayLike, + axis: _ShapeLike = ..., + dtype: DTypeLike = ..., + out: Optional[ndarray] = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> Any: ... + +@overload +def all( + a: ArrayLike, + axis: None = ..., + out: None = ..., + keepdims: Literal[False] = ..., +) -> bool_: ... +@overload +def all( + a: ArrayLike, + axis: Optional[_ShapeLike] = ..., + out: Optional[ndarray] = ..., + keepdims: bool = ..., +) -> Any: ... + +@overload +def any( + a: ArrayLike, + axis: None = ..., + out: None = ..., + keepdims: Literal[False] = ..., +) -> bool_: ... +@overload +def any( + a: ArrayLike, + axis: Optional[_ShapeLike] = ..., + out: Optional[ndarray] = ..., + keepdims: bool = ..., +) -> Any: ... + +def cumsum( + a: ArrayLike, + axis: Optional[int] = ..., + dtype: DTypeLike = ..., + out: Optional[ndarray] = ..., +) -> ndarray: ... + +def ptp( + a: ArrayLike, + axis: Optional[_ShapeLike] = ..., + out: Optional[ndarray] = ..., + keepdims: bool = ..., +) -> Any: ... + +def amax( + a: ArrayLike, + axis: Optional[_ShapeLike] = ..., + out: Optional[ndarray] = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> Any: ... 
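
The paired overloads above (for `argmax`, `argmin`, `all`, and `any`) encode a runtime distinction worth spelling out. A brief sketch of the behavior they describe, assuming plain ndarray inputs (illustrative only, not part of the vendored stub file):

import numpy as np

a = np.array([[1, 2], [3, 4]])

# With axis=None (the first overload) the reduction collapses everything,
# so the result is a zero-dimensional scalar: np.intp for argmax/argmin,
# np.bool_ for all/any.
flat_idx = np.argmax(a)       # 3, an index into the flattened array
everything = np.all(a > 0)    # np.bool_(True)

# With an explicit axis (the second overload) the result is an array,
# which is why the stubs widen the return type to Any.
col_idx = np.argmax(a, axis=0)    # array([1, 1])
row_ok = np.all(a > 1, axis=1)    # array([False,  True])
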
+ +def amin( + a: ArrayLike, + axis: Optional[_ShapeLike] = ..., + out: Optional[ndarray] = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> Any: ... + +# TODO: `np.prod()``: For object arrays `initial` does not necessarily +# have to be a numerical scalar. +# The only requirement is that it is compatible +# with the `.__mul__()` method(s) of the passed array's elements. + +# Note that the same situation holds for all wrappers around +# `np.ufunc.reduce`, e.g. `np.sum()` (`.__add__()`). +def prod( + a: ArrayLike, + axis: Optional[_ShapeLike] = ..., + dtype: DTypeLike = ..., + out: Optional[ndarray] = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> Any: ... + +def cumprod( + a: ArrayLike, + axis: Optional[int] = ..., + dtype: DTypeLike = ..., + out: Optional[ndarray] = ..., +) -> ndarray: ... + +def ndim(a: ArrayLike) -> int: ... + +def size(a: ArrayLike, axis: Optional[int] = ...) -> int: ... + +def around( + a: ArrayLike, + decimals: int = ..., + out: Optional[ndarray] = ..., +) -> Any: ... + +def mean( + a: ArrayLike, + axis: Optional[_ShapeLike] = ..., + dtype: DTypeLike = ..., + out: Optional[ndarray] = ..., + keepdims: bool = ..., +) -> Any: ... + +def std( + a: ArrayLike, + axis: Optional[_ShapeLike] = ..., + dtype: DTypeLike = ..., + out: Optional[ndarray] = ..., + ddof: int = ..., + keepdims: bool = ..., +) -> Any: ... + +def var( + a: ArrayLike, + axis: Optional[_ShapeLike] = ..., + dtype: DTypeLike = ..., + out: Optional[ndarray] = ..., + ddof: int = ..., + keepdims: bool = ..., +) -> Any: ... diff --git a/MLPY/Lib/site-packages/numpy/core/function_base.py b/MLPY/Lib/site-packages/numpy/core/function_base.py new file mode 100644 index 0000000000000000000000000000000000000000..81a59805454e42125a4f3c6a6322f3b0d8bb6811 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/function_base.py @@ -0,0 +1,529 @@ +import functools +import warnings +import operator +import types + +from . import numeric as _nx +from .numeric import result_type, NaN, asanyarray, ndim +from numpy.core.multiarray import add_docstring +from numpy.core import overrides + +__all__ = ['logspace', 'linspace', 'geomspace'] + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +def _linspace_dispatcher(start, stop, num=None, endpoint=None, retstep=None, + dtype=None, axis=None): + return (start, stop) + + +@array_function_dispatch(_linspace_dispatcher) +def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, + axis=0): + """ + Return evenly spaced numbers over a specified interval. + + Returns `num` evenly spaced samples, calculated over the + interval [`start`, `stop`]. + + The endpoint of the interval can optionally be excluded. + + .. versionchanged:: 1.16.0 + Non-scalar `start` and `stop` are now supported. + + .. versionchanged:: 1.20.0 + Values are rounded towards ``-inf`` instead of ``0`` when an + integer ``dtype`` is specified. The old behavior can + still be obtained with ``np.linspace(start, stop, num).astype(int)`` + + Parameters + ---------- + start : array_like + The starting value of the sequence. + stop : array_like + The end value of the sequence, unless `endpoint` is set to False. + In that case, the sequence consists of all but the last of ``num + 1`` + evenly spaced samples, so that `stop` is excluded. Note that the step + size changes when `endpoint` is False. + num : int, optional + Number of samples to generate. 
Default is 50. Must be non-negative.
+    endpoint : bool, optional
+        If True, `stop` is the last sample. Otherwise, it is not included.
+        Default is True.
+    retstep : bool, optional
+        If True, return (`samples`, `step`), where `step` is the spacing
+        between samples.
+    dtype : dtype, optional
+        The type of the output array. If `dtype` is not given, the data type
+        is inferred from `start` and `stop`. The inferred dtype will never be
+        an integer; `float` is chosen even if the arguments would produce an
+        array of integers.
+
+        .. versionadded:: 1.9.0
+
+    axis : int, optional
+        The axis in the result to store the samples. Relevant only if start
+        or stop are array-like. By default (0), the samples will be along a
+        new axis inserted at the beginning. Use -1 to get an axis at the end.
+
+        .. versionadded:: 1.16.0
+
+    Returns
+    -------
+    samples : ndarray
+        There are `num` equally spaced samples in the closed interval
+        ``[start, stop]`` or the half-open interval ``[start, stop)``
+        (depending on whether `endpoint` is True or False).
+    step : float, optional
+        Only returned if `retstep` is True
+
+        Size of spacing between samples.
+
+
+    See Also
+    --------
+    arange : Similar to `linspace`, but uses a step size (instead of the
+             number of samples).
+    geomspace : Similar to `linspace`, but with numbers spaced evenly on a log
+                scale (a geometric progression).
+    logspace : Similar to `geomspace`, but with the end points specified as
+               logarithms.
+
+    Examples
+    --------
+    >>> np.linspace(2.0, 3.0, num=5)
+    array([2. , 2.25, 2.5 , 2.75, 3. ])
+    >>> np.linspace(2.0, 3.0, num=5, endpoint=False)
+    array([2. , 2.2, 2.4, 2.6, 2.8])
+    >>> np.linspace(2.0, 3.0, num=5, retstep=True)
+    (array([2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
+
+    Graphical illustration:
+
+    >>> import matplotlib.pyplot as plt
+    >>> N = 8
+    >>> y = np.zeros(N)
+    >>> x1 = np.linspace(0, 10, N, endpoint=True)
+    >>> x2 = np.linspace(0, 10, N, endpoint=False)
+    >>> plt.plot(x1, y, 'o')
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.plot(x2, y + 0.5, 'o')
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.ylim([-0.5, 1])
+    (-0.5, 1)
+    >>> plt.show()
+
+    """
+    num = operator.index(num)
+    if num < 0:
+        raise ValueError("Number of samples, %s, must be non-negative." % num)
+    div = (num - 1) if endpoint else num
+
+    # Convert float/complex array scalars to float, gh-3504
+    # and make sure one can use variables that have an __array_interface__, gh-6634
+    start = asanyarray(start) * 1.0
+    stop = asanyarray(stop) * 1.0
+
+    dt = result_type(start, stop, float(num))
+    if dtype is None:
+        dtype = dt
+
+    delta = stop - start
+    y = _nx.arange(0, num, dtype=dt).reshape((-1,) + (1,) * ndim(delta))
+    # In-place multiplication y *= delta/div is faster, but prevents the multiplicand
+    # from overriding what class is produced, and thus prevents, e.g. use of Quantities,
+    # see gh-7142. Hence, we multiply in place only for standard scalar types.
+    _mult_inplace = _nx.isscalar(delta)
+    if div > 0:
+        step = delta / div
+        if _nx.any(step == 0):
+            # Special handling for denormal numbers, gh-5437
+            y /= div
+            if _mult_inplace:
+                y *= delta
+            else:
+                y = y * delta
+        else:
+            if _mult_inplace:
+                y *= step
+            else:
+                y = y * step
+    else:
+        # sequences with 0 items or 1 item with endpoint=True (i.e. div <= 0)
+        # have an undefined step
+        step = NaN
+        # Multiply with delta to allow possible override of output class.
+ y = y * delta + + y += start + + if endpoint and num > 1: + y[-1] = stop + + if axis != 0: + y = _nx.moveaxis(y, 0, axis) + + if _nx.issubdtype(dtype, _nx.integer): + _nx.floor(y, out=y) + + if retstep: + return y.astype(dtype, copy=False), step + else: + return y.astype(dtype, copy=False) + + +def _logspace_dispatcher(start, stop, num=None, endpoint=None, base=None, + dtype=None, axis=None): + return (start, stop) + + +@array_function_dispatch(_logspace_dispatcher) +def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, + axis=0): + """ + Return numbers spaced evenly on a log scale. + + In linear space, the sequence starts at ``base ** start`` + (`base` to the power of `start`) and ends with ``base ** stop`` + (see `endpoint` below). + + .. versionchanged:: 1.16.0 + Non-scalar `start` and `stop` are now supported. + + Parameters + ---------- + start : array_like + ``base ** start`` is the starting value of the sequence. + stop : array_like + ``base ** stop`` is the final value of the sequence, unless `endpoint` + is False. In that case, ``num + 1`` values are spaced over the + interval in log-space, of which all but the last (a sequence of + length `num`) are returned. + num : integer, optional + Number of samples to generate. Default is 50. + endpoint : boolean, optional + If true, `stop` is the last sample. Otherwise, it is not included. + Default is True. + base : array_like, optional + The base of the log space. The step size between the elements in + ``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform. + Default is 10.0. + dtype : dtype + The type of the output array. If `dtype` is not given, the data type + is inferred from `start` and `stop`. The inferred type will never be + an integer; `float` is chosen even if the arguments would produce an + array of integers. + axis : int, optional + The axis in the result to store the samples. Relevant only if start + or stop are array-like. By default (0), the samples will be along a + new axis inserted at the beginning. Use -1 to get an axis at the end. + + .. versionadded:: 1.16.0 + + + Returns + ------- + samples : ndarray + `num` samples, equally spaced on a log scale. + + See Also + -------- + arange : Similar to linspace, with the step size specified instead of the + number of samples. Note that, when used with a float endpoint, the + endpoint may or may not be included. + linspace : Similar to logspace, but with the samples uniformly distributed + in linear space, instead of log space. + geomspace : Similar to logspace, but with endpoints specified directly. + + Notes + ----- + Logspace is equivalent to the code + + >>> y = np.linspace(start, stop, num=num, endpoint=endpoint) + ... # doctest: +SKIP + >>> power(base, y).astype(dtype) + ... # doctest: +SKIP + + Examples + -------- + >>> np.logspace(2.0, 3.0, num=4) + array([ 100. , 215.443469 , 464.15888336, 1000. ]) + >>> np.logspace(2.0, 3.0, num=4, endpoint=False) + array([100. , 177.827941 , 316.22776602, 562.34132519]) + >>> np.logspace(2.0, 3.0, num=4, base=2.0) + array([4. , 5.0396842 , 6.34960421, 8. 
])
+
+    Graphical illustration:
+
+    >>> import matplotlib.pyplot as plt
+    >>> N = 10
+    >>> x1 = np.logspace(0.1, 1, N, endpoint=True)
+    >>> x2 = np.logspace(0.1, 1, N, endpoint=False)
+    >>> y = np.zeros(N)
+    >>> plt.plot(x1, y, 'o')
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.plot(x2, y + 0.5, 'o')
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.ylim([-0.5, 1])
+    (-0.5, 1)
+    >>> plt.show()
+
+    """
+    y = linspace(start, stop, num=num, endpoint=endpoint, axis=axis)
+    if dtype is None:
+        return _nx.power(base, y)
+    return _nx.power(base, y).astype(dtype, copy=False)
+
+
+def _geomspace_dispatcher(start, stop, num=None, endpoint=None, dtype=None,
+                          axis=None):
+    return (start, stop)
+
+
+@array_function_dispatch(_geomspace_dispatcher)
+def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0):
+    """
+    Return numbers spaced evenly on a log scale (a geometric progression).
+
+    This is similar to `logspace`, but with endpoints specified directly.
+    Each output sample is a constant multiple of the previous.
+
+    .. versionchanged:: 1.16.0
+        Non-scalar `start` and `stop` are now supported.
+
+    Parameters
+    ----------
+    start : array_like
+        The starting value of the sequence.
+    stop : array_like
+        The final value of the sequence, unless `endpoint` is False.
+        In that case, ``num + 1`` values are spaced over the
+        interval in log-space, of which all but the last (a sequence of
+        length `num`) are returned.
+    num : integer, optional
+        Number of samples to generate. Default is 50.
+    endpoint : boolean, optional
+        If true, `stop` is the last sample. Otherwise, it is not included.
+        Default is True.
+    dtype : dtype
+        The type of the output array. If `dtype` is not given, the data type
+        is inferred from `start` and `stop`. The inferred dtype will never be
+        an integer; `float` is chosen even if the arguments would produce an
+        array of integers.
+    axis : int, optional
+        The axis in the result to store the samples. Relevant only if start
+        or stop are array-like. By default (0), the samples will be along a
+        new axis inserted at the beginning. Use -1 to get an axis at the end.
+
+        .. versionadded:: 1.16.0
+
+    Returns
+    -------
+    samples : ndarray
+        `num` samples, equally spaced on a log scale.
+
+    See Also
+    --------
+    logspace : Similar to geomspace, but with endpoints specified using log
+               and base.
+    linspace : Similar to geomspace, but with arithmetic instead of geometric
+               progression.
+    arange : Similar to linspace, with the step size specified instead of the
+             number of samples.
+
+    Notes
+    -----
+    If the inputs or dtype are complex, the output will follow a logarithmic
+    spiral in the complex plane. (There are an infinite number of spirals
+    passing through two points; the output will follow the shortest such path.)
+
+    Examples
+    --------
+    >>> np.geomspace(1, 1000, num=4)
+    array([ 1., 10., 100., 1000.])
+    >>> np.geomspace(1, 1000, num=3, endpoint=False)
+    array([ 1., 10., 100.])
+    >>> np.geomspace(1, 1000, num=4, endpoint=False)
+    array([ 1. , 5.62341325, 31.6227766 , 177.827941 ])
+    >>> np.geomspace(1, 256, num=9)
+    array([ 1., 2., 4., 8., 16., 32., 64., 128., 256.])
+
+    Note that the above may not produce exact integers:
+
+    >>> np.geomspace(1, 256, num=9, dtype=int)
+    array([ 1, 2, 4, 7, 16, 32, 63, 127, 256])
+    >>> np.around(np.geomspace(1, 256, num=9)).astype(int)
+    array([ 1, 2, 4, 8, 16, 32, 64, 128, 256])
+
+    Negative, decreasing, and complex inputs are allowed:
+
+    >>> np.geomspace(1000, 1, num=4)
+    array([1000., 100., 10., 1.])
+    >>> np.geomspace(-1000, -1, num=4)
+    array([-1000., -100., -10., -1.])
+    >>> np.geomspace(1j, 1000j, num=4)  # Straight line
+    array([0. +1.j, 0. +10.j, 0. +100.j, 0.+1000.j])
+    >>> np.geomspace(-1+0j, 1+0j, num=5)  # Circle
+    array([-1.00000000e+00+1.22464680e-16j, -7.07106781e-01+7.07106781e-01j,
+            6.12323400e-17+1.00000000e+00j,  7.07106781e-01+7.07106781e-01j,
+            1.00000000e+00+0.00000000e+00j])
+
+    Graphical illustration of `endpoint` parameter:
+
+    >>> import matplotlib.pyplot as plt
+    >>> N = 10
+    >>> y = np.zeros(N)
+    >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=True), y + 1, 'o')
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=False), y + 2, 'o')
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.axis([0.5, 2000, 0, 3])
+    [0.5, 2000, 0, 3]
+    >>> plt.grid(True, color='0.7', linestyle='-', which='both', axis='both')
+    >>> plt.show()
+
+    """
+    start = asanyarray(start)
+    stop = asanyarray(stop)
+    if _nx.any(start == 0) or _nx.any(stop == 0):
+        raise ValueError('Geometric sequence cannot include zero')
+
+    dt = result_type(start, stop, float(num), _nx.zeros((), dtype))
+    if dtype is None:
+        dtype = dt
+    else:
+        # complex to dtype('complex128'), for instance
+        dtype = _nx.dtype(dtype)
+
+    # Promote both arguments to the same dtype in case, for instance, one is
+    # complex and another is negative and log would produce NaN otherwise.
+    # Copy since we may change things in-place further down.
+    start = start.astype(dt, copy=True)
+    stop = stop.astype(dt, copy=True)
+
+    out_sign = _nx.ones(_nx.broadcast(start, stop).shape, dt)
+    # Avoid negligible real or imaginary parts in output by rotating to
+    # positive real, calculating, then undoing rotation
+    if _nx.issubdtype(dt, _nx.complexfloating):
+        all_imag = (start.real == 0.) & (stop.real == 0.)
+        if _nx.any(all_imag):
+            start[all_imag] = start[all_imag].imag
+            stop[all_imag] = stop[all_imag].imag
+            out_sign[all_imag] = 1j
+
+    both_negative = (_nx.sign(start) == -1) & (_nx.sign(stop) == -1)
+    if _nx.any(both_negative):
+        _nx.negative(start, out=start, where=both_negative)
+        _nx.negative(stop, out=stop, where=both_negative)
+        _nx.negative(out_sign, out=out_sign, where=both_negative)
+
+    log_start = _nx.log10(start)
+    log_stop = _nx.log10(stop)
+    result = logspace(log_start, log_stop, num=num,
+                      endpoint=endpoint, base=10.0, dtype=dtype)
+
+    # Make sure the endpoints match the start and stop arguments. This is
+    # necessary because np.exp(np.log(x)) is not necessarily equal to x.
+    if num > 0:
+        result[0] = start
+        if num > 1 and endpoint:
+            result[-1] = stop
+
+    result = out_sign * result
+
+    if axis != 0:
+        result = _nx.moveaxis(result, 0, axis)
+
+    return result.astype(dtype, copy=False)
+
+
+def _needs_add_docstring(obj):
+    """
+    Returns true if the only way to set the docstring of `obj` from python is
+    via add_docstring.
+
+    This function errs on the side of being overly conservative.
+ """ + Py_TPFLAGS_HEAPTYPE = 1 << 9 + + if isinstance(obj, (types.FunctionType, types.MethodType, property)): + return False + + if isinstance(obj, type) and obj.__flags__ & Py_TPFLAGS_HEAPTYPE: + return False + + return True + + +def _add_docstring(obj, doc, warn_on_python): + if warn_on_python and not _needs_add_docstring(obj): + warnings.warn( + "add_newdoc was used on a pure-python object {}. " + "Prefer to attach it directly to the source." + .format(obj), + UserWarning, + stacklevel=3) + try: + add_docstring(obj, doc) + except Exception: + pass + + +def add_newdoc(place, obj, doc, warn_on_python=True): + """ + Add documentation to an existing object, typically one defined in C + + The purpose is to allow easier editing of the docstrings without requiring + a re-compile. This exists primarily for internal use within numpy itself. + + Parameters + ---------- + place : str + The absolute name of the module to import from + obj : str + The name of the object to add documentation to, typically a class or + function name + doc : {str, Tuple[str, str], List[Tuple[str, str]]} + If a string, the documentation to apply to `obj` + + If a tuple, then the first element is interpreted as an attribute of + `obj` and the second as the docstring to apply - ``(method, docstring)`` + + If a list, then each element of the list should be a tuple of length + two - ``[(method1, docstring1), (method2, docstring2), ...]`` + warn_on_python : bool + If True, the default, emit `UserWarning` if this is used to attach + documentation to a pure-python object. + + Notes + ----- + This routine never raises an error if the docstring can't be written, but + will raise an error if the object being documented does not exist. + + This routine cannot modify read-only docstrings, as appear + in new-style classes or built-in functions. Because this + routine never raises an error the caller must check manually + that the docstrings were changed. + + Since this function grabs the ``char *`` from a c-level str object and puts + it into the ``tp_doc`` slot of the type of `obj`, it violates a number of + C-API best-practices, by: + + - modifying a `PyTypeObject` after calling `PyType_Ready` + - calling `Py_INCREF` on the str and losing the reference, so the str + will never be released + + If possible it should be avoided. 
+ """ + new = getattr(__import__(place, globals(), {}, [obj]), obj) + if isinstance(doc, str): + _add_docstring(new, doc.strip(), warn_on_python) + elif isinstance(doc, tuple): + attr, docstring = doc + _add_docstring(getattr(new, attr), docstring.strip(), warn_on_python) + elif isinstance(doc, list): + for attr, docstring in doc: + _add_docstring(getattr(new, attr), docstring.strip(), warn_on_python) diff --git a/MLPY/Lib/site-packages/numpy/core/function_base.pyi b/MLPY/Lib/site-packages/numpy/core/function_base.pyi new file mode 100644 index 0000000000000000000000000000000000000000..291936ae9ecca1b45e27a26e81c36201e7106b26 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/function_base.pyi @@ -0,0 +1,55 @@ +import sys +from typing import overload, Tuple, Union, Sequence, Any + +from numpy import ndarray +from numpy.typing import ArrayLike, DTypeLike, _SupportsArray, _NumberLike_co + +if sys.version_info >= (3, 8): + from typing import SupportsIndex, Literal +else: + from typing_extensions import SupportsIndex, Literal + +# TODO: wait for support for recursive types +_ArrayLikeNested = Sequence[Sequence[Any]] +_ArrayLikeNumber = Union[ + _NumberLike_co, Sequence[_NumberLike_co], ndarray, _SupportsArray, _ArrayLikeNested +] +@overload +def linspace( + start: _ArrayLikeNumber, + stop: _ArrayLikeNumber, + num: SupportsIndex = ..., + endpoint: bool = ..., + retstep: Literal[False] = ..., + dtype: DTypeLike = ..., + axis: SupportsIndex = ..., +) -> ndarray: ... +@overload +def linspace( + start: _ArrayLikeNumber, + stop: _ArrayLikeNumber, + num: SupportsIndex = ..., + endpoint: bool = ..., + retstep: Literal[True] = ..., + dtype: DTypeLike = ..., + axis: SupportsIndex = ..., +) -> Tuple[ndarray, Any]: ... + +def logspace( + start: _ArrayLikeNumber, + stop: _ArrayLikeNumber, + num: SupportsIndex = ..., + endpoint: bool = ..., + base: _ArrayLikeNumber = ..., + dtype: DTypeLike = ..., + axis: SupportsIndex = ..., +) -> ndarray: ... + +def geomspace( + start: _ArrayLikeNumber, + stop: _ArrayLikeNumber, + num: SupportsIndex = ..., + endpoint: bool = ..., + dtype: DTypeLike = ..., + axis: SupportsIndex = ..., +) -> ndarray: ... 
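
As the `logspace` Notes above state, `logspace` is equivalent to exponentiating a `linspace`, and `geomspace` is the same construction with the endpoints given directly rather than as logarithms. A quick sketch verifying that relationship (editorial, not part of the vendored files):

import numpy as np

# logspace(start, stop) == base ** linspace(start, stop), per the Notes.
y = np.linspace(2.0, 3.0, num=4)
assert np.allclose(np.logspace(2.0, 3.0, num=4), np.power(10.0, y))

# geomspace takes the endpoints themselves (100 and 1000 here), so it
# matches logspace called with their base-10 logarithms (2 and 3).
assert np.allclose(np.geomspace(100.0, 1000.0, num=4),
                   np.logspace(2.0, 3.0, num=4))

# Each geomspace sample is a constant multiple of the previous one.
g = np.geomspace(1.0, 8.0, num=4)     # array([1., 2., 4., 8.])
assert np.allclose(g[1:] / g[:-1], 2.0)
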
diff --git a/MLPY/Lib/site-packages/numpy/core/generate_numpy_api.py b/MLPY/Lib/site-packages/numpy/core/generate_numpy_api.py new file mode 100644 index 0000000000000000000000000000000000000000..96378df82bf97d0c119f0ff4c79810b327da7b10 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/generate_numpy_api.py @@ -0,0 +1,239 @@ +import os +import genapi + +from genapi import \ + TypeApi, GlobalVarApi, FunctionApi, BoolValuesApi + +import numpy_api + +# use annotated api when running under cpychecker +h_template = r""" +#if defined(_MULTIARRAYMODULE) || defined(WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE) + +typedef struct { + PyObject_HEAD + npy_bool obval; +} PyBoolScalarObject; + +extern NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type; +extern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type; +extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; + +%s + +#else + +#if defined(PY_ARRAY_UNIQUE_SYMBOL) +#define PyArray_API PY_ARRAY_UNIQUE_SYMBOL +#endif + +#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY) +extern void **PyArray_API; +#else +#if defined(PY_ARRAY_UNIQUE_SYMBOL) +void **PyArray_API; +#else +static void **PyArray_API=NULL; +#endif +#endif + +%s + +#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT) +static int +_import_array(void) +{ + int st; + PyObject *numpy = PyImport_ImportModule("numpy.core._multiarray_umath"); + PyObject *c_api = NULL; + + if (numpy == NULL) { + return -1; + } + c_api = PyObject_GetAttrString(numpy, "_ARRAY_API"); + Py_DECREF(numpy); + if (c_api == NULL) { + PyErr_SetString(PyExc_AttributeError, "_ARRAY_API not found"); + return -1; + } + + if (!PyCapsule_CheckExact(c_api)) { + PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCapsule object"); + Py_DECREF(c_api); + return -1; + } + PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL); + Py_DECREF(c_api); + if (PyArray_API == NULL) { + PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is NULL pointer"); + return -1; + } + + /* Perform runtime check of C API version */ + if (NPY_VERSION != PyArray_GetNDArrayCVersion()) { + PyErr_Format(PyExc_RuntimeError, "module compiled against "\ + "ABI version 0x%%x but this version of numpy is 0x%%x", \ + (int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion()); + return -1; + } + if (NPY_FEATURE_VERSION > PyArray_GetNDArrayCFeatureVersion()) { + PyErr_Format(PyExc_RuntimeError, "module compiled against "\ + "API version 0x%%x but this version of numpy is 0x%%x", \ + (int) NPY_FEATURE_VERSION, (int) PyArray_GetNDArrayCFeatureVersion()); + return -1; + } + + /* + * Perform runtime check of endianness and check it matches the one set by + * the headers (npy_endian.h) as a safeguard + */ + st = PyArray_GetEndianness(); + if (st == NPY_CPU_UNKNOWN_ENDIAN) { + PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as unknown endian"); + return -1; + } +#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN + if (st != NPY_CPU_BIG) { + PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\ + "big endian, but detected different endianness at runtime"); + return -1; + } +#elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN + if (st != NPY_CPU_LITTLE) { + PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\ + "little endian, but detected different endianness at runtime"); + return -1; + } +#endif + + return 0; +} + +#define import_array() {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return NULL; } } + +#define import_array1(ret) {if (_import_array() < 0) {PyErr_Print(); 
PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return ret; } } + +#define import_array2(msg, ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, msg); return ret; } } + +#endif + +#endif +""" + + +c_template = r""" +/* These pointers will be stored in the C-object for use in other + extension modules +*/ + +void *PyArray_API[] = { +%s +}; +""" + +c_api_header = """ +=========== +NumPy C-API +=========== +""" + +def generate_api(output_dir, force=False): + basename = 'multiarray_api' + + h_file = os.path.join(output_dir, '__%s.h' % basename) + c_file = os.path.join(output_dir, '__%s.c' % basename) + d_file = os.path.join(output_dir, '%s.txt' % basename) + targets = (h_file, c_file, d_file) + + sources = numpy_api.multiarray_api + + if (not force and not genapi.should_rebuild(targets, [numpy_api.__file__, __file__])): + return targets + else: + do_generate_api(targets, sources) + + return targets + +def do_generate_api(targets, sources): + header_file = targets[0] + c_file = targets[1] + doc_file = targets[2] + + global_vars = sources[0] + scalar_bool_values = sources[1] + types_api = sources[2] + multiarray_funcs = sources[3] + + multiarray_api = sources[:] + + module_list = [] + extension_list = [] + init_list = [] + + # Check multiarray api indexes + multiarray_api_index = genapi.merge_api_dicts(multiarray_api) + genapi.check_api_dict(multiarray_api_index) + + numpyapi_list = genapi.get_api_functions('NUMPY_API', + multiarray_funcs) + + # FIXME: ordered_funcs_api is unused + ordered_funcs_api = genapi.order_dict(multiarray_funcs) + + # Create dict name -> *Api instance + api_name = 'PyArray_API' + multiarray_api_dict = {} + for f in numpyapi_list: + name = f.name + index = multiarray_funcs[name][0] + annotations = multiarray_funcs[name][1:] + multiarray_api_dict[f.name] = FunctionApi(f.name, index, annotations, + f.return_type, + f.args, api_name) + + for name, val in global_vars.items(): + index, type = val + multiarray_api_dict[name] = GlobalVarApi(name, index, type, api_name) + + for name, val in scalar_bool_values.items(): + index = val[0] + multiarray_api_dict[name] = BoolValuesApi(name, index, api_name) + + for name, val in types_api.items(): + index = val[0] + internal_type = None if len(val) == 1 else val[1] + multiarray_api_dict[name] = TypeApi( + name, index, 'PyTypeObject', api_name, internal_type) + + if len(multiarray_api_dict) != len(multiarray_api_index): + keys_dict = set(multiarray_api_dict.keys()) + keys_index = set(multiarray_api_index.keys()) + raise AssertionError( + "Multiarray API size mismatch - " + "index has extra keys {}, dict has extra keys {}" + .format(keys_index - keys_dict, keys_dict - keys_index) + ) + + extension_list = [] + for name, index in genapi.order_dict(multiarray_api_index): + api_item = multiarray_api_dict[name] + extension_list.append(api_item.define_from_array_api_string()) + init_list.append(api_item.array_api_define()) + module_list.append(api_item.internal_define()) + + # Write to header + s = h_template % ('\n'.join(module_list), '\n'.join(extension_list)) + genapi.write_file(header_file, s) + + # Write to c-code + s = c_template % ',\n'.join(init_list) + genapi.write_file(c_file, s) + + # write to documentation + s = c_api_header + for func in numpyapi_list: + s += func.to_ReST() + s += '\n\n' + genapi.write_file(doc_file, s) + + return targets diff --git a/MLPY/Lib/site-packages/numpy/core/getlimits.py b/MLPY/Lib/site-packages/numpy/core/getlimits.py new file mode 100644 index 
0000000000000000000000000000000000000000..ae498cd14b0da7109648797606853e4f9c2b3ccb --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/getlimits.py @@ -0,0 +1,564 @@ +"""Machine limits for Float32 and Float64 and (long double) if available... + +""" +__all__ = ['finfo', 'iinfo'] + +import warnings + +from .machar import MachAr +from .overrides import set_module +from . import numeric +from . import numerictypes as ntypes +from .numeric import array, inf +from .umath import log10, exp2 +from . import umath + + +def _fr0(a): + """fix rank-0 --> rank-1""" + if a.ndim == 0: + a = a.copy() + a.shape = (1,) + return a + + +def _fr1(a): + """fix rank > 0 --> rank-0""" + if a.size == 1: + a = a.copy() + a.shape = () + return a + +class MachArLike: + """ Object to simulate MachAr instance """ + + def __init__(self, + ftype, + *, eps, epsneg, huge, tiny, ibeta, **kwargs): + params = _MACHAR_PARAMS[ftype] + float_conv = lambda v: array([v], ftype) + float_to_float = lambda v : _fr1(float_conv(v)) + float_to_str = lambda v: (params['fmt'] % array(_fr0(v)[0], ftype)) + + self.title = params['title'] + # Parameter types same as for discovered MachAr object. + self.epsilon = self.eps = float_to_float(eps) + self.epsneg = float_to_float(epsneg) + self.xmax = self.huge = float_to_float(huge) + self.xmin = self.tiny = float_to_float(tiny) + self.ibeta = params['itype'](ibeta) + self.__dict__.update(kwargs) + self.precision = int(-log10(self.eps)) + self.resolution = float_to_float(float_conv(10) ** (-self.precision)) + self._str_eps = float_to_str(self.eps) + self._str_epsneg = float_to_str(self.epsneg) + self._str_xmin = float_to_str(self.xmin) + self._str_xmax = float_to_str(self.xmax) + self._str_resolution = float_to_str(self.resolution) + +_convert_to_float = { + ntypes.csingle: ntypes.single, + ntypes.complex_: ntypes.float_, + ntypes.clongfloat: ntypes.longfloat + } + +# Parameters for creating MachAr / MachAr-like objects +_title_fmt = 'numpy {} precision floating point number' +_MACHAR_PARAMS = { + ntypes.double: dict( + itype = ntypes.int64, + fmt = '%24.16e', + title = _title_fmt.format('double')), + ntypes.single: dict( + itype = ntypes.int32, + fmt = '%15.7e', + title = _title_fmt.format('single')), + ntypes.longdouble: dict( + itype = ntypes.longlong, + fmt = '%s', + title = _title_fmt.format('long double')), + ntypes.half: dict( + itype = ntypes.int16, + fmt = '%12.5e', + title = _title_fmt.format('half'))} + +# Key to identify the floating point type. Key is result of +# ftype('-0.1').newbyteorder('<').tobytes() +# See: +# https://perl5.git.perl.org/perl.git/blob/3118d7d684b56cbeb702af874f4326683c45f045:/Configure +_KNOWN_TYPES = {} +def _register_type(machar, bytepat): + _KNOWN_TYPES[bytepat] = machar +_float_ma = {} + +def _register_known_types(): + # Known parameters for float16 + # See docstring of MachAr class for description of parameters. 
+ f16 = ntypes.float16 + float16_ma = MachArLike(f16, + machep=-10, + negep=-11, + minexp=-14, + maxexp=16, + it=10, + iexp=5, + ibeta=2, + irnd=5, + ngrd=0, + eps=exp2(f16(-10)), + epsneg=exp2(f16(-11)), + huge=f16(65504), + tiny=f16(2 ** -14)) + _register_type(float16_ma, b'f\xae') + _float_ma[16] = float16_ma + + # Known parameters for float32 + f32 = ntypes.float32 + float32_ma = MachArLike(f32, + machep=-23, + negep=-24, + minexp=-126, + maxexp=128, + it=23, + iexp=8, + ibeta=2, + irnd=5, + ngrd=0, + eps=exp2(f32(-23)), + epsneg=exp2(f32(-24)), + huge=f32((1 - 2 ** -24) * 2**128), + tiny=exp2(f32(-126))) + _register_type(float32_ma, b'\xcd\xcc\xcc\xbd') + _float_ma[32] = float32_ma + + # Known parameters for float64 + f64 = ntypes.float64 + epsneg_f64 = 2.0 ** -53.0 + tiny_f64 = 2.0 ** -1022.0 + float64_ma = MachArLike(f64, + machep=-52, + negep=-53, + minexp=-1022, + maxexp=1024, + it=52, + iexp=11, + ibeta=2, + irnd=5, + ngrd=0, + eps=2.0 ** -52.0, + epsneg=epsneg_f64, + huge=(1.0 - epsneg_f64) / tiny_f64 * f64(4), + tiny=tiny_f64) + _register_type(float64_ma, b'\x9a\x99\x99\x99\x99\x99\xb9\xbf') + _float_ma[64] = float64_ma + + # Known parameters for IEEE 754 128-bit binary float + ld = ntypes.longdouble + epsneg_f128 = exp2(ld(-113)) + tiny_f128 = exp2(ld(-16382)) + # Ignore runtime error when this is not f128 + with numeric.errstate(all='ignore'): + huge_f128 = (ld(1) - epsneg_f128) / tiny_f128 * ld(4) + float128_ma = MachArLike(ld, + machep=-112, + negep=-113, + minexp=-16382, + maxexp=16384, + it=112, + iexp=15, + ibeta=2, + irnd=5, + ngrd=0, + eps=exp2(ld(-112)), + epsneg=epsneg_f128, + huge=huge_f128, + tiny=tiny_f128) + # IEEE 754 128-bit binary float + _register_type(float128_ma, + b'\x9a\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\xfb\xbf') + _register_type(float128_ma, + b'\x9a\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\xfb\xbf') + _float_ma[128] = float128_ma + + # Known parameters for float80 (Intel 80-bit extended precision) + epsneg_f80 = exp2(ld(-64)) + tiny_f80 = exp2(ld(-16382)) + # Ignore runtime error when this is not f80 + with numeric.errstate(all='ignore'): + huge_f80 = (ld(1) - epsneg_f80) / tiny_f80 * ld(4) + float80_ma = MachArLike(ld, + machep=-63, + negep=-64, + minexp=-16382, + maxexp=16384, + it=63, + iexp=15, + ibeta=2, + irnd=5, + ngrd=0, + eps=exp2(ld(-63)), + epsneg=epsneg_f80, + huge=huge_f80, + tiny=tiny_f80) + # float80, first 10 bytes containing actual storage + _register_type(float80_ma, b'\xcd\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf') + _float_ma[80] = float80_ma + + # Guessed / known parameters for double double; see: + # https://en.wikipedia.org/wiki/Quadruple-precision_floating-point_format#Double-double_arithmetic + # These numbers have the same exponent range as float64, but extended number of + # digits in the significand. + huge_dd = (umath.nextafter(ld(inf), ld(0)) + if hasattr(umath, 'nextafter') # Missing on some platforms? + else float64_ma.huge) + float_dd_ma = MachArLike(ld, + machep=-105, + negep=-106, + minexp=-1022, + maxexp=1024, + it=105, + iexp=11, + ibeta=2, + irnd=5, + ngrd=0, + eps=exp2(ld(-105)), + epsneg= exp2(ld(-106)), + huge=huge_dd, + tiny=exp2(ld(-1022))) + # double double; low, high order (e.g. PPC 64) + _register_type(float_dd_ma, + b'\x9a\x99\x99\x99\x99\x99Y<\x9a\x99\x99\x99\x99\x99\xb9\xbf') + # double double; high, low order (e.g. 
PPC 64 le) + _register_type(float_dd_ma, + b'\x9a\x99\x99\x99\x99\x99\xb9\xbf\x9a\x99\x99\x99\x99\x99Y<') + _float_ma['dd'] = float_dd_ma + + +def _get_machar(ftype): + """ Get MachAr instance or MachAr-like instance + + Get parameters for floating point type, by first trying signatures of + various known floating point types, then, if none match, attempting to + identify parameters by analysis. + + Parameters + ---------- + ftype : class + Numpy floating point type class (e.g. ``np.float64``) + + Returns + ------- + ma_like : instance of :class:`MachAr` or :class:`MachArLike` + Object giving floating point parameters for `ftype`. + + Warns + ----- + UserWarning + If the binary signature of the float type is not in the dictionary of + known float types. + """ + params = _MACHAR_PARAMS.get(ftype) + if params is None: + raise ValueError(repr(ftype)) + # Detect known / suspected types + key = ftype('-0.1').newbyteorder('<').tobytes() + ma_like = None + if ftype == ntypes.longdouble: + # Could be 80 bit == 10 byte extended precision, where last bytes can + # be random garbage. + # Comparing first 10 bytes to pattern first to avoid branching on the + # random garbage. + ma_like = _KNOWN_TYPES.get(key[:10]) + if ma_like is None: + ma_like = _KNOWN_TYPES.get(key) + if ma_like is not None: + return ma_like + # Fall back to parameter discovery + warnings.warn( + 'Signature {} for {} does not match any known type: ' + 'falling back to type probe function'.format(key, ftype), + UserWarning, stacklevel=2) + return _discovered_machar(ftype) + + +def _discovered_machar(ftype): + """ Create MachAr instance with found information on float types + """ + params = _MACHAR_PARAMS[ftype] + return MachAr(lambda v: array([v], ftype), + lambda v:_fr0(v.astype(params['itype']))[0], + lambda v:array(_fr0(v)[0], ftype), + lambda v: params['fmt'] % array(_fr0(v)[0], ftype), + params['title']) + + +@set_module('numpy') +class finfo: + """ + finfo(dtype) + + Machine limits for floating point types. + + Attributes + ---------- + bits : int + The number of bits occupied by the type. + eps : float + The difference between 1.0 and the next smallest representable float + larger than 1.0. For example, for 64-bit binary floats in the IEEE-754 + standard, ``eps = 2**-52``, approximately 2.22e-16. + epsneg : float + The difference between 1.0 and the next smallest representable float + less than 1.0. For example, for 64-bit binary floats in the IEEE-754 + standard, ``epsneg = 2**-53``, approximately 1.11e-16. + iexp : int + The number of bits in the exponent portion of the floating point + representation. + machar : MachAr + The object which calculated these parameters and holds more + detailed information. + machep : int + The exponent that yields `eps`. + max : floating point number of the appropriate type + The largest representable number. + maxexp : int + The smallest positive power of the base (2) that causes overflow. + min : floating point number of the appropriate type + The smallest representable number, typically ``-max``. + minexp : int + The most negative power of the base (2) consistent with there + being no leading 0's in the mantissa. + negep : int + The exponent that yields `epsneg`. + nexp : int + The number of bits in the exponent including its sign and bias. + nmant : int + The number of bits in the mantissa. + precision : int + The approximate number of decimal digits to which this kind of + float is precise. 
+ resolution : floating point number of the appropriate type + The approximate decimal resolution of this type, i.e., + ``10**-precision``. + tiny : float + The smallest positive floating point number with full precision + (see Notes). + + Parameters + ---------- + dtype : float, dtype, or instance + Kind of floating point data-type about which to get information. + + See Also + -------- + MachAr : The implementation of the tests that produce this information. + iinfo : The equivalent for integer data types. + spacing : The distance between a value and the nearest adjacent number + nextafter : The next floating point value after x1 towards x2 + + Notes + ----- + For developers of NumPy: do not instantiate this at the module level. + The initial calculation of these parameters is expensive and negatively + impacts import times. These objects are cached, so calling ``finfo()`` + repeatedly inside your functions is not a problem. + + Note that ``tiny`` is not actually the smallest positive representable + value in a NumPy floating point type. As in the IEEE-754 standard [1]_, + NumPy floating point types make use of subnormal numbers to fill the + gap between 0 and ``tiny``. However, subnormal numbers may have + significantly reduced precision [2]_. + + References + ---------- + .. [1] IEEE Standard for Floating-Point Arithmetic, IEEE Std 754-2008, + pp.1-70, 2008, http://www.doi.org/10.1109/IEEESTD.2008.4610935 + .. [2] Wikipedia, "Denormal Numbers", + https://en.wikipedia.org/wiki/Denormal_number + """ + + _finfo_cache = {} + + def __new__(cls, dtype): + try: + dtype = numeric.dtype(dtype) + except TypeError: + # In case a float instance was given + dtype = numeric.dtype(type(dtype)) + + obj = cls._finfo_cache.get(dtype, None) + if obj is not None: + return obj + dtypes = [dtype] + newdtype = numeric.obj2sctype(dtype) + if newdtype is not dtype: + dtypes.append(newdtype) + dtype = newdtype + if not issubclass(dtype, numeric.inexact): + raise ValueError("data type %r not inexact" % (dtype)) + obj = cls._finfo_cache.get(dtype, None) + if obj is not None: + return obj + if not issubclass(dtype, numeric.floating): + newdtype = _convert_to_float[dtype] + if newdtype is not dtype: + dtypes.append(newdtype) + dtype = newdtype + obj = cls._finfo_cache.get(dtype, None) + if obj is not None: + return obj + obj = object.__new__(cls)._init(dtype) + for dt in dtypes: + cls._finfo_cache[dt] = obj + return obj + + def _init(self, dtype): + self.dtype = numeric.dtype(dtype) + machar = _get_machar(dtype) + + for word in ['precision', 'iexp', + 'maxexp', 'minexp', 'negep', + 'machep']: + setattr(self, word, getattr(machar, word)) + for word in ['tiny', 'resolution', 'epsneg']: + setattr(self, word, getattr(machar, word).flat[0]) + self.bits = self.dtype.itemsize * 8 + self.max = machar.huge.flat[0] + self.min = -self.max + self.eps = machar.eps.flat[0] + self.nexp = machar.iexp + self.nmant = machar.it + self.machar = machar + self._str_tiny = machar._str_xmin.strip() + self._str_max = machar._str_xmax.strip() + self._str_epsneg = machar._str_epsneg.strip() + self._str_eps = machar._str_eps.strip() + self._str_resolution = machar._str_resolution.strip() + return self + + def __str__(self): + fmt = ( + 'Machine parameters for %(dtype)s\n' + '---------------------------------------------------------------\n' + 'precision = %(precision)3s resolution = %(_str_resolution)s\n' + 'machep = %(machep)6s eps = %(_str_eps)s\n' + 'negep = %(negep)6s epsneg = %(_str_epsneg)s\n' + 'minexp = %(minexp)6s tiny = 
%(_str_tiny)s\n' + 'maxexp = %(maxexp)6s max = %(_str_max)s\n' + 'nexp = %(nexp)6s min = -max\n' + '---------------------------------------------------------------\n' + ) + return fmt % self.__dict__ + + def __repr__(self): + c = self.__class__.__name__ + d = self.__dict__.copy() + d['klass'] = c + return (("%(klass)s(resolution=%(resolution)s, min=-%(_str_max)s," + " max=%(_str_max)s, dtype=%(dtype)s)") % d) + + +@set_module('numpy') +class iinfo: + """ + iinfo(type) + + Machine limits for integer types. + + Attributes + ---------- + bits : int + The number of bits occupied by the type. + min : int + The smallest integer expressible by the type. + max : int + The largest integer expressible by the type. + + Parameters + ---------- + int_type : integer type, dtype, or instance + The kind of integer data type to get information about. + + See Also + -------- + finfo : The equivalent for floating point data types. + + Examples + -------- + With types: + + >>> ii16 = np.iinfo(np.int16) + >>> ii16.min + -32768 + >>> ii16.max + 32767 + >>> ii32 = np.iinfo(np.int32) + >>> ii32.min + -2147483648 + >>> ii32.max + 2147483647 + + With instances: + + >>> ii32 = np.iinfo(np.int32(10)) + >>> ii32.min + -2147483648 + >>> ii32.max + 2147483647 + + """ + + _min_vals = {} + _max_vals = {} + + def __init__(self, int_type): + try: + self.dtype = numeric.dtype(int_type) + except TypeError: + self.dtype = numeric.dtype(type(int_type)) + self.kind = self.dtype.kind + self.bits = self.dtype.itemsize * 8 + self.key = "%s%d" % (self.kind, self.bits) + if self.kind not in 'iu': + raise ValueError("Invalid integer data type %r." % (self.kind,)) + + @property + def min(self): + """Minimum value of given dtype.""" + if self.kind == 'u': + return 0 + else: + try: + val = iinfo._min_vals[self.key] + except KeyError: + val = int(-(1 << (self.bits-1))) + iinfo._min_vals[self.key] = val + return val + + @property + def max(self): + """Maximum value of given dtype.""" + try: + val = iinfo._max_vals[self.key] + except KeyError: + if self.kind == 'u': + val = int((1 << self.bits) - 1) + else: + val = int((1 << (self.bits-1)) - 1) + iinfo._max_vals[self.key] = val + return val + + def __str__(self): + """String representation.""" + fmt = ( + 'Machine parameters for %(dtype)s\n' + '---------------------------------------------------------------\n' + 'min = %(min)s\n' + 'max = %(max)s\n' + '---------------------------------------------------------------\n' + ) + return fmt % {'dtype': self.dtype, 'min': self.min, 'max': self.max} + + def __repr__(self): + return "%s(min=%s, max=%s, dtype=%s)" % (self.__class__.__name__, + self.min, self.max, self.dtype) diff --git a/MLPY/Lib/site-packages/numpy/core/include/numpy/__multiarray_api.h b/MLPY/Lib/site-packages/numpy/core/include/numpy/__multiarray_api.h new file mode 100644 index 0000000000000000000000000000000000000000..93ee0227ea19bdae5322c3b37f7c535d756fc22e --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/include/numpy/__multiarray_api.h @@ -0,0 +1,1540 @@ + +#if defined(_MULTIARRAYMODULE) || defined(WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE) + +typedef struct { + PyObject_HEAD + npy_bool obval; +} PyBoolScalarObject; + +extern NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type; +extern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type; +extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; + +NPY_NO_EXPORT unsigned int PyArray_GetNDArrayCVersion \ + (void); +extern NPY_NO_EXPORT PyTypeObject PyBigArray_Type; + +extern NPY_NO_EXPORT PyTypeObject 
PyArray_Type; + +extern NPY_NO_EXPORT PyArray_DTypeMeta PyArrayDescr_TypeFull; +#define PyArrayDescr_Type (*(PyTypeObject *)(&PyArrayDescr_TypeFull)) + +extern NPY_NO_EXPORT PyTypeObject PyArrayFlags_Type; + +extern NPY_NO_EXPORT PyTypeObject PyArrayIter_Type; + +extern NPY_NO_EXPORT PyTypeObject PyArrayMultiIter_Type; + +extern NPY_NO_EXPORT int NPY_NUMUSERTYPES; + +extern NPY_NO_EXPORT PyTypeObject PyBoolArrType_Type; + +extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; + +extern NPY_NO_EXPORT PyTypeObject PyGenericArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyNumberArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyIntegerArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PySignedIntegerArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyUnsignedIntegerArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyInexactArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyFloatingArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyComplexFloatingArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyFlexibleArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyCharacterArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyByteArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyShortArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyIntArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyLongArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyLongLongArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyUByteArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyUShortArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyUIntArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyULongArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyULongLongArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyFloatArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyDoubleArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyLongDoubleArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyCFloatArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyCDoubleArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyCLongDoubleArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyObjectArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyStringArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyUnicodeArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyVoidArrType_Type; + +NPY_NO_EXPORT int PyArray_SetNumericOps \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_GetNumericOps \ + (void); +NPY_NO_EXPORT int PyArray_INCREF \ + (PyArrayObject *); +NPY_NO_EXPORT int PyArray_XDECREF \ + (PyArrayObject *); +NPY_NO_EXPORT void PyArray_SetStringFunction \ + (PyObject *, int); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromType \ + (int); +NPY_NO_EXPORT PyObject * PyArray_TypeObjectFromType \ + (int); +NPY_NO_EXPORT char * PyArray_Zero \ + (PyArrayObject *); +NPY_NO_EXPORT char * PyArray_One \ + (PyArrayObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) NPY_GCC_NONNULL(2) PyObject * PyArray_CastToType \ + (PyArrayObject *, PyArray_Descr *, int); +NPY_NO_EXPORT int PyArray_CastTo \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT int PyArray_CastAnyTo \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT int PyArray_CanCastSafely \ + (int, int); +NPY_NO_EXPORT npy_bool PyArray_CanCastTo \ + (PyArray_Descr *, PyArray_Descr *); +NPY_NO_EXPORT int PyArray_ObjectType \ + (PyObject *, int); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromObject \ + (PyObject *, PyArray_Descr *); +NPY_NO_EXPORT PyArrayObject ** PyArray_ConvertToCommonType \ + (PyObject *, int *); +NPY_NO_EXPORT PyArray_Descr * 
PyArray_DescrFromScalar \ + (PyObject *); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromTypeObject \ + (PyObject *); +NPY_NO_EXPORT npy_intp PyArray_Size \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Scalar \ + (void *, PyArray_Descr *, PyObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromScalar \ + (PyObject *, PyArray_Descr *); +NPY_NO_EXPORT void PyArray_ScalarAsCtype \ + (PyObject *, void *); +NPY_NO_EXPORT int PyArray_CastScalarToCtype \ + (PyObject *, void *, PyArray_Descr *); +NPY_NO_EXPORT int PyArray_CastScalarDirect \ + (PyObject *, PyArray_Descr *, void *, int); +NPY_NO_EXPORT PyObject * PyArray_ScalarFromObject \ + (PyObject *); +NPY_NO_EXPORT PyArray_VectorUnaryFunc * PyArray_GetCastFunc \ + (PyArray_Descr *, int); +NPY_NO_EXPORT PyObject * PyArray_FromDims \ + (int NPY_UNUSED(nd), int *NPY_UNUSED(d), int NPY_UNUSED(type)); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_FromDimsAndDataAndDescr \ + (int NPY_UNUSED(nd), int *NPY_UNUSED(d), PyArray_Descr *, char *NPY_UNUSED(data)); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromAny \ + (PyObject *, PyArray_Descr *, int, int, int, PyObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_EnsureArray \ + (PyObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_EnsureAnyArray \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_FromFile \ + (FILE *, PyArray_Descr *, npy_intp, char *); +NPY_NO_EXPORT PyObject * PyArray_FromString \ + (char *, npy_intp, PyArray_Descr *, npy_intp, char *); +NPY_NO_EXPORT PyObject * PyArray_FromBuffer \ + (PyObject *, PyArray_Descr *, npy_intp, npy_intp); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromIter \ + (PyObject *, PyArray_Descr *, npy_intp); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_Return \ + (PyArrayObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) NPY_GCC_NONNULL(2) PyObject * PyArray_GetField \ + (PyArrayObject *, PyArray_Descr *, int); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) NPY_GCC_NONNULL(2) int PyArray_SetField \ + (PyArrayObject *, PyArray_Descr *, int, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Byteswap \ + (PyArrayObject *, npy_bool); +NPY_NO_EXPORT PyObject * PyArray_Resize \ + (PyArrayObject *, PyArray_Dims *, int, NPY_ORDER NPY_UNUSED(order)); +NPY_NO_EXPORT int PyArray_MoveInto \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT int PyArray_CopyInto \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT int PyArray_CopyAnyInto \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT int PyArray_CopyObject \ + (PyArrayObject *, PyObject *); +NPY_NO_EXPORT NPY_GCC_NONNULL(1) PyObject * PyArray_NewCopy \ + (PyArrayObject *, NPY_ORDER); +NPY_NO_EXPORT PyObject * PyArray_ToList \ + (PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_ToString \ + (PyArrayObject *, NPY_ORDER); +NPY_NO_EXPORT int PyArray_ToFile \ + (PyArrayObject *, FILE *, char *, char *); +NPY_NO_EXPORT int PyArray_Dump \ + (PyObject *, PyObject *, int); +NPY_NO_EXPORT PyObject * PyArray_Dumps \ + (PyObject *, int); +NPY_NO_EXPORT int PyArray_ValidType \ + (int); +NPY_NO_EXPORT void PyArray_UpdateFlags \ + (PyArrayObject *, int); +NPY_NO_EXPORT NPY_GCC_NONNULL(1) PyObject * PyArray_New \ + (PyTypeObject *, int, npy_intp const *, int, npy_intp const *, void *, int, int, PyObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) NPY_GCC_NONNULL(1) NPY_GCC_NONNULL(2) PyObject * PyArray_NewFromDescr \ + (PyTypeObject *, PyArray_Descr *, int, npy_intp const *, npy_intp const *, void *, int, PyObject *); 
+NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNew \ + (PyArray_Descr *); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNewFromType \ + (int); +NPY_NO_EXPORT double PyArray_GetPriority \ + (PyObject *, double); +NPY_NO_EXPORT PyObject * PyArray_IterNew \ + (PyObject *); +NPY_NO_EXPORT PyObject* PyArray_MultiIterNew \ + (int, ...); +NPY_NO_EXPORT int PyArray_PyIntAsInt \ + (PyObject *); +NPY_NO_EXPORT npy_intp PyArray_PyIntAsIntp \ + (PyObject *); +NPY_NO_EXPORT int PyArray_Broadcast \ + (PyArrayMultiIterObject *); +NPY_NO_EXPORT void PyArray_FillObjectArray \ + (PyArrayObject *, PyObject *); +NPY_NO_EXPORT int PyArray_FillWithScalar \ + (PyArrayObject *, PyObject *); +NPY_NO_EXPORT npy_bool PyArray_CheckStrides \ + (int, int, npy_intp, npy_intp, npy_intp const *, npy_intp const *); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNewByteorder \ + (PyArray_Descr *, char); +NPY_NO_EXPORT PyObject * PyArray_IterAllButAxis \ + (PyObject *, int *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_CheckFromAny \ + (PyObject *, PyArray_Descr *, int, int, int, PyObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromArray \ + (PyArrayObject *, PyArray_Descr *, int); +NPY_NO_EXPORT PyObject * PyArray_FromInterface \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_FromStructInterface \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_FromArrayAttr \ + (PyObject *, PyArray_Descr *, PyObject *); +NPY_NO_EXPORT NPY_SCALARKIND PyArray_ScalarKind \ + (int, PyArrayObject **); +NPY_NO_EXPORT int PyArray_CanCoerceScalar \ + (int, int, NPY_SCALARKIND); +NPY_NO_EXPORT PyObject * PyArray_NewFlagsObject \ + (PyObject *); +NPY_NO_EXPORT npy_bool PyArray_CanCastScalar \ + (PyTypeObject *, PyTypeObject *); +NPY_NO_EXPORT int PyArray_CompareUCS4 \ + (npy_ucs4 const *, npy_ucs4 const *, size_t); +NPY_NO_EXPORT int PyArray_RemoveSmallest \ + (PyArrayMultiIterObject *); +NPY_NO_EXPORT int PyArray_ElementStrides \ + (PyObject *); +NPY_NO_EXPORT void PyArray_Item_INCREF \ + (char *, PyArray_Descr *); +NPY_NO_EXPORT void PyArray_Item_XDECREF \ + (char *, PyArray_Descr *); +NPY_NO_EXPORT PyObject * PyArray_FieldNames \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Transpose \ + (PyArrayObject *, PyArray_Dims *); +NPY_NO_EXPORT PyObject * PyArray_TakeFrom \ + (PyArrayObject *, PyObject *, int, PyArrayObject *, NPY_CLIPMODE); +NPY_NO_EXPORT PyObject * PyArray_PutTo \ + (PyArrayObject *, PyObject*, PyObject *, NPY_CLIPMODE); +NPY_NO_EXPORT PyObject * PyArray_PutMask \ + (PyArrayObject *, PyObject*, PyObject*); +NPY_NO_EXPORT PyObject * PyArray_Repeat \ + (PyArrayObject *, PyObject *, int); +NPY_NO_EXPORT PyObject * PyArray_Choose \ + (PyArrayObject *, PyObject *, PyArrayObject *, NPY_CLIPMODE); +NPY_NO_EXPORT int PyArray_Sort \ + (PyArrayObject *, int, NPY_SORTKIND); +NPY_NO_EXPORT PyObject * PyArray_ArgSort \ + (PyArrayObject *, int, NPY_SORTKIND); +NPY_NO_EXPORT PyObject * PyArray_SearchSorted \ + (PyArrayObject *, PyObject *, NPY_SEARCHSIDE, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_ArgMax \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_ArgMin \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Reshape \ + (PyArrayObject *, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Newshape \ + (PyArrayObject *, PyArray_Dims *, NPY_ORDER); +NPY_NO_EXPORT PyObject * PyArray_Squeeze \ + (PyArrayObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_View \ + (PyArrayObject *, PyArray_Descr *, PyTypeObject *); +NPY_NO_EXPORT PyObject * 
PyArray_SwapAxes \ + (PyArrayObject *, int, int); +NPY_NO_EXPORT PyObject * PyArray_Max \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Min \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Ptp \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Mean \ + (PyArrayObject *, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Trace \ + (PyArrayObject *, int, int, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Diagonal \ + (PyArrayObject *, int, int, int); +NPY_NO_EXPORT PyObject * PyArray_Clip \ + (PyArrayObject *, PyObject *, PyObject *, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Conjugate \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Nonzero \ + (PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Std \ + (PyArrayObject *, int, int, PyArrayObject *, int); +NPY_NO_EXPORT PyObject * PyArray_Sum \ + (PyArrayObject *, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_CumSum \ + (PyArrayObject *, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Prod \ + (PyArrayObject *, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_CumProd \ + (PyArrayObject *, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_All \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Any \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Compress \ + (PyArrayObject *, PyObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Flatten \ + (PyArrayObject *, NPY_ORDER); +NPY_NO_EXPORT PyObject * PyArray_Ravel \ + (PyArrayObject *, NPY_ORDER); +NPY_NO_EXPORT npy_intp PyArray_MultiplyList \ + (npy_intp const *, int); +NPY_NO_EXPORT int PyArray_MultiplyIntList \ + (int const *, int); +NPY_NO_EXPORT void * PyArray_GetPtr \ + (PyArrayObject *, npy_intp const*); +NPY_NO_EXPORT int PyArray_CompareLists \ + (npy_intp const *, npy_intp const *, int); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(5) int PyArray_AsCArray \ + (PyObject **, void *, npy_intp *, int, PyArray_Descr*); +NPY_NO_EXPORT int PyArray_As1D \ + (PyObject **NPY_UNUSED(op), char **NPY_UNUSED(ptr), int *NPY_UNUSED(d1), int NPY_UNUSED(typecode)); +NPY_NO_EXPORT int PyArray_As2D \ + (PyObject **NPY_UNUSED(op), char ***NPY_UNUSED(ptr), int *NPY_UNUSED(d1), int *NPY_UNUSED(d2), int NPY_UNUSED(typecode)); +NPY_NO_EXPORT int PyArray_Free \ + (PyObject *, void *); +NPY_NO_EXPORT int PyArray_Converter \ + (PyObject *, PyObject **); +NPY_NO_EXPORT int PyArray_IntpFromSequence \ + (PyObject *, npy_intp *, int); +NPY_NO_EXPORT PyObject * PyArray_Concatenate \ + (PyObject *, int); +NPY_NO_EXPORT PyObject * PyArray_InnerProduct \ + (PyObject *, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_MatrixProduct \ + (PyObject *, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_CopyAndTranspose \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Correlate \ + (PyObject *, PyObject *, int); +NPY_NO_EXPORT int PyArray_TypestrConvert \ + (int, int); +NPY_NO_EXPORT int PyArray_DescrConverter \ + (PyObject *, PyArray_Descr **); +NPY_NO_EXPORT int PyArray_DescrConverter2 \ + (PyObject *, PyArray_Descr **); +NPY_NO_EXPORT int PyArray_IntpConverter \ + (PyObject *, PyArray_Dims *); +NPY_NO_EXPORT int PyArray_BufferConverter \ + (PyObject *, PyArray_Chunk *); +NPY_NO_EXPORT int PyArray_AxisConverter \ + (PyObject *, int *); +NPY_NO_EXPORT int PyArray_BoolConverter \ + (PyObject *, npy_bool *); +NPY_NO_EXPORT int PyArray_ByteorderConverter \ + (PyObject *, 
char *); +NPY_NO_EXPORT int PyArray_OrderConverter \ + (PyObject *, NPY_ORDER *); +NPY_NO_EXPORT unsigned char PyArray_EquivTypes \ + (PyArray_Descr *, PyArray_Descr *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_Zeros \ + (int, npy_intp const *, PyArray_Descr *, int); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_Empty \ + (int, npy_intp const *, PyArray_Descr *, int); +NPY_NO_EXPORT PyObject * PyArray_Where \ + (PyObject *, PyObject *, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Arange \ + (double, double, double, int); +NPY_NO_EXPORT PyObject * PyArray_ArangeObj \ + (PyObject *, PyObject *, PyObject *, PyArray_Descr *); +NPY_NO_EXPORT int PyArray_SortkindConverter \ + (PyObject *, NPY_SORTKIND *); +NPY_NO_EXPORT PyObject * PyArray_LexSort \ + (PyObject *, int); +NPY_NO_EXPORT PyObject * PyArray_Round \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT unsigned char PyArray_EquivTypenums \ + (int, int); +NPY_NO_EXPORT int PyArray_RegisterDataType \ + (PyArray_Descr *); +NPY_NO_EXPORT int PyArray_RegisterCastFunc \ + (PyArray_Descr *, int, PyArray_VectorUnaryFunc *); +NPY_NO_EXPORT int PyArray_RegisterCanCast \ + (PyArray_Descr *, int, NPY_SCALARKIND); +NPY_NO_EXPORT void PyArray_InitArrFuncs \ + (PyArray_ArrFuncs *); +NPY_NO_EXPORT PyObject * PyArray_IntTupleFromIntp \ + (int, npy_intp const *); +NPY_NO_EXPORT int PyArray_TypeNumFromName \ + (char const *); +NPY_NO_EXPORT int PyArray_ClipmodeConverter \ + (PyObject *, NPY_CLIPMODE *); +NPY_NO_EXPORT int PyArray_OutputConverter \ + (PyObject *, PyArrayObject **); +NPY_NO_EXPORT PyObject * PyArray_BroadcastToShape \ + (PyObject *, npy_intp *, int); +NPY_NO_EXPORT void _PyArray_SigintHandler \ + (int); +NPY_NO_EXPORT void* _PyArray_GetSigintBuf \ + (void); +NPY_NO_EXPORT int PyArray_DescrAlignConverter \ + (PyObject *, PyArray_Descr **); +NPY_NO_EXPORT int PyArray_DescrAlignConverter2 \ + (PyObject *, PyArray_Descr **); +NPY_NO_EXPORT int PyArray_SearchsideConverter \ + (PyObject *, void *); +NPY_NO_EXPORT PyObject * PyArray_CheckAxis \ + (PyArrayObject *, int *, int); +NPY_NO_EXPORT npy_intp PyArray_OverflowMultiplyList \ + (npy_intp const *, int); +NPY_NO_EXPORT int PyArray_CompareString \ + (const char *, const char *, size_t); +NPY_NO_EXPORT PyObject* PyArray_MultiIterFromObjects \ + (PyObject **, int, int, ...); +NPY_NO_EXPORT int PyArray_GetEndianness \ + (void); +NPY_NO_EXPORT unsigned int PyArray_GetNDArrayCFeatureVersion \ + (void); +NPY_NO_EXPORT PyObject * PyArray_Correlate2 \ + (PyObject *, PyObject *, int); +NPY_NO_EXPORT PyObject* PyArray_NeighborhoodIterNew \ + (PyArrayIterObject *, const npy_intp *, int, PyArrayObject*); +extern NPY_NO_EXPORT PyTypeObject PyTimeIntegerArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyDatetimeArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyTimedeltaArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyHalfArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject NpyIter_Type; + +NPY_NO_EXPORT void PyArray_SetDatetimeParseFunction \ + (PyObject *NPY_UNUSED(op)); +NPY_NO_EXPORT void PyArray_DatetimeToDatetimeStruct \ + (npy_datetime NPY_UNUSED(val), NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_datetimestruct *); +NPY_NO_EXPORT void PyArray_TimedeltaToTimedeltaStruct \ + (npy_timedelta NPY_UNUSED(val), NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_timedeltastruct *); +NPY_NO_EXPORT npy_datetime PyArray_DatetimeStructToDatetime \ + (NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_datetimestruct *NPY_UNUSED(d)); +NPY_NO_EXPORT npy_datetime PyArray_TimedeltaStructToTimedelta \ + 
(NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_timedeltastruct *NPY_UNUSED(d)); +NPY_NO_EXPORT NpyIter * NpyIter_New \ + (PyArrayObject *, npy_uint32, NPY_ORDER, NPY_CASTING, PyArray_Descr*); +NPY_NO_EXPORT NpyIter * NpyIter_MultiNew \ + (int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **); +NPY_NO_EXPORT NpyIter * NpyIter_AdvancedNew \ + (int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **, int, int **, npy_intp *, npy_intp); +NPY_NO_EXPORT NpyIter * NpyIter_Copy \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_Deallocate \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_HasDelayedBufAlloc \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_HasExternalLoop \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_EnableExternalLoop \ + (NpyIter *); +NPY_NO_EXPORT npy_intp * NpyIter_GetInnerStrideArray \ + (NpyIter *); +NPY_NO_EXPORT npy_intp * NpyIter_GetInnerLoopSizePtr \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_Reset \ + (NpyIter *, char **); +NPY_NO_EXPORT int NpyIter_ResetBasePointers \ + (NpyIter *, char **, char **); +NPY_NO_EXPORT int NpyIter_ResetToIterIndexRange \ + (NpyIter *, npy_intp, npy_intp, char **); +NPY_NO_EXPORT int NpyIter_GetNDim \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_GetNOp \ + (NpyIter *); +NPY_NO_EXPORT NpyIter_IterNextFunc * NpyIter_GetIterNext \ + (NpyIter *, char **); +NPY_NO_EXPORT npy_intp NpyIter_GetIterSize \ + (NpyIter *); +NPY_NO_EXPORT void NpyIter_GetIterIndexRange \ + (NpyIter *, npy_intp *, npy_intp *); +NPY_NO_EXPORT npy_intp NpyIter_GetIterIndex \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_GotoIterIndex \ + (NpyIter *, npy_intp); +NPY_NO_EXPORT npy_bool NpyIter_HasMultiIndex \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_GetShape \ + (NpyIter *, npy_intp *); +NPY_NO_EXPORT NpyIter_GetMultiIndexFunc * NpyIter_GetGetMultiIndex \ + (NpyIter *, char **); +NPY_NO_EXPORT int NpyIter_GotoMultiIndex \ + (NpyIter *, npy_intp const *); +NPY_NO_EXPORT int NpyIter_RemoveMultiIndex \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_HasIndex \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_IsBuffered \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_IsGrowInner \ + (NpyIter *); +NPY_NO_EXPORT npy_intp NpyIter_GetBufferSize \ + (NpyIter *); +NPY_NO_EXPORT npy_intp * NpyIter_GetIndexPtr \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_GotoIndex \ + (NpyIter *, npy_intp); +NPY_NO_EXPORT char ** NpyIter_GetDataPtrArray \ + (NpyIter *); +NPY_NO_EXPORT PyArray_Descr ** NpyIter_GetDescrArray \ + (NpyIter *); +NPY_NO_EXPORT PyArrayObject ** NpyIter_GetOperandArray \ + (NpyIter *); +NPY_NO_EXPORT PyArrayObject * NpyIter_GetIterView \ + (NpyIter *, npy_intp); +NPY_NO_EXPORT void NpyIter_GetReadFlags \ + (NpyIter *, char *); +NPY_NO_EXPORT void NpyIter_GetWriteFlags \ + (NpyIter *, char *); +NPY_NO_EXPORT void NpyIter_DebugPrint \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_IterationNeedsAPI \ + (NpyIter *); +NPY_NO_EXPORT void NpyIter_GetInnerFixedStrideArray \ + (NpyIter *, npy_intp *); +NPY_NO_EXPORT int NpyIter_RemoveAxis \ + (NpyIter *, int); +NPY_NO_EXPORT npy_intp * NpyIter_GetAxisStrideArray \ + (NpyIter *, int); +NPY_NO_EXPORT npy_bool NpyIter_RequiresBuffering \ + (NpyIter *); +NPY_NO_EXPORT char ** NpyIter_GetInitialDataPtrArray \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_CreateCompatibleStrides \ + (NpyIter *, npy_intp, npy_intp *); +NPY_NO_EXPORT int PyArray_CastingConverter \ + (PyObject *, NPY_CASTING *); +NPY_NO_EXPORT npy_intp PyArray_CountNonzero \ + (PyArrayObject *); +NPY_NO_EXPORT PyArray_Descr * 
PyArray_PromoteTypes \ + (PyArray_Descr *, PyArray_Descr *); +NPY_NO_EXPORT PyArray_Descr * PyArray_MinScalarType \ + (PyArrayObject *); +NPY_NO_EXPORT PyArray_Descr * PyArray_ResultType \ + (npy_intp, PyArrayObject *arrs[], npy_intp, PyArray_Descr *descrs[]); +NPY_NO_EXPORT npy_bool PyArray_CanCastArrayTo \ + (PyArrayObject *, PyArray_Descr *, NPY_CASTING); +NPY_NO_EXPORT npy_bool PyArray_CanCastTypeTo \ + (PyArray_Descr *, PyArray_Descr *, NPY_CASTING); +NPY_NO_EXPORT PyArrayObject * PyArray_EinsteinSum \ + (char *, npy_intp, PyArrayObject **, PyArray_Descr *, NPY_ORDER, NPY_CASTING, PyArrayObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) NPY_GCC_NONNULL(1) PyObject * PyArray_NewLikeArray \ + (PyArrayObject *, NPY_ORDER, PyArray_Descr *, int); +NPY_NO_EXPORT int PyArray_GetArrayParamsFromObject \ + (PyObject *NPY_UNUSED(op), PyArray_Descr *NPY_UNUSED(requested_dtype), npy_bool NPY_UNUSED(writeable), PyArray_Descr **NPY_UNUSED(out_dtype), int *NPY_UNUSED(out_ndim), npy_intp *NPY_UNUSED(out_dims), PyArrayObject **NPY_UNUSED(out_arr), PyObject *NPY_UNUSED(context)); +NPY_NO_EXPORT int PyArray_ConvertClipmodeSequence \ + (PyObject *, NPY_CLIPMODE *, int); +NPY_NO_EXPORT PyObject * PyArray_MatrixProduct2 \ + (PyObject *, PyObject *, PyArrayObject*); +NPY_NO_EXPORT npy_bool NpyIter_IsFirstVisit \ + (NpyIter *, int); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) int PyArray_SetBaseObject \ + (PyArrayObject *, PyObject *); +NPY_NO_EXPORT void PyArray_CreateSortedStridePerm \ + (int, npy_intp const *, npy_stride_sort_item *); +NPY_NO_EXPORT void PyArray_RemoveAxesInPlace \ + (PyArrayObject *, const npy_bool *); +NPY_NO_EXPORT void PyArray_DebugPrint \ + (PyArrayObject *); +NPY_NO_EXPORT int PyArray_FailUnlessWriteable \ + (PyArrayObject *, const char *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) int PyArray_SetUpdateIfCopyBase \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT void * PyDataMem_NEW \ + (size_t); +NPY_NO_EXPORT void PyDataMem_FREE \ + (void *); +NPY_NO_EXPORT void * PyDataMem_RENEW \ + (void *, size_t); +NPY_NO_EXPORT PyDataMem_EventHookFunc * PyDataMem_SetEventHook \ + (PyDataMem_EventHookFunc *, void *, void **); +extern NPY_NO_EXPORT NPY_CASTING NPY_DEFAULT_ASSIGN_CASTING; + +NPY_NO_EXPORT void PyArray_MapIterSwapAxes \ + (PyArrayMapIterObject *, PyArrayObject **, int); +NPY_NO_EXPORT PyObject * PyArray_MapIterArray \ + (PyArrayObject *, PyObject *); +NPY_NO_EXPORT void PyArray_MapIterNext \ + (PyArrayMapIterObject *); +NPY_NO_EXPORT int PyArray_Partition \ + (PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND); +NPY_NO_EXPORT PyObject * PyArray_ArgPartition \ + (PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND); +NPY_NO_EXPORT int PyArray_SelectkindConverter \ + (PyObject *, NPY_SELECTKIND *); +NPY_NO_EXPORT void * PyDataMem_NEW_ZEROED \ + (size_t, size_t); +NPY_NO_EXPORT NPY_GCC_NONNULL(1) int PyArray_CheckAnyScalarExact \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_MapIterArrayCopyIfOverlap \ + (PyArrayObject *, PyObject *, int, PyArrayObject *); +NPY_NO_EXPORT int PyArray_ResolveWritebackIfCopy \ + (PyArrayObject *); +NPY_NO_EXPORT int PyArray_SetWritebackIfCopyBase \ + (PyArrayObject *, PyArrayObject *); + +#else + +#if defined(PY_ARRAY_UNIQUE_SYMBOL) +#define PyArray_API PY_ARRAY_UNIQUE_SYMBOL +#endif + +#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY) +extern void **PyArray_API; +#else +#if defined(PY_ARRAY_UNIQUE_SYMBOL) +void **PyArray_API; +#else +static void **PyArray_API=NULL; +#endif +#endif + +#define PyArray_GetNDArrayCVersion \ + (*(unsigned int 
(*)(void)) \ + PyArray_API[0]) +#define PyBigArray_Type (*(PyTypeObject *)PyArray_API[1]) +#define PyArray_Type (*(PyTypeObject *)PyArray_API[2]) +#define PyArrayDescr_Type (*(PyTypeObject *)PyArray_API[3]) +#define PyArrayFlags_Type (*(PyTypeObject *)PyArray_API[4]) +#define PyArrayIter_Type (*(PyTypeObject *)PyArray_API[5]) +#define PyArrayMultiIter_Type (*(PyTypeObject *)PyArray_API[6]) +#define NPY_NUMUSERTYPES (*(int *)PyArray_API[7]) +#define PyBoolArrType_Type (*(PyTypeObject *)PyArray_API[8]) +#define _PyArrayScalar_BoolValues ((PyBoolScalarObject *)PyArray_API[9]) +#define PyGenericArrType_Type (*(PyTypeObject *)PyArray_API[10]) +#define PyNumberArrType_Type (*(PyTypeObject *)PyArray_API[11]) +#define PyIntegerArrType_Type (*(PyTypeObject *)PyArray_API[12]) +#define PySignedIntegerArrType_Type (*(PyTypeObject *)PyArray_API[13]) +#define PyUnsignedIntegerArrType_Type (*(PyTypeObject *)PyArray_API[14]) +#define PyInexactArrType_Type (*(PyTypeObject *)PyArray_API[15]) +#define PyFloatingArrType_Type (*(PyTypeObject *)PyArray_API[16]) +#define PyComplexFloatingArrType_Type (*(PyTypeObject *)PyArray_API[17]) +#define PyFlexibleArrType_Type (*(PyTypeObject *)PyArray_API[18]) +#define PyCharacterArrType_Type (*(PyTypeObject *)PyArray_API[19]) +#define PyByteArrType_Type (*(PyTypeObject *)PyArray_API[20]) +#define PyShortArrType_Type (*(PyTypeObject *)PyArray_API[21]) +#define PyIntArrType_Type (*(PyTypeObject *)PyArray_API[22]) +#define PyLongArrType_Type (*(PyTypeObject *)PyArray_API[23]) +#define PyLongLongArrType_Type (*(PyTypeObject *)PyArray_API[24]) +#define PyUByteArrType_Type (*(PyTypeObject *)PyArray_API[25]) +#define PyUShortArrType_Type (*(PyTypeObject *)PyArray_API[26]) +#define PyUIntArrType_Type (*(PyTypeObject *)PyArray_API[27]) +#define PyULongArrType_Type (*(PyTypeObject *)PyArray_API[28]) +#define PyULongLongArrType_Type (*(PyTypeObject *)PyArray_API[29]) +#define PyFloatArrType_Type (*(PyTypeObject *)PyArray_API[30]) +#define PyDoubleArrType_Type (*(PyTypeObject *)PyArray_API[31]) +#define PyLongDoubleArrType_Type (*(PyTypeObject *)PyArray_API[32]) +#define PyCFloatArrType_Type (*(PyTypeObject *)PyArray_API[33]) +#define PyCDoubleArrType_Type (*(PyTypeObject *)PyArray_API[34]) +#define PyCLongDoubleArrType_Type (*(PyTypeObject *)PyArray_API[35]) +#define PyObjectArrType_Type (*(PyTypeObject *)PyArray_API[36]) +#define PyStringArrType_Type (*(PyTypeObject *)PyArray_API[37]) +#define PyUnicodeArrType_Type (*(PyTypeObject *)PyArray_API[38]) +#define PyVoidArrType_Type (*(PyTypeObject *)PyArray_API[39]) +#define PyArray_SetNumericOps \ + (*(int (*)(PyObject *)) \ + PyArray_API[40]) +#define PyArray_GetNumericOps \ + (*(PyObject * (*)(void)) \ + PyArray_API[41]) +#define PyArray_INCREF \ + (*(int (*)(PyArrayObject *)) \ + PyArray_API[42]) +#define PyArray_XDECREF \ + (*(int (*)(PyArrayObject *)) \ + PyArray_API[43]) +#define PyArray_SetStringFunction \ + (*(void (*)(PyObject *, int)) \ + PyArray_API[44]) +#define PyArray_DescrFromType \ + (*(PyArray_Descr * (*)(int)) \ + PyArray_API[45]) +#define PyArray_TypeObjectFromType \ + (*(PyObject * (*)(int)) \ + PyArray_API[46]) +#define PyArray_Zero \ + (*(char * (*)(PyArrayObject *)) \ + PyArray_API[47]) +#define PyArray_One \ + (*(char * (*)(PyArrayObject *)) \ + PyArray_API[48]) +#define PyArray_CastToType \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \ + PyArray_API[49]) +#define PyArray_CastTo \ + (*(int (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[50]) +#define PyArray_CastAnyTo \ + (*(int 
(*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[51]) +#define PyArray_CanCastSafely \ + (*(int (*)(int, int)) \ + PyArray_API[52]) +#define PyArray_CanCastTo \ + (*(npy_bool (*)(PyArray_Descr *, PyArray_Descr *)) \ + PyArray_API[53]) +#define PyArray_ObjectType \ + (*(int (*)(PyObject *, int)) \ + PyArray_API[54]) +#define PyArray_DescrFromObject \ + (*(PyArray_Descr * (*)(PyObject *, PyArray_Descr *)) \ + PyArray_API[55]) +#define PyArray_ConvertToCommonType \ + (*(PyArrayObject ** (*)(PyObject *, int *)) \ + PyArray_API[56]) +#define PyArray_DescrFromScalar \ + (*(PyArray_Descr * (*)(PyObject *)) \ + PyArray_API[57]) +#define PyArray_DescrFromTypeObject \ + (*(PyArray_Descr * (*)(PyObject *)) \ + PyArray_API[58]) +#define PyArray_Size \ + (*(npy_intp (*)(PyObject *)) \ + PyArray_API[59]) +#define PyArray_Scalar \ + (*(PyObject * (*)(void *, PyArray_Descr *, PyObject *)) \ + PyArray_API[60]) +#define PyArray_FromScalar \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *)) \ + PyArray_API[61]) +#define PyArray_ScalarAsCtype \ + (*(void (*)(PyObject *, void *)) \ + PyArray_API[62]) +#define PyArray_CastScalarToCtype \ + (*(int (*)(PyObject *, void *, PyArray_Descr *)) \ + PyArray_API[63]) +#define PyArray_CastScalarDirect \ + (*(int (*)(PyObject *, PyArray_Descr *, void *, int)) \ + PyArray_API[64]) +#define PyArray_ScalarFromObject \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[65]) +#define PyArray_GetCastFunc \ + (*(PyArray_VectorUnaryFunc * (*)(PyArray_Descr *, int)) \ + PyArray_API[66]) +#define PyArray_FromDims \ + (*(PyObject * (*)(int NPY_UNUSED(nd), int *NPY_UNUSED(d), int NPY_UNUSED(type))) \ + PyArray_API[67]) +#define PyArray_FromDimsAndDataAndDescr \ + (*(PyObject * (*)(int NPY_UNUSED(nd), int *NPY_UNUSED(d), PyArray_Descr *, char *NPY_UNUSED(data))) \ + PyArray_API[68]) +#define PyArray_FromAny \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *, int, int, int, PyObject *)) \ + PyArray_API[69]) +#define PyArray_EnsureArray \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[70]) +#define PyArray_EnsureAnyArray \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[71]) +#define PyArray_FromFile \ + (*(PyObject * (*)(FILE *, PyArray_Descr *, npy_intp, char *)) \ + PyArray_API[72]) +#define PyArray_FromString \ + (*(PyObject * (*)(char *, npy_intp, PyArray_Descr *, npy_intp, char *)) \ + PyArray_API[73]) +#define PyArray_FromBuffer \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *, npy_intp, npy_intp)) \ + PyArray_API[74]) +#define PyArray_FromIter \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *, npy_intp)) \ + PyArray_API[75]) +#define PyArray_Return \ + (*(PyObject * (*)(PyArrayObject *)) \ + PyArray_API[76]) +#define PyArray_GetField \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \ + PyArray_API[77]) +#define PyArray_SetField \ + (*(int (*)(PyArrayObject *, PyArray_Descr *, int, PyObject *)) \ + PyArray_API[78]) +#define PyArray_Byteswap \ + (*(PyObject * (*)(PyArrayObject *, npy_bool)) \ + PyArray_API[79]) +#define PyArray_Resize \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *, int, NPY_ORDER NPY_UNUSED(order))) \ + PyArray_API[80]) +#define PyArray_MoveInto \ + (*(int (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[81]) +#define PyArray_CopyInto \ + (*(int (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[82]) +#define PyArray_CopyAnyInto \ + (*(int (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[83]) +#define PyArray_CopyObject \ + (*(int (*)(PyArrayObject *, PyObject *)) \ + PyArray_API[84]) +#define PyArray_NewCopy \ + 
(*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ + PyArray_API[85]) +#define PyArray_ToList \ + (*(PyObject * (*)(PyArrayObject *)) \ + PyArray_API[86]) +#define PyArray_ToString \ + (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ + PyArray_API[87]) +#define PyArray_ToFile \ + (*(int (*)(PyArrayObject *, FILE *, char *, char *)) \ + PyArray_API[88]) +#define PyArray_Dump \ + (*(int (*)(PyObject *, PyObject *, int)) \ + PyArray_API[89]) +#define PyArray_Dumps \ + (*(PyObject * (*)(PyObject *, int)) \ + PyArray_API[90]) +#define PyArray_ValidType \ + (*(int (*)(int)) \ + PyArray_API[91]) +#define PyArray_UpdateFlags \ + (*(void (*)(PyArrayObject *, int)) \ + PyArray_API[92]) +#define PyArray_New \ + (*(PyObject * (*)(PyTypeObject *, int, npy_intp const *, int, npy_intp const *, void *, int, int, PyObject *)) \ + PyArray_API[93]) +#define PyArray_NewFromDescr \ + (*(PyObject * (*)(PyTypeObject *, PyArray_Descr *, int, npy_intp const *, npy_intp const *, void *, int, PyObject *)) \ + PyArray_API[94]) +#define PyArray_DescrNew \ + (*(PyArray_Descr * (*)(PyArray_Descr *)) \ + PyArray_API[95]) +#define PyArray_DescrNewFromType \ + (*(PyArray_Descr * (*)(int)) \ + PyArray_API[96]) +#define PyArray_GetPriority \ + (*(double (*)(PyObject *, double)) \ + PyArray_API[97]) +#define PyArray_IterNew \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[98]) +#define PyArray_MultiIterNew \ + (*(PyObject* (*)(int, ...)) \ + PyArray_API[99]) +#define PyArray_PyIntAsInt \ + (*(int (*)(PyObject *)) \ + PyArray_API[100]) +#define PyArray_PyIntAsIntp \ + (*(npy_intp (*)(PyObject *)) \ + PyArray_API[101]) +#define PyArray_Broadcast \ + (*(int (*)(PyArrayMultiIterObject *)) \ + PyArray_API[102]) +#define PyArray_FillObjectArray \ + (*(void (*)(PyArrayObject *, PyObject *)) \ + PyArray_API[103]) +#define PyArray_FillWithScalar \ + (*(int (*)(PyArrayObject *, PyObject *)) \ + PyArray_API[104]) +#define PyArray_CheckStrides \ + (*(npy_bool (*)(int, int, npy_intp, npy_intp, npy_intp const *, npy_intp const *)) \ + PyArray_API[105]) +#define PyArray_DescrNewByteorder \ + (*(PyArray_Descr * (*)(PyArray_Descr *, char)) \ + PyArray_API[106]) +#define PyArray_IterAllButAxis \ + (*(PyObject * (*)(PyObject *, int *)) \ + PyArray_API[107]) +#define PyArray_CheckFromAny \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *, int, int, int, PyObject *)) \ + PyArray_API[108]) +#define PyArray_FromArray \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \ + PyArray_API[109]) +#define PyArray_FromInterface \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[110]) +#define PyArray_FromStructInterface \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[111]) +#define PyArray_FromArrayAttr \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *, PyObject *)) \ + PyArray_API[112]) +#define PyArray_ScalarKind \ + (*(NPY_SCALARKIND (*)(int, PyArrayObject **)) \ + PyArray_API[113]) +#define PyArray_CanCoerceScalar \ + (*(int (*)(int, int, NPY_SCALARKIND)) \ + PyArray_API[114]) +#define PyArray_NewFlagsObject \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[115]) +#define PyArray_CanCastScalar \ + (*(npy_bool (*)(PyTypeObject *, PyTypeObject *)) \ + PyArray_API[116]) +#define PyArray_CompareUCS4 \ + (*(int (*)(npy_ucs4 const *, npy_ucs4 const *, size_t)) \ + PyArray_API[117]) +#define PyArray_RemoveSmallest \ + (*(int (*)(PyArrayMultiIterObject *)) \ + PyArray_API[118]) +#define PyArray_ElementStrides \ + (*(int (*)(PyObject *)) \ + PyArray_API[119]) +#define PyArray_Item_INCREF \ + (*(void (*)(char *, PyArray_Descr *)) \ + 
PyArray_API[120]) +#define PyArray_Item_XDECREF \ + (*(void (*)(char *, PyArray_Descr *)) \ + PyArray_API[121]) +#define PyArray_FieldNames \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[122]) +#define PyArray_Transpose \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *)) \ + PyArray_API[123]) +#define PyArray_TakeFrom \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, int, PyArrayObject *, NPY_CLIPMODE)) \ + PyArray_API[124]) +#define PyArray_PutTo \ + (*(PyObject * (*)(PyArrayObject *, PyObject*, PyObject *, NPY_CLIPMODE)) \ + PyArray_API[125]) +#define PyArray_PutMask \ + (*(PyObject * (*)(PyArrayObject *, PyObject*, PyObject*)) \ + PyArray_API[126]) +#define PyArray_Repeat \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, int)) \ + PyArray_API[127]) +#define PyArray_Choose \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, PyArrayObject *, NPY_CLIPMODE)) \ + PyArray_API[128]) +#define PyArray_Sort \ + (*(int (*)(PyArrayObject *, int, NPY_SORTKIND)) \ + PyArray_API[129]) +#define PyArray_ArgSort \ + (*(PyObject * (*)(PyArrayObject *, int, NPY_SORTKIND)) \ + PyArray_API[130]) +#define PyArray_SearchSorted \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, NPY_SEARCHSIDE, PyObject *)) \ + PyArray_API[131]) +#define PyArray_ArgMax \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[132]) +#define PyArray_ArgMin \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[133]) +#define PyArray_Reshape \ + (*(PyObject * (*)(PyArrayObject *, PyObject *)) \ + PyArray_API[134]) +#define PyArray_Newshape \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *, NPY_ORDER)) \ + PyArray_API[135]) +#define PyArray_Squeeze \ + (*(PyObject * (*)(PyArrayObject *)) \ + PyArray_API[136]) +#define PyArray_View \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, PyTypeObject *)) \ + PyArray_API[137]) +#define PyArray_SwapAxes \ + (*(PyObject * (*)(PyArrayObject *, int, int)) \ + PyArray_API[138]) +#define PyArray_Max \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[139]) +#define PyArray_Min \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[140]) +#define PyArray_Ptp \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[141]) +#define PyArray_Mean \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ + PyArray_API[142]) +#define PyArray_Trace \ + (*(PyObject * (*)(PyArrayObject *, int, int, int, int, PyArrayObject *)) \ + PyArray_API[143]) +#define PyArray_Diagonal \ + (*(PyObject * (*)(PyArrayObject *, int, int, int)) \ + PyArray_API[144]) +#define PyArray_Clip \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, PyObject *, PyArrayObject *)) \ + PyArray_API[145]) +#define PyArray_Conjugate \ + (*(PyObject * (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[146]) +#define PyArray_Nonzero \ + (*(PyObject * (*)(PyArrayObject *)) \ + PyArray_API[147]) +#define PyArray_Std \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *, int)) \ + PyArray_API[148]) +#define PyArray_Sum \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ + PyArray_API[149]) +#define PyArray_CumSum \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ + PyArray_API[150]) +#define PyArray_Prod \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ + PyArray_API[151]) +#define PyArray_CumProd \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ + PyArray_API[152]) +#define PyArray_All \ + (*(PyObject * 
(*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[153]) +#define PyArray_Any \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[154]) +#define PyArray_Compress \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, int, PyArrayObject *)) \ + PyArray_API[155]) +#define PyArray_Flatten \ + (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ + PyArray_API[156]) +#define PyArray_Ravel \ + (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ + PyArray_API[157]) +#define PyArray_MultiplyList \ + (*(npy_intp (*)(npy_intp const *, int)) \ + PyArray_API[158]) +#define PyArray_MultiplyIntList \ + (*(int (*)(int const *, int)) \ + PyArray_API[159]) +#define PyArray_GetPtr \ + (*(void * (*)(PyArrayObject *, npy_intp const*)) \ + PyArray_API[160]) +#define PyArray_CompareLists \ + (*(int (*)(npy_intp const *, npy_intp const *, int)) \ + PyArray_API[161]) +#define PyArray_AsCArray \ + (*(int (*)(PyObject **, void *, npy_intp *, int, PyArray_Descr*)) \ + PyArray_API[162]) +#define PyArray_As1D \ + (*(int (*)(PyObject **NPY_UNUSED(op), char **NPY_UNUSED(ptr), int *NPY_UNUSED(d1), int NPY_UNUSED(typecode))) \ + PyArray_API[163]) +#define PyArray_As2D \ + (*(int (*)(PyObject **NPY_UNUSED(op), char ***NPY_UNUSED(ptr), int *NPY_UNUSED(d1), int *NPY_UNUSED(d2), int NPY_UNUSED(typecode))) \ + PyArray_API[164]) +#define PyArray_Free \ + (*(int (*)(PyObject *, void *)) \ + PyArray_API[165]) +#define PyArray_Converter \ + (*(int (*)(PyObject *, PyObject **)) \ + PyArray_API[166]) +#define PyArray_IntpFromSequence \ + (*(int (*)(PyObject *, npy_intp *, int)) \ + PyArray_API[167]) +#define PyArray_Concatenate \ + (*(PyObject * (*)(PyObject *, int)) \ + PyArray_API[168]) +#define PyArray_InnerProduct \ + (*(PyObject * (*)(PyObject *, PyObject *)) \ + PyArray_API[169]) +#define PyArray_MatrixProduct \ + (*(PyObject * (*)(PyObject *, PyObject *)) \ + PyArray_API[170]) +#define PyArray_CopyAndTranspose \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[171]) +#define PyArray_Correlate \ + (*(PyObject * (*)(PyObject *, PyObject *, int)) \ + PyArray_API[172]) +#define PyArray_TypestrConvert \ + (*(int (*)(int, int)) \ + PyArray_API[173]) +#define PyArray_DescrConverter \ + (*(int (*)(PyObject *, PyArray_Descr **)) \ + PyArray_API[174]) +#define PyArray_DescrConverter2 \ + (*(int (*)(PyObject *, PyArray_Descr **)) \ + PyArray_API[175]) +#define PyArray_IntpConverter \ + (*(int (*)(PyObject *, PyArray_Dims *)) \ + PyArray_API[176]) +#define PyArray_BufferConverter \ + (*(int (*)(PyObject *, PyArray_Chunk *)) \ + PyArray_API[177]) +#define PyArray_AxisConverter \ + (*(int (*)(PyObject *, int *)) \ + PyArray_API[178]) +#define PyArray_BoolConverter \ + (*(int (*)(PyObject *, npy_bool *)) \ + PyArray_API[179]) +#define PyArray_ByteorderConverter \ + (*(int (*)(PyObject *, char *)) \ + PyArray_API[180]) +#define PyArray_OrderConverter \ + (*(int (*)(PyObject *, NPY_ORDER *)) \ + PyArray_API[181]) +#define PyArray_EquivTypes \ + (*(unsigned char (*)(PyArray_Descr *, PyArray_Descr *)) \ + PyArray_API[182]) +#define PyArray_Zeros \ + (*(PyObject * (*)(int, npy_intp const *, PyArray_Descr *, int)) \ + PyArray_API[183]) +#define PyArray_Empty \ + (*(PyObject * (*)(int, npy_intp const *, PyArray_Descr *, int)) \ + PyArray_API[184]) +#define PyArray_Where \ + (*(PyObject * (*)(PyObject *, PyObject *, PyObject *)) \ + PyArray_API[185]) +#define PyArray_Arange \ + (*(PyObject * (*)(double, double, double, int)) \ + PyArray_API[186]) +#define PyArray_ArangeObj \ + (*(PyObject * (*)(PyObject *, PyObject *, 
PyObject *, PyArray_Descr *)) \ + PyArray_API[187]) +#define PyArray_SortkindConverter \ + (*(int (*)(PyObject *, NPY_SORTKIND *)) \ + PyArray_API[188]) +#define PyArray_LexSort \ + (*(PyObject * (*)(PyObject *, int)) \ + PyArray_API[189]) +#define PyArray_Round \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[190]) +#define PyArray_EquivTypenums \ + (*(unsigned char (*)(int, int)) \ + PyArray_API[191]) +#define PyArray_RegisterDataType \ + (*(int (*)(PyArray_Descr *)) \ + PyArray_API[192]) +#define PyArray_RegisterCastFunc \ + (*(int (*)(PyArray_Descr *, int, PyArray_VectorUnaryFunc *)) \ + PyArray_API[193]) +#define PyArray_RegisterCanCast \ + (*(int (*)(PyArray_Descr *, int, NPY_SCALARKIND)) \ + PyArray_API[194]) +#define PyArray_InitArrFuncs \ + (*(void (*)(PyArray_ArrFuncs *)) \ + PyArray_API[195]) +#define PyArray_IntTupleFromIntp \ + (*(PyObject * (*)(int, npy_intp const *)) \ + PyArray_API[196]) +#define PyArray_TypeNumFromName \ + (*(int (*)(char const *)) \ + PyArray_API[197]) +#define PyArray_ClipmodeConverter \ + (*(int (*)(PyObject *, NPY_CLIPMODE *)) \ + PyArray_API[198]) +#define PyArray_OutputConverter \ + (*(int (*)(PyObject *, PyArrayObject **)) \ + PyArray_API[199]) +#define PyArray_BroadcastToShape \ + (*(PyObject * (*)(PyObject *, npy_intp *, int)) \ + PyArray_API[200]) +#define _PyArray_SigintHandler \ + (*(void (*)(int)) \ + PyArray_API[201]) +#define _PyArray_GetSigintBuf \ + (*(void* (*)(void)) \ + PyArray_API[202]) +#define PyArray_DescrAlignConverter \ + (*(int (*)(PyObject *, PyArray_Descr **)) \ + PyArray_API[203]) +#define PyArray_DescrAlignConverter2 \ + (*(int (*)(PyObject *, PyArray_Descr **)) \ + PyArray_API[204]) +#define PyArray_SearchsideConverter \ + (*(int (*)(PyObject *, void *)) \ + PyArray_API[205]) +#define PyArray_CheckAxis \ + (*(PyObject * (*)(PyArrayObject *, int *, int)) \ + PyArray_API[206]) +#define PyArray_OverflowMultiplyList \ + (*(npy_intp (*)(npy_intp const *, int)) \ + PyArray_API[207]) +#define PyArray_CompareString \ + (*(int (*)(const char *, const char *, size_t)) \ + PyArray_API[208]) +#define PyArray_MultiIterFromObjects \ + (*(PyObject* (*)(PyObject **, int, int, ...)) \ + PyArray_API[209]) +#define PyArray_GetEndianness \ + (*(int (*)(void)) \ + PyArray_API[210]) +#define PyArray_GetNDArrayCFeatureVersion \ + (*(unsigned int (*)(void)) \ + PyArray_API[211]) +#define PyArray_Correlate2 \ + (*(PyObject * (*)(PyObject *, PyObject *, int)) \ + PyArray_API[212]) +#define PyArray_NeighborhoodIterNew \ + (*(PyObject* (*)(PyArrayIterObject *, const npy_intp *, int, PyArrayObject*)) \ + PyArray_API[213]) +#define PyTimeIntegerArrType_Type (*(PyTypeObject *)PyArray_API[214]) +#define PyDatetimeArrType_Type (*(PyTypeObject *)PyArray_API[215]) +#define PyTimedeltaArrType_Type (*(PyTypeObject *)PyArray_API[216]) +#define PyHalfArrType_Type (*(PyTypeObject *)PyArray_API[217]) +#define NpyIter_Type (*(PyTypeObject *)PyArray_API[218]) +#define PyArray_SetDatetimeParseFunction \ + (*(void (*)(PyObject *NPY_UNUSED(op))) \ + PyArray_API[219]) +#define PyArray_DatetimeToDatetimeStruct \ + (*(void (*)(npy_datetime NPY_UNUSED(val), NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_datetimestruct *)) \ + PyArray_API[220]) +#define PyArray_TimedeltaToTimedeltaStruct \ + (*(void (*)(npy_timedelta NPY_UNUSED(val), NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_timedeltastruct *)) \ + PyArray_API[221]) +#define PyArray_DatetimeStructToDatetime \ + (*(npy_datetime (*)(NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_datetimestruct *NPY_UNUSED(d))) \ + 
PyArray_API[222]) +#define PyArray_TimedeltaStructToTimedelta \ + (*(npy_datetime (*)(NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_timedeltastruct *NPY_UNUSED(d))) \ + PyArray_API[223]) +#define NpyIter_New \ + (*(NpyIter * (*)(PyArrayObject *, npy_uint32, NPY_ORDER, NPY_CASTING, PyArray_Descr*)) \ + PyArray_API[224]) +#define NpyIter_MultiNew \ + (*(NpyIter * (*)(int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **)) \ + PyArray_API[225]) +#define NpyIter_AdvancedNew \ + (*(NpyIter * (*)(int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **, int, int **, npy_intp *, npy_intp)) \ + PyArray_API[226]) +#define NpyIter_Copy \ + (*(NpyIter * (*)(NpyIter *)) \ + PyArray_API[227]) +#define NpyIter_Deallocate \ + (*(int (*)(NpyIter *)) \ + PyArray_API[228]) +#define NpyIter_HasDelayedBufAlloc \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[229]) +#define NpyIter_HasExternalLoop \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[230]) +#define NpyIter_EnableExternalLoop \ + (*(int (*)(NpyIter *)) \ + PyArray_API[231]) +#define NpyIter_GetInnerStrideArray \ + (*(npy_intp * (*)(NpyIter *)) \ + PyArray_API[232]) +#define NpyIter_GetInnerLoopSizePtr \ + (*(npy_intp * (*)(NpyIter *)) \ + PyArray_API[233]) +#define NpyIter_Reset \ + (*(int (*)(NpyIter *, char **)) \ + PyArray_API[234]) +#define NpyIter_ResetBasePointers \ + (*(int (*)(NpyIter *, char **, char **)) \ + PyArray_API[235]) +#define NpyIter_ResetToIterIndexRange \ + (*(int (*)(NpyIter *, npy_intp, npy_intp, char **)) \ + PyArray_API[236]) +#define NpyIter_GetNDim \ + (*(int (*)(NpyIter *)) \ + PyArray_API[237]) +#define NpyIter_GetNOp \ + (*(int (*)(NpyIter *)) \ + PyArray_API[238]) +#define NpyIter_GetIterNext \ + (*(NpyIter_IterNextFunc * (*)(NpyIter *, char **)) \ + PyArray_API[239]) +#define NpyIter_GetIterSize \ + (*(npy_intp (*)(NpyIter *)) \ + PyArray_API[240]) +#define NpyIter_GetIterIndexRange \ + (*(void (*)(NpyIter *, npy_intp *, npy_intp *)) \ + PyArray_API[241]) +#define NpyIter_GetIterIndex \ + (*(npy_intp (*)(NpyIter *)) \ + PyArray_API[242]) +#define NpyIter_GotoIterIndex \ + (*(int (*)(NpyIter *, npy_intp)) \ + PyArray_API[243]) +#define NpyIter_HasMultiIndex \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[244]) +#define NpyIter_GetShape \ + (*(int (*)(NpyIter *, npy_intp *)) \ + PyArray_API[245]) +#define NpyIter_GetGetMultiIndex \ + (*(NpyIter_GetMultiIndexFunc * (*)(NpyIter *, char **)) \ + PyArray_API[246]) +#define NpyIter_GotoMultiIndex \ + (*(int (*)(NpyIter *, npy_intp const *)) \ + PyArray_API[247]) +#define NpyIter_RemoveMultiIndex \ + (*(int (*)(NpyIter *)) \ + PyArray_API[248]) +#define NpyIter_HasIndex \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[249]) +#define NpyIter_IsBuffered \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[250]) +#define NpyIter_IsGrowInner \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[251]) +#define NpyIter_GetBufferSize \ + (*(npy_intp (*)(NpyIter *)) \ + PyArray_API[252]) +#define NpyIter_GetIndexPtr \ + (*(npy_intp * (*)(NpyIter *)) \ + PyArray_API[253]) +#define NpyIter_GotoIndex \ + (*(int (*)(NpyIter *, npy_intp)) \ + PyArray_API[254]) +#define NpyIter_GetDataPtrArray \ + (*(char ** (*)(NpyIter *)) \ + PyArray_API[255]) +#define NpyIter_GetDescrArray \ + (*(PyArray_Descr ** (*)(NpyIter *)) \ + PyArray_API[256]) +#define NpyIter_GetOperandArray \ + (*(PyArrayObject ** (*)(NpyIter *)) \ + PyArray_API[257]) +#define NpyIter_GetIterView \ + (*(PyArrayObject * (*)(NpyIter *, npy_intp)) \ + PyArray_API[258]) +#define 
NpyIter_GetReadFlags \ + (*(void (*)(NpyIter *, char *)) \ + PyArray_API[259]) +#define NpyIter_GetWriteFlags \ + (*(void (*)(NpyIter *, char *)) \ + PyArray_API[260]) +#define NpyIter_DebugPrint \ + (*(void (*)(NpyIter *)) \ + PyArray_API[261]) +#define NpyIter_IterationNeedsAPI \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[262]) +#define NpyIter_GetInnerFixedStrideArray \ + (*(void (*)(NpyIter *, npy_intp *)) \ + PyArray_API[263]) +#define NpyIter_RemoveAxis \ + (*(int (*)(NpyIter *, int)) \ + PyArray_API[264]) +#define NpyIter_GetAxisStrideArray \ + (*(npy_intp * (*)(NpyIter *, int)) \ + PyArray_API[265]) +#define NpyIter_RequiresBuffering \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[266]) +#define NpyIter_GetInitialDataPtrArray \ + (*(char ** (*)(NpyIter *)) \ + PyArray_API[267]) +#define NpyIter_CreateCompatibleStrides \ + (*(int (*)(NpyIter *, npy_intp, npy_intp *)) \ + PyArray_API[268]) +#define PyArray_CastingConverter \ + (*(int (*)(PyObject *, NPY_CASTING *)) \ + PyArray_API[269]) +#define PyArray_CountNonzero \ + (*(npy_intp (*)(PyArrayObject *)) \ + PyArray_API[270]) +#define PyArray_PromoteTypes \ + (*(PyArray_Descr * (*)(PyArray_Descr *, PyArray_Descr *)) \ + PyArray_API[271]) +#define PyArray_MinScalarType \ + (*(PyArray_Descr * (*)(PyArrayObject *)) \ + PyArray_API[272]) +#define PyArray_ResultType \ + (*(PyArray_Descr * (*)(npy_intp, PyArrayObject *arrs[], npy_intp, PyArray_Descr *descrs[])) \ + PyArray_API[273]) +#define PyArray_CanCastArrayTo \ + (*(npy_bool (*)(PyArrayObject *, PyArray_Descr *, NPY_CASTING)) \ + PyArray_API[274]) +#define PyArray_CanCastTypeTo \ + (*(npy_bool (*)(PyArray_Descr *, PyArray_Descr *, NPY_CASTING)) \ + PyArray_API[275]) +#define PyArray_EinsteinSum \ + (*(PyArrayObject * (*)(char *, npy_intp, PyArrayObject **, PyArray_Descr *, NPY_ORDER, NPY_CASTING, PyArrayObject *)) \ + PyArray_API[276]) +#define PyArray_NewLikeArray \ + (*(PyObject * (*)(PyArrayObject *, NPY_ORDER, PyArray_Descr *, int)) \ + PyArray_API[277]) +#define PyArray_GetArrayParamsFromObject \ + (*(int (*)(PyObject *NPY_UNUSED(op), PyArray_Descr *NPY_UNUSED(requested_dtype), npy_bool NPY_UNUSED(writeable), PyArray_Descr **NPY_UNUSED(out_dtype), int *NPY_UNUSED(out_ndim), npy_intp *NPY_UNUSED(out_dims), PyArrayObject **NPY_UNUSED(out_arr), PyObject *NPY_UNUSED(context))) \ + PyArray_API[278]) +#define PyArray_ConvertClipmodeSequence \ + (*(int (*)(PyObject *, NPY_CLIPMODE *, int)) \ + PyArray_API[279]) +#define PyArray_MatrixProduct2 \ + (*(PyObject * (*)(PyObject *, PyObject *, PyArrayObject*)) \ + PyArray_API[280]) +#define NpyIter_IsFirstVisit \ + (*(npy_bool (*)(NpyIter *, int)) \ + PyArray_API[281]) +#define PyArray_SetBaseObject \ + (*(int (*)(PyArrayObject *, PyObject *)) \ + PyArray_API[282]) +#define PyArray_CreateSortedStridePerm \ + (*(void (*)(int, npy_intp const *, npy_stride_sort_item *)) \ + PyArray_API[283]) +#define PyArray_RemoveAxesInPlace \ + (*(void (*)(PyArrayObject *, const npy_bool *)) \ + PyArray_API[284]) +#define PyArray_DebugPrint \ + (*(void (*)(PyArrayObject *)) \ + PyArray_API[285]) +#define PyArray_FailUnlessWriteable \ + (*(int (*)(PyArrayObject *, const char *)) \ + PyArray_API[286]) +#define PyArray_SetUpdateIfCopyBase \ + (*(int (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[287]) +#define PyDataMem_NEW \ + (*(void * (*)(size_t)) \ + PyArray_API[288]) +#define PyDataMem_FREE \ + (*(void (*)(void *)) \ + PyArray_API[289]) +#define PyDataMem_RENEW \ + (*(void * (*)(void *, size_t)) \ + PyArray_API[290]) +#define PyDataMem_SetEventHook 
\ + (*(PyDataMem_EventHookFunc * (*)(PyDataMem_EventHookFunc *, void *, void **)) \ + PyArray_API[291]) +#define NPY_DEFAULT_ASSIGN_CASTING (*(NPY_CASTING *)PyArray_API[292]) +#define PyArray_MapIterSwapAxes \ + (*(void (*)(PyArrayMapIterObject *, PyArrayObject **, int)) \ + PyArray_API[293]) +#define PyArray_MapIterArray \ + (*(PyObject * (*)(PyArrayObject *, PyObject *)) \ + PyArray_API[294]) +#define PyArray_MapIterNext \ + (*(void (*)(PyArrayMapIterObject *)) \ + PyArray_API[295]) +#define PyArray_Partition \ + (*(int (*)(PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND)) \ + PyArray_API[296]) +#define PyArray_ArgPartition \ + (*(PyObject * (*)(PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND)) \ + PyArray_API[297]) +#define PyArray_SelectkindConverter \ + (*(int (*)(PyObject *, NPY_SELECTKIND *)) \ + PyArray_API[298]) +#define PyDataMem_NEW_ZEROED \ + (*(void * (*)(size_t, size_t)) \ + PyArray_API[299]) +#define PyArray_CheckAnyScalarExact \ + (*(int (*)(PyObject *)) \ + PyArray_API[300]) +#define PyArray_MapIterArrayCopyIfOverlap \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, int, PyArrayObject *)) \ + PyArray_API[301]) +#define PyArray_ResolveWritebackIfCopy \ + (*(int (*)(PyArrayObject *)) \ + PyArray_API[302]) +#define PyArray_SetWritebackIfCopyBase \ + (*(int (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[303]) + +#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT) +static int +_import_array(void) +{ + int st; + PyObject *numpy = PyImport_ImportModule("numpy.core._multiarray_umath"); + PyObject *c_api = NULL; + + if (numpy == NULL) { + return -1; + } + c_api = PyObject_GetAttrString(numpy, "_ARRAY_API"); + Py_DECREF(numpy); + if (c_api == NULL) { + PyErr_SetString(PyExc_AttributeError, "_ARRAY_API not found"); + return -1; + } + + if (!PyCapsule_CheckExact(c_api)) { + PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCapsule object"); + Py_DECREF(c_api); + return -1; + } + PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL); + Py_DECREF(c_api); + if (PyArray_API == NULL) { + PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is NULL pointer"); + return -1; + } + + /* Perform runtime check of C API version */ + if (NPY_VERSION != PyArray_GetNDArrayCVersion()) { + PyErr_Format(PyExc_RuntimeError, "module compiled against "\ + "ABI version 0x%x but this version of numpy is 0x%x", \ + (int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion()); + return -1; + } + if (NPY_FEATURE_VERSION > PyArray_GetNDArrayCFeatureVersion()) { + PyErr_Format(PyExc_RuntimeError, "module compiled against "\ + "API version 0x%x but this version of numpy is 0x%x", \ + (int) NPY_FEATURE_VERSION, (int) PyArray_GetNDArrayCFeatureVersion()); + return -1; + } + + /* + * Perform runtime check of endianness and check it matches the one set by + * the headers (npy_endian.h) as a safeguard + */ + st = PyArray_GetEndianness(); + if (st == NPY_CPU_UNKNOWN_ENDIAN) { + PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as unknown endian"); + return -1; + } +#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN + if (st != NPY_CPU_BIG) { + PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\ + "big endian, but detected different endianness at runtime"); + return -1; + } +#elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN + if (st != NPY_CPU_LITTLE) { + PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\ + "little endian, but detected different endianness at runtime"); + return -1; + } +#endif + + return 0; +} + +#define import_array() {if (_import_array() < 0) {PyErr_Print(); 
PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return NULL; } } + +#define import_array1(ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return ret; } } + +#define import_array2(msg, ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, msg); return ret; } } + +#endif + +#endif diff --git a/MLPY/Lib/site-packages/numpy/core/include/numpy/__ufunc_api.h b/MLPY/Lib/site-packages/numpy/core/include/numpy/__ufunc_api.h new file mode 100644 index 0000000000000000000000000000000000000000..a284914f25a8591aae8229e258a50813b66c32fb --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/include/numpy/__ufunc_api.h @@ -0,0 +1,311 @@ + +#ifdef _UMATHMODULE + +extern NPY_NO_EXPORT PyTypeObject PyUFunc_Type; + +extern NPY_NO_EXPORT PyTypeObject PyUFunc_Type; + +NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndData \ + (PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int); +NPY_NO_EXPORT int PyUFunc_RegisterLoopForType \ + (PyUFuncObject *, int, PyUFuncGenericFunction, const int *, void *); +NPY_NO_EXPORT int PyUFunc_GenericFunction \ + (PyUFuncObject *NPY_UNUSED(ufunc), PyObject *NPY_UNUSED(args), PyObject *NPY_UNUSED(kwds), PyArrayObject **NPY_UNUSED(op)); +NPY_NO_EXPORT void PyUFunc_f_f_As_d_d \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_d_d \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_f_f \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_g_g \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_F_F_As_D_D \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_F_F \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_D_D \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_G_G \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_O_O \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_ff_f_As_dd_d \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_ff_f \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_dd_d \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_gg_g \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_FF_F_As_DD_D \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_DD_D \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_FF_F \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_GG_G \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_OO_O \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_O_O_method \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_OO_O_method \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_On_Om \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT int PyUFunc_GetPyValues \ + (char *, int *, int *, PyObject **); +NPY_NO_EXPORT int PyUFunc_checkfperr \ + (int, PyObject *, int *); +NPY_NO_EXPORT void PyUFunc_clearfperr \ + (void); +NPY_NO_EXPORT int PyUFunc_getfperr \ + (void); 
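+/*
+ * Editor's note -- an illustrative sketch, not part of the upstream headers:
+ * the PyArray_* names in the #else branch of __multiarray_api.h above all
+ * resolve through the PyArray_API pointer table, which import_array()
+ * (defined at the end of that header, just before this file) fetches from
+ * the _ARRAY_API capsule. A client extension therefore calls the macro once
+ * in its module init, before any other NumPy C-API use; the module name
+ * here is hypothetical:
+ *
+ *     #define PY_SSIZE_T_CLEAN
+ *     #include <Python.h>
+ *     #include <numpy/arrayobject.h>
+ *
+ *     static struct PyModuleDef examplemodule = {
+ *         PyModuleDef_HEAD_INIT, "example", NULL, -1, NULL
+ *     };
+ *
+ *     PyMODINIT_FUNC
+ *     PyInit_example(void)
+ *     {
+ *         import_array();   // returns NULL from this function on failure
+ *         return PyModule_Create(&examplemodule);
+ *     }
+ */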
+NPY_NO_EXPORT int PyUFunc_handlefperr \ + (int, PyObject *, int, int *); +NPY_NO_EXPORT int PyUFunc_ReplaceLoopBySignature \ + (PyUFuncObject *, PyUFuncGenericFunction, const int *, PyUFuncGenericFunction *); +NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndDataAndSignature \ + (PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int, const char *); +NPY_NO_EXPORT int PyUFunc_SetUsesArraysAsData \ + (void **NPY_UNUSED(data), size_t NPY_UNUSED(i)); +NPY_NO_EXPORT void PyUFunc_e_e \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_e_e_As_f_f \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_e_e_As_d_d \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_ee_e \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_ee_e_As_ff_f \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_ee_e_As_dd_d \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT int PyUFunc_DefaultTypeResolver \ + (PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyObject *, PyArray_Descr **); +NPY_NO_EXPORT int PyUFunc_ValidateCasting \ + (PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyArray_Descr **); +NPY_NO_EXPORT int PyUFunc_RegisterLoopForDescr \ + (PyUFuncObject *, PyArray_Descr *, PyUFuncGenericFunction, PyArray_Descr **, void *); +NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndDataAndSignatureAndIdentity \ + (PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, const int, const char *, PyObject *); + +#else + +#if defined(PY_UFUNC_UNIQUE_SYMBOL) +#define PyUFunc_API PY_UFUNC_UNIQUE_SYMBOL +#endif + +#if defined(NO_IMPORT) || defined(NO_IMPORT_UFUNC) +extern void **PyUFunc_API; +#else +#if defined(PY_UFUNC_UNIQUE_SYMBOL) +void **PyUFunc_API; +#else +static void **PyUFunc_API=NULL; +#endif +#endif + +#define PyUFunc_Type (*(PyTypeObject *)PyUFunc_API[0]) +#define PyUFunc_FromFuncAndData \ + (*(PyObject * (*)(PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int)) \ + PyUFunc_API[1]) +#define PyUFunc_RegisterLoopForType \ + (*(int (*)(PyUFuncObject *, int, PyUFuncGenericFunction, const int *, void *)) \ + PyUFunc_API[2]) +#define PyUFunc_GenericFunction \ + (*(int (*)(PyUFuncObject *NPY_UNUSED(ufunc), PyObject *NPY_UNUSED(args), PyObject *NPY_UNUSED(kwds), PyArrayObject **NPY_UNUSED(op))) \ + PyUFunc_API[3]) +#define PyUFunc_f_f_As_d_d \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[4]) +#define PyUFunc_d_d \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[5]) +#define PyUFunc_f_f \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[6]) +#define PyUFunc_g_g \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[7]) +#define PyUFunc_F_F_As_D_D \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[8]) +#define PyUFunc_F_F \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[9]) +#define PyUFunc_D_D \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[10]) +#define PyUFunc_G_G \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[11]) +#define PyUFunc_O_O \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[12]) +#define 
PyUFunc_ff_f_As_dd_d \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[13]) +#define PyUFunc_ff_f \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[14]) +#define PyUFunc_dd_d \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[15]) +#define PyUFunc_gg_g \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[16]) +#define PyUFunc_FF_F_As_DD_D \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[17]) +#define PyUFunc_DD_D \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[18]) +#define PyUFunc_FF_F \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[19]) +#define PyUFunc_GG_G \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[20]) +#define PyUFunc_OO_O \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[21]) +#define PyUFunc_O_O_method \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[22]) +#define PyUFunc_OO_O_method \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[23]) +#define PyUFunc_On_Om \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[24]) +#define PyUFunc_GetPyValues \ + (*(int (*)(char *, int *, int *, PyObject **)) \ + PyUFunc_API[25]) +#define PyUFunc_checkfperr \ + (*(int (*)(int, PyObject *, int *)) \ + PyUFunc_API[26]) +#define PyUFunc_clearfperr \ + (*(void (*)(void)) \ + PyUFunc_API[27]) +#define PyUFunc_getfperr \ + (*(int (*)(void)) \ + PyUFunc_API[28]) +#define PyUFunc_handlefperr \ + (*(int (*)(int, PyObject *, int, int *)) \ + PyUFunc_API[29]) +#define PyUFunc_ReplaceLoopBySignature \ + (*(int (*)(PyUFuncObject *, PyUFuncGenericFunction, const int *, PyUFuncGenericFunction *)) \ + PyUFunc_API[30]) +#define PyUFunc_FromFuncAndDataAndSignature \ + (*(PyObject * (*)(PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int, const char *)) \ + PyUFunc_API[31]) +#define PyUFunc_SetUsesArraysAsData \ + (*(int (*)(void **NPY_UNUSED(data), size_t NPY_UNUSED(i))) \ + PyUFunc_API[32]) +#define PyUFunc_e_e \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[33]) +#define PyUFunc_e_e_As_f_f \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[34]) +#define PyUFunc_e_e_As_d_d \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[35]) +#define PyUFunc_ee_e \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[36]) +#define PyUFunc_ee_e_As_ff_f \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[37]) +#define PyUFunc_ee_e_As_dd_d \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[38]) +#define PyUFunc_DefaultTypeResolver \ + (*(int (*)(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyObject *, PyArray_Descr **)) \ + PyUFunc_API[39]) +#define PyUFunc_ValidateCasting \ + (*(int (*)(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyArray_Descr **)) \ + PyUFunc_API[40]) +#define PyUFunc_RegisterLoopForDescr \ + (*(int (*)(PyUFuncObject *, PyArray_Descr *, PyUFuncGenericFunction, PyArray_Descr **, void *)) \ + PyUFunc_API[41]) +#define PyUFunc_FromFuncAndDataAndSignatureAndIdentity \ + (*(PyObject * (*)(PyUFuncGenericFunction *, void **, char *, 
int, int, int, int, const char *, const char *, const int, const char *, PyObject *)) \
+        PyUFunc_API[42])
+
+static NPY_INLINE int
+_import_umath(void)
+{
+  PyObject *numpy = PyImport_ImportModule("numpy.core._multiarray_umath");
+  PyObject *c_api = NULL;
+
+  if (numpy == NULL) {
+      PyErr_SetString(PyExc_ImportError,
+                      "numpy.core._multiarray_umath failed to import");
+      return -1;
+  }
+  c_api = PyObject_GetAttrString(numpy, "_UFUNC_API");
+  Py_DECREF(numpy);
+  if (c_api == NULL) {
+      PyErr_SetString(PyExc_AttributeError, "_UFUNC_API not found");
+      return -1;
+  }
+
+  if (!PyCapsule_CheckExact(c_api)) {
+      PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is not PyCapsule object");
+      Py_DECREF(c_api);
+      return -1;
+  }
+  PyUFunc_API = (void **)PyCapsule_GetPointer(c_api, NULL);
+  Py_DECREF(c_api);
+  if (PyUFunc_API == NULL) {
+      PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is NULL pointer");
+      return -1;
+  }
+  return 0;
+}
+
+#define import_umath() \
+  do {\
+    UFUNC_NOFPE\
+    if (_import_umath() < 0) {\
+      PyErr_Print();\
+      PyErr_SetString(PyExc_ImportError,\
+                      "numpy.core.umath failed to import");\
+      return NULL;\
+    }\
+  } while(0)
+
+#define import_umath1(ret) \
+  do {\
+    UFUNC_NOFPE\
+    if (_import_umath() < 0) {\
+      PyErr_Print();\
+      PyErr_SetString(PyExc_ImportError,\
+                      "numpy.core.umath failed to import");\
+      return ret;\
+    }\
+  } while(0)
+
+#define import_umath2(ret, msg) \
+  do {\
+    UFUNC_NOFPE\
+    if (_import_umath() < 0) {\
+      PyErr_Print();\
+      PyErr_SetString(PyExc_ImportError, msg);\
+      return ret;\
+    }\
+  } while(0)
+
+#define import_ufunc() \
+  do {\
+    UFUNC_NOFPE\
+    if (_import_umath() < 0) {\
+      PyErr_Print();\
+      PyErr_SetString(PyExc_ImportError,\
+                      "numpy.core.umath failed to import");\
+    }\
+  } while(0)
+
+#endif
diff --git a/MLPY/Lib/site-packages/numpy/core/include/numpy/_neighborhood_iterator_imp.h b/MLPY/Lib/site-packages/numpy/core/include/numpy/_neighborhood_iterator_imp.h
new file mode 100644
index 0000000000000000000000000000000000000000..5c606a95e0055717d7de7ae5bed4b6b10962e39f
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/core/include/numpy/_neighborhood_iterator_imp.h
@@ -0,0 +1,90 @@
+#ifndef _NPY_INCLUDE_NEIGHBORHOOD_IMP
+#error You should not include this header directly
+#endif
+/*
+ * Private API (here for inline)
+ */
+static NPY_INLINE int
+_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter);
+
+/*
+ * Update to next item of the iterator
+ *
+ * Note: this simply increments the coordinates vector, last dimension
+ * incremented first, i.e., for dimension 3
+ * ...
+ * -1, -1, -1
+ * -1, -1, 0
+ * -1, -1, 1
+ * ....
+ * -1, 0, -1
+ * -1, 0, 0
+ * ....
+ * 0, -1, -1
+ * 0, -1, 0
+ * ....
+ */ +#define _UPDATE_COORD_ITER(c) \ + wb = iter->coordinates[c] < iter->bounds[c][1]; \ + if (wb) { \ + iter->coordinates[c] += 1; \ + return 0; \ + } \ + else { \ + iter->coordinates[c] = iter->bounds[c][0]; \ + } + +static NPY_INLINE int +_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter) +{ + npy_intp i, wb; + + for (i = iter->nd - 1; i >= 0; --i) { + _UPDATE_COORD_ITER(i) + } + + return 0; +} + +/* + * Version optimized for 2d arrays, manual loop unrolling + */ +static NPY_INLINE int +_PyArrayNeighborhoodIter_IncrCoord2D(PyArrayNeighborhoodIterObject* iter) +{ + npy_intp wb; + + _UPDATE_COORD_ITER(1) + _UPDATE_COORD_ITER(0) + + return 0; +} +#undef _UPDATE_COORD_ITER + +/* + * Advance to the next neighbour + */ +static NPY_INLINE int +PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter) +{ + _PyArrayNeighborhoodIter_IncrCoord (iter); + iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates); + + return 0; +} + +/* + * Reset functions + */ +static NPY_INLINE int +PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter) +{ + npy_intp i; + + for (i = 0; i < iter->nd; ++i) { + iter->coordinates[i] = iter->bounds[i][0]; + } + iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates); + + return 0; +} diff --git a/MLPY/Lib/site-packages/numpy/core/include/numpy/_numpyconfig.h b/MLPY/Lib/site-packages/numpy/core/include/numpy/_numpyconfig.h new file mode 100644 index 0000000000000000000000000000000000000000..5b7f4ceed95c6e9b0d46c24955cd9ca889207bc4 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/include/numpy/_numpyconfig.h @@ -0,0 +1,29 @@ +#define NPY_SIZEOF_SHORT SIZEOF_SHORT +#define NPY_SIZEOF_INT SIZEOF_INT +#define NPY_SIZEOF_LONG SIZEOF_LONG +#define NPY_SIZEOF_FLOAT 4 +#define NPY_SIZEOF_COMPLEX_FLOAT 8 +#define NPY_SIZEOF_DOUBLE 8 +#define NPY_SIZEOF_COMPLEX_DOUBLE 16 +#define NPY_SIZEOF_LONGDOUBLE 8 +#define NPY_SIZEOF_COMPLEX_LONGDOUBLE 16 +#define NPY_SIZEOF_PY_INTPTR_T 8 +#define NPY_SIZEOF_OFF_T 4 +#define NPY_SIZEOF_PY_LONG_LONG 8 +#define NPY_SIZEOF_LONGLONG 8 +#define NPY_NO_SIGNAL 1 +#define NPY_NO_SMP 0 +#define NPY_HAVE_DECL_ISNAN +#define NPY_HAVE_DECL_ISINF +#define NPY_HAVE_DECL_SIGNBIT +#define NPY_HAVE_DECL_ISFINITE +#define NPY_USE_C99_COMPLEX 1 +#define NPY_RELAXED_STRIDES_CHECKING 1 +#define NPY_USE_C99_FORMATS 1 +#define NPY_VISIBILITY_HIDDEN +#define NPY_ABI_VERSION 0x01000009 +#define NPY_API_VERSION 0x0000000E + +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS 1 +#endif diff --git a/MLPY/Lib/site-packages/numpy/core/include/numpy/arrayobject.h b/MLPY/Lib/site-packages/numpy/core/include/numpy/arrayobject.h new file mode 100644 index 0000000000000000000000000000000000000000..c5ea8366c86006e5b76161653267822839d84a57 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/include/numpy/arrayobject.h @@ -0,0 +1,11 @@ +#ifndef Py_ARRAYOBJECT_H +#define Py_ARRAYOBJECT_H + +#include "ndarrayobject.h" +#include "npy_interrupt.h" + +#ifdef NPY_NO_PREFIX +#include "noprefix.h" +#endif + +#endif diff --git a/MLPY/Lib/site-packages/numpy/core/include/numpy/arrayscalars.h b/MLPY/Lib/site-packages/numpy/core/include/numpy/arrayscalars.h new file mode 100644 index 0000000000000000000000000000000000000000..e43d7dff99204d780d4db1c1caefc19828dcc2be --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/include/numpy/arrayscalars.h @@ -0,0 +1,182 @@ +#ifndef _NPY_ARRAYSCALARS_H_ +#define _NPY_ARRAYSCALARS_H_ + +#ifndef _MULTIARRAYMODULE +typedef struct { + PyObject_HEAD + npy_bool 
obval; +} PyBoolScalarObject; +#endif + + +typedef struct { + PyObject_HEAD + signed char obval; +} PyByteScalarObject; + + +typedef struct { + PyObject_HEAD + short obval; +} PyShortScalarObject; + + +typedef struct { + PyObject_HEAD + int obval; +} PyIntScalarObject; + + +typedef struct { + PyObject_HEAD + long obval; +} PyLongScalarObject; + + +typedef struct { + PyObject_HEAD + npy_longlong obval; +} PyLongLongScalarObject; + + +typedef struct { + PyObject_HEAD + unsigned char obval; +} PyUByteScalarObject; + + +typedef struct { + PyObject_HEAD + unsigned short obval; +} PyUShortScalarObject; + + +typedef struct { + PyObject_HEAD + unsigned int obval; +} PyUIntScalarObject; + + +typedef struct { + PyObject_HEAD + unsigned long obval; +} PyULongScalarObject; + + +typedef struct { + PyObject_HEAD + npy_ulonglong obval; +} PyULongLongScalarObject; + + +typedef struct { + PyObject_HEAD + npy_half obval; +} PyHalfScalarObject; + + +typedef struct { + PyObject_HEAD + float obval; +} PyFloatScalarObject; + + +typedef struct { + PyObject_HEAD + double obval; +} PyDoubleScalarObject; + + +typedef struct { + PyObject_HEAD + npy_longdouble obval; +} PyLongDoubleScalarObject; + + +typedef struct { + PyObject_HEAD + npy_cfloat obval; +} PyCFloatScalarObject; + + +typedef struct { + PyObject_HEAD + npy_cdouble obval; +} PyCDoubleScalarObject; + + +typedef struct { + PyObject_HEAD + npy_clongdouble obval; +} PyCLongDoubleScalarObject; + + +typedef struct { + PyObject_HEAD + PyObject * obval; +} PyObjectScalarObject; + +typedef struct { + PyObject_HEAD + npy_datetime obval; + PyArray_DatetimeMetaData obmeta; +} PyDatetimeScalarObject; + +typedef struct { + PyObject_HEAD + npy_timedelta obval; + PyArray_DatetimeMetaData obmeta; +} PyTimedeltaScalarObject; + + +typedef struct { + PyObject_HEAD + char obval; +} PyScalarObject; + +#define PyStringScalarObject PyBytesObject +typedef struct { + /* note that the PyObject_HEAD macro lives right here */ + PyUnicodeObject base; + Py_UCS4 *obval; + char *buffer_fmt; +} PyUnicodeScalarObject; + + +typedef struct { + PyObject_VAR_HEAD + char *obval; + PyArray_Descr *descr; + int flags; + PyObject *base; + void *_buffer_info; /* private buffer info, tagged to allow warning */ +} PyVoidScalarObject; + +/* Macros + PyScalarObject + PyArrType_Type + are defined in ndarrayobject.h +*/ + +#define PyArrayScalar_False ((PyObject *)(&(_PyArrayScalar_BoolValues[0]))) +#define PyArrayScalar_True ((PyObject *)(&(_PyArrayScalar_BoolValues[1]))) +#define PyArrayScalar_FromLong(i) \ + ((PyObject *)(&(_PyArrayScalar_BoolValues[((i)!=0)]))) +#define PyArrayScalar_RETURN_BOOL_FROM_LONG(i) \ + return Py_INCREF(PyArrayScalar_FromLong(i)), \ + PyArrayScalar_FromLong(i) +#define PyArrayScalar_RETURN_FALSE \ + return Py_INCREF(PyArrayScalar_False), \ + PyArrayScalar_False +#define PyArrayScalar_RETURN_TRUE \ + return Py_INCREF(PyArrayScalar_True), \ + PyArrayScalar_True + +#define PyArrayScalar_New(cls) \ + Py##cls##ArrType_Type.tp_alloc(&Py##cls##ArrType_Type, 0) +#define PyArrayScalar_VAL(obj, cls) \ + ((Py##cls##ScalarObject *)obj)->obval +#define PyArrayScalar_ASSIGN(obj, cls, val) \ + PyArrayScalar_VAL(obj, cls) = val + +#endif diff --git a/MLPY/Lib/site-packages/numpy/core/include/numpy/halffloat.h b/MLPY/Lib/site-packages/numpy/core/include/numpy/halffloat.h new file mode 100644 index 0000000000000000000000000000000000000000..e69340b2aad8c5ec605287b0c2b31ebfc5c476e6 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/include/numpy/halffloat.h @@ -0,0 +1,70 @@ +#ifndef 
__NPY_HALFFLOAT_H__
+#define __NPY_HALFFLOAT_H__
+
+#include <Python.h>
+#include <numpy/npy_math.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Half-precision routines
+ */
+
+/* Conversions */
+float npy_half_to_float(npy_half h);
+double npy_half_to_double(npy_half h);
+npy_half npy_float_to_half(float f);
+npy_half npy_double_to_half(double d);
+/* Comparisons */
+int npy_half_eq(npy_half h1, npy_half h2);
+int npy_half_ne(npy_half h1, npy_half h2);
+int npy_half_le(npy_half h1, npy_half h2);
+int npy_half_lt(npy_half h1, npy_half h2);
+int npy_half_ge(npy_half h1, npy_half h2);
+int npy_half_gt(npy_half h1, npy_half h2);
+/* faster *_nonan variants for when you know h1 and h2 are not NaN */
+int npy_half_eq_nonan(npy_half h1, npy_half h2);
+int npy_half_lt_nonan(npy_half h1, npy_half h2);
+int npy_half_le_nonan(npy_half h1, npy_half h2);
+/* Miscellaneous functions */
+int npy_half_iszero(npy_half h);
+int npy_half_isnan(npy_half h);
+int npy_half_isinf(npy_half h);
+int npy_half_isfinite(npy_half h);
+int npy_half_signbit(npy_half h);
+npy_half npy_half_copysign(npy_half x, npy_half y);
+npy_half npy_half_spacing(npy_half h);
+npy_half npy_half_nextafter(npy_half x, npy_half y);
+npy_half npy_half_divmod(npy_half x, npy_half y, npy_half *modulus);
+
+/*
+ * Half-precision constants
+ */
+
+#define NPY_HALF_ZERO   (0x0000u)
+#define NPY_HALF_PZERO  (0x0000u)
+#define NPY_HALF_NZERO  (0x8000u)
+#define NPY_HALF_ONE    (0x3c00u)
+#define NPY_HALF_NEGONE (0xbc00u)
+#define NPY_HALF_PINF   (0x7c00u)
+#define NPY_HALF_NINF   (0xfc00u)
+#define NPY_HALF_NAN    (0x7e00u)
+
+#define NPY_MAX_HALF    (0x7bffu)
+
+/*
+ * Bit-level conversions
+ */
+
+npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f);
+npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d);
+npy_uint32 npy_halfbits_to_floatbits(npy_uint16 h);
+npy_uint64 npy_halfbits_to_doublebits(npy_uint16 h);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/MLPY/Lib/site-packages/numpy/core/include/numpy/libdivide/LICENSE.txt b/MLPY/Lib/site-packages/numpy/core/include/numpy/libdivide/LICENSE.txt
new file mode 100644
index 0000000000000000000000000000000000000000..375a7faa07019536b6b9aad67949fbfa5b4c12db
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/core/include/numpy/libdivide/LICENSE.txt
@@ -0,0 +1,21 @@
+  zlib License
+  ------------
+
+  Copyright (C) 2010 - 2019 ridiculous_fish, <libdivide@ridiculousfish.com>
+  Copyright (C) 2016 - 2019 Kim Walisch, <kim.walisch@gmail.com>
+
+  This software is provided 'as-is', without any express or implied
+  warranty. In no event will the authors be held liable for any damages
+  arising from the use of this software.
+
+  Permission is granted to anyone to use this software for any purpose,
+  including commercial applications, and to alter it and redistribute it
+  freely, subject to the following restrictions:
+
+  1. The origin of this software must not be misrepresented; you must not
+     claim that you wrote the original software. If you use this software
+     in a product, an acknowledgment in the product documentation would be
+     appreciated but is not required.
+  2. Altered source versions must be plainly marked as such, and must not be
+     misrepresented as being the original software.
+  3. This notice may not be removed or altered from any source distribution.
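Usage note (illustrative only, not part of the vendored sources): the header added below replaces division by a runtime-constant divisor with a precomputed multiply-and-shift. A minimal sketch against the C API it declares (libdivide_u32_gen / libdivide_u32_do / libdivide_u32_recover), assuming libdivide.h is on the include path:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include "libdivide.h"

    int main(void) {
        uint32_t d = 7;  /* divisor known only at runtime */
        /* One-time setup: compute the magic number and shift for d. */
        struct libdivide_u32_t fast = libdivide_u32_gen(d);
        for (uint32_t n = 0; n < 100000; n++) {
            /* Fast path: one high multiply plus shifts, no div instruction. */
            uint32_t q = libdivide_u32_do(n, &fast);
            assert(q == n / d);
        }
        /* recover() inverts gen(), returning the original divisor. */
        printf("recovered divisor: %u\n", libdivide_u32_recover(&fast));
        return 0;
    }

The branchfree variants (libdivide_u32_branchfree_gen / _do) give straight-line code in the hot loop, at the cost that the divisor must not be 1.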
diff --git a/MLPY/Lib/site-packages/numpy/core/include/numpy/libdivide/libdivide.h b/MLPY/Lib/site-packages/numpy/core/include/numpy/libdivide/libdivide.h
new file mode 100644
index 0000000000000000000000000000000000000000..9da7c9035ed8b3dc23520f68e3aa08b8ca4f2302
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/core/include/numpy/libdivide/libdivide.h
@@ -0,0 +1,2079 @@
+// libdivide.h - Optimized integer division
+// https://libdivide.com
+//
+// Copyright (C) 2010 - 2019 ridiculous_fish, <libdivide@ridiculousfish.com>
+// Copyright (C) 2016 - 2019 Kim Walisch, <kim.walisch@gmail.com>
+//
+// libdivide is dual-licensed under the Boost or zlib licenses.
+// You may use libdivide under the terms of either of these.
+// See LICENSE.txt for more details.

+#ifndef LIBDIVIDE_H
+#define LIBDIVIDE_H
+
+#define LIBDIVIDE_VERSION "3.0"
+#define LIBDIVIDE_VERSION_MAJOR 3
+#define LIBDIVIDE_VERSION_MINOR 0
+
+#include <stdint.h>
+
+#if defined(__cplusplus)
+    #include <cstdio>
+    #include <cstdlib>
+    #include <type_traits>
+#else
+    #include <stdio.h>
+    #include <stdlib.h>
+#endif
+
+#if defined(LIBDIVIDE_AVX512)
+    #include <immintrin.h>
+#elif defined(LIBDIVIDE_AVX2)
+    #include <immintrin.h>
+#elif defined(LIBDIVIDE_SSE2)
+    #include <emmintrin.h>
+#endif
+
+#if defined(_MSC_VER)
+    #include <intrin.h>
+    // disable warning C4146: unary minus operator applied
+    // to unsigned type, result still unsigned
+    #pragma warning(disable: 4146)
+    #define LIBDIVIDE_VC
+#endif
+
+#if !defined(__has_builtin)
+    #define __has_builtin(x) 0
+#endif
+
+#if defined(__SIZEOF_INT128__)
+    #define HAS_INT128_T
+    // clang-cl on Windows does not yet support 128-bit division
+    #if !(defined(__clang__) && defined(LIBDIVIDE_VC))
+        #define HAS_INT128_DIV
+    #endif
+#endif
+
+#if defined(__x86_64__) || defined(_M_X64)
+    #define LIBDIVIDE_X86_64
+#endif
+
+#if defined(__i386__)
+    #define LIBDIVIDE_i386
+#endif
+
+#if defined(__GNUC__) || defined(__clang__)
+    #define LIBDIVIDE_GCC_STYLE_ASM
+#endif
+
+#if defined(__cplusplus) || defined(LIBDIVIDE_VC)
+    #define LIBDIVIDE_FUNCTION __FUNCTION__
+#else
+    #define LIBDIVIDE_FUNCTION __func__
+#endif
+
+#define LIBDIVIDE_ERROR(msg) \
+    do { \
+        fprintf(stderr, "libdivide.h:%d: %s(): Error: %s\n", \
+            __LINE__, LIBDIVIDE_FUNCTION, msg); \
+        abort(); \
+    } while (0)
+
+#if defined(LIBDIVIDE_ASSERTIONS_ON)
+    #define LIBDIVIDE_ASSERT(x) \
+        do { \
+            if (!(x)) { \
+                fprintf(stderr, "libdivide.h:%d: %s(): Assertion failed: %s\n", \
+                    __LINE__, LIBDIVIDE_FUNCTION, #x); \
+                abort(); \
+            } \
+        } while (0)
+#else
+    #define LIBDIVIDE_ASSERT(x)
+#endif
+
+#ifdef __cplusplus
+namespace libdivide {
+#endif
+
+// pack divider structs to prevent compilers from padding.
+// This reduces memory usage by up to 43% when using a large
+// array of libdivide dividers and improves performance
+// by up to 10% because of reduced memory bandwidth.
+#pragma pack(push, 1)
+
+struct libdivide_u32_t {
+    uint32_t magic;
+    uint8_t more;
+};
+
+struct libdivide_s32_t {
+    int32_t magic;
+    uint8_t more;
+};
+
+struct libdivide_u64_t {
+    uint64_t magic;
+    uint8_t more;
+};
+
+struct libdivide_s64_t {
+    int64_t magic;
+    uint8_t more;
+};
+
+struct libdivide_u32_branchfree_t {
+    uint32_t magic;
+    uint8_t more;
+};
+
+struct libdivide_s32_branchfree_t {
+    int32_t magic;
+    uint8_t more;
+};
+
+struct libdivide_u64_branchfree_t {
+    uint64_t magic;
+    uint8_t more;
+};
+
+struct libdivide_s64_branchfree_t {
+    int64_t magic;
+    uint8_t more;
+};
+
+#pragma pack(pop)
+
+// Explanation of the "more" field:
+//
+// * Bits 0-5 are the shift value (for shift path or mult path).
+// * Bit 6 is the add indicator for mult path.
+// * Bit 7 is set if the divisor is negative. We use bit 7 as the negative
+//   divisor indicator so that we can efficiently use sign extension to
+//   create a bitmask with all bits set to 1 (if the divisor is negative)
+//   or 0 (if the divisor is positive).
+//
+// u32: [0-4] shift value
+//      [5] ignored
+//      [6] add indicator
+//      magic number of 0 indicates shift path
+//
+// s32: [0-4] shift value
+//      [5] ignored
+//      [6] add indicator
+//      [7] indicates negative divisor
+//      magic number of 0 indicates shift path
+//
+// u64: [0-5] shift value
+//      [6] add indicator
+//      magic number of 0 indicates shift path
+//
+// s64: [0-5] shift value
+//      [6] add indicator
+//      [7] indicates negative divisor
+//      magic number of 0 indicates shift path
+//
+// In s32 and s64 branchfull modes, the magic number is negated according to
+// whether the divisor is negated. In branchfree strategy, it is not negated.
+
+enum {
+    LIBDIVIDE_32_SHIFT_MASK = 0x1F,
+    LIBDIVIDE_64_SHIFT_MASK = 0x3F,
+    LIBDIVIDE_ADD_MARKER = 0x40,
+    LIBDIVIDE_NEGATIVE_DIVISOR = 0x80
+};
+
+static inline struct libdivide_s32_t libdivide_s32_gen(int32_t d);
+static inline struct libdivide_u32_t libdivide_u32_gen(uint32_t d);
+static inline struct libdivide_s64_t libdivide_s64_gen(int64_t d);
+static inline struct libdivide_u64_t libdivide_u64_gen(uint64_t d);
+
+static inline struct libdivide_s32_branchfree_t libdivide_s32_branchfree_gen(int32_t d);
+static inline struct libdivide_u32_branchfree_t libdivide_u32_branchfree_gen(uint32_t d);
+static inline struct libdivide_s64_branchfree_t libdivide_s64_branchfree_gen(int64_t d);
+static inline struct libdivide_u64_branchfree_t libdivide_u64_branchfree_gen(uint64_t d);
+
+static inline int32_t libdivide_s32_do(int32_t numer, const struct libdivide_s32_t *denom);
+static inline uint32_t libdivide_u32_do(uint32_t numer, const struct libdivide_u32_t *denom);
+static inline int64_t libdivide_s64_do(int64_t numer, const struct libdivide_s64_t *denom);
+static inline uint64_t libdivide_u64_do(uint64_t numer, const struct libdivide_u64_t *denom);
+
+static inline int32_t libdivide_s32_branchfree_do(int32_t numer, const struct libdivide_s32_branchfree_t *denom);
+static inline uint32_t libdivide_u32_branchfree_do(uint32_t numer, const struct libdivide_u32_branchfree_t *denom);
+static inline int64_t libdivide_s64_branchfree_do(int64_t numer, const struct libdivide_s64_branchfree_t *denom);
+static inline uint64_t libdivide_u64_branchfree_do(uint64_t numer, const struct libdivide_u64_branchfree_t *denom);
+
+static inline int32_t libdivide_s32_recover(const struct libdivide_s32_t *denom);
+static inline uint32_t libdivide_u32_recover(const struct libdivide_u32_t *denom);
+static inline int64_t libdivide_s64_recover(const struct libdivide_s64_t *denom);
+static inline uint64_t libdivide_u64_recover(const struct libdivide_u64_t *denom);
+
+static inline int32_t libdivide_s32_branchfree_recover(const struct libdivide_s32_branchfree_t *denom);
+static inline uint32_t libdivide_u32_branchfree_recover(const struct libdivide_u32_branchfree_t *denom);
+static inline int64_t libdivide_s64_branchfree_recover(const struct libdivide_s64_branchfree_t *denom);
+static inline uint64_t libdivide_u64_branchfree_recover(const struct libdivide_u64_branchfree_t *denom);
+
+//////// Internal Utility Functions
+
+static inline uint32_t libdivide_mullhi_u32(uint32_t x, uint32_t y) {
+    uint64_t xl = x, yl = y;
+    uint64_t rl = xl * yl;
+    return (uint32_t)(rl >> 32);
+}
+
+static inline int32_t libdivide_mullhi_s32(int32_t x, int32_t y) {
+    int64_t xl = x, yl = y;
int64_t rl = xl * yl; + // needs to be arithmetic shift + return (int32_t)(rl >> 32); +} + +static inline uint64_t libdivide_mullhi_u64(uint64_t x, uint64_t y) { +#if defined(LIBDIVIDE_VC) && \ + defined(LIBDIVIDE_X86_64) + return __umulh(x, y); +#elif defined(HAS_INT128_T) + __uint128_t xl = x, yl = y; + __uint128_t rl = xl * yl; + return (uint64_t)(rl >> 64); +#else + // full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64) + uint32_t mask = 0xFFFFFFFF; + uint32_t x0 = (uint32_t)(x & mask); + uint32_t x1 = (uint32_t)(x >> 32); + uint32_t y0 = (uint32_t)(y & mask); + uint32_t y1 = (uint32_t)(y >> 32); + uint32_t x0y0_hi = libdivide_mullhi_u32(x0, y0); + uint64_t x0y1 = x0 * (uint64_t)y1; + uint64_t x1y0 = x1 * (uint64_t)y0; + uint64_t x1y1 = x1 * (uint64_t)y1; + uint64_t temp = x1y0 + x0y0_hi; + uint64_t temp_lo = temp & mask; + uint64_t temp_hi = temp >> 32; + + return x1y1 + temp_hi + ((temp_lo + x0y1) >> 32); +#endif +} + +static inline int64_t libdivide_mullhi_s64(int64_t x, int64_t y) { +#if defined(LIBDIVIDE_VC) && \ + defined(LIBDIVIDE_X86_64) + return __mulh(x, y); +#elif defined(HAS_INT128_T) + __int128_t xl = x, yl = y; + __int128_t rl = xl * yl; + return (int64_t)(rl >> 64); +#else + // full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64) + uint32_t mask = 0xFFFFFFFF; + uint32_t x0 = (uint32_t)(x & mask); + uint32_t y0 = (uint32_t)(y & mask); + int32_t x1 = (int32_t)(x >> 32); + int32_t y1 = (int32_t)(y >> 32); + uint32_t x0y0_hi = libdivide_mullhi_u32(x0, y0); + int64_t t = x1 * (int64_t)y0 + x0y0_hi; + int64_t w1 = x0 * (int64_t)y1 + (t & mask); + + return x1 * (int64_t)y1 + (t >> 32) + (w1 >> 32); +#endif +} + +static inline int32_t libdivide_count_leading_zeros32(uint32_t val) { +#if defined(__GNUC__) || \ + __has_builtin(__builtin_clz) + // Fast way to count leading zeros + return __builtin_clz(val); +#elif defined(LIBDIVIDE_VC) + unsigned long result; + if (_BitScanReverse(&result, val)) { + return 31 - result; + } + return 0; +#else + if (val == 0) + return 32; + int32_t result = 8; + uint32_t hi = 0xFFU << 24; + while ((val & hi) == 0) { + hi >>= 8; + result += 8; + } + while (val & hi) { + result -= 1; + hi <<= 1; + } + return result; +#endif +} + +static inline int32_t libdivide_count_leading_zeros64(uint64_t val) { +#if defined(__GNUC__) || \ + __has_builtin(__builtin_clzll) + // Fast way to count leading zeros + return __builtin_clzll(val); +#elif defined(LIBDIVIDE_VC) && defined(_WIN64) + unsigned long result; + if (_BitScanReverse64(&result, val)) { + return 63 - result; + } + return 0; +#else + uint32_t hi = val >> 32; + uint32_t lo = val & 0xFFFFFFFF; + if (hi != 0) return libdivide_count_leading_zeros32(hi); + return 32 + libdivide_count_leading_zeros32(lo); +#endif +} + +// libdivide_64_div_32_to_32: divides a 64-bit uint {u1, u0} by a 32-bit +// uint {v}. The result must fit in 32 bits. 
+// Returns the quotient directly and the remainder in *r +static inline uint32_t libdivide_64_div_32_to_32(uint32_t u1, uint32_t u0, uint32_t v, uint32_t *r) { +#if (defined(LIBDIVIDE_i386) || defined(LIBDIVIDE_X86_64)) && \ + defined(LIBDIVIDE_GCC_STYLE_ASM) + uint32_t result; + __asm__("divl %[v]" + : "=a"(result), "=d"(*r) + : [v] "r"(v), "a"(u0), "d"(u1) + ); + return result; +#else + uint64_t n = ((uint64_t)u1 << 32) | u0; + uint32_t result = (uint32_t)(n / v); + *r = (uint32_t)(n - result * (uint64_t)v); + return result; +#endif +} + +// libdivide_128_div_64_to_64: divides a 128-bit uint {u1, u0} by a 64-bit +// uint {v}. The result must fit in 64 bits. +// Returns the quotient directly and the remainder in *r +static uint64_t libdivide_128_div_64_to_64(uint64_t u1, uint64_t u0, uint64_t v, uint64_t *r) { +#if defined(LIBDIVIDE_X86_64) && \ + defined(LIBDIVIDE_GCC_STYLE_ASM) + uint64_t result; + __asm__("divq %[v]" + : "=a"(result), "=d"(*r) + : [v] "r"(v), "a"(u0), "d"(u1) + ); + return result; +#elif defined(HAS_INT128_T) && \ + defined(HAS_INT128_DIV) + __uint128_t n = ((__uint128_t)u1 << 64) | u0; + uint64_t result = (uint64_t)(n / v); + *r = (uint64_t)(n - result * (__uint128_t)v); + return result; +#else + // Code taken from Hacker's Delight: + // http://www.hackersdelight.org/HDcode/divlu.c. + // License permits inclusion here per: + // http://www.hackersdelight.org/permissions.htm + + const uint64_t b = (1ULL << 32); // Number base (32 bits) + uint64_t un1, un0; // Norm. dividend LSD's + uint64_t vn1, vn0; // Norm. divisor digits + uint64_t q1, q0; // Quotient digits + uint64_t un64, un21, un10; // Dividend digit pairs + uint64_t rhat; // A remainder + int32_t s; // Shift amount for norm + + // If overflow, set rem. to an impossible value, + // and return the largest possible quotient + if (u1 >= v) { + *r = (uint64_t) -1; + return (uint64_t) -1; + } + + // count leading zeros + s = libdivide_count_leading_zeros64(v); + if (s > 0) { + // Normalize divisor + v = v << s; + un64 = (u1 << s) | (u0 >> (64 - s)); + un10 = u0 << s; // Shift dividend left + } else { + // Avoid undefined behavior of (u0 >> 64). + // The behavior is undefined if the right operand is + // negative, or greater than or equal to the length + // in bits of the promoted left operand. 
+ un64 = u1; + un10 = u0; + } + + // Break divisor up into two 32-bit digits + vn1 = v >> 32; + vn0 = v & 0xFFFFFFFF; + + // Break right half of dividend into two digits + un1 = un10 >> 32; + un0 = un10 & 0xFFFFFFFF; + + // Compute the first quotient digit, q1 + q1 = un64 / vn1; + rhat = un64 - q1 * vn1; + + while (q1 >= b || q1 * vn0 > b * rhat + un1) { + q1 = q1 - 1; + rhat = rhat + vn1; + if (rhat >= b) + break; + } + + // Multiply and subtract + un21 = un64 * b + un1 - q1 * v; + + // Compute the second quotient digit + q0 = un21 / vn1; + rhat = un21 - q0 * vn1; + + while (q0 >= b || q0 * vn0 > b * rhat + un0) { + q0 = q0 - 1; + rhat = rhat + vn1; + if (rhat >= b) + break; + } + + *r = (un21 * b + un0 - q0 * v) >> s; + return q1 * b + q0; +#endif +} + +// Bitshift a u128 in place, left (signed_shift > 0) or right (signed_shift < 0) +static inline void libdivide_u128_shift(uint64_t *u1, uint64_t *u0, int32_t signed_shift) { + if (signed_shift > 0) { + uint32_t shift = signed_shift; + *u1 <<= shift; + *u1 |= *u0 >> (64 - shift); + *u0 <<= shift; + } + else if (signed_shift < 0) { + uint32_t shift = -signed_shift; + *u0 >>= shift; + *u0 |= *u1 << (64 - shift); + *u1 >>= shift; + } +} + +// Computes a 128 / 128 -> 64 bit division, with a 128 bit remainder. +static uint64_t libdivide_128_div_128_to_64(uint64_t u_hi, uint64_t u_lo, uint64_t v_hi, uint64_t v_lo, uint64_t *r_hi, uint64_t *r_lo) { +#if defined(HAS_INT128_T) && \ + defined(HAS_INT128_DIV) + __uint128_t ufull = u_hi; + __uint128_t vfull = v_hi; + ufull = (ufull << 64) | u_lo; + vfull = (vfull << 64) | v_lo; + uint64_t res = (uint64_t)(ufull / vfull); + __uint128_t remainder = ufull - (vfull * res); + *r_lo = (uint64_t)remainder; + *r_hi = (uint64_t)(remainder >> 64); + return res; +#else + // Adapted from "Unsigned Doubleword Division" in Hacker's Delight + // We want to compute u / v + typedef struct { uint64_t hi; uint64_t lo; } u128_t; + u128_t u = {u_hi, u_lo}; + u128_t v = {v_hi, v_lo}; + + if (v.hi == 0) { + // divisor v is a 64 bit value, so we just need one 128/64 division + // Note that we are simpler than Hacker's Delight here, because we know + // the quotient fits in 64 bits whereas Hacker's Delight demands a full + // 128 bit quotient + *r_hi = 0; + return libdivide_128_div_64_to_64(u.hi, u.lo, v.lo, r_lo); + } + // Here v >= 2**64 + // We know that v.hi != 0, so count leading zeros is OK + // We have 0 <= n <= 63 + uint32_t n = libdivide_count_leading_zeros64(v.hi); + + // Normalize the divisor so its MSB is 1 + u128_t v1t = v; + libdivide_u128_shift(&v1t.hi, &v1t.lo, n); + uint64_t v1 = v1t.hi; // i.e. v1 = v1t >> 64 + + // To ensure no overflow + u128_t u1 = u; + libdivide_u128_shift(&u1.hi, &u1.lo, -1); + + // Get quotient from divide unsigned insn. + uint64_t rem_ignored; + uint64_t q1 = libdivide_128_div_64_to_64(u1.hi, u1.lo, v1, &rem_ignored); + + // Undo normalization and division of u by 2. + u128_t q0 = {0, q1}; + libdivide_u128_shift(&q0.hi, &q0.lo, n); + libdivide_u128_shift(&q0.hi, &q0.lo, -63); + + // Make q0 correct or too small by 1 + // Equivalent to `if (q0 != 0) q0 = q0 - 1;` + if (q0.hi != 0 || q0.lo != 0) { + q0.hi -= (q0.lo == 0); // borrow + q0.lo -= 1; + } + + // Now q0 is correct. + // Compute q0 * v as q0v + // = (q0.hi << 64 + q0.lo) * (v.hi << 64 + v.lo) + // = (q0.hi * v.hi << 128) + (q0.hi * v.lo << 64) + + // (q0.lo * v.hi << 64) + q0.lo * v.lo) + // Each term is 128 bit + // High half of full product (upper 128 bits!) 
are dropped + u128_t q0v = {0, 0}; + q0v.hi = q0.hi*v.lo + q0.lo*v.hi + libdivide_mullhi_u64(q0.lo, v.lo); + q0v.lo = q0.lo*v.lo; + + // Compute u - q0v as u_q0v + // This is the remainder + u128_t u_q0v = u; + u_q0v.hi -= q0v.hi + (u.lo < q0v.lo); // second term is borrow + u_q0v.lo -= q0v.lo; + + // Check if u_q0v >= v + // This checks if our remainder is larger than the divisor + if ((u_q0v.hi > v.hi) || + (u_q0v.hi == v.hi && u_q0v.lo >= v.lo)) { + // Increment q0 + q0.lo += 1; + q0.hi += (q0.lo == 0); // carry + + // Subtract v from remainder + u_q0v.hi -= v.hi + (u_q0v.lo < v.lo); + u_q0v.lo -= v.lo; + } + + *r_hi = u_q0v.hi; + *r_lo = u_q0v.lo; + + LIBDIVIDE_ASSERT(q0.hi == 0); + return q0.lo; +#endif +} + +////////// UINT32 + +static inline struct libdivide_u32_t libdivide_internal_u32_gen(uint32_t d, int branchfree) { + if (d == 0) { + LIBDIVIDE_ERROR("divider must be != 0"); + } + + struct libdivide_u32_t result; + uint32_t floor_log_2_d = 31 - libdivide_count_leading_zeros32(d); + + // Power of 2 + if ((d & (d - 1)) == 0) { + // We need to subtract 1 from the shift value in case of an unsigned + // branchfree divider because there is a hardcoded right shift by 1 + // in its division algorithm. Because of this we also need to add back + // 1 in its recovery algorithm. + result.magic = 0; + result.more = (uint8_t)(floor_log_2_d - (branchfree != 0)); + } else { + uint8_t more; + uint32_t rem, proposed_m; + proposed_m = libdivide_64_div_32_to_32(1U << floor_log_2_d, 0, d, &rem); + + LIBDIVIDE_ASSERT(rem > 0 && rem < d); + const uint32_t e = d - rem; + + // This power works if e < 2**floor_log_2_d. + if (!branchfree && (e < (1U << floor_log_2_d))) { + // This power works + more = floor_log_2_d; + } else { + // We have to use the general 33-bit algorithm. We need to compute + // (2**power) / d. However, we already have (2**(power-1))/d and + // its remainder. By doubling both, and then correcting the + // remainder, we can compute the larger division. + // don't care about overflow here - in fact, we expect it + proposed_m += proposed_m; + const uint32_t twice_rem = rem + rem; + if (twice_rem >= d || twice_rem < rem) proposed_m += 1; + more = floor_log_2_d | LIBDIVIDE_ADD_MARKER; + } + result.magic = 1 + proposed_m; + result.more = more; + // result.more's shift should in general be ceil_log_2_d. But if we + // used the smaller power, we subtract one from the shift because we're + // using the smaller power. If we're using the larger power, we + // subtract one from the shift because it's taken care of by the add + // indicator. So floor_log_2_d happens to be correct in both cases. + } + return result; +} + +struct libdivide_u32_t libdivide_u32_gen(uint32_t d) { + return libdivide_internal_u32_gen(d, 0); +} + +struct libdivide_u32_branchfree_t libdivide_u32_branchfree_gen(uint32_t d) { + if (d == 1) { + LIBDIVIDE_ERROR("branchfree divider must be != 1"); + } + struct libdivide_u32_t tmp = libdivide_internal_u32_gen(d, 1); + struct libdivide_u32_branchfree_t ret = {tmp.magic, (uint8_t)(tmp.more & LIBDIVIDE_32_SHIFT_MASK)}; + return ret; +} + +uint32_t libdivide_u32_do(uint32_t numer, const struct libdivide_u32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return numer >> more; + } + else { + uint32_t q = libdivide_mullhi_u32(denom->magic, numer); + if (more & LIBDIVIDE_ADD_MARKER) { + uint32_t t = ((numer - q) >> 1) + q; + return t >> (more & LIBDIVIDE_32_SHIFT_MASK); + } + else { + // All upper bits are 0, + // don't need to mask them off. 
+            return q >> more;
+        }
+    }
+}
+
+uint32_t libdivide_u32_branchfree_do(uint32_t numer, const struct libdivide_u32_branchfree_t *denom) {
+    uint32_t q = libdivide_mullhi_u32(denom->magic, numer);
+    uint32_t t = ((numer - q) >> 1) + q;
+    return t >> denom->more;
+}
+
+uint32_t libdivide_u32_recover(const struct libdivide_u32_t *denom) {
+    uint8_t more = denom->more;
+    uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+
+    if (!denom->magic) {
+        return 1U << shift;
+    } else if (!(more & LIBDIVIDE_ADD_MARKER)) {
+        // We compute q = n/d = n*m / 2^(32 + shift)
+        // Therefore we have d = 2^(32 + shift) / m
+        // We need to ceil it.
+        // We know d is not a power of 2, so m is not a power of 2,
+        // so we can just add 1 to the floor
+        uint32_t hi_dividend = 1U << shift;
+        uint32_t rem_ignored;
+        return 1 + libdivide_64_div_32_to_32(hi_dividend, 0, denom->magic, &rem_ignored);
+    } else {
+        // Here we wish to compute d = 2^(32+shift+1)/(m+2^32).
+        // Notice (m + 2^32) is a 33 bit number. Use 64 bit division for now
+        // Also note that shift may be as high as 31, so shift + 1 will
+        // overflow. So we have to compute it as 2^(32+shift)/(m+2^32), and
+        // then double the quotient and remainder.
+        uint64_t half_n = 1ULL << (32 + shift);
+        uint64_t d = (1ULL << 32) | denom->magic;
+        // Note that the quotient is guaranteed <= 32 bits, but the remainder
+        // may need 33!
+        uint32_t half_q = (uint32_t)(half_n / d);
+        uint64_t rem = half_n % d;
+        // We computed 2^(32+shift)/(m+2^32)
+        // Need to double it, and then add 1 to the quotient if doubling the
+        // remainder would increase the quotient.
+        // Note that rem<<1 cannot overflow, since rem < d and d is 33 bits
+        uint32_t full_q = half_q + half_q + ((rem<<1) >= d);
+
+        // We rounded down in gen (hence +1)
+        return full_q + 1;
+    }
+}
+
+uint32_t libdivide_u32_branchfree_recover(const struct libdivide_u32_branchfree_t *denom) {
+    uint8_t more = denom->more;
+    uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+
+    if (!denom->magic) {
+        return 1U << (shift + 1);
+    } else {
+        // Here we wish to compute d = 2^(32+shift+1)/(m+2^32).
+        // Notice (m + 2^32) is a 33 bit number. Use 64 bit division for now
+        // Also note that shift may be as high as 31, so shift + 1 will
+        // overflow. So we have to compute it as 2^(32+shift)/(m+2^32), and
+        // then double the quotient and remainder.
+        uint64_t half_n = 1ULL << (32 + shift);
+        uint64_t d = (1ULL << 32) | denom->magic;
+        // Note that the quotient is guaranteed <= 32 bits, but the remainder
+        // may need 33!
+        uint32_t half_q = (uint32_t)(half_n / d);
+        uint64_t rem = half_n % d;
+        // We computed 2^(32+shift)/(m+2^32)
+        // Need to double it, and then add 1 to the quotient if doubling the
+        // remainder would increase the quotient.
+        // Note that rem<<1 cannot overflow, since rem < d and d is 33 bits
+        uint32_t full_q = half_q + half_q + ((rem<<1) >= d);
+
+        // We rounded down in gen (hence +1)
+        return full_q + 1;
+    }
+}
+
+/////////// UINT64
+
+static inline struct libdivide_u64_t libdivide_internal_u64_gen(uint64_t d, int branchfree) {
+    if (d == 0) {
+        LIBDIVIDE_ERROR("divider must be != 0");
+    }
+
+    struct libdivide_u64_t result;
+    uint32_t floor_log_2_d = 63 - libdivide_count_leading_zeros64(d);
+
+    // Power of 2
+    if ((d & (d - 1)) == 0) {
+        // We need to subtract 1 from the shift value in case of an unsigned
+        // branchfree divider because there is a hardcoded right shift by 1
+        // in its division algorithm. Because of this we also need to add back
+        // 1 in its recovery algorithm.
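+        // Illustration: for d = 8, floor_log_2_d is 3, so the branchfull
+        // divider is {magic = 0, more = 3} and _do() returns numer >> 3;
+        // the branchfree divider stores shift 2 instead, because its _do()
+        // always applies a hardcoded >> 1 first: (numer >> 1) >> 2 == numer >> 3.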
+ result.magic = 0; + result.more = (uint8_t)(floor_log_2_d - (branchfree != 0)); + } else { + uint64_t proposed_m, rem; + uint8_t more; + // (1 << (64 + floor_log_2_d)) / d + proposed_m = libdivide_128_div_64_to_64(1ULL << floor_log_2_d, 0, d, &rem); + + LIBDIVIDE_ASSERT(rem > 0 && rem < d); + const uint64_t e = d - rem; + + // This power works if e < 2**floor_log_2_d. + if (!branchfree && e < (1ULL << floor_log_2_d)) { + // This power works + more = floor_log_2_d; + } else { + // We have to use the general 65-bit algorithm. We need to compute + // (2**power) / d. However, we already have (2**(power-1))/d and + // its remainder. By doubling both, and then correcting the + // remainder, we can compute the larger division. + // don't care about overflow here - in fact, we expect it + proposed_m += proposed_m; + const uint64_t twice_rem = rem + rem; + if (twice_rem >= d || twice_rem < rem) proposed_m += 1; + more = floor_log_2_d | LIBDIVIDE_ADD_MARKER; + } + result.magic = 1 + proposed_m; + result.more = more; + // result.more's shift should in general be ceil_log_2_d. But if we + // used the smaller power, we subtract one from the shift because we're + // using the smaller power. If we're using the larger power, we + // subtract one from the shift because it's taken care of by the add + // indicator. So floor_log_2_d happens to be correct in both cases, + // which is why we do it outside of the if statement. + } + return result; +} + +struct libdivide_u64_t libdivide_u64_gen(uint64_t d) { + return libdivide_internal_u64_gen(d, 0); +} + +struct libdivide_u64_branchfree_t libdivide_u64_branchfree_gen(uint64_t d) { + if (d == 1) { + LIBDIVIDE_ERROR("branchfree divider must be != 1"); + } + struct libdivide_u64_t tmp = libdivide_internal_u64_gen(d, 1); + struct libdivide_u64_branchfree_t ret = {tmp.magic, (uint8_t)(tmp.more & LIBDIVIDE_64_SHIFT_MASK)}; + return ret; +} + +uint64_t libdivide_u64_do(uint64_t numer, const struct libdivide_u64_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return numer >> more; + } + else { + uint64_t q = libdivide_mullhi_u64(denom->magic, numer); + if (more & LIBDIVIDE_ADD_MARKER) { + uint64_t t = ((numer - q) >> 1) + q; + return t >> (more & LIBDIVIDE_64_SHIFT_MASK); + } + else { + // All upper bits are 0, + // don't need to mask them off. + return q >> more; + } + } +} + +uint64_t libdivide_u64_branchfree_do(uint64_t numer, const struct libdivide_u64_branchfree_t *denom) { + uint64_t q = libdivide_mullhi_u64(denom->magic, numer); + uint64_t t = ((numer - q) >> 1) + q; + return t >> denom->more; +} + +uint64_t libdivide_u64_recover(const struct libdivide_u64_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + + if (!denom->magic) { + return 1ULL << shift; + } else if (!(more & LIBDIVIDE_ADD_MARKER)) { + // We compute q = n/d = n*m / 2^(64 + shift) + // Therefore we have d = 2^(64 + shift) / m + // We need to ceil it. + // We know d is not a power of 2, so m is not a power of 2, + // so we can just add 1 to the floor + uint64_t hi_dividend = 1ULL << shift; + uint64_t rem_ignored; + return 1 + libdivide_128_div_64_to_64(hi_dividend, 0, denom->magic, &rem_ignored); + } else { + // Here we wish to compute d = 2^(64+shift+1)/(m+2^64). + // Notice (m + 2^64) is a 65 bit number. This gets hairy. See + // libdivide_u32_recover for more on what we do here. 
+ // TODO: do something better than 128 bit math + + // Full n is a (potentially) 129 bit value + // half_n is a 128 bit value + // Compute the hi half of half_n. Low half is 0. + uint64_t half_n_hi = 1ULL << shift, half_n_lo = 0; + // d is a 65 bit value. The high bit is always set to 1. + const uint64_t d_hi = 1, d_lo = denom->magic; + // Note that the quotient is guaranteed <= 64 bits, + // but the remainder may need 65! + uint64_t r_hi, r_lo; + uint64_t half_q = libdivide_128_div_128_to_64(half_n_hi, half_n_lo, d_hi, d_lo, &r_hi, &r_lo); + // We computed 2^(64+shift)/(m+2^64) + // Double the remainder ('dr') and check if that is larger than d + // Note that d is a 65 bit value, so r1 is small and so r1 + r1 + // cannot overflow + uint64_t dr_lo = r_lo + r_lo; + uint64_t dr_hi = r_hi + r_hi + (dr_lo < r_lo); // last term is carry + int dr_exceeds_d = (dr_hi > d_hi) || (dr_hi == d_hi && dr_lo >= d_lo); + uint64_t full_q = half_q + half_q + (dr_exceeds_d ? 1 : 0); + return full_q + 1; + } +} + +uint64_t libdivide_u64_branchfree_recover(const struct libdivide_u64_branchfree_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + + if (!denom->magic) { + return 1ULL << (shift + 1); + } else { + // Here we wish to compute d = 2^(64+shift+1)/(m+2^64). + // Notice (m + 2^64) is a 65 bit number. This gets hairy. See + // libdivide_u32_recover for more on what we do here. + // TODO: do something better than 128 bit math + + // Full n is a (potentially) 129 bit value + // half_n is a 128 bit value + // Compute the hi half of half_n. Low half is 0. + uint64_t half_n_hi = 1ULL << shift, half_n_lo = 0; + // d is a 65 bit value. The high bit is always set to 1. + const uint64_t d_hi = 1, d_lo = denom->magic; + // Note that the quotient is guaranteed <= 64 bits, + // but the remainder may need 65! + uint64_t r_hi, r_lo; + uint64_t half_q = libdivide_128_div_128_to_64(half_n_hi, half_n_lo, d_hi, d_lo, &r_hi, &r_lo); + // We computed 2^(64+shift)/(m+2^64) + // Double the remainder ('dr') and check if that is larger than d + // Note that d is a 65 bit value, so r1 is small and so r1 + r1 + // cannot overflow + uint64_t dr_lo = r_lo + r_lo; + uint64_t dr_hi = r_hi + r_hi + (dr_lo < r_lo); // last term is carry + int dr_exceeds_d = (dr_hi > d_hi) || (dr_hi == d_hi && dr_lo >= d_lo); + uint64_t full_q = half_q + half_q + (dr_exceeds_d ? 1 : 0); + return full_q + 1; + } +} + +/////////// SINT32 + +static inline struct libdivide_s32_t libdivide_internal_s32_gen(int32_t d, int branchfree) { + if (d == 0) { + LIBDIVIDE_ERROR("divider must be != 0"); + } + + struct libdivide_s32_t result; + + // If d is a power of 2, or negative a power of 2, we have to use a shift. + // This is especially important because the magic algorithm fails for -1. + // To check if d is a power of 2 or its inverse, it suffices to check + // whether its absolute value has exactly one bit set. This works even for + // INT_MIN, because abs(INT_MIN) == INT_MIN, and INT_MIN has one bit set + // and is a power of 2. + uint32_t ud = (uint32_t)d; + uint32_t absD = (d < 0) ? -ud : ud; + uint32_t floor_log_2_d = 31 - libdivide_count_leading_zeros32(absD); + // check if exactly one bit is set, + // don't care if absD is 0 since that's divide by zero + if ((absD & (absD - 1)) == 0) { + // Branchfree and normal paths are exactly the same + result.magic = 0; + result.more = floor_log_2_d | (d < 0 ? 
LIBDIVIDE_NEGATIVE_DIVISOR : 0);
+    } else {
+        LIBDIVIDE_ASSERT(floor_log_2_d >= 1);
+
+        uint8_t more;
+        // the dividend here is 2**(floor_log_2_d + 31), so the low 32 bit word
+        // is 0 and the high word is 2**(floor_log_2_d - 1)
+        uint32_t rem, proposed_m;
+        proposed_m = libdivide_64_div_32_to_32(1U << (floor_log_2_d - 1), 0, absD, &rem);
+        const uint32_t e = absD - rem;
+
+        // We are going to start with a power of floor_log_2_d - 1.
+        // This works if e < 2**floor_log_2_d.
+        if (!branchfree && e < (1U << floor_log_2_d)) {
+            // This power works
+            more = floor_log_2_d - 1;
+        } else {
+            // We need to go one higher. This should not make proposed_m
+            // overflow, but it will make it negative when interpreted as an
+            // int32_t.
+            proposed_m += proposed_m;
+            const uint32_t twice_rem = rem + rem;
+            if (twice_rem >= absD || twice_rem < rem) proposed_m += 1;
+            more = floor_log_2_d | LIBDIVIDE_ADD_MARKER;
+        }
+
+        proposed_m += 1;
+        int32_t magic = (int32_t)proposed_m;
+
+        // Mark if we are negative. Note we only negate the magic number in the
+        // branchfull case.
+        if (d < 0) {
+            more |= LIBDIVIDE_NEGATIVE_DIVISOR;
+            if (!branchfree) {
+                magic = -magic;
+            }
+        }
+
+        result.more = more;
+        result.magic = magic;
+    }
+    return result;
+}
+
+struct libdivide_s32_t libdivide_s32_gen(int32_t d) {
+    return libdivide_internal_s32_gen(d, 0);
+}
+
+struct libdivide_s32_branchfree_t libdivide_s32_branchfree_gen(int32_t d) {
+    struct libdivide_s32_t tmp = libdivide_internal_s32_gen(d, 1);
+    struct libdivide_s32_branchfree_t result = {tmp.magic, tmp.more};
+    return result;
+}
+
+int32_t libdivide_s32_do(int32_t numer, const struct libdivide_s32_t *denom) {
+    uint8_t more = denom->more;
+    uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+
+    if (!denom->magic) {
+        uint32_t sign = (int8_t)more >> 7;
+        uint32_t mask = (1U << shift) - 1;
+        uint32_t uq = numer + ((numer >> 31) & mask);
+        int32_t q = (int32_t)uq;
+        q >>= shift;
+        q = (q ^ sign) - sign;
+        return q;
+    } else {
+        uint32_t uq = (uint32_t)libdivide_mullhi_s32(denom->magic, numer);
+        if (more & LIBDIVIDE_ADD_MARKER) {
+            // must be arithmetic shift and then sign extend
+            int32_t sign = (int8_t)more >> 7;
+            // q += (more < 0 ?
-numer : numer) + // cast required to avoid UB + uq += ((uint32_t)numer ^ sign) - sign; + } + int32_t q = (int32_t)uq; + q >>= shift; + q += (q < 0); + return q; + } +} + +int32_t libdivide_s32_branchfree_do(int32_t numer, const struct libdivide_s32_branchfree_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + // must be arithmetic shift and then sign extend + int32_t sign = (int8_t)more >> 7; + int32_t magic = denom->magic; + int32_t q = libdivide_mullhi_s32(magic, numer); + q += numer; + + // If q is non-negative, we have nothing to do + // If q is negative, we want to add either (2**shift)-1 if d is a power of + // 2, or (2**shift) if it is not a power of 2 + uint32_t is_power_of_2 = (magic == 0); + uint32_t q_sign = (uint32_t)(q >> 31); + q += q_sign & ((1U << shift) - is_power_of_2); + + // Now arithmetic right shift + q >>= shift; + // Negate if needed + q = (q ^ sign) - sign; + + return q; +} + +int32_t libdivide_s32_recover(const struct libdivide_s32_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + if (!denom->magic) { + uint32_t absD = 1U << shift; + if (more & LIBDIVIDE_NEGATIVE_DIVISOR) { + absD = -absD; + } + return (int32_t)absD; + } else { + // Unsigned math is much easier + // We negate the magic number only in the branchfull case, and we don't + // know which case we're in. However we have enough information to + // determine the correct sign of the magic number. The divisor was + // negative if LIBDIVIDE_NEGATIVE_DIVISOR is set. If ADD_MARKER is set, + // the magic number's sign is opposite that of the divisor. + // We want to compute the positive magic number. + int negative_divisor = (more & LIBDIVIDE_NEGATIVE_DIVISOR); + int magic_was_negated = (more & LIBDIVIDE_ADD_MARKER) + ? denom->magic > 0 : denom->magic < 0; + + // Handle the power of 2 case (including branchfree) + if (denom->magic == 0) { + int32_t result = 1U << shift; + return negative_divisor ? -result : result; + } + + uint32_t d = (uint32_t)(magic_was_negated ? -denom->magic : denom->magic); + uint64_t n = 1ULL << (32 + shift); // this shift cannot exceed 30 + uint32_t q = (uint32_t)(n / d); + int32_t result = (int32_t)q; + result += 1; + return negative_divisor ? -result : result; + } +} + +int32_t libdivide_s32_branchfree_recover(const struct libdivide_s32_branchfree_t *denom) { + return libdivide_s32_recover((const struct libdivide_s32_t *)denom); +} + +///////////// SINT64 + +static inline struct libdivide_s64_t libdivide_internal_s64_gen(int64_t d, int branchfree) { + if (d == 0) { + LIBDIVIDE_ERROR("divider must be != 0"); + } + + struct libdivide_s64_t result; + + // If d is a power of 2, or negative a power of 2, we have to use a shift. + // This is especially important because the magic algorithm fails for -1. + // To check if d is a power of 2 or its inverse, it suffices to check + // whether its absolute value has exactly one bit set. This works even for + // INT_MIN, because abs(INT_MIN) == INT_MIN, and INT_MIN has one bit set + // and is a power of 2. + uint64_t ud = (uint64_t)d; + uint64_t absD = (d < 0) ? -ud : ud; + uint32_t floor_log_2_d = 63 - libdivide_count_leading_zeros64(absD); + // check if exactly one bit is set, + // don't care if absD is 0 since that's divide by zero + if ((absD & (absD - 1)) == 0) { + // Branchfree and non-branchfree cases are the same + result.magic = 0; + result.more = floor_log_2_d | (d < 0 ? 
LIBDIVIDE_NEGATIVE_DIVISOR : 0);
+    } else {
+        // the dividend here is 2**(floor_log_2_d + 63), so the low 64 bit word
+        // is 0 and the high word is 2**(floor_log_2_d - 1)
+        uint8_t more;
+        uint64_t rem, proposed_m;
+        proposed_m = libdivide_128_div_64_to_64(1ULL << (floor_log_2_d - 1), 0, absD, &rem);
+        const uint64_t e = absD - rem;
+
+        // We are going to start with a power of floor_log_2_d - 1.
+        // This works if e < 2**floor_log_2_d.
+        if (!branchfree && e < (1ULL << floor_log_2_d)) {
+            // This power works
+            more = floor_log_2_d - 1;
+        } else {
+            // We need to go one higher. This should not make proposed_m
+            // overflow, but it will make it negative when interpreted as an
+            // int64_t.
+            proposed_m += proposed_m;
+            const uint64_t twice_rem = rem + rem;
+            if (twice_rem >= absD || twice_rem < rem) proposed_m += 1;
+            // note that we only set the LIBDIVIDE_NEGATIVE_DIVISOR bit if we
+            // also set ADD_MARKER; this is an annoying optimization that
+            // enables algorithm #4 to avoid the mask. However we always set it
+            // in the branchfree case
+            more = floor_log_2_d | LIBDIVIDE_ADD_MARKER;
+        }
+        proposed_m += 1;
+        int64_t magic = (int64_t)proposed_m;
+
+        // Mark if we are negative
+        if (d < 0) {
+            more |= LIBDIVIDE_NEGATIVE_DIVISOR;
+            if (!branchfree) {
+                magic = -magic;
+            }
+        }
+
+        result.more = more;
+        result.magic = magic;
+    }
+    return result;
+}
+
+struct libdivide_s64_t libdivide_s64_gen(int64_t d) {
+    return libdivide_internal_s64_gen(d, 0);
+}
+
+struct libdivide_s64_branchfree_t libdivide_s64_branchfree_gen(int64_t d) {
+    struct libdivide_s64_t tmp = libdivide_internal_s64_gen(d, 1);
+    struct libdivide_s64_branchfree_t ret = {tmp.magic, tmp.more};
+    return ret;
+}
+
+int64_t libdivide_s64_do(int64_t numer, const struct libdivide_s64_t *denom) {
+    uint8_t more = denom->more;
+    uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+
+    if (!denom->magic) { // shift path
+        uint64_t mask = (1ULL << shift) - 1;
+        uint64_t uq = numer + ((numer >> 63) & mask);
+        int64_t q = (int64_t)uq;
+        q >>= shift;
+        // must be arithmetic shift and then sign-extend
+        int64_t sign = (int8_t)more >> 7;
+        q = (q ^ sign) - sign;
+        return q;
+    } else {
+        uint64_t uq = (uint64_t)libdivide_mullhi_s64(denom->magic, numer);
+        if (more & LIBDIVIDE_ADD_MARKER) {
+            // must be arithmetic shift and then sign extend
+            int64_t sign = (int8_t)more >> 7;
+            // q += (more < 0 ? -numer : numer)
+            // cast required to avoid UB
+            uq += ((uint64_t)numer ^ sign) - sign;
+        }
+        int64_t q = (int64_t)uq;
+        q >>= shift;
+        q += (q < 0);
+        return q;
+    }
+}
+
+int64_t libdivide_s64_branchfree_do(int64_t numer, const struct libdivide_s64_branchfree_t *denom) {
+    uint8_t more = denom->more;
+    uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+    // must be arithmetic shift and then sign extend
+    int64_t sign = (int8_t)more >> 7;
+    int64_t magic = denom->magic;
+    int64_t q = libdivide_mullhi_s64(magic, numer);
+    q += numer;
+
+    // If q is non-negative, we have nothing to do.
+    // If q is negative, we want to add either (2**shift)-1 if d is a power of
+    // 2, or (2**shift) if it is not a power of 2.
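+    // Why the masked add below: an arithmetic right shift rounds toward
+    // negative infinity, while C division truncates toward zero, so a
+    // negative q must be biased upward before shifting to match numer / d.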
+ uint64_t is_power_of_2 = (magic == 0); + uint64_t q_sign = (uint64_t)(q >> 63); + q += q_sign & ((1ULL << shift) - is_power_of_2); + + // Arithmetic right shift + q >>= shift; + // Negate if needed + q = (q ^ sign) - sign; + + return q; +} + +int64_t libdivide_s64_recover(const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + if (denom->magic == 0) { // shift path + uint64_t absD = 1ULL << shift; + if (more & LIBDIVIDE_NEGATIVE_DIVISOR) { + absD = -absD; + } + return (int64_t)absD; + } else { + // Unsigned math is much easier + int negative_divisor = (more & LIBDIVIDE_NEGATIVE_DIVISOR); + int magic_was_negated = (more & LIBDIVIDE_ADD_MARKER) + ? denom->magic > 0 : denom->magic < 0; + + uint64_t d = (uint64_t)(magic_was_negated ? -denom->magic : denom->magic); + uint64_t n_hi = 1ULL << shift, n_lo = 0; + uint64_t rem_ignored; + uint64_t q = libdivide_128_div_64_to_64(n_hi, n_lo, d, &rem_ignored); + int64_t result = (int64_t)(q + 1); + if (negative_divisor) { + result = -result; + } + return result; + } +} + +int64_t libdivide_s64_branchfree_recover(const struct libdivide_s64_branchfree_t *denom) { + return libdivide_s64_recover((const struct libdivide_s64_t *)denom); +} + +#if defined(LIBDIVIDE_AVX512) + +static inline __m512i libdivide_u32_do_vector(__m512i numers, const struct libdivide_u32_t *denom); +static inline __m512i libdivide_s32_do_vector(__m512i numers, const struct libdivide_s32_t *denom); +static inline __m512i libdivide_u64_do_vector(__m512i numers, const struct libdivide_u64_t *denom); +static inline __m512i libdivide_s64_do_vector(__m512i numers, const struct libdivide_s64_t *denom); + +static inline __m512i libdivide_u32_branchfree_do_vector(__m512i numers, const struct libdivide_u32_branchfree_t *denom); +static inline __m512i libdivide_s32_branchfree_do_vector(__m512i numers, const struct libdivide_s32_branchfree_t *denom); +static inline __m512i libdivide_u64_branchfree_do_vector(__m512i numers, const struct libdivide_u64_branchfree_t *denom); +static inline __m512i libdivide_s64_branchfree_do_vector(__m512i numers, const struct libdivide_s64_branchfree_t *denom); + +//////// Internal Utility Functions + +static inline __m512i libdivide_s64_signbits(__m512i v) {; + return _mm512_srai_epi64(v, 63); +} + +static inline __m512i libdivide_s64_shift_right_vector(__m512i v, int amt) { + return _mm512_srai_epi64(v, amt); +} + +// Here, b is assumed to contain one 32-bit value repeated. +static inline __m512i libdivide_mullhi_u32_vector(__m512i a, __m512i b) { + __m512i hi_product_0Z2Z = _mm512_srli_epi64(_mm512_mul_epu32(a, b), 32); + __m512i a1X3X = _mm512_srli_epi64(a, 32); + __m512i mask = _mm512_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0); + __m512i hi_product_Z1Z3 = _mm512_and_si512(_mm512_mul_epu32(a1X3X, b), mask); + return _mm512_or_si512(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// b is one 32-bit value repeated. +static inline __m512i libdivide_mullhi_s32_vector(__m512i a, __m512i b) { + __m512i hi_product_0Z2Z = _mm512_srli_epi64(_mm512_mul_epi32(a, b), 32); + __m512i a1X3X = _mm512_srli_epi64(a, 32); + __m512i mask = _mm512_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0); + __m512i hi_product_Z1Z3 = _mm512_and_si512(_mm512_mul_epi32(a1X3X, b), mask); + return _mm512_or_si512(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// Here, y is assumed to contain one 64-bit value repeated. 
+// https://stackoverflow.com/a/28827013 +static inline __m512i libdivide_mullhi_u64_vector(__m512i x, __m512i y) { + __m512i lomask = _mm512_set1_epi64(0xffffffff); + __m512i xh = _mm512_shuffle_epi32(x, (_MM_PERM_ENUM) 0xB1); + __m512i yh = _mm512_shuffle_epi32(y, (_MM_PERM_ENUM) 0xB1); + __m512i w0 = _mm512_mul_epu32(x, y); + __m512i w1 = _mm512_mul_epu32(x, yh); + __m512i w2 = _mm512_mul_epu32(xh, y); + __m512i w3 = _mm512_mul_epu32(xh, yh); + __m512i w0h = _mm512_srli_epi64(w0, 32); + __m512i s1 = _mm512_add_epi64(w1, w0h); + __m512i s1l = _mm512_and_si512(s1, lomask); + __m512i s1h = _mm512_srli_epi64(s1, 32); + __m512i s2 = _mm512_add_epi64(w2, s1l); + __m512i s2h = _mm512_srli_epi64(s2, 32); + __m512i hi = _mm512_add_epi64(w3, s1h); + hi = _mm512_add_epi64(hi, s2h); + + return hi; +} + +// y is one 64-bit value repeated. +static inline __m512i libdivide_mullhi_s64_vector(__m512i x, __m512i y) { + __m512i p = libdivide_mullhi_u64_vector(x, y); + __m512i t1 = _mm512_and_si512(libdivide_s64_signbits(x), y); + __m512i t2 = _mm512_and_si512(libdivide_s64_signbits(y), x); + p = _mm512_sub_epi64(p, t1); + p = _mm512_sub_epi64(p, t2); + return p; +} + +////////// UINT32 + +__m512i libdivide_u32_do_vector(__m512i numers, const struct libdivide_u32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm512_srli_epi32(numers, more); + } + else { + __m512i q = libdivide_mullhi_u32_vector(numers, _mm512_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + __m512i t = _mm512_add_epi32(_mm512_srli_epi32(_mm512_sub_epi32(numers, q), 1), q); + return _mm512_srli_epi32(t, shift); + } + else { + return _mm512_srli_epi32(q, more); + } + } +} + +__m512i libdivide_u32_branchfree_do_vector(__m512i numers, const struct libdivide_u32_branchfree_t *denom) { + __m512i q = libdivide_mullhi_u32_vector(numers, _mm512_set1_epi32(denom->magic)); + __m512i t = _mm512_add_epi32(_mm512_srli_epi32(_mm512_sub_epi32(numers, q), 1), q); + return _mm512_srli_epi32(t, denom->more); +} + +////////// UINT64 + +__m512i libdivide_u64_do_vector(__m512i numers, const struct libdivide_u64_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm512_srli_epi64(numers, more); + } + else { + __m512i q = libdivide_mullhi_u64_vector(numers, _mm512_set1_epi64(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + __m512i t = _mm512_add_epi64(_mm512_srli_epi64(_mm512_sub_epi64(numers, q), 1), q); + return _mm512_srli_epi64(t, shift); + } + else { + return _mm512_srli_epi64(q, more); + } + } +} + +__m512i libdivide_u64_branchfree_do_vector(__m512i numers, const struct libdivide_u64_branchfree_t *denom) { + __m512i q = libdivide_mullhi_u64_vector(numers, _mm512_set1_epi64(denom->magic)); + __m512i t = _mm512_add_epi64(_mm512_srli_epi64(_mm512_sub_epi64(numers, q), 1), q); + return _mm512_srli_epi64(t, denom->more); +} + +////////// SINT32 + +__m512i libdivide_s32_do_vector(__m512i numers, const struct libdivide_s32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + uint32_t mask = (1U << shift) - 1; + __m512i roundToZeroTweak = _mm512_set1_epi32(mask); + // q = numer + ((numer >> 31) & roundToZeroTweak); + __m512i q = _mm512_add_epi32(numers, 
_mm512_and_si512(_mm512_srai_epi32(numers, 31), roundToZeroTweak)); + q = _mm512_srai_epi32(q, shift); + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm512_sub_epi32(_mm512_xor_si512(q, sign), sign); + return q; + } + else { + __m512i q = libdivide_mullhi_s32_vector(numers, _mm512_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm512_add_epi32(q, _mm512_sub_epi32(_mm512_xor_si512(numers, sign), sign)); + } + // q >>= shift + q = _mm512_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK); + q = _mm512_add_epi32(q, _mm512_srli_epi32(q, 31)); // q += (q < 0) + return q; + } +} + +__m512i libdivide_s32_branchfree_do_vector(__m512i numers, const struct libdivide_s32_branchfree_t *denom) { + int32_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + // must be arithmetic shift + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + __m512i q = libdivide_mullhi_s32_vector(numers, _mm512_set1_epi32(magic)); + q = _mm512_add_epi32(q, numers); // q += numers + + // If q is non-negative, we have nothing to do + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2 + uint32_t is_power_of_2 = (magic == 0); + __m512i q_sign = _mm512_srai_epi32(q, 31); // q_sign = q >> 31 + __m512i mask = _mm512_set1_epi32((1U << shift) - is_power_of_2); + q = _mm512_add_epi32(q, _mm512_and_si512(q_sign, mask)); // q = q + (q_sign & mask) + q = _mm512_srai_epi32(q, shift); // q >>= shift + q = _mm512_sub_epi32(_mm512_xor_si512(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +////////// SINT64 + +__m512i libdivide_s64_do_vector(__m512i numers, const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + int64_t magic = denom->magic; + if (magic == 0) { // shift path + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + uint64_t mask = (1ULL << shift) - 1; + __m512i roundToZeroTweak = _mm512_set1_epi64(mask); + // q = numer + ((numer >> 63) & roundToZeroTweak); + __m512i q = _mm512_add_epi64(numers, _mm512_and_si512(libdivide_s64_signbits(numers), roundToZeroTweak)); + q = libdivide_s64_shift_right_vector(q, shift); + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm512_sub_epi64(_mm512_xor_si512(q, sign), sign); + return q; + } + else { + __m512i q = libdivide_mullhi_s64_vector(numers, _mm512_set1_epi64(magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm512_add_epi64(q, _mm512_sub_epi64(_mm512_xor_si512(numers, sign), sign)); + } + // q >>= denom->mult_path.shift + q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK); + q = _mm512_add_epi64(q, _mm512_srli_epi64(q, 63)); // q += (q < 0) + return q; + } +} + +__m512i libdivide_s64_branchfree_do_vector(__m512i numers, const struct libdivide_s64_branchfree_t *denom) { + int64_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + // must be arithmetic shift + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + + // libdivide_mullhi_s64(numers, magic); + __m512i q = libdivide_mullhi_s64_vector(numers, _mm512_set1_epi64(magic)); + q = _mm512_add_epi64(q, numers); // q += numers + + // If q is non-negative, we have nothing to 
do. + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2. + uint32_t is_power_of_2 = (magic == 0); + __m512i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63 + __m512i mask = _mm512_set1_epi64((1ULL << shift) - is_power_of_2); + q = _mm512_add_epi64(q, _mm512_and_si512(q_sign, mask)); // q = q + (q_sign & mask) + q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift + q = _mm512_sub_epi64(_mm512_xor_si512(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +#elif defined(LIBDIVIDE_AVX2) + +static inline __m256i libdivide_u32_do_vector(__m256i numers, const struct libdivide_u32_t *denom); +static inline __m256i libdivide_s32_do_vector(__m256i numers, const struct libdivide_s32_t *denom); +static inline __m256i libdivide_u64_do_vector(__m256i numers, const struct libdivide_u64_t *denom); +static inline __m256i libdivide_s64_do_vector(__m256i numers, const struct libdivide_s64_t *denom); + +static inline __m256i libdivide_u32_branchfree_do_vector(__m256i numers, const struct libdivide_u32_branchfree_t *denom); +static inline __m256i libdivide_s32_branchfree_do_vector(__m256i numers, const struct libdivide_s32_branchfree_t *denom); +static inline __m256i libdivide_u64_branchfree_do_vector(__m256i numers, const struct libdivide_u64_branchfree_t *denom); +static inline __m256i libdivide_s64_branchfree_do_vector(__m256i numers, const struct libdivide_s64_branchfree_t *denom); + +//////// Internal Utility Functions + +// Implementation of _mm256_srai_epi64(v, 63) (from AVX512). +static inline __m256i libdivide_s64_signbits(__m256i v) { + __m256i hiBitsDuped = _mm256_shuffle_epi32(v, _MM_SHUFFLE(3, 3, 1, 1)); + __m256i signBits = _mm256_srai_epi32(hiBitsDuped, 31); + return signBits; +} + +// Implementation of _mm256_srai_epi64 (from AVX512). +static inline __m256i libdivide_s64_shift_right_vector(__m256i v, int amt) { + const int b = 64 - amt; + __m256i m = _mm256_set1_epi64x(1ULL << (b - 1)); + __m256i x = _mm256_srli_epi64(v, amt); + __m256i result = _mm256_sub_epi64(_mm256_xor_si256(x, m), m); + return result; +} + +// Here, b is assumed to contain one 32-bit value repeated. +static inline __m256i libdivide_mullhi_u32_vector(__m256i a, __m256i b) { + __m256i hi_product_0Z2Z = _mm256_srli_epi64(_mm256_mul_epu32(a, b), 32); + __m256i a1X3X = _mm256_srli_epi64(a, 32); + __m256i mask = _mm256_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0); + __m256i hi_product_Z1Z3 = _mm256_and_si256(_mm256_mul_epu32(a1X3X, b), mask); + return _mm256_or_si256(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// b is one 32-bit value repeated. +static inline __m256i libdivide_mullhi_s32_vector(__m256i a, __m256i b) { + __m256i hi_product_0Z2Z = _mm256_srli_epi64(_mm256_mul_epi32(a, b), 32); + __m256i a1X3X = _mm256_srli_epi64(a, 32); + __m256i mask = _mm256_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0); + __m256i hi_product_Z1Z3 = _mm256_and_si256(_mm256_mul_epi32(a1X3X, b), mask); + return _mm256_or_si256(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// Here, y is assumed to contain one 64-bit value repeated. 
+// https://stackoverflow.com/a/28827013 +static inline __m256i libdivide_mullhi_u64_vector(__m256i x, __m256i y) { + __m256i lomask = _mm256_set1_epi64x(0xffffffff); + __m256i xh = _mm256_shuffle_epi32(x, 0xB1); // x0l, x0h, x1l, x1h + __m256i yh = _mm256_shuffle_epi32(y, 0xB1); // y0l, y0h, y1l, y1h + __m256i w0 = _mm256_mul_epu32(x, y); // x0l*y0l, x1l*y1l + __m256i w1 = _mm256_mul_epu32(x, yh); // x0l*y0h, x1l*y1h + __m256i w2 = _mm256_mul_epu32(xh, y); // x0h*y0l, x1h*y0l + __m256i w3 = _mm256_mul_epu32(xh, yh); // x0h*y0h, x1h*y1h + __m256i w0h = _mm256_srli_epi64(w0, 32); + __m256i s1 = _mm256_add_epi64(w1, w0h); + __m256i s1l = _mm256_and_si256(s1, lomask); + __m256i s1h = _mm256_srli_epi64(s1, 32); + __m256i s2 = _mm256_add_epi64(w2, s1l); + __m256i s2h = _mm256_srli_epi64(s2, 32); + __m256i hi = _mm256_add_epi64(w3, s1h); + hi = _mm256_add_epi64(hi, s2h); + + return hi; +} + +// y is one 64-bit value repeated. +static inline __m256i libdivide_mullhi_s64_vector(__m256i x, __m256i y) { + __m256i p = libdivide_mullhi_u64_vector(x, y); + __m256i t1 = _mm256_and_si256(libdivide_s64_signbits(x), y); + __m256i t2 = _mm256_and_si256(libdivide_s64_signbits(y), x); + p = _mm256_sub_epi64(p, t1); + p = _mm256_sub_epi64(p, t2); + return p; +} + +////////// UINT32 + +__m256i libdivide_u32_do_vector(__m256i numers, const struct libdivide_u32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm256_srli_epi32(numers, more); + } + else { + __m256i q = libdivide_mullhi_u32_vector(numers, _mm256_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + __m256i t = _mm256_add_epi32(_mm256_srli_epi32(_mm256_sub_epi32(numers, q), 1), q); + return _mm256_srli_epi32(t, shift); + } + else { + return _mm256_srli_epi32(q, more); + } + } +} + +__m256i libdivide_u32_branchfree_do_vector(__m256i numers, const struct libdivide_u32_branchfree_t *denom) { + __m256i q = libdivide_mullhi_u32_vector(numers, _mm256_set1_epi32(denom->magic)); + __m256i t = _mm256_add_epi32(_mm256_srli_epi32(_mm256_sub_epi32(numers, q), 1), q); + return _mm256_srli_epi32(t, denom->more); +} + +////////// UINT64 + +__m256i libdivide_u64_do_vector(__m256i numers, const struct libdivide_u64_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm256_srli_epi64(numers, more); + } + else { + __m256i q = libdivide_mullhi_u64_vector(numers, _mm256_set1_epi64x(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + __m256i t = _mm256_add_epi64(_mm256_srli_epi64(_mm256_sub_epi64(numers, q), 1), q); + return _mm256_srli_epi64(t, shift); + } + else { + return _mm256_srli_epi64(q, more); + } + } +} + +__m256i libdivide_u64_branchfree_do_vector(__m256i numers, const struct libdivide_u64_branchfree_t *denom) { + __m256i q = libdivide_mullhi_u64_vector(numers, _mm256_set1_epi64x(denom->magic)); + __m256i t = _mm256_add_epi64(_mm256_srli_epi64(_mm256_sub_epi64(numers, q), 1), q); + return _mm256_srli_epi64(t, denom->more); +} + +////////// SINT32 + +__m256i libdivide_s32_do_vector(__m256i numers, const struct libdivide_s32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + uint32_t mask = (1U << shift) - 1; + __m256i roundToZeroTweak = _mm256_set1_epi32(mask); + // q = numer + ((numer >> 
31) & roundToZeroTweak); + __m256i q = _mm256_add_epi32(numers, _mm256_and_si256(_mm256_srai_epi32(numers, 31), roundToZeroTweak)); + q = _mm256_srai_epi32(q, shift); + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm256_sub_epi32(_mm256_xor_si256(q, sign), sign); + return q; + } + else { + __m256i q = libdivide_mullhi_s32_vector(numers, _mm256_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm256_add_epi32(q, _mm256_sub_epi32(_mm256_xor_si256(numers, sign), sign)); + } + // q >>= shift + q = _mm256_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK); + q = _mm256_add_epi32(q, _mm256_srli_epi32(q, 31)); // q += (q < 0) + return q; + } +} + +__m256i libdivide_s32_branchfree_do_vector(__m256i numers, const struct libdivide_s32_branchfree_t *denom) { + int32_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + // must be arithmetic shift + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + __m256i q = libdivide_mullhi_s32_vector(numers, _mm256_set1_epi32(magic)); + q = _mm256_add_epi32(q, numers); // q += numers + + // If q is non-negative, we have nothing to do + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2 + uint32_t is_power_of_2 = (magic == 0); + __m256i q_sign = _mm256_srai_epi32(q, 31); // q_sign = q >> 31 + __m256i mask = _mm256_set1_epi32((1U << shift) - is_power_of_2); + q = _mm256_add_epi32(q, _mm256_and_si256(q_sign, mask)); // q = q + (q_sign & mask) + q = _mm256_srai_epi32(q, shift); // q >>= shift + q = _mm256_sub_epi32(_mm256_xor_si256(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +////////// SINT64 + +__m256i libdivide_s64_do_vector(__m256i numers, const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + int64_t magic = denom->magic; + if (magic == 0) { // shift path + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + uint64_t mask = (1ULL << shift) - 1; + __m256i roundToZeroTweak = _mm256_set1_epi64x(mask); + // q = numer + ((numer >> 63) & roundToZeroTweak); + __m256i q = _mm256_add_epi64(numers, _mm256_and_si256(libdivide_s64_signbits(numers), roundToZeroTweak)); + q = libdivide_s64_shift_right_vector(q, shift); + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm256_sub_epi64(_mm256_xor_si256(q, sign), sign); + return q; + } + else { + __m256i q = libdivide_mullhi_s64_vector(numers, _mm256_set1_epi64x(magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm256_add_epi64(q, _mm256_sub_epi64(_mm256_xor_si256(numers, sign), sign)); + } + // q >>= denom->mult_path.shift + q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK); + q = _mm256_add_epi64(q, _mm256_srli_epi64(q, 63)); // q += (q < 0) + return q; + } +} + +__m256i libdivide_s64_branchfree_do_vector(__m256i numers, const struct libdivide_s64_branchfree_t *denom) { + int64_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + // must be arithmetic shift + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + + // libdivide_mullhi_s64(numers, magic); + __m256i q = libdivide_mullhi_s64_vector(numers, _mm256_set1_epi64x(magic)); + q = _mm256_add_epi64(q, 
numers); // q += numers + + // If q is non-negative, we have nothing to do. + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2. + uint32_t is_power_of_2 = (magic == 0); + __m256i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63 + __m256i mask = _mm256_set1_epi64x((1ULL << shift) - is_power_of_2); + q = _mm256_add_epi64(q, _mm256_and_si256(q_sign, mask)); // q = q + (q_sign & mask) + q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift + q = _mm256_sub_epi64(_mm256_xor_si256(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +#elif defined(LIBDIVIDE_SSE2) + +static inline __m128i libdivide_u32_do_vector(__m128i numers, const struct libdivide_u32_t *denom); +static inline __m128i libdivide_s32_do_vector(__m128i numers, const struct libdivide_s32_t *denom); +static inline __m128i libdivide_u64_do_vector(__m128i numers, const struct libdivide_u64_t *denom); +static inline __m128i libdivide_s64_do_vector(__m128i numers, const struct libdivide_s64_t *denom); + +static inline __m128i libdivide_u32_branchfree_do_vector(__m128i numers, const struct libdivide_u32_branchfree_t *denom); +static inline __m128i libdivide_s32_branchfree_do_vector(__m128i numers, const struct libdivide_s32_branchfree_t *denom); +static inline __m128i libdivide_u64_branchfree_do_vector(__m128i numers, const struct libdivide_u64_branchfree_t *denom); +static inline __m128i libdivide_s64_branchfree_do_vector(__m128i numers, const struct libdivide_s64_branchfree_t *denom); + +//////// Internal Utility Functions + +// Implementation of _mm_srai_epi64(v, 63) (from AVX512). +static inline __m128i libdivide_s64_signbits(__m128i v) { + __m128i hiBitsDuped = _mm_shuffle_epi32(v, _MM_SHUFFLE(3, 3, 1, 1)); + __m128i signBits = _mm_srai_epi32(hiBitsDuped, 31); + return signBits; +} + +// Implementation of _mm_srai_epi64 (from AVX512). +static inline __m128i libdivide_s64_shift_right_vector(__m128i v, int amt) { + const int b = 64 - amt; + __m128i m = _mm_set1_epi64x(1ULL << (b - 1)); + __m128i x = _mm_srli_epi64(v, amt); + __m128i result = _mm_sub_epi64(_mm_xor_si128(x, m), m); + return result; +} + +// Here, b is assumed to contain one 32-bit value repeated. +static inline __m128i libdivide_mullhi_u32_vector(__m128i a, __m128i b) { + __m128i hi_product_0Z2Z = _mm_srli_epi64(_mm_mul_epu32(a, b), 32); + __m128i a1X3X = _mm_srli_epi64(a, 32); + __m128i mask = _mm_set_epi32(-1, 0, -1, 0); + __m128i hi_product_Z1Z3 = _mm_and_si128(_mm_mul_epu32(a1X3X, b), mask); + return _mm_or_si128(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// SSE2 does not have a signed multiplication instruction, but we can convert +// unsigned to signed pretty efficiently. Again, b is just a 32 bit value +// repeated four times. +static inline __m128i libdivide_mullhi_s32_vector(__m128i a, __m128i b) { + __m128i p = libdivide_mullhi_u32_vector(a, b); + // t1 = (a >> 31) & y, arithmetic shift + __m128i t1 = _mm_and_si128(_mm_srai_epi32(a, 31), b); + __m128i t2 = _mm_and_si128(_mm_srai_epi32(b, 31), a); + p = _mm_sub_epi32(p, t1); + p = _mm_sub_epi32(p, t2); + return p; +} + +// Here, y is assumed to contain one 64-bit value repeated. 
+// https://stackoverflow.com/a/28827013 +static inline __m128i libdivide_mullhi_u64_vector(__m128i x, __m128i y) { + __m128i lomask = _mm_set1_epi64x(0xffffffff); + __m128i xh = _mm_shuffle_epi32(x, 0xB1); // x0l, x0h, x1l, x1h + __m128i yh = _mm_shuffle_epi32(y, 0xB1); // y0l, y0h, y1l, y1h + __m128i w0 = _mm_mul_epu32(x, y); // x0l*y0l, x1l*y1l + __m128i w1 = _mm_mul_epu32(x, yh); // x0l*y0h, x1l*y1h + __m128i w2 = _mm_mul_epu32(xh, y); // x0h*y0l, x1h*y0l + __m128i w3 = _mm_mul_epu32(xh, yh); // x0h*y0h, x1h*y1h + __m128i w0h = _mm_srli_epi64(w0, 32); + __m128i s1 = _mm_add_epi64(w1, w0h); + __m128i s1l = _mm_and_si128(s1, lomask); + __m128i s1h = _mm_srli_epi64(s1, 32); + __m128i s2 = _mm_add_epi64(w2, s1l); + __m128i s2h = _mm_srli_epi64(s2, 32); + __m128i hi = _mm_add_epi64(w3, s1h); + hi = _mm_add_epi64(hi, s2h); + + return hi; +} + +// y is one 64-bit value repeated. +static inline __m128i libdivide_mullhi_s64_vector(__m128i x, __m128i y) { + __m128i p = libdivide_mullhi_u64_vector(x, y); + __m128i t1 = _mm_and_si128(libdivide_s64_signbits(x), y); + __m128i t2 = _mm_and_si128(libdivide_s64_signbits(y), x); + p = _mm_sub_epi64(p, t1); + p = _mm_sub_epi64(p, t2); + return p; +} + +////////// UINT32 + +__m128i libdivide_u32_do_vector(__m128i numers, const struct libdivide_u32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm_srli_epi32(numers, more); + } + else { + __m128i q = libdivide_mullhi_u32_vector(numers, _mm_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + __m128i t = _mm_add_epi32(_mm_srli_epi32(_mm_sub_epi32(numers, q), 1), q); + return _mm_srli_epi32(t, shift); + } + else { + return _mm_srli_epi32(q, more); + } + } +} + +__m128i libdivide_u32_branchfree_do_vector(__m128i numers, const struct libdivide_u32_branchfree_t *denom) { + __m128i q = libdivide_mullhi_u32_vector(numers, _mm_set1_epi32(denom->magic)); + __m128i t = _mm_add_epi32(_mm_srli_epi32(_mm_sub_epi32(numers, q), 1), q); + return _mm_srli_epi32(t, denom->more); +} + +////////// UINT64 + +__m128i libdivide_u64_do_vector(__m128i numers, const struct libdivide_u64_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm_srli_epi64(numers, more); + } + else { + __m128i q = libdivide_mullhi_u64_vector(numers, _mm_set1_epi64x(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + __m128i t = _mm_add_epi64(_mm_srli_epi64(_mm_sub_epi64(numers, q), 1), q); + return _mm_srli_epi64(t, shift); + } + else { + return _mm_srli_epi64(q, more); + } + } +} + +__m128i libdivide_u64_branchfree_do_vector(__m128i numers, const struct libdivide_u64_branchfree_t *denom) { + __m128i q = libdivide_mullhi_u64_vector(numers, _mm_set1_epi64x(denom->magic)); + __m128i t = _mm_add_epi64(_mm_srli_epi64(_mm_sub_epi64(numers, q), 1), q); + return _mm_srli_epi64(t, denom->more); +} + +////////// SINT32 + +__m128i libdivide_s32_do_vector(__m128i numers, const struct libdivide_s32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + uint32_t mask = (1U << shift) - 1; + __m128i roundToZeroTweak = _mm_set1_epi32(mask); + // q = numer + ((numer >> 31) & roundToZeroTweak); + __m128i q = _mm_add_epi32(numers, _mm_and_si128(_mm_srai_epi32(numers, 31), roundToZeroTweak)); + q = 
_mm_srai_epi32(q, shift); + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm_sub_epi32(_mm_xor_si128(q, sign), sign); + return q; + } + else { + __m128i q = libdivide_mullhi_s32_vector(numers, _mm_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm_add_epi32(q, _mm_sub_epi32(_mm_xor_si128(numers, sign), sign)); + } + // q >>= shift + q = _mm_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK); + q = _mm_add_epi32(q, _mm_srli_epi32(q, 31)); // q += (q < 0) + return q; + } +} + +__m128i libdivide_s32_branchfree_do_vector(__m128i numers, const struct libdivide_s32_branchfree_t *denom) { + int32_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + // must be arithmetic shift + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + __m128i q = libdivide_mullhi_s32_vector(numers, _mm_set1_epi32(magic)); + q = _mm_add_epi32(q, numers); // q += numers + + // If q is non-negative, we have nothing to do + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2 + uint32_t is_power_of_2 = (magic == 0); + __m128i q_sign = _mm_srai_epi32(q, 31); // q_sign = q >> 31 + __m128i mask = _mm_set1_epi32((1U << shift) - is_power_of_2); + q = _mm_add_epi32(q, _mm_and_si128(q_sign, mask)); // q = q + (q_sign & mask) + q = _mm_srai_epi32(q, shift); // q >>= shift + q = _mm_sub_epi32(_mm_xor_si128(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +////////// SINT64 + +__m128i libdivide_s64_do_vector(__m128i numers, const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + int64_t magic = denom->magic; + if (magic == 0) { // shift path + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + uint64_t mask = (1ULL << shift) - 1; + __m128i roundToZeroTweak = _mm_set1_epi64x(mask); + // q = numer + ((numer >> 63) & roundToZeroTweak); + __m128i q = _mm_add_epi64(numers, _mm_and_si128(libdivide_s64_signbits(numers), roundToZeroTweak)); + q = libdivide_s64_shift_right_vector(q, shift); + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm_sub_epi64(_mm_xor_si128(q, sign), sign); + return q; + } + else { + __m128i q = libdivide_mullhi_s64_vector(numers, _mm_set1_epi64x(magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm_add_epi64(q, _mm_sub_epi64(_mm_xor_si128(numers, sign), sign)); + } + // q >>= denom->mult_path.shift + q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK); + q = _mm_add_epi64(q, _mm_srli_epi64(q, 63)); // q += (q < 0) + return q; + } +} + +__m128i libdivide_s64_branchfree_do_vector(__m128i numers, const struct libdivide_s64_branchfree_t *denom) { + int64_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + // must be arithmetic shift + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + + // libdivide_mullhi_s64(numers, magic); + __m128i q = libdivide_mullhi_s64_vector(numers, _mm_set1_epi64x(magic)); + q = _mm_add_epi64(q, numers); // q += numers + + // If q is non-negative, we have nothing to do. + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2. 
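+    // (Editorial worked example of this fix-up: with shift == 2, C requires
+    // -5 / 4 == -1, but an arithmetic shift alone gives -5 >> 2 == -2;
+    // adding (2**shift)-1 == 3 first gives (-5 + 3) >> 2 == -1, so the
+    // shift now rounds toward zero as required.)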
+    uint32_t is_power_of_2 = (magic == 0);
+    __m128i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63
+    __m128i mask = _mm_set1_epi64x((1ULL << shift) - is_power_of_2);
+    q = _mm_add_epi64(q, _mm_and_si128(q_sign, mask)); // q = q + (q_sign & mask)
+    q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift
+    q = _mm_sub_epi64(_mm_xor_si128(q, sign), sign); // q = (q ^ sign) - sign
+    return q;
+}
+
+#endif
+
+/////////// C++ stuff
+
+#ifdef __cplusplus
+
+// The C++ divider class is templated on both an integer type
+// (like uint64_t) and an algorithm type.
+// * BRANCHFULL is the default algorithm type.
+// * BRANCHFREE is the branchfree algorithm type.
+enum {
+    BRANCHFULL,
+    BRANCHFREE
+};
+
+#if defined(LIBDIVIDE_AVX512)
+    #define LIBDIVIDE_VECTOR_TYPE __m512i
+#elif defined(LIBDIVIDE_AVX2)
+    #define LIBDIVIDE_VECTOR_TYPE __m256i
+#elif defined(LIBDIVIDE_SSE2)
+    #define LIBDIVIDE_VECTOR_TYPE __m128i
+#endif
+
+#if !defined(LIBDIVIDE_VECTOR_TYPE)
+    #define LIBDIVIDE_DIVIDE_VECTOR(ALGO)
+#else
+    #define LIBDIVIDE_DIVIDE_VECTOR(ALGO) \
+        LIBDIVIDE_VECTOR_TYPE divide(LIBDIVIDE_VECTOR_TYPE n) const { \
+            return libdivide_##ALGO##_do_vector(n, &denom); \
+        }
+#endif
+
+// The DISPATCHER_GEN() macro generates C++ methods (for the given integer
+// and algorithm types) that redirect to libdivide's C API.
+#define DISPATCHER_GEN(T, ALGO) \
+    libdivide_##ALGO##_t denom; \
+    dispatcher() { } \
+    dispatcher(T d) \
+        : denom(libdivide_##ALGO##_gen(d)) \
+    { } \
+    T divide(T n) const { \
+        return libdivide_##ALGO##_do(n, &denom); \
+    } \
+    LIBDIVIDE_DIVIDE_VECTOR(ALGO) \
+    T recover() const { \
+        return libdivide_##ALGO##_recover(&denom); \
+    }
+
+// The dispatcher selects a specific division algorithm for a given
+// type and ALGO using template specialization.
+template<bool IS_INTEGRAL, bool IS_SIGNED, int SIZEOF, int ALGO> struct dispatcher { };
+
+template<> struct dispatcher<true, true, sizeof(int32_t), BRANCHFULL> { DISPATCHER_GEN(int32_t, s32) };
+template<> struct dispatcher<true, true, sizeof(int32_t), BRANCHFREE> { DISPATCHER_GEN(int32_t, s32_branchfree) };
+template<> struct dispatcher<true, false, sizeof(uint32_t), BRANCHFULL> { DISPATCHER_GEN(uint32_t, u32) };
+template<> struct dispatcher<true, false, sizeof(uint32_t), BRANCHFREE> { DISPATCHER_GEN(uint32_t, u32_branchfree) };
+template<> struct dispatcher<true, true, sizeof(int64_t), BRANCHFULL> { DISPATCHER_GEN(int64_t, s64) };
+template<> struct dispatcher<true, true, sizeof(int64_t), BRANCHFREE> { DISPATCHER_GEN(int64_t, s64_branchfree) };
+template<> struct dispatcher<true, false, sizeof(uint64_t), BRANCHFULL> { DISPATCHER_GEN(uint64_t, u64) };
+template<> struct dispatcher<true, false, sizeof(uint64_t), BRANCHFREE> { DISPATCHER_GEN(uint64_t, u64_branchfree) };
+
+// This is the main divider class for use by the user (C++ API).
+// The actual division algorithm is selected using the dispatcher struct
+// based on the integer and algorithm template parameters.
+template<typename T, int ALGO = BRANCHFULL>
+class divider {
+public:
+    // We leave the default constructor empty so that creating
+    // an array of dividers and then initializing them
+    // later doesn't slow us down.
+    divider() { }
+
+    // Constructor that takes the divisor as a parameter
+    divider(T d) : div(d) { }
+
+    // Divides n by the divisor
+    T divide(T n) const {
+        return div.divide(n);
+    }
+
+    // Recovers the divisor, returns the value that was
+    // used to initialize this divider object.
+    T recover() const {
+        return div.recover();
+    }
+
+    bool operator==(const divider<T, ALGO>& other) const {
+        return div.denom.magic == other.div.denom.magic &&
+               div.denom.more == other.div.denom.more;
+    }
+
+    bool operator!=(const divider<T, ALGO>& other) const {
+        return !(*this == other);
+    }
+
+#if defined(LIBDIVIDE_VECTOR_TYPE)
+    // Treats the vector as packed integer values with the same type as
+    // the divider (e.g. s32, u32, s64, u64) and divides each of
+    // them by the divider, returning the packed quotients.
+    LIBDIVIDE_VECTOR_TYPE divide(LIBDIVIDE_VECTOR_TYPE n) const {
+        return div.divide(n);
+    }
+#endif
+
+private:
+    // Storage for the actual divisor
+    dispatcher<std::is_integral<T>::value,
+               std::is_signed<T>::value, sizeof(T), ALGO> div;
+};
+
+// Overload of operator / for scalar division
+template<typename T, int ALGO>
+T operator/(T n, const divider<T, ALGO>& div) {
+    return div.divide(n);
+}
+
+// Overload of operator /= for scalar division
+template<typename T, int ALGO>
+T& operator/=(T& n, const divider<T, ALGO>& div) {
+    n = div.divide(n);
+    return n;
+}
+
+#if defined(LIBDIVIDE_VECTOR_TYPE)
+    // Overload of operator / for vector division
+    template<typename T, int ALGO>
+    LIBDIVIDE_VECTOR_TYPE operator/(LIBDIVIDE_VECTOR_TYPE n, const divider<T, ALGO>& div) {
+        return div.divide(n);
+    }
+    // Overload of operator /= for vector division
+    template<typename T, int ALGO>
+    LIBDIVIDE_VECTOR_TYPE& operator/=(LIBDIVIDE_VECTOR_TYPE& n, const divider<T, ALGO>& div) {
+        n = div.divide(n);
+        return n;
+    }
+#endif
+
+// libdivide::branchfree_divider
+template <typename T>
+using branchfree_divider = divider<T, BRANCHFREE>;
+
+} // namespace libdivide
+
+#endif // __cplusplus
+
+#endif // LIBDIVIDE_H
diff --git a/MLPY/Lib/site-packages/numpy/core/include/numpy/multiarray_api.txt b/MLPY/Lib/site-packages/numpy/core/include/numpy/multiarray_api.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9364f892007de9c4e434e2ba45397574b01829b1
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/core/include/numpy/multiarray_api.txt
@@ -0,0 +1,2483 @@
+
+===========
+NumPy C-API
+===========
+::
+
+  unsigned int
+  PyArray_GetNDArrayCVersion(void )
+
+
+Included at the very start so it is not auto-grabbed and thus not labeled.
+
+::
+
+  int
+  PyArray_SetNumericOps(PyObject *dict)
+
+Set internal structure with number functions that all arrays will use
+
+::
+
+  PyObject *
+  PyArray_GetNumericOps(void )
+
+Get dictionary showing number functions that all arrays will use
+
+::
+
+  int
+  PyArray_INCREF(PyArrayObject *mp)
+
+For object arrays, increment all internal references.
+
+::
+
+  int
+  PyArray_XDECREF(PyArrayObject *mp)
+
+Decrement all internal references for object arrays.
+(or arrays with object fields)
+
+::
+
+  void
+  PyArray_SetStringFunction(PyObject *op, int repr)
+
+Set the array print function to be a Python function.
+
+::
+
+  PyArray_Descr *
+  PyArray_DescrFromType(int type)
+
+Get the PyArray_Descr structure for a type.
+
+::
+
+  PyObject *
+  PyArray_TypeObjectFromType(int type)
+
+Get a typeobject from a type-number -- can return NULL.
+
+New reference
+
+::
+
+  char *
+  PyArray_Zero(PyArrayObject *arr)
+
+Get pointer to zero of correct type for array.
+
+::
+
+  char *
+  PyArray_One(PyArrayObject *arr)
+
+Get pointer to one of correct type for array
+
+::
+
+  PyObject *
+  PyArray_CastToType(PyArrayObject *arr, PyArray_Descr *dtype, int
+                     is_f_order)
+
+For backward compatibility
+
+Cast an array using typecode structure.
+steals reference to dtype --- cannot be NULL
+
+This function always makes a copy of arr, even if the dtype
+doesn't change.
+
+::
+
+  int
+  PyArray_CastTo(PyArrayObject *out, PyArrayObject *mp)
+
+Cast to an already created array.
+
+::
+
+  int
+  PyArray_CastAnyTo(PyArrayObject *out, PyArrayObject *mp)
+
+Cast to an already created array. Arrays don't have to be "broadcastable"
+Only requirement is they have the same number of elements.
+
+::
+
+  int
+  PyArray_CanCastSafely(int fromtype, int totype)
+
+Check the type coercion rules.
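+
+// Illustrative sketch (editorial addition, not part of libdivide itself):
+// a divider pays the magic-number setup cost once, after which each '/'
+// below compiles to the multiply-and-shift path instead of a hardware divide.
+//
+//     libdivide::divider<int64_t> fast_d(3);      // setup once
+//     for (size_t i = 0; i < count; i++)
+//         data[i] = data[i] / fast_d;             // uses operator/ below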
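+
+An illustrative guard built on the entry above (an editorial sketch, not
+part of the generated listing; assumes the usual ``import_array()`` setup)::
+
+  if (PyArray_CanCastSafely(NPY_INT32, NPY_FLOAT64)) {
+      /* every int32 value survives the cast exactly */
+  }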
+ +:: + + npy_bool + PyArray_CanCastTo(PyArray_Descr *from, PyArray_Descr *to) + +leaves reference count alone --- cannot be NULL + +PyArray_CanCastTypeTo is equivalent to this, but adds a 'casting' +parameter. + +:: + + int + PyArray_ObjectType(PyObject *op, int minimum_type) + +Return the typecode of the array a Python object would be converted to + +Returns the type number the result should have, or NPY_NOTYPE on error. + +:: + + PyArray_Descr * + PyArray_DescrFromObject(PyObject *op, PyArray_Descr *mintype) + +new reference -- accepts NULL for mintype + +:: + + PyArrayObject ** + PyArray_ConvertToCommonType(PyObject *op, int *retn) + + +This function is only used in one place within NumPy and should +generally be avoided. It is provided mainly for backward compatibility. + +The user of the function has to free the returned array. + +:: + + PyArray_Descr * + PyArray_DescrFromScalar(PyObject *sc) + +Return descr object from array scalar. + +New reference + +:: + + PyArray_Descr * + PyArray_DescrFromTypeObject(PyObject *type) + + +:: + + npy_intp + PyArray_Size(PyObject *op) + +Compute the size of an array (in number of items) + +:: + + PyObject * + PyArray_Scalar(void *data, PyArray_Descr *descr, PyObject *base) + +Get scalar-equivalent to a region of memory described by a descriptor. + +:: + + PyObject * + PyArray_FromScalar(PyObject *scalar, PyArray_Descr *outcode) + +Get 0-dim array from scalar + +0-dim array from array-scalar object +always contains a copy of the data +unless outcode is NULL, it is of void type and the referrer does +not own it either. + +steals reference to outcode + +:: + + void + PyArray_ScalarAsCtype(PyObject *scalar, void *ctypeptr) + +Convert to c-type + +no error checking is performed -- ctypeptr must be same type as scalar +in case of flexible type, the data is not copied +into ctypeptr which is expected to be a pointer to pointer + +:: + + int + PyArray_CastScalarToCtype(PyObject *scalar, void + *ctypeptr, PyArray_Descr *outcode) + +Cast Scalar to c-type + +The output buffer must be large-enough to receive the value +Even for flexible types which is different from ScalarAsCtype +where only a reference for flexible types is returned + +This may not work right on narrow builds for NumPy unicode scalars. + +:: + + int + PyArray_CastScalarDirect(PyObject *scalar, PyArray_Descr + *indescr, void *ctypeptr, int outtype) + +Cast Scalar to c-type + +:: + + PyObject * + PyArray_ScalarFromObject(PyObject *object) + +Get an Array Scalar From a Python Object + +Returns NULL if unsuccessful but error is only set if another error occurred. +Currently only Numeric-like object supported. + +:: + + PyArray_VectorUnaryFunc * + PyArray_GetCastFunc(PyArray_Descr *descr, int type_num) + +Get a cast function to cast from the input descriptor to the +output type_number (must be a registered data-type). +Returns NULL if un-successful. + +:: + + PyObject * + PyArray_FromDims(int NPY_UNUSED(nd) , int *NPY_UNUSED(d) , int + NPY_UNUSED(type) ) + +Deprecated, use PyArray_SimpleNew instead. + +:: + + PyObject * + PyArray_FromDimsAndDataAndDescr(int NPY_UNUSED(nd) , int + *NPY_UNUSED(d) , PyArray_Descr + *descr, char *NPY_UNUSED(data) ) + +Deprecated, use PyArray_NewFromDescr instead. 
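+
+For the deprecated creators above, an editorial sketch of the suggested
+replacement, ``PyArray_SimpleNew`` (assumes ``import_array()`` has run)::
+
+  npy_intp dims[2] = {3, 4};
+  PyObject *arr = PyArray_SimpleNew(2, dims, NPY_FLOAT64);
+  if (arr == NULL) {
+      return NULL;  /* allocation failed, exception already set */
+  }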
+ +:: + + PyObject * + PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int + min_depth, int max_depth, int flags, PyObject + *context) + +Does not check for NPY_ARRAY_ENSURECOPY and NPY_ARRAY_NOTSWAPPED in flags +Steals a reference to newtype --- which can be NULL + +:: + + PyObject * + PyArray_EnsureArray(PyObject *op) + +This is a quick wrapper around +PyArray_FromAny(op, NULL, 0, 0, NPY_ARRAY_ENSUREARRAY, NULL) +that special cases Arrays and PyArray_Scalars up front +It *steals a reference* to the object +It also guarantees that the result is PyArray_Type +Because it decrefs op if any conversion needs to take place +so it can be used like PyArray_EnsureArray(some_function(...)) + +:: + + PyObject * + PyArray_EnsureAnyArray(PyObject *op) + + +:: + + PyObject * + PyArray_FromFile(FILE *fp, PyArray_Descr *dtype, npy_intp num, char + *sep) + + +Given a ``FILE *`` pointer ``fp``, and a ``PyArray_Descr``, return an +array corresponding to the data encoded in that file. + +The reference to `dtype` is stolen (it is possible that the passed in +dtype is not held on to). + +The number of elements to read is given as ``num``; if it is < 0, then +then as many as possible are read. + +If ``sep`` is NULL or empty, then binary data is assumed, else +text data, with ``sep`` as the separator between elements. Whitespace in +the separator matches any length of whitespace in the text, and a match +for whitespace around the separator is added. + +For memory-mapped files, use the buffer interface. No more data than +necessary is read by this routine. + +:: + + PyObject * + PyArray_FromString(char *data, npy_intp slen, PyArray_Descr + *dtype, npy_intp num, char *sep) + + +Given a pointer to a string ``data``, a string length ``slen``, and +a ``PyArray_Descr``, return an array corresponding to the data +encoded in that string. + +If the dtype is NULL, the default array type is used (double). +If non-null, the reference is stolen. + +If ``slen`` is < 0, then the end of string is used for text data. +It is an error for ``slen`` to be < 0 for binary data (since embedded NULLs +would be the norm). + +The number of elements to read is given as ``num``; if it is < 0, then +then as many as possible are read. + +If ``sep`` is NULL or empty, then binary data is assumed, else +text data, with ``sep`` as the separator between elements. Whitespace in +the separator matches any length of whitespace in the text, and a match +for whitespace around the separator is added. + +:: + + PyObject * + PyArray_FromBuffer(PyObject *buf, PyArray_Descr *type, npy_intp + count, npy_intp offset) + + +:: + + PyObject * + PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count) + + +steals a reference to dtype (which cannot be NULL) + +:: + + PyObject * + PyArray_Return(PyArrayObject *mp) + + +Return either an array or the appropriate Python object if the array +is 0d and matches a Python type. 
+steals reference to mp + +:: + + PyObject * + PyArray_GetField(PyArrayObject *self, PyArray_Descr *typed, int + offset) + +Get a subset of bytes from each element of the array +steals reference to typed, must not be NULL + +:: + + int + PyArray_SetField(PyArrayObject *self, PyArray_Descr *dtype, int + offset, PyObject *val) + +Set a subset of bytes from each element of the array +steals reference to dtype, must not be NULL + +:: + + PyObject * + PyArray_Byteswap(PyArrayObject *self, npy_bool inplace) + + +:: + + PyObject * + PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int + refcheck, NPY_ORDER NPY_UNUSED(order) ) + +Resize (reallocate data). Only works if nothing else is referencing this +array and it is contiguous. If refcheck is 0, then the reference count is +not checked and assumed to be 1. You still must own this data and have no +weak-references and no base object. + +:: + + int + PyArray_MoveInto(PyArrayObject *dst, PyArrayObject *src) + +Move the memory of one array into another, allowing for overlapping data. + +Returns 0 on success, negative on failure. + +:: + + int + PyArray_CopyInto(PyArrayObject *dst, PyArrayObject *src) + +Copy an Array into another array. +Broadcast to the destination shape if necessary. + +Returns 0 on success, -1 on failure. + +:: + + int + PyArray_CopyAnyInto(PyArrayObject *dst, PyArrayObject *src) + +Copy an Array into another array -- memory must not overlap +Does not require src and dest to have "broadcastable" shapes +(only the same number of elements). + +TODO: For NumPy 2.0, this could accept an order parameter which +only allows NPY_CORDER and NPY_FORDER. Could also rename +this to CopyAsFlat to make the name more intuitive. + +Returns 0 on success, -1 on error. + +:: + + int + PyArray_CopyObject(PyArrayObject *dest, PyObject *src_object) + + +:: + + PyObject * + PyArray_NewCopy(PyArrayObject *obj, NPY_ORDER order) + +Copy an array. + +:: + + PyObject * + PyArray_ToList(PyArrayObject *self) + +To List + +:: + + PyObject * + PyArray_ToString(PyArrayObject *self, NPY_ORDER order) + + +:: + + int + PyArray_ToFile(PyArrayObject *self, FILE *fp, char *sep, char *format) + +To File + +:: + + int + PyArray_Dump(PyObject *self, PyObject *file, int protocol) + + +:: + + PyObject * + PyArray_Dumps(PyObject *self, int protocol) + + +:: + + int + PyArray_ValidType(int type) + +Is the typenum valid? + +:: + + void + PyArray_UpdateFlags(PyArrayObject *ret, int flagmask) + +Update Several Flags at once. + +:: + + PyObject * + PyArray_New(PyTypeObject *subtype, int nd, npy_intp const *dims, int + type_num, npy_intp const *strides, void *data, int + itemsize, int flags, PyObject *obj) + +Generic new array creation routine. + +:: + + PyObject * + PyArray_NewFromDescr(PyTypeObject *subtype, PyArray_Descr *descr, int + nd, npy_intp const *dims, npy_intp const + *strides, void *data, int flags, PyObject *obj) + +Generic new array creation routine. + +steals a reference to descr. On failure or when dtype->subarray is +true, dtype will be decrefed. + +:: + + PyArray_Descr * + PyArray_DescrNew(PyArray_Descr *base) + +base cannot be NULL + +:: + + PyArray_Descr * + PyArray_DescrNewFromType(int type_num) + + +:: + + double + PyArray_GetPriority(PyObject *obj, double default_) + +Get Priority from object + +:: + + PyObject * + PyArray_IterNew(PyObject *obj) + +Get Iterator. + +:: + + PyObject* + PyArray_MultiIterNew(int n, ... 
) + +Get MultiIterator, + +:: + + int + PyArray_PyIntAsInt(PyObject *o) + + +:: + + npy_intp + PyArray_PyIntAsIntp(PyObject *o) + + +:: + + int + PyArray_Broadcast(PyArrayMultiIterObject *mit) + + +:: + + void + PyArray_FillObjectArray(PyArrayObject *arr, PyObject *obj) + +Assumes contiguous + +:: + + int + PyArray_FillWithScalar(PyArrayObject *arr, PyObject *obj) + + +:: + + npy_bool + PyArray_CheckStrides(int elsize, int nd, npy_intp numbytes, npy_intp + offset, npy_intp const *dims, npy_intp const + *newstrides) + + +:: + + PyArray_Descr * + PyArray_DescrNewByteorder(PyArray_Descr *self, char newendian) + + +returns a copy of the PyArray_Descr structure with the byteorder +altered: +no arguments: The byteorder is swapped (in all subfields as well) +single argument: The byteorder is forced to the given state +(in all subfields as well) + +Valid states: ('big', '>') or ('little' or '<') +('native', or '=') + +If a descr structure with | is encountered it's own +byte-order is not changed but any fields are: + + +Deep bytorder change of a data-type descriptor +Leaves reference count of self unchanged --- does not DECREF self *** + +:: + + PyObject * + PyArray_IterAllButAxis(PyObject *obj, int *inaxis) + +Get Iterator that iterates over all but one axis (don't use this with +PyArray_ITER_GOTO1D). The axis will be over-written if negative +with the axis having the smallest stride. + +:: + + PyObject * + PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int + min_depth, int max_depth, int requires, PyObject + *context) + +steals a reference to descr -- accepts NULL + +:: + + PyObject * + PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int + flags) + +steals reference to newtype --- acc. NULL + +:: + + PyObject * + PyArray_FromInterface(PyObject *origin) + + +:: + + PyObject * + PyArray_FromStructInterface(PyObject *input) + + +:: + + PyObject * + PyArray_FromArrayAttr(PyObject *op, PyArray_Descr *typecode, PyObject + *context) + + +:: + + NPY_SCALARKIND + PyArray_ScalarKind(int typenum, PyArrayObject **arr) + +ScalarKind + +Returns the scalar kind of a type number, with an +optional tweak based on the scalar value itself. +If no scalar is provided, it returns INTPOS_SCALAR +for both signed and unsigned integers, otherwise +it checks the sign of any signed integer to choose +INTNEG_SCALAR when appropriate. + +:: + + int + PyArray_CanCoerceScalar(int thistype, int neededtype, NPY_SCALARKIND + scalar) + + +Determines whether the data type 'thistype', with +scalar kind 'scalar', can be coerced into 'neededtype'. + +:: + + PyObject * + PyArray_NewFlagsObject(PyObject *obj) + + +Get New ArrayFlagsObject + +:: + + npy_bool + PyArray_CanCastScalar(PyTypeObject *from, PyTypeObject *to) + +See if array scalars can be cast. + +TODO: For NumPy 2.0, add a NPY_CASTING parameter. + +:: + + int + PyArray_CompareUCS4(npy_ucs4 const *s1, npy_ucs4 const *s2, size_t + len) + + +:: + + int + PyArray_RemoveSmallest(PyArrayMultiIterObject *multi) + +Adjusts previously broadcasted iterators so that the axis with +the smallest sum of iterator strides is not iterated over. +Returns dimension which is smallest in the range [0,multi->nd). +A -1 is returned if multi->nd == 0. + +don't use with PyArray_ITER_GOTO1D because factors are not adjusted + +:: + + int + PyArray_ElementStrides(PyObject *obj) + + +:: + + void + PyArray_Item_INCREF(char *data, PyArray_Descr *descr) + +XINCREF all objects in a single array item. 
This is complicated for
+structured datatypes where the position of objects needs to be extracted.
+The function is executed recursively for each nested field or subarray dtype
+such as `np.dtype([("field1", "O"), ("field2", "f,O", (3,2))])`
+
+::
+
+  void
+  PyArray_Item_XDECREF(char *data, PyArray_Descr *descr)
+
+
+XDECREF all objects in a single array item. This is complicated for
+structured datatypes where the position of objects needs to be extracted.
+The function is executed recursively for each nested field or subarray dtype
+such as `np.dtype([("field1", "O"), ("field2", "f,O", (3,2))])`
+
+::
+
+  PyObject *
+  PyArray_FieldNames(PyObject *fields)
+
+Return the tuple of ordered field names from a dictionary.
+
+::
+
+  PyObject *
+  PyArray_Transpose(PyArrayObject *ap, PyArray_Dims *permute)
+
+Return Transpose.
+
+::
+
+  PyObject *
+  PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int
+                   axis, PyArrayObject *out, NPY_CLIPMODE clipmode)
+
+Take
+
+::
+
+  PyObject *
+  PyArray_PutTo(PyArrayObject *self, PyObject*values0, PyObject
+                *indices0, NPY_CLIPMODE clipmode)
+
+Put values into an array
+
+::
+
+  PyObject *
+  PyArray_PutMask(PyArrayObject *self, PyObject*values0, PyObject*mask0)
+
+Put values into an array according to a mask.
+
+::
+
+  PyObject *
+  PyArray_Repeat(PyArrayObject *aop, PyObject *op, int axis)
+
+Repeat the array.
+
+::
+
+  PyObject *
+  PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject
+                 *out, NPY_CLIPMODE clipmode)
+
+
+::
+
+  int
+  PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND which)
+
+Sort an array in-place
+
+::
+
+  PyObject *
+  PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND which)
+
+ArgSort an array
+
+::
+
+  PyObject *
+  PyArray_SearchSorted(PyArrayObject *op1, PyObject *op2, NPY_SEARCHSIDE
+                       side, PyObject *perm)
+
+
+Search the sorted array op1 for the location of the items in op2. The
+result is an array of indexes, one for each element in op2, such that if
+the item were to be inserted in op1 just before that index the array
+would still be in sorted order.
+
+Parameters
+----------
+op1 : PyArrayObject *
+Array to be searched, must be 1-D.
+op2 : PyObject *
+Array of items whose insertion indexes in op1 are wanted
+side : {NPY_SEARCHLEFT, NPY_SEARCHRIGHT}
+If NPY_SEARCHLEFT, return first valid insertion indexes
+If NPY_SEARCHRIGHT, return last valid insertion indexes
+perm : PyObject *
+Permutation array that sorts op1 (optional)
+
+Returns
+-------
+ret : PyObject *
+New reference to npy_intp array containing indexes where items in op2
+could be validly inserted into op1. NULL on error.
+
+Notes
+-----
+Binary search is used to find the indexes.
+
+::
+
+  PyObject *
+  PyArray_ArgMax(PyArrayObject *op, int axis, PyArrayObject *out)
+
+ArgMax
+
+::
+
+  PyObject *
+  PyArray_ArgMin(PyArrayObject *op, int axis, PyArrayObject *out)
+
+ArgMin
+
+::
+
+  PyObject *
+  PyArray_Reshape(PyArrayObject *self, PyObject *shape)
+
+Reshape
+
+::
+
+  PyObject *
+  PyArray_Newshape(PyArrayObject *self, PyArray_Dims *newdims, NPY_ORDER
+                   order)
+
+New shape for an array
+
+::
+
+  PyObject *
+  PyArray_Squeeze(PyArrayObject *self)
+
+
+return a new view of the array object with all of its unit-length
+dimensions squeezed out if needed, otherwise
+return the same array.
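+
+An editorial sketch of the in-place sort entry above (``arr`` is a
+hypothetical ``PyArrayObject *``)::
+
+  /* sort along the last axis; returns 0 on success, -1 on error */
+  if (PyArray_Sort(arr, -1, NPY_QUICKSORT) < 0) {
+      return NULL;
+  }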
+ +:: + + PyObject * + PyArray_View(PyArrayObject *self, PyArray_Descr *type, PyTypeObject + *pytype) + +View +steals a reference to type -- accepts NULL + +:: + + PyObject * + PyArray_SwapAxes(PyArrayObject *ap, int a1, int a2) + +SwapAxes + +:: + + PyObject * + PyArray_Max(PyArrayObject *ap, int axis, PyArrayObject *out) + +Max + +:: + + PyObject * + PyArray_Min(PyArrayObject *ap, int axis, PyArrayObject *out) + +Min + +:: + + PyObject * + PyArray_Ptp(PyArrayObject *ap, int axis, PyArrayObject *out) + +Ptp + +:: + + PyObject * + PyArray_Mean(PyArrayObject *self, int axis, int rtype, PyArrayObject + *out) + +Mean + +:: + + PyObject * + PyArray_Trace(PyArrayObject *self, int offset, int axis1, int + axis2, int rtype, PyArrayObject *out) + +Trace + +:: + + PyObject * + PyArray_Diagonal(PyArrayObject *self, int offset, int axis1, int + axis2) + +Diagonal + +In NumPy versions prior to 1.7, this function always returned a copy of +the diagonal array. In 1.7, the code has been updated to compute a view +onto 'self', but it still copies this array before returning, as well as +setting the internal WARN_ON_WRITE flag. In a future version, it will +simply return a view onto self. + +:: + + PyObject * + PyArray_Clip(PyArrayObject *self, PyObject *min, PyObject + *max, PyArrayObject *out) + +Clip + +:: + + PyObject * + PyArray_Conjugate(PyArrayObject *self, PyArrayObject *out) + +Conjugate + +:: + + PyObject * + PyArray_Nonzero(PyArrayObject *self) + +Nonzero + +TODO: In NumPy 2.0, should make the iteration order a parameter. + +:: + + PyObject * + PyArray_Std(PyArrayObject *self, int axis, int rtype, PyArrayObject + *out, int variance) + +Set variance to 1 to by-pass square-root calculation and return variance +Std + +:: + + PyObject * + PyArray_Sum(PyArrayObject *self, int axis, int rtype, PyArrayObject + *out) + +Sum + +:: + + PyObject * + PyArray_CumSum(PyArrayObject *self, int axis, int rtype, PyArrayObject + *out) + +CumSum + +:: + + PyObject * + PyArray_Prod(PyArrayObject *self, int axis, int rtype, PyArrayObject + *out) + +Prod + +:: + + PyObject * + PyArray_CumProd(PyArrayObject *self, int axis, int + rtype, PyArrayObject *out) + +CumProd + +:: + + PyObject * + PyArray_All(PyArrayObject *self, int axis, PyArrayObject *out) + +All + +:: + + PyObject * + PyArray_Any(PyArrayObject *self, int axis, PyArrayObject *out) + +Any + +:: + + PyObject * + PyArray_Compress(PyArrayObject *self, PyObject *condition, int + axis, PyArrayObject *out) + +Compress + +:: + + PyObject * + PyArray_Flatten(PyArrayObject *a, NPY_ORDER order) + +Flatten + +:: + + PyObject * + PyArray_Ravel(PyArrayObject *arr, NPY_ORDER order) + +Ravel +Returns a contiguous array + +:: + + npy_intp + PyArray_MultiplyList(npy_intp const *l1, int n) + +Multiply a List + +:: + + int + PyArray_MultiplyIntList(int const *l1, int n) + +Multiply a List of ints + +:: + + void * + PyArray_GetPtr(PyArrayObject *obj, npy_intp const*ind) + +Produce a pointer into array + +:: + + int + PyArray_CompareLists(npy_intp const *l1, npy_intp const *l2, int n) + +Compare Lists + +:: + + int + PyArray_AsCArray(PyObject **op, void *ptr, npy_intp *dims, int + nd, PyArray_Descr*typedescr) + +Simulate a C-array +steals a reference to typedescr -- can be NULL + +:: + + int + PyArray_As1D(PyObject **NPY_UNUSED(op) , char **NPY_UNUSED(ptr) , int + *NPY_UNUSED(d1) , int NPY_UNUSED(typecode) ) + +Convert to a 1D C-array + +:: + + int + PyArray_As2D(PyObject **NPY_UNUSED(op) , char ***NPY_UNUSED(ptr) , int + *NPY_UNUSED(d1) , int *NPY_UNUSED(d2) , int + 
NPY_UNUSED(typecode) ) + +Convert to a 2D C-array + +:: + + int + PyArray_Free(PyObject *op, void *ptr) + +Free pointers created if As2D is called + +:: + + int + PyArray_Converter(PyObject *object, PyObject **address) + + +Useful to pass as converter function for O& processing in PyArgs_ParseTuple. + +This conversion function can be used with the "O&" argument for +PyArg_ParseTuple. It will immediately return an object of array type +or will convert to a NPY_ARRAY_CARRAY any other object. + +If you use PyArray_Converter, you must DECREF the array when finished +as you get a new reference to it. + +:: + + int + PyArray_IntpFromSequence(PyObject *seq, npy_intp *vals, int maxvals) + +PyArray_IntpFromSequence +Returns the number of integers converted or -1 if an error occurred. +vals must be large enough to hold maxvals + +:: + + PyObject * + PyArray_Concatenate(PyObject *op, int axis) + +Concatenate + +Concatenate an arbitrary Python sequence into an array. +op is a python object supporting the sequence interface. +Its elements will be concatenated together to form a single +multidimensional array. If axis is NPY_MAXDIMS or bigger, then +each sequence object will be flattened before concatenation + +:: + + PyObject * + PyArray_InnerProduct(PyObject *op1, PyObject *op2) + +Numeric.innerproduct(a,v) + +:: + + PyObject * + PyArray_MatrixProduct(PyObject *op1, PyObject *op2) + +Numeric.matrixproduct(a,v) +just like inner product but does the swapaxes stuff on the fly + +:: + + PyObject * + PyArray_CopyAndTranspose(PyObject *op) + +Copy and Transpose + +Could deprecate this function, as there isn't a speed benefit over +calling Transpose and then Copy. + +:: + + PyObject * + PyArray_Correlate(PyObject *op1, PyObject *op2, int mode) + +Numeric.correlate(a1,a2,mode) + +:: + + int + PyArray_TypestrConvert(int itemsize, int gentype) + +Typestr converter + +:: + + int + PyArray_DescrConverter(PyObject *obj, PyArray_Descr **at) + +Get typenum from an object -- None goes to NPY_DEFAULT_TYPE +This function takes a Python object representing a type and converts it +to a the correct PyArray_Descr * structure to describe the type. + +Many objects can be used to represent a data-type which in NumPy is +quite a flexible concept. + +This is the central code that converts Python objects to +Type-descriptor objects that are used throughout numpy. + +Returns a new reference in *at, but the returned should not be +modified as it may be one of the canonical immutable objects or +a reference to the input obj. + +:: + + int + PyArray_DescrConverter2(PyObject *obj, PyArray_Descr **at) + +Get typenum from an object -- None goes to NULL + +:: + + int + PyArray_IntpConverter(PyObject *obj, PyArray_Dims *seq) + +Get intp chunk from sequence + +This function takes a Python sequence object and allocates and +fills in an intp array with the converted values. + +Remember to free the pointer seq.ptr when done using +PyDimMem_FREE(seq.ptr)** + +:: + + int + PyArray_BufferConverter(PyObject *obj, PyArray_Chunk *buf) + +Get buffer chunk from object + +this function takes a Python object which exposes the (single-segment) +buffer interface and returns a pointer to the data segment + +You should increment the reference count by one of buf->base +if you will hang on to a reference + +You only get a borrowed reference to the object. Do not free the +memory... 
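+
+An editorial sketch of the ``O&`` converter pattern described for
+PyArray_Converter above, inside a hypothetical METH_VARARGS function::
+
+  PyArrayObject *arr = NULL;
+  if (!PyArg_ParseTuple(args, "O&", PyArray_Converter, &arr)) {
+      return NULL;
+  }
+  /* ... use arr ... */
+  Py_DECREF(arr);  /* PyArray_Converter returned a new reference */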
+ +:: + + int + PyArray_AxisConverter(PyObject *obj, int *axis) + +Get axis from an object (possibly None) -- a converter function, + +See also PyArray_ConvertMultiAxis, which also handles a tuple of axes. + +:: + + int + PyArray_BoolConverter(PyObject *object, npy_bool *val) + +Convert an object to true / false + +:: + + int + PyArray_ByteorderConverter(PyObject *obj, char *endian) + +Convert object to endian + +:: + + int + PyArray_OrderConverter(PyObject *object, NPY_ORDER *val) + +Convert an object to FORTRAN / C / ANY / KEEP + +:: + + unsigned char + PyArray_EquivTypes(PyArray_Descr *type1, PyArray_Descr *type2) + + +This function returns true if the two typecodes are +equivalent (same basic kind and same itemsize). + +:: + + PyObject * + PyArray_Zeros(int nd, npy_intp const *dims, PyArray_Descr *type, int + is_f_order) + +Zeros + +steals a reference to type. On failure or when dtype->subarray is +true, dtype will be decrefed. +accepts NULL type + +:: + + PyObject * + PyArray_Empty(int nd, npy_intp const *dims, PyArray_Descr *type, int + is_f_order) + +Empty + +accepts NULL type +steals a reference to type + +:: + + PyObject * + PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) + +Where + +:: + + PyObject * + PyArray_Arange(double start, double stop, double step, int type_num) + +Arange, + +:: + + PyObject * + PyArray_ArangeObj(PyObject *start, PyObject *stop, PyObject + *step, PyArray_Descr *dtype) + + +ArangeObj, + +this doesn't change the references + +:: + + int + PyArray_SortkindConverter(PyObject *obj, NPY_SORTKIND *sortkind) + +Convert object to sort kind + +:: + + PyObject * + PyArray_LexSort(PyObject *sort_keys, int axis) + +LexSort an array providing indices that will sort a collection of arrays +lexicographically. The first key is sorted on first, followed by the second key +-- requires that arg"merge"sort is available for each sort_key + +Returns an index array that shows the indexes for the lexicographic sort along +the given axis. + +:: + + PyObject * + PyArray_Round(PyArrayObject *a, int decimals, PyArrayObject *out) + +Round + +:: + + unsigned char + PyArray_EquivTypenums(int typenum1, int typenum2) + + +:: + + int + PyArray_RegisterDataType(PyArray_Descr *descr) + +Register Data type +Does not change the reference count of descr + +:: + + int + PyArray_RegisterCastFunc(PyArray_Descr *descr, int + totype, PyArray_VectorUnaryFunc *castfunc) + +Register Casting Function +Replaces any function currently stored. 
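+
+An editorial sketch of the creation entries above (PyArray_Zeros steals
+the reference to its descr argument)::
+
+  npy_intp dims[1] = {10};
+  PyObject *z = PyArray_Zeros(1, dims, PyArray_DescrFromType(NPY_INT64), 0);
+  PyObject *r = PyArray_Arange(0.0, 10.0, 1.0, NPY_DOUBLE);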
+ +:: + + int + PyArray_RegisterCanCast(PyArray_Descr *descr, int + totype, NPY_SCALARKIND scalar) + +Register a type number indicating that a descriptor can be cast +to it safely + +:: + + void + PyArray_InitArrFuncs(PyArray_ArrFuncs *f) + +Initialize arrfuncs to NULL + +:: + + PyObject * + PyArray_IntTupleFromIntp(int len, npy_intp const *vals) + +PyArray_IntTupleFromIntp + +:: + + int + PyArray_TypeNumFromName(char const *str) + + +:: + + int + PyArray_ClipmodeConverter(PyObject *object, NPY_CLIPMODE *val) + +Convert an object to NPY_RAISE / NPY_CLIP / NPY_WRAP + +:: + + int + PyArray_OutputConverter(PyObject *object, PyArrayObject **address) + +Useful to pass as converter function for O& processing in +PyArgs_ParseTuple for output arrays + +:: + + PyObject * + PyArray_BroadcastToShape(PyObject *obj, npy_intp *dims, int nd) + +Get Iterator broadcast to a particular shape + +:: + + void + _PyArray_SigintHandler(int signum) + + +:: + + void* + _PyArray_GetSigintBuf(void ) + + +:: + + int + PyArray_DescrAlignConverter(PyObject *obj, PyArray_Descr **at) + + +Get type-descriptor from an object forcing alignment if possible +None goes to DEFAULT type. + +any object with the .fields attribute and/or .itemsize attribute (if the +.fields attribute does not give the total size -- i.e. a partial record +naming). If itemsize is given it must be >= size computed from fields + +The .fields attribute must return a convertible dictionary if present. +Result inherits from NPY_VOID. + +:: + + int + PyArray_DescrAlignConverter2(PyObject *obj, PyArray_Descr **at) + + +Get type-descriptor from an object forcing alignment if possible +None goes to NULL. + +:: + + int + PyArray_SearchsideConverter(PyObject *obj, void *addr) + +Convert object to searchsorted side + +:: + + PyObject * + PyArray_CheckAxis(PyArrayObject *arr, int *axis, int flags) + +PyArray_CheckAxis + +check that axis is valid +convert 0-d arrays to 1-d arrays + +:: + + npy_intp + PyArray_OverflowMultiplyList(npy_intp const *l1, int n) + +Multiply a List of Non-negative numbers with over-flow detection. + +:: + + int + PyArray_CompareString(const char *s1, const char *s2, size_t len) + + +:: + + PyObject* + PyArray_MultiIterFromObjects(PyObject **mps, int n, int nadd, ... ) + +Get MultiIterator from array of Python objects and any additional + +PyObject **mps - array of PyObjects +int n - number of PyObjects in the array +int nadd - number of additional arrays to include in the iterator. + +Returns a multi-iterator object. + +:: + + int + PyArray_GetEndianness(void ) + + +:: + + unsigned int + PyArray_GetNDArrayCFeatureVersion(void ) + +Returns the built-in (at compilation time) C API version + +:: + + PyObject * + PyArray_Correlate2(PyObject *op1, PyObject *op2, int mode) + +correlate(a1,a2,mode) + +This function computes the usual correlation (correlate(a1, a2) != +correlate(a2, a1), and conjugate the second argument for complex inputs + +:: + + PyObject* + PyArray_NeighborhoodIterNew(PyArrayIterObject *x, const npy_intp + *bounds, int mode, PyArrayObject*fill) + +A Neighborhood Iterator object. + +:: + + void + PyArray_SetDatetimeParseFunction(PyObject *NPY_UNUSED(op) ) + +This function is scheduled to be removed + +TO BE REMOVED - NOT USED INTERNALLY. + +:: + + void + PyArray_DatetimeToDatetimeStruct(npy_datetime NPY_UNUSED(val) + , NPY_DATETIMEUNIT NPY_UNUSED(fr) + , npy_datetimestruct *result) + +Fill the datetime struct from the value and resolution unit. + +TO BE REMOVED - NOT USED INTERNALLY. 
+

::

    void
    PyArray_TimedeltaToTimedeltaStruct(npy_timedelta NPY_UNUSED(val)
                                       , NPY_DATETIMEUNIT NPY_UNUSED(fr)
                                       , npy_timedeltastruct *result)

Fill the timedelta struct from the timedelta value and resolution unit.

TO BE REMOVED - NOT USED INTERNALLY.

::

    npy_datetime
    PyArray_DatetimeStructToDatetime(NPY_DATETIMEUNIT NPY_UNUSED(fr)
                                     , npy_datetimestruct *NPY_UNUSED(d) )

Create a datetime value from a filled datetime struct and resolution unit.

TO BE REMOVED - NOT USED INTERNALLY.

::

    npy_datetime
    PyArray_TimedeltaStructToTimedelta(NPY_DATETIMEUNIT NPY_UNUSED(fr)
                                       , npy_timedeltastruct
                                       *NPY_UNUSED(d) )

Create a timedelta value from a filled timedelta struct and resolution
unit.

TO BE REMOVED - NOT USED INTERNALLY.

::

    NpyIter *
    NpyIter_New(PyArrayObject *op, npy_uint32 flags, NPY_ORDER
                order, NPY_CASTING casting, PyArray_Descr *dtype)

Allocate a new iterator for one array object.

::

    NpyIter *
    NpyIter_MultiNew(int nop, PyArrayObject **op_in, npy_uint32
                     flags, NPY_ORDER order, NPY_CASTING
                     casting, npy_uint32 *op_flags, PyArray_Descr
                     **op_request_dtypes)

Allocate a new iterator for more than one array object, using
standard NumPy broadcasting rules and the default buffer size.

::

    NpyIter *
    NpyIter_AdvancedNew(int nop, PyArrayObject **op_in, npy_uint32
                        flags, NPY_ORDER order, NPY_CASTING
                        casting, npy_uint32 *op_flags, PyArray_Descr
                        **op_request_dtypes, int oa_ndim, int
                        **op_axes, npy_intp *itershape, npy_intp
                        buffersize)

Allocate a new iterator for multiple array objects, with advanced
options for controlling the broadcasting, shape, and buffer size.

::

    NpyIter *
    NpyIter_Copy(NpyIter *iter)

Makes a copy of the iterator.

::

    int
    NpyIter_Deallocate(NpyIter *iter)

Deallocate an iterator.

To work correctly when an error is in progress, we have to check
`PyErr_Occurred()`. This is necessary when buffers are not finalized
or WritebackIfCopy is used. We could avoid that check by exposing a new
function which is passed in whether or not a Python error is already set.

::

    npy_bool
    NpyIter_HasDelayedBufAlloc(NpyIter *iter)

Whether the buffer allocation is being delayed.

::

    npy_bool
    NpyIter_HasExternalLoop(NpyIter *iter)

Whether the iterator handles the inner loop.

::

    int
    NpyIter_EnableExternalLoop(NpyIter *iter)

Removes the inner loop handling (so HasExternalLoop returns true).

::

    npy_intp *
    NpyIter_GetInnerStrideArray(NpyIter *iter)

Get the array of strides for the inner loop (when HasExternalLoop is true).

This function may be safely called without holding the Python GIL.

::

    npy_intp *
    NpyIter_GetInnerLoopSizePtr(NpyIter *iter)

Get a pointer to the size of the inner loop (when HasExternalLoop is true).

This function may be safely called without holding the Python GIL.

::

    int
    NpyIter_Reset(NpyIter *iter, char **errmsg)

Resets the iterator to its initial state.

The use of errmsg is discouraged; it cannot be guaranteed that the GIL
will not be grabbed on casting errors even when this is passed.

If errmsg is non-NULL, it should point to a variable which will
receive the error message, and no Python exception will be set.
This is so that the function can be called from code not holding
the GIL. Note that cast errors may still lead to the GIL being
grabbed temporarily.
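
Tying the constructor and external-loop accessors above together, here
is a condensed sketch of the usual single-operand iteration pattern.
It assumes a non-empty, readable array of doubles; `sum_doubles` is an
illustrative name, and NpyIter_GetIterNext is documented below::

    /* Illustrative: sum all elements of a double array using the
     * external-loop optimization. */
    static double
    sum_doubles(PyArrayObject *arr)
    {
        double total = 0.0;
        NpyIter *iter = NpyIter_New(arr,
                NPY_ITER_READONLY | NPY_ITER_EXTERNAL_LOOP,
                NPY_KEEPORDER, NPY_NO_CASTING, NULL);
        if (iter == NULL) {
            return -1.0;
        }
        NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(iter, NULL);
        if (iternext == NULL) {
            NpyIter_Deallocate(iter);
            return -1.0;
        }
        char **dataptr = NpyIter_GetDataPtrArray(iter);
        npy_intp *strideptr = NpyIter_GetInnerStrideArray(iter);
        npy_intp *sizeptr = NpyIter_GetInnerLoopSizePtr(iter);
        do {
            /* one pass over the contiguous-in-iteration inner loop */
            char *data = *dataptr;
            npy_intp stride = *strideptr;
            npy_intp count = *sizeptr;
            while (count--) {
                total += *(double *)data;
                data += stride;
            }
        } while (iternext(iter));
        (void)NpyIter_Deallocate(iter);
        return total;
    }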
+ +:: + + int + NpyIter_ResetBasePointers(NpyIter *iter, char **baseptrs, char + **errmsg) + +Resets the iterator to its initial state, with new base data pointers. +This function requires great caution. + +If errmsg is non-NULL, it should point to a variable which will +receive the error message, and no Python exception will be set. +This is so that the function can be called from code not holding +the GIL. Note that cast errors may still lead to the GIL being +grabbed temporarily. + +:: + + int + NpyIter_ResetToIterIndexRange(NpyIter *iter, npy_intp istart, npy_intp + iend, char **errmsg) + +Resets the iterator to a new iterator index range + +If errmsg is non-NULL, it should point to a variable which will +receive the error message, and no Python exception will be set. +This is so that the function can be called from code not holding +the GIL. Note that cast errors may still lead to the GIL being +grabbed temporarily. + +:: + + int + NpyIter_GetNDim(NpyIter *iter) + +Gets the number of dimensions being iterated + +:: + + int + NpyIter_GetNOp(NpyIter *iter) + +Gets the number of operands being iterated + +:: + + NpyIter_IterNextFunc * + NpyIter_GetIterNext(NpyIter *iter, char **errmsg) + +Compute the specialized iteration function for an iterator + +If errmsg is non-NULL, it should point to a variable which will +receive the error message, and no Python exception will be set. +This is so that the function can be called from code not holding +the GIL. + +:: + + npy_intp + NpyIter_GetIterSize(NpyIter *iter) + +Gets the number of elements being iterated + +:: + + void + NpyIter_GetIterIndexRange(NpyIter *iter, npy_intp *istart, npy_intp + *iend) + +Gets the range of iteration indices being iterated + +:: + + npy_intp + NpyIter_GetIterIndex(NpyIter *iter) + +Gets the current iteration index + +:: + + int + NpyIter_GotoIterIndex(NpyIter *iter, npy_intp iterindex) + +Sets the iterator position to the specified iterindex, +which matches the iteration order of the iterator. + +Returns NPY_SUCCEED on success, NPY_FAIL on failure. + +:: + + npy_bool + NpyIter_HasMultiIndex(NpyIter *iter) + +Whether the iterator is tracking a multi-index + +:: + + int + NpyIter_GetShape(NpyIter *iter, npy_intp *outshape) + +Gets the broadcast shape if a multi-index is being tracked by the iterator, +otherwise gets the shape of the iteration as Fortran-order +(fastest-changing index first). + +The reason Fortran-order is returned when a multi-index +is not enabled is that this is providing a direct view into how +the iterator traverses the n-dimensional space. The iterator organizes +its memory from fastest index to slowest index, and when +a multi-index is enabled, it uses a permutation to recover the original +order. + +Returns NPY_SUCCEED or NPY_FAIL. + +:: + + NpyIter_GetMultiIndexFunc * + NpyIter_GetGetMultiIndex(NpyIter *iter, char **errmsg) + +Compute a specialized get_multi_index function for the iterator + +If errmsg is non-NULL, it should point to a variable which will +receive the error message, and no Python exception will be set. +This is so that the function can be called from code not holding +the GIL. + +:: + + int + NpyIter_GotoMultiIndex(NpyIter *iter, npy_intp const *multi_index) + +Sets the iterator to the specified multi-index, which must have the +correct number of entries for 'ndim'. It is only valid +when NPY_ITER_MULTI_INDEX was passed to the constructor. This operation +fails if the multi-index is out of bounds. + +Returns NPY_SUCCEED on success, NPY_FAIL on failure. 
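
One use of the ranged-reset interface above is splitting an iteration
across threads: each worker copies the iterator and restricts it to a
disjoint index range. A sketch, assuming the iterator was constructed
with NPY_ITER_RANGED (the names `make_worker_iter`, `t`, and `nworkers`
are illustrative only)::

    /* Illustrative: give worker t its share of [0, itersize). */
    static NpyIter *
    make_worker_iter(NpyIter *iter, int t, int nworkers)
    {
        npy_intp size = NpyIter_GetIterSize(iter);
        npy_intp start = size * t / nworkers;
        npy_intp end = size * (t + 1) / nworkers;
        NpyIter *local = NpyIter_Copy(iter);
        if (local == NULL) {
            return NULL;
        }
        if (NpyIter_ResetToIterIndexRange(local, start, end,
                                          NULL) != NPY_SUCCEED) {
            NpyIter_Deallocate(local);
            return NULL;
        }
        return local;
    }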
+ +:: + + int + NpyIter_RemoveMultiIndex(NpyIter *iter) + +Removes multi-index support from an iterator. + +Returns NPY_SUCCEED or NPY_FAIL. + +:: + + npy_bool + NpyIter_HasIndex(NpyIter *iter) + +Whether the iterator is tracking an index + +:: + + npy_bool + NpyIter_IsBuffered(NpyIter *iter) + +Whether the iterator is buffered + +:: + + npy_bool + NpyIter_IsGrowInner(NpyIter *iter) + +Whether the inner loop can grow if buffering is unneeded + +:: + + npy_intp + NpyIter_GetBufferSize(NpyIter *iter) + +Gets the size of the buffer, or 0 if buffering is not enabled + +:: + + npy_intp * + NpyIter_GetIndexPtr(NpyIter *iter) + +Get a pointer to the index, if it is being tracked + +:: + + int + NpyIter_GotoIndex(NpyIter *iter, npy_intp flat_index) + +If the iterator is tracking an index, sets the iterator +to the specified index. + +Returns NPY_SUCCEED on success, NPY_FAIL on failure. + +:: + + char ** + NpyIter_GetDataPtrArray(NpyIter *iter) + +Get the array of data pointers (1 per object being iterated) + +This function may be safely called without holding the Python GIL. + +:: + + PyArray_Descr ** + NpyIter_GetDescrArray(NpyIter *iter) + +Get the array of data type pointers (1 per object being iterated) + +:: + + PyArrayObject ** + NpyIter_GetOperandArray(NpyIter *iter) + +Get the array of objects being iterated + +:: + + PyArrayObject * + NpyIter_GetIterView(NpyIter *iter, npy_intp i) + +Returns a view to the i-th object with the iterator's internal axes + +:: + + void + NpyIter_GetReadFlags(NpyIter *iter, char *outreadflags) + +Gets an array of read flags (1 per object being iterated) + +:: + + void + NpyIter_GetWriteFlags(NpyIter *iter, char *outwriteflags) + +Gets an array of write flags (1 per object being iterated) + +:: + + void + NpyIter_DebugPrint(NpyIter *iter) + +For debugging + +:: + + npy_bool + NpyIter_IterationNeedsAPI(NpyIter *iter) + +Whether the iteration loop, and in particular the iternext() +function, needs API access. If this is true, the GIL must +be retained while iterating. + +:: + + void + NpyIter_GetInnerFixedStrideArray(NpyIter *iter, npy_intp *out_strides) + +Get an array of strides which are fixed. Any strides which may +change during iteration receive the value NPY_MAX_INTP. Once +the iterator is ready to iterate, call this to get the strides +which will always be fixed in the inner loop, then choose optimized +inner loop functions which take advantage of those fixed strides. + +This function may be safely called without holding the Python GIL. + +:: + + int + NpyIter_RemoveAxis(NpyIter *iter, int axis) + +Removes an axis from iteration. This requires that NPY_ITER_MULTI_INDEX +was set for iterator creation, and does not work if buffering is +enabled. This function also resets the iterator to its initial state. + +Returns NPY_SUCCEED or NPY_FAIL. + +:: + + npy_intp * + NpyIter_GetAxisStrideArray(NpyIter *iter, int axis) + +Gets the array of strides for the specified axis. +If the iterator is tracking a multi-index, gets the strides +for the axis specified, otherwise gets the strides for +the iteration axis as Fortran order (fastest-changing axis first). + +Returns NULL if an error occurs. + +:: + + npy_bool + NpyIter_RequiresBuffering(NpyIter *iter) + +Whether the iteration could be done with no buffering. + +:: + + char ** + NpyIter_GetInitialDataPtrArray(NpyIter *iter) + +Get the array of data pointers (1 per object being iterated), +directly into the arrays (never pointing to a buffer), for starting +unbuffered iteration. 
This always returns the addresses for the
iterator position as reset to iterator index 0.

These pointers are different from the pointers accepted by
NpyIter_ResetBasePointers, because the direction along some
axes may have been reversed, requiring base offsets.

This function may be safely called without holding the Python GIL.

::

    int
    NpyIter_CreateCompatibleStrides(NpyIter *iter, npy_intp
                                    itemsize, npy_intp *outstrides)

Builds a set of strides which are the same as the strides of an
output array created using the NPY_ITER_ALLOCATE flag, where NULL
was passed for op_axes. This is for data packed contiguously,
but not necessarily in C or Fortran order. This should be used
together with NpyIter_GetShape and NpyIter_GetNDim.

A use case for this function is to match the shape and layout of
the iterator and tack on one or more dimensions. For example,
in order to generate a vector per input value for a numerical gradient,
you pass in ndim*itemsize for itemsize, then add another dimension to
the end with size ndim and stride itemsize. To do the Hessian matrix,
you do the same thing but add two dimensions, or take advantage of
the symmetry and pack it into 1 dimension with a particular encoding.

This function may only be called if the iterator is tracking a multi-index
and if NPY_ITER_DONT_NEGATE_STRIDES was used to prevent an axis from
being iterated in reverse order.

If an array is created with this method, simply adding 'itemsize'
for each iteration will traverse the new array matching the
iterator.

Returns NPY_SUCCEED or NPY_FAIL.

::

    int
    PyArray_CastingConverter(PyObject *obj, NPY_CASTING *casting)

Convert any Python object, *obj*, to an NPY_CASTING enum.

::

    npy_intp
    PyArray_CountNonzero(PyArrayObject *self)

Counts the number of non-zero elements in the array.

Returns -1 on error.

::

    PyArray_Descr *
    PyArray_PromoteTypes(PyArray_Descr *type1, PyArray_Descr *type2)

Produces the smallest size and lowest kind type to which both
input types can be cast.

::

    PyArray_Descr *
    PyArray_MinScalarType(PyArrayObject *arr)

If arr is a scalar (has 0 dimensions) with a built-in number data type,
finds the smallest type size/kind which can still represent its data.
Otherwise, returns the array's data type.


::

    PyArray_Descr *
    PyArray_ResultType(npy_intp narrs, PyArrayObject *arrs[] , npy_intp
                       ndtypes, PyArray_Descr *descrs[] )


Produces the result type of a set of inputs, using the same rules
as `np.result_type`.

NOTE: This function is expected to go through a transitional period in
which its behaviour may change. DTypes should always be strictly
enforced for 0-D arrays, while "weak DTypes" will be used to represent
Python integers, floats, and complex in all cases.
(Within this function, these are currently flagged on the array
object to work through `np.result_type`; this may change.)

Until a time where this transition is complete, we probably cannot
add new "weak DTypes" or allow users to create their own.

::

    npy_bool
    PyArray_CanCastArrayTo(PyArrayObject *arr, PyArray_Descr
                           *to, NPY_CASTING casting)

Returns 1 if the array object may be cast to the given data type using
the casting rule, 0 otherwise. This differs from PyArray_CanCastTo in
that it handles scalar arrays (0 dimensions) specially, by checking
their value.
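
A short sketch of the value-aware cast check above; the helper name is
illustrative only::

    /* Illustrative: refuse to proceed unless arr can be safely cast
     * to int32.  PyArray_CanCastArrayTo borrows the descr reference. */
    static int
    check_int32_assignable(PyArrayObject *arr)
    {
        PyArray_Descr *to = PyArray_DescrFromType(NPY_INT32);
        if (to == NULL) {
            return -1;
        }
        if (!PyArray_CanCastArrayTo(arr, to, NPY_SAFE_CASTING)) {
            Py_DECREF(to);
            PyErr_SetString(PyExc_TypeError,
                            "input cannot be safely cast to int32");
            return -1;
        }
        Py_DECREF(to);
        return 0;
    }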
+ +:: + + npy_bool + PyArray_CanCastTypeTo(PyArray_Descr *from, PyArray_Descr + *to, NPY_CASTING casting) + +Returns true if data of type 'from' may be cast to data of type +'to' according to the rule 'casting'. + +:: + + PyArrayObject * + PyArray_EinsteinSum(char *subscripts, npy_intp nop, PyArrayObject + **op_in, PyArray_Descr *dtype, NPY_ORDER + order, NPY_CASTING casting, PyArrayObject *out) + +This function provides summation of array elements according to +the Einstein summation convention. For example: +- trace(a) -> einsum("ii", a) +- transpose(a) -> einsum("ji", a) +- multiply(a,b) -> einsum(",", a, b) +- inner(a,b) -> einsum("i,i", a, b) +- outer(a,b) -> einsum("i,j", a, b) +- matvec(a,b) -> einsum("ij,j", a, b) +- matmat(a,b) -> einsum("ij,jk", a, b) + +subscripts: The string of subscripts for einstein summation. +nop: The number of operands +op_in: The array of operands +dtype: Either NULL, or the data type to force the calculation as. +order: The order for the calculation/the output axes. +casting: What kind of casts should be permitted. +out: Either NULL, or an array into which the output should be placed. + +By default, the labels get placed in alphabetical order +at the end of the output. So, if c = einsum("i,j", a, b) +then c[i,j] == a[i]*b[j], but if c = einsum("j,i", a, b) +then c[i,j] = a[j]*b[i]. + +Alternatively, you can control the output order or prevent +an axis from being summed/force an axis to be summed by providing +indices for the output. This allows us to turn 'trace' into +'diag', for example. +- diag(a) -> einsum("ii->i", a) +- sum(a, axis=0) -> einsum("i...->", a) + +Subscripts at the beginning and end may be specified by +putting an ellipsis "..." in the middle. For example, +the function einsum("i...i", a) takes the diagonal of +the first and last dimensions of the operand, and +einsum("ij...,jk...->ik...") takes the matrix product using +the first two indices of each operand instead of the last two. + +When there is only one operand, no axes being summed, and +no output parameter, this function returns a view +into the operand instead of making a copy. + +:: + + PyObject * + PyArray_NewLikeArray(PyArrayObject *prototype, NPY_ORDER + order, PyArray_Descr *dtype, int subok) + +Creates a new array with the same shape as the provided one, +with possible memory layout order and data type changes. + +prototype - The array the new one should be like. +order - NPY_CORDER - C-contiguous result. +NPY_FORTRANORDER - Fortran-contiguous result. +NPY_ANYORDER - Fortran if prototype is Fortran, C otherwise. +NPY_KEEPORDER - Keeps the axis ordering of prototype. +dtype - If not NULL, overrides the data type of the result. +subok - If 1, use the prototype's array subtype, otherwise +always create a base-class array. + +NOTE: If dtype is not NULL, steals the dtype reference. On failure or when +dtype->subarray is true, dtype will be decrefed. + +:: + + int + PyArray_GetArrayParamsFromObject(PyObject *NPY_UNUSED(op) + , PyArray_Descr + *NPY_UNUSED(requested_dtype) + , npy_bool NPY_UNUSED(writeable) + , PyArray_Descr + **NPY_UNUSED(out_dtype) , int + *NPY_UNUSED(out_ndim) , npy_intp + *NPY_UNUSED(out_dims) , PyArrayObject + **NPY_UNUSED(out_arr) , PyObject + *NPY_UNUSED(context) ) + + +:: + + int + PyArray_ConvertClipmodeSequence(PyObject *object, NPY_CLIPMODE + *modes, int n) + +Convert an object to an array of n NPY_CLIPMODE values. +This is intended to be used in functions where a different mode +could be applied to each axis, like in ravel_multi_index. 
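
A minimal sketch of the reference-stealing contract described above for
PyArray_NewLikeArray; the helper name is illustrative only::

    /* Illustrative: allocate an output matching prototype's shape and
     * memory layout but forced to float64.  The dtype reference is
     * stolen, so no Py_DECREF afterwards. */
    static PyObject *
    empty_like_as_double(PyArrayObject *prototype)
    {
        PyArray_Descr *dtype = PyArray_DescrFromType(NPY_DOUBLE);
        if (dtype == NULL) {
            return NULL;
        }
        /* dtype is consumed; subok=0 always yields a base-class ndarray */
        return PyArray_NewLikeArray(prototype, NPY_KEEPORDER, dtype, 0);
    }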
+

::

    PyObject *
    PyArray_MatrixProduct2(PyObject *op1, PyObject
                           *op2, PyArrayObject *out)

Numeric.matrixproduct2(a,v,out)

Just like the inner product, but does the swapaxes step on the fly.

::

    npy_bool
    NpyIter_IsFirstVisit(NpyIter *iter, int iop)

Checks whether the elements of the specified reduction operand which
the iterator currently points at are being seen for the first time.
The function returns a reasonable answer for reduction operands and
when buffering is disabled. The answer may be incorrect for buffered
non-reduction operands.

This function is intended to be used in EXTERNAL_LOOP mode only,
and will produce some wrong answers when that mode is not enabled.

If this function returns true, the caller should also
check the inner loop stride of the operand, because if
that stride is 0, then only the first element of the innermost
external loop is being visited for the first time.

WARNING: For performance reasons, 'iop' is not bounds-checked,
it is not confirmed that 'iop' is actually a reduction
operand, and it is not confirmed that EXTERNAL_LOOP
mode is enabled. These checks are the responsibility of
the caller, and should be done outside of any inner loops.

::

    int
    PyArray_SetBaseObject(PyArrayObject *arr, PyObject *obj)

Sets the 'base' attribute of the array. This steals a reference
to 'obj'.

Returns 0 on success, -1 on failure.

::

    void
    PyArray_CreateSortedStridePerm(int ndim, npy_intp const
                                   *strides, npy_stride_sort_item
                                   *out_strideperm)


This function populates the first ndim elements of out_strideperm
with the strides sorted in descending order by their absolute values.
For example, the stride array (4, -2, 12) becomes
[(2, 12), (0, 4), (1, -2)].

::

    void
    PyArray_RemoveAxesInPlace(PyArrayObject *arr, const npy_bool *flags)


Removes the axes flagged as True from the array,
modifying it in place. If an axis flagged for removal
has a shape entry bigger than one, this effectively selects
index zero for that axis.

WARNING: If an axis flagged for removal has a shape equal to zero,
the array will point to invalid memory. The caller must
validate this!
If an axis flagged for removal has a shape larger than one,
the aligned flag (and in the future the contiguous flags)
may need an explicit update.
(Check also NPY_RELAXED_STRIDES_CHECKING.)

For example, this can be used to remove the reduction axes
from a reduction result once its computation is complete.

::

    void
    PyArray_DebugPrint(PyArrayObject *obj)

Prints the raw data of the ndarray in a form useful for debugging
low-level C issues.

::

    int
    PyArray_FailUnlessWriteable(PyArrayObject *obj, const char *name)


This function does nothing if obj is writeable, and raises an exception
(and returns -1) if obj is not writeable. It may also do other
house-keeping, such as issuing warnings on arrays which are transitioning
to become views. Always call this function at some point before writing to
an array.

'name' is a name for the array, used to give better error
messages. Something like "assignment destination", "output array", or even
just "array".

::

    int
    PyArray_SetUpdateIfCopyBase(PyArrayObject *arr, PyArrayObject *base)


Precondition: 'arr' is a copy of 'base' (though possibly with different
strides, ordering, etc.). This function sets the UPDATEIFCOPY flag and the
->base pointer on 'arr', so that when 'arr' is destructed, it will copy any
changes back to 'base'.
DEPRECATED -- use PyArray_SetWritebackIfCopyBase instead.

Steals a reference to 'base'.

Returns 0 on success, -1 on failure.

::

    void *
    PyDataMem_NEW(size_t size)

Allocates memory for array data.

::

    void
    PyDataMem_FREE(void *ptr)

Free memory for array data.

::

    void *
    PyDataMem_RENEW(void *ptr, size_t size)

Reallocate/resize memory for array data.

::

    PyDataMem_EventHookFunc *
    PyDataMem_SetEventHook(PyDataMem_EventHookFunc *newhook, void
                           *user_data, void **old_data)

Sets the allocation event hook for numpy array data.
Takes a PyDataMem_EventHookFunc *, which has the signature:
void hook(void *old, void *new, size_t size, void *user_data).
Also takes a void *user_data, and void **old_data.

Returns a pointer to the previous hook or NULL. If old_data is
non-NULL, the previous user_data pointer will be copied to it.

If not NULL, hook will be called at the end of each PyDataMem_NEW/FREE/RENEW:
result = PyDataMem_NEW(size) -> (*hook)(NULL, result, size, user_data)
PyDataMem_FREE(ptr) -> (*hook)(ptr, NULL, 0, user_data)
result = PyDataMem_RENEW(ptr, size) -> (*hook)(ptr, result, size, user_data)

When the hook is called, the GIL will be held by the calling
thread. The hook should be written to be reentrant if it performs
operations that might cause new allocation events (such as the
creation/destruction of numpy objects, or creating/destroying Python
objects which might cause a gc).

::

    void
    PyArray_MapIterSwapAxes(PyArrayMapIterObject *mit, PyArrayObject
                            **ret, int getmap)


Swap the axes to or from their inserted form. MapIter always puts the
advanced (array) indices first in the iteration. But if they are
consecutive, it will insert/transpose them back before returning.
This is stored as `mit->consec != 0` (the place where they are inserted).
For assignments, the opposite happens: the values to be assigned are
transposed (getmap=1 instead of getmap=0). `getmap=0` and `getmap=1`
undo the other operation.

::

    PyObject *
    PyArray_MapIterArray(PyArrayObject *a, PyObject *index)


Use advanced indexing to iterate an array.

::

    void
    PyArray_MapIterNext(PyArrayMapIterObject *mit)

This function needs to update the state of the map iterator
and point mit->dataptr to the memory-location of the next object.

Note that this function never handles an extra operand but provides
compatibility for an old (exposed) API.

::

    int
    PyArray_Partition(PyArrayObject *op, PyArrayObject *ktharray, int
                      axis, NPY_SELECTKIND which)

Partition an array in-place.

::

    PyObject *
    PyArray_ArgPartition(PyArrayObject *op, PyArrayObject *ktharray, int
                         axis, NPY_SELECTKIND which)

ArgPartition an array.

::

    int
    PyArray_SelectkindConverter(PyObject *obj, NPY_SELECTKIND *selectkind)

Convert an object to a select kind.

::

    void *
    PyDataMem_NEW_ZEROED(size_t size, size_t elsize)

Allocates zeroed memory for array data.

::

    int
    PyArray_CheckAnyScalarExact(PyObject *obj)

Return true if an object is exactly a numpy scalar.

::

    PyObject *
    PyArray_MapIterArrayCopyIfOverlap(PyArrayObject *a, PyObject
                                      *index, int
                                      copy_if_overlap, PyArrayObject
                                      *extra_op)


Same as PyArray_MapIterArray, but:

If copy_if_overlap != 0, check if `a` has memory overlap with any of the
arrays in `index` and with `extra_op`. If yes, make copies as appropriate
to avoid problems if `a` is modified during the iteration.
`iter->array` may contain a copied array (UPDATEIFCOPY/WRITEBACKIFCOPY set).
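
A sketch of a hook matching the signature documented above for
PyDataMem_SetEventHook; `trace_hook` and `install_trace_hook` are
illustrative names, and a real hook must be reentrant as noted::

    /* Illustrative: trace every array-data allocation to stderr.
     * The GIL is held when the hook runs, per the docs above. */
    #include <stdio.h>

    static void
    trace_hook(void *old, void *new, size_t size, void *user_data)
    {
        fprintf(stderr, "PyDataMem: %p -> %p (%zu bytes)\n",
                old, new, size);
    }

    static void
    install_trace_hook(void)
    {
        void *old_user_data = NULL;
        PyDataMem_EventHookFunc *prev =
                PyDataMem_SetEventHook(&trace_hook, NULL, &old_user_data);
        (void)prev;  /* keep this if the previous hook must be restored */
    }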
+ +:: + + int + PyArray_ResolveWritebackIfCopy(PyArrayObject *self) + + +If WRITEBACKIFCOPY and self has data, reset the base WRITEABLE flag, +copy the local data to base, release the local data, and set flags +appropriately. Return 0 if not relevant, 1 if success, < 0 on failure + +:: + + int + PyArray_SetWritebackIfCopyBase(PyArrayObject *arr, PyArrayObject + *base) + + +Precondition: 'arr' is a copy of 'base' (though possibly with different +strides, ordering, etc.). This function sets the WRITEBACKIFCOPY flag and the +->base pointer on 'arr', call PyArray_ResolveWritebackIfCopy to copy any +changes back to 'base' before deallocating the array. + +Steals a reference to 'base'. + +Returns 0 on success, -1 on failure. + diff --git a/MLPY/Lib/site-packages/numpy/core/include/numpy/ndarrayobject.h b/MLPY/Lib/site-packages/numpy/core/include/numpy/ndarrayobject.h new file mode 100644 index 0000000000000000000000000000000000000000..42fcc1c6aff6b82fc5fb0ac4bf7b60d2f9e365b1 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/include/numpy/ndarrayobject.h @@ -0,0 +1,268 @@ +/* + * DON'T INCLUDE THIS DIRECTLY. + */ + +#ifndef NPY_NDARRAYOBJECT_H +#define NPY_NDARRAYOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include "ndarraytypes.h" + +/* Includes the "function" C-API -- these are all stored in a + list of pointers --- one for each file + The two lists are concatenated into one in multiarray. + + They are available as import_array() +*/ + +#include "__multiarray_api.h" + + +/* C-API that requires previous API to be defined */ + +#define PyArray_DescrCheck(op) PyObject_TypeCheck(op, &PyArrayDescr_Type) + +#define PyArray_Check(op) PyObject_TypeCheck(op, &PyArray_Type) +#define PyArray_CheckExact(op) (((PyObject*)(op))->ob_type == &PyArray_Type) + +#define PyArray_HasArrayInterfaceType(op, type, context, out) \ + ((((out)=PyArray_FromStructInterface(op)) != Py_NotImplemented) || \ + (((out)=PyArray_FromInterface(op)) != Py_NotImplemented) || \ + (((out)=PyArray_FromArrayAttr(op, type, context)) != \ + Py_NotImplemented)) + +#define PyArray_HasArrayInterface(op, out) \ + PyArray_HasArrayInterfaceType(op, NULL, NULL, out) + +#define PyArray_IsZeroDim(op) (PyArray_Check(op) && \ + (PyArray_NDIM((PyArrayObject *)op) == 0)) + +#define PyArray_IsScalar(obj, cls) \ + (PyObject_TypeCheck(obj, &Py##cls##ArrType_Type)) + +#define PyArray_CheckScalar(m) (PyArray_IsScalar(m, Generic) || \ + PyArray_IsZeroDim(m)) +#define PyArray_IsPythonNumber(obj) \ + (PyFloat_Check(obj) || PyComplex_Check(obj) || \ + PyLong_Check(obj) || PyBool_Check(obj)) +#define PyArray_IsIntegerScalar(obj) (PyLong_Check(obj) \ + || PyArray_IsScalar((obj), Integer)) +#define PyArray_IsPythonScalar(obj) \ + (PyArray_IsPythonNumber(obj) || PyBytes_Check(obj) || \ + PyUnicode_Check(obj)) + +#define PyArray_IsAnyScalar(obj) \ + (PyArray_IsScalar(obj, Generic) || PyArray_IsPythonScalar(obj)) + +#define PyArray_CheckAnyScalar(obj) (PyArray_IsPythonScalar(obj) || \ + PyArray_CheckScalar(obj)) + + +#define PyArray_GETCONTIGUOUS(m) (PyArray_ISCONTIGUOUS(m) ? 
\ + Py_INCREF(m), (m) : \ + (PyArrayObject *)(PyArray_Copy(m))) + +#define PyArray_SAMESHAPE(a1,a2) ((PyArray_NDIM(a1) == PyArray_NDIM(a2)) && \ + PyArray_CompareLists(PyArray_DIMS(a1), \ + PyArray_DIMS(a2), \ + PyArray_NDIM(a1))) + +#define PyArray_SIZE(m) PyArray_MultiplyList(PyArray_DIMS(m), PyArray_NDIM(m)) +#define PyArray_NBYTES(m) (PyArray_ITEMSIZE(m) * PyArray_SIZE(m)) +#define PyArray_FROM_O(m) PyArray_FromAny(m, NULL, 0, 0, 0, NULL) + +#define PyArray_FROM_OF(m,flags) PyArray_CheckFromAny(m, NULL, 0, 0, flags, \ + NULL) + +#define PyArray_FROM_OT(m,type) PyArray_FromAny(m, \ + PyArray_DescrFromType(type), 0, 0, 0, NULL) + +#define PyArray_FROM_OTF(m, type, flags) \ + PyArray_FromAny(m, PyArray_DescrFromType(type), 0, 0, \ + (((flags) & NPY_ARRAY_ENSURECOPY) ? \ + ((flags) | NPY_ARRAY_DEFAULT) : (flags)), NULL) + +#define PyArray_FROMANY(m, type, min, max, flags) \ + PyArray_FromAny(m, PyArray_DescrFromType(type), min, max, \ + (((flags) & NPY_ARRAY_ENSURECOPY) ? \ + (flags) | NPY_ARRAY_DEFAULT : (flags)), NULL) + +#define PyArray_ZEROS(m, dims, type, is_f_order) \ + PyArray_Zeros(m, dims, PyArray_DescrFromType(type), is_f_order) + +#define PyArray_EMPTY(m, dims, type, is_f_order) \ + PyArray_Empty(m, dims, PyArray_DescrFromType(type), is_f_order) + +#define PyArray_FILLWBYTE(obj, val) memset(PyArray_DATA(obj), val, \ + PyArray_NBYTES(obj)) +#ifndef PYPY_VERSION +#define PyArray_REFCOUNT(obj) (((PyObject *)(obj))->ob_refcnt) +#define NPY_REFCOUNT PyArray_REFCOUNT +#endif +#define NPY_MAX_ELSIZE (2 * NPY_SIZEOF_LONGDOUBLE) + +#define PyArray_ContiguousFromAny(op, type, min_depth, max_depth) \ + PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ + max_depth, NPY_ARRAY_DEFAULT, NULL) + +#define PyArray_EquivArrTypes(a1, a2) \ + PyArray_EquivTypes(PyArray_DESCR(a1), PyArray_DESCR(a2)) + +#define PyArray_EquivByteorders(b1, b2) \ + (((b1) == (b2)) || (PyArray_ISNBO(b1) == PyArray_ISNBO(b2))) + +#define PyArray_SimpleNew(nd, dims, typenum) \ + PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, NULL, 0, 0, NULL) + +#define PyArray_SimpleNewFromData(nd, dims, typenum, data) \ + PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, \ + data, 0, NPY_ARRAY_CARRAY, NULL) + +#define PyArray_SimpleNewFromDescr(nd, dims, descr) \ + PyArray_NewFromDescr(&PyArray_Type, descr, nd, dims, \ + NULL, NULL, 0, NULL) + +#define PyArray_ToScalar(data, arr) \ + PyArray_Scalar(data, PyArray_DESCR(arr), (PyObject *)arr) + + +/* These might be faster without the dereferencing of obj + going on inside -- of course an optimizing compiler should + inline the constants inside a for loop making it a moot point +*/ + +#define PyArray_GETPTR1(obj, i) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDES(obj)[0])) + +#define PyArray_GETPTR2(obj, i, j) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDES(obj)[0] + \ + (j)*PyArray_STRIDES(obj)[1])) + +#define PyArray_GETPTR3(obj, i, j, k) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDES(obj)[0] + \ + (j)*PyArray_STRIDES(obj)[1] + \ + (k)*PyArray_STRIDES(obj)[2])) + +#define PyArray_GETPTR4(obj, i, j, k, l) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDES(obj)[0] + \ + (j)*PyArray_STRIDES(obj)[1] + \ + (k)*PyArray_STRIDES(obj)[2] + \ + (l)*PyArray_STRIDES(obj)[3])) + +/* Move to arrayobject.c once PyArray_XDECREF_ERR is removed */ +static NPY_INLINE void +PyArray_DiscardWritebackIfCopy(PyArrayObject *arr) +{ + PyArrayObject_fields *fa = (PyArrayObject_fields *)arr; + if (fa && fa->base) { + if ((fa->flags & NPY_ARRAY_UPDATEIFCOPY) || + 
(fa->flags & NPY_ARRAY_WRITEBACKIFCOPY)) { + PyArray_ENABLEFLAGS((PyArrayObject*)fa->base, NPY_ARRAY_WRITEABLE); + Py_DECREF(fa->base); + fa->base = NULL; + PyArray_CLEARFLAGS(arr, NPY_ARRAY_WRITEBACKIFCOPY); + PyArray_CLEARFLAGS(arr, NPY_ARRAY_UPDATEIFCOPY); + } + } +} + +#define PyArray_DESCR_REPLACE(descr) do { \ + PyArray_Descr *_new_; \ + _new_ = PyArray_DescrNew(descr); \ + Py_XDECREF(descr); \ + descr = _new_; \ + } while(0) + +/* Copy should always return contiguous array */ +#define PyArray_Copy(obj) PyArray_NewCopy(obj, NPY_CORDER) + +#define PyArray_FromObject(op, type, min_depth, max_depth) \ + PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ + max_depth, NPY_ARRAY_BEHAVED | \ + NPY_ARRAY_ENSUREARRAY, NULL) + +#define PyArray_ContiguousFromObject(op, type, min_depth, max_depth) \ + PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ + max_depth, NPY_ARRAY_DEFAULT | \ + NPY_ARRAY_ENSUREARRAY, NULL) + +#define PyArray_CopyFromObject(op, type, min_depth, max_depth) \ + PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ + max_depth, NPY_ARRAY_ENSURECOPY | \ + NPY_ARRAY_DEFAULT | \ + NPY_ARRAY_ENSUREARRAY, NULL) + +#define PyArray_Cast(mp, type_num) \ + PyArray_CastToType(mp, PyArray_DescrFromType(type_num), 0) + +#define PyArray_Take(ap, items, axis) \ + PyArray_TakeFrom(ap, items, axis, NULL, NPY_RAISE) + +#define PyArray_Put(ap, items, values) \ + PyArray_PutTo(ap, items, values, NPY_RAISE) + +/* Compatibility with old Numeric stuff -- don't use in new code */ + +#define PyArray_FromDimsAndData(nd, d, type, data) \ + PyArray_FromDimsAndDataAndDescr(nd, d, PyArray_DescrFromType(type), \ + data) + + +/* + Check to see if this key in the dictionary is the "title" + entry of the tuple (i.e. a duplicate dictionary entry in the fields + dict). +*/ + +static NPY_INLINE int +NPY_TITLE_KEY_check(PyObject *key, PyObject *value) +{ + PyObject *title; + if (PyTuple_Size(value) != 3) { + return 0; + } + title = PyTuple_GetItem(value, 2); + if (key == title) { + return 1; + } +#ifdef PYPY_VERSION + /* + * On PyPy, dictionary keys do not always preserve object identity. + * Fall back to comparison by value. + */ + if (PyUnicode_Check(title) && PyUnicode_Check(key)) { + return PyUnicode_Compare(title, key) == 0 ? 1 : 0; + } +#endif + return 0; +} + +/* Macro, for backward compat with "if NPY_TITLE_KEY(key, value) { ..." 
*/ +#define NPY_TITLE_KEY(key, value) (NPY_TITLE_KEY_check((key), (value))) + +#define DEPRECATE(msg) PyErr_WarnEx(PyExc_DeprecationWarning,msg,1) +#define DEPRECATE_FUTUREWARNING(msg) PyErr_WarnEx(PyExc_FutureWarning,msg,1) + +#if !defined(NPY_NO_DEPRECATED_API) || \ + (NPY_NO_DEPRECATED_API < NPY_1_14_API_VERSION) +static NPY_INLINE void +PyArray_XDECREF_ERR(PyArrayObject *arr) +{ + /* 2017-Nov-10 1.14 */ + DEPRECATE("PyArray_XDECREF_ERR is deprecated, call " + "PyArray_DiscardWritebackIfCopy then Py_XDECREF instead"); + PyArray_DiscardWritebackIfCopy(arr); + Py_XDECREF(arr); +} +#endif + + +#ifdef __cplusplus +} +#endif + + +#endif /* NPY_NDARRAYOBJECT_H */ diff --git a/MLPY/Lib/site-packages/numpy/core/include/numpy/ndarraytypes.h b/MLPY/Lib/site-packages/numpy/core/include/numpy/ndarraytypes.h new file mode 100644 index 0000000000000000000000000000000000000000..48ddbf9ac5d200b31b7c54e62af21d653d31fd4f --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/include/numpy/ndarraytypes.h @@ -0,0 +1,1985 @@ +#ifndef NDARRAYTYPES_H +#define NDARRAYTYPES_H + +#include "npy_common.h" +#include "npy_endian.h" +#include "npy_cpu.h" +#include "utils.h" + +#define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN + +/* Only use thread if configured in config and python supports it */ +#if defined WITH_THREAD && !NPY_NO_SMP + #define NPY_ALLOW_THREADS 1 +#else + #define NPY_ALLOW_THREADS 0 +#endif + +#ifndef __has_extension +#define __has_extension(x) 0 +#endif + +#if !defined(_NPY_NO_DEPRECATIONS) && \ + ((defined(__GNUC__)&& __GNUC__ >= 6) || \ + __has_extension(attribute_deprecated_with_message)) +#define NPY_ATTR_DEPRECATE(text) __attribute__ ((deprecated (text))) +#else +#define NPY_ATTR_DEPRECATE(text) +#endif + +/* + * There are several places in the code where an array of dimensions + * is allocated statically. This is the size of that static + * allocation. + * + * The array creation itself could have arbitrary dimensions but all + * the places where static allocation is used would need to be changed + * to dynamic (including inside of several structures) + */ + +#define NPY_MAXDIMS 32 +#define NPY_MAXARGS 32 + +/* Used for Converter Functions "O&" code in ParseTuple */ +#define NPY_FAIL 0 +#define NPY_SUCCEED 1 + +/* + * Binary compatibility version number. This number is increased + * whenever the C-API is changed such that binary compatibility is + * broken, i.e. whenever a recompile of extension modules is needed. + */ +#define NPY_VERSION NPY_ABI_VERSION + +/* + * Minor API version. This number is increased whenever a change is + * made to the C-API -- whether it breaks binary compatibility or not. + * Some changes, such as adding a function pointer to the end of the + * function table, can be made without breaking binary compatibility. + * In this case, only the NPY_FEATURE_VERSION (*not* NPY_VERSION) + * would be increased. Whenever binary compatibility is broken, both + * NPY_VERSION and NPY_FEATURE_VERSION should be increased. + */ +#define NPY_FEATURE_VERSION NPY_API_VERSION + +enum NPY_TYPES { NPY_BOOL=0, + NPY_BYTE, NPY_UBYTE, + NPY_SHORT, NPY_USHORT, + NPY_INT, NPY_UINT, + NPY_LONG, NPY_ULONG, + NPY_LONGLONG, NPY_ULONGLONG, + NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE, + NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE, + NPY_OBJECT=17, + NPY_STRING, NPY_UNICODE, + NPY_VOID, + /* + * New 1.6 types appended, may be integrated + * into the above in 2.0. 
+ */ + NPY_DATETIME, NPY_TIMEDELTA, NPY_HALF, + + NPY_NTYPES, + NPY_NOTYPE, + NPY_CHAR NPY_ATTR_DEPRECATE("Use NPY_STRING"), + NPY_USERDEF=256, /* leave room for characters */ + + /* The number of types not including the new 1.6 types */ + NPY_NTYPES_ABI_COMPATIBLE=21 +}; +#ifdef _MSC_VER +#pragma deprecated(NPY_CHAR) +#endif + +/* basetype array priority */ +#define NPY_PRIORITY 0.0 + +/* default subtype priority */ +#define NPY_SUBTYPE_PRIORITY 1.0 + +/* default scalar priority */ +#define NPY_SCALAR_PRIORITY -1000000.0 + +/* How many floating point types are there (excluding half) */ +#define NPY_NUM_FLOATTYPE 3 + +/* + * These characters correspond to the array type and the struct + * module + */ + +enum NPY_TYPECHAR { + NPY_BOOLLTR = '?', + NPY_BYTELTR = 'b', + NPY_UBYTELTR = 'B', + NPY_SHORTLTR = 'h', + NPY_USHORTLTR = 'H', + NPY_INTLTR = 'i', + NPY_UINTLTR = 'I', + NPY_LONGLTR = 'l', + NPY_ULONGLTR = 'L', + NPY_LONGLONGLTR = 'q', + NPY_ULONGLONGLTR = 'Q', + NPY_HALFLTR = 'e', + NPY_FLOATLTR = 'f', + NPY_DOUBLELTR = 'd', + NPY_LONGDOUBLELTR = 'g', + NPY_CFLOATLTR = 'F', + NPY_CDOUBLELTR = 'D', + NPY_CLONGDOUBLELTR = 'G', + NPY_OBJECTLTR = 'O', + NPY_STRINGLTR = 'S', + NPY_STRINGLTR2 = 'a', + NPY_UNICODELTR = 'U', + NPY_VOIDLTR = 'V', + NPY_DATETIMELTR = 'M', + NPY_TIMEDELTALTR = 'm', + NPY_CHARLTR = 'c', + + /* + * No Descriptor, just a define -- this let's + * Python users specify an array of integers + * large enough to hold a pointer on the + * platform + */ + NPY_INTPLTR = 'p', + NPY_UINTPLTR = 'P', + + /* + * These are for dtype 'kinds', not dtype 'typecodes' + * as the above are for. + */ + NPY_GENBOOLLTR ='b', + NPY_SIGNEDLTR = 'i', + NPY_UNSIGNEDLTR = 'u', + NPY_FLOATINGLTR = 'f', + NPY_COMPLEXLTR = 'c' +}; + +/* + * Changing this may break Numpy API compatibility + * due to changing offsets in PyArray_ArrFuncs, so be + * careful. Here we have reused the mergesort slot for + * any kind of stable sort, the actual implementation will + * depend on the data type. + */ +typedef enum { + NPY_QUICKSORT=0, + NPY_HEAPSORT=1, + NPY_MERGESORT=2, + NPY_STABLESORT=2, +} NPY_SORTKIND; +#define NPY_NSORTS (NPY_STABLESORT + 1) + + +typedef enum { + NPY_INTROSELECT=0 +} NPY_SELECTKIND; +#define NPY_NSELECTS (NPY_INTROSELECT + 1) + + +typedef enum { + NPY_SEARCHLEFT=0, + NPY_SEARCHRIGHT=1 +} NPY_SEARCHSIDE; +#define NPY_NSEARCHSIDES (NPY_SEARCHRIGHT + 1) + + +typedef enum { + NPY_NOSCALAR=-1, + NPY_BOOL_SCALAR, + NPY_INTPOS_SCALAR, + NPY_INTNEG_SCALAR, + NPY_FLOAT_SCALAR, + NPY_COMPLEX_SCALAR, + NPY_OBJECT_SCALAR +} NPY_SCALARKIND; +#define NPY_NSCALARKINDS (NPY_OBJECT_SCALAR + 1) + +/* For specifying array memory layout or iteration order */ +typedef enum { + /* Fortran order if inputs are all Fortran, C otherwise */ + NPY_ANYORDER=-1, + /* C order */ + NPY_CORDER=0, + /* Fortran order */ + NPY_FORTRANORDER=1, + /* An order as close to the inputs as possible */ + NPY_KEEPORDER=2 +} NPY_ORDER; + +/* For specifying allowed casting in operations which support it */ +typedef enum { + _NPY_ERROR_OCCURRED_IN_CAST = -1, + /* Only allow identical types */ + NPY_NO_CASTING=0, + /* Allow identical and byte swapped types */ + NPY_EQUIV_CASTING=1, + /* Only allow safe casts */ + NPY_SAFE_CASTING=2, + /* Allow safe casts or casts within the same kind */ + NPY_SAME_KIND_CASTING=3, + /* Allow any casts */ + NPY_UNSAFE_CASTING=4, + /* + * Flag to allow signalling that a cast is a view, this flag is not + * valid when requesting a cast of specific safety. 
+ * _NPY_CAST_IS_VIEW|NPY_EQUIV_CASTING means the same as NPY_NO_CASTING. + */ + // TODO-DTYPES: Needs to be documented. + _NPY_CAST_IS_VIEW = 1 << 16, +} NPY_CASTING; + +typedef enum { + NPY_CLIP=0, + NPY_WRAP=1, + NPY_RAISE=2 +} NPY_CLIPMODE; + +typedef enum { + NPY_VALID=0, + NPY_SAME=1, + NPY_FULL=2 +} NPY_CORRELATEMODE; + +/* The special not-a-time (NaT) value */ +#define NPY_DATETIME_NAT NPY_MIN_INT64 + +/* + * Upper bound on the length of a DATETIME ISO 8601 string + * YEAR: 21 (64-bit year) + * MONTH: 3 + * DAY: 3 + * HOURS: 3 + * MINUTES: 3 + * SECONDS: 3 + * ATTOSECONDS: 1 + 3*6 + * TIMEZONE: 5 + * NULL TERMINATOR: 1 + */ +#define NPY_DATETIME_MAX_ISO8601_STRLEN (21 + 3*5 + 1 + 3*6 + 6 + 1) + +/* The FR in the unit names stands for frequency */ +typedef enum { + /* Force signed enum type, must be -1 for code compatibility */ + NPY_FR_ERROR = -1, /* error or undetermined */ + + /* Start of valid units */ + NPY_FR_Y = 0, /* Years */ + NPY_FR_M = 1, /* Months */ + NPY_FR_W = 2, /* Weeks */ + /* Gap where 1.6 NPY_FR_B (value 3) was */ + NPY_FR_D = 4, /* Days */ + NPY_FR_h = 5, /* hours */ + NPY_FR_m = 6, /* minutes */ + NPY_FR_s = 7, /* seconds */ + NPY_FR_ms = 8, /* milliseconds */ + NPY_FR_us = 9, /* microseconds */ + NPY_FR_ns = 10, /* nanoseconds */ + NPY_FR_ps = 11, /* picoseconds */ + NPY_FR_fs = 12, /* femtoseconds */ + NPY_FR_as = 13, /* attoseconds */ + NPY_FR_GENERIC = 14 /* unbound units, can convert to anything */ +} NPY_DATETIMEUNIT; + +/* + * NOTE: With the NPY_FR_B gap for 1.6 ABI compatibility, NPY_DATETIME_NUMUNITS + * is technically one more than the actual number of units. + */ +#define NPY_DATETIME_NUMUNITS (NPY_FR_GENERIC + 1) +#define NPY_DATETIME_DEFAULTUNIT NPY_FR_GENERIC + +/* + * Business day conventions for mapping invalid business + * days to valid business days. + */ +typedef enum { + /* Go forward in time to the following business day. */ + NPY_BUSDAY_FORWARD, + NPY_BUSDAY_FOLLOWING = NPY_BUSDAY_FORWARD, + /* Go backward in time to the preceding business day. */ + NPY_BUSDAY_BACKWARD, + NPY_BUSDAY_PRECEDING = NPY_BUSDAY_BACKWARD, + /* + * Go forward in time to the following business day, unless it + * crosses a month boundary, in which case go backward + */ + NPY_BUSDAY_MODIFIEDFOLLOWING, + /* + * Go backward in time to the preceding business day, unless it + * crosses a month boundary, in which case go forward. + */ + NPY_BUSDAY_MODIFIEDPRECEDING, + /* Produce a NaT for non-business days. */ + NPY_BUSDAY_NAT, + /* Raise an exception for non-business days. */ + NPY_BUSDAY_RAISE +} NPY_BUSDAY_ROLL; + +/************************************************************ + * NumPy Auxiliary Data for inner loops, sort functions, etc. 
+ ************************************************************/ + +/* + * When creating an auxiliary data struct, this should always appear + * as the first member, like this: + * + * typedef struct { + * NpyAuxData base; + * double constant; + * } constant_multiplier_aux_data; + */ +typedef struct NpyAuxData_tag NpyAuxData; + +/* Function pointers for freeing or cloning auxiliary data */ +typedef void (NpyAuxData_FreeFunc) (NpyAuxData *); +typedef NpyAuxData *(NpyAuxData_CloneFunc) (NpyAuxData *); + +struct NpyAuxData_tag { + NpyAuxData_FreeFunc *free; + NpyAuxData_CloneFunc *clone; + /* To allow for a bit of expansion without breaking the ABI */ + void *reserved[2]; +}; + +/* Macros to use for freeing and cloning auxiliary data */ +#define NPY_AUXDATA_FREE(auxdata) \ + do { \ + if ((auxdata) != NULL) { \ + (auxdata)->free(auxdata); \ + } \ + } while(0) +#define NPY_AUXDATA_CLONE(auxdata) \ + ((auxdata)->clone(auxdata)) + +#define NPY_ERR(str) fprintf(stderr, #str); fflush(stderr); +#define NPY_ERR2(str) fprintf(stderr, str); fflush(stderr); + + /* + * Macros to define how array, and dimension/strides data is + * allocated. + */ + + /* Data buffer - PyDataMem_NEW/FREE/RENEW are in multiarraymodule.c */ + +#define NPY_USE_PYMEM 1 + + +#if NPY_USE_PYMEM == 1 +/* use the Raw versions which are safe to call with the GIL released */ +#define PyArray_malloc PyMem_RawMalloc +#define PyArray_free PyMem_RawFree +#define PyArray_realloc PyMem_RawRealloc +#else +#define PyArray_malloc malloc +#define PyArray_free free +#define PyArray_realloc realloc +#endif + +/* Dimensions and strides */ +#define PyDimMem_NEW(size) \ + ((npy_intp *)PyArray_malloc(size*sizeof(npy_intp))) + +#define PyDimMem_FREE(ptr) PyArray_free(ptr) + +#define PyDimMem_RENEW(ptr,size) \ + ((npy_intp *)PyArray_realloc(ptr,size*sizeof(npy_intp))) + +/* forward declaration */ +struct _PyArray_Descr; + +/* These must deal with unaligned and swapped data if necessary */ +typedef PyObject * (PyArray_GetItemFunc) (void *, void *); +typedef int (PyArray_SetItemFunc)(PyObject *, void *, void *); + +typedef void (PyArray_CopySwapNFunc)(void *, npy_intp, void *, npy_intp, + npy_intp, int, void *); + +typedef void (PyArray_CopySwapFunc)(void *, void *, int, void *); +typedef npy_bool (PyArray_NonzeroFunc)(void *, void *); + + +/* + * These assume aligned and notswapped data -- a buffer will be used + * before or contiguous data will be obtained + */ + +typedef int (PyArray_CompareFunc)(const void *, const void *, void *); +typedef int (PyArray_ArgFunc)(void*, npy_intp, npy_intp*, void *); + +typedef void (PyArray_DotFunc)(void *, npy_intp, void *, npy_intp, void *, + npy_intp, void *); + +typedef void (PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, + void *); + +/* + * XXX the ignore argument should be removed next time the API version + * is bumped. It used to be the separator. 
+ */ +typedef int (PyArray_ScanFunc)(FILE *fp, void *dptr, + char *ignore, struct _PyArray_Descr *); +typedef int (PyArray_FromStrFunc)(char *s, void *dptr, char **endptr, + struct _PyArray_Descr *); + +typedef int (PyArray_FillFunc)(void *, npy_intp, void *); + +typedef int (PyArray_SortFunc)(void *, npy_intp, void *); +typedef int (PyArray_ArgSortFunc)(void *, npy_intp *, npy_intp, void *); +typedef int (PyArray_PartitionFunc)(void *, npy_intp, npy_intp, + npy_intp *, npy_intp *, + void *); +typedef int (PyArray_ArgPartitionFunc)(void *, npy_intp *, npy_intp, npy_intp, + npy_intp *, npy_intp *, + void *); + +typedef int (PyArray_FillWithScalarFunc)(void *, npy_intp, void *, void *); + +typedef int (PyArray_ScalarKindFunc)(void *); + +typedef void (PyArray_FastClipFunc)(void *in, npy_intp n_in, void *min, + void *max, void *out); +typedef void (PyArray_FastPutmaskFunc)(void *in, void *mask, npy_intp n_in, + void *values, npy_intp nv); +typedef int (PyArray_FastTakeFunc)(void *dest, void *src, npy_intp *indarray, + npy_intp nindarray, npy_intp n_outer, + npy_intp m_middle, npy_intp nelem, + NPY_CLIPMODE clipmode); + +typedef struct { + npy_intp *ptr; + int len; +} PyArray_Dims; + +typedef struct { + /* + * Functions to cast to most other standard types + * Can have some NULL entries. The types + * DATETIME, TIMEDELTA, and HALF go into the castdict + * even though they are built-in. + */ + PyArray_VectorUnaryFunc *cast[NPY_NTYPES_ABI_COMPATIBLE]; + + /* The next four functions *cannot* be NULL */ + + /* + * Functions to get and set items with standard Python types + * -- not array scalars + */ + PyArray_GetItemFunc *getitem; + PyArray_SetItemFunc *setitem; + + /* + * Copy and/or swap data. Memory areas may not overlap + * Use memmove first if they might + */ + PyArray_CopySwapNFunc *copyswapn; + PyArray_CopySwapFunc *copyswap; + + /* + * Function to compare items + * Can be NULL + */ + PyArray_CompareFunc *compare; + + /* + * Function to select largest + * Can be NULL + */ + PyArray_ArgFunc *argmax; + + /* + * Function to compute dot product + * Can be NULL + */ + PyArray_DotFunc *dotfunc; + + /* + * Function to scan an ASCII file and + * place a single value plus possible separator + * Can be NULL + */ + PyArray_ScanFunc *scanfunc; + + /* + * Function to read a single value from a string + * and adjust the pointer; Can be NULL + */ + PyArray_FromStrFunc *fromstr; + + /* + * Function to determine if data is zero or not + * If NULL a default version is + * used at Registration time. + */ + PyArray_NonzeroFunc *nonzero; + + /* + * Used for arange. Should return 0 on success + * and -1 on failure. + * Can be NULL. + */ + PyArray_FillFunc *fill; + + /* + * Function to fill arrays with scalar values + * Can be NULL + */ + PyArray_FillWithScalarFunc *fillwithscalar; + + /* + * Sorting functions + * Can be NULL + */ + PyArray_SortFunc *sort[NPY_NSORTS]; + PyArray_ArgSortFunc *argsort[NPY_NSORTS]; + + /* + * Dictionary of additional casting functions + * PyArray_VectorUnaryFuncs + * which can be populated to support casting + * to other registered types. Can be NULL + */ + PyObject *castdict; + + /* + * Functions useful for generalizing + * the casting rules. 
+ * Can be NULL; + */ + PyArray_ScalarKindFunc *scalarkind; + int **cancastscalarkindto; + int *cancastto; + + PyArray_FastClipFunc *fastclip; + PyArray_FastPutmaskFunc *fastputmask; + PyArray_FastTakeFunc *fasttake; + + /* + * Function to select smallest + * Can be NULL + */ + PyArray_ArgFunc *argmin; + +} PyArray_ArrFuncs; + +/* The item must be reference counted when it is inserted or extracted. */ +#define NPY_ITEM_REFCOUNT 0x01 +/* Same as needing REFCOUNT */ +#define NPY_ITEM_HASOBJECT 0x01 +/* Convert to list for pickling */ +#define NPY_LIST_PICKLE 0x02 +/* The item is a POINTER */ +#define NPY_ITEM_IS_POINTER 0x04 +/* memory needs to be initialized for this data-type */ +#define NPY_NEEDS_INIT 0x08 +/* operations need Python C-API so don't give-up thread. */ +#define NPY_NEEDS_PYAPI 0x10 +/* Use f.getitem when extracting elements of this data-type */ +#define NPY_USE_GETITEM 0x20 +/* Use f.setitem when setting creating 0-d array from this data-type.*/ +#define NPY_USE_SETITEM 0x40 +/* A sticky flag specifically for structured arrays */ +#define NPY_ALIGNED_STRUCT 0x80 + +/* + *These are inherited for global data-type if any data-types in the + * field have them + */ +#define NPY_FROM_FIELDS (NPY_NEEDS_INIT | NPY_LIST_PICKLE | \ + NPY_ITEM_REFCOUNT | NPY_NEEDS_PYAPI) + +#define NPY_OBJECT_DTYPE_FLAGS (NPY_LIST_PICKLE | NPY_USE_GETITEM | \ + NPY_ITEM_IS_POINTER | NPY_ITEM_REFCOUNT | \ + NPY_NEEDS_INIT | NPY_NEEDS_PYAPI) + +#define PyDataType_FLAGCHK(dtype, flag) \ + (((dtype)->flags & (flag)) == (flag)) + +#define PyDataType_REFCHK(dtype) \ + PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT) + +typedef struct _PyArray_Descr { + PyObject_HEAD + /* + * the type object representing an + * instance of this type -- should not + * be two type_numbers with the same type + * object. + */ + PyTypeObject *typeobj; + /* kind for this type */ + char kind; + /* unique-character representing this type */ + char type; + /* + * '>' (big), '<' (little), '|' + * (not-applicable), or '=' (native). + */ + char byteorder; + /* flags describing data type */ + char flags; + /* number representing this type */ + int type_num; + /* element size (itemsize) for this type */ + int elsize; + /* alignment needed for this type */ + int alignment; + /* + * Non-NULL if this type is + * is an array (C-contiguous) + * of some other type + */ + struct _arr_descr *subarray; + /* + * The fields dictionary for this type + * For statically defined descr this + * is always Py_None + */ + PyObject *fields; + /* + * An ordered tuple of field names or NULL + * if no fields are defined + */ + PyObject *names; + /* + * a table of functions specific for each + * basic data descriptor + */ + PyArray_ArrFuncs *f; + /* Metadata about this dtype */ + PyObject *metadata; + /* + * Metadata specific to the C implementation + * of the particular dtype. This was added + * for NumPy 1.7.0. + */ + NpyAuxData *c_metadata; + /* Cached hash value (-1 if not yet computed). + * This was added for NumPy 2.0.0. + */ + npy_hash_t hash; +} PyArray_Descr; + +typedef struct _arr_descr { + PyArray_Descr *base; + PyObject *shape; /* a tuple */ +} PyArray_ArrayDescr; + +/* + * The main array object structure. + * + * It has been recommended to use the inline functions defined below + * (PyArray_DATA and friends) to access fields here for a number of + * releases. Direct access to the members themselves is deprecated. 
+ * To ensure that your code does not use deprecated access, + * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + * (or NPY_1_8_API_VERSION or higher as required). + */ +/* This struct will be moved to a private header in a future release */ +typedef struct tagPyArrayObject_fields { + PyObject_HEAD + /* Pointer to the raw data buffer */ + char *data; + /* The number of dimensions, also called 'ndim' */ + int nd; + /* The size in each dimension, also called 'shape' */ + npy_intp *dimensions; + /* + * Number of bytes to jump to get to the + * next element in each dimension + */ + npy_intp *strides; + /* + * This object is decref'd upon + * deletion of array. Except in the + * case of WRITEBACKIFCOPY which has + * special handling. + * + * For views it points to the original + * array, collapsed so no chains of + * views occur. + * + * For creation from buffer object it + * points to an object that should be + * decref'd on deletion + * + * For WRITEBACKIFCOPY flag this is an + * array to-be-updated upon calling + * PyArray_ResolveWritebackIfCopy + */ + PyObject *base; + /* Pointer to type structure */ + PyArray_Descr *descr; + /* Flags describing array -- see below */ + int flags; + /* For weak references */ + PyObject *weakreflist; + void *_buffer_info; /* private buffer info, tagged to allow warning */ +} PyArrayObject_fields; + +/* + * To hide the implementation details, we only expose + * the Python struct HEAD. + */ +#if !defined(NPY_NO_DEPRECATED_API) || \ + (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION) +/* + * Can't put this in npy_deprecated_api.h like the others. + * PyArrayObject field access is deprecated as of NumPy 1.7. + */ +typedef PyArrayObject_fields PyArrayObject; +#else +typedef struct tagPyArrayObject { + PyObject_HEAD +} PyArrayObject; +#endif + +/* + * Removed 2020-Nov-25, NumPy 1.20 + * #define NPY_SIZEOF_PYARRAYOBJECT (sizeof(PyArrayObject_fields)) + * + * The above macro was removed as it gave a false sense of a stable ABI + * with respect to the structures size. If you require a runtime constant, + * you can use `PyArray_Type.tp_basicsize` instead. Otherwise, please + * see the PyArrayObject documentation or ask the NumPy developers for + * information on how to correctly replace the macro in a way that is + * compatible with multiple NumPy versions. + */ + + +/* Array Flags Object */ +typedef struct PyArrayFlagsObject { + PyObject_HEAD + PyObject *arr; + int flags; +} PyArrayFlagsObject; + +/* Mirrors buffer object to ptr */ + +typedef struct { + PyObject_HEAD + PyObject *base; + void *ptr; + npy_intp len; + int flags; +} PyArray_Chunk; + +typedef struct { + NPY_DATETIMEUNIT base; + int num; +} PyArray_DatetimeMetaData; + +typedef struct { + NpyAuxData base; + PyArray_DatetimeMetaData meta; +} PyArray_DatetimeDTypeMetaData; + +/* + * This structure contains an exploded view of a date-time value. + * NaT is represented by year == NPY_DATETIME_NAT. + */ +typedef struct { + npy_int64 year; + npy_int32 month, day, hour, min, sec, us, ps, as; +} npy_datetimestruct; + +/* This is not used internally. */ +typedef struct { + npy_int64 day; + npy_int32 sec, us, ps, as; +} npy_timedeltastruct; + +typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *); + +/* + * Means c-style contiguous (last index varies the fastest). The data + * elements right after each other. + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). 
+ */ +#define NPY_ARRAY_C_CONTIGUOUS 0x0001 + +/* + * Set if array is a contiguous Fortran array: the first index varies + * the fastest in memory (strides array is reverse of C-contiguous + * array) + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_F_CONTIGUOUS 0x0002 + +/* + * Note: all 0-d arrays are C_CONTIGUOUS and F_CONTIGUOUS. If a + * 1-d array is C_CONTIGUOUS it is also F_CONTIGUOUS. Arrays with + * more then one dimension can be C_CONTIGUOUS and F_CONTIGUOUS + * at the same time if they have either zero or one element. + * If NPY_RELAXED_STRIDES_CHECKING is set, a higher dimensional + * array is always C_CONTIGUOUS and F_CONTIGUOUS if it has zero elements + * and the array is contiguous if ndarray.squeeze() is contiguous. + * I.e. dimensions for which `ndarray.shape[dimension] == 1` are + * ignored. + */ + +/* + * If set, the array owns the data: it will be free'd when the array + * is deleted. + * + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_OWNDATA 0x0004 + +/* + * An array never has the next four set; they're only used as parameter + * flags to the various FromAny functions + * + * This flag may be requested in constructor functions. + */ + +/* Cause a cast to occur regardless of whether or not it is safe. */ +#define NPY_ARRAY_FORCECAST 0x0010 + +/* + * Always copy the array. Returned arrays are always CONTIGUOUS, + * ALIGNED, and WRITEABLE. + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_ENSURECOPY 0x0020 + +/* + * Make sure the returned array is a base-class ndarray + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_ENSUREARRAY 0x0040 + +#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD + /* + * Dual use of the ENSUREARRAY flag, to indicate that this was converted + * from a python float, int, or complex. + * An array using this flag must be a temporary array that can never + * leave the C internals of NumPy. Even if it does, ENSUREARRAY is + * absolutely safe to abuse, since it already is a base class array :). + */ + #define _NPY_ARRAY_WAS_PYSCALAR 0x0040 +#endif /* NPY_INTERNAL_BUILD */ + +/* + * Make sure that the strides are in units of the element size Needed + * for some operations with record-arrays. + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_ELEMENTSTRIDES 0x0080 + +/* + * Array data is aligned on the appropriate memory address for the type + * stored according to how the compiler would align things (e.g., an + * array of integers (4 bytes each) starts on a memory address that's + * a multiple of 4) + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_ALIGNED 0x0100 + +/* + * Array data has the native endianness + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_NOTSWAPPED 0x0200 + +/* + * Array data is writeable + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_WRITEABLE 0x0400 + +/* + * If this flag is set, then base contains a pointer to an array of + * the same size that should be updated with the current contents of + * this array when PyArray_ResolveWritebackIfCopy is called. + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). 
+ */ +#define NPY_ARRAY_UPDATEIFCOPY 0x1000 /* Deprecated in 1.14 */ +#define NPY_ARRAY_WRITEBACKIFCOPY 0x2000 + +/* + * NOTE: there are also internal flags defined in multiarray/arrayobject.h, + * which start at bit 31 and work down. + */ + +#define NPY_ARRAY_BEHAVED (NPY_ARRAY_ALIGNED | \ + NPY_ARRAY_WRITEABLE) +#define NPY_ARRAY_BEHAVED_NS (NPY_ARRAY_ALIGNED | \ + NPY_ARRAY_WRITEABLE | \ + NPY_ARRAY_NOTSWAPPED) +#define NPY_ARRAY_CARRAY (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_BEHAVED) +#define NPY_ARRAY_CARRAY_RO (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) +#define NPY_ARRAY_FARRAY (NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_BEHAVED) +#define NPY_ARRAY_FARRAY_RO (NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) +#define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO) +#define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_INOUT_ARRAY (NPY_ARRAY_CARRAY | \ + NPY_ARRAY_UPDATEIFCOPY) +#define NPY_ARRAY_INOUT_ARRAY2 (NPY_ARRAY_CARRAY | \ + NPY_ARRAY_WRITEBACKIFCOPY) +#define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO) +#define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY) +#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY | \ + NPY_ARRAY_UPDATEIFCOPY) +#define NPY_ARRAY_INOUT_FARRAY2 (NPY_ARRAY_FARRAY | \ + NPY_ARRAY_WRITEBACKIFCOPY) + +#define NPY_ARRAY_UPDATE_ALL (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) + +/* This flag is for the array interface, not PyArrayObject */ +#define NPY_ARR_HAS_DESCR 0x0800 + + + + +/* + * Size of internal buffers used for alignment Make BUFSIZE a multiple + * of sizeof(npy_cdouble) -- usually 16 so that ufunc buffers are aligned + */ +#define NPY_MIN_BUFSIZE ((int)sizeof(npy_cdouble)) +#define NPY_MAX_BUFSIZE (((int)sizeof(npy_cdouble))*1000000) +#define NPY_BUFSIZE 8192 +/* buffer stress test size: */ +/*#define NPY_BUFSIZE 17*/ + +#define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) +#define PyArray_MIN(a,b) (((a)<(b))?(a):(b)) +#define PyArray_CLT(p,q) ((((p).real==(q).real) ? ((p).imag < (q).imag) : \ + ((p).real < (q).real))) +#define PyArray_CGT(p,q) ((((p).real==(q).real) ? ((p).imag > (q).imag) : \ + ((p).real > (q).real))) +#define PyArray_CLE(p,q) ((((p).real==(q).real) ? ((p).imag <= (q).imag) : \ + ((p).real <= (q).real))) +#define PyArray_CGE(p,q) ((((p).real==(q).real) ? ((p).imag >= (q).imag) : \ + ((p).real >= (q).real))) +#define PyArray_CEQ(p,q) (((p).real==(q).real) && ((p).imag == (q).imag)) +#define PyArray_CNE(p,q) (((p).real!=(q).real) || ((p).imag != (q).imag)) + +/* + * C API: consists of Macros and functions. The MACROS are defined + * here. 
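 *
 * [Editorial aside, not part of the upstream header: a minimal sketch of
 * the complex ordering implemented by the PyArray_CLT/CGT/... macros
 * above (real parts first, imaginary parts as the tie-breaker). The
 * helper name `cmax_index` is an assumption made for illustration.]
 */

static npy_intp
cmax_index(const npy_cdouble *vals, npy_intp n)
{
    npy_intp i, best = 0;

    for (i = 1; i < n; i++) {
        /* PyArray_CGT compares real parts and falls back to the
         * imaginary parts when the real parts are equal. */
        if (PyArray_CGT(vals[i], vals[best])) {
            best = i;
        }
    }
    return best;  /* index of the largest value; 0 when n <= 1 */
}

/*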
+ */ + + +#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS((m), NPY_ARRAY_C_CONTIGUOUS) +#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS((m), NPY_ARRAY_WRITEABLE) +#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS((m), NPY_ARRAY_ALIGNED) + +#define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS((m), NPY_ARRAY_C_CONTIGUOUS) +#define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS((m), NPY_ARRAY_F_CONTIGUOUS) + +/* the variable is used in some places, so always define it */ +#define NPY_BEGIN_THREADS_DEF PyThreadState *_save=NULL; +#if NPY_ALLOW_THREADS +#define NPY_BEGIN_ALLOW_THREADS Py_BEGIN_ALLOW_THREADS +#define NPY_END_ALLOW_THREADS Py_END_ALLOW_THREADS +#define NPY_BEGIN_THREADS do {_save = PyEval_SaveThread();} while (0); +#define NPY_END_THREADS do { if (_save) \ + { PyEval_RestoreThread(_save); _save = NULL;} } while (0); +#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) do { if ((loop_size) > 500) \ + { _save = PyEval_SaveThread();} } while (0); + +#define NPY_BEGIN_THREADS_DESCR(dtype) \ + do {if (!(PyDataType_FLAGCHK((dtype), NPY_NEEDS_PYAPI))) \ + NPY_BEGIN_THREADS;} while (0); + +#define NPY_END_THREADS_DESCR(dtype) \ + do {if (!(PyDataType_FLAGCHK((dtype), NPY_NEEDS_PYAPI))) \ + NPY_END_THREADS; } while (0); + +#define NPY_ALLOW_C_API_DEF PyGILState_STATE __save__; +#define NPY_ALLOW_C_API do {__save__ = PyGILState_Ensure();} while (0); +#define NPY_DISABLE_C_API do {PyGILState_Release(__save__);} while (0); +#else +#define NPY_BEGIN_ALLOW_THREADS +#define NPY_END_ALLOW_THREADS +#define NPY_BEGIN_THREADS +#define NPY_END_THREADS +#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) +#define NPY_BEGIN_THREADS_DESCR(dtype) +#define NPY_END_THREADS_DESCR(dtype) +#define NPY_ALLOW_C_API_DEF +#define NPY_ALLOW_C_API +#define NPY_DISABLE_C_API +#endif + +/********************************** + * The nditer object, added in 1.6 + **********************************/ + +/* The actual structure of the iterator is an internal detail */ +typedef struct NpyIter_InternalOnly NpyIter; + +/* Iterator function pointers that may be specialized */ +typedef int (NpyIter_IterNextFunc)(NpyIter *iter); +typedef void (NpyIter_GetMultiIndexFunc)(NpyIter *iter, + npy_intp *outcoords); + +/*** Global flags that may be passed to the iterator constructors ***/ + +/* Track an index representing C order */ +#define NPY_ITER_C_INDEX 0x00000001 +/* Track an index representing Fortran order */ +#define NPY_ITER_F_INDEX 0x00000002 +/* Track a multi-index */ +#define NPY_ITER_MULTI_INDEX 0x00000004 +/* User code external to the iterator does the 1-dimensional innermost loop */ +#define NPY_ITER_EXTERNAL_LOOP 0x00000008 +/* Convert all the operands to a common data type */ +#define NPY_ITER_COMMON_DTYPE 0x00000010 +/* Operands may hold references, requiring API access during iteration */ +#define NPY_ITER_REFS_OK 0x00000020 +/* Zero-sized operands should be permitted, iteration checks IterSize for 0 */ +#define NPY_ITER_ZEROSIZE_OK 0x00000040 +/* Permits reductions (size-0 stride with dimension size > 1) */ +#define NPY_ITER_REDUCE_OK 0x00000080 +/* Enables sub-range iteration */ +#define NPY_ITER_RANGED 0x00000100 +/* Enables buffering */ +#define NPY_ITER_BUFFERED 0x00000200 +/* When buffering is enabled, grows the inner loop if possible */ +#define NPY_ITER_GROWINNER 0x00000400 +/* Delay allocation of buffers until first Reset* call */ +#define NPY_ITER_DELAY_BUFALLOC 0x00000800 +/* When NPY_KEEPORDER is specified, disable reversing negative-stride axes */ +#define NPY_ITER_DONT_NEGATE_STRIDES 0x00001000 +/* + * If output 
operands overlap with other operands (based on heuristics that + * has false positives but no false negatives), make temporary copies to + * eliminate overlap. + */ +#define NPY_ITER_COPY_IF_OVERLAP 0x00002000 + +/*** Per-operand flags that may be passed to the iterator constructors ***/ + +/* The operand will be read from and written to */ +#define NPY_ITER_READWRITE 0x00010000 +/* The operand will only be read from */ +#define NPY_ITER_READONLY 0x00020000 +/* The operand will only be written to */ +#define NPY_ITER_WRITEONLY 0x00040000 +/* The operand's data must be in native byte order */ +#define NPY_ITER_NBO 0x00080000 +/* The operand's data must be aligned */ +#define NPY_ITER_ALIGNED 0x00100000 +/* The operand's data must be contiguous (within the inner loop) */ +#define NPY_ITER_CONTIG 0x00200000 +/* The operand may be copied to satisfy requirements */ +#define NPY_ITER_COPY 0x00400000 +/* The operand may be copied with WRITEBACKIFCOPY to satisfy requirements */ +#define NPY_ITER_UPDATEIFCOPY 0x00800000 +/* Allocate the operand if it is NULL */ +#define NPY_ITER_ALLOCATE 0x01000000 +/* If an operand is allocated, don't use any subtype */ +#define NPY_ITER_NO_SUBTYPE 0x02000000 +/* This is a virtual array slot, operand is NULL but temporary data is there */ +#define NPY_ITER_VIRTUAL 0x04000000 +/* Require that the dimension match the iterator dimensions exactly */ +#define NPY_ITER_NO_BROADCAST 0x08000000 +/* A mask is being used on this array, affects buffer -> array copy */ +#define NPY_ITER_WRITEMASKED 0x10000000 +/* This array is the mask for all WRITEMASKED operands */ +#define NPY_ITER_ARRAYMASK 0x20000000 +/* Assume iterator order data access for COPY_IF_OVERLAP */ +#define NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE 0x40000000 + +#define NPY_ITER_GLOBAL_FLAGS 0x0000ffff +#define NPY_ITER_PER_OP_FLAGS 0xffff0000 + + +/***************************** + * Basic iterator object + *****************************/ + +/* FWD declaration */ +typedef struct PyArrayIterObject_tag PyArrayIterObject; + +/* + * type of the function which translates a set of coordinates to a + * pointer to the data + */ +typedef char* (*npy_iter_get_dataptr_t)( + PyArrayIterObject* iter, const npy_intp*); + +struct PyArrayIterObject_tag { + PyObject_HEAD + int nd_m1; /* number of dimensions - 1 */ + npy_intp index, size; + npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ + npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ + npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ + npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ + npy_intp factors[NPY_MAXDIMS]; /* shape factors */ + PyArrayObject *ao; + char *dataptr; /* pointer to current item*/ + npy_bool contiguous; + + npy_intp bounds[NPY_MAXDIMS][2]; + npy_intp limits[NPY_MAXDIMS][2]; + npy_intp limits_sizes[NPY_MAXDIMS]; + npy_iter_get_dataptr_t translate; +} ; + + +/* Iterator API */ +#define PyArrayIter_Check(op) PyObject_TypeCheck((op), &PyArrayIter_Type) + +#define _PyAIT(it) ((PyArrayIterObject *)(it)) +#define PyArray_ITER_RESET(it) do { \ + _PyAIT(it)->index = 0; \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + memset(_PyAIT(it)->coordinates, 0, \ + (_PyAIT(it)->nd_m1+1)*sizeof(npy_intp)); \ +} while (0) + +#define _PyArray_ITER_NEXT1(it) do { \ + (it)->dataptr += _PyAIT(it)->strides[0]; \ + (it)->coordinates[0]++; \ +} while (0) + +#define _PyArray_ITER_NEXT2(it) do { \ + if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ + (it)->coordinates[1]++; \ + (it)->dataptr += (it)->strides[1]; \ + } \ + else { \ + 
(it)->coordinates[1] = 0; \ + (it)->coordinates[0]++; \ + (it)->dataptr += (it)->strides[0] - \ + (it)->backstrides[1]; \ + } \ +} while (0) + +#define PyArray_ITER_NEXT(it) do { \ + _PyAIT(it)->index++; \ + if (_PyAIT(it)->nd_m1 == 0) { \ + _PyArray_ITER_NEXT1(_PyAIT(it)); \ + } \ + else if (_PyAIT(it)->contiguous) \ + _PyAIT(it)->dataptr += PyArray_DESCR(_PyAIT(it)->ao)->elsize; \ + else if (_PyAIT(it)->nd_m1 == 1) { \ + _PyArray_ITER_NEXT2(_PyAIT(it)); \ + } \ + else { \ + int __npy_i; \ + for (__npy_i=_PyAIT(it)->nd_m1; __npy_i >= 0; __npy_i--) { \ + if (_PyAIT(it)->coordinates[__npy_i] < \ + _PyAIT(it)->dims_m1[__npy_i]) { \ + _PyAIT(it)->coordinates[__npy_i]++; \ + _PyAIT(it)->dataptr += \ + _PyAIT(it)->strides[__npy_i]; \ + break; \ + } \ + else { \ + _PyAIT(it)->coordinates[__npy_i] = 0; \ + _PyAIT(it)->dataptr -= \ + _PyAIT(it)->backstrides[__npy_i]; \ + } \ + } \ + } \ +} while (0) + +#define PyArray_ITER_GOTO(it, destination) do { \ + int __npy_i; \ + _PyAIT(it)->index = 0; \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + for (__npy_i = _PyAIT(it)->nd_m1; __npy_i>=0; __npy_i--) { \ + if (destination[__npy_i] < 0) { \ + destination[__npy_i] += \ + _PyAIT(it)->dims_m1[__npy_i]+1; \ + } \ + _PyAIT(it)->dataptr += destination[__npy_i] * \ + _PyAIT(it)->strides[__npy_i]; \ + _PyAIT(it)->coordinates[__npy_i] = \ + destination[__npy_i]; \ + _PyAIT(it)->index += destination[__npy_i] * \ + ( __npy_i==_PyAIT(it)->nd_m1 ? 1 : \ + _PyAIT(it)->dims_m1[__npy_i+1]+1) ; \ + } \ +} while (0) + +#define PyArray_ITER_GOTO1D(it, ind) do { \ + int __npy_i; \ + npy_intp __npy_ind = (npy_intp)(ind); \ + if (__npy_ind < 0) __npy_ind += _PyAIT(it)->size; \ + _PyAIT(it)->index = __npy_ind; \ + if (_PyAIT(it)->nd_m1 == 0) { \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ + __npy_ind * _PyAIT(it)->strides[0]; \ + } \ + else if (_PyAIT(it)->contiguous) \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ + __npy_ind * PyArray_DESCR(_PyAIT(it)->ao)->elsize; \ + else { \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + for (__npy_i = 0; __npy_i<=_PyAIT(it)->nd_m1; \ + __npy_i++) { \ + _PyAIT(it)->dataptr += \ + (__npy_ind / _PyAIT(it)->factors[__npy_i]) \ + * _PyAIT(it)->strides[__npy_i]; \ + __npy_ind %= _PyAIT(it)->factors[__npy_i]; \ + } \ + } \ +} while (0) + +#define PyArray_ITER_DATA(it) ((void *)(_PyAIT(it)->dataptr)) + +#define PyArray_ITER_NOTDONE(it) (_PyAIT(it)->index < _PyAIT(it)->size) + + +/* + * Any object passed to PyArray_Broadcast must be binary compatible + * with this structure. 
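 *
 * [Editorial aside, not part of the upstream header: a minimal sketch of
 * the canonical loop the PyArray_ITER_* macros above support. It assumes
 * an NPY_DOUBLE array; the helper name `sum_all_elements` is an
 * assumption made for illustration.]
 */

static double
sum_all_elements(PyArrayObject *arr)
{
    double total = 0.0;
    /* PyArray_IterNew creates a flat iterator that visits every element
     * in C order, whatever the dimensionality and strides. */
    PyObject *it = PyArray_IterNew((PyObject *)arr);

    if (it == NULL) {
        return -1.0;  /* exception already set by NumPy */
    }
    while (PyArray_ITER_NOTDONE(it)) {
        total += *(const double *)PyArray_ITER_DATA(it);
        PyArray_ITER_NEXT(it);
    }
    Py_DECREF(it);
    return total;
}

/*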
+ */ + +typedef struct { + PyObject_HEAD + int numiter; /* number of iters */ + npy_intp size; /* broadcasted size */ + npy_intp index; /* current index */ + int nd; /* number of dims */ + npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ + PyArrayIterObject *iters[NPY_MAXARGS]; /* iterators */ +} PyArrayMultiIterObject; + +#define _PyMIT(m) ((PyArrayMultiIterObject *)(m)) +#define PyArray_MultiIter_RESET(multi) do { \ + int __npy_mi; \ + _PyMIT(multi)->index = 0; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_RESET(_PyMIT(multi)->iters[__npy_mi]); \ + } \ +} while (0) + +#define PyArray_MultiIter_NEXT(multi) do { \ + int __npy_mi; \ + _PyMIT(multi)->index++; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_NEXT(_PyMIT(multi)->iters[__npy_mi]); \ + } \ +} while (0) + +#define PyArray_MultiIter_GOTO(multi, dest) do { \ + int __npy_mi; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_GOTO(_PyMIT(multi)->iters[__npy_mi], dest); \ + } \ + _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ +} while (0) + +#define PyArray_MultiIter_GOTO1D(multi, ind) do { \ + int __npy_mi; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_GOTO1D(_PyMIT(multi)->iters[__npy_mi], ind); \ + } \ + _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ +} while (0) + +#define PyArray_MultiIter_DATA(multi, i) \ + ((void *)(_PyMIT(multi)->iters[i]->dataptr)) + +#define PyArray_MultiIter_NEXTi(multi, i) \ + PyArray_ITER_NEXT(_PyMIT(multi)->iters[i]) + +#define PyArray_MultiIter_NOTDONE(multi) \ + (_PyMIT(multi)->index < _PyMIT(multi)->size) + + +/* + * Store the information needed for fancy-indexing over an array. The + * fields are slightly unordered to keep consec, dataptr and subspace + * where they were originally. + */ +typedef struct { + PyObject_HEAD + /* + * Multi-iterator portion --- needs to be present in this + * order to work with PyArray_Broadcast + */ + + int numiter; /* number of index-array + iterators */ + npy_intp size; /* size of broadcasted + result */ + npy_intp index; /* current index */ + int nd; /* number of dims */ + npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ + NpyIter *outer; /* index objects + iterator */ + void *unused[NPY_MAXDIMS - 2]; + PyArrayObject *array; + /* Flat iterator for the indexed array. For compatibility solely. */ + PyArrayIterObject *ait; + + /* + * Subspace array. For binary compatibility (was an iterator, + * but only the check for NULL should be used). + */ + PyArrayObject *subspace; + + /* + * if subspace iteration, then this is the array of axes in + * the underlying array represented by the index objects + */ + int iteraxes[NPY_MAXDIMS]; + npy_intp fancy_strides[NPY_MAXDIMS]; + + /* pointer when all fancy indices are 0 */ + char *baseoffset; + + /* + * after binding consec denotes at which axis the fancy axes + * are inserted. + */ + int consec; + char *dataptr; + + int nd_fancy; + npy_intp fancy_dims[NPY_MAXDIMS]; + + /* Whether the iterator (any of the iterators) requires API */ + int needs_api; + + /* + * Extra op information. + */ + PyArrayObject *extra_op; + PyArray_Descr *extra_op_dtype; /* desired dtype */ + npy_uint32 *extra_op_flags; /* Iterator flags */ + + NpyIter *extra_op_iter; + NpyIter_IterNextFunc *extra_op_next; + char **extra_op_ptrs; + + /* + * Information about the iteration state. 
+ */ + NpyIter_IterNextFunc *outer_next; + char **outer_ptrs; + npy_intp *outer_strides; + + /* + * Information about the subspace iterator. + */ + NpyIter *subspace_iter; + NpyIter_IterNextFunc *subspace_next; + char **subspace_ptrs; + npy_intp *subspace_strides; + + /* Count for the external loop (which ever it is) for API iteration */ + npy_intp iter_count; + +} PyArrayMapIterObject; + +enum { + NPY_NEIGHBORHOOD_ITER_ZERO_PADDING, + NPY_NEIGHBORHOOD_ITER_ONE_PADDING, + NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING, + NPY_NEIGHBORHOOD_ITER_CIRCULAR_PADDING, + NPY_NEIGHBORHOOD_ITER_MIRROR_PADDING +}; + +typedef struct { + PyObject_HEAD + + /* + * PyArrayIterObject part: keep this in this exact order + */ + int nd_m1; /* number of dimensions - 1 */ + npy_intp index, size; + npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ + npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ + npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ + npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ + npy_intp factors[NPY_MAXDIMS]; /* shape factors */ + PyArrayObject *ao; + char *dataptr; /* pointer to current item*/ + npy_bool contiguous; + + npy_intp bounds[NPY_MAXDIMS][2]; + npy_intp limits[NPY_MAXDIMS][2]; + npy_intp limits_sizes[NPY_MAXDIMS]; + npy_iter_get_dataptr_t translate; + + /* + * New members + */ + npy_intp nd; + + /* Dimensions is the dimension of the array */ + npy_intp dimensions[NPY_MAXDIMS]; + + /* + * Neighborhood points coordinates are computed relatively to the + * point pointed by _internal_iter + */ + PyArrayIterObject* _internal_iter; + /* + * To keep a reference to the representation of the constant value + * for constant padding + */ + char* constant; + + int mode; +} PyArrayNeighborhoodIterObject; + +/* + * Neighborhood iterator API + */ + +/* General: those work for any mode */ +static NPY_INLINE int +PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter); +static NPY_INLINE int +PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter); +#if 0 +static NPY_INLINE int +PyArrayNeighborhoodIter_Next2D(PyArrayNeighborhoodIterObject* iter); +#endif + +/* + * Include inline implementations - functions defined there are not + * considered public API + */ +#define _NPY_INCLUDE_NEIGHBORHOOD_IMP +#include "_neighborhood_iterator_imp.h" +#undef _NPY_INCLUDE_NEIGHBORHOOD_IMP + +/* The default array type */ +#define NPY_DEFAULT_TYPE NPY_DOUBLE + +/* + * All sorts of useful ways to look into a PyArrayObject. It is recommended + * to use PyArrayObject * objects instead of always casting from PyObject *, + * for improved type checking. + * + * In many cases here the macro versions of the accessors are deprecated, + * but can't be immediately changed to inline functions because the + * preexisting macros accept PyObject * and do automatic casts. Inline + * functions accepting PyArrayObject * provides for some compile-time + * checking of correctness when working with these objects in C. + */ + +#define PyArray_ISONESEGMENT(m) (PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) || \ + PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS)) + +#define PyArray_ISFORTRAN(m) (PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) && \ + (!PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS))) + +#define PyArray_FORTRAN_IF(m) ((PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) ? 
\ + NPY_ARRAY_F_CONTIGUOUS : 0)) + +#if (defined(NPY_NO_DEPRECATED_API) && (NPY_1_7_API_VERSION <= NPY_NO_DEPRECATED_API)) +/* + * Changing access macros into functions, to allow for future hiding + * of the internal memory layout. This later hiding will allow the 2.x series + * to change the internal representation of arrays without affecting + * ABI compatibility. + */ + +static NPY_INLINE int +PyArray_NDIM(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->nd; +} + +static NPY_INLINE void * +PyArray_DATA(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->data; +} + +static NPY_INLINE char * +PyArray_BYTES(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->data; +} + +static NPY_INLINE npy_intp * +PyArray_DIMS(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->dimensions; +} + +static NPY_INLINE npy_intp * +PyArray_STRIDES(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->strides; +} + +static NPY_INLINE npy_intp +PyArray_DIM(const PyArrayObject *arr, int idim) +{ + return ((PyArrayObject_fields *)arr)->dimensions[idim]; +} + +static NPY_INLINE npy_intp +PyArray_STRIDE(const PyArrayObject *arr, int istride) +{ + return ((PyArrayObject_fields *)arr)->strides[istride]; +} + +static NPY_INLINE NPY_RETURNS_BORROWED_REF PyObject * +PyArray_BASE(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->base; +} + +static NPY_INLINE NPY_RETURNS_BORROWED_REF PyArray_Descr * +PyArray_DESCR(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr; +} + +static NPY_INLINE int +PyArray_FLAGS(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->flags; +} + +static NPY_INLINE npy_intp +PyArray_ITEMSIZE(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr->elsize; +} + +static NPY_INLINE int +PyArray_TYPE(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr->type_num; +} + +static NPY_INLINE int +PyArray_CHKFLAGS(const PyArrayObject *arr, int flags) +{ + return (PyArray_FLAGS(arr) & flags) == flags; +} + +static NPY_INLINE PyObject * +PyArray_GETITEM(const PyArrayObject *arr, const char *itemptr) +{ + return ((PyArrayObject_fields *)arr)->descr->f->getitem( + (void *)itemptr, (PyArrayObject *)arr); +} + +/* + * SETITEM should only be used if it is known that the value is a scalar + * and of a type understood by the arrays dtype. + * Use `PyArray_Pack` if the value may be of a different dtype. + */ +static NPY_INLINE int +PyArray_SETITEM(PyArrayObject *arr, char *itemptr, PyObject *v) +{ + return ((PyArrayObject_fields *)arr)->descr->f->setitem(v, itemptr, arr); +} + +#else + +/* These macros are deprecated as of NumPy 1.7. 
*/ +#define PyArray_NDIM(obj) (((PyArrayObject_fields *)(obj))->nd) +#define PyArray_BYTES(obj) (((PyArrayObject_fields *)(obj))->data) +#define PyArray_DATA(obj) ((void *)((PyArrayObject_fields *)(obj))->data) +#define PyArray_DIMS(obj) (((PyArrayObject_fields *)(obj))->dimensions) +#define PyArray_STRIDES(obj) (((PyArrayObject_fields *)(obj))->strides) +#define PyArray_DIM(obj,n) (PyArray_DIMS(obj)[n]) +#define PyArray_STRIDE(obj,n) (PyArray_STRIDES(obj)[n]) +#define PyArray_BASE(obj) (((PyArrayObject_fields *)(obj))->base) +#define PyArray_DESCR(obj) (((PyArrayObject_fields *)(obj))->descr) +#define PyArray_FLAGS(obj) (((PyArrayObject_fields *)(obj))->flags) +#define PyArray_CHKFLAGS(m, FLAGS) \ + ((((PyArrayObject_fields *)(m))->flags & (FLAGS)) == (FLAGS)) +#define PyArray_ITEMSIZE(obj) \ + (((PyArrayObject_fields *)(obj))->descr->elsize) +#define PyArray_TYPE(obj) \ + (((PyArrayObject_fields *)(obj))->descr->type_num) +#define PyArray_GETITEM(obj,itemptr) \ + PyArray_DESCR(obj)->f->getitem((char *)(itemptr), \ + (PyArrayObject *)(obj)) + +#define PyArray_SETITEM(obj,itemptr,v) \ + PyArray_DESCR(obj)->f->setitem((PyObject *)(v), \ + (char *)(itemptr), \ + (PyArrayObject *)(obj)) +#endif + +static NPY_INLINE PyArray_Descr * +PyArray_DTYPE(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr; +} + +static NPY_INLINE npy_intp * +PyArray_SHAPE(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->dimensions; +} + +/* + * Enables the specified array flags. Does no checking, + * assumes you know what you're doing. + */ +static NPY_INLINE void +PyArray_ENABLEFLAGS(PyArrayObject *arr, int flags) +{ + ((PyArrayObject_fields *)arr)->flags |= flags; +} + +/* + * Clears the specified array flags. Does no checking, + * assumes you know what you're doing. 
+ */ +static NPY_INLINE void +PyArray_CLEARFLAGS(PyArrayObject *arr, int flags) +{ + ((PyArrayObject_fields *)arr)->flags &= ~flags; +} + +#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) + +#define PyTypeNum_ISUNSIGNED(type) (((type) == NPY_UBYTE) || \ + ((type) == NPY_USHORT) || \ + ((type) == NPY_UINT) || \ + ((type) == NPY_ULONG) || \ + ((type) == NPY_ULONGLONG)) + +#define PyTypeNum_ISSIGNED(type) (((type) == NPY_BYTE) || \ + ((type) == NPY_SHORT) || \ + ((type) == NPY_INT) || \ + ((type) == NPY_LONG) || \ + ((type) == NPY_LONGLONG)) + +#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \ + ((type) <= NPY_ULONGLONG)) + +#define PyTypeNum_ISFLOAT(type) ((((type) >= NPY_FLOAT) && \ + ((type) <= NPY_LONGDOUBLE)) || \ + ((type) == NPY_HALF)) + +#define PyTypeNum_ISNUMBER(type) (((type) <= NPY_CLONGDOUBLE) || \ + ((type) == NPY_HALF)) + +#define PyTypeNum_ISSTRING(type) (((type) == NPY_STRING) || \ + ((type) == NPY_UNICODE)) + +#define PyTypeNum_ISCOMPLEX(type) (((type) >= NPY_CFLOAT) && \ + ((type) <= NPY_CLONGDOUBLE)) + +#define PyTypeNum_ISPYTHON(type) (((type) == NPY_LONG) || \ + ((type) == NPY_DOUBLE) || \ + ((type) == NPY_CDOUBLE) || \ + ((type) == NPY_BOOL) || \ + ((type) == NPY_OBJECT )) + +#define PyTypeNum_ISFLEXIBLE(type) (((type) >=NPY_STRING) && \ + ((type) <=NPY_VOID)) + +#define PyTypeNum_ISDATETIME(type) (((type) >=NPY_DATETIME) && \ + ((type) <=NPY_TIMEDELTA)) + +#define PyTypeNum_ISUSERDEF(type) (((type) >= NPY_USERDEF) && \ + ((type) < NPY_USERDEF+ \ + NPY_NUMUSERTYPES)) + +#define PyTypeNum_ISEXTENDED(type) (PyTypeNum_ISFLEXIBLE(type) || \ + PyTypeNum_ISUSERDEF(type)) + +#define PyTypeNum_ISOBJECT(type) ((type) == NPY_OBJECT) + + +#define PyDataType_ISBOOL(obj) PyTypeNum_ISBOOL(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISSIGNED(obj) PyTypeNum_ISSIGNED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISINTEGER(obj) PyTypeNum_ISINTEGER(((PyArray_Descr*)(obj))->type_num ) +#define PyDataType_ISFLOAT(obj) PyTypeNum_ISFLOAT(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISNUMBER(obj) PyTypeNum_ISNUMBER(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISSTRING(obj) PyTypeNum_ISSTRING(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISPYTHON(obj) PyTypeNum_ISPYTHON(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISDATETIME(obj) PyTypeNum_ISDATETIME(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISOBJECT(obj) PyTypeNum_ISOBJECT(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_HASFIELDS(obj) (((PyArray_Descr *)(obj))->names != NULL) +#define PyDataType_HASSUBARRAY(dtype) ((dtype)->subarray != NULL) +#define PyDataType_ISUNSIZED(dtype) ((dtype)->elsize == 0 && \ + !PyDataType_HASFIELDS(dtype)) +#define PyDataType_MAKEUNSIZED(dtype) ((dtype)->elsize = 0) + +#define PyArray_ISBOOL(obj) PyTypeNum_ISBOOL(PyArray_TYPE(obj)) +#define PyArray_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(PyArray_TYPE(obj)) +#define PyArray_ISSIGNED(obj) PyTypeNum_ISSIGNED(PyArray_TYPE(obj)) +#define PyArray_ISINTEGER(obj) PyTypeNum_ISINTEGER(PyArray_TYPE(obj)) +#define PyArray_ISFLOAT(obj) 
PyTypeNum_ISFLOAT(PyArray_TYPE(obj))
+#define PyArray_ISNUMBER(obj) PyTypeNum_ISNUMBER(PyArray_TYPE(obj))
+#define PyArray_ISSTRING(obj) PyTypeNum_ISSTRING(PyArray_TYPE(obj))
+#define PyArray_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(PyArray_TYPE(obj))
+#define PyArray_ISPYTHON(obj) PyTypeNum_ISPYTHON(PyArray_TYPE(obj))
+#define PyArray_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj))
+#define PyArray_ISDATETIME(obj) PyTypeNum_ISDATETIME(PyArray_TYPE(obj))
+#define PyArray_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(PyArray_TYPE(obj))
+#define PyArray_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(PyArray_TYPE(obj))
+#define PyArray_ISOBJECT(obj) PyTypeNum_ISOBJECT(PyArray_TYPE(obj))
+#define PyArray_HASFIELDS(obj) PyDataType_HASFIELDS(PyArray_DESCR(obj))
+
+ /*
+ * FIXME: This should check for a flag on the data-type that
+ * states whether or not it is variable length, because the
+ * ISFLEXIBLE check is hard-coded to the built-in data-types.
+ */
+#define PyArray_ISVARIABLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj))
+
+#define PyArray_SAFEALIGNEDCOPY(obj) (PyArray_ISALIGNED(obj) && !PyArray_ISVARIABLE(obj))
+
+
+#define NPY_LITTLE '<'
+#define NPY_BIG '>'
+#define NPY_NATIVE '='
+#define NPY_SWAP 's'
+#define NPY_IGNORE '|'
+
+#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN
+#define NPY_NATBYTE NPY_BIG
+#define NPY_OPPBYTE NPY_LITTLE
+#else
+#define NPY_NATBYTE NPY_LITTLE
+#define NPY_OPPBYTE NPY_BIG
+#endif
+
+#define PyArray_ISNBO(arg) ((arg) != NPY_OPPBYTE)
+#define PyArray_IsNativeByteOrder PyArray_ISNBO
+#define PyArray_ISNOTSWAPPED(m) PyArray_ISNBO(PyArray_DESCR(m)->byteorder)
+#define PyArray_ISBYTESWAPPED(m) (!PyArray_ISNOTSWAPPED(m))
+
+#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \
+ PyArray_ISNOTSWAPPED(m))
+
+#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY)
+#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY_RO)
+#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY)
+#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY_RO)
+#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_ARRAY_BEHAVED)
+#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_ALIGNED)
+
+
+#define PyDataType_ISNOTSWAPPED(d) PyArray_ISNBO(((PyArray_Descr *)(d))->byteorder)
+#define PyDataType_ISBYTESWAPPED(d) (!PyDataType_ISNOTSWAPPED(d))
+
+/************************************************************
+ * A struct used by PyArray_CreateSortedStridePerm, new in 1.7.
+ ************************************************************/
+
+typedef struct {
+ npy_intp perm, stride;
+} npy_stride_sort_item;
+
+/************************************************************
+ * This is the form of the struct that's stored in the
+ * PyCapsule returned by an array's __array_struct__ attribute. See
+ * https://docs.scipy.org/doc/numpy/reference/arrays.interface.html for the full
+ * documentation.
+ ************************************************************/
+typedef struct {
+ int two; /*
+ * contains the integer 2 as a sanity
+ * check
+ */
+
+ int nd; /* number of dimensions */
+
+ char typekind; /*
+ * kind in array --- character code of
+ * typestr
+ */
+
+ int itemsize; /* size of each element */
+
+ int flags; /*
+ * how the data should be interpreted. Valid
+ * flags are CONTIGUOUS (1), F_CONTIGUOUS (2),
+ * ALIGNED (0x100), NOTSWAPPED (0x200), and
+ * WRITEABLE (0x400).
ARR_HAS_DESCR (0x800)
+ * states that the arrdescr field is present in
+ * the structure
+ */
+
+ npy_intp *shape; /*
+ * A length-nd array of shape
+ * information
+ */
+
+ npy_intp *strides; /* A length-nd array of stride information */
+
+ void *data; /* A pointer to the first element of the array */
+
+ PyObject *descr; /*
+ * A list of fields or NULL (ignored if flags
+ * does not have ARR_HAS_DESCR flag set)
+ */
+} PyArrayInterface;
+
+/*
+ * This is a function for hooking into the PyDataMem_NEW/FREE/RENEW functions.
+ * See the documentation for PyDataMem_SetEventHook.
+ */
+typedef void (PyDataMem_EventHookFunc)(void *inp, void *outp, size_t size,
+ void *user_data);
+
+
+/*
+ * PyArray_DTypeMeta related definitions.
+ *
+ * As of now, this API is preliminary and will be extended as necessary.
+ */
+#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD
+ /*
+ * The structures defined in this block are considered private API and
+ * may change without warning!
+ */
+ /* TODO: Make this definition public in the API, as soon as it's settled */
+ NPY_NO_EXPORT extern PyTypeObject PyArrayDTypeMeta_Type;
+
+ typedef struct PyArray_DTypeMeta_tag PyArray_DTypeMeta;
+
+ typedef PyArray_Descr *(discover_descr_from_pyobject_function)(
+ PyArray_DTypeMeta *cls, PyObject *obj);
+
+ /*
+ * Before making this public, we should decide whether it should pass
+ * the type, or allow looking at the object. A possible use-case:
+ * `np.array(np.array([0]), dtype=np.ndarray)`
+ * Could consider arrays that are not `dtype=ndarray` "scalars".
+ */
+ typedef int (is_known_scalar_type_function)(
+ PyArray_DTypeMeta *cls, PyTypeObject *obj);
+
+ typedef PyArray_Descr *(default_descr_function)(PyArray_DTypeMeta *cls);
+ typedef PyArray_DTypeMeta *(common_dtype_function)(
+ PyArray_DTypeMeta *dtype1, PyArray_DTypeMeta *dtype2);
+ typedef PyArray_DTypeMeta *(common_dtype_with_value_function)(
+ PyArray_DTypeMeta *dtype1, PyArray_DTypeMeta *dtype2, PyObject *value);
+ typedef PyArray_Descr *(common_instance_function)(
+ PyArray_Descr *dtype1, PyArray_Descr *dtype2);
+
+ /*
+ * While NumPy DTypes would not need to be heap types, the plan is to
+ * make DTypes available in Python, at which point they will be heap types.
+ * Since we also wish to add fields to the DType class, this looks like
+ * a typical instance definition, but with PyHeapTypeObject instead of
+ * only the PyObject_HEAD.
+ * This must only be exposed after very careful consideration, since
+ * it is a fairly complex construct that we may still want to
+ * refactor.
+ */
+ struct PyArray_DTypeMeta_tag {
+ PyHeapTypeObject super;
+
+ /*
+ * Most DTypes will have a singleton default instance, for the
+ * parametric legacy DTypes (bytes, string, void, datetime) this
+ * may be a pointer to the *prototype* instance?
+ */
+ PyArray_Descr *singleton;
+ /*
+ * Is this DType created using the old API? This exists mainly to
+ * allow for assertions in paths specific to wrapping legacy types.
+ */
+ npy_bool legacy;
+ /* The values stored by a parametric datatype depend on its instance */
+ npy_bool parametric;
+ /* whether the DType can be instantiated (i.e. np.dtype cannot) */
+ npy_bool abstract;
+
+ /*
+ * The following fields replicate the most important dtype information.
+ * In the legacy implementation most of these are stored in the
+ * PyArray_Descr struct.
+ */
+ /* The type object of the scalar instances (may be NULL?)
*/ + PyTypeObject *scalar_type; + /* kind for this type */ + char kind; + /* unique-character representing this type */ + char type; + /* flags describing data type */ + char flags; + /* number representing this type */ + int type_num; + /* + * Point to the original ArrFuncs. + * NOTE: We could make a copy to detect changes to `f`. + */ + PyArray_ArrFuncs *f; + + /* DType methods, these could be moved into its own struct */ + discover_descr_from_pyobject_function *discover_descr_from_pyobject; + is_known_scalar_type_function *is_known_scalar_type; + default_descr_function *default_descr; + common_dtype_function *common_dtype; + common_dtype_with_value_function *common_dtype_with_value; + common_instance_function *common_instance; + /* + * The casting implementation (ArrayMethod) to convert between two + * instances of this DType, stored explicitly for fast access: + */ + PyObject *within_dtype_castingimpl; + /* + * Dictionary of ArrayMethods representing most possible casts + * (structured and object are exceptions). + * This should potentially become a weak mapping in the future. + */ + PyObject *castingimpls; + }; + +#endif /* NPY_INTERNAL_BUILD */ + + +/* + * Use the keyword NPY_DEPRECATED_INCLUDES to ensure that the header files + * npy_*_*_deprecated_api.h are only included from here and nowhere else. + */ +#ifdef NPY_DEPRECATED_INCLUDES +#error "Do not use the reserved keyword NPY_DEPRECATED_INCLUDES." +#endif +#define NPY_DEPRECATED_INCLUDES +#if !defined(NPY_NO_DEPRECATED_API) || \ + (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION) +#include "npy_1_7_deprecated_api.h" +#endif +/* + * There is no file npy_1_8_deprecated_api.h since there are no additional + * deprecated API features in NumPy 1.8. + * + * Note to maintainers: insert code like the following in future NumPy + * versions. 
+ * + * #if !defined(NPY_NO_DEPRECATED_API) || \ + * (NPY_NO_DEPRECATED_API < NPY_1_9_API_VERSION) + * #include "npy_1_9_deprecated_api.h" + * #endif + */ +#undef NPY_DEPRECATED_INCLUDES + +#endif /* NPY_ARRAYTYPES_H */ diff --git a/MLPY/Lib/site-packages/numpy/core/include/numpy/noprefix.h b/MLPY/Lib/site-packages/numpy/core/include/numpy/noprefix.h new file mode 100644 index 0000000000000000000000000000000000000000..ecd9a3694b666112afdcae29d5e18f33c353b5a4 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/include/numpy/noprefix.h @@ -0,0 +1,212 @@ +#ifndef NPY_NOPREFIX_H +#define NPY_NOPREFIX_H + +/* + * You can directly include noprefix.h as a backward + * compatibility measure + */ +#ifndef NPY_NO_PREFIX +#include "ndarrayobject.h" +#include "npy_interrupt.h" +#endif + +#define SIGSETJMP NPY_SIGSETJMP +#define SIGLONGJMP NPY_SIGLONGJMP +#define SIGJMP_BUF NPY_SIGJMP_BUF + +#define MAX_DIMS NPY_MAXDIMS + +#define longlong npy_longlong +#define ulonglong npy_ulonglong +#define Bool npy_bool +#define longdouble npy_longdouble +#define byte npy_byte + +#ifndef _BSD_SOURCE +#define ushort npy_ushort +#define uint npy_uint +#define ulong npy_ulong +#endif + +#define ubyte npy_ubyte +#define ushort npy_ushort +#define uint npy_uint +#define ulong npy_ulong +#define cfloat npy_cfloat +#define cdouble npy_cdouble +#define clongdouble npy_clongdouble +#define Int8 npy_int8 +#define UInt8 npy_uint8 +#define Int16 npy_int16 +#define UInt16 npy_uint16 +#define Int32 npy_int32 +#define UInt32 npy_uint32 +#define Int64 npy_int64 +#define UInt64 npy_uint64 +#define Int128 npy_int128 +#define UInt128 npy_uint128 +#define Int256 npy_int256 +#define UInt256 npy_uint256 +#define Float16 npy_float16 +#define Complex32 npy_complex32 +#define Float32 npy_float32 +#define Complex64 npy_complex64 +#define Float64 npy_float64 +#define Complex128 npy_complex128 +#define Float80 npy_float80 +#define Complex160 npy_complex160 +#define Float96 npy_float96 +#define Complex192 npy_complex192 +#define Float128 npy_float128 +#define Complex256 npy_complex256 +#define intp npy_intp +#define uintp npy_uintp +#define datetime npy_datetime +#define timedelta npy_timedelta + +#define SIZEOF_LONGLONG NPY_SIZEOF_LONGLONG +#define SIZEOF_INTP NPY_SIZEOF_INTP +#define SIZEOF_UINTP NPY_SIZEOF_UINTP +#define SIZEOF_HALF NPY_SIZEOF_HALF +#define SIZEOF_LONGDOUBLE NPY_SIZEOF_LONGDOUBLE +#define SIZEOF_DATETIME NPY_SIZEOF_DATETIME +#define SIZEOF_TIMEDELTA NPY_SIZEOF_TIMEDELTA + +#define LONGLONG_FMT NPY_LONGLONG_FMT +#define ULONGLONG_FMT NPY_ULONGLONG_FMT +#define LONGLONG_SUFFIX NPY_LONGLONG_SUFFIX +#define ULONGLONG_SUFFIX NPY_ULONGLONG_SUFFIX + +#define MAX_INT8 127 +#define MIN_INT8 -128 +#define MAX_UINT8 255 +#define MAX_INT16 32767 +#define MIN_INT16 -32768 +#define MAX_UINT16 65535 +#define MAX_INT32 2147483647 +#define MIN_INT32 (-MAX_INT32 - 1) +#define MAX_UINT32 4294967295U +#define MAX_INT64 LONGLONG_SUFFIX(9223372036854775807) +#define MIN_INT64 (-MAX_INT64 - LONGLONG_SUFFIX(1)) +#define MAX_UINT64 ULONGLONG_SUFFIX(18446744073709551615) +#define MAX_INT128 LONGLONG_SUFFIX(85070591730234615865843651857942052864) +#define MIN_INT128 (-MAX_INT128 - LONGLONG_SUFFIX(1)) +#define MAX_UINT128 ULONGLONG_SUFFIX(170141183460469231731687303715884105728) +#define MAX_INT256 LONGLONG_SUFFIX(57896044618658097711785492504343953926634992332820282019728792003956564819967) +#define MIN_INT256 (-MAX_INT256 - LONGLONG_SUFFIX(1)) +#define MAX_UINT256 
ULONGLONG_SUFFIX(115792089237316195423570985008687907853269984665640564039457584007913129639935) + +#define MAX_BYTE NPY_MAX_BYTE +#define MIN_BYTE NPY_MIN_BYTE +#define MAX_UBYTE NPY_MAX_UBYTE +#define MAX_SHORT NPY_MAX_SHORT +#define MIN_SHORT NPY_MIN_SHORT +#define MAX_USHORT NPY_MAX_USHORT +#define MAX_INT NPY_MAX_INT +#define MIN_INT NPY_MIN_INT +#define MAX_UINT NPY_MAX_UINT +#define MAX_LONG NPY_MAX_LONG +#define MIN_LONG NPY_MIN_LONG +#define MAX_ULONG NPY_MAX_ULONG +#define MAX_LONGLONG NPY_MAX_LONGLONG +#define MIN_LONGLONG NPY_MIN_LONGLONG +#define MAX_ULONGLONG NPY_MAX_ULONGLONG +#define MIN_DATETIME NPY_MIN_DATETIME +#define MAX_DATETIME NPY_MAX_DATETIME +#define MIN_TIMEDELTA NPY_MIN_TIMEDELTA +#define MAX_TIMEDELTA NPY_MAX_TIMEDELTA + +#define BITSOF_BOOL NPY_BITSOF_BOOL +#define BITSOF_CHAR NPY_BITSOF_CHAR +#define BITSOF_SHORT NPY_BITSOF_SHORT +#define BITSOF_INT NPY_BITSOF_INT +#define BITSOF_LONG NPY_BITSOF_LONG +#define BITSOF_LONGLONG NPY_BITSOF_LONGLONG +#define BITSOF_HALF NPY_BITSOF_HALF +#define BITSOF_FLOAT NPY_BITSOF_FLOAT +#define BITSOF_DOUBLE NPY_BITSOF_DOUBLE +#define BITSOF_LONGDOUBLE NPY_BITSOF_LONGDOUBLE +#define BITSOF_DATETIME NPY_BITSOF_DATETIME +#define BITSOF_TIMEDELTA NPY_BITSOF_TIMEDELTA + +#define _pya_malloc PyArray_malloc +#define _pya_free PyArray_free +#define _pya_realloc PyArray_realloc + +#define BEGIN_THREADS_DEF NPY_BEGIN_THREADS_DEF +#define BEGIN_THREADS NPY_BEGIN_THREADS +#define END_THREADS NPY_END_THREADS +#define ALLOW_C_API_DEF NPY_ALLOW_C_API_DEF +#define ALLOW_C_API NPY_ALLOW_C_API +#define DISABLE_C_API NPY_DISABLE_C_API + +#define PY_FAIL NPY_FAIL +#define PY_SUCCEED NPY_SUCCEED + +#ifndef TRUE +#define TRUE NPY_TRUE +#endif + +#ifndef FALSE +#define FALSE NPY_FALSE +#endif + +#define LONGDOUBLE_FMT NPY_LONGDOUBLE_FMT + +#define CONTIGUOUS NPY_CONTIGUOUS +#define C_CONTIGUOUS NPY_C_CONTIGUOUS +#define FORTRAN NPY_FORTRAN +#define F_CONTIGUOUS NPY_F_CONTIGUOUS +#define OWNDATA NPY_OWNDATA +#define FORCECAST NPY_FORCECAST +#define ENSURECOPY NPY_ENSURECOPY +#define ENSUREARRAY NPY_ENSUREARRAY +#define ELEMENTSTRIDES NPY_ELEMENTSTRIDES +#define ALIGNED NPY_ALIGNED +#define NOTSWAPPED NPY_NOTSWAPPED +#define WRITEABLE NPY_WRITEABLE +#define UPDATEIFCOPY NPY_UPDATEIFCOPY +#define WRITEBACKIFCOPY NPY_ARRAY_WRITEBACKIFCOPY +#define ARR_HAS_DESCR NPY_ARR_HAS_DESCR +#define BEHAVED NPY_BEHAVED +#define BEHAVED_NS NPY_BEHAVED_NS +#define CARRAY NPY_CARRAY +#define CARRAY_RO NPY_CARRAY_RO +#define FARRAY NPY_FARRAY +#define FARRAY_RO NPY_FARRAY_RO +#define DEFAULT NPY_DEFAULT +#define IN_ARRAY NPY_IN_ARRAY +#define OUT_ARRAY NPY_OUT_ARRAY +#define INOUT_ARRAY NPY_INOUT_ARRAY +#define IN_FARRAY NPY_IN_FARRAY +#define OUT_FARRAY NPY_OUT_FARRAY +#define INOUT_FARRAY NPY_INOUT_FARRAY +#define UPDATE_ALL NPY_UPDATE_ALL + +#define OWN_DATA NPY_OWNDATA +#define BEHAVED_FLAGS NPY_BEHAVED +#define BEHAVED_FLAGS_NS NPY_BEHAVED_NS +#define CARRAY_FLAGS_RO NPY_CARRAY_RO +#define CARRAY_FLAGS NPY_CARRAY +#define FARRAY_FLAGS NPY_FARRAY +#define FARRAY_FLAGS_RO NPY_FARRAY_RO +#define DEFAULT_FLAGS NPY_DEFAULT +#define UPDATE_ALL_FLAGS NPY_UPDATE_ALL_FLAGS + +#ifndef MIN +#define MIN PyArray_MIN +#endif +#ifndef MAX +#define MAX PyArray_MAX +#endif +#define MAX_INTP NPY_MAX_INTP +#define MIN_INTP NPY_MIN_INTP +#define MAX_UINTP NPY_MAX_UINTP +#define INTP_FMT NPY_INTP_FMT + +#ifndef PYPY_VERSION +#define REFCOUNT PyArray_REFCOUNT +#define MAX_ELSIZE NPY_MAX_ELSIZE +#endif + +#endif diff --git 
a/MLPY/Lib/site-packages/numpy/core/include/numpy/npy_1_7_deprecated_api.h b/MLPY/Lib/site-packages/numpy/core/include/numpy/npy_1_7_deprecated_api.h new file mode 100644 index 0000000000000000000000000000000000000000..e011679ed5d730a79dc61889ada1173e25b07e60 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/include/numpy/npy_1_7_deprecated_api.h @@ -0,0 +1,125 @@ +#ifndef _NPY_1_7_DEPRECATED_API_H +#define _NPY_1_7_DEPRECATED_API_H + +#ifndef NPY_DEPRECATED_INCLUDES +#error "Should never include npy_*_*_deprecated_api directly." +#endif + +/* Emit a warning if the user did not specifically request the old API */ +#ifndef NPY_NO_DEPRECATED_API +#if defined(_WIN32) +#define _WARN___STR2__(x) #x +#define _WARN___STR1__(x) _WARN___STR2__(x) +#define _WARN___LOC__ __FILE__ "(" _WARN___STR1__(__LINE__) ") : Warning Msg: " +#pragma message(_WARN___LOC__"Using deprecated NumPy API, disable it with " \ + "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION") +#else +#warning "Using deprecated NumPy API, disable it with " \ + "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION" +#endif +#endif + +/* + * This header exists to collect all dangerous/deprecated NumPy API + * as of NumPy 1.7. + * + * This is an attempt to remove bad API, the proliferation of macros, + * and namespace pollution currently produced by the NumPy headers. + */ + +/* These array flags are deprecated as of NumPy 1.7 */ +#define NPY_CONTIGUOUS NPY_ARRAY_C_CONTIGUOUS +#define NPY_FORTRAN NPY_ARRAY_F_CONTIGUOUS + +/* + * The consistent NPY_ARRAY_* names which don't pollute the NPY_* + * namespace were added in NumPy 1.7. + * + * These versions of the carray flags are deprecated, but + * probably should only be removed after two releases instead of one. + */ +#define NPY_C_CONTIGUOUS NPY_ARRAY_C_CONTIGUOUS +#define NPY_F_CONTIGUOUS NPY_ARRAY_F_CONTIGUOUS +#define NPY_OWNDATA NPY_ARRAY_OWNDATA +#define NPY_FORCECAST NPY_ARRAY_FORCECAST +#define NPY_ENSURECOPY NPY_ARRAY_ENSURECOPY +#define NPY_ENSUREARRAY NPY_ARRAY_ENSUREARRAY +#define NPY_ELEMENTSTRIDES NPY_ARRAY_ELEMENTSTRIDES +#define NPY_ALIGNED NPY_ARRAY_ALIGNED +#define NPY_NOTSWAPPED NPY_ARRAY_NOTSWAPPED +#define NPY_WRITEABLE NPY_ARRAY_WRITEABLE +#define NPY_UPDATEIFCOPY NPY_ARRAY_UPDATEIFCOPY +#define NPY_BEHAVED NPY_ARRAY_BEHAVED +#define NPY_BEHAVED_NS NPY_ARRAY_BEHAVED_NS +#define NPY_CARRAY NPY_ARRAY_CARRAY +#define NPY_CARRAY_RO NPY_ARRAY_CARRAY_RO +#define NPY_FARRAY NPY_ARRAY_FARRAY +#define NPY_FARRAY_RO NPY_ARRAY_FARRAY_RO +#define NPY_DEFAULT NPY_ARRAY_DEFAULT +#define NPY_IN_ARRAY NPY_ARRAY_IN_ARRAY +#define NPY_OUT_ARRAY NPY_ARRAY_OUT_ARRAY +#define NPY_INOUT_ARRAY NPY_ARRAY_INOUT_ARRAY +#define NPY_IN_FARRAY NPY_ARRAY_IN_FARRAY +#define NPY_OUT_FARRAY NPY_ARRAY_OUT_FARRAY +#define NPY_INOUT_FARRAY NPY_ARRAY_INOUT_FARRAY +#define NPY_UPDATE_ALL NPY_ARRAY_UPDATE_ALL + +/* This way of accessing the default type is deprecated as of NumPy 1.7 */ +#define PyArray_DEFAULT NPY_DEFAULT_TYPE + +/* These DATETIME bits aren't used internally */ +#define PyDataType_GetDatetimeMetaData(descr) \ + ((descr->metadata == NULL) ? NULL : \ + ((PyArray_DatetimeMetaData *)(PyCapsule_GetPointer( \ + PyDict_GetItemString( \ + descr->metadata, NPY_METADATA_DTSTR), NULL)))) + +/* + * Deprecated as of NumPy 1.7, this kind of shortcut doesn't + * belong in the public API. + */ +#define NPY_AO PyArrayObject + +/* + * Deprecated as of NumPy 1.7, an all-lowercase macro doesn't + * belong in the public API. 
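 *
 * [Editorial aside, not part of the upstream header: what migrating off
 * the deprecated aliases defined earlier in this file looks like in user
 * code. Both spellings expand to the same flag bits; the helper name
 * `as_behaved_double_array` is an assumption made for illustration.]
 */

static PyArrayObject *
as_behaved_double_array(PyObject *obj)
{
    /* Previously written as PyArray_FROM_OTF(obj, NPY_DOUBLE, NPY_IN_ARRAY);
     * NPY_ARRAY_IN_ARRAY is the NumPy >= 1.7 spelling of the same request. */
    return (PyArrayObject *)PyArray_FROM_OTF(
            obj, NPY_DOUBLE, NPY_ARRAY_IN_ARRAY);
}

/*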
+ */
+#define fortran fortran_
+
+/*
+ * Deprecated as of NumPy 1.7, as it is a namespace-polluting
+ * macro.
+ */
+#define FORTRAN_IF PyArray_FORTRAN_IF
+
+/* Deprecated as of NumPy 1.7, datetime64 uses c_metadata instead */
+#define NPY_METADATA_DTSTR "__timeunit__"
+
+/*
+ * Deprecated as of NumPy 1.7.
+ * The reasoning:
+ * - These are for datetime, but there's no datetime "namespace".
+ * - They just turn NPY_STR_<x> into "<x>", which is just
+ * making something simple be indirected.
+ */
+#define NPY_STR_Y "Y"
+#define NPY_STR_M "M"
+#define NPY_STR_W "W"
+#define NPY_STR_D "D"
+#define NPY_STR_h "h"
+#define NPY_STR_m "m"
+#define NPY_STR_s "s"
+#define NPY_STR_ms "ms"
+#define NPY_STR_us "us"
+#define NPY_STR_ns "ns"
+#define NPY_STR_ps "ps"
+#define NPY_STR_fs "fs"
+#define NPY_STR_as "as"
+
+/*
+ * The macros in old_defines.h are deprecated as of NumPy 1.7 and will be
+ * removed in the next major release.
+ */
+#include "old_defines.h"
+
+#endif
diff --git a/MLPY/Lib/site-packages/numpy/core/include/numpy/npy_3kcompat.h b/MLPY/Lib/site-packages/numpy/core/include/numpy/npy_3kcompat.h
new file mode 100644
index 0000000000000000000000000000000000000000..5c87837a2b86c50136df0fda7fdd2a76dda90392
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/core/include/numpy/npy_3kcompat.h
@@ -0,0 +1,595 @@
+/*
+ * This is a convenience header file providing compatibility utilities
+ * for supporting Python 2 and Python 3 in the same code base.
+ *
+ * If you want to use this for your own projects, it's recommended to make a
+ * copy of it. Although the stuff below is unlikely to change, we don't provide
+ * strong backwards compatibility guarantees at the moment.
+ */
+
+#ifndef _NPY_3KCOMPAT_H_
+#define _NPY_3KCOMPAT_H_
+
+#include <Python.h>
+#include <stdio.h>
+
+#ifndef NPY_PY3K
+#define NPY_PY3K 1
+#endif
+
+#include "numpy/npy_common.h"
+#include "numpy/ndarrayobject.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * PyInt -> PyLong
+ */
+
+
+/*
+ * This is a renamed copy of the Python non-limited API function _PyLong_AsInt. It is
+ * included here because it is missing from the PyPy API. It completes the PyLong_As*
+ * group of functions and can be useful in replacing PyInt_Check.
+ */
+static NPY_INLINE int
+Npy__PyLong_AsInt(PyObject *obj)
+{
+ int overflow;
+ long result = PyLong_AsLongAndOverflow(obj, &overflow);
+
+ /* INT_MAX and INT_MIN are defined in Python.h */
+ if (overflow || result > INT_MAX || result < INT_MIN) {
+ /* XXX: could be cute and give a different
+ message for overflow == -1 */
+ PyErr_SetString(PyExc_OverflowError,
+ "Python int too large to convert to C int");
+ return -1;
+ }
+ return (int)result;
+}
+
+
+#if defined(NPY_PY3K)
+/* Return True only if the long fits in a C long */
+static NPY_INLINE int PyInt_Check(PyObject *op) {
+ int overflow = 0;
+ if (!PyLong_Check(op)) {
+ return 0;
+ }
+ PyLong_AsLongAndOverflow(op, &overflow);
+ return (overflow == 0);
+}
+
+
+#define PyInt_FromLong PyLong_FromLong
+#define PyInt_AsLong PyLong_AsLong
+#define PyInt_AS_LONG PyLong_AsLong
+#define PyInt_AsSsize_t PyLong_AsSsize_t
+#define PyNumber_Int PyNumber_Long
+
+/* NOTE:
+ *
+ * Since the PyLong type is very different from the fixed-range PyInt,
+ * we don't define PyInt_Type -> PyLong_Type.
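 *
 * [Editorial aside, not part of the upstream header: a minimal sketch of
 * how the shim above is meant to be used. Under Python 3, PyInt_Check
 * accepts only ints that fit in a C long, matching what Python 2 era
 * callers assumed. The helper name `get_small_int` is an assumption made
 * for illustration.]
 */

static long
get_small_int(PyObject *op, long fallback)
{
    if (PyInt_Check(op)) {
        /* Safe: the overflow case was already ruled out above. */
        return PyInt_AsLong(op);  /* an alias for PyLong_AsLong here */
    }
    return fallback;
}

/*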
+ */ +#endif /* NPY_PY3K */ + +/* Py3 changes PySlice_GetIndicesEx' first argument's type to PyObject* */ +#ifdef NPY_PY3K +# define NpySlice_GetIndicesEx PySlice_GetIndicesEx +#else +# define NpySlice_GetIndicesEx(op, nop, start, end, step, slicelength) \ + PySlice_GetIndicesEx((PySliceObject *)op, nop, start, end, step, slicelength) +#endif + +#if PY_VERSION_HEX < 0x030900a4 + /* Introduced in https://github.com/python/cpython/commit/d2ec81a8c99796b51fb8c49b77a7fe369863226f */ + #define Py_SET_TYPE(obj, type) ((Py_TYPE(obj) = (type)), (void)0) + /* Introduced in https://github.com/python/cpython/commit/b10dc3e7a11fcdb97e285882eba6da92594f90f9 */ + #define Py_SET_SIZE(obj, size) ((Py_SIZE(obj) = (size)), (void)0) + /* Introduced in https://github.com/python/cpython/commit/c86a11221df7e37da389f9c6ce6e47ea22dc44ff */ + #define Py_SET_REFCNT(obj, refcnt) ((Py_REFCNT(obj) = (refcnt)), (void)0) +#endif + + +#define Npy_EnterRecursiveCall(x) Py_EnterRecursiveCall(x) + +/* Py_SETREF was added in 3.5.2, and only if Py_LIMITED_API is absent */ +#if PY_VERSION_HEX < 0x03050200 + #define Py_SETREF(op, op2) \ + do { \ + PyObject *_py_tmp = (PyObject *)(op); \ + (op) = (op2); \ + Py_DECREF(_py_tmp); \ + } while (0) +#endif + +/* introduced in https://github.com/python/cpython/commit/a24107b04c1277e3c1105f98aff5bfa3a98b33a0 */ +#if PY_VERSION_HEX < 0x030800A3 + static NPY_INLINE PyObject * + _PyDict_GetItemStringWithError(PyObject *v, const char *key) + { + PyObject *kv, *rv; + kv = PyUnicode_FromString(key); + if (kv == NULL) { + return NULL; + } + rv = PyDict_GetItemWithError(v, kv); + Py_DECREF(kv); + return rv; + } +#endif + +/* + * PyString -> PyBytes + */ + +#if defined(NPY_PY3K) + +#define PyString_Type PyBytes_Type +#define PyString_Check PyBytes_Check +#define PyStringObject PyBytesObject +#define PyString_FromString PyBytes_FromString +#define PyString_FromStringAndSize PyBytes_FromStringAndSize +#define PyString_AS_STRING PyBytes_AS_STRING +#define PyString_AsStringAndSize PyBytes_AsStringAndSize +#define PyString_FromFormat PyBytes_FromFormat +#define PyString_Concat PyBytes_Concat +#define PyString_ConcatAndDel PyBytes_ConcatAndDel +#define PyString_AsString PyBytes_AsString +#define PyString_GET_SIZE PyBytes_GET_SIZE +#define PyString_Size PyBytes_Size + +#define PyUString_Type PyUnicode_Type +#define PyUString_Check PyUnicode_Check +#define PyUStringObject PyUnicodeObject +#define PyUString_FromString PyUnicode_FromString +#define PyUString_FromStringAndSize PyUnicode_FromStringAndSize +#define PyUString_FromFormat PyUnicode_FromFormat +#define PyUString_Concat PyUnicode_Concat2 +#define PyUString_ConcatAndDel PyUnicode_ConcatAndDel +#define PyUString_GET_SIZE PyUnicode_GET_SIZE +#define PyUString_Size PyUnicode_Size +#define PyUString_InternFromString PyUnicode_InternFromString +#define PyUString_Format PyUnicode_Format + +#define PyBaseString_Check(obj) (PyUnicode_Check(obj)) + +#else + +#define PyBytes_Type PyString_Type +#define PyBytes_Check PyString_Check +#define PyBytesObject PyStringObject +#define PyBytes_FromString PyString_FromString +#define PyBytes_FromStringAndSize PyString_FromStringAndSize +#define PyBytes_AS_STRING PyString_AS_STRING +#define PyBytes_AsStringAndSize PyString_AsStringAndSize +#define PyBytes_FromFormat PyString_FromFormat +#define PyBytes_Concat PyString_Concat +#define PyBytes_ConcatAndDel PyString_ConcatAndDel +#define PyBytes_AsString PyString_AsString +#define PyBytes_GET_SIZE PyString_GET_SIZE +#define PyBytes_Size PyString_Size + +#define 
PyUString_Type PyString_Type +#define PyUString_Check PyString_Check +#define PyUStringObject PyStringObject +#define PyUString_FromString PyString_FromString +#define PyUString_FromStringAndSize PyString_FromStringAndSize +#define PyUString_FromFormat PyString_FromFormat +#define PyUString_Concat PyString_Concat +#define PyUString_ConcatAndDel PyString_ConcatAndDel +#define PyUString_GET_SIZE PyString_GET_SIZE +#define PyUString_Size PyString_Size +#define PyUString_InternFromString PyString_InternFromString +#define PyUString_Format PyString_Format + +#define PyBaseString_Check(obj) (PyBytes_Check(obj) || PyUnicode_Check(obj)) + +#endif /* NPY_PY3K */ + + +static NPY_INLINE void +PyUnicode_ConcatAndDel(PyObject **left, PyObject *right) +{ + Py_SETREF(*left, PyUnicode_Concat(*left, right)); + Py_DECREF(right); +} + +static NPY_INLINE void +PyUnicode_Concat2(PyObject **left, PyObject *right) +{ + Py_SETREF(*left, PyUnicode_Concat(*left, right)); +} + +/* + * PyFile_* compatibility + */ + +/* + * Get a FILE* handle to the file represented by the Python object + */ +static NPY_INLINE FILE* +npy_PyFile_Dup2(PyObject *file, char *mode, npy_off_t *orig_pos) +{ + int fd, fd2, unbuf; + Py_ssize_t fd2_tmp; + PyObject *ret, *os, *io, *io_raw; + npy_off_t pos; + FILE *handle; + + /* For Python 2 PyFileObject, use PyFile_AsFile */ +#if !defined(NPY_PY3K) + if (PyFile_Check(file)) { + return PyFile_AsFile(file); + } +#endif + + /* Flush first to ensure things end up in the file in the correct order */ + ret = PyObject_CallMethod(file, "flush", ""); + if (ret == NULL) { + return NULL; + } + Py_DECREF(ret); + fd = PyObject_AsFileDescriptor(file); + if (fd == -1) { + return NULL; + } + + /* + * The handle needs to be dup'd because we have to call fclose + * at the end + */ + os = PyImport_ImportModule("os"); + if (os == NULL) { + return NULL; + } + ret = PyObject_CallMethod(os, "dup", "i", fd); + Py_DECREF(os); + if (ret == NULL) { + return NULL; + } + fd2_tmp = PyNumber_AsSsize_t(ret, PyExc_IOError); + Py_DECREF(ret); + if (fd2_tmp == -1 && PyErr_Occurred()) { + return NULL; + } + if (fd2_tmp < INT_MIN || fd2_tmp > INT_MAX) { + PyErr_SetString(PyExc_IOError, + "Getting an 'int' from os.dup() failed"); + return NULL; + } + fd2 = (int)fd2_tmp; + + /* Convert to FILE* handle */ +#ifdef _WIN32 + handle = _fdopen(fd2, mode); +#else + handle = fdopen(fd2, mode); +#endif + if (handle == NULL) { + PyErr_SetString(PyExc_IOError, + "Getting a FILE* from a Python file object failed"); + return NULL; + } + + /* Record the original raw file handle position */ + *orig_pos = npy_ftell(handle); + if (*orig_pos == -1) { + /* The io module is needed to determine if buffering is used */ + io = PyImport_ImportModule("io"); + if (io == NULL) { + fclose(handle); + return NULL; + } + /* File object instances of RawIOBase are unbuffered */ + io_raw = PyObject_GetAttrString(io, "RawIOBase"); + Py_DECREF(io); + if (io_raw == NULL) { + fclose(handle); + return NULL; + } + unbuf = PyObject_IsInstance(file, io_raw); + Py_DECREF(io_raw); + if (unbuf == 1) { + /* Succeed if the IO is unbuffered */ + return handle; + } + else { + PyErr_SetString(PyExc_IOError, "obtaining file position failed"); + fclose(handle); + return NULL; + } + } + + /* Seek raw handle to the Python-side position */ + ret = PyObject_CallMethod(file, "tell", ""); + if (ret == NULL) { + fclose(handle); + return NULL; + } + pos = PyLong_AsLongLong(ret); + Py_DECREF(ret); + if (PyErr_Occurred()) { + fclose(handle); + return NULL; + } + if (npy_fseek(handle, pos, 
SEEK_SET) == -1) { + PyErr_SetString(PyExc_IOError, "seeking file failed"); + fclose(handle); + return NULL; + } + return handle; +} + +/* + * Close the dup-ed file handle, and seek the Python one to the current position + */ +static NPY_INLINE int +npy_PyFile_DupClose2(PyObject *file, FILE* handle, npy_off_t orig_pos) +{ + int fd, unbuf; + PyObject *ret, *io, *io_raw; + npy_off_t position; + + /* For Python 2 PyFileObject, do nothing */ +#if !defined(NPY_PY3K) + if (PyFile_Check(file)) { + return 0; + } +#endif + + position = npy_ftell(handle); + + /* Close the FILE* handle */ + fclose(handle); + + /* + * Restore original file handle position, in order to not confuse + * Python-side data structures + */ + fd = PyObject_AsFileDescriptor(file); + if (fd == -1) { + return -1; + } + + if (npy_lseek(fd, orig_pos, SEEK_SET) == -1) { + + /* The io module is needed to determine if buffering is used */ + io = PyImport_ImportModule("io"); + if (io == NULL) { + return -1; + } + /* File object instances of RawIOBase are unbuffered */ + io_raw = PyObject_GetAttrString(io, "RawIOBase"); + Py_DECREF(io); + if (io_raw == NULL) { + return -1; + } + unbuf = PyObject_IsInstance(file, io_raw); + Py_DECREF(io_raw); + if (unbuf == 1) { + /* Succeed if the IO is unbuffered */ + return 0; + } + else { + PyErr_SetString(PyExc_IOError, "seeking file failed"); + return -1; + } + } + + if (position == -1) { + PyErr_SetString(PyExc_IOError, "obtaining file position failed"); + return -1; + } + + /* Seek Python-side handle to the FILE* handle position */ + ret = PyObject_CallMethod(file, "seek", NPY_OFF_T_PYFMT "i", position, 0); + if (ret == NULL) { + return -1; + } + Py_DECREF(ret); + return 0; +} + +static NPY_INLINE int +npy_PyFile_Check(PyObject *file) +{ + int fd; + /* For Python 2, check if it is a PyFileObject */ +#if !defined(NPY_PY3K) + if (PyFile_Check(file)) { + return 1; + } +#endif + fd = PyObject_AsFileDescriptor(file); + if (fd == -1) { + PyErr_Clear(); + return 0; + } + return 1; +} + +static NPY_INLINE PyObject* +npy_PyFile_OpenFile(PyObject *filename, const char *mode) +{ + PyObject *open; + open = PyDict_GetItemString(PyEval_GetBuiltins(), "open"); + if (open == NULL) { + return NULL; + } + return PyObject_CallFunction(open, "Os", filename, mode); +} + +static NPY_INLINE int +npy_PyFile_CloseFile(PyObject *file) +{ + PyObject *ret; + + ret = PyObject_CallMethod(file, "close", NULL); + if (ret == NULL) { + return -1; + } + Py_DECREF(ret); + return 0; +} + + +/* This is a copy of _PyErr_ChainExceptions + */ +static NPY_INLINE void +npy_PyErr_ChainExceptions(PyObject *exc, PyObject *val, PyObject *tb) +{ + if (exc == NULL) + return; + + if (PyErr_Occurred()) { + /* only py3 supports this anyway */ + #ifdef NPY_PY3K + PyObject *exc2, *val2, *tb2; + PyErr_Fetch(&exc2, &val2, &tb2); + PyErr_NormalizeException(&exc, &val, &tb); + if (tb != NULL) { + PyException_SetTraceback(val, tb); + Py_DECREF(tb); + } + Py_DECREF(exc); + PyErr_NormalizeException(&exc2, &val2, &tb2); + PyException_SetContext(val2, val); + PyErr_Restore(exc2, val2, tb2); + #endif + } + else { + PyErr_Restore(exc, val, tb); + } +} + + +/* This is a copy of _PyErr_ChainExceptions, with: + * - a minimal implementation for python 2 + * - __cause__ used instead of __context__ + */ +static NPY_INLINE void +npy_PyErr_ChainExceptionsCause(PyObject *exc, PyObject *val, PyObject *tb) +{ + if (exc == NULL) + return; + + if (PyErr_Occurred()) { + /* only py3 supports this anyway */ + #ifdef NPY_PY3K + PyObject *exc2, *val2, *tb2; + 
PyErr_Fetch(&exc2, &val2, &tb2); + PyErr_NormalizeException(&exc, &val, &tb); + if (tb != NULL) { + PyException_SetTraceback(val, tb); + Py_DECREF(tb); + } + Py_DECREF(exc); + PyErr_NormalizeException(&exc2, &val2, &tb2); + PyException_SetCause(val2, val); + PyErr_Restore(exc2, val2, tb2); + #endif + } + else { + PyErr_Restore(exc, val, tb); + } +} + +/* + * PyObject_Cmp + */ +#if defined(NPY_PY3K) +static NPY_INLINE int +PyObject_Cmp(PyObject *i1, PyObject *i2, int *cmp) +{ + int v; + v = PyObject_RichCompareBool(i1, i2, Py_LT); + if (v == 1) { + *cmp = -1; + return 1; + } + else if (v == -1) { + return -1; + } + + v = PyObject_RichCompareBool(i1, i2, Py_GT); + if (v == 1) { + *cmp = 1; + return 1; + } + else if (v == -1) { + return -1; + } + + v = PyObject_RichCompareBool(i1, i2, Py_EQ); + if (v == 1) { + *cmp = 0; + return 1; + } + else { + *cmp = 0; + return -1; + } +} +#endif + +/* + * PyCObject functions adapted to PyCapsules. + * + * The main job here is to get rid of the improved error handling + * of PyCapsules. It's a shame... + */ +static NPY_INLINE PyObject * +NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *)) +{ + PyObject *ret = PyCapsule_New(ptr, NULL, dtor); + if (ret == NULL) { + PyErr_Clear(); + } + return ret; +} + +static NPY_INLINE PyObject * +NpyCapsule_FromVoidPtrAndDesc(void *ptr, void* context, void (*dtor)(PyObject *)) +{ + PyObject *ret = NpyCapsule_FromVoidPtr(ptr, dtor); + if (ret != NULL && PyCapsule_SetContext(ret, context) != 0) { + PyErr_Clear(); + Py_DECREF(ret); + ret = NULL; + } + return ret; +} + +static NPY_INLINE void * +NpyCapsule_AsVoidPtr(PyObject *obj) +{ + void *ret = PyCapsule_GetPointer(obj, NULL); + if (ret == NULL) { + PyErr_Clear(); + } + return ret; +} + +static NPY_INLINE void * +NpyCapsule_GetDesc(PyObject *obj) +{ + return PyCapsule_GetContext(obj); +} + +static NPY_INLINE int +NpyCapsule_Check(PyObject *ptr) +{ + return PyCapsule_CheckExact(ptr); +} + +#ifdef __cplusplus +} +#endif + + +#endif /* _NPY_3KCOMPAT_H_ */ diff --git a/MLPY/Lib/site-packages/numpy/core/include/numpy/npy_common.h b/MLPY/Lib/site-packages/numpy/core/include/numpy/npy_common.h new file mode 100644 index 0000000000000000000000000000000000000000..4928d6ba3ef75fdb3f1005cc615b2fc93c1aaee6 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/include/numpy/npy_common.h @@ -0,0 +1,1110 @@ +#ifndef _NPY_COMMON_H_ +#define _NPY_COMMON_H_ + +/* need Python.h for npy_intp, npy_uintp */ +#include <Python.h> + +/* numpyconfig.h is auto-generated */ +#include "numpyconfig.h" +#ifdef HAVE_NPY_CONFIG_H +#include <npy_config.h> +#endif + +/* + * using static inline modifiers when defining npy_math functions + * allows the compiler to make optimizations when possible + */ +#ifndef NPY_INLINE_MATH +#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD + #define NPY_INLINE_MATH 1 +#else + #define NPY_INLINE_MATH 0 +#endif +#endif + +/* + * gcc does not unroll even with -O3 + * use with care, unrolling on modern cpus rarely speeds things up + */ +#ifdef HAVE_ATTRIBUTE_OPTIMIZE_UNROLL_LOOPS +#define NPY_GCC_UNROLL_LOOPS \ + __attribute__((optimize("unroll-loops"))) +#else +#define NPY_GCC_UNROLL_LOOPS +#endif + +/* highest gcc optimization level, enabled autovectorizer */ +#ifdef HAVE_ATTRIBUTE_OPTIMIZE_OPT_3 +#define NPY_GCC_OPT_3 __attribute__((optimize("O3"))) +#else +#define NPY_GCC_OPT_3 +#endif + +/* compile target attributes */ +#if defined HAVE_ATTRIBUTE_TARGET_AVX && defined HAVE_LINK_AVX +#define NPY_GCC_TARGET_AVX __attribute__((target("avx"))) +#else +#define NPY_GCC_TARGET_AVX +#endif + 
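+/* + * Illustrative sketch (added note, not part of the upstream header): how a + * compilation unit that includes this header might apply the attributes + * above to a hot loop. The function and every name in it are hypothetical, + * and the block is disabled with #if 0, so it has no effect on builds. + */ +#if 0 +static NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX void +example_sum(double *out, const double *a, const double *b, int n) +{ + int i; + /* with the AVX target attribute, gcc is allowed to vectorize this loop */ + for (i = 0; i < n; i++) { + out[i] = a[i] + b[i]; + } +} +#endif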
+#if defined HAVE_ATTRIBUTE_TARGET_AVX2_WITH_INTRINSICS +#define HAVE_ATTRIBUTE_TARGET_FMA +#define NPY_GCC_TARGET_FMA __attribute__((target("avx2,fma"))) +#endif + +#if defined HAVE_ATTRIBUTE_TARGET_AVX2 && defined HAVE_LINK_AVX2 +#define NPY_GCC_TARGET_AVX2 __attribute__((target("avx2"))) +#else +#define NPY_GCC_TARGET_AVX2 +#endif + +#if defined HAVE_ATTRIBUTE_TARGET_AVX512F && defined HAVE_LINK_AVX512F +#define NPY_GCC_TARGET_AVX512F __attribute__((target("avx512f"))) +#elif defined HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS +#define NPY_GCC_TARGET_AVX512F __attribute__((target("avx512f"))) +#else +#define NPY_GCC_TARGET_AVX512F +#endif + +#if defined HAVE_ATTRIBUTE_TARGET_AVX512_SKX && defined HAVE_LINK_AVX512_SKX +#define NPY_GCC_TARGET_AVX512_SKX __attribute__((target("avx512f,avx512dq,avx512vl,avx512bw,avx512cd"))) +#elif defined HAVE_ATTRIBUTE_TARGET_AVX512_SKX_WITH_INTRINSICS +#define NPY_GCC_TARGET_AVX512_SKX __attribute__((target("avx512f,avx512dq,avx512vl,avx512bw,avx512cd"))) +#else +#define NPY_GCC_TARGET_AVX512_SKX +#endif +/* + * mark an argument (starting from 1) that must not be NULL and is not checked + * DO NOT USE IF FUNCTION CHECKS FOR NULL!! the compiler will remove the check + */ +#ifdef HAVE_ATTRIBUTE_NONNULL +#define NPY_GCC_NONNULL(n) __attribute__((nonnull(n))) +#else +#define NPY_GCC_NONNULL(n) +#endif + +#if defined HAVE_XMMINTRIN_H && defined HAVE__MM_LOAD_PS +#define NPY_HAVE_SSE_INTRINSICS +#endif + +#if defined HAVE_EMMINTRIN_H && defined HAVE__MM_LOAD_PD +#define NPY_HAVE_SSE2_INTRINSICS +#endif + +#if defined HAVE_IMMINTRIN_H && defined HAVE_LINK_AVX2 +#define NPY_HAVE_AVX2_INTRINSICS +#endif + +#if defined HAVE_IMMINTRIN_H && defined HAVE_LINK_AVX512F +#define NPY_HAVE_AVX512F_INTRINSICS +#endif +/* + * give a hint to the compiler which branch is more likely or unlikely + * to occur, e.g. rare error cases: + * + * if (NPY_UNLIKELY(failure == 0)) + * return NULL; + * + * the double !! is to cast the expression (e.g. NULL) to a boolean required by + * the intrinsic + */ +#ifdef HAVE___BUILTIN_EXPECT +#define NPY_LIKELY(x) __builtin_expect(!!(x), 1) +#define NPY_UNLIKELY(x) __builtin_expect(!!(x), 0) +#else +#define NPY_LIKELY(x) (x) +#define NPY_UNLIKELY(x) (x) +#endif + +#ifdef HAVE___BUILTIN_PREFETCH +/* unlike _mm_prefetch also works on non-x86 */ +#define NPY_PREFETCH(x, rw, loc) __builtin_prefetch((x), (rw), (loc)) +#else +#ifdef HAVE__MM_PREFETCH +/* _MM_HINT_ET[01] (rw = 1) unsupported, only available in gcc >= 4.9 */ +#define NPY_PREFETCH(x, rw, loc) _mm_prefetch((x), loc == 0 ? _MM_HINT_NTA : \ + (loc == 1 ? _MM_HINT_T2 : \ + (loc == 2 ? _MM_HINT_T1 : \ + (loc == 3 ? 
_MM_HINT_T0 : -1)))) +#else +#define NPY_PREFETCH(x, rw,loc) +#endif +#endif + +#if defined(_MSC_VER) + #define NPY_INLINE __inline +#elif defined(__GNUC__) + #if defined(__STRICT_ANSI__) + #define NPY_INLINE __inline__ + #else + #define NPY_INLINE inline + #endif +#else + #define NPY_INLINE +#endif + +#ifdef _MSC_VER + #define NPY_FINLINE static __forceinline +#elif defined(__GNUC__) + #define NPY_FINLINE static NPY_INLINE __attribute__((always_inline)) +#else + #define NPY_FINLINE static +#endif + +#ifdef HAVE___THREAD + #define NPY_TLS __thread +#else + #ifdef HAVE___DECLSPEC_THREAD_ + #define NPY_TLS __declspec(thread) + #else + #define NPY_TLS + #endif +#endif + +#ifdef WITH_CPYCHECKER_RETURNS_BORROWED_REF_ATTRIBUTE + #define NPY_RETURNS_BORROWED_REF \ + __attribute__((cpychecker_returns_borrowed_ref)) +#else + #define NPY_RETURNS_BORROWED_REF +#endif + +#ifdef WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE + #define NPY_STEALS_REF_TO_ARG(n) \ + __attribute__((cpychecker_steals_reference_to_arg(n))) +#else + #define NPY_STEALS_REF_TO_ARG(n) +#endif + +/* 64 bit file position support, also on win-amd64. Ticket #1660 */ +#if defined(_MSC_VER) && defined(_WIN64) && (_MSC_VER > 1400) || \ + defined(__MINGW32__) || defined(__MINGW64__) + #include <io.h> + +/* mingw based on 3.4.5 has lseek but not ftell/fseek */ +#if defined(__MINGW32__) || defined(__MINGW64__) +extern int __cdecl _fseeki64(FILE *, long long, int); +extern long long __cdecl _ftelli64(FILE *); +#endif + + #define npy_fseek _fseeki64 + #define npy_ftell _ftelli64 + #define npy_lseek _lseeki64 + #define npy_off_t npy_int64 + + #if NPY_SIZEOF_INT == 8 + #define NPY_OFF_T_PYFMT "i" + #elif NPY_SIZEOF_LONG == 8 + #define NPY_OFF_T_PYFMT "l" + #elif NPY_SIZEOF_LONGLONG == 8 + #define NPY_OFF_T_PYFMT "L" + #else + #error Unsupported size for type off_t + #endif +#else +#ifdef HAVE_FSEEKO + #define npy_fseek fseeko +#else + #define npy_fseek fseek +#endif +#ifdef HAVE_FTELLO + #define npy_ftell ftello +#else + #define npy_ftell ftell +#endif + #include <sys/types.h> + #define npy_lseek lseek + #define npy_off_t off_t + + #if NPY_SIZEOF_OFF_T == NPY_SIZEOF_SHORT + #define NPY_OFF_T_PYFMT "h" + #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_INT + #define NPY_OFF_T_PYFMT "i" + #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_LONG + #define NPY_OFF_T_PYFMT "l" + #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_LONGLONG + #define NPY_OFF_T_PYFMT "L" + #else + #error Unsupported size for type off_t + #endif +#endif + +/* enums for detected endianness */ +enum { + NPY_CPU_UNKNOWN_ENDIAN, + NPY_CPU_LITTLE, + NPY_CPU_BIG +}; + +/* + * This is to typedef npy_intp to the appropriate pointer size for this + * platform. Py_intptr_t, Py_uintptr_t are defined in pyport.h. + */ +typedef Py_intptr_t npy_intp; +typedef Py_uintptr_t npy_uintp; + +/* + * Define sizes that were not defined in numpyconfig.h. + */ +#define NPY_SIZEOF_CHAR 1 +#define NPY_SIZEOF_BYTE 1 +#define NPY_SIZEOF_DATETIME 8 +#define NPY_SIZEOF_TIMEDELTA 8 +#define NPY_SIZEOF_INTP NPY_SIZEOF_PY_INTPTR_T +#define NPY_SIZEOF_UINTP NPY_SIZEOF_PY_INTPTR_T +#define NPY_SIZEOF_HALF 2 +#define NPY_SIZEOF_CFLOAT NPY_SIZEOF_COMPLEX_FLOAT +#define NPY_SIZEOF_CDOUBLE NPY_SIZEOF_COMPLEX_DOUBLE +#define NPY_SIZEOF_CLONGDOUBLE NPY_SIZEOF_COMPLEX_LONGDOUBLE + +#ifdef constchar +#undef constchar +#endif + +#define NPY_SSIZE_T_PYFMT "n" +#define constchar char + +/* NPY_INTP_FMT Note: + * Unlike the other NPY_*_FMT macros, which are used with PyOS_snprintf, + * NPY_INTP_FMT is used with PyErr_Format and PyUnicode_FromFormat.
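+ * For example (illustration added here, not upstream text): + * PyErr_Format(PyExc_IndexError, + * "index %" NPY_INTP_FMT " is out of bounds", (npy_intp)i);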
Those + * functions use different formatting codes that are portably specified + * according to the Python documentation. See issue gh-2388. + */ +#if NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_INT + #define NPY_INTP NPY_INT + #define NPY_UINTP NPY_UINT + #define PyIntpArrType_Type PyIntArrType_Type + #define PyUIntpArrType_Type PyUIntArrType_Type + #define NPY_MAX_INTP NPY_MAX_INT + #define NPY_MIN_INTP NPY_MIN_INT + #define NPY_MAX_UINTP NPY_MAX_UINT + #define NPY_INTP_FMT "d" +#elif NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_LONG + #define NPY_INTP NPY_LONG + #define NPY_UINTP NPY_ULONG + #define PyIntpArrType_Type PyLongArrType_Type + #define PyUIntpArrType_Type PyULongArrType_Type + #define NPY_MAX_INTP NPY_MAX_LONG + #define NPY_MIN_INTP NPY_MIN_LONG + #define NPY_MAX_UINTP NPY_MAX_ULONG + #define NPY_INTP_FMT "ld" +#elif defined(PY_LONG_LONG) && (NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_LONGLONG) + #define NPY_INTP NPY_LONGLONG + #define NPY_UINTP NPY_ULONGLONG + #define PyIntpArrType_Type PyLongLongArrType_Type + #define PyUIntpArrType_Type PyULongLongArrType_Type + #define NPY_MAX_INTP NPY_MAX_LONGLONG + #define NPY_MIN_INTP NPY_MIN_LONGLONG + #define NPY_MAX_UINTP NPY_MAX_ULONGLONG + #define NPY_INTP_FMT "lld" +#endif + +/* + * We can only use C99 formats for npy_intp if it is the same as + * intptr_t, hence the condition on HAVE_UINTPTR_T + */ +#if (NPY_USE_C99_FORMATS) == 1 \ + && (defined HAVE_UINTPTR_T) \ + && (defined HAVE_INTTYPES_H) + #include <inttypes.h> + #undef NPY_INTP_FMT + #define NPY_INTP_FMT PRIdPTR +#endif + + +/* + * Some platforms don't define bool, long long, or long double. + * Handle that here. + */ +#define NPY_BYTE_FMT "hhd" +#define NPY_UBYTE_FMT "hhu" +#define NPY_SHORT_FMT "hd" +#define NPY_USHORT_FMT "hu" +#define NPY_INT_FMT "d" +#define NPY_UINT_FMT "u" +#define NPY_LONG_FMT "ld" +#define NPY_ULONG_FMT "lu" +#define NPY_HALF_FMT "g" +#define NPY_FLOAT_FMT "g" +#define NPY_DOUBLE_FMT "g" + + +#ifdef PY_LONG_LONG +typedef PY_LONG_LONG npy_longlong; +typedef unsigned PY_LONG_LONG npy_ulonglong; +# ifdef _MSC_VER +# define NPY_LONGLONG_FMT "I64d" +# define NPY_ULONGLONG_FMT "I64u" +# else +# define NPY_LONGLONG_FMT "lld" +# define NPY_ULONGLONG_FMT "llu" +# endif +# ifdef _MSC_VER +# define NPY_LONGLONG_SUFFIX(x) (x##i64) +# define NPY_ULONGLONG_SUFFIX(x) (x##Ui64) +# else +# define NPY_LONGLONG_SUFFIX(x) (x##LL) +# define NPY_ULONGLONG_SUFFIX(x) (x##ULL) +# endif +#else +typedef long npy_longlong; +typedef unsigned long npy_ulonglong; +# define NPY_LONGLONG_SUFFIX(x) (x##L) +# define NPY_ULONGLONG_SUFFIX(x) (x##UL) +#endif + + +typedef unsigned char npy_bool; +#define NPY_FALSE 0 +#define NPY_TRUE 1 + + +#if NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE + typedef double npy_longdouble; + #define NPY_LONGDOUBLE_FMT "g" +#else + typedef long double npy_longdouble; + #define NPY_LONGDOUBLE_FMT "Lg" +#endif + +#ifndef Py_USING_UNICODE +#error Must use Python with unicode enabled. +#endif + + +typedef signed char npy_byte; +typedef unsigned char npy_ubyte; +typedef unsigned short npy_ushort; +typedef unsigned int npy_uint; +typedef unsigned long npy_ulong; + +/* These are for completeness */ +typedef char npy_char; +typedef short npy_short; +typedef int npy_int; +typedef long npy_long; +typedef float npy_float; +typedef double npy_double; + +typedef Py_hash_t npy_hash_t; +#define NPY_SIZEOF_HASH_T NPY_SIZEOF_INTP + +/* + * Disabling C99 complex usage: a lot of C code in numpy/scipy rely on being + * able to do .real/.imag. Will have to convert code first.
+ */ +#if 0 +#if defined(NPY_USE_C99_COMPLEX) && defined(NPY_HAVE_COMPLEX_DOUBLE) +typedef complex npy_cdouble; +#else +typedef struct { double real, imag; } npy_cdouble; +#endif + +#if defined(NPY_USE_C99_COMPLEX) && defined(NPY_HAVE_COMPLEX_FLOAT) +typedef complex float npy_cfloat; +#else +typedef struct { float real, imag; } npy_cfloat; +#endif + +#if defined(NPY_USE_C99_COMPLEX) && defined(NPY_HAVE_COMPLEX_LONG_DOUBLE) +typedef complex long double npy_clongdouble; +#else +typedef struct {npy_longdouble real, imag;} npy_clongdouble; +#endif +#endif +#if NPY_SIZEOF_COMPLEX_DOUBLE != 2 * NPY_SIZEOF_DOUBLE +#error npy_cdouble definition is not compatible with C99 complex definition ! \ + Please contact NumPy maintainers and give detailed information about your \ + compiler and platform +#endif +typedef struct { double real, imag; } npy_cdouble; + +#if NPY_SIZEOF_COMPLEX_FLOAT != 2 * NPY_SIZEOF_FLOAT +#error npy_cfloat definition is not compatible with C99 complex definition ! \ + Please contact NumPy maintainers and give detailed information about your \ + compiler and platform +#endif +typedef struct { float real, imag; } npy_cfloat; + +#if NPY_SIZEOF_COMPLEX_LONGDOUBLE != 2 * NPY_SIZEOF_LONGDOUBLE +#error npy_clongdouble definition is not compatible with C99 complex definition ! \ + Please contact NumPy maintainers and give detailed information about your \ + compiler and platform +#endif +typedef struct { npy_longdouble real, imag; } npy_clongdouble; + +/* + * numarray-style bit-width typedefs + */ +#define NPY_MAX_INT8 127 +#define NPY_MIN_INT8 -128 +#define NPY_MAX_UINT8 255 +#define NPY_MAX_INT16 32767 +#define NPY_MIN_INT16 -32768 +#define NPY_MAX_UINT16 65535 +#define NPY_MAX_INT32 2147483647 +#define NPY_MIN_INT32 (-NPY_MAX_INT32 - 1) +#define NPY_MAX_UINT32 4294967295U +#define NPY_MAX_INT64 NPY_LONGLONG_SUFFIX(9223372036854775807) +#define NPY_MIN_INT64 (-NPY_MAX_INT64 - NPY_LONGLONG_SUFFIX(1)) +#define NPY_MAX_UINT64 NPY_ULONGLONG_SUFFIX(18446744073709551615) +#define NPY_MAX_INT128 NPY_LONGLONG_SUFFIX(85070591730234615865843651857942052864) +#define NPY_MIN_INT128 (-NPY_MAX_INT128 - NPY_LONGLONG_SUFFIX(1)) +#define NPY_MAX_UINT128 NPY_ULONGLONG_SUFFIX(170141183460469231731687303715884105728) +#define NPY_MAX_INT256 NPY_LONGLONG_SUFFIX(57896044618658097711785492504343953926634992332820282019728792003956564819967) +#define NPY_MIN_INT256 (-NPY_MAX_INT256 - NPY_LONGLONG_SUFFIX(1)) +#define NPY_MAX_UINT256 NPY_ULONGLONG_SUFFIX(115792089237316195423570985008687907853269984665640564039457584007913129639935) +#define NPY_MIN_DATETIME NPY_MIN_INT64 +#define NPY_MAX_DATETIME NPY_MAX_INT64 +#define NPY_MIN_TIMEDELTA NPY_MIN_INT64 +#define NPY_MAX_TIMEDELTA NPY_MAX_INT64 + + /* Need to find the number of bits for each type and + make definitions accordingly. + + C states that sizeof(char) == 1 by definition + + So, just using the sizeof keyword won't help. + + It also looks like Python itself uses sizeof(char) quite a + bit, which by definition should be 1 all the time. 
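+ + (Added note, not upstream text: sizeof reports storage in bytes, not + bits, which is why the definitions below turn to CHAR_BIT from limits.h.)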
+ + Idea: Make Use of CHAR_BIT which should tell us how many + BITS per CHARACTER + */ + + /* Include platform definitions -- These are in the C89/90 standard */ +#include <limits.h> +#define NPY_MAX_BYTE SCHAR_MAX +#define NPY_MIN_BYTE SCHAR_MIN +#define NPY_MAX_UBYTE UCHAR_MAX +#define NPY_MAX_SHORT SHRT_MAX +#define NPY_MIN_SHORT SHRT_MIN +#define NPY_MAX_USHORT USHRT_MAX +#define NPY_MAX_INT INT_MAX +#ifndef INT_MIN +#define INT_MIN (-INT_MAX - 1) +#endif +#define NPY_MIN_INT INT_MIN +#define NPY_MAX_UINT UINT_MAX +#define NPY_MAX_LONG LONG_MAX +#define NPY_MIN_LONG LONG_MIN +#define NPY_MAX_ULONG ULONG_MAX + +#define NPY_BITSOF_BOOL (sizeof(npy_bool) * CHAR_BIT) +#define NPY_BITSOF_CHAR CHAR_BIT +#define NPY_BITSOF_BYTE (NPY_SIZEOF_BYTE * CHAR_BIT) +#define NPY_BITSOF_SHORT (NPY_SIZEOF_SHORT * CHAR_BIT) +#define NPY_BITSOF_INT (NPY_SIZEOF_INT * CHAR_BIT) +#define NPY_BITSOF_LONG (NPY_SIZEOF_LONG * CHAR_BIT) +#define NPY_BITSOF_LONGLONG (NPY_SIZEOF_LONGLONG * CHAR_BIT) +#define NPY_BITSOF_INTP (NPY_SIZEOF_INTP * CHAR_BIT) +#define NPY_BITSOF_HALF (NPY_SIZEOF_HALF * CHAR_BIT) +#define NPY_BITSOF_FLOAT (NPY_SIZEOF_FLOAT * CHAR_BIT) +#define NPY_BITSOF_DOUBLE (NPY_SIZEOF_DOUBLE * CHAR_BIT) +#define NPY_BITSOF_LONGDOUBLE (NPY_SIZEOF_LONGDOUBLE * CHAR_BIT) +#define NPY_BITSOF_CFLOAT (NPY_SIZEOF_CFLOAT * CHAR_BIT) +#define NPY_BITSOF_CDOUBLE (NPY_SIZEOF_CDOUBLE * CHAR_BIT) +#define NPY_BITSOF_CLONGDOUBLE (NPY_SIZEOF_CLONGDOUBLE * CHAR_BIT) +#define NPY_BITSOF_DATETIME (NPY_SIZEOF_DATETIME * CHAR_BIT) +#define NPY_BITSOF_TIMEDELTA (NPY_SIZEOF_TIMEDELTA * CHAR_BIT) + +#if NPY_BITSOF_LONG == 8 +#define NPY_INT8 NPY_LONG +#define NPY_UINT8 NPY_ULONG + typedef long npy_int8; + typedef unsigned long npy_uint8; +#define PyInt8ScalarObject PyLongScalarObject +#define PyInt8ArrType_Type PyLongArrType_Type +#define PyUInt8ScalarObject PyULongScalarObject +#define PyUInt8ArrType_Type PyULongArrType_Type +#define NPY_INT8_FMT NPY_LONG_FMT +#define NPY_UINT8_FMT NPY_ULONG_FMT +#elif NPY_BITSOF_LONG == 16 +#define NPY_INT16 NPY_LONG +#define NPY_UINT16 NPY_ULONG + typedef long npy_int16; + typedef unsigned long npy_uint16; +#define PyInt16ScalarObject PyLongScalarObject +#define PyInt16ArrType_Type PyLongArrType_Type +#define PyUInt16ScalarObject PyULongScalarObject +#define PyUInt16ArrType_Type PyULongArrType_Type +#define NPY_INT16_FMT NPY_LONG_FMT +#define NPY_UINT16_FMT NPY_ULONG_FMT +#elif NPY_BITSOF_LONG == 32 +#define NPY_INT32 NPY_LONG +#define NPY_UINT32 NPY_ULONG + typedef long npy_int32; + typedef unsigned long npy_uint32; + typedef unsigned long npy_ucs4; +#define PyInt32ScalarObject PyLongScalarObject +#define PyInt32ArrType_Type PyLongArrType_Type +#define PyUInt32ScalarObject PyULongScalarObject +#define PyUInt32ArrType_Type PyULongArrType_Type +#define NPY_INT32_FMT NPY_LONG_FMT +#define NPY_UINT32_FMT NPY_ULONG_FMT +#elif NPY_BITSOF_LONG == 64 +#define NPY_INT64 NPY_LONG +#define NPY_UINT64 NPY_ULONG + typedef long npy_int64; + typedef unsigned long npy_uint64; +#define PyInt64ScalarObject PyLongScalarObject +#define PyInt64ArrType_Type PyLongArrType_Type +#define PyUInt64ScalarObject PyULongScalarObject +#define PyUInt64ArrType_Type PyULongArrType_Type +#define NPY_INT64_FMT NPY_LONG_FMT +#define NPY_UINT64_FMT NPY_ULONG_FMT +#define MyPyLong_FromInt64 PyLong_FromLong +#define MyPyLong_AsInt64 PyLong_AsLong +#elif NPY_BITSOF_LONG == 128 +#define NPY_INT128 NPY_LONG +#define NPY_UINT128 NPY_ULONG + typedef long npy_int128; + typedef unsigned long npy_uint128; +#define PyInt128ScalarObject 
PyLongScalarObject +#define PyInt128ArrType_Type PyLongArrType_Type +#define PyUInt128ScalarObject PyULongScalarObject +#define PyUInt128ArrType_Type PyULongArrType_Type +#define NPY_INT128_FMT NPY_LONG_FMT +#define NPY_UINT128_FMT NPY_ULONG_FMT +#endif + +#if NPY_BITSOF_LONGLONG == 8 +# ifndef NPY_INT8 +# define NPY_INT8 NPY_LONGLONG +# define NPY_UINT8 NPY_ULONGLONG + typedef npy_longlong npy_int8; + typedef npy_ulonglong npy_uint8; +# define PyInt8ScalarObject PyLongLongScalarObject +# define PyInt8ArrType_Type PyLongLongArrType_Type +# define PyUInt8ScalarObject PyULongLongScalarObject +# define PyUInt8ArrType_Type PyULongLongArrType_Type +#define NPY_INT8_FMT NPY_LONGLONG_FMT +#define NPY_UINT8_FMT NPY_ULONGLONG_FMT +# endif +# define NPY_MAX_LONGLONG NPY_MAX_INT8 +# define NPY_MIN_LONGLONG NPY_MIN_INT8 +# define NPY_MAX_ULONGLONG NPY_MAX_UINT8 +#elif NPY_BITSOF_LONGLONG == 16 +# ifndef NPY_INT16 +# define NPY_INT16 NPY_LONGLONG +# define NPY_UINT16 NPY_ULONGLONG + typedef npy_longlong npy_int16; + typedef npy_ulonglong npy_uint16; +# define PyInt16ScalarObject PyLongLongScalarObject +# define PyInt16ArrType_Type PyLongLongArrType_Type +# define PyUInt16ScalarObject PyULongLongScalarObject +# define PyUInt16ArrType_Type PyULongLongArrType_Type +#define NPY_INT16_FMT NPY_LONGLONG_FMT +#define NPY_UINT16_FMT NPY_ULONGLONG_FMT +# endif +# define NPY_MAX_LONGLONG NPY_MAX_INT16 +# define NPY_MIN_LONGLONG NPY_MIN_INT16 +# define NPY_MAX_ULONGLONG NPY_MAX_UINT16 +#elif NPY_BITSOF_LONGLONG == 32 +# ifndef NPY_INT32 +# define NPY_INT32 NPY_LONGLONG +# define NPY_UINT32 NPY_ULONGLONG + typedef npy_longlong npy_int32; + typedef npy_ulonglong npy_uint32; + typedef npy_ulonglong npy_ucs4; +# define PyInt32ScalarObject PyLongLongScalarObject +# define PyInt32ArrType_Type PyLongLongArrType_Type +# define PyUInt32ScalarObject PyULongLongScalarObject +# define PyUInt32ArrType_Type PyULongLongArrType_Type +#define NPY_INT32_FMT NPY_LONGLONG_FMT +#define NPY_UINT32_FMT NPY_ULONGLONG_FMT +# endif +# define NPY_MAX_LONGLONG NPY_MAX_INT32 +# define NPY_MIN_LONGLONG NPY_MIN_INT32 +# define NPY_MAX_ULONGLONG NPY_MAX_UINT32 +#elif NPY_BITSOF_LONGLONG == 64 +# ifndef NPY_INT64 +# define NPY_INT64 NPY_LONGLONG +# define NPY_UINT64 NPY_ULONGLONG + typedef npy_longlong npy_int64; + typedef npy_ulonglong npy_uint64; +# define PyInt64ScalarObject PyLongLongScalarObject +# define PyInt64ArrType_Type PyLongLongArrType_Type +# define PyUInt64ScalarObject PyULongLongScalarObject +# define PyUInt64ArrType_Type PyULongLongArrType_Type +#define NPY_INT64_FMT NPY_LONGLONG_FMT +#define NPY_UINT64_FMT NPY_ULONGLONG_FMT +# define MyPyLong_FromInt64 PyLong_FromLongLong +# define MyPyLong_AsInt64 PyLong_AsLongLong +# endif +# define NPY_MAX_LONGLONG NPY_MAX_INT64 +# define NPY_MIN_LONGLONG NPY_MIN_INT64 +# define NPY_MAX_ULONGLONG NPY_MAX_UINT64 +#elif NPY_BITSOF_LONGLONG == 128 +# ifndef NPY_INT128 +# define NPY_INT128 NPY_LONGLONG +# define NPY_UINT128 NPY_ULONGLONG + typedef npy_longlong npy_int128; + typedef npy_ulonglong npy_uint128; +# define PyInt128ScalarObject PyLongLongScalarObject +# define PyInt128ArrType_Type PyLongLongArrType_Type +# define PyUInt128ScalarObject PyULongLongScalarObject +# define PyUInt128ArrType_Type PyULongLongArrType_Type +#define NPY_INT128_FMT NPY_LONGLONG_FMT +#define NPY_UINT128_FMT NPY_ULONGLONG_FMT +# endif +# define NPY_MAX_LONGLONG NPY_MAX_INT128 +# define NPY_MIN_LONGLONG NPY_MIN_INT128 +# define NPY_MAX_ULONGLONG NPY_MAX_UINT128 +#elif NPY_BITSOF_LONGLONG == 256 +# define NPY_INT256 
NPY_LONGLONG +# define NPY_UINT256 NPY_ULONGLONG + typedef npy_longlong npy_int256; + typedef npy_ulonglong npy_uint256; +# define PyInt256ScalarObject PyLongLongScalarObject +# define PyInt256ArrType_Type PyLongLongArrType_Type +# define PyUInt256ScalarObject PyULongLongScalarObject +# define PyUInt256ArrType_Type PyULongLongArrType_Type +#define NPY_INT256_FMT NPY_LONGLONG_FMT +#define NPY_UINT256_FMT NPY_ULONGLONG_FMT +# define NPY_MAX_LONGLONG NPY_MAX_INT256 +# define NPY_MIN_LONGLONG NPY_MIN_INT256 +# define NPY_MAX_ULONGLONG NPY_MAX_UINT256 +#endif + +#if NPY_BITSOF_INT == 8 +#ifndef NPY_INT8 +#define NPY_INT8 NPY_INT +#define NPY_UINT8 NPY_UINT + typedef int npy_int8; + typedef unsigned int npy_uint8; +# define PyInt8ScalarObject PyIntScalarObject +# define PyInt8ArrType_Type PyIntArrType_Type +# define PyUInt8ScalarObject PyUIntScalarObject +# define PyUInt8ArrType_Type PyUIntArrType_Type +#define NPY_INT8_FMT NPY_INT_FMT +#define NPY_UINT8_FMT NPY_UINT_FMT +#endif +#elif NPY_BITSOF_INT == 16 +#ifndef NPY_INT16 +#define NPY_INT16 NPY_INT +#define NPY_UINT16 NPY_UINT + typedef int npy_int16; + typedef unsigned int npy_uint16; +# define PyInt16ScalarObject PyIntScalarObject +# define PyInt16ArrType_Type PyIntArrType_Type +# define PyUInt16ScalarObject PyIntUScalarObject +# define PyUInt16ArrType_Type PyIntUArrType_Type +#define NPY_INT16_FMT NPY_INT_FMT +#define NPY_UINT16_FMT NPY_UINT_FMT +#endif +#elif NPY_BITSOF_INT == 32 +#ifndef NPY_INT32 +#define NPY_INT32 NPY_INT +#define NPY_UINT32 NPY_UINT + typedef int npy_int32; + typedef unsigned int npy_uint32; + typedef unsigned int npy_ucs4; +# define PyInt32ScalarObject PyIntScalarObject +# define PyInt32ArrType_Type PyIntArrType_Type +# define PyUInt32ScalarObject PyUIntScalarObject +# define PyUInt32ArrType_Type PyUIntArrType_Type +#define NPY_INT32_FMT NPY_INT_FMT +#define NPY_UINT32_FMT NPY_UINT_FMT +#endif +#elif NPY_BITSOF_INT == 64 +#ifndef NPY_INT64 +#define NPY_INT64 NPY_INT +#define NPY_UINT64 NPY_UINT + typedef int npy_int64; + typedef unsigned int npy_uint64; +# define PyInt64ScalarObject PyIntScalarObject +# define PyInt64ArrType_Type PyIntArrType_Type +# define PyUInt64ScalarObject PyUIntScalarObject +# define PyUInt64ArrType_Type PyUIntArrType_Type +#define NPY_INT64_FMT NPY_INT_FMT +#define NPY_UINT64_FMT NPY_UINT_FMT +# define MyPyLong_FromInt64 PyLong_FromLong +# define MyPyLong_AsInt64 PyLong_AsLong +#endif +#elif NPY_BITSOF_INT == 128 +#ifndef NPY_INT128 +#define NPY_INT128 NPY_INT +#define NPY_UINT128 NPY_UINT + typedef int npy_int128; + typedef unsigned int npy_uint128; +# define PyInt128ScalarObject PyIntScalarObject +# define PyInt128ArrType_Type PyIntArrType_Type +# define PyUInt128ScalarObject PyUIntScalarObject +# define PyUInt128ArrType_Type PyUIntArrType_Type +#define NPY_INT128_FMT NPY_INT_FMT +#define NPY_UINT128_FMT NPY_UINT_FMT +#endif +#endif + +#if NPY_BITSOF_SHORT == 8 +#ifndef NPY_INT8 +#define NPY_INT8 NPY_SHORT +#define NPY_UINT8 NPY_USHORT + typedef short npy_int8; + typedef unsigned short npy_uint8; +# define PyInt8ScalarObject PyShortScalarObject +# define PyInt8ArrType_Type PyShortArrType_Type +# define PyUInt8ScalarObject PyUShortScalarObject +# define PyUInt8ArrType_Type PyUShortArrType_Type +#define NPY_INT8_FMT NPY_SHORT_FMT +#define NPY_UINT8_FMT NPY_USHORT_FMT +#endif +#elif NPY_BITSOF_SHORT == 16 +#ifndef NPY_INT16 +#define NPY_INT16 NPY_SHORT +#define NPY_UINT16 NPY_USHORT + typedef short npy_int16; + typedef unsigned short npy_uint16; +# define PyInt16ScalarObject PyShortScalarObject 
+# define PyInt16ArrType_Type PyShortArrType_Type +# define PyUInt16ScalarObject PyUShortScalarObject +# define PyUInt16ArrType_Type PyUShortArrType_Type +#define NPY_INT16_FMT NPY_SHORT_FMT +#define NPY_UINT16_FMT NPY_USHORT_FMT +#endif +#elif NPY_BITSOF_SHORT == 32 +#ifndef NPY_INT32 +#define NPY_INT32 NPY_SHORT +#define NPY_UINT32 NPY_USHORT + typedef short npy_int32; + typedef unsigned short npy_uint32; + typedef unsigned short npy_ucs4; +# define PyInt32ScalarObject PyShortScalarObject +# define PyInt32ArrType_Type PyShortArrType_Type +# define PyUInt32ScalarObject PyUShortScalarObject +# define PyUInt32ArrType_Type PyUShortArrType_Type +#define NPY_INT32_FMT NPY_SHORT_FMT +#define NPY_UINT32_FMT NPY_USHORT_FMT +#endif +#elif NPY_BITSOF_SHORT == 64 +#ifndef NPY_INT64 +#define NPY_INT64 NPY_SHORT +#define NPY_UINT64 NPY_USHORT + typedef short npy_int64; + typedef unsigned short npy_uint64; +# define PyInt64ScalarObject PyShortScalarObject +# define PyInt64ArrType_Type PyShortArrType_Type +# define PyUInt64ScalarObject PyUShortScalarObject +# define PyUInt64ArrType_Type PyUShortArrType_Type +#define NPY_INT64_FMT NPY_SHORT_FMT +#define NPY_UINT64_FMT NPY_USHORT_FMT +# define MyPyLong_FromInt64 PyLong_FromLong +# define MyPyLong_AsInt64 PyLong_AsLong +#endif +#elif NPY_BITSOF_SHORT == 128 +#ifndef NPY_INT128 +#define NPY_INT128 NPY_SHORT +#define NPY_UINT128 NPY_USHORT + typedef short npy_int128; + typedef unsigned short npy_uint128; +# define PyInt128ScalarObject PyShortScalarObject +# define PyInt128ArrType_Type PyShortArrType_Type +# define PyUInt128ScalarObject PyUShortScalarObject +# define PyUInt128ArrType_Type PyUShortArrType_Type +#define NPY_INT128_FMT NPY_SHORT_FMT +#define NPY_UINT128_FMT NPY_USHORT_FMT +#endif +#endif + + +#if NPY_BITSOF_CHAR == 8 +#ifndef NPY_INT8 +#define NPY_INT8 NPY_BYTE +#define NPY_UINT8 NPY_UBYTE + typedef signed char npy_int8; + typedef unsigned char npy_uint8; +# define PyInt8ScalarObject PyByteScalarObject +# define PyInt8ArrType_Type PyByteArrType_Type +# define PyUInt8ScalarObject PyUByteScalarObject +# define PyUInt8ArrType_Type PyUByteArrType_Type +#define NPY_INT8_FMT NPY_BYTE_FMT +#define NPY_UINT8_FMT NPY_UBYTE_FMT +#endif +#elif NPY_BITSOF_CHAR == 16 +#ifndef NPY_INT16 +#define NPY_INT16 NPY_BYTE +#define NPY_UINT16 NPY_UBYTE + typedef signed char npy_int16; + typedef unsigned char npy_uint16; +# define PyInt16ScalarObject PyByteScalarObject +# define PyInt16ArrType_Type PyByteArrType_Type +# define PyUInt16ScalarObject PyUByteScalarObject +# define PyUInt16ArrType_Type PyUByteArrType_Type +#define NPY_INT16_FMT NPY_BYTE_FMT +#define NPY_UINT16_FMT NPY_UBYTE_FMT +#endif +#elif NPY_BITSOF_CHAR == 32 +#ifndef NPY_INT32 +#define NPY_INT32 NPY_BYTE +#define NPY_UINT32 NPY_UBYTE + typedef signed char npy_int32; + typedef unsigned char npy_uint32; + typedef unsigned char npy_ucs4; +# define PyInt32ScalarObject PyByteScalarObject +# define PyInt32ArrType_Type PyByteArrType_Type +# define PyUInt32ScalarObject PyUByteScalarObject +# define PyUInt32ArrType_Type PyUByteArrType_Type +#define NPY_INT32_FMT NPY_BYTE_FMT +#define NPY_UINT32_FMT NPY_UBYTE_FMT +#endif +#elif NPY_BITSOF_CHAR == 64 +#ifndef NPY_INT64 +#define NPY_INT64 NPY_BYTE +#define NPY_UINT64 NPY_UBYTE + typedef signed char npy_int64; + typedef unsigned char npy_uint64; +# define PyInt64ScalarObject PyByteScalarObject +# define PyInt64ArrType_Type PyByteArrType_Type +# define PyUInt64ScalarObject PyUByteScalarObject +# define PyUInt64ArrType_Type PyUByteArrType_Type +#define NPY_INT64_FMT 
NPY_BYTE_FMT +#define NPY_UINT64_FMT NPY_UBYTE_FMT +# define MyPyLong_FromInt64 PyLong_FromLong +# define MyPyLong_AsInt64 PyLong_AsLong +#endif +#elif NPY_BITSOF_CHAR == 128 +#ifndef NPY_INT128 +#define NPY_INT128 NPY_BYTE +#define NPY_UINT128 NPY_UBYTE + typedef signed char npy_int128; + typedef unsigned char npy_uint128; +# define PyInt128ScalarObject PyByteScalarObject +# define PyInt128ArrType_Type PyByteArrType_Type +# define PyUInt128ScalarObject PyUByteScalarObject +# define PyUInt128ArrType_Type PyUByteArrType_Type +#define NPY_INT128_FMT NPY_BYTE_FMT +#define NPY_UINT128_FMT NPY_UBYTE_FMT +#endif +#endif + + + +#if NPY_BITSOF_DOUBLE == 32 +#ifndef NPY_FLOAT32 +#define NPY_FLOAT32 NPY_DOUBLE +#define NPY_COMPLEX64 NPY_CDOUBLE + typedef double npy_float32; + typedef npy_cdouble npy_complex64; +# define PyFloat32ScalarObject PyDoubleScalarObject +# define PyComplex64ScalarObject PyCDoubleScalarObject +# define PyFloat32ArrType_Type PyDoubleArrType_Type +# define PyComplex64ArrType_Type PyCDoubleArrType_Type +#define NPY_FLOAT32_FMT NPY_DOUBLE_FMT +#define NPY_COMPLEX64_FMT NPY_CDOUBLE_FMT +#endif +#elif NPY_BITSOF_DOUBLE == 64 +#ifndef NPY_FLOAT64 +#define NPY_FLOAT64 NPY_DOUBLE +#define NPY_COMPLEX128 NPY_CDOUBLE + typedef double npy_float64; + typedef npy_cdouble npy_complex128; +# define PyFloat64ScalarObject PyDoubleScalarObject +# define PyComplex128ScalarObject PyCDoubleScalarObject +# define PyFloat64ArrType_Type PyDoubleArrType_Type +# define PyComplex128ArrType_Type PyCDoubleArrType_Type +#define NPY_FLOAT64_FMT NPY_DOUBLE_FMT +#define NPY_COMPLEX128_FMT NPY_CDOUBLE_FMT +#endif +#elif NPY_BITSOF_DOUBLE == 80 +#ifndef NPY_FLOAT80 +#define NPY_FLOAT80 NPY_DOUBLE +#define NPY_COMPLEX160 NPY_CDOUBLE + typedef double npy_float80; + typedef npy_cdouble npy_complex160; +# define PyFloat80ScalarObject PyDoubleScalarObject +# define PyComplex160ScalarObject PyCDoubleScalarObject +# define PyFloat80ArrType_Type PyDoubleArrType_Type +# define PyComplex160ArrType_Type PyCDoubleArrType_Type +#define NPY_FLOAT80_FMT NPY_DOUBLE_FMT +#define NPY_COMPLEX160_FMT NPY_CDOUBLE_FMT +#endif +#elif NPY_BITSOF_DOUBLE == 96 +#ifndef NPY_FLOAT96 +#define NPY_FLOAT96 NPY_DOUBLE +#define NPY_COMPLEX192 NPY_CDOUBLE + typedef double npy_float96; + typedef npy_cdouble npy_complex192; +# define PyFloat96ScalarObject PyDoubleScalarObject +# define PyComplex192ScalarObject PyCDoubleScalarObject +# define PyFloat96ArrType_Type PyDoubleArrType_Type +# define PyComplex192ArrType_Type PyCDoubleArrType_Type +#define NPY_FLOAT96_FMT NPY_DOUBLE_FMT +#define NPY_COMPLEX192_FMT NPY_CDOUBLE_FMT +#endif +#elif NPY_BITSOF_DOUBLE == 128 +#ifndef NPY_FLOAT128 +#define NPY_FLOAT128 NPY_DOUBLE +#define NPY_COMPLEX256 NPY_CDOUBLE + typedef double npy_float128; + typedef npy_cdouble npy_complex256; +# define PyFloat128ScalarObject PyDoubleScalarObject +# define PyComplex256ScalarObject PyCDoubleScalarObject +# define PyFloat128ArrType_Type PyDoubleArrType_Type +# define PyComplex256ArrType_Type PyCDoubleArrType_Type +#define NPY_FLOAT128_FMT NPY_DOUBLE_FMT +#define NPY_COMPLEX256_FMT NPY_CDOUBLE_FMT +#endif +#endif + + + +#if NPY_BITSOF_FLOAT == 32 +#ifndef NPY_FLOAT32 +#define NPY_FLOAT32 NPY_FLOAT +#define NPY_COMPLEX64 NPY_CFLOAT + typedef float npy_float32; + typedef npy_cfloat npy_complex64; +# define PyFloat32ScalarObject PyFloatScalarObject +# define PyComplex64ScalarObject PyCFloatScalarObject +# define PyFloat32ArrType_Type PyFloatArrType_Type +# define PyComplex64ArrType_Type PyCFloatArrType_Type +#define 
NPY_FLOAT32_FMT NPY_FLOAT_FMT +#define NPY_COMPLEX64_FMT NPY_CFLOAT_FMT +#endif +#elif NPY_BITSOF_FLOAT == 64 +#ifndef NPY_FLOAT64 +#define NPY_FLOAT64 NPY_FLOAT +#define NPY_COMPLEX128 NPY_CFLOAT + typedef float npy_float64; + typedef npy_cfloat npy_complex128; +# define PyFloat64ScalarObject PyFloatScalarObject +# define PyComplex128ScalarObject PyCFloatScalarObject +# define PyFloat64ArrType_Type PyFloatArrType_Type +# define PyComplex128ArrType_Type PyCFloatArrType_Type +#define NPY_FLOAT64_FMT NPY_FLOAT_FMT +#define NPY_COMPLEX128_FMT NPY_CFLOAT_FMT +#endif +#elif NPY_BITSOF_FLOAT == 80 +#ifndef NPY_FLOAT80 +#define NPY_FLOAT80 NPY_FLOAT +#define NPY_COMPLEX160 NPY_CFLOAT + typedef float npy_float80; + typedef npy_cfloat npy_complex160; +# define PyFloat80ScalarObject PyFloatScalarObject +# define PyComplex160ScalarObject PyCFloatScalarObject +# define PyFloat80ArrType_Type PyFloatArrType_Type +# define PyComplex160ArrType_Type PyCFloatArrType_Type +#define NPY_FLOAT80_FMT NPY_FLOAT_FMT +#define NPY_COMPLEX160_FMT NPY_CFLOAT_FMT +#endif +#elif NPY_BITSOF_FLOAT == 96 +#ifndef NPY_FLOAT96 +#define NPY_FLOAT96 NPY_FLOAT +#define NPY_COMPLEX192 NPY_CFLOAT + typedef float npy_float96; + typedef npy_cfloat npy_complex192; +# define PyFloat96ScalarObject PyFloatScalarObject +# define PyComplex192ScalarObject PyCFloatScalarObject +# define PyFloat96ArrType_Type PyFloatArrType_Type +# define PyComplex192ArrType_Type PyCFloatArrType_Type +#define NPY_FLOAT96_FMT NPY_FLOAT_FMT +#define NPY_COMPLEX192_FMT NPY_CFLOAT_FMT +#endif +#elif NPY_BITSOF_FLOAT == 128 +#ifndef NPY_FLOAT128 +#define NPY_FLOAT128 NPY_FLOAT +#define NPY_COMPLEX256 NPY_CFLOAT + typedef float npy_float128; + typedef npy_cfloat npy_complex256; +# define PyFloat128ScalarObject PyFloatScalarObject +# define PyComplex256ScalarObject PyCFloatScalarObject +# define PyFloat128ArrType_Type PyFloatArrType_Type +# define PyComplex256ArrType_Type PyCFloatArrType_Type +#define NPY_FLOAT128_FMT NPY_FLOAT_FMT +#define NPY_COMPLEX256_FMT NPY_CFLOAT_FMT +#endif +#endif + +/* half/float16 isn't a floating-point type in C */ +#define NPY_FLOAT16 NPY_HALF +typedef npy_uint16 npy_half; +typedef npy_half npy_float16; + +#if NPY_BITSOF_LONGDOUBLE == 32 +#ifndef NPY_FLOAT32 +#define NPY_FLOAT32 NPY_LONGDOUBLE +#define NPY_COMPLEX64 NPY_CLONGDOUBLE + typedef npy_longdouble npy_float32; + typedef npy_clongdouble npy_complex64; +# define PyFloat32ScalarObject PyLongDoubleScalarObject +# define PyComplex64ScalarObject PyCLongDoubleScalarObject +# define PyFloat32ArrType_Type PyLongDoubleArrType_Type +# define PyComplex64ArrType_Type PyCLongDoubleArrType_Type +#define NPY_FLOAT32_FMT NPY_LONGDOUBLE_FMT +#define NPY_COMPLEX64_FMT NPY_CLONGDOUBLE_FMT +#endif +#elif NPY_BITSOF_LONGDOUBLE == 64 +#ifndef NPY_FLOAT64 +#define NPY_FLOAT64 NPY_LONGDOUBLE +#define NPY_COMPLEX128 NPY_CLONGDOUBLE + typedef npy_longdouble npy_float64; + typedef npy_clongdouble npy_complex128; +# define PyFloat64ScalarObject PyLongDoubleScalarObject +# define PyComplex128ScalarObject PyCLongDoubleScalarObject +# define PyFloat64ArrType_Type PyLongDoubleArrType_Type +# define PyComplex128ArrType_Type PyCLongDoubleArrType_Type +#define NPY_FLOAT64_FMT NPY_LONGDOUBLE_FMT +#define NPY_COMPLEX128_FMT NPY_CLONGDOUBLE_FMT +#endif +#elif NPY_BITSOF_LONGDOUBLE == 80 +#ifndef NPY_FLOAT80 +#define NPY_FLOAT80 NPY_LONGDOUBLE +#define NPY_COMPLEX160 NPY_CLONGDOUBLE + typedef npy_longdouble npy_float80; + typedef npy_clongdouble npy_complex160; +# define PyFloat80ScalarObject 
PyLongDoubleScalarObject +# define PyComplex160ScalarObject PyCLongDoubleScalarObject +# define PyFloat80ArrType_Type PyLongDoubleArrType_Type +# define PyComplex160ArrType_Type PyCLongDoubleArrType_Type +#define NPY_FLOAT80_FMT NPY_LONGDOUBLE_FMT +#define NPY_COMPLEX160_FMT NPY_CLONGDOUBLE_FMT +#endif +#elif NPY_BITSOF_LONGDOUBLE == 96 +#ifndef NPY_FLOAT96 +#define NPY_FLOAT96 NPY_LONGDOUBLE +#define NPY_COMPLEX192 NPY_CLONGDOUBLE + typedef npy_longdouble npy_float96; + typedef npy_clongdouble npy_complex192; +# define PyFloat96ScalarObject PyLongDoubleScalarObject +# define PyComplex192ScalarObject PyCLongDoubleScalarObject +# define PyFloat96ArrType_Type PyLongDoubleArrType_Type +# define PyComplex192ArrType_Type PyCLongDoubleArrType_Type +#define NPY_FLOAT96_FMT NPY_LONGDOUBLE_FMT +#define NPY_COMPLEX192_FMT NPY_CLONGDOUBLE_FMT +#endif +#elif NPY_BITSOF_LONGDOUBLE == 128 +#ifndef NPY_FLOAT128 +#define NPY_FLOAT128 NPY_LONGDOUBLE +#define NPY_COMPLEX256 NPY_CLONGDOUBLE + typedef npy_longdouble npy_float128; + typedef npy_clongdouble npy_complex256; +# define PyFloat128ScalarObject PyLongDoubleScalarObject +# define PyComplex256ScalarObject PyCLongDoubleScalarObject +# define PyFloat128ArrType_Type PyLongDoubleArrType_Type +# define PyComplex256ArrType_Type PyCLongDoubleArrType_Type +#define NPY_FLOAT128_FMT NPY_LONGDOUBLE_FMT +#define NPY_COMPLEX256_FMT NPY_CLONGDOUBLE_FMT +#endif +#elif NPY_BITSOF_LONGDOUBLE == 256 +#define NPY_FLOAT256 NPY_LONGDOUBLE +#define NPY_COMPLEX512 NPY_CLONGDOUBLE + typedef npy_longdouble npy_float256; + typedef npy_clongdouble npy_complex512; +# define PyFloat256ScalarObject PyLongDoubleScalarObject +# define PyComplex512ScalarObject PyCLongDoubleScalarObject +# define PyFloat256ArrType_Type PyLongDoubleArrType_Type +# define PyComplex512ArrType_Type PyCLongDoubleArrType_Type +#define NPY_FLOAT256_FMT NPY_LONGDOUBLE_FMT +#define NPY_COMPLEX512_FMT NPY_CLONGDOUBLE_FMT +#endif + +/* datetime typedefs */ +typedef npy_int64 npy_timedelta; +typedef npy_int64 npy_datetime; +#define NPY_DATETIME_FMT NPY_INT64_FMT +#define NPY_TIMEDELTA_FMT NPY_INT64_FMT + +/* End of typedefs for numarray style bit-width names */ + +#endif diff --git a/MLPY/Lib/site-packages/numpy/core/include/numpy/npy_cpu.h b/MLPY/Lib/site-packages/numpy/core/include/numpy/npy_cpu.h new file mode 100644 index 0000000000000000000000000000000000000000..e6aafffd7f0658d6811419b43667c783c39496b6 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/include/numpy/npy_cpu.h @@ -0,0 +1,126 @@ +/* + * This set (target) cpu specific macros: + * - Possible values: + * NPY_CPU_X86 + * NPY_CPU_AMD64 + * NPY_CPU_PPC + * NPY_CPU_PPC64 + * NPY_CPU_PPC64LE + * NPY_CPU_SPARC + * NPY_CPU_S390 + * NPY_CPU_IA64 + * NPY_CPU_HPPA + * NPY_CPU_ALPHA + * NPY_CPU_ARMEL + * NPY_CPU_ARMEB + * NPY_CPU_SH_LE + * NPY_CPU_SH_BE + * NPY_CPU_ARCEL + * NPY_CPU_ARCEB + * NPY_CPU_RISCV64 + * NPY_CPU_WASM + */ +#ifndef _NPY_CPUARCH_H_ +#define _NPY_CPUARCH_H_ + +#include "numpyconfig.h" + +#if defined( __i386__ ) || defined(i386) || defined(_M_IX86) + /* + * __i386__ is defined by gcc and Intel compiler on Linux, + * _M_IX86 by VS compiler, + * i386 by Sun compilers on opensolaris at least + */ + #define NPY_CPU_X86 +#elif defined(__x86_64__) || defined(__amd64__) || defined(__x86_64) || defined(_M_AMD64) + /* + * both __x86_64__ and __amd64__ are defined by gcc + * __x86_64 defined by sun compiler on opensolaris at least + * _M_AMD64 defined by MS compiler + */ + #define NPY_CPU_AMD64 +#elif defined(__powerpc64__) && 
defined(__LITTLE_ENDIAN__) + #define NPY_CPU_PPC64LE +#elif defined(__powerpc64__) && defined(__BIG_ENDIAN__) + #define NPY_CPU_PPC64 +#elif defined(__ppc__) || defined(__powerpc__) || defined(_ARCH_PPC) + /* + * __ppc__ is defined by gcc, I remember having seen __powerpc__ once, + * but can't find it ATM + * _ARCH_PPC is used by at least gcc on AIX + * As __powerpc__ and _ARCH_PPC are also defined by PPC64 check + * for those specifically first before defaulting to ppc + */ + #define NPY_CPU_PPC +#elif defined(__sparc__) || defined(__sparc) + /* __sparc__ is defined by gcc and Forte (e.g. Sun) compilers */ + #define NPY_CPU_SPARC +#elif defined(__s390__) + #define NPY_CPU_S390 +#elif defined(__ia64) + #define NPY_CPU_IA64 +#elif defined(__hppa) + #define NPY_CPU_HPPA +#elif defined(__alpha__) + #define NPY_CPU_ALPHA +#elif defined(__arm__) || defined(__aarch64__) || defined(_M_ARM64) + /* _M_ARM64 is defined in MSVC for ARM64 compilation on Windows */ + #if defined(__ARMEB__) || defined(__AARCH64EB__) + #if defined(__ARM_32BIT_STATE) + #define NPY_CPU_ARMEB_AARCH32 + #elif defined(__ARM_64BIT_STATE) + #define NPY_CPU_ARMEB_AARCH64 + #else + #define NPY_CPU_ARMEB + #endif + #elif defined(__ARMEL__) || defined(__AARCH64EL__) || defined(_M_ARM64) + #if defined(__ARM_32BIT_STATE) + #define NPY_CPU_ARMEL_AARCH32 + #elif defined(__ARM_64BIT_STATE) || defined(_M_ARM64) + #define NPY_CPU_ARMEL_AARCH64 + #else + #define NPY_CPU_ARMEL + #endif + #else + # error Unknown ARM CPU, please report this to numpy maintainers with \ + information about your platform (OS, CPU and compiler) + #endif +#elif defined(__sh__) && defined(__LITTLE_ENDIAN__) + #define NPY_CPU_SH_LE +#elif defined(__sh__) && defined(__BIG_ENDIAN__) + #define NPY_CPU_SH_BE +#elif defined(__MIPSEL__) + #define NPY_CPU_MIPSEL +#elif defined(__MIPSEB__) + #define NPY_CPU_MIPSEB +#elif defined(__or1k__) + #define NPY_CPU_OR1K +#elif defined(__mc68000__) + #define NPY_CPU_M68K +#elif defined(__arc__) && defined(__LITTLE_ENDIAN__) + #define NPY_CPU_ARCEL +#elif defined(__arc__) && defined(__BIG_ENDIAN__) + #define NPY_CPU_ARCEB +#elif defined(__riscv) && defined(__riscv_xlen) && __riscv_xlen == 64 + #define NPY_CPU_RISCV64 +#elif defined(__EMSCRIPTEN__) + /* __EMSCRIPTEN__ is defined by emscripten: an LLVM-to-Web compiler */ + #define NPY_CPU_WASM +#else + #error Unknown CPU, please report this to numpy maintainers with \ + information about your platform (OS, CPU and compiler) +#endif + +/* + * Except for the following architectures, memory access is limited to the natural + * alignment of data types otherwise it may lead to bus error or performance regression. + * For more details about unaligned access, see https://www.kernel.org/doc/Documentation/unaligned-memory-access.txt. 
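+ * (Added note, not upstream text: NPY_ALIGNMENT_REQUIRED below is 0 only on + * the architectures known to tolerate unaligned loads and stores; all other + * targets must access data at its natural alignment.)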
+*/ +#if defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64) || defined(__aarch64__) || defined(__powerpc64__) + #define NPY_ALIGNMENT_REQUIRED 0 +#endif +#ifndef NPY_ALIGNMENT_REQUIRED + #define NPY_ALIGNMENT_REQUIRED 1 +#endif + +#endif diff --git a/MLPY/Lib/site-packages/numpy/core/include/numpy/npy_endian.h b/MLPY/Lib/site-packages/numpy/core/include/numpy/npy_endian.h new file mode 100644 index 0000000000000000000000000000000000000000..ca3c98173b7ebcea94e28a4fdd2b33086602dd4d --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/include/numpy/npy_endian.h @@ -0,0 +1,73 @@ +#ifndef _NPY_ENDIAN_H_ +#define _NPY_ENDIAN_H_ + +/* + * NPY_BYTE_ORDER is set to the same value as BYTE_ORDER set by glibc in + * endian.h + */ + +#if defined(NPY_HAVE_ENDIAN_H) || defined(NPY_HAVE_SYS_ENDIAN_H) + /* Use endian.h if available */ + + #if defined(NPY_HAVE_ENDIAN_H) + #include <endian.h> + #elif defined(NPY_HAVE_SYS_ENDIAN_H) + #include <sys/endian.h> + #endif + + #if defined(BYTE_ORDER) && defined(BIG_ENDIAN) && defined(LITTLE_ENDIAN) + #define NPY_BYTE_ORDER BYTE_ORDER + #define NPY_LITTLE_ENDIAN LITTLE_ENDIAN + #define NPY_BIG_ENDIAN BIG_ENDIAN + #elif defined(_BYTE_ORDER) && defined(_BIG_ENDIAN) && defined(_LITTLE_ENDIAN) + #define NPY_BYTE_ORDER _BYTE_ORDER + #define NPY_LITTLE_ENDIAN _LITTLE_ENDIAN + #define NPY_BIG_ENDIAN _BIG_ENDIAN + #elif defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && defined(__LITTLE_ENDIAN) + #define NPY_BYTE_ORDER __BYTE_ORDER + #define NPY_LITTLE_ENDIAN __LITTLE_ENDIAN + #define NPY_BIG_ENDIAN __BIG_ENDIAN + #endif +#endif + +#ifndef NPY_BYTE_ORDER + /* Set endianness info using target CPU */ + #include "npy_cpu.h" + + #define NPY_LITTLE_ENDIAN 1234 + #define NPY_BIG_ENDIAN 4321 + + #if defined(NPY_CPU_X86) \ + || defined(NPY_CPU_AMD64) \ + || defined(NPY_CPU_IA64) \ + || defined(NPY_CPU_ALPHA) \ + || defined(NPY_CPU_ARMEL) \ + || defined(NPY_CPU_ARMEL_AARCH32) \ + || defined(NPY_CPU_ARMEL_AARCH64) \ + || defined(NPY_CPU_SH_LE) \ + || defined(NPY_CPU_MIPSEL) \ + || defined(NPY_CPU_PPC64LE) \ + || defined(NPY_CPU_ARCEL) \ + || defined(NPY_CPU_RISCV64) \ + || defined(NPY_CPU_WASM) + #define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN + #elif defined(NPY_CPU_PPC) \ + || defined(NPY_CPU_SPARC) \ + || defined(NPY_CPU_S390) \ + || defined(NPY_CPU_HPPA) \ + || defined(NPY_CPU_PPC64) \ + || defined(NPY_CPU_ARMEB) \ + || defined(NPY_CPU_ARMEB_AARCH32) \ + || defined(NPY_CPU_ARMEB_AARCH64) \ + || defined(NPY_CPU_SH_BE) \ + || defined(NPY_CPU_MIPSEB) \ + || defined(NPY_CPU_OR1K) \ + || defined(NPY_CPU_M68K) \ + || defined(NPY_CPU_ARCEB) + #define NPY_BYTE_ORDER NPY_BIG_ENDIAN + #else + #error Unknown CPU: can not set endianness + #endif +#endif + +#endif diff --git a/MLPY/Lib/site-packages/numpy/core/include/numpy/npy_interrupt.h b/MLPY/Lib/site-packages/numpy/core/include/numpy/npy_interrupt.h new file mode 100644 index 0000000000000000000000000000000000000000..2aadfeaa85cb7c0a740fed027157611472395716 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/include/numpy/npy_interrupt.h @@ -0,0 +1,56 @@ +/* + * This API is only provided because it is part of publicly exported + * headers. Its use is considered DEPRECATED, and it will be removed + * eventually. + * (This includes the _PyArray_SigintHandler and _PyArray_GetSigintBuf + * functions, which are, however, public API, not headers.) + * + * Instead of using these non-threadsafe macros, consider periodically + * calling `PyErr_CheckSignals()` or `PyOS_InterruptOccurred()` instead; + * either will work.
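+ * For example (illustration added here, not upstream text), a long-running + * loop can poll for Ctrl-C once per chunk of work: + * if (PyErr_CheckSignals() < 0) { + * return NULL; (the KeyboardInterrupt is already set) + * }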
+ * Both of these require holding the GIL, although cpython could add a + * version of `PyOS_InterruptOccurred()` which does not. Such a version + * actually exists as private API in Python 3.10, and was backported to 3.9 and 3.8; + * see also https://bugs.python.org/issue41037 and + * https://github.com/python/cpython/pull/20599. + */ + +#ifndef NPY_INTERRUPT_H +#define NPY_INTERRUPT_H + +#ifndef NPY_NO_SIGNAL + +#include <setjmp.h> +#include <signal.h> + +#ifndef sigsetjmp + +#define NPY_SIGSETJMP(arg1, arg2) setjmp(arg1) +#define NPY_SIGLONGJMP(arg1, arg2) longjmp(arg1, arg2) +#define NPY_SIGJMP_BUF jmp_buf + +#else + +#define NPY_SIGSETJMP(arg1, arg2) sigsetjmp(arg1, arg2) +#define NPY_SIGLONGJMP(arg1, arg2) siglongjmp(arg1, arg2) +#define NPY_SIGJMP_BUF sigjmp_buf + +#endif + +# define NPY_SIGINT_ON { \ + PyOS_sighandler_t _npy_sig_save; \ + _npy_sig_save = PyOS_setsig(SIGINT, _PyArray_SigintHandler); \ + if (NPY_SIGSETJMP(*((NPY_SIGJMP_BUF *)_PyArray_GetSigintBuf()), \ + 1) == 0) { \ + +# define NPY_SIGINT_OFF } \ + PyOS_setsig(SIGINT, _npy_sig_save); \ + } + +#else /* NPY_NO_SIGNAL */ + +#define NPY_SIGINT_ON +#define NPY_SIGINT_OFF + +#endif /* NPY_NO_SIGNAL */ + +#endif /* NPY_INTERRUPT_H */ diff --git a/MLPY/Lib/site-packages/numpy/core/include/numpy/npy_math.h b/MLPY/Lib/site-packages/numpy/core/include/numpy/npy_math.h new file mode 100644 index 0000000000000000000000000000000000000000..5ed940e645251b037b2bbb9bdb077b02cea927da --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/include/numpy/npy_math.h @@ -0,0 +1,588 @@ +#ifndef __NPY_MATH_C99_H_ +#define __NPY_MATH_C99_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include <numpy/npy_common.h> + +#include <math.h> +#ifdef __SUNPRO_CC +#include <sunmath.h> +#endif + +/* By adding static inline specifiers to npy_math function definitions when + appropriate, compiler is given the opportunity to optimize */ +#if NPY_INLINE_MATH +#define NPY_INPLACE NPY_INLINE static +#else +#define NPY_INPLACE +#endif + + +/* + * NAN and INFINITY like macros (same behavior as glibc for NAN, same as C99 + * for INFINITY) + * + * XXX: I should test whether INFINITY and NAN are available on the platform + */ +NPY_INLINE static float __npy_inff(void) +{ + const union { npy_uint32 __i; float __f;} __bint = {0x7f800000UL}; + return __bint.__f; +} + +NPY_INLINE static float __npy_nanf(void) +{ + const union { npy_uint32 __i; float __f;} __bint = {0x7fc00000UL}; + return __bint.__f; +} + +NPY_INLINE static float __npy_pzerof(void) +{ + const union { npy_uint32 __i; float __f;} __bint = {0x00000000UL}; + return __bint.__f; +} + +NPY_INLINE static float __npy_nzerof(void) +{ + const union { npy_uint32 __i; float __f;} __bint = {0x80000000UL}; + return __bint.__f; +} + +#define NPY_INFINITYF __npy_inff() +#define NPY_NANF __npy_nanf() +#define NPY_PZEROF __npy_pzerof() +#define NPY_NZEROF __npy_nzerof() + +#define NPY_INFINITY ((npy_double)NPY_INFINITYF) +#define NPY_NAN ((npy_double)NPY_NANF) +#define NPY_PZERO ((npy_double)NPY_PZEROF) +#define NPY_NZERO ((npy_double)NPY_NZEROF) + +#define NPY_INFINITYL ((npy_longdouble)NPY_INFINITYF) +#define NPY_NANL ((npy_longdouble)NPY_NANF) +#define NPY_PZEROL ((npy_longdouble)NPY_PZEROF) +#define NPY_NZEROL ((npy_longdouble)NPY_NZEROF) + +/* + * Useful constants + */ +#define NPY_E 2.718281828459045235360287471352662498 /* e */ +#define NPY_LOG2E 1.442695040888963407359924681001892137 /* log_2 e */ +#define NPY_LOG10E 0.434294481903251827651128918916605082 /* log_10 e */ +#define NPY_LOGE2 0.693147180559945309417232121458176568 /* log_e 2 */ +#define NPY_LOGE10 
2.302585092994045684017991454684364208 /* log_e 10 */ +#define NPY_PI 3.141592653589793238462643383279502884 /* pi */ +#define NPY_PI_2 1.570796326794896619231321691639751442 /* pi/2 */ +#define NPY_PI_4 0.785398163397448309615660845819875721 /* pi/4 */ +#define NPY_1_PI 0.318309886183790671537767526745028724 /* 1/pi */ +#define NPY_2_PI 0.636619772367581343075535053490057448 /* 2/pi */ +#define NPY_EULER 0.577215664901532860606512090082402431 /* Euler constant */ +#define NPY_SQRT2 1.414213562373095048801688724209698079 /* sqrt(2) */ +#define NPY_SQRT1_2 0.707106781186547524400844362104849039 /* 1/sqrt(2) */ + +#define NPY_Ef 2.718281828459045235360287471352662498F /* e */ +#define NPY_LOG2Ef 1.442695040888963407359924681001892137F /* log_2 e */ +#define NPY_LOG10Ef 0.434294481903251827651128918916605082F /* log_10 e */ +#define NPY_LOGE2f 0.693147180559945309417232121458176568F /* log_e 2 */ +#define NPY_LOGE10f 2.302585092994045684017991454684364208F /* log_e 10 */ +#define NPY_PIf 3.141592653589793238462643383279502884F /* pi */ +#define NPY_PI_2f 1.570796326794896619231321691639751442F /* pi/2 */ +#define NPY_PI_4f 0.785398163397448309615660845819875721F /* pi/4 */ +#define NPY_1_PIf 0.318309886183790671537767526745028724F /* 1/pi */ +#define NPY_2_PIf 0.636619772367581343075535053490057448F /* 2/pi */ +#define NPY_EULERf 0.577215664901532860606512090082402431F /* Euler constant */ +#define NPY_SQRT2f 1.414213562373095048801688724209698079F /* sqrt(2) */ +#define NPY_SQRT1_2f 0.707106781186547524400844362104849039F /* 1/sqrt(2) */ + +#define NPY_El 2.718281828459045235360287471352662498L /* e */ +#define NPY_LOG2El 1.442695040888963407359924681001892137L /* log_2 e */ +#define NPY_LOG10El 0.434294481903251827651128918916605082L /* log_10 e */ +#define NPY_LOGE2l 0.693147180559945309417232121458176568L /* log_e 2 */ +#define NPY_LOGE10l 2.302585092994045684017991454684364208L /* log_e 10 */ +#define NPY_PIl 3.141592653589793238462643383279502884L /* pi */ +#define NPY_PI_2l 1.570796326794896619231321691639751442L /* pi/2 */ +#define NPY_PI_4l 0.785398163397448309615660845819875721L /* pi/4 */ +#define NPY_1_PIl 0.318309886183790671537767526745028724L /* 1/pi */ +#define NPY_2_PIl 0.636619772367581343075535053490057448L /* 2/pi */ +#define NPY_EULERl 0.577215664901532860606512090082402431L /* Euler constant */ +#define NPY_SQRT2l 1.414213562373095048801688724209698079L /* sqrt(2) */ +#define NPY_SQRT1_2l 0.707106781186547524400844362104849039L /* 1/sqrt(2) */ + +/* + * Integer functions. 
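+ * For example (worked illustration, not upstream text): npy_gcd(12, 18) + * returns 6 and npy_lcm(4, 6) returns 12; the u/l/ll-suffixed variants do + * the same for the other integer widths, and the npy_lshift and npy_rshift + * families wrap the << and >> operators.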
+ */ +NPY_INPLACE npy_uint npy_gcdu(npy_uint a, npy_uint b); +NPY_INPLACE npy_uint npy_lcmu(npy_uint a, npy_uint b); +NPY_INPLACE npy_ulong npy_gcdul(npy_ulong a, npy_ulong b); +NPY_INPLACE npy_ulong npy_lcmul(npy_ulong a, npy_ulong b); +NPY_INPLACE npy_ulonglong npy_gcdull(npy_ulonglong a, npy_ulonglong b); +NPY_INPLACE npy_ulonglong npy_lcmull(npy_ulonglong a, npy_ulonglong b); + +NPY_INPLACE npy_int npy_gcd(npy_int a, npy_int b); +NPY_INPLACE npy_int npy_lcm(npy_int a, npy_int b); +NPY_INPLACE npy_long npy_gcdl(npy_long a, npy_long b); +NPY_INPLACE npy_long npy_lcml(npy_long a, npy_long b); +NPY_INPLACE npy_longlong npy_gcdll(npy_longlong a, npy_longlong b); +NPY_INPLACE npy_longlong npy_lcmll(npy_longlong a, npy_longlong b); + +NPY_INPLACE npy_ubyte npy_rshiftuhh(npy_ubyte a, npy_ubyte b); +NPY_INPLACE npy_ubyte npy_lshiftuhh(npy_ubyte a, npy_ubyte b); +NPY_INPLACE npy_ushort npy_rshiftuh(npy_ushort a, npy_ushort b); +NPY_INPLACE npy_ushort npy_lshiftuh(npy_ushort a, npy_ushort b); +NPY_INPLACE npy_uint npy_rshiftu(npy_uint a, npy_uint b); +NPY_INPLACE npy_uint npy_lshiftu(npy_uint a, npy_uint b); +NPY_INPLACE npy_ulong npy_rshiftul(npy_ulong a, npy_ulong b); +NPY_INPLACE npy_ulong npy_lshiftul(npy_ulong a, npy_ulong b); +NPY_INPLACE npy_ulonglong npy_rshiftull(npy_ulonglong a, npy_ulonglong b); +NPY_INPLACE npy_ulonglong npy_lshiftull(npy_ulonglong a, npy_ulonglong b); + +NPY_INPLACE npy_byte npy_rshifthh(npy_byte a, npy_byte b); +NPY_INPLACE npy_byte npy_lshifthh(npy_byte a, npy_byte b); +NPY_INPLACE npy_short npy_rshifth(npy_short a, npy_short b); +NPY_INPLACE npy_short npy_lshifth(npy_short a, npy_short b); +NPY_INPLACE npy_int npy_rshift(npy_int a, npy_int b); +NPY_INPLACE npy_int npy_lshift(npy_int a, npy_int b); +NPY_INPLACE npy_long npy_rshiftl(npy_long a, npy_long b); +NPY_INPLACE npy_long npy_lshiftl(npy_long a, npy_long b); +NPY_INPLACE npy_longlong npy_rshiftll(npy_longlong a, npy_longlong b); +NPY_INPLACE npy_longlong npy_lshiftll(npy_longlong a, npy_longlong b); + +/* + * C99 double math funcs + */ +NPY_INPLACE double npy_sin(double x); +NPY_INPLACE double npy_cos(double x); +NPY_INPLACE double npy_tan(double x); +NPY_INPLACE double npy_sinh(double x); +NPY_INPLACE double npy_cosh(double x); +NPY_INPLACE double npy_tanh(double x); + +NPY_INPLACE double npy_asin(double x); +NPY_INPLACE double npy_acos(double x); +NPY_INPLACE double npy_atan(double x); + +NPY_INPLACE double npy_log(double x); +NPY_INPLACE double npy_log10(double x); +NPY_INPLACE double npy_exp(double x); +NPY_INPLACE double npy_sqrt(double x); +NPY_INPLACE double npy_cbrt(double x); + +NPY_INPLACE double npy_fabs(double x); +NPY_INPLACE double npy_ceil(double x); +NPY_INPLACE double npy_fmod(double x, double y); +NPY_INPLACE double npy_floor(double x); + +NPY_INPLACE double npy_expm1(double x); +NPY_INPLACE double npy_log1p(double x); +NPY_INPLACE double npy_hypot(double x, double y); +NPY_INPLACE double npy_acosh(double x); +NPY_INPLACE double npy_asinh(double xx); +NPY_INPLACE double npy_atanh(double x); +NPY_INPLACE double npy_rint(double x); +NPY_INPLACE double npy_trunc(double x); +NPY_INPLACE double npy_exp2(double x); +NPY_INPLACE double npy_log2(double x); + +NPY_INPLACE double npy_atan2(double x, double y); +NPY_INPLACE double npy_pow(double x, double y); +NPY_INPLACE double npy_modf(double x, double* y); +NPY_INPLACE double npy_frexp(double x, int* y); +NPY_INPLACE double npy_ldexp(double n, int y); + +NPY_INPLACE double npy_copysign(double x, double y); +double npy_nextafter(double x, double y); 
+double npy_spacing(double x); + +/* + * IEEE 754 fpu handling. Those are guaranteed to be macros + */ + +/* use builtins to avoid function calls in tight loops + * only available if npy_config.h is available (= numpys own build) */ +#ifdef HAVE___BUILTIN_ISNAN + #define npy_isnan(x) __builtin_isnan(x) +#else + #ifndef NPY_HAVE_DECL_ISNAN + #define npy_isnan(x) ((x) != (x)) + #else + #if defined(_MSC_VER) && (_MSC_VER < 1900) + #define npy_isnan(x) _isnan((x)) + #else + #define npy_isnan(x) isnan(x) + #endif + #endif +#endif + + +/* only available if npy_config.h is available (= numpys own build) */ +#ifdef HAVE___BUILTIN_ISFINITE + #define npy_isfinite(x) __builtin_isfinite(x) +#else + #ifndef NPY_HAVE_DECL_ISFINITE + #ifdef _MSC_VER + #define npy_isfinite(x) _finite((x)) + #else + #define npy_isfinite(x) !npy_isnan((x) + (-x)) + #endif + #else + #define npy_isfinite(x) isfinite((x)) + #endif +#endif + +/* only available if npy_config.h is available (= numpys own build) */ +#ifdef HAVE___BUILTIN_ISINF + #define npy_isinf(x) __builtin_isinf(x) +#else + #ifndef NPY_HAVE_DECL_ISINF + #define npy_isinf(x) (!npy_isfinite(x) && !npy_isnan(x)) + #else + #if defined(_MSC_VER) && (_MSC_VER < 1900) + #define npy_isinf(x) (!_finite((x)) && !_isnan((x))) + #else + #define npy_isinf(x) isinf((x)) + #endif + #endif +#endif + +#ifndef NPY_HAVE_DECL_SIGNBIT + int _npy_signbit_f(float x); + int _npy_signbit_d(double x); + int _npy_signbit_ld(long double x); + #define npy_signbit(x) \ + (sizeof (x) == sizeof (long double) ? _npy_signbit_ld (x) \ + : sizeof (x) == sizeof (double) ? _npy_signbit_d (x) \ + : _npy_signbit_f (x)) +#else + #define npy_signbit(x) signbit((x)) +#endif + +/* + * float C99 math functions + */ +NPY_INPLACE float npy_sinf(float x); +NPY_INPLACE float npy_cosf(float x); +NPY_INPLACE float npy_tanf(float x); +NPY_INPLACE float npy_sinhf(float x); +NPY_INPLACE float npy_coshf(float x); +NPY_INPLACE float npy_tanhf(float x); +NPY_INPLACE float npy_fabsf(float x); +NPY_INPLACE float npy_floorf(float x); +NPY_INPLACE float npy_ceilf(float x); +NPY_INPLACE float npy_rintf(float x); +NPY_INPLACE float npy_truncf(float x); +NPY_INPLACE float npy_sqrtf(float x); +NPY_INPLACE float npy_cbrtf(float x); +NPY_INPLACE float npy_log10f(float x); +NPY_INPLACE float npy_logf(float x); +NPY_INPLACE float npy_expf(float x); +NPY_INPLACE float npy_expm1f(float x); +NPY_INPLACE float npy_asinf(float x); +NPY_INPLACE float npy_acosf(float x); +NPY_INPLACE float npy_atanf(float x); +NPY_INPLACE float npy_asinhf(float x); +NPY_INPLACE float npy_acoshf(float x); +NPY_INPLACE float npy_atanhf(float x); +NPY_INPLACE float npy_log1pf(float x); +NPY_INPLACE float npy_exp2f(float x); +NPY_INPLACE float npy_log2f(float x); + +NPY_INPLACE float npy_atan2f(float x, float y); +NPY_INPLACE float npy_hypotf(float x, float y); +NPY_INPLACE float npy_powf(float x, float y); +NPY_INPLACE float npy_fmodf(float x, float y); + +NPY_INPLACE float npy_modff(float x, float* y); +NPY_INPLACE float npy_frexpf(float x, int* y); +NPY_INPLACE float npy_ldexpf(float x, int y); + +NPY_INPLACE float npy_copysignf(float x, float y); +float npy_nextafterf(float x, float y); +float npy_spacingf(float x); + +/* + * long double C99 math functions + */ +NPY_INPLACE npy_longdouble npy_sinl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_cosl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_tanl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_sinhl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_coshl(npy_longdouble x); +NPY_INPLACE 
npy_longdouble npy_tanhl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_fabsl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_floorl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_ceill(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_rintl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_truncl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_sqrtl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_cbrtl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_log10l(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_logl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_expl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_expm1l(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_asinl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_acosl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_atanl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_asinhl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_acoshl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_atanhl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_log1pl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_exp2l(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_log2l(npy_longdouble x); + +NPY_INPLACE npy_longdouble npy_atan2l(npy_longdouble x, npy_longdouble y); +NPY_INPLACE npy_longdouble npy_hypotl(npy_longdouble x, npy_longdouble y); +NPY_INPLACE npy_longdouble npy_powl(npy_longdouble x, npy_longdouble y); +NPY_INPLACE npy_longdouble npy_fmodl(npy_longdouble x, npy_longdouble y); + +NPY_INPLACE npy_longdouble npy_modfl(npy_longdouble x, npy_longdouble* y); +NPY_INPLACE npy_longdouble npy_frexpl(npy_longdouble x, int* y); +NPY_INPLACE npy_longdouble npy_ldexpl(npy_longdouble x, int y); + +NPY_INPLACE npy_longdouble npy_copysignl(npy_longdouble x, npy_longdouble y); +npy_longdouble npy_nextafterl(npy_longdouble x, npy_longdouble y); +npy_longdouble npy_spacingl(npy_longdouble x); + +/* + * Non standard functions + */ +NPY_INPLACE double npy_deg2rad(double x); +NPY_INPLACE double npy_rad2deg(double x); +NPY_INPLACE double npy_logaddexp(double x, double y); +NPY_INPLACE double npy_logaddexp2(double x, double y); +NPY_INPLACE double npy_divmod(double x, double y, double *modulus); +NPY_INPLACE double npy_heaviside(double x, double h0); + +NPY_INPLACE float npy_deg2radf(float x); +NPY_INPLACE float npy_rad2degf(float x); +NPY_INPLACE float npy_logaddexpf(float x, float y); +NPY_INPLACE float npy_logaddexp2f(float x, float y); +NPY_INPLACE float npy_divmodf(float x, float y, float *modulus); +NPY_INPLACE float npy_heavisidef(float x, float h0); + +NPY_INPLACE npy_longdouble npy_deg2radl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_rad2degl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_logaddexpl(npy_longdouble x, npy_longdouble y); +NPY_INPLACE npy_longdouble npy_logaddexp2l(npy_longdouble x, npy_longdouble y); +NPY_INPLACE npy_longdouble npy_divmodl(npy_longdouble x, npy_longdouble y, + npy_longdouble *modulus); +NPY_INPLACE npy_longdouble npy_heavisidel(npy_longdouble x, npy_longdouble h0); + +#define npy_degrees npy_rad2deg +#define npy_degreesf npy_rad2degf +#define npy_degreesl npy_rad2degl + +#define npy_radians npy_deg2rad +#define npy_radiansf npy_deg2radf +#define npy_radiansl npy_deg2radl + +/* + * Complex declarations + */ + +/* + * C99 specifies that complex numbers have the same representation as + * an array of two elements, where the first element is the real part + * and the second element is the imaginary part. 
+ */
+#define __NPY_CPACK_IMP(x, y, type, ctype) \
+ union { \
+ ctype z; \
+ type a[2]; \
+ } z1; \
+ \
+ z1.a[0] = (x); \
+ z1.a[1] = (y); \
+ \
+ return z1.z;
+
+static NPY_INLINE npy_cdouble npy_cpack(double x, double y)
+{
+ __NPY_CPACK_IMP(x, y, double, npy_cdouble);
+}
+
+static NPY_INLINE npy_cfloat npy_cpackf(float x, float y)
+{
+ __NPY_CPACK_IMP(x, y, float, npy_cfloat);
+}
+
+static NPY_INLINE npy_clongdouble npy_cpackl(npy_longdouble x, npy_longdouble y)
+{
+ __NPY_CPACK_IMP(x, y, npy_longdouble, npy_clongdouble);
+}
+#undef __NPY_CPACK_IMP
+
+/*
+ * Same remark as above, but in the other direction: extract first/second
+ * member of complex number, assuming a C99-compatible representation
+ *
+ * Those are defined as static inline, and such as a reasonable compiler would
+ * most likely compile this to one or two instructions (on CISC at least)
+ */
+#define __NPY_CEXTRACT_IMP(z, index, type, ctype) \
+ union { \
+ ctype z; \
+ type a[2]; \
+ } __z_repr; \
+ __z_repr.z = z; \
+ \
+ return __z_repr.a[index];
+
+static NPY_INLINE double npy_creal(npy_cdouble z)
+{
+ __NPY_CEXTRACT_IMP(z, 0, double, npy_cdouble);
+}
+
+static NPY_INLINE double npy_cimag(npy_cdouble z)
+{
+ __NPY_CEXTRACT_IMP(z, 1, double, npy_cdouble);
+}
+
+static NPY_INLINE float npy_crealf(npy_cfloat z)
+{
+ __NPY_CEXTRACT_IMP(z, 0, float, npy_cfloat);
+}
+
+static NPY_INLINE float npy_cimagf(npy_cfloat z)
+{
+ __NPY_CEXTRACT_IMP(z, 1, float, npy_cfloat);
+}
+
+static NPY_INLINE npy_longdouble npy_creall(npy_clongdouble z)
+{
+ __NPY_CEXTRACT_IMP(z, 0, npy_longdouble, npy_clongdouble);
+}
+
+static NPY_INLINE npy_longdouble npy_cimagl(npy_clongdouble z)
+{
+ __NPY_CEXTRACT_IMP(z, 1, npy_longdouble, npy_clongdouble);
+}
+#undef __NPY_CEXTRACT_IMP
+
+/*
+ * Double precision complex functions
+ */
+double npy_cabs(npy_cdouble z);
+double npy_carg(npy_cdouble z);
+
+npy_cdouble npy_cexp(npy_cdouble z);
+npy_cdouble npy_clog(npy_cdouble z);
+npy_cdouble npy_cpow(npy_cdouble x, npy_cdouble y);
+
+npy_cdouble npy_csqrt(npy_cdouble z);
+
+npy_cdouble npy_ccos(npy_cdouble z);
+npy_cdouble npy_csin(npy_cdouble z);
+npy_cdouble npy_ctan(npy_cdouble z);
+
+npy_cdouble npy_ccosh(npy_cdouble z);
+npy_cdouble npy_csinh(npy_cdouble z);
+npy_cdouble npy_ctanh(npy_cdouble z);
+
+npy_cdouble npy_cacos(npy_cdouble z);
+npy_cdouble npy_casin(npy_cdouble z);
+npy_cdouble npy_catan(npy_cdouble z);
+
+npy_cdouble npy_cacosh(npy_cdouble z);
+npy_cdouble npy_casinh(npy_cdouble z);
+npy_cdouble npy_catanh(npy_cdouble z);
+
+/*
+ * Single precision complex functions
+ */
+float npy_cabsf(npy_cfloat z);
+float npy_cargf(npy_cfloat z);
+
+npy_cfloat npy_cexpf(npy_cfloat z);
+npy_cfloat npy_clogf(npy_cfloat z);
+npy_cfloat npy_cpowf(npy_cfloat x, npy_cfloat y);
+
+npy_cfloat npy_csqrtf(npy_cfloat z);
+
+npy_cfloat npy_ccosf(npy_cfloat z);
+npy_cfloat npy_csinf(npy_cfloat z);
+npy_cfloat npy_ctanf(npy_cfloat z);
+
+npy_cfloat npy_ccoshf(npy_cfloat z);
+npy_cfloat npy_csinhf(npy_cfloat z);
+npy_cfloat npy_ctanhf(npy_cfloat z);
+
+npy_cfloat npy_cacosf(npy_cfloat z);
+npy_cfloat npy_casinf(npy_cfloat z);
+npy_cfloat npy_catanf(npy_cfloat z);
+
+npy_cfloat npy_cacoshf(npy_cfloat z);
+npy_cfloat npy_casinhf(npy_cfloat z);
+npy_cfloat npy_catanhf(npy_cfloat z);
+
+
+/*
+ * Extended precision complex functions
+ */
+npy_longdouble npy_cabsl(npy_clongdouble z);
+npy_longdouble npy_cargl(npy_clongdouble z);
+
+npy_clongdouble npy_cexpl(npy_clongdouble z);
+npy_clongdouble npy_clogl(npy_clongdouble z);
+npy_clongdouble npy_cpowl(npy_clongdouble x,
npy_clongdouble y); + +npy_clongdouble npy_csqrtl(npy_clongdouble z); + +npy_clongdouble npy_ccosl(npy_clongdouble z); +npy_clongdouble npy_csinl(npy_clongdouble z); +npy_clongdouble npy_ctanl(npy_clongdouble z); + +npy_clongdouble npy_ccoshl(npy_clongdouble z); +npy_clongdouble npy_csinhl(npy_clongdouble z); +npy_clongdouble npy_ctanhl(npy_clongdouble z); + +npy_clongdouble npy_cacosl(npy_clongdouble z); +npy_clongdouble npy_casinl(npy_clongdouble z); +npy_clongdouble npy_catanl(npy_clongdouble z); + +npy_clongdouble npy_cacoshl(npy_clongdouble z); +npy_clongdouble npy_casinhl(npy_clongdouble z); +npy_clongdouble npy_catanhl(npy_clongdouble z); + + +/* + * Functions that set the floating point error + * status word. + */ + +/* + * platform-dependent code translates floating point + * status to an integer sum of these values + */ +#define NPY_FPE_DIVIDEBYZERO 1 +#define NPY_FPE_OVERFLOW 2 +#define NPY_FPE_UNDERFLOW 4 +#define NPY_FPE_INVALID 8 + +int npy_clear_floatstatus_barrier(char*); +int npy_get_floatstatus_barrier(char*); +/* + * use caution with these - clang and gcc8.1 are known to reorder calls + * to this form of the function which can defeat the check. The _barrier + * form of the call is preferable, where the argument is + * (char*)&local_variable + */ +int npy_clear_floatstatus(void); +int npy_get_floatstatus(void); + +void npy_set_floatstatus_divbyzero(void); +void npy_set_floatstatus_overflow(void); +void npy_set_floatstatus_underflow(void); +void npy_set_floatstatus_invalid(void); + +#ifdef __cplusplus +} +#endif + +#if NPY_INLINE_MATH +#include "npy_math_internal.h" +#endif + +#endif diff --git a/MLPY/Lib/site-packages/numpy/core/include/numpy/npy_no_deprecated_api.h b/MLPY/Lib/site-packages/numpy/core/include/numpy/npy_no_deprecated_api.h new file mode 100644 index 0000000000000000000000000000000000000000..b22c5e1df043a3a722a54707a0dea7df976e04d9 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/include/numpy/npy_no_deprecated_api.h @@ -0,0 +1,19 @@ +/* + * This include file is provided for inclusion in Cython *.pyd files where + * one would like to define the NPY_NO_DEPRECATED_API macro. It can be + * included by + * + * cdef extern from "npy_no_deprecated_api.h": pass + * + */ +#ifndef NPY_NO_DEPRECATED_API + +/* put this check here since there may be multiple includes in C extensions. */ +#if defined(NDARRAYTYPES_H) || defined(_NPY_DEPRECATED_API_H) || \ + defined(OLD_DEFINES_H) +#error "npy_no_deprecated_api.h" must be first among numpy includes. 
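Stepping back to the floating-point status functions at the end of npy_math.h above: the caution that clang and gcc 8.1 can reorder the plain npy_get_floatstatus()/npy_clear_floatstatus() calls is the reason the _barrier variants take a pointer argument. A sketch of the recommended pattern (checked_div is an illustrative helper, not part of the header):

    #include "numpy/npy_math.h"

    /* divide a by b and report whether divide-by-zero was raised */
    static double checked_div(double a, double b, int *divzero)
    {
        double result;

        npy_clear_floatstatus_barrier((char *)&result);
        result = a / b;
        /* (char *)&result ties the status query to the local variable,
           so the compiler cannot hoist the check above the division */
        *divzero = npy_get_floatstatus_barrier((char *)&result)
                       & NPY_FPE_DIVIDEBYZERO;
        return result;
    }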
+#else +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#endif + +#endif diff --git a/MLPY/Lib/site-packages/numpy/core/include/numpy/npy_os.h b/MLPY/Lib/site-packages/numpy/core/include/numpy/npy_os.h new file mode 100644 index 0000000000000000000000000000000000000000..1c19c0e2f05bd832f5716100557b2e1d34d8ee44 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/include/numpy/npy_os.h @@ -0,0 +1,30 @@ +#ifndef _NPY_OS_H_ +#define _NPY_OS_H_ + +#if defined(linux) || defined(__linux) || defined(__linux__) + #define NPY_OS_LINUX +#elif defined(__FreeBSD__) || defined(__NetBSD__) || \ + defined(__OpenBSD__) || defined(__DragonFly__) + #define NPY_OS_BSD + #ifdef __FreeBSD__ + #define NPY_OS_FREEBSD + #elif defined(__NetBSD__) + #define NPY_OS_NETBSD + #elif defined(__OpenBSD__) + #define NPY_OS_OPENBSD + #elif defined(__DragonFly__) + #define NPY_OS_DRAGONFLY + #endif +#elif defined(sun) || defined(__sun) + #define NPY_OS_SOLARIS +#elif defined(__CYGWIN__) + #define NPY_OS_CYGWIN +#elif defined(_WIN32) || defined(__WIN32__) || defined(WIN32) + #define NPY_OS_WIN32 +#elif defined(__APPLE__) + #define NPY_OS_DARWIN +#else + #define NPY_OS_UNKNOWN +#endif + +#endif diff --git a/MLPY/Lib/site-packages/numpy/core/include/numpy/numpyconfig.h b/MLPY/Lib/site-packages/numpy/core/include/numpy/numpyconfig.h new file mode 100644 index 0000000000000000000000000000000000000000..1d7c52e99f6e27068847194b285e71761514e4b2 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/include/numpy/numpyconfig.h @@ -0,0 +1,47 @@ +#ifndef _NPY_NUMPYCONFIG_H_ +#define _NPY_NUMPYCONFIG_H_ + +#include "_numpyconfig.h" + +/* + * On Mac OS X, because there is only one configuration stage for all the archs + * in universal builds, any macro which depends on the arch needs to be + * hardcoded + */ +#ifdef __APPLE__ + #undef NPY_SIZEOF_LONG + #undef NPY_SIZEOF_PY_INTPTR_T + + #ifdef __LP64__ + #define NPY_SIZEOF_LONG 8 + #define NPY_SIZEOF_PY_INTPTR_T 8 + #else + #define NPY_SIZEOF_LONG 4 + #define NPY_SIZEOF_PY_INTPTR_T 4 + #endif +#endif + +/** + * To help with the NPY_NO_DEPRECATED_API macro, we include API version + * numbers for specific versions of NumPy. To exclude all API that was + * deprecated as of 1.7, add the following before #including any NumPy + * headers: + * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + */ +#define NPY_1_7_API_VERSION 0x00000007 +#define NPY_1_8_API_VERSION 0x00000008 +#define NPY_1_9_API_VERSION 0x00000008 +#define NPY_1_10_API_VERSION 0x00000008 +#define NPY_1_11_API_VERSION 0x00000008 +#define NPY_1_12_API_VERSION 0x00000008 +#define NPY_1_13_API_VERSION 0x00000008 +#define NPY_1_14_API_VERSION 0x00000008 +#define NPY_1_15_API_VERSION 0x00000008 +#define NPY_1_16_API_VERSION 0x00000008 +#define NPY_1_17_API_VERSION 0x00000008 +#define NPY_1_18_API_VERSION 0x00000008 +#define NPY_1_19_API_VERSION 0x00000008 +#define NPY_1_20_API_VERSION 0x0000000e +#define NPY_1_21_API_VERSION 0x0000000e + +#endif diff --git a/MLPY/Lib/site-packages/numpy/core/include/numpy/old_defines.h b/MLPY/Lib/site-packages/numpy/core/include/numpy/old_defines.h new file mode 100644 index 0000000000000000000000000000000000000000..a7dbc112ba392e39f8cc3020452eebf3b21b0099 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/include/numpy/old_defines.h @@ -0,0 +1,187 @@ +/* This header is deprecated as of NumPy 1.7 */ +#ifndef OLD_DEFINES_H +#define OLD_DEFINES_H + +#if defined(NPY_NO_DEPRECATED_API) && NPY_NO_DEPRECATED_API >= NPY_1_7_API_VERSION +#error The header "old_defines.h" is deprecated as of NumPy 1.7. 
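The #error just above is the enforcement half of the NPY_NO_DEPRECATED_API scheme that numpyconfig.h documents: once an extension pins an API version of 1.7 or later, pulling in old_defines.h becomes a compile-time failure. A sketch of how a module opts in (the translation unit and function are hypothetical):

    /* must come before the first NumPy header is included */
    #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION

    #include <Python.h>
    #include <numpy/arrayobject.h>

    static int modern_typenum(void)
    {
        /* the pre-1.7 spelling PyArray_DOUBLE would now be rejected;
           the current name NPY_DOUBLE must be used instead */
        return NPY_DOUBLE;
    }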
+#endif + +#define NDARRAY_VERSION NPY_VERSION + +#define PyArray_MIN_BUFSIZE NPY_MIN_BUFSIZE +#define PyArray_MAX_BUFSIZE NPY_MAX_BUFSIZE +#define PyArray_BUFSIZE NPY_BUFSIZE + +#define PyArray_PRIORITY NPY_PRIORITY +#define PyArray_SUBTYPE_PRIORITY NPY_PRIORITY +#define PyArray_NUM_FLOATTYPE NPY_NUM_FLOATTYPE + +#define NPY_MAX PyArray_MAX +#define NPY_MIN PyArray_MIN + +#define PyArray_TYPES NPY_TYPES +#define PyArray_BOOL NPY_BOOL +#define PyArray_BYTE NPY_BYTE +#define PyArray_UBYTE NPY_UBYTE +#define PyArray_SHORT NPY_SHORT +#define PyArray_USHORT NPY_USHORT +#define PyArray_INT NPY_INT +#define PyArray_UINT NPY_UINT +#define PyArray_LONG NPY_LONG +#define PyArray_ULONG NPY_ULONG +#define PyArray_LONGLONG NPY_LONGLONG +#define PyArray_ULONGLONG NPY_ULONGLONG +#define PyArray_HALF NPY_HALF +#define PyArray_FLOAT NPY_FLOAT +#define PyArray_DOUBLE NPY_DOUBLE +#define PyArray_LONGDOUBLE NPY_LONGDOUBLE +#define PyArray_CFLOAT NPY_CFLOAT +#define PyArray_CDOUBLE NPY_CDOUBLE +#define PyArray_CLONGDOUBLE NPY_CLONGDOUBLE +#define PyArray_OBJECT NPY_OBJECT +#define PyArray_STRING NPY_STRING +#define PyArray_UNICODE NPY_UNICODE +#define PyArray_VOID NPY_VOID +#define PyArray_DATETIME NPY_DATETIME +#define PyArray_TIMEDELTA NPY_TIMEDELTA +#define PyArray_NTYPES NPY_NTYPES +#define PyArray_NOTYPE NPY_NOTYPE +#define PyArray_CHAR NPY_CHAR +#define PyArray_USERDEF NPY_USERDEF +#define PyArray_NUMUSERTYPES NPY_NUMUSERTYPES + +#define PyArray_INTP NPY_INTP +#define PyArray_UINTP NPY_UINTP + +#define PyArray_INT8 NPY_INT8 +#define PyArray_UINT8 NPY_UINT8 +#define PyArray_INT16 NPY_INT16 +#define PyArray_UINT16 NPY_UINT16 +#define PyArray_INT32 NPY_INT32 +#define PyArray_UINT32 NPY_UINT32 + +#ifdef NPY_INT64 +#define PyArray_INT64 NPY_INT64 +#define PyArray_UINT64 NPY_UINT64 +#endif + +#ifdef NPY_INT128 +#define PyArray_INT128 NPY_INT128 +#define PyArray_UINT128 NPY_UINT128 +#endif + +#ifdef NPY_FLOAT16 +#define PyArray_FLOAT16 NPY_FLOAT16 +#define PyArray_COMPLEX32 NPY_COMPLEX32 +#endif + +#ifdef NPY_FLOAT80 +#define PyArray_FLOAT80 NPY_FLOAT80 +#define PyArray_COMPLEX160 NPY_COMPLEX160 +#endif + +#ifdef NPY_FLOAT96 +#define PyArray_FLOAT96 NPY_FLOAT96 +#define PyArray_COMPLEX192 NPY_COMPLEX192 +#endif + +#ifdef NPY_FLOAT128 +#define PyArray_FLOAT128 NPY_FLOAT128 +#define PyArray_COMPLEX256 NPY_COMPLEX256 +#endif + +#define PyArray_FLOAT32 NPY_FLOAT32 +#define PyArray_COMPLEX64 NPY_COMPLEX64 +#define PyArray_FLOAT64 NPY_FLOAT64 +#define PyArray_COMPLEX128 NPY_COMPLEX128 + + +#define PyArray_TYPECHAR NPY_TYPECHAR +#define PyArray_BOOLLTR NPY_BOOLLTR +#define PyArray_BYTELTR NPY_BYTELTR +#define PyArray_UBYTELTR NPY_UBYTELTR +#define PyArray_SHORTLTR NPY_SHORTLTR +#define PyArray_USHORTLTR NPY_USHORTLTR +#define PyArray_INTLTR NPY_INTLTR +#define PyArray_UINTLTR NPY_UINTLTR +#define PyArray_LONGLTR NPY_LONGLTR +#define PyArray_ULONGLTR NPY_ULONGLTR +#define PyArray_LONGLONGLTR NPY_LONGLONGLTR +#define PyArray_ULONGLONGLTR NPY_ULONGLONGLTR +#define PyArray_HALFLTR NPY_HALFLTR +#define PyArray_FLOATLTR NPY_FLOATLTR +#define PyArray_DOUBLELTR NPY_DOUBLELTR +#define PyArray_LONGDOUBLELTR NPY_LONGDOUBLELTR +#define PyArray_CFLOATLTR NPY_CFLOATLTR +#define PyArray_CDOUBLELTR NPY_CDOUBLELTR +#define PyArray_CLONGDOUBLELTR NPY_CLONGDOUBLELTR +#define PyArray_OBJECTLTR NPY_OBJECTLTR +#define PyArray_STRINGLTR NPY_STRINGLTR +#define PyArray_STRINGLTR2 NPY_STRINGLTR2 +#define PyArray_UNICODELTR NPY_UNICODELTR +#define PyArray_VOIDLTR NPY_VOIDLTR +#define PyArray_DATETIMELTR NPY_DATETIMELTR +#define 
PyArray_TIMEDELTALTR NPY_TIMEDELTALTR +#define PyArray_CHARLTR NPY_CHARLTR +#define PyArray_INTPLTR NPY_INTPLTR +#define PyArray_UINTPLTR NPY_UINTPLTR +#define PyArray_GENBOOLLTR NPY_GENBOOLLTR +#define PyArray_SIGNEDLTR NPY_SIGNEDLTR +#define PyArray_UNSIGNEDLTR NPY_UNSIGNEDLTR +#define PyArray_FLOATINGLTR NPY_FLOATINGLTR +#define PyArray_COMPLEXLTR NPY_COMPLEXLTR + +#define PyArray_QUICKSORT NPY_QUICKSORT +#define PyArray_HEAPSORT NPY_HEAPSORT +#define PyArray_MERGESORT NPY_MERGESORT +#define PyArray_SORTKIND NPY_SORTKIND +#define PyArray_NSORTS NPY_NSORTS + +#define PyArray_NOSCALAR NPY_NOSCALAR +#define PyArray_BOOL_SCALAR NPY_BOOL_SCALAR +#define PyArray_INTPOS_SCALAR NPY_INTPOS_SCALAR +#define PyArray_INTNEG_SCALAR NPY_INTNEG_SCALAR +#define PyArray_FLOAT_SCALAR NPY_FLOAT_SCALAR +#define PyArray_COMPLEX_SCALAR NPY_COMPLEX_SCALAR +#define PyArray_OBJECT_SCALAR NPY_OBJECT_SCALAR +#define PyArray_SCALARKIND NPY_SCALARKIND +#define PyArray_NSCALARKINDS NPY_NSCALARKINDS + +#define PyArray_ANYORDER NPY_ANYORDER +#define PyArray_CORDER NPY_CORDER +#define PyArray_FORTRANORDER NPY_FORTRANORDER +#define PyArray_ORDER NPY_ORDER + +#define PyDescr_ISBOOL PyDataType_ISBOOL +#define PyDescr_ISUNSIGNED PyDataType_ISUNSIGNED +#define PyDescr_ISSIGNED PyDataType_ISSIGNED +#define PyDescr_ISINTEGER PyDataType_ISINTEGER +#define PyDescr_ISFLOAT PyDataType_ISFLOAT +#define PyDescr_ISNUMBER PyDataType_ISNUMBER +#define PyDescr_ISSTRING PyDataType_ISSTRING +#define PyDescr_ISCOMPLEX PyDataType_ISCOMPLEX +#define PyDescr_ISPYTHON PyDataType_ISPYTHON +#define PyDescr_ISFLEXIBLE PyDataType_ISFLEXIBLE +#define PyDescr_ISUSERDEF PyDataType_ISUSERDEF +#define PyDescr_ISEXTENDED PyDataType_ISEXTENDED +#define PyDescr_ISOBJECT PyDataType_ISOBJECT +#define PyDescr_HASFIELDS PyDataType_HASFIELDS + +#define PyArray_LITTLE NPY_LITTLE +#define PyArray_BIG NPY_BIG +#define PyArray_NATIVE NPY_NATIVE +#define PyArray_SWAP NPY_SWAP +#define PyArray_IGNORE NPY_IGNORE + +#define PyArray_NATBYTE NPY_NATBYTE +#define PyArray_OPPBYTE NPY_OPPBYTE + +#define PyArray_MAX_ELSIZE NPY_MAX_ELSIZE + +#define PyArray_USE_PYMEM NPY_USE_PYMEM + +#define PyArray_RemoveLargest PyArray_RemoveSmallest + +#define PyArray_UCS4 npy_ucs4 + +#endif diff --git a/MLPY/Lib/site-packages/numpy/core/include/numpy/oldnumeric.h b/MLPY/Lib/site-packages/numpy/core/include/numpy/oldnumeric.h new file mode 100644 index 0000000000000000000000000000000000000000..6f8ac242cf568934278f1c4e2686d3ea38b70fb8 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/include/numpy/oldnumeric.h @@ -0,0 +1,25 @@ +#include "arrayobject.h" + +#ifndef PYPY_VERSION +#ifndef REFCOUNT +# define REFCOUNT NPY_REFCOUNT +# define MAX_ELSIZE 16 +#endif +#endif + +#define PyArray_UNSIGNED_TYPES +#define PyArray_SBYTE NPY_BYTE +#define PyArray_CopyArray PyArray_CopyInto +#define _PyArray_multiply_list PyArray_MultiplyIntList +#define PyArray_ISSPACESAVER(m) NPY_FALSE +#define PyScalarArray_Check PyArray_CheckScalar + +#define CONTIGUOUS NPY_CONTIGUOUS +#define OWN_DIMENSIONS 0 +#define OWN_STRIDES 0 +#define OWN_DATA NPY_OWNDATA +#define SAVESPACE 0 +#define SAVESPACEBIT 0 + +#undef import_array +#define import_array() { if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); } } diff --git a/MLPY/Lib/site-packages/numpy/core/include/numpy/random/bitgen.h b/MLPY/Lib/site-packages/numpy/core/include/numpy/random/bitgen.h new file mode 100644 index 
0000000000000000000000000000000000000000..ce907ec69e6a05dab87a78d58ef7e85530e23386
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/core/include/numpy/random/bitgen.h
@@ -0,0 +1,20 @@
+#ifndef _RANDOM_BITGEN_H
+#define _RANDOM_BITGEN_H
+
+#pragma once
+#include <stddef.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+/* Must match the declaration in numpy/random/<any>.pxd */
+
+typedef struct bitgen {
+ void *state;
+ uint64_t (*next_uint64)(void *st);
+ uint32_t (*next_uint32)(void *st);
+ double (*next_double)(void *st);
+ uint64_t (*next_raw)(void *st);
+} bitgen_t;
+
+
+#endif diff --git a/MLPY/Lib/site-packages/numpy/core/include/numpy/random/distributions.h b/MLPY/Lib/site-packages/numpy/core/include/numpy/random/distributions.h
new file mode 100644
index 0000000000000000000000000000000000000000..78474ad600eb54db4c3d3794eb9bdc54ec77a0bd
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/core/include/numpy/random/distributions.h
@@ -0,0 +1,209 @@
+#ifndef _RANDOMDGEN__DISTRIBUTIONS_H_
+#define _RANDOMDGEN__DISTRIBUTIONS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "Python.h"
+#include "numpy/npy_common.h"
+#include <stddef.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+#include "numpy/npy_math.h"
+#include "numpy/random/bitgen.h"
+
+/*
+ * RAND_INT_TYPE is used to share integer generators with RandomState which
+ * used long in place of int64_t. If changing a distribution that uses
+ * RAND_INT_TYPE, then the original unmodified copy must be retained for
+ * use in RandomState by copying to the legacy distributions source file.
+ */
+#ifdef NP_RANDOM_LEGACY
+#define RAND_INT_TYPE long
+#define RAND_INT_MAX LONG_MAX
+#else
+#define RAND_INT_TYPE int64_t
+#define RAND_INT_MAX INT64_MAX
+#endif
+
+#ifdef _MSC_VER
+#define DECLDIR __declspec(dllexport)
+#else
+#define DECLDIR extern
+#endif
+
+#ifndef MIN
+#define MIN(x, y) (((x) < (y)) ? x : y)
+#define MAX(x, y) (((x) > (y)) ? x : y)
+#endif
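The bitgen_t struct above is the whole contract between a bit generator and the distribution functions declared below: any source that can populate the four function pointers can drive them. A sketch of the wiring (the counter state is deliberately trivial and illustrative, not a usable PRNG):

    #include <stdint.h>
    #include "numpy/random/bitgen.h"

    /* illustrative state: a bare counter, *not* a real PRNG */
    typedef struct { uint64_t counter; } demo_state;

    static uint64_t demo_next64(void *st)
    {
        return ++((demo_state *)st)->counter;
    }

    static uint32_t demo_next32(void *st)
    {
        return (uint32_t)demo_next64(st);
    }

    static double demo_next_double(void *st)
    {
        /* map the top 53 bits of a uint64 into [0, 1) */
        return (demo_next64(st) >> 11) * (1.0 / 9007199254740992.0);
    }

    static void demo_init(bitgen_t *bg, demo_state *st)
    {
        bg->state = st;
        bg->next_uint64 = demo_next64;
        bg->next_uint32 = demo_next32;
        bg->next_double = demo_next_double;
        bg->next_raw = demo_next64;
    }

A real generator keeps its PRNG state behind the state pointer and hands the initialized bitgen_t to the DECLDIR functions that follow, for example random_standard_uniform_fill.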
+
+#ifndef M_PI
+#define M_PI 3.14159265358979323846264338328
+#endif
+
+typedef struct s_binomial_t {
+ int has_binomial; /* !=0: following parameters initialized for binomial */
+ double psave;
+ RAND_INT_TYPE nsave;
+ double r;
+ double q;
+ double fm;
+ RAND_INT_TYPE m;
+ double p1;
+ double xm;
+ double xl;
+ double xr;
+ double c;
+ double laml;
+ double lamr;
+ double p2;
+ double p3;
+ double p4;
+} binomial_t;
+
+DECLDIR float random_standard_uniform_f(bitgen_t *bitgen_state);
+DECLDIR double random_standard_uniform(bitgen_t *bitgen_state);
+DECLDIR void random_standard_uniform_fill(bitgen_t *, npy_intp, double *);
+DECLDIR void random_standard_uniform_fill_f(bitgen_t *, npy_intp, float *);
+
+DECLDIR int64_t random_positive_int64(bitgen_t *bitgen_state);
+DECLDIR int32_t random_positive_int32(bitgen_t *bitgen_state);
+DECLDIR int64_t random_positive_int(bitgen_t *bitgen_state);
+DECLDIR uint64_t random_uint(bitgen_t *bitgen_state);
+
+DECLDIR double random_standard_exponential(bitgen_t *bitgen_state);
+DECLDIR float random_standard_exponential_f(bitgen_t *bitgen_state);
+DECLDIR void random_standard_exponential_fill(bitgen_t *, npy_intp, double *);
+DECLDIR void random_standard_exponential_fill_f(bitgen_t *, npy_intp, float *);
+DECLDIR void random_standard_exponential_inv_fill(bitgen_t *, npy_intp, double *);
+DECLDIR void random_standard_exponential_inv_fill_f(bitgen_t *, npy_intp, float *);
+
+DECLDIR double random_standard_normal(bitgen_t *bitgen_state);
+DECLDIR float random_standard_normal_f(bitgen_t *bitgen_state);
+DECLDIR void random_standard_normal_fill(bitgen_t *, npy_intp, double *);
+DECLDIR void random_standard_normal_fill_f(bitgen_t *, npy_intp, float *);
+DECLDIR double random_standard_gamma(bitgen_t *bitgen_state, double shape);
+DECLDIR float random_standard_gamma_f(bitgen_t *bitgen_state, float shape);
+
+DECLDIR double random_normal(bitgen_t *bitgen_state, double loc, double scale);
+
+DECLDIR double random_gamma(bitgen_t *bitgen_state, double shape, double scale);
+DECLDIR float random_gamma_f(bitgen_t *bitgen_state, float shape, float scale);
+
+DECLDIR double random_exponential(bitgen_t *bitgen_state, double scale);
+DECLDIR double random_uniform(bitgen_t *bitgen_state, double lower, double range);
+DECLDIR double random_beta(bitgen_t *bitgen_state, double a, double b);
+DECLDIR double random_chisquare(bitgen_t *bitgen_state, double df);
+DECLDIR double random_f(bitgen_t *bitgen_state, double dfnum, double dfden);
+DECLDIR double random_standard_cauchy(bitgen_t *bitgen_state);
+DECLDIR double random_pareto(bitgen_t *bitgen_state, double a);
+DECLDIR double random_weibull(bitgen_t *bitgen_state, double a);
+DECLDIR double random_power(bitgen_t *bitgen_state, double a);
+DECLDIR double random_laplace(bitgen_t *bitgen_state, double loc, double scale);
+DECLDIR double random_gumbel(bitgen_t *bitgen_state, double loc, double scale);
+DECLDIR double random_logistic(bitgen_t *bitgen_state, double loc, double scale);
+DECLDIR double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma);
+DECLDIR double random_rayleigh(bitgen_t *bitgen_state, double mode);
+DECLDIR double random_standard_t(bitgen_t *bitgen_state, double df);
+DECLDIR double random_noncentral_chisquare(bitgen_t *bitgen_state, double df,
+ double nonc);
+DECLDIR double random_noncentral_f(bitgen_t *bitgen_state, double dfnum,
+ double dfden, double nonc);
+DECLDIR double random_wald(bitgen_t *bitgen_state, double mean, double scale);
+DECLDIR double
random_vonmises(bitgen_t *bitgen_state, double mu, double kappa); +DECLDIR double random_triangular(bitgen_t *bitgen_state, double left, double mode, + double right); + +DECLDIR RAND_INT_TYPE random_poisson(bitgen_t *bitgen_state, double lam); +DECLDIR RAND_INT_TYPE random_negative_binomial(bitgen_t *bitgen_state, double n, + double p); + +DECLDIR int64_t random_binomial(bitgen_t *bitgen_state, double p, + int64_t n, binomial_t *binomial); + +DECLDIR int64_t random_logseries(bitgen_t *bitgen_state, double p); +DECLDIR int64_t random_geometric(bitgen_t *bitgen_state, double p); +DECLDIR RAND_INT_TYPE random_geometric_search(bitgen_t *bitgen_state, double p); +DECLDIR RAND_INT_TYPE random_zipf(bitgen_t *bitgen_state, double a); +DECLDIR int64_t random_hypergeometric(bitgen_t *bitgen_state, + int64_t good, int64_t bad, int64_t sample); +DECLDIR uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max); + +/* Generate random uint64 numbers in closed interval [off, off + rng]. */ +DECLDIR uint64_t random_bounded_uint64(bitgen_t *bitgen_state, uint64_t off, + uint64_t rng, uint64_t mask, + bool use_masked); + +/* Generate random uint32 numbers in closed interval [off, off + rng]. */ +DECLDIR uint32_t random_buffered_bounded_uint32(bitgen_t *bitgen_state, + uint32_t off, uint32_t rng, + uint32_t mask, bool use_masked, + int *bcnt, uint32_t *buf); +DECLDIR uint16_t random_buffered_bounded_uint16(bitgen_t *bitgen_state, + uint16_t off, uint16_t rng, + uint16_t mask, bool use_masked, + int *bcnt, uint32_t *buf); +DECLDIR uint8_t random_buffered_bounded_uint8(bitgen_t *bitgen_state, uint8_t off, + uint8_t rng, uint8_t mask, + bool use_masked, int *bcnt, + uint32_t *buf); +DECLDIR npy_bool random_buffered_bounded_bool(bitgen_t *bitgen_state, npy_bool off, + npy_bool rng, npy_bool mask, + bool use_masked, int *bcnt, + uint32_t *buf); + +DECLDIR void random_bounded_uint64_fill(bitgen_t *bitgen_state, uint64_t off, + uint64_t rng, npy_intp cnt, + bool use_masked, uint64_t *out); +DECLDIR void random_bounded_uint32_fill(bitgen_t *bitgen_state, uint32_t off, + uint32_t rng, npy_intp cnt, + bool use_masked, uint32_t *out); +DECLDIR void random_bounded_uint16_fill(bitgen_t *bitgen_state, uint16_t off, + uint16_t rng, npy_intp cnt, + bool use_masked, uint16_t *out); +DECLDIR void random_bounded_uint8_fill(bitgen_t *bitgen_state, uint8_t off, + uint8_t rng, npy_intp cnt, + bool use_masked, uint8_t *out); +DECLDIR void random_bounded_bool_fill(bitgen_t *bitgen_state, npy_bool off, + npy_bool rng, npy_intp cnt, + bool use_masked, npy_bool *out); + +DECLDIR void random_multinomial(bitgen_t *bitgen_state, RAND_INT_TYPE n, RAND_INT_TYPE *mnix, + double *pix, npy_intp d, binomial_t *binomial); + +/* multivariate hypergeometric, "count" method */ +DECLDIR int random_multivariate_hypergeometric_count(bitgen_t *bitgen_state, + int64_t total, + size_t num_colors, int64_t *colors, + int64_t nsample, + size_t num_variates, int64_t *variates); + +/* multivariate hypergeometric, "marginals" method */ +DECLDIR void random_multivariate_hypergeometric_marginals(bitgen_t *bitgen_state, + int64_t total, + size_t num_colors, int64_t *colors, + int64_t nsample, + size_t num_variates, int64_t *variates); + +/* Common to legacy-distributions.c and distributions.c but not exported */ + +RAND_INT_TYPE random_binomial_btpe(bitgen_t *bitgen_state, + RAND_INT_TYPE n, + double p, + binomial_t *binomial); +RAND_INT_TYPE random_binomial_inversion(bitgen_t *bitgen_state, + RAND_INT_TYPE n, + double p, + binomial_t *binomial); +double 
random_loggam(double x); +static NPY_INLINE double next_double(bitgen_t *bitgen_state) { + return bitgen_state->next_double(bitgen_state->state); +} + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/MLPY/Lib/site-packages/numpy/core/include/numpy/ufunc_api.txt b/MLPY/Lib/site-packages/numpy/core/include/numpy/ufunc_api.txt new file mode 100644 index 0000000000000000000000000000000000000000..29d5a807a4819f4e1f82f19ed3fbac3de256cdeb --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/include/numpy/ufunc_api.txt @@ -0,0 +1,335 @@ + +================= +NumPy Ufunc C-API +================= +:: + + PyObject * + PyUFunc_FromFuncAndData(PyUFuncGenericFunction *func, void + **data, char *types, int ntypes, int nin, int + nout, int identity, const char *name, const + char *doc, int unused) + + +:: + + int + PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc, int + usertype, PyUFuncGenericFunction + function, const int *arg_types, void + *data) + + +:: + + int + PyUFunc_GenericFunction(PyUFuncObject *NPY_UNUSED(ufunc) , PyObject + *NPY_UNUSED(args) , PyObject *NPY_UNUSED(kwds) + , PyArrayObject **NPY_UNUSED(op) ) + + +:: + + void + PyUFunc_f_f_As_d_d(char **args, npy_intp const *dimensions, npy_intp + const *steps, void *func) + + +:: + + void + PyUFunc_d_d(char **args, npy_intp const *dimensions, npy_intp const + *steps, void *func) + + +:: + + void + PyUFunc_f_f(char **args, npy_intp const *dimensions, npy_intp const + *steps, void *func) + + +:: + + void + PyUFunc_g_g(char **args, npy_intp const *dimensions, npy_intp const + *steps, void *func) + + +:: + + void + PyUFunc_F_F_As_D_D(char **args, npy_intp const *dimensions, npy_intp + const *steps, void *func) + + +:: + + void + PyUFunc_F_F(char **args, npy_intp const *dimensions, npy_intp const + *steps, void *func) + + +:: + + void + PyUFunc_D_D(char **args, npy_intp const *dimensions, npy_intp const + *steps, void *func) + + +:: + + void + PyUFunc_G_G(char **args, npy_intp const *dimensions, npy_intp const + *steps, void *func) + + +:: + + void + PyUFunc_O_O(char **args, npy_intp const *dimensions, npy_intp const + *steps, void *func) + + +:: + + void + PyUFunc_ff_f_As_dd_d(char **args, npy_intp const *dimensions, npy_intp + const *steps, void *func) + + +:: + + void + PyUFunc_ff_f(char **args, npy_intp const *dimensions, npy_intp const + *steps, void *func) + + +:: + + void + PyUFunc_dd_d(char **args, npy_intp const *dimensions, npy_intp const + *steps, void *func) + + +:: + + void + PyUFunc_gg_g(char **args, npy_intp const *dimensions, npy_intp const + *steps, void *func) + + +:: + + void + PyUFunc_FF_F_As_DD_D(char **args, npy_intp const *dimensions, npy_intp + const *steps, void *func) + + +:: + + void + PyUFunc_DD_D(char **args, npy_intp const *dimensions, npy_intp const + *steps, void *func) + + +:: + + void + PyUFunc_FF_F(char **args, npy_intp const *dimensions, npy_intp const + *steps, void *func) + + +:: + + void + PyUFunc_GG_G(char **args, npy_intp const *dimensions, npy_intp const + *steps, void *func) + + +:: + + void + PyUFunc_OO_O(char **args, npy_intp const *dimensions, npy_intp const + *steps, void *func) + + +:: + + void + PyUFunc_O_O_method(char **args, npy_intp const *dimensions, npy_intp + const *steps, void *func) + + +:: + + void + PyUFunc_OO_O_method(char **args, npy_intp const *dimensions, npy_intp + const *steps, void *func) + + +:: + + void + PyUFunc_On_Om(char **args, npy_intp const *dimensions, npy_intp const + *steps, void *func) + + +:: + + int + PyUFunc_GetPyValues(char *name, int *bufsize, int *errmask, 
PyObject
+ **errobj)
+
+
+On return, if errobj is populated with a non-NULL value, the caller
+owns a new reference to errobj.
+
+::
+
+ int
+ PyUFunc_checkfperr(int errmask, PyObject *errobj, int *first)
+
+
+::
+
+ void
+ PyUFunc_clearfperr()
+
+
+::
+
+ int
+ PyUFunc_getfperr(void )
+
+
+::
+
+ int
+ PyUFunc_handlefperr(int errmask, PyObject *errobj, int retstatus, int
+ *first)
+
+
+::
+
+ int
+ PyUFunc_ReplaceLoopBySignature(PyUFuncObject
+ *func, PyUFuncGenericFunction
+ newfunc, const int
+ *signature, PyUFuncGenericFunction
+ *oldfunc)
+
+
+::
+
+ PyObject *
+ PyUFunc_FromFuncAndDataAndSignature(PyUFuncGenericFunction *func, void
+ **data, char *types, int
+ ntypes, int nin, int nout, int
+ identity, const char *name, const
+ char *doc, int unused, const char
+ *signature)
+
+
+::
+
+ int
+ PyUFunc_SetUsesArraysAsData(void **NPY_UNUSED(data) , size_t
+ NPY_UNUSED(i) )
+
+
+::
+
+ void
+ PyUFunc_e_e(char **args, npy_intp const *dimensions, npy_intp const
+ *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_e_e_As_f_f(char **args, npy_intp const *dimensions, npy_intp
+ const *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_e_e_As_d_d(char **args, npy_intp const *dimensions, npy_intp
+ const *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_ee_e(char **args, npy_intp const *dimensions, npy_intp const
+ *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_ee_e_As_ff_f(char **args, npy_intp const *dimensions, npy_intp
+ const *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_ee_e_As_dd_d(char **args, npy_intp const *dimensions, npy_intp
+ const *steps, void *func)
+
+
+::
+
+ int
+ PyUFunc_DefaultTypeResolver(PyUFuncObject *ufunc, NPY_CASTING
+ casting, PyArrayObject
+ **operands, PyObject
+ *type_tup, PyArray_Descr **out_dtypes)
+
+
+This function applies the default type resolution rules
+for the provided ufunc.
+
+Returns 0 on success, -1 on error.
+
+::
+
+ int
+ PyUFunc_ValidateCasting(PyUFuncObject *ufunc, NPY_CASTING
+ casting, PyArrayObject
+ **operands, PyArray_Descr **dtypes)
+
+
+Validates that the input operands can be cast to
+the input types, and the output types can be cast to
+the output operands where provided.
+
+Returns 0 on success, -1 (with exception raised) on validation failure.
+
+::
+
+ int
+ PyUFunc_RegisterLoopForDescr(PyUFuncObject *ufunc, PyArray_Descr
+ *user_dtype, PyUFuncGenericFunction
+ function, PyArray_Descr
+ **arg_dtypes, void *data)
+
+
+::
+
+ PyObject *
+ PyUFunc_FromFuncAndDataAndSignatureAndIdentity(PyUFuncGenericFunction
+ *func, void
+ **data, char
+ *types, int ntypes, int
+ nin, int nout, int
+ identity, const char
+ *name, const char
+ *doc, const int
+ unused, const char
+ *signature, PyObject
+ *identity_value)
+
+ diff --git a/MLPY/Lib/site-packages/numpy/core/include/numpy/ufuncobject.h b/MLPY/Lib/site-packages/numpy/core/include/numpy/ufuncobject.h
new file mode 100644
index 0000000000000000000000000000000000000000..85dada1b1b8514cd38e875e5c1da345d113a99c6
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/core/include/numpy/ufuncobject.h
@@ -0,0 +1,373 @@
+#ifndef Py_UFUNCOBJECT_H
+#define Py_UFUNCOBJECT_H
+
+#include <numpy/npy_math.h>
+#include <numpy/npy_common.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * The legacy generic inner loop for a standard element-wise or
+ * generalized ufunc.
+ */
+typedef void (*PyUFuncGenericFunction)
+ (char **args,
+ npy_intp const *dimensions,
+ npy_intp const *strides,
+ void *innerloopdata);
+
+/*
+ * The most generic one-dimensional inner loop for
+ * a masked standard element-wise ufunc.
"Masked" here means that it skips + * doing calculations on any items for which the maskptr array has a true + * value. + */ +typedef void (PyUFunc_MaskedStridedInnerLoopFunc)( + char **dataptrs, npy_intp *strides, + char *maskptr, npy_intp mask_stride, + npy_intp count, + NpyAuxData *innerloopdata); + +/* Forward declaration for the type resolver and loop selector typedefs */ +struct _tagPyUFuncObject; + +/* + * Given the operands for calling a ufunc, should determine the + * calculation input and output data types and return an inner loop function. + * This function should validate that the casting rule is being followed, + * and fail if it is not. + * + * For backwards compatibility, the regular type resolution function does not + * support auxiliary data with object semantics. The type resolution call + * which returns a masked generic function returns a standard NpyAuxData + * object, for which the NPY_AUXDATA_FREE and NPY_AUXDATA_CLONE macros + * work. + * + * ufunc: The ufunc object. + * casting: The 'casting' parameter provided to the ufunc. + * operands: An array of length (ufunc->nin + ufunc->nout), + * with the output parameters possibly NULL. + * type_tup: Either NULL, or the type_tup passed to the ufunc. + * out_dtypes: An array which should be populated with new + * references to (ufunc->nin + ufunc->nout) new + * dtypes, one for each input and output. These + * dtypes should all be in native-endian format. + * + * Should return 0 on success, -1 on failure (with exception set), + * or -2 if Py_NotImplemented should be returned. + */ +typedef int (PyUFunc_TypeResolutionFunc)( + struct _tagPyUFuncObject *ufunc, + NPY_CASTING casting, + PyArrayObject **operands, + PyObject *type_tup, + PyArray_Descr **out_dtypes); + +/* + * Given an array of DTypes as returned by the PyUFunc_TypeResolutionFunc, + * and an array of fixed strides (the array will contain NPY_MAX_INTP for + * strides which are not necessarily fixed), returns an inner loop + * with associated auxiliary data. + * + * For backwards compatibility, there is a variant of the inner loop + * selection which returns an inner loop irrespective of the strides, + * and with a void* static auxiliary data instead of an NpyAuxData * + * dynamically allocatable auxiliary data. + * + * ufunc: The ufunc object. + * dtypes: An array which has been populated with dtypes, + * in most cases by the type resolution function + * for the same ufunc. + * fixed_strides: For each input/output, either the stride that + * will be used every time the function is called + * or NPY_MAX_INTP if the stride might change or + * is not known ahead of time. The loop selection + * function may use this stride to pick inner loops + * which are optimized for contiguous or 0-stride + * cases. + * out_innerloop: Should be populated with the correct ufunc inner + * loop for the given type. + * out_innerloopdata: Should be populated with the void* data to + * be passed into the out_innerloop function. + * out_needs_api: If the inner loop needs to use the Python API, + * should set the to 1, otherwise should leave + * this untouched. 
+typedef int (PyUFunc_LegacyInnerLoopSelectionFunc)(
+ struct _tagPyUFuncObject *ufunc,
+ PyArray_Descr **dtypes,
+ PyUFuncGenericFunction *out_innerloop,
+ void **out_innerloopdata,
+ int *out_needs_api);
+typedef int (PyUFunc_MaskedInnerLoopSelectionFunc)(
+ struct _tagPyUFuncObject *ufunc,
+ PyArray_Descr **dtypes,
+ PyArray_Descr *mask_dtype,
+ npy_intp *fixed_strides,
+ npy_intp fixed_mask_stride,
+ PyUFunc_MaskedStridedInnerLoopFunc **out_innerloop,
+ NpyAuxData **out_innerloopdata,
+ int *out_needs_api);
+
+typedef struct _tagPyUFuncObject {
+ PyObject_HEAD
+ /*
+ * nin: Number of inputs
+ * nout: Number of outputs
+ * nargs: Always nin + nout (Why is it stored?)
+ */
+ int nin, nout, nargs;
+
+ /*
+ * Identity for reduction, any of PyUFunc_One, PyUFunc_Zero
+ * PyUFunc_MinusOne, PyUFunc_None, PyUFunc_ReorderableNone,
+ * PyUFunc_IdentityValue.
+ */
+ int identity;
+
+ /* Array of one-dimensional core loops */
+ PyUFuncGenericFunction *functions;
+ /* Array of funcdata that gets passed into the functions */
+ void **data;
+ /* The number of elements in 'functions' and 'data' */
+ int ntypes;
+
+ /* Used to be unused field 'check_return' */
+ int reserved1;
+
+ /* The name of the ufunc */
+ const char *name;
+
+ /* Array of type numbers, of size ('nargs' * 'ntypes') */
+ char *types;
+
+ /* Documentation string */
+ const char *doc;
+
+ void *ptr;
+ PyObject *obj;
+ PyObject *userloops;
+
+ /* generalized ufunc parameters */
+
+ /* 0 for scalar ufunc; 1 for generalized ufunc */
+ int core_enabled;
+ /* number of distinct dimension names in signature */
+ int core_num_dim_ix;
+
+ /*
+ * dimension indices of input/output argument k are stored in
+ * core_dim_ixs[core_offsets[k]..core_offsets[k]+core_num_dims[k]-1]
+ */
+
+ /* numbers of core dimensions of each argument */
+ int *core_num_dims;
+ /*
+ * dimension indices in a flatted form; indices
+ * are in the range of [0,core_num_dim_ix)
+ */
+ int *core_dim_ixs;
+ /*
+ * positions of 1st core dimensions of each
+ * argument in core_dim_ixs, equivalent to cumsum(core_num_dims)
+ */
+ int *core_offsets;
+ /* signature string for printing purpose */
+ char *core_signature;
+
+ /*
+ * A function which resolves the types and fills an array
+ * with the dtypes for the inputs and outputs.
+ */
+ PyUFunc_TypeResolutionFunc *type_resolver;
+ /*
+ * A function which returns an inner loop written for
+ * NumPy 1.6 and earlier ufuncs. This is for backwards
+ * compatibility, and may be NULL if inner_loop_selector
+ * is specified.
+ */
+ PyUFunc_LegacyInnerLoopSelectionFunc *legacy_inner_loop_selector;
+ /*
+ * This was blocked off to be the "new" inner loop selector in 1.7,
+ * but this was never implemented. (This is also why the above
+ * selector is called the "legacy" selector.)
+ */
+ #if PY_VERSION_HEX >= 0x03080000
+ vectorcallfunc vectorcall;
+ #else
+ void *reserved2;
+ #endif
+ /*
+ * A function which returns a masked inner loop for the ufunc.
+ */
+ PyUFunc_MaskedInnerLoopSelectionFunc *masked_inner_loop_selector;
+
+ /*
+ * List of flags for each operand when ufunc is called by nditer object.
+ * These flags will be used in addition to the default flags for each
+ * operand set by nditer object.
+ */
+ npy_uint32 *op_flags;
+
+ /*
+ * List of global flags used when ufunc is called by nditer object.
+ * These flags will be used in addition to the default global flags
+ * set by nditer object.
+ */ + npy_uint32 iter_flags; + + /* New in NPY_API_VERSION 0x0000000D and above */ + + /* + * for each core_num_dim_ix distinct dimension names, + * the possible "frozen" size (-1 if not frozen). + */ + npy_intp *core_dim_sizes; + + /* + * for each distinct core dimension, a set of UFUNC_CORE_DIM* flags + */ + npy_uint32 *core_dim_flags; + + /* Identity for reduction, when identity == PyUFunc_IdentityValue */ + PyObject *identity_value; + +} PyUFuncObject; + +#include "arrayobject.h" +/* Generalized ufunc; 0x0001 reserved for possible use as CORE_ENABLED */ +/* the core dimension's size will be determined by the operands. */ +#define UFUNC_CORE_DIM_SIZE_INFERRED 0x0002 +/* the core dimension may be absent */ +#define UFUNC_CORE_DIM_CAN_IGNORE 0x0004 +/* flags inferred during execution */ +#define UFUNC_CORE_DIM_MISSING 0x00040000 + +#define UFUNC_ERR_IGNORE 0 +#define UFUNC_ERR_WARN 1 +#define UFUNC_ERR_RAISE 2 +#define UFUNC_ERR_CALL 3 +#define UFUNC_ERR_PRINT 4 +#define UFUNC_ERR_LOG 5 + + /* Python side integer mask */ + +#define UFUNC_MASK_DIVIDEBYZERO 0x07 +#define UFUNC_MASK_OVERFLOW 0x3f +#define UFUNC_MASK_UNDERFLOW 0x1ff +#define UFUNC_MASK_INVALID 0xfff + +#define UFUNC_SHIFT_DIVIDEBYZERO 0 +#define UFUNC_SHIFT_OVERFLOW 3 +#define UFUNC_SHIFT_UNDERFLOW 6 +#define UFUNC_SHIFT_INVALID 9 + + +#define UFUNC_OBJ_ISOBJECT 1 +#define UFUNC_OBJ_NEEDS_API 2 + + /* Default user error mode */ +#define UFUNC_ERR_DEFAULT \ + (UFUNC_ERR_WARN << UFUNC_SHIFT_DIVIDEBYZERO) + \ + (UFUNC_ERR_WARN << UFUNC_SHIFT_OVERFLOW) + \ + (UFUNC_ERR_WARN << UFUNC_SHIFT_INVALID) + +#if NPY_ALLOW_THREADS +#define NPY_LOOP_BEGIN_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) _save = PyEval_SaveThread();} while (0); +#define NPY_LOOP_END_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) PyEval_RestoreThread(_save);} while (0); +#else +#define NPY_LOOP_BEGIN_THREADS +#define NPY_LOOP_END_THREADS +#endif + +/* + * UFunc has unit of 0, and the order of operations can be reordered + * This case allows reduction with multiple axes at once. + */ +#define PyUFunc_Zero 0 +/* + * UFunc has unit of 1, and the order of operations can be reordered + * This case allows reduction with multiple axes at once. + */ +#define PyUFunc_One 1 +/* + * UFunc has unit of -1, and the order of operations can be reordered + * This case allows reduction with multiple axes at once. Intended for + * bitwise_and reduction. + */ +#define PyUFunc_MinusOne 2 +/* + * UFunc has no unit, and the order of operations cannot be reordered. + * This case does not allow reduction with multiple axes at once. + */ +#define PyUFunc_None -1 +/* + * UFunc has no unit, and the order of operations can be reordered + * This case allows reduction with multiple axes at once. + */ +#define PyUFunc_ReorderableNone -2 +/* + * UFunc unit is an identity_value, and the order of operations can be reordered + * This case allows reduction with multiple axes at once. + */ +#define PyUFunc_IdentityValue -3 + + +#define UFUNC_REDUCE 0 +#define UFUNC_ACCUMULATE 1 +#define UFUNC_REDUCEAT 2 +#define UFUNC_OUTER 3 + + +typedef struct { + int nin; + int nout; + PyObject *callable; +} PyUFunc_PyFuncData; + +/* A linked-list of function information for + user-defined 1-d loops. + */ +typedef struct _loop1d_info { + PyUFuncGenericFunction func; + void *data; + int *arg_types; + struct _loop1d_info *next; + int nargs; + PyArray_Descr **arg_dtypes; +} PyUFunc_Loop1d; + + +#include "__ufunc_api.h" + +#define UFUNC_PYVALS_NAME "UFUNC_PYVALS" + +/* + * THESE MACROS ARE DEPRECATED. 
+ * Use npy_set_floatstatus_* in the npymath library. + */ +#define UFUNC_FPE_DIVIDEBYZERO NPY_FPE_DIVIDEBYZERO +#define UFUNC_FPE_OVERFLOW NPY_FPE_OVERFLOW +#define UFUNC_FPE_UNDERFLOW NPY_FPE_UNDERFLOW +#define UFUNC_FPE_INVALID NPY_FPE_INVALID + +#define generate_divbyzero_error() npy_set_floatstatus_divbyzero() +#define generate_overflow_error() npy_set_floatstatus_overflow() + + /* Make sure it gets defined if it isn't already */ +#ifndef UFUNC_NOFPE +/* Clear the floating point exception default of Borland C++ */ +#if defined(__BORLANDC__) +#define UFUNC_NOFPE _control87(MCW_EM, MCW_EM); +#else +#define UFUNC_NOFPE +#endif +#endif + + +#ifdef __cplusplus +} +#endif +#endif /* !Py_UFUNCOBJECT_H */ diff --git a/MLPY/Lib/site-packages/numpy/core/include/numpy/utils.h b/MLPY/Lib/site-packages/numpy/core/include/numpy/utils.h new file mode 100644 index 0000000000000000000000000000000000000000..5fc49f7d09b47323615f3a40f5efecc29d99d507 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/include/numpy/utils.h @@ -0,0 +1,37 @@ +#ifndef __NUMPY_UTILS_HEADER__ +#define __NUMPY_UTILS_HEADER__ + +#ifndef __COMP_NPY_UNUSED + #if defined(__GNUC__) + #define __COMP_NPY_UNUSED __attribute__ ((__unused__)) + #elif defined(__ICC) + #define __COMP_NPY_UNUSED __attribute__ ((__unused__)) + #elif defined(__clang__) + #define __COMP_NPY_UNUSED __attribute__ ((unused)) + #else + #define __COMP_NPY_UNUSED + #endif +#endif + +#if defined(__GNUC__) || defined(__ICC) || defined(__clang__) + #define NPY_DECL_ALIGNED(x) __attribute__ ((aligned (x))) +#elif defined(_MSC_VER) + #define NPY_DECL_ALIGNED(x) __declspec(align(x)) +#else + #define NPY_DECL_ALIGNED(x) +#endif + +/* Use this to tag a variable as not used. It will remove unused variable + * warning on support platforms (see __COM_NPY_UNUSED) and mangle the variable + * to avoid accidental use */ +#define NPY_UNUSED(x) (__NPY_UNUSED_TAGGED ## x) __COMP_NPY_UNUSED +#define NPY_EXPAND(x) x + +#define NPY_STRINGIFY(x) #x +#define NPY_TOSTRING(x) NPY_STRINGIFY(x) + +#define NPY_CAT__(a, b) a ## b +#define NPY_CAT_(a, b) NPY_CAT__(a, b) +#define NPY_CAT(a, b) NPY_CAT_(a, b) + +#endif diff --git a/MLPY/Lib/site-packages/numpy/core/lib/npy-pkg-config/mlib.ini b/MLPY/Lib/site-packages/numpy/core/lib/npy-pkg-config/mlib.ini new file mode 100644 index 0000000000000000000000000000000000000000..290c51994e90c830656e017eef435e1bdaa9a511 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/lib/npy-pkg-config/mlib.ini @@ -0,0 +1,12 @@ +[meta] +Name = mlib +Description = Math library used with this version of numpy +Version = 1.0 + +[default] +Libs= +Cflags= + +[msvc] +Libs= +Cflags= diff --git a/MLPY/Lib/site-packages/numpy/core/lib/npy-pkg-config/npymath.ini b/MLPY/Lib/site-packages/numpy/core/lib/npy-pkg-config/npymath.ini new file mode 100644 index 0000000000000000000000000000000000000000..3b28a91c044c341b50e1fa1b1f69d9852eaeb21b --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/lib/npy-pkg-config/npymath.ini @@ -0,0 +1,20 @@ +[meta] +Name=npymath +Description=Portable, core math library implementing C99 standard +Version=0.1 + +[variables] +pkgname=numpy.core +prefix=${pkgdir} +libdir=${prefix}\lib +includedir=${prefix}\include + +[default] +Libs=-L${libdir} -lnpymath +Cflags=-I${includedir} +Requires=mlib + +[msvc] +Libs=/LIBPATH:${libdir} npymath.lib +Cflags=/INCLUDE:${includedir} +Requires=mlib diff --git a/MLPY/Lib/site-packages/numpy/core/lib/npymath.lib b/MLPY/Lib/site-packages/numpy/core/lib/npymath.lib new file mode 100644 index 
0000000000000000000000000000000000000000..6d926573bcfb085ca3cbb451da37a5edd85b6e37 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/lib/npymath.lib differ diff --git a/MLPY/Lib/site-packages/numpy/core/machar.py b/MLPY/Lib/site-packages/numpy/core/machar.py new file mode 100644 index 0000000000000000000000000000000000000000..858422076086d41d7a1e2b94b036d9bc0c7d960f --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/machar.py @@ -0,0 +1,342 @@ +""" +Machine arithmetics - determine the parameters of the +floating-point arithmetic system + +Author: Pearu Peterson, September 2003 + +""" +__all__ = ['MachAr'] + +from numpy.core.fromnumeric import any +from numpy.core._ufunc_config import errstate +from numpy.core.overrides import set_module + +# Need to speed this up...especially for longfloat + +@set_module('numpy') +class MachAr: + """ + Diagnosing machine parameters. + + Attributes + ---------- + ibeta : int + Radix in which numbers are represented. + it : int + Number of base-`ibeta` digits in the floating point mantissa M. + machep : int + Exponent of the smallest (most negative) power of `ibeta` that, + added to 1.0, gives something different from 1.0 + eps : float + Floating-point number ``beta**machep`` (floating point precision) + negep : int + Exponent of the smallest power of `ibeta` that, subtracted + from 1.0, gives something different from 1.0. + epsneg : float + Floating-point number ``beta**negep``. + iexp : int + Number of bits in the exponent (including its sign and bias). + minexp : int + Smallest (most negative) power of `ibeta` consistent with there + being no leading zeros in the mantissa. + xmin : float + Floating-point number ``beta**minexp`` (the smallest [in + magnitude] positive floating point number with full precision). + maxexp : int + Smallest (positive) power of `ibeta` that causes overflow. + xmax : float + ``(1-epsneg) * beta**maxexp`` (the largest [in magnitude] + usable floating value). + irnd : int + In ``range(6)``, information on what kind of rounding is done + in addition, and on how underflow is handled. + ngrd : int + Number of 'guard digits' used when truncating the product + of two mantissas to fit the representation. + epsilon : float + Same as `eps`. + tiny : float + Same as `xmin`. + huge : float + Same as `xmax`. + precision : float + ``- int(-log10(eps))`` + resolution : float + ``- 10**(-precision)`` + + Parameters + ---------- + float_conv : function, optional + Function that converts an integer or integer array to a float + or float array. Default is `float`. + int_conv : function, optional + Function that converts a float or float array to an integer or + integer array. Default is `int`. + float_to_float : function, optional + Function that converts a float array to float. Default is `float`. + Note that this does not seem to do anything useful in the current + implementation. + float_to_str : function, optional + Function that converts a single float to a string. Default is + ``lambda v:'%24.16e' %v``. + title : str, optional + Title that is printed in the string representation of `MachAr`. + + See Also + -------- + finfo : Machine limits for floating point types. + iinfo : Machine limits for integer types. + + References + ---------- + .. [1] Press, Teukolsky, Vetterling and Flannery, + "Numerical Recipes in C++," 2nd ed, + Cambridge University Press, 2002, p. 31. 
+ + """ + + def __init__(self, float_conv=float,int_conv=int, + float_to_float=float, + float_to_str=lambda v:'%24.16e' % v, + title='Python floating point number'): + """ + + float_conv - convert integer to float (array) + int_conv - convert float (array) to integer + float_to_float - convert float array to float + float_to_str - convert array float to str + title - description of used floating point numbers + + """ + # We ignore all errors here because we are purposely triggering + # underflow to detect the properties of the runninng arch. + with errstate(under='ignore'): + self._do_init(float_conv, int_conv, float_to_float, float_to_str, title) + + def _do_init(self, float_conv, int_conv, float_to_float, float_to_str, title): + max_iterN = 10000 + msg = "Did not converge after %d tries with %s" + one = float_conv(1) + two = one + one + zero = one - one + + # Do we really need to do this? Aren't they 2 and 2.0? + # Determine ibeta and beta + a = one + for _ in range(max_iterN): + a = a + a + temp = a + one + temp1 = temp - a + if any(temp1 - one != zero): + break + else: + raise RuntimeError(msg % (_, one.dtype)) + b = one + for _ in range(max_iterN): + b = b + b + temp = a + b + itemp = int_conv(temp-a) + if any(itemp != 0): + break + else: + raise RuntimeError(msg % (_, one.dtype)) + ibeta = itemp + beta = float_conv(ibeta) + + # Determine it and irnd + it = -1 + b = one + for _ in range(max_iterN): + it = it + 1 + b = b * beta + temp = b + one + temp1 = temp - b + if any(temp1 - one != zero): + break + else: + raise RuntimeError(msg % (_, one.dtype)) + + betah = beta / two + a = one + for _ in range(max_iterN): + a = a + a + temp = a + one + temp1 = temp - a + if any(temp1 - one != zero): + break + else: + raise RuntimeError(msg % (_, one.dtype)) + temp = a + betah + irnd = 0 + if any(temp-a != zero): + irnd = 1 + tempa = a + beta + temp = tempa + betah + if irnd == 0 and any(temp-tempa != zero): + irnd = 2 + + # Determine negep and epsneg + negep = it + 3 + betain = one / beta + a = one + for i in range(negep): + a = a * betain + b = a + for _ in range(max_iterN): + temp = one - a + if any(temp-one != zero): + break + a = a * beta + negep = negep - 1 + # Prevent infinite loop on PPC with gcc 4.0: + if negep < 0: + raise RuntimeError("could not determine machine tolerance " + "for 'negep', locals() -> %s" % (locals())) + else: + raise RuntimeError(msg % (_, one.dtype)) + negep = -negep + epsneg = a + + # Determine machep and eps + machep = - it - 3 + a = b + + for _ in range(max_iterN): + temp = one + a + if any(temp-one != zero): + break + a = a * beta + machep = machep + 1 + else: + raise RuntimeError(msg % (_, one.dtype)) + eps = a + + # Determine ngrd + ngrd = 0 + temp = one + eps + if irnd == 0 and any(temp*one - one != zero): + ngrd = 1 + + # Determine iexp + i = 0 + k = 1 + z = betain + t = one + eps + nxres = 0 + for _ in range(max_iterN): + y = z + z = y*y + a = z*one # Check here for underflow + temp = z*t + if any(a+a == zero) or any(abs(z) >= y): + break + temp1 = temp * betain + if any(temp1*beta == z): + break + i = i + 1 + k = k + k + else: + raise RuntimeError(msg % (_, one.dtype)) + if ibeta != 10: + iexp = i + 1 + mx = k + k + else: + iexp = 2 + iz = ibeta + while k >= iz: + iz = iz * ibeta + iexp = iexp + 1 + mx = iz + iz - 1 + + # Determine minexp and xmin + for _ in range(max_iterN): + xmin = y + y = y * betain + a = y * one + temp = y * t + if any((a + a) != zero) and any(abs(y) < xmin): + k = k + 1 + temp1 = temp * betain + if any(temp1*beta == y) and any(temp 
!= y): + nxres = 3 + xmin = y + break + else: + break + else: + raise RuntimeError(msg % (_, one.dtype)) + minexp = -k + + # Determine maxexp, xmax + if mx <= k + k - 3 and ibeta != 10: + mx = mx + mx + iexp = iexp + 1 + maxexp = mx + minexp + irnd = irnd + nxres + if irnd >= 2: + maxexp = maxexp - 2 + i = maxexp + minexp + if ibeta == 2 and not i: + maxexp = maxexp - 1 + if i > 20: + maxexp = maxexp - 1 + if any(a != y): + maxexp = maxexp - 2 + xmax = one - epsneg + if any(xmax*one != xmax): + xmax = one - beta*epsneg + xmax = xmax / (xmin*beta*beta*beta) + i = maxexp + minexp + 3 + for j in range(i): + if ibeta == 2: + xmax = xmax + xmax + else: + xmax = xmax * beta + + self.ibeta = ibeta + self.it = it + self.negep = negep + self.epsneg = float_to_float(epsneg) + self._str_epsneg = float_to_str(epsneg) + self.machep = machep + self.eps = float_to_float(eps) + self._str_eps = float_to_str(eps) + self.ngrd = ngrd + self.iexp = iexp + self.minexp = minexp + self.xmin = float_to_float(xmin) + self._str_xmin = float_to_str(xmin) + self.maxexp = maxexp + self.xmax = float_to_float(xmax) + self._str_xmax = float_to_str(xmax) + self.irnd = irnd + + self.title = title + # Commonly used parameters + self.epsilon = self.eps + self.tiny = self.xmin + self.huge = self.xmax + + import math + self.precision = int(-math.log10(float_to_float(self.eps))) + ten = two + two + two + two + two + resolution = ten ** (-self.precision) + self.resolution = float_to_float(resolution) + self._str_resolution = float_to_str(resolution) + + def __str__(self): + fmt = ( + 'Machine parameters for %(title)s\n' + '---------------------------------------------------------------------\n' + 'ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s\n' + 'machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon)\n' + 'negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg)\n' + 'minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny)\n' + 'maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)\n' + '---------------------------------------------------------------------\n' + ) + return fmt % self.__dict__ + + +if __name__ == '__main__': + print(MachAr()) diff --git a/MLPY/Lib/site-packages/numpy/core/memmap.py b/MLPY/Lib/site-packages/numpy/core/memmap.py new file mode 100644 index 0000000000000000000000000000000000000000..80e3351e45253291ec2189ba4d9f2f911219fa6b --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/memmap.py @@ -0,0 +1,337 @@ +from contextlib import nullcontext + +import numpy as np +from .numeric import uint8, ndarray, dtype +from numpy.compat import os_fspath, is_pathlib_path +from numpy.core.overrides import set_module + +__all__ = ['memmap'] + +dtypedescr = dtype +valid_filemodes = ["r", "c", "r+", "w+"] +writeable_filemodes = ["r+", "w+"] + +mode_equivalents = { + "readonly":"r", + "copyonwrite":"c", + "readwrite":"r+", + "write":"w+" + } + + +@set_module('numpy') +class memmap(ndarray): + """Create a memory-map to an array stored in a *binary* file on disk. + + Memory-mapped files are used for accessing small segments of large files + on disk, without reading the entire file into memory. NumPy's + memmap's are array-like objects. This differs from Python's ``mmap`` + module, which uses file-like objects. + + This subclass of ndarray has some unpleasant interactions with + some operations, because it doesn't quite fit properly as a subclass. 
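One such interaction is sketched below as a doctest (the file name is
hypothetical; the behaviour follows the ``__array_wrap__`` override further
down in this file, under which ufunc results degrade to plain ndarrays or
scalars rather than staying memmaps):

    >>> fp = np.memmap('newfile.dat', dtype='float32', mode='w+', shape=(3,))
    >>> type(fp + 1)       # arithmetic does not return a memmap
    <class 'numpy.ndarray'>
    >>> type(np.sum(fp))   # a full reduction collapses to a scalar
    <class 'numpy.float32'>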
+ An alternative to using this subclass is to create the ``mmap`` + object yourself, then create an ndarray with ndarray.__new__ directly, + passing the object created in its 'buffer=' parameter. + + This class may at some point be turned into a factory function + which returns a view into an mmap buffer. + + Flush the memmap instance to write the changes to the file. Currently there + is no API to close the underlying ``mmap``. It is tricky to ensure the + resource is actually closed, since it may be shared between different + memmap instances. + + + Parameters + ---------- + filename : str, file-like object, or pathlib.Path instance + The file name or file object to be used as the array data buffer. + dtype : data-type, optional + The data-type used to interpret the file contents. + Default is `uint8`. + mode : {'r+', 'r', 'w+', 'c'}, optional + The file is opened in this mode: + + +------+-------------------------------------------------------------+ + | 'r' | Open existing file for reading only. | + +------+-------------------------------------------------------------+ + | 'r+' | Open existing file for reading and writing. | + +------+-------------------------------------------------------------+ + | 'w+' | Create or overwrite existing file for reading and writing. | + +------+-------------------------------------------------------------+ + | 'c' | Copy-on-write: assignments affect data in memory, but | + | | changes are not saved to disk. The file on disk is | + | | read-only. | + +------+-------------------------------------------------------------+ + + Default is 'r+'. + offset : int, optional + In the file, array data starts at this offset. Since `offset` is + measured in bytes, it should normally be a multiple of the byte-size + of `dtype`. When ``mode != 'r'``, even positive offsets beyond end of + file are valid; The file will be extended to accommodate the + additional data. By default, ``memmap`` will start at the beginning of + the file, even if ``filename`` is a file pointer ``fp`` and + ``fp.tell() != 0``. + shape : tuple, optional + The desired shape of the array. If ``mode == 'r'`` and the number + of remaining bytes after `offset` is not a multiple of the byte-size + of `dtype`, you must specify `shape`. By default, the returned array + will be 1-D with the number of elements determined by file size + and data-type. + order : {'C', 'F'}, optional + Specify the order of the ndarray memory layout: + :term:`row-major`, C-style or :term:`column-major`, + Fortran-style. This only has an effect if the shape is + greater than 1-D. The default order is 'C'. + + Attributes + ---------- + filename : str or pathlib.Path instance + Path to the mapped file. + offset : int + Offset position in the file. + mode : str + File mode. + + Methods + ------- + flush + Flush any changes in memory to file on disk. + When you delete a memmap object, flush is called first to write + changes to disk. + + + See also + -------- + lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file. + + Notes + ----- + The memmap object can be used anywhere an ndarray is accepted. + Given a memmap ``fp``, ``isinstance(fp, numpy.ndarray)`` returns + ``True``. + + Memory-mapped files cannot be larger than 2GB on 32-bit systems. + + When a memmap causes a file to be created or extended beyond its + current size in the filesystem, the contents of the new part are + unspecified. On systems with POSIX filesystem semantics, the extended + part will be filled with zero bytes. 
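A hedged sketch of that alternative (the file name is hypothetical, and the
sketch assumes the file already exists with a size that is a multiple of the
itemsize):

    import mmap

    import numpy as np

    with open('data.bin', 'r+b') as f:
        mm = mmap.mmap(f.fileno(), 0)   # map the whole file
    # Build a plain ndarray over the mapped buffer via the 'buffer='
    # parameter mentioned above -- no memmap subclass involved.
    arr = np.ndarray.__new__(np.ndarray, (mm.size() // 4,),
                             dtype=np.float32, buffer=mm)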
+ + Examples + -------- + >>> data = np.arange(12, dtype='float32') + >>> data.resize((3,4)) + + This example uses a temporary file so that doctest doesn't write + files to your directory. You would use a 'normal' filename. + + >>> from tempfile import mkdtemp + >>> import os.path as path + >>> filename = path.join(mkdtemp(), 'newfile.dat') + + Create a memmap with dtype and shape that matches our data: + + >>> fp = np.memmap(filename, dtype='float32', mode='w+', shape=(3,4)) + >>> fp + memmap([[0., 0., 0., 0.], + [0., 0., 0., 0.], + [0., 0., 0., 0.]], dtype=float32) + + Write data to memmap array: + + >>> fp[:] = data[:] + >>> fp + memmap([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]], dtype=float32) + + >>> fp.filename == path.abspath(filename) + True + + Flushes memory changes to disk in order to read them back + + >>> fp.flush() + + Load the memmap and verify data was stored: + + >>> newfp = np.memmap(filename, dtype='float32', mode='r', shape=(3,4)) + >>> newfp + memmap([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]], dtype=float32) + + Read-only memmap: + + >>> fpr = np.memmap(filename, dtype='float32', mode='r', shape=(3,4)) + >>> fpr.flags.writeable + False + + Copy-on-write memmap: + + >>> fpc = np.memmap(filename, dtype='float32', mode='c', shape=(3,4)) + >>> fpc.flags.writeable + True + + It's possible to assign to copy-on-write array, but values are only + written into the memory copy of the array, and not written to disk: + + >>> fpc + memmap([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]], dtype=float32) + >>> fpc[0,:] = 0 + >>> fpc + memmap([[ 0., 0., 0., 0.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]], dtype=float32) + + File on disk is unchanged: + + >>> fpr + memmap([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]], dtype=float32) + + Offset into a memmap: + + >>> fpo = np.memmap(filename, dtype='float32', mode='r', offset=16) + >>> fpo + memmap([ 4., 5., 6., 7., 8., 9., 10., 11.], dtype=float32) + + """ + + __array_priority__ = -100.0 + + def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0, + shape=None, order='C'): + # Import here to minimize 'import numpy' overhead + import mmap + import os.path + try: + mode = mode_equivalents[mode] + except KeyError as e: + if mode not in valid_filemodes: + raise ValueError( + "mode must be one of {!r} (got {!r})" + .format(valid_filemodes + list(mode_equivalents.keys()), mode) + ) from None + + if mode == 'w+' and shape is None: + raise ValueError("shape must be given") + + if hasattr(filename, 'read'): + f_ctx = nullcontext(filename) + else: + f_ctx = open(os_fspath(filename), ('r' if mode == 'c' else mode)+'b') + + with f_ctx as fid: + fid.seek(0, 2) + flen = fid.tell() + descr = dtypedescr(dtype) + _dbytes = descr.itemsize + + if shape is None: + bytes = flen - offset + if bytes % _dbytes: + raise ValueError("Size of available data is not a " + "multiple of the data-type size.") + size = bytes // _dbytes + shape = (size,) + else: + if not isinstance(shape, tuple): + shape = (shape,) + size = np.intp(1) # avoid default choice of np.int_, which might overflow + for k in shape: + size *= k + + bytes = int(offset + size*_dbytes) + + if mode in ('w+', 'r+') and flen < bytes: + fid.seek(bytes - 1, 0) + fid.write(b'\0') + fid.flush() + + if mode == 'c': + acc = mmap.ACCESS_COPY + elif mode == 'r': + acc = mmap.ACCESS_READ + else: + acc = mmap.ACCESS_WRITE + + start = offset - offset % mmap.ALLOCATIONGRANULARITY + bytes -= start + array_offset = offset - start + 
mm = mmap.mmap(fid.fileno(), bytes, access=acc, offset=start) + + self = ndarray.__new__(subtype, shape, dtype=descr, buffer=mm, + offset=array_offset, order=order) + self._mmap = mm + self.offset = offset + self.mode = mode + + if is_pathlib_path(filename): + # special case - if we were constructed with a pathlib.path, + # then filename is a path object, not a string + self.filename = filename.resolve() + elif hasattr(fid, "name") and isinstance(fid.name, str): + # py3 returns int for TemporaryFile().name + self.filename = os.path.abspath(fid.name) + # same as memmap copies (e.g. memmap + 1) + else: + self.filename = None + + return self + + def __array_finalize__(self, obj): + if hasattr(obj, '_mmap') and np.may_share_memory(self, obj): + self._mmap = obj._mmap + self.filename = obj.filename + self.offset = obj.offset + self.mode = obj.mode + else: + self._mmap = None + self.filename = None + self.offset = None + self.mode = None + + def flush(self): + """ + Write any changes in the array to the file on disk. + + For further information, see `memmap`. + + Parameters + ---------- + None + + See Also + -------- + memmap + + """ + if self.base is not None and hasattr(self.base, 'flush'): + self.base.flush() + + def __array_wrap__(self, arr, context=None): + arr = super().__array_wrap__(arr, context) + + # Return a memmap if a memmap was given as the output of the + # ufunc. Leave the arr class unchanged if self is not a memmap + # to keep original memmap subclasses behavior + if self is arr or type(self) is not memmap: + return arr + # Return scalar instead of 0d memmap, e.g. for np.sum with + # axis=None + if arr.shape == (): + return arr[()] + # Return ndarray otherwise + return arr.view(np.ndarray) + + def __getitem__(self, index): + res = super().__getitem__(index) + if type(res) is memmap and res._mmap is None: + return res.view(type=ndarray) + return res diff --git a/MLPY/Lib/site-packages/numpy/core/multiarray.py b/MLPY/Lib/site-packages/numpy/core/multiarray.py new file mode 100644 index 0000000000000000000000000000000000000000..402f7a41d5e9e3312a5c4b5f110e821f8eb0c51f --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/multiarray.py @@ -0,0 +1,1690 @@ +""" +Create the numpy.core.multiarray namespace for backward compatibility. In v1.16 +the multiarray and umath c-extension modules were merged into a single +_multiarray_umath extension module. So we replicate the old namespace +by importing from the extension module. + +""" + +import functools +import warnings + +from . import overrides +from . import _multiarray_umath +from ._multiarray_umath import * # noqa: F403 +# These imports are needed for backward compatibility, +# do not change them. 
issue gh-15518 +# _get_ndarray_c_version is semi-public, on purpose not added to __all__ +from ._multiarray_umath import ( + _fastCopyAndTranspose, _flagdict, _insert, _reconstruct, _vec_string, + _ARRAY_API, _monotonicity, _get_ndarray_c_version, _set_madvise_hugepage, + ) + +__all__ = [ + '_ARRAY_API', 'ALLOW_THREADS', 'BUFSIZE', 'CLIP', 'DATETIMEUNITS', + 'ITEM_HASOBJECT', 'ITEM_IS_POINTER', 'LIST_PICKLE', 'MAXDIMS', + 'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT', 'NEEDS_INIT', 'NEEDS_PYAPI', + 'RAISE', 'USE_GETITEM', 'USE_SETITEM', 'WRAP', '_fastCopyAndTranspose', + '_flagdict', '_insert', '_reconstruct', '_vec_string', '_monotonicity', + 'add_docstring', 'arange', 'array', 'asarray', 'asanyarray', + 'ascontiguousarray', 'asfortranarray', 'bincount', 'broadcast', + 'busday_count', 'busday_offset', 'busdaycalendar', 'can_cast', + 'compare_chararrays', 'concatenate', 'copyto', 'correlate', 'correlate2', + 'count_nonzero', 'c_einsum', 'datetime_as_string', 'datetime_data', + 'digitize', 'dot', 'dragon4_positional', 'dragon4_scientific', 'dtype', + 'empty', 'empty_like', 'error', 'flagsobj', 'flatiter', 'format_longfloat', + 'frombuffer', 'fromfile', 'fromiter', 'fromstring', 'inner', + 'interp', 'interp_complex', 'is_busday', 'lexsort', + 'matmul', 'may_share_memory', 'min_scalar_type', 'ndarray', 'nditer', + 'nested_iters', 'normalize_axis_index', 'packbits', + 'promote_types', 'putmask', 'ravel_multi_index', 'result_type', 'scalar', + 'set_datetimeparse_function', 'set_legacy_print_mode', 'set_numeric_ops', + 'set_string_function', 'set_typeDict', 'shares_memory', + 'tracemalloc_domain', 'typeinfo', 'unpackbits', 'unravel_index', 'vdot', + 'where', 'zeros'] + +# For backward compatibility, make sure pickle imports these functions from here +_reconstruct.__module__ = 'numpy.core.multiarray' +scalar.__module__ = 'numpy.core.multiarray' + + +arange.__module__ = 'numpy' +array.__module__ = 'numpy' +asarray.__module__ = 'numpy' +asanyarray.__module__ = 'numpy' +ascontiguousarray.__module__ = 'numpy' +asfortranarray.__module__ = 'numpy' +datetime_data.__module__ = 'numpy' +empty.__module__ = 'numpy' +frombuffer.__module__ = 'numpy' +fromfile.__module__ = 'numpy' +fromiter.__module__ = 'numpy' +frompyfunc.__module__ = 'numpy' +fromstring.__module__ = 'numpy' +geterrobj.__module__ = 'numpy' +may_share_memory.__module__ = 'numpy' +nested_iters.__module__ = 'numpy' +promote_types.__module__ = 'numpy' +set_numeric_ops.__module__ = 'numpy' +seterrobj.__module__ = 'numpy' +zeros.__module__ = 'numpy' + + +# We can't verify dispatcher signatures because NumPy's C functions don't +# support introspection. +array_function_from_c_func_and_dispatcher = functools.partial( + overrides.array_function_from_dispatcher, + module='numpy', docs_from_dispatcher=True, verify=False) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.empty_like) +def empty_like(prototype, dtype=None, order=None, subok=None, shape=None): + """ + empty_like(prototype, dtype=None, order='K', subok=True, shape=None) + + Return a new array with the same shape and type as a given array. + + Parameters + ---------- + prototype : array_like + The shape and data-type of `prototype` define these same attributes + of the returned array. + dtype : data-type, optional + Overrides the data type of the result. + + .. versionadded:: 1.6.0 + order : {'C', 'F', 'A', or 'K'}, optional + Overrides the memory layout of the result. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `prototype` is Fortran + contiguous, 'C' otherwise. 
'K' means match the layout of `prototype` + as closely as possible. + + .. versionadded:: 1.6.0 + subok : bool, optional. + If True, then the newly created array will use the sub-class + type of `prototype`, otherwise it will be a base-class array. Defaults + to True. + shape : int or sequence of ints, optional. + Overrides the shape of the result. If order='K' and the number of + dimensions is unchanged, will try to keep order, otherwise, + order='C' is implied. + + .. versionadded:: 1.17.0 + + Returns + ------- + out : ndarray + Array of uninitialized (arbitrary) data with the same + shape and type as `prototype`. + + See Also + -------- + ones_like : Return an array of ones with shape and type of input. + zeros_like : Return an array of zeros with shape and type of input. + full_like : Return a new array with shape of input filled with value. + empty : Return a new uninitialized array. + + Notes + ----- + This function does *not* initialize the returned array; to do that use + `zeros_like` or `ones_like` instead. It may be marginally faster than + the functions that do set the array values. + + Examples + -------- + >>> a = ([1,2,3], [4,5,6]) # a is array-like + >>> np.empty_like(a) + array([[-1073741821, -1073741821, 3], # uninitialized + [ 0, 0, -1073741821]]) + >>> a = np.array([[1., 2., 3.],[4.,5.,6.]]) + >>> np.empty_like(a) + array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000], # uninitialized + [ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]]) + + """ + return (prototype,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.concatenate) +def concatenate(arrays, axis=None, out=None, *, dtype=None, casting=None): + """ + concatenate((a1, a2, ...), axis=0, out=None, dtype=None, casting="same_kind") + + Join a sequence of arrays along an existing axis. + + Parameters + ---------- + a1, a2, ... : sequence of array_like + The arrays must have the same shape, except in the dimension + corresponding to `axis` (the first, by default). + axis : int, optional + The axis along which the arrays will be joined. If axis is None, + arrays are flattened before use. Default is 0. + out : ndarray, optional + If provided, the destination to place the result. The shape must be + correct, matching that of what concatenate would have returned if no + out argument were specified. + dtype : str or dtype + If provided, the destination array will have this dtype. Cannot be + provided together with `out`. + + .. versionadded:: 1.20.0 + + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. Defaults to 'same_kind'. + + .. versionadded:: 1.20.0 + + Returns + ------- + res : ndarray + The concatenated array. + + See Also + -------- + ma.concatenate : Concatenate function that preserves input masks. + array_split : Split an array into multiple sub-arrays of equal or + near-equal size. + split : Split array into a list of multiple sub-arrays of equal size. + hsplit : Split array into multiple sub-arrays horizontally (column wise). + vsplit : Split array into multiple sub-arrays vertically (row wise). + dsplit : Split array into multiple sub-arrays along the 3rd axis (depth). + stack : Stack a sequence of arrays along a new axis. + block : Assemble arrays from blocks. + hstack : Stack arrays in sequence horizontally (column wise). + vstack : Stack arrays in sequence vertically (row wise). + dstack : Stack arrays in sequence depth wise (along third dimension). + column_stack : Stack 1-D arrays as columns into a 2-D array. 
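Every wrapper in this module follows the pattern visible in `empty_like`
above: the Python body only returns the arguments that participate in
dispatch, and the decorator pairs it with the C implementation. A minimal
sketch of what that enables, using the standard NEP 18
``__array_function__`` protocol (the ``MyArray`` class is hypothetical):

    import numpy as np

    class MyArray:
        def __init__(self, data):
            self.data = np.asarray(data)

        def __array_function__(self, func, types, args, kwargs):
            # Consulted before NumPy's C implementation runs; 'args' and
            # 'kwargs' are the original call arguments, while the
            # dispatcher's return value only determines which objects get
            # consulted.
            if func is np.concatenate:
                return MyArray(np.concatenate([a.data for a in args[0]]))
            return NotImplemented

    out = np.concatenate([MyArray([1, 2]), MyArray([3, 4])])   # -> MyArray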
+ + Notes + ----- + When one or more of the arrays to be concatenated is a MaskedArray, + this function will return a MaskedArray object instead of an ndarray, + but the input masks are *not* preserved. In cases where a MaskedArray + is expected as input, use the ma.concatenate function from the masked + array module instead. + + Examples + -------- + >>> a = np.array([[1, 2], [3, 4]]) + >>> b = np.array([[5, 6]]) + >>> np.concatenate((a, b), axis=0) + array([[1, 2], + [3, 4], + [5, 6]]) + >>> np.concatenate((a, b.T), axis=1) + array([[1, 2, 5], + [3, 4, 6]]) + >>> np.concatenate((a, b), axis=None) + array([1, 2, 3, 4, 5, 6]) + + This function will not preserve masking of MaskedArray inputs. + + >>> a = np.ma.arange(3) + >>> a[1] = np.ma.masked + >>> b = np.arange(2, 5) + >>> a + masked_array(data=[0, --, 2], + mask=[False, True, False], + fill_value=999999) + >>> b + array([2, 3, 4]) + >>> np.concatenate([a, b]) + masked_array(data=[0, 1, 2, 2, 3, 4], + mask=False, + fill_value=999999) + >>> np.ma.concatenate([a, b]) + masked_array(data=[0, --, 2, 2, 3, 4], + mask=[False, True, False, False, False, False], + fill_value=999999) + + """ + if out is not None: + # optimize for the typical case where only arrays is provided + arrays = list(arrays) + arrays.append(out) + return arrays + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.inner) +def inner(a, b): + """ + inner(a, b) + + Inner product of two arrays. + + Ordinary inner product of vectors for 1-D arrays (without complex + conjugation), in higher dimensions a sum product over the last axes. + + Parameters + ---------- + a, b : array_like + If `a` and `b` are nonscalar, their last dimensions must match. + + Returns + ------- + out : ndarray + If `a` and `b` are both + scalars or both 1-D arrays then a scalar is returned; otherwise + an array is returned. + ``out.shape = (*a.shape[:-1], *b.shape[:-1])`` + + Raises + ------ + ValueError + If both `a` and `b` are nonscalar and their last dimensions have + different sizes. + + See Also + -------- + tensordot : Sum products over arbitrary axes. + dot : Generalised matrix product, using second last dimension of `b`. + einsum : Einstein summation convention. + + Notes + ----- + For vectors (1-D arrays) it computes the ordinary inner-product:: + + np.inner(a, b) = sum(a[:]*b[:]) + + More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`:: + + np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1)) + + or explicitly:: + + np.inner(a, b)[i0,...,ir-2,j0,...,js-2] + = sum(a[i0,...,ir-2,:]*b[j0,...,js-2,:]) + + In addition `a` or `b` may be scalars, in which case:: + + np.inner(a,b) = a*b + + Examples + -------- + Ordinary inner product for vectors: + + >>> a = np.array([1,2,3]) + >>> b = np.array([0,1,0]) + >>> np.inner(a, b) + 2 + + Some multidimensional examples: + + >>> a = np.arange(24).reshape((2,3,4)) + >>> b = np.arange(4) + >>> c = np.inner(a, b) + >>> c.shape + (2, 3) + >>> c + array([[ 14, 38, 62], + [ 86, 110, 134]]) + + >>> a = np.arange(2).reshape((1,1,2)) + >>> b = np.arange(6).reshape((3,2)) + >>> c = np.inner(a, b) + >>> c.shape + (1, 1, 3) + >>> c + array([[[1, 3, 5]]]) + + An example where `b` is a scalar: + + >>> np.inner(np.eye(2), 7) + array([[7., 0.], + [0., 7.]]) + + """ + return (a, b) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.where) +def where(condition, x=None, y=None): + """ + where(condition, [x, y]) + + Return elements chosen from `x` or `y` depending on `condition`. + + .. 
note:: + When only `condition` is provided, this function is a shorthand for + ``np.asarray(condition).nonzero()``. Using `nonzero` directly should be + preferred, as it behaves correctly for subclasses. The rest of this + documentation covers only the case where all three arguments are + provided. + + Parameters + ---------- + condition : array_like, bool + Where True, yield `x`, otherwise yield `y`. + x, y : array_like + Values from which to choose. `x`, `y` and `condition` need to be + broadcastable to some shape. + + Returns + ------- + out : ndarray + An array with elements from `x` where `condition` is True, and elements + from `y` elsewhere. + + See Also + -------- + choose + nonzero : The function that is called when x and y are omitted + + Notes + ----- + If all the arrays are 1-D, `where` is equivalent to:: + + [xv if c else yv + for c, xv, yv in zip(condition, x, y)] + + Examples + -------- + >>> a = np.arange(10) + >>> a + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> np.where(a < 5, a, 10*a) + array([ 0, 1, 2, 3, 4, 50, 60, 70, 80, 90]) + + This can be used on multidimensional arrays too: + + >>> np.where([[True, False], [True, True]], + ... [[1, 2], [3, 4]], + ... [[9, 8], [7, 6]]) + array([[1, 8], + [3, 4]]) + + The shapes of x, y, and the condition are broadcast together: + + >>> x, y = np.ogrid[:3, :4] + >>> np.where(x < y, x, 10 + y) # both x and 10+y are broadcast + array([[10, 0, 0, 0], + [10, 11, 1, 1], + [10, 11, 12, 2]]) + + >>> a = np.array([[0, 1, 2], + ... [0, 2, 4], + ... [0, 3, 6]]) + >>> np.where(a < 4, a, -1) # -1 is broadcast + array([[ 0, 1, 2], + [ 0, 2, -1], + [ 0, 3, -1]]) + """ + return (condition, x, y) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.lexsort) +def lexsort(keys, axis=None): + """ + lexsort(keys, axis=-1) + + Perform an indirect stable sort using a sequence of keys. + + Given multiple sorting keys, which can be interpreted as columns in a + spreadsheet, lexsort returns an array of integer indices that describes + the sort order by multiple columns. The last key in the sequence is used + for the primary sort order, the second-to-last key for the secondary sort + order, and so on. The keys argument must be a sequence of objects that + can be converted to arrays of the same shape. If a 2D array is provided + for the keys argument, its rows are interpreted as the sorting keys and + sorting is according to the last row, second last row etc. + + Parameters + ---------- + keys : (k, N) array or tuple containing k (N,)-shaped sequences + The `k` different "columns" to be sorted. The last column (or row if + `keys` is a 2D array) is the primary sort key. + axis : int, optional + Axis to be indirectly sorted. By default, sort over the last axis. + + Returns + ------- + indices : (N,) ndarray of ints + Array of indices that sort the keys along the specified axis. + + See Also + -------- + argsort : Indirect sort. + ndarray.sort : In-place sort. + sort : Return a sorted copy of an array. + + Examples + -------- + Sort names: first by surname, then by name. 
+ + >>> surnames = ('Hertz', 'Galilei', 'Hertz') + >>> first_names = ('Heinrich', 'Galileo', 'Gustav') + >>> ind = np.lexsort((first_names, surnames)) + >>> ind + array([1, 2, 0]) + + >>> [surnames[i] + ", " + first_names[i] for i in ind] + ['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich'] + + Sort two columns of numbers: + + >>> a = [1,5,1,4,3,4,4] # First column + >>> b = [9,4,0,4,0,2,1] # Second column + >>> ind = np.lexsort((b,a)) # Sort by a, then by b + >>> ind + array([2, 0, 4, 6, 5, 3, 1]) + + >>> [(a[i],b[i]) for i in ind] + [(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)] + + Note that sorting is first according to the elements of ``a``. + Secondary sorting is according to the elements of ``b``. + + A normal ``argsort`` would have yielded: + + >>> [(a[i],b[i]) for i in np.argsort(a)] + [(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)] + + Structured arrays are sorted lexically by ``argsort``: + + >>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)], + ... dtype=np.dtype([('x', int), ('y', int)])) + + >>> np.argsort(x) # or np.argsort(x, order=('x', 'y')) + array([2, 0, 4, 6, 5, 3, 1]) + + """ + if isinstance(keys, tuple): + return keys + else: + return (keys,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.can_cast) +def can_cast(from_, to, casting=None): + """ + can_cast(from_, to, casting='safe') + + Returns True if cast between data types can occur according to the + casting rule. If from is a scalar or array scalar, also returns + True if the scalar value can be cast without overflow or truncation + to an integer. + + Parameters + ---------- + from_ : dtype, dtype specifier, scalar, or array + Data type, scalar, or array to cast from. + to : dtype or dtype specifier + Data type to cast to. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. + + * 'no' means the data types should not be cast at all. + * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. + + Returns + ------- + out : bool + True if cast can occur according to the casting rule. + + Notes + ----- + .. versionchanged:: 1.17.0 + Casting between a simple data type and a structured one is possible only + for "unsafe" casting. Casting to multiple fields is allowed, but + casting from multiple fields is not. + + .. versionchanged:: 1.9.0 + Casting from numeric to string types in 'safe' casting mode requires + that the string dtype length is long enough to store the maximum + integer/float value converted. 
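The 1.9.0 rule above can be checked directly; a small doctest-style
supplement (the width 21 follows from ``np.promote_types('i8', 'S')``
being ``dtype('S21')`` on typical builds):

    >>> np.can_cast(np.int64, 'S21')   # long enough for any int64 value
    True
    >>> np.can_cast(np.int64, 'S4')    # too short
    False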
+
+    See also
+    --------
+    dtype, result_type
+
+    Examples
+    --------
+    Basic examples
+
+    >>> np.can_cast(np.int32, np.int64)
+    True
+    >>> np.can_cast(np.float64, complex)
+    True
+    >>> np.can_cast(complex, float)
+    False
+
+    >>> np.can_cast('i8', 'f8')
+    True
+    >>> np.can_cast('i8', 'f4')
+    False
+    >>> np.can_cast('i4', 'S4')
+    False
+
+    Casting scalars
+
+    >>> np.can_cast(100, 'i1')
+    True
+    >>> np.can_cast(150, 'i1')
+    False
+    >>> np.can_cast(150, 'u1')
+    True
+
+    >>> np.can_cast(3.5e100, np.float32)
+    False
+    >>> np.can_cast(1000.0, np.float32)
+    True
+
+    Array scalar checks the value, array does not
+
+    >>> np.can_cast(np.array(1000.0), np.float32)
+    True
+    >>> np.can_cast(np.array([1000.0]), np.float32)
+    False
+
+    Using the casting rules
+
+    >>> np.can_cast('i8', 'i8', 'no')
+    True
+    >>> np.can_cast('<i8', '>i8', 'no')
+    False
+
+    >>> np.can_cast('<i8', '>i8', 'equiv')
+    True
+    >>> np.can_cast('<i4', '>i8', 'equiv')
+    False
+
+    >>> np.can_cast('<i4', '>i8', 'safe')
+    True
+    >>> np.can_cast('<i8', '>i4', 'safe')
+    False
+
+    >>> np.can_cast('<i8', '>i4', 'same_kind')
+    True
+    >>> np.can_cast('<i8', '>u4', 'same_kind')
+    False
+
+    >>> np.can_cast('<i8', '>u4', 'unsafe')
+    True
+
+    """
+    return (from_,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.min_scalar_type)
+def min_scalar_type(a):
+    """
+    min_scalar_type(a)
+
+    For scalar ``a``, returns the data type with the smallest size
+    and smallest scalar kind which can hold its value. For non-scalar
+    array ``a``, returns the vector's dtype unmodified.
+
+    Floating point values are not demoted to integers,
+    and complex values are not demoted to floats.
+
+    Parameters
+    ----------
+    a : scalar or array_like
+        The value whose minimal data type is to be found.
+
+    Returns
+    -------
+    out : dtype
+        The minimal data type.
+
+    Notes
+    -----
+    .. versionadded:: 1.6.0
+
+    See Also
+    --------
+    result_type, promote_types, dtype, can_cast
+
+    Examples
+    --------
+    >>> np.min_scalar_type(10)
+    dtype('uint8')
+
+    >>> np.min_scalar_type(-260)
+    dtype('int16')
+
+    >>> np.min_scalar_type(3.1)
+    dtype('float16')
+
+    >>> np.min_scalar_type(1e50)
+    dtype('float64')
+
+    >>> np.min_scalar_type(np.arange(4,dtype='f8'))
+    dtype('float64')
+
+    """
+    return (a,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.result_type)
+def result_type(*arrays_and_dtypes):
+    """
+    result_type(*arrays_and_dtypes)
+
+    Returns the type that results from applying the NumPy
+    type promotion rules to the arguments.
+
+    Type promotion in NumPy works similarly to the rules in languages
+    like C++, with some slight differences. When both scalars and
+    arrays are used, the array's type takes precedence and the actual value
+    of the scalar is taken into account.
+
+    For example, calculating 3*a, where a is an array of 32-bit floats,
+    intuitively should result in a 32-bit float output. If the 3 is a
+    32-bit integer, the NumPy rules indicate it can't convert losslessly
+    into a 32-bit float, so a 64-bit float should be the result type.
+    By examining the value of the constant, '3', we see that it fits in
+    an 8-bit integer, which can be cast losslessly into the 32-bit float.
+
+    Parameters
+    ----------
+    arrays_and_dtypes : list of arrays and dtypes
+        The operands of some operation whose result type is needed.
+
+    Returns
+    -------
+    out : dtype
+        The result type.
+
+    See also
+    --------
+    dtype, promote_types, min_scalar_type, can_cast
+
+    Notes
+    -----
+    .. versionadded:: 1.6.0
+
+    The specific algorithm used is as follows.
+ + Categories are determined by first checking which of boolean, + integer (int/uint), or floating point (float/complex) the maximum + kind of all the arrays and the scalars are. + + If there are only scalars or the maximum category of the scalars + is higher than the maximum category of the arrays, + the data types are combined with :func:`promote_types` + to produce the return value. + + Otherwise, `min_scalar_type` is called on each array, and + the resulting data types are all combined with :func:`promote_types` + to produce the return value. + + The set of int values is not a subset of the uint values for types + with the same number of bits, something not reflected in + :func:`min_scalar_type`, but handled as a special case in `result_type`. + + Examples + -------- + >>> np.result_type(3, np.arange(7, dtype='i1')) + dtype('int8') + + >>> np.result_type('i4', 'c8') + dtype('complex128') + + >>> np.result_type(3.0, -2) + dtype('float64') + + """ + return arrays_and_dtypes + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.dot) +def dot(a, b, out=None): + """ + dot(a, b, out=None) + + Dot product of two arrays. Specifically, + + - If both `a` and `b` are 1-D arrays, it is inner product of vectors + (without complex conjugation). + + - If both `a` and `b` are 2-D arrays, it is matrix multiplication, + but using :func:`matmul` or ``a @ b`` is preferred. + + - If either `a` or `b` is 0-D (scalar), it is equivalent to :func:`multiply` + and using ``numpy.multiply(a, b)`` or ``a * b`` is preferred. + + - If `a` is an N-D array and `b` is a 1-D array, it is a sum product over + the last axis of `a` and `b`. + + - If `a` is an N-D array and `b` is an M-D array (where ``M>=2``), it is a + sum product over the last axis of `a` and the second-to-last axis of `b`:: + + dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m]) + + Parameters + ---------- + a : array_like + First argument. + b : array_like + Second argument. + out : ndarray, optional + Output argument. This must have the exact kind that would be returned + if it was not used. In particular, it must have the right type, must be + C-contiguous, and its dtype must be the dtype that would be returned + for `dot(a,b)`. This is a performance feature. Therefore, if these + conditions are not met, an exception is raised, instead of attempting + to be flexible. + + Returns + ------- + output : ndarray + Returns the dot product of `a` and `b`. If `a` and `b` are both + scalars or both 1-D arrays then a scalar is returned; otherwise + an array is returned. + If `out` is given, then it is returned. + + Raises + ------ + ValueError + If the last dimension of `a` is not the same size as + the second-to-last dimension of `b`. + + See Also + -------- + vdot : Complex-conjugating dot product. + tensordot : Sum products over arbitrary axes. + einsum : Einstein summation convention. + matmul : '@' operator as method with out parameter. + linalg.multi_dot : Chained dot product. 
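The N-D rule quoted in the Notes above is the same contraction that
``tensordot`` performs over the last axis of `a` and the second-to-last
axis of `b`; a quick doctest-style check with arbitrarily chosen shapes:

    >>> a = np.random.rand(3, 4, 5)
    >>> b = np.random.rand(6, 5, 7)
    >>> np.dot(a, b).shape
    (3, 4, 6, 7)
    >>> np.allclose(np.dot(a, b), np.tensordot(a, b, axes=(-1, -2)))
    True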
+ + Examples + -------- + >>> np.dot(3, 4) + 12 + + Neither argument is complex-conjugated: + + >>> np.dot([2j, 3j], [2j, 3j]) + (-13+0j) + + For 2-D arrays it is the matrix product: + + >>> a = [[1, 0], [0, 1]] + >>> b = [[4, 1], [2, 2]] + >>> np.dot(a, b) + array([[4, 1], + [2, 2]]) + + >>> a = np.arange(3*4*5*6).reshape((3,4,5,6)) + >>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3)) + >>> np.dot(a, b)[2,3,2,1,2,2] + 499128 + >>> sum(a[2,3,2,:] * b[1,2,:,2]) + 499128 + + """ + return (a, b, out) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.vdot) +def vdot(a, b): + """ + vdot(a, b) + + Return the dot product of two vectors. + + The vdot(`a`, `b`) function handles complex numbers differently than + dot(`a`, `b`). If the first argument is complex the complex conjugate + of the first argument is used for the calculation of the dot product. + + Note that `vdot` handles multidimensional arrays differently than `dot`: + it does *not* perform a matrix product, but flattens input arguments + to 1-D vectors first. Consequently, it should only be used for vectors. + + Parameters + ---------- + a : array_like + If `a` is complex the complex conjugate is taken before calculation + of the dot product. + b : array_like + Second argument to the dot product. + + Returns + ------- + output : ndarray + Dot product of `a` and `b`. Can be an int, float, or + complex depending on the types of `a` and `b`. + + See Also + -------- + dot : Return the dot product without using the complex conjugate of the + first argument. + + Examples + -------- + >>> a = np.array([1+2j,3+4j]) + >>> b = np.array([5+6j,7+8j]) + >>> np.vdot(a, b) + (70-8j) + >>> np.vdot(b, a) + (70+8j) + + Note that higher-dimensional arrays are flattened! + + >>> a = np.array([[1, 4], [5, 6]]) + >>> b = np.array([[4, 1], [2, 2]]) + >>> np.vdot(a, b) + 30 + >>> np.vdot(b, a) + 30 + >>> 1*4 + 4*1 + 5*2 + 6*2 + 30 + + """ + return (a, b) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.bincount) +def bincount(x, weights=None, minlength=None): + """ + bincount(x, weights=None, minlength=0) + + Count number of occurrences of each value in array of non-negative ints. + + The number of bins (of size 1) is one larger than the largest value in + `x`. If `minlength` is specified, there will be at least this number + of bins in the output array (though it will be longer if necessary, + depending on the contents of `x`). + Each bin gives the number of occurrences of its index value in `x`. + If `weights` is specified the input array is weighted by it, i.e. if a + value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead + of ``out[n] += 1``. + + Parameters + ---------- + x : array_like, 1 dimension, nonnegative ints + Input array. + weights : array_like, optional + Weights, array of the same shape as `x`. + minlength : int, optional + A minimum number of bins for the output array. + + .. versionadded:: 1.6.0 + + Returns + ------- + out : ndarray of ints + The result of binning the input array. + The length of `out` is equal to ``np.amax(x)+1``. + + Raises + ------ + ValueError + If the input is not 1-dimensional, or contains elements with negative + values, or if `minlength` is negative. + TypeError + If the type of the input is float or complex. 
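One parameter the Examples section below does not cover is `minlength`;
a one-line doctest supplement showing the zero-padding it guarantees:

    >>> np.bincount(np.array([0, 1, 1]), minlength=5)
    array([1, 2, 0, 0, 0])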
+ + See Also + -------- + histogram, digitize, unique + + Examples + -------- + >>> np.bincount(np.arange(5)) + array([1, 1, 1, 1, 1]) + >>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7])) + array([1, 3, 1, 1, 0, 0, 0, 1]) + + >>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23]) + >>> np.bincount(x).size == np.amax(x)+1 + True + + The input array needs to be of integer dtype, otherwise a + TypeError is raised: + + >>> np.bincount(np.arange(5, dtype=float)) + Traceback (most recent call last): + ... + TypeError: Cannot cast array data from dtype('float64') to dtype('int64') + according to the rule 'safe' + + A possible use of ``bincount`` is to perform sums over + variable-size chunks of an array, using the ``weights`` keyword. + + >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights + >>> x = np.array([0, 1, 1, 2, 2, 2]) + >>> np.bincount(x, weights=w) + array([ 0.3, 0.7, 1.1]) + + """ + return (x, weights) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.ravel_multi_index) +def ravel_multi_index(multi_index, dims, mode=None, order=None): + """ + ravel_multi_index(multi_index, dims, mode='raise', order='C') + + Converts a tuple of index arrays into an array of flat + indices, applying boundary modes to the multi-index. + + Parameters + ---------- + multi_index : tuple of array_like + A tuple of integer arrays, one array for each dimension. + dims : tuple of ints + The shape of array into which the indices from ``multi_index`` apply. + mode : {'raise', 'wrap', 'clip'}, optional + Specifies how out-of-bounds indices are handled. Can specify + either one mode or a tuple of modes, one mode per index. + + * 'raise' -- raise an error (default) + * 'wrap' -- wrap around + * 'clip' -- clip to the range + + In 'clip' mode, a negative index which would normally + wrap will clip to 0 instead. + order : {'C', 'F'}, optional + Determines whether the multi-index should be viewed as + indexing in row-major (C-style) or column-major + (Fortran-style) order. + + Returns + ------- + raveled_indices : ndarray + An array of indices into the flattened version of an array + of dimensions ``dims``. + + See Also + -------- + unravel_index + + Notes + ----- + .. versionadded:: 1.6.0 + + Examples + -------- + >>> arr = np.array([[3,6,6],[4,5,1]]) + >>> np.ravel_multi_index(arr, (7,6)) + array([22, 41, 37]) + >>> np.ravel_multi_index(arr, (7,6), order='F') + array([31, 41, 13]) + >>> np.ravel_multi_index(arr, (4,6), mode='clip') + array([22, 23, 19]) + >>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap')) + array([12, 13, 13]) + + >>> np.ravel_multi_index((3,1,4,1), (6,7,8,9)) + 1621 + """ + return multi_index + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.unravel_index) +def unravel_index(indices, shape=None, order=None): + """ + unravel_index(indices, shape, order='C') + + Converts a flat index or array of flat indices into a tuple + of coordinate arrays. + + Parameters + ---------- + indices : array_like + An integer array whose elements are indices into the flattened + version of an array of dimensions ``shape``. Before version 1.6.0, + this function accepted just one index value. + shape : tuple of ints + The shape of the array to use for unraveling ``indices``. + + .. versionchanged:: 1.16.0 + Renamed from ``dims`` to ``shape``. + + order : {'C', 'F'}, optional + Determines whether the indices should be viewed as indexing in + row-major (C-style) or column-major (Fortran-style) order. + + .. 
versionadded:: 1.6.0 + + Returns + ------- + unraveled_coords : tuple of ndarray + Each array in the tuple has the same shape as the ``indices`` + array. + + See Also + -------- + ravel_multi_index + + Examples + -------- + >>> np.unravel_index([22, 41, 37], (7,6)) + (array([3, 6, 6]), array([4, 5, 1])) + >>> np.unravel_index([31, 41, 13], (7,6), order='F') + (array([3, 6, 6]), array([4, 5, 1])) + + >>> np.unravel_index(1621, (6,7,8,9)) + (3, 1, 4, 1) + + """ + return (indices,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.copyto) +def copyto(dst, src, casting=None, where=None): + """ + copyto(dst, src, casting='same_kind', where=True) + + Copies values from one array to another, broadcasting as necessary. + + Raises a TypeError if the `casting` rule is violated, and if + `where` is provided, it selects which elements to copy. + + .. versionadded:: 1.7.0 + + Parameters + ---------- + dst : ndarray + The array into which values are copied. + src : array_like + The array from which values are copied. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur when copying. + + * 'no' means the data types should not be cast at all. + * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. + where : array_like of bool, optional + A boolean array which is broadcasted to match the dimensions + of `dst`, and selects elements to copy from `src` to `dst` + wherever it contains the value True. + """ + return (dst, src, where) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.putmask) +def putmask(a, mask, values): + """ + putmask(a, mask, values) + + Changes elements of an array based on conditional and input values. + + Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``. + + If `values` is not the same size as `a` and `mask` then it will repeat. + This gives behavior different from ``a[mask] = values``. + + Parameters + ---------- + a : ndarray + Target array. + mask : array_like + Boolean mask array. It has to be the same shape as `a`. + values : array_like + Values to put into `a` where `mask` is True. If `values` is smaller + than `a` it will be repeated. + + See Also + -------- + place, put, take, copyto + + Examples + -------- + >>> x = np.arange(6).reshape(2, 3) + >>> np.putmask(x, x>2, x**2) + >>> x + array([[ 0, 1, 2], + [ 9, 16, 25]]) + + If `values` is smaller than `a` it is repeated: + + >>> x = np.arange(5) + >>> np.putmask(x, x>1, [-33, -44]) + >>> x + array([ 0, 1, -33, -44, -33]) + + """ + return (a, mask, values) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.packbits) +def packbits(a, axis=None, bitorder='big'): + """ + packbits(a, axis=None, bitorder='big') + + Packs the elements of a binary-valued array into bits in a uint8 array. + + The result is padded to full bytes by inserting zero bits at the end. + + Parameters + ---------- + a : array_like + An array of integers or booleans whose elements should be packed to + bits. + axis : int, optional + The dimension over which bit-packing is done. + ``None`` implies packing the flattened array. + bitorder : {'big', 'little'}, optional + The order of the input bits. 
'big' will mimic bin(val), + ``[0, 0, 0, 0, 0, 0, 1, 1] => 3 = 0b00000011``, 'little' will + reverse the order so ``[1, 1, 0, 0, 0, 0, 0, 0] => 3``. + Defaults to 'big'. + + .. versionadded:: 1.17.0 + + Returns + ------- + packed : ndarray + Array of type uint8 whose elements represent bits corresponding to the + logical (0 or nonzero) value of the input elements. The shape of + `packed` has the same number of dimensions as the input (unless `axis` + is None, in which case the output is 1-D). + + See Also + -------- + unpackbits: Unpacks elements of a uint8 array into a binary-valued output + array. + + Examples + -------- + >>> a = np.array([[[1,0,1], + ... [0,1,0]], + ... [[1,1,0], + ... [0,0,1]]]) + >>> b = np.packbits(a, axis=-1) + >>> b + array([[[160], + [ 64]], + [[192], + [ 32]]], dtype=uint8) + + Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000, + and 32 = 0010 0000. + + """ + return (a,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.unpackbits) +def unpackbits(a, axis=None, count=None, bitorder='big'): + """ + unpackbits(a, axis=None, count=None, bitorder='big') + + Unpacks elements of a uint8 array into a binary-valued output array. + + Each element of `a` represents a bit-field that should be unpacked + into a binary-valued output array. The shape of the output array is + either 1-D (if `axis` is ``None``) or the same shape as the input + array with unpacking done along the axis specified. + + Parameters + ---------- + a : ndarray, uint8 type + Input array. + axis : int, optional + The dimension over which bit-unpacking is done. + ``None`` implies unpacking the flattened array. + count : int or None, optional + The number of elements to unpack along `axis`, provided as a way + of undoing the effect of packing a size that is not a multiple + of eight. A non-negative number means to only unpack `count` + bits. A negative number means to trim off that many bits from + the end. ``None`` means to unpack the entire array (the + default). Counts larger than the available number of bits will + add zero padding to the output. Negative counts must not + exceed the available number of bits. + + .. versionadded:: 1.17.0 + + bitorder : {'big', 'little'}, optional + The order of the returned bits. 'big' will mimic bin(val), + ``3 = 0b00000011 => [0, 0, 0, 0, 0, 0, 1, 1]``, 'little' will reverse + the order to ``[1, 1, 0, 0, 0, 0, 0, 0]``. + Defaults to 'big'. + + .. versionadded:: 1.17.0 + + Returns + ------- + unpacked : ndarray, uint8 type + The elements are binary-valued (0 or 1). + + See Also + -------- + packbits : Packs the elements of a binary-valued array into bits in + a uint8 array. 
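The Examples section below exercises `axis` and `count` but not `bitorder`;
a one-line doctest supplement (3 is ``0b00000011``, so 'little' emits the
low bits first):

    >>> np.unpackbits(np.array([3], dtype=np.uint8), bitorder='little')
    array([1, 1, 0, 0, 0, 0, 0, 0], dtype=uint8)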
+ + Examples + -------- + >>> a = np.array([[2], [7], [23]], dtype=np.uint8) + >>> a + array([[ 2], + [ 7], + [23]], dtype=uint8) + >>> b = np.unpackbits(a, axis=1) + >>> b + array([[0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8) + >>> c = np.unpackbits(a, axis=1, count=-3) + >>> c + array([[0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 1, 0]], dtype=uint8) + + >>> p = np.packbits(b, axis=0) + >>> np.unpackbits(p, axis=0) + array([[0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 0, 1, 0, 1, 1, 1], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8) + >>> np.array_equal(b, np.unpackbits(p, axis=0, count=b.shape[0])) + True + + """ + return (a,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.shares_memory) +def shares_memory(a, b, max_work=None): + """ + shares_memory(a, b, max_work=None) + + Determine if two arrays share memory. + + .. warning:: + + This function can be exponentially slow for some inputs, unless + `max_work` is set to a finite number or ``MAY_SHARE_BOUNDS``. + If in doubt, use `numpy.may_share_memory` instead. + + Parameters + ---------- + a, b : ndarray + Input arrays + max_work : int, optional + Effort to spend on solving the overlap problem (maximum number + of candidate solutions to consider). The following special + values are recognized: + + max_work=MAY_SHARE_EXACT (default) + The problem is solved exactly. In this case, the function returns + True only if there is an element shared between the arrays. Finding + the exact solution may take extremely long in some cases. + max_work=MAY_SHARE_BOUNDS + Only the memory bounds of a and b are checked. + + Raises + ------ + numpy.TooHardError + Exceeded max_work. + + Returns + ------- + out : bool + + See Also + -------- + may_share_memory + + Examples + -------- + >>> x = np.array([1, 2, 3, 4]) + >>> np.shares_memory(x, np.array([5, 6, 7])) + False + >>> np.shares_memory(x[::2], x) + True + >>> np.shares_memory(x[::2], x[1::2]) + False + + Checking whether two arrays share memory is NP-complete, and + runtime may increase exponentially in the number of + dimensions. Hence, `max_work` should generally be set to a finite + number, as it is possible to construct examples that take + extremely long to run: + + >>> from numpy.lib.stride_tricks import as_strided + >>> x = np.zeros([192163377], dtype=np.int8) + >>> x1 = as_strided(x, strides=(36674, 61119, 85569), shape=(1049, 1049, 1049)) + >>> x2 = as_strided(x[64023025:], strides=(12223, 12224, 1), shape=(1049, 1049, 1)) + >>> np.shares_memory(x1, x2, max_work=1000) + Traceback (most recent call last): + ... + numpy.TooHardError: Exceeded max_work + + Running ``np.shares_memory(x1, x2)`` without `max_work` set takes + around 1 minute for this case. It is possible to find problems + that take still significantly longer. + + """ + return (a, b) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.may_share_memory) +def may_share_memory(a, b, max_work=None): + """ + may_share_memory(a, b, max_work=None) + + Determine if two arrays might share memory + + A return of True does not necessarily mean that the two arrays + share any element. It just means that they *might*. + + Only the memory bounds of a and b are checked by default. + + Parameters + ---------- + a, b : ndarray + Input arrays + max_work : int, optional + Effort to spend on solving the overlap problem. 
See + `shares_memory` for details. Default for ``may_share_memory`` + is to do a bounds check. + + Returns + ------- + out : bool + + See Also + -------- + shares_memory + + Examples + -------- + >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9])) + False + >>> x = np.zeros([3, 4]) + >>> np.may_share_memory(x[:,0], x[:,1]) + True + + """ + return (a, b) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.is_busday) +def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None): + """ + is_busday(dates, weekmask='1111100', holidays=None, busdaycal=None, out=None) + + Calculates which of the given dates are valid days, and which are not. + + .. versionadded:: 1.7.0 + + Parameters + ---------- + dates : array_like of datetime64[D] + The array of dates to process. + weekmask : str or array_like of bool, optional + A seven-element array indicating which of Monday through Sunday are + valid days. May be specified as a length-seven list or array, like + [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string + like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for + weekdays, optionally separated by white space. Valid abbreviations + are: Mon Tue Wed Thu Fri Sat Sun + holidays : array_like of datetime64[D], optional + An array of dates to consider as invalid dates. They may be + specified in any order, and NaT (not-a-time) dates are ignored. + This list is saved in a normalized form that is suited for + fast calculations of valid days. + busdaycal : busdaycalendar, optional + A `busdaycalendar` object which specifies the valid days. If this + parameter is provided, neither weekmask nor holidays may be + provided. + out : array of bool, optional + If provided, this array is filled with the result. + + Returns + ------- + out : array of bool + An array with the same shape as ``dates``, containing True for + each valid day, and False for each invalid day. + + See Also + -------- + busdaycalendar : An object that specifies a custom set of valid days. + busday_offset : Applies an offset counted in valid days. + busday_count : Counts how many valid days are in a half-open date range. + + Examples + -------- + >>> # The weekdays are Friday, Saturday, and Monday + ... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'], + ... holidays=['2011-07-01', '2011-07-04', '2011-07-17']) + array([False, False, True]) + """ + return (dates, weekmask, holidays, out) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_offset) +def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None, + busdaycal=None, out=None): + """ + busday_offset(dates, offsets, roll='raise', weekmask='1111100', holidays=None, busdaycal=None, out=None) + + First adjusts the date to fall on a valid day according to + the ``roll`` rule, then applies offsets to the given dates + counted in valid days. + + .. versionadded:: 1.7.0 + + Parameters + ---------- + dates : array_like of datetime64[D] + The array of dates to process. + offsets : array_like of int + The array of offsets, which is broadcast with ``dates``. + roll : {'raise', 'nat', 'forward', 'following', 'backward', 'preceding', 'modifiedfollowing', 'modifiedpreceding'}, optional + How to treat dates that do not fall on a valid day. The default + is 'raise'. + + * 'raise' means to raise an exception for an invalid day. + * 'nat' means to return a NaT (not-a-time) for an invalid day. + * 'forward' and 'following' mean to take the first valid day + later in time. 
+ * 'backward' and 'preceding' mean to take the first valid day + earlier in time. + * 'modifiedfollowing' means to take the first valid day + later in time unless it is across a Month boundary, in which + case to take the first valid day earlier in time. + * 'modifiedpreceding' means to take the first valid day + earlier in time unless it is across a Month boundary, in which + case to take the first valid day later in time. + weekmask : str or array_like of bool, optional + A seven-element array indicating which of Monday through Sunday are + valid days. May be specified as a length-seven list or array, like + [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string + like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for + weekdays, optionally separated by white space. Valid abbreviations + are: Mon Tue Wed Thu Fri Sat Sun + holidays : array_like of datetime64[D], optional + An array of dates to consider as invalid dates. They may be + specified in any order, and NaT (not-a-time) dates are ignored. + This list is saved in a normalized form that is suited for + fast calculations of valid days. + busdaycal : busdaycalendar, optional + A `busdaycalendar` object which specifies the valid days. If this + parameter is provided, neither weekmask nor holidays may be + provided. + out : array of datetime64[D], optional + If provided, this array is filled with the result. + + Returns + ------- + out : array of datetime64[D] + An array with a shape from broadcasting ``dates`` and ``offsets`` + together, containing the dates with offsets applied. + + See Also + -------- + busdaycalendar : An object that specifies a custom set of valid days. + is_busday : Returns a boolean array indicating valid days. + busday_count : Counts how many valid days are in a half-open date range. + + Examples + -------- + >>> # First business day in October 2011 (not accounting for holidays) + ... np.busday_offset('2011-10', 0, roll='forward') + numpy.datetime64('2011-10-03') + >>> # Last business day in February 2012 (not accounting for holidays) + ... np.busday_offset('2012-03', -1, roll='forward') + numpy.datetime64('2012-02-29') + >>> # Third Wednesday in January 2011 + ... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed') + numpy.datetime64('2011-01-19') + >>> # 2012 Mother's Day in Canada and the U.S. + ... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun') + numpy.datetime64('2012-05-13') + + >>> # First business day on or after a date + ... np.busday_offset('2011-03-20', 0, roll='forward') + numpy.datetime64('2011-03-21') + >>> np.busday_offset('2011-03-22', 0, roll='forward') + numpy.datetime64('2011-03-22') + >>> # First business day after a date + ... np.busday_offset('2011-03-20', 1, roll='backward') + numpy.datetime64('2011-03-21') + >>> np.busday_offset('2011-03-22', 1, roll='backward') + numpy.datetime64('2011-03-23') + """ + return (dates, offsets, weekmask, holidays, out) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_count) +def busday_count(begindates, enddates, weekmask=None, holidays=None, + busdaycal=None, out=None): + """ + busday_count(begindates, enddates, weekmask='1111100', holidays=[], busdaycal=None, out=None) + + Counts the number of valid days between `begindates` and + `enddates`, not including the day of `enddates`. + + If ``enddates`` specifies a date value that is earlier than the + corresponding ``begindates`` date value, the count will be negative. + + .. 
versionadded:: 1.7.0
+
+    Parameters
+    ----------
+    begindates : array_like of datetime64[D]
+        The array of the first dates for counting.
+    enddates : array_like of datetime64[D]
+        The array of the end dates for counting, which are excluded
+        from the count themselves.
+    weekmask : str or array_like of bool, optional
+        A seven-element array indicating which of Monday through Sunday are
+        valid days.  May be specified as a length-seven list or array, like
+        [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
+        like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
+        weekdays, optionally separated by white space. Valid abbreviations
+        are: Mon Tue Wed Thu Fri Sat Sun
+    holidays : array_like of datetime64[D], optional
+        An array of dates to consider as invalid dates.  They may be
+        specified in any order, and NaT (not-a-time) dates are ignored.
+        This list is saved in a normalized form that is suited for
+        fast calculations of valid days.
+    busdaycal : busdaycalendar, optional
+        A `busdaycalendar` object which specifies the valid days.  If this
+        parameter is provided, neither weekmask nor holidays may be
+        provided.
+    out : array of int, optional
+        If provided, this array is filled with the result.
+
+    Returns
+    -------
+    out : array of int
+        An array with a shape from broadcasting ``begindates`` and ``enddates``
+        together, containing the number of valid days between
+        the begin and end dates.
+
+    See Also
+    --------
+    busdaycalendar : An object that specifies a custom set of valid days.
+    is_busday : Returns a boolean array indicating valid days.
+    busday_offset : Applies an offset counted in valid days.
+
+    Examples
+    --------
+    >>> # Number of weekdays in January 2011
+    ... np.busday_count('2011-01', '2011-02')
+    21
+    >>> # Number of weekdays in 2011
+    >>> np.busday_count('2011', '2012')
+    260
+    >>> # Number of Saturdays in 2011
+    ... np.busday_count('2011', '2012', weekmask='Sat')
+    53
+    """
+    return (begindates, enddates, weekmask, holidays, out)
+
+
+@array_function_from_c_func_and_dispatcher(
+    _multiarray_umath.datetime_as_string)
+def datetime_as_string(arr, unit=None, timezone=None, casting=None):
+    """
+    datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind')
+
+    Convert an array of datetimes into an array of strings.
+
+    Parameters
+    ----------
+    arr : array_like of datetime64
+        The array of UTC timestamps to format.
+    unit : str
+        One of None, 'auto', or a :ref:`datetime unit <arrays.dtypes.dateunits>`.
+    timezone : {'naive', 'UTC', 'local'} or tzinfo
+        Timezone information to use when displaying the datetime. If 'UTC', end
+        with a Z to indicate UTC time. If 'local', convert to the local timezone
+        first, and suffix with a +-#### timezone offset. If a tzinfo object,
+        then do as with 'local', but use the specified timezone.
+    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}
+        Casting to allow when changing between datetime units.
+
+    Returns
+    -------
+    str_arr : ndarray
+        An array of strings the same shape as `arr`.
+
+    Examples
+    --------
+    >>> import pytz
+    >>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]')
+    >>> d
+    array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30',
+           '2002-10-27T07:30'], dtype='datetime64[m]')
+
+    Setting the timezone to UTC shows the same information, but with a Z suffix
+
+    >>> np.datetime_as_string(d, timezone='UTC')
+    array(['2002-10-27T04:30Z', '2002-10-27T05:30Z', '2002-10-27T06:30Z',
+           '2002-10-27T07:30Z'], dtype='<U35')
+
+    Note that we picked datetimes that cross a DST boundary. Passing in a
+    local timezone map drops the Z, and instead shows the correct offset
+
+    >>> np.datetime_as_string(d, timezone=pytz.timezone('US/Eastern'))
+    array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400',
+           '2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='<U39')
+
+    Passing in a unit will change the precision
+
+    >>> np.datetime_as_string(d, unit='h')
+    array(['2002-10-27T04', '2002-10-27T05', '2002-10-27T06', '2002-10-27T07'],
+          dtype='<U32')
+    >>> np.datetime_as_string(d, unit='s')
+    array(['2002-10-27T04:30:00', '2002-10-27T05:30:00', '2002-10-27T06:30:00',
+           '2002-10-27T07:30:00'], dtype='<U38')
+
+    'casting' can be used to specify whether precision can be changed
+
+    >>> np.datetime_as_string(d, unit='h', casting='safe')
+    Traceback (most recent call last):
+        ...
+    TypeError: Cannot create a datetime string as units 'h' from a NumPy
+    datetime with units 'm' according to the rule 'safe'
+    """
+    return (arr,)
diff --git a/MLPY/Lib/site-packages/numpy/core/numeric.py b/MLPY/Lib/site-packages/numpy/core/numeric.py
new file mode 100644
index 0000000000000000000000000000000000000000..29b202b62f7ab83bd9465d28e1054f52c2565f9b
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/core/numeric.py
@@ -0,0 +1,2537 @@
+import functools
+import itertools
+import operator
+import sys
+import warnings
+import numbers
+
+import numpy as np
+from . import multiarray
+from .multiarray import (
+    _fastCopyAndTranspose as fastCopyAndTranspose, ALLOW_THREADS,
+    BUFSIZE, CLIP, MAXDIMS, MAY_SHARE_BOUNDS, MAY_SHARE_EXACT, RAISE,
+    WRAP, arange, array, asarray, asanyarray, ascontiguousarray,
+    asfortranarray, broadcast, can_cast, compare_chararrays,
+    concatenate, copyto, dot, dtype, empty,
+    empty_like, flatiter, frombuffer, fromfile, fromiter, fromstring,
+    inner, lexsort, matmul, may_share_memory,
+    min_scalar_type, ndarray, nditer, nested_iters, promote_types,
+    putmask, result_type, set_numeric_ops, shares_memory, vdot, where,
+    zeros, normalize_axis_index)
+
+from . import overrides
+from . import umath
+from . import shape_base
+from .overrides import set_array_function_like_doc, set_module
+from .umath import (multiply, invert, sin, PINF, NAN)
+from . 
import numerictypes +from .numerictypes import longlong, intc, int_, float_, complex_, bool_ +from ._exceptions import TooHardError, AxisError +from ._ufunc_config import errstate + +bitwise_not = invert +ufunc = type(sin) +newaxis = None + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +__all__ = [ + 'newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc', + 'arange', 'array', 'asarray', 'asanyarray', 'ascontiguousarray', + 'asfortranarray', 'zeros', 'count_nonzero', 'empty', 'broadcast', 'dtype', + 'fromstring', 'fromfile', 'frombuffer', 'where', + 'argwhere', 'copyto', 'concatenate', 'fastCopyAndTranspose', 'lexsort', + 'set_numeric_ops', 'can_cast', 'promote_types', 'min_scalar_type', + 'result_type', 'isfortran', 'empty_like', 'zeros_like', 'ones_like', + 'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot', 'roll', + 'rollaxis', 'moveaxis', 'cross', 'tensordot', 'little_endian', + 'fromiter', 'array_equal', 'array_equiv', 'indices', 'fromfunction', + 'isclose', 'isscalar', 'binary_repr', 'base_repr', 'ones', + 'identity', 'allclose', 'compare_chararrays', 'putmask', + 'flatnonzero', 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN', + 'False_', 'True_', 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', + 'BUFSIZE', 'ALLOW_THREADS', 'ComplexWarning', 'full', 'full_like', + 'matmul', 'shares_memory', 'may_share_memory', 'MAY_SHARE_BOUNDS', + 'MAY_SHARE_EXACT', 'TooHardError', 'AxisError'] + + +@set_module('numpy') +class ComplexWarning(RuntimeWarning): + """ + The warning raised when casting a complex dtype to a real dtype. + + As implemented, casting a complex number to a real discards its imaginary + part, but this behavior may not be what the user actually wants. + + """ + pass + + +def _zeros_like_dispatcher(a, dtype=None, order=None, subok=None, shape=None): + return (a,) + + +@array_function_dispatch(_zeros_like_dispatcher) +def zeros_like(a, dtype=None, order='K', subok=True, shape=None): + """ + Return an array of zeros with the same shape and type as a given array. + + Parameters + ---------- + a : array_like + The shape and data-type of `a` define these same attributes of + the returned array. + dtype : data-type, optional + Overrides the data type of the result. + + .. versionadded:: 1.6.0 + order : {'C', 'F', 'A', or 'K'}, optional + Overrides the memory layout of the result. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, + 'C' otherwise. 'K' means match the layout of `a` as closely + as possible. + + .. versionadded:: 1.6.0 + subok : bool, optional. + If True, then the newly created array will use the sub-class + type of `a`, otherwise it will be a base-class array. Defaults + to True. + shape : int or sequence of ints, optional. + Overrides the shape of the result. If order='K' and the number of + dimensions is unchanged, will try to keep order, otherwise, + order='C' is implied. + + .. versionadded:: 1.17.0 + + Returns + ------- + out : ndarray + Array of zeros with the same shape and type as `a`. + + See Also + -------- + empty_like : Return an empty array with shape and type of input. + ones_like : Return an array of ones with shape and type of input. + full_like : Return a new array with shape of input filled with value. + zeros : Return a new array setting values to zero. 
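+
+    Notes
+    -----
+    As a small illustrative sketch (an added note, not upstream NumPy
+    text), ``zeros_like(a)`` matches ``empty_like(a)`` followed by a
+    ``copyto`` fill with zero:
+
+    >>> a = np.arange(4).reshape(2, 2)
+    >>> res = np.empty_like(a)
+    >>> np.copyto(res, 0, casting='unsafe')
+    >>> np.array_equal(res, np.zeros_like(a))
+    True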
+ + Examples + -------- + >>> x = np.arange(6) + >>> x = x.reshape((2, 3)) + >>> x + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.zeros_like(x) + array([[0, 0, 0], + [0, 0, 0]]) + + >>> y = np.arange(3, dtype=float) + >>> y + array([0., 1., 2.]) + >>> np.zeros_like(y) + array([0., 0., 0.]) + + """ + res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape) + # needed instead of a 0 to get same result as zeros for for string dtypes + z = zeros(1, dtype=res.dtype) + multiarray.copyto(res, z, casting='unsafe') + return res + + +def _ones_dispatcher(shape, dtype=None, order=None, *, like=None): + return(like,) + + +@set_array_function_like_doc +@set_module('numpy') +def ones(shape, dtype=None, order='C', *, like=None): + """ + Return a new array of given shape and type, filled with ones. + + Parameters + ---------- + shape : int or sequence of ints + Shape of the new array, e.g., ``(2, 3)`` or ``2``. + dtype : data-type, optional + The desired data-type for the array, e.g., `numpy.int8`. Default is + `numpy.float64`. + order : {'C', 'F'}, optional, default: C + Whether to store multi-dimensional data in row-major + (C-style) or column-major (Fortran-style) order in + memory. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + Array of ones with the given shape, dtype, and order. + + See Also + -------- + ones_like : Return an array of ones with shape and type of input. + empty : Return a new uninitialized array. + zeros : Return a new array setting values to zero. + full : Return a new array of given shape filled with value. + + + Examples + -------- + >>> np.ones(5) + array([1., 1., 1., 1., 1.]) + + >>> np.ones((5,), dtype=int) + array([1, 1, 1, 1, 1]) + + >>> np.ones((2, 1)) + array([[1.], + [1.]]) + + >>> s = (2,2) + >>> np.ones(s) + array([[1., 1.], + [1., 1.]]) + + """ + if like is not None: + return _ones_with_like(shape, dtype=dtype, order=order, like=like) + + a = empty(shape, dtype, order) + multiarray.copyto(a, 1, casting='unsafe') + return a + + +_ones_with_like = array_function_dispatch( + _ones_dispatcher +)(ones) + + +def _ones_like_dispatcher(a, dtype=None, order=None, subok=None, shape=None): + return (a,) + + +@array_function_dispatch(_ones_like_dispatcher) +def ones_like(a, dtype=None, order='K', subok=True, shape=None): + """ + Return an array of ones with the same shape and type as a given array. + + Parameters + ---------- + a : array_like + The shape and data-type of `a` define these same attributes of + the returned array. + dtype : data-type, optional + Overrides the data type of the result. + + .. versionadded:: 1.6.0 + order : {'C', 'F', 'A', or 'K'}, optional + Overrides the memory layout of the result. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, + 'C' otherwise. 'K' means match the layout of `a` as closely + as possible. + + .. versionadded:: 1.6.0 + subok : bool, optional. + If True, then the newly created array will use the sub-class + type of `a`, otherwise it will be a base-class array. Defaults + to True. + shape : int or sequence of ints, optional. + Overrides the shape of the result. If order='K' and the number of + dimensions is unchanged, will try to keep order, otherwise, + order='C' is implied. + + .. versionadded:: 1.17.0 + + Returns + ------- + out : ndarray + Array of ones with the same shape and type as `a`. + + See Also + -------- + empty_like : Return an empty array with shape and type of input. 
+ zeros_like : Return an array of zeros with shape and type of input. + full_like : Return a new array with shape of input filled with value. + ones : Return a new array setting values to one. + + Examples + -------- + >>> x = np.arange(6) + >>> x = x.reshape((2, 3)) + >>> x + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.ones_like(x) + array([[1, 1, 1], + [1, 1, 1]]) + + >>> y = np.arange(3, dtype=float) + >>> y + array([0., 1., 2.]) + >>> np.ones_like(y) + array([1., 1., 1.]) + + """ + res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape) + multiarray.copyto(res, 1, casting='unsafe') + return res + + +def _full_dispatcher(shape, fill_value, dtype=None, order=None, *, like=None): + return(like,) + + +@set_array_function_like_doc +@set_module('numpy') +def full(shape, fill_value, dtype=None, order='C', *, like=None): + """ + Return a new array of given shape and type, filled with `fill_value`. + + Parameters + ---------- + shape : int or sequence of ints + Shape of the new array, e.g., ``(2, 3)`` or ``2``. + fill_value : scalar or array_like + Fill value. + dtype : data-type, optional + The desired data-type for the array The default, None, means + ``np.array(fill_value).dtype``. + order : {'C', 'F'}, optional + Whether to store multidimensional data in C- or Fortran-contiguous + (row- or column-wise) order in memory. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + Array of `fill_value` with the given shape, dtype, and order. + + See Also + -------- + full_like : Return a new array with shape of input filled with value. + empty : Return a new uninitialized array. + ones : Return a new array setting values to one. + zeros : Return a new array setting values to zero. + + Examples + -------- + >>> np.full((2, 2), np.inf) + array([[inf, inf], + [inf, inf]]) + >>> np.full((2, 2), 10) + array([[10, 10], + [10, 10]]) + + >>> np.full((2, 2), [1, 2]) + array([[1, 2], + [1, 2]]) + + """ + if like is not None: + return _full_with_like(shape, fill_value, dtype=dtype, order=order, like=like) + + if dtype is None: + fill_value = asarray(fill_value) + dtype = fill_value.dtype + a = empty(shape, dtype, order) + multiarray.copyto(a, fill_value, casting='unsafe') + return a + + +_full_with_like = array_function_dispatch( + _full_dispatcher +)(full) + + +def _full_like_dispatcher(a, fill_value, dtype=None, order=None, subok=None, shape=None): + return (a,) + + +@array_function_dispatch(_full_like_dispatcher) +def full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None): + """ + Return a full array with the same shape and type as a given array. + + Parameters + ---------- + a : array_like + The shape and data-type of `a` define these same attributes of + the returned array. + fill_value : scalar + Fill value. + dtype : data-type, optional + Overrides the data type of the result. + order : {'C', 'F', 'A', or 'K'}, optional + Overrides the memory layout of the result. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, + 'C' otherwise. 'K' means match the layout of `a` as closely + as possible. + subok : bool, optional. + If True, then the newly created array will use the sub-class + type of `a`, otherwise it will be a base-class array. Defaults + to True. + shape : int or sequence of ints, optional. + Overrides the shape of the result. If order='K' and the number of + dimensions is unchanged, will try to keep order, otherwise, + order='C' is implied. + + .. 
versionadded:: 1.17.0 + + Returns + ------- + out : ndarray + Array of `fill_value` with the same shape and type as `a`. + + See Also + -------- + empty_like : Return an empty array with shape and type of input. + ones_like : Return an array of ones with shape and type of input. + zeros_like : Return an array of zeros with shape and type of input. + full : Return a new array of given shape filled with value. + + Examples + -------- + >>> x = np.arange(6, dtype=int) + >>> np.full_like(x, 1) + array([1, 1, 1, 1, 1, 1]) + >>> np.full_like(x, 0.1) + array([0, 0, 0, 0, 0, 0]) + >>> np.full_like(x, 0.1, dtype=np.double) + array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) + >>> np.full_like(x, np.nan, dtype=np.double) + array([nan, nan, nan, nan, nan, nan]) + + >>> y = np.arange(6, dtype=np.double) + >>> np.full_like(y, 0.1) + array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) + + """ + res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape) + multiarray.copyto(res, fill_value, casting='unsafe') + return res + + +def _count_nonzero_dispatcher(a, axis=None, *, keepdims=None): + return (a,) + + +@array_function_dispatch(_count_nonzero_dispatcher) +def count_nonzero(a, axis=None, *, keepdims=False): + """ + Counts the number of non-zero values in the array ``a``. + + The word "non-zero" is in reference to the Python 2.x + built-in method ``__nonzero__()`` (renamed ``__bool__()`` + in Python 3.x) of Python objects that tests an object's + "truthfulness". For example, any number is considered + truthful if it is nonzero, whereas any string is considered + truthful if it is not the empty string. Thus, this function + (recursively) counts how many elements in ``a`` (and in + sub-arrays thereof) have their ``__nonzero__()`` or ``__bool__()`` + method evaluated to ``True``. + + Parameters + ---------- + a : array_like + The array for which to count non-zeros. + axis : int or tuple, optional + Axis or tuple of axes along which to count non-zeros. + Default is None, meaning that non-zeros will be counted + along a flattened version of ``a``. + + .. versionadded:: 1.12.0 + + keepdims : bool, optional + If this is set to True, the axes that are counted are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + .. versionadded:: 1.19.0 + + Returns + ------- + count : int or array of int + Number of non-zero values in the array along a given axis. + Otherwise, the total number of non-zero values in the array + is returned. + + See Also + -------- + nonzero : Return the coordinates of all the non-zero values. + + Examples + -------- + >>> np.count_nonzero(np.eye(4)) + 4 + >>> a = np.array([[0, 1, 7, 0], + ... [3, 0, 2, 19]]) + >>> np.count_nonzero(a) + 5 + >>> np.count_nonzero(a, axis=0) + array([1, 1, 2, 1]) + >>> np.count_nonzero(a, axis=1) + array([2, 3]) + >>> np.count_nonzero(a, axis=1, keepdims=True) + array([[2], + [3]]) + """ + if axis is None and not keepdims: + return multiarray.count_nonzero(a) + + a = asanyarray(a) + + # TODO: this works around .astype(bool) not working properly (gh-9847) + if np.issubdtype(a.dtype, np.character): + a_bool = a != a.dtype.type() + else: + a_bool = a.astype(np.bool_, copy=False) + + return a_bool.sum(axis=axis, dtype=np.intp, keepdims=keepdims) + + +@set_module('numpy') +def isfortran(a): + """ + Check if the array is Fortran contiguous but *not* C contiguous. 
+ + This function is obsolete and, because of changes due to relaxed stride + checking, its return value for the same array may differ for versions + of NumPy >= 1.10.0 and previous versions. If you only want to check if an + array is Fortran contiguous use ``a.flags.f_contiguous`` instead. + + Parameters + ---------- + a : ndarray + Input array. + + Returns + ------- + isfortran : bool + Returns True if the array is Fortran contiguous but *not* C contiguous. + + + Examples + -------- + + np.array allows to specify whether the array is written in C-contiguous + order (last index varies the fastest), or FORTRAN-contiguous order in + memory (first index varies the fastest). + + >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C') + >>> a + array([[1, 2, 3], + [4, 5, 6]]) + >>> np.isfortran(a) + False + + >>> b = np.array([[1, 2, 3], [4, 5, 6]], order='F') + >>> b + array([[1, 2, 3], + [4, 5, 6]]) + >>> np.isfortran(b) + True + + + The transpose of a C-ordered array is a FORTRAN-ordered array. + + >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C') + >>> a + array([[1, 2, 3], + [4, 5, 6]]) + >>> np.isfortran(a) + False + >>> b = a.T + >>> b + array([[1, 4], + [2, 5], + [3, 6]]) + >>> np.isfortran(b) + True + + C-ordered arrays evaluate as False even if they are also FORTRAN-ordered. + + >>> np.isfortran(np.array([1, 2], order='F')) + False + + """ + return a.flags.fnc + + +def _argwhere_dispatcher(a): + return (a,) + + +@array_function_dispatch(_argwhere_dispatcher) +def argwhere(a): + """ + Find the indices of array elements that are non-zero, grouped by element. + + Parameters + ---------- + a : array_like + Input data. + + Returns + ------- + index_array : (N, a.ndim) ndarray + Indices of elements that are non-zero. Indices are grouped by element. + This array will have shape ``(N, a.ndim)`` where ``N`` is the number of + non-zero items. + + See Also + -------- + where, nonzero + + Notes + ----- + ``np.argwhere(a)`` is almost the same as ``np.transpose(np.nonzero(a))``, + but produces a result of the correct shape for a 0D array. + + The output of ``argwhere`` is not suitable for indexing arrays. + For this purpose use ``nonzero(a)`` instead. + + Examples + -------- + >>> x = np.arange(6).reshape(2,3) + >>> x + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.argwhere(x>1) + array([[0, 2], + [1, 0], + [1, 1], + [1, 2]]) + + """ + # nonzero does not behave well on 0d, so promote to 1d + if np.ndim(a) == 0: + a = shape_base.atleast_1d(a) + # then remove the added dimension + return argwhere(a)[:,:0] + return transpose(nonzero(a)) + + +def _flatnonzero_dispatcher(a): + return (a,) + + +@array_function_dispatch(_flatnonzero_dispatcher) +def flatnonzero(a): + """ + Return indices that are non-zero in the flattened version of a. + + This is equivalent to np.nonzero(np.ravel(a))[0]. + + Parameters + ---------- + a : array_like + Input data. + + Returns + ------- + res : ndarray + Output array, containing the indices of the elements of `a.ravel()` + that are non-zero. + + See Also + -------- + nonzero : Return the indices of the non-zero elements of the input array. + ravel : Return a 1-D array containing the elements of the input array. 
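+
+    Notes
+    -----
+    A quick added check (illustrative, not upstream NumPy text) of the
+    equivalence stated above:
+
+    >>> a = np.array([[0, 5], [0, 3]])
+    >>> np.array_equal(np.flatnonzero(a), np.nonzero(np.ravel(a))[0])
+    True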
+ + Examples + -------- + >>> x = np.arange(-2, 3) + >>> x + array([-2, -1, 0, 1, 2]) + >>> np.flatnonzero(x) + array([0, 1, 3, 4]) + + Use the indices of the non-zero elements as an index array to extract + these elements: + + >>> x.ravel()[np.flatnonzero(x)] + array([-2, -1, 1, 2]) + + """ + return np.nonzero(np.ravel(a))[0] + + +def _correlate_dispatcher(a, v, mode=None): + return (a, v) + + +@array_function_dispatch(_correlate_dispatcher) +def correlate(a, v, mode='valid'): + """ + Cross-correlation of two 1-dimensional sequences. + + This function computes the correlation as generally defined in signal + processing texts:: + + c_{av}[k] = sum_n a[n+k] * conj(v[n]) + + with a and v sequences being zero-padded where necessary and conj being + the conjugate. + + Parameters + ---------- + a, v : array_like + Input sequences. + mode : {'valid', 'same', 'full'}, optional + Refer to the `convolve` docstring. Note that the default + is 'valid', unlike `convolve`, which uses 'full'. + old_behavior : bool + `old_behavior` was removed in NumPy 1.10. If you need the old + behavior, use `multiarray.correlate`. + + Returns + ------- + out : ndarray + Discrete cross-correlation of `a` and `v`. + + See Also + -------- + convolve : Discrete, linear convolution of two one-dimensional sequences. + multiarray.correlate : Old, no conjugate, version of correlate. + scipy.signal.correlate : uses FFT which has superior performance on large arrays. + + Notes + ----- + The definition of correlation above is not unique and sometimes correlation + may be defined differently. Another common definition is:: + + c'_{av}[k] = sum_n a[n] conj(v[n+k]) + + which is related to ``c_{av}[k]`` by ``c'_{av}[k] = c_{av}[-k]``. + + `numpy.correlate` may perform slowly in large arrays (i.e. n = 1e5) because it does + not use the FFT to compute the convolution; in that case, `scipy.signal.correlate` might + be preferable. + + + Examples + -------- + >>> np.correlate([1, 2, 3], [0, 1, 0.5]) + array([3.5]) + >>> np.correlate([1, 2, 3], [0, 1, 0.5], "same") + array([2. , 3.5, 3. ]) + >>> np.correlate([1, 2, 3], [0, 1, 0.5], "full") + array([0.5, 2. , 3.5, 3. , 0. ]) + + Using complex sequences: + + >>> np.correlate([1+1j, 2, 3-1j], [0, 1, 0.5j], 'full') + array([ 0.5-0.5j, 1.0+0.j , 1.5-1.5j, 3.0-1.j , 0.0+0.j ]) + + Note that you get the time reversed, complex conjugated result + when the two input sequences change places, i.e., + ``c_{va}[k] = c^{*}_{av}[-k]``: + + >>> np.correlate([0, 1, 0.5j], [1+1j, 2, 3-1j], 'full') + array([ 0.0+0.j , 3.0+1.j , 1.5+1.5j, 1.0+0.j , 0.5+0.5j]) + + """ + return multiarray.correlate2(a, v, mode) + + +def _convolve_dispatcher(a, v, mode=None): + return (a, v) + + +@array_function_dispatch(_convolve_dispatcher) +def convolve(a, v, mode='full'): + """ + Returns the discrete, linear convolution of two one-dimensional sequences. + + The convolution operator is often seen in signal processing, where it + models the effect of a linear time-invariant system on a signal [1]_. In + probability theory, the sum of two independent random variables is + distributed according to the convolution of their individual + distributions. + + If `v` is longer than `a`, the arrays are swapped before computation. + + Parameters + ---------- + a : (N,) array_like + First one-dimensional input array. + v : (M,) array_like + Second one-dimensional input array. + mode : {'full', 'valid', 'same'}, optional + 'full': + By default, mode is 'full'. 
This returns the convolution + at each point of overlap, with an output shape of (N+M-1,). At + the end-points of the convolution, the signals do not overlap + completely, and boundary effects may be seen. + + 'same': + Mode 'same' returns output of length ``max(M, N)``. Boundary + effects are still visible. + + 'valid': + Mode 'valid' returns output of length + ``max(M, N) - min(M, N) + 1``. The convolution product is only given + for points where the signals overlap completely. Values outside + the signal boundary have no effect. + + Returns + ------- + out : ndarray + Discrete, linear convolution of `a` and `v`. + + See Also + -------- + scipy.signal.fftconvolve : Convolve two arrays using the Fast Fourier + Transform. + scipy.linalg.toeplitz : Used to construct the convolution operator. + polymul : Polynomial multiplication. Same output as convolve, but also + accepts poly1d objects as input. + + Notes + ----- + The discrete convolution operation is defined as + + .. math:: (a * v)[n] = \\sum_{m = -\\infty}^{\\infty} a[m] v[n - m] + + It can be shown that a convolution :math:`x(t) * y(t)` in time/space + is equivalent to the multiplication :math:`X(f) Y(f)` in the Fourier + domain, after appropriate padding (padding is necessary to prevent + circular convolution). Since multiplication is more efficient (faster) + than convolution, the function `scipy.signal.fftconvolve` exploits the + FFT to calculate the convolution of large data-sets. + + References + ---------- + .. [1] Wikipedia, "Convolution", + https://en.wikipedia.org/wiki/Convolution + + Examples + -------- + Note how the convolution operator flips the second array + before "sliding" the two across one another: + + >>> np.convolve([1, 2, 3], [0, 1, 0.5]) + array([0. , 1. , 2.5, 4. , 1.5]) + + Only return the middle values of the convolution. + Contains boundary effects, where zeros are taken + into account: + + >>> np.convolve([1,2,3],[0,1,0.5], 'same') + array([1. , 2.5, 4. ]) + + The two arrays are of the same length, so there + is only one position where they completely overlap: + + >>> np.convolve([1,2,3],[0,1,0.5], 'valid') + array([2.5]) + + """ + a, v = array(a, copy=False, ndmin=1), array(v, copy=False, ndmin=1) + if (len(v) > len(a)): + a, v = v, a + if len(a) == 0: + raise ValueError('a cannot be empty') + if len(v) == 0: + raise ValueError('v cannot be empty') + return multiarray.correlate(a, v[::-1], mode) + + +def _outer_dispatcher(a, b, out=None): + return (a, b, out) + + +@array_function_dispatch(_outer_dispatcher) +def outer(a, b, out=None): + """ + Compute the outer product of two vectors. + + Given two vectors, ``a = [a0, a1, ..., aM]`` and + ``b = [b0, b1, ..., bN]``, + the outer product [1]_ is:: + + [[a0*b0 a0*b1 ... a0*bN ] + [a1*b0 . + [ ... . + [aM*b0 aM*bN ]] + + Parameters + ---------- + a : (M,) array_like + First input vector. Input is flattened if + not already 1-dimensional. + b : (N,) array_like + Second input vector. Input is flattened if + not already 1-dimensional. + out : (M, N) ndarray, optional + A location where the result is stored + + .. versionadded:: 1.9.0 + + Returns + ------- + out : (M, N) ndarray + ``out[i, j] = a[i] * b[j]`` + + See also + -------- + inner + einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent. + ufunc.outer : A generalization to dimensions other than 1D and other + operations. ``np.multiply.outer(a.ravel(), b.ravel())`` + is the equivalent. + tensordot : ``np.tensordot(a.ravel(), b.ravel(), axes=((), ()))`` + is the equivalent. 
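+
+    Notes
+    -----
+    A brief added illustration (not upstream NumPy text) of the
+    ``multiply.outer`` equivalence listed above:
+
+    >>> a, b = np.array([1., 2.]), np.array([3., 4., 5.])
+    >>> np.array_equal(np.outer(a, b), np.multiply.outer(a.ravel(), b.ravel()))
+    True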
+ + References + ---------- + .. [1] : G. H. Golub and C. F. Van Loan, *Matrix Computations*, 3rd + ed., Baltimore, MD, Johns Hopkins University Press, 1996, + pg. 8. + + Examples + -------- + Make a (*very* coarse) grid for computing a Mandelbrot set: + + >>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5)) + >>> rl + array([[-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.]]) + >>> im = np.outer(1j*np.linspace(2, -2, 5), np.ones((5,))) + >>> im + array([[0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j], + [0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j], + [0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], + [0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j], + [0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]]) + >>> grid = rl + im + >>> grid + array([[-2.+2.j, -1.+2.j, 0.+2.j, 1.+2.j, 2.+2.j], + [-2.+1.j, -1.+1.j, 0.+1.j, 1.+1.j, 2.+1.j], + [-2.+0.j, -1.+0.j, 0.+0.j, 1.+0.j, 2.+0.j], + [-2.-1.j, -1.-1.j, 0.-1.j, 1.-1.j, 2.-1.j], + [-2.-2.j, -1.-2.j, 0.-2.j, 1.-2.j, 2.-2.j]]) + + An example using a "vector" of letters: + + >>> x = np.array(['a', 'b', 'c'], dtype=object) + >>> np.outer(x, [1, 2, 3]) + array([['a', 'aa', 'aaa'], + ['b', 'bb', 'bbb'], + ['c', 'cc', 'ccc']], dtype=object) + + """ + a = asarray(a) + b = asarray(b) + return multiply(a.ravel()[:, newaxis], b.ravel()[newaxis, :], out) + + +def _tensordot_dispatcher(a, b, axes=None): + return (a, b) + + +@array_function_dispatch(_tensordot_dispatcher) +def tensordot(a, b, axes=2): + """ + Compute tensor dot product along specified axes. + + Given two tensors, `a` and `b`, and an array_like object containing + two array_like objects, ``(a_axes, b_axes)``, sum the products of + `a`'s and `b`'s elements (components) over the axes specified by + ``a_axes`` and ``b_axes``. The third argument can be a single non-negative + integer_like scalar, ``N``; if it is such, then the last ``N`` dimensions + of `a` and the first ``N`` dimensions of `b` are summed over. + + Parameters + ---------- + a, b : array_like + Tensors to "dot". + + axes : int or (2,) array_like + * integer_like + If an int N, sum over the last N axes of `a` and the first N axes + of `b` in order. The sizes of the corresponding axes must match. + * (2,) array_like + Or, a list of axes to be summed over, first sequence applying to `a`, + second to `b`. Both elements array_like must be of the same length. + + Returns + ------- + output : ndarray + The tensor dot product of the input. + + See Also + -------- + dot, einsum + + Notes + ----- + Three common use cases are: + * ``axes = 0`` : tensor product :math:`a\\otimes b` + * ``axes = 1`` : tensor dot product :math:`a\\cdot b` + * ``axes = 2`` : (default) tensor double contraction :math:`a:b` + + When `axes` is integer_like, the sequence for evaluation will be: first + the -Nth axis in `a` and 0th axis in `b`, and the -1th axis in `a` and + Nth axis in `b` last. + + When there is more than one axis to sum over - and they are not the last + (first) axes of `a` (`b`) - the argument `axes` should consist of + two sequences of the same length, with the first axis to sum over given + first in both sequences, the second axis second, and so forth. + + The shape of the result consists of the non-contracted axes of the + first tensor, followed by the non-contracted axes of the second. 
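+
+    As an added sanity check (illustrative, not upstream NumPy text),
+    an ``axes=1`` contraction of 2-D inputs agrees with the matrix
+    product:
+
+    >>> a = np.arange(6.).reshape(2, 3)
+    >>> b = np.arange(12.).reshape(3, 4)
+    >>> np.array_equal(np.tensordot(a, b, axes=1), a @ b)
+    True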
+ + Examples + -------- + A "traditional" example: + + >>> a = np.arange(60.).reshape(3,4,5) + >>> b = np.arange(24.).reshape(4,3,2) + >>> c = np.tensordot(a,b, axes=([1,0],[0,1])) + >>> c.shape + (5, 2) + >>> c + array([[4400., 4730.], + [4532., 4874.], + [4664., 5018.], + [4796., 5162.], + [4928., 5306.]]) + >>> # A slower but equivalent way of computing the same... + >>> d = np.zeros((5,2)) + >>> for i in range(5): + ... for j in range(2): + ... for k in range(3): + ... for n in range(4): + ... d[i,j] += a[k,n,i] * b[n,k,j] + >>> c == d + array([[ True, True], + [ True, True], + [ True, True], + [ True, True], + [ True, True]]) + + An extended example taking advantage of the overloading of + and \\*: + + >>> a = np.array(range(1, 9)) + >>> a.shape = (2, 2, 2) + >>> A = np.array(('a', 'b', 'c', 'd'), dtype=object) + >>> A.shape = (2, 2) + >>> a; A + array([[[1, 2], + [3, 4]], + [[5, 6], + [7, 8]]]) + array([['a', 'b'], + ['c', 'd']], dtype=object) + + >>> np.tensordot(a, A) # third argument default is 2 for double-contraction + array(['abbcccdddd', 'aaaaabbbbbbcccccccdddddddd'], dtype=object) + + >>> np.tensordot(a, A, 1) + array([[['acc', 'bdd'], + ['aaacccc', 'bbbdddd']], + [['aaaaacccccc', 'bbbbbdddddd'], + ['aaaaaaacccccccc', 'bbbbbbbdddddddd']]], dtype=object) + + >>> np.tensordot(a, A, 0) # tensor product (result too long to incl.) + array([[[[['a', 'b'], + ['c', 'd']], + ... + + >>> np.tensordot(a, A, (0, 1)) + array([[['abbbbb', 'cddddd'], + ['aabbbbbb', 'ccdddddd']], + [['aaabbbbbbb', 'cccddddddd'], + ['aaaabbbbbbbb', 'ccccdddddddd']]], dtype=object) + + >>> np.tensordot(a, A, (2, 1)) + array([[['abb', 'cdd'], + ['aaabbbb', 'cccdddd']], + [['aaaaabbbbbb', 'cccccdddddd'], + ['aaaaaaabbbbbbbb', 'cccccccdddddddd']]], dtype=object) + + >>> np.tensordot(a, A, ((0, 1), (0, 1))) + array(['abbbcccccddddddd', 'aabbbbccccccdddddddd'], dtype=object) + + >>> np.tensordot(a, A, ((2, 1), (1, 0))) + array(['acccbbdddd', 'aaaaacccccccbbbbbbdddddddd'], dtype=object) + + """ + try: + iter(axes) + except Exception: + axes_a = list(range(-axes, 0)) + axes_b = list(range(0, axes)) + else: + axes_a, axes_b = axes + try: + na = len(axes_a) + axes_a = list(axes_a) + except TypeError: + axes_a = [axes_a] + na = 1 + try: + nb = len(axes_b) + axes_b = list(axes_b) + except TypeError: + axes_b = [axes_b] + nb = 1 + + a, b = asarray(a), asarray(b) + as_ = a.shape + nda = a.ndim + bs = b.shape + ndb = b.ndim + equal = True + if na != nb: + equal = False + else: + for k in range(na): + if as_[axes_a[k]] != bs[axes_b[k]]: + equal = False + break + if axes_a[k] < 0: + axes_a[k] += nda + if axes_b[k] < 0: + axes_b[k] += ndb + if not equal: + raise ValueError("shape-mismatch for sum") + + # Move the axes to sum over to the end of "a" + # and to the front of "b" + notin = [k for k in range(nda) if k not in axes_a] + newaxes_a = notin + axes_a + N2 = 1 + for axis in axes_a: + N2 *= as_[axis] + newshape_a = (int(multiply.reduce([as_[ax] for ax in notin])), N2) + olda = [as_[axis] for axis in notin] + + notin = [k for k in range(ndb) if k not in axes_b] + newaxes_b = axes_b + notin + N2 = 1 + for axis in axes_b: + N2 *= bs[axis] + newshape_b = (N2, int(multiply.reduce([bs[ax] for ax in notin]))) + oldb = [bs[axis] for axis in notin] + + at = a.transpose(newaxes_a).reshape(newshape_a) + bt = b.transpose(newaxes_b).reshape(newshape_b) + res = dot(at, bt) + return res.reshape(olda + oldb) + + +def _roll_dispatcher(a, shift, axis=None): + return (a,) + + +@array_function_dispatch(_roll_dispatcher) +def roll(a, shift, 
axis=None): + """ + Roll array elements along a given axis. + + Elements that roll beyond the last position are re-introduced at + the first. + + Parameters + ---------- + a : array_like + Input array. + shift : int or tuple of ints + The number of places by which elements are shifted. If a tuple, + then `axis` must be a tuple of the same size, and each of the + given axes is shifted by the corresponding number. If an int + while `axis` is a tuple of ints, then the same value is used for + all given axes. + axis : int or tuple of ints, optional + Axis or axes along which elements are shifted. By default, the + array is flattened before shifting, after which the original + shape is restored. + + Returns + ------- + res : ndarray + Output array, with the same shape as `a`. + + See Also + -------- + rollaxis : Roll the specified axis backwards, until it lies in a + given position. + + Notes + ----- + .. versionadded:: 1.12.0 + + Supports rolling over multiple dimensions simultaneously. + + Examples + -------- + >>> x = np.arange(10) + >>> np.roll(x, 2) + array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7]) + >>> np.roll(x, -2) + array([2, 3, 4, 5, 6, 7, 8, 9, 0, 1]) + + >>> x2 = np.reshape(x, (2,5)) + >>> x2 + array([[0, 1, 2, 3, 4], + [5, 6, 7, 8, 9]]) + >>> np.roll(x2, 1) + array([[9, 0, 1, 2, 3], + [4, 5, 6, 7, 8]]) + >>> np.roll(x2, -1) + array([[1, 2, 3, 4, 5], + [6, 7, 8, 9, 0]]) + >>> np.roll(x2, 1, axis=0) + array([[5, 6, 7, 8, 9], + [0, 1, 2, 3, 4]]) + >>> np.roll(x2, -1, axis=0) + array([[5, 6, 7, 8, 9], + [0, 1, 2, 3, 4]]) + >>> np.roll(x2, 1, axis=1) + array([[4, 0, 1, 2, 3], + [9, 5, 6, 7, 8]]) + >>> np.roll(x2, -1, axis=1) + array([[1, 2, 3, 4, 0], + [6, 7, 8, 9, 5]]) + + """ + a = asanyarray(a) + if axis is None: + return roll(a.ravel(), shift, 0).reshape(a.shape) + + else: + axis = normalize_axis_tuple(axis, a.ndim, allow_duplicate=True) + broadcasted = broadcast(shift, axis) + if broadcasted.ndim > 1: + raise ValueError( + "'shift' and 'axis' should be scalars or 1D sequences") + shifts = {ax: 0 for ax in range(a.ndim)} + for sh, ax in broadcasted: + shifts[ax] += sh + + rolls = [((slice(None), slice(None)),)] * a.ndim + for ax, offset in shifts.items(): + offset %= a.shape[ax] or 1 # If `a` is empty, nothing matters. + if offset: + # (original, result), (original, result) + rolls[ax] = ((slice(None, -offset), slice(offset, None)), + (slice(-offset, None), slice(None, offset))) + + result = empty_like(a) + for indices in itertools.product(*rolls): + arr_index, res_index = zip(*indices) + result[res_index] = a[arr_index] + + return result + + +def _rollaxis_dispatcher(a, axis, start=None): + return (a,) + + +@array_function_dispatch(_rollaxis_dispatcher) +def rollaxis(a, axis, start=0): + """ + Roll the specified axis backwards, until it lies in a given position. + + This function continues to be supported for backward compatibility, but you + should prefer `moveaxis`. The `moveaxis` function was added in NumPy + 1.11. + + Parameters + ---------- + a : ndarray + Input array. + axis : int + The axis to be rolled. The positions of the other axes do not + change relative to one another. + start : int, optional + When ``start <= axis``, the axis is rolled back until it lies in + this position. When ``start > axis``, the axis is rolled until it + lies before this position. The default, 0, results in a "complete" + roll. The following table describes how negative values of ``start`` + are interpreted: + + .. 
table:: + :align: left + + +-------------------+----------------------+ + | ``start`` | Normalized ``start`` | + +===================+======================+ + | ``-(arr.ndim+1)`` | raise ``AxisError`` | + +-------------------+----------------------+ + | ``-arr.ndim`` | 0 | + +-------------------+----------------------+ + | |vdots| | |vdots| | + +-------------------+----------------------+ + | ``-1`` | ``arr.ndim-1`` | + +-------------------+----------------------+ + | ``0`` | ``0`` | + +-------------------+----------------------+ + | |vdots| | |vdots| | + +-------------------+----------------------+ + | ``arr.ndim`` | ``arr.ndim`` | + +-------------------+----------------------+ + | ``arr.ndim + 1`` | raise ``AxisError`` | + +-------------------+----------------------+ + + .. |vdots| unicode:: U+22EE .. Vertical Ellipsis + + Returns + ------- + res : ndarray + For NumPy >= 1.10.0 a view of `a` is always returned. For earlier + NumPy versions a view of `a` is returned only if the order of the + axes is changed, otherwise the input array is returned. + + See Also + -------- + moveaxis : Move array axes to new positions. + roll : Roll the elements of an array by a number of positions along a + given axis. + + Examples + -------- + >>> a = np.ones((3,4,5,6)) + >>> np.rollaxis(a, 3, 1).shape + (3, 6, 4, 5) + >>> np.rollaxis(a, 2).shape + (5, 3, 4, 6) + >>> np.rollaxis(a, 1, 4).shape + (3, 5, 6, 4) + + """ + n = a.ndim + axis = normalize_axis_index(axis, n) + if start < 0: + start += n + msg = "'%s' arg requires %d <= %s < %d, but %d was passed in" + if not (0 <= start < n + 1): + raise AxisError(msg % ('start', -n, 'start', n + 1, start)) + if axis < start: + # it's been removed + start -= 1 + if axis == start: + return a[...] + axes = list(range(0, n)) + axes.remove(axis) + axes.insert(start, axis) + return a.transpose(axes) + + +def normalize_axis_tuple(axis, ndim, argname=None, allow_duplicate=False): + """ + Normalizes an axis argument into a tuple of non-negative integer axes. + + This handles shorthands such as ``1`` and converts them to ``(1,)``, + as well as performing the handling of negative indices covered by + `normalize_axis_index`. + + By default, this forbids axes from being specified multiple times. + + Used internally by multi-axis-checking logic. + + .. versionadded:: 1.13.0 + + Parameters + ---------- + axis : int, iterable of int + The un-normalized index or indices of the axis. + ndim : int + The number of dimensions of the array that `axis` should be normalized + against. + argname : str, optional + A prefix to put before the error message, typically the name of the + argument. + allow_duplicate : bool, optional + If False, the default, disallow an axis from being specified twice. + + Returns + ------- + normalized_axes : tuple of int + The normalized axis index, such that `0 <= normalized_axis < ndim` + + Raises + ------ + AxisError + If any axis provided is out of range + ValueError + If an axis is repeated + + See also + -------- + normalize_axis_index : normalizing a single scalar axis + """ + # Optimization to speed-up the most common cases. + if type(axis) not in (tuple, list): + try: + axis = [operator.index(axis)] + except TypeError: + pass + # Going via an iterator directly is slower than via list comprehension. 
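+    # Illustrative note (added comment): e.g. with ndim=3, axis=-1
+    # normalizes to (2,) and axis=(0, -1) to (0, 2); out-of-range
+    # entries raise AxisError via normalize_axis_index below.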
+ axis = tuple([normalize_axis_index(ax, ndim, argname) for ax in axis]) + if not allow_duplicate and len(set(axis)) != len(axis): + if argname: + raise ValueError('repeated axis in `{}` argument'.format(argname)) + else: + raise ValueError('repeated axis') + return axis + + +def _moveaxis_dispatcher(a, source, destination): + return (a,) + + +@array_function_dispatch(_moveaxis_dispatcher) +def moveaxis(a, source, destination): + """ + Move axes of an array to new positions. + + Other axes remain in their original order. + + .. versionadded:: 1.11.0 + + Parameters + ---------- + a : np.ndarray + The array whose axes should be reordered. + source : int or sequence of int + Original positions of the axes to move. These must be unique. + destination : int or sequence of int + Destination positions for each of the original axes. These must also be + unique. + + Returns + ------- + result : np.ndarray + Array with moved axes. This array is a view of the input array. + + See Also + -------- + transpose : Permute the dimensions of an array. + swapaxes : Interchange two axes of an array. + + Examples + -------- + >>> x = np.zeros((3, 4, 5)) + >>> np.moveaxis(x, 0, -1).shape + (4, 5, 3) + >>> np.moveaxis(x, -1, 0).shape + (5, 3, 4) + + These all achieve the same result: + + >>> np.transpose(x).shape + (5, 4, 3) + >>> np.swapaxes(x, 0, -1).shape + (5, 4, 3) + >>> np.moveaxis(x, [0, 1], [-1, -2]).shape + (5, 4, 3) + >>> np.moveaxis(x, [0, 1, 2], [-1, -2, -3]).shape + (5, 4, 3) + + """ + try: + # allow duck-array types if they define transpose + transpose = a.transpose + except AttributeError: + a = asarray(a) + transpose = a.transpose + + source = normalize_axis_tuple(source, a.ndim, 'source') + destination = normalize_axis_tuple(destination, a.ndim, 'destination') + if len(source) != len(destination): + raise ValueError('`source` and `destination` arguments must have ' + 'the same number of elements') + + order = [n for n in range(a.ndim) if n not in source] + + for dest, src in sorted(zip(destination, source)): + order.insert(dest, src) + + result = transpose(order) + return result + + +# fix hack in scipy which imports this function +def _move_axis_to_0(a, axis): + return moveaxis(a, axis, 0) + + +def _cross_dispatcher(a, b, axisa=None, axisb=None, axisc=None, axis=None): + return (a, b) + + +@array_function_dispatch(_cross_dispatcher) +def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): + """ + Return the cross product of two (arrays of) vectors. + + The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular + to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors + are defined by the last axis of `a` and `b` by default, and these axes + can have dimensions 2 or 3. Where the dimension of either `a` or `b` is + 2, the third component of the input vector is assumed to be zero and the + cross product calculated accordingly. In cases where both input vectors + have dimension 2, the z-component of the cross product is returned. + + Parameters + ---------- + a : array_like + Components of the first vector(s). + b : array_like + Components of the second vector(s). + axisa : int, optional + Axis of `a` that defines the vector(s). By default, the last axis. + axisb : int, optional + Axis of `b` that defines the vector(s). By default, the last axis. + axisc : int, optional + Axis of `c` containing the cross product vector(s). Ignored if + both input vectors have dimension 2, as the return is scalar. + By default, the last axis. 
+ axis : int, optional + If defined, the axis of `a`, `b` and `c` that defines the vector(s) + and cross product(s). Overrides `axisa`, `axisb` and `axisc`. + + Returns + ------- + c : ndarray + Vector cross product(s). + + Raises + ------ + ValueError + When the dimension of the vector(s) in `a` and/or `b` does not + equal 2 or 3. + + See Also + -------- + inner : Inner product + outer : Outer product. + ix_ : Construct index arrays. + + Notes + ----- + .. versionadded:: 1.9.0 + + Supports full broadcasting of the inputs. + + Examples + -------- + Vector cross-product. + + >>> x = [1, 2, 3] + >>> y = [4, 5, 6] + >>> np.cross(x, y) + array([-3, 6, -3]) + + One vector with dimension 2. + + >>> x = [1, 2] + >>> y = [4, 5, 6] + >>> np.cross(x, y) + array([12, -6, -3]) + + Equivalently: + + >>> x = [1, 2, 0] + >>> y = [4, 5, 6] + >>> np.cross(x, y) + array([12, -6, -3]) + + Both vectors with dimension 2. + + >>> x = [1,2] + >>> y = [4,5] + >>> np.cross(x, y) + array(-3) + + Multiple vector cross-products. Note that the direction of the cross + product vector is defined by the `right-hand rule`. + + >>> x = np.array([[1,2,3], [4,5,6]]) + >>> y = np.array([[4,5,6], [1,2,3]]) + >>> np.cross(x, y) + array([[-3, 6, -3], + [ 3, -6, 3]]) + + The orientation of `c` can be changed using the `axisc` keyword. + + >>> np.cross(x, y, axisc=0) + array([[-3, 3], + [ 6, -6], + [-3, 3]]) + + Change the vector definition of `x` and `y` using `axisa` and `axisb`. + + >>> x = np.array([[1,2,3], [4,5,6], [7, 8, 9]]) + >>> y = np.array([[7, 8, 9], [4,5,6], [1,2,3]]) + >>> np.cross(x, y) + array([[ -6, 12, -6], + [ 0, 0, 0], + [ 6, -12, 6]]) + >>> np.cross(x, y, axisa=0, axisb=0) + array([[-24, 48, -24], + [-30, 60, -30], + [-36, 72, -36]]) + + """ + if axis is not None: + axisa, axisb, axisc = (axis,) * 3 + a = asarray(a) + b = asarray(b) + # Check axisa and axisb are within bounds + axisa = normalize_axis_index(axisa, a.ndim, msg_prefix='axisa') + axisb = normalize_axis_index(axisb, b.ndim, msg_prefix='axisb') + + # Move working axis to the end of the shape + a = moveaxis(a, axisa, -1) + b = moveaxis(b, axisb, -1) + msg = ("incompatible dimensions for cross product\n" + "(dimension must be 2 or 3)") + if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3): + raise ValueError(msg) + + # Create the output array + shape = broadcast(a[..., 0], b[..., 0]).shape + if a.shape[-1] == 3 or b.shape[-1] == 3: + shape += (3,) + # Check axisc is within bounds + axisc = normalize_axis_index(axisc, len(shape), msg_prefix='axisc') + dtype = promote_types(a.dtype, b.dtype) + cp = empty(shape, dtype) + + # create local aliases for readability + a0 = a[..., 0] + a1 = a[..., 1] + if a.shape[-1] == 3: + a2 = a[..., 2] + b0 = b[..., 0] + b1 = b[..., 1] + if b.shape[-1] == 3: + b2 = b[..., 2] + if cp.ndim != 0 and cp.shape[-1] == 3: + cp0 = cp[..., 0] + cp1 = cp[..., 1] + cp2 = cp[..., 2] + + if a.shape[-1] == 2: + if b.shape[-1] == 2: + # a0 * b1 - a1 * b0 + multiply(a0, b1, out=cp) + cp -= a1 * b0 + return cp + else: + assert b.shape[-1] == 3 + # cp0 = a1 * b2 - 0 (a2 = 0) + # cp1 = 0 - a0 * b2 (a2 = 0) + # cp2 = a0 * b1 - a1 * b0 + multiply(a1, b2, out=cp0) + multiply(a0, b2, out=cp1) + negative(cp1, out=cp1) + multiply(a0, b1, out=cp2) + cp2 -= a1 * b0 + else: + assert a.shape[-1] == 3 + if b.shape[-1] == 3: + # cp0 = a1 * b2 - a2 * b1 + # cp1 = a2 * b0 - a0 * b2 + # cp2 = a0 * b1 - a1 * b0 + multiply(a1, b2, out=cp0) + tmp = array(a2 * b1) + cp0 -= tmp + multiply(a2, b0, out=cp1) + multiply(a0, b2, out=tmp) + cp1 -= tmp + 
multiply(a0, b1, out=cp2) + multiply(a1, b0, out=tmp) + cp2 -= tmp + else: + assert b.shape[-1] == 2 + # cp0 = 0 - a2 * b1 (b2 = 0) + # cp1 = a2 * b0 - 0 (b2 = 0) + # cp2 = a0 * b1 - a1 * b0 + multiply(a2, b1, out=cp0) + negative(cp0, out=cp0) + multiply(a2, b0, out=cp1) + multiply(a0, b1, out=cp2) + cp2 -= a1 * b0 + + return moveaxis(cp, -1, axisc) + + +little_endian = (sys.byteorder == 'little') + + +@set_module('numpy') +def indices(dimensions, dtype=int, sparse=False): + """ + Return an array representing the indices of a grid. + + Compute an array where the subarrays contain index values 0, 1, ... + varying only along the corresponding axis. + + Parameters + ---------- + dimensions : sequence of ints + The shape of the grid. + dtype : dtype, optional + Data type of the result. + sparse : boolean, optional + Return a sparse representation of the grid instead of a dense + representation. Default is False. + + .. versionadded:: 1.17 + + Returns + ------- + grid : one ndarray or tuple of ndarrays + If sparse is False: + Returns one array of grid indices, + ``grid.shape = (len(dimensions),) + tuple(dimensions)``. + If sparse is True: + Returns a tuple of arrays, with + ``grid[i].shape = (1, ..., 1, dimensions[i], 1, ..., 1)`` with + dimensions[i] in the ith place + + See Also + -------- + mgrid, ogrid, meshgrid + + Notes + ----- + The output shape in the dense case is obtained by prepending the number + of dimensions in front of the tuple of dimensions, i.e. if `dimensions` + is a tuple ``(r0, ..., rN-1)`` of length ``N``, the output shape is + ``(N, r0, ..., rN-1)``. + + The subarrays ``grid[k]`` contains the N-D array of indices along the + ``k-th`` axis. Explicitly:: + + grid[k, i0, i1, ..., iN-1] = ik + + Examples + -------- + >>> grid = np.indices((2, 3)) + >>> grid.shape + (2, 2, 3) + >>> grid[0] # row indices + array([[0, 0, 0], + [1, 1, 1]]) + >>> grid[1] # column indices + array([[0, 1, 2], + [0, 1, 2]]) + + The indices can be used as an index into an array. + + >>> x = np.arange(20).reshape(5, 4) + >>> row, col = np.indices((2, 3)) + >>> x[row, col] + array([[0, 1, 2], + [4, 5, 6]]) + + Note that it would be more straightforward in the above example to + extract the required elements directly with ``x[:2, :3]``. + + If sparse is set to true, the grid will be returned in a sparse + representation. + + >>> i, j = np.indices((2, 3), sparse=True) + >>> i.shape + (2, 1) + >>> j.shape + (1, 3) + >>> i # row indices + array([[0], + [1]]) + >>> j # column indices + array([[0, 1, 2]]) + + """ + dimensions = tuple(dimensions) + N = len(dimensions) + shape = (1,)*N + if sparse: + res = tuple() + else: + res = empty((N,)+dimensions, dtype=dtype) + for i, dim in enumerate(dimensions): + idx = arange(dim, dtype=dtype).reshape( + shape[:i] + (dim,) + shape[i+1:] + ) + if sparse: + res = res + (idx,) + else: + res[i] = idx + return res + + +def _fromfunction_dispatcher(function, shape, *, dtype=None, like=None, **kwargs): + return (like,) + + +@set_array_function_like_doc +@set_module('numpy') +def fromfunction(function, shape, *, dtype=float, like=None, **kwargs): + """ + Construct an array by executing a function over each coordinate. + + The resulting array therefore has a value ``fn(x, y, z)`` at + coordinate ``(x, y, z)``. + + Parameters + ---------- + function : callable + The function is called with N parameters, where N is the rank of + `shape`. Each parameter represents the coordinates of the array + varying along a specific axis. 
For example, if `shape` + were ``(2, 2)``, then the parameters would be + ``array([[0, 0], [1, 1]])`` and ``array([[0, 1], [0, 1]])`` + shape : (N,) tuple of ints + Shape of the output array, which also determines the shape of + the coordinate arrays passed to `function`. + dtype : data-type, optional + Data-type of the coordinate arrays passed to `function`. + By default, `dtype` is float. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + fromfunction : any + The result of the call to `function` is passed back directly. + Therefore the shape of `fromfunction` is completely determined by + `function`. If `function` returns a scalar value, the shape of + `fromfunction` would not match the `shape` parameter. + + See Also + -------- + indices, meshgrid + + Notes + ----- + Keywords other than `dtype` are passed to `function`. + + Examples + -------- + >>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int) + array([[ True, False, False], + [False, True, False], + [False, False, True]]) + + >>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int) + array([[0, 1, 2], + [1, 2, 3], + [2, 3, 4]]) + + """ + if like is not None: + return _fromfunction_with_like(function, shape, dtype=dtype, like=like, **kwargs) + + args = indices(shape, dtype=dtype) + return function(*args, **kwargs) + + +_fromfunction_with_like = array_function_dispatch( + _fromfunction_dispatcher +)(fromfunction) + + +def _frombuffer(buf, dtype, shape, order): + return frombuffer(buf, dtype=dtype).reshape(shape, order=order) + + +@set_module('numpy') +def isscalar(element): + """ + Returns True if the type of `element` is a scalar type. + + Parameters + ---------- + element : any + Input argument, can be of any type and shape. + + Returns + ------- + val : bool + True if `element` is a scalar type, False if it is not. + + See Also + -------- + ndim : Get the number of dimensions of an array + + Notes + ----- + If you need a stricter way to identify a *numerical* scalar, use + ``isinstance(x, numbers.Number)``, as that returns ``False`` for most + non-numerical elements such as strings. + + In most cases ``np.ndim(x) == 0`` should be used instead of this function, + as that will also return true for 0d arrays. This is how numpy overloads + functions in the style of the ``dx`` arguments to `gradient` and the ``bins`` + argument to `histogram`. 
Some key differences: + + +--------------------------------------+---------------+-------------------+ + | x |``isscalar(x)``|``np.ndim(x) == 0``| + +======================================+===============+===================+ + | PEP 3141 numeric objects (including | ``True`` | ``True`` | + | builtins) | | | + +--------------------------------------+---------------+-------------------+ + | builtin string and buffer objects | ``True`` | ``True`` | + +--------------------------------------+---------------+-------------------+ + | other builtin objects, like | ``False`` | ``True`` | + | `pathlib.Path`, `Exception`, | | | + | the result of `re.compile` | | | + +--------------------------------------+---------------+-------------------+ + | third-party objects like | ``False`` | ``True`` | + | `matplotlib.figure.Figure` | | | + +--------------------------------------+---------------+-------------------+ + | zero-dimensional numpy arrays | ``False`` | ``True`` | + +--------------------------------------+---------------+-------------------+ + | other numpy arrays | ``False`` | ``False`` | + +--------------------------------------+---------------+-------------------+ + | `list`, `tuple`, and other sequence | ``False`` | ``False`` | + | objects | | | + +--------------------------------------+---------------+-------------------+ + + Examples + -------- + >>> np.isscalar(3.1) + True + >>> np.isscalar(np.array(3.1)) + False + >>> np.isscalar([3.1]) + False + >>> np.isscalar(False) + True + >>> np.isscalar('numpy') + True + + NumPy supports PEP 3141 numbers: + + >>> from fractions import Fraction + >>> np.isscalar(Fraction(5, 17)) + True + >>> from numbers import Number + >>> np.isscalar(Number()) + True + + """ + return (isinstance(element, generic) + or type(element) in ScalarType + or isinstance(element, numbers.Number)) + + +@set_module('numpy') +def binary_repr(num, width=None): + """ + Return the binary representation of the input number as a string. + + For negative numbers, if width is not given, a minus sign is added to the + front. If width is given, the two's complement of the number is + returned, with respect to that width. + + In a two's-complement system negative numbers are represented by the two's + complement of the absolute value. This is the most common method of + representing signed integers on computers [1]_. A N-bit two's-complement + system can represent every integer in the range + :math:`-2^{N-1}` to :math:`+2^{N-1}-1`. + + Parameters + ---------- + num : int + Only an integer decimal number can be used. + width : int, optional + The length of the returned string if `num` is positive, or the length + of the two's complement if `num` is negative, provided that `width` is + at least a sufficient number of bits for `num` to be represented in the + designated form. + + If the `width` value is insufficient, it will be ignored, and `num` will + be returned in binary (`num` > 0) or two's complement (`num` < 0) form + with its width equal to the minimum number of bits needed to represent + the number in the designated form. This behavior is deprecated and will + later raise an error. + + .. deprecated:: 1.12.0 + + Returns + ------- + bin : str + Binary representation of `num` or two's complement of `num`. + + See Also + -------- + base_repr: Return a string representation of a number in the given base + system. + bin: Python's built-in binary representation generator of an integer. 
+ + Notes + ----- + `binary_repr` is equivalent to using `base_repr` with base 2, but about 25x + faster. + + References + ---------- + .. [1] Wikipedia, "Two's complement", + https://en.wikipedia.org/wiki/Two's_complement + + Examples + -------- + >>> np.binary_repr(3) + '11' + >>> np.binary_repr(-3) + '-11' + >>> np.binary_repr(3, width=4) + '0011' + + The two's complement is returned when the input number is negative and + width is specified: + + >>> np.binary_repr(-3, width=3) + '101' + >>> np.binary_repr(-3, width=5) + '11101' + + """ + def warn_if_insufficient(width, binwidth): + if width is not None and width < binwidth: + warnings.warn( + "Insufficient bit width provided. This behavior " + "will raise an error in the future.", DeprecationWarning, + stacklevel=3) + + # Ensure that num is a Python integer to avoid overflow or unwanted + # casts to floating point. + num = operator.index(num) + + if num == 0: + return '0' * (width or 1) + + elif num > 0: + binary = bin(num)[2:] + binwidth = len(binary) + outwidth = (binwidth if width is None + else max(binwidth, width)) + warn_if_insufficient(width, binwidth) + return binary.zfill(outwidth) + + else: + if width is None: + return '-' + bin(-num)[2:] + + else: + poswidth = len(bin(-num)[2:]) + + # See gh-8679: remove extra digit + # for numbers at boundaries. + if 2**(poswidth - 1) == -num: + poswidth -= 1 + + twocomp = 2**(poswidth + 1) + num + binary = bin(twocomp)[2:] + binwidth = len(binary) + + outwidth = max(binwidth, width) + warn_if_insufficient(width, binwidth) + return '1' * (outwidth - binwidth) + binary + + +@set_module('numpy') +def base_repr(number, base=2, padding=0): + """ + Return a string representation of a number in the given base system. + + Parameters + ---------- + number : int + The value to convert. Positive and negative values are handled. + base : int, optional + Convert `number` to the `base` number system. The valid range is 2-36, + the default value is 2. + padding : int, optional + Number of zeros padded on the left. Default is 0 (no padding). + + Returns + ------- + out : str + String representation of `number` in `base` system. + + See Also + -------- + binary_repr : Faster version of `base_repr` for base 2. + + Examples + -------- + >>> np.base_repr(5) + '101' + >>> np.base_repr(6, 5) + '11' + >>> np.base_repr(7, base=5, padding=3) + '00012' + + >>> np.base_repr(10, base=16) + 'A' + >>> np.base_repr(32, base=16) + '20' + + """ + digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ' + if base > len(digits): + raise ValueError("Bases greater than 36 not handled in base_repr.") + elif base < 2: + raise ValueError("Bases less than 2 not handled in base_repr.") + + num = abs(number) + res = [] + while num: + res.append(digits[num % base]) + num //= base + if padding: + res.append('0' * padding) + if number < 0: + res.append('-') + return ''.join(reversed(res or '0')) + + +# These are all essentially abbreviations +# These might wind up in a special abbreviations module + + +def _maketup(descr, val): + dt = dtype(descr) + # Place val in all scalar tuples: + fields = dt.fields + if fields is None: + return val + else: + res = [_maketup(fields[name][0], val) for name in dt.names] + return tuple(res) + + +def _identity_dispatcher(n, dtype=None, *, like=None): + return (like,) + + +@set_array_function_like_doc +@set_module('numpy') +def identity(n, dtype=None, *, like=None): + """ + Return the identity array. + + The identity array is a square array with ones on + the main diagonal. 
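+    ``identity(n, dtype)`` is equivalent to ``eye(n, dtype=dtype)``, which is
+    what the implementation delegates to.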
+ + Parameters + ---------- + n : int + Number of rows (and columns) in `n` x `n` output. + dtype : data-type, optional + Data-type of the output. Defaults to ``float``. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + `n` x `n` array with its main diagonal set to one, + and all other elements 0. + + Examples + -------- + >>> np.identity(3) + array([[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]]) + + """ + if like is not None: + return _identity_with_like(n, dtype=dtype, like=like) + + from numpy import eye + return eye(n, dtype=dtype, like=like) + + +_identity_with_like = array_function_dispatch( + _identity_dispatcher +)(identity) + + +def _allclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None): + return (a, b) + + +@array_function_dispatch(_allclose_dispatcher) +def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): + """ + Returns True if two arrays are element-wise equal within a tolerance. + + The tolerance values are positive, typically very small numbers. The + relative difference (`rtol` * abs(`b`)) and the absolute difference + `atol` are added together to compare against the absolute difference + between `a` and `b`. + + NaNs are treated as equal if they are in the same place and if + ``equal_nan=True``. Infs are treated as equal if they are in the same + place and of the same sign in both arrays. + + Parameters + ---------- + a, b : array_like + Input arrays to compare. + rtol : float + The relative tolerance parameter (see Notes). + atol : float + The absolute tolerance parameter (see Notes). + equal_nan : bool + Whether to compare NaN's as equal. If True, NaN's in `a` will be + considered equal to NaN's in `b` in the output array. + + .. versionadded:: 1.10.0 + + Returns + ------- + allclose : bool + Returns True if the two arrays are equal within the given + tolerance; False otherwise. + + See Also + -------- + isclose, all, any, equal + + Notes + ----- + If the following equation is element-wise True, then allclose returns + True. + + absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) + + The above equation is not symmetric in `a` and `b`, so that + ``allclose(a, b)`` might be different from ``allclose(b, a)`` in + some rare cases. + + The comparison of `a` and `b` uses standard broadcasting, which + means that `a` and `b` need not have the same shape in order for + ``allclose(a, b)`` to evaluate to True. The same is true for + `equal` but not `array_equal`. + + `allclose` is not defined for non-numeric data types. + + Examples + -------- + >>> np.allclose([1e10,1e-7], [1.00001e10,1e-8]) + False + >>> np.allclose([1e10,1e-8], [1.00001e10,1e-9]) + True + >>> np.allclose([1e10,1e-8], [1.0001e10,1e-9]) + False + >>> np.allclose([1.0, np.nan], [1.0, np.nan]) + False + >>> np.allclose([1.0, np.nan], [1.0, np.nan], equal_nan=True) + True + + """ + res = all(isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)) + return bool(res) + + +def _isclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None): + return (a, b) + + +@array_function_dispatch(_isclose_dispatcher) +def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): + """ + Returns a boolean array where two arrays are element-wise equal within a + tolerance. + + The tolerance values are positive, typically very small numbers. The + relative difference (`rtol` * abs(`b`)) and the absolute difference + `atol` are added together to compare against the absolute difference + between `a` and `b`. + + .. 
warning:: The default `atol` is not appropriate for comparing numbers + that are much smaller than one (see Notes). + + Parameters + ---------- + a, b : array_like + Input arrays to compare. + rtol : float + The relative tolerance parameter (see Notes). + atol : float + The absolute tolerance parameter (see Notes). + equal_nan : bool + Whether to compare NaN's as equal. If True, NaN's in `a` will be + considered equal to NaN's in `b` in the output array. + + Returns + ------- + y : array_like + Returns a boolean array of where `a` and `b` are equal within the + given tolerance. If both `a` and `b` are scalars, returns a single + boolean value. + + See Also + -------- + allclose + math.isclose + + Notes + ----- + .. versionadded:: 1.7.0 + + For finite values, isclose uses the following equation to test whether + two floating point values are equivalent. + + absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) + + Unlike the built-in `math.isclose`, the above equation is not symmetric + in `a` and `b` -- it assumes `b` is the reference value -- so that + `isclose(a, b)` might be different from `isclose(b, a)`. Furthermore, + the default value of atol is not zero, and is used to determine what + small values should be considered close to zero. The default value is + appropriate for expected values of order unity: if the expected values + are significantly smaller than one, it can result in false positives. + `atol` should be carefully selected for the use case at hand. A zero value + for `atol` will result in `False` if either `a` or `b` is zero. + + `isclose` is not defined for non-numeric data types. + + Examples + -------- + >>> np.isclose([1e10,1e-7], [1.00001e10,1e-8]) + array([ True, False]) + >>> np.isclose([1e10,1e-8], [1.00001e10,1e-9]) + array([ True, True]) + >>> np.isclose([1e10,1e-8], [1.0001e10,1e-9]) + array([False, True]) + >>> np.isclose([1.0, np.nan], [1.0, np.nan]) + array([ True, False]) + >>> np.isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True) + array([ True, True]) + >>> np.isclose([1e-8, 1e-7], [0.0, 0.0]) + array([ True, False]) + >>> np.isclose([1e-100, 1e-7], [0.0, 0.0], atol=0.0) + array([False, False]) + >>> np.isclose([1e-10, 1e-10], [1e-20, 0.0]) + array([ True, True]) + >>> np.isclose([1e-10, 1e-10], [1e-20, 0.999999e-10], atol=0.0) + array([False, True]) + """ + def within_tol(x, y, atol, rtol): + with errstate(invalid='ignore'): + return less_equal(abs(x-y), atol + rtol * abs(y)) + + x = asanyarray(a) + y = asanyarray(b) + + # Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT). + # This will cause casting of x later. Also, make sure to allow subclasses + # (e.g., for numpy.ma). + # NOTE: We explicitly allow timedelta, which used to work. This could + # possibly be deprecated. See also gh-18286. + # timedelta works if `atol` is an integer or also a timedelta. + # Although, the default tolerances are unlikely to be useful + if y.dtype.kind != "m": + dt = multiarray.result_type(y, 1.) + y = asanyarray(y, dtype=dt) + + xfin = isfinite(x) + yfin = isfinite(y) + if all(xfin) and all(yfin): + return within_tol(x, y, atol, rtol) + else: + finite = xfin & yfin + cond = zeros_like(finite, subok=True) + # Because we're using boolean indexing, x & y must be the same shape. + # Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in + # lib.stride_tricks, though, so we can't import it here. + x = x * ones_like(cond) + y = y * ones_like(cond) + # Avoid subtraction with infinite/nan values... 
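+        # Compare only the finite entries here; positions holding inf or NaN
+        # are resolved by the exact-equality check that follows.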
+ cond[finite] = within_tol(x[finite], y[finite], atol, rtol) + # Check for equality of infinite values... + cond[~finite] = (x[~finite] == y[~finite]) + if equal_nan: + # Make NaN == NaN + both_nan = isnan(x) & isnan(y) + + # Needed to treat masked arrays correctly. = True would not work. + cond[both_nan] = both_nan[both_nan] + + return cond[()] # Flatten 0d arrays to scalars + + +def _array_equal_dispatcher(a1, a2, equal_nan=None): + return (a1, a2) + + +@array_function_dispatch(_array_equal_dispatcher) +def array_equal(a1, a2, equal_nan=False): + """ + True if two arrays have the same shape and elements, False otherwise. + + Parameters + ---------- + a1, a2 : array_like + Input arrays. + equal_nan : bool + Whether to compare NaN's as equal. If the dtype of a1 and a2 is + complex, values will be considered equal if either the real or the + imaginary component of a given value is ``nan``. + + .. versionadded:: 1.19.0 + + Returns + ------- + b : bool + Returns True if the arrays are equal. + + See Also + -------- + allclose: Returns True if two arrays are element-wise equal within a + tolerance. + array_equiv: Returns True if input arrays are shape consistent and all + elements equal. + + Examples + -------- + >>> np.array_equal([1, 2], [1, 2]) + True + >>> np.array_equal(np.array([1, 2]), np.array([1, 2])) + True + >>> np.array_equal([1, 2], [1, 2, 3]) + False + >>> np.array_equal([1, 2], [1, 4]) + False + >>> a = np.array([1, np.nan]) + >>> np.array_equal(a, a) + False + >>> np.array_equal(a, a, equal_nan=True) + True + + When ``equal_nan`` is True, complex values with nan components are + considered equal if either the real *or* the imaginary components are nan. + + >>> a = np.array([1 + 1j]) + >>> b = a.copy() + >>> a.real = np.nan + >>> b.imag = np.nan + >>> np.array_equal(a, b, equal_nan=True) + True + """ + try: + a1, a2 = asarray(a1), asarray(a2) + except Exception: + return False + if a1.shape != a2.shape: + return False + if not equal_nan: + return bool(asarray(a1 == a2).all()) + # Handling NaN values if equal_nan is True + a1nan, a2nan = isnan(a1), isnan(a2) + # NaN's occur at different locations + if not (a1nan == a2nan).all(): + return False + # Shapes of a1, a2 and masks are guaranteed to be consistent by this point + return bool(asarray(a1[~a1nan] == a2[~a1nan]).all()) + + +def _array_equiv_dispatcher(a1, a2): + return (a1, a2) + + +@array_function_dispatch(_array_equiv_dispatcher) +def array_equiv(a1, a2): + """ + Returns True if input arrays are shape consistent and all elements equal. + + Shape consistent means they are either the same shape, or one input array + can be broadcasted to create the same shape as the other one. + + Parameters + ---------- + a1, a2 : array_like + Input arrays. + + Returns + ------- + out : bool + True if equivalent, False otherwise. 
+ + Examples + -------- + >>> np.array_equiv([1, 2], [1, 2]) + True + >>> np.array_equiv([1, 2], [1, 3]) + False + + Showing the shape equivalence: + + >>> np.array_equiv([1, 2], [[1, 2], [1, 2]]) + True + >>> np.array_equiv([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]]) + False + + >>> np.array_equiv([1, 2], [[1, 2], [1, 3]]) + False + + """ + try: + a1, a2 = asarray(a1), asarray(a2) + except Exception: + return False + try: + multiarray.broadcast(a1, a2) + except Exception: + return False + + return bool(asarray(a1 == a2).all()) + + +Inf = inf = infty = Infinity = PINF +nan = NaN = NAN +False_ = bool_(False) +True_ = bool_(True) + + +def extend_all(module): + existing = set(__all__) + mall = getattr(module, '__all__') + for a in mall: + if a not in existing: + __all__.append(a) + + +from .umath import * +from .numerictypes import * +from . import fromnumeric +from .fromnumeric import * +from . import arrayprint +from .arrayprint import * +from . import _asarray +from ._asarray import * +from . import _ufunc_config +from ._ufunc_config import * +extend_all(fromnumeric) +extend_all(umath) +extend_all(numerictypes) +extend_all(arrayprint) +extend_all(_asarray) +extend_all(_ufunc_config) diff --git a/MLPY/Lib/site-packages/numpy/core/numeric.pyi b/MLPY/Lib/site-packages/numpy/core/numeric.pyi new file mode 100644 index 0000000000000000000000000000000000000000..2a933a3bd888818b2e773659fe9296d3d8738f7e --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/numeric.pyi @@ -0,0 +1,243 @@ +import sys +from typing import ( + Any, + Optional, + Union, + Sequence, + Tuple, + Callable, + List, + overload, + TypeVar, + Iterable, +) + +from numpy import ndarray, generic, dtype, bool_, signedinteger, _OrderKACF, _OrderCF +from numpy.typing import ArrayLike, DTypeLike, _ShapeLike + +if sys.version_info >= (3, 8): + from typing import Literal +else: + from typing_extensions import Literal + +_T = TypeVar("_T") +_ArrayType = TypeVar("_ArrayType", bound=ndarray) + +_CorrelateMode = Literal["valid", "same", "full"] + +@overload +def zeros_like( + a: _ArrayType, + dtype: None = ..., + order: _OrderKACF = ..., + subok: Literal[True] = ..., + shape: None = ..., +) -> _ArrayType: ... +@overload +def zeros_like( + a: ArrayLike, + dtype: DTypeLike = ..., + order: _OrderKACF = ..., + subok: bool = ..., + shape: Optional[_ShapeLike] = ..., +) -> ndarray: ... + +def ones( + shape: _ShapeLike, + dtype: DTypeLike = ..., + order: _OrderCF = ..., + *, + like: ArrayLike = ..., +) -> ndarray: ... + +@overload +def ones_like( + a: _ArrayType, + dtype: None = ..., + order: _OrderKACF = ..., + subok: Literal[True] = ..., + shape: None = ..., +) -> _ArrayType: ... +@overload +def ones_like( + a: ArrayLike, + dtype: DTypeLike = ..., + order: _OrderKACF = ..., + subok: bool = ..., + shape: Optional[_ShapeLike] = ..., +) -> ndarray: ... + +@overload +def empty_like( + a: _ArrayType, + dtype: None = ..., + order: _OrderKACF = ..., + subok: Literal[True] = ..., + shape: None = ..., +) -> _ArrayType: ... +@overload +def empty_like( + a: ArrayLike, + dtype: DTypeLike = ..., + order: _OrderKACF = ..., + subok: bool = ..., + shape: Optional[_ShapeLike] = ..., +) -> ndarray: ... + +def full( + shape: _ShapeLike, + fill_value: Any, + dtype: DTypeLike = ..., + order: _OrderCF = ..., + *, + like: ArrayLike = ..., +) -> ndarray: ... + +@overload +def full_like( + a: _ArrayType, + fill_value: Any, + dtype: None = ..., + order: _OrderKACF = ..., + subok: Literal[True] = ..., + shape: None = ..., +) -> _ArrayType: ... 
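+# As in the *_like overloads above: with `subok=True` and no dtype/shape
+# override, the exact ndarray subclass of `a` is preserved; the overload
+# below returns a plain ndarray otherwise.
+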
+@overload +def full_like( + a: ArrayLike, + fill_value: Any, + dtype: DTypeLike = ..., + order: _OrderKACF = ..., + subok: bool = ..., + shape: Optional[_ShapeLike] = ..., +) -> ndarray: ... + +@overload +def count_nonzero( + a: ArrayLike, + axis: None = ..., + *, + keepdims: Literal[False] = ..., +) -> int: ... +@overload +def count_nonzero( + a: ArrayLike, + axis: _ShapeLike = ..., + *, + keepdims: bool = ..., +) -> Any: ... # TODO: np.intp or ndarray[np.intp] + +def isfortran(a: Union[ndarray, generic]) -> bool: ... + +def argwhere(a: ArrayLike) -> ndarray: ... + +def flatnonzero(a: ArrayLike) -> ndarray: ... + +def correlate( + a: ArrayLike, + v: ArrayLike, + mode: _CorrelateMode = ..., +) -> ndarray: ... + +def convolve( + a: ArrayLike, + v: ArrayLike, + mode: _CorrelateMode = ..., +) -> ndarray: ... + +@overload +def outer( + a: ArrayLike, + b: ArrayLike, + out: None = ..., +) -> ndarray: ... +@overload +def outer( + a: ArrayLike, + b: ArrayLike, + out: _ArrayType = ..., +) -> _ArrayType: ... + +def tensordot( + a: ArrayLike, + b: ArrayLike, + axes: Union[int, Tuple[_ShapeLike, _ShapeLike]] = ..., +) -> ndarray: ... + +def roll( + a: ArrayLike, + shift: _ShapeLike, + axis: Optional[_ShapeLike] = ..., +) -> ndarray: ... + +def rollaxis(a: ndarray, axis: int, start: int = ...) -> ndarray: ... + +def moveaxis( + a: ndarray, + source: _ShapeLike, + destination: _ShapeLike, +) -> ndarray: ... + +def cross( + a: ArrayLike, + b: ArrayLike, + axisa: int = ..., + axisb: int = ..., + axisc: int = ..., + axis: Optional[int] = ..., +) -> ndarray: ... + +@overload +def indices( + dimensions: Sequence[int], + dtype: DTypeLike = ..., + sparse: Literal[False] = ..., +) -> ndarray: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: DTypeLike = ..., + sparse: Literal[True] = ..., +) -> Tuple[ndarray, ...]: ... + +def fromfunction( + function: Callable[..., _T], + shape: Sequence[int], + *, + dtype: DTypeLike = ..., + like: ArrayLike = ..., + **kwargs: Any, +) -> _T: ... + +def isscalar(element: Any) -> bool: ... + +def binary_repr(num: int, width: Optional[int] = ...) -> str: ... + +def base_repr(number: int, base: int = ..., padding: int = ...) -> str: ... + +def identity( + n: int, + dtype: DTypeLike = ..., + *, + like: ArrayLike = ..., +) -> ndarray: ... + +def allclose( + a: ArrayLike, + b: ArrayLike, + rtol: float = ..., + atol: float = ..., + equal_nan: bool = ..., +) -> bool: ... + +def isclose( + a: ArrayLike, + b: ArrayLike, + rtol: float = ..., + atol: float = ..., + equal_nan: bool = ..., +) -> Any: ... + +def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan: bool = ...) -> bool: ... + +def array_equiv(a1: ArrayLike, a2: ArrayLike) -> bool: ... diff --git a/MLPY/Lib/site-packages/numpy/core/numerictypes.py b/MLPY/Lib/site-packages/numpy/core/numerictypes.py new file mode 100644 index 0000000000000000000000000000000000000000..4efbf234b549e4cf72c072a88a364583504d5152 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/numerictypes.py @@ -0,0 +1,672 @@ +""" +numerictypes: Define the numeric type objects + +This module is designed so "from numerictypes import \\*" is safe. 
+Exported symbols include: + + Dictionary with all registered number types (including aliases): + sctypeDict + + Type objects (not all will be available, depends on platform): + see variable sctypes for which ones you have + + Bit-width names + + int8 int16 int32 int64 int128 + uint8 uint16 uint32 uint64 uint128 + float16 float32 float64 float96 float128 float256 + complex32 complex64 complex128 complex192 complex256 complex512 + datetime64 timedelta64 + + c-based names + + bool_ + + object_ + + void, str_, unicode_ + + byte, ubyte, + short, ushort + intc, uintc, + intp, uintp, + int_, uint, + longlong, ulonglong, + + single, csingle, + float_, complex_, + longfloat, clongfloat, + + As part of the type-hierarchy: xx -- is bit-width + + generic + +-> bool_ (kind=b) + +-> number + | +-> integer + | | +-> signedinteger (intxx) (kind=i) + | | | byte + | | | short + | | | intc + | | | intp int0 + | | | int_ + | | | longlong + | | \\-> unsignedinteger (uintxx) (kind=u) + | | ubyte + | | ushort + | | uintc + | | uintp uint0 + | | uint_ + | | ulonglong + | +-> inexact + | +-> floating (floatxx) (kind=f) + | | half + | | single + | | float_ (double) + | | longfloat + | \\-> complexfloating (complexxx) (kind=c) + | csingle (singlecomplex) + | complex_ (cfloat, cdouble) + | clongfloat (longcomplex) + +-> flexible + | +-> character + | | str_ (string_, bytes_) (kind=S) [Python 2] + | | unicode_ (kind=U) [Python 2] + | | + | | bytes_ (string_) (kind=S) [Python 3] + | | str_ (unicode_) (kind=U) [Python 3] + | | + | \\-> void (kind=V) + \\-> object_ (not used much) (kind=O) + +""" +import numbers +import warnings + +from numpy.core.multiarray import ( + typeinfo, ndarray, array, empty, dtype, datetime_data, + datetime_as_string, busday_offset, busday_count, is_busday, + busdaycalendar + ) +from numpy.core.overrides import set_module + +# we add more at the bottom +__all__ = ['sctypeDict', 'sctypes', + 'ScalarType', 'obj2sctype', 'cast', 'nbytes', 'sctype2char', + 'maximum_sctype', 'issctype', 'typecodes', 'find_common_type', + 'issubdtype', 'datetime_data', 'datetime_as_string', + 'busday_offset', 'busday_count', 'is_busday', 'busdaycalendar', + ] + +# we don't need all these imports, but we need to keep them for compatibility +# for users using np.core.numerictypes.UPPER_TABLE +from ._string_helpers import ( + english_lower, english_upper, english_capitalize, LOWER_TABLE, UPPER_TABLE +) + +from ._type_aliases import ( + sctypeDict, + allTypes, + bitname, + sctypes, + _concrete_types, + _concrete_typeinfo, + _bits_of, +) +from ._dtype import _kind_name + +# we don't export these for import *, but we do want them accessible +# as numerictypes.bool, etc. +from builtins import bool, int, float, complex, object, str, bytes +from numpy.compat import long, unicode + + +# We use this later +generic = allTypes['generic'] + +genericTypeRank = ['bool', 'int8', 'uint8', 'int16', 'uint16', + 'int32', 'uint32', 'int64', 'uint64', 'int128', + 'uint128', 'float16', + 'float32', 'float64', 'float80', 'float96', 'float128', + 'float256', + 'complex32', 'complex64', 'complex128', 'complex160', + 'complex192', 'complex256', 'complex512', 'object'] + +@set_module('numpy') +def maximum_sctype(t): + """ + Return the scalar type of highest precision of the same kind as the input. + + Parameters + ---------- + t : dtype or dtype specifier + The input data type. This can be a `dtype` object or an object that + is convertible to a `dtype`. 
+
+    Returns
+    -------
+    out : dtype
+        The highest precision data type of the same kind (`dtype.kind`) as `t`.
+
+    See Also
+    --------
+    obj2sctype, mintypecode, sctype2char
+    dtype
+
+    Examples
+    --------
+    >>> np.maximum_sctype(int)
+    <class 'numpy.int64'>
+    >>> np.maximum_sctype(np.uint8)
+    <class 'numpy.uint64'>
+    >>> np.maximum_sctype(complex)
+    <class 'numpy.complex256'> # may vary
+    >>> np.maximum_sctype(str)
+    <class 'numpy.str_'>
+
+    >>> np.maximum_sctype('i2')
+    <class 'numpy.int64'>
+    >>> np.maximum_sctype('f4')
+    <class 'numpy.float128'> # may vary
+
+    """
+    g = obj2sctype(t)
+    if g is None:
+        return t
+    t = g
+    base = _kind_name(dtype(t))
+    if base in sctypes:
+        return sctypes[base][-1]
+    else:
+        return t
+
+
+@set_module('numpy')
+def issctype(rep):
+    """
+    Determines whether the given object represents a scalar data-type.
+
+    Parameters
+    ----------
+    rep : any
+        If `rep` is an instance of a scalar dtype, True is returned. If not,
+        False is returned.
+
+    Returns
+    -------
+    out : bool
+        Boolean result of check whether `rep` is a scalar dtype.
+
+    See Also
+    --------
+    issubsctype, issubdtype, obj2sctype, sctype2char
+
+    Examples
+    --------
+    >>> np.issctype(np.int32)
+    True
+    >>> np.issctype(list)
+    False
+    >>> np.issctype(1.1)
+    False
+
+    Strings are also a scalar type:
+
+    >>> np.issctype(np.dtype('str'))
+    True
+
+    """
+    if not isinstance(rep, (type, dtype)):
+        return False
+    try:
+        res = obj2sctype(rep)
+        if res and res != object_:
+            return True
+        return False
+    except Exception:
+        return False
+
+
+@set_module('numpy')
+def obj2sctype(rep, default=None):
+    """
+    Return the scalar dtype or NumPy equivalent of Python type of an object.
+
+    Parameters
+    ----------
+    rep : any
+        The object of which the type is returned.
+    default : any, optional
+        If given, this is returned for objects whose types can not be
+        determined. If not given, None is returned for those objects.
+
+    Returns
+    -------
+    dtype : dtype or Python type
+        The data type of `rep`.
+
+    See Also
+    --------
+    sctype2char, issctype, issubsctype, issubdtype, maximum_sctype
+
+    Examples
+    --------
+    >>> np.obj2sctype(np.int32)
+    <class 'numpy.int32'>
+    >>> np.obj2sctype(np.array([1., 2.]))
+    <class 'numpy.float64'>
+    >>> np.obj2sctype(np.array([1.j]))
+    <class 'numpy.complex128'>
+
+    >>> np.obj2sctype(dict)
+    <class 'numpy.object_'>
+    >>> np.obj2sctype('string')
+
+    >>> np.obj2sctype(1, default=list)
+    <class 'list'>
+
+    """
+    # prevent abstract classes being upcast
+    if isinstance(rep, type) and issubclass(rep, generic):
+        return rep
+    # extract dtype from arrays
+    if isinstance(rep, ndarray):
+        return rep.dtype.type
+    # fall back on dtype to convert
+    try:
+        res = dtype(rep)
+    except Exception:
+        return default
+    else:
+        return res.type
+
+
+@set_module('numpy')
+def issubclass_(arg1, arg2):
+    """
+    Determine if a class is a subclass of a second class.
+
+    `issubclass_` is equivalent to the Python built-in ``issubclass``,
+    except that it returns False instead of raising a TypeError if one
+    of the arguments is not a class.
+
+    Parameters
+    ----------
+    arg1 : class
+        Input class. True is returned if `arg1` is a subclass of `arg2`.
+    arg2 : class or tuple of classes.
+        Input class. If a tuple of classes, True is returned if `arg1` is a
+        subclass of any of the tuple elements.
+
+    Returns
+    -------
+    out : bool
+        Whether `arg1` is a subclass of `arg2` or not.
+ + See Also + -------- + issubsctype, issubdtype, issctype + + Examples + -------- + >>> np.issubclass_(np.int32, int) + False + >>> np.issubclass_(np.int32, float) + False + >>> np.issubclass_(np.float64, float) + True + + """ + try: + return issubclass(arg1, arg2) + except TypeError: + return False + + +@set_module('numpy') +def issubsctype(arg1, arg2): + """ + Determine if the first argument is a subclass of the second argument. + + Parameters + ---------- + arg1, arg2 : dtype or dtype specifier + Data-types. + + Returns + ------- + out : bool + The result. + + See Also + -------- + issctype, issubdtype, obj2sctype + + Examples + -------- + >>> np.issubsctype('S8', str) + False + >>> np.issubsctype(np.array([1]), int) + True + >>> np.issubsctype(np.array([1]), float) + False + + """ + return issubclass(obj2sctype(arg1), obj2sctype(arg2)) + + +@set_module('numpy') +def issubdtype(arg1, arg2): + r""" + Returns True if first argument is a typecode lower/equal in type hierarchy. + + This is like the builtin :func:`issubclass`, but for `dtype`\ s. + + Parameters + ---------- + arg1, arg2 : dtype_like + `dtype` or object coercible to one + + Returns + ------- + out : bool + + See Also + -------- + :ref:`arrays.scalars` : Overview of the numpy type hierarchy. + issubsctype, issubclass_ + + Examples + -------- + `issubdtype` can be used to check the type of arrays: + + >>> ints = np.array([1, 2, 3], dtype=np.int32) + >>> np.issubdtype(ints.dtype, np.integer) + True + >>> np.issubdtype(ints.dtype, np.floating) + False + + >>> floats = np.array([1, 2, 3], dtype=np.float32) + >>> np.issubdtype(floats.dtype, np.integer) + False + >>> np.issubdtype(floats.dtype, np.floating) + True + + Similar types of different sizes are not subdtypes of each other: + + >>> np.issubdtype(np.float64, np.float32) + False + >>> np.issubdtype(np.float32, np.float64) + False + + but both are subtypes of `floating`: + + >>> np.issubdtype(np.float64, np.floating) + True + >>> np.issubdtype(np.float32, np.floating) + True + + For convenience, dtype-like objects are allowed too: + + >>> np.issubdtype('S1', np.string_) + True + >>> np.issubdtype('i4', np.signedinteger) + True + + """ + if not issubclass_(arg1, generic): + arg1 = dtype(arg1).type + if not issubclass_(arg2, generic): + arg2 = dtype(arg2).type + + return issubclass(arg1, arg2) + + +# This dictionary allows look up based on any alias for an array data-type +class _typedict(dict): + """ + Base object for a dictionary for look-up with any alias for an array dtype. + + Instances of `_typedict` can not be used as dictionaries directly, + first they have to be populated. + + """ + + def __getitem__(self, obj): + return dict.__getitem__(self, obj2sctype(obj)) + +nbytes = _typedict() +_alignment = _typedict() +_maxvals = _typedict() +_minvals = _typedict() +def _construct_lookups(): + for name, info in _concrete_typeinfo.items(): + obj = info.type + nbytes[obj] = info.bits // 8 + _alignment[obj] = info.alignment + if len(info) > 5: + _maxvals[obj] = info.max + _minvals[obj] = info.min + else: + _maxvals[obj] = None + _minvals[obj] = None + +_construct_lookups() + + +@set_module('numpy') +def sctype2char(sctype): + """ + Return the string representation of a scalar dtype. + + Parameters + ---------- + sctype : scalar dtype or object + If a scalar dtype, the corresponding string character is + returned. If an object, `sctype2char` tries to infer its scalar type + and then return the corresponding string character. 
+ + Returns + ------- + typechar : str + The string character corresponding to the scalar type. + + Raises + ------ + ValueError + If `sctype` is an object for which the type can not be inferred. + + See Also + -------- + obj2sctype, issctype, issubsctype, mintypecode + + Examples + -------- + >>> for sctype in [np.int32, np.double, np.complex_, np.string_, np.ndarray]: + ... print(np.sctype2char(sctype)) + l # may vary + d + D + S + O + + >>> x = np.array([1., 2-1.j]) + >>> np.sctype2char(x) + 'D' + >>> np.sctype2char(list) + 'O' + + """ + sctype = obj2sctype(sctype) + if sctype is None: + raise ValueError("unrecognized type") + if sctype not in _concrete_types: + # for compatibility + raise KeyError(sctype) + return dtype(sctype).char + +# Create dictionary of casting functions that wrap sequences +# indexed by type or type character +cast = _typedict() +for key in _concrete_types: + cast[key] = lambda x, k=key: array(x, copy=False).astype(k) + + +def _scalar_type_key(typ): + """A ``key`` function for `sorted`.""" + dt = dtype(typ) + return (dt.kind.lower(), dt.itemsize) + + +ScalarType = [int, float, complex, int, bool, bytes, str, memoryview] +ScalarType += sorted(_concrete_types, key=_scalar_type_key) +ScalarType = tuple(ScalarType) + + +# Now add the types we've determined to this module +for key in allTypes: + globals()[key] = allTypes[key] + __all__.append(key) + +del key + +typecodes = {'Character':'c', + 'Integer':'bhilqp', + 'UnsignedInteger':'BHILQP', + 'Float':'efdg', + 'Complex':'FDG', + 'AllInteger':'bBhHiIlLqQpP', + 'AllFloat':'efdgFDG', + 'Datetime': 'Mm', + 'All':'?bhilqpBHILQPefdgFDGSUVOMm'} + +# backwards compatibility --- deprecated name +# Formal deprecation: Numpy 1.20.0, 2020-10-19 (see numpy/__init__.py) +typeDict = sctypeDict + +# b -> boolean +# u -> unsigned integer +# i -> signed integer +# f -> floating point +# c -> complex +# M -> datetime +# m -> timedelta +# S -> string +# U -> Unicode string +# V -> record +# O -> Python object +_kind_list = ['b', 'u', 'i', 'f', 'c', 'S', 'U', 'V', 'O', 'M', 'm'] + +__test_types = '?'+typecodes['AllInteger'][:-2]+typecodes['AllFloat']+'O' +__len_test_types = len(__test_types) + +# Keep incrementing until a common type both can be coerced to +# is found. Otherwise, return None +def _find_common_coerce(a, b): + if a > b: + return a + try: + thisind = __test_types.index(a.char) + except ValueError: + return None + return _can_coerce_all([a, b], start=thisind) + +# Find a data-type that all data-types in a list can be coerced to +def _can_coerce_all(dtypelist, start=0): + N = len(dtypelist) + if N == 0: + return None + if N == 1: + return dtypelist[0] + thisind = start + while thisind < __len_test_types: + newdtype = dtype(__test_types[thisind]) + numcoerce = len([x for x in dtypelist if newdtype >= x]) + if numcoerce == N: + return newdtype + thisind += 1 + return None + +def _register_types(): + numbers.Integral.register(integer) + numbers.Complex.register(inexact) + numbers.Real.register(floating) + numbers.Number.register(number) + +_register_types() + + +@set_module('numpy') +def find_common_type(array_types, scalar_types): + """ + Determine common type following standard coercion rules. + + Parameters + ---------- + array_types : sequence + A list of dtypes or dtype convertible objects representing arrays. + scalar_types : sequence + A list of dtypes or dtype convertible objects representing scalars. 
+ + Returns + ------- + datatype : dtype + The common data type, which is the maximum of `array_types` ignoring + `scalar_types`, unless the maximum of `scalar_types` is of a + different kind (`dtype.kind`). If the kind is not understood, then + None is returned. + + See Also + -------- + dtype, common_type, can_cast, mintypecode + + Examples + -------- + >>> np.find_common_type([], [np.int64, np.float32, complex]) + dtype('complex128') + >>> np.find_common_type([np.int64, np.float32], []) + dtype('float64') + + The standard casting rules ensure that a scalar cannot up-cast an + array unless the scalar is of a fundamentally different kind of data + (i.e. under a different hierarchy in the data type hierarchy) then + the array: + + >>> np.find_common_type([np.float32], [np.int64, np.float64]) + dtype('float32') + + Complex is of a different type, so it up-casts the float in the + `array_types` argument: + + >>> np.find_common_type([np.float32], [complex]) + dtype('complex128') + + Type specifier strings are convertible to dtypes and can therefore + be used instead of dtypes: + + >>> np.find_common_type(['f4', 'f4', 'i4'], ['c8']) + dtype('complex128') + + """ + array_types = [dtype(x) for x in array_types] + scalar_types = [dtype(x) for x in scalar_types] + + maxa = _can_coerce_all(array_types) + maxsc = _can_coerce_all(scalar_types) + + if maxa is None: + return maxsc + + if maxsc is None: + return maxa + + try: + index_a = _kind_list.index(maxa.kind) + index_sc = _kind_list.index(maxsc.kind) + except ValueError: + return None + + if index_sc > index_a: + return _find_common_coerce(maxsc, maxa) + else: + return maxa diff --git a/MLPY/Lib/site-packages/numpy/core/numerictypes.pyi b/MLPY/Lib/site-packages/numpy/core/numerictypes.pyi new file mode 100644 index 0000000000000000000000000000000000000000..9fdb09aad5de551e988cfab98933ba7bfab65ede --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/numerictypes.pyi @@ -0,0 +1,140 @@ +import sys +from typing import ( + TypeVar, + Optional, + Type, + Union, + Tuple, + Sequence, + overload, + Any, + TypeVar, + Dict, + List, +) + +from numpy import ( + ndarray, + dtype, + generic, + bool_, + ubyte, + ushort, + uintc, + uint, + ulonglong, + byte, + short, + intc, + int_, + longlong, + half, + single, + double, + longdouble, + csingle, + cdouble, + clongdouble, + datetime64, + timedelta64, + object_, + str_, + bytes_, + void, +) + +from numpy.core._type_aliases import ( + sctypeDict as sctypeDict, + sctypes as sctypes, +) + +from numpy.typing import DTypeLike, ArrayLike + +if sys.version_info >= (3, 8): + from typing import Literal, Protocol, TypedDict +else: + from typing_extensions import Literal, Protocol, TypedDict + +_T = TypeVar("_T") +_ScalarType = TypeVar("_ScalarType", bound=generic) + +class _CastFunc(Protocol): + def __call__( + self, x: ArrayLike, k: DTypeLike = ... + ) -> ndarray[Any, dtype[Any]]: ... + +class _TypeCodes(TypedDict): + Character: Literal['c'] + Integer: Literal['bhilqp'] + UnsignedInteger: Literal['BHILQP'] + Float: Literal['efdg'] + Complex: Literal['FDG'] + AllInteger: Literal['bBhHiIlLqQpP'] + AllFloat: Literal['efdgFDG'] + Datetime: Literal['Mm'] + All: Literal['?bhilqpBHILQPefdgFDGSUVOMm'] + +class _typedict(Dict[Type[generic], _T]): + def __getitem__(self, key: DTypeLike) -> _T: ... + +__all__: List[str] + +# TODO: Clean up the annotations for the 7 functions below + +def maximum_sctype(t: DTypeLike) -> dtype: ... +def issctype(rep: object) -> bool: ... +@overload +def obj2sctype(rep: object) -> Optional[generic]: ... 
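+# The `default` value is returned (hence the Union in the final overload)
+# only when `rep` cannot be interpreted as a scalar dtype.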
+@overload +def obj2sctype(rep: object, default: None) -> Optional[generic]: ... +@overload +def obj2sctype( + rep: object, default: Type[_T] +) -> Union[generic, Type[_T]]: ... +def issubclass_(arg1: object, arg2: Union[object, Tuple[object, ...]]) -> bool: ... +def issubsctype( + arg1: Union[ndarray, DTypeLike], arg2: Union[ndarray, DTypeLike] +) -> bool: ... +def issubdtype(arg1: DTypeLike, arg2: DTypeLike) -> bool: ... +def sctype2char(sctype: object) -> str: ... +def find_common_type( + array_types: Sequence[DTypeLike], scalar_types: Sequence[DTypeLike] +) -> dtype: ... + +cast: _typedict[_CastFunc] +nbytes: _typedict[int] +typecodes: _TypeCodes +ScalarType: Tuple[ + Type[int], + Type[float], + Type[complex], + Type[int], + Type[bool], + Type[bytes], + Type[str], + Type[memoryview], + Type[bool_], + Type[csingle], + Type[cdouble], + Type[clongdouble], + Type[half], + Type[single], + Type[double], + Type[longdouble], + Type[byte], + Type[short], + Type[intc], + Type[int_], + Type[longlong], + Type[timedelta64], + Type[datetime64], + Type[object_], + Type[bytes_], + Type[str_], + Type[ubyte], + Type[ushort], + Type[uintc], + Type[uint], + Type[ulonglong], + Type[void], +] diff --git a/MLPY/Lib/site-packages/numpy/core/overrides.py b/MLPY/Lib/site-packages/numpy/core/overrides.py new file mode 100644 index 0000000000000000000000000000000000000000..643c41e697883978825c20b259f9731069650084 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/overrides.py @@ -0,0 +1,227 @@ +"""Implementation of __array_function__ overrides from NEP-18.""" +import collections +import functools +import os +import textwrap + +from numpy.core._multiarray_umath import ( + add_docstring, implement_array_function, _get_implementing_args) +from numpy.compat._inspect import getargspec + + +ARRAY_FUNCTION_ENABLED = bool( + int(os.environ.get('NUMPY_EXPERIMENTAL_ARRAY_FUNCTION', 1))) + +array_function_like_doc = ( + """like : array_like + Reference object to allow the creation of arrays which are not + NumPy arrays. If an array-like passed in as ``like`` supports + the ``__array_function__`` protocol, the result will be defined + by it. In this case, it ensures the creation of an array object + compatible with that passed in via this argument.""" +) + +def set_array_function_like_doc(public_api): + if public_api.__doc__ is not None: + public_api.__doc__ = public_api.__doc__.replace( + "${ARRAY_FUNCTION_LIKE}", + array_function_like_doc, + ) + return public_api + + +add_docstring( + implement_array_function, + """ + Implement a function with checks for __array_function__ overrides. + + All arguments are required, and can only be passed by position. + + Parameters + ---------- + implementation : function + Function that implements the operation on NumPy array without + overrides when called like ``implementation(*args, **kwargs)``. + public_api : function + Function exposed by NumPy's public API originally called like + ``public_api(*args, **kwargs)`` on which arguments are now being + checked. + relevant_args : iterable + Iterable of arguments to check for __array_function__ methods. + args : tuple + Arbitrary positional arguments originally passed into ``public_api``. + kwargs : dict + Arbitrary keyword arguments originally passed into ``public_api``. + + Returns + ------- + Result from calling ``implementation()`` or an ``__array_function__`` + method, as appropriate. + + Raises + ------ + TypeError : if no implementation is found. 
+ """) + + +# exposed for testing purposes; used internally by implement_array_function +add_docstring( + _get_implementing_args, + """ + Collect arguments on which to call __array_function__. + + Parameters + ---------- + relevant_args : iterable of array-like + Iterable of possibly array-like arguments to check for + __array_function__ methods. + + Returns + ------- + Sequence of arguments with __array_function__ methods, in the order in + which they should be called. + """) + + +ArgSpec = collections.namedtuple('ArgSpec', 'args varargs keywords defaults') + + +def verify_matching_signatures(implementation, dispatcher): + """Verify that a dispatcher function has the right signature.""" + implementation_spec = ArgSpec(*getargspec(implementation)) + dispatcher_spec = ArgSpec(*getargspec(dispatcher)) + + if (implementation_spec.args != dispatcher_spec.args or + implementation_spec.varargs != dispatcher_spec.varargs or + implementation_spec.keywords != dispatcher_spec.keywords or + (bool(implementation_spec.defaults) != + bool(dispatcher_spec.defaults)) or + (implementation_spec.defaults is not None and + len(implementation_spec.defaults) != + len(dispatcher_spec.defaults))): + raise RuntimeError('implementation and dispatcher for %s have ' + 'different function signatures' % implementation) + + if implementation_spec.defaults is not None: + if dispatcher_spec.defaults != (None,) * len(dispatcher_spec.defaults): + raise RuntimeError('dispatcher functions can only use None for ' + 'default argument values') + + +def set_module(module): + """Decorator for overriding __module__ on a function or class. + + Example usage:: + + @set_module('numpy') + def example(): + pass + + assert example.__module__ == 'numpy' + """ + def decorator(func): + if module is not None: + func.__module__ = module + return func + return decorator + + + +# Call textwrap.dedent here instead of in the function so as to avoid +# calling dedent multiple times on the same text +_wrapped_func_source = textwrap.dedent(""" + @functools.wraps(implementation) + def {name}(*args, **kwargs): + relevant_args = dispatcher(*args, **kwargs) + return implement_array_function( + implementation, {name}, relevant_args, args, kwargs) + """) + + +def array_function_dispatch(dispatcher, module=None, verify=True, + docs_from_dispatcher=False): + """Decorator for adding dispatch with the __array_function__ protocol. + + See NEP-18 for example usage. + + Parameters + ---------- + dispatcher : callable + Function that when called like ``dispatcher(*args, **kwargs)`` with + arguments from the NumPy function call returns an iterable of + array-like arguments to check for ``__array_function__``. + module : str, optional + __module__ attribute to set on new function, e.g., ``module='numpy'``. + By default, module is copied from the decorated function. + verify : bool, optional + If True, verify the that the signature of the dispatcher and decorated + function signatures match exactly: all required and optional arguments + should appear in order with the same names, but the default values for + all optional arguments should be ``None``. Only disable verification + if the dispatcher's signature needs to deviate for some particular + reason, e.g., because the function has a signature like + ``func(*args, **kwargs)``. + docs_from_dispatcher : bool, optional + If True, copy docs from the dispatcher function onto the dispatched + function, rather than from the implementation. 
This is useful for + functions defined in C, which otherwise don't have docstrings. + + Returns + ------- + Function suitable for decorating the implementation of a NumPy function. + """ + + if not ARRAY_FUNCTION_ENABLED: + def decorator(implementation): + if docs_from_dispatcher: + add_docstring(implementation, dispatcher.__doc__) + if module is not None: + implementation.__module__ = module + return implementation + return decorator + + def decorator(implementation): + if verify: + verify_matching_signatures(implementation, dispatcher) + + if docs_from_dispatcher: + add_docstring(implementation, dispatcher.__doc__) + + # Equivalently, we could define this function directly instead of using + # exec. This version has the advantage of giving the helper function a + # more interpettable name. Otherwise, the original function does not + # show up at all in many cases, e.g., if it's written in C or if the + # dispatcher gets an invalid keyword argument. + source = _wrapped_func_source.format(name=implementation.__name__) + + source_object = compile( + source, filename='<__array_function__ internals>', mode='exec') + scope = { + 'implementation': implementation, + 'dispatcher': dispatcher, + 'functools': functools, + 'implement_array_function': implement_array_function, + } + exec(source_object, scope) + + public_api = scope[implementation.__name__] + + if module is not None: + public_api.__module__ = module + + public_api._implementation = implementation + + return public_api + + return decorator + + +def array_function_from_dispatcher( + implementation, module=None, verify=True, docs_from_dispatcher=True): + """Like array_function_dispatcher, but with function arguments flipped.""" + + def decorator(dispatcher): + return array_function_dispatch( + dispatcher, module, verify=verify, + docs_from_dispatcher=docs_from_dispatcher)(implementation) + return decorator diff --git a/MLPY/Lib/site-packages/numpy/core/records.py b/MLPY/Lib/site-packages/numpy/core/records.py new file mode 100644 index 0000000000000000000000000000000000000000..3a150e2d48f31e841ceb09dfb7622d8e4528971c --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/records.py @@ -0,0 +1,1092 @@ +""" +Record Arrays +============= +Record arrays expose the fields of structured arrays as properties. + +Most commonly, ndarrays contain elements of a single type, e.g. floats, +integers, bools etc. However, it is possible for elements to be combinations +of these using structured types, such as:: + + >>> a = np.array([(1, 2.0), (1, 2.0)], dtype=[('x', np.int64), ('y', np.float64)]) + >>> a + array([(1, 2.), (1, 2.)], dtype=[('x', '>> a['x'] + array([1, 1]) + + >>> a['y'] + array([2., 2.]) + +Record arrays allow us to access fields as properties:: + + >>> ar = np.rec.array(a) + + >>> ar.x + array([1, 1]) + + >>> ar.y + array([2., 2.]) + +""" +import os +import warnings +from collections import Counter +from contextlib import nullcontext + +from . import numeric as sb +from . 
import numerictypes as nt
+from numpy.compat import os_fspath
+from numpy.core.overrides import set_module
+from .arrayprint import get_printoptions
+
+# All of the functions allow formats to be a dtype
+__all__ = [
+    'record', 'recarray', 'format_parser',
+    'fromarrays', 'fromrecords', 'fromstring', 'fromfile', 'array',
+]
+
+
+ndarray = sb.ndarray
+
+_byteorderconv = {'b':'>',
+                  'l':'<',
+                  'n':'=',
+                  'B':'>',
+                  'L':'<',
+                  'N':'=',
+                  'S':'s',
+                  's':'s',
+                  '>':'>',
+                  '<':'<',
+                  '=':'=',
+                  '|':'|',
+                  'I':'|',
+                  'i':'|'}
+
+# formats regular expression
+# allows multidimension spec with a tuple syntax in front
+# of the letter code '(2,3)f4' and ' (  2 ,  3  ) f4  '
+# are equally allowed
+
+numfmt = nt.sctypeDict
+
+
+def find_duplicate(list):
+    """Find duplication in a list, return a list of duplicated elements"""
+    return [
+        item
+        for item, counts in Counter(list).items()
+        if counts > 1
+    ]
+
+
+@set_module('numpy')
+class format_parser:
+    """
+    Class to convert formats, names, titles description to a dtype.
+
+    After constructing the format_parser object, the dtype attribute is
+    the converted data-type:
+    ``dtype = format_parser(formats, names, titles).dtype``
+
+    Attributes
+    ----------
+    dtype : dtype
+        The converted data-type.
+
+    Parameters
+    ----------
+    formats : str or list of str
+        The format description, either specified as a string with
+        comma-separated format descriptions in the form ``'f8, i4, a5'``, or
+        a list of format description strings in the form
+        ``['f8', 'i4', 'a5']``.
+    names : str or list/tuple of str
+        The field names, either specified as a comma-separated string in the
+        form ``'col1, col2, col3'``, or as a list or tuple of strings in the
+        form ``['col1', 'col2', 'col3']``.
+        An empty list can be used, in that case default field names
+        ('f0', 'f1', ...) are used.
+    titles : sequence
+        Sequence of title strings. An empty list can be used to leave titles
+        out.
+    aligned : bool, optional
+        If True, align the fields by padding as the C-compiler would.
+        Default is False.
+    byteorder : str, optional
+        If specified, all the fields will be changed to the
+        provided byte-order. Otherwise, the default byte-order is
+        used. For all available string specifiers, see `dtype.newbyteorder`.
+
+    See Also
+    --------
+    dtype, typename, sctype2char
+
+    Examples
+    --------
+    >>> np.format_parser(['<f8', '<i4', '<a5'], ['col1', 'col2', 'col3'],
+    ...                  ['T1', 'T2', 'T3']).dtype
+    dtype([(('T1', 'col1'), '<f8'), (('T2', 'col2'), '<i4'), (('T3', 'col3'), 'S5')])
+
+    `names` and/or `titles` can be empty lists. If `titles` is an empty list,
+    titles will simply not appear. If `names` is empty, default field names
+    will be used.
+
+    >>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'],
+    ...                  []).dtype
+    dtype([('col1', '<f8'), ('col2', '<i4'), ('col3', 'S5')])
+    >>> np.format_parser(['<f8', '<i4', '<a5'], [], []).dtype
+    dtype([('f0', '<f8'), ('f1', '<i4'), ('f2', 'S5')])
+
+    """
+
+    def __init__(self, formats, names, titles, aligned=False, byteorder=None):
+        self._parseFormats(formats, aligned)
+        self._setfieldnames(names, titles)
+        self._createdtype(byteorder)
+
+    def _parseFormats(self, formats, aligned=False):
+        """ Parse the field formats """
+
+        if formats is None:
+            raise ValueError("Need formats argument")
+
+        # formats may have been specified as a list of lists instead of
+        # a single string or a comma-separated list of strings.
+        if isinstance(formats, list):
+            dtype = sb.dtype(
+                [
+                    ('f{}'.format(i), format_)
+                    for i, format_ in enumerate(formats)
+                ],
+                aligned,
+            )
+        else:
+            dtype = sb.dtype(formats, aligned)
+        fields = dtype.fields
+        if fields is None:
+            dtype = sb.dtype([('f1', dtype)], aligned)
+            fields = dtype.fields
+        keys = dtype.names
+        self._f_formats = [fields[key][0] for key in keys]
+        self._offsets = [fields[key][1] for key in keys]
+        self._nfields = len(keys)
+
+    def _setfieldnames(self, names, titles):
+        """convert input field names into a list and assign to the _names
+        attribute """
+
+        if names:
+            if type(names) in [list, tuple]:
+                pass
+            elif isinstance(names, str):
+                names = names.split(',')
+            else:
+                raise NameError("illegal input names %s" % repr(names))
+
+            self._names = [n.strip() for n in names[:self._nfields]]
+        else:
+            self._names = []
+
+        # if the names are not specified, they will be assigned as
+        #  "f0, f1, f2,..."
+        # if not enough names are specified, they will be assigned as "f3,
+        #  f4,..." etc. coming after the last name entered
+        self._names += ['f%d' % i for i in range(len(self._names),
+                                                 self._nfields)]
+        # check for redundant names
+        _dup = find_duplicate(self._names)
+        if _dup:
+            raise ValueError("Duplicate field names: %s" % _dup)
+
+        if titles:
+            self._titles = [n.strip() for n in titles[:self._nfields]]
+        else:
+            self._titles = []
+            titles = []
+
+        if self._nfields > len(titles):
+            self._titles += [None] * (self._nfields - len(titles))
+
+    def _createdtype(self, byteorder):
+        dtype = sb.dtype({
+            'names': self._names,
+            'formats': self._f_formats,
+            'offsets': self._offsets,
+            'titles': self._titles,
+        })
+        if byteorder is not None:
+            byteorder = _byteorderconv[byteorder[0]]
+            dtype = dtype.newbyteorder(byteorder)
+
+        self.dtype = dtype
+
+
+class record(nt.void):
+    """A data-type scalar that allows field access as attribute lookup.
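+
+    Indexing a `recarray` yields `record` scalars, so ``arr[0].x`` and
+    ``arr[0]['x']`` access the same field value.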
+ """ + + # manually set name and module so that this class's type shows up + # as numpy.record when printed + __name__ = 'record' + __module__ = 'numpy' + + def __repr__(self): + if get_printoptions()['legacy'] == '1.13': + return self.__str__() + return super().__repr__() + + def __str__(self): + if get_printoptions()['legacy'] == '1.13': + return str(self.item()) + return super().__str__() + + def __getattribute__(self, attr): + if attr in ('setfield', 'getfield', 'dtype'): + return nt.void.__getattribute__(self, attr) + try: + return nt.void.__getattribute__(self, attr) + except AttributeError: + pass + fielddict = nt.void.__getattribute__(self, 'dtype').fields + res = fielddict.get(attr, None) + if res: + obj = self.getfield(*res[:2]) + # if it has fields return a record, + # otherwise return the object + try: + dt = obj.dtype + except AttributeError: + #happens if field is Object type + return obj + if dt.names is not None: + return obj.view((self.__class__, obj.dtype)) + return obj + else: + raise AttributeError("'record' object has no " + "attribute '%s'" % attr) + + def __setattr__(self, attr, val): + if attr in ('setfield', 'getfield', 'dtype'): + raise AttributeError("Cannot set '%s' attribute" % attr) + fielddict = nt.void.__getattribute__(self, 'dtype').fields + res = fielddict.get(attr, None) + if res: + return self.setfield(val, *res[:2]) + else: + if getattr(self, attr, None): + return nt.void.__setattr__(self, attr, val) + else: + raise AttributeError("'record' object has no " + "attribute '%s'" % attr) + + def __getitem__(self, indx): + obj = nt.void.__getitem__(self, indx) + + # copy behavior of record.__getattribute__, + if isinstance(obj, nt.void) and obj.dtype.names is not None: + return obj.view((self.__class__, obj.dtype)) + else: + # return a single element + return obj + + def pprint(self): + """Pretty-print all fields.""" + # pretty-print all fields + names = self.dtype.names + maxlen = max(len(name) for name in names) + fmt = '%% %ds: %%s' % maxlen + rows = [fmt % (name, getattr(self, name)) for name in names] + return "\n".join(rows) + +# The recarray is almost identical to a standard array (which supports +# named fields already) The biggest difference is that it can use +# attribute-lookup to find the fields and it is constructed using +# a record. + +# If byteorder is given it forces a particular byteorder on all +# the fields (and any subfields) + +class recarray(ndarray): + """Construct an ndarray that allows field access using attributes. + + Arrays may have a data-types containing fields, analogous + to columns in a spread sheet. An example is ``[(x, int), (y, float)]``, + where each entry in the array is a pair of ``(int, float)``. Normally, + these attributes are accessed using dictionary lookups such as ``arr['x']`` + and ``arr['y']``. Record arrays allow the fields to be accessed as members + of the array, using ``arr.x`` and ``arr.y``. + + Parameters + ---------- + shape : tuple + Shape of output array. + dtype : data-type, optional + The desired data-type. By default, the data-type is determined + from `formats`, `names`, `titles`, `aligned` and `byteorder`. + formats : list of data-types, optional + A list containing the data-types for the different columns, e.g. + ``['i4', 'f8', 'i4']``. `formats` does *not* support the new + convention of using types directly, i.e. ``(int, float, int)``. + Note that `formats` must be a list, not a tuple. + Given that `formats` is somewhat limited, we recommend specifying + `dtype` instead. 
+ names : tuple of str, optional + The name of each column, e.g. ``('x', 'y', 'z')``. + buf : buffer, optional + By default, a new array is created of the given shape and data-type. + If `buf` is specified and is an object exposing the buffer interface, + the array will use the memory from the existing buffer. In this case, + the `offset` and `strides` keywords are available. + + Other Parameters + ---------------- + titles : tuple of str, optional + Aliases for column names. For example, if `names` were + ``('x', 'y', 'z')`` and `titles` is + ``('x_coordinate', 'y_coordinate', 'z_coordinate')``, then + ``arr['x']`` is equivalent to both ``arr.x`` and ``arr.x_coordinate``. + byteorder : {'<', '>', '='}, optional + Byte-order for all fields. + aligned : bool, optional + Align the fields in memory as the C-compiler would. + strides : tuple of ints, optional + Buffer (`buf`) is interpreted according to these strides (strides + define how many bytes each array element, row, column, etc. + occupy in memory). + offset : int, optional + Start reading buffer (`buf`) from this offset onwards. + order : {'C', 'F'}, optional + Row-major (C-style) or column-major (Fortran-style) order. + + Returns + ------- + rec : recarray + Empty array of the given shape and type. + + See Also + -------- + core.records.fromrecords : Construct a record array from data. + record : fundamental data-type for `recarray`. + format_parser : determine a data-type from formats, names, titles. + + Notes + ----- + This constructor can be compared to ``empty``: it creates a new record + array but does not fill it with data. To create a record array from data, + use one of the following methods: + + 1. Create a standard ndarray and convert it to a record array, + using ``arr.view(np.recarray)`` + 2. Use the `buf` keyword. + 3. Use `np.rec.fromrecords`. + + Examples + -------- + Create an array with two fields, ``x`` and ``y``: + + >>> x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', '>> x + array([(1., 2), (3., 4)], dtype=[('x', '>> x['x'] + array([1., 3.]) + + View the array as a record array: + + >>> x = x.view(np.recarray) + + >>> x.x + array([1., 3.]) + + >>> x.y + array([2, 4]) + + Create a new, empty record array: + + >>> np.recarray((2,), + ... dtype=[('x', int), ('y', float), ('z', int)]) #doctest: +SKIP + rec.array([(-1073741821, 1.2249118382103472e-301, 24547520), + (3471280, 1.2134086255804012e-316, 0)], + dtype=[('x', ' 0 or self.shape == (0,): + lst = sb.array2string( + self, separator=', ', prefix=prefix, suffix=',') + else: + # show zero-length shape unless it is (0,) + lst = "[], shape=%s" % (repr(self.shape),) + + lf = '\n'+' '*len(prefix) + if get_printoptions()['legacy'] == '1.13': + lf = ' ' + lf # trailing space + return fmt % (lst, lf, repr_dtype) + + def field(self, attr, val=None): + if isinstance(attr, int): + names = ndarray.__getattribute__(self, 'dtype').names + attr = names[attr] + + fielddict = ndarray.__getattribute__(self, 'dtype').fields + + res = fielddict[attr][:2] + + if val is None: + obj = self.getfield(*res) + if obj.dtype.names is not None: + return obj + return obj.view(ndarray) + else: + return self.setfield(val, *res) + + +def _deprecate_shape_0_as_None(shape): + if shape == 0: + warnings.warn( + "Passing `shape=0` to have the shape be inferred is deprecated, " + "and in future will be equivalent to `shape=(0,)`. 
To infer " + "the shape and suppress this warning, pass `shape=None` instead.", + FutureWarning, stacklevel=3) + return None + else: + return shape + + +def fromarrays(arrayList, dtype=None, shape=None, formats=None, + names=None, titles=None, aligned=False, byteorder=None): + """Create a record array from a (flat) list of arrays + + Parameters + ---------- + arrayList : list or tuple + List of array-like objects (such as lists, tuples, + and ndarrays). + dtype : data-type, optional + valid dtype for all arrays + shape : int or tuple of ints, optional + Shape of the resulting array. If not provided, inferred from + ``arrayList[0]``. + formats, names, titles, aligned, byteorder : + If `dtype` is ``None``, these arguments are passed to + `numpy.format_parser` to construct a dtype. See that function for + detailed documentation. + + Returns + ------- + np.recarray + Record array consisting of given arrayList columns. + + Examples + -------- + >>> x1=np.array([1,2,3,4]) + >>> x2=np.array(['a','dd','xyz','12']) + >>> x3=np.array([1.1,2,3,4]) + >>> r = np.core.records.fromarrays([x1,x2,x3],names='a,b,c') + >>> print(r[1]) + (2, 'dd', 2.0) # may vary + >>> x1[1]=34 + >>> r.a + array([1, 2, 3, 4]) + + >>> x1 = np.array([1, 2, 3, 4]) + >>> x2 = np.array(['a', 'dd', 'xyz', '12']) + >>> x3 = np.array([1.1, 2, 3,4]) + >>> r = np.core.records.fromarrays( + ... [x1, x2, x3], + ... dtype=np.dtype([('a', np.int32), ('b', 'S3'), ('c', np.float32)])) + >>> r + rec.array([(1, b'a', 1.1), (2, b'dd', 2. ), (3, b'xyz', 3. ), + (4, b'12', 4. )], + dtype=[('a', ' 0: + shape = shape[:-nn] + + for k, obj in enumerate(arrayList): + nn = descr[k].ndim + testshape = obj.shape[:obj.ndim - nn] + if testshape != shape: + raise ValueError("array-shape mismatch in array %d" % k) + + _array = recarray(shape, descr) + + # populate the record array (makes a copy) + for i in range(len(arrayList)): + _array[_names[i]] = arrayList[i] + + return _array + +def fromrecords(recList, dtype=None, shape=None, formats=None, names=None, + titles=None, aligned=False, byteorder=None): + """Create a recarray from a list of records in text form. + + Parameters + ---------- + recList : sequence + data in the same field may be heterogeneous - they will be promoted + to the highest data type. + dtype : data-type, optional + valid dtype for all arrays + shape : int or tuple of ints, optional + shape of each array. + formats, names, titles, aligned, byteorder : + If `dtype` is ``None``, these arguments are passed to + `numpy.format_parser` to construct a dtype. See that function for + detailed documentation. + + If both `formats` and `dtype` are None, then this will auto-detect + formats. Use list of tuples rather than list of lists for faster + processing. + + Returns + ------- + np.recarray + record array consisting of given recList rows. + + Examples + -------- + >>> r=np.core.records.fromrecords([(456,'dbe',1.2),(2,'de',1.3)], + ... names='col1,col2,col3') + >>> print(r[0]) + (456, 'dbe', 1.2) + >>> r.col1 + array([456, 2]) + >>> r.col2 + array(['dbe', 'de'], dtype='>> import pickle + >>> pickle.loads(pickle.dumps(r)) + rec.array([(456, 'dbe', 1.2), ( 2, 'de', 1.3)], + dtype=[('col1', ' 1: + raise ValueError("Can only deal with 1-d array.") + _array = recarray(shape, descr) + for k in range(_array.size): + _array[k] = tuple(recList[k]) + # list of lists instead of list of tuples ? + # 2018-02-07, 1.14.1 + warnings.warn( + "fromrecords expected a list of tuples, may have received a list " + "of lists instead. 
In the future that will raise an error", + FutureWarning, stacklevel=2) + return _array + else: + if shape is not None and retval.shape != shape: + retval.shape = shape + + res = retval.view(recarray) + + return res + + +def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None, + names=None, titles=None, aligned=False, byteorder=None): + r"""Create a record array from binary data + + Note that despite the name of this function it does not accept `str` + instances. + + Parameters + ---------- + datastring : bytes-like + Buffer of binary data + dtype : data-type, optional + Valid dtype for all arrays + shape : int or tuple of ints, optional + Shape of each array. + offset : int, optional + Position in the buffer to start reading from. + formats, names, titles, aligned, byteorder : + If `dtype` is ``None``, these arguments are passed to + `numpy.format_parser` to construct a dtype. See that function for + detailed documentation. + + + Returns + ------- + np.recarray + Record array view into the data in datastring. This will be readonly + if `datastring` is readonly. + + See Also + -------- + numpy.frombuffer + + Examples + -------- + >>> a = b'\x01\x02\x03abc' + >>> np.core.records.fromstring(a, dtype='u1,u1,u1,S3') + rec.array([(1, 2, 3, b'abc')], + dtype=[('f0', 'u1'), ('f1', 'u1'), ('f2', 'u1'), ('f3', 'S3')]) + + >>> grades_dtype = [('Name', (np.str_, 10)), ('Marks', np.float64), + ... ('GradeLevel', np.int32)] + >>> grades_array = np.array([('Sam', 33.3, 3), ('Mike', 44.4, 5), + ... ('Aadi', 66.6, 6)], dtype=grades_dtype) + >>> np.core.records.fromstring(grades_array.tobytes(), dtype=grades_dtype) + rec.array([('Sam', 33.3, 3), ('Mike', 44.4, 5), ('Aadi', 66.6, 6)], + dtype=[('Name', '>> s = '\x01\x02\x03abc' + >>> np.core.records.fromstring(s, dtype='u1,u1,u1,S3') + Traceback (most recent call last) + ... + TypeError: a bytes-like object is required, not 'str' + """ + + if dtype is None and formats is None: + raise TypeError("fromstring() needs a 'dtype' or 'formats' argument") + + if dtype is not None: + descr = sb.dtype(dtype) + else: + descr = format_parser(formats, names, titles, aligned, byteorder).dtype + + itemsize = descr.itemsize + + # NumPy 1.19.0, 2020-01-01 + shape = _deprecate_shape_0_as_None(shape) + + if shape in (None, -1): + shape = (len(datastring) - offset) // itemsize + + _array = recarray(shape, descr, buf=datastring, offset=offset) + return _array + +def get_remaining_size(fd): + pos = fd.tell() + try: + fd.seek(0, 2) + return fd.tell() - pos + finally: + fd.seek(pos, 0) + +def fromfile(fd, dtype=None, shape=None, offset=0, formats=None, + names=None, titles=None, aligned=False, byteorder=None): + """Create an array from binary file data + + Parameters + ---------- + fd : str or file type + If file is a string or a path-like object then that file is opened, + else it is assumed to be a file object. The file object must + support random access (i.e. it must have tell and seek methods). + dtype : data-type, optional + valid dtype for all arrays + shape : int or tuple of ints, optional + shape of each array. + offset : int, optional + Position in the file to start reading from. + formats, names, titles, aligned, byteorder : + If `dtype` is ``None``, these arguments are passed to + `numpy.format_parser` to construct a dtype. See that function for + detailed documentation + + Returns + ------- + np.recarray + record array consisting of data enclosed in file. 
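+
+    A minimal editor-added sketch (not part of the original docstring;
+    the in-memory buffer is a stand-in for a real file): any object
+    exposing ``readinto`` works as `fd`, and the shape is inferred
+    when omitted.
+
+    >>> import io
+    >>> buf = io.BytesIO(np.zeros(4, dtype='i4,f8').tobytes())
+    >>> np.core.records.fromfile(buf, dtype='i4,f8').shape
+    (4,)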
+ + Examples + -------- + >>> from tempfile import TemporaryFile + >>> a = np.empty(10,dtype='f8,i4,a5') + >>> a[5] = (0.5,10,'abcde') + >>> + >>> fd=TemporaryFile() + >>> a = a.newbyteorder('<') + >>> a.tofile(fd) + >>> + >>> _ = fd.seek(0) + >>> r=np.core.records.fromfile(fd, formats='f8,i4,a5', shape=10, + ... byteorder='<') + >>> print(r[5]) + (0.5, 10, 'abcde') + >>> r.shape + (10,) + """ + + if dtype is None and formats is None: + raise TypeError("fromfile() needs a 'dtype' or 'formats' argument") + + # NumPy 1.19.0, 2020-01-01 + shape = _deprecate_shape_0_as_None(shape) + + if shape is None: + shape = (-1,) + elif isinstance(shape, int): + shape = (shape,) + + if hasattr(fd, 'readinto'): + # GH issue 2504. fd supports io.RawIOBase or io.BufferedIOBase interface. + # Example of fd: gzip, BytesIO, BufferedReader + # file already opened + ctx = nullcontext(fd) + else: + # open file + ctx = open(os_fspath(fd), 'rb') + + with ctx as fd: + if offset > 0: + fd.seek(offset, 1) + size = get_remaining_size(fd) + + if dtype is not None: + descr = sb.dtype(dtype) + else: + descr = format_parser(formats, names, titles, aligned, byteorder).dtype + + itemsize = descr.itemsize + + shapeprod = sb.array(shape).prod(dtype=nt.intp) + shapesize = shapeprod * itemsize + if shapesize < 0: + shape = list(shape) + shape[shape.index(-1)] = size // -shapesize + shape = tuple(shape) + shapeprod = sb.array(shape).prod(dtype=nt.intp) + + nbytes = shapeprod * itemsize + + if nbytes > size: + raise ValueError( + "Not enough bytes left in file for specified shape and type") + + # create the array + _array = recarray(shape, descr) + nbytesread = fd.readinto(_array.data) + if nbytesread != nbytes: + raise IOError("Didn't read as many bytes as expected") + + return _array + +def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None, + names=None, titles=None, aligned=False, byteorder=None, copy=True): + """ + Construct a record array from a wide-variety of objects. + + A general-purpose record array constructor that dispatches to the + appropriate `recarray` creation function based on the inputs (see Notes). + + Parameters + ---------- + obj : any + Input object. See Notes for details on how various input types are + treated. + dtype : data-type, optional + Valid dtype for array. + shape : int or tuple of ints, optional + Shape of each array. + offset : int, optional + Position in the file or buffer to start reading from. + strides : tuple of ints, optional + Buffer (`buf`) is interpreted according to these strides (strides + define how many bytes each array element, row, column, etc. + occupy in memory). + formats, names, titles, aligned, byteorder : + If `dtype` is ``None``, these arguments are passed to + `numpy.format_parser` to construct a dtype. See that function for + detailed documentation. + copy : bool, optional + Whether to copy the input object (True), or to use a reference instead. + This option only applies when the input is an ndarray or recarray. + Defaults to True. + + Returns + ------- + np.recarray + Record array created from the specified object. + + Notes + ----- + If `obj` is ``None``, then call the `~numpy.recarray` constructor. If + `obj` is a string, then call the `fromstring` constructor. If `obj` is a + list or a tuple, then if the first object is an `~numpy.ndarray`, call + `fromarrays`, otherwise call `fromrecords`. If `obj` is a + `~numpy.recarray`, then make a copy of the data in the recarray + (if ``copy=True``) and use the new formats, names, and titles. 
If `obj` + is a file, then call `fromfile`. Finally, if obj is an `ndarray`, then + return ``obj.view(recarray)``, making a copy of the data if ``copy=True``. + + Examples + -------- + >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + + >>> np.core.records.array(a) + rec.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]], + dtype=int32) + + >>> b = [(1, 1), (2, 4), (3, 9)] + >>> c = np.core.records.array(b, formats = ['i2', 'f2'], names = ('x', 'y')) + >>> c + rec.array([(1, 1.0), (2, 4.0), (3, 9.0)], + dtype=[('x', '>> c.x + rec.array([1, 2, 3], dtype=int16) + + >>> c.y + rec.array([ 1.0, 4.0, 9.0], dtype=float16) + + >>> r = np.rec.array(['abc','def'], names=['col1','col2']) + >>> print(r.col1) + abc + + >>> r.col1 + array('abc', dtype='>> r.col2 + array('def', dtype=' sizeof(double) + if a == "Intel" or a == "AMD64": + deflist.append('FORCE_NO_LONG_DOUBLE_FORMATTING') + +def check_math_capabilities(config, ext, moredefs, mathlibs): + def check_func(func_name): + return config.check_func(func_name, libraries=mathlibs, + decl=True, call=True) + + def check_funcs_once(funcs_name): + decl = dict([(f, True) for f in funcs_name]) + st = config.check_funcs_once(funcs_name, libraries=mathlibs, + decl=decl, call=decl) + if st: + moredefs.extend([(fname2def(f), 1) for f in funcs_name]) + return st + + def check_funcs(funcs_name): + # Use check_funcs_once first, and if it does not work, test func per + # func. Return success only if all the functions are available + if not check_funcs_once(funcs_name): + # Global check failed, check func per func + for f in funcs_name: + if check_func(f): + moredefs.append((fname2def(f), 1)) + return 0 + else: + return 1 + + #use_msvc = config.check_decl("_MSC_VER") + + if not check_funcs_once(MANDATORY_FUNCS): + raise SystemError("One of the required function to build numpy is not" + " available (the list is %s)." % str(MANDATORY_FUNCS)) + + # Standard functions which may not be available and for which we have a + # replacement implementation. Note that some of these are C99 functions. + + # XXX: hack to circumvent cpp pollution from python: python put its + # config.h in the public namespace, so we have a clash for the common + # functions we test. 
We remove every function tested by python's + # autoconf, hoping their own test are correct + for f in OPTIONAL_STDFUNCS_MAYBE: + if config.check_decl(fname2def(f), + headers=["Python.h", "math.h"]): + OPTIONAL_STDFUNCS.remove(f) + + check_funcs(OPTIONAL_STDFUNCS) + + for h in OPTIONAL_HEADERS: + if config.check_func("", decl=False, call=False, headers=[h]): + h = h.replace(".", "_").replace(os.path.sep, "_") + moredefs.append((fname2def(h), 1)) + + for tup in OPTIONAL_INTRINSICS: + headers = None + if len(tup) == 2: + f, args, m = tup[0], tup[1], fname2def(tup[0]) + elif len(tup) == 3: + f, args, headers, m = tup[0], tup[1], [tup[2]], fname2def(tup[0]) + else: + f, args, headers, m = tup[0], tup[1], [tup[2]], fname2def(tup[3]) + if config.check_func(f, decl=False, call=True, call_args=args, + headers=headers): + moredefs.append((m, 1)) + + for dec, fn in OPTIONAL_FUNCTION_ATTRIBUTES: + if config.check_gcc_function_attribute(dec, fn): + moredefs.append((fname2def(fn), 1)) + if fn == 'attribute_target_avx512f': + # GH-14787: Work around GCC<8.4 bug when compiling with AVX512 + # support on Windows-based platforms + if (sys.platform in ('win32', 'cygwin') and + config.check_compiler_gcc() and + not config.check_gcc_version_at_least(8, 4)): + ext.extra_compile_args.extend( + ['-ffixed-xmm%s' % n for n in range(16, 32)]) + + for dec, fn, code, header in OPTIONAL_FUNCTION_ATTRIBUTES_WITH_INTRINSICS: + if config.check_gcc_function_attribute_with_intrinsics(dec, fn, code, + header): + moredefs.append((fname2def(fn), 1)) + + for fn in OPTIONAL_VARIABLE_ATTRIBUTES: + if config.check_gcc_variable_attribute(fn): + m = fn.replace("(", "_").replace(")", "_") + moredefs.append((fname2def(m), 1)) + + # C99 functions: float and long double versions + check_funcs(C99_FUNCS_SINGLE) + check_funcs(C99_FUNCS_EXTENDED) + +def check_complex(config, mathlibs): + priv = [] + pub = [] + + try: + if os.uname()[0] == "Interix": + warnings.warn("Disabling broken complex support. See #1365", stacklevel=2) + return priv, pub + except Exception: + # os.uname not available on all platforms. blanket except ugly but safe + pass + + # Check for complex support + st = config.check_header('complex.h') + if st: + priv.append(('HAVE_COMPLEX_H', 1)) + pub.append(('NPY_USE_C99_COMPLEX', 1)) + + for t in C99_COMPLEX_TYPES: + st = config.check_type(t, headers=["complex.h"]) + if st: + pub.append(('NPY_HAVE_%s' % type2def(t), 1)) + + def check_prec(prec): + flist = [f + prec for f in C99_COMPLEX_FUNCS] + decl = dict([(f, True) for f in flist]) + if not config.check_funcs_once(flist, call=decl, decl=decl, + libraries=mathlibs): + for f in flist: + if config.check_func(f, call=True, decl=True, + libraries=mathlibs): + priv.append((fname2def(f), 1)) + else: + priv.extend([(fname2def(f), 1) for f in flist]) + + check_prec('') + check_prec('f') + check_prec('l') + + return priv, pub + +def check_ieee_macros(config): + priv = [] + pub = [] + + macros = [] + + def _add_decl(f): + priv.append(fname2def("decl_%s" % f)) + pub.append('NPY_%s' % fname2def("decl_%s" % f)) + + # XXX: hack to circumvent cpp pollution from python: python put its + # config.h in the public namespace, so we have a clash for the common + # functions we test. 
We remove every function tested by python's + # autoconf, hoping their own test are correct + _macros = ["isnan", "isinf", "signbit", "isfinite"] + for f in _macros: + py_symbol = fname2def("decl_%s" % f) + already_declared = config.check_decl(py_symbol, + headers=["Python.h", "math.h"]) + if already_declared: + if config.check_macro_true(py_symbol, + headers=["Python.h", "math.h"]): + pub.append('NPY_%s' % fname2def("decl_%s" % f)) + else: + macros.append(f) + # Normally, isnan and isinf are macro (C99), but some platforms only have + # func, or both func and macro version. Check for macro only, and define + # replacement ones if not found. + # Note: including Python.h is necessary because it modifies some math.h + # definitions + for f in macros: + st = config.check_decl(f, headers=["Python.h", "math.h"]) + if st: + _add_decl(f) + + return priv, pub + +def check_types(config_cmd, ext, build_dir): + private_defines = [] + public_defines = [] + + # Expected size (in number of bytes) for each type. This is an + # optimization: those are only hints, and an exhaustive search for the size + # is done if the hints are wrong. + expected = {'short': [2], 'int': [4], 'long': [8, 4], + 'float': [4], 'double': [8], 'long double': [16, 12, 8], + 'Py_intptr_t': [8, 4], 'PY_LONG_LONG': [8], 'long long': [8], + 'off_t': [8, 4]} + + # Check we have the python header (-dev* packages on Linux) + result = config_cmd.check_header('Python.h') + if not result: + python = 'python' + if '__pypy__' in sys.builtin_module_names: + python = 'pypy' + raise SystemError( + "Cannot compile 'Python.h'. Perhaps you need to " + "install {0}-dev|{0}-devel.".format(python)) + res = config_cmd.check_header("endian.h") + if res: + private_defines.append(('HAVE_ENDIAN_H', 1)) + public_defines.append(('NPY_HAVE_ENDIAN_H', 1)) + res = config_cmd.check_header("sys/endian.h") + if res: + private_defines.append(('HAVE_SYS_ENDIAN_H', 1)) + public_defines.append(('NPY_HAVE_SYS_ENDIAN_H', 1)) + + # Check basic types sizes + for type in ('short', 'int', 'long'): + res = config_cmd.check_decl("SIZEOF_%s" % sym2def(type), headers=["Python.h"]) + if res: + public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), "SIZEOF_%s" % sym2def(type))) + else: + res = config_cmd.check_type_size(type, expected=expected[type]) + if res >= 0: + public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res)) + else: + raise SystemError("Checking sizeof (%s) failed !" % type) + + for type in ('float', 'double', 'long double'): + already_declared = config_cmd.check_decl("SIZEOF_%s" % sym2def(type), + headers=["Python.h"]) + res = config_cmd.check_type_size(type, expected=expected[type]) + if res >= 0: + public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res)) + if not already_declared and not type == 'long double': + private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res)) + else: + raise SystemError("Checking sizeof (%s) failed !" % type) + + # Compute size of corresponding complex type: used to check that our + # definition is binary compatible with C99 complex type (check done at + # build time in npy_common.h) + complex_def = "struct {%s __x; %s __y;}" % (type, type) + res = config_cmd.check_type_size(complex_def, + expected=[2 * x for x in expected[type]]) + if res >= 0: + public_defines.append(('NPY_SIZEOF_COMPLEX_%s' % sym2def(type), '%d' % res)) + else: + raise SystemError("Checking sizeof (%s) failed !" 
% complex_def) + + for type in ('Py_intptr_t', 'off_t'): + res = config_cmd.check_type_size(type, headers=["Python.h"], + library_dirs=[pythonlib_dir()], + expected=expected[type]) + + if res >= 0: + private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res)) + public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res)) + else: + raise SystemError("Checking sizeof (%s) failed !" % type) + + # We check declaration AND type because that's how distutils does it. + if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']): + res = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h'], + library_dirs=[pythonlib_dir()], + expected=expected['PY_LONG_LONG']) + if res >= 0: + private_defines.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res)) + public_defines.append(('NPY_SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res)) + else: + raise SystemError("Checking sizeof (%s) failed !" % 'PY_LONG_LONG') + + res = config_cmd.check_type_size('long long', + expected=expected['long long']) + if res >= 0: + #private_defines.append(('SIZEOF_%s' % sym2def('long long'), '%d' % res)) + public_defines.append(('NPY_SIZEOF_%s' % sym2def('long long'), '%d' % res)) + else: + raise SystemError("Checking sizeof (%s) failed !" % 'long long') + + if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']): + raise RuntimeError( + "Config wo CHAR_BIT is not supported" + ", please contact the maintainers") + + return private_defines, public_defines + +def check_mathlib(config_cmd): + # Testing the C math library + mathlibs = [] + mathlibs_choices = [[], ['m'], ['cpml']] + mathlib = os.environ.get('MATHLIB') + if mathlib: + mathlibs_choices.insert(0, mathlib.split(',')) + for libs in mathlibs_choices: + if config_cmd.check_func("exp", libraries=libs, decl=True, call=True): + mathlibs = libs + break + else: + raise EnvironmentError("math library missing; rerun " + "setup.py after setting the " + "MATHLIB env variable") + return mathlibs + +def visibility_define(config): + """Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty + string).""" + hide = '__attribute__((visibility("hidden")))' + if config.check_gcc_function_attribute(hide, 'hideme'): + return hide + else: + return '' + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration, dot_join + from numpy.distutils.system_info import (get_info, blas_opt_info, + lapack_opt_info) + + config = Configuration('core', parent_package, top_path) + local_dir = config.local_path + codegen_dir = join(local_dir, 'code_generators') + + if is_released(config): + warnings.simplefilter('error', MismatchCAPIWarning) + + # Check whether we have a mismatch between the set C API VERSION and the + # actual C API VERSION + check_api_version(C_API_VERSION, codegen_dir) + + generate_umath_py = join(codegen_dir, 'generate_umath.py') + n = dot_join(config.name, 'generate_umath') + generate_umath = npy_load_module('_'.join(n.split('.')), + generate_umath_py, ('.py', 'U', 1)) + + header_dir = 'include/numpy' # this is relative to config.path_in_package + + cocache = CallOnceOnly() + + def generate_config_h(ext, build_dir): + target = join(build_dir, header_dir, 'config.h') + d = os.path.dirname(target) + if not os.path.exists(d): + os.makedirs(d) + + if newer(__file__, target): + config_cmd = config.get_config_cmd() + log.info('Generating %s', target) + + # Check sizeof + moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir) + + # Check math library and C99 math funcs 
availability + mathlibs = check_mathlib(config_cmd) + moredefs.append(('MATHLIB', ','.join(mathlibs))) + + check_math_capabilities(config_cmd, ext, moredefs, mathlibs) + moredefs.extend(cocache.check_ieee_macros(config_cmd)[0]) + moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0]) + + # Signal check + if is_npy_no_signal(): + moredefs.append('__NPY_PRIVATE_NO_SIGNAL') + + # Windows checks + if sys.platform == 'win32' or os.name == 'nt': + win32_checks(moredefs) + + # C99 restrict keyword + moredefs.append(('NPY_RESTRICT', config_cmd.check_restrict())) + + # Inline check + inline = config_cmd.check_inline() + + # Use relaxed stride checking + if NPY_RELAXED_STRIDES_CHECKING: + moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1)) + else: + moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 0)) + + # Use bogus stride debug aid when relaxed strides are enabled + if NPY_RELAXED_STRIDES_DEBUG: + moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 1)) + else: + moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 0)) + + # Get long double representation + rep = check_long_double_representation(config_cmd) + moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1)) + + if check_for_right_shift_internal_compiler_error(config_cmd): + moredefs.append('NPY_DO_NOT_OPTIMIZE_LONG_right_shift') + moredefs.append('NPY_DO_NOT_OPTIMIZE_ULONG_right_shift') + moredefs.append('NPY_DO_NOT_OPTIMIZE_LONGLONG_right_shift') + moredefs.append('NPY_DO_NOT_OPTIMIZE_ULONGLONG_right_shift') + + # Generate the config.h file from moredefs + with open(target, 'w') as target_f: + for d in moredefs: + if isinstance(d, str): + target_f.write('#define %s\n' % (d)) + else: + target_f.write('#define %s %s\n' % (d[0], d[1])) + + # define inline to our keyword, or nothing + target_f.write('#ifndef __cplusplus\n') + if inline == 'inline': + target_f.write('/* #undef inline */\n') + else: + target_f.write('#define inline %s\n' % inline) + target_f.write('#endif\n') + + # add the guard to make sure config.h is never included directly, + # but always through npy_config.h + target_f.write(textwrap.dedent(""" + #ifndef _NPY_NPY_CONFIG_H_ + #error config.h should never be included directly, include npy_config.h instead + #endif + """)) + + log.info('File: %s' % target) + with open(target) as target_f: + log.info(target_f.read()) + log.info('EOF') + else: + mathlibs = [] + with open(target) as target_f: + for line in target_f: + s = '#define MATHLIB' + if line.startswith(s): + value = line[len(s):].strip() + if value: + mathlibs.extend(value.split(',')) + + # Ugly: this can be called within a library and not an extension, + # in which case there is no libraries attributes (and none is + # needed). 
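+            # Editor-added sketch (hypothetical cached line, behavior-neutral):
+            # the loop above reduces to this parse of a config.h entry.
+            _demo_line = '#define MATHLIB m,cpml'
+            assert _demo_line[len('#define MATHLIB'):].strip().split(',') == \
+                ['m', 'cpml']
+            # The guard below then attaches the parsed mathlibs only when
+            # `ext` actually carries a `libraries` attribute: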
+ if hasattr(ext, 'libraries'): + ext.libraries.extend(mathlibs) + + incl_dir = os.path.dirname(target) + if incl_dir not in config.numpy_include_dirs: + config.numpy_include_dirs.append(incl_dir) + + return target + + def generate_numpyconfig_h(ext, build_dir): + """Depends on config.h: generate_config_h has to be called before !""" + # put common include directory in build_dir on search path + # allows using code generation in headers + config.add_include_dirs(join(build_dir, "src", "common")) + config.add_include_dirs(join(build_dir, "src", "npymath")) + + target = join(build_dir, header_dir, '_numpyconfig.h') + d = os.path.dirname(target) + if not os.path.exists(d): + os.makedirs(d) + if newer(__file__, target): + config_cmd = config.get_config_cmd() + log.info('Generating %s', target) + + # Check sizeof + ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir) + + if is_npy_no_signal(): + moredefs.append(('NPY_NO_SIGNAL', 1)) + + if is_npy_no_smp(): + moredefs.append(('NPY_NO_SMP', 1)) + else: + moredefs.append(('NPY_NO_SMP', 0)) + + mathlibs = check_mathlib(config_cmd) + moredefs.extend(cocache.check_ieee_macros(config_cmd)[1]) + moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[1]) + + if NPY_RELAXED_STRIDES_CHECKING: + moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1)) + + if NPY_RELAXED_STRIDES_DEBUG: + moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 1)) + + # Check whether we can use inttypes (C99) formats + if config_cmd.check_decl('PRIdPTR', headers=['inttypes.h']): + moredefs.append(('NPY_USE_C99_FORMATS', 1)) + + # visibility check + hidden_visibility = visibility_define(config_cmd) + moredefs.append(('NPY_VISIBILITY_HIDDEN', hidden_visibility)) + + # Add the C API/ABI versions + moredefs.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION)) + moredefs.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION)) + + # Add moredefs to header + with open(target, 'w') as target_f: + for d in moredefs: + if isinstance(d, str): + target_f.write('#define %s\n' % (d)) + else: + target_f.write('#define %s %s\n' % (d[0], d[1])) + + # Define __STDC_FORMAT_MACROS + target_f.write(textwrap.dedent(""" + #ifndef __STDC_FORMAT_MACROS + #define __STDC_FORMAT_MACROS 1 + #endif + """)) + + # Dump the numpyconfig.h header to stdout + log.info('File: %s' % target) + with open(target) as target_f: + log.info(target_f.read()) + log.info('EOF') + config.add_data_files((header_dir, target)) + return target + + def generate_api_func(module_name): + def generate_api(ext, build_dir): + script = join(codegen_dir, module_name + '.py') + sys.path.insert(0, codegen_dir) + try: + m = __import__(module_name) + log.info('executing %s', script) + h_file, c_file, doc_file = m.generate_api(os.path.join(build_dir, header_dir)) + finally: + del sys.path[0] + config.add_data_files((header_dir, h_file), + (header_dir, doc_file)) + return (h_file,) + return generate_api + + generate_numpy_api = generate_api_func('generate_numpy_api') + generate_ufunc_api = generate_api_func('generate_ufunc_api') + + config.add_include_dirs(join(local_dir, "src", "common")) + config.add_include_dirs(join(local_dir, "src")) + config.add_include_dirs(join(local_dir)) + + config.add_data_dir('include/numpy') + config.add_include_dirs(join('src', 'npymath')) + config.add_include_dirs(join('src', 'multiarray')) + config.add_include_dirs(join('src', 'umath')) + config.add_include_dirs(join('src', 'npysort')) + config.add_include_dirs(join('src', '_simd')) + + config.add_define_macros([("NPY_INTERNAL_BUILD", "1")]) # this 
macro indicates that Numpy build is in process + config.add_define_macros([("HAVE_NPY_CONFIG_H", "1")]) + if sys.platform[:3] == "aix": + config.add_define_macros([("_LARGE_FILES", None)]) + else: + config.add_define_macros([("_FILE_OFFSET_BITS", "64")]) + config.add_define_macros([('_LARGEFILE_SOURCE', '1')]) + config.add_define_macros([('_LARGEFILE64_SOURCE', '1')]) + + config.numpy_include_dirs.extend(config.paths('include')) + + deps = [join('src', 'npymath', '_signbit.c'), + join('include', 'numpy', '*object.h'), + join(codegen_dir, 'genapi.py'), + ] + + ####################################################################### + # npymath library # + ####################################################################### + + subst_dict = dict([("sep", os.path.sep), ("pkgname", "numpy.core")]) + + def get_mathlib_info(*args): + # Another ugly hack: the mathlib info is known once build_src is run, + # but we cannot use add_installed_pkg_config here either, so we only + # update the substitution dictionary during npymath build + config_cmd = config.get_config_cmd() + + # Check that the toolchain works, to fail early if it doesn't + # (avoid late errors with MATHLIB which are confusing if the + # compiler does not work). + st = config_cmd.try_link('int main(void) { return 0;}') + if not st: + # rerun the failing command in verbose mode + config_cmd.compiler.verbose = True + config_cmd.try_link('int main(void) { return 0;}') + raise RuntimeError("Broken toolchain: cannot link a simple C program") + mlibs = check_mathlib(config_cmd) + + posix_mlib = ' '.join(['-l%s' % l for l in mlibs]) + msvc_mlib = ' '.join(['%s.lib' % l for l in mlibs]) + subst_dict["posix_mathlib"] = posix_mlib + subst_dict["msvc_mathlib"] = msvc_mlib + + npymath_sources = [join('src', 'npymath', 'npy_math_internal.h.src'), + join('src', 'npymath', 'npy_math.c'), + join('src', 'npymath', 'ieee754.c.src'), + join('src', 'npymath', 'npy_math_complex.c.src'), + join('src', 'npymath', 'halffloat.c') + ] + + # Must be true for CRT compilers but not MinGW/cygwin. See gh-9977. 
+ # Intel and Clang also don't seem happy with /GL + is_msvc = (platform.platform().startswith('Windows') and + platform.python_compiler().startswith('MS')) + config.add_installed_library('npymath', + sources=npymath_sources + [get_mathlib_info], + install_dir='lib', + build_info={ + 'include_dirs' : [], # empty list required for creating npy_math_internal.h + 'extra_compiler_args' : (['/GL-'] if is_msvc else []), + }) + config.add_npy_pkg_config("npymath.ini.in", "lib/npy-pkg-config", + subst_dict) + config.add_npy_pkg_config("mlib.ini.in", "lib/npy-pkg-config", + subst_dict) + + ####################################################################### + # multiarray_tests module # + ####################################################################### + + config.add_extension('_multiarray_tests', + sources=[join('src', 'multiarray', '_multiarray_tests.c.src'), + join('src', 'common', 'mem_overlap.c'), + join('src', 'common', 'npy_argparse.c')], + depends=[join('src', 'common', 'mem_overlap.h'), + join('src', 'common', 'npy_argparse.h'), + join('src', 'common', 'npy_extint128.h')], + libraries=['npymath']) + + ####################################################################### + # _multiarray_umath module - common part # + ####################################################################### + + common_deps = [ + join('src', 'common', 'array_assign.h'), + join('src', 'common', 'binop_override.h'), + join('src', 'common', 'cblasfuncs.h'), + join('src', 'common', 'lowlevel_strided_loops.h'), + join('src', 'common', 'mem_overlap.h'), + join('src', 'common', 'npy_argparse.h'), + join('src', 'common', 'npy_cblas.h'), + join('src', 'common', 'npy_config.h'), + join('src', 'common', 'npy_ctypes.h'), + join('src', 'common', 'npy_extint128.h'), + join('src', 'common', 'npy_import.h'), + join('src', 'common', 'npy_longdouble.h'), + join('src', 'common', 'templ_common.h.src'), + join('src', 'common', 'ucsnarrow.h'), + join('src', 'common', 'ufunc_override.h'), + join('src', 'common', 'umathmodule.h'), + join('src', 'common', 'numpyos.h'), + join('src', 'common', 'npy_cpu_dispatch.h'), + join('src', 'common', 'simd', 'simd.h'), + ] + + common_src = [ + join('src', 'common', 'array_assign.c'), + join('src', 'common', 'mem_overlap.c'), + join('src', 'common', 'npy_argparse.c'), + join('src', 'common', 'npy_longdouble.c'), + join('src', 'common', 'templ_common.h.src'), + join('src', 'common', 'ucsnarrow.c'), + join('src', 'common', 'ufunc_override.c'), + join('src', 'common', 'numpyos.c'), + join('src', 'common', 'npy_cpu_features.c.src'), + ] + + if os.environ.get('NPY_USE_BLAS_ILP64', "0") != "0": + blas_info = get_info('blas_ilp64_opt', 2) + else: + blas_info = get_info('blas_opt', 0) + + have_blas = blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', []) + + if have_blas: + extra_info = blas_info + # These files are also in MANIFEST.in so that they are always in + # the source distribution independently of HAVE_CBLAS. 
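+    # Editor-added sketch (hypothetical dict, behavior-neutral): the
+    # CBLAS test above is a plain membership check on the define_macros
+    # reported for the detected BLAS.
+    _example_blas_info = {'define_macros': [('HAVE_CBLAS', None)],
+                          'libraries': ['openblas']}
+    assert ('HAVE_CBLAS', None) in _example_blas_info.get('define_macros', [])
+    # When CBLAS is available, the two helper sources are compiled in: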
+ common_src.extend([join('src', 'common', 'cblasfuncs.c'), + join('src', 'common', 'python_xerbla.c'), + ]) + else: + extra_info = {} + + ####################################################################### + # _multiarray_umath module - multiarray part # + ####################################################################### + + multiarray_deps = [ + join('src', 'multiarray', 'abstractdtypes.h'), + join('src', 'multiarray', 'arrayobject.h'), + join('src', 'multiarray', 'arraytypes.h'), + join('src', 'multiarray', 'arrayfunction_override.h'), + join('src', 'multiarray', 'array_coercion.h'), + join('src', 'multiarray', 'array_method.h'), + join('src', 'multiarray', 'npy_buffer.h'), + join('src', 'multiarray', 'calculation.h'), + join('src', 'multiarray', 'common.h'), + join('src', 'multiarray', 'common_dtype.h'), + join('src', 'multiarray', 'convert_datatype.h'), + join('src', 'multiarray', 'convert.h'), + join('src', 'multiarray', 'conversion_utils.h'), + join('src', 'multiarray', 'ctors.h'), + join('src', 'multiarray', 'descriptor.h'), + join('src', 'multiarray', 'dtypemeta.h'), + join('src', 'multiarray', 'dtype_transfer.h'), + join('src', 'multiarray', 'dragon4.h'), + join('src', 'multiarray', 'einsum_debug.h'), + join('src', 'multiarray', 'einsum_sumprod.h'), + join('src', 'multiarray', 'getset.h'), + join('src', 'multiarray', 'hashdescr.h'), + join('src', 'multiarray', 'iterators.h'), + join('src', 'multiarray', 'legacy_dtype_implementation.h'), + join('src', 'multiarray', 'mapping.h'), + join('src', 'multiarray', 'methods.h'), + join('src', 'multiarray', 'multiarraymodule.h'), + join('src', 'multiarray', 'nditer_impl.h'), + join('src', 'multiarray', 'number.h'), + join('src', 'multiarray', 'refcount.h'), + join('src', 'multiarray', 'scalartypes.h'), + join('src', 'multiarray', 'sequence.h'), + join('src', 'multiarray', 'shape.h'), + join('src', 'multiarray', 'strfuncs.h'), + join('src', 'multiarray', 'typeinfo.h'), + join('src', 'multiarray', 'usertypes.h'), + join('src', 'multiarray', 'vdot.h'), + join('include', 'numpy', 'arrayobject.h'), + join('include', 'numpy', '_neighborhood_iterator_imp.h'), + join('include', 'numpy', 'npy_endian.h'), + join('include', 'numpy', 'arrayscalars.h'), + join('include', 'numpy', 'noprefix.h'), + join('include', 'numpy', 'npy_interrupt.h'), + join('include', 'numpy', 'npy_3kcompat.h'), + join('include', 'numpy', 'npy_math.h'), + join('include', 'numpy', 'halffloat.h'), + join('include', 'numpy', 'npy_common.h'), + join('include', 'numpy', 'npy_os.h'), + join('include', 'numpy', 'utils.h'), + join('include', 'numpy', 'ndarrayobject.h'), + join('include', 'numpy', 'npy_cpu.h'), + join('include', 'numpy', 'numpyconfig.h'), + join('include', 'numpy', 'ndarraytypes.h'), + join('include', 'numpy', 'npy_1_7_deprecated_api.h'), + # add library sources as distuils does not consider libraries + # dependencies + ] + npymath_sources + + multiarray_src = [ + join('src', 'multiarray', 'abstractdtypes.c'), + join('src', 'multiarray', 'alloc.c'), + join('src', 'multiarray', 'arrayobject.c'), + join('src', 'multiarray', 'arraytypes.c.src'), + join('src', 'multiarray', 'array_coercion.c'), + join('src', 'multiarray', 'array_method.c'), + join('src', 'multiarray', 'array_assign_scalar.c'), + join('src', 'multiarray', 'array_assign_array.c'), + join('src', 'multiarray', 'arrayfunction_override.c'), + join('src', 'multiarray', 'buffer.c'), + join('src', 'multiarray', 'calculation.c'), + join('src', 'multiarray', 'compiled_base.c'), + join('src', 'multiarray', 
'common.c'), + join('src', 'multiarray', 'common_dtype.c'), + join('src', 'multiarray', 'convert.c'), + join('src', 'multiarray', 'convert_datatype.c'), + join('src', 'multiarray', 'conversion_utils.c'), + join('src', 'multiarray', 'ctors.c'), + join('src', 'multiarray', 'datetime.c'), + join('src', 'multiarray', 'datetime_strings.c'), + join('src', 'multiarray', 'datetime_busday.c'), + join('src', 'multiarray', 'datetime_busdaycal.c'), + join('src', 'multiarray', 'descriptor.c'), + join('src', 'multiarray', 'dtypemeta.c'), + join('src', 'multiarray', 'dragon4.c'), + join('src', 'multiarray', 'dtype_transfer.c'), + join('src', 'multiarray', 'einsum.c.src'), + join('src', 'multiarray', 'einsum_sumprod.c.src'), + join('src', 'multiarray', 'flagsobject.c'), + join('src', 'multiarray', 'getset.c'), + join('src', 'multiarray', 'hashdescr.c'), + join('src', 'multiarray', 'item_selection.c'), + join('src', 'multiarray', 'iterators.c'), + join('src', 'multiarray', 'legacy_dtype_implementation.c'), + join('src', 'multiarray', 'lowlevel_strided_loops.c.src'), + join('src', 'multiarray', 'mapping.c'), + join('src', 'multiarray', 'methods.c'), + join('src', 'multiarray', 'multiarraymodule.c'), + join('src', 'multiarray', 'nditer_templ.c.src'), + join('src', 'multiarray', 'nditer_api.c'), + join('src', 'multiarray', 'nditer_constr.c'), + join('src', 'multiarray', 'nditer_pywrap.c'), + join('src', 'multiarray', 'number.c'), + join('src', 'multiarray', 'refcount.c'), + join('src', 'multiarray', 'sequence.c'), + join('src', 'multiarray', 'shape.c'), + join('src', 'multiarray', 'scalarapi.c'), + join('src', 'multiarray', 'scalartypes.c.src'), + join('src', 'multiarray', 'strfuncs.c'), + join('src', 'multiarray', 'temp_elide.c'), + join('src', 'multiarray', 'typeinfo.c'), + join('src', 'multiarray', 'usertypes.c'), + join('src', 'multiarray', 'vdot.c'), + join('src', 'common', 'npy_sort.h.src'), + join('src', 'npysort', 'quicksort.c.src'), + join('src', 'npysort', 'mergesort.c.src'), + join('src', 'npysort', 'timsort.c.src'), + join('src', 'npysort', 'heapsort.c.src'), + join('src', 'npysort', 'radixsort.c.src'), + join('src', 'common', 'npy_partition.h.src'), + join('src', 'npysort', 'selection.c.src'), + join('src', 'common', 'npy_binsearch.h.src'), + join('src', 'npysort', 'binsearch.c.src'), + ] + + ####################################################################### + # _multiarray_umath module - umath part # + ####################################################################### + + def generate_umath_c(ext, build_dir): + target = join(build_dir, header_dir, '__umath_generated.c') + dir = os.path.dirname(target) + if not os.path.exists(dir): + os.makedirs(dir) + script = generate_umath_py + if newer(script, target): + with open(target, 'w') as f: + f.write(generate_umath.make_code(generate_umath.defdict, + generate_umath.__file__)) + return [] + + umath_src = [ + join('src', 'umath', 'umathmodule.c'), + join('src', 'umath', 'reduction.c'), + join('src', 'umath', 'funcs.inc.src'), + join('src', 'umath', 'simd.inc.src'), + join('src', 'umath', 'loops.h.src'), + join('src', 'umath', 'loops_utils.h.src'), + join('src', 'umath', 'loops.c.src'), + join('src', 'umath', 'loops_unary_fp.dispatch.c.src'), + join('src', 'umath', 'loops_arithm_fp.dispatch.c.src'), + join('src', 'umath', 'loops_arithmetic.dispatch.c.src'), + join('src', 'umath', 'loops_trigonometric.dispatch.c.src'), + join('src', 'umath', 'loops_exponent_log.dispatch.c.src'), + join('src', 'umath', 'matmul.h.src'), + join('src', 'umath', 
'matmul.c.src'), + join('src', 'umath', 'clip.h.src'), + join('src', 'umath', 'clip.c.src'), + join('src', 'umath', 'ufunc_object.c'), + join('src', 'umath', 'extobj.c'), + join('src', 'umath', 'scalarmath.c.src'), + join('src', 'umath', 'ufunc_type_resolution.c'), + join('src', 'umath', 'override.c'), + ] + + umath_deps = [ + generate_umath_py, + join('include', 'numpy', 'npy_math.h'), + join('include', 'numpy', 'halffloat.h'), + join('src', 'multiarray', 'common.h'), + join('src', 'multiarray', 'number.h'), + join('src', 'common', 'templ_common.h.src'), + join('src', 'umath', 'simd.inc.src'), + join('src', 'umath', 'override.h'), + join(codegen_dir, 'generate_ufunc_api.py'), + ] + + config.add_extension('_multiarray_umath', + sources=multiarray_src + umath_src + + common_src + + [generate_config_h, + generate_numpyconfig_h, + generate_numpy_api, + join(codegen_dir, 'generate_numpy_api.py'), + join('*.py'), + generate_umath_c, + generate_ufunc_api, + ], + depends=deps + multiarray_deps + umath_deps + + common_deps, + libraries=['npymath'], + extra_info=extra_info) + + ####################################################################### + # umath_tests module # + ####################################################################### + + config.add_extension('_umath_tests', sources=[ + join('src', 'umath', '_umath_tests.c.src'), + join('src', 'umath', '_umath_tests.dispatch.c'), + join('src', 'common', 'npy_cpu_features.c.src'), + ]) + + ####################################################################### + # custom rational dtype module # + ####################################################################### + + config.add_extension('_rational_tests', + sources=[join('src', 'umath', '_rational_tests.c.src')]) + + ####################################################################### + # struct_ufunc_test module # + ####################################################################### + + config.add_extension('_struct_ufunc_tests', + sources=[join('src', 'umath', '_struct_ufunc_tests.c.src')]) + + + ####################################################################### + # operand_flag_tests module # + ####################################################################### + + config.add_extension('_operand_flag_tests', + sources=[join('src', 'umath', '_operand_flag_tests.c.src')]) + + ####################################################################### + # SIMD module # + ####################################################################### + + config.add_extension('_simd', sources=[ + join('src', 'common', 'npy_cpu_features.c.src'), + join('src', '_simd', '_simd.c'), + join('src', '_simd', '_simd_inc.h.src'), + join('src', '_simd', '_simd_data.inc.src'), + join('src', '_simd', '_simd.dispatch.c.src'), + ], depends=[ + join('src', 'common', 'npy_cpu_dispatch.h'), + join('src', 'common', 'simd', 'simd.h'), + join('src', '_simd', '_simd.h'), + join('src', '_simd', '_simd_inc.h.src'), + join('src', '_simd', '_simd_data.inc.src'), + join('src', '_simd', '_simd_arg.inc'), + join('src', '_simd', '_simd_convert.inc'), + join('src', '_simd', '_simd_easyintrin.inc'), + join('src', '_simd', '_simd_vector.inc'), + ]) + + config.add_subpackage('tests') + config.add_data_dir('tests/data') + config.add_data_dir('tests/examples') + config.add_data_files('*.pyi') + + config.make_svn_version_py() + + return config + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(configuration=configuration) diff --git a/MLPY/Lib/site-packages/numpy/core/setup_common.py 
b/MLPY/Lib/site-packages/numpy/core/setup_common.py new file mode 100644 index 0000000000000000000000000000000000000000..15f57c0fbbdc1e14f7e7b6635e6e1492520416df --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/setup_common.py @@ -0,0 +1,451 @@ +# Code common to build tools +import sys +import warnings +import copy +import textwrap + +from numpy.distutils.misc_util import mingw32 + + +#------------------- +# Versioning support +#------------------- +# How to change C_API_VERSION ? +# - increase C_API_VERSION value +# - record the hash for the new C API with the cversions.py script +# and add the hash to cversions.txt +# The hash values are used to remind developers when the C API number was not +# updated - generates a MismatchCAPIWarning warning which is turned into an +# exception for released version. + +# Binary compatibility version number. This number is increased whenever the +# C-API is changed such that binary compatibility is broken, i.e. whenever a +# recompile of extension modules is needed. +C_ABI_VERSION = 0x01000009 + +# Minor API version. This number is increased whenever a change is made to the +# C-API -- whether it breaks binary compatibility or not. Some changes, such +# as adding a function pointer to the end of the function table, can be made +# without breaking binary compatibility. In this case, only the C_API_VERSION +# (*not* C_ABI_VERSION) would be increased. Whenever binary compatibility is +# broken, both C_API_VERSION and C_ABI_VERSION should be increased. +# +# 0x00000008 - 1.7.x +# 0x00000009 - 1.8.x +# 0x00000009 - 1.9.x +# 0x0000000a - 1.10.x +# 0x0000000a - 1.11.x +# 0x0000000a - 1.12.x +# 0x0000000b - 1.13.x +# 0x0000000c - 1.14.x +# 0x0000000c - 1.15.x +# 0x0000000d - 1.16.x +# 0x0000000d - 1.19.x +# 0x0000000e - 1.20.x +# 0x0000000e - 1.21.x +C_API_VERSION = 0x0000000e + +class MismatchCAPIWarning(Warning): + pass + +def is_released(config): + """Return True if a released version of numpy is detected.""" + from distutils.version import LooseVersion + + v = config.get_version('../_version.py') + if v is None: + raise ValueError("Could not get version") + pv = LooseVersion(vstring=v).version + if len(pv) > 3: + return False + return True + +def get_api_versions(apiversion, codegen_dir): + """ + Return current C API checksum and the recorded checksum. + + Return current C API checksum and the recorded checksum for the given + version of the C API version. + + """ + # Compute the hash of the current API as defined in the .txt files in + # code_generators + sys.path.insert(0, codegen_dir) + try: + m = __import__('genapi') + numpy_api = __import__('numpy_api') + curapi_hash = m.fullapi_hash(numpy_api.full_api) + apis_hash = m.get_versions_hash() + finally: + del sys.path[0] + + return curapi_hash, apis_hash[apiversion] + +def check_api_version(apiversion, codegen_dir): + """Emits a MismatchCAPIWarning if the C API version needs updating.""" + curapi_hash, api_hash = get_api_versions(apiversion, codegen_dir) + + # If different hash, it means that the api .txt files in + # codegen_dir have been updated without the API version being + # updated. Any modification in those .txt files should be reflected + # in the api and eventually abi versions. + # To compute the checksum of the current API, use numpy/core/cversions.py + if not curapi_hash == api_hash: + msg = ("API mismatch detected, the C API version " + "numbers have to be updated. 
Current C api version is %d, " + "with checksum %s, but recorded checksum for C API version %d " + "in core/codegen_dir/cversions.txt is %s. If functions were " + "added in the C API, you have to update C_API_VERSION in %s." + ) + warnings.warn(msg % (apiversion, curapi_hash, apiversion, api_hash, + __file__), + MismatchCAPIWarning, stacklevel=2) +# Mandatory functions: if not found, fail the build +MANDATORY_FUNCS = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", + "floor", "ceil", "sqrt", "log10", "log", "exp", "asin", + "acos", "atan", "fmod", 'modf', 'frexp', 'ldexp'] + +# Standard functions which may not be available and for which we have a +# replacement implementation. Note that some of these are C99 functions. +OPTIONAL_STDFUNCS = ["expm1", "log1p", "acosh", "asinh", "atanh", + "rint", "trunc", "exp2", "log2", "hypot", "atan2", "pow", + "copysign", "nextafter", "ftello", "fseeko", + "strtoll", "strtoull", "cbrt", "strtold_l", "fallocate", + "backtrace", "madvise"] + + +OPTIONAL_HEADERS = [ +# sse headers only enabled automatically on amd64/x32 builds + "xmmintrin.h", # SSE + "emmintrin.h", # SSE2 + "immintrin.h", # AVX + "features.h", # for glibc version linux + "xlocale.h", # see GH#8367 + "dlfcn.h", # dladdr + "sys/mman.h", #madvise +] + +# optional gcc compiler builtins and their call arguments and optional a +# required header and definition name (HAVE_ prepended) +# call arguments are required as the compiler will do strict signature checking +OPTIONAL_INTRINSICS = [("__builtin_isnan", '5.'), + ("__builtin_isinf", '5.'), + ("__builtin_isfinite", '5.'), + ("__builtin_bswap32", '5u'), + ("__builtin_bswap64", '5u'), + ("__builtin_expect", '5, 0'), + ("__builtin_mul_overflow", '5, 5, (int*)5'), + # MMX only needed for icc, but some clangs don't have it + ("_m_from_int64", '0', "emmintrin.h"), + ("_mm_load_ps", '(float*)0', "xmmintrin.h"), # SSE + ("_mm_prefetch", '(float*)0, _MM_HINT_NTA', + "xmmintrin.h"), # SSE + ("_mm_load_pd", '(double*)0', "emmintrin.h"), # SSE2 + ("__builtin_prefetch", "(float*)0, 0, 3"), + # check that the linker can handle avx + ("__asm__ volatile", '"vpand %xmm1, %xmm2, %xmm3"', + "stdio.h", "LINK_AVX"), + ("__asm__ volatile", '"vpand %ymm1, %ymm2, %ymm3"', + "stdio.h", "LINK_AVX2"), + ("__asm__ volatile", '"vpaddd %zmm1, %zmm2, %zmm3"', + "stdio.h", "LINK_AVX512F"), + ("__asm__ volatile", '"vfpclasspd $0x40, %zmm15, %k6\\n"\ + "vmovdqu8 %xmm0, %xmm1\\n"\ + "vpbroadcastmb2q %k0, %xmm0\\n"', + "stdio.h", "LINK_AVX512_SKX"), + ("__asm__ volatile", '"xgetbv"', "stdio.h", "XGETBV"), + ] + +# function attributes +# tested via "int %s %s(void *);" % (attribute, name) +# function name will be converted to HAVE_ preprocessor macro +OPTIONAL_FUNCTION_ATTRIBUTES = [('__attribute__((optimize("unroll-loops")))', + 'attribute_optimize_unroll_loops'), + ('__attribute__((optimize("O3")))', + 'attribute_optimize_opt_3'), + ('__attribute__((nonnull (1)))', + 'attribute_nonnull'), + ('__attribute__((target ("avx")))', + 'attribute_target_avx'), + ('__attribute__((target ("avx2")))', + 'attribute_target_avx2'), + ('__attribute__((target ("avx512f")))', + 'attribute_target_avx512f'), + ('__attribute__((target ("avx512f,avx512dq,avx512bw,avx512vl,avx512cd")))', + 'attribute_target_avx512_skx'), + ] + +# function attributes with intrinsics +# To ensure your compiler can compile avx intrinsics with just the attributes +# gcc 4.8.4 support attributes but not with intrisics +# tested via "#include<%s> int %s %s(void *){code; return 0;};" % (header, attribute, name, code) +# 
function name will be converted to HAVE_ preprocessor macro +# The _mm512_castps_si512 instruction is specific check for AVX-512F support +# in gcc-4.9 which is missing a subset of intrinsics. See +# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61878 +OPTIONAL_FUNCTION_ATTRIBUTES_WITH_INTRINSICS = [('__attribute__((target("avx2,fma")))', + 'attribute_target_avx2_with_intrinsics', + '__m256 temp = _mm256_set1_ps(1.0); temp = \ + _mm256_fmadd_ps(temp, temp, temp)', + 'immintrin.h'), + ('__attribute__((target("avx512f")))', + 'attribute_target_avx512f_with_intrinsics', + '__m512i temp = _mm512_castps_si512(_mm512_set1_ps(1.0))', + 'immintrin.h'), + ('__attribute__((target ("avx512f,avx512dq,avx512bw,avx512vl,avx512cd")))', + 'attribute_target_avx512_skx_with_intrinsics', + '__mmask8 temp = _mm512_fpclass_pd_mask(_mm512_set1_pd(1.0), 0x01);\ + __m512i unused_temp = \ + _mm512_castps_si512(_mm512_set1_ps(1.0));\ + _mm_mask_storeu_epi8(NULL, 0xFF, _mm_broadcastmb_epi64(temp))', + 'immintrin.h'), + ] + +# variable attributes tested via "int %s a" % attribute +OPTIONAL_VARIABLE_ATTRIBUTES = ["__thread", "__declspec(thread)"] + +# Subset of OPTIONAL_STDFUNCS which may already have HAVE_* defined by Python.h +OPTIONAL_STDFUNCS_MAYBE = [ + "expm1", "log1p", "acosh", "atanh", "asinh", "hypot", "copysign", + "ftello", "fseeko" + ] + +# C99 functions: float and long double versions +C99_FUNCS = [ + "sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", "floor", "ceil", + "rint", "trunc", "sqrt", "log10", "log", "log1p", "exp", "expm1", + "asin", "acos", "atan", "asinh", "acosh", "atanh", "hypot", "atan2", + "pow", "fmod", "modf", 'frexp', 'ldexp', "exp2", "log2", "copysign", + "nextafter", "cbrt" + ] +C99_FUNCS_SINGLE = [f + 'f' for f in C99_FUNCS] +C99_FUNCS_EXTENDED = [f + 'l' for f in C99_FUNCS] +C99_COMPLEX_TYPES = [ + 'complex double', 'complex float', 'complex long double' + ] +C99_COMPLEX_FUNCS = [ + "cabs", "cacos", "cacosh", "carg", "casin", "casinh", "catan", + "catanh", "ccos", "ccosh", "cexp", "cimag", "clog", "conj", "cpow", + "cproj", "creal", "csin", "csinh", "csqrt", "ctan", "ctanh" + ] + +def fname2def(name): + return "HAVE_%s" % name.upper() + +def sym2def(symbol): + define = symbol.replace(' ', '') + return define.upper() + +def type2def(symbol): + define = symbol.replace(' ', '_') + return define.upper() + +# Code to detect long double representation taken from MPFR m4 macro +def check_long_double_representation(cmd): + cmd._check_compiler() + body = LONG_DOUBLE_REPRESENTATION_SRC % {'type': 'long double'} + + # Disable whole program optimization (the default on vs2015, with python 3.5+) + # which generates intermediary object files and prevents checking the + # float representation. + if sys.platform == "win32" and not mingw32(): + try: + cmd.compiler.compile_options.remove("/GL") + except (AttributeError, ValueError): + pass + + # Disable multi-file interprocedural optimization in the Intel compiler on Linux + # which generates intermediary object files and prevents checking the + # float representation. 
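+    # (Editor's note: the branch below rewrites the Intel compiler command
+    # line without ' -ipo', so _compile emits a single plain object file
+    # whose bytes can be scanned for the long-double bit pattern.)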
+ elif (sys.platform != "win32" + and cmd.compiler.compiler_type.startswith('intel') + and '-ipo' in cmd.compiler.cc_exe): + newcompiler = cmd.compiler.cc_exe.replace(' -ipo', '') + cmd.compiler.set_executables( + compiler=newcompiler, + compiler_so=newcompiler, + compiler_cxx=newcompiler, + linker_exe=newcompiler, + linker_so=newcompiler + ' -shared' + ) + + # We need to use _compile because we need the object filename + src, obj = cmd._compile(body, None, None, 'c') + try: + ltype = long_double_representation(pyod(obj)) + return ltype + except ValueError: + # try linking to support CC="gcc -flto" or icc -ipo + # struct needs to be volatile so it isn't optimized away + # additionally "clang -flto" requires the foo struct to be used + body = body.replace('struct', 'volatile struct') + body += "int main(void) { return foo.before[0]; }\n" + src, obj = cmd._compile(body, None, None, 'c') + cmd.temp_files.append("_configtest") + cmd.compiler.link_executable([obj], "_configtest") + ltype = long_double_representation(pyod("_configtest")) + return ltype + finally: + cmd._clean() + +LONG_DOUBLE_REPRESENTATION_SRC = r""" +/* "before" is 16 bytes to ensure there's no padding between it and "x". + * We're not expecting any "long double" bigger than 16 bytes or with + * alignment requirements stricter than 16 bytes. */ +typedef %(type)s test_type; + +struct { + char before[16]; + test_type x; + char after[8]; +} foo = { + { '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', + '\001', '\043', '\105', '\147', '\211', '\253', '\315', '\357' }, + -123456789.0, + { '\376', '\334', '\272', '\230', '\166', '\124', '\062', '\020' } +}; +""" + +def pyod(filename): + """Python implementation of the od UNIX utility (od -b, more exactly). + + Parameters + ---------- + filename : str + name of the file to get the dump from. + + Returns + ------- + out : seq + list of lines of od output + + Notes + ----- + We only implement enough to get the necessary information for long double + representation, this is not intended as a compatible replacement for od. 
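+
+    A minimal editor-added doctest sketch (throwaway temp file;
+    ``delete=False`` so the file can be reopened on Windows):
+
+    >>> import tempfile
+    >>> with tempfile.NamedTemporaryFile(delete=False) as f:
+    ...     _ = f.write(b'AB')
+    >>> pyod(f.name)
+    ['0000000 101 102']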
+ """ + out = [] + with open(filename, 'rb') as fid: + yo2 = [oct(o)[2:] for o in fid.read()] + for i in range(0, len(yo2), 16): + line = ['%07d' % int(oct(i)[2:])] + line.extend(['%03d' % int(c) for c in yo2[i:i+16]]) + out.append(" ".join(line)) + return out + + +_BEFORE_SEQ = ['000', '000', '000', '000', '000', '000', '000', '000', + '001', '043', '105', '147', '211', '253', '315', '357'] +_AFTER_SEQ = ['376', '334', '272', '230', '166', '124', '062', '020'] + +_IEEE_DOUBLE_BE = ['301', '235', '157', '064', '124', '000', '000', '000'] +_IEEE_DOUBLE_LE = _IEEE_DOUBLE_BE[::-1] +_INTEL_EXTENDED_12B = ['000', '000', '000', '000', '240', '242', '171', '353', + '031', '300', '000', '000'] +_INTEL_EXTENDED_16B = ['000', '000', '000', '000', '240', '242', '171', '353', + '031', '300', '000', '000', '000', '000', '000', '000'] +_MOTOROLA_EXTENDED_12B = ['300', '031', '000', '000', '353', '171', + '242', '240', '000', '000', '000', '000'] +_IEEE_QUAD_PREC_BE = ['300', '031', '326', '363', '105', '100', '000', '000', + '000', '000', '000', '000', '000', '000', '000', '000'] +_IEEE_QUAD_PREC_LE = _IEEE_QUAD_PREC_BE[::-1] +_IBM_DOUBLE_DOUBLE_BE = (['301', '235', '157', '064', '124', '000', '000', '000'] + + ['000'] * 8) +_IBM_DOUBLE_DOUBLE_LE = (['000', '000', '000', '124', '064', '157', '235', '301'] + + ['000'] * 8) + +def long_double_representation(lines): + """Given a binary dump as given by GNU od -b, look for long double + representation.""" + + # Read contains a list of 32 items, each item is a byte (in octal + # representation, as a string). We 'slide' over the output until read is of + # the form before_seq + content + after_sequence, where content is the long double + # representation: + # - content is 12 bytes: 80 bits Intel representation + # - content is 16 bytes: 80 bits Intel representation (64 bits) or quad precision + # - content is 8 bytes: same as double (not implemented yet) + read = [''] * 32 + saw = None + for line in lines: + # we skip the first word, as od -b output an index at the beginning of + # each line + for w in line.split()[1:]: + read.pop(0) + read.append(w) + + # If the end of read is equal to the after_sequence, read contains + # the long double + if read[-8:] == _AFTER_SEQ: + saw = copy.copy(read) + # if the content was 12 bytes, we only have 32 - 8 - 12 = 12 + # "before" bytes. In other words the first 4 "before" bytes went + # past the sliding window. + if read[:12] == _BEFORE_SEQ[4:]: + if read[12:-8] == _INTEL_EXTENDED_12B: + return 'INTEL_EXTENDED_12_BYTES_LE' + if read[12:-8] == _MOTOROLA_EXTENDED_12B: + return 'MOTOROLA_EXTENDED_12_BYTES_BE' + # if the content was 16 bytes, we are left with 32-8-16 = 16 + # "before" bytes, so 8 went past the sliding window. 
+ elif read[:8] == _BEFORE_SEQ[8:]: + if read[8:-8] == _INTEL_EXTENDED_16B: + return 'INTEL_EXTENDED_16_BYTES_LE' + elif read[8:-8] == _IEEE_QUAD_PREC_BE: + return 'IEEE_QUAD_BE' + elif read[8:-8] == _IEEE_QUAD_PREC_LE: + return 'IEEE_QUAD_LE' + elif read[8:-8] == _IBM_DOUBLE_DOUBLE_LE: + return 'IBM_DOUBLE_DOUBLE_LE' + elif read[8:-8] == _IBM_DOUBLE_DOUBLE_BE: + return 'IBM_DOUBLE_DOUBLE_BE' + # if the content was 8 bytes, left with 32-8-8 = 16 bytes + elif read[:16] == _BEFORE_SEQ: + if read[16:-8] == _IEEE_DOUBLE_LE: + return 'IEEE_DOUBLE_LE' + elif read[16:-8] == _IEEE_DOUBLE_BE: + return 'IEEE_DOUBLE_BE' + + if saw is not None: + raise ValueError("Unrecognized format (%s)" % saw) + else: + # We never detected the after_sequence + raise ValueError("Could not lock sequences (%s)" % saw) + + +def check_for_right_shift_internal_compiler_error(cmd): + """ + On our arm CI, this fails with an internal compilation error + + The failure looks like the following, and can be reproduced on ARM64 GCC 5.4: + + <source>: In function 'right_shift': + <source>:4:20: internal compiler error: in expand_shift_1, at expmed.c:2349 + ip1[i] = ip1[i] >> in2; + ^ + Please submit a full bug report, + with preprocessed source if appropriate. + See <https://gcc.gnu.org/bugs/> for instructions. + Compiler returned: 1 + + This function returns True if this compiler bug is present, and we need to + turn off optimization for the function. + """ + cmd._check_compiler() + has_optimize = cmd.try_compile(textwrap.dedent("""\ + __attribute__((optimize("O3"))) void right_shift() {} + """), None, None) + if not has_optimize: + return False + + no_err = cmd.try_compile(textwrap.dedent("""\ + typedef long the_type; /* fails also for unsigned and long long */ + __attribute__((optimize("O3"))) void right_shift(the_type in2, the_type *ip1, int n) { + for (int i = 0; i < n; i++) { + if (in2 < (the_type)sizeof(the_type) * 8) { + ip1[i] = ip1[i] >> in2; + } + } + } + """), None, None) + return not no_err diff --git a/MLPY/Lib/site-packages/numpy/core/shape_base.py b/MLPY/Lib/site-packages/numpy/core/shape_base.py new file mode 100644 index 0000000000000000000000000000000000000000..9c58ff1fbfc386cee96b206cbda5a9cdaa1b0fdb --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/shape_base.py @@ -0,0 +1,900 @@ +__all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'block', 'hstack', + 'stack', 'vstack'] + +import functools +import itertools +import operator +import warnings + +from . import numeric as _nx +from . import overrides +from .multiarray import array, asanyarray, normalize_axis_index +from . import fromnumeric as _from_nx + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +def _atleast_1d_dispatcher(*arys): + return arys + + +@array_function_dispatch(_atleast_1d_dispatcher) +def atleast_1d(*arys): + """ + Convert inputs to arrays with at least one dimension. + + Scalar inputs are converted to 1-dimensional arrays, whilst + higher-dimensional inputs are preserved. + + Parameters + ---------- + arys1, arys2, ... : array_like + One or more input arrays. + + Returns + ------- + ret : ndarray + An array, or list of arrays, each with ``a.ndim >= 1``. + Copies are made only if necessary.
+ + See Also + -------- + atleast_2d, atleast_3d + + Examples + -------- + >>> np.atleast_1d(1.0) + array([1.]) + + >>> x = np.arange(9.0).reshape(3,3) + >>> np.atleast_1d(x) + array([[0., 1., 2.], + [3., 4., 5.], + [6., 7., 8.]]) + >>> np.atleast_1d(x) is x + True + + >>> np.atleast_1d(1, [3, 4]) + [array([1]), array([3, 4])] + + """ + res = [] + for ary in arys: + ary = asanyarray(ary) + if ary.ndim == 0: + result = ary.reshape(1) + else: + result = ary + res.append(result) + if len(res) == 1: + return res[0] + else: + return res + + +def _atleast_2d_dispatcher(*arys): + return arys + + +@array_function_dispatch(_atleast_2d_dispatcher) +def atleast_2d(*arys): + """ + View inputs as arrays with at least two dimensions. + + Parameters + ---------- + arys1, arys2, ... : array_like + One or more array-like sequences. Non-array inputs are converted + to arrays. Arrays that already have two or more dimensions are + preserved. + + Returns + ------- + res, res2, ... : ndarray + An array, or list of arrays, each with ``a.ndim >= 2``. + Copies are avoided where possible, and views with two or more + dimensions are returned. + + See Also + -------- + atleast_1d, atleast_3d + + Examples + -------- + >>> np.atleast_2d(3.0) + array([[3.]]) + + >>> x = np.arange(3.0) + >>> np.atleast_2d(x) + array([[0., 1., 2.]]) + >>> np.atleast_2d(x).base is x + True + + >>> np.atleast_2d(1, [1, 2], [[1, 2]]) + [array([[1]]), array([[1, 2]]), array([[1, 2]])] + + """ + res = [] + for ary in arys: + ary = asanyarray(ary) + if ary.ndim == 0: + result = ary.reshape(1, 1) + elif ary.ndim == 1: + result = ary[_nx.newaxis, :] + else: + result = ary + res.append(result) + if len(res) == 1: + return res[0] + else: + return res + + +def _atleast_3d_dispatcher(*arys): + return arys + + +@array_function_dispatch(_atleast_3d_dispatcher) +def atleast_3d(*arys): + """ + View inputs as arrays with at least three dimensions. + + Parameters + ---------- + arys1, arys2, ... : array_like + One or more array-like sequences. Non-array inputs are converted to + arrays. Arrays that already have three or more dimensions are + preserved. + + Returns + ------- + res1, res2, ... : ndarray + An array, or list of arrays, each with ``a.ndim >= 3``. Copies are + avoided where possible, and views with three or more dimensions are + returned. For example, a 1-D array of shape ``(N,)`` becomes a view + of shape ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes a + view of shape ``(M, N, 1)``. + + See Also + -------- + atleast_1d, atleast_2d + + Examples + -------- + >>> np.atleast_3d(3.0) + array([[[3.]]]) + + >>> x = np.arange(3.0) + >>> np.atleast_3d(x).shape + (1, 3, 1) + + >>> x = np.arange(12.0).reshape(4,3) + >>> np.atleast_3d(x).shape + (4, 3, 1) + >>> np.atleast_3d(x).base is x.base # x is a reshape, so not base itself + True + + >>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]): + ... print(arr, arr.shape) # doctest: +SKIP + ... 
+ [[[1] + [2]]] (1, 2, 1) + [[[1] + [2]]] (1, 2, 1) + [[[1 2]]] (1, 1, 2) + + """ + res = [] + for ary in arys: + ary = asanyarray(ary) + if ary.ndim == 0: + result = ary.reshape(1, 1, 1) + elif ary.ndim == 1: + result = ary[_nx.newaxis, :, _nx.newaxis] + elif ary.ndim == 2: + result = ary[:, :, _nx.newaxis] + else: + result = ary + res.append(result) + if len(res) == 1: + return res[0] + else: + return res + + +def _arrays_for_stack_dispatcher(arrays, stacklevel=4): + if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'): + warnings.warn('arrays to stack must be passed as a "sequence" type ' + 'such as list or tuple. Support for non-sequence ' + 'iterables such as generators is deprecated as of ' + 'NumPy 1.16 and will raise an error in the future.', + FutureWarning, stacklevel=stacklevel) + return () + return arrays + + +def _vhstack_dispatcher(tup): + return _arrays_for_stack_dispatcher(tup) + + +@array_function_dispatch(_vhstack_dispatcher) +def vstack(tup): + """ + Stack arrays in sequence vertically (row wise). + + This is equivalent to concatenation along the first axis after 1-D arrays + of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by + `vsplit`. + + This function makes most sense for arrays with up to 3 dimensions, for + instance pixel data with a height (first axis), width (second axis), and + r/g/b channels (third axis). The functions `concatenate`, `stack` and + `block` provide more general stacking and concatenation operations. + + Parameters + ---------- + tup : sequence of ndarrays + The arrays must have the same shape along all but the first axis. + 1-D arrays must have the same length. + + Returns + ------- + stacked : ndarray + The array formed by stacking the given arrays; it will be at least 2-D. + + See Also + -------- + concatenate : Join a sequence of arrays along an existing axis. + stack : Join a sequence of arrays along a new axis. + block : Assemble an nd-array from nested lists of blocks. + hstack : Stack arrays in sequence horizontally (column wise). + dstack : Stack arrays in sequence depth wise (along third axis). + column_stack : Stack 1-D arrays as columns into a 2-D array. + vsplit : Split an array into multiple sub-arrays vertically (row-wise). + + Examples + -------- + >>> a = np.array([1, 2, 3]) + >>> b = np.array([4, 5, 6]) + >>> np.vstack((a,b)) + array([[1, 2, 3], + [4, 5, 6]]) + + >>> a = np.array([[1], [2], [3]]) + >>> b = np.array([[4], [5], [6]]) + >>> np.vstack((a,b)) + array([[1], + [2], + [3], + [4], + [5], + [6]]) + + """ + if not overrides.ARRAY_FUNCTION_ENABLED: + # raise warning if necessary + _arrays_for_stack_dispatcher(tup, stacklevel=2) + arrs = atleast_2d(*tup) + if not isinstance(arrs, list): + arrs = [arrs] + return _nx.concatenate(arrs, 0) + + +@array_function_dispatch(_vhstack_dispatcher) +def hstack(tup): + """ + Stack arrays in sequence horizontally (column wise). + + This is equivalent to concatenation along the second axis, except for 1-D + arrays where it concatenates along the first axis. Rebuilds arrays divided + by `hsplit`. + + This function makes most sense for arrays with up to 3 dimensions, for + instance pixel data with a height (first axis), width (second axis), and + r/g/b channels (third axis). The functions `concatenate`, `stack` and + `block` provide more general stacking and concatenation operations.
+ + Parameters + ---------- + tup : sequence of ndarrays + The arrays must have the same shape along all but the second axis, + except 1-D arrays which can be any length. + + Returns + ------- + stacked : ndarray + The array formed by stacking the given arrays. + + See Also + -------- + concatenate : Join a sequence of arrays along an existing axis. + stack : Join a sequence of arrays along a new axis. + block : Assemble an nd-array from nested lists of blocks. + vstack : Stack arrays in sequence vertically (row wise). + dstack : Stack arrays in sequence depth wise (along third axis). + column_stack : Stack 1-D arrays as columns into a 2-D array. + hsplit : Split an array into multiple sub-arrays horizontally (column-wise). + + Examples + -------- + >>> a = np.array((1,2,3)) + >>> b = np.array((4,5,6)) + >>> np.hstack((a,b)) + array([1, 2, 3, 4, 5, 6]) + >>> a = np.array([[1],[2],[3]]) + >>> b = np.array([[4],[5],[6]]) + >>> np.hstack((a,b)) + array([[1, 4], + [2, 5], + [3, 6]]) + + """ + if not overrides.ARRAY_FUNCTION_ENABLED: + # raise warning if necessary + _arrays_for_stack_dispatcher(tup, stacklevel=2) + + arrs = atleast_1d(*tup) + if not isinstance(arrs, list): + arrs = [arrs] + # As a special case, dimension 0 of 1-dimensional arrays is "horizontal" + if arrs and arrs[0].ndim == 1: + return _nx.concatenate(arrs, 0) + else: + return _nx.concatenate(arrs, 1) + + +def _stack_dispatcher(arrays, axis=None, out=None): + arrays = _arrays_for_stack_dispatcher(arrays, stacklevel=6) + if out is not None: + # optimize for the typical case where only arrays is provided + arrays = list(arrays) + arrays.append(out) + return arrays + + +@array_function_dispatch(_stack_dispatcher) +def stack(arrays, axis=0, out=None): + """ + Join a sequence of arrays along a new axis. + + The ``axis`` parameter specifies the index of the new axis in the + dimensions of the result. For example, if ``axis=0`` it will be the first + dimension and if ``axis=-1`` it will be the last dimension. + + .. versionadded:: 1.10.0 + + Parameters + ---------- + arrays : sequence of array_like + Each array must have the same shape. + + axis : int, optional + The axis in the result array along which the input arrays are stacked. + + out : ndarray, optional + If provided, the destination to place the result. The shape must be + correct, matching that of what stack would have returned if no + out argument were specified. + + Returns + ------- + stacked : ndarray + The stacked array has one more dimension than the input arrays. + + See Also + -------- + concatenate : Join a sequence of arrays along an existing axis. + block : Assemble an nd-array from nested lists of blocks. + split : Split array into a list of multiple sub-arrays of equal size. 
+ + Examples + -------- + >>> arrays = [np.random.randn(3, 4) for _ in range(10)] + >>> np.stack(arrays, axis=0).shape + (10, 3, 4) + + >>> np.stack(arrays, axis=1).shape + (3, 10, 4) + + >>> np.stack(arrays, axis=2).shape + (3, 4, 10) + + >>> a = np.array([1, 2, 3]) + >>> b = np.array([4, 5, 6]) + >>> np.stack((a, b)) + array([[1, 2, 3], + [4, 5, 6]]) + + >>> np.stack((a, b), axis=-1) + array([[1, 4], + [2, 5], + [3, 6]]) + + """ + if not overrides.ARRAY_FUNCTION_ENABLED: + # raise warning if necessary + _arrays_for_stack_dispatcher(arrays, stacklevel=2) + + arrays = [asanyarray(arr) for arr in arrays] + if not arrays: + raise ValueError('need at least one array to stack') + + shapes = {arr.shape for arr in arrays} + if len(shapes) != 1: + raise ValueError('all input arrays must have the same shape') + + result_ndim = arrays[0].ndim + 1 + axis = normalize_axis_index(axis, result_ndim) + + sl = (slice(None),) * axis + (_nx.newaxis,) + expanded_arrays = [arr[sl] for arr in arrays] + return _nx.concatenate(expanded_arrays, axis=axis, out=out) + + +# Internal functions to eliminate the overhead of repeated dispatch in one of +# the two possible paths inside np.block. +# Use getattr to protect against __array_function__ being disabled. +_size = getattr(_from_nx.size, '__wrapped__', _from_nx.size) +_ndim = getattr(_from_nx.ndim, '__wrapped__', _from_nx.ndim) +_concatenate = getattr(_from_nx.concatenate, '__wrapped__', _from_nx.concatenate) + + +def _block_format_index(index): + """ + Convert a list of indices ``[0, 1, 2]`` into ``"arrays[0][1][2]"``. + """ + idx_str = ''.join('[{}]'.format(i) for i in index if i is not None) + return 'arrays' + idx_str + + +def _block_check_depths_match(arrays, parent_index=[]): + """ + Recursive function checking that the depths of nested lists in `arrays` + all match. Mismatch raises a ValueError as described in the block + docstring below. + + The entire index (rather than just the depth) needs to be calculated + for each innermost list, in case an error needs to be raised, so that + the index of the offending list can be printed as part of the error. + + Parameters + ---------- + arrays : nested list of arrays + The arrays to check + parent_index : list of int + The full index of `arrays` within the nested lists passed to + `_block_check_depths_match` at the top of the recursion. + + Returns + ------- + first_index : list of int + The full index of an element from the bottom of the nesting in + `arrays`. If any element at the bottom is an empty list, this will + refer to it, and the last index along the empty axis will be None. + max_arr_ndim : int + The maximum of the ndims of the arrays nested in `arrays`. + final_size : int + The number of elements in the final array. This is used to motivate + the choice of algorithm, based on benchmarking wisdom. + + """ + if type(arrays) is tuple: + # not strictly necessary, but saves us from: + # - more than one way to do things - no point treating tuples like + # lists + # - horribly confusing behaviour that results when tuples are + # treated like ndarray + raise TypeError( + '{} is a tuple. 
' + 'Only lists can be used to arrange blocks, and np.block does ' + 'not allow implicit conversion from tuple to ndarray.'.format( + _block_format_index(parent_index) + ) + ) + elif type(arrays) is list and len(arrays) > 0: + idxs_ndims = (_block_check_depths_match(arr, parent_index + [i]) + for i, arr in enumerate(arrays)) + + first_index, max_arr_ndim, final_size = next(idxs_ndims) + for index, ndim, size in idxs_ndims: + final_size += size + if ndim > max_arr_ndim: + max_arr_ndim = ndim + if len(index) != len(first_index): + raise ValueError( + "List depths are mismatched. First element was at depth " + "{}, but there is an element at depth {} ({})".format( + len(first_index), + len(index), + _block_format_index(index) + ) + ) + # propagate our flag that indicates an empty list at the bottom + if index[-1] is None: + first_index = index + + return first_index, max_arr_ndim, final_size + elif type(arrays) is list and len(arrays) == 0: + # We've 'bottomed out' on an empty list + return parent_index + [None], 0, 0 + else: + # We've 'bottomed out' - arrays is either a scalar or an array + size = _size(arrays) + return parent_index, _ndim(arrays), size + + +def _atleast_nd(a, ndim): + # Ensures `a` has at least `ndim` dimensions by prepending + # ones to `a.shape` as necessary + return array(a, ndmin=ndim, copy=False, subok=True) + + +def _accumulate(values): + return list(itertools.accumulate(values)) + + +def _concatenate_shapes(shapes, axis): + """Given array shapes, return the resulting shape and slice prefixes. + + These help in nested concatenation. + + Returns + ------- + shape : tuple of int + This tuple satisfies: + ``` + shape, _ = _concatenate_shapes([arr.shape for arr in arrs], axis) + shape == concatenate(arrs, axis).shape + ``` + + slice_prefixes : list of (slice(start, end),) tuples + For a list of arrays being concatenated, this gives, along `axis`, + the slice in the larger array that each one needs to be assigned + into. + + For example, the following holds: + ``` + ret = concatenate([a, b, c], axis) + _, (sl_a, sl_b, sl_c) = _concatenate_shapes([x.shape for x in (a, b, c)], axis) + + ret[(slice(None),) * axis + sl_a] == a + ret[(slice(None),) * axis + sl_b] == b + ret[(slice(None),) * axis + sl_c] == c + ``` + + These are called slice prefixes since they are used in the recursive + blocking algorithm to compute the left-most slices during the + recursion. Therefore, they must be prepended to the rest of the slice + that was computed deeper in the recursion. + + These are returned as tuples to ensure that they can quickly be added + to an existing slice tuple without creating a new tuple every time. + + """ + # Cache a result that will be reused.
+ shape_at_axis = [shape[axis] for shape in shapes] + + # Take a shape, any shape + first_shape = shapes[0] + first_shape_pre = first_shape[:axis] + first_shape_post = first_shape[axis+1:] + + if any(shape[:axis] != first_shape_pre or + shape[axis+1:] != first_shape_post for shape in shapes): + raise ValueError( + 'Mismatched array shapes in block along axis {}.'.format(axis)) + + shape = (first_shape_pre + (sum(shape_at_axis),) + first_shape[axis+1:]) + + offsets_at_axis = _accumulate(shape_at_axis) + slice_prefixes = [(slice(start, end),) + for start, end in zip([0] + offsets_at_axis, + offsets_at_axis)] + return shape, slice_prefixes + + +def _block_info_recursion(arrays, max_depth, result_ndim, depth=0): + """ + Returns the shape of the final array, along with a list + of slices and a list of arrays that can be used for assignment inside the + new array. + + Parameters + ---------- + arrays : nested list of arrays + The arrays to check + max_depth : int + The depth of the list nesting + result_ndim : int + The number of dimensions in the final array. + + Returns + ------- + shape : tuple of int + The shape that the final array will take on. + slices : list of tuple of slices + The slices into the full array required for assignment. These are + required to be prepended with ``(Ellipsis, )`` to obtain the correct + final index. + arrays : list of ndarray + The data to assign to each slice of the full array. + + """ + if depth < max_depth: + shapes, slices, arrays = zip( + *[_block_info_recursion(arr, max_depth, result_ndim, depth+1) + for arr in arrays]) + + axis = result_ndim - max_depth + depth + shape, slice_prefixes = _concatenate_shapes(shapes, axis) + + # Prepend the slice prefix and flatten the slices + slices = [slice_prefix + the_slice + for slice_prefix, inner_slices in zip(slice_prefixes, slices) + for the_slice in inner_slices] + + # Flatten the array list + arrays = functools.reduce(operator.add, arrays) + + return shape, slices, arrays + else: + # We've 'bottomed out' - arrays is either a scalar or an array + # type(arrays) is not list + # Return the slice and the array inside a list to be consistent with + # the recursive case. + arr = _atleast_nd(arrays, result_ndim) + return arr.shape, [()], [arr] + + +def _block(arrays, max_depth, result_ndim, depth=0): + """ + Internal implementation of block based on repeated concatenation. + `arrays` is the argument passed to + block. `max_depth` is the depth of nested lists within `arrays` and + `result_ndim` is the greatest of the dimensions of the arrays in + `arrays` and the depth of the lists in `arrays` (see block docstring + for details). + """ + if depth < max_depth: + arrs = [_block(arr, max_depth, result_ndim, depth+1) + for arr in arrays] + return _concatenate(arrs, axis=-(max_depth-depth)) + else: + # We've 'bottomed out' - arrays is either a scalar or an array + # type(arrays) is not list + return _atleast_nd(arrays, result_ndim) + + +def _block_dispatcher(arrays): + # Use type(...) is list to match the behavior of np.block(), which special + # cases list specifically rather than allowing for generic iterables or + # tuple. Also, we know that list.__array_function__ will never exist. + if type(arrays) is list: + for subarrays in arrays: + yield from _block_dispatcher(subarrays) + else: + yield arrays + + +@array_function_dispatch(_block_dispatcher) +def block(arrays): + """ + Assemble an nd-array from nested lists of blocks.
+ + Blocks in the innermost lists are concatenated (see `concatenate`) along + the last dimension (-1), then these are concatenated along the + second-last dimension (-2), and so on until the outermost list is reached. + + Blocks can be of any dimension, but will not be broadcasted using the normal + rules. Instead, leading axes of size 1 are inserted, to make ``block.ndim`` + the same for all blocks. This is primarily useful for working with scalars, + and means that code like ``np.block([v, 1])`` is valid, where + ``v.ndim == 1``. + + When the nested list is two levels deep, this allows block matrices to be + constructed from their components. + + .. versionadded:: 1.13.0 + + Parameters + ---------- + arrays : nested list of array_like or scalars (but not tuples) + If passed a single ndarray or scalar (a nested list of depth 0), this + is returned unmodified (and not copied). + + Element shapes must match along the appropriate axes (without + broadcasting), but leading 1s will be prepended to the shape as + necessary to make the dimensions match. + + Returns + ------- + block_array : ndarray + The array assembled from the given blocks. + + The dimensionality of the output is equal to the greatest of: + * the dimensionality of all the inputs + * the depth to which the input list is nested + + Raises + ------ + ValueError + * If list depths are mismatched - for instance, ``[[a, b], c]`` is + illegal, and should be spelt ``[[a, b], [c]]`` + * If lists are empty - for instance, ``[[a, b], []]`` + + See Also + -------- + concatenate : Join a sequence of arrays along an existing axis. + stack : Join a sequence of arrays along a new axis. + vstack : Stack arrays in sequence vertically (row wise). + hstack : Stack arrays in sequence horizontally (column wise). + dstack : Stack arrays in sequence depth wise (along third axis). + column_stack : Stack 1-D arrays as columns into a 2-D array. + vsplit : Split an array into multiple sub-arrays vertically (row-wise). + + Notes + ----- + + When called with only scalars, ``np.block`` is equivalent to an ndarray + call. So ``np.block([[1, 2], [3, 4]])`` is equivalent to + ``np.array([[1, 2], [3, 4]])``. + + This function does not enforce that the blocks lie on a fixed grid. + ``np.block([[a, b], [c, d]])`` is not restricted to arrays of the form:: + + AAAbb + AAAbb + cccDD + + but is also allowed to produce, for some ``a, b, c, d``:: + + AAAbb + AAAbb + cDDDD + + Since concatenation happens along the last axis first, `block` is *not* + capable of producing the following directly:: + + AAAbb + cccbb + cccDD + + Matlab's "square bracket stacking", ``[A, B, ...; p, q, ...]``, is + equivalent to ``np.block([[A, B, ...], [p, q, ...]])``. + + Examples + -------- + The most common use of this function is to build a block matrix + + >>> A = np.eye(2) * 2 + >>> B = np.eye(3) * 3 + >>> np.block([ + ... [A, np.zeros((2, 3))], + ... [np.ones((3, 2)), B ] + ...
]) + array([[2., 0., 0., 0., 0.], + [0., 2., 0., 0., 0.], + [1., 1., 3., 0., 0.], + [1., 1., 0., 3., 0.], + [1., 1., 0., 0., 3.]]) + + With a list of depth 1, `block` can be used as `hstack` + + >>> np.block([1, 2, 3]) # hstack([1, 2, 3]) + array([1, 2, 3]) + + >>> a = np.array([1, 2, 3]) + >>> b = np.array([4, 5, 6]) + >>> np.block([a, b, 10]) # hstack([a, b, 10]) + array([ 1, 2, 3, 4, 5, 6, 10]) + + >>> A = np.ones((2, 2), int) + >>> B = 2 * A + >>> np.block([A, B]) # hstack([A, B]) + array([[1, 1, 2, 2], + [1, 1, 2, 2]]) + + With a list of depth 2, `block` can be used in place of `vstack`: + + >>> a = np.array([1, 2, 3]) + >>> b = np.array([4, 5, 6]) + >>> np.block([[a], [b]]) # vstack([a, b]) + array([[1, 2, 3], + [4, 5, 6]]) + + >>> A = np.ones((2, 2), int) + >>> B = 2 * A + >>> np.block([[A], [B]]) # vstack([A, B]) + array([[1, 1], + [1, 1], + [2, 2], + [2, 2]]) + + It can also be used in place of `atleast_1d` and `atleast_2d` + + >>> a = np.array(0) + >>> b = np.array([1]) + >>> np.block([a]) # atleast_1d(a) + array([0]) + >>> np.block([b]) # atleast_1d(b) + array([1]) + + >>> np.block([[a]]) # atleast_2d(a) + array([[0]]) + >>> np.block([[b]]) # atleast_2d(b) + array([[1]]) + + + """ + arrays, list_ndim, result_ndim, final_size = _block_setup(arrays) + + # It was found through benchmarking that making an array of final size + # around 256x256 was faster by straight concatenation on an + # i7-7700HQ processor with dual-channel 2400 MHz RAM. + # The dtype used did not seem to matter much. + # + # A 2D array using repeated concatenation requires 2 copies of the array. + # + # The fastest algorithm will depend on the ratio of CPU power to memory + # speed. + # One can monitor the results of the benchmark + # https://pv.github.io/numpy-bench/#bench_shape_base.Block2D.time_block2d + # to tune this parameter until a C version of the `_block_info_recursion` + # algorithm is implemented, which would likely be faster than the python + # version. + if list_ndim * final_size > (2 * 512 * 512): + return _block_slicing(arrays, list_ndim, result_ndim) + else: + return _block_concatenate(arrays, list_ndim, result_ndim) + + +# These helper functions are mostly used for testing. +# They allow us to write tests that directly call `_block_slicing` +# or `_block_concatenate` without having to block large arrays just to +# force the size heuristic to trigger the desired path. +def _block_setup(arrays): + """ + Returns + (`arrays`, list_ndim, result_ndim, final_size) + """ + bottom_index, arr_ndim, final_size = _block_check_depths_match(arrays) + list_ndim = len(bottom_index) + if bottom_index and bottom_index[-1] is None: + raise ValueError( + 'List at {} cannot be empty'.format( + _block_format_index(bottom_index) + ) + ) + result_ndim = max(arr_ndim, list_ndim) + return arrays, list_ndim, result_ndim, final_size + + +def _block_slicing(arrays, list_ndim, result_ndim): + shape, slices, arrays = _block_info_recursion( + arrays, list_ndim, result_ndim) + dtype = _nx.result_type(*[arr.dtype for arr in arrays]) + + # Prefer F order only in the case that all input arrays are F-contiguous + # and not all of them are also C-contiguous + F_order = all(arr.flags['F_CONTIGUOUS'] for arr in arrays) + C_order = all(arr.flags['C_CONTIGUOUS'] for arr in arrays) + order = 'F' if F_order and not C_order else 'C' + result = _nx.empty(shape=shape, dtype=dtype, order=order) + # Note: In a C implementation, the function + # PyArray_CreateMultiSortedStridePerm could be used for more advanced + # guessing of the desired order.
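+ # A small illustration of the order heuristic above (a sketch): an input + # such as np.ones((3, 2)).T is F-contiguous but not C-contiguous, so a + # list of such blocks gives F_order=True and C_order=False, and the + # result is allocated with order='F'; any other combination falls back + # to the default 'C' order.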
+ + for the_slice, arr in zip(slices, arrays): + result[(Ellipsis,) + the_slice] = arr + return result + + +def _block_concatenate(arrays, list_ndim, result_ndim): + result = _block(arrays, list_ndim, result_ndim) + if list_ndim == 0: + # Catch an edge case where _block returns a view because + # `arrays` is a single numpy array and not a list of numpy arrays. + # This might copy scalars or lists twice, but this isn't a likely + # use case for those interested in performance. + result = result.copy() + return result diff --git a/MLPY/Lib/site-packages/numpy/core/shape_base.pyi b/MLPY/Lib/site-packages/numpy/core/shape_base.pyi new file mode 100644 index 0000000000000000000000000000000000000000..322244487773f18de8ca30afb5ca8f36f5e3a957 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/shape_base.pyi @@ -0,0 +1,39 @@ +import sys +from typing import TypeVar, overload, List, Sequence + +from numpy import ndarray +from numpy.typing import ArrayLike + +if sys.version_info >= (3, 8): + from typing import SupportsIndex +else: + from typing_extensions import SupportsIndex + +_ArrayType = TypeVar("_ArrayType", bound=ndarray) + +@overload +def atleast_1d(__arys: ArrayLike) -> ndarray: ... +@overload +def atleast_1d(*arys: ArrayLike) -> List[ndarray]: ... + +@overload +def atleast_2d(__arys: ArrayLike) -> ndarray: ... +@overload +def atleast_2d(*arys: ArrayLike) -> List[ndarray]: ... + +@overload +def atleast_3d(__arys: ArrayLike) -> ndarray: ... +@overload +def atleast_3d(*arys: ArrayLike) -> List[ndarray]: ... + +def vstack(tup: Sequence[ArrayLike]) -> ndarray: ... +def hstack(tup: Sequence[ArrayLike]) -> ndarray: ... +@overload +def stack( + arrays: Sequence[ArrayLike], axis: SupportsIndex = ..., out: None = ... +) -> ndarray: ... +@overload +def stack( + arrays: Sequence[ArrayLike], axis: SupportsIndex = ..., out: _ArrayType = ... +) -> _ArrayType: ... +def block(arrays: ArrayLike) -> ndarray: ...
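+ +# Usage sketch (illustrative only, not part of the stubs): the two `stack` +# overloads above let a type checker infer the return type from the `out` +# argument: +# +# import numpy as np +# r1 = np.stack([np.ones(3), np.zeros(3)]) # out=None overload -> ndarray +# buf = np.empty((2, 3)) +# r2 = np.stack([np.ones(3), np.zeros(3)], out=buf) # second overload -> type of buf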
diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__init__.py b/MLPY/Lib/site-packages/numpy/core/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7298ce4486d8fa2286ef80aade7e013247ddf02f Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/_locales.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/_locales.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9abcf9d51f2eea8701aa5e55f3c776dec0b79360 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/_locales.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test__exceptions.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test__exceptions.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9096585e04d21de1e23726878eb42be378f01665 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test__exceptions.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_abc.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_abc.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f8401dcc898bf520ebc5623077427dcf431dd193 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_abc.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_api.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_api.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c06ee7495e97a2593718cc048900dda3a88443b Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_api.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_argparse.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_argparse.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c284603fbb78e5502620f4c65bf3e2bee78cb5d Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_argparse.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_array_coercion.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_array_coercion.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2eb3ec0b78efe3bbe855949eb1825262233a9e2e Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_array_coercion.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_arraymethod.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_arraymethod.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..419701f6c79560db3ee5b24fdebef8f5433458fa Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_arraymethod.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_arrayprint.cpython-39.pyc 
b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_arrayprint.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..17d5f4c40a6efb6f3d5c66f0ddeff2fba65fa1e2 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_arrayprint.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_casting_unittests.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_casting_unittests.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd2db55f169e79423e8a4f9aa101fdb899efdfeb Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_casting_unittests.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_conversion_utils.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_conversion_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..716bf5ed9b42c2cda06a599d7bbacc67aad1d389 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_conversion_utils.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_cpu_dispatcher.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_cpu_dispatcher.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd88a091573d15384cae81ea9bd1c4b9cd1d3b82 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_cpu_dispatcher.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_cpu_features.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_cpu_features.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d84a998164a932528b9aef26fb872ada2dbb5877 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_cpu_features.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_cython.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_cython.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b200735ffe83bce1e09725377ac95aefc74d16f3 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_cython.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_datetime.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_datetime.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2cefa5044a7c04d61b973f17bbe10bd224bead88 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_datetime.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_defchararray.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_defchararray.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..717bb647b61f6af8fa7bf7c8600ad5fb7128f3f2 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_defchararray.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_deprecations.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_deprecations.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2b220b227b23be018cd2b5f033175ea2605d6f2 Binary files /dev/null and 
b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_deprecations.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_dtype.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_dtype.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b5cc1e21f28ff3c2809105fde3270870385f969c Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_dtype.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_einsum.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_einsum.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..65d8ec9afd2de82f064db203b62a93a5242a374f Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_einsum.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_errstate.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_errstate.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa4dc6eaa250490da6e9eeaf7161f33a3c0dc2cd Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_errstate.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_extint128.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_extint128.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca8e2c904bcee81a445f20a2bb48093bd92d9de7 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_extint128.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_function_base.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_function_base.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e6ae4b741d4254bc10182596b5a6ed7ce00529a Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_function_base.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_getlimits.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_getlimits.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..54dc6cc657d027741a0001cfdf5bdbf73ca7ba17 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_getlimits.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_half.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_half.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fbc6c9597c43eb9805aad7d626fef4f6aa0b4515 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_half.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_indexerrors.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_indexerrors.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee58fc9a270e8783bdf18c0f2cec0862b0f84da5 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_indexerrors.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_indexing.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_indexing.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..7a9063cac0159a09fb46bfddd0ba03079b64ce3f Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_indexing.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_item_selection.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_item_selection.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..969f07c18274c46464614430a69baef90813d269 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_item_selection.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_longdouble.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_longdouble.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..411cb8ef41127525e06e1887cb1f2516674d63fa Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_longdouble.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_machar.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_machar.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91f81c9bdf7e5774ba4edc90467662bccdbb054e Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_machar.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_mem_overlap.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_mem_overlap.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..79e48fdd98898e140dc1516b7ca07e4ebf612e97 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_mem_overlap.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_memmap.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_memmap.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90e289140838df9b74e4a69855e342ba4bb4c9c7 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_memmap.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_multiarray.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_multiarray.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..79f5383824afaf8a09e4d97cef8f4e1d7133a246 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_multiarray.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_nditer.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_nditer.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..611700de92bbad77ee498719fdcec03aff5a6131 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_nditer.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_numeric.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_numeric.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..861de36cccb32484a33c3b468380f5d7820b37a9 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_numeric.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_numerictypes.cpython-39.pyc 
b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_numerictypes.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..235d21a3209a933667a24c240026e2babf7b9287 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_numerictypes.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_overrides.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_overrides.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8fadaf8c1a8fecdfc7f73899a43e1e7fd5e485fb Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_overrides.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_print.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_print.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e231688860a74c2f4c6cb49a0e2c5b7cc64355a Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_print.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_protocols.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_protocols.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a1e89ce6f7bab89bfd5fd40977fba75606331f8 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_protocols.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_records.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_records.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3fbccdfa15bcb5334d0455fe3d74db861e9fd812 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_records.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_regression.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_regression.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba1172e93234962324165f0cc328d213e53210a4 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_regression.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_scalar_ctors.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_scalar_ctors.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ba57b142ff346f26387189f4f2a7e4328eb2523 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_scalar_ctors.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_scalar_methods.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_scalar_methods.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d04a1bcd9487b145c16026f1dd45fa329f9cf1a2 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_scalar_methods.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_scalarbuffer.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_scalarbuffer.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a0815211c0da90b2ce17d5e35a461fca0764a547 Binary files /dev/null and 
b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_scalarbuffer.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_scalarinherit.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_scalarinherit.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eea7444c821ac7b031ffa551f9c345edd8f99382 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_scalarinherit.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_scalarmath.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_scalarmath.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..086de4b61e5bfeab8fab4e09d64c6f7798ccddc8 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_scalarmath.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_scalarprint.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_scalarprint.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..21a60e049bbd750ec7aa0775e557ec5ace7d6616 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_scalarprint.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_shape_base.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_shape_base.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..51f10f5683abfe4661224b07a2889342bbe792c9 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_shape_base.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_simd.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_simd.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..03cac0b0c5f8f340cd9758ca4ab0770864ef3d73 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_simd.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_simd_module.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_simd_module.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6bfaaf74c37832dec3f5017b3063e9f72599a070 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_simd_module.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_ufunc.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_ufunc.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed2b4f1fdb1bfa9037d1f865f8f9712e6943db0c Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_ufunc.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_umath.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_umath.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..08c4d31ffb726a78dbd7db2a3c3a591b68309c79 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_umath.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_umath_accuracy.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_umath_accuracy.cpython-39.pyc new file mode 100644 
index 0000000000000000000000000000000000000000..de7dfd1334e08c480c795183871336cd230e26aa Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_umath_accuracy.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_umath_complex.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_umath_complex.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..17313df8555a5147060cedfb0943011e04e08192 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_umath_complex.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_unicode.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_unicode.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eb05346f24c11ba0b62ddeadcf8704e2f470c52f Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/__pycache__/test_unicode.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/_locales.py b/MLPY/Lib/site-packages/numpy/core/tests/_locales.py new file mode 100644 index 0000000000000000000000000000000000000000..2fd36d643a5cd1641cc26f281f1fc486bc13c6d1 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/_locales.py @@ -0,0 +1,74 @@ +"""Provide a class for testing in a French (comma decimal point) locale. + +""" +import sys +import locale + +import pytest + +__all__ = ['CommaDecimalPointLocale'] + + +def find_comma_decimal_point_locale(): + """See if the platform has a locale that uses a comma as decimal point. + + Find a locale that uses a comma instead of a period as the + decimal point. + + Returns + ------- + old_locale : str + The locale in effect when the function was called. + new_locale : {str, None} + First comma-decimal-point locale found, None if none was found. + + """ + if sys.platform == 'win32': + locales = ['FRENCH'] + else: + locales = ['fr_FR', 'fr_FR.UTF-8', 'fi_FI', 'fi_FI.UTF-8'] + + old_locale = locale.getlocale(locale.LC_NUMERIC) + new_locale = None + try: + for loc in locales: + try: + locale.setlocale(locale.LC_NUMERIC, loc) + new_locale = loc + break + except locale.Error: + pass + finally: + locale.setlocale(locale.LC_NUMERIC, locale=old_locale) + return old_locale, new_locale + + +class CommaDecimalPointLocale: + """Sets LC_NUMERIC to a locale with comma as decimal point. + + Classes derived from this class have setup and teardown methods that run + tests with locale.LC_NUMERIC set to a locale where commas (',') are used as + the decimal point instead of periods ('.'). On exit the locale is restored + to the initial locale. It also serves as a context manager with the same + effect. If no such locale is available, the test is skipped. + + ..
versionadded:: 1.15.0 + + """ + (cur_locale, tst_locale) = find_comma_decimal_point_locale() + + def setup(self): + if self.tst_locale is None: + pytest.skip("No French locale available") + locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale) + + def teardown(self): + locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale) + + def __enter__(self): + if self.tst_locale is None: + pytest.skip("No French locale available") + locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale) + + def __exit__(self, type, value, traceback): + locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale) diff --git a/MLPY/Lib/site-packages/numpy/core/tests/data/astype_copy.pkl b/MLPY/Lib/site-packages/numpy/core/tests/data/astype_copy.pkl new file mode 100644 index 0000000000000000000000000000000000000000..45694ae001c4a103365ff9fd5ae2da0dba3c11f6 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/data/astype_copy.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9564b309cbf3441ff0a6e4468fddaca46230fab34f15c77d87025a455bdf59d9 +size 716 diff --git a/MLPY/Lib/site-packages/numpy/core/tests/data/recarray_from_file.fits b/MLPY/Lib/site-packages/numpy/core/tests/data/recarray_from_file.fits new file mode 100644 index 0000000000000000000000000000000000000000..ca48ee85153645a7510e201d574e9b119c089dce Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/data/recarray_from_file.fits differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/data/umath-validation-set-README.txt b/MLPY/Lib/site-packages/numpy/core/tests/data/umath-validation-set-README.txt new file mode 100644 index 0000000000000000000000000000000000000000..70ac062ef6999022d5f256d22d825af96f802e2e --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/data/umath-validation-set-README.txt @@ -0,0 +1,15 @@ +Steps to validate transcendental functions: +1) Add a file 'umath-validation-set-<ufuncname>.txt', where <ufuncname> is the name + of the function in NumPy you want to validate +2) The file should contain 4 columns: dtype,input,expected output,ulperror + a. dtype: one of np.float16, np.float32, np.float64 + b. input: floating point input to ufunc in hex. Example: 0x414570a4 + represents 12.340000152587890625 + c. expected output: floating point output for the corresponding input in hex. + This should be computed using a high(er) precision library and then rounded to + the same format as the input. + d. ulperror: expected maximum ulp error of the function. This + should be the same across all rows of the same dtype. Otherwise, the function is + tested for the maximum ulp error among all entries of that dtype. +3) Add the file umath-validation-set-<ufuncname>.txt to the test file + test_umath_accuracy.py which will then validate your ufunc.
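+ +As an aside (a sketch, not part of the validation harness), the hex bit +patterns can be decoded to floats in Python with NumPy, e.g. for np.float32: + + import numpy as np + # reinterpret the 32-bit pattern 0x414570a4 as an IEEE-754 float32 + np.array([0x414570a4], dtype=np.uint32).view(np.float32)[0] # -> 12.34 + +(use np.uint16 with np.float16 and np.uint64 with np.float64 for the other +dtypes).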
diff --git a/MLPY/Lib/site-packages/numpy/core/tests/data/umath-validation-set-cos.csv b/MLPY/Lib/site-packages/numpy/core/tests/data/umath-validation-set-cos.csv new file mode 100644 index 0000000000000000000000000000000000000000..c78479e4c821c67dcb28b57db5062abb6367f14a --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/data/umath-validation-set-cos.csv @@ -0,0 +1,665 @@ +dtype,input,output,ulperrortol +## +ve denormals ## +np.float32,0x004b4716,0x3f800000,2 +np.float32,0x007b2490,0x3f800000,2 +np.float32,0x007c99fa,0x3f800000,2 +np.float32,0x00734a0c,0x3f800000,2 +np.float32,0x0070de24,0x3f800000,2 +np.float32,0x007fffff,0x3f800000,2 +np.float32,0x00000001,0x3f800000,2 +## -ve denormals ## +np.float32,0x80495d65,0x3f800000,2 +np.float32,0x806894f6,0x3f800000,2 +np.float32,0x80555a76,0x3f800000,2 +np.float32,0x804e1fb8,0x3f800000,2 +np.float32,0x80687de9,0x3f800000,2 +np.float32,0x807fffff,0x3f800000,2 +np.float32,0x80000001,0x3f800000,2 +## +/-0.0f, +/-FLT_MIN +/-FLT_MAX ## +np.float32,0x00000000,0x3f800000,2 +np.float32,0x80000000,0x3f800000,2 +np.float32,0x00800000,0x3f800000,2 +np.float32,0x80800000,0x3f800000,2 +## 1.00f + 0x00000001 ## +np.float32,0x3f800000,0x3f0a5140,2 +np.float32,0x3f800001,0x3f0a513f,2 +np.float32,0x3f800002,0x3f0a513d,2 +np.float32,0xc090a8b0,0xbe4332ce,2 +np.float32,0x41ce3184,0x3f4d1de1,2 +np.float32,0xc1d85848,0xbeaa8980,2 +np.float32,0x402b8820,0xbf653aa3,2 +np.float32,0x42b4e454,0xbf4a338b,2 +np.float32,0x42a67a60,0x3c58202e,2 +np.float32,0x41d92388,0xbed987c7,2 +np.float32,0x422dd66c,0x3f5dcab3,2 +np.float32,0xc28f5be6,0xbf5688d8,2 +np.float32,0x41ab2674,0xbf53aa3b,2 +np.float32,0x3f490fdb,0x3f3504f3,2 +np.float32,0xbf490fdb,0x3f3504f3,2 +np.float32,0x3fc90fdb,0xb33bbd2e,2 +np.float32,0xbfc90fdb,0xb33bbd2e,2 +np.float32,0x40490fdb,0xbf800000,2 +np.float32,0xc0490fdb,0xbf800000,2 +np.float32,0x3fc90fdb,0xb33bbd2e,2 +np.float32,0xbfc90fdb,0xb33bbd2e,2 +np.float32,0x40490fdb,0xbf800000,2 +np.float32,0xc0490fdb,0xbf800000,2 +np.float32,0x40c90fdb,0x3f800000,2 +np.float32,0xc0c90fdb,0x3f800000,2 +np.float32,0x4016cbe4,0xbf3504f3,2 +np.float32,0xc016cbe4,0xbf3504f3,2 +np.float32,0x4096cbe4,0x324cde2e,2 +np.float32,0xc096cbe4,0x324cde2e,2 +np.float32,0x4116cbe4,0xbf800000,2 +np.float32,0xc116cbe4,0xbf800000,2 +np.float32,0x40490fdb,0xbf800000,2 +np.float32,0xc0490fdb,0xbf800000,2 +np.float32,0x40c90fdb,0x3f800000,2 +np.float32,0xc0c90fdb,0x3f800000,2 +np.float32,0x41490fdb,0x3f800000,2 +np.float32,0xc1490fdb,0x3f800000,2 +np.float32,0x407b53d2,0xbf3504f1,2 +np.float32,0xc07b53d2,0xbf3504f1,2 +np.float32,0x40fb53d2,0xb4b5563d,2 +np.float32,0xc0fb53d2,0xb4b5563d,2 +np.float32,0x417b53d2,0xbf800000,2 +np.float32,0xc17b53d2,0xbf800000,2 +np.float32,0x4096cbe4,0x324cde2e,2 +np.float32,0xc096cbe4,0x324cde2e,2 +np.float32,0x4116cbe4,0xbf800000,2 +np.float32,0xc116cbe4,0xbf800000,2 +np.float32,0x4196cbe4,0x3f800000,2 +np.float32,0xc196cbe4,0x3f800000,2 +np.float32,0x40afede0,0x3f3504f7,2 +np.float32,0xc0afede0,0x3f3504f7,2 +np.float32,0x412fede0,0x353222c4,2 +np.float32,0xc12fede0,0x353222c4,2 +np.float32,0x41afede0,0xbf800000,2 +np.float32,0xc1afede0,0xbf800000,2 +np.float32,0x40c90fdb,0x3f800000,2 +np.float32,0xc0c90fdb,0x3f800000,2 +np.float32,0x41490fdb,0x3f800000,2 +np.float32,0xc1490fdb,0x3f800000,2 +np.float32,0x41c90fdb,0x3f800000,2 +np.float32,0xc1c90fdb,0x3f800000,2 +np.float32,0x40e231d6,0x3f3504f3,2 +np.float32,0xc0e231d6,0x3f3504f3,2 +np.float32,0x416231d6,0xb319a6a2,2 +np.float32,0xc16231d6,0xb319a6a2,2 +np.float32,0x41e231d6,0xbf800000,2 
+np.float32,0xc1e231d6,0xbf800000,2 +np.float32,0x40fb53d2,0xb4b5563d,2 +np.float32,0xc0fb53d2,0xb4b5563d,2 +np.float32,0x417b53d2,0xbf800000,2 +np.float32,0xc17b53d2,0xbf800000,2 +np.float32,0x41fb53d2,0x3f800000,2 +np.float32,0xc1fb53d2,0x3f800000,2 +np.float32,0x410a3ae7,0xbf3504fb,2 +np.float32,0xc10a3ae7,0xbf3504fb,2 +np.float32,0x418a3ae7,0x35b08908,2 +np.float32,0xc18a3ae7,0x35b08908,2 +np.float32,0x420a3ae7,0xbf800000,2 +np.float32,0xc20a3ae7,0xbf800000,2 +np.float32,0x4116cbe4,0xbf800000,2 +np.float32,0xc116cbe4,0xbf800000,2 +np.float32,0x4196cbe4,0x3f800000,2 +np.float32,0xc196cbe4,0x3f800000,2 +np.float32,0x4216cbe4,0x3f800000,2 +np.float32,0xc216cbe4,0x3f800000,2 +np.float32,0x41235ce2,0xbf3504ef,2 +np.float32,0xc1235ce2,0xbf3504ef,2 +np.float32,0x41a35ce2,0xb53889b6,2 +np.float32,0xc1a35ce2,0xb53889b6,2 +np.float32,0x42235ce2,0xbf800000,2 +np.float32,0xc2235ce2,0xbf800000,2 +np.float32,0x412fede0,0x353222c4,2 +np.float32,0xc12fede0,0x353222c4,2 +np.float32,0x41afede0,0xbf800000,2 +np.float32,0xc1afede0,0xbf800000,2 +np.float32,0x422fede0,0x3f800000,2 +np.float32,0xc22fede0,0x3f800000,2 +np.float32,0x413c7edd,0x3f3504f4,2 +np.float32,0xc13c7edd,0x3f3504f4,2 +np.float32,0x41bc7edd,0x33800add,2 +np.float32,0xc1bc7edd,0x33800add,2 +np.float32,0x423c7edd,0xbf800000,2 +np.float32,0xc23c7edd,0xbf800000,2 +np.float32,0x41490fdb,0x3f800000,2 +np.float32,0xc1490fdb,0x3f800000,2 +np.float32,0x41c90fdb,0x3f800000,2 +np.float32,0xc1c90fdb,0x3f800000,2 +np.float32,0x42490fdb,0x3f800000,2 +np.float32,0xc2490fdb,0x3f800000,2 +np.float32,0x4155a0d9,0x3f3504eb,2 +np.float32,0xc155a0d9,0x3f3504eb,2 +np.float32,0x41d5a0d9,0xb5b3bc81,2 +np.float32,0xc1d5a0d9,0xb5b3bc81,2 +np.float32,0x4255a0d9,0xbf800000,2 +np.float32,0xc255a0d9,0xbf800000,2 +np.float32,0x416231d6,0xb319a6a2,2 +np.float32,0xc16231d6,0xb319a6a2,2 +np.float32,0x41e231d6,0xbf800000,2 +np.float32,0xc1e231d6,0xbf800000,2 +np.float32,0x426231d6,0x3f800000,2 +np.float32,0xc26231d6,0x3f800000,2 +np.float32,0x416ec2d4,0xbf3504f7,2 +np.float32,0xc16ec2d4,0xbf3504f7,2 +np.float32,0x41eec2d4,0x353ef0a7,2 +np.float32,0xc1eec2d4,0x353ef0a7,2 +np.float32,0x426ec2d4,0xbf800000,2 +np.float32,0xc26ec2d4,0xbf800000,2 +np.float32,0x417b53d2,0xbf800000,2 +np.float32,0xc17b53d2,0xbf800000,2 +np.float32,0x41fb53d2,0x3f800000,2 +np.float32,0xc1fb53d2,0x3f800000,2 +np.float32,0x427b53d2,0x3f800000,2 +np.float32,0xc27b53d2,0x3f800000,2 +np.float32,0x4183f268,0xbf3504e7,2 +np.float32,0xc183f268,0xbf3504e7,2 +np.float32,0x4203f268,0xb6059a13,2 +np.float32,0xc203f268,0xb6059a13,2 +np.float32,0x4283f268,0xbf800000,2 +np.float32,0xc283f268,0xbf800000,2 +np.float32,0x418a3ae7,0x35b08908,2 +np.float32,0xc18a3ae7,0x35b08908,2 +np.float32,0x420a3ae7,0xbf800000,2 +np.float32,0xc20a3ae7,0xbf800000,2 +np.float32,0x428a3ae7,0x3f800000,2 +np.float32,0xc28a3ae7,0x3f800000,2 +np.float32,0x41908365,0x3f3504f0,2 +np.float32,0xc1908365,0x3f3504f0,2 +np.float32,0x42108365,0xb512200d,2 +np.float32,0xc2108365,0xb512200d,2 +np.float32,0x42908365,0xbf800000,2 +np.float32,0xc2908365,0xbf800000,2 +np.float32,0x4196cbe4,0x3f800000,2 +np.float32,0xc196cbe4,0x3f800000,2 +np.float32,0x4216cbe4,0x3f800000,2 +np.float32,0xc216cbe4,0x3f800000,2 +np.float32,0x4296cbe4,0x3f800000,2 +np.float32,0xc296cbe4,0x3f800000,2 +np.float32,0x419d1463,0x3f3504ef,2 +np.float32,0xc19d1463,0x3f3504ef,2 +np.float32,0x421d1463,0xb5455799,2 +np.float32,0xc21d1463,0xb5455799,2 +np.float32,0x429d1463,0xbf800000,2 +np.float32,0xc29d1463,0xbf800000,2 +np.float32,0x41a35ce2,0xb53889b6,2 
+np.float32,0xc1a35ce2,0xb53889b6,2 +np.float32,0x42235ce2,0xbf800000,2 +np.float32,0xc2235ce2,0xbf800000,2 +np.float32,0x42a35ce2,0x3f800000,2 +np.float32,0xc2a35ce2,0x3f800000,2 +np.float32,0x41a9a561,0xbf3504ff,2 +np.float32,0xc1a9a561,0xbf3504ff,2 +np.float32,0x4229a561,0x360733d0,2 +np.float32,0xc229a561,0x360733d0,2 +np.float32,0x42a9a561,0xbf800000,2 +np.float32,0xc2a9a561,0xbf800000,2 +np.float32,0x41afede0,0xbf800000,2 +np.float32,0xc1afede0,0xbf800000,2 +np.float32,0x422fede0,0x3f800000,2 +np.float32,0xc22fede0,0x3f800000,2 +np.float32,0x42afede0,0x3f800000,2 +np.float32,0xc2afede0,0x3f800000,2 +np.float32,0x41b6365e,0xbf3504f6,2 +np.float32,0xc1b6365e,0xbf3504f6,2 +np.float32,0x4236365e,0x350bb91c,2 +np.float32,0xc236365e,0x350bb91c,2 +np.float32,0x42b6365e,0xbf800000,2 +np.float32,0xc2b6365e,0xbf800000,2 +np.float32,0x41bc7edd,0x33800add,2 +np.float32,0xc1bc7edd,0x33800add,2 +np.float32,0x423c7edd,0xbf800000,2 +np.float32,0xc23c7edd,0xbf800000,2 +np.float32,0x42bc7edd,0x3f800000,2 +np.float32,0xc2bc7edd,0x3f800000,2 +np.float32,0x41c2c75c,0x3f3504f8,2 +np.float32,0xc1c2c75c,0x3f3504f8,2 +np.float32,0x4242c75c,0x354bbe8a,2 +np.float32,0xc242c75c,0x354bbe8a,2 +np.float32,0x42c2c75c,0xbf800000,2 +np.float32,0xc2c2c75c,0xbf800000,2 +np.float32,0x41c90fdb,0x3f800000,2 +np.float32,0xc1c90fdb,0x3f800000,2 +np.float32,0x42490fdb,0x3f800000,2 +np.float32,0xc2490fdb,0x3f800000,2 +np.float32,0x42c90fdb,0x3f800000,2 +np.float32,0xc2c90fdb,0x3f800000,2 +np.float32,0x41cf585a,0x3f3504e7,2 +np.float32,0xc1cf585a,0x3f3504e7,2 +np.float32,0x424f585a,0xb608cd8c,2 +np.float32,0xc24f585a,0xb608cd8c,2 +np.float32,0x42cf585a,0xbf800000,2 +np.float32,0xc2cf585a,0xbf800000,2 +np.float32,0x41d5a0d9,0xb5b3bc81,2 +np.float32,0xc1d5a0d9,0xb5b3bc81,2 +np.float32,0x4255a0d9,0xbf800000,2 +np.float32,0xc255a0d9,0xbf800000,2 +np.float32,0x42d5a0d9,0x3f800000,2 +np.float32,0xc2d5a0d9,0x3f800000,2 +np.float32,0x41dbe958,0xbf350507,2 +np.float32,0xc1dbe958,0xbf350507,2 +np.float32,0x425be958,0x365eab75,2 +np.float32,0xc25be958,0x365eab75,2 +np.float32,0x42dbe958,0xbf800000,2 +np.float32,0xc2dbe958,0xbf800000,2 +np.float32,0x41e231d6,0xbf800000,2 +np.float32,0xc1e231d6,0xbf800000,2 +np.float32,0x426231d6,0x3f800000,2 +np.float32,0xc26231d6,0x3f800000,2 +np.float32,0x42e231d6,0x3f800000,2 +np.float32,0xc2e231d6,0x3f800000,2 +np.float32,0x41e87a55,0xbf3504ef,2 +np.float32,0xc1e87a55,0xbf3504ef,2 +np.float32,0x42687a55,0xb552257b,2 +np.float32,0xc2687a55,0xb552257b,2 +np.float32,0x42e87a55,0xbf800000,2 +np.float32,0xc2e87a55,0xbf800000,2 +np.float32,0x41eec2d4,0x353ef0a7,2 +np.float32,0xc1eec2d4,0x353ef0a7,2 +np.float32,0x426ec2d4,0xbf800000,2 +np.float32,0xc26ec2d4,0xbf800000,2 +np.float32,0x42eec2d4,0x3f800000,2 +np.float32,0xc2eec2d4,0x3f800000,2 +np.float32,0x41f50b53,0x3f3504ff,2 +np.float32,0xc1f50b53,0x3f3504ff,2 +np.float32,0x42750b53,0x360a6748,2 +np.float32,0xc2750b53,0x360a6748,2 +np.float32,0x42f50b53,0xbf800000,2 +np.float32,0xc2f50b53,0xbf800000,2 +np.float32,0x41fb53d2,0x3f800000,2 +np.float32,0xc1fb53d2,0x3f800000,2 +np.float32,0x427b53d2,0x3f800000,2 +np.float32,0xc27b53d2,0x3f800000,2 +np.float32,0x42fb53d2,0x3f800000,2 +np.float32,0xc2fb53d2,0x3f800000,2 +np.float32,0x4200ce28,0x3f3504f6,2 +np.float32,0xc200ce28,0x3f3504f6,2 +np.float32,0x4280ce28,0x34fdd672,2 +np.float32,0xc280ce28,0x34fdd672,2 +np.float32,0x4300ce28,0xbf800000,2 +np.float32,0xc300ce28,0xbf800000,2 +np.float32,0x4203f268,0xb6059a13,2 +np.float32,0xc203f268,0xb6059a13,2 +np.float32,0x4283f268,0xbf800000,2 
+np.float32,0xc283f268,0xbf800000,2 +np.float32,0x4303f268,0x3f800000,2 +np.float32,0xc303f268,0x3f800000,2 +np.float32,0x420716a7,0xbf3504f8,2 +np.float32,0xc20716a7,0xbf3504f8,2 +np.float32,0x428716a7,0x35588c6d,2 +np.float32,0xc28716a7,0x35588c6d,2 +np.float32,0x430716a7,0xbf800000,2 +np.float32,0xc30716a7,0xbf800000,2 +np.float32,0x420a3ae7,0xbf800000,2 +np.float32,0xc20a3ae7,0xbf800000,2 +np.float32,0x428a3ae7,0x3f800000,2 +np.float32,0xc28a3ae7,0x3f800000,2 +np.float32,0x430a3ae7,0x3f800000,2 +np.float32,0xc30a3ae7,0x3f800000,2 +np.float32,0x420d5f26,0xbf3504e7,2 +np.float32,0xc20d5f26,0xbf3504e7,2 +np.float32,0x428d5f26,0xb60c0105,2 +np.float32,0xc28d5f26,0xb60c0105,2 +np.float32,0x430d5f26,0xbf800000,2 +np.float32,0xc30d5f26,0xbf800000,2 +np.float32,0x42108365,0xb512200d,2 +np.float32,0xc2108365,0xb512200d,2 +np.float32,0x42908365,0xbf800000,2 +np.float32,0xc2908365,0xbf800000,2 +np.float32,0x43108365,0x3f800000,2 +np.float32,0xc3108365,0x3f800000,2 +np.float32,0x4213a7a5,0x3f350507,2 +np.float32,0xc213a7a5,0x3f350507,2 +np.float32,0x4293a7a5,0x3661deee,2 +np.float32,0xc293a7a5,0x3661deee,2 +np.float32,0x4313a7a5,0xbf800000,2 +np.float32,0xc313a7a5,0xbf800000,2 +np.float32,0x4216cbe4,0x3f800000,2 +np.float32,0xc216cbe4,0x3f800000,2 +np.float32,0x4296cbe4,0x3f800000,2 +np.float32,0xc296cbe4,0x3f800000,2 +np.float32,0x4316cbe4,0x3f800000,2 +np.float32,0xc316cbe4,0x3f800000,2 +np.float32,0x4219f024,0x3f3504d8,2 +np.float32,0xc219f024,0x3f3504d8,2 +np.float32,0x4299f024,0xb69bde6c,2 +np.float32,0xc299f024,0xb69bde6c,2 +np.float32,0x4319f024,0xbf800000,2 +np.float32,0xc319f024,0xbf800000,2 +np.float32,0x421d1463,0xb5455799,2 +np.float32,0xc21d1463,0xb5455799,2 +np.float32,0x429d1463,0xbf800000,2 +np.float32,0xc29d1463,0xbf800000,2 +np.float32,0x431d1463,0x3f800000,2 +np.float32,0xc31d1463,0x3f800000,2 +np.float32,0x422038a3,0xbf350516,2 +np.float32,0xc22038a3,0xbf350516,2 +np.float32,0x42a038a3,0x36c6cd61,2 +np.float32,0xc2a038a3,0x36c6cd61,2 +np.float32,0x432038a3,0xbf800000,2 +np.float32,0xc32038a3,0xbf800000,2 +np.float32,0x42235ce2,0xbf800000,2 +np.float32,0xc2235ce2,0xbf800000,2 +np.float32,0x42a35ce2,0x3f800000,2 +np.float32,0xc2a35ce2,0x3f800000,2 +np.float32,0x43235ce2,0x3f800000,2 +np.float32,0xc3235ce2,0x3f800000,2 +np.float32,0x42268121,0xbf3504f6,2 +np.float32,0xc2268121,0xbf3504f6,2 +np.float32,0x42a68121,0x34e43aac,2 +np.float32,0xc2a68121,0x34e43aac,2 +np.float32,0x43268121,0xbf800000,2 +np.float32,0xc3268121,0xbf800000,2 +np.float32,0x4229a561,0x360733d0,2 +np.float32,0xc229a561,0x360733d0,2 +np.float32,0x42a9a561,0xbf800000,2 +np.float32,0xc2a9a561,0xbf800000,2 +np.float32,0x4329a561,0x3f800000,2 +np.float32,0xc329a561,0x3f800000,2 +np.float32,0x422cc9a0,0x3f3504f8,2 +np.float32,0xc22cc9a0,0x3f3504f8,2 +np.float32,0x42acc9a0,0x35655a50,2 +np.float32,0xc2acc9a0,0x35655a50,2 +np.float32,0x432cc9a0,0xbf800000,2 +np.float32,0xc32cc9a0,0xbf800000,2 +np.float32,0x422fede0,0x3f800000,2 +np.float32,0xc22fede0,0x3f800000,2 +np.float32,0x42afede0,0x3f800000,2 +np.float32,0xc2afede0,0x3f800000,2 +np.float32,0x432fede0,0x3f800000,2 +np.float32,0xc32fede0,0x3f800000,2 +np.float32,0x4233121f,0x3f3504e7,2 +np.float32,0xc233121f,0x3f3504e7,2 +np.float32,0x42b3121f,0xb60f347d,2 +np.float32,0xc2b3121f,0xb60f347d,2 +np.float32,0x4333121f,0xbf800000,2 +np.float32,0xc333121f,0xbf800000,2 +np.float32,0x4236365e,0x350bb91c,2 +np.float32,0xc236365e,0x350bb91c,2 +np.float32,0x42b6365e,0xbf800000,2 +np.float32,0xc2b6365e,0xbf800000,2 +np.float32,0x4336365e,0x3f800000,2 
+np.float32,0xc336365e,0x3f800000,2 +np.float32,0x42395a9e,0xbf350507,2 +np.float32,0xc2395a9e,0xbf350507,2 +np.float32,0x42b95a9e,0x36651267,2 +np.float32,0xc2b95a9e,0x36651267,2 +np.float32,0x43395a9e,0xbf800000,2 +np.float32,0xc3395a9e,0xbf800000,2 +np.float32,0x423c7edd,0xbf800000,2 +np.float32,0xc23c7edd,0xbf800000,2 +np.float32,0x42bc7edd,0x3f800000,2 +np.float32,0xc2bc7edd,0x3f800000,2 +np.float32,0x433c7edd,0x3f800000,2 +np.float32,0xc33c7edd,0x3f800000,2 +np.float32,0x423fa31d,0xbf3504d7,2 +np.float32,0xc23fa31d,0xbf3504d7,2 +np.float32,0x42bfa31d,0xb69d7828,2 +np.float32,0xc2bfa31d,0xb69d7828,2 +np.float32,0x433fa31d,0xbf800000,2 +np.float32,0xc33fa31d,0xbf800000,2 +np.float32,0x4242c75c,0x354bbe8a,2 +np.float32,0xc242c75c,0x354bbe8a,2 +np.float32,0x42c2c75c,0xbf800000,2 +np.float32,0xc2c2c75c,0xbf800000,2 +np.float32,0x4342c75c,0x3f800000,2 +np.float32,0xc342c75c,0x3f800000,2 +np.float32,0x4245eb9c,0x3f350517,2 +np.float32,0xc245eb9c,0x3f350517,2 +np.float32,0x42c5eb9c,0x36c8671d,2 +np.float32,0xc2c5eb9c,0x36c8671d,2 +np.float32,0x4345eb9c,0xbf800000,2 +np.float32,0xc345eb9c,0xbf800000,2 +np.float32,0x42490fdb,0x3f800000,2 +np.float32,0xc2490fdb,0x3f800000,2 +np.float32,0x42c90fdb,0x3f800000,2 +np.float32,0xc2c90fdb,0x3f800000,2 +np.float32,0x43490fdb,0x3f800000,2 +np.float32,0xc3490fdb,0x3f800000,2 +np.float32,0x424c341a,0x3f3504f5,2 +np.float32,0xc24c341a,0x3f3504f5,2 +np.float32,0x42cc341a,0x34ca9ee6,2 +np.float32,0xc2cc341a,0x34ca9ee6,2 +np.float32,0x434c341a,0xbf800000,2 +np.float32,0xc34c341a,0xbf800000,2 +np.float32,0x424f585a,0xb608cd8c,2 +np.float32,0xc24f585a,0xb608cd8c,2 +np.float32,0x42cf585a,0xbf800000,2 +np.float32,0xc2cf585a,0xbf800000,2 +np.float32,0x434f585a,0x3f800000,2 +np.float32,0xc34f585a,0x3f800000,2 +np.float32,0x42527c99,0xbf3504f9,2 +np.float32,0xc2527c99,0xbf3504f9,2 +np.float32,0x42d27c99,0x35722833,2 +np.float32,0xc2d27c99,0x35722833,2 +np.float32,0x43527c99,0xbf800000,2 +np.float32,0xc3527c99,0xbf800000,2 +np.float32,0x4255a0d9,0xbf800000,2 +np.float32,0xc255a0d9,0xbf800000,2 +np.float32,0x42d5a0d9,0x3f800000,2 +np.float32,0xc2d5a0d9,0x3f800000,2 +np.float32,0x4355a0d9,0x3f800000,2 +np.float32,0xc355a0d9,0x3f800000,2 +np.float32,0x4258c518,0xbf3504e6,2 +np.float32,0xc258c518,0xbf3504e6,2 +np.float32,0x42d8c518,0xb61267f6,2 +np.float32,0xc2d8c518,0xb61267f6,2 +np.float32,0x4358c518,0xbf800000,2 +np.float32,0xc358c518,0xbf800000,2 +np.float32,0x425be958,0x365eab75,2 +np.float32,0xc25be958,0x365eab75,2 +np.float32,0x42dbe958,0xbf800000,2 +np.float32,0xc2dbe958,0xbf800000,2 +np.float32,0x435be958,0x3f800000,2 +np.float32,0xc35be958,0x3f800000,2 +np.float32,0x425f0d97,0x3f350508,2 +np.float32,0xc25f0d97,0x3f350508,2 +np.float32,0x42df0d97,0x366845e0,2 +np.float32,0xc2df0d97,0x366845e0,2 +np.float32,0x435f0d97,0xbf800000,2 +np.float32,0xc35f0d97,0xbf800000,2 +np.float32,0x426231d6,0x3f800000,2 +np.float32,0xc26231d6,0x3f800000,2 +np.float32,0x42e231d6,0x3f800000,2 +np.float32,0xc2e231d6,0x3f800000,2 +np.float32,0x436231d6,0x3f800000,2 +np.float32,0xc36231d6,0x3f800000,2 +np.float32,0x42655616,0x3f3504d7,2 +np.float32,0xc2655616,0x3f3504d7,2 +np.float32,0x42e55616,0xb69f11e5,2 +np.float32,0xc2e55616,0xb69f11e5,2 +np.float32,0x43655616,0xbf800000,2 +np.float32,0xc3655616,0xbf800000,2 +np.float32,0x42687a55,0xb552257b,2 +np.float32,0xc2687a55,0xb552257b,2 +np.float32,0x42e87a55,0xbf800000,2 +np.float32,0xc2e87a55,0xbf800000,2 +np.float32,0x43687a55,0x3f800000,2 +np.float32,0xc3687a55,0x3f800000,2 +np.float32,0x426b9e95,0xbf350517,2 
+np.float32,0xc26b9e95,0xbf350517,2 +np.float32,0x42eb9e95,0x36ca00d9,2 +np.float32,0xc2eb9e95,0x36ca00d9,2 +np.float32,0x436b9e95,0xbf800000,2 +np.float32,0xc36b9e95,0xbf800000,2 +np.float32,0x426ec2d4,0xbf800000,2 +np.float32,0xc26ec2d4,0xbf800000,2 +np.float32,0x42eec2d4,0x3f800000,2 +np.float32,0xc2eec2d4,0x3f800000,2 +np.float32,0x436ec2d4,0x3f800000,2 +np.float32,0xc36ec2d4,0x3f800000,2 +np.float32,0x4271e713,0xbf3504f5,2 +np.float32,0xc271e713,0xbf3504f5,2 +np.float32,0x42f1e713,0x34b10321,2 +np.float32,0xc2f1e713,0x34b10321,2 +np.float32,0x4371e713,0xbf800000,2 +np.float32,0xc371e713,0xbf800000,2 +np.float32,0x42750b53,0x360a6748,2 +np.float32,0xc2750b53,0x360a6748,2 +np.float32,0x42f50b53,0xbf800000,2 +np.float32,0xc2f50b53,0xbf800000,2 +np.float32,0x43750b53,0x3f800000,2 +np.float32,0xc3750b53,0x3f800000,2 +np.float32,0x42782f92,0x3f3504f9,2 +np.float32,0xc2782f92,0x3f3504f9,2 +np.float32,0x42f82f92,0x357ef616,2 +np.float32,0xc2f82f92,0x357ef616,2 +np.float32,0x43782f92,0xbf800000,2 +np.float32,0xc3782f92,0xbf800000,2 +np.float32,0x427b53d2,0x3f800000,2 +np.float32,0xc27b53d2,0x3f800000,2 +np.float32,0x42fb53d2,0x3f800000,2 +np.float32,0xc2fb53d2,0x3f800000,2 +np.float32,0x437b53d2,0x3f800000,2 +np.float32,0xc37b53d2,0x3f800000,2 +np.float32,0x427e7811,0x3f3504e6,2 +np.float32,0xc27e7811,0x3f3504e6,2 +np.float32,0x42fe7811,0xb6159b6f,2 +np.float32,0xc2fe7811,0xb6159b6f,2 +np.float32,0x437e7811,0xbf800000,2 +np.float32,0xc37e7811,0xbf800000,2 +np.float32,0x4280ce28,0x34fdd672,2 +np.float32,0xc280ce28,0x34fdd672,2 +np.float32,0x4300ce28,0xbf800000,2 +np.float32,0xc300ce28,0xbf800000,2 +np.float32,0x4380ce28,0x3f800000,2 +np.float32,0xc380ce28,0x3f800000,2 +np.float32,0x42826048,0xbf350508,2 +np.float32,0xc2826048,0xbf350508,2 +np.float32,0x43026048,0x366b7958,2 +np.float32,0xc3026048,0x366b7958,2 +np.float32,0x43826048,0xbf800000,2 +np.float32,0xc3826048,0xbf800000,2 +np.float32,0x4283f268,0xbf800000,2 +np.float32,0xc283f268,0xbf800000,2 +np.float32,0x4303f268,0x3f800000,2 +np.float32,0xc303f268,0x3f800000,2 +np.float32,0x4383f268,0x3f800000,2 +np.float32,0xc383f268,0x3f800000,2 +np.float32,0x42858487,0xbf350504,2 +np.float32,0xc2858487,0xbf350504,2 +np.float32,0x43058487,0x363ea8be,2 +np.float32,0xc3058487,0x363ea8be,2 +np.float32,0x43858487,0xbf800000,2 +np.float32,0xc3858487,0xbf800000,2 +np.float32,0x428716a7,0x35588c6d,2 +np.float32,0xc28716a7,0x35588c6d,2 +np.float32,0x430716a7,0xbf800000,2 +np.float32,0xc30716a7,0xbf800000,2 +np.float32,0x438716a7,0x3f800000,2 +np.float32,0xc38716a7,0x3f800000,2 +np.float32,0x4288a8c7,0x3f350517,2 +np.float32,0xc288a8c7,0x3f350517,2 +np.float32,0x4308a8c7,0x36cb9a96,2 +np.float32,0xc308a8c7,0x36cb9a96,2 +np.float32,0x4388a8c7,0xbf800000,2 +np.float32,0xc388a8c7,0xbf800000,2 +np.float32,0x428a3ae7,0x3f800000,2 +np.float32,0xc28a3ae7,0x3f800000,2 +np.float32,0x430a3ae7,0x3f800000,2 +np.float32,0xc30a3ae7,0x3f800000,2 +np.float32,0x438a3ae7,0x3f800000,2 +np.float32,0xc38a3ae7,0x3f800000,2 +np.float32,0x428bcd06,0x3f3504f5,2 +np.float32,0xc28bcd06,0x3f3504f5,2 +np.float32,0x430bcd06,0x3497675b,2 +np.float32,0xc30bcd06,0x3497675b,2 +np.float32,0x438bcd06,0xbf800000,2 +np.float32,0xc38bcd06,0xbf800000,2 +np.float32,0x428d5f26,0xb60c0105,2 +np.float32,0xc28d5f26,0xb60c0105,2 +np.float32,0x430d5f26,0xbf800000,2 +np.float32,0xc30d5f26,0xbf800000,2 +np.float32,0x438d5f26,0x3f800000,2 +np.float32,0xc38d5f26,0x3f800000,2 +np.float32,0x428ef146,0xbf350526,2 +np.float32,0xc28ef146,0xbf350526,2 +np.float32,0x430ef146,0x3710bc40,2 
+np.float32,0xc30ef146,0x3710bc40,2 +np.float32,0x438ef146,0xbf800000,2 +np.float32,0xc38ef146,0xbf800000,2 +np.float32,0x42908365,0xbf800000,2 +np.float32,0xc2908365,0xbf800000,2 +np.float32,0x43108365,0x3f800000,2 +np.float32,0xc3108365,0x3f800000,2 +np.float32,0x43908365,0x3f800000,2 +np.float32,0xc3908365,0x3f800000,2 +np.float32,0x42921585,0xbf3504e6,2 +np.float32,0xc2921585,0xbf3504e6,2 +np.float32,0x43121585,0xb618cee8,2 +np.float32,0xc3121585,0xb618cee8,2 +np.float32,0x43921585,0xbf800000,2 +np.float32,0xc3921585,0xbf800000,2 +np.float32,0x4293a7a5,0x3661deee,2 +np.float32,0xc293a7a5,0x3661deee,2 +np.float32,0x4313a7a5,0xbf800000,2 +np.float32,0xc313a7a5,0xbf800000,2 +np.float32,0x4393a7a5,0x3f800000,2 +np.float32,0xc393a7a5,0x3f800000,2 +np.float32,0x429539c5,0x3f350536,2 +np.float32,0xc29539c5,0x3f350536,2 +np.float32,0x431539c5,0x373bab34,2 +np.float32,0xc31539c5,0x373bab34,2 +np.float32,0x439539c5,0xbf800000,2 +np.float32,0xc39539c5,0xbf800000,2 +np.float32,0x4296cbe4,0x3f800000,2 +np.float32,0xc296cbe4,0x3f800000,2 +np.float32,0x4316cbe4,0x3f800000,2 +np.float32,0xc316cbe4,0x3f800000,2 +np.float32,0x4396cbe4,0x3f800000,2 +np.float32,0xc396cbe4,0x3f800000,2 +np.float32,0x42985e04,0x3f3504d7,2 +np.float32,0xc2985e04,0x3f3504d7,2 +np.float32,0x43185e04,0xb6a2455d,2 +np.float32,0xc3185e04,0xb6a2455d,2 +np.float32,0x43985e04,0xbf800000,2 +np.float32,0xc3985e04,0xbf800000,2 +np.float32,0x4299f024,0xb69bde6c,2 +np.float32,0xc299f024,0xb69bde6c,2 +np.float32,0x4319f024,0xbf800000,2 +np.float32,0xc319f024,0xbf800000,2 +np.float32,0x4399f024,0x3f800000,2 +np.float32,0xc399f024,0x3f800000,2 +np.float32,0x429b8243,0xbf3504ea,2 +np.float32,0xc29b8243,0xbf3504ea,2 +np.float32,0x431b8243,0xb5cb2eb8,2 +np.float32,0xc31b8243,0xb5cb2eb8,2 +np.float32,0x439b8243,0xbf800000,2 +np.float32,0xc39b8243,0xbf800000,2 +np.float32,0x435b2047,0x3f3504c1,2 +np.float32,0x42a038a2,0xb5e4ca7e,2 +np.float32,0x432038a2,0xbf800000,2 +np.float32,0x4345eb9b,0xbf800000,2 +np.float32,0x42c5eb9b,0xb5de638c,2 +np.float32,0x42eb9e94,0xb5d7fc9b,2 +np.float32,0x4350ea79,0x3631dadb,2 +np.float32,0x42dbe957,0xbf800000,2 +np.float32,0x425be957,0xb505522a,2 +np.float32,0x435be957,0x3f800000,2 +np.float32,0x46027eb2,0x3e7d94c9,2 +np.float32,0x4477baed,0xbe7f1824,2 +np.float32,0x454b8024,0x3e7f5268,2 +np.float32,0x455d2c09,0x3e7f40cb,2 +np.float32,0x4768d3de,0xba14b4af,2 +np.float32,0x46c1e7cd,0x3e7fb102,2 +np.float32,0x44a52949,0xbe7dc9d5,2 +np.float32,0x4454633a,0x3e7dbc7d,2 +np.float32,0x4689810b,0x3e7eb02b,2 +np.float32,0x473473cd,0xbe7eef6f,2 +np.float32,0x44a5193f,0x3e7e1b1f,2 +np.float32,0x46004b36,0x3e7dac59,2 +np.float32,0x467f604b,0x3d7ffd3a,2 +np.float32,0x45ea1805,0x3dffd2e0,2 +np.float32,0x457b6af3,0x3dff7831,2 +np.float32,0x44996159,0xbe7d85f4,2 +np.float32,0x47883553,0xbb80584e,2 +np.float32,0x44e19f0c,0xbdffcfe6,2 +np.float32,0x472b3bf6,0xbe7f7a82,2 +np.float32,0x4600bb4e,0x3a135e33,2 +np.float32,0x449f4556,0x3e7e42e5,2 +np.float32,0x474e9420,0x3dff77b2,2 +np.float32,0x45cbdb23,0x3dff7240,2 +np.float32,0x44222747,0x3dffb039,2 +np.float32,0x4772e419,0xbdff74b8,2 diff --git a/MLPY/Lib/site-packages/numpy/core/tests/data/umath-validation-set-exp.csv b/MLPY/Lib/site-packages/numpy/core/tests/data/umath-validation-set-exp.csv new file mode 100644 index 0000000000000000000000000000000000000000..97379247a42a9fa0f4df422dab49be5b81e3d39b --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/data/umath-validation-set-exp.csv @@ -0,0 +1,412 @@ +dtype,input,output,ulperrortol +## +ve denormals ## 
+np.float32,0x004b4716,0x3f800000,3 +np.float32,0x007b2490,0x3f800000,3 +np.float32,0x007c99fa,0x3f800000,3 +np.float32,0x00734a0c,0x3f800000,3 +np.float32,0x0070de24,0x3f800000,3 +np.float32,0x00495d65,0x3f800000,3 +np.float32,0x006894f6,0x3f800000,3 +np.float32,0x00555a76,0x3f800000,3 +np.float32,0x004e1fb8,0x3f800000,3 +np.float32,0x00687de9,0x3f800000,3 +## -ve denormals ## +np.float32,0x805b59af,0x3f800000,3 +np.float32,0x807ed8ed,0x3f800000,3 +np.float32,0x807142ad,0x3f800000,3 +np.float32,0x80772002,0x3f800000,3 +np.float32,0x8062abcb,0x3f800000,3 +np.float32,0x8045e31c,0x3f800000,3 +np.float32,0x805f01c2,0x3f800000,3 +np.float32,0x80506432,0x3f800000,3 +np.float32,0x8060089d,0x3f800000,3 +np.float32,0x8071292f,0x3f800000,3 +## floats that output a denormal ## +np.float32,0xc2cf3fc1,0x00000001,3 +np.float32,0xc2c79726,0x00000021,3 +np.float32,0xc2cb295d,0x00000005,3 +np.float32,0xc2b49e6b,0x00068c4c,3 +np.float32,0xc2ca8116,0x00000008,3 +np.float32,0xc2c23f82,0x000001d7,3 +np.float32,0xc2cb69c0,0x00000005,3 +np.float32,0xc2cc1f4d,0x00000003,3 +np.float32,0xc2ae094e,0x00affc4c,3 +np.float32,0xc2c86c44,0x00000015,3 +## random floats between -87.0f and 88.0f ## +np.float32,0x4030d7e0,0x417d9a05,3 +np.float32,0x426f60e8,0x6aa1be2c,3 +np.float32,0x41a1b220,0x4e0efc11,3 +np.float32,0xc20cc722,0x26159da7,3 +np.float32,0x41c492bc,0x512ec79d,3 +np.float32,0x40980210,0x42e73a0e,3 +np.float32,0xbf1f7b80,0x3f094de3,3 +np.float32,0x42a678a4,0x7b87a383,3 +np.float32,0xc20f3cfd,0x25a1c304,3 +np.float32,0x423ff34c,0x6216467f,3 +np.float32,0x00000000,0x3f800000,3 +## floats that cause an overflow ## +np.float32,0x7f06d8c1,0x7f800000,3 +np.float32,0x7f451912,0x7f800000,3 +np.float32,0x7ecceac3,0x7f800000,3 +np.float32,0x7f643b45,0x7f800000,3 +np.float32,0x7e910ea0,0x7f800000,3 +np.float32,0x7eb4756b,0x7f800000,3 +np.float32,0x7f4ec708,0x7f800000,3 +np.float32,0x7f6b4551,0x7f800000,3 +np.float32,0x7d8edbda,0x7f800000,3 +np.float32,0x7f730718,0x7f800000,3 +np.float32,0x42b17217,0x7f7fff84,3 +np.float32,0x42b17218,0x7f800000,3 +np.float32,0x42b17219,0x7f800000,3 +np.float32,0xfef2b0bc,0x00000000,3 +np.float32,0xff69f83e,0x00000000,3 +np.float32,0xff4ecb12,0x00000000,3 +np.float32,0xfeac6d86,0x00000000,3 +np.float32,0xfde0cdb8,0x00000000,3 +np.float32,0xff26aef4,0x00000000,3 +np.float32,0xff6f9277,0x00000000,3 +np.float32,0xff7adfc4,0x00000000,3 +np.float32,0xff0ad40e,0x00000000,3 +np.float32,0xff6fd8f3,0x00000000,3 +np.float32,0xc2cff1b4,0x00000001,3 +np.float32,0xc2cff1b5,0x00000000,3 +np.float32,0xc2cff1b6,0x00000000,3 +np.float32,0x7f800000,0x7f800000,3 +np.float32,0xff800000,0x00000000,3 +np.float32,0x4292f27c,0x7480000a,3 +np.float32,0x42a920be,0x7c7fff94,3 +np.float32,0x41c214c9,0x50ffffd9,3 +np.float32,0x41abe686,0x4effffd9,3 +np.float32,0x4287db5a,0x707fffd3,3 +np.float32,0x41902cbb,0x4c800078,3 +np.float32,0x42609466,0x67ffffeb,3 +np.float32,0x41a65af5,0x4e7fffd1,3 +np.float32,0x417f13ff,0x4affffc9,3 +np.float32,0x426d0e6c,0x6a3504f2,3 +np.float32,0x41bc8934,0x507fff51,3 +np.float32,0x42a7bdde,0x7c0000d6,3 +np.float32,0x4120cf66,0x46b504f6,3 +np.float32,0x4244da8f,0x62ffff1a,3 +np.float32,0x41a0cf69,0x4e000034,3 +np.float32,0x41cd2bec,0x52000005,3 +np.float32,0x42893e41,0x7100009e,3 +np.float32,0x41b437e1,0x4fb50502,3 +np.float32,0x41d8430f,0x5300001d,3 +np.float32,0x4244da92,0x62ffffda,3 +np.float32,0x41a0cf63,0x4dffffa9,3 +np.float32,0x3eb17218,0x3fb504f3,3 +np.float32,0x428729e8,0x703504dc,3 +np.float32,0x41a0cf67,0x4e000014,3 +np.float32,0x4252b77d,0x65800011,3 
+np.float32,0x41902cb9,0x4c800058,3 +np.float32,0x42a0cf67,0x79800052,3 +np.float32,0x4152b77b,0x48ffffe9,3 +np.float32,0x41265af3,0x46ffffc8,3 +np.float32,0x42187e0b,0x5affff9a,3 +np.float32,0xc0d2b77c,0x3ab504f6,3 +np.float32,0xc283b2ac,0x10000072,3 +np.float32,0xc1cff1b4,0x2cb504f5,3 +np.float32,0xc05dce9e,0x3d000000,3 +np.float32,0xc28ec9d2,0x0bfffea5,3 +np.float32,0xc23c893a,0x1d7fffde,3 +np.float32,0xc2a920c0,0x027fff6c,3 +np.float32,0xc1f9886f,0x2900002b,3 +np.float32,0xc2c42920,0x000000b5,3 +np.float32,0xc2893e41,0x0dfffec5,3 +np.float32,0xc2c4da93,0x00000080,3 +np.float32,0xc17f1401,0x3400000c,3 +np.float32,0xc1902cb6,0x327fffaf,3 +np.float32,0xc27c4e3b,0x11ffffc5,3 +np.float32,0xc268e5c5,0x157ffe9d,3 +np.float32,0xc2b4e953,0x0005a826,3 +np.float32,0xc287db5a,0x0e800016,3 +np.float32,0xc207db5a,0x2700000b,3 +np.float32,0xc2b2d4fe,0x000ffff1,3 +np.float32,0xc268e5c0,0x157fffdd,3 +np.float32,0xc22920bd,0x2100003b,3 +np.float32,0xc2902caf,0x0b80011e,3 +np.float32,0xc1902cba,0x327fff2f,3 +np.float32,0xc2ca6625,0x00000008,3 +np.float32,0xc280ece8,0x10fffeb5,3 +np.float32,0xc2918f94,0x0b0000ea,3 +np.float32,0xc29b43d5,0x077ffffc,3 +np.float32,0xc1e61ff7,0x2ab504f5,3 +np.float32,0xc2867878,0x0effff15,3 +np.float32,0xc2a2324a,0x04fffff4,3 +#float64 +## near zero ## +np.float64,0x8000000000000000,0x3ff0000000000000,1 +np.float64,0x8010000000000000,0x3ff0000000000000,1 +np.float64,0x8000000000000001,0x3ff0000000000000,1 +np.float64,0x8360000000000000,0x3ff0000000000000,1 +np.float64,0x9a70000000000000,0x3ff0000000000000,1 +np.float64,0xb9b0000000000000,0x3ff0000000000000,1 +np.float64,0xb810000000000000,0x3ff0000000000000,1 +np.float64,0xbc30000000000000,0x3ff0000000000000,1 +np.float64,0xb6a0000000000000,0x3ff0000000000000,1 +np.float64,0x0000000000000000,0x3ff0000000000000,1 +np.float64,0x0010000000000000,0x3ff0000000000000,1 +np.float64,0x0000000000000001,0x3ff0000000000000,1 +np.float64,0x0360000000000000,0x3ff0000000000000,1 +np.float64,0x1a70000000000000,0x3ff0000000000000,1 +np.float64,0x3c30000000000000,0x3ff0000000000000,1 +np.float64,0x36a0000000000000,0x3ff0000000000000,1 +np.float64,0x39b0000000000000,0x3ff0000000000000,1 +np.float64,0x3810000000000000,0x3ff0000000000000,1 +## underflow ## +np.float64,0xc0c6276800000000,0x0000000000000000,1 +np.float64,0xc0c62d918ce2421d,0x0000000000000000,1 +np.float64,0xc0c62d918ce2421e,0x0000000000000000,1 +np.float64,0xc0c62d91a0000000,0x0000000000000000,1 +np.float64,0xc0c62d9180000000,0x0000000000000000,1 +np.float64,0xc0c62dea45ee3e06,0x0000000000000000,1 +np.float64,0xc0c62dea45ee3e07,0x0000000000000000,1 +np.float64,0xc0c62dea40000000,0x0000000000000000,1 +np.float64,0xc0c62dea60000000,0x0000000000000000,1 +np.float64,0xc0875f1120000000,0x0000000000000000,1 +np.float64,0xc0875f113c30b1c8,0x0000000000000000,1 +np.float64,0xc0875f1140000000,0x0000000000000000,1 +np.float64,0xc093480000000000,0x0000000000000000,1 +np.float64,0xffefffffffffffff,0x0000000000000000,1 +np.float64,0xc7efffffe0000000,0x0000000000000000,1 +## overflow ## +np.float64,0x40862e52fefa39ef,0x7ff0000000000000,1 +np.float64,0x40872e42fefa39ef,0x7ff0000000000000,1 +## +/- INF, +/- NAN ## +np.float64,0x7ff0000000000000,0x7ff0000000000000,1 +np.float64,0xfff0000000000000,0x0000000000000000,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0xfff8000000000000,0xfff8000000000000,1 +## output denormal ## +np.float64,0xc087438520000000,0x0000000000000001,1 +np.float64,0xc08743853f2f4461,0x0000000000000001,1 +np.float64,0xc08743853f2f4460,0x0000000000000001,1 
+np.float64,0xc087438540000000,0x0000000000000001,1 +## between -745.13321910 and 709.78271289 ## +np.float64,0xbff760cd14774bd9,0x3fcdb14ced00ceb6,1 +np.float64,0xbff760cd20000000,0x3fcdb14cd7993879,1 +np.float64,0xbff760cd00000000,0x3fcdb14d12fbd264,1 +np.float64,0xc07f1cf360000000,0x130c1b369af14fda,1 +np.float64,0xbeb0000000000000,0x3feffffe00001000,1 +np.float64,0xbd70000000000000,0x3fefffffffffe000,1 +np.float64,0xc084fd46e5c84952,0x0360000000000139,1 +np.float64,0xc084fd46e5c84953,0x035ffffffffffe71,1 +np.float64,0xc084fd46e0000000,0x0360000b9096d32c,1 +np.float64,0xc084fd4700000000,0x035fff9721d12104,1 +np.float64,0xc086232bc0000000,0x0010003af5e64635,1 +np.float64,0xc086232bdd7abcd2,0x001000000000007c,1 +np.float64,0xc086232bdd7abcd3,0x000ffffffffffe7c,1 +np.float64,0xc086232be0000000,0x000ffffaf57a6fc9,1 +np.float64,0xc086233920000000,0x000fe590e3b45eb0,1 +np.float64,0xc086233938000000,0x000fe56133493c57,1 +np.float64,0xc086233940000000,0x000fe5514deffbbc,1 +np.float64,0xc086234c98000000,0x000fbf1024c32ccb,1 +np.float64,0xc086234ca0000000,0x000fbf0065bae78d,1 +np.float64,0xc086234c80000000,0x000fbf3f623a7724,1 +np.float64,0xc086234ec0000000,0x000fbad237c846f9,1 +np.float64,0xc086234ec8000000,0x000fbac27cfdec97,1 +np.float64,0xc086234ee0000000,0x000fba934cfd3dc2,1 +np.float64,0xc086234ef0000000,0x000fba73d7f618d9,1 +np.float64,0xc086234f00000000,0x000fba54632dddc0,1 +np.float64,0xc0862356e0000000,0x000faae0945b761a,1 +np.float64,0xc0862356f0000000,0x000faac13eb9a310,1 +np.float64,0xc086235700000000,0x000faaa1e9567b0a,1 +np.float64,0xc086236020000000,0x000f98cd75c11ed7,1 +np.float64,0xc086236ca0000000,0x000f8081b4d93f89,1 +np.float64,0xc086236cb0000000,0x000f8062b3f4d6c5,1 +np.float64,0xc086236cc0000000,0x000f8043b34e6f8c,1 +np.float64,0xc086238d98000000,0x000f41220d9b0d2c,1 +np.float64,0xc086238da0000000,0x000f4112cc80a01f,1 +np.float64,0xc086238d80000000,0x000f414fd145db5b,1 +np.float64,0xc08624fd00000000,0x000cbfce8ea1e6c4,1 +np.float64,0xc086256080000000,0x000c250747fcd46e,1 +np.float64,0xc08626c480000000,0x000a34f4bd975193,1 +np.float64,0xbf50000000000000,0x3feff800ffeaac00,1 +np.float64,0xbe10000000000000,0x3fefffffff800000,1 +np.float64,0xbcd0000000000000,0x3feffffffffffff8,1 +np.float64,0xc055d589e0000000,0x38100004bf94f63e,1 +np.float64,0xc055d58a00000000,0x380ffff97f292ce8,1 +np.float64,0xbfd962d900000000,0x3fe585a4b00110e1,1 +np.float64,0x3ff4bed280000000,0x400d411e7a58a303,1 +np.float64,0x3fff0b3620000000,0x401bd7737ffffcf3,1 +np.float64,0x3ff0000000000000,0x4005bf0a8b145769,1 +np.float64,0x3eb0000000000000,0x3ff0000100000800,1 +np.float64,0x3d70000000000000,0x3ff0000000001000,1 +np.float64,0x40862e42e0000000,0x7fefff841808287f,1 +np.float64,0x40862e42fefa39ef,0x7fefffffffffff2a,1 +np.float64,0x40862e0000000000,0x7feef85a11e73f2d,1 +np.float64,0x4000000000000000,0x401d8e64b8d4ddae,1 +np.float64,0x4009242920000000,0x40372a52c383a488,1 +np.float64,0x4049000000000000,0x44719103e4080b45,1 +np.float64,0x4008000000000000,0x403415e5bf6fb106,1 +np.float64,0x3f50000000000000,0x3ff00400800aab55,1 +np.float64,0x3e10000000000000,0x3ff0000000400000,1 +np.float64,0x3cd0000000000000,0x3ff0000000000004,1 +np.float64,0x40562e40a0000000,0x47effed088821c3f,1 +np.float64,0x40562e42e0000000,0x47effff082e6c7ff,1 +np.float64,0x40562e4300000000,0x47f00000417184b8,1 +np.float64,0x3fe8000000000000,0x4000ef9db467dcf8,1 +np.float64,0x402b12e8d4f33589,0x412718f68c71a6fe,1 +np.float64,0x402b12e8d4f3358a,0x412718f68c71a70a,1 +np.float64,0x402b12e8c0000000,0x412718f59a7f472e,1 
+np.float64,0x402b12e8e0000000,0x412718f70c0eac62,1 +##use 1th entry +np.float64,0x40631659AE147CB4,0x4db3a95025a4890f,1 +np.float64,0xC061B87D2E85A4E2,0x332640c8e2de2c51,1 +np.float64,0x405A4A50BE243AF4,0x496a45e4b7f0339a,1 +np.float64,0xC0839898B98EC5C6,0x0764027828830df4,1 +#use 2th entry +np.float64,0xC072428C44B6537C,0x2596ade838b96f3e,1 +np.float64,0xC053057C5E1AE9BF,0x3912c8fad18fdadf,1 +np.float64,0x407E89C78328BAA3,0x6bfe35d5b9a1a194,1 +np.float64,0x4083501B6DD87112,0x77a855503a38924e,1 +#use 3th entry +np.float64,0x40832C6195F24540,0x7741e73c80e5eb2f,1 +np.float64,0xC083D4CD557C2EC9,0x06b61727c2d2508e,1 +np.float64,0x400C48F5F67C99BD,0x404128820f02b92e,1 +np.float64,0x4056E36D9B2DF26A,0x4830f52ff34a8242,1 +#use 4th entry +np.float64,0x4080FF700D8CBD06,0x70fa70df9bc30f20,1 +np.float64,0x406C276D39E53328,0x543eb8e20a8f4741,1 +np.float64,0xC070D6159BBD8716,0x27a4a0548c904a75,1 +np.float64,0xC052EBCF8ED61F83,0x391c0e92368d15e4,1 +#use 5th entry +np.float64,0xC061F892A8AC5FBE,0x32f807a89efd3869,1 +np.float64,0x4021D885D2DBA085,0x40bd4dc86d3e3270,1 +np.float64,0x40767AEEEE7D4FCF,0x605e22851ee2afb7,1 +np.float64,0xC0757C5D75D08C80,0x20f0751599b992a2,1 +#use 6th entry +np.float64,0x405ACF7A284C4CE3,0x499a4e0b7a27027c,1 +np.float64,0xC085A6C9E80D7AF5,0x0175914009d62ec2,1 +np.float64,0xC07E4C02F86F1DAE,0x1439269b29a9231e,1 +np.float64,0x4080D80F9691CC87,0x7088a6cdafb041de,1 +#use 7th entry +np.float64,0x407FDFD84FBA0AC1,0x6deb1ae6f9bc4767,1 +np.float64,0x40630C06A1A2213D,0x4dac7a9d51a838b7,1 +np.float64,0x40685FDB30BB8B4F,0x5183f5cc2cac9e79,1 +np.float64,0x408045A2208F77F4,0x6ee299e08e2aa2f0,1 +#use 8th entry +np.float64,0xC08104E391F5078B,0x0ed397b7cbfbd230,1 +np.float64,0xC031501CAEFAE395,0x3e6040fd1ea35085,1 +np.float64,0xC079229124F6247C,0x1babf4f923306b1e,1 +np.float64,0x407FB65F44600435,0x6db03beaf2512b8a,1 +#use 9th entry +np.float64,0xC07EDEE8E8E8A5AC,0x136536cec9cbef48,1 +np.float64,0x4072BB4086099A14,0x5af4d3c3008b56cc,1 +np.float64,0x4050442A2EC42CB4,0x45cd393bd8fad357,1 +np.float64,0xC06AC28FB3D419B4,0x2ca1b9d3437df85f,1 +#use 10th entry +np.float64,0x40567FC6F0A68076,0x480c977fd5f3122e,1 +np.float64,0x40620A2F7EDA59BB,0x4cf278e96f4ce4d7,1 +np.float64,0xC085044707CD557C,0x034aad6c968a045a,1 +np.float64,0xC07374EA5AC516AA,0x23dd6afdc03e83d5,1 +#use 11th entry +np.float64,0x4073CC95332619C1,0x5c804b1498bbaa54,1 +np.float64,0xC0799FEBBE257F31,0x1af6a954c43b87d2,1 +np.float64,0x408159F19EA424F6,0x7200858efcbfc84d,1 +np.float64,0x404A81F6F24C0792,0x44b664a07ce5bbfa,1 +#use 12th entry +np.float64,0x40295FF1EFB9A741,0x4113c0e74c52d7b0,1 +np.float64,0x4073975F4CC411DA,0x5c32be40b4fec2c1,1 +np.float64,0x406E9DE52E82A77E,0x56049c9a3f1ae089,1 +np.float64,0x40748C2F52560ED9,0x5d93bc14fd4cd23b,1 +#use 13th entry +np.float64,0x4062A553CDC4D04C,0x4d6266bfde301318,1 +np.float64,0xC079EC1D63598AB7,0x1a88cb184dab224c,1 +np.float64,0xC0725C1CB3167427,0x25725b46f8a081f6,1 +np.float64,0x407888771D9B45F9,0x6353b1ec6bd7ce80,1 +#use 14th entry +np.float64,0xC082CBA03AA89807,0x09b383723831ce56,1 +np.float64,0xC083A8961BB67DD7,0x0735b118d5275552,1 +np.float64,0xC076BC6ECA12E7E3,0x1f2222679eaef615,1 +np.float64,0xC072752503AA1A5B,0x254eb832242c77e1,1 +#use 15th entry +np.float64,0xC058800792125DEC,0x371882372a0b48d4,1 +np.float64,0x4082909FD863E81C,0x7580d5f386920142,1 +np.float64,0xC071616F8FB534F9,0x26dbe20ef64a412b,1 +np.float64,0x406D1AB571CAA747,0x54ee0d55cb38ac20,1 +#use 16th entry +np.float64,0x406956428B7DAD09,0x52358682c271237f,1 +np.float64,0xC07EFC2D9D17B621,0x133b3e77c27a4d45,1 
+np.float64,0xC08469BAC5BA3CCA,0x050863e5f42cc52f,1 +np.float64,0x407189D9626386A5,0x593cb1c0b3b5c1d3,1 +#use 17th entry +np.float64,0x4077E652E3DEB8C6,0x6269a10dcbd3c752,1 +np.float64,0x407674C97DB06878,0x605485dcc2426ec2,1 +np.float64,0xC07CE9969CF4268D,0x16386cf8996669f2,1 +np.float64,0x40780EE32D5847C4,0x62a436bd1abe108d,1 +#use 18th entry +np.float64,0x4076C3AA5E1E8DA1,0x60c62f56a5e72e24,1 +np.float64,0xC0730AFC7239B9BE,0x24758ead095cec1e,1 +np.float64,0xC085CC2B9C420DDB,0x0109cdaa2e5694c1,1 +np.float64,0x406D0765CB6D7AA4,0x54e06f8dd91bd945,1 +#use 19th entry +np.float64,0xC082D011F3B495E7,0x09a6647661d279c2,1 +np.float64,0xC072826AF8F6AFBC,0x253acd3cd224507e,1 +np.float64,0x404EB9C4810CEA09,0x457933dbf07e8133,1 +np.float64,0x408284FBC97C58CE,0x755f6eb234aa4b98,1 +#use 20th entry +np.float64,0x40856008CF6EDC63,0x7d9c0b3c03f4f73c,1 +np.float64,0xC077CB2E9F013B17,0x1d9b3d3a166a55db,1 +np.float64,0xC0479CA3C20AD057,0x3bad40e081555b99,1 +np.float64,0x40844CD31107332A,0x7a821d70aea478e2,1 +#use 21th entry +np.float64,0xC07C8FCC0BFCC844,0x16ba1cc8c539d19b,1 +np.float64,0xC085C4E9A3ABA488,0x011ff675ba1a2217,1 +np.float64,0x4074D538B32966E5,0x5dfd9d78043c6ad9,1 +np.float64,0xC0630CA16902AD46,0x3231a446074cede6,1 +#use 22th entry +np.float64,0xC06C826733D7D0B7,0x2b5f1078314d41e1,1 +np.float64,0xC0520DF55B2B907F,0x396c13a6ce8e833e,1 +np.float64,0xC080712072B0F437,0x107eae02d11d98ea,1 +np.float64,0x40528A6150E19EFB,0x469fdabda02228c5,1 +#use 23th entry +np.float64,0xC07B1D74B6586451,0x18d1253883ae3b48,1 +np.float64,0x4045AFD7867DAEC0,0x43d7d634fc4c5d98,1 +np.float64,0xC07A08B91F9ED3E2,0x1a60973e6397fc37,1 +np.float64,0x407B3ECF0AE21C8C,0x673e03e9d98d7235,1 +#use 24th entry +np.float64,0xC078AEB6F30CEABF,0x1c530b93ab54a1b3,1 +np.float64,0x4084495006A41672,0x7a775b6dc7e63064,1 +np.float64,0x40830B1C0EBF95DD,0x76e1e6eed77cfb89,1 +np.float64,0x407D93E8F33D8470,0x6a9adbc9e1e4f1e5,1 +#use 25th entry +np.float64,0x4066B11A09EFD9E8,0x504dd528065c28a7,1 +np.float64,0x408545823723AEEB,0x7d504a9b1844f594,1 +np.float64,0xC068C711F2CA3362,0x2e104f3496ea118e,1 +np.float64,0x407F317FCC3CA873,0x6cf0732c9948ebf4,1 +#use 26th entry +np.float64,0x407AFB3EBA2ED50F,0x66dc28a129c868d5,1 +np.float64,0xC075377037708ADE,0x21531a329f3d793e,1 +np.float64,0xC07C30066A1F3246,0x174448baa16ded2b,1 +np.float64,0xC06689A75DE2ABD3,0x2fad70662fae230b,1 +#use 27th entry +np.float64,0x4081514E9FCCF1E0,0x71e673b9efd15f44,1 +np.float64,0xC0762C710AF68460,0x1ff1ed7d8947fe43,1 +np.float64,0xC0468102FF70D9C4,0x3be0c3a8ff3419a3,1 +np.float64,0xC07EA4CEEF02A83E,0x13b908f085102c61,1 +#use 28th entry +np.float64,0xC06290B04AE823C4,0x328a83da3c2e3351,1 +np.float64,0xC0770EB1D1C395FB,0x1eab281c1f1db5fe,1 +np.float64,0xC06F5D4D838A5BAE,0x29500ea32fb474ea,1 +np.float64,0x40723B3133B54C5D,0x5a3c82c7c3a2b848,1 +#use 29th entry +np.float64,0x4085E6454CE3B4AA,0x7f20319b9638d06a,1 +np.float64,0x408389F2A0585D4B,0x7850667c58aab3d0,1 +np.float64,0xC0382798F9C8AE69,0x3dc1c79fe8739d6d,1 +np.float64,0xC08299D827608418,0x0a4335f76cdbaeb5,1 +#use 30th entry +np.float64,0xC06F3DED43301BF1,0x2965670ae46750a8,1 +np.float64,0xC070CAF6BDD577D9,0x27b4aa4ffdd29981,1 +np.float64,0x4078529AD4B2D9F2,0x6305c12755d5e0a6,1 +np.float64,0xC055B14E75A31B96,0x381c2eda6d111e5d,1 +#use 31th entry +np.float64,0x407B13EE414FA931,0x6700772c7544564d,1 +np.float64,0x407EAFDE9DE3EC54,0x6c346a0e49724a3c,1 +np.float64,0xC08362F398B9530D,0x07ffeddbadf980cb,1 +np.float64,0x407E865CDD9EEB86,0x6bf866cac5e0d126,1 +#use 32th entry +np.float64,0x407FB62DBC794C86,0x6db009f708ac62cb,1 
+np.float64,0xC063D0BAA68CDDDE,0x31a3b2a51ce50430,1 +np.float64,0xC05E7706A2231394,0x34f24bead6fab5c9,1 +np.float64,0x4083E3A06FDE444E,0x79527b7a386d1937,1 diff --git a/MLPY/Lib/site-packages/numpy/core/tests/data/umath-validation-set-log.csv b/MLPY/Lib/site-packages/numpy/core/tests/data/umath-validation-set-log.csv new file mode 100644 index 0000000000000000000000000000000000000000..d4dafe86c4534e256860aab7dd2814e268c82321 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/data/umath-validation-set-log.csv @@ -0,0 +1,271 @@ +dtype,input,output,ulperrortol +## +ve denormals ## +np.float32,0x004b4716,0xc2afbc1b,4 +np.float32,0x007b2490,0xc2aec01e,4 +np.float32,0x007c99fa,0xc2aeba17,4 +np.float32,0x00734a0c,0xc2aee1dc,4 +np.float32,0x0070de24,0xc2aeecba,4 +np.float32,0x007fffff,0xc2aeac50,4 +np.float32,0x00000001,0xc2ce8ed0,4 +## -ve denormals ## +np.float32,0x80495d65,0xffc00000,4 +np.float32,0x806894f6,0xffc00000,4 +np.float32,0x80555a76,0xffc00000,4 +np.float32,0x804e1fb8,0xffc00000,4 +np.float32,0x80687de9,0xffc00000,4 +np.float32,0x807fffff,0xffc00000,4 +np.float32,0x80000001,0xffc00000,4 +## +/-0.0f, +/-FLT_MIN +/-FLT_MAX ## +np.float32,0x00000000,0xff800000,4 +np.float32,0x80000000,0xff800000,4 +np.float32,0x7f7fffff,0x42b17218,4 +np.float32,0x80800000,0xffc00000,4 +np.float32,0xff7fffff,0xffc00000,4 +## 1.00f + 0x00000001 ## +np.float32,0x3f800000,0x00000000,4 +np.float32,0x3f800001,0x33ffffff,4 +np.float32,0x3f800002,0x347ffffe,4 +np.float32,0x3f7fffff,0xb3800000,4 +np.float32,0x3f7ffffe,0xb4000000,4 +np.float32,0x3f7ffffd,0xb4400001,4 +np.float32,0x402df853,0x3f7ffffe,4 +np.float32,0x402df854,0x3f7fffff,4 +np.float32,0x402df855,0x3f800000,4 +np.float32,0x402df856,0x3f800001,4 +np.float32,0x3ebc5ab0,0xbf800001,4 +np.float32,0x3ebc5ab1,0xbf800000,4 +np.float32,0x3ebc5ab2,0xbf800000,4 +np.float32,0x3ebc5ab3,0xbf7ffffe,4 +np.float32,0x423ef575,0x407768ab,4 +np.float32,0x427b8c61,0x408485dd,4 +np.float32,0x4211e9ee,0x406630b0,4 +np.float32,0x424d5c41,0x407c0fed,4 +np.float32,0x42be722a,0x4091cc91,4 +np.float32,0x42b73d30,0x4090908b,4 +np.float32,0x427e48e2,0x4084de7f,4 +np.float32,0x428f759b,0x4088bba3,4 +np.float32,0x41629069,0x4029a0cc,4 +np.float32,0x4272c99d,0x40836379,4 +np.float32,0x4d1b7458,0x4197463d,4 +np.float32,0x4f10c594,0x41ace2b2,4 +np.float32,0x4ea397c2,0x41a85171,4 +np.float32,0x4fefa9d1,0x41b6769c,4 +np.float32,0x4ebac6ab,0x41a960dc,4 +np.float32,0x4f6efb42,0x41b0e535,4 +np.float32,0x4e9ab8e7,0x41a7df44,4 +np.float32,0x4e81b5d1,0x41a67625,4 +np.float32,0x5014d9f2,0x41b832bd,4 +np.float32,0x4f02175c,0x41ac07b8,4 +np.float32,0x7f034f89,0x42b01c47,4 +np.float32,0x7f56d00e,0x42b11849,4 +np.float32,0x7f1cd5f6,0x42b0773a,4 +np.float32,0x7e979174,0x42af02d7,4 +np.float32,0x7f23369f,0x42b08ba2,4 +np.float32,0x7f0637ae,0x42b0277d,4 +np.float32,0x7efcb6e8,0x42b00897,4 +np.float32,0x7f7907c8,0x42b163f6,4 +np.float32,0x7e95c4c2,0x42aefcba,4 +np.float32,0x7f4577b2,0x42b0ed2d,4 +np.float32,0x3f49c92e,0xbe73ae84,4 +np.float32,0x3f4a23d1,0xbe71e2f8,4 +np.float32,0x3f4abb67,0xbe6ee430,4 +np.float32,0x3f48169a,0xbe7c5532,4 +np.float32,0x3f47f5fa,0xbe7cfc37,4 +np.float32,0x3f488309,0xbe7a2ad8,4 +np.float32,0x3f479df4,0xbe7ebf5f,4 +np.float32,0x3f47cfff,0xbe7dbec9,4 +np.float32,0x3f496704,0xbe75a125,4 +np.float32,0x3f478ee8,0xbe7f0c92,4 +np.float32,0x3f4a763b,0xbe7041ce,4 +np.float32,0x3f47a108,0xbe7eaf94,4 +np.float32,0x3f48136c,0xbe7c6578,4 +np.float32,0x3f481c17,0xbe7c391c,4 +np.float32,0x3f47cd28,0xbe7dcd56,4 +np.float32,0x3f478be8,0xbe7f1bf7,4 
+np.float32,0x3f4c1f8e,0xbe67e367,4 +np.float32,0x3f489b0c,0xbe79b03f,4 +np.float32,0x3f4934cf,0xbe76a08a,4 +np.float32,0x3f4954df,0xbe75fd6a,4 +np.float32,0x3f47a3f5,0xbe7ea093,4 +np.float32,0x3f4ba4fc,0xbe6a4b02,4 +np.float32,0x3f47a0e1,0xbe7eb05c,4 +np.float32,0x3f48c30a,0xbe78e42f,4 +np.float32,0x3f48cab8,0xbe78bd05,4 +np.float32,0x3f4b0569,0xbe6d6ea4,4 +np.float32,0x3f47de32,0xbe7d7607,4 +np.float32,0x3f477328,0xbe7f9b00,4 +np.float32,0x3f496dab,0xbe757f52,4 +np.float32,0x3f47662c,0xbe7fddac,4 +np.float32,0x3f48ddd8,0xbe785b80,4 +np.float32,0x3f481866,0xbe7c4bff,4 +np.float32,0x3f48b119,0xbe793fb6,4 +np.float32,0x3f48c7e8,0xbe78cb5c,4 +np.float32,0x3f4985f6,0xbe7503da,4 +np.float32,0x3f483fdf,0xbe7b8212,4 +np.float32,0x3f4b1c76,0xbe6cfa67,4 +np.float32,0x3f480b2e,0xbe7c8fa8,4 +np.float32,0x3f48745f,0xbe7a75bf,4 +np.float32,0x3f485bda,0xbe7af308,4 +np.float32,0x3f47a660,0xbe7e942c,4 +np.float32,0x3f47d4d5,0xbe7da600,4 +np.float32,0x3f4b0a26,0xbe6d56be,4 +np.float32,0x3f4a4883,0xbe712924,4 +np.float32,0x3f4769e7,0xbe7fca84,4 +np.float32,0x3f499702,0xbe74ad3f,4 +np.float32,0x3f494ab1,0xbe763131,4 +np.float32,0x3f476b69,0xbe7fc2c6,4 +np.float32,0x3f4884e8,0xbe7a214a,4 +np.float32,0x3f486945,0xbe7aae76,4 +#float64 +## +ve denormal ## +np.float64,0x0000000000000001,0xc0874385446d71c3,1 +np.float64,0x0001000000000000,0xc086395a2079b70c,1 +np.float64,0x000fffffffffffff,0xc086232bdd7abcd2,1 +np.float64,0x0007ad63e2168cb6,0xc086290bc0b2980f,1 +## -ve denormal ## +np.float64,0x8000000000000001,0xfff8000000000001,1 +np.float64,0x8001000000000000,0xfff8000000000001,1 +np.float64,0x800fffffffffffff,0xfff8000000000001,1 +np.float64,0x8007ad63e2168cb6,0xfff8000000000001,1 +## +/-0.0f, MAX, MIN## +np.float64,0x0000000000000000,0xfff0000000000000,1 +np.float64,0x8000000000000000,0xfff0000000000000,1 +np.float64,0x7fefffffffffffff,0x40862e42fefa39ef,1 +np.float64,0xffefffffffffffff,0xfff8000000000001,1 +## near 1.0f ## +np.float64,0x3ff0000000000000,0x0000000000000000,1 +np.float64,0x3fe8000000000000,0xbfd269621134db92,1 +np.float64,0x3ff0000000000001,0x3cafffffffffffff,1 +np.float64,0x3ff0000020000000,0x3e7fffffe000002b,1 +np.float64,0x3ff0000000000001,0x3cafffffffffffff,1 +np.float64,0x3fefffffe0000000,0xbe70000008000005,1 +np.float64,0x3fefffffffffffff,0xbca0000000000000,1 +## random numbers ## +np.float64,0x02500186f3d9da56,0xc0855b8abf135773,1 +np.float64,0x09200815a3951173,0xc082ff1ad7131bdc,1 +np.float64,0x0da029623b0243d4,0xc0816fc994695bb5,1 +np.float64,0x48703b8ac483a382,0x40579213a313490b,1 +np.float64,0x09207b74c87c9860,0xc082fee20ff349ef,1 +np.float64,0x62c077698e8df947,0x407821c996d110f0,1 +np.float64,0x2350b45e87c3cfb0,0xc073d6b16b51d072,1 +np.float64,0x3990a23f9ff2b623,0xc051aa60eadd8c61,1 +np.float64,0x0d011386a116c348,0xc081a6cc7ea3b8fb,1 +np.float64,0x1fe0f0303ebe273a,0xc0763870b78a81ca,1 +np.float64,0x0cd1260121d387da,0xc081b7668d61a9d1,1 +np.float64,0x1e6135a8f581d422,0xc077425ac10f08c2,1 +np.float64,0x622168db5fe52d30,0x4077b3c669b9fadb,1 +np.float64,0x69f188e1ec6d1718,0x407d1e2f18c63889,1 +np.float64,0x3aa1bf1d9c4dd1a3,0xc04d682e24bde479,1 +np.float64,0x6c81c4011ce4f683,0x407ee5190e8a8e6a,1 +np.float64,0x2191fa55aa5a5095,0xc0750c0c318b5e2d,1 +np.float64,0x32a1f602a32bf360,0xc06270caa493fc17,1 +np.float64,0x16023c90ba93249b,0xc07d0f88e0801638,1 +np.float64,0x1c525fe6d71fa9ff,0xc078af49c66a5d63,1 +np.float64,0x1a927675815d65b7,0xc079e5bdd7fe376e,1 +np.float64,0x41227b8fe70da028,0x402aa0c9f9a84c71,1 +np.float64,0x4962bb6e853fe87d,0x405a34aa04c83747,1 
+np.float64,0x23d2cda00b26b5a4,0xc0737c13a06d00ea,1 +np.float64,0x2d13083fd62987fa,0xc06a25055aeb474e,1 +np.float64,0x10e31e4c9b4579a1,0xc0804e181929418e,1 +np.float64,0x26d3247d556a86a9,0xc0716774171da7e8,1 +np.float64,0x6603379398d0d4ac,0x407a64f51f8a887b,1 +np.float64,0x02d38af17d9442ba,0xc0852d955ac9dd68,1 +np.float64,0x6a2382b4818dd967,0x407d4129d688e5d4,1 +np.float64,0x2ee3c403c79b3934,0xc067a091fefaf8b6,1 +np.float64,0x6493a699acdbf1a4,0x4079663c8602bfc5,1 +np.float64,0x1c8413c4f0de3100,0xc0788c99697059b6,1 +np.float64,0x4573f1ed350d9622,0x404e9bd1e4c08920,1 +np.float64,0x2f34265c9200b69c,0xc067310cfea4e986,1 +np.float64,0x19b43e65fa22029b,0xc07a7f8877de22d6,1 +np.float64,0x0af48ab7925ed6bc,0xc0825c4fbc0e5ade,1 +np.float64,0x4fa49699cad82542,0x4065c76d2a318235,1 +np.float64,0x7204a15e56ade492,0x40815bb87484dffb,1 +np.float64,0x4734aa08a230982d,0x40542a4bf7a361a9,1 +np.float64,0x1ae4ed296c2fd749,0xc079ac4921f20abb,1 +np.float64,0x472514ea4370289c,0x4053ff372bd8f18f,1 +np.float64,0x53a54b3f73820430,0x406b5411fc5f2e33,1 +np.float64,0x64754de5a15684fa,0x407951592e99a5ab,1 +np.float64,0x69358e279868a7c3,0x407c9c671a882c31,1 +np.float64,0x284579ec61215945,0xc0706688e55f0927,1 +np.float64,0x68b5c58806447adc,0x407c43d6f4eff760,1 +np.float64,0x1945a83f98b0e65d,0xc07acc15eeb032cc,1 +np.float64,0x0fc5eb98a16578bf,0xc080b0d02eddca0e,1 +np.float64,0x6a75e208f5784250,0x407d7a7383bf8f05,1 +np.float64,0x0fe63a029c47645d,0xc080a59ca1e98866,1 +np.float64,0x37963ac53f065510,0xc057236281f7bdb6,1 +np.float64,0x135661bb07067ff7,0xc07ee924930c21e4,1 +np.float64,0x4b4699469d458422,0x405f73843756e887,1 +np.float64,0x1a66d73e4bf4881b,0xc07a039ba1c63adf,1 +np.float64,0x12a6b9b119a7da59,0xc07f62e49c6431f3,1 +np.float64,0x24c719aa8fd1bdb5,0xc072d26da4bf84d3,1 +np.float64,0x0fa6ff524ffef314,0xc080bb8514662e77,1 +np.float64,0x1db751d66fdd4a9a,0xc077b77cb50d7c92,1 +np.float64,0x4947374c516da82c,0x4059e9acfc7105bf,1 +np.float64,0x1b1771ab98f3afc8,0xc07989326b8e1f66,1 +np.float64,0x25e78805baac8070,0xc0720a818e6ef080,1 +np.float64,0x4bd7a148225d3687,0x406082d004ea3ee7,1 +np.float64,0x53d7d6b2bbbda00a,0x406b9a398967cbd5,1 +np.float64,0x6997fb9f4e1c685f,0x407ce0a703413eba,1 +np.float64,0x069802c2ff71b951,0xc083df39bf7acddc,1 +np.float64,0x4d683ac9890f66d8,0x4062ae21d8c2acf0,1 +np.float64,0x5a2825863ec14f4c,0x40722d718d549552,1 +np.float64,0x0398799a88f4db80,0xc084e93dab8e2158,1 +np.float64,0x5ed87a8b77e135a5,0x40756d7051777b33,1 +np.float64,0x5828cd6d79b9bede,0x4070cafb22fc6ca1,1 +np.float64,0x7b18ba2a5ec6f068,0x408481386b3ed6fe,1 +np.float64,0x4938fd60922198fe,0x4059c206b762ea7e,1 +np.float64,0x31b8f44fcdd1a46e,0xc063b2faa8b6434e,1 +np.float64,0x5729341c0d918464,0x407019cac0c4a7d7,1 +np.float64,0x13595e9228ee878e,0xc07ee7235a7d8088,1 +np.float64,0x17698b0dc9dd4135,0xc07c1627e3a5ad5f,1 +np.float64,0x63b977c283abb0cc,0x4078cf1ec6ed65be,1 +np.float64,0x7349cc0d4dc16943,0x4081cc697ce4cb53,1 +np.float64,0x4e49a80b732fb28d,0x4063e67e3c5cbe90,1 +np.float64,0x07ba14b848a8ae02,0xc0837ac032a094e0,1 +np.float64,0x3da9f17b691bfddc,0xc03929c25366acda,1 +np.float64,0x02ea39aa6c3ac007,0xc08525af6f21e1c4,1 +np.float64,0x3a6a42f04ed9563d,0xc04e98e825dca46b,1 +np.float64,0x1afa877cd7900be7,0xc0799d6648cb34a9,1 +np.float64,0x58ea986649e052c6,0x4071512e939ad790,1 +np.float64,0x691abbc04647f536,0x407c89aaae0fcb83,1 +np.float64,0x43aabc5063e6f284,0x4044b45d18106fd2,1 +np.float64,0x488b003c893e0bea,0x4057df012a2dafbe,1 +np.float64,0x77eb076ed67caee5,0x40836720de94769e,1 +np.float64,0x5c1b46974aba46f4,0x40738731ba256007,1 
+np.float64,0x1a5b29ecb5d3c261,0xc07a0becc77040d6,1 +np.float64,0x5d8b6ccf868c6032,0x4074865c1865e2db,1 +np.float64,0x4cfb6690b4aaf5af,0x406216cd8c7e8ddb,1 +np.float64,0x76cbd8eb5c5fc39e,0x4083038dc66d682b,1 +np.float64,0x28bbd1fec5012814,0xc07014c2dd1b9711,1 +np.float64,0x33dc1b3a4fd6bf7a,0xc060bd0756e07d8a,1 +np.float64,0x52bbe89b37de99f3,0x406a10041aa7d343,1 +np.float64,0x07bc479d15eb2dd3,0xc0837a1a6e3a3b61,1 +np.float64,0x18fc5275711a901d,0xc07aff3e9d62bc93,1 +np.float64,0x114c9758e247dc71,0xc080299a7cf15b05,1 +np.float64,0x25ac8f6d60755148,0xc07233c4c0c511d4,1 +np.float64,0x260cae2bb9e9fd7e,0xc071f128c7e82eac,1 +np.float64,0x572ccdfe0241de82,0x40701bedc84bb504,1 +np.float64,0x0ddcef6c8d41f5ee,0xc0815a7e16d07084,1 +np.float64,0x6dad1d59c988af68,0x407fb4a0bc0142b1,1 +np.float64,0x025d200580d8b6d1,0xc08556c0bc32b1b2,1 +np.float64,0x7aad344b6aa74c18,0x40845bbc453f22be,1 +np.float64,0x5b5d9d6ad9d14429,0x4073036d2d21f382,1 +np.float64,0x49cd8d8dcdf19954,0x405b5c034f5c7353,1 +np.float64,0x63edb9483335c1e6,0x4078f2dd21378786,1 +np.float64,0x7b1dd64c9d2c26bd,0x408482b922017bc9,1 +np.float64,0x782e13e0b574be5f,0x40837e2a0090a5ad,1 +np.float64,0x592dfe18b9d6db2f,0x40717f777fbcb1ec,1 +np.float64,0x654e3232ac60d72c,0x4079e71a95a70446,1 +np.float64,0x7b8e42ad22091456,0x4084a9a6f1e61722,1 +np.float64,0x570e88dfd5860ae6,0x407006ae6c0d137a,1 +np.float64,0x294e98346cb98ef1,0xc06f5edaac12bd44,1 +np.float64,0x1adeaa4ab792e642,0xc079b1431d5e2633,1 +np.float64,0x7b6ead3377529ac8,0x40849eabc8c7683c,1 +np.float64,0x2b8eedae8a9b2928,0xc06c400054deef11,1 +np.float64,0x65defb45b2dcf660,0x407a4b53f181c05a,1 +np.float64,0x1baf582d475e7701,0xc07920bcad4a502c,1 +np.float64,0x461f39cf05a0f15a,0x405126368f984fa1,1 +np.float64,0x7e5f6f5dcfff005b,0x4085a37d610439b4,1 +np.float64,0x136f66e4d09bd662,0xc07ed8a2719f2511,1 +np.float64,0x65afd8983fb6ca1f,0x407a2a7f48bf7fc1,1 +np.float64,0x572fa7f95ed22319,0x40701d706cf82e6f,1 diff --git a/MLPY/Lib/site-packages/numpy/core/tests/data/umath-validation-set-sin.csv b/MLPY/Lib/site-packages/numpy/core/tests/data/umath-validation-set-sin.csv new file mode 100644 index 0000000000000000000000000000000000000000..3b0a7a15def155f00047914bbb51a398d8fea309 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/data/umath-validation-set-sin.csv @@ -0,0 +1,660 @@ +dtype,input,output,ulperrortol +## +ve denormals ## +np.float32,0x004b4716,0x004b4716,2 +np.float32,0x007b2490,0x007b2490,2 +np.float32,0x007c99fa,0x007c99fa,2 +np.float32,0x00734a0c,0x00734a0c,2 +np.float32,0x0070de24,0x0070de24,2 +np.float32,0x007fffff,0x007fffff,2 +np.float32,0x00000001,0x00000001,2 +## -ve denormals ## +np.float32,0x80495d65,0x80495d65,2 +np.float32,0x806894f6,0x806894f6,2 +np.float32,0x80555a76,0x80555a76,2 +np.float32,0x804e1fb8,0x804e1fb8,2 +np.float32,0x80687de9,0x80687de9,2 +np.float32,0x807fffff,0x807fffff,2 +np.float32,0x80000001,0x80000001,2 +## +/-0.0f, +/-FLT_MIN +/-FLT_MAX ## +np.float32,0x00000000,0x00000000,2 +np.float32,0x80000000,0x80000000,2 +np.float32,0x00800000,0x00800000,2 +np.float32,0x80800000,0x80800000,2 +## 1.00f ## +np.float32,0x3f800000,0x3f576aa4,2 +np.float32,0x3f800001,0x3f576aa6,2 +np.float32,0x3f800002,0x3f576aa7,2 +np.float32,0xc090a8b0,0x3f7b4e48,2 +np.float32,0x41ce3184,0x3f192d43,2 +np.float32,0xc1d85848,0xbf7161cb,2 +np.float32,0x402b8820,0x3ee3f29f,2 +np.float32,0x42b4e454,0x3f1d0151,2 +np.float32,0x42a67a60,0x3f7ffa4c,2 +np.float32,0x41d92388,0x3f67beef,2 +np.float32,0x422dd66c,0xbeffb0c1,2 +np.float32,0xc28f5be6,0xbf0bae79,2 +np.float32,0x41ab2674,0x3f0ffe2b,2 
+np.float32,0x3f490fdb,0x3f3504f3,2 +np.float32,0xbf490fdb,0xbf3504f3,2 +np.float32,0x3fc90fdb,0x3f800000,2 +np.float32,0xbfc90fdb,0xbf800000,2 +np.float32,0x40490fdb,0xb3bbbd2e,2 +np.float32,0xc0490fdb,0x33bbbd2e,2 +np.float32,0x3fc90fdb,0x3f800000,2 +np.float32,0xbfc90fdb,0xbf800000,2 +np.float32,0x40490fdb,0xb3bbbd2e,2 +np.float32,0xc0490fdb,0x33bbbd2e,2 +np.float32,0x40c90fdb,0x343bbd2e,2 +np.float32,0xc0c90fdb,0xb43bbd2e,2 +np.float32,0x4016cbe4,0x3f3504f3,2 +np.float32,0xc016cbe4,0xbf3504f3,2 +np.float32,0x4096cbe4,0xbf800000,2 +np.float32,0xc096cbe4,0x3f800000,2 +np.float32,0x4116cbe4,0xb2ccde2e,2 +np.float32,0xc116cbe4,0x32ccde2e,2 +np.float32,0x40490fdb,0xb3bbbd2e,2 +np.float32,0xc0490fdb,0x33bbbd2e,2 +np.float32,0x40c90fdb,0x343bbd2e,2 +np.float32,0xc0c90fdb,0xb43bbd2e,2 +np.float32,0x41490fdb,0x34bbbd2e,2 +np.float32,0xc1490fdb,0xb4bbbd2e,2 +np.float32,0x407b53d2,0xbf3504f5,2 +np.float32,0xc07b53d2,0x3f3504f5,2 +np.float32,0x40fb53d2,0x3f800000,2 +np.float32,0xc0fb53d2,0xbf800000,2 +np.float32,0x417b53d2,0xb535563d,2 +np.float32,0xc17b53d2,0x3535563d,2 +np.float32,0x4096cbe4,0xbf800000,2 +np.float32,0xc096cbe4,0x3f800000,2 +np.float32,0x4116cbe4,0xb2ccde2e,2 +np.float32,0xc116cbe4,0x32ccde2e,2 +np.float32,0x4196cbe4,0x334cde2e,2 +np.float32,0xc196cbe4,0xb34cde2e,2 +np.float32,0x40afede0,0xbf3504ef,2 +np.float32,0xc0afede0,0x3f3504ef,2 +np.float32,0x412fede0,0xbf800000,2 +np.float32,0xc12fede0,0x3f800000,2 +np.float32,0x41afede0,0xb5b222c4,2 +np.float32,0xc1afede0,0x35b222c4,2 +np.float32,0x40c90fdb,0x343bbd2e,2 +np.float32,0xc0c90fdb,0xb43bbd2e,2 +np.float32,0x41490fdb,0x34bbbd2e,2 +np.float32,0xc1490fdb,0xb4bbbd2e,2 +np.float32,0x41c90fdb,0x353bbd2e,2 +np.float32,0xc1c90fdb,0xb53bbd2e,2 +np.float32,0x40e231d6,0x3f3504f3,2 +np.float32,0xc0e231d6,0xbf3504f3,2 +np.float32,0x416231d6,0x3f800000,2 +np.float32,0xc16231d6,0xbf800000,2 +np.float32,0x41e231d6,0xb399a6a2,2 +np.float32,0xc1e231d6,0x3399a6a2,2 +np.float32,0x40fb53d2,0x3f800000,2 +np.float32,0xc0fb53d2,0xbf800000,2 +np.float32,0x417b53d2,0xb535563d,2 +np.float32,0xc17b53d2,0x3535563d,2 +np.float32,0x41fb53d2,0x35b5563d,2 +np.float32,0xc1fb53d2,0xb5b5563d,2 +np.float32,0x410a3ae7,0x3f3504eb,2 +np.float32,0xc10a3ae7,0xbf3504eb,2 +np.float32,0x418a3ae7,0xbf800000,2 +np.float32,0xc18a3ae7,0x3f800000,2 +np.float32,0x420a3ae7,0xb6308908,2 +np.float32,0xc20a3ae7,0x36308908,2 +np.float32,0x4116cbe4,0xb2ccde2e,2 +np.float32,0xc116cbe4,0x32ccde2e,2 +np.float32,0x4196cbe4,0x334cde2e,2 +np.float32,0xc196cbe4,0xb34cde2e,2 +np.float32,0x4216cbe4,0x33ccde2e,2 +np.float32,0xc216cbe4,0xb3ccde2e,2 +np.float32,0x41235ce2,0xbf3504f7,2 +np.float32,0xc1235ce2,0x3f3504f7,2 +np.float32,0x41a35ce2,0x3f800000,2 +np.float32,0xc1a35ce2,0xbf800000,2 +np.float32,0x42235ce2,0xb5b889b6,2 +np.float32,0xc2235ce2,0x35b889b6,2 +np.float32,0x412fede0,0xbf800000,2 +np.float32,0xc12fede0,0x3f800000,2 +np.float32,0x41afede0,0xb5b222c4,2 +np.float32,0xc1afede0,0x35b222c4,2 +np.float32,0x422fede0,0x363222c4,2 +np.float32,0xc22fede0,0xb63222c4,2 +np.float32,0x413c7edd,0xbf3504f3,2 +np.float32,0xc13c7edd,0x3f3504f3,2 +np.float32,0x41bc7edd,0xbf800000,2 +np.float32,0xc1bc7edd,0x3f800000,2 +np.float32,0x423c7edd,0xb4000add,2 +np.float32,0xc23c7edd,0x34000add,2 +np.float32,0x41490fdb,0x34bbbd2e,2 +np.float32,0xc1490fdb,0xb4bbbd2e,2 +np.float32,0x41c90fdb,0x353bbd2e,2 +np.float32,0xc1c90fdb,0xb53bbd2e,2 +np.float32,0x42490fdb,0x35bbbd2e,2 +np.float32,0xc2490fdb,0xb5bbbd2e,2 +np.float32,0x4155a0d9,0x3f3504fb,2 +np.float32,0xc155a0d9,0xbf3504fb,2 
+np.float32,0x41d5a0d9,0x3f800000,2 +np.float32,0xc1d5a0d9,0xbf800000,2 +np.float32,0x4255a0d9,0xb633bc81,2 +np.float32,0xc255a0d9,0x3633bc81,2 +np.float32,0x416231d6,0x3f800000,2 +np.float32,0xc16231d6,0xbf800000,2 +np.float32,0x41e231d6,0xb399a6a2,2 +np.float32,0xc1e231d6,0x3399a6a2,2 +np.float32,0x426231d6,0x3419a6a2,2 +np.float32,0xc26231d6,0xb419a6a2,2 +np.float32,0x416ec2d4,0x3f3504ef,2 +np.float32,0xc16ec2d4,0xbf3504ef,2 +np.float32,0x41eec2d4,0xbf800000,2 +np.float32,0xc1eec2d4,0x3f800000,2 +np.float32,0x426ec2d4,0xb5bef0a7,2 +np.float32,0xc26ec2d4,0x35bef0a7,2 +np.float32,0x417b53d2,0xb535563d,2 +np.float32,0xc17b53d2,0x3535563d,2 +np.float32,0x41fb53d2,0x35b5563d,2 +np.float32,0xc1fb53d2,0xb5b5563d,2 +np.float32,0x427b53d2,0x3635563d,2 +np.float32,0xc27b53d2,0xb635563d,2 +np.float32,0x4183f268,0xbf3504ff,2 +np.float32,0xc183f268,0x3f3504ff,2 +np.float32,0x4203f268,0x3f800000,2 +np.float32,0xc203f268,0xbf800000,2 +np.float32,0x4283f268,0xb6859a13,2 +np.float32,0xc283f268,0x36859a13,2 +np.float32,0x418a3ae7,0xbf800000,2 +np.float32,0xc18a3ae7,0x3f800000,2 +np.float32,0x420a3ae7,0xb6308908,2 +np.float32,0xc20a3ae7,0x36308908,2 +np.float32,0x428a3ae7,0x36b08908,2 +np.float32,0xc28a3ae7,0xb6b08908,2 +np.float32,0x41908365,0xbf3504f6,2 +np.float32,0xc1908365,0x3f3504f6,2 +np.float32,0x42108365,0xbf800000,2 +np.float32,0xc2108365,0x3f800000,2 +np.float32,0x42908365,0x3592200d,2 +np.float32,0xc2908365,0xb592200d,2 +np.float32,0x4196cbe4,0x334cde2e,2 +np.float32,0xc196cbe4,0xb34cde2e,2 +np.float32,0x4216cbe4,0x33ccde2e,2 +np.float32,0xc216cbe4,0xb3ccde2e,2 +np.float32,0x4296cbe4,0x344cde2e,2 +np.float32,0xc296cbe4,0xb44cde2e,2 +np.float32,0x419d1463,0x3f3504f8,2 +np.float32,0xc19d1463,0xbf3504f8,2 +np.float32,0x421d1463,0x3f800000,2 +np.float32,0xc21d1463,0xbf800000,2 +np.float32,0x429d1463,0xb5c55799,2 +np.float32,0xc29d1463,0x35c55799,2 +np.float32,0x41a35ce2,0x3f800000,2 +np.float32,0xc1a35ce2,0xbf800000,2 +np.float32,0x42235ce2,0xb5b889b6,2 +np.float32,0xc2235ce2,0x35b889b6,2 +np.float32,0x42a35ce2,0x363889b6,2 +np.float32,0xc2a35ce2,0xb63889b6,2 +np.float32,0x41a9a561,0x3f3504e7,2 +np.float32,0xc1a9a561,0xbf3504e7,2 +np.float32,0x4229a561,0xbf800000,2 +np.float32,0xc229a561,0x3f800000,2 +np.float32,0x42a9a561,0xb68733d0,2 +np.float32,0xc2a9a561,0x368733d0,2 +np.float32,0x41afede0,0xb5b222c4,2 +np.float32,0xc1afede0,0x35b222c4,2 +np.float32,0x422fede0,0x363222c4,2 +np.float32,0xc22fede0,0xb63222c4,2 +np.float32,0x42afede0,0x36b222c4,2 +np.float32,0xc2afede0,0xb6b222c4,2 +np.float32,0x41b6365e,0xbf3504f0,2 +np.float32,0xc1b6365e,0x3f3504f0,2 +np.float32,0x4236365e,0x3f800000,2 +np.float32,0xc236365e,0xbf800000,2 +np.float32,0x42b6365e,0x358bb91c,2 +np.float32,0xc2b6365e,0xb58bb91c,2 +np.float32,0x41bc7edd,0xbf800000,2 +np.float32,0xc1bc7edd,0x3f800000,2 +np.float32,0x423c7edd,0xb4000add,2 +np.float32,0xc23c7edd,0x34000add,2 +np.float32,0x42bc7edd,0x34800add,2 +np.float32,0xc2bc7edd,0xb4800add,2 +np.float32,0x41c2c75c,0xbf3504ef,2 +np.float32,0xc1c2c75c,0x3f3504ef,2 +np.float32,0x4242c75c,0xbf800000,2 +np.float32,0xc242c75c,0x3f800000,2 +np.float32,0x42c2c75c,0xb5cbbe8a,2 +np.float32,0xc2c2c75c,0x35cbbe8a,2 +np.float32,0x41c90fdb,0x353bbd2e,2 +np.float32,0xc1c90fdb,0xb53bbd2e,2 +np.float32,0x42490fdb,0x35bbbd2e,2 +np.float32,0xc2490fdb,0xb5bbbd2e,2 +np.float32,0x42c90fdb,0x363bbd2e,2 +np.float32,0xc2c90fdb,0xb63bbd2e,2 +np.float32,0x41cf585a,0x3f3504ff,2 +np.float32,0xc1cf585a,0xbf3504ff,2 +np.float32,0x424f585a,0x3f800000,2 +np.float32,0xc24f585a,0xbf800000,2 
+np.float32,0x42cf585a,0xb688cd8c,2 +np.float32,0xc2cf585a,0x3688cd8c,2 +np.float32,0x41d5a0d9,0x3f800000,2 +np.float32,0xc1d5a0d9,0xbf800000,2 +np.float32,0x4255a0d9,0xb633bc81,2 +np.float32,0xc255a0d9,0x3633bc81,2 +np.float32,0x42d5a0d9,0x36b3bc81,2 +np.float32,0xc2d5a0d9,0xb6b3bc81,2 +np.float32,0x41dbe958,0x3f3504e0,2 +np.float32,0xc1dbe958,0xbf3504e0,2 +np.float32,0x425be958,0xbf800000,2 +np.float32,0xc25be958,0x3f800000,2 +np.float32,0x42dbe958,0xb6deab75,2 +np.float32,0xc2dbe958,0x36deab75,2 +np.float32,0x41e231d6,0xb399a6a2,2 +np.float32,0xc1e231d6,0x3399a6a2,2 +np.float32,0x426231d6,0x3419a6a2,2 +np.float32,0xc26231d6,0xb419a6a2,2 +np.float32,0x42e231d6,0x3499a6a2,2 +np.float32,0xc2e231d6,0xb499a6a2,2 +np.float32,0x41e87a55,0xbf3504f8,2 +np.float32,0xc1e87a55,0x3f3504f8,2 +np.float32,0x42687a55,0x3f800000,2 +np.float32,0xc2687a55,0xbf800000,2 +np.float32,0x42e87a55,0xb5d2257b,2 +np.float32,0xc2e87a55,0x35d2257b,2 +np.float32,0x41eec2d4,0xbf800000,2 +np.float32,0xc1eec2d4,0x3f800000,2 +np.float32,0x426ec2d4,0xb5bef0a7,2 +np.float32,0xc26ec2d4,0x35bef0a7,2 +np.float32,0x42eec2d4,0x363ef0a7,2 +np.float32,0xc2eec2d4,0xb63ef0a7,2 +np.float32,0x41f50b53,0xbf3504e7,2 +np.float32,0xc1f50b53,0x3f3504e7,2 +np.float32,0x42750b53,0xbf800000,2 +np.float32,0xc2750b53,0x3f800000,2 +np.float32,0x42f50b53,0xb68a6748,2 +np.float32,0xc2f50b53,0x368a6748,2 +np.float32,0x41fb53d2,0x35b5563d,2 +np.float32,0xc1fb53d2,0xb5b5563d,2 +np.float32,0x427b53d2,0x3635563d,2 +np.float32,0xc27b53d2,0xb635563d,2 +np.float32,0x42fb53d2,0x36b5563d,2 +np.float32,0xc2fb53d2,0xb6b5563d,2 +np.float32,0x4200ce28,0x3f3504f0,2 +np.float32,0xc200ce28,0xbf3504f0,2 +np.float32,0x4280ce28,0x3f800000,2 +np.float32,0xc280ce28,0xbf800000,2 +np.float32,0x4300ce28,0x357dd672,2 +np.float32,0xc300ce28,0xb57dd672,2 +np.float32,0x4203f268,0x3f800000,2 +np.float32,0xc203f268,0xbf800000,2 +np.float32,0x4283f268,0xb6859a13,2 +np.float32,0xc283f268,0x36859a13,2 +np.float32,0x4303f268,0x37059a13,2 +np.float32,0xc303f268,0xb7059a13,2 +np.float32,0x420716a7,0x3f3504ee,2 +np.float32,0xc20716a7,0xbf3504ee,2 +np.float32,0x428716a7,0xbf800000,2 +np.float32,0xc28716a7,0x3f800000,2 +np.float32,0x430716a7,0xb5d88c6d,2 +np.float32,0xc30716a7,0x35d88c6d,2 +np.float32,0x420a3ae7,0xb6308908,2 +np.float32,0xc20a3ae7,0x36308908,2 +np.float32,0x428a3ae7,0x36b08908,2 +np.float32,0xc28a3ae7,0xb6b08908,2 +np.float32,0x430a3ae7,0x37308908,2 +np.float32,0xc30a3ae7,0xb7308908,2 +np.float32,0x420d5f26,0xbf350500,2 +np.float32,0xc20d5f26,0x3f350500,2 +np.float32,0x428d5f26,0x3f800000,2 +np.float32,0xc28d5f26,0xbf800000,2 +np.float32,0x430d5f26,0xb68c0105,2 +np.float32,0xc30d5f26,0x368c0105,2 +np.float32,0x42108365,0xbf800000,2 +np.float32,0xc2108365,0x3f800000,2 +np.float32,0x42908365,0x3592200d,2 +np.float32,0xc2908365,0xb592200d,2 +np.float32,0x43108365,0xb612200d,2 +np.float32,0xc3108365,0x3612200d,2 +np.float32,0x4213a7a5,0xbf3504df,2 +np.float32,0xc213a7a5,0x3f3504df,2 +np.float32,0x4293a7a5,0xbf800000,2 +np.float32,0xc293a7a5,0x3f800000,2 +np.float32,0x4313a7a5,0xb6e1deee,2 +np.float32,0xc313a7a5,0x36e1deee,2 +np.float32,0x4216cbe4,0x33ccde2e,2 +np.float32,0xc216cbe4,0xb3ccde2e,2 +np.float32,0x4296cbe4,0x344cde2e,2 +np.float32,0xc296cbe4,0xb44cde2e,2 +np.float32,0x4316cbe4,0x34ccde2e,2 +np.float32,0xc316cbe4,0xb4ccde2e,2 +np.float32,0x4219f024,0x3f35050f,2 +np.float32,0xc219f024,0xbf35050f,2 +np.float32,0x4299f024,0x3f800000,2 +np.float32,0xc299f024,0xbf800000,2 +np.float32,0x4319f024,0xb71bde6c,2 +np.float32,0xc319f024,0x371bde6c,2 
+np.float32,0x421d1463,0x3f800000,2 +np.float32,0xc21d1463,0xbf800000,2 +np.float32,0x429d1463,0xb5c55799,2 +np.float32,0xc29d1463,0x35c55799,2 +np.float32,0x431d1463,0x36455799,2 +np.float32,0xc31d1463,0xb6455799,2 +np.float32,0x422038a3,0x3f3504d0,2 +np.float32,0xc22038a3,0xbf3504d0,2 +np.float32,0x42a038a3,0xbf800000,2 +np.float32,0xc2a038a3,0x3f800000,2 +np.float32,0x432038a3,0xb746cd61,2 +np.float32,0xc32038a3,0x3746cd61,2 +np.float32,0x42235ce2,0xb5b889b6,2 +np.float32,0xc2235ce2,0x35b889b6,2 +np.float32,0x42a35ce2,0x363889b6,2 +np.float32,0xc2a35ce2,0xb63889b6,2 +np.float32,0x43235ce2,0x36b889b6,2 +np.float32,0xc3235ce2,0xb6b889b6,2 +np.float32,0x42268121,0xbf3504f1,2 +np.float32,0xc2268121,0x3f3504f1,2 +np.float32,0x42a68121,0x3f800000,2 +np.float32,0xc2a68121,0xbf800000,2 +np.float32,0x43268121,0x35643aac,2 +np.float32,0xc3268121,0xb5643aac,2 +np.float32,0x4229a561,0xbf800000,2 +np.float32,0xc229a561,0x3f800000,2 +np.float32,0x42a9a561,0xb68733d0,2 +np.float32,0xc2a9a561,0x368733d0,2 +np.float32,0x4329a561,0x370733d0,2 +np.float32,0xc329a561,0xb70733d0,2 +np.float32,0x422cc9a0,0xbf3504ee,2 +np.float32,0xc22cc9a0,0x3f3504ee,2 +np.float32,0x42acc9a0,0xbf800000,2 +np.float32,0xc2acc9a0,0x3f800000,2 +np.float32,0x432cc9a0,0xb5e55a50,2 +np.float32,0xc32cc9a0,0x35e55a50,2 +np.float32,0x422fede0,0x363222c4,2 +np.float32,0xc22fede0,0xb63222c4,2 +np.float32,0x42afede0,0x36b222c4,2 +np.float32,0xc2afede0,0xb6b222c4,2 +np.float32,0x432fede0,0x373222c4,2 +np.float32,0xc32fede0,0xb73222c4,2 +np.float32,0x4233121f,0x3f350500,2 +np.float32,0xc233121f,0xbf350500,2 +np.float32,0x42b3121f,0x3f800000,2 +np.float32,0xc2b3121f,0xbf800000,2 +np.float32,0x4333121f,0xb68f347d,2 +np.float32,0xc333121f,0x368f347d,2 +np.float32,0x4236365e,0x3f800000,2 +np.float32,0xc236365e,0xbf800000,2 +np.float32,0x42b6365e,0x358bb91c,2 +np.float32,0xc2b6365e,0xb58bb91c,2 +np.float32,0x4336365e,0xb60bb91c,2 +np.float32,0xc336365e,0x360bb91c,2 +np.float32,0x42395a9e,0x3f3504df,2 +np.float32,0xc2395a9e,0xbf3504df,2 +np.float32,0x42b95a9e,0xbf800000,2 +np.float32,0xc2b95a9e,0x3f800000,2 +np.float32,0x43395a9e,0xb6e51267,2 +np.float32,0xc3395a9e,0x36e51267,2 +np.float32,0x423c7edd,0xb4000add,2 +np.float32,0xc23c7edd,0x34000add,2 +np.float32,0x42bc7edd,0x34800add,2 +np.float32,0xc2bc7edd,0xb4800add,2 +np.float32,0x433c7edd,0x35000add,2 +np.float32,0xc33c7edd,0xb5000add,2 +np.float32,0x423fa31d,0xbf35050f,2 +np.float32,0xc23fa31d,0x3f35050f,2 +np.float32,0x42bfa31d,0x3f800000,2 +np.float32,0xc2bfa31d,0xbf800000,2 +np.float32,0x433fa31d,0xb71d7828,2 +np.float32,0xc33fa31d,0x371d7828,2 +np.float32,0x4242c75c,0xbf800000,2 +np.float32,0xc242c75c,0x3f800000,2 +np.float32,0x42c2c75c,0xb5cbbe8a,2 +np.float32,0xc2c2c75c,0x35cbbe8a,2 +np.float32,0x4342c75c,0x364bbe8a,2 +np.float32,0xc342c75c,0xb64bbe8a,2 +np.float32,0x4245eb9c,0xbf3504d0,2 +np.float32,0xc245eb9c,0x3f3504d0,2 +np.float32,0x42c5eb9c,0xbf800000,2 +np.float32,0xc2c5eb9c,0x3f800000,2 +np.float32,0x4345eb9c,0xb748671d,2 +np.float32,0xc345eb9c,0x3748671d,2 +np.float32,0x42490fdb,0x35bbbd2e,2 +np.float32,0xc2490fdb,0xb5bbbd2e,2 +np.float32,0x42c90fdb,0x363bbd2e,2 +np.float32,0xc2c90fdb,0xb63bbd2e,2 +np.float32,0x43490fdb,0x36bbbd2e,2 +np.float32,0xc3490fdb,0xb6bbbd2e,2 +np.float32,0x424c341a,0x3f3504f1,2 +np.float32,0xc24c341a,0xbf3504f1,2 +np.float32,0x42cc341a,0x3f800000,2 +np.float32,0xc2cc341a,0xbf800000,2 +np.float32,0x434c341a,0x354a9ee6,2 +np.float32,0xc34c341a,0xb54a9ee6,2 +np.float32,0x424f585a,0x3f800000,2 +np.float32,0xc24f585a,0xbf800000,2 
+np.float32,0x42cf585a,0xb688cd8c,2 +np.float32,0xc2cf585a,0x3688cd8c,2 +np.float32,0x434f585a,0x3708cd8c,2 +np.float32,0xc34f585a,0xb708cd8c,2 +np.float32,0x42527c99,0x3f3504ee,2 +np.float32,0xc2527c99,0xbf3504ee,2 +np.float32,0x42d27c99,0xbf800000,2 +np.float32,0xc2d27c99,0x3f800000,2 +np.float32,0x43527c99,0xb5f22833,2 +np.float32,0xc3527c99,0x35f22833,2 +np.float32,0x4255a0d9,0xb633bc81,2 +np.float32,0xc255a0d9,0x3633bc81,2 +np.float32,0x42d5a0d9,0x36b3bc81,2 +np.float32,0xc2d5a0d9,0xb6b3bc81,2 +np.float32,0x4355a0d9,0x3733bc81,2 +np.float32,0xc355a0d9,0xb733bc81,2 +np.float32,0x4258c518,0xbf350500,2 +np.float32,0xc258c518,0x3f350500,2 +np.float32,0x42d8c518,0x3f800000,2 +np.float32,0xc2d8c518,0xbf800000,2 +np.float32,0x4358c518,0xb69267f6,2 +np.float32,0xc358c518,0x369267f6,2 +np.float32,0x425be958,0xbf800000,2 +np.float32,0xc25be958,0x3f800000,2 +np.float32,0x42dbe958,0xb6deab75,2 +np.float32,0xc2dbe958,0x36deab75,2 +np.float32,0x435be958,0x375eab75,2 +np.float32,0xc35be958,0xb75eab75,2 +np.float32,0x425f0d97,0xbf3504df,2 +np.float32,0xc25f0d97,0x3f3504df,2 +np.float32,0x42df0d97,0xbf800000,2 +np.float32,0xc2df0d97,0x3f800000,2 +np.float32,0x435f0d97,0xb6e845e0,2 +np.float32,0xc35f0d97,0x36e845e0,2 +np.float32,0x426231d6,0x3419a6a2,2 +np.float32,0xc26231d6,0xb419a6a2,2 +np.float32,0x42e231d6,0x3499a6a2,2 +np.float32,0xc2e231d6,0xb499a6a2,2 +np.float32,0x436231d6,0x3519a6a2,2 +np.float32,0xc36231d6,0xb519a6a2,2 +np.float32,0x42655616,0x3f35050f,2 +np.float32,0xc2655616,0xbf35050f,2 +np.float32,0x42e55616,0x3f800000,2 +np.float32,0xc2e55616,0xbf800000,2 +np.float32,0x43655616,0xb71f11e5,2 +np.float32,0xc3655616,0x371f11e5,2 +np.float32,0x42687a55,0x3f800000,2 +np.float32,0xc2687a55,0xbf800000,2 +np.float32,0x42e87a55,0xb5d2257b,2 +np.float32,0xc2e87a55,0x35d2257b,2 +np.float32,0x43687a55,0x3652257b,2 +np.float32,0xc3687a55,0xb652257b,2 +np.float32,0x426b9e95,0x3f3504cf,2 +np.float32,0xc26b9e95,0xbf3504cf,2 +np.float32,0x42eb9e95,0xbf800000,2 +np.float32,0xc2eb9e95,0x3f800000,2 +np.float32,0x436b9e95,0xb74a00d9,2 +np.float32,0xc36b9e95,0x374a00d9,2 +np.float32,0x426ec2d4,0xb5bef0a7,2 +np.float32,0xc26ec2d4,0x35bef0a7,2 +np.float32,0x42eec2d4,0x363ef0a7,2 +np.float32,0xc2eec2d4,0xb63ef0a7,2 +np.float32,0x436ec2d4,0x36bef0a7,2 +np.float32,0xc36ec2d4,0xb6bef0a7,2 +np.float32,0x4271e713,0xbf3504f1,2 +np.float32,0xc271e713,0x3f3504f1,2 +np.float32,0x42f1e713,0x3f800000,2 +np.float32,0xc2f1e713,0xbf800000,2 +np.float32,0x4371e713,0x35310321,2 +np.float32,0xc371e713,0xb5310321,2 +np.float32,0x42750b53,0xbf800000,2 +np.float32,0xc2750b53,0x3f800000,2 +np.float32,0x42f50b53,0xb68a6748,2 +np.float32,0xc2f50b53,0x368a6748,2 +np.float32,0x43750b53,0x370a6748,2 +np.float32,0xc3750b53,0xb70a6748,2 +np.float32,0x42782f92,0xbf3504ee,2 +np.float32,0xc2782f92,0x3f3504ee,2 +np.float32,0x42f82f92,0xbf800000,2 +np.float32,0xc2f82f92,0x3f800000,2 +np.float32,0x43782f92,0xb5fef616,2 +np.float32,0xc3782f92,0x35fef616,2 +np.float32,0x427b53d2,0x3635563d,2 +np.float32,0xc27b53d2,0xb635563d,2 +np.float32,0x42fb53d2,0x36b5563d,2 +np.float32,0xc2fb53d2,0xb6b5563d,2 +np.float32,0x437b53d2,0x3735563d,2 +np.float32,0xc37b53d2,0xb735563d,2 +np.float32,0x427e7811,0x3f350500,2 +np.float32,0xc27e7811,0xbf350500,2 +np.float32,0x42fe7811,0x3f800000,2 +np.float32,0xc2fe7811,0xbf800000,2 +np.float32,0x437e7811,0xb6959b6f,2 +np.float32,0xc37e7811,0x36959b6f,2 +np.float32,0x4280ce28,0x3f800000,2 +np.float32,0xc280ce28,0xbf800000,2 +np.float32,0x4300ce28,0x357dd672,2 +np.float32,0xc300ce28,0xb57dd672,2 
+np.float32,0x4380ce28,0xb5fdd672,2 +np.float32,0xc380ce28,0x35fdd672,2 +np.float32,0x42826048,0x3f3504de,2 +np.float32,0xc2826048,0xbf3504de,2 +np.float32,0x43026048,0xbf800000,2 +np.float32,0xc3026048,0x3f800000,2 +np.float32,0x43826048,0xb6eb7958,2 +np.float32,0xc3826048,0x36eb7958,2 +np.float32,0x4283f268,0xb6859a13,2 +np.float32,0xc283f268,0x36859a13,2 +np.float32,0x4303f268,0x37059a13,2 +np.float32,0xc303f268,0xb7059a13,2 +np.float32,0x4383f268,0x37859a13,2 +np.float32,0xc383f268,0xb7859a13,2 +np.float32,0x42858487,0xbf3504e2,2 +np.float32,0xc2858487,0x3f3504e2,2 +np.float32,0x43058487,0x3f800000,2 +np.float32,0xc3058487,0xbf800000,2 +np.float32,0x43858487,0x36bea8be,2 +np.float32,0xc3858487,0xb6bea8be,2 +np.float32,0x428716a7,0xbf800000,2 +np.float32,0xc28716a7,0x3f800000,2 +np.float32,0x430716a7,0xb5d88c6d,2 +np.float32,0xc30716a7,0x35d88c6d,2 +np.float32,0x438716a7,0x36588c6d,2 +np.float32,0xc38716a7,0xb6588c6d,2 +np.float32,0x4288a8c7,0xbf3504cf,2 +np.float32,0xc288a8c7,0x3f3504cf,2 +np.float32,0x4308a8c7,0xbf800000,2 +np.float32,0xc308a8c7,0x3f800000,2 +np.float32,0x4388a8c7,0xb74b9a96,2 +np.float32,0xc388a8c7,0x374b9a96,2 +np.float32,0x428a3ae7,0x36b08908,2 +np.float32,0xc28a3ae7,0xb6b08908,2 +np.float32,0x430a3ae7,0x37308908,2 +np.float32,0xc30a3ae7,0xb7308908,2 +np.float32,0x438a3ae7,0x37b08908,2 +np.float32,0xc38a3ae7,0xb7b08908,2 +np.float32,0x428bcd06,0x3f3504f2,2 +np.float32,0xc28bcd06,0xbf3504f2,2 +np.float32,0x430bcd06,0x3f800000,2 +np.float32,0xc30bcd06,0xbf800000,2 +np.float32,0x438bcd06,0x3517675b,2 +np.float32,0xc38bcd06,0xb517675b,2 +np.float32,0x428d5f26,0x3f800000,2 +np.float32,0xc28d5f26,0xbf800000,2 +np.float32,0x430d5f26,0xb68c0105,2 +np.float32,0xc30d5f26,0x368c0105,2 +np.float32,0x438d5f26,0x370c0105,2 +np.float32,0xc38d5f26,0xb70c0105,2 +np.float32,0x428ef146,0x3f3504c0,2 +np.float32,0xc28ef146,0xbf3504c0,2 +np.float32,0x430ef146,0xbf800000,2 +np.float32,0xc30ef146,0x3f800000,2 +np.float32,0x438ef146,0xb790bc40,2 +np.float32,0xc38ef146,0x3790bc40,2 +np.float32,0x42908365,0x3592200d,2 +np.float32,0xc2908365,0xb592200d,2 +np.float32,0x43108365,0xb612200d,2 +np.float32,0xc3108365,0x3612200d,2 +np.float32,0x43908365,0xb692200d,2 +np.float32,0xc3908365,0x3692200d,2 +np.float32,0x42921585,0xbf350501,2 +np.float32,0xc2921585,0x3f350501,2 +np.float32,0x43121585,0x3f800000,2 +np.float32,0xc3121585,0xbf800000,2 +np.float32,0x43921585,0xb698cee8,2 +np.float32,0xc3921585,0x3698cee8,2 +np.float32,0x4293a7a5,0xbf800000,2 +np.float32,0xc293a7a5,0x3f800000,2 +np.float32,0x4313a7a5,0xb6e1deee,2 +np.float32,0xc313a7a5,0x36e1deee,2 +np.float32,0x4393a7a5,0x3761deee,2 +np.float32,0xc393a7a5,0xb761deee,2 +np.float32,0x429539c5,0xbf3504b1,2 +np.float32,0xc29539c5,0x3f3504b1,2 +np.float32,0x431539c5,0xbf800000,2 +np.float32,0xc31539c5,0x3f800000,2 +np.float32,0x439539c5,0xb7bbab34,2 +np.float32,0xc39539c5,0x37bbab34,2 +np.float32,0x4296cbe4,0x344cde2e,2 +np.float32,0xc296cbe4,0xb44cde2e,2 +np.float32,0x4316cbe4,0x34ccde2e,2 +np.float32,0xc316cbe4,0xb4ccde2e,2 +np.float32,0x4396cbe4,0x354cde2e,2 +np.float32,0xc396cbe4,0xb54cde2e,2 +np.float32,0x42985e04,0x3f350510,2 +np.float32,0xc2985e04,0xbf350510,2 +np.float32,0x43185e04,0x3f800000,2 +np.float32,0xc3185e04,0xbf800000,2 +np.float32,0x43985e04,0xb722455d,2 +np.float32,0xc3985e04,0x3722455d,2 +np.float32,0x4299f024,0x3f800000,2 +np.float32,0xc299f024,0xbf800000,2 +np.float32,0x4319f024,0xb71bde6c,2 +np.float32,0xc319f024,0x371bde6c,2 +np.float32,0x4399f024,0x379bde6c,2 +np.float32,0xc399f024,0xb79bde6c,2 
+np.float32,0x429b8243,0x3f3504fc,2 +np.float32,0xc29b8243,0xbf3504fc,2 +np.float32,0x431b8243,0xbf800000,2 +np.float32,0xc31b8243,0x3f800000,2 +np.float32,0x439b8243,0x364b2eb8,2 +np.float32,0xc39b8243,0xb64b2eb8,2 +np.float32,0x435b2047,0xbf350525,2 +np.float32,0x42a038a2,0xbf800000,2 +np.float32,0x432038a2,0x3664ca7e,2 +np.float32,0x4345eb9b,0x365e638c,2 +np.float32,0x42c5eb9b,0xbf800000,2 +np.float32,0x42eb9e94,0xbf800000,2 +np.float32,0x4350ea79,0x3f800000,2 +np.float32,0x42dbe957,0x3585522a,2 +np.float32,0x425be957,0xbf800000,2 +np.float32,0x435be957,0xb605522a,2 +np.float32,0x476362a2,0xbd7ff911,2 +np.float32,0x464c99a4,0x3e7f4d41,2 +np.float32,0x4471f73d,0x3e7fe1b0,2 +np.float32,0x445a6752,0x3e7ef367,2 +np.float32,0x474fa400,0x3e7f9fcd,2 +np.float32,0x45c1e72f,0xbe7fc7af,2 +np.float32,0x4558c91d,0x3e7e9f31,2 +np.float32,0x43784f94,0xbdff6654,2 +np.float32,0x466e8500,0xbe7ea0a3,2 +np.float32,0x468e1c25,0x3e7e22fb,2 +np.float32,0x44ea6cfc,0x3dff70c3,2 +np.float32,0x4605126c,0x3e7f89ef,2 +np.float32,0x4788b3c6,0xbb87d853,2 +np.float32,0x4531b042,0x3dffd163,2 +np.float32,0x43f1f71d,0x3dfff387,2 +np.float32,0x462c3fa5,0xbd7fe13d,2 +np.float32,0x441c5354,0xbdff76b4,2 +np.float32,0x44908b69,0x3e7dcf0d,2 +np.float32,0x478813ad,0xbe7e9d80,2 +np.float32,0x441c4351,0x3dff937b,2 diff --git a/MLPY/Lib/site-packages/numpy/core/tests/examples/__pycache__/setup.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/core/tests/examples/__pycache__/setup.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2db086740f2a2386b246c05dfd59ea275b704a88 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/core/tests/examples/__pycache__/setup.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/core/tests/examples/checks.pyx b/MLPY/Lib/site-packages/numpy/core/tests/examples/checks.pyx new file mode 100644 index 0000000000000000000000000000000000000000..74cb31491a82508fcdf3cc1dbb7ec2c5022450c4 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/examples/checks.pyx @@ -0,0 +1,30 @@ +""" +Functions in this module give python-space wrappers for cython functions +exposed in numpy/__init__.pxd, so they can be tested in test_cython.py +""" +cimport numpy as cnp +cnp.import_array() + + +def is_td64(obj): + return cnp.is_timedelta64_object(obj) + + +def is_dt64(obj): + return cnp.is_datetime64_object(obj) + + +def get_dt64_value(obj): + return cnp.get_datetime64_value(obj) + + +def get_td64_value(obj): + return cnp.get_timedelta64_value(obj) + + +def get_dt64_unit(obj): + return cnp.get_datetime64_unit(obj) + + +def is_integer(obj): + return isinstance(obj, (cnp.integer, int)) diff --git a/MLPY/Lib/site-packages/numpy/core/tests/examples/setup.py b/MLPY/Lib/site-packages/numpy/core/tests/examples/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..921e3528dd7289e8472af9a41b476fa00acb067c --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/examples/setup.py @@ -0,0 +1,25 @@ +""" +Provide python-space access to the functions exposed in numpy/__init__.pxd +for testing. 
+""" + +import numpy as np +from distutils.core import setup +from Cython.Build import cythonize +from setuptools.extension import Extension +import os + +macros = [("NPY_NO_DEPRECATED_API", 0)] + +checks = Extension( + "checks", + sources=[os.path.join('.', "checks.pyx")], + include_dirs=[np.get_include()], + define_macros=macros, +) + +extensions = [checks] + +setup( + ext_modules=cythonize(extensions) +) diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test__exceptions.py b/MLPY/Lib/site-packages/numpy/core/tests/test__exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..2510bcd6c639c2ebe43acb8e8cab9d9b1b8c31fe --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/test__exceptions.py @@ -0,0 +1,58 @@ +""" +Tests of the ._exceptions module. Primarily for exercising the __str__ methods. +""" + +import pickle + +import numpy as np + +_ArrayMemoryError = np.core._exceptions._ArrayMemoryError +_UFuncNoLoopError = np.core._exceptions._UFuncNoLoopError + +class TestArrayMemoryError: + def test_pickling(self): + """ Test that _ArrayMemoryError can be pickled """ + error = _ArrayMemoryError((1023,), np.dtype(np.uint8)) + res = pickle.loads(pickle.dumps(error)) + assert res._total_size == error._total_size + + def test_str(self): + e = _ArrayMemoryError((1023,), np.dtype(np.uint8)) + str(e) # not crashing is enough + + # testing these properties is easier than testing the full string repr + def test__size_to_string(self): + """ Test e._size_to_string """ + f = _ArrayMemoryError._size_to_string + Ki = 1024 + assert f(0) == '0 bytes' + assert f(1) == '1 bytes' + assert f(1023) == '1023 bytes' + assert f(Ki) == '1.00 KiB' + assert f(Ki+1) == '1.00 KiB' + assert f(10*Ki) == '10.0 KiB' + assert f(int(999.4*Ki)) == '999. KiB' + assert f(int(1023.4*Ki)) == '1023. KiB' + assert f(int(1023.5*Ki)) == '1.00 MiB' + assert f(Ki*Ki) == '1.00 MiB' + + # 1023.9999 Mib should round to 1 GiB + assert f(int(Ki*Ki*Ki*0.9999)) == '1.00 GiB' + assert f(Ki*Ki*Ki*Ki*Ki*Ki) == '1.00 EiB' + # larger than sys.maxsize, adding larger prefices isn't going to help + # anyway. + assert f(Ki*Ki*Ki*Ki*Ki*Ki*123456) == '123456. 
EiB' + + def test__total_size(self): + """ Test e._total_size """ + e = _ArrayMemoryError((1,), np.dtype(np.uint8)) + assert e._total_size == 1 + + e = _ArrayMemoryError((2, 4), np.dtype((np.uint64, 16))) + assert e._total_size == 1024 + + +class TestUFuncNoLoopError: + def test_pickling(self): + """ Test that _UFuncNoLoopError can be pickled """ + assert isinstance(pickle.dumps(_UFuncNoLoopError), bytes) diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_abc.py b/MLPY/Lib/site-packages/numpy/core/tests/test_abc.py new file mode 100644 index 0000000000000000000000000000000000000000..dbc908d24b4fafa7266400c9cdda5d0858cc4a9c --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/test_abc.py @@ -0,0 +1,54 @@ +from numpy.testing import assert_ + +import numbers + +import numpy as np +from numpy.core.numerictypes import sctypes + +class TestABC: + def test_abstract(self): + assert_(issubclass(np.number, numbers.Number)) + + assert_(issubclass(np.inexact, numbers.Complex)) + assert_(issubclass(np.complexfloating, numbers.Complex)) + assert_(issubclass(np.floating, numbers.Real)) + + assert_(issubclass(np.integer, numbers.Integral)) + assert_(issubclass(np.signedinteger, numbers.Integral)) + assert_(issubclass(np.unsignedinteger, numbers.Integral)) + + def test_floats(self): + for t in sctypes['float']: + assert_(isinstance(t(), numbers.Real), + "{0} is not instance of Real".format(t.__name__)) + assert_(issubclass(t, numbers.Real), + "{0} is not subclass of Real".format(t.__name__)) + assert_(not isinstance(t(), numbers.Rational), + "{0} is instance of Rational".format(t.__name__)) + assert_(not issubclass(t, numbers.Rational), + "{0} is subclass of Rational".format(t.__name__)) + + def test_complex(self): + for t in sctypes['complex']: + assert_(isinstance(t(), numbers.Complex), + "{0} is not instance of Complex".format(t.__name__)) + assert_(issubclass(t, numbers.Complex), + "{0} is not subclass of Complex".format(t.__name__)) + assert_(not isinstance(t(), numbers.Real), + "{0} is instance of Real".format(t.__name__)) + assert_(not issubclass(t, numbers.Real), + "{0} is subclass of Real".format(t.__name__)) + + def test_int(self): + for t in sctypes['int']: + assert_(isinstance(t(), numbers.Integral), + "{0} is not instance of Integral".format(t.__name__)) + assert_(issubclass(t, numbers.Integral), + "{0} is not subclass of Integral".format(t.__name__)) + + def test_uint(self): + for t in sctypes['uint']: + assert_(isinstance(t(), numbers.Integral), + "{0} is not instance of Integral".format(t.__name__)) + assert_(issubclass(t, numbers.Integral), + "{0} is not subclass of Integral".format(t.__name__)) diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_api.py b/MLPY/Lib/site-packages/numpy/core/tests/test_api.py new file mode 100644 index 0000000000000000000000000000000000000000..25741189b663c097fc064f098afab37494da69d3 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/test_api.py @@ -0,0 +1,600 @@ +import sys + +import numpy as np +from numpy.core._rational_tests import rational +import pytest +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, assert_raises, assert_warns, + HAS_REFCOUNT + ) + +# Switch between new behaviour when NPY_RELAXED_STRIDES_CHECKING is set. 
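+# (A C-ordered (10, 1) array is additionally flagged F-contiguous only when
+# the relaxed strides logic is compiled in, because length-1 axes are then
+# ignored when computing contiguity, e.g.:
+#
+#     >>> np.ones((10, 1), order='C').flags.f_contiguous
+#     True  # only on NPY_RELAXED_STRIDES_CHECKING builds
+#
+# so this one-line probe detects the build option at runtime.)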
+NPY_RELAXED_STRIDES_CHECKING = np.ones((10, 1), order='C').flags.f_contiguous + + +def test_array_array(): + tobj = type(object) + ones11 = np.ones((1, 1), np.float64) + tndarray = type(ones11) + # Test is_ndarray + assert_equal(np.array(ones11, dtype=np.float64), ones11) + if HAS_REFCOUNT: + old_refcount = sys.getrefcount(tndarray) + np.array(ones11) + assert_equal(old_refcount, sys.getrefcount(tndarray)) + + # test None + assert_equal(np.array(None, dtype=np.float64), + np.array(np.nan, dtype=np.float64)) + if HAS_REFCOUNT: + old_refcount = sys.getrefcount(tobj) + np.array(None, dtype=np.float64) + assert_equal(old_refcount, sys.getrefcount(tobj)) + + # test scalar + assert_equal(np.array(1.0, dtype=np.float64), + np.ones((), dtype=np.float64)) + if HAS_REFCOUNT: + old_refcount = sys.getrefcount(np.float64) + np.array(np.array(1.0, dtype=np.float64), dtype=np.float64) + assert_equal(old_refcount, sys.getrefcount(np.float64)) + + # test string + S2 = np.dtype((bytes, 2)) + S3 = np.dtype((bytes, 3)) + S5 = np.dtype((bytes, 5)) + assert_equal(np.array(b"1.0", dtype=np.float64), + np.ones((), dtype=np.float64)) + assert_equal(np.array(b"1.0").dtype, S3) + assert_equal(np.array(b"1.0", dtype=bytes).dtype, S3) + assert_equal(np.array(b"1.0", dtype=S2), np.array(b"1.")) + assert_equal(np.array(b"1", dtype=S5), np.ones((), dtype=S5)) + + # test string + U2 = np.dtype((str, 2)) + U3 = np.dtype((str, 3)) + U5 = np.dtype((str, 5)) + assert_equal(np.array("1.0", dtype=np.float64), + np.ones((), dtype=np.float64)) + assert_equal(np.array("1.0").dtype, U3) + assert_equal(np.array("1.0", dtype=str).dtype, U3) + assert_equal(np.array("1.0", dtype=U2), np.array(str("1."))) + assert_equal(np.array("1", dtype=U5), np.ones((), dtype=U5)) + + builtins = getattr(__builtins__, '__dict__', __builtins__) + assert_(hasattr(builtins, 'get')) + + # test memoryview + dat = np.array(memoryview(b'1.0'), dtype=np.float64) + assert_equal(dat, [49.0, 46.0, 48.0]) + assert_(dat.dtype.type is np.float64) + + dat = np.array(memoryview(b'1.0')) + assert_equal(dat, [49, 46, 48]) + assert_(dat.dtype.type is np.uint8) + + # test array interface + a = np.array(100.0, dtype=np.float64) + o = type("o", (object,), + dict(__array_interface__=a.__array_interface__)) + assert_equal(np.array(o, dtype=np.float64), a) + + # test array_struct interface + a = np.array([(1, 4.0, 'Hello'), (2, 6.0, 'World')], + dtype=[('f0', int), ('f1', float), ('f2', str)]) + o = type("o", (object,), + dict(__array_struct__=a.__array_struct__)) + ## wasn't what I expected... is np.array(o) supposed to equal a ? + ## instead we get a array([...], dtype=">V18") + assert_equal(bytes(np.array(o).data), bytes(a.data)) + + # test array + o = type("o", (object,), + dict(__array__=lambda *x: np.array(100.0, dtype=np.float64)))() + assert_equal(np.array(o, dtype=np.float64), np.array(100.0, np.float64)) + + # test recursion + nested = 1.5 + for i in range(np.MAXDIMS): + nested = [nested] + + # no error + np.array(nested) + + # Exceeds recursion limit + assert_raises(ValueError, np.array, [nested], dtype=np.float64) + + # Try with lists... 
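+    # (each level of list nesting maps onto one axis of the result, and
+    # None coerces to NaN under a float dtype, matching np.full with np.nan)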
+    assert_equal(np.array([None] * 10, dtype=np.float64),
+                 np.full((10,), np.nan, dtype=np.float64))
+    assert_equal(np.array([[None]] * 10, dtype=np.float64),
+                 np.full((10, 1), np.nan, dtype=np.float64))
+    assert_equal(np.array([[None] * 10], dtype=np.float64),
+                 np.full((1, 10), np.nan, dtype=np.float64))
+    assert_equal(np.array([[None] * 10] * 10, dtype=np.float64),
+                 np.full((10, 10), np.nan, dtype=np.float64))
+
+    assert_equal(np.array([1.0] * 10, dtype=np.float64),
+                 np.ones((10,), dtype=np.float64))
+    assert_equal(np.array([[1.0]] * 10, dtype=np.float64),
+                 np.ones((10, 1), dtype=np.float64))
+    assert_equal(np.array([[1.0] * 10], dtype=np.float64),
+                 np.ones((1, 10), dtype=np.float64))
+    assert_equal(np.array([[1.0] * 10] * 10, dtype=np.float64),
+                 np.ones((10, 10), dtype=np.float64))
+
+    # Try with tuples
+    assert_equal(np.array((None,) * 10, dtype=np.float64),
+                 np.full((10,), np.nan, dtype=np.float64))
+    assert_equal(np.array([(None,)] * 10, dtype=np.float64),
+                 np.full((10, 1), np.nan, dtype=np.float64))
+    assert_equal(np.array([(None,) * 10], dtype=np.float64),
+                 np.full((1, 10), np.nan, dtype=np.float64))
+    assert_equal(np.array([(None,) * 10] * 10, dtype=np.float64),
+                 np.full((10, 10), np.nan, dtype=np.float64))
+
+    assert_equal(np.array((1.0,) * 10, dtype=np.float64),
+                 np.ones((10,), dtype=np.float64))
+    assert_equal(np.array([(1.0,)] * 10, dtype=np.float64),
+                 np.ones((10, 1), dtype=np.float64))
+    assert_equal(np.array([(1.0,) * 10], dtype=np.float64),
+                 np.ones((1, 10), dtype=np.float64))
+    assert_equal(np.array([(1.0,) * 10] * 10, dtype=np.float64),
+                 np.ones((10, 10), dtype=np.float64))
+
+@pytest.mark.parametrize("array", [True, False])
+def test_array_impossible_casts(array):
+    # All builtin types can be force-cast, at least theoretically,
+    # but user dtypes cannot necessarily.
+    rt = rational(1, 2)
+    if array:
+        rt = np.array(rt)
+    with assert_raises(TypeError):
+        np.array(rt, dtype="M8")
+
+
+def test_fastCopyAndTranspose():
+    # 0D array
+    a = np.array(2)
+    b = np.fastCopyAndTranspose(a)
+    assert_equal(b, a.T)
+    assert_(b.flags.owndata)
+
+    # 1D array
+    a = np.array([3, 2, 7, 0])
+    b = np.fastCopyAndTranspose(a)
+    assert_equal(b, a.T)
+    assert_(b.flags.owndata)
+
+    # 2D array
+    a = np.arange(6).reshape(2, 3)
+    b = np.fastCopyAndTranspose(a)
+    assert_equal(b, a.T)
+    assert_(b.flags.owndata)
+
+def test_array_astype():
+    a = np.arange(6, dtype='f4').reshape(2, 3)
+    # Default behavior: allows unsafe casts, keeps memory layout,
+    # always copies.
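+    # For instance, the default casting='unsafe' lets a float -> integer
+    # astype truncate toward zero instead of raising:
+    #
+    #     >>> np.float32(2.75).astype(np.int32)
+    #     2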
+ b = a.astype('i4') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('i4')) + assert_equal(a.strides, b.strides) + b = a.T.astype('i4') + assert_equal(a.T, b) + assert_equal(b.dtype, np.dtype('i4')) + assert_equal(a.T.strides, b.strides) + b = a.astype('f4') + assert_equal(a, b) + assert_(not (a is b)) + + # copy=False parameter can sometimes skip a copy + b = a.astype('f4', copy=False) + assert_(a is b) + + # order parameter allows overriding of the memory layout, + # forcing a copy if the layout is wrong + b = a.astype('f4', order='F', copy=False) + assert_equal(a, b) + assert_(not (a is b)) + assert_(b.flags.f_contiguous) + + b = a.astype('f4', order='C', copy=False) + assert_equal(a, b) + assert_(a is b) + assert_(b.flags.c_contiguous) + + # casting parameter allows catching bad casts + b = a.astype('c8', casting='safe') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('c8')) + + assert_raises(TypeError, a.astype, 'i4', casting='safe') + + # subok=False passes through a non-subclassed array + b = a.astype('f4', subok=0, copy=False) + assert_(a is b) + + class MyNDArray(np.ndarray): + pass + + a = np.array([[0, 1, 2], [3, 4, 5]], dtype='f4').view(MyNDArray) + + # subok=True passes through a subclass + b = a.astype('f4', subok=True, copy=False) + assert_(a is b) + + # subok=True is default, and creates a subtype on a cast + b = a.astype('i4', copy=False) + assert_equal(a, b) + assert_equal(type(b), MyNDArray) + + # subok=False never returns a subclass + b = a.astype('f4', subok=False, copy=False) + assert_equal(a, b) + assert_(not (a is b)) + assert_(type(b) is not MyNDArray) + + # Make sure converting from string object to fixed length string + # does not truncate. + a = np.array([b'a'*100], dtype='O') + b = a.astype('S') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('S100')) + a = np.array([u'a'*100], dtype='O') + b = a.astype('U') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('U100')) + + # Same test as above but for strings shorter than 64 characters + a = np.array([b'a'*10], dtype='O') + b = a.astype('S') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('S10')) + a = np.array([u'a'*10], dtype='O') + b = a.astype('U') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('U10')) + + a = np.array(123456789012345678901234567890, dtype='O').astype('S') + assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30')) + a = np.array(123456789012345678901234567890, dtype='O').astype('U') + assert_array_equal(a, np.array(u'1234567890' * 3, dtype='U30')) + + a = np.array([123456789012345678901234567890], dtype='O').astype('S') + assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30')) + a = np.array([123456789012345678901234567890], dtype='O').astype('U') + assert_array_equal(a, np.array(u'1234567890' * 3, dtype='U30')) + + a = np.array(123456789012345678901234567890, dtype='S') + assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30')) + a = np.array(123456789012345678901234567890, dtype='U') + assert_array_equal(a, np.array(u'1234567890' * 3, dtype='U30')) + + a = np.array(u'a\u0140', dtype='U') + b = np.ndarray(buffer=a, dtype='uint32', shape=2) + assert_(b.size == 2) + + a = np.array([1000], dtype='i4') + assert_raises(TypeError, a.astype, 'S1', casting='safe') + + a = np.array(1000, dtype='i4') + assert_raises(TypeError, a.astype, 'U1', casting='safe') + +@pytest.mark.parametrize("dt", ["S", "U"]) +def test_array_astype_to_string_discovery_empty(dt): + # See also gh-19085 + arr = np.array([""], dtype=object) + # Note, the itemsize is the `0 
-> 1` logic, which should change.
+    # The important part of the test is rather that it does not error.
+    assert arr.astype(dt).dtype.itemsize == np.dtype(f"{dt}1").itemsize
+
+    # check the same thing for `np.can_cast` (since it accepts arrays)
+    assert np.can_cast(arr, dt, casting="unsafe")
+    assert not np.can_cast(arr, dt, casting="same_kind")
+    # as well as for the object as a descriptor:
+    assert np.can_cast("O", dt, casting="unsafe")
+
+@pytest.mark.parametrize("dt", ["d", "f", "S13", "U32"])
+def test_array_astype_to_void(dt):
+    dt = np.dtype(dt)
+    arr = np.array([], dtype=dt)
+    assert arr.astype("V").dtype.itemsize == dt.itemsize
+
+def test_object_array_astype_to_void():
+    # This is different to `test_array_astype_to_void` as object arrays
+    # are inspected. The default void is "V8" (8 is the length of a double).
+    arr = np.array([], dtype="O").astype("V")
+    assert arr.dtype == "V8"
+
+@pytest.mark.parametrize("t",
+    np.sctypes['uint'] + np.sctypes['int'] + np.sctypes['float']
+)
+def test_array_astype_warning(t):
+    # test ComplexWarning when casting from complex to float or int
+    a = np.array(10, dtype=np.complex_)
+    assert_warns(np.ComplexWarning, a.astype, t)
+
+@pytest.mark.parametrize(["dtype", "out_dtype"],
+        [(np.bytes_, np.bool_),
+         (np.unicode_, np.bool_),
+         (np.dtype("S10,S9"), np.dtype("?,?"))])
+def test_string_to_boolean_cast(dtype, out_dtype):
+    """
+    Currently, for `astype` strings are cast to booleans effectively by
+    calling `bool(int(string))`. This is not consistent (see gh-9875) and
+    will eventually be deprecated.
+    """
+    arr = np.array(["10", "10\0\0\0", "0\0\0", "0"], dtype=dtype)
+    expected = np.array([True, True, False, False], dtype=out_dtype)
+    assert_array_equal(arr.astype(out_dtype), expected)
+
+@pytest.mark.parametrize(["dtype", "out_dtype"],
+        [(np.bytes_, np.bool_),
+         (np.unicode_, np.bool_),
+         (np.dtype("S10,S9"), np.dtype("?,?"))])
+def test_string_to_boolean_cast_errors(dtype, out_dtype):
+    """
+    These currently error out, since cast to integers fails, but should not
+    error out in the future.
+ """ + for invalid in ["False", "True", "", "\0", "non-empty"]: + arr = np.array([invalid], dtype=dtype) + with assert_raises(ValueError): + arr.astype(out_dtype) + +@pytest.mark.parametrize("str_type", [str, bytes, np.str_, np.unicode_]) +@pytest.mark.parametrize("scalar_type", + [np.complex64, np.complex128, np.clongdouble]) +def test_string_to_complex_cast(str_type, scalar_type): + value = scalar_type(b"1+3j") + assert scalar_type(value) == 1+3j + assert np.array([value], dtype=object).astype(scalar_type)[()] == 1+3j + assert np.array(value).astype(scalar_type)[()] == 1+3j + arr = np.zeros(1, dtype=scalar_type) + arr[0] = value + assert arr[0] == 1+3j + +@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) +def test_none_to_nan_cast(dtype): + # Note that at the time of writing this test, the scalar constructors + # reject None + arr = np.zeros(1, dtype=dtype) + arr[0] = None + assert np.isnan(arr)[0] + assert np.isnan(np.array(None, dtype=dtype))[()] + assert np.isnan(np.array([None], dtype=dtype))[0] + assert np.isnan(np.array(None).astype(dtype))[()] + +def test_copyto_fromscalar(): + a = np.arange(6, dtype='f4').reshape(2, 3) + + # Simple copy + np.copyto(a, 1.5) + assert_equal(a, 1.5) + np.copyto(a.T, 2.5) + assert_equal(a, 2.5) + + # Where-masked copy + mask = np.array([[0, 1, 0], [0, 0, 1]], dtype='?') + np.copyto(a, 3.5, where=mask) + assert_equal(a, [[2.5, 3.5, 2.5], [2.5, 2.5, 3.5]]) + mask = np.array([[0, 1], [1, 1], [1, 0]], dtype='?') + np.copyto(a.T, 4.5, where=mask) + assert_equal(a, [[2.5, 4.5, 4.5], [4.5, 4.5, 3.5]]) + +def test_copyto(): + a = np.arange(6, dtype='i4').reshape(2, 3) + + # Simple copy + np.copyto(a, [[3, 1, 5], [6, 2, 1]]) + assert_equal(a, [[3, 1, 5], [6, 2, 1]]) + + # Overlapping copy should work + np.copyto(a[:, :2], a[::-1, 1::-1]) + assert_equal(a, [[2, 6, 5], [1, 3, 1]]) + + # Defaults to 'same_kind' casting + assert_raises(TypeError, np.copyto, a, 1.5) + + # Force a copy with 'unsafe' casting, truncating 1.5 to 1 + np.copyto(a, 1.5, casting='unsafe') + assert_equal(a, 1) + + # Copying with a mask + np.copyto(a, 3, where=[True, False, True]) + assert_equal(a, [[3, 1, 3], [3, 1, 3]]) + + # Casting rule still applies with a mask + assert_raises(TypeError, np.copyto, a, 3.5, where=[True, False, True]) + + # Lists of integer 0's and 1's is ok too + np.copyto(a, 4.0, casting='unsafe', where=[[0, 1, 1], [1, 0, 0]]) + assert_equal(a, [[3, 4, 4], [4, 1, 3]]) + + # Overlapping copy with mask should work + np.copyto(a[:, :2], a[::-1, 1::-1], where=[[0, 1], [1, 1]]) + assert_equal(a, [[3, 4, 4], [4, 3, 3]]) + + # 'dst' must be an array + assert_raises(TypeError, np.copyto, [1, 2, 3], [2, 3, 4]) + +def test_copyto_permut(): + # test explicit overflow case + pad = 500 + l = [True] * pad + [True, True, True, True] + r = np.zeros(len(l)-pad) + d = np.ones(len(l)-pad) + mask = np.array(l)[pad:] + np.copyto(r, d, where=mask[::-1]) + + # test all permutation of possible masks, 9 should be sufficient for + # current 4 byte unrolled code + power = 9 + d = np.ones(power) + for i in range(2**power): + r = np.zeros(power) + l = [(i & x) != 0 for x in range(power)] + mask = np.array(l) + np.copyto(r, d, where=mask) + assert_array_equal(r == 1, l) + assert_equal(r.sum(), sum(l)) + + r = np.zeros(power) + np.copyto(r, d, where=mask[::-1]) + assert_array_equal(r == 1, l[::-1]) + assert_equal(r.sum(), sum(l)) + + r = np.zeros(power) + np.copyto(r[::2], d[::2], where=mask[::2]) + assert_array_equal(r[::2] == 1, l[::2]) + assert_equal(r[::2].sum(), sum(l[::2])) + + r = 
np.zeros(power) + np.copyto(r[::2], d[::2], where=mask[::-2]) + assert_array_equal(r[::2] == 1, l[::-2]) + assert_equal(r[::2].sum(), sum(l[::-2])) + + for c in [0xFF, 0x7F, 0x02, 0x10]: + r = np.zeros(power) + mask = np.array(l) + imask = np.array(l).view(np.uint8) + imask[mask != 0] = c + np.copyto(r, d, where=mask) + assert_array_equal(r == 1, l) + assert_equal(r.sum(), sum(l)) + + r = np.zeros(power) + np.copyto(r, d, where=True) + assert_equal(r.sum(), r.size) + r = np.ones(power) + d = np.zeros(power) + np.copyto(r, d, where=False) + assert_equal(r.sum(), r.size) + +def test_copy_order(): + a = np.arange(24).reshape(2, 1, 3, 4) + b = a.copy(order='F') + c = np.arange(24).reshape(2, 1, 4, 3).swapaxes(2, 3) + + def check_copy_result(x, y, ccontig, fcontig, strides=False): + assert_(not (x is y)) + assert_equal(x, y) + assert_equal(res.flags.c_contiguous, ccontig) + assert_equal(res.flags.f_contiguous, fcontig) + # This check is impossible only because + # NPY_RELAXED_STRIDES_CHECKING changes the strides actively + if not NPY_RELAXED_STRIDES_CHECKING: + if strides: + assert_equal(x.strides, y.strides) + else: + assert_(x.strides != y.strides) + + # Validate the initial state of a, b, and c + assert_(a.flags.c_contiguous) + assert_(not a.flags.f_contiguous) + assert_(not b.flags.c_contiguous) + assert_(b.flags.f_contiguous) + assert_(not c.flags.c_contiguous) + assert_(not c.flags.f_contiguous) + + # Copy with order='C' + res = a.copy(order='C') + check_copy_result(res, a, ccontig=True, fcontig=False, strides=True) + res = b.copy(order='C') + check_copy_result(res, b, ccontig=True, fcontig=False, strides=False) + res = c.copy(order='C') + check_copy_result(res, c, ccontig=True, fcontig=False, strides=False) + res = np.copy(a, order='C') + check_copy_result(res, a, ccontig=True, fcontig=False, strides=True) + res = np.copy(b, order='C') + check_copy_result(res, b, ccontig=True, fcontig=False, strides=False) + res = np.copy(c, order='C') + check_copy_result(res, c, ccontig=True, fcontig=False, strides=False) + + # Copy with order='F' + res = a.copy(order='F') + check_copy_result(res, a, ccontig=False, fcontig=True, strides=False) + res = b.copy(order='F') + check_copy_result(res, b, ccontig=False, fcontig=True, strides=True) + res = c.copy(order='F') + check_copy_result(res, c, ccontig=False, fcontig=True, strides=False) + res = np.copy(a, order='F') + check_copy_result(res, a, ccontig=False, fcontig=True, strides=False) + res = np.copy(b, order='F') + check_copy_result(res, b, ccontig=False, fcontig=True, strides=True) + res = np.copy(c, order='F') + check_copy_result(res, c, ccontig=False, fcontig=True, strides=False) + + # Copy with order='K' + res = a.copy(order='K') + check_copy_result(res, a, ccontig=True, fcontig=False, strides=True) + res = b.copy(order='K') + check_copy_result(res, b, ccontig=False, fcontig=True, strides=True) + res = c.copy(order='K') + check_copy_result(res, c, ccontig=False, fcontig=False, strides=True) + res = np.copy(a, order='K') + check_copy_result(res, a, ccontig=True, fcontig=False, strides=True) + res = np.copy(b, order='K') + check_copy_result(res, b, ccontig=False, fcontig=True, strides=True) + res = np.copy(c, order='K') + check_copy_result(res, c, ccontig=False, fcontig=False, strides=True) + +def test_contiguous_flags(): + a = np.ones((4, 4, 1))[::2,:,:] + if NPY_RELAXED_STRIDES_CHECKING: + a.strides = a.strides[:2] + (-123,) + b = np.ones((2, 2, 1, 2, 2)).swapaxes(3, 4) + + def check_contig(a, ccontig, fcontig): + assert_(a.flags.c_contiguous == 
ccontig)
+        assert_(a.flags.f_contiguous == fcontig)
+
+    # Check if new arrays are correct:
+    check_contig(a, False, False)
+    check_contig(b, False, False)
+    if NPY_RELAXED_STRIDES_CHECKING:
+        check_contig(np.empty((2, 2, 0, 2, 2)), True, True)
+        check_contig(np.array([[[1], [2]]], order='F'), True, True)
+    else:
+        check_contig(np.empty((2, 2, 0, 2, 2)), True, False)
+        check_contig(np.array([[[1], [2]]], order='F'), False, True)
+    check_contig(np.empty((2, 2)), True, False)
+    check_contig(np.empty((2, 2), order='F'), False, True)
+
+    # Check that np.array creates correct contiguous flags:
+    check_contig(np.array(a, copy=False), False, False)
+    check_contig(np.array(a, copy=False, order='C'), True, False)
+    check_contig(np.array(a, ndmin=4, copy=False, order='F'), False, True)
+
+    if NPY_RELAXED_STRIDES_CHECKING:
+        # Check slicing update of flags and :
+        check_contig(a[0], True, True)
+        check_contig(a[None, ::4, ..., None], True, True)
+        check_contig(b[0, 0, ...], False, True)
+        check_contig(b[:,:, 0:0,:,:], True, True)
+    else:
+        # Check slicing update of flags:
+        check_contig(a[0], True, False)
+        # Would be nice if this was C-Contiguous:
+        check_contig(a[None, 0, ..., None], False, False)
+        check_contig(b[0, 0, 0, ...], False, True)
+
+    # Test ravel and squeeze.
+    check_contig(a.ravel(), True, True)
+    check_contig(np.ones((1, 3, 1)).squeeze(), True, True)
+
+def test_broadcast_arrays():
+    # Test user defined dtypes
+    a = np.array([(1, 2, 3)], dtype='u4,u4,u4')
+    b = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4')
+    result = np.broadcast_arrays(a, b)
+    assert_equal(result[0], np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype='u4,u4,u4'))
+    assert_equal(result[1], np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4'))
+
+@pytest.mark.parametrize(["shape", "fill_value", "expected_output"],
+        [((2, 2), [5.0, 6.0], np.array([[5.0, 6.0], [5.0, 6.0]])),
+         ((3, 2), [1.0, 2.0], np.array([[1.0, 2.0], [1.0, 2.0], [1.0, 2.0]]))])
+def test_full_from_list(shape, fill_value, expected_output):
+    output = np.full(shape, fill_value)
+    assert_equal(output, expected_output)
diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_argparse.py b/MLPY/Lib/site-packages/numpy/core/tests/test_argparse.py
new file mode 100644
index 0000000000000000000000000000000000000000..d499e4eba6cfac61138f5971b2ea384b1bccda31
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/core/tests/test_argparse.py
@@ -0,0 +1,62 @@
+"""
+Tests for the private NumPy argument parsing functionality.
+They mainly exist to ensure good test coverage without having to try the
+weirder cases on actual numpy functions but test them in one place.
+
+The test function is defined in C to be equivalent to (errors may not always
+match exactly, and could be adjusted):
+
+    def func(arg1, /, arg2, *, arg3):
+        i = integer(arg1)  # reproducing the 'i' parsing in Python.
+        return None
+"""
+
+import pytest
+
+import numpy as np
+from numpy.core._multiarray_tests import argparse_example_function as func
+
+
+def test_invalid_integers():
+    with pytest.raises(TypeError,
+            match="integer argument expected, got float"):
+        func(1.)
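+    # 2**100 does not fit in a C long, so the 'i' conversion overflows
+    # instead of raising the TypeError used for non-integers: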
+ with pytest.raises(OverflowError): + func(2**100) + + +def test_missing_arguments(): + with pytest.raises(TypeError, + match="missing required positional argument 0"): + func() + with pytest.raises(TypeError, + match="missing required positional argument 0"): + func(arg2=1, arg3=4) + with pytest.raises(TypeError, + match=r"missing required argument \'arg2\' \(pos 1\)"): + func(1, arg3=5) + + +def test_too_many_positional(): + # the second argument is positional but can be passed as keyword. + with pytest.raises(TypeError, + match="takes from 2 to 3 positional arguments but 4 were given"): + func(1, 2, 3, 4) + + +def test_multiple_values(): + with pytest.raises(TypeError, + match=r"given by name \('arg2'\) and position \(position 1\)"): + func(1, 2, arg2=3) + + +def test_string_fallbacks(): + # We can (currently?) use numpy strings to test the "slow" fallbacks + # that should normally not be taken due to string interning. + arg2 = np.unicode_("arg2") + missing_arg = np.unicode_("missing_arg") + func(1, **{arg2: 3}) + with pytest.raises(TypeError, + match="got an unexpected keyword argument 'missing_arg'"): + func(2, **{missing_arg: 3}) + diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_array_coercion.py b/MLPY/Lib/site-packages/numpy/core/tests/test_array_coercion.py new file mode 100644 index 0000000000000000000000000000000000000000..a4ed23f1d88957851babf89a2aa90b938ec591a9 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/test_array_coercion.py @@ -0,0 +1,734 @@ +""" +Tests for array coercion, mainly through testing `np.array` results directly. +Note that other such tests exist e.g. in `test_api.py` and many corner-cases +are tested (sometimes indirectly) elsewhere. +""" + +import pytest +from pytest import param + +from itertools import product + +import numpy as np +from numpy.core._rational_tests import rational +from numpy.core._multiarray_umath import _discover_array_parameters + +from numpy.testing import ( + assert_array_equal, assert_warns, IS_PYPY) + + +def arraylikes(): + """ + Generator for functions converting an array into various array-likes. 
+    If full is True (default), it includes array-likes not capable of
+    handling all dtypes.
+    """
+    # base array:
+    def ndarray(a):
+        return a
+
+    yield param(ndarray, id="ndarray")
+
+    # subclass:
+    class MyArr(np.ndarray):
+        pass
+
+    def subclass(a):
+        return a.view(MyArr)
+
+    yield subclass
+
+    class _SequenceLike():
+        # We are giving a warning that array-likes were also expected to be
+        # sequence-like in `np.array([array_like])`; this can be removed
+        # when the deprecation has expired (started NumPy 1.20)
+        def __len__(self):
+            raise TypeError
+
+        def __getitem__(self):
+            raise TypeError
+
+    # Array-interface
+    class ArrayDunder(_SequenceLike):
+        def __init__(self, a):
+            self.a = a
+
+        def __array__(self, dtype=None):
+            return self.a
+
+    yield param(ArrayDunder, id="__array__")
+
+    # memory-view
+    yield param(memoryview, id="memoryview")
+
+    # Array-interface
+    class ArrayInterface(_SequenceLike):
+        def __init__(self, a):
+            self.a = a  # need to hold on to keep interface valid
+            self.__array_interface__ = a.__array_interface__
+
+    yield param(ArrayInterface, id="__array_interface__")
+
+    # Array-Struct
+    class ArrayStruct(_SequenceLike):
+        def __init__(self, a):
+            self.a = a  # need to hold on to keep struct valid
+            self.__array_struct__ = a.__array_struct__
+
+    yield param(ArrayStruct, id="__array_struct__")
+
+
+def scalar_instances(times=True, extended_precision=True, user_dtype=True):
+    # Hard-coded list of scalar instances.
+    # Floats:
+    yield param(np.sqrt(np.float16(5)), id="float16")
+    yield param(np.sqrt(np.float32(5)), id="float32")
+    yield param(np.sqrt(np.float64(5)), id="float64")
+    if extended_precision:
+        yield param(np.sqrt(np.longdouble(5)), id="longdouble")
+
+    # Complex:
+    yield param(np.sqrt(np.complex64(2+3j)), id="complex64")
+    yield param(np.sqrt(np.complex128(2+3j)), id="complex128")
+    if extended_precision:
+        yield param(np.sqrt(np.longcomplex(2+3j)), id="clongdouble")
+
+    # Bool:
+    # XFAIL: Bool should be added, but has some bad properties when it
+    # comes to strings, see also gh-9875
+    # yield param(np.bool_(0), id="bool")
+
+    # Integers:
+    yield param(np.int8(2), id="int8")
+    yield param(np.int16(2), id="int16")
+    yield param(np.int32(2), id="int32")
+    yield param(np.int64(2), id="int64")
+
+    yield param(np.uint8(2), id="uint8")
+    yield param(np.uint16(2), id="uint16")
+    yield param(np.uint32(2), id="uint32")
+    yield param(np.uint64(2), id="uint64")
+
+    # Rational:
+    if user_dtype:
+        yield param(rational(1, 2), id="rational")
+
+    # Cannot create a structured void scalar directly:
+    structured = np.array([(1, 3)], "i,i")[0]
+    assert isinstance(structured, np.void)
+    assert structured.dtype == np.dtype("i,i")
+    yield param(structured, id="structured")
+
+    if times:
+        # Datetimes and timedelta
+        yield param(np.timedelta64(2), id="timedelta64[generic]")
+        yield param(np.timedelta64(23, "s"), id="timedelta64[s]")
+        yield param(np.timedelta64("NaT", "s"), id="timedelta64[s](NaT)")
+
+        yield param(np.datetime64("NaT"), id="datetime64[generic](NaT)")
+        yield param(np.datetime64("2020-06-07 12:43", "ms"), id="datetime64[ms]")
+
+    # Strings and unstructured void:
+    yield param(np.bytes_(b"1234"), id="bytes")
+    yield param(np.unicode_("2345"), id="unicode")
+    yield param(np.void(b"4321"), id="unstructured_void")
+
+
+def is_parametric_dtype(dtype):
+    """Returns True if the dtype is a parametric legacy dtype (itemsize
+    is 0, or a datetime without units)
+    """
+    if dtype.itemsize == 0:
+        return True
+    if issubclass(dtype.type, (np.datetime64, np.timedelta64)):
+        if
dtype.name.endswith("64"): + # Generic time units + return True + return False + + +class TestStringDiscovery: + @pytest.mark.parametrize("obj", + [object(), 1.2, 10**43, None, "string"], + ids=["object", "1.2", "10**43", "None", "string"]) + def test_basic_stringlength(self, obj): + length = len(str(obj)) + expected = np.dtype(f"S{length}") + + assert np.array(obj, dtype="S").dtype == expected + assert np.array([obj], dtype="S").dtype == expected + + # A nested array is also discovered correctly + arr = np.array(obj, dtype="O") + assert np.array(arr, dtype="S").dtype == expected + # Check that .astype() behaves identical + assert arr.astype("S").dtype == expected + + @pytest.mark.parametrize("obj", + [object(), 1.2, 10**43, None, "string"], + ids=["object", "1.2", "10**43", "None", "string"]) + def test_nested_arrays_stringlength(self, obj): + length = len(str(obj)) + expected = np.dtype(f"S{length}") + arr = np.array(obj, dtype="O") + assert np.array([arr, arr], dtype="S").dtype == expected + + @pytest.mark.parametrize("arraylike", arraylikes()) + def test_unpack_first_level(self, arraylike): + # We unpack exactly one level of array likes + obj = np.array([None]) + obj[0] = np.array(1.2) + # the length of the included item, not of the float dtype + length = len(str(obj[0])) + expected = np.dtype(f"S{length}") + + obj = arraylike(obj) + # casting to string usually calls str(obj) + arr = np.array([obj], dtype="S") + assert arr.shape == (1, 1) + assert arr.dtype == expected + + +class TestScalarDiscovery: + def test_void_special_case(self): + # Void dtypes with structures discover tuples as elements + arr = np.array((1, 2, 3), dtype="i,i,i") + assert arr.shape == () + arr = np.array([(1, 2, 3)], dtype="i,i,i") + assert arr.shape == (1,) + + def test_char_special_case(self): + arr = np.array("string", dtype="c") + assert arr.shape == (6,) + assert arr.dtype.char == "c" + arr = np.array(["string"], dtype="c") + assert arr.shape == (1, 6) + assert arr.dtype.char == "c" + + def test_char_special_case_deep(self): + # Check that the character special case errors correctly if the + # array is too deep: + nested = ["string"] # 2 dimensions (due to string being sequence) + for i in range(np.MAXDIMS - 2): + nested = [nested] + + arr = np.array(nested, dtype='c') + assert arr.shape == (1,) * (np.MAXDIMS - 1) + (6,) + with pytest.raises(ValueError): + np.array([nested], dtype="c") + + def test_unknown_object(self): + arr = np.array(object()) + assert arr.shape == () + assert arr.dtype == np.dtype("O") + + @pytest.mark.parametrize("scalar", scalar_instances()) + def test_scalar(self, scalar): + arr = np.array(scalar) + assert arr.shape == () + assert arr.dtype == scalar.dtype + + arr = np.array([[scalar, scalar]]) + assert arr.shape == (1, 2) + assert arr.dtype == scalar.dtype + + # Additionally to string this test also runs into a corner case + # with datetime promotion (the difference is the promotion order). 
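+    # Coercion of a two-element list is expected to agree with
+    # `np.promote_types`, which may pick a third dtype entirely, e.g.:
+    #
+    #     >>> np.promote_types(np.int8, np.uint64)
+    #     dtype('float64')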
+    @pytest.mark.filterwarnings("ignore:Promotion of numbers:FutureWarning")
+    def test_scalar_promotion(self):
+        for sc1, sc2 in product(scalar_instances(), scalar_instances()):
+            sc1, sc2 = sc1.values[0], sc2.values[0]
+            # test all combinations:
+            try:
+                arr = np.array([sc1, sc2])
+            except (TypeError, ValueError):
+                # The promotion between two times can fail
+                # XFAIL (ValueError): Some object casts are currently undefined
+                continue
+            assert arr.shape == (2,)
+            try:
+                dt1, dt2 = sc1.dtype, sc2.dtype
+                expected_dtype = np.promote_types(dt1, dt2)
+                assert arr.dtype == expected_dtype
+            except TypeError as e:
+                # Will currently always go to object dtype
+                assert arr.dtype == np.dtype("O")
+
+    @pytest.mark.parametrize("scalar", scalar_instances())
+    def test_scalar_coercion(self, scalar):
+        # This tests various scalar coercion paths, mainly for the numerical
+        # types. It includes some paths not directly related to `np.array`.
+        if isinstance(scalar, np.inexact):
+            # Ensure we have a full-precision number if available
+            scalar = type(scalar)((scalar * 2)**0.5)
+
+        if type(scalar) is rational:
+            # Rational generally fails due to a missing cast. In the future
+            # object casts should automatically be defined based on `setitem`.
+            pytest.xfail("Rational to object cast is undefined currently.")
+
+        # Use casting from object:
+        arr = np.array(scalar, dtype=object).astype(scalar.dtype)
+
+        # Test various ways to create an array containing this scalar:
+        arr1 = np.array(scalar).reshape(1)
+        arr2 = np.array([scalar])
+        arr3 = np.empty(1, dtype=scalar.dtype)
+        arr3[0] = scalar
+        arr4 = np.empty(1, dtype=scalar.dtype)
+        arr4[:] = [scalar]
+        # All of these methods should yield the same results
+        assert_array_equal(arr, arr1)
+        assert_array_equal(arr, arr2)
+        assert_array_equal(arr, arr3)
+        assert_array_equal(arr, arr4)
+
+    @pytest.mark.xfail(IS_PYPY, reason="`int(np.complex128(3))` fails on PyPy")
+    @pytest.mark.filterwarnings("ignore::numpy.ComplexWarning")
+    @pytest.mark.parametrize("cast_to", scalar_instances())
+    def test_scalar_coercion_same_as_cast_and_assignment(self, cast_to):
+        """
+        Test that in most cases:
+          * `np.array(scalar, dtype=dtype)`
+          * `np.empty((), dtype=dtype)[()] = scalar`
+          * `np.array(scalar).astype(dtype)`
+        should behave the same. The only exceptions are parametric dtypes
+        (mainly datetime/timedelta without unit) and void without fields.
+        """
+        dtype = cast_to.dtype  # use to parametrize only the target dtype
+
+        for scalar in scalar_instances(times=False):
+            scalar = scalar.values[0]
+
+            if dtype.type == np.void:
+                if scalar.dtype.fields is not None and dtype.fields is None:
+                    # Here, coercion to "V6" works, but the cast fails.
+                    # Since the types are identical, SETITEM takes care of
+                    # this, but has different rules than the cast.
+                    with pytest.raises(TypeError):
+                        np.array(scalar).astype(dtype)
+                    np.array(scalar, dtype=dtype)
+                    np.array([scalar], dtype=dtype)
+                    continue
+
+            # The main test, we first try to use casting and if it succeeds
+            # continue below testing that things are the same, otherwise
+            # test that the alternative paths at least also fail.
+            try:
+                cast = np.array(scalar).astype(dtype)
+            except (TypeError, ValueError, RuntimeError):
+                # coercion should also raise (error type may change)
+                with pytest.raises(Exception):
+                    np.array(scalar, dtype=dtype)
+
+                if (isinstance(scalar, rational) and
+                        np.issubdtype(dtype, np.signedinteger)):
+                    return
+
+                with pytest.raises(Exception):
+                    np.array([scalar], dtype=dtype)
+                # assignment should also raise
+                res = np.zeros((), dtype=dtype)
+                with pytest.raises(Exception):
+                    res[()] = scalar
+
+                return
+
+            # Non error path:
+            arr = np.array(scalar, dtype=dtype)
+            assert_array_equal(arr, cast)
+            # assignment behaves the same
+            ass = np.zeros((), dtype=dtype)
+            ass[()] = scalar
+            assert_array_equal(ass, cast)
+
+    @pytest.mark.parametrize("dtype_char", np.typecodes["All"])
+    def test_default_dtype_instance(self, dtype_char):
+        if dtype_char in "SU":
+            dtype = np.dtype(dtype_char + "1")
+        elif dtype_char == "V":
+            # Legacy behaviour was to use V8. The reason was float64 being the
+            # default dtype and that having 8 bytes.
+            dtype = np.dtype("V8")
+        else:
+            dtype = np.dtype(dtype_char)
+
+        discovered_dtype, _ = _discover_array_parameters([], type(dtype))
+
+        assert discovered_dtype == dtype
+        assert discovered_dtype.itemsize == dtype.itemsize
+
+    @pytest.mark.parametrize("dtype", np.typecodes["Integer"])
+    def test_scalar_to_int_coerce_does_not_cast(self, dtype):
+        """
+        Signed integers are currently different in that they do not cast other
+        NumPy scalars, but instead use scalar.__int__(). The hardcoded
+        exception to this rule is `np.array(scalar, dtype=integer)`.
+        """
+        dtype = np.dtype(dtype)
+        invalid_int = np.ulonglong(-1)
+
+        float_nan = np.float64(np.nan)
+
+        for scalar in [float_nan, invalid_int]:
+            # This is a special case using casting logic and thus not failing:
+            coerced = np.array(scalar, dtype=dtype)
+            cast = np.array(scalar).astype(dtype)
+            assert_array_equal(coerced, cast)
+
+            # However these fail:
+            with pytest.raises((ValueError, OverflowError)):
+                np.array([scalar], dtype=dtype)
+            with pytest.raises((ValueError, OverflowError)):
+                cast[()] = scalar
+
+
+class TestTimeScalars:
+    @pytest.mark.parametrize("dtype", [np.int64, np.float32])
+    @pytest.mark.parametrize("scalar",
+            [param(np.timedelta64("NaT", "s"), id="timedelta64[s](NaT)"),
+             param(np.timedelta64(123, "s"), id="timedelta64[s]"),
+             param(np.datetime64("NaT", "generic"), id="datetime64[generic](NaT)"),
+             param(np.datetime64(1, "D"), id="datetime64[D]")],)
+    def test_coercion_basic(self, dtype, scalar):
+        # Note the `[scalar]` is there because np.array(scalar) uses stricter
+        # `scalar.__int__()` rules for backward compatibility right now.
+        arr = np.array(scalar, dtype=dtype)
+        cast = np.array(scalar).astype(dtype)
+        assert_array_equal(arr, cast)
+
+        ass = np.ones((), dtype=dtype)
+        if issubclass(dtype, np.integer):
+            with pytest.raises(TypeError):
+                # raises, as would np.array([scalar], dtype=dtype); this is
+                # a conversion from times, but follows integer behaviour.
+                ass[()] = scalar
+        else:
+            ass[()] = scalar
+            assert_array_equal(ass, cast)
+
+    @pytest.mark.parametrize("dtype", [np.int64, np.float32])
+    @pytest.mark.parametrize("scalar",
+            [param(np.timedelta64(123, "ns"), id="timedelta64[ns]"),
+             param(np.timedelta64(12, "generic"), id="timedelta64[generic]")])
+    def test_coercion_timedelta_convert_to_number(self, dtype, scalar):
+        # Only "ns" and "generic" timedeltas can be converted to numbers
+        # so these are slightly special.
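+        # (Illustrative assumption: a "generic" timedelta carries no unit at
+        # all, e.g. np.timedelta64(12), which is why it behaves like a plain
+        # integer count here.)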
+        arr = np.array(scalar, dtype=dtype)
+        cast = np.array(scalar).astype(dtype)
+        ass = np.ones((), dtype=dtype)
+        ass[()] = scalar  # succeeds here; other units would raise, as would
+                          # np.array([scalar], dtype=dtype)
+
+        assert_array_equal(arr, cast)
+        assert_array_equal(ass, cast)
+
+    @pytest.mark.parametrize("dtype", ["S6", "U6"])
+    @pytest.mark.parametrize(["val", "unit"],
+            [param(123, "s", id="[s]"), param(123, "D", id="[D]")])
+    def test_coercion_assignment_datetime(self, val, unit, dtype):
+        # String from datetime64 assignment is currently special cased to
+        # never use casting. This is because casting will error in this
+        # case, and traditionally in most cases the behaviour is maintained
+        # like this. (`np.array(scalar, dtype="U6")` would have failed before)
+        # TODO: This discrepancy _should_ be resolved, either by relaxing the
+        # cast, or by deprecating the first part.
+        scalar = np.datetime64(val, unit)
+        dtype = np.dtype(dtype)
+        cut_string = dtype.type(str(scalar)[:6])
+
+        arr = np.array(scalar, dtype=dtype)
+        assert arr[()] == cut_string
+        ass = np.ones((), dtype=dtype)
+        ass[()] = scalar
+        assert ass[()] == cut_string
+
+        with pytest.raises(RuntimeError):
+            # However, unlike the above assignment (which uses
+            # `str(scalar)[:6]` because it is handled by the string DType
+            # and not by casting), the explicit cast fails:
+            np.array(scalar).astype(dtype)
+
+
+    @pytest.mark.parametrize(["val", "unit"],
+            [param(123, "s", id="[s]"), param(123, "D", id="[D]")])
+    def test_coercion_assignment_timedelta(self, val, unit):
+        scalar = np.timedelta64(val, unit)
+
+        # Unlike datetime64, timedelta allows the unsafe cast:
+        np.array(scalar, dtype="S6")
+        cast = np.array(scalar).astype("S6")
+        ass = np.ones((), dtype="S6")
+        ass[()] = scalar
+        expected = scalar.astype("S")[:6]
+        assert cast[()] == expected
+        assert ass[()] == expected
+
+
+class TestNested:
+    def test_nested_simple(self):
+        initial = [1.2]
+        nested = initial
+        for i in range(np.MAXDIMS - 1):
+            nested = [nested]
+
+        arr = np.array(nested, dtype="float64")
+        assert arr.shape == (1,) * np.MAXDIMS
+        with pytest.raises(ValueError):
+            np.array([nested], dtype="float64")
+
+        # We discover object automatically at this time:
+        with assert_warns(np.VisibleDeprecationWarning):
+            arr = np.array([nested])
+        assert arr.dtype == np.dtype("O")
+        assert arr.shape == (1,) * np.MAXDIMS
+        assert arr.item() is initial
+
+    def test_pathological_self_containing(self):
+        # Test that this also works for two nested sequences
+        l = []
+        l.append(l)
+        arr = np.array([l, l, l], dtype=object)
+        assert arr.shape == (3,) + (1,) * (np.MAXDIMS - 1)
+
+        # Also check a ragged case:
+        arr = np.array([l, [None], l], dtype=object)
+        assert arr.shape == (3, 1)
+
+    @pytest.mark.parametrize("arraylike", arraylikes())
+    def test_nested_arraylikes(self, arraylike):
+        # We try storing an array like into an array, but the array-like
+        # will have too many dimensions. This means the shape discovery
+        # decides that the array-like must be treated as an object (a special
+        # case of ragged discovery). The result will be an array with one
+        # dimension less than the maximum dimensions, and the array being
+        # assigned to it (which does work for object or if `float(arraylike)`
+        # works).
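+        # (For context: np.MAXDIMS is 32, so wrapping the (1, 1) array-like
+        # in np.MAXDIMS - 1 list levels yields 33 candidate dimensions, one
+        # more than discovery supports.)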
+        initial = arraylike(np.ones((1, 1)))
+
+        nested = initial
+        for i in range(np.MAXDIMS - 1):
+            nested = [nested]
+
+        with pytest.warns(DeprecationWarning):
+            # It will refuse to assign the array into the float64 result:
+            np.array(nested, dtype="float64")
+
+        # If this is object, we end up assigning a (1, 1) array into (1,)
+        # (due to running out of dimensions); this is currently supported but
+        # a special case which is not ideal.
+        arr = np.array(nested, dtype=object)
+        assert arr.shape == (1,) * np.MAXDIMS
+        assert arr.item() == np.array(initial).item()
+
+    @pytest.mark.parametrize("arraylike", arraylikes())
+    def test_uneven_depth_ragged(self, arraylike):
+        arr = np.arange(4).reshape((2, 2))
+        arr = arraylike(arr)
+
+        # Array is ragged in the second dimension already:
+        out = np.array([arr, [arr]], dtype=object)
+        assert out.shape == (2,)
+        assert out[0] is arr
+        assert type(out[1]) is list
+
+        # Array is ragged in the third dimension:
+        with pytest.raises(ValueError):
+            # This is a broadcast error during assignment, because
+            # the array shape would be (2, 2, 2) but `arr[0, 0] = arr` fails.
+            np.array([arr, [arr, arr]], dtype=object)
+
+    def test_empty_sequence(self):
+        arr = np.array([[], [1], [[1]]], dtype=object)
+        assert arr.shape == (3,)
+
+        # The empty sequence stops further dimension discovery, so the
+        # result shape will be (0,) which leads to an error during:
+        with pytest.raises(ValueError):
+            np.array([[], np.empty((0, 1))], dtype=object)
+
+    def test_array_of_different_depths(self):
+        # When multiple arrays (or array-likes) are included in a
+        # sequence and have different depths, we currently discover
+        # as many dimensions as they share. (see also gh-17224)
+        arr = np.zeros((3, 2))
+        mismatch_first_dim = np.zeros((1, 2))
+        mismatch_second_dim = np.zeros((3, 3))
+
+        dtype, shape = _discover_array_parameters(
+            [arr, mismatch_second_dim], dtype=np.dtype("O"))
+        assert shape == (2, 3)
+
+        dtype, shape = _discover_array_parameters(
+            [arr, mismatch_first_dim], dtype=np.dtype("O"))
+        assert shape == (2,)
+        # The second case is currently supported because the arrays
+        # can be stored as objects:
+        res = np.asarray([arr, mismatch_first_dim], dtype=np.dtype("O"))
+        assert res[0] is arr
+        assert res[1] is mismatch_first_dim
+
+
+class TestBadSequences:
+    # These are tests for bad objects passed into `np.array`; in general
+    # these have undefined behaviour. In the old code they partially worked,
+    # whereas now they will fail. We could (and maybe should) create a copy
+    # of all sequences to be safe against bad actors.
+
+    def test_growing_list(self):
+        # List to coerce, `mylist` will append to it during coercion
+        obj = []
+        class mylist(list):
+            def __len__(self):
+                obj.append([1, 2])
+                return super().__len__()
+
+        obj.append(mylist([1, 2]))
+
+        with pytest.raises(RuntimeError):
+            np.array(obj)
+
+    # Note: We do not test a shrinking list. These do very evil things
+    # and the only way to fix them would be to copy all sequences.
+    # (which may be a real option in the future).
+
+    def test_mutated_list(self):
+        # List to coerce, `mylist` will mutate the first element
+        obj = []
+        class mylist(list):
+            def __len__(self):
+                obj[0] = [2, 3]  # replace with a different list.
+                return super().__len__()
+
+        obj.append([2, 3])
+        obj.append(mylist([1, 2]))
+        with pytest.raises(RuntimeError):
+            np.array(obj)
+
+    def test_replace_0d_array(self):
+        # List to coerce; `baditem` will mutate the first element
+        obj = []
+        class baditem:
+            def __len__(self):
+                obj[0][0] = 2  # replace the cached 0-d array with a plain int
+ raise ValueError("not actually a sequence!") + + def __getitem__(self): + pass + + # Runs into a corner case in the new code, the `array(2)` is cached + # so replacing it invalidates the cache. + obj.append([np.array(2), baditem()]) + with pytest.raises(RuntimeError): + np.array(obj) + + +class TestArrayLikes: + @pytest.mark.parametrize("arraylike", arraylikes()) + def test_0d_object_special_case(self, arraylike): + arr = np.array(0.) + obj = arraylike(arr) + # A single array-like is always converted: + res = np.array(obj, dtype=object) + assert_array_equal(arr, res) + + # But a single 0-D nested array-like never: + res = np.array([obj], dtype=object) + assert res[0] is obj + + def test_0d_generic_special_case(self): + class ArraySubclass(np.ndarray): + def __float__(self): + raise TypeError("e.g. quantities raise on this") + + arr = np.array(0.) + obj = arr.view(ArraySubclass) + res = np.array(obj) + # The subclass is simply cast: + assert_array_equal(arr, res) + + # If the 0-D array-like is included, __float__ is currently + # guaranteed to be used. We may want to change that, quantities + # and masked arrays half make use of this. + with pytest.raises(TypeError): + np.array([obj]) + + # The same holds for memoryview: + obj = memoryview(arr) + res = np.array(obj) + assert_array_equal(arr, res) + with pytest.raises(ValueError): + # The error type does not matter much here. + np.array([obj]) + + def test_arraylike_classes(self): + # The classes of array-likes should generally be acceptable to be + # stored inside a numpy (object) array. This tests all of the + # special attributes (since all are checked during coercion). + arr = np.array(np.int64) + assert arr[()] is np.int64 + arr = np.array([np.int64]) + assert arr[0] is np.int64 + + # This also works for properties/unbound methods: + class ArrayLike: + @property + def __array_interface__(self): + pass + + @property + def __array_struct__(self): + pass + + def __array__(self): + pass + + arr = np.array(ArrayLike) + assert arr[()] is ArrayLike + arr = np.array([ArrayLike]) + assert arr[0] is ArrayLike + + @pytest.mark.skipif( + np.dtype(np.intp).itemsize < 8, reason="Needs 64bit platform") + def test_too_large_array_error_paths(self): + """Test the error paths, including for memory leaks""" + arr = np.array(0, dtype="uint8") + # Guarantees that a contiguous copy won't work: + arr = np.broadcast_to(arr, 2**62) + + for i in range(5): + # repeat, to ensure caching cannot have an effect: + with pytest.raises(MemoryError): + np.array(arr) + with pytest.raises(MemoryError): + np.array([arr]) + + @pytest.mark.parametrize("attribute", + ["__array_interface__", "__array__", "__array_struct__"]) + @pytest.mark.parametrize("error", [RecursionError, MemoryError]) + def test_bad_array_like_attributes(self, attribute, error): + # RecursionError and MemoryError are considered fatal. All errors + # (except AttributeError) should probably be raised in the future, + # but shapely made use of it, so it will require a deprecation. + + class BadInterface: + def __getattr__(self, attr): + if attr == attribute: + raise error + super().__getattr__(attr) + + with pytest.raises(error): + np.array(BadInterface()) + + @pytest.mark.parametrize("error", [RecursionError, MemoryError]) + def test_bad_array_like_bad_length(self, error): + # RecursionError and MemoryError are considered "critical" in + # sequences. We could expand this more generally though. 
+        # (NumPy 1.20)
+        class BadSequence:
+            def __len__(self):
+                raise error
+            def __getitem__(self):
+                # must have getitem to be a Sequence
+                return 1
+
+        with pytest.raises(error):
+            np.array(BadSequence())
+
diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_arraymethod.py b/MLPY/Lib/site-packages/numpy/core/tests/test_arraymethod.py
new file mode 100644
index 0000000000000000000000000000000000000000..c74555e6be0410def02bec46603376fe630e6de6
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/core/tests/test_arraymethod.py
@@ -0,0 +1,58 @@
+"""
+This file tests the generic aspects of ArrayMethod. At the time of writing
+this is private API, but tests for any future public API may be added here.
+"""
+
+import pytest
+
+import numpy as np
+from numpy.core._multiarray_umath import _get_castingimpl as get_castingimpl
+
+
+class TestResolveDescriptors:
+    # Test mainly the error paths of the resolve_descriptors function;
+    # note that the `casting_unittests` tests exercise the non-error paths.
+
+    # Casting implementations are the main/only current user:
+    method = get_castingimpl(type(np.dtype("d")), type(np.dtype("f")))
+
+    @pytest.mark.parametrize("args", [
+        (True,),  # Not a tuple.
+        ((None,),),  # Too few elements
+        ((None, None, None),),  # Too many
+        ((None, None),),  # Input dtype is None, which is invalid.
+        ((np.dtype("d"), True),),  # Output dtype is not a dtype
+        ((np.dtype("f"), None),),  # Input dtype does not match method
+    ])
+    def test_invalid_arguments(self, args):
+        with pytest.raises(TypeError):
+            self.method._resolve_descriptors(*args)
+
+
+class TestSimpleStridedCall:
+    # Test mainly the error paths of the `_simple_strided_call` function;
+    # note that the `casting_unittests` tests exercise the non-error paths.
+
+    # Casting implementations are the main/only current user:
+    method = get_castingimpl(type(np.dtype("d")), type(np.dtype("f")))
+
+    @pytest.mark.parametrize(["args", "error"], [
+        ((True,), TypeError),  # Not a tuple
+        (((None,),), TypeError),  # Too few elements
+        ((None, None), TypeError),  # Inputs are not arrays.
+        (((None, None, None),), TypeError),  # Too many
+        (((np.arange(3), np.arange(3)),), TypeError),  # Incorrect dtypes
+        (((np.ones(3, dtype=">d"), np.ones(3, dtype="<f")),),
+         TypeError),  # Mismatched byte order (assumed; line damaged in extraction)

[... diff content lost in extraction: the remainder of test_arraymethod.py and
the beginning of test_arrayprint.py (file header, imports, earlier tests) ...]

+        x = sub(None)  # (restored; the original line was lost in extraction)
+        y = sub(None)
+        x[()] = y
+        y[()] = x
+        assert_equal(repr(x),
+            'sub(sub(sub(..., dtype=object), dtype=object), dtype=object)')
+        assert_equal(str(x), '...')
+        x[()] = 0  # resolve circular references for garbage collector
+
+        # nested 0d-subclass-object
+        x = sub(None)
+        x[()] = sub(None)
+        assert_equal(repr(x), 'sub(sub(None, dtype=object), dtype=object)')
+        assert_equal(str(x), 'None')
+
+        # gh-10663
+        class DuckCounter(np.ndarray):
+            def __getitem__(self, item):
+                result = super().__getitem__(item)
+                if not isinstance(result, DuckCounter):
+                    result = result[...].view(DuckCounter)
+                return result
+
+            def to_string(self):
+                return {0: 'zero', 1: 'one', 2: 'two'}.get(self.item(), 'many')
+
+            def __str__(self):
+                if self.shape == ():
+                    return self.to_string()
+                else:
+                    fmt = {'all': lambda x: x.to_string()}
+                    return np.array2string(self, formatter=fmt)
+
+        dc = np.arange(5).view(DuckCounter)
+        assert_equal(str(dc), "[zero one two many many]")
+        assert_equal(str(dc[0]), "zero")
+
+    def test_self_containing(self):
+        arr0d = np.array(None)
+        arr0d[()] = arr0d
+        assert_equal(repr(arr0d),
+            'array(array(..., dtype=object), dtype=object)')
+        arr0d[()] = 0  # resolve recursion for garbage collector
+
+        arr1d = np.array([None, None])
+        arr1d[1] = arr1d
+        assert_equal(repr(arr1d),
+            'array([None, array(..., dtype=object)], dtype=object)')
+        arr1d[1] = 0  # resolve recursion for garbage collector
+
+        first = np.array(None)
+        second = np.array(None)
+        first[()] = second
+        second[()] = first
+        assert_equal(repr(first),
+            'array(array(array(..., dtype=object), dtype=object), dtype=object)')
+        first[()] = 0  # resolve circular references for garbage collector
+
+    def test_containing_list(self):
+        # printing square brackets directly would be ambiguous
+        arr1d = np.array([None, None])
+        arr1d[0] = [1, 2]
+        arr1d[1] = [3]
+        assert_equal(repr(arr1d),
+            'array([list([1, 2]), list([3])], dtype=object)')
+
+    def test_void_scalar_recursion(self):
+        # gh-9345
+        repr(np.void(b'test'))  # RecursionError ?
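+
+        # (Illustrative assumption, not upstream code: the guarded call above
+        # renders roughly as
+        #     void(b'\x74\x65\x73\x74')
+        # gh-9345 was about this repr recursing instead of terminating.)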
+ + def test_fieldless_structured(self): + # gh-10366 + no_fields = np.dtype([]) + arr_no_fields = np.empty(4, dtype=no_fields) + assert_equal(repr(arr_no_fields), 'array([(), (), (), ()], dtype=[])') + + +class TestComplexArray: + def test_str(self): + rvals = [0, 1, -1, np.inf, -np.inf, np.nan] + cvals = [complex(rp, ip) for rp in rvals for ip in rvals] + dtypes = [np.complex64, np.cdouble, np.clongdouble] + actual = [str(np.array([c], dt)) for c in cvals for dt in dtypes] + wanted = [ + '[0.+0.j]', '[0.+0.j]', '[0.+0.j]', + '[0.+1.j]', '[0.+1.j]', '[0.+1.j]', + '[0.-1.j]', '[0.-1.j]', '[0.-1.j]', + '[0.+infj]', '[0.+infj]', '[0.+infj]', + '[0.-infj]', '[0.-infj]', '[0.-infj]', + '[0.+nanj]', '[0.+nanj]', '[0.+nanj]', + '[1.+0.j]', '[1.+0.j]', '[1.+0.j]', + '[1.+1.j]', '[1.+1.j]', '[1.+1.j]', + '[1.-1.j]', '[1.-1.j]', '[1.-1.j]', + '[1.+infj]', '[1.+infj]', '[1.+infj]', + '[1.-infj]', '[1.-infj]', '[1.-infj]', + '[1.+nanj]', '[1.+nanj]', '[1.+nanj]', + '[-1.+0.j]', '[-1.+0.j]', '[-1.+0.j]', + '[-1.+1.j]', '[-1.+1.j]', '[-1.+1.j]', + '[-1.-1.j]', '[-1.-1.j]', '[-1.-1.j]', + '[-1.+infj]', '[-1.+infj]', '[-1.+infj]', + '[-1.-infj]', '[-1.-infj]', '[-1.-infj]', + '[-1.+nanj]', '[-1.+nanj]', '[-1.+nanj]', + '[inf+0.j]', '[inf+0.j]', '[inf+0.j]', + '[inf+1.j]', '[inf+1.j]', '[inf+1.j]', + '[inf-1.j]', '[inf-1.j]', '[inf-1.j]', + '[inf+infj]', '[inf+infj]', '[inf+infj]', + '[inf-infj]', '[inf-infj]', '[inf-infj]', + '[inf+nanj]', '[inf+nanj]', '[inf+nanj]', + '[-inf+0.j]', '[-inf+0.j]', '[-inf+0.j]', + '[-inf+1.j]', '[-inf+1.j]', '[-inf+1.j]', + '[-inf-1.j]', '[-inf-1.j]', '[-inf-1.j]', + '[-inf+infj]', '[-inf+infj]', '[-inf+infj]', + '[-inf-infj]', '[-inf-infj]', '[-inf-infj]', + '[-inf+nanj]', '[-inf+nanj]', '[-inf+nanj]', + '[nan+0.j]', '[nan+0.j]', '[nan+0.j]', + '[nan+1.j]', '[nan+1.j]', '[nan+1.j]', + '[nan-1.j]', '[nan-1.j]', '[nan-1.j]', + '[nan+infj]', '[nan+infj]', '[nan+infj]', + '[nan-infj]', '[nan-infj]', '[nan-infj]', + '[nan+nanj]', '[nan+nanj]', '[nan+nanj]'] + + for res, val in zip(actual, wanted): + assert_equal(res, val) + +class TestArray2String: + def test_basic(self): + """Basic test of array2string.""" + a = np.arange(3) + assert_(np.array2string(a) == '[0 1 2]') + assert_(np.array2string(a, max_line_width=4, legacy='1.13') == '[0 1\n 2]') + assert_(np.array2string(a, max_line_width=4) == '[0\n 1\n 2]') + + def test_unexpected_kwarg(self): + # ensure than an appropriate TypeError + # is raised when array2string receives + # an unexpected kwarg + + with assert_raises_regex(TypeError, 'nonsense'): + np.array2string(np.array([1, 2, 3]), + nonsense=None) + + def test_format_function(self): + """Test custom format function for each element in array.""" + def _format_function(x): + if np.abs(x) < 1: + return '.' + elif np.abs(x) < 2: + return 'o' + else: + return 'O' + + x = np.arange(3) + x_hex = "[0x0 0x1 0x2]" + x_oct = "[0o0 0o1 0o2]" + assert_(np.array2string(x, formatter={'all':_format_function}) == + "[. o O]") + assert_(np.array2string(x, formatter={'int_kind':_format_function}) == + "[. o O]") + assert_(np.array2string(x, formatter={'all':lambda x: "%.4f" % x}) == + "[0.0000 1.0000 2.0000]") + assert_equal(np.array2string(x, formatter={'int':lambda x: hex(x)}), + x_hex) + assert_equal(np.array2string(x, formatter={'int':lambda x: oct(x)}), + x_oct) + + x = np.arange(3.) 
+ assert_(np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) == + "[0.00 1.00 2.00]") + assert_(np.array2string(x, formatter={'float':lambda x: "%.2f" % x}) == + "[0.00 1.00 2.00]") + + s = np.array(['abc', 'def']) + assert_(np.array2string(s, formatter={'numpystr':lambda s: s*2}) == + '[abcabc defdef]') + + + def test_structure_format(self): + dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) + x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt) + assert_equal(np.array2string(x), + "[('Sarah', [8., 7.]) ('John', [6., 7.])]") + + np.set_printoptions(legacy='1.13') + try: + # for issue #5692 + A = np.zeros(shape=10, dtype=[("A", "M8[s]")]) + A[5:].fill(np.datetime64('NaT')) + assert_equal( + np.array2string(A), + textwrap.dedent("""\ + [('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) + ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('NaT',) ('NaT',) + ('NaT',) ('NaT',) ('NaT',)]""") + ) + finally: + np.set_printoptions(legacy=False) + + # same again, but with non-legacy behavior + assert_equal( + np.array2string(A), + textwrap.dedent("""\ + [('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) + ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) + ('1970-01-01T00:00:00',) ( 'NaT',) + ( 'NaT',) ( 'NaT',) + ( 'NaT',) ( 'NaT',)]""") + ) + + # and again, with timedeltas + A = np.full(10, 123456, dtype=[("A", "m8[s]")]) + A[5:].fill(np.datetime64('NaT')) + assert_equal( + np.array2string(A), + textwrap.dedent("""\ + [(123456,) (123456,) (123456,) (123456,) (123456,) ( 'NaT',) ( 'NaT',) + ( 'NaT',) ( 'NaT',) ( 'NaT',)]""") + ) + + # See #8160 + struct_int = np.array([([1, -1],), ([123, 1],)], dtype=[('B', 'i4', 2)]) + assert_equal(np.array2string(struct_int), + "[([ 1, -1],) ([123, 1],)]") + struct_2dint = np.array([([[0, 1], [2, 3]],), ([[12, 0], [0, 0]],)], + dtype=[('B', 'i4', (2, 2))]) + assert_equal(np.array2string(struct_2dint), + "[([[ 0, 1], [ 2, 3]],) ([[12, 0], [ 0, 0]],)]") + + # See #8172 + array_scalar = np.array( + (1., 2.1234567890123456789, 3.), dtype=('f8,f8,f8')) + assert_equal(np.array2string(array_scalar), "(1., 2.12345679, 3.)") + + def test_unstructured_void_repr(self): + a = np.array([27, 91, 50, 75, 7, 65, 10, 8, + 27, 91, 51, 49,109, 82,101,100], dtype='u1').view('V8') + assert_equal(repr(a[0]), r"void(b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08')") + assert_equal(str(a[0]), r"b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'") + assert_equal(repr(a), + r"array([b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'," "\n" + r" b'\x1B\x5B\x33\x31\x6D\x52\x65\x64'], dtype='|V8')") + + assert_equal(eval(repr(a), vars(np)), a) + assert_equal(eval(repr(a[0]), vars(np)), a[0]) + + def test_edgeitems_kwarg(self): + # previously the global print options would be taken over the kwarg + arr = np.zeros(3, int) + assert_equal( + np.array2string(arr, edgeitems=1, threshold=0), + "[0 ... 0]" + ) + + def test_summarize_1d(self): + A = np.arange(1001) + strA = '[ 0 1 2 ... 998 999 1000]' + assert_equal(str(A), strA) + + reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])' + assert_equal(repr(A), reprA) + + def test_summarize_2d(self): + A = np.arange(1002).reshape(2, 501) + strA = '[[ 0 1 2 ... 498 499 500]\n' \ + ' [ 501 502 503 ... 
999 1000 1001]]' + assert_equal(str(A), strA) + + reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \ + ' [ 501, 502, 503, ..., 999, 1000, 1001]])' + assert_equal(repr(A), reprA) + + def test_linewidth(self): + a = np.full(6, 1) + + def make_str(a, width, **kw): + return np.array2string(a, separator="", max_line_width=width, **kw) + + assert_equal(make_str(a, 8, legacy='1.13'), '[111111]') + assert_equal(make_str(a, 7, legacy='1.13'), '[111111]') + assert_equal(make_str(a, 5, legacy='1.13'), '[1111\n' + ' 11]') + + assert_equal(make_str(a, 8), '[111111]') + assert_equal(make_str(a, 7), '[11111\n' + ' 1]') + assert_equal(make_str(a, 5), '[111\n' + ' 111]') + + b = a[None,None,:] + + assert_equal(make_str(b, 12, legacy='1.13'), '[[[111111]]]') + assert_equal(make_str(b, 9, legacy='1.13'), '[[[111111]]]') + assert_equal(make_str(b, 8, legacy='1.13'), '[[[11111\n' + ' 1]]]') + + assert_equal(make_str(b, 12), '[[[111111]]]') + assert_equal(make_str(b, 9), '[[[111\n' + ' 111]]]') + assert_equal(make_str(b, 8), '[[[11\n' + ' 11\n' + ' 11]]]') + + def test_wide_element(self): + a = np.array(['xxxxx']) + assert_equal( + np.array2string(a, max_line_width=5), + "['xxxxx']" + ) + assert_equal( + np.array2string(a, max_line_width=5, legacy='1.13'), + "[ 'xxxxx']" + ) + + def test_multiline_repr(self): + class MultiLine: + def __repr__(self): + return "Line 1\nLine 2" + + a = np.array([[None, MultiLine()], [MultiLine(), None]]) + + assert_equal( + np.array2string(a), + '[[None Line 1\n' + ' Line 2]\n' + ' [Line 1\n' + ' Line 2 None]]' + ) + assert_equal( + np.array2string(a, max_line_width=5), + '[[None\n' + ' Line 1\n' + ' Line 2]\n' + ' [Line 1\n' + ' Line 2\n' + ' None]]' + ) + assert_equal( + repr(a), + 'array([[None, Line 1\n' + ' Line 2],\n' + ' [Line 1\n' + ' Line 2, None]], dtype=object)' + ) + + class MultiLineLong: + def __repr__(self): + return "Line 1\nLooooooooooongestLine2\nLongerLine 3" + + a = np.array([[None, MultiLineLong()], [MultiLineLong(), None]]) + assert_equal( + repr(a), + 'array([[None, Line 1\n' + ' LooooooooooongestLine2\n' + ' LongerLine 3 ],\n' + ' [Line 1\n' + ' LooooooooooongestLine2\n' + ' LongerLine 3 , None]], dtype=object)' + ) + assert_equal( + np.array_repr(a, 20), + 'array([[None,\n' + ' Line 1\n' + ' LooooooooooongestLine2\n' + ' LongerLine 3 ],\n' + ' [Line 1\n' + ' LooooooooooongestLine2\n' + ' LongerLine 3 ,\n' + ' None]],\n' + ' dtype=object)' + ) + + def test_nested_array_repr(self): + a = np.empty((2, 2), dtype=object) + a[0, 0] = np.eye(2) + a[0, 1] = np.eye(3) + a[1, 0] = None + a[1, 1] = np.ones((3, 1)) + assert_equal( + repr(a), + 'array([[array([[1., 0.],\n' + ' [0., 1.]]), array([[1., 0., 0.],\n' + ' [0., 1., 0.],\n' + ' [0., 0., 1.]])],\n' + ' [None, array([[1.],\n' + ' [1.],\n' + ' [1.]])]], dtype=object)' + ) + + @given(hynp.from_dtype(np.dtype("U"))) + def test_any_text(self, text): + # This test checks that, given any value that can be represented in an + # array of dtype("U") (i.e. unicode string), ... + a = np.array([text, text, text]) + # casting a list of them to an array does not e.g. 
+        # truncate the value
+        assert_equal(a[0], text)
+        # and that np.array2string puts a newline in the expected location
+        expected_repr = "[{0!r} {0!r}\n {0!r}]".format(text)
+        result = np.array2string(a, max_line_width=len(repr(text)) * 2 + 3)
+        assert_equal(result, expected_repr)
+
+    @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+    def test_refcount(self):
+        # make sure we do not hold references to the array due to a recursive
+        # closure (gh-10620)
+        gc.disable()
+        a = np.arange(2)
+        r1 = sys.getrefcount(a)
+        np.array2string(a)
+        np.array2string(a)
+        r2 = sys.getrefcount(a)
+        gc.collect()
+        gc.enable()
+        assert_(r1 == r2)
+
+
+class TestPrintOptions:
+    """Test getting and setting global print options."""
+
+    def setup(self):
+        self.oldopts = np.get_printoptions()
+
+    def teardown(self):
+        np.set_printoptions(**self.oldopts)
+
+    def test_basic(self):
+        x = np.array([1.5, 0, 1.234567890])
+        assert_equal(repr(x), "array([1.5       , 0.        , 1.23456789])")
+        np.set_printoptions(precision=4)
+        assert_equal(repr(x), "array([1.5   , 0.    , 1.2346])")
+
+    def test_precision_zero(self):
+        np.set_printoptions(precision=0)
+        for values, string in (
+                ([0.], "0."), ([.3], "0."), ([-.3], "-0."), ([.7], "1."),
+                ([1.5], "2."), ([-1.5], "-2."), ([-15.34], "-15."),
+                ([100.], "100."), ([.2, -1, 122.51], "  0.,  -1., 123."),
+                ([0], "0"), ([-12], "-12"), ([complex(.3, -.7)], "0.-1.j")):
+            x = np.array(values)
+            assert_equal(repr(x), "array([%s])" % string)
+
+    def test_formatter(self):
+        x = np.arange(3)
+        np.set_printoptions(formatter={'all':lambda x: str(x-1)})
+        assert_equal(repr(x), "array([-1, 0, 1])")
+
+    def test_formatter_reset(self):
+        x = np.arange(3)
+        np.set_printoptions(formatter={'all':lambda x: str(x-1)})
+        assert_equal(repr(x), "array([-1, 0, 1])")
+        np.set_printoptions(formatter={'int':None})
+        assert_equal(repr(x), "array([0, 1, 2])")
+
+        np.set_printoptions(formatter={'all':lambda x: str(x-1)})
+        assert_equal(repr(x), "array([-1, 0, 1])")
+        np.set_printoptions(formatter={'all':None})
+        assert_equal(repr(x), "array([0, 1, 2])")
+
+        np.set_printoptions(formatter={'int':lambda x: str(x-1)})
+        assert_equal(repr(x), "array([-1, 0, 1])")
+        np.set_printoptions(formatter={'int_kind':None})
+        assert_equal(repr(x), "array([0, 1, 2])")
+
+        x = np.arange(3.)
+        np.set_printoptions(formatter={'float':lambda x: str(x-1)})
+        assert_equal(repr(x), "array([-1.0, 0.0, 1.0])")
+        np.set_printoptions(formatter={'float_kind':None})
+        assert_equal(repr(x), "array([0., 1., 2.])")
+
+    def test_0d_arrays(self):
+        assert_equal(str(np.array(u'café', '<U4')), 'café')

[... diff content lost in extraction: the rest of test_arrayprint.py and the
start of test_casting.py (file header, imports, helpers, and the beginning of
the get_data() definition) ...]

+        if dtype1.itemsize >= dtype2.itemsize:
+            length = self.size // dtype1.itemsize
+        else:
+            length = self.size // dtype2.itemsize
+
+        # Assume that the base array is well enough aligned for all inputs.
+ arr1 = np.empty(length, dtype=dtype1) + assert arr1.flags.c_contiguous + assert arr1.flags.aligned + + values = [random.randrange(-128, 128) for _ in range(length)] + + for i, value in enumerate(values): + # Use item assignment to ensure this is not using casting: + arr1[i] = value + + if dtype2 is None: + if dtype1.char == "?": + values = [bool(v) for v in values] + return arr1, values + + if dtype2.char == "?": + values = [bool(v) for v in values] + + arr2 = np.empty(length, dtype=dtype2) + assert arr2.flags.c_contiguous + assert arr2.flags.aligned + + for i, value in enumerate(values): + # Use item assignment to ensure this is not using casting: + arr2[i] = value + + return arr1, arr2, values + + def get_data_variation(self, arr1, arr2, aligned=True, contig=True): + """ + Returns a copy of arr1 that may be non-contiguous or unaligned, and a + matching array for arr2 (although not a copy). + """ + if contig: + stride1 = arr1.dtype.itemsize + stride2 = arr2.dtype.itemsize + elif aligned: + stride1 = 2 * arr1.dtype.itemsize + stride2 = 2 * arr2.dtype.itemsize + else: + stride1 = arr1.dtype.itemsize + 1 + stride2 = arr2.dtype.itemsize + 1 + + max_size1 = len(arr1) * 3 * arr1.dtype.itemsize + 1 + max_size2 = len(arr2) * 3 * arr2.dtype.itemsize + 1 + from_bytes = np.zeros(max_size1, dtype=np.uint8) + to_bytes = np.zeros(max_size2, dtype=np.uint8) + + # Sanity check that the above is large enough: + assert stride1 * len(arr1) <= from_bytes.nbytes + assert stride2 * len(arr2) <= to_bytes.nbytes + + if aligned: + new1 = as_strided(from_bytes[:-1].view(arr1.dtype), + arr1.shape, (stride1,)) + new2 = as_strided(to_bytes[:-1].view(arr2.dtype), + arr2.shape, (stride2,)) + else: + new1 = as_strided(from_bytes[1:].view(arr1.dtype), + arr1.shape, (stride1,)) + new2 = as_strided(to_bytes[1:].view(arr2.dtype), + arr2.shape, (stride2,)) + + new1[...] = arr1 + + if not contig: + # Ensure we did not overwrite bytes that should not be written: + offset = arr1.dtype.itemsize if aligned else 0 + buf = from_bytes[offset::stride1].tobytes() + assert buf.count(b"\0") == len(buf) + + if contig: + assert new1.flags.c_contiguous + assert new2.flags.c_contiguous + else: + assert not new1.flags.c_contiguous + assert not new2.flags.c_contiguous + + if aligned: + assert new1.flags.aligned + assert new2.flags.aligned + else: + assert not new1.flags.aligned or new1.dtype.alignment == 1 + assert not new2.flags.aligned or new2.dtype.alignment == 1 + + return new1, new2 + + @pytest.mark.parametrize("from_Dt", simple_dtypes) + def test_simple_cancast(self, from_Dt): + for to_Dt in simple_dtypes: + cast = get_castingimpl(from_Dt, to_Dt) + + for from_dt in [from_Dt(), from_Dt().newbyteorder()]: + default = cast._resolve_descriptors((from_dt, None))[1][1] + assert default == to_Dt() + del default + + for to_dt in [to_Dt(), to_Dt().newbyteorder()]: + casting, (from_res, to_res) = cast._resolve_descriptors( + (from_dt, to_dt)) + assert(type(from_res) == from_Dt) + assert(type(to_res) == to_Dt) + if casting & Casting.cast_is_view: + # If a view is acceptable, this is "no" casting + # and byte order must be matching. 
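+                    # (Illustrative: a native "d" to "d" cast can be a view,
+                    # while "d" to byteswapped "d" needs a byteswap and so
+                    # cannot be, even though it is "equivalent".)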
+                    assert casting == Casting.no | Casting.cast_is_view
+                    # The above table lists this as "equivalent"
+                    assert Casting.equiv == CAST_TABLE[from_Dt][to_Dt]
+                    # Note that to_res may not be the same as from_dt
+                    assert from_res.isnative == to_res.isnative
+                else:
+                    if from_Dt == to_Dt:
+                        # Note that to_res may not be the same as from_dt
+                        assert from_res.isnative != to_res.isnative
+                    assert casting == CAST_TABLE[from_Dt][to_Dt]
+
+                if from_Dt is to_Dt:
+                    assert(from_dt is from_res)
+                    assert(to_dt is to_res)
+
+
+    @pytest.mark.filterwarnings("ignore::numpy.ComplexWarning")
+    @pytest.mark.parametrize("from_dt", simple_dtype_instances())
+    def test_simple_direct_casts(self, from_dt):
+        """
+        This test checks numeric direct casts for dtypes supported also by the
+        struct module (plus complex). It tries to test a wide range of
+        inputs, but skips over possibly undefined behaviour (e.g. int rollover).
+        Longdouble and CLongdouble are tested, but only using double precision.
+
+        If this test creates issues, it should possibly just be simplified
+        or even removed (checking whether unaligned/non-contiguous casts give
+        the same results is useful, though).
+        """
+        for to_dt in simple_dtype_instances():
+            to_dt = to_dt.values[0]
+            cast = get_castingimpl(type(from_dt), type(to_dt))
+
+            casting, (from_res, to_res) = cast._resolve_descriptors(
+                (from_dt, to_dt))
+
+            if from_res is not from_dt or to_res is not to_dt:
+                # Do not test this case, it is handled in multiple steps,
+                # each of which is tested individually.
+                return
+
+            safe = (casting & ~Casting.cast_is_view) <= Casting.safe
+            del from_res, to_res, casting
+
+            arr1, arr2, values = self.get_data(from_dt, to_dt)
+
+            cast._simple_strided_call((arr1, arr2))
+
+            # Check via python list
+            assert arr2.tolist() == values
+
+            # Check that the same results are achieved for strided loops
+            arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False)
+            cast._simple_strided_call((arr1_o, arr2_o))
+
+            assert_array_equal(arr2_o, arr2)
+            assert arr2_o.tobytes() == arr2.tobytes()
+
+            # Check if alignment makes a difference, but only if supported
+            # and only if the alignment can be wrong
+            if ((from_dt.alignment == 1 and to_dt.alignment == 1) or
+                    not cast._supports_unaligned):
+                return
+
+            arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, True)
+            cast._simple_strided_call((arr1_o, arr2_o))
+
+            assert_array_equal(arr2_o, arr2)
+            assert arr2_o.tobytes() == arr2.tobytes()
+
+            arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, False)
+            cast._simple_strided_call((arr1_o, arr2_o))
+
+            assert_array_equal(arr2_o, arr2)
+            assert arr2_o.tobytes() == arr2.tobytes()
+
+            del arr1_o, arr2_o, cast
+
+    @pytest.mark.parametrize("from_Dt", simple_dtypes)
+    def test_numeric_to_times(self, from_Dt):
+        # We currently only implement contiguous loops, so only need to
+        # test those.
+        from_dt = from_Dt()
+
+        time_dtypes = [np.dtype("M8"), np.dtype("M8[ms]"), np.dtype("M8[4D]"),
+                       np.dtype("m8"), np.dtype("m8[ms]"), np.dtype("m8[4D]")]
+        for time_dt in time_dtypes:
+            cast = get_castingimpl(type(from_dt), type(time_dt))
+
+            casting, (from_res, to_res) = cast._resolve_descriptors(
+                (from_dt, time_dt))
+
+            assert from_res is from_dt
+            assert to_res is time_dt
+            del from_res, to_res
+
+            assert(casting & CAST_TABLE[from_Dt][type(time_dt)])
+
+            int64_dt = np.dtype(np.int64)
+            arr1, arr2, values = self.get_data(from_dt, int64_dt)
+            arr2 = arr2.view(time_dt)
+            arr2[...] = np.datetime64("NaT")
+
+            if time_dt == np.dtype("M8"):
+                # This is a bit of a strange path, and could probably be removed
+                arr1[-1] = 0  # ensure at least one value is not NaT
+
+                # The cast currently succeeds, but the values are invalid:
+                cast._simple_strided_call((arr1, arr2))
+                with pytest.raises(ValueError):
+                    str(arr2[-1])  # e.g. conversion to string fails
+                return
+
+            cast._simple_strided_call((arr1, arr2))
+
+            assert [int(v) for v in arr2.tolist()] == values
+
+            # Check that the same results are achieved for strided loops
+            arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False)
+            cast._simple_strided_call((arr1_o, arr2_o))
+
+            assert_array_equal(arr2_o, arr2)
+            assert arr2_o.tobytes() == arr2.tobytes()
+
+    @pytest.mark.parametrize(
+            ["from_dt", "to_dt", "expected_casting", "nom", "denom"],
+            [("M8[ns]", None,
+                  Casting.no | Casting.cast_is_view, 1, 1),
+             (str(np.dtype("M8[ns]").newbyteorder()), None, Casting.equiv, 1, 1),
+             ("M8", "M8[ms]", Casting.safe | Casting.cast_is_view, 1, 1),
+             ("M8[ms]", "M8", Casting.unsafe, 1, 1),  # should be invalid cast
+             ("M8[5ms]", "M8[5ms]", Casting.no | Casting.cast_is_view, 1, 1),
+             ("M8[ns]", "M8[ms]", Casting.same_kind, 1, 10**6),
+             ("M8[ms]", "M8[ns]", Casting.safe, 10**6, 1),
+             ("M8[ms]", "M8[7ms]", Casting.same_kind, 1, 7),
+             ("M8[4D]", "M8[1M]", Casting.same_kind, None,
+                  # give full values based on NumPy 1.19.x
+                  [-2**63, 0, -1, 1314, -1315, 564442610]),
+             ("m8[ns]", None, Casting.no | Casting.cast_is_view, 1, 1),
+             (str(np.dtype("m8[ns]").newbyteorder()), None, Casting.equiv, 1, 1),
+             ("m8", "m8[ms]", Casting.safe | Casting.cast_is_view, 1, 1),
+             ("m8[ms]", "m8", Casting.unsafe, 1, 1),  # should be invalid cast
+             ("m8[5ms]", "m8[5ms]", Casting.no | Casting.cast_is_view, 1, 1),
+             ("m8[ns]", "m8[ms]", Casting.same_kind, 1, 10**6),
+             ("m8[ms]", "m8[ns]", Casting.safe, 10**6, 1),
+             ("m8[ms]", "m8[7ms]", Casting.same_kind, 1, 7),
+             ("m8[4D]", "m8[1M]", Casting.unsafe, None,
+                  # give full values based on NumPy 1.19.x
+                  [-2**63, 0, 0, 1314, -1315, 564442610])])
+    def test_time_to_time(self, from_dt, to_dt, expected_casting, nom, denom):
+        from_dt = np.dtype(from_dt)
+        if to_dt is not None:
+            to_dt = np.dtype(to_dt)
+
+        # Test a few values for casting (results generated with NumPy 1.19)
+        values = np.array([-2**63, 1, 2**63-1, 10000, -10000, 2**32])
+        values = values.astype(np.dtype("int64").newbyteorder(from_dt.byteorder))
+        assert values.dtype.byteorder == from_dt.byteorder
+        assert np.isnat(values.view(from_dt)[0])
+
+        DType = type(from_dt)
+        cast = get_castingimpl(DType, DType)
+        casting, (from_res, to_res) = cast._resolve_descriptors((from_dt, to_dt))
+        assert from_res is from_dt
+        assert to_res is to_dt or to_dt is None
+        assert casting == expected_casting
+
+        if nom is not None:
+            expected_out = (values * nom // denom).view(to_res)
+            expected_out[0] = "NaT"
+        else:
+            expected_out = np.empty_like(values)
+            expected_out[...] = denom
+            expected_out = expected_out.view(to_dt)
+
+        orig_arr = values.view(from_dt)
+        orig_out = np.empty_like(expected_out)
+
+        if casting == Casting.unsafe and (to_dt == "m8" or to_dt == "M8"):
+            # Casting from non-generic to generic units is an error and should
+            # probably be reported as an invalid cast earlier.
+            with pytest.raises(ValueError):
+                cast._simple_strided_call((orig_arr, orig_out))
+            return
+
+        for aligned in [True, False]:
+            for contig in [True, False]:
+                arr, out = self.get_data_variation(
+                    orig_arr, orig_out, aligned, contig)
+                out[...] = 0
+                cast._simple_strided_call((arr, out))
+                assert_array_equal(out.view("int64"), expected_out.view("int64"))
+
+    def string_with_modified_length(self, dtype, change_length):
+        fact = 1 if dtype.char == "S" else 4
+        length = dtype.itemsize // fact + change_length
+        return np.dtype(f"{dtype.byteorder}{dtype.char}{length}")
+
+    @pytest.mark.parametrize("other_DT", simple_dtypes)
+    @pytest.mark.parametrize("string_char", ["S", "U"])
+    def test_string_cancast(self, other_DT, string_char):
+        fact = 1 if string_char == "S" else 4
+
+        string_DT = type(np.dtype(string_char))
+        cast = get_castingimpl(other_DT, string_DT)
+
+        other_dt = other_DT()
+        expected_length = get_expected_stringlength(other_dt)
+        string_dt = np.dtype(f"{string_char}{expected_length}")
+
+        safety, (res_other_dt, res_dt) = cast._resolve_descriptors((other_dt, None))
+        assert res_dt.itemsize == expected_length * fact
+        assert safety == Casting.safe  # we consider casts to string "safe"
+        assert isinstance(res_dt, string_DT)
+
+        # These casts currently implement changing the string length, so
+        # check the cast-safety for shorter and longer string lengths:
+        for change_length in [-1, 0, 1]:
+            if change_length >= 0:
+                expected_safety = Casting.safe
+            else:
+                expected_safety = Casting.same_kind
+
+            to_dt = self.string_with_modified_length(string_dt, change_length)
+            safety, (_, res_dt) = cast._resolve_descriptors((other_dt, to_dt))
+            assert res_dt is to_dt
+            assert safety == expected_safety
+
+        # The opposite direction is always considered unsafe:
+        cast = get_castingimpl(string_DT, other_DT)
+
+        safety, _ = cast._resolve_descriptors((string_dt, other_dt))
+        assert safety == Casting.unsafe
+
+        cast = get_castingimpl(string_DT, other_DT)
+        safety, (_, res_dt) = cast._resolve_descriptors((string_dt, None))
+        assert safety == Casting.unsafe
+        assert other_dt is res_dt  # returns the singleton for simple dtypes
+
+    @pytest.mark.parametrize("string_char", ["S", "U"])
+    @pytest.mark.parametrize("other_dt", simple_dtype_instances())
+    def test_simple_string_casts_roundtrip(self, other_dt, string_char):
+        """
+        Tests casts from and to string by checking the roundtripping property.
+
+        The test also covers some string to string casts (but not all).
+
+        If this test creates issues, it should possibly just be simplified
+        or even removed (checking whether unaligned/non-contiguous casts give
+        the same results is useful, though).
+        """
+        string_DT = type(np.dtype(string_char))
+
+        cast = get_castingimpl(type(other_dt), string_DT)
+        cast_back = get_castingimpl(string_DT, type(other_dt))
+        _, (res_other_dt, string_dt) = cast._resolve_descriptors((other_dt, None))
+
+        if res_other_dt is not other_dt:
+            # do not support non-native byteorder, skip test in that case
+            assert other_dt.byteorder != res_other_dt.byteorder
+            return
+
+        orig_arr, values = self.get_data(other_dt, None)
+        str_arr = np.zeros(len(orig_arr), dtype=string_dt)
+        string_dt_short = self.string_with_modified_length(string_dt, -1)
+        str_arr_short = np.zeros(len(orig_arr), dtype=string_dt_short)
+        string_dt_long = self.string_with_modified_length(string_dt, 1)
+        str_arr_long = np.zeros(len(orig_arr), dtype=string_dt_long)
+
+        assert not cast._supports_unaligned  # if support is added, should test
+        assert not cast_back._supports_unaligned
+
+        for contig in [True, False]:
+            other_arr, str_arr = self.get_data_variation(
+                orig_arr, str_arr, True, contig)
+            _, str_arr_short = self.get_data_variation(
+                orig_arr, str_arr_short.copy(), True, contig)
+            _, str_arr_long = self.get_data_variation(
+                orig_arr, str_arr_long, True, contig)
+
+            cast._simple_strided_call((other_arr, str_arr))
+
+            cast._simple_strided_call((other_arr, str_arr_short))
+            assert_array_equal(str_arr.astype(string_dt_short), str_arr_short)
+
+            cast._simple_strided_call((other_arr, str_arr_long))
+            assert_array_equal(str_arr, str_arr_long)
+
+            if other_dt.kind == "b":
+                # Booleans do not roundtrip
+                continue
+
+            other_arr[...] = 0
+            cast_back._simple_strided_call((str_arr, other_arr))
+            assert_array_equal(orig_arr, other_arr)
+
+            other_arr[...] = 0
+            cast_back._simple_strided_call((str_arr_long, other_arr))
+            assert_array_equal(orig_arr, other_arr)
+
+    @pytest.mark.parametrize("other_dt", ["S8", "U8"])
+    @pytest.mark.parametrize("string_char", ["S", "U"])
+    def test_string_to_string_cancast(self, other_dt, string_char):
+        other_dt = np.dtype(other_dt)
+
+        fact = 1 if string_char == "S" else 4
+        div = 1 if other_dt.char == "S" else 4
+
+        string_DT = type(np.dtype(string_char))
+        cast = get_castingimpl(type(other_dt), string_DT)
+
+        expected_length = other_dt.itemsize // div
+        string_dt = np.dtype(f"{string_char}{expected_length}")
+
+        safety, (res_other_dt, res_dt) = cast._resolve_descriptors((other_dt, None))
+        assert res_dt.itemsize == expected_length * fact
+        assert isinstance(res_dt, string_DT)
+
+        if other_dt.char == string_char:
+            if other_dt.isnative:
+                expected_safety = Casting.no | Casting.cast_is_view
+            else:
+                expected_safety = Casting.equiv
+        elif string_char == "U":
+            expected_safety = Casting.safe
+        else:
+            expected_safety = Casting.unsafe
+
+        assert expected_safety == safety
+
+        for change_length in [-1, 0, 1]:
+            to_dt = self.string_with_modified_length(string_dt, change_length)
+            safety, (_, res_dt) = cast._resolve_descriptors((other_dt, to_dt))
+
+            assert res_dt is to_dt
+            if expected_safety == Casting.unsafe:
+                assert safety == expected_safety
+            elif change_length < 0:
+                assert safety == Casting.same_kind
+            elif change_length == 0:
+                assert safety == expected_safety
+            elif change_length > 0:
+                assert safety == Casting.safe
+
+    @pytest.mark.parametrize("order1", [">", "<"])
+    @pytest.mark.parametrize("order2", [">", "<"])
+    def test_unicode_byteswapped_cast(self, order1, order2):
+        # Very specific tests (not using the castingimpl directly)
+        # that test unicode byteswaps, including for unaligned array data.
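+        # (Illustrative: "U" stores one 4-byte code point per character, so
+        # assigning between ">U30" and "<U30" must swap bytes within each
+        # 4-byte unit.)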
+        dtype1 = np.dtype(f"{order1}U30")
+        dtype2 = np.dtype(f"{order2}U30")
+        data1 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype1)
+        data2 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype2)
+        if dtype1.alignment != 1:
+            # alignment should always be >1, but skip the check if not
+            assert not data1.flags.aligned
+            assert not data2.flags.aligned
+
+        element = "this is a ünicode string‽"
+        data1[()] = element
+        # Test both `data1` and `data1.copy()` (which should be aligned)
+        for data in [data1, data1.copy()]:
+            data2[...] = data
+            assert data2[()] == element
+            assert data2.copy()[()] == element
+
+    def test_void_to_string_special_case(self):
+        # Cover a small special case in void to string casting that could
+        # probably just as well be turned into an error (compare
+        # `test_object_to_parametric_internal_error` below).
+        assert np.array([], dtype="V5").astype("S").dtype.itemsize == 5
+        assert np.array([], dtype="V5").astype("U").dtype.itemsize == 4 * 5
+
+    def test_object_to_parametric_internal_error(self):
+        # We reject casting from object to a parametric type, without
+        # figuring out the correct instance first.
+        object_dtype = type(np.dtype(object))
+        other_dtype = type(np.dtype(str))
+        cast = get_castingimpl(object_dtype, other_dtype)
+        with pytest.raises(TypeError,
+                match="casting from object to the parametric DType"):
+            cast._resolve_descriptors((np.dtype("O"), None))
+
+    @pytest.mark.parametrize("casting", ["no", "unsafe"])
+    def test_void_and_structured_with_subarray(self, casting):
+        # test case corresponding to gh-19325
+        dtype = np.dtype([("foo", "<f4", (3, 2))])  # field dtype assumed;
+        # the original line was damaged in extraction

[... a few diff lines lost in extraction: the rest of this test and the
decorator/def of the next one; the two lines below are reconstructed ...]

+    @pytest.mark.parametrize("dtype", np.typecodes["All"])
+    def test_object_casts_NULL_None_equivalence(self, dtype):
+        # None to <other> casts may succeed or fail, but a NULL'ed array must
+        # behave the same as one filled with None's.
+        arr_normal = np.array([None] * 5)
+        arr_NULLs = np.empty_like([None] * 5)
+        # If the check fails (maybe it should) the test would lose its purpose:
+        assert arr_NULLs.tobytes() == b"\x00" * arr_NULLs.nbytes
+
+        try:
+            expected = arr_normal.astype(dtype)
+        except TypeError:
+            with pytest.raises(TypeError):
+                arr_NULLs.astype(dtype)
+        else:
+            assert_array_equal(expected, arr_NULLs.astype(dtype))
diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_conversion_utils.py b/MLPY/Lib/site-packages/numpy/core/tests/test_conversion_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ee97c61916d1ee820066ef84dd64cc77eb1e2e5
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/core/tests/test_conversion_utils.py
@@ -0,0 +1,205 @@
+"""
+Tests for numpy/core/src/multiarray/conversion_utils.c
+"""
+import re
+
+import pytest
+
+import numpy as np
+import numpy.core._multiarray_tests as mt
+from numpy.testing import assert_warns
+
+
+class StringConverterTestCase:
+    allow_bytes = True
+    case_insensitive = True
+    exact_match = False
+    warn = True
+
+    def _check_value_error(self, val):
+        pattern = r'\(got {}\)'.format(re.escape(repr(val)))
+        with pytest.raises(ValueError, match=pattern) as exc:
+            self.conv(val)
+
+    def _check_conv_assert_warn(self, val, expected):
+        if self.warn:
+            with assert_warns(DeprecationWarning) as exc:
+                assert self.conv(val) == expected
+        else:
+            assert self.conv(val) == expected
+
+    def _check(self, val, expected):
+        """Takes valid non-deprecated inputs for converters,
+        runs converters on inputs, checks correctness of outputs,
+        warnings and errors"""
+        assert self.conv(val) == expected
+
+        if self.allow_bytes:
+            assert self.conv(val.encode('ascii')) == expected
+        else:
+            with pytest.raises(TypeError):
+                self.conv(val.encode('ascii'))
+
+        if len(val) != 1:
+            if self.exact_match:
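+                # (exact-match converters reject abbreviated spellings
+                # outright, hence the ValueError checks below)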
self._check_value_error(val[:1]) + self._check_value_error(val + '\0') + else: + self._check_conv_assert_warn(val[:1], expected) + + if self.case_insensitive: + if val != val.lower(): + self._check_conv_assert_warn(val.lower(), expected) + if val != val.upper(): + self._check_conv_assert_warn(val.upper(), expected) + else: + if val != val.lower(): + self._check_value_error(val.lower()) + if val != val.upper(): + self._check_value_error(val.upper()) + + def test_wrong_type(self): + # common cases which apply to all the below + with pytest.raises(TypeError): + self.conv({}) + with pytest.raises(TypeError): + self.conv([]) + + def test_wrong_value(self): + # nonsense strings + self._check_value_error('') + self._check_value_error('\N{greek small letter pi}') + + if self.allow_bytes: + self._check_value_error(b'') + # bytes which can't be converted to strings via utf8 + self._check_value_error(b"\xFF") + if self.exact_match: + self._check_value_error("there's no way this is supported") + + +class TestByteorderConverter(StringConverterTestCase): + """ Tests of PyArray_ByteorderConverter """ + conv = mt.run_byteorder_converter + warn = False + + def test_valid(self): + for s in ['big', '>']: + self._check(s, 'NPY_BIG') + for s in ['little', '<']: + self._check(s, 'NPY_LITTLE') + for s in ['native', '=']: + self._check(s, 'NPY_NATIVE') + for s in ['ignore', '|']: + self._check(s, 'NPY_IGNORE') + for s in ['swap']: + self._check(s, 'NPY_SWAP') + + +class TestSortkindConverter(StringConverterTestCase): + """ Tests of PyArray_SortkindConverter """ + conv = mt.run_sortkind_converter + warn = False + + def test_valid(self): + self._check('quicksort', 'NPY_QUICKSORT') + self._check('heapsort', 'NPY_HEAPSORT') + self._check('mergesort', 'NPY_STABLESORT') # alias + self._check('stable', 'NPY_STABLESORT') + + +class TestSelectkindConverter(StringConverterTestCase): + """ Tests of PyArray_SelectkindConverter """ + conv = mt.run_selectkind_converter + case_insensitive = False + exact_match = True + + def test_valid(self): + self._check('introselect', 'NPY_INTROSELECT') + + +class TestSearchsideConverter(StringConverterTestCase): + """ Tests of PyArray_SearchsideConverter """ + conv = mt.run_searchside_converter + def test_valid(self): + self._check('left', 'NPY_SEARCHLEFT') + self._check('right', 'NPY_SEARCHRIGHT') + + +class TestOrderConverter(StringConverterTestCase): + """ Tests of PyArray_OrderConverter """ + conv = mt.run_order_converter + warn = False + + def test_valid(self): + self._check('c', 'NPY_CORDER') + self._check('f', 'NPY_FORTRANORDER') + self._check('a', 'NPY_ANYORDER') + self._check('k', 'NPY_KEEPORDER') + + def test_flatten_invalid_order(self): + # invalid after gh-14596 + with pytest.raises(ValueError): + self.conv('Z') + for order in [False, True, 0, 8]: + with pytest.raises(TypeError): + self.conv(order) + + +class TestClipmodeConverter(StringConverterTestCase): + """ Tests of PyArray_ClipmodeConverter """ + conv = mt.run_clipmode_converter + def test_valid(self): + self._check('clip', 'NPY_CLIP') + self._check('wrap', 'NPY_WRAP') + self._check('raise', 'NPY_RAISE') + + # integer values allowed here + assert self.conv(np.CLIP) == 'NPY_CLIP' + assert self.conv(np.WRAP) == 'NPY_WRAP' + assert self.conv(np.RAISE) == 'NPY_RAISE' + + +class TestCastingConverter(StringConverterTestCase): + """ Tests of PyArray_CastingConverter """ + conv = mt.run_casting_converter + case_insensitive = False + exact_match = True + + def test_valid(self): + self._check("no", "NPY_NO_CASTING") + 
+        self._check("equiv", "NPY_EQUIV_CASTING")
+        self._check("safe", "NPY_SAFE_CASTING")
+        self._check("same_kind", "NPY_SAME_KIND_CASTING")
+        self._check("unsafe", "NPY_UNSAFE_CASTING")
+
+
+class TestIntpConverter:
+    """ Tests of PyArray_IntpConverter """
+    conv = mt.run_intp_converter
+
+    def test_basic(self):
+        assert self.conv(1) == (1,)
+        assert self.conv((1, 2)) == (1, 2)
+        assert self.conv([1, 2]) == (1, 2)
+        assert self.conv(()) == ()
+
+    def test_none(self):
+        # once the warning expires, this will raise TypeError
+        with pytest.warns(DeprecationWarning):
+            assert self.conv(None) == ()
+
+    def test_float(self):
+        with pytest.raises(TypeError):
+            self.conv(1.0)
+        with pytest.raises(TypeError):
+            self.conv([1, 1.0])
+
+    def test_too_large(self):
+        with pytest.raises(ValueError):
+            self.conv(2**64)
+
+    def test_too_many_dims(self):
+        assert self.conv([1]*32) == (1,)*32
+        with pytest.raises(ValueError):
+            self.conv([1]*33)
diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_cpu_dispatcher.py b/MLPY/Lib/site-packages/numpy/core/tests/test_cpu_dispatcher.py
new file mode 100644
index 0000000000000000000000000000000000000000..84eed5d727c9fdacb8a57d0ef105ed02264d7bf5
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/core/tests/test_cpu_dispatcher.py
@@ -0,0 +1,42 @@
+from numpy.core._multiarray_umath import __cpu_features__, __cpu_baseline__, __cpu_dispatch__
+from numpy.core import _umath_tests
+from numpy.testing import assert_equal
+
+def test_dispatcher():
+    """
+    Testing the utilities of the CPU dispatcher
+    """
+    targets = (
+        "SSE2", "SSE41", "AVX2",
+        "VSX", "VSX2", "VSX3",
+        "NEON", "ASIMD", "ASIMDHP"
+    )
+    highest_sfx = ""  # no suffix for the baseline
+    all_sfx = []
+    for feature in reversed(targets):
+        # skip baseline features; by default `CCompilerOpt` does not generate
+        # separate objects for the baseline, just one object combining all of
+        # them via the 'baseline' option within the configuration statements.
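+        # (Illustrative: with baseline SSE2/SSE3 and a machine that also
+        # supports AVX2, the highest dispatched suffix becomes "_AVX2", so
+        # the checks below expect "func_AVX2" and "var_AVX2".)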
+ if feature in __cpu_baseline__: + continue + # check compiler and running machine support + if feature not in __cpu_dispatch__ or not __cpu_features__[feature]: + continue + + if not highest_sfx: + highest_sfx = "_" + feature + all_sfx.append("func" + "_" + feature) + + test = _umath_tests.test_dispatch() + assert_equal(test["func"], "func" + highest_sfx) + assert_equal(test["var"], "var" + highest_sfx) + + if highest_sfx: + assert_equal(test["func_xb"], "func" + highest_sfx) + assert_equal(test["var_xb"], "var" + highest_sfx) + else: + assert_equal(test["func_xb"], "nobase") + assert_equal(test["var_xb"], "nobase") + + all_sfx.append("func") # add the baseline + assert_equal(test["all"], all_sfx) diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_cpu_features.py b/MLPY/Lib/site-packages/numpy/core/tests/test_cpu_features.py new file mode 100644 index 0000000000000000000000000000000000000000..3377815c942aebb89923f6495503726372789b11 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/test_cpu_features.py @@ -0,0 +1,171 @@ +import sys, platform, re, pytest +from numpy.core._multiarray_umath import __cpu_features__ + +def assert_features_equal(actual, desired, fname): + __tracebackhide__ = True # Hide traceback for py.test + actual, desired = str(actual), str(desired) + if actual == desired: + return + detected = str(__cpu_features__).replace("'", "") + try: + with open("/proc/cpuinfo", "r") as fd: + cpuinfo = fd.read(2048) + except Exception as err: + cpuinfo = str(err) + + try: + import subprocess + auxv = subprocess.check_output(['/bin/true'], env=dict(LD_SHOW_AUXV="1")) + auxv = auxv.decode() + except Exception as err: + auxv = str(err) + + import textwrap + error_report = textwrap.indent( +""" +########################################### +### Extra debugging information +########################################### +------------------------------------------- +--- NumPy Detections +------------------------------------------- +%s +------------------------------------------- +--- SYS / CPUINFO +------------------------------------------- +%s.... 
+------------------------------------------- +--- SYS / AUXV +------------------------------------------- +%s +""" % (detected, cpuinfo, auxv), prefix='\r') + + raise AssertionError(( + "Failure Detection\n" + " NAME: '%s'\n" + " ACTUAL: %s\n" + " DESIRED: %s\n" + "%s" + ) % (fname, actual, desired, error_report)) + +class AbstractTest: + features = [] + features_groups = {} + features_map = {} + features_flags = set() + + def load_flags(self): + # a hook + pass + def test_features(self): + self.load_flags() + for gname, features in self.features_groups.items(): + test_features = [self.cpu_have(f) for f in features] + assert_features_equal(__cpu_features__.get(gname), all(test_features), gname) + + for feature_name in self.features: + cpu_have = self.cpu_have(feature_name) + npy_have = __cpu_features__.get(feature_name) + assert_features_equal(npy_have, cpu_have, feature_name) + + def cpu_have(self, feature_name): + map_names = self.features_map.get(feature_name, feature_name) + if isinstance(map_names, str): + return map_names in self.features_flags + for f in map_names: + if f in self.features_flags: + return True + return False + + def load_flags_cpuinfo(self, magic_key): + self.features_flags = self.get_cpuinfo_item(magic_key) + + def get_cpuinfo_item(self, magic_key): + values = set() + with open('/proc/cpuinfo') as fd: + for line in fd: + if not line.startswith(magic_key): + continue + flags_value = [s.strip() for s in line.split(':', 1)] + if len(flags_value) == 2: + values = values.union(flags_value[1].upper().split()) + return values + + def load_flags_auxv(self): + import subprocess + auxv = subprocess.check_output(['/bin/true'], env=dict(LD_SHOW_AUXV="1")) + for at in auxv.split(b'\n'): + if not at.startswith(b"AT_HWCAP"): + continue + hwcap_value = [s.strip() for s in at.split(b':', 1)] + if len(hwcap_value) == 2: + self.features_flags = self.features_flags.union( + hwcap_value[1].upper().decode().split() + ) + +is_linux = sys.platform.startswith('linux') +machine = platform.machine() +is_x86 = re.match("^(amd64|x86|i386|i686)", machine, re.IGNORECASE) +@pytest.mark.skipif(not is_linux or not is_x86, reason="Only for Linux and x86") +class Test_X86_Features(AbstractTest): + features = [ + "MMX", "SSE", "SSE2", "SSE3", "SSSE3", "SSE41", "POPCNT", "SSE42", + "AVX", "F16C", "XOP", "FMA4", "FMA3", "AVX2", "AVX512F", "AVX512CD", + "AVX512ER", "AVX512PF", "AVX5124FMAPS", "AVX5124VNNIW", "AVX512VPOPCNTDQ", + "AVX512VL", "AVX512BW", "AVX512DQ", "AVX512VNNI", "AVX512IFMA", + "AVX512VBMI", "AVX512VBMI2", "AVX512BITALG", + ] + features_groups = dict( + AVX512_KNL = ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF"], + AVX512_KNM = ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF", "AVX5124FMAPS", + "AVX5124VNNIW", "AVX512VPOPCNTDQ"], + AVX512_SKX = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL"], + AVX512_CLX = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512VNNI"], + AVX512_CNL = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA", + "AVX512VBMI"], + AVX512_ICL = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA", + "AVX512VBMI", "AVX512VNNI", "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ"], + ) + features_map = dict( + SSE3="PNI", SSE41="SSE4_1", SSE42="SSE4_2", FMA3="FMA", + AVX512VNNI="AVX512_VNNI", AVX512BITALG="AVX512_BITALG", AVX512VBMI2="AVX512_VBMI2", + AVX5124FMAPS="AVX512_4FMAPS", AVX5124VNNIW="AVX512_4VNNIW", AVX512VPOPCNTDQ="AVX512_VPOPCNTDQ", + ) + def load_flags(self): + self.load_flags_cpuinfo("flags") + 
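+# (editor's note: the following few lines are an editorial illustration, not +# part of upstream NumPy; they show how the dictionaries these tests exercise +# can be inspected at runtime.) +from numpy.core._multiarray_umath import __cpu_baseline__, __cpu_dispatch__ + +def _print_cpu_dispatch_info(): + # baseline: features every compiled object assumes unconditionally + print("baseline:", __cpu_baseline__) + # dispatch: features built as separate objects, enabled only when the + # running CPU reports them in __cpu_features__ + print("dispatch:", [f for f in __cpu_dispatch__ if __cpu_features__[f]]) + 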
+is_power = re.match("^(powerpc|ppc)64", machine, re.IGNORECASE) +@pytest.mark.skipif(not is_linux or not is_power, reason="Only for Linux and Power") +class Test_POWER_Features(AbstractTest): + features = ["VSX", "VSX2", "VSX3"] + features_map = dict(VSX2="ARCH_2_07", VSX3="ARCH_3_00") + + def load_flags(self): + self.load_flags_auxv() + +is_arm = re.match("^(arm|aarch64)", machine, re.IGNORECASE) +@pytest.mark.skipif(not is_linux or not is_arm, reason="Only for Linux and ARM") +class Test_ARM_Features(AbstractTest): + features = [ + "NEON", "ASIMD", "FPHP", "ASIMDHP", "ASIMDDP", "ASIMDFHM" + ] + features_groups = dict( + NEON_FP16 = ["NEON", "HALF"], + NEON_VFPV4 = ["NEON", "VFPV4"], + ) + def load_flags(self): + self.load_flags_cpuinfo("Features") + arch = self.get_cpuinfo_item("CPU architecture") + # in case of mounting virtual filesystem of aarch64 kernel + is_rootfs_v8 = int('0'+next(iter(arch))) > 7 if arch else 0 + if re.match("^(aarch64|AARCH64)", machine) or is_rootfs_v8: + self.features_map = dict( + NEON="ASIMD", HALF="ASIMD", VFPV4="ASIMD" + ) + else: + self.features_map = dict( + # ELF auxiliary vector and /proc/cpuinfo on Linux kernel(armv8 aarch32) + # doesn't provide information about ASIMD, so we assume that ASIMD is supported + # if the kernel reports any one of the following ARM8 features. + ASIMD=("AES", "SHA1", "SHA2", "PMULL", "CRC32") + ) diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_cython.py b/MLPY/Lib/site-packages/numpy/core/tests/test_cython.py new file mode 100644 index 0000000000000000000000000000000000000000..3acffc8f5947394dcc58e5e938a38ff7e8b1ea77 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/test_cython.py @@ -0,0 +1,134 @@ +import os +import shutil +import subprocess +import sys +import pytest + +import numpy as np + +# This import is copied from random.tests.test_extending +try: + import cython + from Cython.Compiler.Version import version as cython_version +except ImportError: + cython = None +else: + from distutils.version import LooseVersion + + # Cython 0.29.21 is required for Python 3.9 and there are + # other fixes in the 0.29 series that are needed even for earlier + # Python versions. 
+ # Note: keep in sync with the one in pyproject.toml + required_version = LooseVersion("0.29.21") + if LooseVersion(cython_version) < required_version: + # too old or wrong cython, skip the test + cython = None + +pytestmark = pytest.mark.skipif(cython is None, reason="requires cython") + + +@pytest.fixture +def install_temp(request, tmp_path): + # Based in part on test_cython from random.tests.test_extending + + here = os.path.dirname(__file__) + ext_dir = os.path.join(here, "examples") + + cytest = str(tmp_path / "cytest") + + shutil.copytree(ext_dir, cytest) + # build the examples and "install" them into a temporary directory + + install_log = str(tmp_path / "tmp_install_log.txt") + subprocess.check_call( + [ + sys.executable, + "setup.py", + "build", + "install", + "--prefix", str(tmp_path / "installdir"), + "--single-version-externally-managed", + "--record", + install_log, + ], + cwd=cytest, + ) + + # In order to import the built module, we need its path to sys.path + # so parse that out of the record + with open(install_log) as fid: + for line in fid: + if "checks" in line: + sys.path.append(os.path.dirname(line)) + break + else: + raise RuntimeError(f'could not parse "{install_log}"') + + +def test_is_timedelta64_object(install_temp): + import checks + + assert checks.is_td64(np.timedelta64(1234)) + assert checks.is_td64(np.timedelta64(1234, "ns")) + assert checks.is_td64(np.timedelta64("NaT", "ns")) + + assert not checks.is_td64(1) + assert not checks.is_td64(None) + assert not checks.is_td64("foo") + assert not checks.is_td64(np.datetime64("now", "s")) + + +def test_is_datetime64_object(install_temp): + import checks + + assert checks.is_dt64(np.datetime64(1234, "ns")) + assert checks.is_dt64(np.datetime64("NaT", "ns")) + + assert not checks.is_dt64(1) + assert not checks.is_dt64(None) + assert not checks.is_dt64("foo") + assert not checks.is_dt64(np.timedelta64(1234)) + + +def test_get_datetime64_value(install_temp): + import checks + + dt64 = np.datetime64("2016-01-01", "ns") + + result = checks.get_dt64_value(dt64) + expected = dt64.view("i8") + + assert result == expected + + +def test_get_timedelta64_value(install_temp): + import checks + + td64 = np.timedelta64(12345, "h") + + result = checks.get_td64_value(td64) + expected = td64.view("i8") + + assert result == expected + + +def test_get_datetime64_unit(install_temp): + import checks + + dt64 = np.datetime64("2016-01-01", "ns") + result = checks.get_dt64_unit(dt64) + expected = 10 + assert result == expected + + td64 = np.timedelta64(12345, "h") + result = checks.get_dt64_unit(td64) + expected = 5 + assert result == expected + + +def test_abstract_scalars(install_temp): + import checks + + assert checks.is_integer(1) + assert checks.is_integer(np.int8(1)) + assert checks.is_integer(np.uint64(1)) diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_datetime.py b/MLPY/Lib/site-packages/numpy/core/tests/test_datetime.py new file mode 100644 index 0000000000000000000000000000000000000000..f8bcb82ac441137b0bd93a362da5ec65bdb6ffdc --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/test_datetime.py @@ -0,0 +1,2470 @@ + +import numpy +import numpy as np +import datetime +import pytest +from numpy.testing import ( + assert_, assert_equal, assert_raises, assert_warns, suppress_warnings, + assert_raises_regex, assert_array_equal, + ) +from numpy.compat import pickle + +# Use pytz to test out various time zones if available +try: + from pytz import timezone as tz + _has_pytz = True +except ImportError: + _has_pytz = 
False + +try: + RecursionError +except NameError: + RecursionError = RuntimeError # python < 3.5 + + +class TestDateTime: + def test_datetime_dtype_creation(self): + for unit in ['Y', 'M', 'W', 'D', + 'h', 'm', 's', 'ms', 'us', + 'μs', # alias for us + 'ns', 'ps', 'fs', 'as']: + dt1 = np.dtype('M8[750%s]' % unit) + assert_(dt1 == np.dtype('datetime64[750%s]' % unit)) + dt2 = np.dtype('m8[%s]' % unit) + assert_(dt2 == np.dtype('timedelta64[%s]' % unit)) + + # Generic units shouldn't add [] to the end + assert_equal(str(np.dtype("M8")), "datetime64") + + # Should be possible to specify the endianness + assert_equal(np.dtype("=M8"), np.dtype("M8")) + assert_equal(np.dtype("=M8[s]"), np.dtype("M8[s]")) + assert_(np.dtype(">M8") == np.dtype("M8") or + np.dtype("<M8") == np.dtype("M8")) + assert_(np.dtype(">M8[D]") == np.dtype("M8[D]") or + np.dtype("<M8[D]") == np.dtype("M8[D]")) + assert_(np.dtype(">M8") != np.dtype("<M8")) + + assert_equal(str(np.dtype("m8")), "timedelta64") + assert_equal(np.dtype("=m8"), np.dtype("m8")) + assert_equal(np.dtype("=m8[s]"), np.dtype("m8[s]")) + assert_(np.dtype(">m8") == np.dtype("m8") or + np.dtype("<m8") == np.dtype("m8")) + assert_(np.dtype(">m8[D]") == np.dtype("m8[D]") or + np.dtype("<m8[D]") == np.dtype("m8[D]")) + assert_(np.dtype(">m8") != np.dtype("<m8")) + + def test_datetime_nat_casting(self): + a = np.array('NaT', dtype='M8[D]') + b = np.datetime64('NaT', '[D]') + + # Scalars -> Scalars + assert_equal(np.datetime64(b, '[s]'), np.datetime64('NaT', '[s]')) + assert_equal(np.datetime64(b, '[ms]'), np.datetime64('NaT', '[ms]')) + assert_equal(np.datetime64(b, '[M]'), np.datetime64('NaT', '[M]')) + assert_equal(np.datetime64(b, '[Y]'), np.datetime64('NaT', '[Y]')) + assert_equal(np.datetime64(b, '[W]'), np.datetime64('NaT', '[W]')) + + # Arrays -> Scalars + assert_equal(np.datetime64(a, '[s]'), np.datetime64('NaT', '[s]')) + assert_equal(np.datetime64(a, '[ms]'), np.datetime64('NaT', '[ms]')) + assert_equal(np.datetime64(a, '[M]'), np.datetime64('NaT', '[M]')) + assert_equal(np.datetime64(a, '[Y]'), np.datetime64('NaT', '[Y]')) + assert_equal(np.datetime64(a, '[W]'), np.datetime64('NaT', '[W]')) + + # NaN -> NaT + nan = np.array([np.nan] * 8) + fnan = nan.astype('f') + lnan = nan.astype('g') + cnan = nan.astype('D') + cfnan = nan.astype('F') + clnan = nan.astype('G') + + nat = np.array([np.datetime64('NaT')] * 8) + assert_equal(nan.astype('M8[ns]'), nat) + assert_equal(fnan.astype('M8[ns]'), nat) + assert_equal(lnan.astype('M8[ns]'), nat) + assert_equal(cnan.astype('M8[ns]'), nat) + assert_equal(cfnan.astype('M8[ns]'), nat) + assert_equal(clnan.astype('M8[ns]'), nat) + + nat = np.array([np.timedelta64('NaT')] * 8) + assert_equal(nan.astype('timedelta64[ns]'), nat) + assert_equal(fnan.astype('timedelta64[ns]'), nat) + assert_equal(lnan.astype('timedelta64[ns]'), nat) + assert_equal(cnan.astype('timedelta64[ns]'), nat) + assert_equal(cfnan.astype('timedelta64[ns]'), nat) + assert_equal(clnan.astype('timedelta64[ns]'), nat) + + def test_days_creation(self): + assert_equal(np.array('1599', dtype='M8[D]').astype('i8'), + (1600-1970)*365 - (1972-1600)//4 + 3 - 365) + assert_equal(np.array('1600', dtype='M8[D]').astype('i8'), + (1600-1970)*365 - (1972-1600)//4 + 3) + assert_equal(np.array('1601', dtype='M8[D]').astype('i8'), + (1600-1970)*365 - (1972-1600)//4 + 3 + 366) + assert_equal(np.array('1900', dtype='M8[D]').astype('i8'), + (1900-1970)*365 - (1970-1900)//4) + assert_equal(np.array('1901', dtype='M8[D]').astype('i8'), + (1900-1970)*365 - (1970-1900)//4 + 365) + assert_equal(np.array('1967', dtype='M8[D]').astype('i8'), -3*365 - 1) + assert_equal(np.array('1968', dtype='M8[D]').astype('i8'), -2*365 - 1) + assert_equal(np.array('1969', dtype='M8[D]').astype('i8'), -1*365) + assert_equal(np.array('1970', dtype='M8[D]').astype('i8'), 0*365) + assert_equal(np.array('1971', dtype='M8[D]').astype('i8'), 1*365) + assert_equal(np.array('1972', dtype='M8[D]').astype('i8'), 2*365) + assert_equal(np.array('1973', dtype='M8[D]').astype('i8'), 3*365 + 1) + 
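# (editor's note: M8[D] stores a count of days since the 1970-01-01 epoch, + # so each expected value is 365 days per year plus one day per intervening + # leap year; 1973, for example, begins 3*365 + 1 days after 1970 because + # 1972 was a leap year.) + 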
assert_equal(np.array('1974', dtype='M8[D]').astype('i8'), 4*365 + 1) + assert_equal(np.array('2000', dtype='M8[D]').astype('i8'), + (2000 - 1970)*365 + (2000 - 1972)//4) + assert_equal(np.array('2001', dtype='M8[D]').astype('i8'), + (2000 - 1970)*365 + (2000 - 1972)//4 + 366) + assert_equal(np.array('2400', dtype='M8[D]').astype('i8'), + (2400 - 1970)*365 + (2400 - 1972)//4 - 3) + assert_equal(np.array('2401', dtype='M8[D]').astype('i8'), + (2400 - 1970)*365 + (2400 - 1972)//4 - 3 + 366) + + assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('i8'), + (1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 28) + assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('i8'), + (1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 29) + assert_equal(np.array('2000-02-29', dtype='M8[D]').astype('i8'), + (2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 28) + assert_equal(np.array('2000-03-01', dtype='M8[D]').astype('i8'), + (2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 29) + assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('i8'), + (2000 - 1970)*365 + (2000 - 1972)//4 + 366 + 31 + 28 + 21) + + def test_days_to_pydate(self): + assert_equal(np.array('1599', dtype='M8[D]').astype('O'), + datetime.date(1599, 1, 1)) + assert_equal(np.array('1600', dtype='M8[D]').astype('O'), + datetime.date(1600, 1, 1)) + assert_equal(np.array('1601', dtype='M8[D]').astype('O'), + datetime.date(1601, 1, 1)) + assert_equal(np.array('1900', dtype='M8[D]').astype('O'), + datetime.date(1900, 1, 1)) + assert_equal(np.array('1901', dtype='M8[D]').astype('O'), + datetime.date(1901, 1, 1)) + assert_equal(np.array('2000', dtype='M8[D]').astype('O'), + datetime.date(2000, 1, 1)) + assert_equal(np.array('2001', dtype='M8[D]').astype('O'), + datetime.date(2001, 1, 1)) + assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('O'), + datetime.date(1600, 2, 29)) + assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('O'), + datetime.date(1600, 3, 1)) + assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('O'), + datetime.date(2001, 3, 22)) + + def test_dtype_comparison(self): + assert_(not (np.dtype('M8[us]') == np.dtype('M8[ms]'))) + assert_(np.dtype('M8[us]') != np.dtype('M8[ms]')) + assert_(np.dtype('M8[2D]') != np.dtype('M8[D]')) + assert_(np.dtype('M8[D]') != np.dtype('M8[2D]')) + + def test_pydatetime_creation(self): + a = np.array(['1960-03-12', datetime.date(1960, 3, 12)], dtype='M8[D]') + assert_equal(a[0], a[1]) + a = np.array(['1999-12-31', datetime.date(1999, 12, 31)], dtype='M8[D]') + assert_equal(a[0], a[1]) + a = np.array(['2000-01-01', datetime.date(2000, 1, 1)], dtype='M8[D]') + assert_equal(a[0], a[1]) + # Will fail if the date changes during the exact right moment + a = np.array(['today', datetime.date.today()], dtype='M8[D]') + assert_equal(a[0], a[1]) + # datetime.datetime.now() returns local time, not UTC + #a = np.array(['now', datetime.datetime.now()], dtype='M8[s]') + #assert_equal(a[0], a[1]) + + # we can give a datetime.date time units + assert_equal(np.array(datetime.date(1960, 3, 12), dtype='M8[s]'), + np.array(np.datetime64('1960-03-12T00:00:00'))) + + def test_datetime_string_conversion(self): + a = ['2011-03-16', '1920-01-01', '2013-05-19'] + str_a = np.array(a, dtype='S') + uni_a = np.array(a, dtype='U') + dt_a = np.array(a, dtype='M') + + # String to datetime + assert_equal(dt_a, str_a.astype('M')) + assert_equal(dt_a.dtype, str_a.astype('M').dtype) + dt_b = np.empty_like(dt_a) + dt_b[...] 
= str_a + assert_equal(dt_a, dt_b) + + # Datetime to string + assert_equal(str_a, dt_a.astype('S0')) + str_b = np.empty_like(str_a) + str_b[...] = dt_a + assert_equal(str_a, str_b) + + # Unicode to datetime + assert_equal(dt_a, uni_a.astype('M')) + assert_equal(dt_a.dtype, uni_a.astype('M').dtype) + dt_b = np.empty_like(dt_a) + dt_b[...] = uni_a + assert_equal(dt_a, dt_b) + + # Datetime to unicode + assert_equal(uni_a, dt_a.astype('U')) + uni_b = np.empty_like(uni_a) + uni_b[...] = dt_a + assert_equal(uni_a, uni_b) + + # Datetime to long string - gh-9712 + assert_equal(str_a, dt_a.astype((np.string_, 128))) + str_b = np.empty(str_a.shape, dtype=(np.string_, 128)) + str_b[...] = dt_a + assert_equal(str_a, str_b) + + @pytest.mark.parametrize("time_dtype", ["m8[D]", "M8[Y]"]) + def test_time_byteswapping(self, time_dtype): + times = np.array(["2017", "NaT"], dtype=time_dtype) + times_swapped = times.astype(times.dtype.newbyteorder()) + assert_array_equal(times, times_swapped) + + unswapped = times_swapped.view(np.int64).newbyteorder() + assert_array_equal(unswapped, times.view(np.int64)) + + @pytest.mark.parametrize(["time1", "time2"], + [("M8[s]", "M8[D]"), ("m8[s]", "m8[ns]")]) + def test_time_byteswapped_cast(self, time1, time2): + dtype1 = np.dtype(time1) + dtype2 = np.dtype(time2) + times = np.array(["2017", "NaT"], dtype=dtype1) + expected = times.astype(dtype2) + + # Test that every byte-swapping combination also returns the same + # results (previous tests check that this comparison works fine). + res = times.astype(dtype1.newbyteorder()).astype(dtype2) + assert_array_equal(res, expected) + res = times.astype(dtype2.newbyteorder()) + assert_array_equal(res, expected) + res = times.astype(dtype1.newbyteorder()).astype(dtype2.newbyteorder()) + assert_array_equal(res, expected) + + @pytest.mark.parametrize("time_dtype", ["m8[D]", "M8[Y]"]) + @pytest.mark.parametrize("str_dtype", ["U", "S"]) + def test_datetime_conversions_byteorders(self, str_dtype, time_dtype): + times = np.array(["2017", "NaT"], dtype=time_dtype) + # Unfortunately, timedelta does not roundtrip: + from_strings = np.array(["2017", "NaT"], dtype=str_dtype) + to_strings = times.astype(str_dtype) # assume this is correct + + # Check that conversion from times to string works if src is swapped: + times_swapped = times.astype(times.dtype.newbyteorder()) + res = times_swapped.astype(str_dtype) + assert_array_equal(res, to_strings) + # And also if both are swapped: + res = times_swapped.astype(to_strings.dtype.newbyteorder()) + assert_array_equal(res, to_strings) + # only destination is swapped: + res = times.astype(to_strings.dtype.newbyteorder()) + assert_array_equal(res, to_strings) + + # Check that conversion from string to times works if src is swapped: + from_strings_swapped = from_strings.astype( + from_strings.dtype.newbyteorder()) + res = from_strings_swapped.astype(time_dtype) + assert_array_equal(res, times) + # And if both are swapped: + res = from_strings_swapped.astype(times.dtype.newbyteorder()) + assert_array_equal(res, times) + # Only destination is swapped: + res = from_strings.astype(times.dtype.newbyteorder()) + assert_array_equal(res, times) + + def test_datetime_array_str(self): + a = np.array(['2011-03-16', '1920-01-01', '2013-05-19'], dtype='M') + assert_equal(str(a), "['2011-03-16' '1920-01-01' '2013-05-19']") + + a = np.array(['2011-03-16T13:55', '1920-01-01T03:12'], dtype='M') + assert_equal(np.array2string(a, separator=', ', + formatter={'datetime': lambda x: + "'%s'" % np.datetime_as_string(x, 
timezone='UTC')}), + "['2011-03-16T13:55Z', '1920-01-01T03:12Z']") + + # Check that one NaT doesn't corrupt subsequent entries + a = np.array(['2010', 'NaT', '2030']).astype('M') + assert_equal(str(a), "['2010' 'NaT' '2030']") + + def test_timedelta_array_str(self): + a = np.array([-1, 0, 100], dtype='m') + assert_equal(str(a), "[ -1 0 100]") + a = np.array(['NaT', 'NaT'], dtype='m') + assert_equal(str(a), "['NaT' 'NaT']") + # Check right-alignment with NaTs + a = np.array([-1, 'NaT', 0], dtype='m') + assert_equal(str(a), "[ -1 'NaT' 0]") + a = np.array([-1, 'NaT', 1234567], dtype='m') + assert_equal(str(a), "[ -1 'NaT' 1234567]") + + # Test with other byteorder: + a = np.array([-1, 'NaT', 1234567], dtype='>m') + assert_equal(str(a), "[ -1 'NaT' 1234567]") + a = np.array([-1, 'NaT', 1234567], dtype='<m') + assert_equal(str(a), "[ -1 'NaT' 1234567]") + + def test_pickle(self): + # Check that loading pickles from 1.6 works + pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \ + b"(I4\nS'>'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n" + \ + b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb." + assert_equal(pickle.loads(pkl), np.dtype('>M8[us]')) + + def test_setstate(self): + "Verify that datetime dtype __setstate__ can handle bad arguments" + dt = np.dtype('>M8[us]') + assert_raises(ValueError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, 1)) + assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2]) + assert_raises(TypeError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx'))) + assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2]) + + def test_dtype_promotion(self): + # datetime <op> datetime computes the metadata gcd + # timedelta <op> timedelta computes the metadata gcd + for mM in ['m', 'M']: + assert_equal( + np.promote_types(np.dtype(mM+'8[2Y]'), np.dtype(mM+'8[2Y]')), + np.dtype(mM+'8[2Y]')) + assert_equal( + np.promote_types(np.dtype(mM+'8[12Y]'), np.dtype(mM+'8[15Y]')), + np.dtype(mM+'8[3Y]')) + assert_equal( + np.promote_types(np.dtype(mM+'8[62M]'), np.dtype(mM+'8[24M]')), + np.dtype(mM+'8[2M]')) + assert_equal( + np.promote_types(np.dtype(mM+'8[1W]'), np.dtype(mM+'8[2D]')), + np.dtype(mM+'8[1D]')) + assert_equal( + np.promote_types(np.dtype(mM+'8[W]'), np.dtype(mM+'8[13s]')), + np.dtype(mM+'8[s]')) + assert_equal( + np.promote_types(np.dtype(mM+'8[13W]'), np.dtype(mM+'8[49s]')), + np.dtype(mM+'8[7s]')) + # timedelta <op> timedelta raises when there is no reasonable gcd + assert_raises(TypeError, np.promote_types, + np.dtype('m8[Y]'), np.dtype('m8[D]')) + assert_raises(TypeError, np.promote_types, + np.dtype('m8[M]'), np.dtype('m8[W]')) + # timedelta and float cannot be safely cast with each other + assert_raises(TypeError, np.promote_types, "float32", "m8") + assert_raises(TypeError, np.promote_types, "m8", "float32") + assert_raises(TypeError, np.promote_types, "uint64", "m8") + assert_raises(TypeError, np.promote_types, "m8", "uint64") + + # timedelta <op> timedelta may overflow with big unit ranges + assert_raises(OverflowError, np.promote_types, + np.dtype('m8[W]'), np.dtype('m8[fs]')) + assert_raises(OverflowError, np.promote_types, + np.dtype('m8[s]'), np.dtype('m8[as]')) + + def test_cast_overflow(self): + # gh-4486 + def cast(): + numpy.datetime64("1971-01-01 00:00:00.000000000000000").astype("<M8[D]") + assert_raises(OverflowError, cast) + + @pytest.mark.parametrize("unit", ['Y', 'M', 'W', 'D', 'h', 'm', 's', + 'ms', 'us', 'ns', 'ps', 'fs', 'as']) + @pytest.mark.parametrize("dstr", ['<datetime64[%s]', '>datetime64[%s]', + '<timedelta64[%s]', '>timedelta64[%s]']) + def test_isfinite_isinf_isnan_units(self, unit, dstr): + '''check isfinite, isinf, isnan for all units of <M, >M, <m, >m dtypes + ''' + arr_val = [123, -321, "NaT"] + arr = np.array(arr_val, dtype= dstr % unit) + pos = np.array([True, True, False]) + neg = np.array([False, False, True]) + false = np.array([False, False, False]) + assert_equal(np.isfinite(arr), pos) + assert_equal(np.isinf(arr), false) + 
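# (editor's note: NaT plays the role NaN plays for floats: it is neither + # finite nor infinite and is flagged only by isnan, which is exactly what + # the pos/neg/false masks above encode.) + 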
assert_equal(np.isnan(arr), neg) + + def test_assert_equal(self): + assert_raises(AssertionError, assert_equal, + np.datetime64('nat'), np.timedelta64('nat')) + + def test_corecursive_input(self): + # construct a co-recursive list + a, b = [], [] + a.append(b) + b.append(a) + obj_arr = np.array([None]) + obj_arr[0] = a + + # At some point this caused a stack overflow (gh-11154). Now raises + # ValueError since the nested list cannot be converted to a datetime. + assert_raises(ValueError, obj_arr.astype, 'M8') + assert_raises(ValueError, obj_arr.astype, 'm8') + + @pytest.mark.parametrize("shape", [(), (1,)]) + def test_discovery_from_object_array(self, shape): + arr = np.array("2020-10-10", dtype=object).reshape(shape) + res = np.array("2020-10-10", dtype="M8").reshape(shape) + assert res.dtype == np.dtype("M8[D]") + assert_equal(arr.astype("M8"), res) + arr[...] = np.bytes_("2020-10-10") # try a numpy string type + assert_equal(arr.astype("M8"), res) + arr = arr.astype("S") + assert_equal(arr.astype("S").astype("M8"), res) + + @pytest.mark.parametrize("time_unit", [ + "Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as", + # compound units + "10D", "2M", + ]) + def test_limit_symmetry(self, time_unit): + """ + Dates should have symmetric limits around the unix epoch at +/-np.int64 + """ + epoch = np.datetime64(0, time_unit) + latest = np.datetime64(np.iinfo(np.int64).max, time_unit) + earliest = np.datetime64(-np.iinfo(np.int64).max, time_unit) + + # above should not have overflowed + assert earliest < epoch < latest + + @pytest.mark.parametrize("time_unit", [ + "Y", "M", + pytest.param("W", marks=pytest.mark.xfail(reason="gh-13197")), + "D", "h", "m", + "s", "ms", "us", "ns", "ps", "fs", "as", + pytest.param("10D", marks=pytest.mark.xfail(reason="similar to gh-13197")), + ]) + @pytest.mark.parametrize("sign", [-1, 1]) + def test_limit_str_roundtrip(self, time_unit, sign): + """ + Limits should roundtrip when converted to strings. + + This tests the conversion to and from npy_datetimestruct. + """ + # TODO: add absolute (gold standard) time span limit strings + limit = np.datetime64(np.iinfo(np.int64).max * sign, time_unit) + + # Convert to string and back. Explicit unit needed since the day and + # week reprs are not distinguishable. 
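+ # (editor's note: str(np.datetime64(0, "D")) and str(np.datetime64(0, "W")) + # both print "1970-01-01", so the unit cannot be recovered from the string + # alone.)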
+ limit_via_str = np.datetime64(str(limit), time_unit) + assert limit_via_str == limit + + +class TestDateTimeData: + + def test_basic(self): + a = np.array(['1980-03-23'], dtype=np.datetime64) + assert_equal(np.datetime_data(a.dtype), ('D', 1)) + + def test_bytes(self): + # byte units are converted to unicode + dt = np.datetime64('2000', (b'ms', 5)) + assert np.datetime_data(dt.dtype) == ('ms', 5) + + dt = np.datetime64('2000', b'5ms') + assert np.datetime_data(dt.dtype) == ('ms', 5) + + def test_non_ascii(self): + # μs is normalized to μ + dt = np.datetime64('2000', ('μs', 5)) + assert np.datetime_data(dt.dtype) == ('us', 5) + + dt = np.datetime64('2000', '5μs') + assert np.datetime_data(dt.dtype) == ('us', 5) diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_defchararray.py b/MLPY/Lib/site-packages/numpy/core/tests/test_defchararray.py new file mode 100644 index 0000000000000000000000000000000000000000..75f571b9e019b1f38cf53f4c488fe0b8403af7dd --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/test_defchararray.py @@ -0,0 +1,673 @@ + +import numpy as np +from numpy.core.multiarray import _vec_string +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, assert_raises, + assert_raises_regex + ) + +kw_unicode_true = {'unicode': True} # make 2to3 work properly +kw_unicode_false = {'unicode': False} + +class TestBasic: + def test_from_object_array(self): + A = np.array([['abc', 2], + ['long ', '0123456789']], dtype='O') + B = np.char.array(A) + assert_equal(B.dtype.itemsize, 10) + assert_array_equal(B, [[b'abc', b'2'], + [b'long', b'0123456789']]) + + def test_from_object_array_unicode(self): + A = np.array([['abc', u'Sigma \u03a3'], + ['long ', '0123456789']], dtype='O') + assert_raises(ValueError, np.char.array, (A,)) + B = np.char.array(A, **kw_unicode_true) + assert_equal(B.dtype.itemsize, 10 * np.array('a', 'U').dtype.itemsize) + assert_array_equal(B, [['abc', u'Sigma \u03a3'], + ['long', '0123456789']]) + + def test_from_string_array(self): + A = np.array([[b'abc', b'foo'], + [b'long ', b'0123456789']]) + assert_equal(A.dtype.type, np.string_) + B = np.char.array(A) + assert_array_equal(B, A) + assert_equal(B.dtype, A.dtype) + assert_equal(B.shape, A.shape) + B[0, 0] = 'changed' + assert_(B[0, 0] != A[0, 0]) + C = np.char.asarray(A) + assert_array_equal(C, A) + assert_equal(C.dtype, A.dtype) + C[0, 0] = 'changed again' + assert_(C[0, 0] != B[0, 0]) + assert_(C[0, 0] == A[0, 0]) + + def test_from_unicode_array(self): + A = np.array([['abc', u'Sigma \u03a3'], + ['long ', '0123456789']]) + assert_equal(A.dtype.type, np.unicode_) + B = np.char.array(A) + assert_array_equal(B, A) + assert_equal(B.dtype, A.dtype) + assert_equal(B.shape, A.shape) + B = np.char.array(A, **kw_unicode_true) + assert_array_equal(B, A) + assert_equal(B.dtype, A.dtype) + assert_equal(B.shape, A.shape) + + def fail(): + np.char.array(A, **kw_unicode_false) + + assert_raises(UnicodeEncodeError, fail) + + def test_unicode_upconvert(self): + A = np.char.array(['abc']) + B = np.char.array([u'\u03a3']) + assert_(issubclass((A + B).dtype.type, np.unicode_)) + + def test_from_string(self): + A = np.char.array(b'abc') + assert_equal(len(A), 1) + assert_equal(len(A[0]), 3) + assert_(issubclass(A.dtype.type, np.string_)) + + def test_from_unicode(self): + A = np.char.array(u'\u03a3') + assert_equal(len(A), 1) + assert_equal(len(A[0]), 1) + assert_equal(A.itemsize, 4) + assert_(issubclass(A.dtype.type, np.unicode_)) + +class TestVecString: + def test_non_existent_method(self): + + def 
fail(): + _vec_string('a', np.string_, 'bogus') + + assert_raises(AttributeError, fail) + + def test_non_string_array(self): + + def fail(): + _vec_string(1, np.string_, 'strip') + + assert_raises(TypeError, fail) + + def test_invalid_args_tuple(self): + + def fail(): + _vec_string(['a'], np.string_, 'strip', 1) + + assert_raises(TypeError, fail) + + def test_invalid_type_descr(self): + + def fail(): + _vec_string(['a'], 'BOGUS', 'strip') + + assert_raises(TypeError, fail) + + def test_invalid_function_args(self): + + def fail(): + _vec_string(['a'], np.string_, 'strip', (1,)) + + assert_raises(TypeError, fail) + + def test_invalid_result_type(self): + + def fail(): + _vec_string(['a'], np.int_, 'strip') + + assert_raises(TypeError, fail) + + def test_broadcast_error(self): + + def fail(): + _vec_string([['abc', 'def']], np.int_, 'find', (['a', 'd', 'j'],)) + + assert_raises(ValueError, fail) + + +class TestWhitespace: + def setup(self): + self.A = np.array([['abc ', '123 '], + ['789 ', 'xyz ']]).view(np.chararray) + self.B = np.array([['abc', '123'], + ['789', 'xyz']]).view(np.chararray) + + def test1(self): + assert_(np.all(self.A == self.B)) + assert_(np.all(self.A >= self.B)) + assert_(np.all(self.A <= self.B)) + assert_(not np.any(self.A > self.B)) + assert_(not np.any(self.A < self.B)) + assert_(not np.any(self.A != self.B)) + +class TestChar: + def setup(self): + self.A = np.array('abc1', dtype='c').view(np.chararray) + + def test_it(self): + assert_equal(self.A.shape, (4,)) + assert_equal(self.A.upper()[:2].tobytes(), b'AB') + +class TestComparisons: + def setup(self): + self.A = np.array([['abc', '123'], + ['789', 'xyz']]).view(np.chararray) + self.B = np.array([['efg', '123 '], + ['051', 'tuv']]).view(np.chararray) + + def test_not_equal(self): + assert_array_equal((self.A != self.B), [[True, False], [True, True]]) + + def test_equal(self): + assert_array_equal((self.A == self.B), [[False, True], [False, False]]) + + def test_greater_equal(self): + assert_array_equal((self.A >= self.B), [[False, True], [True, True]]) + + def test_less_equal(self): + assert_array_equal((self.A <= self.B), [[True, True], [False, False]]) + + def test_greater(self): + assert_array_equal((self.A > self.B), [[False, False], [True, True]]) + + def test_less(self): + assert_array_equal((self.A < self.B), [[True, False], [False, False]]) + + def test_type(self): + out1 = np.char.equal(self.A, self.B) + out2 = np.char.equal('a', 'a') + assert_(isinstance(out1, np.ndarray)) + assert_(isinstance(out2, np.ndarray)) + +class TestComparisonsMixed1(TestComparisons): + """Ticket #1276""" + + def setup(self): + TestComparisons.setup(self) + self.B = np.array([['efg', '123 '], + ['051', 'tuv']], np.unicode_).view(np.chararray) + +class TestComparisonsMixed2(TestComparisons): + """Ticket #1276""" + + def setup(self): + TestComparisons.setup(self) + self.A = np.array([['abc', '123'], + ['789', 'xyz']], np.unicode_).view(np.chararray) + +class TestInformation: + def setup(self): + self.A = np.array([[' abc ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']]).view(np.chararray) + self.B = np.array([[u' \u03a3 ', u''], + [u'12345', u'MixedCase'], + [u'123 \t 345 \0 ', u'UPPER']]).view(np.chararray) + + def test_len(self): + assert_(issubclass(np.char.str_len(self.A).dtype.type, np.integer)) + assert_array_equal(np.char.str_len(self.A), [[5, 0], [5, 9], [12, 5]]) + assert_array_equal(np.char.str_len(self.B), [[3, 0], [5, 9], [12, 5]]) + + def test_count(self): + 
assert_(issubclass(self.A.count('').dtype.type, np.integer)) + assert_array_equal(self.A.count('a'), [[1, 0], [0, 1], [0, 0]]) + assert_array_equal(self.A.count('123'), [[0, 0], [1, 0], [1, 0]]) + # Python doesn't seem to like counting NULL characters + # assert_array_equal(self.A.count('\0'), [[0, 0], [0, 0], [1, 0]]) + assert_array_equal(self.A.count('a', 0, 2), [[1, 0], [0, 0], [0, 0]]) + assert_array_equal(self.B.count('a'), [[0, 0], [0, 1], [0, 0]]) + assert_array_equal(self.B.count('123'), [[0, 0], [1, 0], [1, 0]]) + # assert_array_equal(self.B.count('\0'), [[0, 0], [0, 0], [1, 0]]) + + def test_endswith(self): + assert_(issubclass(self.A.endswith('').dtype.type, np.bool_)) + assert_array_equal(self.A.endswith(' '), [[1, 0], [0, 0], [1, 0]]) + assert_array_equal(self.A.endswith('3', 0, 3), [[0, 0], [1, 0], [1, 0]]) + + def fail(): + self.A.endswith('3', 'fdjk') + + assert_raises(TypeError, fail) + + def test_find(self): + assert_(issubclass(self.A.find('a').dtype.type, np.integer)) + assert_array_equal(self.A.find('a'), [[1, -1], [-1, 6], [-1, -1]]) + assert_array_equal(self.A.find('3'), [[-1, -1], [2, -1], [2, -1]]) + assert_array_equal(self.A.find('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]]) + assert_array_equal(self.A.find(['1', 'P']), [[-1, -1], [0, -1], [0, 1]]) + + def test_index(self): + + def fail(): + self.A.index('a') + + assert_raises(ValueError, fail) + assert_(np.char.index('abcba', 'b') == 1) + assert_(issubclass(np.char.index('abcba', 'b').dtype.type, np.integer)) + + def test_isalnum(self): + assert_(issubclass(self.A.isalnum().dtype.type, np.bool_)) + assert_array_equal(self.A.isalnum(), [[False, False], [True, True], [False, True]]) + + def test_isalpha(self): + assert_(issubclass(self.A.isalpha().dtype.type, np.bool_)) + assert_array_equal(self.A.isalpha(), [[False, False], [False, True], [False, True]]) + + def test_isdigit(self): + assert_(issubclass(self.A.isdigit().dtype.type, np.bool_)) + assert_array_equal(self.A.isdigit(), [[False, False], [True, False], [False, False]]) + + def test_islower(self): + assert_(issubclass(self.A.islower().dtype.type, np.bool_)) + assert_array_equal(self.A.islower(), [[True, False], [False, False], [False, False]]) + + def test_isspace(self): + assert_(issubclass(self.A.isspace().dtype.type, np.bool_)) + assert_array_equal(self.A.isspace(), [[False, False], [False, False], [False, False]]) + + def test_istitle(self): + assert_(issubclass(self.A.istitle().dtype.type, np.bool_)) + assert_array_equal(self.A.istitle(), [[False, False], [False, False], [False, False]]) + + def test_isupper(self): + assert_(issubclass(self.A.isupper().dtype.type, np.bool_)) + assert_array_equal(self.A.isupper(), [[False, False], [False, False], [False, True]]) + + def test_rfind(self): + assert_(issubclass(self.A.rfind('a').dtype.type, np.integer)) + assert_array_equal(self.A.rfind('a'), [[1, -1], [-1, 6], [-1, -1]]) + assert_array_equal(self.A.rfind('3'), [[-1, -1], [2, -1], [6, -1]]) + assert_array_equal(self.A.rfind('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]]) + assert_array_equal(self.A.rfind(['1', 'P']), [[-1, -1], [0, -1], [0, 2]]) + + def test_rindex(self): + + def fail(): + self.A.rindex('a') + + assert_raises(ValueError, fail) + assert_(np.char.rindex('abcba', 'b') == 3) + assert_(issubclass(np.char.rindex('abcba', 'b').dtype.type, np.integer)) + + def test_startswith(self): + assert_(issubclass(self.A.startswith('').dtype.type, np.bool_)) + assert_array_equal(self.A.startswith(' '), [[1, 0], [0, 0], [0, 0]]) + 
assert_array_equal(self.A.startswith('1', 0, 3), [[0, 0], [1, 0], [1, 0]]) + + def fail(): + self.A.startswith('3', 'fdjk') + + assert_raises(TypeError, fail) + + +class TestMethods: + def setup(self): + self.A = np.array([[' abc ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']], + dtype='S').view(np.chararray) + self.B = np.array([[u' \u03a3 ', u''], + [u'12345', u'MixedCase'], + [u'123 \t 345 \0 ', u'UPPER']]).view(np.chararray) + + def test_capitalize(self): + tgt = [[b' abc ', b''], + [b'12345', b'Mixedcase'], + [b'123 \t 345 \0 ', b'Upper']] + assert_(issubclass(self.A.capitalize().dtype.type, np.string_)) + assert_array_equal(self.A.capitalize(), tgt) + + tgt = [[u' \u03c3 ', ''], + ['12345', 'Mixedcase'], + ['123 \t 345 \0 ', 'Upper']] + assert_(issubclass(self.B.capitalize().dtype.type, np.unicode_)) + assert_array_equal(self.B.capitalize(), tgt) + + def test_center(self): + assert_(issubclass(self.A.center(10).dtype.type, np.string_)) + C = self.A.center([10, 20]) + assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) + + C = self.A.center(20, b'#') + assert_(np.all(C.startswith(b'#'))) + assert_(np.all(C.endswith(b'#'))) + + C = np.char.center(b'FOO', [[10, 20], [15, 8]]) + tgt = [[b' FOO ', b' FOO '], + [b' FOO ', b' FOO ']] + assert_(issubclass(C.dtype.type, np.string_)) + assert_array_equal(C, tgt) + + def test_decode(self): + A = np.char.array([b'\\u03a3']) + assert_(A.decode('unicode-escape')[0] == '\u03a3') + + def test_encode(self): + B = self.B.encode('unicode_escape') + assert_(B[0][0] == str(' \\u03a3 ').encode('latin1')) + + def test_expandtabs(self): + T = self.A.expandtabs() + assert_(T[2, 0] == b'123 345 \0') + + def test_join(self): + # NOTE: list(b'123') == [49, 50, 51] + # so that b','.join(b'123') results to an error on Py3 + A0 = self.A.decode('ascii') + + A = np.char.join([',', '#'], A0) + assert_(issubclass(A.dtype.type, np.unicode_)) + tgt = np.array([[' ,a,b,c, ', ''], + ['1,2,3,4,5', 'M#i#x#e#d#C#a#s#e'], + ['1,2,3, ,\t, ,3,4,5, ,\x00, ', 'U#P#P#E#R']]) + assert_array_equal(np.char.join([',', '#'], A0), tgt) + + def test_ljust(self): + assert_(issubclass(self.A.ljust(10).dtype.type, np.string_)) + + C = self.A.ljust([10, 20]) + assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) + + C = self.A.ljust(20, b'#') + assert_array_equal(C.startswith(b'#'), [ + [False, True], [False, False], [False, False]]) + assert_(np.all(C.endswith(b'#'))) + + C = np.char.ljust(b'FOO', [[10, 20], [15, 8]]) + tgt = [[b'FOO ', b'FOO '], + [b'FOO ', b'FOO ']] + assert_(issubclass(C.dtype.type, np.string_)) + assert_array_equal(C, tgt) + + def test_lower(self): + tgt = [[b' abc ', b''], + [b'12345', b'mixedcase'], + [b'123 \t 345 \0 ', b'upper']] + assert_(issubclass(self.A.lower().dtype.type, np.string_)) + assert_array_equal(self.A.lower(), tgt) + + tgt = [[u' \u03c3 ', u''], + [u'12345', u'mixedcase'], + [u'123 \t 345 \0 ', u'upper']] + assert_(issubclass(self.B.lower().dtype.type, np.unicode_)) + assert_array_equal(self.B.lower(), tgt) + + def test_lstrip(self): + tgt = [[b'abc ', b''], + [b'12345', b'MixedCase'], + [b'123 \t 345 \0 ', b'UPPER']] + assert_(issubclass(self.A.lstrip().dtype.type, np.string_)) + assert_array_equal(self.A.lstrip(), tgt) + + tgt = [[b' abc', b''], + [b'2345', b'ixedCase'], + [b'23 \t 345 \x00', b'UPPER']] + assert_array_equal(self.A.lstrip([b'1', b'M']), tgt) + + tgt = [[u'\u03a3 ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']] + assert_(issubclass(self.B.lstrip().dtype.type, 
np.unicode_)) + assert_array_equal(self.B.lstrip(), tgt) + + def test_partition(self): + P = self.A.partition([b'3', b'M']) + tgt = [[(b' abc ', b'', b''), (b'', b'', b'')], + [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')], + [(b'12', b'3', b' \t 345 \0 '), (b'UPPER', b'', b'')]] + assert_(issubclass(P.dtype.type, np.string_)) + assert_array_equal(P, tgt) + + def test_replace(self): + R = self.A.replace([b'3', b'a'], + [b'##########', b'@']) + tgt = [[b' abc ', b''], + [b'12##########45', b'MixedC@se'], + [b'12########## \t ##########45 \x00', b'UPPER']] + assert_(issubclass(R.dtype.type, np.string_)) + assert_array_equal(R, tgt) + + def test_rjust(self): + assert_(issubclass(self.A.rjust(10).dtype.type, np.string_)) + + C = self.A.rjust([10, 20]) + assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) + + C = self.A.rjust(20, b'#') + assert_(np.all(C.startswith(b'#'))) + assert_array_equal(C.endswith(b'#'), + [[False, True], [False, False], [False, False]]) + + C = np.char.rjust(b'FOO', [[10, 20], [15, 8]]) + tgt = [[b' FOO', b' FOO'], + [b' FOO', b' FOO']] + assert_(issubclass(C.dtype.type, np.string_)) + assert_array_equal(C, tgt) + + def test_rpartition(self): + P = self.A.rpartition([b'3', b'M']) + tgt = [[(b'', b'', b' abc '), (b'', b'', b'')], + [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')], + [(b'123 \t ', b'3', b'45 \0 '), (b'', b'', b'UPPER')]] + assert_(issubclass(P.dtype.type, np.string_)) + assert_array_equal(P, tgt) + + def test_rsplit(self): + A = self.A.rsplit(b'3') + tgt = [[[b' abc '], [b'']], + [[b'12', b'45'], [b'MixedCase']], + [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]] + assert_(issubclass(A.dtype.type, np.object_)) + assert_equal(A.tolist(), tgt) + + def test_rstrip(self): + assert_(issubclass(self.A.rstrip().dtype.type, np.string_)) + + tgt = [[b' abc', b''], + [b'12345', b'MixedCase'], + [b'123 \t 345', b'UPPER']] + assert_array_equal(self.A.rstrip(), tgt) + + tgt = [[b' abc ', b''], + [b'1234', b'MixedCase'], + [b'123 \t 345 \x00', b'UPP'] + ] + assert_array_equal(self.A.rstrip([b'5', b'ER']), tgt) + + tgt = [[u' \u03a3', ''], + ['12345', 'MixedCase'], + ['123 \t 345', 'UPPER']] + assert_(issubclass(self.B.rstrip().dtype.type, np.unicode_)) + assert_array_equal(self.B.rstrip(), tgt) + + def test_strip(self): + tgt = [[b'abc', b''], + [b'12345', b'MixedCase'], + [b'123 \t 345', b'UPPER']] + assert_(issubclass(self.A.strip().dtype.type, np.string_)) + assert_array_equal(self.A.strip(), tgt) + + tgt = [[b' abc ', b''], + [b'234', b'ixedCas'], + [b'23 \t 345 \x00', b'UPP']] + assert_array_equal(self.A.strip([b'15', b'EReM']), tgt) + + tgt = [[u'\u03a3', ''], + ['12345', 'MixedCase'], + ['123 \t 345', 'UPPER']] + assert_(issubclass(self.B.strip().dtype.type, np.unicode_)) + assert_array_equal(self.B.strip(), tgt) + + def test_split(self): + A = self.A.split(b'3') + tgt = [ + [[b' abc '], [b'']], + [[b'12', b'45'], [b'MixedCase']], + [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]] + assert_(issubclass(A.dtype.type, np.object_)) + assert_equal(A.tolist(), tgt) + + def test_splitlines(self): + A = np.char.array(['abc\nfds\nwer']).splitlines() + assert_(issubclass(A.dtype.type, np.object_)) + assert_(A.shape == (1,)) + assert_(len(A[0]) == 3) + + def test_swapcase(self): + tgt = [[b' ABC ', b''], + [b'12345', b'mIXEDcASE'], + [b'123 \t 345 \0 ', b'upper']] + assert_(issubclass(self.A.swapcase().dtype.type, np.string_)) + assert_array_equal(self.A.swapcase(), tgt) + + tgt = [[u' \u03c3 ', u''], + [u'12345', u'mIXEDcASE'], + [u'123 \t 345 \0 ', 
u'upper']] + assert_(issubclass(self.B.swapcase().dtype.type, np.unicode_)) + assert_array_equal(self.B.swapcase(), tgt) + + def test_title(self): + tgt = [[b' Abc ', b''], + [b'12345', b'Mixedcase'], + [b'123 \t 345 \0 ', b'Upper']] + assert_(issubclass(self.A.title().dtype.type, np.string_)) + assert_array_equal(self.A.title(), tgt) + + tgt = [[u' \u03a3 ', u''], + [u'12345', u'Mixedcase'], + [u'123 \t 345 \0 ', u'Upper']] + assert_(issubclass(self.B.title().dtype.type, np.unicode_)) + assert_array_equal(self.B.title(), tgt) + + def test_upper(self): + tgt = [[b' ABC ', b''], + [b'12345', b'MIXEDCASE'], + [b'123 \t 345 \0 ', b'UPPER']] + assert_(issubclass(self.A.upper().dtype.type, np.string_)) + assert_array_equal(self.A.upper(), tgt) + + tgt = [[u' \u03a3 ', u''], + [u'12345', u'MIXEDCASE'], + [u'123 \t 345 \0 ', u'UPPER']] + assert_(issubclass(self.B.upper().dtype.type, np.unicode_)) + assert_array_equal(self.B.upper(), tgt) + + def test_isnumeric(self): + + def fail(): + self.A.isnumeric() + + assert_raises(TypeError, fail) + assert_(issubclass(self.B.isnumeric().dtype.type, np.bool_)) + assert_array_equal(self.B.isnumeric(), [ + [False, False], [True, False], [False, False]]) + + def test_isdecimal(self): + + def fail(): + self.A.isdecimal() + + assert_raises(TypeError, fail) + assert_(issubclass(self.B.isdecimal().dtype.type, np.bool_)) + assert_array_equal(self.B.isdecimal(), [ + [False, False], [True, False], [False, False]]) + + +class TestOperations: + def setup(self): + self.A = np.array([['abc', '123'], + ['789', 'xyz']]).view(np.chararray) + self.B = np.array([['efg', '456'], + ['051', 'tuv']]).view(np.chararray) + + def test_add(self): + AB = np.array([['abcefg', '123456'], + ['789051', 'xyztuv']]).view(np.chararray) + assert_array_equal(AB, (self.A + self.B)) + assert_(len((self.A + self.B)[0][0]) == 6) + + def test_radd(self): + QA = np.array([['qabc', 'q123'], + ['q789', 'qxyz']]).view(np.chararray) + assert_array_equal(QA, ('q' + self.A)) + + def test_mul(self): + A = self.A + for r in (2, 3, 5, 7, 197): + Ar = np.array([[A[0, 0]*r, A[0, 1]*r], + [A[1, 0]*r, A[1, 1]*r]]).view(np.chararray) + + assert_array_equal(Ar, (self.A * r)) + + for ob in [object(), 'qrs']: + with assert_raises_regex(ValueError, + 'Can only multiply by integers'): + A*ob + + def test_rmul(self): + A = self.A + for r in (2, 3, 5, 7, 197): + Ar = np.array([[A[0, 0]*r, A[0, 1]*r], + [A[1, 0]*r, A[1, 1]*r]]).view(np.chararray) + assert_array_equal(Ar, (r * self.A)) + + for ob in [object(), 'qrs']: + with assert_raises_regex(ValueError, + 'Can only multiply by integers'): + ob * A + + def test_mod(self): + """Ticket #856""" + F = np.array([['%d', '%f'], ['%s', '%r']]).view(np.chararray) + C = np.array([[3, 7], [19, 1]]) + FC = np.array([['3', '7.000000'], + ['19', '1']]).view(np.chararray) + assert_array_equal(FC, F % C) + + A = np.array([['%.3f', '%d'], ['%s', '%r']]).view(np.chararray) + A1 = np.array([['1.000', '1'], ['1', '1']]).view(np.chararray) + assert_array_equal(A1, (A % 1)) + + A2 = np.array([['1.000', '2'], ['3', '4']]).view(np.chararray) + assert_array_equal(A2, (A % [[1, 2], [3, 4]])) + + def test_rmod(self): + assert_(("%s" % self.A) == str(self.A)) + assert_(("%r" % self.A) == repr(self.A)) + + for ob in [42, object()]: + with assert_raises_regex( + TypeError, "unsupported operand type.* and 'chararray'"): + ob % self.A + + def test_slice(self): + """Regression test for https://github.com/numpy/numpy/issues/5982""" + + arr = np.array([['abc ', 'def '], ['geh ', 'ijk ']], + 
dtype='S4').view(np.chararray) + sl1 = arr[:] + assert_array_equal(sl1, arr) + assert_(sl1.base is arr) + assert_(sl1.base.base is arr.base) + + sl2 = arr[:, :] + assert_array_equal(sl2, arr) + assert_(sl2.base is arr) + assert_(sl2.base.base is arr.base) + + assert_(arr[0, 0] == b'abc') + + +def test_empty_indexing(): + """Regression test for ticket 1948.""" + # Check that indexing a chararray with an empty list/array returns an + # empty chararray instead of a chararray with a single empty string in it. + s = np.chararray((4,)) + assert_(s[[]].size == 0) diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_deprecations.py b/MLPY/Lib/site-packages/numpy/core/tests/test_deprecations.py new file mode 100644 index 0000000000000000000000000000000000000000..5450f005eab15f3d483660783dda4a336dea2b6e --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/test_deprecations.py @@ -0,0 +1,1209 @@ +""" +Tests related to deprecation warnings. Also a convenient place +to document how deprecations should eventually be turned into errors. + +""" +import datetime +import operator +import warnings +import pytest +import tempfile +import re +import sys + +import numpy as np +from numpy.testing import ( + assert_raises, assert_warns, assert_, assert_array_equal, SkipTest, KnownFailureException + ) + +from numpy.core._multiarray_tests import fromstring_null_term_c_api + +try: + import pytz + _has_pytz = True +except ImportError: + _has_pytz = False + + +class _DeprecationTestCase: + # Just as warning: warnings uses re.match, so the start of this message + # must match. + message = '' + warning_cls = DeprecationWarning + + def setup(self): + self.warn_ctx = warnings.catch_warnings(record=True) + self.log = self.warn_ctx.__enter__() + + # Do *not* ignore other DeprecationWarnings. Ignoring warnings + # can give very confusing results because of + # https://bugs.python.org/issue4180 and it is probably simplest to + # try to keep the tests cleanly giving only the right warning type. + # (While checking them set to "error" those are ignored anyway) + # We still have them show up, because otherwise they would be raised + warnings.filterwarnings("always", category=self.warning_cls) + warnings.filterwarnings("always", message=self.message, + category=self.warning_cls) + + def teardown(self): + self.warn_ctx.__exit__() + + def assert_deprecated(self, function, num=1, ignore_others=False, + function_fails=False, + exceptions=np._NoValue, + args=(), kwargs={}): + """Test if DeprecationWarnings are given and raised. + + This first checks if the function when called gives `num` + DeprecationWarnings, after that it tries to raise these + DeprecationWarnings and compares them with `exceptions`. + The exceptions can be different for cases where this code path + is simply not anticipated and the exception is replaced. + + Parameters + ---------- + function : callable + The function to test + num : int + Number of DeprecationWarnings to expect. This should normally be 1. + ignore_others : bool + Whether warnings of the wrong type should be ignored (note that + the message is not checked) + function_fails : bool + If the function would normally fail, setting this will check for + warnings inside a try/except block. + exceptions : Exception or tuple of Exceptions + Exception to expect when turning the warnings into an error. + The default checks for DeprecationWarnings. If exceptions is + empty the function is expected to run successfully. 
+ args : tuple + Arguments for `function` + kwargs : dict + Keyword arguments for `function` + """ + __tracebackhide__ = True # Hide traceback for py.test + + # reset the log + self.log[:] = [] + + if exceptions is np._NoValue: + exceptions = (self.warning_cls,) + + try: + function(*args, **kwargs) + except (Exception if function_fails else tuple()): + pass + + # just in case, clear the registry + num_found = 0 + for warning in self.log: + if warning.category is self.warning_cls: + num_found += 1 + elif not ignore_others: + raise AssertionError( + "expected %s but got: %s" % + (self.warning_cls.__name__, warning.category)) + if num is not None and num_found != num: + msg = "%i warnings found but %i expected." % (len(self.log), num) + lst = [str(w) for w in self.log] + raise AssertionError("\n".join([msg] + lst)) + + with warnings.catch_warnings(): + warnings.filterwarnings("error", message=self.message, + category=self.warning_cls) + try: + function(*args, **kwargs) + if exceptions != tuple(): + raise AssertionError( + "No error raised during function call") + except exceptions: + if exceptions == tuple(): + raise AssertionError( + "Error raised during function call") + + def assert_not_deprecated(self, function, args=(), kwargs={}): + """Test that warnings are not raised. + + This is just a shorthand for: + + self.assert_deprecated(function, num=0, ignore_others=True, + exceptions=tuple(), args=args, kwargs=kwargs) + """ + self.assert_deprecated(function, num=0, ignore_others=True, + exceptions=tuple(), args=args, kwargs=kwargs) + + +class _VisibleDeprecationTestCase(_DeprecationTestCase): + warning_cls = np.VisibleDeprecationWarning + + +class TestNonTupleNDIndexDeprecation: + def test_basic(self): + a = np.zeros((5, 5)) + with warnings.catch_warnings(): + warnings.filterwarnings('always') + assert_warns(FutureWarning, a.__getitem__, [[0, 1], [0, 1]]) + assert_warns(FutureWarning, a.__getitem__, [slice(None)]) + + warnings.filterwarnings('error') + assert_raises(FutureWarning, a.__getitem__, [[0, 1], [0, 1]]) + assert_raises(FutureWarning, a.__getitem__, [slice(None)]) + + # a a[[0, 1]] always was advanced indexing, so no error/warning + a[[0, 1]] + + +class TestComparisonDeprecations(_DeprecationTestCase): + """This tests the deprecation, for non-element-wise comparison logic. + This used to mean that when an error occurred during element-wise comparison + (i.e. broadcasting) NotImplemented was returned, but also in the comparison + itself, False was given instead of the error. + + Also test FutureWarning for the None comparison. + """ + + message = "elementwise.* comparison failed; .*" + + def test_normal_types(self): + for op in (operator.eq, operator.ne): + # Broadcasting errors: + self.assert_deprecated(op, args=(np.zeros(3), [])) + a = np.zeros(3, dtype='i,i') + # (warning is issued a couple of times here) + self.assert_deprecated(op, args=(a, a[:-1]), num=None) + + # ragged array comparison returns True/False + a = np.array([1, np.array([1,2,3])], dtype=object) + b = np.array([1, np.array([1,2,3])], dtype=object) + self.assert_deprecated(op, args=(a, b), num=None) + + def test_string(self): + # For two string arrays, strings always raised the broadcasting error: + a = np.array(['a', 'b']) + b = np.array(['a', 'b', 'c']) + assert_raises(ValueError, lambda x, y: x == y, a, b) + + # The empty list is not cast to string, and this used to pass due + # to dtype mismatch; now (2018-06-21) it correctly leads to a + # FutureWarning. 
+ assert_warns(FutureWarning, lambda: a == []) + + def test_void_dtype_equality_failures(self): + class NotArray: + def __array__(self): + raise TypeError + + # Needed so Python 3 does not raise DeprecationWarning twice. + def __ne__(self, other): + return NotImplemented + + self.assert_deprecated(lambda: np.arange(2) == NotArray()) + self.assert_deprecated(lambda: np.arange(2) != NotArray()) + + struct1 = np.zeros(2, dtype="i4,i4") + struct2 = np.zeros(2, dtype="i4,i4,i4") + + assert_warns(FutureWarning, lambda: struct1 == 1) + assert_warns(FutureWarning, lambda: struct1 == struct2) + assert_warns(FutureWarning, lambda: struct1 != 1) + assert_warns(FutureWarning, lambda: struct1 != struct2) + + def test_array_richcompare_legacy_weirdness(self): + # It doesn't really work to use assert_deprecated here, b/c part of + # the point of assert_deprecated is to check that when warnings are + # set to "error" mode then the error is propagated -- which is good! + # But here we are testing a bunch of code that is deprecated *because* + # it has the habit of swallowing up errors and converting them into + # different warnings. So assert_warns will have to be sufficient. + assert_warns(FutureWarning, lambda: np.arange(2) == "a") + assert_warns(FutureWarning, lambda: np.arange(2) != "a") + # No warning for scalar comparisons + with warnings.catch_warnings(): + warnings.filterwarnings("error") + assert_(not (np.array(0) == "a")) + assert_(np.array(0) != "a") + assert_(not (np.int16(0) == "a")) + assert_(np.int16(0) != "a") + + for arg1 in [np.asarray(0), np.int16(0)]: + struct = np.zeros(2, dtype="i4,i4") + for arg2 in [struct, "a"]: + for f in [operator.lt, operator.le, operator.gt, operator.ge]: + with warnings.catch_warnings() as l: + warnings.filterwarnings("always") + assert_raises(TypeError, f, arg1, arg2) + assert_(not l) + + +class TestDatetime64Timezone(_DeprecationTestCase): + """Parsing of datetime64 with timezones deprecated in 1.11.0, because + datetime64 is now timezone naive rather than UTC only. + + It will be quite a while before we can remove this, because, at the very + least, a lot of existing code uses the 'Z' modifier to avoid conversion + from local time to UTC, even if otherwise it handles time in a timezone + naive fashion. + """ + def test_string(self): + self.assert_deprecated(np.datetime64, args=('2000-01-01T00+01',)) + self.assert_deprecated(np.datetime64, args=('2000-01-01T00Z',)) + + @pytest.mark.skipif(not _has_pytz, + reason="The pytz module is not available.") + def test_datetime(self): + tz = pytz.timezone('US/Eastern') + dt = datetime.datetime(2000, 1, 1, 0, 0, tzinfo=tz) + self.assert_deprecated(np.datetime64, args=(dt,)) + + +class TestNonCContiguousViewDeprecation(_DeprecationTestCase): + """View of non-C-contiguous arrays deprecated in 1.11.0. + + The deprecation will not be raised for arrays that are both C and F + contiguous, as C contiguous is dominant. There are more such arrays + with relaxed stride checking than without so the deprecation is not + as visible with relaxed stride checking in force. + """ + + def test_fortran_contiguous(self): + self.assert_deprecated(np.ones((2,2)).T.view, args=(complex,)) + self.assert_deprecated(np.ones((2,2)).T.view, args=(np.int8,)) + + +class TestArrayDataAttributeAssignmentDeprecation(_DeprecationTestCase): + """Assigning the 'data' attribute of an ndarray is unsafe as pointed + out in gh-7093. 
Eventually, such assignment should NOT be allowed, but + in the interests of maintaining backwards compatibility, only a Deprecation- + Warning will be raised instead for the time being to give developers time to + refactor relevant code. + """ + + def test_data_attr_assignment(self): + a = np.arange(10) + b = np.linspace(0, 1, 10) + + self.message = ("Assigning the 'data' attribute is an " + "inherently unsafe operation and will " + "be removed in the future.") + self.assert_deprecated(a.__setattr__, args=('data', b.data)) + + +class TestBinaryReprInsufficientWidthParameterForRepresentation(_DeprecationTestCase): + """ + If a 'width' parameter is passed into ``binary_repr`` that is insufficient to + represent the number in base 2 (positive) or 2's complement (negative) form, + the function used to silently ignore the parameter and return a representation + using the minimal number of bits needed for the form in question. Such behavior + is now considered unsafe from a user perspective and will raise an error in the future. + """ + + def test_insufficient_width_positive(self): + args = (10,) + kwargs = {'width': 2} + + self.message = ("Insufficient bit width provided. This behavior " + "will raise an error in the future.") + self.assert_deprecated(np.binary_repr, args=args, kwargs=kwargs) + + def test_insufficient_width_negative(self): + args = (-5,) + kwargs = {'width': 2} + + self.message = ("Insufficient bit width provided. This behavior " + "will raise an error in the future.") + self.assert_deprecated(np.binary_repr, args=args, kwargs=kwargs) + + +class TestNumericStyleTypecodes(_DeprecationTestCase): + """ + Most numeric style typecodes were previously deprecated (and removed) + in 1.20. This also deprecates the remaining ones. + """ + # 2020-06-09, NumPy 1.20 + def test_all_dtypes(self): + deprecated_types = ['Bytes0', 'Datetime64', 'Str0'] + # Depending on intp size, either Uint32 or Uint64 is defined: + deprecated_types.append(f"U{np.dtype(np.intp).name}") + for dt in deprecated_types: + self.assert_deprecated(np.dtype, exceptions=(TypeError,), + args=(dt,)) + + +class TestDTypeAttributeIsDTypeDeprecation(_DeprecationTestCase): + # Deprecated 2021-01-05, NumPy 1.21 + message = r".*`.dtype` attribute" + + def test_deprecation_dtype_attribute_is_dtype(self): + class dt: + dtype = "f8" + + class vdt(np.void): + dtype = "f,f" + + self.assert_deprecated(lambda: np.dtype(dt)) + self.assert_deprecated(lambda: np.dtype(dt())) + self.assert_deprecated(lambda: np.dtype(vdt)) + self.assert_deprecated(lambda: np.dtype(vdt(1))) + + +class TestTestDeprecated: + def test_assert_deprecated(self): + test_case_instance = _DeprecationTestCase() + test_case_instance.setup() + assert_raises(AssertionError, + test_case_instance.assert_deprecated, + lambda: None) + + def foo(): + warnings.warn("foo", category=DeprecationWarning, stacklevel=2) + + test_case_instance.assert_deprecated(foo) + test_case_instance.teardown() + + +class TestNonNumericConjugate(_DeprecationTestCase): + """ + Deprecate no-op behavior of ndarray.conjugate on non-numeric dtypes, + which conflicts with the error behavior of np.conjugate. 
+ """ + def test_conjugate(self): + for a in np.array(5), np.array(5j): + self.assert_not_deprecated(a.conjugate) + for a in (np.array('s'), np.array('2016', 'M'), + np.array((1, 2), [('a', int), ('b', int)])): + self.assert_deprecated(a.conjugate) + + +class TestNPY_CHAR(_DeprecationTestCase): + # 2017-05-03, 1.13.0 + def test_npy_char_deprecation(self): + from numpy.core._multiarray_tests import npy_char_deprecation + self.assert_deprecated(npy_char_deprecation) + assert_(npy_char_deprecation() == 'S1') + + +class TestPyArray_AS1D(_DeprecationTestCase): + def test_npy_pyarrayas1d_deprecation(self): + from numpy.core._multiarray_tests import npy_pyarrayas1d_deprecation + assert_raises(NotImplementedError, npy_pyarrayas1d_deprecation) + + +class TestPyArray_AS2D(_DeprecationTestCase): + def test_npy_pyarrayas2d_deprecation(self): + from numpy.core._multiarray_tests import npy_pyarrayas2d_deprecation + assert_raises(NotImplementedError, npy_pyarrayas2d_deprecation) + + +class Test_UPDATEIFCOPY(_DeprecationTestCase): + """ + v1.14 deprecates creating an array with the UPDATEIFCOPY flag, use + WRITEBACKIFCOPY instead + """ + def test_npy_updateifcopy_deprecation(self): + from numpy.core._multiarray_tests import npy_updateifcopy_deprecation + arr = np.arange(9).reshape(3, 3) + v = arr.T + self.assert_deprecated(npy_updateifcopy_deprecation, args=(v,)) + + +class TestDatetimeEvent(_DeprecationTestCase): + # 2017-08-11, 1.14.0 + def test_3_tuple(self): + for cls in (np.datetime64, np.timedelta64): + # two valid uses - (unit, num) and (unit, num, den, None) + self.assert_not_deprecated(cls, args=(1, ('ms', 2))) + self.assert_not_deprecated(cls, args=(1, ('ms', 2, 1, None))) + + # trying to use the event argument, removed in 1.7.0, is deprecated + # it used to be a uint8 + self.assert_deprecated(cls, args=(1, ('ms', 2, 'event'))) + self.assert_deprecated(cls, args=(1, ('ms', 2, 63))) + self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 'event'))) + self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 63))) + + +class TestTruthTestingEmptyArrays(_DeprecationTestCase): + # 2017-09-25, 1.14.0 + message = '.*truth value of an empty array is ambiguous.*' + + def test_1d(self): + self.assert_deprecated(bool, args=(np.array([]),)) + + def test_2d(self): + self.assert_deprecated(bool, args=(np.zeros((1, 0)),)) + self.assert_deprecated(bool, args=(np.zeros((0, 1)),)) + self.assert_deprecated(bool, args=(np.zeros((0, 0)),)) + + +class TestBincount(_DeprecationTestCase): + # 2017-06-01, 1.14.0 + def test_bincount_minlength(self): + self.assert_deprecated(lambda: np.bincount([1, 2, 3], minlength=None)) + + +class TestAlen(_DeprecationTestCase): + # 2019-08-02, 1.18.0 + def test_alen(self): + self.assert_deprecated(lambda: np.alen(np.array([1, 2, 3]))) + + +class TestGeneratorSum(_DeprecationTestCase): + # 2018-02-25, 1.15.0 + def test_generator_sum(self): + self.assert_deprecated(np.sum, args=((i for i in range(5)),)) + + +class TestPositiveOnNonNumerical(_DeprecationTestCase): + # 2018-06-28, 1.16.0 + def test_positive_on_non_number(self): + self.assert_deprecated(operator.pos, args=(np.array('foo'),)) + + +class TestFromstring(_DeprecationTestCase): + # 2017-10-19, 1.14 + def test_fromstring(self): + self.assert_deprecated(np.fromstring, args=('\x00'*80,)) + + +class TestFromStringAndFileInvalidData(_DeprecationTestCase): + # 2019-06-08, 1.17.0 + # Tests should be moved to real tests when deprecation is done. 
+ message = "string or file could not be read to its end" + + @pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"]) + def test_deprecate_unparsable_data_file(self, invalid_str): + x = np.array([1.51, 2, 3.51, 4], dtype=float) + + with tempfile.TemporaryFile(mode="w") as f: + x.tofile(f, sep=',', format='%.2f') + f.write(invalid_str) + + f.seek(0) + self.assert_deprecated(lambda: np.fromfile(f, sep=",")) + f.seek(0) + self.assert_deprecated(lambda: np.fromfile(f, sep=",", count=5)) + # Should not raise: + with warnings.catch_warnings(): + warnings.simplefilter("error", DeprecationWarning) + f.seek(0) + res = np.fromfile(f, sep=",", count=4) + assert_array_equal(res, x) + + @pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"]) + def test_deprecate_unparsable_string(self, invalid_str): + x = np.array([1.51, 2, 3.51, 4], dtype=float) + x_str = "1.51,2,3.51,4{}".format(invalid_str) + + self.assert_deprecated(lambda: np.fromstring(x_str, sep=",")) + self.assert_deprecated(lambda: np.fromstring(x_str, sep=",", count=5)) + + # The C-level API can use not fixed size, but 0 terminated strings, + # so test that as well: + bytestr = x_str.encode("ascii") + self.assert_deprecated(lambda: fromstring_null_term_c_api(bytestr)) + + with assert_warns(DeprecationWarning): + # this is slightly strange, in that fromstring leaves data + # potentially uninitialized (would be good to error when all is + # read, but count is larger then actual data maybe). + res = np.fromstring(x_str, sep=",", count=5) + assert_array_equal(res[:-1], x) + + with warnings.catch_warnings(): + warnings.simplefilter("error", DeprecationWarning) + + # Should not raise: + res = np.fromstring(x_str, sep=",", count=4) + assert_array_equal(res, x) + + +class Test_GetSet_NumericOps(_DeprecationTestCase): + # 2018-09-20, 1.16.0 + def test_get_numeric_ops(self): + from numpy.core._multiarray_tests import getset_numericops + self.assert_deprecated(getset_numericops, num=2) + + # empty kwargs prevents any state actually changing which would break + # other tests. + self.assert_deprecated(np.set_numeric_ops, kwargs={}) + assert_raises(ValueError, np.set_numeric_ops, add='abc') + + +class TestShape1Fields(_DeprecationTestCase): + warning_cls = FutureWarning + + # 2019-05-20, 1.17.0 + def test_shape_1_fields(self): + self.assert_deprecated(np.dtype, args=([('a', int, 1)],)) + + +class TestNonZero(_DeprecationTestCase): + # 2019-05-26, 1.17.0 + def test_zerod(self): + self.assert_deprecated(lambda: np.nonzero(np.array(0))) + self.assert_deprecated(lambda: np.nonzero(np.array(1))) + + +def test_deprecate_ragged_arrays(): + # 2019-11-29 1.19.0 + # + # NEP 34 deprecated automatic object dtype when creating ragged + # arrays. Also see the "ragged" tests in `test_multiarray` + # + # emits a VisibleDeprecationWarning + arg = [1, [2, 3]] + with assert_warns(np.VisibleDeprecationWarning): + np.array(arg) + + +class TestTooDeepDeprecation(_VisibleDeprecationTestCase): + # NumPy 1.20, 2020-05-08 + # This is a bit similar to the above ragged array deprecation case. 
+ message = re.escape("Creating an ndarray from nested sequences exceeding") + + def test_deprecation(self): + nested = [1] + for i in range(np.MAXDIMS - 1): + nested = [nested] + self.assert_not_deprecated(np.array, args=(nested,)) + self.assert_not_deprecated(np.array, + args=(nested,), kwargs=dict(dtype=object)) + + self.assert_deprecated(np.array, args=([nested],)) + + +class TestToString(_DeprecationTestCase): + # 2020-03-06 1.19.0 + message = re.escape("tostring() is deprecated. Use tobytes() instead.") + + def test_tostring(self): + arr = np.array(list(b"test\xFF"), dtype=np.uint8) + self.assert_deprecated(arr.tostring) + + def test_tostring_matches_tobytes(self): + arr = np.array(list(b"test\xFF"), dtype=np.uint8) + b = arr.tobytes() + with assert_warns(DeprecationWarning): + s = arr.tostring() + assert s == b + + +class TestDTypeCoercion(_DeprecationTestCase): + # 2020-02-06 1.19.0 + message = "Converting .* to a dtype .*is deprecated" + deprecated_types = [ + # The builtin scalar super types: + np.generic, np.flexible, np.number, + np.inexact, np.floating, np.complexfloating, + np.integer, np.unsignedinteger, np.signedinteger, + # character is a deprecated S1 special case: + np.character, + ] + + def test_dtype_coercion(self): + for scalar_type in self.deprecated_types: + self.assert_deprecated(np.dtype, args=(scalar_type,)) + + def test_array_construction(self): + for scalar_type in self.deprecated_types: + self.assert_deprecated(np.array, args=([], scalar_type,)) + + def test_not_deprecated(self): + # All specific types are not deprecated: + for group in np.sctypes.values(): + for scalar_type in group: + self.assert_not_deprecated(np.dtype, args=(scalar_type,)) + + for scalar_type in [type, dict, list, tuple]: + # Typical python types are coerced to object currently: + self.assert_not_deprecated(np.dtype, args=(scalar_type,)) + + +class BuiltInRoundComplexDType(_DeprecationTestCase): + # 2020-03-31 1.19.0 + deprecated_types = [np.csingle, np.cdouble, np.clongdouble] + not_deprecated_types = [ + np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64, + np.float16, np.float32, np.float64, + ] + + def test_deprecated(self): + for scalar_type in self.deprecated_types: + scalar = scalar_type(0) + self.assert_deprecated(round, args=(scalar,)) + self.assert_deprecated(round, args=(scalar, 0)) + self.assert_deprecated(round, args=(scalar,), kwargs={'ndigits': 0}) + + def test_not_deprecated(self): + for scalar_type in self.not_deprecated_types: + scalar = scalar_type(0) + self.assert_not_deprecated(round, args=(scalar,)) + self.assert_not_deprecated(round, args=(scalar, 0)) + self.assert_not_deprecated(round, args=(scalar,), kwargs={'ndigits': 0}) + + +class TestIncorrectAdvancedIndexWithEmptyResult(_DeprecationTestCase): + # 2020-05-27, NumPy 1.20.0 + message = "Out of bound index found. This was previously ignored.*" + + @pytest.mark.parametrize("index", [([3, 0],), ([0, 0], [3, 0])]) + def test_empty_subspace(self, index): + # Test for both a single and two/multiple advanced indices. These + # This will raise an IndexError in the future. 
+        arr = np.ones((2, 2, 0))
+        self.assert_deprecated(arr.__getitem__, args=(index,))
+        self.assert_deprecated(arr.__setitem__, args=(index, 0.))
+
+        # for this array, the subspace is only empty after applying the slice
+        arr2 = np.ones((2, 2, 1))
+        index2 = (slice(0, 0),) + index
+        self.assert_deprecated(arr2.__getitem__, args=(index2,))
+        self.assert_deprecated(arr2.__setitem__, args=(index2, 0.))
+
+    def test_empty_index_broadcast_not_deprecated(self):
+        arr = np.ones((2, 2, 2))
+
+        index = ([[3], [2]], [])  # broadcast to an empty result.
+        self.assert_not_deprecated(arr.__getitem__, args=(index,))
+        self.assert_not_deprecated(arr.__setitem__,
+                                   args=(index, np.empty((2, 0, 2))))
+
+
+class TestNonExactMatchDeprecation(_DeprecationTestCase):
+    # 2020-04-22
+    def test_non_exact_match(self):
+        arr = np.array([[3, 6, 6], [4, 5, 1]])
+        # misspelt mode check
+        self.assert_deprecated(lambda: np.ravel_multi_index(arr, (7, 6), mode='Cilp'))
+        # a completely different word, matching only in its first character
+        self.assert_deprecated(lambda: np.searchsorted(arr[0], 4, side='Random'))
+
+
+class TestDeprecatedGlobals(_DeprecationTestCase):
+    # 2020-06-06
+    @pytest.mark.skipif(
+        sys.version_info < (3, 7),
+        reason='module-level __getattr__ not supported')
+    def test_type_aliases(self):
+        # from builtins
+        self.assert_deprecated(lambda: np.bool(True))
+        self.assert_deprecated(lambda: np.int(1))
+        self.assert_deprecated(lambda: np.float(1))
+        self.assert_deprecated(lambda: np.complex(1))
+        self.assert_deprecated(lambda: np.object())
+        self.assert_deprecated(lambda: np.str('abc'))
+
+        # from np.compat
+        self.assert_deprecated(lambda: np.long(1))
+        self.assert_deprecated(lambda: np.unicode('abc'))
+
+        # from np.core.numerictypes
+        self.assert_deprecated(lambda: np.typeDict)
+
+
+class TestMatrixInOuter(_DeprecationTestCase):
+    # 2020-05-13 NumPy 1.20.0
+    message = (r"add.outer\(\) was passed a numpy matrix as "
+               r"(first|second) argument.")
+
+    def test_deprecated(self):
+        arr = np.array([1, 2, 3])
+        m = np.array([1, 2, 3]).view(np.matrix)
+        self.assert_deprecated(np.add.outer, args=(m, m), num=2)
+        self.assert_deprecated(np.add.outer, args=(arr, m))
+        self.assert_deprecated(np.add.outer, args=(m, arr))
+        self.assert_not_deprecated(np.add.outer, args=(arr, arr))
+
+
+class TestRaggedArray(_DeprecationTestCase):
+    # 2020-07-24, NumPy 1.20.0
+    message = "setting an array element with a sequence"
+
+    def test_deprecated(self):
+        arr = np.ones((1, 1))
+        # Deprecated if the array is a leaf node:
+        self.assert_deprecated(lambda: np.array([arr, 0], dtype=np.float64))
+        self.assert_deprecated(lambda: np.array([0, arr], dtype=np.float64))
+        # And when it is an assignment into a lower dimensional subarray:
+        self.assert_deprecated(lambda: np.array([arr, [0]], dtype=np.float64))
+        self.assert_deprecated(lambda: np.array([[0], arr], dtype=np.float64))
+
+
+class FlatteningConcatenateUnsafeCast(_DeprecationTestCase):
+    # NumPy 1.20, 2020-09-03
+    message = "concatenate with `axis=None` will use same-kind casting"
+
+    def test_deprecated(self):
+        self.assert_deprecated(np.concatenate,
+                               args=(([0.], [1.]),),
+                               kwargs=dict(axis=None,
+                                           out=np.empty(2, dtype=np.int64)))
+
+    def test_not_deprecated(self):
+        self.assert_not_deprecated(np.concatenate,
+                                   args=(([0.], [1.]),),
+                                   kwargs={'axis': None,
+                                           'out': np.empty(2, dtype=np.int64),
+                                           'casting': "unsafe"})
+
+        with assert_raises(TypeError):
+            # Tests should notice if the deprecation warning is given first...
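+            # (Sketch: with an explicit casting="same_kind", the
+            # float64 -> int64 cast is disallowed, so this call must raise
+            # TypeError rather than warn.)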
+            np.concatenate(([0.], [1.]), out=np.empty(2, dtype=np.int64),
+                           casting="same_kind")
+
+
+class TestDeprecateSubarrayDTypeDuringArrayCoercion(_DeprecationTestCase):
+    warning_cls = FutureWarning
+    message = "(creating|casting) an array (with|to) a subarray dtype"
+
+    def test_deprecated_array(self):
+        # Arrays are more complex, since they "broadcast" on success:
+        arr = np.array([1, 2])
+
+        self.assert_deprecated(lambda: arr.astype("(2)i,"))
+        with pytest.warns(FutureWarning):
+            res = arr.astype("(2)i,")
+
+        assert_array_equal(res, [[1, 2], [1, 2]])
+
+        self.assert_deprecated(lambda: np.array(arr, dtype="(2)i,"))
+        with pytest.warns(FutureWarning):
+            res = np.array(arr, dtype="(2)i,")
+
+        assert_array_equal(res, [[1, 2], [1, 2]])
+
+        with pytest.warns(FutureWarning):
+            res = np.array([[(1,), (2,)], arr], dtype="(2)i,")
+
+        assert_array_equal(res, [[[1, 1], [2, 2]], [[1, 2], [1, 2]]])
+
+    def test_deprecated_and_error(self):
+        # These error paths do not give a warning, but will succeed in the
+        # future.
+        arr = np.arange(5 * 2).reshape(5, 2)
+
+        def check():
+            with pytest.raises(ValueError):
+                arr.astype("(2,2)f")
+
+        self.assert_deprecated(check)
+
+        def check():
+            with pytest.raises(ValueError):
+                np.array(arr, dtype="(2,2)f")
+
+        self.assert_deprecated(check)
+
+
+class TestFutureWarningArrayLikeNotIterable(_DeprecationTestCase):
+    # Deprecated 2020-12-09, NumPy 1.20
+    warning_cls = FutureWarning
+    message = "The input object of type.*but not a sequence"
+
+    @pytest.mark.parametrize("protocol",
+            ["__array__", "__array_interface__", "__array_struct__"])
+    def test_deprecated(self, protocol):
+        """Test that these objects give a warning since they are not 0-D,
+        are not coerced at the top level `np.array(obj)`, but are nested,
+        and do *not* define the sequence protocol.
+
+        NOTE: Tests for the versions including __len__ and __getitem__ exist
+        in `test_array_coercion.py` and they can be modified or amended
+        when this deprecation expires.
+        """
+        blueprint = np.arange(10)
+        MyArr = type("MyArr", (), {protocol: getattr(blueprint, protocol)})
+        self.assert_deprecated(lambda: np.array([MyArr()], dtype=object))
+
+    @pytest.mark.parametrize("protocol",
+            ["__array__", "__array_interface__", "__array_struct__"])
+    def test_0d_not_deprecated(self, protocol):
+        # 0-D always worked (although it would use __float__ or similar
+        # for the conversion, which may not happen anymore)
+        blueprint = np.array(1.)
+ MyArr = type("MyArr", (), {protocol: getattr(blueprint, protocol)}) + myarr = MyArr() + + self.assert_not_deprecated(lambda: np.array([myarr], dtype=object)) + res = np.array([myarr], dtype=object) + expected = np.empty(1, dtype=object) + expected[0] = myarr + assert_array_equal(res, expected) + + @pytest.mark.parametrize("protocol", + ["__array__", "__array_interface__", "__array_struct__"]) + def test_unnested_not_deprecated(self, protocol): + blueprint = np.arange(10) + MyArr = type("MyArr", (), {protocol: getattr(blueprint, protocol)}) + myarr = MyArr() + + self.assert_not_deprecated(lambda: np.array(myarr)) + res = np.array(myarr) + assert_array_equal(res, blueprint) + + @pytest.mark.parametrize("protocol", + ["__array__", "__array_interface__", "__array_struct__"]) + def test_strange_dtype_handling(self, protocol): + """The old code would actually use the dtype from the array, but + then end up not using the array (for dimension discovery) + """ + blueprint = np.arange(10).astype("f4") + MyArr = type("MyArr", (), {protocol: getattr(blueprint, protocol), + "__float__": lambda _: 0.5}) + myarr = MyArr() + + # Make sure we warn (and capture the FutureWarning) + with pytest.warns(FutureWarning, match=self.message): + res = np.array([[myarr]]) + + assert res.shape == (1, 1) + assert res.dtype == "f4" + assert res[0, 0] == 0.5 + + @pytest.mark.parametrize("protocol", + ["__array__", "__array_interface__", "__array_struct__"]) + def test_assignment_not_deprecated(self, protocol): + # If the result is dtype=object we do not unpack a nested array or + # array-like, if it is nested at exactly the right depth. + # NOTE: We actually do still call __array__, etc. but ignore the result + # in the end. For `dtype=object` we could optimize that away. + blueprint = np.arange(10).astype("f4") + MyArr = type("MyArr", (), {protocol: getattr(blueprint, protocol), + "__float__": lambda _: 0.5}) + myarr = MyArr() + + res = np.empty(3, dtype=object) + def set(): + res[:] = [myarr, myarr, myarr] + self.assert_not_deprecated(set) + assert res[0] is myarr + assert res[1] is myarr + assert res[2] is myarr + + +class TestDeprecatedUnpickleObjectScalar(_DeprecationTestCase): + # Deprecated 2020-11-24, NumPy 1.20 + """ + Technically, it should be impossible to create numpy object scalars, + but there was an unpickle path that would in theory allow it. That + path is invalid and must lead to the warning. + """ + message = "Unpickling a scalar with object dtype is deprecated." 
+ + def test_deprecated(self): + ctor = np.core.multiarray.scalar + self.assert_deprecated(lambda: ctor(np.dtype("O"), 1)) + +try: + with warnings.catch_warnings(): + warnings.simplefilter("always") + import nose # noqa: F401 +except ImportError: + HAVE_NOSE = False +else: + HAVE_NOSE = True + + +@pytest.mark.skipif(not HAVE_NOSE, reason="Needs nose") +class TestNoseDecoratorsDeprecated(_DeprecationTestCase): + class DidntSkipException(Exception): + pass + + def test_slow(self): + def _test_slow(): + @np.testing.dec.slow + def slow_func(x, y, z): + pass + + assert_(slow_func.slow) + self.assert_deprecated(_test_slow) + + def test_setastest(self): + def _test_setastest(): + @np.testing.dec.setastest() + def f_default(a): + pass + + @np.testing.dec.setastest(True) + def f_istest(a): + pass + + @np.testing.dec.setastest(False) + def f_isnottest(a): + pass + + assert_(f_default.__test__) + assert_(f_istest.__test__) + assert_(not f_isnottest.__test__) + self.assert_deprecated(_test_setastest, num=3) + + def test_skip_functions_hardcoded(self): + def _test_skip_functions_hardcoded(): + @np.testing.dec.skipif(True) + def f1(x): + raise self.DidntSkipException + + try: + f1('a') + except self.DidntSkipException: + raise Exception('Failed to skip') + except SkipTest().__class__: + pass + + @np.testing.dec.skipif(False) + def f2(x): + raise self.DidntSkipException + + try: + f2('a') + except self.DidntSkipException: + pass + except SkipTest().__class__: + raise Exception('Skipped when not expected to') + self.assert_deprecated(_test_skip_functions_hardcoded, num=2) + + def test_skip_functions_callable(self): + def _test_skip_functions_callable(): + def skip_tester(): + return skip_flag == 'skip me!' + + @np.testing.dec.skipif(skip_tester) + def f1(x): + raise self.DidntSkipException + + try: + skip_flag = 'skip me!' + f1('a') + except self.DidntSkipException: + raise Exception('Failed to skip') + except SkipTest().__class__: + pass + + @np.testing.dec.skipif(skip_tester) + def f2(x): + raise self.DidntSkipException + + try: + skip_flag = 'five is right out!' + f2('a') + except self.DidntSkipException: + pass + except SkipTest().__class__: + raise Exception('Skipped when not expected to') + self.assert_deprecated(_test_skip_functions_callable, num=2) + + def test_skip_generators_hardcoded(self): + def _test_skip_generators_hardcoded(): + @np.testing.dec.knownfailureif(True, "This test is known to fail") + def g1(x): + yield from range(x) + + try: + for j in g1(10): + pass + except KnownFailureException().__class__: + pass + else: + raise Exception('Failed to mark as known failure') + + @np.testing.dec.knownfailureif(False, "This test is NOT known to fail") + def g2(x): + yield from range(x) + raise self.DidntSkipException('FAIL') + + try: + for j in g2(10): + pass + except KnownFailureException().__class__: + raise Exception('Marked incorrectly as known failure') + except self.DidntSkipException: + pass + self.assert_deprecated(_test_skip_generators_hardcoded, num=2) + + def test_skip_generators_callable(self): + def _test_skip_generators_callable(): + def skip_tester(): + return skip_flag == 'skip me!' + + @np.testing.dec.knownfailureif(skip_tester, "This test is known to fail") + def g1(x): + yield from range(x) + + try: + skip_flag = 'skip me!' 
+ for j in g1(10): + pass + except KnownFailureException().__class__: + pass + else: + raise Exception('Failed to mark as known failure') + + @np.testing.dec.knownfailureif(skip_tester, "This test is NOT known to fail") + def g2(x): + yield from range(x) + raise self.DidntSkipException('FAIL') + + try: + skip_flag = 'do not skip' + for j in g2(10): + pass + except KnownFailureException().__class__: + raise Exception('Marked incorrectly as known failure') + except self.DidntSkipException: + pass + self.assert_deprecated(_test_skip_generators_callable, num=2) + + def test_deprecated(self): + def _test_deprecated(): + @np.testing.dec.deprecated(True) + def non_deprecated_func(): + pass + + @np.testing.dec.deprecated() + def deprecated_func(): + import warnings + warnings.warn("TEST: deprecated func", DeprecationWarning, stacklevel=1) + + @np.testing.dec.deprecated() + def deprecated_func2(): + import warnings + warnings.warn("AHHHH", stacklevel=1) + raise ValueError + + @np.testing.dec.deprecated() + def deprecated_func3(): + import warnings + warnings.warn("AHHHH", stacklevel=1) + + # marked as deprecated, but does not raise DeprecationWarning + assert_raises(AssertionError, non_deprecated_func) + # should be silent + deprecated_func() + with warnings.catch_warnings(record=True): + warnings.simplefilter("always") # do not propagate unrelated warnings + # fails if deprecated decorator just disables test. See #1453. + assert_raises(ValueError, deprecated_func2) + # warning is not a DeprecationWarning + assert_raises(AssertionError, deprecated_func3) + self.assert_deprecated(_test_deprecated, num=4) + + def test_parametrize(self): + def _test_parametrize(): + # dec.parametrize assumes that it is being run by nose. Because + # we are running under pytest, we need to explicitly check the + # results. 
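+            # (Nose-style parametrize yields (func, arg1, arg2, ...) tuples,
+            # which is why each `test` below is invoked as
+            # test[0](*test[1:]).)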
+            @np.testing.dec.parametrize('base, power, expected',
+                                        [(1, 1, 1),
+                                         (2, 1, 2),
+                                         (2, 2, 4)])
+            def check_parametrize(base, power, expected):
+                assert_(base**power == expected)
+
+            count = 0
+            for test in check_parametrize():
+                test[0](*test[1:])
+                count += 1
+            assert_(count == 3)
+        self.assert_deprecated(_test_parametrize)
+
+
+class TestSingleElementSignature(_DeprecationTestCase):
+    # Deprecated 2021-04-01, NumPy 1.21
+    message = r"The use of a length 1"
+
+    def test_deprecated(self):
+        self.assert_deprecated(lambda: np.add(1, 2, signature="d"))
+        self.assert_deprecated(lambda: np.add(1, 2, sig=(np.dtype("l"),)))
+
+
+class TestComparisonBadDType(_DeprecationTestCase):
+    # Deprecated 2021-04-01, NumPy 1.21
+    message = r"using `dtype=` in comparisons is only useful for"
+
+    def test_deprecated(self):
+        self.assert_deprecated(lambda: np.equal(1, 1, dtype=np.int64))
+        # Only deprecated, not an error, during the transition period:
+        self.assert_deprecated(lambda: np.equal(1, 1, sig=(None, None, "l")))
+
+    def test_not_deprecated(self):
+        np.equal(True, False, dtype=bool)
+        np.equal(3, 5, dtype=bool, casting="unsafe")
+        np.equal([None], [4], dtype=object)
+
+
+class TestComparisonBadObjectDType(_DeprecationTestCase):
+    # Deprecated 2021-04-01, NumPy 1.21 (different branch of the above one)
+    message = r"using `dtype=object` \(or equivalent signature\) will"
+    warning_cls = FutureWarning
+
+    def test_deprecated(self):
+        self.assert_deprecated(lambda: np.equal(1, 1, dtype=object))
+        self.assert_deprecated(
+            lambda: np.equal(1, 1, sig=(None, None, object)))
+
+
+class TestSpecialAttributeLookupFailure(_DeprecationTestCase):
+    message = r"An exception was ignored while fetching the attribute"
+
+    class WeirdArrayLike:
+        @property
+        def __array__(self):
+            raise RuntimeError("oops!")
+
+    class WeirdArrayInterface:
+        @property
+        def __array_interface__(self):
+            raise RuntimeError("oops!")
+
+    def test_deprecated(self):
+        self.assert_deprecated(lambda: np.array(self.WeirdArrayLike()))
+        self.assert_deprecated(lambda: np.array(self.WeirdArrayInterface()))
+
+
+class TestCtypesGetter(_DeprecationTestCase):
+    # Deprecated 2021-05-18, NumPy 1.21.0
+    warning_cls = DeprecationWarning
+    ctypes = np.array([1]).ctypes
+
+    @pytest.mark.parametrize(
+        "name", ["get_data", "get_shape", "get_strides", "get_as_parameter"]
+    )
+    def test_deprecated(self, name: str) -> None:
+        func = getattr(self.ctypes, name)
+        self.assert_deprecated(lambda: func())
+
+    @pytest.mark.parametrize(
+        "name", ["data", "shape", "strides", "_as_parameter_"]
+    )
+    def test_not_deprecated(self, name: str) -> None:
+        self.assert_not_deprecated(lambda: getattr(self.ctypes, name))
+
+
+class TestUFuncForcedDTypeWarning(_DeprecationTestCase):
+    message = "The `dtype` and `signature` arguments to ufuncs only select the"
+
+    def test_not_deprecated(self):
+        import pickle
+        # Does not warn. (The test relies on bad pickling behaviour; simply
+        # remove it if the `assert int64 is not int64_2` should start
+        # failing.)
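+        # (Builtin dtypes are normally singletons, so a pickle round-trip
+        # is one of the few ways to obtain a distinct but equivalent dtype
+        # instance for the `is` check below.)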
+ int64 = np.dtype("int64") + int64_2 = pickle.loads(pickle.dumps(int64)) + assert int64 is not int64_2 + self.assert_not_deprecated(lambda: np.add(3, 4, dtype=int64_2)) + + def test_deprecation(self): + int64 = np.dtype("int64") + self.assert_deprecated(lambda: np.add(3, 5, dtype=int64.newbyteorder())) + self.assert_deprecated(lambda: np.add(3, 5, dtype="m8[ns]")) + + def test_behaviour(self): + int64 = np.dtype("int64") + arr = np.arange(10, dtype="m8[s]") + + with pytest.warns(DeprecationWarning, match=self.message): + np.add(3, 5, dtype=int64.newbyteorder()) + with pytest.warns(DeprecationWarning, match=self.message): + np.add(3, 5, dtype="m8[ns]") # previously used the "ns" + with pytest.warns(DeprecationWarning, match=self.message): + np.add(arr, arr, dtype="m8[ns]") # never preserved the "ns" + with pytest.warns(DeprecationWarning, match=self.message): + np.maximum(arr, arr, dtype="m8[ns]") # previously used the "ns" + with pytest.warns(DeprecationWarning, match=self.message): + np.maximum.reduce(arr, dtype="m8[ns]") # never preserved the "ns" diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_dtype.py b/MLPY/Lib/site-packages/numpy/core/tests/test_dtype.py new file mode 100644 index 0000000000000000000000000000000000000000..cee406d3bb0a5f0abea8b69922384924799aadfd --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/test_dtype.py @@ -0,0 +1,1541 @@ +import sys +import operator +import pytest +import ctypes +import gc +import warnings + +import numpy as np +from numpy.core._rational_tests import rational +from numpy.core._multiarray_tests import create_custom_field_dtype +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT) +from numpy.compat import pickle +from itertools import permutations + +def assert_dtype_equal(a, b): + assert_equal(a, b) + assert_equal(hash(a), hash(b), + "two equivalent types do not hash to the same value !") + +def assert_dtype_not_equal(a, b): + assert_(a != b) + assert_(hash(a) != hash(b), + "two different types hash to the same value !") + +class TestBuiltin: + @pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object, + np.compat.unicode]) + def test_run(self, t): + """Only test hash runs at all.""" + dt = np.dtype(t) + hash(dt) + + @pytest.mark.parametrize('t', [int, float]) + def test_dtype(self, t): + # Make sure equivalent byte order char hash the same (e.g. 
< and = on + # little endian) + dt = np.dtype(t) + dt2 = dt.newbyteorder("<") + dt3 = dt.newbyteorder(">") + if dt == dt2: + assert_(dt.byteorder != dt2.byteorder, "bogus test") + assert_dtype_equal(dt, dt2) + else: + assert_(dt.byteorder != dt3.byteorder, "bogus test") + assert_dtype_equal(dt, dt3) + + def test_equivalent_dtype_hashing(self): + # Make sure equivalent dtypes with different type num hash equal + uintp = np.dtype(np.uintp) + if uintp.itemsize == 4: + left = uintp + right = np.dtype(np.uint32) + else: + left = uintp + right = np.dtype(np.ulonglong) + assert_(left == right) + assert_(hash(left) == hash(right)) + + def test_invalid_types(self): + # Make sure invalid type strings raise an error + + assert_raises(TypeError, np.dtype, 'O3') + assert_raises(TypeError, np.dtype, 'O5') + assert_raises(TypeError, np.dtype, 'O7') + assert_raises(TypeError, np.dtype, 'b3') + assert_raises(TypeError, np.dtype, 'h4') + assert_raises(TypeError, np.dtype, 'I5') + assert_raises(TypeError, np.dtype, 'e3') + assert_raises(TypeError, np.dtype, 'f5') + + if np.dtype('g').itemsize == 8 or np.dtype('g').itemsize == 16: + assert_raises(TypeError, np.dtype, 'g12') + elif np.dtype('g').itemsize == 12: + assert_raises(TypeError, np.dtype, 'g16') + + if np.dtype('l').itemsize == 8: + assert_raises(TypeError, np.dtype, 'l4') + assert_raises(TypeError, np.dtype, 'L4') + else: + assert_raises(TypeError, np.dtype, 'l8') + assert_raises(TypeError, np.dtype, 'L8') + + if np.dtype('q').itemsize == 8: + assert_raises(TypeError, np.dtype, 'q4') + assert_raises(TypeError, np.dtype, 'Q4') + else: + assert_raises(TypeError, np.dtype, 'q8') + assert_raises(TypeError, np.dtype, 'Q8') + + def test_richcompare_invalid_dtype_equality(self): + # Make sure objects that cannot be converted to valid + # dtypes results in False/True when compared to valid dtypes. + # Here 7 cannot be converted to dtype. No exceptions should be raised + + assert not np.dtype(np.int32) == 7, "dtype richcompare failed for ==" + assert np.dtype(np.int32) != 7, "dtype richcompare failed for !=" + + @pytest.mark.parametrize( + 'operation', + [operator.le, operator.lt, operator.ge, operator.gt]) + def test_richcompare_invalid_dtype_comparison(self, operation): + # Make sure TypeError is raised for comparison operators + # for invalid dtypes. Here 7 is an invalid dtype. 
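+        # E.g. (sketch): np.dtype(np.int32) < 7 should raise TypeError,
+        # unlike == and != above, which simply return False/True.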
+ + with pytest.raises(TypeError): + operation(np.dtype(np.int32), 7) + + @pytest.mark.parametrize("dtype", + ['Bool', 'Complex32', 'Complex64', 'Float16', 'Float32', 'Float64', + 'Int8', 'Int16', 'Int32', 'Int64', 'Object0', 'Timedelta64', + 'UInt8', 'UInt16', 'UInt32', 'UInt64', 'Void0', + "Float128", "Complex128"]) + def test_numeric_style_types_are_invalid(self, dtype): + with assert_raises(TypeError): + np.dtype(dtype) + + @pytest.mark.parametrize( + 'value', + ['m8', 'M8', 'datetime64', 'timedelta64', + 'i4, (2,3)f8, f4', 'a3, 3u8, (3,4)a10', + '>f', 'f4', (64, 64)), (1,)), + ('rtile', '>f4', (64, 36))], (3,)), + ('bottom', [('bleft', ('>f4', (8, 64)), (1,)), + ('bright', '>f4', (8, 36))])]) + assert_equal(str(dt), + "[('top', [('tiles', ('>f4', (64, 64)), (1,)), " + "('rtile', '>f4', (64, 36))], (3,)), " + "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), " + "('bright', '>f4', (8, 36))])]") + + # If the sticky aligned flag is set to True, it makes the + # str() function use a dict representation with an 'aligned' flag + dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), + ('rtile', '>f4', (64, 36))], + (3,)), + ('bottom', [('bleft', ('>f4', (8, 64)), (1,)), + ('bright', '>f4', (8, 36))])], + align=True) + assert_equal(str(dt), + "{'names':['top','bottom'], " + "'formats':[([('tiles', ('>f4', (64, 64)), (1,)), " + "('rtile', '>f4', (64, 36))], (3,))," + "[('bleft', ('>f4', (8, 64)), (1,)), " + "('bright', '>f4', (8, 36))]], " + "'offsets':[0,76800], " + "'itemsize':80000, " + "'aligned':True}") + assert_equal(np.dtype(eval(str(dt))), dt) + + dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'], + 'offsets': [0, 1, 2], + 'titles': ['Red pixel', 'Green pixel', 'Blue pixel']}) + assert_equal(str(dt), + "[(('Red pixel', 'r'), 'u1'), " + "(('Green pixel', 'g'), 'u1'), " + "(('Blue pixel', 'b'), 'u1')]") + + dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'], + 'formats': ['f4', (64, 64)), (1,)), + ('rtile', '>f4', (64, 36))], (3,)), + ('bottom', [('bleft', ('>f4', (8, 64)), (1,)), + ('bright', '>f4', (8, 36))])]) + assert_equal(repr(dt), + "dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), " + "('rtile', '>f4', (64, 36))], (3,)), " + "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), " + "('bright', '>f4', (8, 36))])])") + + dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'], + 'offsets': [0, 1, 2], + 'titles': ['Red pixel', 'Green pixel', 'Blue pixel']}, + align=True) + assert_equal(repr(dt), + "dtype([(('Red pixel', 'r'), 'u1'), " + "(('Green pixel', 'g'), 'u1'), " + "(('Blue pixel', 'b'), 'u1')], align=True)") + + def test_repr_structured_not_packed(self): + dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'], + 'formats': ['f4', (2, 1)), ('b', 'u4')]) + self.check(BigEndStruct, expected) + + def test_little_endian_structure_packed(self): + class LittleEndStruct(ctypes.LittleEndianStructure): + _fields_ = [ + ('one', ctypes.c_uint8), + ('two', ctypes.c_uint32) + ] + _pack_ = 1 + expected = np.dtype([('one', 'u1'), ('two', 'B'), + ('b', '>H') + ], align=True) + self.check(PaddedStruct, expected) + + def test_simple_endian_types(self): + self.check(ctypes.c_uint16.__ctype_le__, np.dtype('u2')) + self.check(ctypes.c_uint8.__ctype_le__, np.dtype('u1')) + self.check(ctypes.c_uint8.__ctype_be__, np.dtype('u1')) + + all_types = set(np.typecodes['All']) + all_pairs = permutations(all_types, 2) + + @pytest.mark.parametrize("pair", all_pairs) + def test_pairs(self, pair): + """ + Check that np.dtype('x,y') matches [np.dtype('x'), np.dtype('y')] + Example: 
np.dtype('d,I') -> dtype([('f0', '..j", [0, 0], optimize=do_opt)
+            assert_raises(ValueError, np.einsum, "j->.j...", [0, 0], optimize=do_opt)
+
+            # invalid subscript character
+            assert_raises(ValueError, np.einsum, "i%...", [0, 0], optimize=do_opt)
+            assert_raises(ValueError, np.einsum, "...j$", [0, 0], optimize=do_opt)
+            assert_raises(ValueError, np.einsum, "i->&", [0, 0], optimize=do_opt)
+
+            # output subscripts must appear in input
+            assert_raises(ValueError, np.einsum, "i->ij", [0, 0], optimize=do_opt)
+
+            # output subscripts may only be specified once
+            assert_raises(ValueError, np.einsum, "ij->jij", [[0, 0], [0, 0]],
+                          optimize=do_opt)
+
+            # dimensions must match when being collapsed
+            assert_raises(ValueError, np.einsum, "ii",
+                          np.arange(6).reshape(2, 3), optimize=do_opt)
+            assert_raises(ValueError, np.einsum, "ii->i",
+                          np.arange(6).reshape(2, 3), optimize=do_opt)
+
+            # broadcasting to new dimensions must be enabled explicitly
+            assert_raises(ValueError, np.einsum, "i", np.arange(6).reshape(2, 3),
+                          optimize=do_opt)
+            assert_raises(ValueError, np.einsum, "i->i", [[0, 1], [0, 1]],
+                          out=np.arange(4).reshape(2, 2), optimize=do_opt)
+            with assert_raises_regex(ValueError, "'b'"):
+                # gh-11221 - 'c' erroneously appeared in the error message
+                a = np.ones((3, 3, 4, 5, 6))
+                b = np.ones((3, 4, 5))
+                np.einsum('aabcb,abc', a, b)
+
+            # Check order kwarg, asanyarray allows 1d to pass through
+            assert_raises(ValueError, np.einsum, "i->i", np.arange(6).reshape(-1, 1),
+                          optimize=do_opt, order='d')
+
+    def test_einsum_views(self):
+        # pass-through
+        for do_opt in [True, False]:
+            a = np.arange(6)
+            a.shape = (2, 3)
+
+            b = np.einsum("...", a, optimize=do_opt)
+            assert_(b.base is a)
+
+            b = np.einsum(a, [Ellipsis], optimize=do_opt)
+            assert_(b.base is a)
+
+            b = np.einsum("ij", a, optimize=do_opt)
+            assert_(b.base is a)
+            assert_equal(b, a)
+
+            b = np.einsum(a, [0, 1], optimize=do_opt)
+            assert_(b.base is a)
+            assert_equal(b, a)
+
+            # output is writeable whenever input is writeable
+            b = np.einsum("...", a, optimize=do_opt)
+            assert_(b.flags['WRITEABLE'])
+            a.flags['WRITEABLE'] = False
+            b = np.einsum("...", a, optimize=do_opt)
+            assert_(not b.flags['WRITEABLE'])
+
+            # transpose
+            a = np.arange(6)
+            a.shape = (2, 3)
+
+            b = np.einsum("ji", a, optimize=do_opt)
+            assert_(b.base is a)
+            assert_equal(b, a.T)
+
+            b = np.einsum(a, [1, 0], optimize=do_opt)
+            assert_(b.base is a)
+            assert_equal(b, a.T)
+
+            # diagonal
+            a = np.arange(9)
+            a.shape = (3, 3)
+
+            b = np.einsum("ii->i", a, optimize=do_opt)
+            assert_(b.base is a)
+            assert_equal(b, [a[i, i] for i in range(3)])
+
+            b = np.einsum(a, [0, 0], [0], optimize=do_opt)
+            assert_(b.base is a)
+            assert_equal(b, [a[i, i] for i in range(3)])
+
+            # diagonal with various ways of broadcasting an additional dimension
+            a = np.arange(27)
+            a.shape = (3, 3, 3)
+
+            b = np.einsum("...ii->...i", a, optimize=do_opt)
+            assert_(b.base is a)
+            assert_equal(b, [[x[i, i] for i in range(3)] for x in a])
+
+            b = np.einsum(a, [Ellipsis, 0, 0], [Ellipsis, 0], optimize=do_opt)
+            assert_(b.base is a)
+            assert_equal(b, [[x[i, i] for i in range(3)] for x in a])
+
+            b = np.einsum("ii...->...i", a, optimize=do_opt)
+            assert_(b.base is a)
+            assert_equal(b, [[x[i, i] for i in range(3)]
+                             for x in a.transpose(2, 0, 1)])
+
+            b = np.einsum(a, [0, 0, Ellipsis], [Ellipsis, 0], optimize=do_opt)
+            assert_(b.base is a)
+            assert_equal(b, [[x[i, i] for i in range(3)]
+                             for x in a.transpose(2, 0, 1)])
+
+            b = np.einsum("...ii->i...", a, optimize=do_opt)
+            assert_(b.base is a)
+            assert_equal(b,
[a[:, i, i] for i in range(3)]) + + b = np.einsum(a, [Ellipsis, 0, 0], [0, Ellipsis], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a[:, i, i] for i in range(3)]) + + b = np.einsum("jii->ij", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a[:, i, i] for i in range(3)]) + + b = np.einsum(a, [1, 0, 0], [0, 1], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a[:, i, i] for i in range(3)]) + + b = np.einsum("ii...->i...", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)]) + + b = np.einsum(a, [0, 0, Ellipsis], [0, Ellipsis], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)]) + + b = np.einsum("i...i->i...", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) + + b = np.einsum(a, [0, Ellipsis, 0], [0, Ellipsis], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) + + b = np.einsum("i...i->...i", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [[x[i, i] for i in range(3)] + for x in a.transpose(1, 0, 2)]) + + b = np.einsum(a, [0, Ellipsis, 0], [Ellipsis, 0], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [[x[i, i] for i in range(3)] + for x in a.transpose(1, 0, 2)]) + + # triple diagonal + a = np.arange(27) + a.shape = (3, 3, 3) + + b = np.einsum("iii->i", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a[i, i, i] for i in range(3)]) + + b = np.einsum(a, [0, 0, 0], [0], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a[i, i, i] for i in range(3)]) + + # swap axes + a = np.arange(24) + a.shape = (2, 3, 4) + + b = np.einsum("ijk->jik", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, a.swapaxes(0, 1)) + + b = np.einsum(a, [0, 1, 2], [1, 0, 2], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, a.swapaxes(0, 1)) + + def check_einsum_sums(self, dtype, do_opt=False): + # Check various sums. Does many sizes to exercise unrolled loops. 
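+        # (The correspondences exercised below, roughly:
+        #     np.einsum("i->", a)  ==  a.sum()
+        #     np.einsum("ii", a)   ==  np.trace(a)
+        # modulo dtype handling.)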
+ + # sum(a, axis=-1) + for n in range(1, 17): + a = np.arange(n, dtype=dtype) + assert_equal(np.einsum("i->", a, optimize=do_opt), + np.sum(a, axis=-1).astype(dtype)) + assert_equal(np.einsum(a, [0], [], optimize=do_opt), + np.sum(a, axis=-1).astype(dtype)) + + for n in range(1, 17): + a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n) + assert_equal(np.einsum("...i->...", a, optimize=do_opt), + np.sum(a, axis=-1).astype(dtype)) + assert_equal(np.einsum(a, [Ellipsis, 0], [Ellipsis], optimize=do_opt), + np.sum(a, axis=-1).astype(dtype)) + + # sum(a, axis=0) + for n in range(1, 17): + a = np.arange(2*n, dtype=dtype).reshape(2, n) + assert_equal(np.einsum("i...->...", a, optimize=do_opt), + np.sum(a, axis=0).astype(dtype)) + assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt), + np.sum(a, axis=0).astype(dtype)) + + for n in range(1, 17): + a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n) + assert_equal(np.einsum("i...->...", a, optimize=do_opt), + np.sum(a, axis=0).astype(dtype)) + assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt), + np.sum(a, axis=0).astype(dtype)) + + # trace(a) + for n in range(1, 17): + a = np.arange(n*n, dtype=dtype).reshape(n, n) + assert_equal(np.einsum("ii", a, optimize=do_opt), + np.trace(a).astype(dtype)) + assert_equal(np.einsum(a, [0, 0], optimize=do_opt), + np.trace(a).astype(dtype)) + + # gh-15961: should accept numpy int64 type in subscript list + np_array = np.asarray([0, 0]) + assert_equal(np.einsum(a, np_array, optimize=do_opt), + np.trace(a).astype(dtype)) + assert_equal(np.einsum(a, list(np_array), optimize=do_opt), + np.trace(a).astype(dtype)) + + # multiply(a, b) + assert_equal(np.einsum("..., ...", 3, 4), 12) # scalar case + for n in range(1, 17): + a = np.arange(3 * n, dtype=dtype).reshape(3, n) + b = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n) + assert_equal(np.einsum("..., ...", a, b, optimize=do_opt), + np.multiply(a, b)) + assert_equal(np.einsum(a, [Ellipsis], b, [Ellipsis], optimize=do_opt), + np.multiply(a, b)) + + # inner(a,b) + for n in range(1, 17): + a = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n) + b = np.arange(n, dtype=dtype) + assert_equal(np.einsum("...i, ...i", a, b, optimize=do_opt), np.inner(a, b)) + assert_equal(np.einsum(a, [Ellipsis, 0], b, [Ellipsis, 0], optimize=do_opt), + np.inner(a, b)) + + for n in range(1, 11): + a = np.arange(n * 3 * 2, dtype=dtype).reshape(n, 3, 2) + b = np.arange(n, dtype=dtype) + assert_equal(np.einsum("i..., i...", a, b, optimize=do_opt), + np.inner(a.T, b.T).T) + assert_equal(np.einsum(a, [0, Ellipsis], b, [0, Ellipsis], optimize=do_opt), + np.inner(a.T, b.T).T) + + # outer(a,b) + for n in range(1, 17): + a = np.arange(3, dtype=dtype)+1 + b = np.arange(n, dtype=dtype)+1 + assert_equal(np.einsum("i,j", a, b, optimize=do_opt), + np.outer(a, b)) + assert_equal(np.einsum(a, [0], b, [1], optimize=do_opt), + np.outer(a, b)) + + # Suppress the complex warnings for the 'as f8' tests + with suppress_warnings() as sup: + sup.filter(np.ComplexWarning) + + # matvec(a,b) / a.dot(b) where a is matrix, b is vector + for n in range(1, 17): + a = np.arange(4*n, dtype=dtype).reshape(4, n) + b = np.arange(n, dtype=dtype) + assert_equal(np.einsum("ij, j", a, b, optimize=do_opt), + np.dot(a, b)) + assert_equal(np.einsum(a, [0, 1], b, [1], optimize=do_opt), + np.dot(a, b)) + + c = np.arange(4, dtype=dtype) + np.einsum("ij,j", a, b, out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, + np.dot(a.astype('f8'), + b.astype('f8')).astype(dtype)) + c[...] 
= 0 + np.einsum(a, [0, 1], b, [1], out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, + np.dot(a.astype('f8'), + b.astype('f8')).astype(dtype)) + + for n in range(1, 17): + a = np.arange(4*n, dtype=dtype).reshape(4, n) + b = np.arange(n, dtype=dtype) + assert_equal(np.einsum("ji,j", a.T, b.T, optimize=do_opt), + np.dot(b.T, a.T)) + assert_equal(np.einsum(a.T, [1, 0], b.T, [1], optimize=do_opt), + np.dot(b.T, a.T)) + + c = np.arange(4, dtype=dtype) + np.einsum("ji,j", a.T, b.T, out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, + np.dot(b.T.astype('f8'), + a.T.astype('f8')).astype(dtype)) + c[...] = 0 + np.einsum(a.T, [1, 0], b.T, [1], out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, + np.dot(b.T.astype('f8'), + a.T.astype('f8')).astype(dtype)) + + # matmat(a,b) / a.dot(b) where a is matrix, b is matrix + for n in range(1, 17): + if n < 8 or dtype != 'f2': + a = np.arange(4*n, dtype=dtype).reshape(4, n) + b = np.arange(n*6, dtype=dtype).reshape(n, 6) + assert_equal(np.einsum("ij,jk", a, b, optimize=do_opt), + np.dot(a, b)) + assert_equal(np.einsum(a, [0, 1], b, [1, 2], optimize=do_opt), + np.dot(a, b)) + + for n in range(1, 17): + a = np.arange(4*n, dtype=dtype).reshape(4, n) + b = np.arange(n*6, dtype=dtype).reshape(n, 6) + c = np.arange(24, dtype=dtype).reshape(4, 6) + np.einsum("ij,jk", a, b, out=c, dtype='f8', casting='unsafe', + optimize=do_opt) + assert_equal(c, + np.dot(a.astype('f8'), + b.astype('f8')).astype(dtype)) + c[...] = 0 + np.einsum(a, [0, 1], b, [1, 2], out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, + np.dot(a.astype('f8'), + b.astype('f8')).astype(dtype)) + + # matrix triple product (note this is not currently an efficient + # way to multiply 3 matrices) + a = np.arange(12, dtype=dtype).reshape(3, 4) + b = np.arange(20, dtype=dtype).reshape(4, 5) + c = np.arange(30, dtype=dtype).reshape(5, 6) + if dtype != 'f2': + assert_equal(np.einsum("ij,jk,kl", a, b, c, optimize=do_opt), + a.dot(b).dot(c)) + assert_equal(np.einsum(a, [0, 1], b, [1, 2], c, [2, 3], + optimize=do_opt), a.dot(b).dot(c)) + + d = np.arange(18, dtype=dtype).reshape(3, 6) + np.einsum("ij,jk,kl", a, b, c, out=d, + dtype='f8', casting='unsafe', optimize=do_opt) + tgt = a.astype('f8').dot(b.astype('f8')) + tgt = tgt.dot(c.astype('f8')).astype(dtype) + assert_equal(d, tgt) + + d[...] = 0 + np.einsum(a, [0, 1], b, [1, 2], c, [2, 3], out=d, + dtype='f8', casting='unsafe', optimize=do_opt) + tgt = a.astype('f8').dot(b.astype('f8')) + tgt = tgt.dot(c.astype('f8')).astype(dtype) + assert_equal(d, tgt) + + # tensordot(a, b) + if np.dtype(dtype) != np.dtype('f2'): + a = np.arange(60, dtype=dtype).reshape(3, 4, 5) + b = np.arange(24, dtype=dtype).reshape(4, 3, 2) + assert_equal(np.einsum("ijk, jil -> kl", a, b), + np.tensordot(a, b, axes=([1, 0], [0, 1]))) + assert_equal(np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3]), + np.tensordot(a, b, axes=([1, 0], [0, 1]))) + + c = np.arange(10, dtype=dtype).reshape(5, 2) + np.einsum("ijk,jil->kl", a, b, out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'), + axes=([1, 0], [0, 1])).astype(dtype)) + c[...] 
= 0 + np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3], out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'), + axes=([1, 0], [0, 1])).astype(dtype)) + + # logical_and(logical_and(a!=0, b!=0), c!=0) + a = np.array([1, 3, -2, 0, 12, 13, 0, 1], dtype=dtype) + b = np.array([0, 3.5, 0., -2, 0, 1, 3, 12], dtype=dtype) + c = np.array([True, True, False, True, True, False, True, True]) + assert_equal(np.einsum("i,i,i->i", a, b, c, + dtype='?', casting='unsafe', optimize=do_opt), + np.logical_and(np.logical_and(a != 0, b != 0), c != 0)) + assert_equal(np.einsum(a, [0], b, [0], c, [0], [0], + dtype='?', casting='unsafe'), + np.logical_and(np.logical_and(a != 0, b != 0), c != 0)) + + a = np.arange(9, dtype=dtype) + assert_equal(np.einsum(",i->", 3, a), 3*np.sum(a)) + assert_equal(np.einsum(3, [], a, [0], []), 3*np.sum(a)) + assert_equal(np.einsum("i,->", a, 3), 3*np.sum(a)) + assert_equal(np.einsum(a, [0], 3, [], []), 3*np.sum(a)) + + # Various stride0, contiguous, and SSE aligned variants + for n in range(1, 25): + a = np.arange(n, dtype=dtype) + if np.dtype(dtype).itemsize > 1: + assert_equal(np.einsum("...,...", a, a, optimize=do_opt), + np.multiply(a, a)) + assert_equal(np.einsum("i,i", a, a, optimize=do_opt), np.dot(a, a)) + assert_equal(np.einsum("i,->i", a, 2, optimize=do_opt), 2*a) + assert_equal(np.einsum(",i->i", 2, a, optimize=do_opt), 2*a) + assert_equal(np.einsum("i,->", a, 2, optimize=do_opt), 2*np.sum(a)) + assert_equal(np.einsum(",i->", 2, a, optimize=do_opt), 2*np.sum(a)) + + assert_equal(np.einsum("...,...", a[1:], a[:-1], optimize=do_opt), + np.multiply(a[1:], a[:-1])) + assert_equal(np.einsum("i,i", a[1:], a[:-1], optimize=do_opt), + np.dot(a[1:], a[:-1])) + assert_equal(np.einsum("i,->i", a[1:], 2, optimize=do_opt), 2*a[1:]) + assert_equal(np.einsum(",i->i", 2, a[1:], optimize=do_opt), 2*a[1:]) + assert_equal(np.einsum("i,->", a[1:], 2, optimize=do_opt), + 2*np.sum(a[1:])) + assert_equal(np.einsum(",i->", 2, a[1:], optimize=do_opt), + 2*np.sum(a[1:])) + + # An object array, summed as the data type + a = np.arange(9, dtype=object) + + b = np.einsum("i->", a, dtype=dtype, casting='unsafe') + assert_equal(b, np.sum(a)) + assert_equal(b.dtype, np.dtype(dtype)) + + b = np.einsum(a, [0], [], dtype=dtype, casting='unsafe') + assert_equal(b, np.sum(a)) + assert_equal(b.dtype, np.dtype(dtype)) + + # A case which was failing (ticket #1885) + p = np.arange(2) + 1 + q = np.arange(4).reshape(2, 2) + 3 + r = np.arange(4).reshape(2, 2) + 7 + assert_equal(np.einsum('z,mz,zm->', p, q, r), 253) + + # singleton dimensions broadcast (gh-10343) + p = np.ones((10,2)) + q = np.ones((1,2)) + assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True), + np.einsum('ij,ij->j', p, q, optimize=False)) + assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True), + [10.] * 2) + + # a blas-compatible contraction broadcasting case which was failing + # for optimize=True (ticket #10930) + x = np.array([2., 3.]) + y = np.array([4.]) + assert_array_equal(np.einsum("i, i", x, y, optimize=False), 20.) + assert_array_equal(np.einsum("i, i", x, y, optimize=True), 20.) 
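+            # (Worked example for the case above: x = [2., 3.] and y = [4.]
+            # broadcast to [4., 4.], so "i, i" gives 2*4 + 3*4 = 20.)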
+ + # all-ones array was bypassing bug (ticket #10930) + p = np.ones((1, 5)) / 2 + q = np.ones((5, 5)) / 2 + for optimize in (True, False): + assert_array_equal(np.einsum("...ij,...jk->...ik", p, p, + optimize=optimize), + np.einsum("...ij,...jk->...ik", p, q, + optimize=optimize)) + assert_array_equal(np.einsum("...ij,...jk->...ik", p, q, + optimize=optimize), + np.full((1, 5), 1.25)) + + # Cases which were failing (gh-10899) + x = np.eye(2, dtype=dtype) + y = np.ones(2, dtype=dtype) + assert_array_equal(np.einsum("ji,i->", x, y, optimize=optimize), + [2.]) # contig_contig_outstride0_two + assert_array_equal(np.einsum("i,ij->", y, x, optimize=optimize), + [2.]) # stride0_contig_outstride0_two + assert_array_equal(np.einsum("ij,i->", x, y, optimize=optimize), + [2.]) # contig_stride0_outstride0_two + + def test_einsum_sums_int8(self): + self.check_einsum_sums('i1') + + def test_einsum_sums_uint8(self): + self.check_einsum_sums('u1') + + def test_einsum_sums_int16(self): + self.check_einsum_sums('i2') + + def test_einsum_sums_uint16(self): + self.check_einsum_sums('u2') + + def test_einsum_sums_int32(self): + self.check_einsum_sums('i4') + self.check_einsum_sums('i4', True) + + def test_einsum_sums_uint32(self): + self.check_einsum_sums('u4') + self.check_einsum_sums('u4', True) + + def test_einsum_sums_int64(self): + self.check_einsum_sums('i8') + + def test_einsum_sums_uint64(self): + self.check_einsum_sums('u8') + + def test_einsum_sums_float16(self): + self.check_einsum_sums('f2') + + def test_einsum_sums_float32(self): + self.check_einsum_sums('f4') + + def test_einsum_sums_float64(self): + self.check_einsum_sums('f8') + self.check_einsum_sums('f8', True) + + def test_einsum_sums_longdouble(self): + self.check_einsum_sums(np.longdouble) + + def test_einsum_sums_cfloat64(self): + self.check_einsum_sums('c8') + self.check_einsum_sums('c8', True) + + def test_einsum_sums_cfloat128(self): + self.check_einsum_sums('c16') + + def test_einsum_sums_clongdouble(self): + self.check_einsum_sums(np.clongdouble) + + def test_einsum_misc(self): + # This call used to crash because of a bug in + # PyArray_AssignZero + a = np.ones((1, 2)) + b = np.ones((2, 2, 1)) + assert_equal(np.einsum('ij...,j...->i...', a, b), [[[2], [2]]]) + assert_equal(np.einsum('ij...,j...->i...', a, b, optimize=True), [[[2], [2]]]) + + # Regression test for issue #10369 (test unicode inputs with Python 2) + assert_equal(np.einsum(u'ij...,j...->i...', a, b), [[[2], [2]]]) + assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4]), 20) + assert_equal(np.einsum(u'...i,...i', [1, 2, 3], [2, 3, 4]), 20) + assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4], + optimize=u'greedy'), 20) + + # The iterator had an issue with buffering this reduction + a = np.ones((5, 12, 4, 2, 3), np.int64) + b = np.ones((5, 12, 11), np.int64) + assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b), + np.einsum('ijklm,ijn->', a, b)) + assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b, optimize=True), + np.einsum('ijklm,ijn->', a, b, optimize=True)) + + # Issue #2027, was a problem in the contiguous 3-argument + # inner loop implementation + a = np.arange(1, 3) + b = np.arange(1, 5).reshape(2, 2) + c = np.arange(1, 9).reshape(4, 2) + assert_equal(np.einsum('x,yx,zx->xzy', a, b, c), + [[[1, 3], [3, 9], [5, 15], [7, 21]], + [[8, 16], [16, 32], [24, 48], [32, 64]]]) + assert_equal(np.einsum('x,yx,zx->xzy', a, b, c, optimize=True), + [[[1, 3], [3, 9], [5, 15], [7, 21]], + [[8, 16], [16, 32], [24, 48], [32, 64]]]) + + # Ensure explicitly setting out=None 
does not cause an error + # see issue gh-15776 and issue gh-15256 + assert_equal(np.einsum('i,j', [1], [2], out=None), [[2]]) + + def test_subscript_range(self): + # Issue #7741, make sure that all letters of Latin alphabet (both uppercase & lowercase) can be used + # when creating a subscript from arrays + a = np.ones((2, 3)) + b = np.ones((3, 4)) + np.einsum(a, [0, 20], b, [20, 2], [0, 2], optimize=False) + np.einsum(a, [0, 27], b, [27, 2], [0, 2], optimize=False) + np.einsum(a, [0, 51], b, [51, 2], [0, 2], optimize=False) + assert_raises(ValueError, lambda: np.einsum(a, [0, 52], b, [52, 2], [0, 2], optimize=False)) + assert_raises(ValueError, lambda: np.einsum(a, [-1, 5], b, [5, 2], [-1, 2], optimize=False)) + + def test_einsum_broadcast(self): + # Issue #2455 change in handling ellipsis + # remove the 'middle broadcast' error + # only use the 'RIGHT' iteration in prepare_op_axes + # adds auto broadcast on left where it belongs + # broadcast on right has to be explicit + # We need to test the optimized parsing as well + + A = np.arange(2 * 3 * 4).reshape(2, 3, 4) + B = np.arange(3) + ref = np.einsum('ijk,j->ijk', A, B, optimize=False) + for opt in [True, False]: + assert_equal(np.einsum('ij...,j...->ij...', A, B, optimize=opt), ref) + assert_equal(np.einsum('ij...,...j->ij...', A, B, optimize=opt), ref) + assert_equal(np.einsum('ij...,j->ij...', A, B, optimize=opt), ref) # used to raise error + + A = np.arange(12).reshape((4, 3)) + B = np.arange(6).reshape((3, 2)) + ref = np.einsum('ik,kj->ij', A, B, optimize=False) + for opt in [True, False]: + assert_equal(np.einsum('ik...,k...->i...', A, B, optimize=opt), ref) + assert_equal(np.einsum('ik...,...kj->i...j', A, B, optimize=opt), ref) + assert_equal(np.einsum('...k,kj', A, B, optimize=opt), ref) # used to raise error + assert_equal(np.einsum('ik,k...->i...', A, B, optimize=opt), ref) # used to raise error + + dims = [2, 3, 4, 5] + a = np.arange(np.prod(dims)).reshape(dims) + v = np.arange(dims[2]) + ref = np.einsum('ijkl,k->ijl', a, v, optimize=False) + for opt in [True, False]: + assert_equal(np.einsum('ijkl,k', a, v, optimize=opt), ref) + assert_equal(np.einsum('...kl,k', a, v, optimize=opt), ref) # used to raise error + assert_equal(np.einsum('...kl,k...', a, v, optimize=opt), ref) + + J, K, M = 160, 160, 120 + A = np.arange(J * K * M).reshape(1, 1, 1, J, K, M) + B = np.arange(J * K * M * 3).reshape(J, K, M, 3) + ref = np.einsum('...lmn,...lmno->...o', A, B, optimize=False) + for opt in [True, False]: + assert_equal(np.einsum('...lmn,lmno->...o', A, B, + optimize=opt), ref) # used to raise error + + def test_einsum_fixedstridebug(self): + # Issue #4485 obscure einsum bug + # This case revealed a bug in nditer where it reported a stride + # as 'fixed' (0) when it was in fact not fixed during processing + # (0 or 4). The reason for the bug was that the check for a fixed + # stride was using the information from the 2D inner loop reuse + # to restrict the iteration dimensions it had to validate to be + # the same, but that 2D inner loop reuse logic is only triggered + # during the buffer copying step, and hence it was invalid to + # rely on those values. The fix is to check all the dimensions + # of the stride in question, which in the test case reveals that + # the stride is not fixed. + # + # NOTE: This test is triggered by the fact that the default buffersize, + # used by einsum, is 8192, and 3*2731 = 8193, is larger than that + # and results in a mismatch between the buffering and the + # striding for operand A. 
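+        # (Arithmetic behind the NOTE: operand B below has an inner block of
+        # 3*2731 = 8193 elements, one more than the 8192-element buffer, so
+        # buffered chunks split mid-axis and the stride is not truly fixed.)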
+        A = np.arange(2 * 3).reshape(2, 3).astype(np.float32)
+        B = np.arange(2 * 3 * 2731).reshape(2, 3, 2731).astype(np.int16)
+        es = np.einsum('cl, cpx->lpx', A, B)
+        tp = np.tensordot(A, B, axes=(0, 0))
+        assert_equal(es, tp)
+        # The following is the original test case from the bug report,
+        # made repeatable by changing random arrays to aranges.
+        A = np.arange(3 * 3).reshape(3, 3).astype(np.float64)
+        B = np.arange(3 * 3 * 64 * 64).reshape(3, 3, 64, 64).astype(np.float32)
+        es = np.einsum('cl, cpxy->lpxy', A, B)
+        tp = np.tensordot(A, B, axes=(0, 0))
+        assert_equal(es, tp)
+
+    def test_einsum_fixed_collapsingbug(self):
+        # Issue #5147.
+        # The bug only occurred when the output argument of einsum was used.
+        x = np.random.normal(0, 1, (5, 5, 5, 5))
+        y1 = np.zeros((5, 5))
+        np.einsum('aabb->ab', x, out=y1)
+        idx = np.arange(5)
+        y2 = x[idx[:, None], idx[:, None], idx, idx]
+        assert_equal(y1, y2)
+
+    def test_einsum_failed_on_p9_and_s390x(self):
+        # Issues gh-14692 and gh-12689
+        # A bug with signed vs unsigned char caused errors on power9 and
+        # s390x Linux
+        tensor = np.random.random_sample((10, 10, 10, 10))
+        x = np.einsum('ijij->', tensor)
+        y = tensor.trace(axis1=0, axis2=2).trace()
+        assert_allclose(x, y)
+
+    def test_einsum_all_contig_non_contig_output(self):
+        # Issue gh-5907, tests that the all-contiguous special case
+        # actually checks the contiguity of the output
+        x = np.ones((5, 5))
+        out = np.ones(10)[::2]
+        correct_base = np.ones(10)
+        correct_base[::2] = 5
+        # Always worked (inner iteration is done with 0-stride):
+        np.einsum('mi,mi,mi->m', x, x, x, out=out)
+        assert_array_equal(out.base, correct_base)
+        # Example 1:
+        out = np.ones(10)[::2]
+        np.einsum('im,im,im->m', x, x, x, out=out)
+        assert_array_equal(out.base, correct_base)
+        # Example 2, buffering causes x to be contiguous but
+        # special cases do not catch the operation before:
+        out = np.ones((2, 2, 2))[..., 0]
+        correct_base = np.ones((2, 2, 2))
+        correct_base[..., 0] = 2
+        x = np.ones((2, 2), np.float32)
+        np.einsum('ij,jk->ik', x, x, out=out)
+        assert_array_equal(out.base, correct_base)
+
+    def test_small_boolean_arrays(self):
+        # See gh-5946.
+        # Use array of True embedded in False.
+        a = np.zeros((16, 1, 1), dtype=np.bool_)[:2]
+        a[...]
= True + out = np.zeros((16, 1, 1), dtype=np.bool_)[:2] + tgt = np.ones((2, 1, 1), dtype=np.bool_) + res = np.einsum('...ij,...jk->...ik', a, a, out=out) + assert_equal(res, tgt) + + def test_out_is_res(self): + a = np.arange(9).reshape(3, 3) + res = np.einsum('...ij,...jk->...ik', a, a, out=a) + assert res is a + + def optimize_compare(self, subscripts, operands=None): + # Tests all paths of the optimization function against + # conventional einsum + if operands is None: + args = [subscripts] + terms = subscripts.split('->')[0].split(',') + for term in terms: + dims = [global_size_dict[x] for x in term] + args.append(np.random.rand(*dims)) + else: + args = [subscripts] + operands + + noopt = np.einsum(*args, optimize=False) + opt = np.einsum(*args, optimize='greedy') + assert_almost_equal(opt, noopt) + opt = np.einsum(*args, optimize='optimal') + assert_almost_equal(opt, noopt) + + def test_hadamard_like_products(self): + # Hadamard outer products + self.optimize_compare('a,ab,abc->abc') + self.optimize_compare('a,b,ab->ab') + + def test_index_transformations(self): + # Simple index transformation cases + self.optimize_compare('ea,fb,gc,hd,abcd->efgh') + self.optimize_compare('ea,fb,abcd,gc,hd->efgh') + self.optimize_compare('abcd,ea,fb,gc,hd->efgh') + + def test_complex(self): + # Long test cases + self.optimize_compare('acdf,jbje,gihb,hfac,gfac,gifabc,hfac') + self.optimize_compare('acdf,jbje,gihb,hfac,gfac,gifabc,hfac') + self.optimize_compare('cd,bdhe,aidb,hgca,gc,hgibcd,hgac') + self.optimize_compare('abhe,hidj,jgba,hiab,gab') + self.optimize_compare('bde,cdh,agdb,hica,ibd,hgicd,hiac') + self.optimize_compare('chd,bde,agbc,hiad,hgc,hgi,hiad') + self.optimize_compare('chd,bde,agbc,hiad,bdi,cgh,agdb') + self.optimize_compare('bdhe,acad,hiab,agac,hibd') + + def test_collapse(self): + # Inner products + self.optimize_compare('ab,ab,c->') + self.optimize_compare('ab,ab,c->c') + self.optimize_compare('ab,ab,cd,cd->') + self.optimize_compare('ab,ab,cd,cd->ac') + self.optimize_compare('ab,ab,cd,cd->cd') + self.optimize_compare('ab,ab,cd,cd,ef,ef->') + + def test_expand(self): + # Outer products + self.optimize_compare('ab,cd,ef->abcdef') + self.optimize_compare('ab,cd,ef->acdf') + self.optimize_compare('ab,cd,de->abcde') + self.optimize_compare('ab,cd,de->be') + self.optimize_compare('ab,bcd,cd->abcd') + self.optimize_compare('ab,bcd,cd->abd') + + def test_edge_cases(self): + # Difficult edge cases for optimization + self.optimize_compare('eb,cb,fb->cef') + self.optimize_compare('dd,fb,be,cdb->cef') + self.optimize_compare('bca,cdb,dbf,afc->') + self.optimize_compare('dcc,fce,ea,dbf->ab') + self.optimize_compare('fdf,cdd,ccd,afe->ae') + self.optimize_compare('abcd,ad') + self.optimize_compare('ed,fcd,ff,bcf->be') + self.optimize_compare('baa,dcf,af,cde->be') + self.optimize_compare('bd,db,eac->ace') + self.optimize_compare('fff,fae,bef,def->abd') + self.optimize_compare('efc,dbc,acf,fd->abe') + self.optimize_compare('ba,ac,da->bcd') + + def test_inner_product(self): + # Inner products + self.optimize_compare('ab,ab') + self.optimize_compare('ab,ba') + self.optimize_compare('abc,abc') + self.optimize_compare('abc,bac') + self.optimize_compare('abc,cba') + + def test_random_cases(self): + # Randomly built test cases + self.optimize_compare('aab,fa,df,ecc->bde') + self.optimize_compare('ecb,fef,bad,ed->ac') + self.optimize_compare('bcf,bbb,fbf,fc->') + self.optimize_compare('bb,ff,be->e') + self.optimize_compare('bcb,bb,fc,fff->') + self.optimize_compare('fbb,dfd,fc,fc->') + 
self.optimize_compare('afd,ba,cc,dc->bf')
+        self.optimize_compare('adb,bc,fa,cfc->d')
+        self.optimize_compare('bbd,bda,fc,db->acf')
+        self.optimize_compare('dba,ead,cad->bce')
+        self.optimize_compare('aef,fbc,dca->bde')
+
+    def test_combined_views_mapping(self):
+        # gh-10792
+        a = np.arange(9).reshape(1, 1, 3, 1, 3)
+        b = np.einsum('bbcdc->d', a)
+        assert_equal(b, [12])
+
+    def test_broadcasting_dot_cases(self):
+        # Ensures broadcasting cases are not mistaken for GEMM
+
+        a = np.random.rand(1, 5, 4)
+        b = np.random.rand(4, 6)
+        c = np.random.rand(5, 6)
+        d = np.random.rand(10)
+
+        self.optimize_compare('ijk,kl,jl', operands=[a, b, c])
+        self.optimize_compare('ijk,kl,jl,i->i', operands=[a, b, c, d])
+
+        e = np.random.rand(1, 1, 5, 4)
+        f = np.random.rand(7, 7)
+        self.optimize_compare('abjk,kl,jl', operands=[e, b, c])
+        self.optimize_compare('abjk,kl,jl,ab->ab', operands=[e, b, c, f])
+
+        # Edge case found in gh-11308
+        g = np.arange(64).reshape(2, 4, 8)
+        self.optimize_compare('obk,ijk->ioj', operands=[g, g])
+
+    def test_output_order(self):
+        # Ensure output order is respected for optimize cases; the
+        # contraction below should yield a reshaped tensor view
+        # gh-16415
+
+        a = np.ones((2, 3, 5), order='F')
+        b = np.ones((4, 3), order='F')
+
+        for opt in [True, False]:
+            tmp = np.einsum('...ft,mf->...mt', a, b, order='a', optimize=opt)
+            assert_(tmp.flags.f_contiguous)
+
+            tmp = np.einsum('...ft,mf->...mt', a, b, order='f', optimize=opt)
+            assert_(tmp.flags.f_contiguous)
+
+            tmp = np.einsum('...ft,mf->...mt', a, b, order='c', optimize=opt)
+            assert_(tmp.flags.c_contiguous)
+
+            tmp = np.einsum('...ft,mf->...mt', a, b, order='k', optimize=opt)
+            assert_(tmp.flags.c_contiguous is False)
+            assert_(tmp.flags.f_contiguous is False)
+
+            tmp = np.einsum('...ft,mf->...mt', a, b, optimize=opt)
+            assert_(tmp.flags.c_contiguous is False)
+            assert_(tmp.flags.f_contiguous is False)
+
+        c = np.ones((4, 3), order='C')
+        for opt in [True, False]:
+            tmp = np.einsum('...ft,mf->...mt', a, c, order='a', optimize=opt)
+            assert_(tmp.flags.c_contiguous)
+
+        d = np.ones((2, 3, 5), order='C')
+        for opt in [True, False]:
+            tmp = np.einsum('...ft,mf->...mt', d, c, order='a', optimize=opt)
+            assert_(tmp.flags.c_contiguous)
+
+class TestEinsumPath:
+    def build_operands(self, string, size_dict=global_size_dict):
+
+        # Builds random operands matching the subscript terms
+        operands = [string]
+        terms = string.split('->')[0].split(',')
+        for term in terms:
+            dims = [size_dict[x] for x in term]
+            operands.append(np.random.rand(*dims))
+
+        return operands
+
+    def assert_path_equal(self, comp, benchmark):
+        # Checks that the lists of contraction tuples are equivalent
+        ret = (len(comp) == len(benchmark))
+        assert_(ret)
+        for pos in range(len(comp) - 1):
+            ret &= isinstance(comp[pos + 1], tuple)
+            ret &= (comp[pos + 1] == benchmark[pos + 1])
+        assert_(ret)
+
+    def test_memory_contraints(self):
+        # Ensure memory constraints are satisfied
+
+        outer_test = self.build_operands('a,b,c->abc')
+
+        path, path_str = np.einsum_path(*outer_test, optimize=('greedy', 0))
+        self.assert_path_equal(path, ['einsum_path', (0, 1, 2)])
+
+        path, path_str = np.einsum_path(*outer_test, optimize=('optimal', 0))
+        self.assert_path_equal(path, ['einsum_path', (0, 1, 2)])
+
+        long_test = self.build_operands('acdf,jbje,gihb,hfac')
+        path, path_str = np.einsum_path(*long_test, optimize=('greedy', 0))
+        self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
+
+        path, path_str = np.einsum_path(*long_test, optimize=('optimal', 0))
+        self.assert_path_equal(path, ['einsum_path', (0, 1,
2, 3)])
+
+    def test_long_paths(self):
+        # Long complex cases
+
+        # Long test 1
+        long_test1 = self.build_operands('acdf,jbje,gihb,hfac,gfac,gifabc,hfac')
+        path, path_str = np.einsum_path(*long_test1, optimize='greedy')
+        self.assert_path_equal(path, ['einsum_path',
+                                      (3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)])
+
+        path, path_str = np.einsum_path(*long_test1, optimize='optimal')
+        self.assert_path_equal(path, ['einsum_path',
+                                      (3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)])
+
+        # Long test 2
+        long_test2 = self.build_operands('chd,bde,agbc,hiad,bdi,cgh,agdb')
+        path, path_str = np.einsum_path(*long_test2, optimize='greedy')
+        print(path)
+        self.assert_path_equal(path, ['einsum_path',
+                                      (3, 4), (0, 3), (3, 4), (1, 3), (1, 2), (0, 1)])
+
+        path, path_str = np.einsum_path(*long_test2, optimize='optimal')
+        print(path)
+        self.assert_path_equal(path, ['einsum_path',
+                                      (0, 5), (1, 4), (3, 4), (1, 3), (1, 2), (0, 1)])
+
+    def test_edge_paths(self):
+        # Difficult edge cases
+
+        # Edge test1
+        edge_test1 = self.build_operands('eb,cb,fb->cef')
+        path, path_str = np.einsum_path(*edge_test1, optimize='greedy')
+        self.assert_path_equal(path, ['einsum_path', (0, 2), (0, 1)])
+
+        path, path_str = np.einsum_path(*edge_test1, optimize='optimal')
+        self.assert_path_equal(path, ['einsum_path', (0, 2), (0, 1)])
+
+        # Edge test2
+        edge_test2 = self.build_operands('dd,fb,be,cdb->cef')
+        path, path_str = np.einsum_path(*edge_test2, optimize='greedy')
+        self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 1), (0, 1)])
+
+        path, path_str = np.einsum_path(*edge_test2, optimize='optimal')
+        self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 1), (0, 1)])
+
+        # Edge test3
+        edge_test3 = self.build_operands('bca,cdb,dbf,afc->')
+        path, path_str = np.einsum_path(*edge_test3, optimize='greedy')
+        self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
+
+        path, path_str = np.einsum_path(*edge_test3, optimize='optimal')
+        self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
+
+        # Edge test4
+        edge_test4 = self.build_operands('dcc,fce,ea,dbf->ab')
+        path, path_str = np.einsum_path(*edge_test4, optimize='greedy')
+        self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)])
+
+        path, path_str = np.einsum_path(*edge_test4, optimize='optimal')
+        self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
+
+        # Edge test5
+        edge_test5 = self.build_operands('a,ac,ab,ad,cd,bd,bc->',
+                                         size_dict={"a": 20, "b": 20, "c": 20, "d": 20})
+        path, path_str = np.einsum_path(*edge_test5, optimize='greedy')
+        self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)])
+
+        path, path_str = np.einsum_path(*edge_test5, optimize='optimal')
+        self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)])
+
+    def test_path_type_input(self):
+        # Test explicit path handling
+        path_test = self.build_operands('dcc,fce,ea,dbf->ab')
+
+        path, path_str = np.einsum_path(*path_test, optimize=False)
+        self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
+
+        path, path_str = np.einsum_path(*path_test, optimize=True)
+        self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)])
+
+        exp_path = ['einsum_path', (0, 2), (0, 2), (0, 1)]
+        path, path_str = np.einsum_path(*path_test, optimize=exp_path)
+        self.assert_path_equal(path, exp_path)
+
+        # Double check einsum works on the input path
+        noopt = np.einsum(*path_test, optimize=False)
+        opt = np.einsum(*path_test, optimize=exp_path)
+        assert_almost_equal(noopt, opt)
+
+    def test_spaces(self):
#gh-10794 + arr = np.array([[1]]) + for sp in itertools.product(['', ' '], repeat=4): + # no error for any spacing + np.einsum('{}...a{}->{}...a{}'.format(*sp), arr) + +def test_overlap(): + a = np.arange(9, dtype=int).reshape(3, 3) + b = np.arange(9, dtype=int).reshape(3, 3) + d = np.dot(a, b) + # sanity check + c = np.einsum('ij,jk->ik', a, b) + assert_equal(c, d) + #gh-10080, out overlaps one of the operands + c = np.einsum('ij,jk->ik', a, b, out=b) + assert_equal(c, d) diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_errstate.py b/MLPY/Lib/site-packages/numpy/core/tests/test_errstate.py new file mode 100644 index 0000000000000000000000000000000000000000..646921ebf739f4eb7c68570b16383c0447a02c58 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/test_errstate.py @@ -0,0 +1,59 @@ +import pytest +import sysconfig + +import numpy as np +from numpy.testing import assert_, assert_raises + +# The floating point emulation on ARM EABI systems lacking a hardware FPU is +# known to be buggy. This is an attempt to identify these hosts. It may not +# catch all possible cases, but it catches the known cases of gh-413 and +# gh-15562. +hosttype = sysconfig.get_config_var('HOST_GNU_TYPE') +arm_softfloat = False if hosttype is None else hosttype.endswith('gnueabi') + +class TestErrstate: + @pytest.mark.skipif(arm_softfloat, + reason='platform/cpu issue with FPU (gh-413,-15562)') + def test_invalid(self): + with np.errstate(all='raise', under='ignore'): + a = -np.arange(3) + # This should work + with np.errstate(invalid='ignore'): + np.sqrt(a) + # While this should fail! + with assert_raises(FloatingPointError): + np.sqrt(a) + + @pytest.mark.skipif(arm_softfloat, + reason='platform/cpu issue with FPU (gh-15562)') + def test_divide(self): + with np.errstate(all='raise', under='ignore'): + a = -np.arange(3) + # This should work + with np.errstate(divide='ignore'): + a // 0 + # While this should fail! 
+ with assert_raises(FloatingPointError): + a // 0 + # As should this, see gh-15562 + with assert_raises(FloatingPointError): + a // a + + def test_errcall(self): + def foo(*args): + print(args) + + olderrcall = np.geterrcall() + with np.errstate(call=foo): + assert_(np.geterrcall() is foo, 'call is not foo') + with np.errstate(call=None): + assert_(np.geterrcall() is None, 'call is not None') + assert_(np.geterrcall() is olderrcall, 'call is not olderrcall') + + def test_errstate_decorator(self): + @np.errstate(all='ignore') + def foo(): + a = -np.arange(3) + a // 0 + + foo() diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_extint128.py b/MLPY/Lib/site-packages/numpy/core/tests/test_extint128.py new file mode 100644 index 0000000000000000000000000000000000000000..68c79df5cd68c32bbdc8de052101304375f2d22b --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/test_extint128.py @@ -0,0 +1,219 @@ +import itertools +import contextlib +import operator +import pytest + +import numpy as np +import numpy.core._multiarray_tests as mt + +from numpy.testing import assert_raises, assert_equal + + +INT64_MAX = np.iinfo(np.int64).max +INT64_MIN = np.iinfo(np.int64).min +INT64_MID = 2**32 + +# int128 is not two's complement, the sign bit is separate +INT128_MAX = 2**128 - 1 +INT128_MIN = -INT128_MAX +INT128_MID = 2**64 + +INT64_VALUES = ( + [INT64_MIN + j for j in range(20)] + + [INT64_MAX - j for j in range(20)] + + [INT64_MID + j for j in range(-20, 20)] + + [2*INT64_MID + j for j in range(-20, 20)] + + [INT64_MID//2 + j for j in range(-20, 20)] + + list(range(-70, 70)) +) + +INT128_VALUES = ( + [INT128_MIN + j for j in range(20)] + + [INT128_MAX - j for j in range(20)] + + [INT128_MID + j for j in range(-20, 20)] + + [2*INT128_MID + j for j in range(-20, 20)] + + [INT128_MID//2 + j for j in range(-20, 20)] + + list(range(-70, 70)) + + [False] # negative zero +) + +INT64_POS_VALUES = [x for x in INT64_VALUES if x > 0] + + +@contextlib.contextmanager +def exc_iter(*args): + """ + Iterate over Cartesian product of *args, and if an exception is raised, + add information of the current iterate. 
+ """ + + value = [None] + + def iterate(): + for v in itertools.product(*args): + value[0] = v + yield v + + try: + yield iterate() + except Exception: + import traceback + msg = "At: %r\n%s" % (repr(value[0]), + traceback.format_exc()) + raise AssertionError(msg) + + +def test_safe_binop(): + # Test checked arithmetic routines + + ops = [ + (operator.add, 1), + (operator.sub, 2), + (operator.mul, 3) + ] + + with exc_iter(ops, INT64_VALUES, INT64_VALUES) as it: + for xop, a, b in it: + pyop, op = xop + c = pyop(a, b) + + if not (INT64_MIN <= c <= INT64_MAX): + assert_raises(OverflowError, mt.extint_safe_binop, a, b, op) + else: + d = mt.extint_safe_binop(a, b, op) + if c != d: + # assert_equal is slow + assert_equal(d, c) + + +def test_to_128(): + with exc_iter(INT64_VALUES) as it: + for a, in it: + b = mt.extint_to_128(a) + if a != b: + assert_equal(b, a) + + +def test_to_64(): + with exc_iter(INT128_VALUES) as it: + for a, in it: + if not (INT64_MIN <= a <= INT64_MAX): + assert_raises(OverflowError, mt.extint_to_64, a) + else: + b = mt.extint_to_64(a) + if a != b: + assert_equal(b, a) + + +def test_mul_64_64(): + with exc_iter(INT64_VALUES, INT64_VALUES) as it: + for a, b in it: + c = a * b + d = mt.extint_mul_64_64(a, b) + if c != d: + assert_equal(d, c) + + +def test_add_128(): + with exc_iter(INT128_VALUES, INT128_VALUES) as it: + for a, b in it: + c = a + b + if not (INT128_MIN <= c <= INT128_MAX): + assert_raises(OverflowError, mt.extint_add_128, a, b) + else: + d = mt.extint_add_128(a, b) + if c != d: + assert_equal(d, c) + + +def test_sub_128(): + with exc_iter(INT128_VALUES, INT128_VALUES) as it: + for a, b in it: + c = a - b + if not (INT128_MIN <= c <= INT128_MAX): + assert_raises(OverflowError, mt.extint_sub_128, a, b) + else: + d = mt.extint_sub_128(a, b) + if c != d: + assert_equal(d, c) + + +def test_neg_128(): + with exc_iter(INT128_VALUES) as it: + for a, in it: + b = -a + c = mt.extint_neg_128(a) + if b != c: + assert_equal(c, b) + + +def test_shl_128(): + with exc_iter(INT128_VALUES) as it: + for a, in it: + if a < 0: + b = -(((-a) << 1) & (2**128-1)) + else: + b = (a << 1) & (2**128-1) + c = mt.extint_shl_128(a) + if b != c: + assert_equal(c, b) + + +def test_shr_128(): + with exc_iter(INT128_VALUES) as it: + for a, in it: + if a < 0: + b = -((-a) >> 1) + else: + b = a >> 1 + c = mt.extint_shr_128(a) + if b != c: + assert_equal(c, b) + + +def test_gt_128(): + with exc_iter(INT128_VALUES, INT128_VALUES) as it: + for a, b in it: + c = a > b + d = mt.extint_gt_128(a, b) + if c != d: + assert_equal(d, c) + + +@pytest.mark.slow +def test_divmod_128_64(): + with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it: + for a, b in it: + if a >= 0: + c, cr = divmod(a, b) + else: + c, cr = divmod(-a, b) + c = -c + cr = -cr + + d, dr = mt.extint_divmod_128_64(a, b) + + if c != d or d != dr or b*d + dr != a: + assert_equal(d, c) + assert_equal(dr, cr) + assert_equal(b*d + dr, a) + + +def test_floordiv_128_64(): + with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it: + for a, b in it: + c = a // b + d = mt.extint_floordiv_128_64(a, b) + + if c != d: + assert_equal(d, c) + + +def test_ceildiv_128_64(): + with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it: + for a, b in it: + c = (a + b - 1) // b + d = mt.extint_ceildiv_128_64(a, b) + + if c != d: + assert_equal(d, c) diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_function_base.py b/MLPY/Lib/site-packages/numpy/core/tests/test_function_base.py new file mode 100644 index 
0000000000000000000000000000000000000000..7cff65bf80f247d4e80686cae3d0edf0f8e1b50f --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/test_function_base.py @@ -0,0 +1,409 @@ +from numpy import ( + logspace, linspace, geomspace, dtype, array, sctypes, arange, isnan, + ndarray, sqrt, nextafter, stack, errstate + ) +from numpy.testing import ( + assert_, assert_equal, assert_raises, assert_array_equal, assert_allclose, + ) + + +class PhysicalQuantity(float): + def __new__(cls, value): + return float.__new__(cls, value) + + def __add__(self, x): + assert_(isinstance(x, PhysicalQuantity)) + return PhysicalQuantity(float(x) + float(self)) + __radd__ = __add__ + + def __sub__(self, x): + assert_(isinstance(x, PhysicalQuantity)) + return PhysicalQuantity(float(self) - float(x)) + + def __rsub__(self, x): + assert_(isinstance(x, PhysicalQuantity)) + return PhysicalQuantity(float(x) - float(self)) + + def __mul__(self, x): + return PhysicalQuantity(float(x) * float(self)) + __rmul__ = __mul__ + + def __div__(self, x): + return PhysicalQuantity(float(self) / float(x)) + + def __rdiv__(self, x): + return PhysicalQuantity(float(x) / float(self)) + + +class PhysicalQuantity2(ndarray): + __array_priority__ = 10 + + +class TestLogspace: + + def test_basic(self): + y = logspace(0, 6) + assert_(len(y) == 50) + y = logspace(0, 6, num=100) + assert_(y[-1] == 10 ** 6) + y = logspace(0, 6, endpoint=False) + assert_(y[-1] < 10 ** 6) + y = logspace(0, 6, num=7) + assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6]) + + def test_start_stop_array(self): + start = array([0., 1.]) + stop = array([6., 7.]) + t1 = logspace(start, stop, 6) + t2 = stack([logspace(_start, _stop, 6) + for _start, _stop in zip(start, stop)], axis=1) + assert_equal(t1, t2) + t3 = logspace(start, stop[0], 6) + t4 = stack([logspace(_start, stop[0], 6) + for _start in start], axis=1) + assert_equal(t3, t4) + t5 = logspace(start, stop, 6, axis=-1) + assert_equal(t5, t2.T) + + def test_dtype(self): + y = logspace(0, 6, dtype='float32') + assert_equal(y.dtype, dtype('float32')) + y = logspace(0, 6, dtype='float64') + assert_equal(y.dtype, dtype('float64')) + y = logspace(0, 6, dtype='int32') + assert_equal(y.dtype, dtype('int32')) + + def test_physical_quantities(self): + a = PhysicalQuantity(1.0) + b = PhysicalQuantity(5.0) + assert_equal(logspace(a, b), logspace(1.0, 5.0)) + + def test_subclass(self): + a = array(1).view(PhysicalQuantity2) + b = array(7).view(PhysicalQuantity2) + ls = logspace(a, b) + assert type(ls) is PhysicalQuantity2 + assert_equal(ls, logspace(1.0, 7.0)) + ls = logspace(a, b, 1) + assert type(ls) is PhysicalQuantity2 + assert_equal(ls, logspace(1.0, 7.0, 1)) + + +class TestGeomspace: + + def test_basic(self): + y = geomspace(1, 1e6) + assert_(len(y) == 50) + y = geomspace(1, 1e6, num=100) + assert_(y[-1] == 10 ** 6) + y = geomspace(1, 1e6, endpoint=False) + assert_(y[-1] < 10 ** 6) + y = geomspace(1, 1e6, num=7) + assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6]) + + y = geomspace(8, 2, num=3) + assert_allclose(y, [8, 4, 2]) + assert_array_equal(y.imag, 0) + + y = geomspace(-1, -100, num=3) + assert_array_equal(y, [-1, -10, -100]) + assert_array_equal(y.imag, 0) + + y = geomspace(-100, -1, num=3) + assert_array_equal(y, [-100, -10, -1]) + assert_array_equal(y.imag, 0) + + def test_boundaries_match_start_and_stop_exactly(self): + # make sure that the boundaries of the returned array exactly + # equal 'start' and 'stop' - this isn't obvious because + # np.exp(np.log(x)) isn't necessarily exactly equal to x + 
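Editorial aside before the test body continues below: the comment above hinges on floating-point round-off in exp(log(x)), and on geomspace assigning the requested endpoints into the result verbatim. A quick sketch (the round-off behavior of the first line can vary by value and platform):

```python
import numpy as np

x = 20.3
print(np.exp(np.log(x)) == x)      # may print False: the round trip
                                   # need not reproduce x exactly
y = np.geomspace(0.3, 20.3, num=7)
print(y[0] == 0.3, y[-1] == 20.3)  # True True: endpoints are written
                                   # directly, which is what the test checks
```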
start = 0.3 + stop = 20.3 + + y = geomspace(start, stop, num=1) + assert_equal(y[0], start) + + y = geomspace(start, stop, num=1, endpoint=False) + assert_equal(y[0], start) + + y = geomspace(start, stop, num=3) + assert_equal(y[0], start) + assert_equal(y[-1], stop) + + y = geomspace(start, stop, num=3, endpoint=False) + assert_equal(y[0], start) + + def test_nan_interior(self): + with errstate(invalid='ignore'): + y = geomspace(-3, 3, num=4) + + assert_equal(y[0], -3.0) + assert_(isnan(y[1:-1]).all()) + assert_equal(y[3], 3.0) + + with errstate(invalid='ignore'): + y = geomspace(-3, 3, num=4, endpoint=False) + + assert_equal(y[0], -3.0) + assert_(isnan(y[1:]).all()) + + def test_complex(self): + # Purely imaginary + y = geomspace(1j, 16j, num=5) + assert_allclose(y, [1j, 2j, 4j, 8j, 16j]) + assert_array_equal(y.real, 0) + + y = geomspace(-4j, -324j, num=5) + assert_allclose(y, [-4j, -12j, -36j, -108j, -324j]) + assert_array_equal(y.real, 0) + + y = geomspace(1+1j, 1000+1000j, num=4) + assert_allclose(y, [1+1j, 10+10j, 100+100j, 1000+1000j]) + + y = geomspace(-1+1j, -1000+1000j, num=4) + assert_allclose(y, [-1+1j, -10+10j, -100+100j, -1000+1000j]) + + # Logarithmic spirals + y = geomspace(-1, 1, num=3, dtype=complex) + assert_allclose(y, [-1, 1j, +1]) + + y = geomspace(0+3j, -3+0j, 3) + assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j]) + y = geomspace(0+3j, 3+0j, 3) + assert_allclose(y, [0+3j, 3/sqrt(2)+3j/sqrt(2), 3+0j]) + y = geomspace(-3+0j, 0-3j, 3) + assert_allclose(y, [-3+0j, -3/sqrt(2)-3j/sqrt(2), 0-3j]) + y = geomspace(0+3j, -3+0j, 3) + assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j]) + y = geomspace(-2-3j, 5+7j, 7) + assert_allclose(y, [-2-3j, -0.29058977-4.15771027j, + 2.08885354-4.34146838j, 4.58345529-3.16355218j, + 6.41401745-0.55233457j, 6.75707386+3.11795092j, + 5+7j]) + + # Type promotion should prevent the -5 from becoming a NaN + y = geomspace(3j, -5, 2) + assert_allclose(y, [3j, -5]) + y = geomspace(-5, 3j, 2) + assert_allclose(y, [-5, 3j]) + + def test_dtype(self): + y = geomspace(1, 1e6, dtype='float32') + assert_equal(y.dtype, dtype('float32')) + y = geomspace(1, 1e6, dtype='float64') + assert_equal(y.dtype, dtype('float64')) + y = geomspace(1, 1e6, dtype='int32') + assert_equal(y.dtype, dtype('int32')) + + # Native types + y = geomspace(1, 1e6, dtype=float) + assert_equal(y.dtype, dtype('float_')) + y = geomspace(1, 1e6, dtype=complex) + assert_equal(y.dtype, dtype('complex')) + + def test_start_stop_array_scalar(self): + lim1 = array([120, 100], dtype="int8") + lim2 = array([-120, -100], dtype="int8") + lim3 = array([1200, 1000], dtype="uint16") + t1 = geomspace(lim1[0], lim1[1], 5) + t2 = geomspace(lim2[0], lim2[1], 5) + t3 = geomspace(lim3[0], lim3[1], 5) + t4 = geomspace(120.0, 100.0, 5) + t5 = geomspace(-120.0, -100.0, 5) + t6 = geomspace(1200.0, 1000.0, 5) + + # t3 uses float32, t6 uses float64 + assert_allclose(t1, t4, rtol=1e-2) + assert_allclose(t2, t5, rtol=1e-2) + assert_allclose(t3, t6, rtol=1e-5) + + def test_start_stop_array(self): + # Try to use all special cases. 
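An editorial sketch of the array start/stop behavior the test below exercises (the test body resumes after this block): with array-valued endpoints, geomspace broadcasts start against stop and lays the num samples along a new axis, axis 0 by default.

```python
import numpy as np

start = np.array([1.0, 10.0])
stop = np.array([100.0, 1000.0])

g = np.geomspace(start, stop, num=3)   # shape (3, 2): samples along axis 0
print(g[:, 0])                         # [  1.  10. 100.]

g_rows = np.geomspace(start, stop, num=3, axis=-1)
print(g_rows.shape)                    # (2, 3): one row per start/stop pair
```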
+ start = array([1.e0, 32., 1j, -4j, 1+1j, -1]) + stop = array([1.e4, 2., 16j, -324j, 10000+10000j, 1]) + t1 = geomspace(start, stop, 5) + t2 = stack([geomspace(_start, _stop, 5) + for _start, _stop in zip(start, stop)], axis=1) + assert_equal(t1, t2) + t3 = geomspace(start, stop[0], 5) + t4 = stack([geomspace(_start, stop[0], 5) + for _start in start], axis=1) + assert_equal(t3, t4) + t5 = geomspace(start, stop, 5, axis=-1) + assert_equal(t5, t2.T) + + def test_physical_quantities(self): + a = PhysicalQuantity(1.0) + b = PhysicalQuantity(5.0) + assert_equal(geomspace(a, b), geomspace(1.0, 5.0)) + + def test_subclass(self): + a = array(1).view(PhysicalQuantity2) + b = array(7).view(PhysicalQuantity2) + gs = geomspace(a, b) + assert type(gs) is PhysicalQuantity2 + assert_equal(gs, geomspace(1.0, 7.0)) + gs = geomspace(a, b, 1) + assert type(gs) is PhysicalQuantity2 + assert_equal(gs, geomspace(1.0, 7.0, 1)) + + def test_bounds(self): + assert_raises(ValueError, geomspace, 0, 10) + assert_raises(ValueError, geomspace, 10, 0) + assert_raises(ValueError, geomspace, 0, 0) + + +class TestLinspace: + + def test_basic(self): + y = linspace(0, 10) + assert_(len(y) == 50) + y = linspace(2, 10, num=100) + assert_(y[-1] == 10) + y = linspace(2, 10, endpoint=False) + assert_(y[-1] < 10) + assert_raises(ValueError, linspace, 0, 10, num=-1) + + def test_corner(self): + y = list(linspace(0, 1, 1)) + assert_(y == [0.0], y) + assert_raises(TypeError, linspace, 0, 1, num=2.5) + + def test_type(self): + t1 = linspace(0, 1, 0).dtype + t2 = linspace(0, 1, 1).dtype + t3 = linspace(0, 1, 2).dtype + assert_equal(t1, t2) + assert_equal(t2, t3) + + def test_dtype(self): + y = linspace(0, 6, dtype='float32') + assert_equal(y.dtype, dtype('float32')) + y = linspace(0, 6, dtype='float64') + assert_equal(y.dtype, dtype('float64')) + y = linspace(0, 6, dtype='int32') + assert_equal(y.dtype, dtype('int32')) + + def test_start_stop_array_scalar(self): + lim1 = array([-120, 100], dtype="int8") + lim2 = array([120, -100], dtype="int8") + lim3 = array([1200, 1000], dtype="uint16") + t1 = linspace(lim1[0], lim1[1], 5) + t2 = linspace(lim2[0], lim2[1], 5) + t3 = linspace(lim3[0], lim3[1], 5) + t4 = linspace(-120.0, 100.0, 5) + t5 = linspace(120.0, -100.0, 5) + t6 = linspace(1200.0, 1000.0, 5) + assert_equal(t1, t4) + assert_equal(t2, t5) + assert_equal(t3, t6) + + def test_start_stop_array(self): + start = array([-120, 120], dtype="int8") + stop = array([100, -100], dtype="int8") + t1 = linspace(start, stop, 5) + t2 = stack([linspace(_start, _stop, 5) + for _start, _stop in zip(start, stop)], axis=1) + assert_equal(t1, t2) + t3 = linspace(start, stop[0], 5) + t4 = stack([linspace(_start, stop[0], 5) + for _start in start], axis=1) + assert_equal(t3, t4) + t5 = linspace(start, stop, 5, axis=-1) + assert_equal(t5, t2.T) + + def test_complex(self): + lim1 = linspace(1 + 2j, 3 + 4j, 5) + t1 = array([1.0+2.j, 1.5+2.5j, 2.0+3j, 2.5+3.5j, 3.0+4j]) + lim2 = linspace(1j, 10, 5) + t2 = array([0.0+1.j, 2.5+0.75j, 5.0+0.5j, 7.5+0.25j, 10.0+0j]) + assert_equal(lim1, t1) + assert_equal(lim2, t2) + + def test_physical_quantities(self): + a = PhysicalQuantity(0.0) + b = PhysicalQuantity(1.0) + assert_equal(linspace(a, b), linspace(0.0, 1.0)) + + def test_subclass(self): + a = array(0).view(PhysicalQuantity2) + b = array(1).view(PhysicalQuantity2) + ls = linspace(a, b) + assert type(ls) is PhysicalQuantity2 + assert_equal(ls, linspace(0.0, 1.0)) + ls = linspace(a, b, 1) + assert type(ls) is PhysicalQuantity2 + assert_equal(ls, linspace(0.0, 
1.0, 1))
+
+    def test_array_interface(self):
+        # Regression test for https://github.com/numpy/numpy/pull/6659
+        # Ensure that start/stop can be objects that implement
+        # __array_interface__ and are convertible to numeric scalars
+
+        class Arrayish:
+            """
+            A generic object that supports the __array_interface__ and hence
+            can in principle be converted to a numeric scalar, but is not
+            otherwise recognized as numeric, but also happens to support
+            multiplication by floats.
+
+            Data should be an object that implements the buffer interface,
+            and contains at least 4 bytes.
+            """
+
+            def __init__(self, data):
+                self._data = data
+
+            @property
+            def __array_interface__(self):
+                return {'shape': (), 'typestr': '<i4', 'data': self._data,
+                        'version': 3}

[Extraction damage: everything after the '<i4' typestr above (that line is restored from upstream NumPy 1.21.2 sources) through almost all of the new file MLPY/Lib/site-packages/numpy/core/tests/test_getlimits.py was lost, apparently because text between '<' and '>' characters was stripped. Only the closing assertions of that file's final test, apparently test_plausible_finfo, survive:]

+        assert_(info.nmant > 1)
+        assert_(info.minexp < -1)
+        assert_(info.maxexp > 1)
diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_half.py b/MLPY/Lib/site-packages/numpy/core/tests/test_half.py
new file mode 100644
index 0000000000000000000000000000000000000000..89c915b7be2b246bb77ecbf8fcb0fe6a31fcb0a1
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/core/tests/test_half.py
@@ -0,0 +1,554 @@
+import platform
+import pytest
+
+import numpy as np
+from numpy import uint16, float16, float32, float64
+from numpy.testing import assert_, assert_equal
+
+
+def assert_raises_fpe(strmatch, callable, *args, **kwargs):
+    try:
+        callable(*args, **kwargs)
+    except FloatingPointError as exc:
+        assert_(str(exc).find(strmatch) >= 0,
+                "Did not raise floating point %s error" % strmatch)
+    else:
+        assert_(False,
+                "Did not raise floating point %s error" % strmatch)
+
+class TestHalf:
+    def setup(self):
+        # An array of all possible float16 values
+        self.all_f16 = np.arange(0x10000, dtype=uint16)
+        self.all_f16.dtype = float16
+        self.all_f32 = np.array(self.all_f16, dtype=float32)
+        self.all_f64 = np.array(self.all_f16, dtype=float64)
+
+        # An array of all non-NaN float16 values, in sorted order
+        self.nonan_f16 = np.concatenate(
+            (np.arange(0xfc00, 0x7fff, -1, dtype=uint16),
+             np.arange(0x0000, 0x7c01, 1, dtype=uint16)))
+        self.nonan_f16.dtype = float16
+        self.nonan_f32 = np.array(self.nonan_f16, dtype=float32)
+        self.nonan_f64 = np.array(self.nonan_f16, dtype=float64)
+
+        # An array of all finite float16 values, in sorted order
+        self.finite_f16 = self.nonan_f16[1:-1]
+        self.finite_f32 = self.nonan_f32[1:-1]
+        self.finite_f64 = self.nonan_f64[1:-1]
+
+    def test_half_conversions(self):
+        """Checks that all 16-bit values survive conversion
+           to/from 32-bit and 64-bit float"""
+        # Because the underlying routines preserve the NaN bits, every
+        # value is preserved when converting to/from other floats.
+
+        # Convert from float32 back to float16
+        b = np.array(self.all_f32, dtype=float16)
+        assert_equal(self.all_f16.view(dtype=uint16),
+                     b.view(dtype=uint16))
+
+        # Convert from float64 back to float16
+        b = np.array(self.all_f64, dtype=float16)
+        assert_equal(self.all_f16.view(dtype=uint16),
+                     b.view(dtype=uint16))
+
+        # Convert float16 to longdouble and back
+        # This doesn't necessarily preserve the extra NaN bits,
+        # so exclude NaNs.
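Editorial aside on the technique in setup() above, before test_half_conversions resumes below: assigning to arr.dtype reinterprets the existing 16-bit patterns in place, so all 65536 bit patterns, NaNs included, become float16 values; arr.view(np.float16) is the equivalent and perhaps more familiar spelling. A sketch:

```python
import numpy as np

a = np.arange(0x10000, dtype=np.uint16)
a.dtype = np.float16   # in-place reinterpretation, as in setup()

b = np.arange(0x10000, dtype=np.uint16).view(np.float16)  # same bytes
assert a.tobytes() == b.tobytes()
print(a.size)          # 65536: every possible float16 bit pattern
```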
+ a_ld = np.array(self.nonan_f16, dtype=np.longdouble) + b = np.array(a_ld, dtype=float16) + assert_equal(self.nonan_f16.view(dtype=uint16), + b.view(dtype=uint16)) + + # Check the range for which all integers can be represented + i_int = np.arange(-2048, 2049) + i_f16 = np.array(i_int, dtype=float16) + j = np.array(i_f16, dtype=int) + assert_equal(i_int, j) + + @pytest.mark.parametrize("string_dt", ["S", "U"]) + def test_half_conversion_to_string(self, string_dt): + # Currently uses S/U32 (which is sufficient for float32) + expected_dt = np.dtype(f"{string_dt}32") + assert np.promote_types(np.float16, string_dt) == expected_dt + assert np.promote_types(string_dt, np.float16) == expected_dt + + arr = np.ones(3, dtype=np.float16).astype(string_dt) + assert arr.dtype == expected_dt + + @pytest.mark.parametrize("string_dt", ["S", "U"]) + def test_half_conversion_from_string(self, string_dt): + string = np.array("3.1416", dtype=string_dt) + assert string.astype(np.float16) == np.array(3.1416, dtype=np.float16) + + @pytest.mark.parametrize("offset", [None, "up", "down"]) + @pytest.mark.parametrize("shift", [None, "up", "down"]) + @pytest.mark.parametrize("float_t", [np.float32, np.float64]) + def test_half_conversion_rounding(self, float_t, shift, offset): + # Assumes that round to even is used during casting. + max_pattern = np.float16(np.finfo(np.float16).max).view(np.uint16) + + # Test all (positive) finite numbers, denormals are most interesting + # however: + f16s_patterns = np.arange(0, max_pattern+1, dtype=np.uint16) + f16s_float = f16s_patterns.view(np.float16).astype(float_t) + + # Shift the values by half a bit up or a down (or do not shift), + if shift == "up": + f16s_float = 0.5 * (f16s_float[:-1] + f16s_float[1:])[1:] + elif shift == "down": + f16s_float = 0.5 * (f16s_float[:-1] + f16s_float[1:])[:-1] + else: + f16s_float = f16s_float[1:-1] + + # Increase the float by a minimal value: + if offset == "up": + f16s_float = np.nextafter(f16s_float, float_t(1e50)) + elif offset == "down": + f16s_float = np.nextafter(f16s_float, float_t(-1e50)) + + # Convert back to float16 and its bit pattern: + res_patterns = f16s_float.astype(np.float16).view(np.uint16) + + # The above calculations tries the original values, or the exact + # mid points between the float16 values. It then further offsets them + # by as little as possible. If no offset occurs, "round to even" + # logic will be necessary, an arbitrarily small offset should cause + # normal up/down rounding always. + + # Calculate the expected pattern: + cmp_patterns = f16s_patterns[1:-1].copy() + + if shift == "down" and offset != "up": + shift_pattern = -1 + elif shift == "up" and offset != "down": + shift_pattern = 1 + else: + # There cannot be a shift, either shift is None, so all rounding + # will go back to original, or shift is reduced by offset too much. + shift_pattern = 0 + + # If rounding occurs, is it normal rounding or round to even? + if offset is None: + # Round to even occurs, modify only non-even, cast to allow + (-1) + cmp_patterns[0::2].view(np.int16)[...] += shift_pattern + else: + cmp_patterns.view(np.int16)[...] += shift_pattern + + assert_equal(res_patterns, cmp_patterns) + + @pytest.mark.parametrize(["float_t", "uint_t", "bits"], + [(np.float32, np.uint32, 23), + (np.float64, np.uint64, 52)]) + def test_half_conversion_denormal_round_even(self, float_t, uint_t, bits): + # Test specifically that all bits are considered when deciding + # whether round to even should occur (i.e. no bits are lost at the + # end. 
Compare also gh-12721. The most bits can get lost for the + # smallest denormal: + smallest_value = np.uint16(1).view(np.float16).astype(float_t) + assert smallest_value == 2**-24 + + # Will be rounded to zero based on round to even rule: + rounded_to_zero = smallest_value / float_t(2) + assert rounded_to_zero.astype(np.float16) == 0 + + # The significand will be all 0 for the float_t, test that we do not + # lose the lower ones of these: + for i in range(bits): + # slightly increasing the value should make it round up: + larger_pattern = rounded_to_zero.view(uint_t) | uint_t(1 << i) + larger_value = larger_pattern.view(float_t) + assert larger_value.astype(np.float16) == smallest_value + + def test_nans_infs(self): + with np.errstate(all='ignore'): + # Check some of the ufuncs + assert_equal(np.isnan(self.all_f16), np.isnan(self.all_f32)) + assert_equal(np.isinf(self.all_f16), np.isinf(self.all_f32)) + assert_equal(np.isfinite(self.all_f16), np.isfinite(self.all_f32)) + assert_equal(np.signbit(self.all_f16), np.signbit(self.all_f32)) + assert_equal(np.spacing(float16(65504)), np.inf) + + # Check comparisons of all values with NaN + nan = float16(np.nan) + + assert_(not (self.all_f16 == nan).any()) + assert_(not (nan == self.all_f16).any()) + + assert_((self.all_f16 != nan).all()) + assert_((nan != self.all_f16).all()) + + assert_(not (self.all_f16 < nan).any()) + assert_(not (nan < self.all_f16).any()) + + assert_(not (self.all_f16 <= nan).any()) + assert_(not (nan <= self.all_f16).any()) + + assert_(not (self.all_f16 > nan).any()) + assert_(not (nan > self.all_f16).any()) + + assert_(not (self.all_f16 >= nan).any()) + assert_(not (nan >= self.all_f16).any()) + + def test_half_values(self): + """Confirms a small number of known half values""" + a = np.array([1.0, -1.0, + 2.0, -2.0, + 0.0999755859375, 0.333251953125, # 1/10, 1/3 + 65504, -65504, # Maximum magnitude + 2.0**(-14), -2.0**(-14), # Minimum normal + 2.0**(-24), -2.0**(-24), # Minimum subnormal + 0, -1/1e1000, # Signed zeros + np.inf, -np.inf]) + b = np.array([0x3c00, 0xbc00, + 0x4000, 0xc000, + 0x2e66, 0x3555, + 0x7bff, 0xfbff, + 0x0400, 0x8400, + 0x0001, 0x8001, + 0x0000, 0x8000, + 0x7c00, 0xfc00], dtype=uint16) + b.dtype = float16 + assert_equal(a, b) + + def test_half_rounding(self): + """Checks that rounding when converting to half is correct""" + a = np.array([2.0**-25 + 2.0**-35, # Rounds to minimum subnormal + 2.0**-25, # Underflows to zero (nearest even mode) + 2.0**-26, # Underflows to zero + 1.0+2.0**-11 + 2.0**-16, # rounds to 1.0+2**(-10) + 1.0+2.0**-11, # rounds to 1.0 (nearest even mode) + 1.0+2.0**-12, # rounds to 1.0 + 65519, # rounds to 65504 + 65520], # rounds to inf + dtype=float64) + rounded = [2.0**-24, + 0.0, + 0.0, + 1.0+2.0**(-10), + 1.0, + 1.0, + 65504, + np.inf] + + # Check float64->float16 rounding + b = np.array(a, dtype=float16) + assert_equal(b, rounded) + + # Check float32->float16 rounding + a = np.array(a, dtype=float32) + b = np.array(a, dtype=float16) + assert_equal(b, rounded) + + def test_half_correctness(self): + """Take every finite float16, and check the casting functions with + a manual conversion.""" + + # Create an array of all finite float16s + a_bits = self.finite_f16.view(dtype=uint16) + + # Convert to 64-bit float manually + a_sgn = (-1.0)**((a_bits & 0x8000) >> 15) + a_exp = np.array((a_bits & 0x7c00) >> 10, dtype=np.int32) - 15 + a_man = (a_bits & 0x03ff) * 2.0**(-10) + # Implicit bit of normalized floats + a_man[a_exp != -15] += 1 + # Denormalized exponent is -14 + a_exp[a_exp == 
-15] = -14 + + a_manual = a_sgn * a_man * 2.0**a_exp + + a32_fail = np.nonzero(self.finite_f32 != a_manual)[0] + if len(a32_fail) != 0: + bad_index = a32_fail[0] + assert_equal(self.finite_f32, a_manual, + "First non-equal is half value %x -> %g != %g" % + (self.finite_f16[bad_index], + self.finite_f32[bad_index], + a_manual[bad_index])) + + a64_fail = np.nonzero(self.finite_f64 != a_manual)[0] + if len(a64_fail) != 0: + bad_index = a64_fail[0] + assert_equal(self.finite_f64, a_manual, + "First non-equal is half value %x -> %g != %g" % + (self.finite_f16[bad_index], + self.finite_f64[bad_index], + a_manual[bad_index])) + + def test_half_ordering(self): + """Make sure comparisons are working right""" + + # All non-NaN float16 values in reverse order + a = self.nonan_f16[::-1].copy() + + # 32-bit float copy + b = np.array(a, dtype=float32) + + # Should sort the same + a.sort() + b.sort() + assert_equal(a, b) + + # Comparisons should work + assert_((a[:-1] <= a[1:]).all()) + assert_(not (a[:-1] > a[1:]).any()) + assert_((a[1:] >= a[:-1]).all()) + assert_(not (a[1:] < a[:-1]).any()) + # All != except for +/-0 + assert_equal(np.nonzero(a[:-1] < a[1:])[0].size, a.size-2) + assert_equal(np.nonzero(a[1:] > a[:-1])[0].size, a.size-2) + + def test_half_funcs(self): + """Test the various ArrFuncs""" + + # fill + assert_equal(np.arange(10, dtype=float16), + np.arange(10, dtype=float32)) + + # fillwithscalar + a = np.zeros((5,), dtype=float16) + a.fill(1) + assert_equal(a, np.ones((5,), dtype=float16)) + + # nonzero and copyswap + a = np.array([0, 0, -1, -1/1e20, 0, 2.0**-24, 7.629e-6], dtype=float16) + assert_equal(a.nonzero()[0], + [2, 5, 6]) + a = a.byteswap().newbyteorder() + assert_equal(a.nonzero()[0], + [2, 5, 6]) + + # dot + a = np.arange(0, 10, 0.5, dtype=float16) + b = np.ones((20,), dtype=float16) + assert_equal(np.dot(a, b), + 95) + + # argmax + a = np.array([0, -np.inf, -2, 0.5, 12.55, 7.3, 2.1, 12.4], dtype=float16) + assert_equal(a.argmax(), + 4) + a = np.array([0, -np.inf, -2, np.inf, 12.55, np.nan, 2.1, 12.4], dtype=float16) + assert_equal(a.argmax(), + 5) + + # getitem + a = np.arange(10, dtype=float16) + for i in range(10): + assert_equal(a.item(i), i) + + def test_spacing_nextafter(self): + """Test np.spacing and np.nextafter""" + # All non-negative finite #'s + a = np.arange(0x7c00, dtype=uint16) + hinf = np.array((np.inf,), dtype=float16) + hnan = np.array((np.nan,), dtype=float16) + a_f16 = a.view(dtype=float16) + + assert_equal(np.spacing(a_f16[:-1]), a_f16[1:]-a_f16[:-1]) + + assert_equal(np.nextafter(a_f16[:-1], hinf), a_f16[1:]) + assert_equal(np.nextafter(a_f16[0], -hinf), -a_f16[1]) + assert_equal(np.nextafter(a_f16[1:], -hinf), a_f16[:-1]) + + assert_equal(np.nextafter(hinf, a_f16), a_f16[-1]) + assert_equal(np.nextafter(-hinf, a_f16), -a_f16[-1]) + + assert_equal(np.nextafter(hinf, hinf), hinf) + assert_equal(np.nextafter(hinf, -hinf), a_f16[-1]) + assert_equal(np.nextafter(-hinf, hinf), -a_f16[-1]) + assert_equal(np.nextafter(-hinf, -hinf), -hinf) + + assert_equal(np.nextafter(a_f16, hnan), hnan[0]) + assert_equal(np.nextafter(hnan, a_f16), hnan[0]) + + assert_equal(np.nextafter(hnan, hnan), hnan) + assert_equal(np.nextafter(hinf, hnan), hnan) + assert_equal(np.nextafter(hnan, hinf), hnan) + + # switch to negatives + a |= 0x8000 + + assert_equal(np.spacing(a_f16[0]), np.spacing(a_f16[1])) + assert_equal(np.spacing(a_f16[1:]), a_f16[:-1]-a_f16[1:]) + + assert_equal(np.nextafter(a_f16[0], hinf), -a_f16[1]) + assert_equal(np.nextafter(a_f16[1:], hinf), a_f16[:-1]) + 
assert_equal(np.nextafter(a_f16[:-1], -hinf), a_f16[1:]) + + assert_equal(np.nextafter(hinf, a_f16), -a_f16[-1]) + assert_equal(np.nextafter(-hinf, a_f16), a_f16[-1]) + + assert_equal(np.nextafter(a_f16, hnan), hnan[0]) + assert_equal(np.nextafter(hnan, a_f16), hnan[0]) + + def test_half_ufuncs(self): + """Test the various ufuncs""" + + a = np.array([0, 1, 2, 4, 2], dtype=float16) + b = np.array([-2, 5, 1, 4, 3], dtype=float16) + c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16) + + assert_equal(np.add(a, b), [-2, 6, 3, 8, 5]) + assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1]) + assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6]) + assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625]) + + assert_equal(np.equal(a, b), [False, False, False, True, False]) + assert_equal(np.not_equal(a, b), [True, True, True, False, True]) + assert_equal(np.less(a, b), [False, True, False, False, True]) + assert_equal(np.less_equal(a, b), [False, True, False, True, True]) + assert_equal(np.greater(a, b), [True, False, True, False, False]) + assert_equal(np.greater_equal(a, b), [True, False, True, True, False]) + assert_equal(np.logical_and(a, b), [False, True, True, True, True]) + assert_equal(np.logical_or(a, b), [True, True, True, True, True]) + assert_equal(np.logical_xor(a, b), [True, False, False, False, False]) + assert_equal(np.logical_not(a), [True, False, False, False, False]) + + assert_equal(np.isnan(c), [False, False, False, True, False]) + assert_equal(np.isinf(c), [False, False, True, False, False]) + assert_equal(np.isfinite(c), [True, True, False, False, True]) + assert_equal(np.signbit(b), [True, False, False, False, False]) + + assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3]) + + assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3]) + + x = np.maximum(b, c) + assert_(np.isnan(x[3])) + x[3] = 0 + assert_equal(x, [0, 5, 1, 0, 6]) + + assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2]) + + x = np.minimum(b, c) + assert_(np.isnan(x[3])) + x[3] = 0 + assert_equal(x, [-2, -1, -np.inf, 0, 3]) + + assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3]) + assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6]) + assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2]) + assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3]) + + assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0]) + assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2]) + assert_equal(np.divmod(a, b), ([0, 0, 2, 1, 0], [0, 1, 0, 0, 2])) + assert_equal(np.square(b), [4, 25, 1, 16, 9]) + assert_equal(np.reciprocal(b), [-0.5, 0.199951171875, 1, 0.25, 0.333251953125]) + assert_equal(np.ones_like(b), [1, 1, 1, 1, 1]) + assert_equal(np.conjugate(b), b) + assert_equal(np.absolute(b), [2, 5, 1, 4, 3]) + assert_equal(np.negative(b), [2, -5, -1, -4, -3]) + assert_equal(np.positive(b), b) + assert_equal(np.sign(b), [-1, 1, 1, 1, 1]) + assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b)) + assert_equal(np.frexp(b), ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2])) + assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12]) + + def test_half_coercion(self): + """Test that half gets coerced properly with the other types""" + a16 = np.array((1,), dtype=float16) + a32 = np.array((1,), dtype=float32) + b16 = float16(1) + b32 = float32(1) + + assert_equal(np.power(a16, 2).dtype, float16) + assert_equal(np.power(a16, 2.0).dtype, float16) + assert_equal(np.power(a16, b16).dtype, float16) + assert_equal(np.power(a16, b32).dtype, float16) + assert_equal(np.power(a16, a16).dtype, float16) + assert_equal(np.power(a16, a32).dtype, float32) + + assert_equal(np.power(b16, 2).dtype, 
float64) + assert_equal(np.power(b16, 2.0).dtype, float64) + assert_equal(np.power(b16, b16).dtype, float16) + assert_equal(np.power(b16, b32).dtype, float32) + assert_equal(np.power(b16, a16).dtype, float16) + assert_equal(np.power(b16, a32).dtype, float32) + + assert_equal(np.power(a32, a16).dtype, float32) + assert_equal(np.power(a32, b16).dtype, float32) + assert_equal(np.power(b32, a16).dtype, float16) + assert_equal(np.power(b32, b16).dtype, float32) + + @pytest.mark.skipif(platform.machine() == "armv5tel", + reason="See gh-413.") + def test_half_fpe(self): + with np.errstate(all='raise'): + sx16 = np.array((1e-4,), dtype=float16) + bx16 = np.array((1e4,), dtype=float16) + sy16 = float16(1e-4) + by16 = float16(1e4) + + # Underflow errors + assert_raises_fpe('underflow', lambda a, b:a*b, sx16, sx16) + assert_raises_fpe('underflow', lambda a, b:a*b, sx16, sy16) + assert_raises_fpe('underflow', lambda a, b:a*b, sy16, sx16) + assert_raises_fpe('underflow', lambda a, b:a*b, sy16, sy16) + assert_raises_fpe('underflow', lambda a, b:a/b, sx16, bx16) + assert_raises_fpe('underflow', lambda a, b:a/b, sx16, by16) + assert_raises_fpe('underflow', lambda a, b:a/b, sy16, bx16) + assert_raises_fpe('underflow', lambda a, b:a/b, sy16, by16) + assert_raises_fpe('underflow', lambda a, b:a/b, + float16(2.**-14), float16(2**11)) + assert_raises_fpe('underflow', lambda a, b:a/b, + float16(-2.**-14), float16(2**11)) + assert_raises_fpe('underflow', lambda a, b:a/b, + float16(2.**-14+2**-24), float16(2)) + assert_raises_fpe('underflow', lambda a, b:a/b, + float16(-2.**-14-2**-24), float16(2)) + assert_raises_fpe('underflow', lambda a, b:a/b, + float16(2.**-14+2**-23), float16(4)) + + # Overflow errors + assert_raises_fpe('overflow', lambda a, b:a*b, bx16, bx16) + assert_raises_fpe('overflow', lambda a, b:a*b, bx16, by16) + assert_raises_fpe('overflow', lambda a, b:a*b, by16, bx16) + assert_raises_fpe('overflow', lambda a, b:a*b, by16, by16) + assert_raises_fpe('overflow', lambda a, b:a/b, bx16, sx16) + assert_raises_fpe('overflow', lambda a, b:a/b, bx16, sy16) + assert_raises_fpe('overflow', lambda a, b:a/b, by16, sx16) + assert_raises_fpe('overflow', lambda a, b:a/b, by16, sy16) + assert_raises_fpe('overflow', lambda a, b:a+b, + float16(65504), float16(17)) + assert_raises_fpe('overflow', lambda a, b:a-b, + float16(-65504), float16(17)) + assert_raises_fpe('overflow', np.nextafter, float16(65504), float16(np.inf)) + assert_raises_fpe('overflow', np.nextafter, float16(-65504), float16(-np.inf)) + assert_raises_fpe('overflow', np.spacing, float16(65504)) + + # Invalid value errors + assert_raises_fpe('invalid', np.divide, float16(np.inf), float16(np.inf)) + assert_raises_fpe('invalid', np.spacing, float16(np.inf)) + assert_raises_fpe('invalid', np.spacing, float16(np.nan)) + + # These should not raise + float16(65472)+float16(32) + float16(2**-13)/float16(2) + float16(2**-14)/float16(2**10) + np.spacing(float16(-65504)) + np.nextafter(float16(65504), float16(-np.inf)) + np.nextafter(float16(-65504), float16(np.inf)) + np.nextafter(float16(np.inf), float16(0)) + np.nextafter(float16(-np.inf), float16(0)) + np.nextafter(float16(0), float16(np.nan)) + np.nextafter(float16(np.nan), float16(0)) + float16(2**-14)/float16(2**10) + float16(-2**-14)/float16(2**10) + float16(2**-14+2**-23)/float16(2) + float16(-2**-14-2**-23)/float16(2) + + def test_half_array_interface(self): + """Test that half is compatible with __array_interface__""" + class Dummy: + pass + + a = np.ones((1,), dtype=float16) + b = Dummy() + 
b.__array_interface__ = a.__array_interface__ + c = np.array(b) + assert_(c.dtype == float16) + assert_equal(a, c) diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_indexerrors.py b/MLPY/Lib/site-packages/numpy/core/tests/test_indexerrors.py new file mode 100644 index 0000000000000000000000000000000000000000..979a4e398e722c98160f92302dfdcf5b4c2246ef --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/test_indexerrors.py @@ -0,0 +1,133 @@ +import numpy as np +from numpy.testing import ( + assert_raises, assert_raises_regex, + ) + + +class TestIndexErrors: + '''Tests to exercise indexerrors not covered by other tests.''' + + def test_arraytypes_fasttake(self): + 'take from a 0-length dimension' + x = np.empty((2, 3, 0, 4)) + assert_raises(IndexError, x.take, [0], axis=2) + assert_raises(IndexError, x.take, [1], axis=2) + assert_raises(IndexError, x.take, [0], axis=2, mode='wrap') + assert_raises(IndexError, x.take, [0], axis=2, mode='clip') + + def test_take_from_object(self): + # Check exception taking from object array + d = np.zeros(5, dtype=object) + assert_raises(IndexError, d.take, [6]) + + # Check exception taking from 0-d array + d = np.zeros((5, 0), dtype=object) + assert_raises(IndexError, d.take, [1], axis=1) + assert_raises(IndexError, d.take, [0], axis=1) + assert_raises(IndexError, d.take, [0]) + assert_raises(IndexError, d.take, [0], mode='wrap') + assert_raises(IndexError, d.take, [0], mode='clip') + + def test_multiindex_exceptions(self): + a = np.empty(5, dtype=object) + assert_raises(IndexError, a.item, 20) + a = np.empty((5, 0), dtype=object) + assert_raises(IndexError, a.item, (0, 0)) + + a = np.empty(5, dtype=object) + assert_raises(IndexError, a.itemset, 20, 0) + a = np.empty((5, 0), dtype=object) + assert_raises(IndexError, a.itemset, (0, 0), 0) + + def test_put_exceptions(self): + a = np.zeros((5, 5)) + assert_raises(IndexError, a.put, 100, 0) + a = np.zeros((5, 5), dtype=object) + assert_raises(IndexError, a.put, 100, 0) + a = np.zeros((5, 5, 0)) + assert_raises(IndexError, a.put, 100, 0) + a = np.zeros((5, 5, 0), dtype=object) + assert_raises(IndexError, a.put, 100, 0) + + def test_iterators_exceptions(self): + "cases in iterators.c" + def assign(obj, ind, val): + obj[ind] = val + + a = np.zeros([1, 2, 3]) + assert_raises(IndexError, lambda: a[0, 5, None, 2]) + assert_raises(IndexError, lambda: a[0, 5, 0, 2]) + assert_raises(IndexError, lambda: assign(a, (0, 5, None, 2), 1)) + assert_raises(IndexError, lambda: assign(a, (0, 5, 0, 2), 1)) + + a = np.zeros([1, 0, 3]) + assert_raises(IndexError, lambda: a[0, 0, None, 2]) + assert_raises(IndexError, lambda: assign(a, (0, 0, None, 2), 1)) + + a = np.zeros([1, 2, 3]) + assert_raises(IndexError, lambda: a.flat[10]) + assert_raises(IndexError, lambda: assign(a.flat, 10, 5)) + a = np.zeros([1, 0, 3]) + assert_raises(IndexError, lambda: a.flat[10]) + assert_raises(IndexError, lambda: assign(a.flat, 10, 5)) + + a = np.zeros([1, 2, 3]) + assert_raises(IndexError, lambda: a.flat[np.array(10)]) + assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5)) + a = np.zeros([1, 0, 3]) + assert_raises(IndexError, lambda: a.flat[np.array(10)]) + assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5)) + + a = np.zeros([1, 2, 3]) + assert_raises(IndexError, lambda: a.flat[np.array([10])]) + assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5)) + a = np.zeros([1, 0, 3]) + assert_raises(IndexError, lambda: a.flat[np.array([10])]) + assert_raises(IndexError, lambda: assign(a.flat, 
np.array([10]), 5)) + + def test_mapping(self): + "cases from mapping.c" + + def assign(obj, ind, val): + obj[ind] = val + + a = np.zeros((0, 10)) + assert_raises(IndexError, lambda: a[12]) + + a = np.zeros((3, 5)) + assert_raises(IndexError, lambda: a[(10, 20)]) + assert_raises(IndexError, lambda: assign(a, (10, 20), 1)) + a = np.zeros((3, 0)) + assert_raises(IndexError, lambda: a[(1, 0)]) + assert_raises(IndexError, lambda: assign(a, (1, 0), 1)) + + a = np.zeros((10,)) + assert_raises(IndexError, lambda: assign(a, 10, 1)) + a = np.zeros((0,)) + assert_raises(IndexError, lambda: assign(a, 10, 1)) + + a = np.zeros((3, 5)) + assert_raises(IndexError, lambda: a[(1, [1, 20])]) + assert_raises(IndexError, lambda: assign(a, (1, [1, 20]), 1)) + a = np.zeros((3, 0)) + assert_raises(IndexError, lambda: a[(1, [0, 1])]) + assert_raises(IndexError, lambda: assign(a, (1, [0, 1]), 1)) + + def test_mapping_error_message(self): + a = np.zeros((3, 5)) + index = (1, 2, 3, 4, 5) + assert_raises_regex( + IndexError, + "too many indices for array: " + "array is 2-dimensional, but 5 were indexed", + lambda: a[index]) + + def test_methods(self): + "cases from methods.c" + + a = np.zeros((3, 3)) + assert_raises(IndexError, lambda: a.item(100)) + assert_raises(IndexError, lambda: a.itemset(100, 1)) + a = np.zeros((0, 3)) + assert_raises(IndexError, lambda: a.item(100)) + assert_raises(IndexError, lambda: a.itemset(100, 1)) diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_indexing.py b/MLPY/Lib/site-packages/numpy/core/tests/test_indexing.py new file mode 100644 index 0000000000000000000000000000000000000000..07b21f12503f4d5c69034442bf632dcf8c1bc77c --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/test_indexing.py @@ -0,0 +1,1411 @@ +import sys +import warnings +import functools +import operator + +import pytest + +import numpy as np +from numpy.core._multiarray_tests import array_indexing +from itertools import product +from numpy.testing import ( + assert_, assert_equal, assert_raises, assert_raises_regex, + assert_array_equal, assert_warns, HAS_REFCOUNT, + ) + + +class TestIndexing: + def test_index_no_floats(self): + a = np.array([[[5]]]) + + assert_raises(IndexError, lambda: a[0.0]) + assert_raises(IndexError, lambda: a[0, 0.0]) + assert_raises(IndexError, lambda: a[0.0, 0]) + assert_raises(IndexError, lambda: a[0.0,:]) + assert_raises(IndexError, lambda: a[:, 0.0]) + assert_raises(IndexError, lambda: a[:, 0.0,:]) + assert_raises(IndexError, lambda: a[0.0,:,:]) + assert_raises(IndexError, lambda: a[0, 0, 0.0]) + assert_raises(IndexError, lambda: a[0.0, 0, 0]) + assert_raises(IndexError, lambda: a[0, 0.0, 0]) + assert_raises(IndexError, lambda: a[-1.4]) + assert_raises(IndexError, lambda: a[0, -1.4]) + assert_raises(IndexError, lambda: a[-1.4, 0]) + assert_raises(IndexError, lambda: a[-1.4,:]) + assert_raises(IndexError, lambda: a[:, -1.4]) + assert_raises(IndexError, lambda: a[:, -1.4,:]) + assert_raises(IndexError, lambda: a[-1.4,:,:]) + assert_raises(IndexError, lambda: a[0, 0, -1.4]) + assert_raises(IndexError, lambda: a[-1.4, 0, 0]) + assert_raises(IndexError, lambda: a[0, -1.4, 0]) + assert_raises(IndexError, lambda: a[0.0:, 0.0]) + assert_raises(IndexError, lambda: a[0.0:, 0.0,:]) + + def test_slicing_no_floats(self): + a = np.array([[5]]) + + # start as float. 
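Editorial aside: test_index_no_floats above and test_slicing_no_floats, whose assertions continue below, draw a deliberate distinction that is easy to miss. A float used as an index raises IndexError, while a float used as a slice bound raises TypeError. A minimal sketch:

```python
import numpy as np

a = np.arange(10)
try:
    a[1.0]       # float as an index
except IndexError:
    print('index: IndexError')
try:
    a[1.0:3]     # float as a slice bound
except TypeError:
    print('slice: TypeError')
```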
+ assert_raises(TypeError, lambda: a[0.0:]) + assert_raises(TypeError, lambda: a[0:, 0.0:2]) + assert_raises(TypeError, lambda: a[0.0::2, :0]) + assert_raises(TypeError, lambda: a[0.0:1:2,:]) + assert_raises(TypeError, lambda: a[:, 0.0:]) + # stop as float. + assert_raises(TypeError, lambda: a[:0.0]) + assert_raises(TypeError, lambda: a[:0, 1:2.0]) + assert_raises(TypeError, lambda: a[:0.0:2, :0]) + assert_raises(TypeError, lambda: a[:0.0,:]) + assert_raises(TypeError, lambda: a[:, 0:4.0:2]) + # step as float. + assert_raises(TypeError, lambda: a[::1.0]) + assert_raises(TypeError, lambda: a[0:, :2:2.0]) + assert_raises(TypeError, lambda: a[1::4.0, :0]) + assert_raises(TypeError, lambda: a[::5.0,:]) + assert_raises(TypeError, lambda: a[:, 0:4:2.0]) + # mixed. + assert_raises(TypeError, lambda: a[1.0:2:2.0]) + assert_raises(TypeError, lambda: a[1.0::2.0]) + assert_raises(TypeError, lambda: a[0:, :2.0:2.0]) + assert_raises(TypeError, lambda: a[1.0:1:4.0, :0]) + assert_raises(TypeError, lambda: a[1.0:5.0:5.0,:]) + assert_raises(TypeError, lambda: a[:, 0.4:4.0:2.0]) + # should still get the DeprecationWarning if step = 0. + assert_raises(TypeError, lambda: a[::0.0]) + + def test_index_no_array_to_index(self): + # No non-scalar arrays. + a = np.array([[[1]]]) + + assert_raises(TypeError, lambda: a[a:a:a]) + + def test_none_index(self): + # `None` index adds newaxis + a = np.array([1, 2, 3]) + assert_equal(a[None], a[np.newaxis]) + assert_equal(a[None].ndim, a.ndim + 1) + + def test_empty_tuple_index(self): + # Empty tuple index creates a view + a = np.array([1, 2, 3]) + assert_equal(a[()], a) + assert_(a[()].base is a) + a = np.array(0) + assert_(isinstance(a[()], np.int_)) + + def test_void_scalar_empty_tuple(self): + s = np.zeros((), dtype='V4') + assert_equal(s[()].dtype, s.dtype) + assert_equal(s[()], s) + assert_equal(type(s[...]), np.ndarray) + + def test_same_kind_index_casting(self): + # Indexes should be cast with same-kind and not safe, even if that + # is somewhat unsafe. So test various different code paths. + index = np.arange(5) + u_index = index.astype(np.uintp) + arr = np.arange(10) + + assert_array_equal(arr[index], arr[u_index]) + arr[u_index] = np.arange(5) + assert_array_equal(arr, np.arange(10)) + + arr = np.arange(10).reshape(5, 2) + assert_array_equal(arr[index], arr[u_index]) + + arr[u_index] = np.arange(5)[:,None] + assert_array_equal(arr, np.arange(5)[:,None].repeat(2, axis=1)) + + arr = np.arange(25).reshape(5, 5) + assert_array_equal(arr[u_index, u_index], arr[index, index]) + + def test_empty_fancy_index(self): + # Empty list index creates an empty array + # with the same dtype (but with weird shape) + a = np.array([1, 2, 3]) + assert_equal(a[[]], []) + assert_equal(a[[]].dtype, a.dtype) + + b = np.array([], dtype=np.intp) + assert_equal(a[[]], []) + assert_equal(a[[]].dtype, a.dtype) + + b = np.array([]) + assert_raises(IndexError, a.__getitem__, b) + + def test_ellipsis_index(self): + a = np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + assert_(a[...] is not a) + assert_equal(a[...], a) + # `a[...]` was `a` in numpy <1.9. 
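+ # Since 1.9 it is a true view instead; an extra illustrative check of + # that view relationship (not part of the original test): + assert_(np.may_share_memory(a[...], a))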
+ assert_(a[...].base is a) + + # Slicing with ellipsis can skip an + # arbitrary number of dimensions + assert_equal(a[0, ...], a[0]) + assert_equal(a[0, ...], a[0,:]) + assert_equal(a[..., 0], a[:, 0]) + + # Slicing with ellipsis always results + # in an array, not a scalar + assert_equal(a[0, ..., 1], np.array(2)) + + # Assignment with `(Ellipsis,)` on 0-d arrays + b = np.array(1) + b[(Ellipsis,)] = 2 + assert_equal(b, 2) + + def test_single_int_index(self): + # Single integer index selects one row + a = np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + + assert_equal(a[0], [1, 2, 3]) + assert_equal(a[-1], [7, 8, 9]) + + # Index out of bounds produces IndexError + assert_raises(IndexError, a.__getitem__, 1 << 30) + # Index overflow produces IndexError + assert_raises(IndexError, a.__getitem__, 1 << 64) + + def test_single_bool_index(self): + # Single boolean index + a = np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + + assert_equal(a[np.array(True)], a[None]) + assert_equal(a[np.array(False)], a[None][0:0]) + + def test_boolean_shape_mismatch(self): + arr = np.ones((5, 4, 3)) + + index = np.array([True]) + assert_raises(IndexError, arr.__getitem__, index) + + index = np.array([False] * 6) + assert_raises(IndexError, arr.__getitem__, index) + + index = np.zeros((4, 4), dtype=bool) + assert_raises(IndexError, arr.__getitem__, index) + + assert_raises(IndexError, arr.__getitem__, (slice(None), index)) + + def test_boolean_indexing_onedim(self): + # Indexing a 2-dimensional array with + # boolean array of length one + a = np.array([[ 0., 0., 0.]]) + b = np.array([ True], dtype=bool) + assert_equal(a[b], a) + # boolean assignment + a[b] = 1. + assert_equal(a, [[1., 1., 1.]]) + + def test_boolean_assignment_value_mismatch(self): + # A boolean assignment should fail when the shape of the values + # cannot be broadcast to the subscription. (see also gh-3458) + a = np.arange(4) + + def f(a, v): + a[a > -1] = v + + assert_raises(ValueError, f, a, []) + assert_raises(ValueError, f, a, [1, 2, 3]) + assert_raises(ValueError, f, a[:1], [1, 2, 3]) + + def test_boolean_assignment_needs_api(self): + # See also gh-7666 + # This caused a segfault on Python 2 due to the GIL not being + # held when the iterator does not need it, but the transfer function + # does + arr = np.zeros(1000) + indx = np.zeros(1000, dtype=bool) + indx[:100] = True + arr[indx] = np.ones(100, dtype=object) + + expected = np.zeros(1000) + expected[:100] = 1 + assert_array_equal(arr, expected) + + def test_boolean_indexing_twodim(self): + # Indexing a 2-dimensional array with + # 2-dimensional boolean array + a = np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + b = np.array([[ True, False, True], + [False, True, False], + [ True, False, True]]) + assert_equal(a[b], [1, 3, 5, 7, 9]) + assert_equal(a[b[1]], [[4, 5, 6]]) + assert_equal(a[b[0]], a[b[2]]) + + # boolean assignment + a[b] = 0 + assert_equal(a, [[0, 2, 0], + [4, 0, 6], + [0, 8, 0]]) + + def test_boolean_indexing_list(self): + # Regression test for #13715. It's a use-after-free bug which the + # test won't directly catch, but it will show up in valgrind. + a = np.array([1, 2, 3]) + b = [True, False, True] + # Two variants of the test because the first takes a fast path + assert_equal(a[b], [1, 3]) + assert_equal(a[None, b], [[1, 3]]) + + def test_reverse_strides_and_subspace_bufferinit(self): + # This tests that the strides are not reversed for simple and + # subspace fancy indexing. 
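+ # Failure-mode sketch: b below is all zeros but carries negative + # strides; if the implementation re-reversed those strides, c would be + # written in the opposite order and a[0] would end up as 4, not 0.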
+ a = np.ones(5) + b = np.zeros(5, dtype=np.intp)[::-1] + c = np.arange(5)[::-1] + + a[b] = c + # If the strides are not reversed, the 0 in the arange comes last. + assert_equal(a[0], 0) + + # This also tests that the subspace buffer is initialized: + a = np.ones((5, 2)) + c = np.arange(10).reshape(5, 2)[::-1] + a[b, :] = c + assert_equal(a[0], [0, 1]) + + def test_reversed_strides_result_allocation(self): + # Test a bug when calculating the output strides for a result array + # when the subspace size was 1 (and test other cases as well) + a = np.arange(10)[:, None] + i = np.arange(10)[::-1] + assert_array_equal(a[i], a[i.copy('C')]) + + a = np.arange(20).reshape(-1, 2) + + def test_uncontiguous_subspace_assignment(self): + # During development there was a bug activating a skip logic + # based on ndim instead of size. + a = np.full((3, 4, 2), -1) + b = np.full((3, 4, 2), -1) + + a[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T + b[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T.copy() + + assert_equal(a, b) + + def test_too_many_fancy_indices_special_case(self): + # Just documents behaviour, this is a small limitation. + a = np.ones((1,) * 32) # 32 is NPY_MAXDIMS + assert_raises(IndexError, a.__getitem__, (np.array([0]),) * 32) + + def test_scalar_array_bool(self): + # NumPy bools can be used as boolean index (python ones as of yet not) + a = np.array(1) + assert_equal(a[np.bool_(True)], a[np.array(True)]) + assert_equal(a[np.bool_(False)], a[np.array(False)]) + + # After deprecating bools as integers: + #a = np.array([0,1,2]) + #assert_equal(a[True, :], a[None, :]) + #assert_equal(a[:, True], a[:, None]) + # + #assert_(not np.may_share_memory(a, a[True, :])) + + def test_everything_returns_views(self): + # Before `...` would return a itself. + a = np.arange(5) + + assert_(a is not a[()]) + assert_(a is not a[...]) + assert_(a is not a[:]) + + def test_broaderrors_indexing(self): + a = np.zeros((5, 5)) + assert_raises(IndexError, a.__getitem__, ([0, 1], [0, 1, 2])) + assert_raises(IndexError, a.__setitem__, ([0, 1], [0, 1, 2]), 0) + + def test_trivial_fancy_out_of_bounds(self): + a = np.zeros(5) + ind = np.ones(20, dtype=np.intp) + ind[-1] = 10 + assert_raises(IndexError, a.__getitem__, ind) + assert_raises(IndexError, a.__setitem__, ind, 0) + ind = np.ones(20, dtype=np.intp) + ind[0] = 11 + assert_raises(IndexError, a.__getitem__, ind) + assert_raises(IndexError, a.__setitem__, ind, 0) + + def test_trivial_fancy_not_possible(self): + # Test that the fast path for trivial assignment is not incorrectly + # used when the index is not contiguous or 1D, see also gh-11467. + a = np.arange(6) + idx = np.arange(6, dtype=np.intp).reshape(2, 1, 3)[:, :, 0] + assert_array_equal(a[idx], idx) + + # this case must not go into the fast path; note that idx is + # a non-contiguous, non-1D array here. + a[idx] = -1 + res = np.arange(6) + res[0] = -1 + res[3] = -1 + assert_array_equal(a, res) + + def test_nonbaseclass_values(self): + class SubClass(np.ndarray): + def __array_finalize__(self, old): + # Have array finalize do funny things + self.fill(99) + + a = np.zeros((5, 5)) + s = a.copy().view(type=SubClass) + s.fill(1) + + a[[0, 1, 2, 3, 4], :] = s + assert_((a == 1).all()) + + # Subspace is last, so transposing might want to finalize + a[:, [0, 1, 2, 3, 4]] = s + assert_((a == 1).all()) + + a.fill(0) + a[...]
= s + assert_((a == 1).all()) + + def test_array_like_values(self): + # Similar to the above test, but use a memoryview instead + a = np.zeros((5, 5)) + s = np.arange(25, dtype=np.float64).reshape(5, 5) + + a[[0, 1, 2, 3, 4], :] = memoryview(s) + assert_array_equal(a, s) + + a[:, [0, 1, 2, 3, 4]] = memoryview(s) + assert_array_equal(a, s) + + a[...] = memoryview(s) + assert_array_equal(a, s) + + def test_subclass_writeable(self): + d = np.rec.array([('NGC1001', 11), ('NGC1002', 1.), ('NGC1003', 1.)], + dtype=[('target', 'S20'), ('V_mag', '>f4')]) + ind = np.array([False, True, True], dtype=bool) + assert_(d[ind].flags.writeable) + ind = np.array([0, 1]) + assert_(d[ind].flags.writeable) + assert_(d[...].flags.writeable) + assert_(d[0].flags.writeable) + + def test_memory_order(self): + # This is not necessary to preserve. Memory layouts for + # more complex indices are not as simple. + a = np.arange(10) + b = np.arange(10).reshape(5,2).T + assert_(a[b].flags.f_contiguous) + + # Takes a different implementation branch: + a = a.reshape(-1, 1) + assert_(a[b, 0].flags.f_contiguous) + + def test_scalar_return_type(self): + # Full scalar indices should return scalars and object + # arrays should not call PyArray_Return on their items + class Zero: + # The most basic valid indexing + def __index__(self): + return 0 + + z = Zero() + + class ArrayLike: + # Simple array, should behave like the array + def __array__(self): + return np.array(0) + + a = np.zeros(()) + assert_(isinstance(a[()], np.float_)) + a = np.zeros(1) + assert_(isinstance(a[z], np.float_)) + a = np.zeros((1, 1)) + assert_(isinstance(a[z, np.array(0)], np.float_)) + assert_(isinstance(a[z, ArrayLike()], np.float_)) + + # And object arrays do not call it too often: + b = np.array(0) + a = np.array(0, dtype=object) + a[()] = b + assert_(isinstance(a[()], np.ndarray)) + a = np.array([b, None]) + assert_(isinstance(a[z], np.ndarray)) + a = np.array([[b, None]]) + assert_(isinstance(a[z, np.array(0)], np.ndarray)) + assert_(isinstance(a[z, ArrayLike()], np.ndarray)) + + def test_small_regressions(self): + # Reference count of intp for index checks + a = np.array([0]) + if HAS_REFCOUNT: + refcount = sys.getrefcount(np.dtype(np.intp)) + # item setting always checks indices in separate function: + a[np.array([0], dtype=np.intp)] = 1 + a[np.array([0], dtype=np.uint8)] = 1 + assert_raises(IndexError, a.__setitem__, + np.array([1], dtype=np.intp), 1) + assert_raises(IndexError, a.__setitem__, + np.array([1], dtype=np.uint8), 1) + + if HAS_REFCOUNT: + assert_equal(sys.getrefcount(np.dtype(np.intp)), refcount) + + def test_unaligned(self): + v = (np.zeros(64, dtype=np.int8) + ord('a'))[1:-7] + d = v.view(np.dtype("S8")) + # unaligned source + x = (np.zeros(16, dtype=np.int8) + ord('a'))[1:-7] + x = x.view(np.dtype("S8")) + x[...] = np.array("b" * 8, dtype="S") + b = np.arange(d.size) + #trivial + assert_equal(d[b], d) + d[b] = x + # nontrivial + # unaligned index array + b = np.zeros(d.size + 1).view(np.int8)[1:-(np.intp(0).itemsize - 1)] + b = b.view(np.intp)[:d.size] + b[...] 
= np.arange(d.size) + assert_equal(d[b.astype(np.int16)], d) + d[b.astype(np.int16)] = x + # boolean + d[b % 2 == 0] + d[b % 2 == 0] = x[::2] + + def test_tuple_subclass(self): + arr = np.ones((5, 5)) + + # A tuple subclass should also be an nd-index + class TupleSubclass(tuple): + pass + index = ([1], [1]) + index = TupleSubclass(index) + assert_(arr[index].shape == (1,)) + # Unlike the non nd-index: + assert_(arr[index,].shape != (1,)) + + def test_broken_sequence_not_nd_index(self): + # See gh-5063: + # If we have an object which claims to be a sequence, but fails + # on item getting, this should not be converted to an nd-index (tuple) + # If this object happens to be a valid index otherwise, it should work + # This object here is very dubious and probably bad though: + class SequenceLike: + def __index__(self): + return 0 + + def __len__(self): + return 1 + + def __getitem__(self, item): + raise IndexError('Not possible') + + arr = np.arange(10) + assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),]) + + # also test that field indexing does not segfault + # for a similar reason, by indexing a structured array + arr = np.zeros((1,), dtype=[('f1', 'i8'), ('f2', 'i8')]) + assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),]) + + def test_indexing_array_weird_strides(self): + # See also gh-6221 + # the shapes used here come from the issue and create the correct + # size for the iterator buffering size. + x = np.ones(10) + x2 = np.ones((10, 2)) + ind = np.arange(10)[:, None, None, None] + ind = np.broadcast_to(ind, (10, 55, 4, 4)) + + # single advanced index case + assert_array_equal(x[ind], x[ind.copy()]) + # higher dimensional advanced index + zind = np.zeros(4, dtype=np.intp) + assert_array_equal(x2[ind, zind], x2[ind.copy(), zind]) + + def test_indexing_array_negative_strides(self): + # From gh-8264, + # core dumps if negative strides are used in iteration + arro = np.zeros((4, 4)) + arr = arro[::-1, ::-1] + + slices = (slice(None), [0, 1, 2, 3]) + arr[slices] = 10 + assert_array_equal(arr, 10.) + + def test_character_assignment(self): + # This is an example of a function going through CopyObject which + # used to have an untested special path for scalars + # (the character special dtype case, should be deprecated probably) + arr = np.zeros((1, 5), dtype="c") + arr[0] = np.str_("asdfg") # must assign as a sequence + assert_array_equal(arr[0], np.array("asdfg", dtype="c")) + assert arr[0, 1] == b"s" # make sure not all were set to "a" for both + + @pytest.mark.parametrize("index", + [True, False, np.array([0])]) + @pytest.mark.parametrize("num", [32, 40]) + @pytest.mark.parametrize("original_ndim", [1, 32]) + def test_too_many_advanced_indices(self, index, num, original_ndim): + # These are limitations based on the number of arguments we can process. + # For `num=32` (and all boolean cases), the result is actually defined; + # but the use of NpyIter (NPY_MAXARGS) limits it for technical reasons. + arr = np.ones((1,) * original_ndim) + with pytest.raises(IndexError): + arr[(index,) * num] + with pytest.raises(IndexError): + arr[(index,) * num] = 1. + + def test_structured_advanced_indexing(self): + # Test that copyswap(n) used by integer array indexing is threadsafe + # for structured datatypes, see gh-15387. This test can behave randomly.
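+ # Race sketch: several threads fancy-index the same structured array + # concurrently; a non-threadsafe copyswap buffer would corrupt reads + # or crash, so the test simply hammers arr[indx] from a thread pool.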
+ from concurrent.futures import ThreadPoolExecutor + + # Create a deeply nested dtype to make a failure more likely: + dt = np.dtype([("", "f8")]) + dt = np.dtype([("", dt)] * 2) + dt = np.dtype([("", dt)] * 2) + # The array should be large enough to likely run into threading issues + arr = np.random.uniform(size=(6000, 8)).view(dt)[:, 0] + + rng = np.random.default_rng() + def func(arr): + indx = rng.integers(0, len(arr), size=6000, dtype=np.intp) + arr[indx] + + tpe = ThreadPoolExecutor(max_workers=8) + futures = [tpe.submit(func, arr) for _ in range(10)] + for f in futures: + f.result() + + assert arr.dtype is dt + + +class TestFieldIndexing: + def test_scalar_return_type(self): + # Field access on an array should return an array, even if it + # is 0-d. + a = np.zeros((), [('a','f8')]) + assert_(isinstance(a['a'], np.ndarray)) + assert_(isinstance(a[['a']], np.ndarray)) + + +class TestBroadcastedAssignments: + def assign(self, a, ind, val): + a[ind] = val + return a + + def test_prepending_ones(self): + a = np.zeros((3, 2)) + + a[...] = np.ones((1, 3, 2)) + # Fancy with subspace with and without transpose + a[[0, 1, 2], :] = np.ones((1, 3, 2)) + a[:, [0, 1]] = np.ones((1, 3, 2)) + # Fancy without subspace (with broadcasting) + a[[[0], [1], [2]], [0, 1]] = np.ones((1, 3, 2)) + + def test_prepend_not_one(self): + assign = self.assign + s_ = np.s_ + a = np.zeros(5) + + # Too large and not only ones. + assert_raises(ValueError, assign, a, s_[...], np.ones((2, 1))) + assert_raises(ValueError, assign, a, s_[[1, 2, 3],], np.ones((2, 1))) + assert_raises(ValueError, assign, a, s_[[[1], [2]],], np.ones((2,2,1))) + + def test_simple_broadcasting_errors(self): + assign = self.assign + s_ = np.s_ + a = np.zeros((5, 1)) + + assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 2))) + assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 0))) + assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 2))) + assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 0))) + assert_raises(ValueError, assign, a, s_[[0], :], np.zeros((2, 1))) + + @pytest.mark.parametrize("index", [ + (..., [1, 2], slice(None)), + ([0, 1], ..., 0), + (..., [1, 2], [1, 2])]) + def test_broadcast_error_reports_correct_shape(self, index): + values = np.zeros((100, 100)) # will never broadcast below + + arr = np.zeros((3, 4, 5, 6, 7)) + # We currently report without any spaces (could be changed) + shape_str = str(arr[index].shape).replace(" ", "") + + with pytest.raises(ValueError) as e: + arr[index] = values + + assert str(e.value).endswith(shape_str) + + def test_index_is_larger(self): + # Simple case of fancy index broadcasting of the index. + a = np.zeros((5, 5)) + a[[[0], [1], [2]], [0, 1, 2]] = [2, 3, 4] + + assert_((a[:3, :3] == [2, 3, 4]).all()) + + def test_broadcast_subspace(self): + a = np.zeros((100, 100)) + v = np.arange(100)[:,None] + b = np.arange(100)[::-1] + a[b] = v + assert_((a[::-1] == v).all()) + + +class TestSubclasses: + def test_basic(self): + # Test that indexing in various ways produces SubClass instances, + # and that the base is set up correctly: the original subclass + # instance for views, and a new ndarray for advanced/boolean indexing + # where a copy was made (latter a regression test for gh-11983). 
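+ # Summary of the asserts below: view-producing indices keep `base` + # pointing at the subclass instance, while advanced and boolean + # indexing copy into a plain ndarray base.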
+ class SubClass(np.ndarray): + pass + + a = np.arange(5) + s = a.view(SubClass) + s_slice = s[:3] + assert_(type(s_slice) is SubClass) + assert_(s_slice.base is s) + assert_array_equal(s_slice, a[:3]) + + s_fancy = s[[0, 1, 2]] + assert_(type(s_fancy) is SubClass) + assert_(s_fancy.base is not s) + assert_(type(s_fancy.base) is np.ndarray) + assert_array_equal(s_fancy, a[[0, 1, 2]]) + assert_array_equal(s_fancy.base, a[[0, 1, 2]]) + + s_bool = s[s > 0] + assert_(type(s_bool) is SubClass) + assert_(s_bool.base is not s) + assert_(type(s_bool.base) is np.ndarray) + assert_array_equal(s_bool, a[a > 0]) + assert_array_equal(s_bool.base, a[a > 0]) + + def test_fancy_on_read_only(self): + # Test that fancy indexing on read-only SubClass does not make a + # read-only copy (gh-14132) + class SubClass(np.ndarray): + pass + + a = np.arange(5) + s = a.view(SubClass) + s.flags.writeable = False + s_fancy = s[[0, 1, 2]] + assert_(s_fancy.flags.writeable) + + + def test_finalize_gets_full_info(self): + # Array finalize should be called on the filled array. + class SubClass(np.ndarray): + def __array_finalize__(self, old): + self.finalize_status = np.array(self) + self.old = old + + s = np.arange(10).view(SubClass) + new_s = s[:3] + assert_array_equal(new_s.finalize_status, new_s) + assert_array_equal(new_s.old, s) + + new_s = s[[0,1,2,3]] + assert_array_equal(new_s.finalize_status, new_s) + assert_array_equal(new_s.old, s) + + new_s = s[s > 0] + assert_array_equal(new_s.finalize_status, new_s) + assert_array_equal(new_s.old, s) + + +class TestFancyIndexingCast: + def test_boolean_index_cast_assign(self): + # Setup the boolean index and float arrays. + shape = (8, 63) + bool_index = np.zeros(shape).astype(bool) + bool_index[0, 1] = True + zero_array = np.zeros(shape) + + # Assigning float is fine. + zero_array[bool_index] = np.array([1]) + assert_equal(zero_array[0, 1], 1) + + # Fancy indexing works, although we get a cast warning. + assert_warns(np.ComplexWarning, + zero_array.__setitem__, ([0], [1]), np.array([2 + 1j])) + assert_equal(zero_array[0, 1], 2) # No complex part + + # Cast complex to float, throwing away the imaginary portion. + assert_warns(np.ComplexWarning, + zero_array.__setitem__, bool_index, np.array([1j])) + assert_equal(zero_array[0, 1], 0) + +class TestFancyIndexingEquivalence: + def test_object_assign(self): + # Check that the field and object special case using copyto is active. + # The right hand side cannot be converted to an array here. + a = np.arange(5, dtype=object) + b = a.copy() + a[:3] = [1, (1,2), 3] + b[[0, 1, 2]] = [1, (1,2), 3] + assert_array_equal(a, b) + + # test same for subspace fancy indexing + b = np.arange(5, dtype=object)[None, :] + b[[0], :3] = [[1, (1,2), 3]] + assert_array_equal(a, b[0]) + + # Check that swapping of axes works. + # There was a bug that made the later assignment throw a ValueError + # due to an incorrectly transposed temporary right hand side (gh-5714) + b = b.T + b[:3, [0]] = [[1], [(1,2)], [3]] + assert_array_equal(a, b[:, 0]) + + # Another test for the memory order of the subspace + arr = np.ones((3, 4, 5), dtype=object) + # Equivalent slicing assignment for comparison + cmp_arr = arr.copy() + cmp_arr[:1, ...] = [[[1], [2], [3], [4]]] + arr[[0], ...] = [[[1], [2], [3], [4]]] + assert_array_equal(arr, cmp_arr) + arr = arr.copy('F') + arr[[0], ...] = [[[1], [2], [3], [4]]] + assert_array_equal(arr, cmp_arr) + + def test_cast_equivalence(self): + # Yes, normal slicing uses unsafe casting.
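+ # Both assignment styles below parse the strings to integers under + # unsafe casting; the point is that the fancy-index path matches the + # slice path (illustratively, arr[:1] = np.array(['7']) stores 7).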
+ a = np.arange(5) + b = a.copy() + + a[:3] = np.array(['2', '-3', '-1']) + b[[0, 2, 1]] = np.array(['2', '-1', '-3']) + assert_array_equal(a, b) + + # test the same for subspace fancy indexing + b = np.arange(5)[None, :] + b[[0], :3] = np.array([['2', '-3', '-1']]) + assert_array_equal(a, b[0]) + + +class TestMultiIndexingAutomated: + """ + These tests use code to mimic the C-Code indexing for selection. + + NOTE: + + * This still lacks tests for complex item setting. + * If you change behavior of indexing, you might want to modify + these tests to try more combinations. + * Behavior was written to match numpy version 1.8. (though a + first version matched 1.7.) + * Only tuple indices are supported by the mimicking code. + (and tested as of writing this) + * Error types should match most of the time as long as there + is only one error. For multiple errors, what gets raised + will usually not be the same one. They are *not* tested. + + Update 2016-11-30: It is probably not worth maintaining this test + indefinitely and it can be dropped if maintenance becomes a burden. + + """ + + def setup(self): + self.a = np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6) + self.b = np.empty((3, 0, 5, 6)) + self.complex_indices = ['skip', Ellipsis, + 0, + # Boolean indices, up to 3-d for some special cases of eating up + # dimensions, also need to test all False + np.array([True, False, False]), + np.array([[True, False], [False, True]]), + np.array([[[False, False], [False, False]]]), + # Some slices: + slice(-5, 5, 2), + slice(1, 1, 100), + slice(4, -1, -2), + slice(None, None, -3), + # Some Fancy indexes: + np.empty((0, 1, 1), dtype=np.intp), # empty and can be broadcast + np.array([0, 1, -2]), + np.array([[2], [0], [1]]), + np.array([[0, -1], [0, 1]], dtype=np.dtype('intp').newbyteorder()), + np.array([2, -1], dtype=np.int8), + np.zeros([1]*31, dtype=int), # trigger too large array. + np.array([0., 1.])] # invalid datatype + # Some simpler indices that still cover a bit more + self.simple_indices = [Ellipsis, None, -1, [1], np.array([True]), + 'skip'] + # Very simple ones to fill the rest: + self.fill_indices = [slice(None, None), 0] + + def _get_multi_index(self, arr, indices): + """Mimic multi dimensional indexing. + + Parameters + ---------- + arr : ndarray + Array to be indexed. + indices : tuple of index objects + + Returns + ------- + out : ndarray + An array equivalent to the indexing operation (but always a copy). + `arr[indices]` should be identical. + no_copy : bool + Whether the indexing operation requires a copy. If this is `True`, + `np.may_share_memory(arr, arr[indices])` should be `True` (with + some exceptions for scalars and possibly 0-d arrays). + + Notes + ----- + While the function may mostly match the errors of normal indexing this + is generally not the case. + """ + in_indices = list(indices) + indices = [] + # if False, this is a fancy or boolean index + no_copy = True + # number of fancy/scalar indexes that are not consecutive + num_fancy = 0 + # number of dimensions indexed by a "fancy" index + fancy_dim = 0 + # NOTE: This is a funny twist (and probably OK to change). + # The boolean array has illegal indexes, but this is + # allowed if the broadcast fancy-indices are 0-sized. + # This variable is to catch that case. + error_unless_broadcast_to_empty = False + + # We need to handle Ellipsis and make arrays from indices, also + # check if this is fancy indexing (set no_copy). + ndim = 0 + ellipsis_pos = None # define here mostly to replace all but first. 
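+ # First pass (sketched): normalize each index to an ndarray or slice, + # count the dimensions it consumes, clear `no_copy` for fancy/boolean + # indices, and record where the single allowed Ellipsis sits.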
+ for i, indx in enumerate(in_indices): + if indx is None: + continue + if isinstance(indx, np.ndarray) and indx.dtype == bool: + no_copy = False + if indx.ndim == 0: + raise IndexError + # boolean indices can have higher dimensions + ndim += indx.ndim + fancy_dim += indx.ndim + continue + if indx is Ellipsis: + if ellipsis_pos is None: + ellipsis_pos = i + continue # do not increment ndim counter + raise IndexError + if isinstance(indx, slice): + ndim += 1 + continue + if not isinstance(indx, np.ndarray): + # This could be open for changes in numpy. + # numpy should maybe raise an error if casting to intp + # is not safe. It rejects np.array([1., 2.]) but not + # [1., 2.] as index (same for e.g. np.take). + # (Note the importance of empty lists if changing this here) + try: + indx = np.array(indx, dtype=np.intp) + except ValueError: + raise IndexError + in_indices[i] = indx + elif indx.dtype.kind != 'b' and indx.dtype.kind != 'i': + raise IndexError('arrays used as indices must be of ' + 'integer (or boolean) type') + if indx.ndim != 0: + no_copy = False + ndim += 1 + fancy_dim += 1 + + if arr.ndim - ndim < 0: + # we can't take more dimensions than we have, not even for 0-d + # arrays. Since a[()] makes sense, but not a[(),]. We will + # raise an error later on, unless a broadcasting error occurs + # first. + raise IndexError + + if ndim == 0 and None not in in_indices: + # Well, we have no indexes or one Ellipsis. This is legal. + return arr.copy(), no_copy + + if ellipsis_pos is not None: + in_indices[ellipsis_pos:ellipsis_pos+1] = ([slice(None, None)] * + (arr.ndim - ndim)) + + for ax, indx in enumerate(in_indices): + if isinstance(indx, slice): + # convert to an index array + indx = np.arange(*indx.indices(arr.shape[ax])) + indices.append(['s', indx]) + continue + elif indx is None: + # this is like taking a slice with one element from a new axis: + indices.append(['n', np.array([0], dtype=np.intp)]) + arr = arr.reshape((arr.shape[:ax] + (1,) + arr.shape[ax:])) + continue + if isinstance(indx, np.ndarray) and indx.dtype == bool: + if indx.shape != arr.shape[ax:ax+indx.ndim]: + raise IndexError + + try: + flat_indx = np.ravel_multi_index(np.nonzero(indx), + arr.shape[ax:ax+indx.ndim], mode='raise') + except Exception: + error_unless_broadcast_to_empty = True + # fill with 0s instead, and raise error later + flat_indx = np.array([0]*indx.sum(), dtype=np.intp) + # concatenate axis into a single one: + if indx.ndim != 0: + arr = arr.reshape((arr.shape[:ax] + + (np.prod(arr.shape[ax:ax+indx.ndim]),) + + arr.shape[ax+indx.ndim:])) + indx = flat_indx + else: + # This could be changed, a 0-d boolean index can + # make sense (even outside the 0-d indexed array case) + # Note that originally this could be interpreted as an + # integer in the full integer special case. + raise IndexError + else: + # If the index is a singleton, the bounds check is done + # before the broadcasting. This used to be different in <1.9 + if indx.ndim == 0: + if indx >= arr.shape[ax] or indx < -arr.shape[ax]: + raise IndexError + if indx.ndim == 0: + # The index is a scalar. This used to be twofold, but if + # fancy indexing was active, the check was done later, + # possibly after broadcasting it away (1.7. or earlier). + # Now it is always done. + if indx >= arr.shape[ax] or indx < - arr.shape[ax]: + raise IndexError + if (len(indices) > 0 and + indices[-1][0] == 'f' and + ax != ellipsis_pos): + # NOTE: There could still have been a 0-sized Ellipsis + # between them. Checked that with ellipsis_pos.
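+ # i.e. consecutive fancy indices are merged into one 'f' group: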
+ indices[-1].append(indx) + else: + # We have a fancy index that is not after an existing one. + # NOTE: A 0-d array triggers this as well, while one may + # expect it to not trigger it, since a scalar would not be + # considered fancy indexing. + num_fancy += 1 + indices.append(['f', indx]) + + if num_fancy > 1 and not no_copy: + # We have to flush the fancy indexes left + new_indices = indices[:] + axes = list(range(arr.ndim)) + fancy_axes = [] + new_indices.insert(0, ['f']) + ni = 0 + ai = 0 + for indx in indices: + ni += 1 + if indx[0] == 'f': + new_indices[0].extend(indx[1:]) + del new_indices[ni] + ni -= 1 + for ax in range(ai, ai + len(indx[1:])): + fancy_axes.append(ax) + axes.remove(ax) + ai += len(indx) - 1 # axis we are at + indices = new_indices + # and now we need to transpose arr: + arr = arr.transpose(*(fancy_axes + axes)) + + # We only have one 'f' index now and arr is transposed accordingly. + # Now handle newaxis by reshaping... + ax = 0 + for indx in indices: + if indx[0] == 'f': + if len(indx) == 1: + continue + # First of all, reshape arr to combine fancy axes into one: + orig_shape = arr.shape + orig_slice = orig_shape[ax:ax + len(indx[1:])] + arr = arr.reshape((arr.shape[:ax] + + (np.prod(orig_slice).astype(int),) + + arr.shape[ax + len(indx[1:]):])) + + # Check if broadcasting works + res = np.broadcast(*indx[1:]) + # unfortunately the indices might be out of bounds. So check + # that first, and use mode='wrap' then. However only if + # there are any indices... + if res.size != 0: + if error_unless_broadcast_to_empty: + raise IndexError + for _indx, _size in zip(indx[1:], orig_slice): + if _indx.size == 0: + continue + if np.any(_indx >= _size) or np.any(_indx < -_size): + raise IndexError + if len(indx[1:]) == len(orig_slice): + if np.product(orig_slice) == 0: + # Work around for a crash or IndexError with 'wrap' + # in some 0-sized cases. + try: + mi = np.ravel_multi_index(indx[1:], orig_slice, + mode='raise') + except Exception: + # This happens with 0-sized orig_slice (sometimes?) + # here it is a ValueError, but indexing gives a: + raise IndexError('invalid index into 0-sized') + else: + mi = np.ravel_multi_index(indx[1:], orig_slice, + mode='wrap') + else: + # Maybe never happens... + raise ValueError + arr = arr.take(mi.ravel(), axis=ax) + try: + arr = arr.reshape((arr.shape[:ax] + + mi.shape + + arr.shape[ax+1:])) + except ValueError: + # too many dimensions, probably + raise IndexError + ax += mi.ndim + continue + + # If we are here, we have a 1D array for take: + arr = arr.take(indx[1], axis=ax) + ax += 1 + + return arr, no_copy + + def _check_multi_index(self, arr, index): + """Check a multi index item getting and simple setting. + + Parameters + ---------- + arr : ndarray + Array to be indexed, must be a reshaped arange. + index : tuple of indexing objects + Index being tested. + """ + # Test item getting + try: + mimic_get, no_copy = self._get_multi_index(arr, index) + except Exception as e: + if HAS_REFCOUNT: + prev_refcount = sys.getrefcount(arr) + assert_raises(type(e), arr.__getitem__, index) + assert_raises(type(e), arr.__setitem__, index, 0) + if HAS_REFCOUNT: + assert_equal(prev_refcount, sys.getrefcount(arr)) + return + + self._compare_index_result(arr, index, mimic_get, no_copy) + + def _check_single_index(self, arr, index): + """Check a single index item getting and simple setting. + + Parameters + ---------- + arr : ndarray + Array to be indexed, must be an arange. + index : indexing object + Index being tested. 
Must be a single index and not a tuple + of indexing objects (see also `_check_multi_index`). + """ + try: + mimic_get, no_copy = self._get_multi_index(arr, (index,)) + except Exception as e: + if HAS_REFCOUNT: + prev_refcount = sys.getrefcount(arr) + assert_raises(type(e), arr.__getitem__, index) + assert_raises(type(e), arr.__setitem__, index, 0) + if HAS_REFCOUNT: + assert_equal(prev_refcount, sys.getrefcount(arr)) + return + + self._compare_index_result(arr, index, mimic_get, no_copy) + + def _compare_index_result(self, arr, index, mimic_get, no_copy): + """Compare mimicked result to indexing result. + """ + arr = arr.copy() + indexed_arr = arr[index] + assert_array_equal(indexed_arr, mimic_get) + # Check if we got a view, unless it's a 0-sized or 0-d array. + # (then it's not a view, and that does not matter) + if indexed_arr.size != 0 and indexed_arr.ndim != 0: + assert_(np.may_share_memory(indexed_arr, arr) == no_copy) + # Check reference count of the original array + if HAS_REFCOUNT: + if no_copy: + # refcount increases by one: + assert_equal(sys.getrefcount(arr), 3) + else: + assert_equal(sys.getrefcount(arr), 2) + + # Test non-broadcast setitem: + b = arr.copy() + b[index] = mimic_get + 1000 + if b.size == 0: + return # nothing to compare here... + if no_copy and indexed_arr.ndim != 0: + # change indexed_arr in-place to manipulate original: + indexed_arr += 1000 + assert_array_equal(arr, b) + return + # Use the fact that the array is originally an arange: + arr.flat[indexed_arr.ravel()] += 1000 + assert_array_equal(arr, b) + + def test_boolean(self): + a = np.array(5) + assert_equal(a[np.array(True)], 5) + a[np.array(True)] = 1 + assert_equal(a, 1) + # NOTE: This is different from normal broadcasting, as + # arr[boolean_array] works like a multi-index, which means + # it is aligned to the left. This is probably correct for + # consistency with arr[boolean_array,]; also, no broadcasting + # is done at all + self._check_multi_index( + self.a, (np.zeros_like(self.a, dtype=bool),)) + self._check_multi_index( + self.a, (np.zeros_like(self.a, dtype=bool)[..., 0],)) + self._check_multi_index( + self.a, (np.zeros_like(self.a, dtype=bool)[None, ...],)) + + def test_multidim(self): + # Automatically test combinations with complex indexes on 2nd (or 1st) + # spot and the simple ones in one other spot. + with warnings.catch_warnings(): + # This is so that np.array(True) is not accepted in a full integer + # index, when running the file separately.
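+ # Escalating those warnings to errors keeps the mimicking code and + # the real indexing raising matching exception types during the sweep.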
+ warnings.filterwarnings('error', '', DeprecationWarning) + warnings.filterwarnings('error', '', np.VisibleDeprecationWarning) + + def isskip(idx): + return isinstance(idx, str) and idx == "skip" + + for simple_pos in [0, 2, 3]: + tocheck = [self.fill_indices, self.complex_indices, + self.fill_indices, self.fill_indices] + tocheck[simple_pos] = self.simple_indices + for index in product(*tocheck): + index = tuple(i for i in index if not isskip(i)) + self._check_multi_index(self.a, index) + self._check_multi_index(self.b, index) + + # Check very simple item getting: + self._check_multi_index(self.a, (0, 0, 0, 0)) + self._check_multi_index(self.b, (0, 0, 0, 0)) + # Also check (simple cases of) too many indices: + assert_raises(IndexError, self.a.__getitem__, (0, 0, 0, 0, 0)) + assert_raises(IndexError, self.a.__setitem__, (0, 0, 0, 0, 0), 0) + assert_raises(IndexError, self.a.__getitem__, (0, 0, [1], 0, 0)) + assert_raises(IndexError, self.a.__setitem__, (0, 0, [1], 0, 0), 0) + + def test_1d(self): + a = np.arange(10) + for index in self.complex_indices: + self._check_single_index(a, index) + +class TestFloatNonIntegerArgument: + """ + These test that ``TypeError`` is raised when you try to use + non-integers as arguments for indexing and slicing, e.g. ``a[0.0:5]`` + and ``a[0.5]``, or other functions like ``array.reshape(1., -1)``. + + """ + def test_valid_indexing(self): + # These should raise no errors. + a = np.array([[[5]]]) + + a[np.array([0])] + a[[0, 0]] + a[:, [0, 0]] + a[:, 0,:] + a[:,:,:] + + def test_valid_slicing(self): + # These should raise no errors. + a = np.array([[[5]]]) + + a[::] + a[0:] + a[:2] + a[0:2] + a[::2] + a[1::2] + a[:2:2] + a[1:2:2] + + def test_non_integer_argument_errors(self): + a = np.array([[5]]) + + assert_raises(TypeError, np.reshape, a, (1., 1., -1)) + assert_raises(TypeError, np.reshape, a, (np.array(1.), -1)) + assert_raises(TypeError, np.take, a, [0], 1.) + assert_raises(TypeError, np.take, a, [0], np.float64(1.)) + + def test_non_integer_sequence_multiplication(self): + # NumPy scalar sequence multiply should not work with non-integers + def mult(a, b): + return a * b + + assert_raises(TypeError, mult, [1], np.float_(3)) + # following should be OK + mult([1], np.int_(3)) + + def test_reduce_axis_float_index(self): + d = np.zeros((3,3,3)) + assert_raises(TypeError, np.min, d, 0.5) + assert_raises(TypeError, np.min, d, (0.5, 1)) + assert_raises(TypeError, np.min, d, (1, 2.2)) + assert_raises(TypeError, np.min, d, (.2, 1.2)) + + +class TestBooleanIndexing: + # Using a boolean as an integer argument or as an index is an error. + def test_bool_as_int_argument_errors(self): + a = np.array([[[1]]]) + + assert_raises(TypeError, np.reshape, a, (True, -1)) + assert_raises(TypeError, np.reshape, a, (np.bool_(True), -1)) + # Note that operator.index(np.array(True)) does not work, a boolean + # array is thus also deprecated, but not with the same message: + assert_raises(TypeError, operator.index, np.array(True)) + assert_warns(DeprecationWarning, operator.index, np.True_) + assert_raises(TypeError, np.take, args=(a, [0], False)) + + def test_boolean_indexing_weirdness(self): + # Weird boolean indexing things + a = np.ones((2, 3, 4)) + assert a[False, True, ...].shape == (0, 2, 3, 4) + assert a[True, [0, 1], True, True, [1], [[2]]].shape == (1, 2) + assert_raises(IndexError, lambda: a[False, [0, 1], ...]) + + + def test_boolean_indexing_fast_path(self): + # These used to either give the wrong error, or incorrectly give no + # error.
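+ # The fast path must still verify that every boolean dimension matches + # the corresponding array dimension, as the messages below require.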
+ a = np.ones((3, 3)) + + # This used to incorrectly work (and give an array of shape (0,)) + idx1 = np.array([[False]*9]) + assert_raises_regex(IndexError, + "boolean index did not match indexed array along dimension 0; " + "dimension is 3 but corresponding boolean dimension is 1", + lambda: a[idx1]) + + # This used to incorrectly give a ValueError: operands could not be broadcast together + idx2 = np.array([[False]*8 + [True]]) + assert_raises_regex(IndexError, + "boolean index did not match indexed array along dimension 0; " + "dimension is 3 but corresponding boolean dimension is 1", + lambda: a[idx2]) + + # This is the same as it used to be. The above two should work like this. + idx3 = np.array([[False]*10]) + assert_raises_regex(IndexError, + "boolean index did not match indexed array along dimension 0; " + "dimension is 3 but corresponding boolean dimension is 1", + lambda: a[idx3]) + + # This used to give ValueError: non-broadcastable operand + a = np.ones((1, 1, 2)) + idx = np.array([[[True], [False]]]) + assert_raises_regex(IndexError, + "boolean index did not match indexed array along dimension 1; " + "dimension is 1 but corresponding boolean dimension is 2", + lambda: a[idx]) + + +class TestArrayToIndexDeprecation: + """Creating an index from a non-0-D array is an error. + + """ + def test_array_to_index_error(self): + # Only 0-D arrays can be converted to an index, so all of these raise. + a = np.array([[[1]]]) + + assert_raises(TypeError, operator.index, np.array([1])) + assert_raises(TypeError, np.reshape, a, (a, -1)) + assert_raises(TypeError, np.take, a, [0], a) + + +class TestNonIntegerArrayLike: + """Tests that array-likes are only valid indices if they can safely + be cast to integer. + + For instance, lists give IndexError when they cannot be safely cast to + an integer. + + """ + def test_basic(self): + a = np.arange(10) + + assert_raises(IndexError, a.__getitem__, [0.5, 1.5]) + assert_raises(IndexError, a.__getitem__, (['1', '2'],)) + + # The following is valid + a.__getitem__([]) + + +class TestMultipleEllipsisError: + """An index can only have a single ellipsis.
+ + """ + def test_basic(self): + a = np.arange(10) + assert_raises(IndexError, lambda: a[..., ...]) + assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 2,)) + assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 3,)) + + +class TestCApiAccess: + def test_getitem(self): + subscript = functools.partial(array_indexing, 0) + + # 0-d arrays don't work: + assert_raises(IndexError, subscript, np.ones(()), 0) + # Out of bound values: + assert_raises(IndexError, subscript, np.ones(10), 11) + assert_raises(IndexError, subscript, np.ones(10), -11) + assert_raises(IndexError, subscript, np.ones((10, 10)), 11) + assert_raises(IndexError, subscript, np.ones((10, 10)), -11) + + a = np.arange(10) + assert_array_equal(a[4], subscript(a, 4)) + a = a.reshape(5, 2) + assert_array_equal(a[-4], subscript(a, -4)) + + def test_setitem(self): + assign = functools.partial(array_indexing, 1) + + # Deletion is impossible: + assert_raises(ValueError, assign, np.ones(10), 0) + # 0-d arrays don't work: + assert_raises(IndexError, assign, np.ones(()), 0, 0) + # Out of bound values: + assert_raises(IndexError, assign, np.ones(10), 11, 0) + assert_raises(IndexError, assign, np.ones(10), -11, 0) + assert_raises(IndexError, assign, np.ones((10, 10)), 11, 0) + assert_raises(IndexError, assign, np.ones((10, 10)), -11, 0) + + a = np.arange(10) + assign(a, 4, 10) + assert_(a[4] == 10) + + a = a.reshape(5, 2) + assign(a, 4, 10) + assert_array_equal(a[-1], [10, 10]) diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_item_selection.py b/MLPY/Lib/site-packages/numpy/core/tests/test_item_selection.py new file mode 100644 index 0000000000000000000000000000000000000000..34c00df2635645fc73dfe69f60c1a79245b75c22 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/test_item_selection.py @@ -0,0 +1,86 @@ +import sys + +import numpy as np +from numpy.testing import ( + assert_, assert_raises, assert_array_equal, HAS_REFCOUNT + ) + + +class TestTake: + def test_simple(self): + a = [[1, 2], [3, 4]] + a_str = [[b'1', b'2'], [b'3', b'4']] + modes = ['raise', 'wrap', 'clip'] + indices = [-1, 4] + index_arrays = [np.empty(0, dtype=np.intp), + np.empty(tuple(), dtype=np.intp), + np.empty((1, 1), dtype=np.intp)] + real_indices = {'raise': {-1: 1, 4: IndexError}, + 'wrap': {-1: 1, 4: 0}, + 'clip': {-1: 0, 4: 1}} + # Currently all types but object, use the same function generation. + # So it should not be necessary to test all. However test also a non + # refcounted struct on top of object, which has a size that hits the + # default (non-specialized) path. 
+ types = int, object, np.dtype([('', 'i2', 3)]) + for t in types: + # ta works, even if the array may be odd if buffer interface is used + ta = np.array(a if np.issubdtype(t, np.number) else a_str, dtype=t) + tresult = list(ta.T.copy()) + for index_array in index_arrays: + if index_array.size != 0: + tresult[0].shape = (2,) + index_array.shape + tresult[1].shape = (2,) + index_array.shape + for mode in modes: + for index in indices: + real_index = real_indices[mode][index] + if real_index is IndexError and index_array.size != 0: + index_array.put(0, index) + assert_raises(IndexError, ta.take, index_array, + mode=mode, axis=1) + elif index_array.size != 0: + index_array.put(0, index) + res = ta.take(index_array, mode=mode, axis=1) + assert_array_equal(res, tresult[real_index]) + else: + res = ta.take(index_array, mode=mode, axis=1) + assert_(res.shape == (2,) + index_array.shape) + + def test_refcounting(self): + objects = [object() for i in range(10)] + for mode in ('raise', 'clip', 'wrap'): + a = np.array(objects) + b = np.array([2, 2, 4, 5, 3, 5]) + a.take(b, out=a[:6], mode=mode) + del a + if HAS_REFCOUNT: + assert_(all(sys.getrefcount(o) == 3 for o in objects)) + # not contiguous, example: + a = np.array(objects * 2)[::2] + a.take(b, out=a[:6], mode=mode) + del a + if HAS_REFCOUNT: + assert_(all(sys.getrefcount(o) == 3 for o in objects)) + + def test_unicode_mode(self): + d = np.arange(10) + k = b'\xc3\xa4'.decode("UTF8") + assert_raises(ValueError, d.take, 5, mode=k) + + def test_empty_partition(self): + # In reference to github issue #6530 + a_original = np.array([0, 2, 4, 6, 8, 10]) + a = a_original.copy() + + # An empty partition should be a successful no-op + a.partition(np.array([], dtype=np.int16)) + + assert_array_equal(a, a_original) + + def test_empty_argpartition(self): + # In reference to github issue #6530 + a = np.array([0, 2, 4, 6, 8, 10]) + a = a.argpartition(np.array([], dtype=np.int16)) + + b = np.array([0, 1, 2, 3, 4, 5]) + assert_array_equal(a, b) diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_longdouble.py b/MLPY/Lib/site-packages/numpy/core/tests/test_longdouble.py new file mode 100644 index 0000000000000000000000000000000000000000..9038c14baf42cc714714a8d87e01f45a7fe7a61b --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/test_longdouble.py @@ -0,0 +1,369 @@ +import warnings +import pytest + +import numpy as np +from numpy.testing import ( + assert_, assert_equal, assert_raises, assert_warns, assert_array_equal, + temppath, + ) +from numpy.core.tests._locales import CommaDecimalPointLocale + +LD_INFO = np.finfo(np.longdouble) +longdouble_longer_than_double = (LD_INFO.eps < np.finfo(np.double).eps) + + +_o = 1 + LD_INFO.eps +string_to_longdouble_inaccurate = (_o != np.longdouble(repr(_o))) +del _o + + +def test_scalar_extraction(): + """Confirm that extracting a value doesn't convert to python float""" + o = 1 + LD_INFO.eps + a = np.array([o, o, o]) + assert_equal(a[1], o) + + +# Conversions string -> long double + +# 0.1 not exactly representable in base 2 floating point. +repr_precision = len(repr(np.longdouble(0.1))) +# +2 from macro block starting around line 842 in scalartypes.c.src. +@pytest.mark.skipif(LD_INFO.precision + 2 >= repr_precision, + reason="repr precision not enough to show eps") +def test_repr_roundtrip(): + # We will only see eps in repr if within printing precision. 
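+ # i.e. repr(1 + eps) must print enough digits to distinguish it from + # 1.0 for the round-trip below to be meaningful.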
+ o = 1 + LD_INFO.eps + assert_equal(np.longdouble(repr(o)), o, "repr was %s" % repr(o)) + + +@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") +def test_repr_roundtrip_bytes(): + o = 1 + LD_INFO.eps + assert_equal(np.longdouble(repr(o).encode("ascii")), o) + + +@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") +@pytest.mark.parametrize("strtype", (np.str_, np.bytes_, str, bytes)) +def test_array_and_stringlike_roundtrip(strtype): + """ + Test that string representations of long-double roundtrip both + for array casting and scalar coercion, see also gh-15608. + """ + o = 1 + LD_INFO.eps + + if strtype in (np.bytes_, bytes): + o_str = strtype(repr(o).encode("ascii")) + else: + o_str = strtype(repr(o)) + + # Test that `o` is correctly coerced from the string-like + assert o == np.longdouble(o_str) + + # Test that arrays also roundtrip correctly: + o_strarr = np.asarray([o] * 3, dtype=strtype) + assert (o == o_strarr.astype(np.longdouble)).all() + + # And array coercion and casting to string give the same as scalar repr: + assert (o_strarr == o_str).all() + assert (np.asarray([o] * 3).astype(strtype) == o_str).all() + + +def test_bogus_string(): + assert_raises(ValueError, np.longdouble, "spam") + assert_raises(ValueError, np.longdouble, "1.0 flub") + + +@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") +def test_fromstring(): + o = 1 + LD_INFO.eps + s = (" " + repr(o))*5 + a = np.array([o]*5) + assert_equal(np.fromstring(s, sep=" ", dtype=np.longdouble), a, + err_msg="reading '%s'" % s) + + +def test_fromstring_complex(): + for ctype in ["complex", "cdouble", "cfloat"]: + # Check spacing between separator + assert_equal(np.fromstring("1, 2 , 3 ,4", sep=",", dtype=ctype), + np.array([1., 2., 3., 4.])) + # Real component not specified + assert_equal(np.fromstring("1j, -2j, 3j, 4e1j", sep=",", dtype=ctype), + np.array([1.j, -2.j, 3.j, 40.j])) + # Both components specified + assert_equal(np.fromstring("1+1j,2-2j, -3+3j, -4e1+4j", sep=",", dtype=ctype), + np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. + 4j])) + # Spaces at wrong places + with assert_warns(DeprecationWarning): + assert_equal(np.fromstring("1+2 j,3", dtype=ctype, sep=","), + np.array([1.])) + with assert_warns(DeprecationWarning): + assert_equal(np.fromstring("1+ 2j,3", dtype=ctype, sep=","), + np.array([1.])) + with assert_warns(DeprecationWarning): + assert_equal(np.fromstring("1 +2j,3", dtype=ctype, sep=","), + np.array([1.])) + with assert_warns(DeprecationWarning): + assert_equal(np.fromstring("1+j", dtype=ctype, sep=","), + np.array([1.])) + with assert_warns(DeprecationWarning): + assert_equal(np.fromstring("1+", dtype=ctype, sep=","), + np.array([1.])) + with assert_warns(DeprecationWarning): + assert_equal(np.fromstring("1j+1", dtype=ctype, sep=","), + np.array([1j])) + + +def test_fromstring_bogus(): + with assert_warns(DeprecationWarning): + assert_equal(np.fromstring("1. 2. 3. 
flop 4.", dtype=float, sep=" "), + np.array([1., 2., 3.])) + + +def test_fromstring_empty(): + with assert_warns(DeprecationWarning): + assert_equal(np.fromstring("xxxxx", sep="x"), + np.array([])) + + +def test_fromstring_missing(): + with assert_warns(DeprecationWarning): + assert_equal(np.fromstring("1xx3x4x5x6", sep="x"), + np.array([1])) + + +class TestFileBased: + + ldbl = 1 + LD_INFO.eps + tgt = np.array([ldbl]*5) + out = ''.join([repr(t) + '\n' for t in tgt]) + + def test_fromfile_bogus(self): + with temppath() as path: + with open(path, 'wt') as f: + f.write("1. 2. 3. flop 4.\n") + + with assert_warns(DeprecationWarning): + res = np.fromfile(path, dtype=float, sep=" ") + assert_equal(res, np.array([1., 2., 3.])) + + def test_fromfile_complex(self): + for ctype in ["complex", "cdouble", "cfloat"]: + # Check spacing between separator and only real component specified + with temppath() as path: + with open(path, 'wt') as f: + f.write("1, 2 , 3 ,4\n") + + res = np.fromfile(path, dtype=ctype, sep=",") + assert_equal(res, np.array([1., 2., 3., 4.])) + + # Real component not specified + with temppath() as path: + with open(path, 'wt') as f: + f.write("1j, -2j, 3j, 4e1j\n") + + res = np.fromfile(path, dtype=ctype, sep=",") + assert_equal(res, np.array([1.j, -2.j, 3.j, 40.j])) + + # Both components specified + with temppath() as path: + with open(path, 'wt') as f: + f.write("1+1j,2-2j, -3+3j, -4e1+4j\n") + + res = np.fromfile(path, dtype=ctype, sep=",") + assert_equal(res, np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. + 4j])) + + # Spaces at wrong places + with temppath() as path: + with open(path, 'wt') as f: + f.write("1+2 j,3\n") + + with assert_warns(DeprecationWarning): + res = np.fromfile(path, dtype=ctype, sep=",") + assert_equal(res, np.array([1.])) + + # Spaces at wrong places + with temppath() as path: + with open(path, 'wt') as f: + f.write("1+ 2j,3\n") + + with assert_warns(DeprecationWarning): + res = np.fromfile(path, dtype=ctype, sep=",") + assert_equal(res, np.array([1.])) + + # Spaces at wrong places + with temppath() as path: + with open(path, 'wt') as f: + f.write("1 +2j,3\n") + + with assert_warns(DeprecationWarning): + res = np.fromfile(path, dtype=ctype, sep=",") + assert_equal(res, np.array([1.])) + + # Spaces at wrong places + with temppath() as path: + with open(path, 'wt') as f: + f.write("1+j\n") + + with assert_warns(DeprecationWarning): + res = np.fromfile(path, dtype=ctype, sep=",") + assert_equal(res, np.array([1.])) + + # Spaces at wrong places + with temppath() as path: + with open(path, 'wt') as f: + f.write("1+\n") + + with assert_warns(DeprecationWarning): + res = np.fromfile(path, dtype=ctype, sep=",") + assert_equal(res, np.array([1.])) + + # Spaces at wrong places + with temppath() as path: + with open(path, 'wt') as f: + f.write("1j+1\n") + + with assert_warns(DeprecationWarning): + res = np.fromfile(path, dtype=ctype, sep=",") + assert_equal(res, np.array([1.j])) + + + + @pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") + def test_fromfile(self): + with temppath() as path: + with open(path, 'wt') as f: + f.write(self.out) + res = np.fromfile(path, dtype=np.longdouble, sep="\n") + assert_equal(res, self.tgt) + + @pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") + def test_genfromtxt(self): + with temppath() as path: + with open(path, 'wt') as f: + f.write(self.out) + res = np.genfromtxt(path, dtype=np.longdouble) + assert_equal(res, self.tgt) + + 
@pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") + def test_loadtxt(self): + with temppath() as path: + with open(path, 'wt') as f: + f.write(self.out) + res = np.loadtxt(path, dtype=np.longdouble) + assert_equal(res, self.tgt) + + @pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") + def test_tofile_roundtrip(self): + with temppath() as path: + self.tgt.tofile(path, sep=" ") + res = np.fromfile(path, dtype=np.longdouble, sep=" ") + assert_equal(res, self.tgt) + + +# Conversions long double -> string + + +def test_repr_exact(): + o = 1 + LD_INFO.eps + assert_(repr(o) != '1') + + +@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376") +@pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") +def test_format(): + o = 1 + LD_INFO.eps + assert_("{0:.40g}".format(o) != '1') + + +@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376") +@pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") +def test_percent(): + o = 1 + LD_INFO.eps + assert_("%.40g" % o != '1') + + +@pytest.mark.skipif(longdouble_longer_than_double, + reason="array repr problem") +@pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") +def test_array_repr(): + o = 1 + LD_INFO.eps + a = np.array([o]) + b = np.array([1], dtype=np.longdouble) + if not np.all(a != b): + raise ValueError("precision loss creating arrays") + assert_(repr(a) != repr(b)) + +# +# Locale tests: scalar types formatting should be independent of the locale +# + +class TestCommaDecimalPointLocale(CommaDecimalPointLocale): + + def test_repr_roundtrip_foreign(self): + o = 1.5 + assert_equal(o, np.longdouble(repr(o))) + + def test_fromstring_foreign_repr(self): + f = 1.234 + a = np.fromstring(repr(f), dtype=float, sep=" ") + assert_equal(a[0], f) + + def test_fromstring_best_effort_float(self): + with assert_warns(DeprecationWarning): + assert_equal(np.fromstring("1,234", dtype=float, sep=" "), + np.array([1.])) + + def test_fromstring_best_effort(self): + with assert_warns(DeprecationWarning): + assert_equal(np.fromstring("1,234", dtype=np.longdouble, sep=" "), + np.array([1.])) + + def test_fromstring_foreign(self): + s = "1.234" + a = np.fromstring(s, dtype=np.longdouble, sep=" ") + assert_equal(a[0], np.longdouble(s)) + + def test_fromstring_foreign_sep(self): + a = np.array([1, 2, 3, 4]) + b = np.fromstring("1,2,3,4,", dtype=np.longdouble, sep=",") + assert_array_equal(a, b) + + def test_fromstring_foreign_value(self): + with assert_warns(DeprecationWarning): + b = np.fromstring("1,234", dtype=np.longdouble, sep=" ") + assert_array_equal(b[0], 1) + + +@pytest.mark.parametrize("int_val", [ + # cases discussed in gh-10723 + # and gh-9968 + 2 ** 1024, 0]) +def test_longdouble_from_int(int_val): + # for issue gh-9968 + str_val = str(int_val) + # we'll expect a RuntimeWarning on platforms + # with np.longdouble equivalent to np.double + # for large integer input + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + # can be inf==inf on some platforms + assert np.longdouble(int_val) == np.longdouble(str_val) + # we can't directly compare the int and + # max longdouble value on all platforms + if np.allclose(np.finfo(np.longdouble).max, + np.finfo(np.double).max) and w: + assert w[0].category is RuntimeWarning + +@pytest.mark.parametrize("bool_val", [ + True, False]) +def test_longdouble_from_bool(bool_val): + assert np.longdouble(bool_val) == 
np.longdouble(int(bool_val)) diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_machar.py b/MLPY/Lib/site-packages/numpy/core/tests/test_machar.py new file mode 100644 index 0000000000000000000000000000000000000000..291fa35f2e0c0088b4063b01b40b008f45404b4f --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/test_machar.py @@ -0,0 +1,31 @@ +""" +Test machar. Given recent changes to hardcode type data, we might want to get +rid of both MachAr and this test at some point. + +""" +import pytest +from numpy.core.machar import MachAr +import numpy.core.numerictypes as ntypes +from numpy import errstate, array + + +class TestMachAr: + def _run_machar_highprec(self): + # Instantiate MachAr instance with high enough precision to cause + # underflow + try: + hiprec = ntypes.float96 + MachAr(lambda v: array(v, hiprec)) + except AttributeError: + # no ntypes.float96 on this platform; skip instead of silently passing + pytest.skip("no ntypes.float96 available on this platform") + + def test_underflow(self): + # Regression test for #759: + # instantiating MachAr for dtype = np.float96 raises spurious warning. + with errstate(all='raise'): + try: + self._run_machar_highprec() + except FloatingPointError as e: + msg = "Caught %s exception, should not have been raised." % e + raise AssertionError(msg) diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_mem_overlap.py b/MLPY/Lib/site-packages/numpy/core/tests/test_mem_overlap.py new file mode 100644 index 0000000000000000000000000000000000000000..03f36b88d4ccecb9de9dca94991d3f50bae53df9 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/test_mem_overlap.py @@ -0,0 +1,931 @@ +import itertools +import pytest + +import numpy as np +from numpy.core._multiarray_tests import solve_diophantine, internal_overlap +from numpy.core import _umath_tests +from numpy.lib.stride_tricks import as_strided +from numpy.testing import ( + assert_, assert_raises, assert_equal, assert_array_equal + ) + + +ndims = 2 +size = 10 +shape = tuple([size] * ndims) + +MAY_SHARE_BOUNDS = 0 +MAY_SHARE_EXACT = -1 + + +def _indices_for_nelems(nelems): + """Returns slices of length nelems, from start onwards, in direction sign.""" + + if nelems == 0: + return [size // 2] # int index + + res = [] + for step in (1, 2): + for sign in (-1, 1): + start = size // 2 - nelems * step * sign // 2 + stop = start + nelems * step * sign + res.append(slice(start, stop, step * sign)) + + return res + + +def _indices_for_axis(): + """Returns (src, dst) pairs of indices.""" + + res = [] + for nelems in (0, 2, 3): + ind = _indices_for_nelems(nelems) + res.extend(itertools.product(ind, ind)) # all assignments of size "nelems" + + return res + + +def _indices(ndims): + """Returns ((axis0_src, axis0_dst), (axis1_src, axis1_dst), ... ) index pairs.""" + + ind = _indices_for_axis() + return itertools.product(ind, repeat=ndims) + + +def _check_assignment(srcidx, dstidx): + """Check assignment arr[dstidx] = arr[srcidx] works.""" + + arr = np.arange(np.product(shape)).reshape(shape) + + cpy = arr.copy() + + cpy[dstidx] = arr[srcidx] + arr[dstidx] = arr[srcidx] + + assert_(np.all(arr == cpy), + 'assigning arr[%s] = arr[%s]' % (dstidx, srcidx)) + + +def test_overlapping_assignments(): + # Test automatically generated assignments which overlap in memory. 
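+ # (Added clarification) _indices yields, per axis, every (src, dst) pair + # built from _indices_for_axis: equal element counts (0, 2 or 3), unit and + # non-unit steps, in both directions, so the generated assignments cover + # forward- and backward-overlapping copies; _check_assignment then compares + # each in-place assignment against one made through an explicit copy.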
+ + inds = _indices(ndims) + + for ind in inds: + srcidx = tuple([a[0] for a in ind]) + dstidx = tuple([a[1] for a in ind]) + + _check_assignment(srcidx, dstidx) + + +@pytest.mark.slow +def test_diophantine_fuzz(): + # Fuzz test the diophantine solver + rng = np.random.RandomState(1234) + + max_int = np.iinfo(np.intp).max + + for ndim in range(10): + feasible_count = 0 + infeasible_count = 0 + + min_count = 500//(ndim + 1) + + while min(feasible_count, infeasible_count) < min_count: + # Ensure big and small integer problems + A_max = 1 + rng.randint(0, 11, dtype=np.intp)**6 + U_max = rng.randint(0, 11, dtype=np.intp)**6 + + A_max = min(max_int, A_max) + U_max = min(max_int-1, U_max) + + A = tuple(int(rng.randint(1, A_max+1, dtype=np.intp)) + for j in range(ndim)) + U = tuple(int(rng.randint(0, U_max+2, dtype=np.intp)) + for j in range(ndim)) + + b_ub = min(max_int-2, sum(a*ub for a, ub in zip(A, U))) + b = rng.randint(-1, b_ub+2, dtype=np.intp) + + if ndim == 0 and feasible_count < min_count: + b = 0 + + X = solve_diophantine(A, U, b) + + if X is None: + # Check the simplified decision problem agrees + X_simplified = solve_diophantine(A, U, b, simplify=1) + assert_(X_simplified is None, (A, U, b, X_simplified)) + + # Check no solution exists (provided the problem is + # small enough so that brute force checking doesn't + # take too long) + ranges = tuple(range(0, a*ub+1, a) for a, ub in zip(A, U)) + + size = 1 + for r in ranges: + size *= len(r) + if size < 100000: + assert_(not any(sum(w) == b for w in itertools.product(*ranges))) + infeasible_count += 1 + else: + # Check the simplified decision problem agrees + X_simplified = solve_diophantine(A, U, b, simplify=1) + assert_(X_simplified is not None, (A, U, b, X_simplified)) + + # Check validity + assert_(sum(a*x for a, x in zip(A, X)) == b) + assert_(all(0 <= x <= ub for x, ub in zip(X, U))) + feasible_count += 1 + + +def test_diophantine_overflow(): + # Smoke test integer overflow detection + max_intp = np.iinfo(np.intp).max + max_int64 = np.iinfo(np.int64).max + + if max_int64 <= max_intp: + # Check that the algorithm works internally in 128-bit; + # solving this problem requires large intermediate numbers + A = (max_int64//2, max_int64//2 - 10) + U = (max_int64//2, max_int64//2 - 10) + b = 2*(max_int64//2) - 10 + + assert_equal(solve_diophantine(A, U, b), (1, 1)) + + +def check_may_share_memory_exact(a, b): + got = np.may_share_memory(a, b, max_work=MAY_SHARE_EXACT) + + assert_equal(np.may_share_memory(a, b), + np.may_share_memory(a, b, max_work=MAY_SHARE_BOUNDS)) + + a.fill(0) + b.fill(0) + a.fill(1) + exact = b.any() + + err_msg = "" + if got != exact: + err_msg = " " + "\n ".join([ + "base_a - base_b = %r" % (a.__array_interface__['data'][0] - b.__array_interface__['data'][0],), + "shape_a = %r" % (a.shape,), + "shape_b = %r" % (b.shape,), + "strides_a = %r" % (a.strides,), + "strides_b = %r" % (b.strides,), + "size_a = %r" % (a.size,), + "size_b = %r" % (b.size,) + ]) + + assert_equal(got, exact, err_msg=err_msg) + + +def test_may_share_memory_manual(): + # Manual test cases for may_share_memory + + # Base arrays + xs0 = [ + np.zeros([13, 21, 23, 22], dtype=np.int8), + np.zeros([13, 21, 23*2, 22], dtype=np.int8)[:,:,::2,:] + ] + + # Generate all negative stride combinations + xs = [] + for x in xs0: + for ss in itertools.product(*(([slice(None), slice(None, None, -1)],)*4)): + xp = x[ss] + xs.append(xp) + + for x in xs: + # The default is a simple extent check + assert_(np.may_share_memory(x[:,0,:], x[:,1,:])) + 
assert_(np.may_share_memory(x[:,0,:], x[:,1,:], max_work=None)) + + # Exact checks + check_may_share_memory_exact(x[:,0,:], x[:,1,:]) + check_may_share_memory_exact(x[:,::7], x[:,3::3]) + + try: + xp = x.ravel() + if xp.flags.owndata: + continue + xp = xp.view(np.int16) + except ValueError: + continue + + # 0-size arrays cannot overlap + check_may_share_memory_exact(x.ravel()[6:6], + xp.reshape(13, 21, 23, 11)[:,::7]) + + # Test itemsize is dealt with + check_may_share_memory_exact(x[:,::7], + xp.reshape(13, 21, 23, 11)) + check_may_share_memory_exact(x[:,::7], + xp.reshape(13, 21, 23, 11)[:,3::3]) + check_may_share_memory_exact(x.ravel()[6:7], + xp.reshape(13, 21, 23, 11)[:,::7]) + + # Check unit size + x = np.zeros([1], dtype=np.int8) + check_may_share_memory_exact(x, x) + check_may_share_memory_exact(x, x.copy()) + + +def iter_random_view_pairs(x, same_steps=True, equal_size=False): + rng = np.random.RandomState(1234) + + if equal_size and same_steps: + raise ValueError() + + def random_slice(n, step): + start = rng.randint(0, n+1, dtype=np.intp) + stop = rng.randint(start, n+1, dtype=np.intp) + if rng.randint(0, 2, dtype=np.intp) == 0: + stop, start = start, stop + step *= -1 + return slice(start, stop, step) + + def random_slice_fixed_size(n, step, size): + start = rng.randint(0, n+1 - size*step) + stop = start + (size-1)*step + 1 + if rng.randint(0, 2) == 0: + stop, start = start-1, stop-1 + if stop < 0: + stop = None + step *= -1 + return slice(start, stop, step) + + # First a few regular views + yield x, x + for j in range(1, 7, 3): + yield x[j:], x[:-j] + yield x[...,j:], x[...,:-j] + + # An array with zero stride internal overlap + strides = list(x.strides) + strides[0] = 0 + xp = as_strided(x, shape=x.shape, strides=strides) + yield x, xp + yield xp, xp + + # An array with non-zero stride internal overlap + strides = list(x.strides) + if strides[0] > 1: + strides[0] = 1 + xp = as_strided(x, shape=x.shape, strides=strides) + yield x, xp + yield xp, xp + + # Then discontiguous views + while True: + steps = tuple(rng.randint(1, 11, dtype=np.intp) + if rng.randint(0, 5, dtype=np.intp) == 0 else 1 + for j in range(x.ndim)) + s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps)) + + t1 = np.arange(x.ndim) + rng.shuffle(t1) + + if equal_size: + t2 = t1 + else: + t2 = np.arange(x.ndim) + rng.shuffle(t2) + + a = x[s1] + + if equal_size: + if a.size == 0: + continue + + steps2 = tuple(rng.randint(1, max(2, p//(1+pa))) + if rng.randint(0, 5) == 0 else 1 + for p, s, pa in zip(x.shape, s1, a.shape)) + s2 = tuple(random_slice_fixed_size(p, s, pa) + for p, s, pa in zip(x.shape, steps2, a.shape)) + elif same_steps: + steps2 = steps + else: + steps2 = tuple(rng.randint(1, 11, dtype=np.intp) + if rng.randint(0, 5, dtype=np.intp) == 0 else 1 + for j in range(x.ndim)) + + if not equal_size: + s2 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps2)) + + a = a.transpose(t1) + b = x[s2].transpose(t2) + + yield a, b + + +def check_may_share_memory_easy_fuzz(get_max_work, same_steps, min_count): + # Check that overlap problems with common strides are solved with + # little work. 
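+ # (Added clarification) max_work bounds the effort spent deciding overlap: + # MAY_SHARE_BOUNDS (0) only compares memory extents and can report false + # positives, while MAY_SHARE_EXACT (-1) requests the exact answer to the + # underlying bounded diophantine problem; np.shares_memory raises + # np.TooHardError when the work bound is exceeded (see + # test_shares_memory_api below).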
+ x = np.zeros([17,34,71,97], dtype=np.int16) + + feasible = 0 + infeasible = 0 + + pair_iter = iter_random_view_pairs(x, same_steps) + + while min(feasible, infeasible) < min_count: + a, b = next(pair_iter) + + bounds_overlap = np.may_share_memory(a, b) + may_share_answer = np.may_share_memory(a, b) + easy_answer = np.may_share_memory(a, b, max_work=get_max_work(a, b)) + exact_answer = np.may_share_memory(a, b, max_work=MAY_SHARE_EXACT) + + if easy_answer != exact_answer: + # assert_equal is slow... + assert_equal(easy_answer, exact_answer) + + if may_share_answer != bounds_overlap: + assert_equal(may_share_answer, bounds_overlap) + + if bounds_overlap: + if exact_answer: + feasible += 1 + else: + infeasible += 1 + + +@pytest.mark.slow +def test_may_share_memory_easy_fuzz(): + # Check that overlap problems with common strides are always + # solved with little work. + + check_may_share_memory_easy_fuzz(get_max_work=lambda a, b: 1, + same_steps=True, + min_count=2000) + + +@pytest.mark.slow +def test_may_share_memory_harder_fuzz(): + # Overlap problems with not necessarily common strides take more + # work. + # + # The work bound below can't be reduced much. Harder problems can + # also exist but not be detected here, as the set of problems + # comes from RNG. + + check_may_share_memory_easy_fuzz(get_max_work=lambda a, b: max(a.size, b.size)//2, + same_steps=False, + min_count=2000) + + +def test_shares_memory_api(): + x = np.zeros([4, 5, 6], dtype=np.int8) + + assert_equal(np.shares_memory(x, x), True) + assert_equal(np.shares_memory(x, x.copy()), False) + + a = x[:,::2,::3] + b = x[:,::3,::2] + assert_equal(np.shares_memory(a, b), True) + assert_equal(np.shares_memory(a, b, max_work=None), True) + assert_raises(np.TooHardError, np.shares_memory, a, b, max_work=1) + + +def test_may_share_memory_bad_max_work(): + x = np.zeros([1]) + assert_raises(OverflowError, np.may_share_memory, x, x, max_work=10**100) + assert_raises(OverflowError, np.shares_memory, x, x, max_work=10**100) + + +def test_internal_overlap_diophantine(): + def check(A, U, exists=None): + X = solve_diophantine(A, U, 0, require_ub_nontrivial=1) + + if exists is None: + exists = (X is not None) + + if X is not None: + assert_(sum(a*x for a, x in zip(A, X)) == sum(a*u//2 for a, u in zip(A, U))) + assert_(all(0 <= x <= u for x, u in zip(X, U))) + assert_(any(x != u//2 for x, u in zip(X, U))) + + if exists: + assert_(X is not None, repr(X)) + else: + assert_(X is None, repr(X)) + + # Smoke tests + check((3, 2), (2*2, 3*2), exists=True) + check((3*2, 2), (15*2, (3-1)*2), exists=False) + + +def test_internal_overlap_slices(): + # Slicing an array never generates internal overlap + + x = np.zeros([17,34,71,97], dtype=np.int16) + + rng = np.random.RandomState(1234) + + def random_slice(n, step): + start = rng.randint(0, n+1, dtype=np.intp) + stop = rng.randint(start, n+1, dtype=np.intp) + if rng.randint(0, 2, dtype=np.intp) == 0: + stop, start = start, stop + step *= -1 + return slice(start, stop, step) + + cases = 0 + min_count = 5000 + + while cases < min_count: + steps = tuple(rng.randint(1, 11, dtype=np.intp) + if rng.randint(0, 5, dtype=np.intp) == 0 else 1 + for j in range(x.ndim)) + t1 = np.arange(x.ndim) + rng.shuffle(t1) + s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps)) + a = x[s1].transpose(t1) + + assert_(not internal_overlap(a)) + cases += 1 + + +def check_internal_overlap(a, manual_expected=None): + got = internal_overlap(a) + + # Brute-force check + m = set() + ranges = tuple(range(n) for n in a.shape) + 
for v in itertools.product(*ranges): + offset = sum(s*w for s, w in zip(a.strides, v)) + if offset in m: + expected = True + break + else: + m.add(offset) + else: + expected = False + + # Compare + if got != expected: + assert_equal(got, expected, err_msg=repr((a.strides, a.shape))) + if manual_expected is not None and expected != manual_expected: + assert_equal(expected, manual_expected) + return got + + +def test_internal_overlap_manual(): + # Stride tricks can construct arrays with internal overlap + + # We don't care about memory bounds, the array is not + # read/write accessed + x = np.arange(1).astype(np.int8) + + # Check low-dimensional special cases + + check_internal_overlap(x, False) # 1-dim + check_internal_overlap(x.reshape([]), False) # 0-dim + + a = as_strided(x, strides=(3, 4), shape=(4, 4)) + check_internal_overlap(a, False) + + a = as_strided(x, strides=(3, 4), shape=(5, 4)) + check_internal_overlap(a, True) + + a = as_strided(x, strides=(0,), shape=(0,)) + check_internal_overlap(a, False) + + a = as_strided(x, strides=(0,), shape=(1,)) + check_internal_overlap(a, False) + + a = as_strided(x, strides=(0,), shape=(2,)) + check_internal_overlap(a, True) + + a = as_strided(x, strides=(0, -9993), shape=(87, 22)) + check_internal_overlap(a, True) + + a = as_strided(x, strides=(0, -9993), shape=(1, 22)) + check_internal_overlap(a, False) + + a = as_strided(x, strides=(0, -9993), shape=(0, 22)) + check_internal_overlap(a, False) + + +def test_internal_overlap_fuzz(): + # Fuzz check; the brute-force check is fairly slow + + x = np.arange(1).astype(np.int8) + + overlap = 0 + no_overlap = 0 + min_count = 100 + + rng = np.random.RandomState(1234) + + while min(overlap, no_overlap) < min_count: + ndim = rng.randint(1, 4, dtype=np.intp) + + strides = tuple(rng.randint(-1000, 1000, dtype=np.intp) + for j in range(ndim)) + shape = tuple(rng.randint(1, 30, dtype=np.intp) + for j in range(ndim)) + + a = as_strided(x, strides=strides, shape=shape) + result = check_internal_overlap(a) + + if result: + overlap += 1 + else: + no_overlap += 1 + + +def test_non_ndarray_inputs(): + # Regression check for gh-5604 + + class MyArray: + def __init__(self, data): + self.data = data + + @property + def __array_interface__(self): + return self.data.__array_interface__ + + class MyArray2: + def __init__(self, data): + self.data = data + + def __array__(self): + return self.data + + for cls in [MyArray, MyArray2]: + x = np.arange(5) + + assert_(np.may_share_memory(cls(x[::2]), x[1::2])) + assert_(not np.shares_memory(cls(x[::2]), x[1::2])) + + assert_(np.shares_memory(cls(x[1::3]), x[::2])) + assert_(np.may_share_memory(cls(x[1::3]), x[::2])) + + +def view_element_first_byte(x): + """Construct an array viewing the first byte of each element of `x`""" + from numpy.lib.stride_tricks import DummyArray + interface = dict(x.__array_interface__) + interface['typestr'] = '|b1' + interface['descr'] = [('', '|b1')] + return np.asarray(DummyArray(interface, x)) + + +def assert_copy_equivalent(operation, args, out, **kwargs): + """ + Check that operation(*args, out=out) produces results + equivalent to out[...] = operation(*args, out=out.copy()) + """ + + kwargs['out'] = out + kwargs2 = dict(kwargs) + kwargs2['out'] = out.copy() + + out_orig = out.copy() + out[...] = operation(*args, **kwargs2) + expected = out.copy() + out[...] 
= out_orig + + got = operation(*args, **kwargs).copy() + + if (got != expected).any(): + assert_equal(got, expected) + + +class TestUFunc: + """ + Test ufunc call memory overlap handling + """ + + def check_unary_fuzz(self, operation, get_out_axis_size, dtype=np.int16, + count=5000): + shapes = [7, 13, 8, 21, 29, 32] + + rng = np.random.RandomState(1234) + + for ndim in range(1, 6): + x = rng.randint(0, 2**16, size=shapes[:ndim]).astype(dtype) + + it = iter_random_view_pairs(x, same_steps=False, equal_size=True) + + min_count = count // (ndim + 1)**2 + + overlapping = 0 + while overlapping < min_count: + a, b = next(it) + + a_orig = a.copy() + b_orig = b.copy() + + if get_out_axis_size is None: + assert_copy_equivalent(operation, [a], out=b) + + if np.shares_memory(a, b): + overlapping += 1 + else: + for axis in itertools.chain(range(ndim), [None]): + a[...] = a_orig + b[...] = b_orig + + # Determine size for reduction axis (None if scalar) + outsize, scalarize = get_out_axis_size(a, b, axis) + if outsize == 'skip': + continue + + # Slice b to get an output array of the correct size + sl = [slice(None)] * ndim + if axis is None: + if outsize is None: + sl = [slice(0, 1)] + [0]*(ndim - 1) + else: + sl = [slice(0, outsize)] + [0]*(ndim - 1) + else: + if outsize is None: + k = b.shape[axis]//2 + if ndim == 1: + sl[axis] = slice(k, k + 1) + else: + sl[axis] = k + else: + assert b.shape[axis] >= outsize + sl[axis] = slice(0, outsize) + b_out = b[tuple(sl)] + + if scalarize: + b_out = b_out.reshape([]) + + if np.shares_memory(a, b_out): + overlapping += 1 + + # Check result + assert_copy_equivalent(operation, [a], out=b_out, axis=axis) + + @pytest.mark.slow + def test_unary_ufunc_call_fuzz(self): + self.check_unary_fuzz(np.invert, None, np.int16) + + @pytest.mark.slow + def test_unary_ufunc_call_complex_fuzz(self): + # Complex typically has a smaller alignment than itemsize + self.check_unary_fuzz(np.negative, None, np.complex128, count=500) + + def test_binary_ufunc_accumulate_fuzz(self): + def get_out_axis_size(a, b, axis): + if axis is None: + if a.ndim == 1: + return a.size, False + else: + return 'skip', False # accumulate doesn't support this + else: + return a.shape[axis], False + + self.check_unary_fuzz(np.add.accumulate, get_out_axis_size, + dtype=np.int16, count=500) + + def test_binary_ufunc_reduce_fuzz(self): + def get_out_axis_size(a, b, axis): + return None, (axis is None or a.ndim == 1) + + self.check_unary_fuzz(np.add.reduce, get_out_axis_size, + dtype=np.int16, count=500) + + def test_binary_ufunc_reduceat_fuzz(self): + def get_out_axis_size(a, b, axis): + if axis is None: + if a.ndim == 1: + return a.size, False + else: + return 'skip', False # reduceat doesn't support this + else: + return a.shape[axis], False + + def do_reduceat(a, out, axis): + if axis is None: + size = len(a) + step = size//len(out) + else: + size = a.shape[axis] + step = a.shape[axis] // out.shape[axis] + idx = np.arange(0, size, step) + return np.add.reduceat(a, idx, out=out, axis=axis) + + self.check_unary_fuzz(do_reduceat, get_out_axis_size, + dtype=np.int16, count=500) + + def test_binary_ufunc_reduceat_manual(self): + def check(ufunc, a, ind, out): + c1 = ufunc.reduceat(a.copy(), ind.copy(), out=out.copy()) + c2 = ufunc.reduceat(a, ind, out=out) + assert_array_equal(c1, c2) + + # Exactly same input/output arrays + a = np.arange(10000, dtype=np.int16) + check(np.add, a, a[::-1].copy(), a) + + # Overlap with index + a = np.arange(10000, dtype=np.int16) + check(np.add, a, a[::-1], a) + + 
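# Added illustration (annotation, not part of upstream numpy): a minimal, + # deterministic case of the property the fuzz tests in this class assert -- + # a ufunc call whose out= overlaps its inputs must match the same call made + # through fresh memory (NumPy buffers internally when it detects overlap). + def test_overlapping_out_example(self): + a = np.arange(10, dtype=np.int16) + expected = a[:-1] + a[:-1] # computed into fresh memory + np.add(a[:-1], a[:-1], out=a[1:]) # out= overlaps both inputs + assert_array_equal(a[1:], expected) + + 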
@pytest.mark.slow + def test_unary_gufunc_fuzz(self): + shapes = [7, 13, 8, 21, 29, 32] + gufunc = _umath_tests.euclidean_pdist + + rng = np.random.RandomState(1234) + + for ndim in range(2, 6): + x = rng.rand(*shapes[:ndim]) + + it = iter_random_view_pairs(x, same_steps=False, equal_size=True) + + min_count = 500 // (ndim + 1)**2 + + overlapping = 0 + while overlapping < min_count: + a, b = next(it) + + if min(a.shape[-2:]) < 2 or min(b.shape[-2:]) < 2 or a.shape[-1] < 2: + continue + + # Ensure the shapes are so that euclidean_pdist is happy + if b.shape[-1] > b.shape[-2]: + b = b[...,0,:] + else: + b = b[...,:,0] + + n = a.shape[-2] + p = n * (n - 1) // 2 + if p <= b.shape[-1] and p > 0: + b = b[...,:p] + else: + n = max(2, int(np.sqrt(b.shape[-1]))//2) + p = n * (n - 1) // 2 + a = a[...,:n,:] + b = b[...,:p] + + # Call + if np.shares_memory(a, b): + overlapping += 1 + + with np.errstate(over='ignore', invalid='ignore'): + assert_copy_equivalent(gufunc, [a], out=b) + + def test_ufunc_at_manual(self): + def check(ufunc, a, ind, b=None): + a0 = a.copy() + if b is None: + ufunc.at(a0, ind.copy()) + c1 = a0.copy() + ufunc.at(a, ind) + c2 = a.copy() + else: + ufunc.at(a0, ind.copy(), b.copy()) + c1 = a0.copy() + ufunc.at(a, ind, b) + c2 = a.copy() + assert_array_equal(c1, c2) + + # Overlap with index + a = np.arange(10000, dtype=np.int16) + check(np.invert, a[::-1], a) + + # Overlap with second data array + a = np.arange(100, dtype=np.int16) + ind = np.arange(0, 100, 2, dtype=np.int16) + check(np.add, a, ind, a[25:75]) + + def test_unary_ufunc_1d_manual(self): + # Exercise ufunc fast-paths (that avoid creation of an `np.nditer`) + + def check(a, b): + a_orig = a.copy() + b_orig = b.copy() + + b0 = b.copy() + c1 = ufunc(a, out=b0) + c2 = ufunc(a, out=b) + assert_array_equal(c1, c2) + + # Trigger "fancy ufunc loop" code path + mask = view_element_first_byte(b).view(np.bool_) + + a[...] = a_orig + b[...] = b_orig + c1 = ufunc(a, out=b.copy(), where=mask.copy()).copy() + + a[...] = a_orig + b[...] = b_orig + c2 = ufunc(a, out=b, where=mask.copy()).copy() + + # Also, mask overlapping with output + a[...] = a_orig + b[...] 
= b_orig + c3 = ufunc(a, out=b, where=mask).copy() + + assert_array_equal(c1, c2) + assert_array_equal(c1, c3) + + dtypes = [np.int8, np.int16, np.int32, np.int64, np.float32, + np.float64, np.complex64, np.complex128] + dtypes = [np.dtype(x) for x in dtypes] + + for dtype in dtypes: + if np.issubdtype(dtype, np.integer): + ufunc = np.invert + else: + ufunc = np.reciprocal + + n = 1000 + k = 10 + indices = [ + np.index_exp[:n], + np.index_exp[k:k+n], + np.index_exp[n-1::-1], + np.index_exp[k+n-1:k-1:-1], + np.index_exp[:2*n:2], + np.index_exp[k:k+2*n:2], + np.index_exp[2*n-1::-2], + np.index_exp[k+2*n-1:k-1:-2], + ] + + for xi, yi in itertools.product(indices, indices): + v = np.arange(1, 1 + n*2 + k, dtype=dtype) + x = v[xi] + y = v[yi] + + with np.errstate(all='ignore'): + check(x, y) + + # Scalar cases + check(x[:1], y) + check(x[-1:], y) + check(x[:1].reshape([]), y) + check(x[-1:].reshape([]), y) + + def test_unary_ufunc_where_same(self): + # Check behavior at wheremask overlap + ufunc = np.invert + + def check(a, out, mask): + c1 = ufunc(a, out=out.copy(), where=mask.copy()) + c2 = ufunc(a, out=out, where=mask) + assert_array_equal(c1, c2) + + # Check behavior with same input and output arrays + x = np.arange(100).astype(np.bool_) + check(x, x, x) + check(x, x.copy(), x) + check(x, x, x.copy()) + + @pytest.mark.slow + def test_binary_ufunc_1d_manual(self): + ufunc = np.add + + def check(a, b, c): + c0 = c.copy() + c1 = ufunc(a, b, out=c0) + c2 = ufunc(a, b, out=c) + assert_array_equal(c1, c2) + + for dtype in [np.int8, np.int16, np.int32, np.int64, + np.float32, np.float64, np.complex64, np.complex128]: + # Check different data dependency orders + + n = 1000 + k = 10 + + indices = [] + for p in [1, 2]: + indices.extend([ + np.index_exp[:p*n:p], + np.index_exp[k:k+p*n:p], + np.index_exp[p*n-1::-p], + np.index_exp[k+p*n-1:k-1:-p], + ]) + + for x, y, z in itertools.product(indices, indices, indices): + v = np.arange(6*n).astype(dtype) + x = v[x] + y = v[y] + z = v[z] + + check(x, y, z) + + # Scalar cases + check(x[:1], y, z) + check(x[-1:], y, z) + check(x[:1].reshape([]), y, z) + check(x[-1:].reshape([]), y, z) + check(x, y[:1], z) + check(x, y[-1:], z) + check(x, y[:1].reshape([]), z) + check(x, y[-1:].reshape([]), z) + + def test_inplace_op_simple_manual(self): + rng = np.random.RandomState(1234) + x = rng.rand(200, 200) # bigger than bufsize + + x += x.T + assert_array_equal(x - x.T, 0) diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_memmap.py b/MLPY/Lib/site-packages/numpy/core/tests/test_memmap.py new file mode 100644 index 0000000000000000000000000000000000000000..d7a919f1a93f32abd82d598172738abaf6823e62 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/test_memmap.py @@ -0,0 +1,215 @@ +import sys +import os +import mmap +import pytest +from pathlib import Path +from tempfile import NamedTemporaryFile, TemporaryFile + +from numpy import ( + memmap, sum, average, product, ndarray, isscalar, add, subtract, multiply) + +from numpy import arange, allclose, asarray +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, suppress_warnings, IS_PYPY, + break_cycles + ) + +class TestMemmap: + def setup(self): + self.tmpfp = NamedTemporaryFile(prefix='mmap') + self.shape = (3, 4) + self.dtype = 'float32' + self.data = arange(12, dtype=self.dtype) + self.data.resize(self.shape) + + def teardown(self): + self.tmpfp.close() + self.data = None + if IS_PYPY: + break_cycles() + break_cycles() + + def test_roundtrip(self): + # Write data to file + fp = 
memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + fp[:] = self.data[:] + del fp # Test __del__ machinery, which handles cleanup + + # Read data back from file + newfp = memmap(self.tmpfp, dtype=self.dtype, mode='r', + shape=self.shape) + assert_(allclose(self.data, newfp)) + assert_array_equal(self.data, newfp) + assert_equal(newfp.flags.writeable, False) + + def test_open_with_filename(self, tmp_path): + tmpname = tmp_path / 'mmap' + fp = memmap(tmpname, dtype=self.dtype, mode='w+', + shape=self.shape) + fp[:] = self.data[:] + del fp + + def test_unnamed_file(self): + with TemporaryFile() as f: + fp = memmap(f, dtype=self.dtype, shape=self.shape) + del fp + + def test_attributes(self): + offset = 1 + mode = "w+" + fp = memmap(self.tmpfp, dtype=self.dtype, mode=mode, + shape=self.shape, offset=offset) + assert_equal(offset, fp.offset) + assert_equal(mode, fp.mode) + del fp + + def test_filename(self, tmp_path): + tmpname = tmp_path / "mmap" + fp = memmap(tmpname, dtype=self.dtype, mode='w+', + shape=self.shape) + abspath = Path(os.path.abspath(tmpname)) + fp[:] = self.data[:] + assert_equal(abspath, fp.filename) + b = fp[:1] + assert_equal(abspath, b.filename) + del b + del fp + + def test_path(self, tmp_path): + tmpname = tmp_path / "mmap" + fp = memmap(Path(tmpname), dtype=self.dtype, mode='w+', + shape=self.shape) + # os.path.realpath does not resolve symlinks on Windows + # see: https://bugs.python.org/issue9949 + # use Path.resolve, just as memmap class does internally + abspath = str(Path(tmpname).resolve()) + fp[:] = self.data[:] + assert_equal(abspath, str(fp.filename.resolve())) + b = fp[:1] + assert_equal(abspath, str(b.filename.resolve())) + del b + del fp + + def test_filename_fileobj(self): + fp = memmap(self.tmpfp, dtype=self.dtype, mode="w+", + shape=self.shape) + assert_equal(fp.filename, self.tmpfp.name) + + @pytest.mark.skipif(sys.platform == 'gnu0', + reason="Known to fail on hurd") + def test_flush(self): + fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + fp[:] = self.data[:] + assert_equal(fp[0], self.data[0]) + fp.flush() + + def test_del(self): + # Make sure a view does not delete the underlying mmap + fp_base = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + fp_base[0] = 5 + fp_view = fp_base[0:1] + assert_equal(fp_view[0], 5) + del fp_view + # Should still be able to access and assign values after + # deleting the view + assert_equal(fp_base[0], 5) + fp_base[0] = 6 + assert_equal(fp_base[0], 6) + + def test_arithmetic_drops_references(self): + fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + tmp = (fp + 10) + if isinstance(tmp, memmap): + assert_(tmp._mmap is not fp._mmap) + + def test_indexing_drops_references(self): + fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + tmp = fp[(1, 2), (2, 3)] + if isinstance(tmp, memmap): + assert_(tmp._mmap is not fp._mmap) + + def test_slicing_keeps_references(self): + fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + assert_(fp[:2, :2]._mmap is fp._mmap) + + def test_view(self): + fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) + new1 = fp.view() + new2 = new1.view() + assert_(new1.base is fp) + assert_(new2.base is fp) + new_array = asarray(fp) + assert_(new_array.base is fp) + + def test_ufunc_return_ndarray(self): + fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) + fp[:] = self.data + + with suppress_warnings() as sup: + sup.filter(FutureWarning, "np.average 
currently does not preserve") + for unary_op in [sum, average, product]: + result = unary_op(fp) + assert_(isscalar(result)) + assert_(result.__class__ is self.data[0, 0].__class__) + + assert_(unary_op(fp, axis=0).__class__ is ndarray) + assert_(unary_op(fp, axis=1).__class__ is ndarray) + + for binary_op in [add, subtract, multiply]: + assert_(binary_op(fp, self.data).__class__ is ndarray) + assert_(binary_op(self.data, fp).__class__ is ndarray) + assert_(binary_op(fp, fp).__class__ is ndarray) + + fp += 1 + assert(fp.__class__ is memmap) + add(fp, 1, out=fp) + assert(fp.__class__ is memmap) + + def test_getitem(self): + fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) + fp[:] = self.data + + assert_(fp[1:, :-1].__class__ is memmap) + # Fancy indexing returns a copy that is not memmapped + assert_(fp[[0, 1]].__class__ is ndarray) + + def test_memmap_subclass(self): + class MemmapSubClass(memmap): + pass + + fp = MemmapSubClass(self.tmpfp, dtype=self.dtype, shape=self.shape) + fp[:] = self.data + + # We keep previous behavior for subclasses of memmap, i.e. the + # ufunc and __getitem__ output is never turned into a ndarray + assert_(sum(fp, axis=0).__class__ is MemmapSubClass) + assert_(sum(fp).__class__ is MemmapSubClass) + assert_(fp[1:, :-1].__class__ is MemmapSubClass) + assert(fp[[0, 1]].__class__ is MemmapSubClass) + + def test_mmap_offset_greater_than_allocation_granularity(self): + size = 5 * mmap.ALLOCATIONGRANULARITY + offset = mmap.ALLOCATIONGRANULARITY + 1 + fp = memmap(self.tmpfp, shape=size, mode='w+', offset=offset) + assert_(fp.offset == offset) + + def test_no_shape(self): + self.tmpfp.write(b'a'*16) + mm = memmap(self.tmpfp, dtype='float64') + assert_equal(mm.shape, (2,)) + + def test_empty_array(self): + # gh-12653 + with pytest.raises(ValueError, match='empty file'): + memmap(self.tmpfp, shape=(0,4), mode='w+') + + self.tmpfp.write(b'\0') + + # ok now the file is not empty + memmap(self.tmpfp, shape=(0,4), mode='w+') diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_multiarray.py b/MLPY/Lib/site-packages/numpy/core/tests/test_multiarray.py new file mode 100644 index 0000000000000000000000000000000000000000..2b2666088e4afe539d6753fe82777c6ce13fb182 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/test_multiarray.py @@ -0,0 +1,8951 @@ +import collections.abc +import tempfile +import sys +import warnings +import operator +import io +import itertools +import functools +import ctypes +import os +import gc +import weakref +import pytest +from contextlib import contextmanager + +from numpy.compat import pickle + +import pathlib +import builtins +from decimal import Decimal + +import numpy as np +import numpy.core._multiarray_tests as _multiarray_tests +from numpy.core._rational_tests import rational +from numpy.testing import ( + assert_, assert_raises, assert_warns, assert_equal, assert_almost_equal, + assert_array_equal, assert_raises_regex, assert_array_almost_equal, + assert_allclose, IS_PYPY, HAS_REFCOUNT, assert_array_less, runstring, + temppath, suppress_warnings, break_cycles, + ) +from numpy.testing._private.utils import _no_tracing +from numpy.core.tests._locales import CommaDecimalPointLocale + +# Need to test an object that does not fully implement math interface +from datetime import timedelta, datetime + + +def _aligned_zeros(shape, dtype=float, order="C", align=None): + """ + Allocate a new ndarray with aligned memory. + + The ndarray is guaranteed *not* aligned to twice the requested alignment. 
+ Eg, if align=4, guarantees it is not aligned to 8. If align=None uses + dtype.alignment.""" + dtype = np.dtype(dtype) + if dtype == np.dtype(object): + # Can't do this, fall back to standard allocation (which + # should always be sufficiently aligned) + if align is not None: + raise ValueError("object array alignment not supported") + return np.zeros(shape, dtype=dtype, order=order) + if align is None: + align = dtype.alignment + if not hasattr(shape, '__len__'): + shape = (shape,) + size = functools.reduce(operator.mul, shape) * dtype.itemsize + buf = np.empty(size + 2*align + 1, np.uint8) + + ptr = buf.__array_interface__['data'][0] + offset = ptr % align + if offset != 0: + offset = align - offset + if (ptr % (2*align)) == 0: + offset += align + + # Note: slices producing 0-size arrays do not necessarily change + # data pointer --- so we use and allocate size+1 + buf = buf[offset:offset+size+1][:-1] + data = np.ndarray(shape, dtype, buf, order=order) + data.fill(0) + return data + + +class TestFlags: + def setup(self): + self.a = np.arange(10) + + def test_writeable(self): + mydict = locals() + self.a.flags.writeable = False + assert_raises(ValueError, runstring, 'self.a[0] = 3', mydict) + assert_raises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict) + self.a.flags.writeable = True + self.a[0] = 5 + self.a[0] = 0 + + def test_writeable_any_base(self): + # Ensure that any base being writeable is sufficient to change flag; + # this is especially interesting for arrays from an array interface. + arr = np.arange(10) + + class subclass(np.ndarray): + pass + + # Create subclass so base will not be collapsed, this is OK to change + view1 = arr.view(subclass) + view2 = view1[...] + arr.flags.writeable = False + view2.flags.writeable = False + view2.flags.writeable = True # Can be set to True again. + + arr = np.arange(10) + + class frominterface: + def __init__(self, arr): + self.arr = arr + self.__array_interface__ = arr.__array_interface__ + + view1 = np.asarray(frominterface) + view2 = view1[...] + view2.flags.writeable = False + view2.flags.writeable = True + + view1.flags.writeable = False + view2.flags.writeable = False + with assert_raises(ValueError): + # Must assume not writeable, since only base is not: + view2.flags.writeable = True + + def test_writeable_from_readonly(self): + # gh-9440 - make sure fromstring, from buffer on readonly buffers + # set writeable False + data = b'\x00' * 100 + vals = np.frombuffer(data, 'B') + assert_raises(ValueError, vals.setflags, write=True) + types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] ) + values = np.core.records.fromstring(data, types) + vals = values['vals'] + assert_raises(ValueError, vals.setflags, write=True) + + def test_writeable_from_buffer(self): + data = bytearray(b'\x00' * 100) + vals = np.frombuffer(data, 'B') + assert_(vals.flags.writeable) + vals.setflags(write=False) + assert_(vals.flags.writeable is False) + vals.setflags(write=True) + assert_(vals.flags.writeable) + types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] ) + values = np.core.records.fromstring(data, types) + vals = values['vals'] + assert_(vals.flags.writeable) + vals.setflags(write=False) + assert_(vals.flags.writeable is False) + vals.setflags(write=True) + assert_(vals.flags.writeable) + + @pytest.mark.skipif(IS_PYPY, reason="PyPy always copies") + def test_writeable_pickle(self): + import pickle + # Small arrays will be copied without setting base. + # See condition for using PyArray_SetBaseObject in + # array_setstate. 
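+ # (Added clarification) arange(1000) is above that small-array threshold, + # so unpickling keeps the pickled bytes object as the array's base rather + # than copying -- which is what the isinstance(vals.base, bytes) assertion + # below checks.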
+ a = np.arange(1000) + for v in range(pickle.HIGHEST_PROTOCOL): + vals = pickle.loads(pickle.dumps(a, v)) + assert_(vals.flags.writeable) + assert_(isinstance(vals.base, bytes)) + + def test_writeable_from_c_data(self): + # Test that the writeable flag can be changed for an array wrapping + # low level C-data, but not owning its data. + # Also see that this is deprecated to change from python. + from numpy.core._multiarray_tests import get_c_wrapping_array + + arr_writeable = get_c_wrapping_array(True) + assert not arr_writeable.flags.owndata + assert arr_writeable.flags.writeable + view = arr_writeable[...] + + # Toggling the writeable flag works on the view: + view.flags.writeable = False + assert not view.flags.writeable + view.flags.writeable = True + assert view.flags.writeable + # Flag can be unset on the arr_writeable: + arr_writeable.flags.writeable = False + + arr_readonly = get_c_wrapping_array(False) + assert not arr_readonly.flags.owndata + assert not arr_readonly.flags.writeable + + for arr in [arr_writeable, arr_readonly]: + view = arr[...] + view.flags.writeable = False # make sure it is readonly + arr.flags.writeable = False + assert not arr.flags.writeable + + with assert_raises(ValueError): + view.flags.writeable = True + + with warnings.catch_warnings(): + warnings.simplefilter("error", DeprecationWarning) + with assert_raises(DeprecationWarning): + arr.flags.writeable = True + + with assert_warns(DeprecationWarning): + arr.flags.writeable = True + + def test_warnonwrite(self): + a = np.arange(10) + a.flags._warn_on_write = True + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always') + a[1] = 10 + a[2] = 10 + # only warn once + assert_(len(w) == 1) + + @pytest.mark.parametrize(["flag", "flag_value", "writeable"], + [("writeable", True, True), + # Delete _warn_on_write after deprecation and simplify + # the parameterization: + ("_warn_on_write", True, False), + ("writeable", False, False)]) + def test_readonly_flag_protocols(self, flag, flag_value, writeable): + a = np.arange(10) + setattr(a.flags, flag, flag_value) + + class MyArr(): + __array_struct__ = a.__array_struct__ + + assert memoryview(a).readonly is not writeable + assert a.__array_interface__['data'][1] is not writeable + assert np.asarray(MyArr()).flags.writeable is writeable + + def test_otherflags(self): + assert_equal(self.a.flags.carray, True) + assert_equal(self.a.flags['C'], True) + assert_equal(self.a.flags.farray, False) + assert_equal(self.a.flags.behaved, True) + assert_equal(self.a.flags.fnc, False) + assert_equal(self.a.flags.forc, True) + assert_equal(self.a.flags.owndata, True) + assert_equal(self.a.flags.writeable, True) + assert_equal(self.a.flags.aligned, True) + with assert_warns(DeprecationWarning): + assert_equal(self.a.flags.updateifcopy, False) + with assert_warns(DeprecationWarning): + assert_equal(self.a.flags['U'], False) + assert_equal(self.a.flags['UPDATEIFCOPY'], False) + assert_equal(self.a.flags.writebackifcopy, False) + assert_equal(self.a.flags['X'], False) + assert_equal(self.a.flags['WRITEBACKIFCOPY'], False) + + def test_string_align(self): + a = np.zeros(4, dtype=np.dtype('|S4')) + assert_(a.flags.aligned) + # not power of two are accessed byte-wise and thus considered aligned + a = np.zeros(5, dtype=np.dtype('|S4')) + assert_(a.flags.aligned) + + def test_void_align(self): + a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")])) + assert_(a.flags.aligned) + + +class TestHash: + # see #3793 + def test_int(self): + for st, ut, s in 
[(np.int8, np.uint8, 8), + (np.int16, np.uint16, 16), + (np.int32, np.uint32, 32), + (np.int64, np.uint64, 64)]: + for i in range(1, s): + assert_equal(hash(st(-2**i)), hash(-2**i), + err_msg="%r: -2**%d" % (st, i)) + assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)), + err_msg="%r: 2**%d" % (st, i - 1)) + assert_equal(hash(st(2**i - 1)), hash(2**i - 1), + err_msg="%r: 2**%d - 1" % (st, i)) + + i = max(i - 1, 1) + assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)), + err_msg="%r: 2**%d" % (ut, i - 1)) + assert_equal(hash(ut(2**i - 1)), hash(2**i - 1), + err_msg="%r: 2**%d - 1" % (ut, i)) + + +class TestAttributes: + def setup(self): + self.one = np.arange(10) + self.two = np.arange(20).reshape(4, 5) + self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6) + + def test_attributes(self): + assert_equal(self.one.shape, (10,)) + assert_equal(self.two.shape, (4, 5)) + assert_equal(self.three.shape, (2, 5, 6)) + self.three.shape = (10, 3, 2) + assert_equal(self.three.shape, (10, 3, 2)) + self.three.shape = (2, 5, 6) + assert_equal(self.one.strides, (self.one.itemsize,)) + num = self.two.itemsize + assert_equal(self.two.strides, (5*num, num)) + num = self.three.itemsize + assert_equal(self.three.strides, (30*num, 6*num, num)) + assert_equal(self.one.ndim, 1) + assert_equal(self.two.ndim, 2) + assert_equal(self.three.ndim, 3) + num = self.two.itemsize + assert_equal(self.two.size, 20) + assert_equal(self.two.nbytes, 20*num) + assert_equal(self.two.itemsize, self.two.dtype.itemsize) + assert_equal(self.two.base, np.arange(20)) + + def test_dtypeattr(self): + assert_equal(self.one.dtype, np.dtype(np.int_)) + assert_equal(self.three.dtype, np.dtype(np.float_)) + assert_equal(self.one.dtype.char, 'l') + assert_equal(self.three.dtype.char, 'd') + assert_(self.three.dtype.str[0] in '<>') + assert_equal(self.one.dtype.str[1], 'i') + assert_equal(self.three.dtype.str[1], 'f') + + def test_int_subclassing(self): + # Regression test for https://github.com/numpy/numpy/pull/3526 + + numpy_int = np.int_(0) + + # int_ doesn't inherit from Python int, because it's not fixed-width + assert_(not isinstance(numpy_int, int)) + + def test_stridesattr(self): + x = self.one + + def make_array(size, offset, strides): + return np.ndarray(size, buffer=x, dtype=int, + offset=offset*x.itemsize, + strides=strides*x.itemsize) + + assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1])) + assert_raises(ValueError, make_array, 4, 4, -2) + assert_raises(ValueError, make_array, 4, 2, -1) + assert_raises(ValueError, make_array, 8, 3, 1) + assert_equal(make_array(8, 3, 0), np.array([3]*8)) + # Check behavior reported in gh-2503: + assert_raises(ValueError, make_array, (2, 3), 5, np.array([-2, -3])) + make_array(0, 0, 10) + + def test_set_stridesattr(self): + x = self.one + + def make_array(size, offset, strides): + try: + r = np.ndarray([size], dtype=int, buffer=x, + offset=offset*x.itemsize) + except Exception as e: + raise RuntimeError(e) + r.strides = strides = strides*x.itemsize + return r + + assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1])) + assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9])) + assert_raises(ValueError, make_array, 4, 4, -2) + assert_raises(ValueError, make_array, 4, 2, -1) + assert_raises(RuntimeError, make_array, 8, 3, 1) + # Check that the true extent of the array is used. + # Test relies on as_strided base not exposing a buffer. 
+ x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0)) + + def set_strides(arr, strides): + arr.strides = strides + + assert_raises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize)) + + # Test for offset calculations: + x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1], + shape=(10,), strides=(-1,)) + assert_raises(ValueError, set_strides, x[::-1], -1) + a = x[::-1] + a.strides = 1 + a[::2].strides = 2 + + # test 0d + arr_0d = np.array(0) + arr_0d.strides = () + assert_raises(TypeError, set_strides, arr_0d, None) + + def test_fill(self): + for t in "?bhilqpBHILQPfdgFDGO": + x = np.empty((3, 2, 1), t) + y = np.empty((3, 2, 1), t) + x.fill(1) + y[...] = 1 + assert_equal(x, y) + + def test_fill_max_uint64(self): + x = np.empty((3, 2, 1), dtype=np.uint64) + y = np.empty((3, 2, 1), dtype=np.uint64) + value = 2**64 - 1 + y[...] = value + x.fill(value) + assert_array_equal(x, y) + + def test_fill_struct_array(self): + # Filling from a scalar + x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8') + x.fill(x[0]) + assert_equal(x['f1'][1], x['f1'][0]) + # Filling from a tuple that can be converted + # to a scalar + x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')]) + x.fill((3.5, -2)) + assert_array_equal(x['a'], [3.5, 3.5]) + assert_array_equal(x['b'], [-2, -2]) + + +class TestArrayConstruction: + def test_array(self): + d = np.ones(6) + r = np.array([d, d]) + assert_equal(r, np.ones((2, 6))) + + d = np.ones(6) + tgt = np.ones((2, 6)) + r = np.array([d, d]) + assert_equal(r, tgt) + tgt[1] = 2 + r = np.array([d, d + 1]) + assert_equal(r, tgt) + + d = np.ones(6) + r = np.array([[d, d]]) + assert_equal(r, np.ones((1, 2, 6))) + + d = np.ones(6) + r = np.array([[d, d], [d, d]]) + assert_equal(r, np.ones((2, 2, 6))) + + d = np.ones((6, 6)) + r = np.array([d, d]) + assert_equal(r, np.ones((2, 6, 6))) + + d = np.ones((6, )) + r = np.array([[d, d + 1], d + 2], dtype=object) + assert_equal(len(r), 2) + assert_equal(r[0], [d, d + 1]) + assert_equal(r[1], d + 2) + + tgt = np.ones((2, 3), dtype=bool) + tgt[0, 2] = False + tgt[1, 0:2] = False + r = np.array([[True, True, False], [False, False, True]]) + assert_equal(r, tgt) + r = np.array([[True, False], [True, False], [False, True]]) + assert_equal(r, tgt.T) + + def test_array_empty(self): + assert_raises(TypeError, np.array) + + def test_array_copy_false(self): + d = np.array([1, 2, 3]) + e = np.array(d, copy=False) + d[1] = 3 + assert_array_equal(e, [1, 3, 3]) + e = np.array(d, copy=False, order='F') + d[1] = 4 + assert_array_equal(e, [1, 4, 3]) + e[2] = 7 + assert_array_equal(d, [1, 4, 7]) + + def test_array_copy_true(self): + d = np.array([[1,2,3], [1, 2, 3]]) + e = np.array(d, copy=True) + d[0, 1] = 3 + e[0, 2] = -7 + assert_array_equal(e, [[1, 2, -7], [1, 2, 3]]) + assert_array_equal(d, [[1, 3, 3], [1, 2, 3]]) + e = np.array(d, copy=True, order='F') + d[0, 1] = 5 + e[0, 2] = 7 + assert_array_equal(e, [[1, 3, 7], [1, 2, 3]]) + assert_array_equal(d, [[1, 5, 3], [1,2,3]]) + + def test_array_cont(self): + d = np.ones(10)[::2] + assert_(np.ascontiguousarray(d).flags.c_contiguous) + assert_(np.ascontiguousarray(d).flags.f_contiguous) + assert_(np.asfortranarray(d).flags.c_contiguous) + assert_(np.asfortranarray(d).flags.f_contiguous) + d = np.ones((10, 10))[::2,::2] + assert_(np.ascontiguousarray(d).flags.c_contiguous) + assert_(np.asfortranarray(d).flags.f_contiguous) + + @pytest.mark.parametrize("func", + [np.array, + np.asarray, + np.asanyarray, + np.ascontiguousarray, + np.asfortranarray]) + def 
test_bad_arguments_error(self, func): + with pytest.raises(TypeError): + func(3, dtype="bad dtype") + with pytest.raises(TypeError): + func() # missing arguments + with pytest.raises(TypeError): + func(1, 2, 3, 4, 5, 6, 7, 8) # too many arguments + + @pytest.mark.parametrize("func", + [np.array, + np.asarray, + np.asanyarray, + np.ascontiguousarray, + np.asfortranarray]) + def test_array_as_keyword(self, func): + # This should likely be made positional only, but do not change + # the name accidentally. + if func is np.array: + func(object=3) + else: + func(a=3) + +class TestAssignment: + def test_assignment_broadcasting(self): + a = np.arange(6).reshape(2, 3) + + # Broadcasting the input to the output + a[...] = np.arange(3) + assert_equal(a, [[0, 1, 2], [0, 1, 2]]) + a[...] = np.arange(2).reshape(2, 1) + assert_equal(a, [[0, 0, 0], [1, 1, 1]]) + + # For compatibility with <= 1.5, a limited version of broadcasting + # the output to the input. + # + # This behavior is inconsistent with NumPy broadcasting + # in general, because it only uses one of the two broadcasting + # rules (adding a new "1" dimension to the left of the shape), + # applied to the output instead of an input. In NumPy 2.0, this kind + # of broadcasting assignment will likely be disallowed. + a[...] = np.arange(6)[::-1].reshape(1, 2, 3) + assert_equal(a, [[5, 4, 3], [2, 1, 0]]) + # The other type of broadcasting would require a reduction operation. + + def assign(a, b): + a[...] = b + + assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3)) + + def test_assignment_errors(self): + # Address issue #2276 + class C: + pass + a = np.zeros(1) + + def assign(v): + a[0] = v + + assert_raises((AttributeError, TypeError), assign, C()) + assert_raises(ValueError, assign, [1]) + + def test_unicode_assignment(self): + # gh-5049 + from numpy.core.numeric import set_string_function + + @contextmanager + def inject_str(s): + """ replace ndarray.__str__ temporarily """ + set_string_function(lambda x: s, repr=False) + try: + yield + finally: + set_string_function(None, repr=False) + + a1d = np.array([u'test']) + a0d = np.array(u'done') + with inject_str(u'bad'): + a1d[0] = a0d # previously this would invoke __str__ + assert_equal(a1d[0], u'done') + + # this would crash for the same reason + np.array([np.array(u'\xe5\xe4\xf6')]) + + def test_stringlike_empty_list(self): + # gh-8902 + u = np.array([u'done']) + b = np.array([b'done']) + + class bad_sequence: + def __getitem__(self): pass + def __len__(self): raise RuntimeError + + assert_raises(ValueError, operator.setitem, u, 0, []) + assert_raises(ValueError, operator.setitem, b, 0, []) + + assert_raises(ValueError, operator.setitem, u, 0, bad_sequence()) + assert_raises(ValueError, operator.setitem, b, 0, bad_sequence()) + + def test_longdouble_assignment(self): + # only relevant if longdouble is larger than float + # we're looking for loss of precision + + for dtype in (np.longdouble, np.longcomplex): + # gh-8902 + tinyb = np.nextafter(np.longdouble(0), 1).astype(dtype) + tinya = np.nextafter(np.longdouble(0), -1).astype(dtype) + + # construction + tiny1d = np.array([tinya]) + assert_equal(tiny1d[0], tinya) + + # scalar = scalar + tiny1d[0] = tinyb + assert_equal(tiny1d[0], tinyb) + + # 0d = scalar + tiny1d[0, ...] = tinya + assert_equal(tiny1d[0], tinya) + + # 0d = 0d + tiny1d[0, ...] = tinyb[...] + assert_equal(tiny1d[0], tinyb) + + # scalar = 0d + tiny1d[0] = tinyb[...] 
+ assert_equal(tiny1d[0], tinyb) + + arr = np.array([np.array(tinya)]) + assert_equal(arr[0], tinya) + + def test_cast_to_string(self): + # cast to str should do "str(scalar)", not "str(scalar.item())" + # Example: In python2, str(float) is truncated, so we want to avoid + # str(np.float64(...).item()) as this would incorrectly truncate. + a = np.zeros(1, dtype='S20') + a[:] = np.array(['1.12345678901234567890'], dtype='f8') + assert_equal(a[0], b"1.1234567890123457") + + +class TestDtypedescr: + def test_construction(self): + d1 = np.dtype('i4') + assert_equal(d1, np.dtype(np.int32)) + d2 = np.dtype('f8') + assert_equal(d2, np.dtype(np.float64)) + + def test_byteorders(self): + assert_(np.dtype('<i4') != np.dtype('>i4')) + assert_(np.dtype([('a', '<i4')]) != np.dtype([('a', '>i4')])) + + def test_structured_non_void(self): + fields = [('a', '<i2'), ('b', '<i2')] + dt_int = np.dtype(('i4', fields)) + assert_equal(str(dt_int), "(numpy.int32, [('a', '<i2'), ('b', '<i2')])") + + # gh-9821 + arr_int = np.zeros(4, dt_int) + assert_equal(repr(arr_int), + "array([0, 0, 0, 0], dtype=(numpy.int32, [('a', '<i2'), ('b', '<i2')]))") + + +class TestStructured: + def test_subarray_comparison(self): + # Check that structured arrays that are different only in + # byte-order work + a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')]) + b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')]) + assert_equal(a == b, [False, True]) + + def test_casting(self): + # Check that casting a structured array to change its byte order + # works + a = np.array([(1,)], dtype=[('a', '<i4')]) + assert_(np.can_cast(a.dtype, [('a', '>i4')], + casting='unsafe')) + b = a.astype([('a', '>i4')]) + assert_equal(b, a.byteswap().newbyteorder()) + assert_equal(a['a'][0], b['a'][0]) + + # Check that equality comparison works on structured arrays if + # they are 'equiv'-castable + a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')]) + b = np.array([(5, 42), (10, 1)], dtype=[('a', '<i4'), ('b', '>f8')]) + assert_(np.can_cast(a.dtype, b.dtype, casting='equiv')) + assert_equal(a == b, [True, True]) + + # Check that 'equiv' casting can change byte order + assert_(np.can_cast(a.dtype, b.dtype, casting='equiv')) + c = a.astype(b.dtype, casting='equiv') + assert_equal(a == c, [True, True]) + + # Check that 'safe' casting can change byte order and up-cast + # fields + t = [('a', '<i8'), ('b', '>f8')] + assert_(np.can_cast(a.dtype, t, casting='safe')) + c = a.astype(t, casting='safe') + assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)), + [True, True]) + + # Check that 'same_kind' casting can change byte order and + # change field widths within a "kind" + t = [('a', '<i4'), ('b', '>f4')] + assert_(np.can_cast(a.dtype, t, casting='same_kind')) + c = a.astype(t, casting='same_kind') + assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)), + [True, True]) + + # Check that casting fails if the casting rule should fail on + # any of the fields + t = [('a', '>i8'), ('b', '<f4')] + assert_(not np.can_cast(a.dtype, t, casting='safe')) + t = [('a', '>i2'), ('b', '<f8')] + assert_(not np.can_cast(a.dtype, t, casting='equiv')) + t = [('a', '>i8'), ('b', '<i4')] + for casting in ('no', 'equiv', 'safe', 'same_kind'): + assert_(not np.can_cast(a.dtype, t, casting=casting)) + t = [('a', '>i4'), ('b', '>f8')] + assert_(not np.can_cast(a.dtype, t, casting='no')) + + +class TestBool: + def _test_cast_from_flexible(self, dtype): + # empty string -> false + for n in range(3): + v = np.array(b'', (dtype, n)) + assert_equal(bool(v), False) + assert_equal(bool(v[()]), False) + assert_equal(v.astype(bool), False) + assert_(isinstance(v.astype(bool), np.ndarray)) + assert_(v[()].astype(bool) is np.False_) + + # anything else -> true + for n in range(1, 4): + for val in [b'a', b'0', b' ']: + v = np.array(val, (dtype, n)) + assert_equal(bool(v), True) + assert_equal(bool(v[()]), True) + assert_equal(v.astype(bool), True) + assert_(isinstance(v.astype(bool), np.ndarray)) + assert_(v[()].astype(bool) is np.True_) + + def test_cast_from_void(self): + self._test_cast_from_flexible(np.void) + + @pytest.mark.xfail(reason="See gh-9847") + def test_cast_from_unicode(self): + self._test_cast_from_flexible(np.unicode_) + + @pytest.mark.xfail(reason="See gh-9847") + def test_cast_from_bytes(self): + self._test_cast_from_flexible(np.bytes_) + + +class TestZeroSizeFlexible: + @staticmethod + def _zeros(shape, dtype=str): + dtype = np.dtype(dtype) + if dtype == np.void: + return np.zeros(shape, dtype=(dtype, 0)) + + # not constructable directly + dtype = np.dtype([('x', dtype, 0)]) + return np.zeros(shape, 
dtype=dtype)['x'] + + def test_create(self): + zs = self._zeros(10, bytes) + assert_equal(zs.itemsize, 0) + zs = self._zeros(10, np.void) + assert_equal(zs.itemsize, 0) + zs = self._zeros(10, str) + assert_equal(zs.itemsize, 0) + + def _test_sort_partition(self, name, kinds, **kwargs): + # Previously, these would all hang + for dt in [bytes, np.void, str]: + zs = self._zeros(10, dt) + sort_method = getattr(zs, name) + sort_func = getattr(np, name) + for kind in kinds: + sort_method(kind=kind, **kwargs) + sort_func(zs, kind=kind, **kwargs) + + def test_sort(self): + self._test_sort_partition('sort', kinds='qhs') + + def test_argsort(self): + self._test_sort_partition('argsort', kinds='qhs') + + def test_partition(self): + self._test_sort_partition('partition', kinds=['introselect'], kth=2) + + def test_argpartition(self): + self._test_sort_partition('argpartition', kinds=['introselect'], kth=2) + + def test_resize(self): + # previously an error + for dt in [bytes, np.void, str]: + zs = self._zeros(10, dt) + zs.resize(25) + zs.resize((10, 10)) + + def test_view(self): + for dt in [bytes, np.void, str]: + zs = self._zeros(10, dt) + + # viewing as itself should be allowed + assert_equal(zs.view(dt).dtype, np.dtype(dt)) + + # viewing as any non-empty type gives an empty result + assert_equal(zs.view((dt, 1)).shape, (0,)) + + def test_dumps(self): + zs = self._zeros(10, int) + assert_equal(zs, pickle.loads(zs.dumps())) + + def test_pickle(self): + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + for dt in [bytes, np.void, str]: + zs = self._zeros(10, dt) + p = pickle.dumps(zs, protocol=proto) + zs2 = pickle.loads(p) + + assert_equal(zs.dtype, zs2.dtype) + + @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5, + reason="requires pickle protocol 5") + def test_pickle_with_buffercallback(self): + array = np.arange(10) + buffers = [] + bytes_string = pickle.dumps(array, buffer_callback=buffers.append, + protocol=5) + array_from_buffer = pickle.loads(bytes_string, buffers=buffers) + # when using pickle protocol 5 with buffer callbacks, + # array_from_buffer is reconstructed from a buffer holding a view + # to the initial array's data, so modifying an element in array + # should modify it in array_from_buffer too. 
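+ # (Added clarification) buffer_callback/buffers is the out-of-band buffer + # API of pickle protocol 5 (PEP 574): dumps() hands PickleBuffer views to + # the callback instead of serializing the data, and loads() resolves them + # from the buffers argument without copying.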
+ array[0] = -1 + assert array_from_buffer[0] == -1, array_from_buffer[0] + + +class TestMethods: + + sort_kinds = ['quicksort', 'heapsort', 'stable'] + + def test_all_where(self): + a = np.array([[True, False, True], + [False, False, False], + [True, True, True]]) + wh_full = np.array([[True, False, True], + [False, False, False], + [True, False, True]]) + wh_lower = np.array([[False], + [False], + [True]]) + for _ax in [0, None]: + assert_equal(a.all(axis=_ax, where=wh_lower), + np.all(a[wh_lower[:,0],:], axis=_ax)) + assert_equal(np.all(a, axis=_ax, where=wh_lower), + a[wh_lower[:,0],:].all(axis=_ax)) + + assert_equal(a.all(where=wh_full), True) + assert_equal(np.all(a, where=wh_full), True) + assert_equal(a.all(where=False), True) + assert_equal(np.all(a, where=False), True) + + def test_any_where(self): + a = np.array([[True, False, True], + [False, False, False], + [True, True, True]]) + wh_full = np.array([[False, True, False], + [True, True, True], + [False, False, False]]) + wh_middle = np.array([[False], + [True], + [False]]) + for _ax in [0, None]: + assert_equal(a.any(axis=_ax, where=wh_middle), + np.any(a[wh_middle[:,0],:], axis=_ax)) + assert_equal(np.any(a, axis=_ax, where=wh_middle), + a[wh_middle[:,0],:].any(axis=_ax)) + assert_equal(a.any(where=wh_full), False) + assert_equal(np.any(a, where=wh_full), False) + assert_equal(a.any(where=False), False) + assert_equal(np.any(a, where=False), False) + + def test_compress(self): + tgt = [[5, 6, 7, 8, 9]] + arr = np.arange(10).reshape(2, 5) + out = arr.compress([0, 1], axis=0) + assert_equal(out, tgt) + + tgt = [[1, 3], [6, 8]] + out = arr.compress([0, 1, 0, 1, 0], axis=1) + assert_equal(out, tgt) + + tgt = [[1], [6]] + arr = np.arange(10).reshape(2, 5) + out = arr.compress([0, 1], axis=1) + assert_equal(out, tgt) + + arr = np.arange(10).reshape(2, 5) + out = arr.compress([0, 1]) + assert_equal(out, 1) + + def test_choose(self): + x = 2*np.ones((3,), dtype=int) + y = 3*np.ones((3,), dtype=int) + x2 = 2*np.ones((2, 3), dtype=int) + y2 = 3*np.ones((2, 3), dtype=int) + ind = np.array([0, 0, 1]) + + A = ind.choose((x, y)) + assert_equal(A, [2, 2, 3]) + + A = ind.choose((x2, y2)) + assert_equal(A, [[2, 2, 3], [2, 2, 3]]) + + A = ind.choose((x, y2)) + assert_equal(A, [[2, 2, 3], [2, 2, 3]]) + + oned = np.ones(1) + # gh-12031, caused SEGFAULT + assert_raises(TypeError, oned.choose,np.void(0), [oned]) + + out = np.array(0) + ret = np.choose(np.array(1), [10, 20, 30], out=out) + assert out is ret + assert_equal(out[()], 20) + + # gh-6272 check overlap on out + x = np.arange(5) + y = np.choose([0,0,0], [x[:3], x[:3], x[:3]], out=x[1:4], mode='wrap') + assert_equal(y, np.array([0, 1, 2])) + + def test_prod(self): + ba = [1, 2, 10, 11, 6, 5, 4] + ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]] + + for ctype in [np.int16, np.uint16, np.int32, np.uint32, + np.float32, np.float64, np.complex64, np.complex128]: + a = np.array(ba, ctype) + a2 = np.array(ba2, ctype) + if ctype in ['1', 'b']: + assert_raises(ArithmeticError, a.prod) + assert_raises(ArithmeticError, a2.prod, axis=1) + else: + assert_equal(a.prod(axis=0), 26400) + assert_array_equal(a2.prod(axis=0), + np.array([50, 36, 84, 180], ctype)) + assert_array_equal(a2.prod(axis=-1), + np.array([24, 1890, 600], ctype)) + + def test_repeat(self): + m = np.array([1, 2, 3, 4, 5, 6]) + m_rect = m.reshape((2, 3)) + + A = m.repeat([1, 3, 2, 1, 1, 2]) + assert_equal(A, [1, 2, 2, 2, 3, + 3, 4, 5, 6, 6]) + + A = m.repeat(2) + assert_equal(A, [1, 1, 2, 2, 3, 3, + 4, 4, 5, 5, 6, 6]) + + A = 
m_rect.repeat([2, 1], axis=0) + assert_equal(A, [[1, 2, 3], + [1, 2, 3], + [4, 5, 6]]) + + A = m_rect.repeat([1, 3, 2], axis=1) + assert_equal(A, [[1, 2, 2, 2, 3, 3], + [4, 5, 5, 5, 6, 6]]) + + A = m_rect.repeat(2, axis=0) + assert_equal(A, [[1, 2, 3], + [1, 2, 3], + [4, 5, 6], + [4, 5, 6]]) + + A = m_rect.repeat(2, axis=1) + assert_equal(A, [[1, 1, 2, 2, 3, 3], + [4, 4, 5, 5, 6, 6]]) + + def test_reshape(self): + arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]) + + tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]] + assert_equal(arr.reshape(2, 6), tgt) + + tgt = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]] + assert_equal(arr.reshape(3, 4), tgt) + + tgt = [[1, 10, 8, 6], [4, 2, 11, 9], [7, 5, 3, 12]] + assert_equal(arr.reshape((3, 4), order='F'), tgt) + + tgt = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]] + assert_equal(arr.T.reshape((3, 4), order='C'), tgt) + + def test_round(self): + def check_round(arr, expected, *round_args): + assert_equal(arr.round(*round_args), expected) + # With output array + out = np.zeros_like(arr) + res = arr.round(*round_args, out=out) + assert_equal(out, expected) + assert out is res + + check_round(np.array([1.2, 1.5]), [1, 2]) + check_round(np.array(1.5), 2) + check_round(np.array([12.2, 15.5]), [10, 20], -1) + check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1) + # Complex rounding + check_round(np.array([4.5 + 1.5j]), [4 + 2j]) + check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1) + + def test_squeeze(self): + a = np.array([[[1], [2], [3]]]) + assert_equal(a.squeeze(), [1, 2, 3]) + assert_equal(a.squeeze(axis=(0,)), [[1], [2], [3]]) + assert_raises(ValueError, a.squeeze, axis=(1,)) + assert_equal(a.squeeze(axis=(2,)), [[1, 2, 3]]) + + def test_transpose(self): + a = np.array([[1, 2], [3, 4]]) + assert_equal(a.transpose(), [[1, 3], [2, 4]]) + assert_raises(ValueError, lambda: a.transpose(0)) + assert_raises(ValueError, lambda: a.transpose(0, 0)) + assert_raises(ValueError, lambda: a.transpose(0, 1, 2)) + + def test_sort(self): + # test ordering for floats and complex containing nans. It is only + # necessary to check the less-than comparison, so sorts that + # only follow the insertion sort path are sufficient. We only + # test doubles and complex doubles as the logic is the same. + + # check doubles + msg = "Test real sort order with nans" + a = np.array([np.nan, 1, 0]) + b = np.sort(a) + assert_equal(b, a[::-1], msg) + # check complex + msg = "Test complex sort order with nans" + a = np.zeros(9, dtype=np.complex128) + a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0] + a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0] + b = np.sort(a) + assert_equal(b, a[::-1], msg) + + # all c scalar sorts use the same code with different types + # so it suffices to run a quick check with one type. The number + # of sorted items must be greater than ~50 to check the actual + # algorithm because quick and merge sort fall over to insertion + # sort for small arrays. 
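+    # An illustrative, standalone sketch of the property the parametrized
+    # sort tests below rely on (added for exposition; not part of the
+    # original suite): every ``kind`` must produce the same fully sorted
+    # result, and the input must exceed the insertion-sort cutoff noted
+    # above:
+    #
+    #     import numpy as np
+    #     a = np.random.permutation(101).astype(np.float64)
+    #     expected = np.arange(101, dtype=np.float64)
+    #     for kind in ('quicksort', 'heapsort', 'stable'):
+    #         assert (np.sort(a, kind=kind) == expected).all()
+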
+ + @pytest.mark.parametrize('dtype', [np.uint8, np.uint16, np.uint32, np.uint64, + np.float16, np.float32, np.float64, + np.longdouble]) + def test_sort_unsigned(self, dtype): + a = np.arange(101, dtype=dtype) + b = a[::-1].copy() + for kind in self.sort_kinds: + msg = "scalar sort, kind=%s" % kind + c = a.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + + @pytest.mark.parametrize('dtype', + [np.int8, np.int16, np.int32, np.int64, np.float16, + np.float32, np.float64, np.longdouble]) + def test_sort_signed(self, dtype): + a = np.arange(-50, 51, dtype=dtype) + b = a[::-1].copy() + for kind in self.sort_kinds: + msg = "scalar sort, kind=%s" % (kind) + c = a.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + + @pytest.mark.parametrize('dtype', [np.float32, np.float64, np.longdouble]) + @pytest.mark.parametrize('part', ['real', 'imag']) + def test_sort_complex(self, part, dtype): + # test complex sorts. These use the same code as the scalars + # but the compare function differs. + cdtype = { + np.single: np.csingle, + np.double: np.cdouble, + np.longdouble: np.clongdouble, + }[dtype] + a = np.arange(-50, 51, dtype=dtype) + b = a[::-1].copy() + ai = (a * (1+1j)).astype(cdtype) + bi = (b * (1+1j)).astype(cdtype) + setattr(ai, part, 1) + setattr(bi, part, 1) + for kind in self.sort_kinds: + msg = "complex sort, %s part == 1, kind=%s" % (part, kind) + c = ai.copy() + c.sort(kind=kind) + assert_equal(c, ai, msg) + c = bi.copy() + c.sort(kind=kind) + assert_equal(c, ai, msg) + + def test_sort_complex_byte_swapping(self): + # test sorting of complex arrays requiring byte-swapping, gh-5441 + for endianness in '<>': + for dt in np.typecodes['Complex']: + arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt) + c = arr.copy() + c.sort() + msg = 'byte-swapped complex sort, dtype={0}'.format(dt) + assert_equal(c, arr, msg) + + @pytest.mark.parametrize('dtype', [np.bytes_, np.unicode_]) + def test_sort_string(self, dtype): + # np.array will perform the encoding to bytes for us in the bytes test + a = np.array(['aaaaaaaa' + chr(i) for i in range(101)], dtype=dtype) + b = a[::-1].copy() + for kind in self.sort_kinds: + msg = "kind=%s" % kind + c = a.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + + def test_sort_object(self): + # test object array sorts. + a = np.empty((101,), dtype=object) + a[:] = list(range(101)) + b = a[::-1] + for kind in ['q', 'h', 'm']: + msg = "kind=%s" % kind + c = a.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + + def test_sort_structured(self): + # test record array sorts. + dt = np.dtype([('f', float), ('i', int)]) + a = np.array([(i, i) for i in range(101)], dtype=dt) + b = a[::-1] + for kind in ['q', 'h', 'm']: + msg = "kind=%s" % kind + c = a.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + + @pytest.mark.parametrize('dtype', ['datetime64[D]', 'timedelta64[D]']) + def test_sort_time(self, dtype): + # test datetime64 and timedelta64 sorts. + a = np.arange(0, 101, dtype=dtype) + b = a[::-1] + for kind in ['q', 'h', 'm']: + msg = "kind=%s" % kind + c = a.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + + def test_sort_axis(self): + # check axis handling. 
This should be the same for all type + # specific sorts, so we only check it for one type and one kind + a = np.array([[3, 2], [1, 0]]) + b = np.array([[1, 0], [3, 2]]) + c = np.array([[2, 3], [0, 1]]) + d = a.copy() + d.sort(axis=0) + assert_equal(d, b, "test sort with axis=0") + d = a.copy() + d.sort(axis=1) + assert_equal(d, c, "test sort with axis=1") + d = a.copy() + d.sort() + assert_equal(d, c, "test sort with default axis") + + def test_sort_size_0(self): + # check axis handling for multidimensional empty arrays + a = np.array([]) + a.shape = (3, 2, 1, 0) + for axis in range(-a.ndim, a.ndim): + msg = 'test empty array sort with axis={0}'.format(axis) + assert_equal(np.sort(a, axis=axis), a, msg) + msg = 'test empty array sort with axis=None' + assert_equal(np.sort(a, axis=None), a.ravel(), msg) + + def test_sort_bad_ordering(self): + # test generic class with bogus ordering, + # should not segfault. + class Boom: + def __lt__(self, other): + return True + + a = np.array([Boom()] * 100, dtype=object) + for kind in self.sort_kinds: + msg = "kind=%s" % kind + c = a.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + + def test_void_sort(self): + # gh-8210 - previously segfaulted + for i in range(4): + rand = np.random.randint(256, size=4000, dtype=np.uint8) + arr = rand.view('V4') + arr[::-1].sort() + + dt = np.dtype([('val', 'i4', (1,))]) + for i in range(4): + rand = np.random.randint(256, size=4000, dtype=np.uint8) + arr = rand.view(dt) + arr[::-1].sort() + + def test_sort_raises(self): + #gh-9404 + arr = np.array([0, datetime.now(), 1], dtype=object) + for kind in self.sort_kinds: + assert_raises(TypeError, arr.sort, kind=kind) + #gh-3879 + class Raiser: + def raises_anything(*args, **kwargs): + raise TypeError("SOMETHING ERRORED") + __eq__ = __ne__ = __lt__ = __gt__ = __ge__ = __le__ = raises_anything + arr = np.array([[Raiser(), n] for n in range(10)]).reshape(-1) + np.random.shuffle(arr) + for kind in self.sort_kinds: + assert_raises(TypeError, arr.sort, kind=kind) + + def test_sort_degraded(self): + # test degraded dataset would take minutes to run with normal qsort + d = np.arange(1000000) + do = d.copy() + x = d + # create a median of 3 killer where each median is the sorted second + # last element of the quicksort partition + while x.size > 3: + mid = x.size // 2 + x[mid], x[-2] = x[-2], x[mid] + x = x[:-2] + + assert_equal(np.sort(d), do) + assert_equal(d[np.argsort(d)], do) + + def test_copy(self): + def assert_fortran(arr): + assert_(arr.flags.fortran) + assert_(arr.flags.f_contiguous) + assert_(not arr.flags.c_contiguous) + + def assert_c(arr): + assert_(not arr.flags.fortran) + assert_(not arr.flags.f_contiguous) + assert_(arr.flags.c_contiguous) + + a = np.empty((2, 2), order='F') + # Test copying a Fortran array + assert_c(a.copy()) + assert_c(a.copy('C')) + assert_fortran(a.copy('F')) + assert_fortran(a.copy('A')) + + # Now test starting with a C array. 
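+        # (For copies, order='A' means "match the source layout": the copy
+        # is Fortran-ordered for F-contiguous input and C-ordered otherwise,
+        # which is what the two halves of this test verify.)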
+ a = np.empty((2, 2), order='C') + assert_c(a.copy()) + assert_c(a.copy('C')) + assert_fortran(a.copy('F')) + assert_c(a.copy('A')) + + def test_sort_order(self): + # Test sorting an array with fields + x1 = np.array([21, 32, 14]) + x2 = np.array(['my', 'first', 'name']) + x3 = np.array([3.1, 4.5, 6.2]) + r = np.rec.fromarrays([x1, x2, x3], names='id,word,number') + + r.sort(order=['id']) + assert_equal(r.id, np.array([14, 21, 32])) + assert_equal(r.word, np.array(['name', 'my', 'first'])) + assert_equal(r.number, np.array([6.2, 3.1, 4.5])) + + r.sort(order=['word']) + assert_equal(r.id, np.array([32, 21, 14])) + assert_equal(r.word, np.array(['first', 'my', 'name'])) + assert_equal(r.number, np.array([4.5, 3.1, 6.2])) + + r.sort(order=['number']) + assert_equal(r.id, np.array([21, 32, 14])) + assert_equal(r.word, np.array(['my', 'first', 'name'])) + assert_equal(r.number, np.array([3.1, 4.5, 6.2])) + + assert_raises_regex(ValueError, 'duplicate', + lambda: r.sort(order=['id', 'id'])) + + if sys.byteorder == 'little': + strtype = '>i2' + else: + strtype = '': + for dt in np.typecodes['Complex']: + arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt) + msg = 'byte-swapped complex argsort, dtype={0}'.format(dt) + assert_equal(arr.argsort(), + np.arange(len(arr), dtype=np.intp), msg) + + # test string argsorts. + s = 'aaaaaaaa' + a = np.array([s + chr(i) for i in range(101)]) + b = a[::-1].copy() + r = np.arange(101) + rr = r[::-1] + for kind in self.sort_kinds: + msg = "string argsort, kind=%s" % kind + assert_equal(a.copy().argsort(kind=kind), r, msg) + assert_equal(b.copy().argsort(kind=kind), rr, msg) + + # test unicode argsorts. + s = 'aaaaaaaa' + a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode_) + b = a[::-1] + r = np.arange(101) + rr = r[::-1] + for kind in self.sort_kinds: + msg = "unicode argsort, kind=%s" % kind + assert_equal(a.copy().argsort(kind=kind), r, msg) + assert_equal(b.copy().argsort(kind=kind), rr, msg) + + # test object array argsorts. + a = np.empty((101,), dtype=object) + a[:] = list(range(101)) + b = a[::-1] + r = np.arange(101) + rr = r[::-1] + for kind in self.sort_kinds: + msg = "object argsort, kind=%s" % kind + assert_equal(a.copy().argsort(kind=kind), r, msg) + assert_equal(b.copy().argsort(kind=kind), rr, msg) + + # test structured array argsorts. + dt = np.dtype([('f', float), ('i', int)]) + a = np.array([(i, i) for i in range(101)], dtype=dt) + b = a[::-1] + r = np.arange(101) + rr = r[::-1] + for kind in self.sort_kinds: + msg = "structured array argsort, kind=%s" % kind + assert_equal(a.copy().argsort(kind=kind), r, msg) + assert_equal(b.copy().argsort(kind=kind), rr, msg) + + # test datetime64 argsorts. + a = np.arange(0, 101, dtype='datetime64[D]') + b = a[::-1] + r = np.arange(101) + rr = r[::-1] + for kind in ['q', 'h', 'm']: + msg = "datetime64 argsort, kind=%s" % kind + assert_equal(a.copy().argsort(kind=kind), r, msg) + assert_equal(b.copy().argsort(kind=kind), rr, msg) + + # test timedelta64 argsorts. + a = np.arange(0, 101, dtype='timedelta64[D]') + b = a[::-1] + r = np.arange(101) + rr = r[::-1] + for kind in ['q', 'h', 'm']: + msg = "timedelta64 argsort, kind=%s" % kind + assert_equal(a.copy().argsort(kind=kind), r, msg) + assert_equal(b.copy().argsort(kind=kind), rr, msg) + + # check axis handling. 
This should be the same for all type + # specific argsorts, so we only check it for one type and one kind + a = np.array([[3, 2], [1, 0]]) + b = np.array([[1, 1], [0, 0]]) + c = np.array([[1, 0], [1, 0]]) + assert_equal(a.copy().argsort(axis=0), b) + assert_equal(a.copy().argsort(axis=1), c) + assert_equal(a.copy().argsort(), c) + + # check axis handling for multidimensional empty arrays + a = np.array([]) + a.shape = (3, 2, 1, 0) + for axis in range(-a.ndim, a.ndim): + msg = 'test empty array argsort with axis={0}'.format(axis) + assert_equal(np.argsort(a, axis=axis), + np.zeros_like(a, dtype=np.intp), msg) + msg = 'test empty array argsort with axis=None' + assert_equal(np.argsort(a, axis=None), + np.zeros_like(a.ravel(), dtype=np.intp), msg) + + # check that stable argsorts are stable + r = np.arange(100) + # scalars + a = np.zeros(100) + assert_equal(a.argsort(kind='m'), r) + # complex + a = np.zeros(100, dtype=complex) + assert_equal(a.argsort(kind='m'), r) + # string + a = np.array(['aaaaaaaaa' for i in range(100)]) + assert_equal(a.argsort(kind='m'), r) + # unicode + a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode_) + assert_equal(a.argsort(kind='m'), r) + + def test_sort_unicode_kind(self): + d = np.arange(10) + k = b'\xc3\xa4'.decode("UTF8") + assert_raises(ValueError, d.sort, kind=k) + assert_raises(ValueError, d.argsort, kind=k) + + def test_searchsorted(self): + # test for floats and complex containing nans. The logic is the + # same for all float types so only test double types for now. + # The search sorted routines use the compare functions for the + # array type, so this checks if that is consistent with the sort + # order. + + # check double + a = np.array([0, 1, np.nan]) + msg = "Test real searchsorted with nans, side='l'" + b = a.searchsorted(a, side='left') + assert_equal(b, np.arange(3), msg) + msg = "Test real searchsorted with nans, side='r'" + b = a.searchsorted(a, side='right') + assert_equal(b, np.arange(1, 4), msg) + # check keyword arguments + a.searchsorted(v=1) + # check double complex + a = np.zeros(9, dtype=np.complex128) + a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan] + a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan] + msg = "Test complex searchsorted with nans, side='l'" + b = a.searchsorted(a, side='left') + assert_equal(b, np.arange(9), msg) + msg = "Test complex searchsorted with nans, side='r'" + b = a.searchsorted(a, side='right') + assert_equal(b, np.arange(1, 10), msg) + msg = "Test searchsorted with little endian, side='l'" + a = np.array([0, 128], dtype=' p[:, i]).all(), + msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T)) + aae(p, d1[np.arange(d1.shape[0])[:, None], + np.argpartition(d1, i, axis=1, kind=k)]) + + p = np.partition(d0, i, axis=0, kind=k) + aae(p[i, :], np.array([i] * d1.shape[0], dtype=dt)) + # array_less does not seem to work right + at((p[:i, :] <= p[i, :]).all(), + msg="%d: %r <= %r" % (i, p[i, :], p[:i, :])) + at((p[i + 1:, :] > p[i, :]).all(), + msg="%d: %r < %r" % (i, p[i, :], p[:, i + 1:])) + aae(p, d0[np.argpartition(d0, i, axis=0, kind=k), + np.arange(d0.shape[1])[None, :]]) + + # check inplace + dc = d.copy() + dc.partition(i, kind=k) + assert_equal(dc, np.partition(d, i, kind=k)) + dc = d0.copy() + dc.partition(i, axis=0, kind=k) + assert_equal(dc, np.partition(d0, i, axis=0, kind=k)) + dc = d1.copy() + dc.partition(i, axis=1, kind=k) + assert_equal(dc, np.partition(d1, i, axis=1, kind=k)) + + def assert_partitioned(self, d, kth): + prev = 0 + for k in np.sort(kth): + 
assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k) + assert_((d[k:] >= d[k]).all(), + msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k])) + prev = k + 1 + + def test_partition_iterative(self): + d = np.arange(17) + kth = (0, 1, 2, 429, 231) + assert_raises(ValueError, d.partition, kth) + assert_raises(ValueError, d.argpartition, kth) + d = np.arange(10).reshape((2, 5)) + assert_raises(ValueError, d.partition, kth, axis=0) + assert_raises(ValueError, d.partition, kth, axis=1) + assert_raises(ValueError, np.partition, d, kth, axis=1) + assert_raises(ValueError, np.partition, d, kth, axis=None) + + d = np.array([3, 4, 2, 1]) + p = np.partition(d, (0, 3)) + self.assert_partitioned(p, (0, 3)) + self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3)) + + assert_array_equal(p, np.partition(d, (-3, -1))) + assert_array_equal(p, d[np.argpartition(d, (-3, -1))]) + + d = np.arange(17) + np.random.shuffle(d) + d.partition(range(d.size)) + assert_array_equal(np.arange(17), d) + np.random.shuffle(d) + assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))]) + + # test unsorted kth + d = np.arange(17) + np.random.shuffle(d) + keys = np.array([1, 3, 8, -2]) + np.random.shuffle(d) + p = np.partition(d, keys) + self.assert_partitioned(p, keys) + p = d[np.argpartition(d, keys)] + self.assert_partitioned(p, keys) + np.random.shuffle(keys) + assert_array_equal(np.partition(d, keys), p) + assert_array_equal(d[np.argpartition(d, keys)], p) + + # equal kth + d = np.arange(20)[::-1] + self.assert_partitioned(np.partition(d, [5]*4), [5]) + self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]), + [5]*4 + [6, 13]) + self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5]) + self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])], + [5]*4 + [6, 13]) + + d = np.arange(12) + np.random.shuffle(d) + d1 = np.tile(np.arange(12), (4, 1)) + map(np.random.shuffle, d1) + d0 = np.transpose(d1) + + kth = (1, 6, 7, -1) + p = np.partition(d1, kth, axis=1) + pa = d1[np.arange(d1.shape[0])[:, None], + d1.argpartition(kth, axis=1)] + assert_array_equal(p, pa) + for i in range(d1.shape[0]): + self.assert_partitioned(p[i,:], kth) + p = np.partition(d0, kth, axis=0) + pa = d0[np.argpartition(d0, kth, axis=0), + np.arange(d0.shape[1])[None,:]] + assert_array_equal(p, pa) + for i in range(d0.shape[1]): + self.assert_partitioned(p[:, i], kth) + + def test_partition_cdtype(self): + d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41), + ('Lancelot', 1.9, 38)], + dtype=[('name', '|S10'), ('height', ' (numpy ufunc, has_in_place_version, preferred_dtype) + ops = { + 'add': (np.add, True, float), + 'sub': (np.subtract, True, float), + 'mul': (np.multiply, True, float), + 'truediv': (np.true_divide, True, float), + 'floordiv': (np.floor_divide, True, float), + 'mod': (np.remainder, True, float), + 'divmod': (np.divmod, False, float), + 'pow': (np.power, True, int), + 'lshift': (np.left_shift, True, int), + 'rshift': (np.right_shift, True, int), + 'and': (np.bitwise_and, True, int), + 'xor': (np.bitwise_xor, True, int), + 'or': (np.bitwise_or, True, int), + 'matmul': (np.matmul, False, float), + # 'ge': (np.less_equal, False), + # 'gt': (np.less, False), + # 'le': (np.greater_equal, False), + # 'lt': (np.greater, False), + # 'eq': (np.equal, False), + # 'ne': (np.not_equal, False), + } + + class Coerced(Exception): + pass + + def array_impl(self): + raise Coerced + + def op_impl(self, other): + return "forward" + + def rop_impl(self, other): + return "reverse" + + def iop_impl(self, other): + return 
"in-place" + + def array_ufunc_impl(self, ufunc, method, *args, **kwargs): + return ("__array_ufunc__", ufunc, method, args, kwargs) + + # Create an object with the given base, in the given module, with a + # bunch of placeholder __op__ methods, and optionally a + # __array_ufunc__ and __array_priority__. + def make_obj(base, array_priority=False, array_ufunc=False, + alleged_module="__main__"): + class_namespace = {"__array__": array_impl} + if array_priority is not False: + class_namespace["__array_priority__"] = array_priority + for op in ops: + class_namespace["__{0}__".format(op)] = op_impl + class_namespace["__r{0}__".format(op)] = rop_impl + class_namespace["__i{0}__".format(op)] = iop_impl + if array_ufunc is not False: + class_namespace["__array_ufunc__"] = array_ufunc + eval_namespace = {"base": base, + "class_namespace": class_namespace, + "__name__": alleged_module, + } + MyType = eval("type('MyType', (base,), class_namespace)", + eval_namespace) + if issubclass(MyType, np.ndarray): + # Use this range to avoid special case weirdnesses around + # divide-by-0, pow(x, 2), overflow due to pow(big, big), etc. + return np.arange(3, 7).reshape(2, 2).view(MyType) + else: + return MyType() + + def check(obj, binop_override_expected, ufunc_override_expected, + inplace_override_expected, check_scalar=True): + for op, (ufunc, has_inplace, dtype) in ops.items(): + err_msg = ('op: %s, ufunc: %s, has_inplace: %s, dtype: %s' + % (op, ufunc, has_inplace, dtype)) + check_objs = [np.arange(3, 7, dtype=dtype).reshape(2, 2)] + if check_scalar: + check_objs.append(check_objs[0][0]) + for arr in check_objs: + arr_method = getattr(arr, "__{0}__".format(op)) + + def first_out_arg(result): + if op == "divmod": + assert_(isinstance(result, tuple)) + return result[0] + else: + return result + + # arr __op__ obj + if binop_override_expected: + assert_equal(arr_method(obj), NotImplemented, err_msg) + elif ufunc_override_expected: + assert_equal(arr_method(obj)[0], "__array_ufunc__", + err_msg) + else: + if (isinstance(obj, np.ndarray) and + (type(obj).__array_ufunc__ is + np.ndarray.__array_ufunc__)): + # __array__ gets ignored + res = first_out_arg(arr_method(obj)) + assert_(res.__class__ is obj.__class__, err_msg) + else: + assert_raises((TypeError, Coerced), + arr_method, obj, err_msg=err_msg) + # obj __op__ arr + arr_rmethod = getattr(arr, "__r{0}__".format(op)) + if ufunc_override_expected: + res = arr_rmethod(obj) + assert_equal(res[0], "__array_ufunc__", + err_msg=err_msg) + assert_equal(res[1], ufunc, err_msg=err_msg) + else: + if (isinstance(obj, np.ndarray) and + (type(obj).__array_ufunc__ is + np.ndarray.__array_ufunc__)): + # __array__ gets ignored + res = first_out_arg(arr_rmethod(obj)) + assert_(res.__class__ is obj.__class__, err_msg) + else: + # __array_ufunc__ = "asdf" creates a TypeError + assert_raises((TypeError, Coerced), + arr_rmethod, obj, err_msg=err_msg) + + # arr __iop__ obj + # array scalars don't have in-place operators + if has_inplace and isinstance(arr, np.ndarray): + arr_imethod = getattr(arr, "__i{0}__".format(op)) + if inplace_override_expected: + assert_equal(arr_method(obj), NotImplemented, + err_msg=err_msg) + elif ufunc_override_expected: + res = arr_imethod(obj) + assert_equal(res[0], "__array_ufunc__", err_msg) + assert_equal(res[1], ufunc, err_msg) + assert_(type(res[-1]["out"]) is tuple, err_msg) + assert_(res[-1]["out"][0] is arr, err_msg) + else: + if (isinstance(obj, np.ndarray) and + (type(obj).__array_ufunc__ is + np.ndarray.__array_ufunc__)): + # __array__ 
gets ignored
+                                assert_(arr_imethod(obj) is arr, err_msg)
+                            else:
+                                assert_raises((TypeError, Coerced),
+                                              arr_imethod, obj,
+                                              err_msg=err_msg)
+
+                    op_fn = getattr(operator, op, None)
+                    if op_fn is None:
+                        op_fn = getattr(operator, op + "_", None)
+                    if op_fn is None:
+                        op_fn = getattr(builtins, op)
+                    assert_equal(op_fn(obj, arr), "forward", err_msg)
+                    if not isinstance(obj, np.ndarray):
+                        if binop_override_expected:
+                            assert_equal(op_fn(arr, obj), "reverse", err_msg)
+                        elif ufunc_override_expected:
+                            assert_equal(op_fn(arr, obj)[0], "__array_ufunc__",
+                                         err_msg)
+                    if ufunc_override_expected:
+                        assert_equal(ufunc(obj, arr)[0], "__array_ufunc__",
+                                     err_msg)
+
+        # No array priority, no array_ufunc -> nothing called
+        check(make_obj(object), False, False, False)
+        # Negative array priority, no array_ufunc -> nothing called
+        # (has to be very negative, because scalar priority is -1000000.0)
+        check(make_obj(object, array_priority=-2**30), False, False, False)
+        # Positive array priority, no array_ufunc -> binops and iops only
+        check(make_obj(object, array_priority=1), True, False, True)
+        # ndarray ignores array_priority for ndarray subclasses
+        check(make_obj(np.ndarray, array_priority=1), False, False, False,
+              check_scalar=False)
+        # Positive array_priority and array_ufunc -> array_ufunc only
+        check(make_obj(object, array_priority=1,
+                       array_ufunc=array_ufunc_impl), False, True, False)
+        check(make_obj(np.ndarray, array_priority=1,
+                       array_ufunc=array_ufunc_impl), False, True, False)
+        # array_ufunc set to None -> defer binops only
+        check(make_obj(object, array_ufunc=None), True, False, False)
+        check(make_obj(np.ndarray, array_ufunc=None), True, False, False,
+              check_scalar=False)
+
+    def test_ufunc_override_normalize_signature(self):
+        # gh-5674
+        class SomeClass:
+            def __array_ufunc__(self, ufunc, method, *inputs, **kw):
+                return kw
+
+        a = SomeClass()
+        kw = np.add(a, [1])
+        assert_('sig' not in kw and 'signature' not in kw)
+        kw = np.add(a, [1], sig='ii->i')
+        assert_('sig' not in kw and 'signature' in kw)
+        assert_equal(kw['signature'], 'ii->i')
+        kw = np.add(a, [1], signature='ii->i')
+        assert_('sig' not in kw and 'signature' in kw)
+        assert_equal(kw['signature'], 'ii->i')
+
+    def test_array_ufunc_index(self):
+        # Check that index is set appropriately, also if only an output
+        # is passed on (the latter is another regression test for github
+        # bug 4753). This also checks implicitly that 'out' is always a
+        # tuple.
+        class CheckIndex:
+            def __array_ufunc__(self, ufunc, method, *inputs, **kw):
+                for i, a in enumerate(inputs):
+                    if a is self:
+                        return i
+                # calls below mean we must be in an output.
+                for j, a in enumerate(kw['out']):
+                    if a is self:
+                        return (j,)
+
+        a = CheckIndex()
+        dummy = np.arange(2.)
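+        # (The return value of CheckIndex.__array_ufunc__ above encodes
+        # where ``a`` was found: a plain integer i for the i-th input, and
+        # a one-element tuple (j,) for the j-th entry of the tuple-valued
+        # ``out`` argument. The assertions below decode exactly that.)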
+ # 1 input, 1 output + assert_equal(np.sin(a), 0) + assert_equal(np.sin(dummy, a), (0,)) + assert_equal(np.sin(dummy, out=a), (0,)) + assert_equal(np.sin(dummy, out=(a,)), (0,)) + assert_equal(np.sin(a, a), 0) + assert_equal(np.sin(a, out=a), 0) + assert_equal(np.sin(a, out=(a,)), 0) + # 1 input, 2 outputs + assert_equal(np.modf(dummy, a), (0,)) + assert_equal(np.modf(dummy, None, a), (1,)) + assert_equal(np.modf(dummy, dummy, a), (1,)) + assert_equal(np.modf(dummy, out=(a, None)), (0,)) + assert_equal(np.modf(dummy, out=(a, dummy)), (0,)) + assert_equal(np.modf(dummy, out=(None, a)), (1,)) + assert_equal(np.modf(dummy, out=(dummy, a)), (1,)) + assert_equal(np.modf(a, out=(dummy, a)), 0) + with assert_raises(TypeError): + # Out argument must be tuple, since there are multiple outputs + np.modf(dummy, out=a) + + assert_raises(ValueError, np.modf, dummy, out=(a,)) + + # 2 inputs, 1 output + assert_equal(np.add(a, dummy), 0) + assert_equal(np.add(dummy, a), 1) + assert_equal(np.add(dummy, dummy, a), (0,)) + assert_equal(np.add(dummy, a, a), 1) + assert_equal(np.add(dummy, dummy, out=a), (0,)) + assert_equal(np.add(dummy, dummy, out=(a,)), (0,)) + assert_equal(np.add(a, dummy, out=a), 0) + + def test_out_override(self): + # regression test for github bug 4753 + class OutClass(np.ndarray): + def __array_ufunc__(self, ufunc, method, *inputs, **kw): + if 'out' in kw: + tmp_kw = kw.copy() + tmp_kw.pop('out') + func = getattr(ufunc, method) + kw['out'][0][...] = func(*inputs, **tmp_kw) + + A = np.array([0]).view(OutClass) + B = np.array([5]) + C = np.array([6]) + np.multiply(C, B, A) + assert_equal(A[0], 30) + assert_(isinstance(A, OutClass)) + A[0] = 0 + np.multiply(C, B, out=A) + assert_equal(A[0], 30) + assert_(isinstance(A, OutClass)) + + def test_pow_override_with_errors(self): + # regression test for gh-9112 + class PowerOnly(np.ndarray): + def __array_ufunc__(self, ufunc, method, *inputs, **kw): + if ufunc is not np.power: + raise NotImplementedError + return "POWER!" + # explicit cast to float, to ensure the fast power path is taken. 
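+        # (ndarray.__pow__ special-cases the scalar exponents +1, -1, 0,
+        # 0.5 and 2 via a fast path; the assertions below check that the
+        # __array_ufunc__ override still wins on that path.)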
+ a = np.array(5., dtype=np.float64).view(PowerOnly) + assert_equal(a ** 2.5, "POWER!") + with assert_raises(NotImplementedError): + a ** 0.5 + with assert_raises(NotImplementedError): + a ** 0 + with assert_raises(NotImplementedError): + a ** 1 + with assert_raises(NotImplementedError): + a ** -1 + with assert_raises(NotImplementedError): + a ** 2 + + def test_pow_array_object_dtype(self): + # test pow on arrays of object dtype + class SomeClass: + def __init__(self, num=None): + self.num = num + + # want to ensure a fast pow path is not taken + def __mul__(self, other): + raise AssertionError('__mul__ should not be called') + + def __div__(self, other): + raise AssertionError('__div__ should not be called') + + def __pow__(self, exp): + return SomeClass(num=self.num ** exp) + + def __eq__(self, other): + if isinstance(other, SomeClass): + return self.num == other.num + + __rpow__ = __pow__ + + def pow_for(exp, arr): + return np.array([x ** exp for x in arr]) + + obj_arr = np.array([SomeClass(1), SomeClass(2), SomeClass(3)]) + + assert_equal(obj_arr ** 0.5, pow_for(0.5, obj_arr)) + assert_equal(obj_arr ** 0, pow_for(0, obj_arr)) + assert_equal(obj_arr ** 1, pow_for(1, obj_arr)) + assert_equal(obj_arr ** -1, pow_for(-1, obj_arr)) + assert_equal(obj_arr ** 2, pow_for(2, obj_arr)) + + def test_pos_array_ufunc_override(self): + class A(np.ndarray): + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + return getattr(ufunc, method)(*[i.view(np.ndarray) for + i in inputs], **kwargs) + tst = np.array('foo').view(A) + with assert_raises(TypeError): + +tst + + +class TestTemporaryElide: + # elision is only triggered on relatively large arrays + + def test_extension_incref_elide(self): + # test extension (e.g. cython) calling PyNumber_* slots without + # increasing the reference counts + # + # def incref_elide(a): + # d = input.copy() # refcount 1 + # return d, d + d # PyNumber_Add without increasing refcount + from numpy.core._multiarray_tests import incref_elide + d = np.ones(100000) + orig, res = incref_elide(d) + d + d + # the return original should not be changed to an inplace operation + assert_array_equal(orig, d) + assert_array_equal(res, d + d) + + def test_extension_incref_elide_stack(self): + # scanning if the refcount == 1 object is on the python stack to check + # that we are called directly from python is flawed as object may still + # be above the stack pointer and we have no access to the top of it + # + # def incref_elide_l(d): + # return l[4] + l[4] # PyNumber_Add without increasing refcount + from numpy.core._multiarray_tests import incref_elide_l + # padding with 1 makes sure the object on the stack is not overwritten + l = [1, 1, 1, 1, np.ones(100000)] + res = incref_elide_l(l) + # the return original should not be changed to an inplace operation + assert_array_equal(l[4], np.ones(100000)) + assert_array_equal(res, l[4] + l[4]) + + def test_temporary_with_cast(self): + # check that we don't elide into a temporary which would need casting + d = np.ones(200000, dtype=np.int64) + assert_equal(((d + d) + 2**222).dtype, np.dtype('O')) + + r = ((d + d) / 2) + assert_equal(r.dtype, np.dtype('f8')) + + r = np.true_divide((d + d), 2) + assert_equal(r.dtype, np.dtype('f8')) + + r = ((d + d) / 2.) 
+        assert_equal(r.dtype, np.dtype('f8'))
+
+        r = ((d + d) // 2)
+        assert_equal(r.dtype, np.dtype(np.int64))
+
+        # commutative elision into the astype result
+        f = np.ones(100000, dtype=np.float32)
+        assert_equal(((f + f) + f.astype(np.float64)).dtype, np.dtype('f8'))
+
+        # no elision into lower type
+        d = f.astype(np.float64)
+        assert_equal(((f + f) + d).dtype, d.dtype)
+        l = np.ones(100000, dtype=np.longdouble)
+        assert_equal(((d + d) + l).dtype, l.dtype)
+
+        # test unary abs with different output dtype
+        for dt in (np.complex64, np.complex128, np.clongdouble):
+            c = np.ones(100000, dtype=dt)
+            r = abs(c * 2.0)
+            assert_equal(r.dtype, np.dtype('f%d' % (c.itemsize // 2)))
+
+    def test_elide_broadcast(self):
+        # test no elision on broadcast to higher dimension
+        # only triggers the elision code path in debug mode, as triggering
+        # it in normal mode needs a 256kB matching dimension and therefore
+        # a lot of memory
+        d = np.ones((2000, 1), dtype=int)
+        b = np.ones((2000), dtype=bool)
+        r = (1 - d) + b
+        assert_equal(r, 1)
+        assert_equal(r.shape, (2000, 2000))
+
+    def test_elide_scalar(self):
+        # check inplace op does not create ndarray from scalars
+        a = np.bool_()
+        assert_(type(~(a & a)) is np.bool_)
+
+    def test_elide_scalar_readonly(self):
+        # The imaginary part of a real array is readonly. This needs to go
+        # through fast_scalar_power which is only called for powers of
+        # +1, -1, 0, 0.5, and 2, so use 2. Also need valid refcount for
+        # elision which can be gotten for the imaginary part of a real
+        # array. Should not error.
+        a = np.empty(100000, dtype=np.float64)
+        a.imag ** 2
+
+    def test_elide_readonly(self):
+        # don't try to elide readonly temporaries
+        r = np.asarray(np.broadcast_to(np.zeros(1), 100000).flat) * 0.0
+        assert_equal(r, 0)
+
+    def test_elide_updateifcopy(self):
+        a = np.ones(2**20)[::2]
+        b = a.flat.__array__() + 1
+        del b
+        assert_equal(a, 1)
+
+
+class TestCAPI:
+    def test_IsPythonScalar(self):
+        from numpy.core._multiarray_tests import IsPythonScalar
+        assert_(IsPythonScalar(b'foobar'))
+        assert_(IsPythonScalar(1))
+        assert_(IsPythonScalar(2**80))
+        assert_(IsPythonScalar(2.))
+        assert_(IsPythonScalar("a"))
+
+
+class TestSubscripting:
+    def test_test_zero_rank(self):
+        x = np.array([1, 2, 3])
+        assert_(isinstance(x[0], np.int_))
+        assert_(type(x[0, ...]) is np.ndarray)
+
+
+class TestPickling:
+    @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL >= 5,
+                        reason=('this tests the error messages when trying to'
+                                ' use protocol 5 although it is not available'))
+    def test_correct_protocol5_error_message(self):
+        array = np.arange(10)
+
+        if sys.version_info[:2] in ((3, 6), (3, 7)):
+            # For the specific case of python3.6 and 3.7, raise a clear import
+            # error about the pickle5 backport when trying to use protocol=5
+            # without the pickle5 package
+            with pytest.raises(ImportError):
+                array.__reduce_ex__(5)
+
+    def test_record_array_with_object_dtype(self):
+        my_object = object()
+
+        arr_with_object = np.array(
+                [(my_object, 1, 2.0)],
+                dtype=[('a', object), ('b', int), ('c', float)])
+        arr_without_object = np.array(
+                [('xxx', 1, 2.0)],
+                dtype=[('a', str), ('b', int), ('c', float)])
+
+        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+            depickled_arr_with_object = pickle.loads(
+                    pickle.dumps(arr_with_object, protocol=proto))
+            depickled_arr_without_object = pickle.loads(
+                    pickle.dumps(arr_without_object, protocol=proto))
+
+            assert_equal(arr_with_object.dtype,
+                         depickled_arr_with_object.dtype)
+            assert_equal(arr_without_object.dtype,
+                         depickled_arr_without_object.dtype)
+
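+    # A compact sketch of the out-of-band buffer mechanism exercised by
+    # the next test (added for exposition; not part of the original suite):
+    #
+    #     import pickle
+    #     import numpy as np
+    #     buffers = []
+    #     payload = pickle.dumps(np.ones((2, 3), order='F'), protocol=5,
+    #                            buffer_callback=buffers.append)
+    #     roundtripped = pickle.loads(payload, buffers=buffers)
+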
@pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5, + reason="requires pickle protocol 5") + def test_f_contiguous_array(self): + f_contiguous_array = np.array([[1, 2, 3], [4, 5, 6]], order='F') + buffers = [] + + # When using pickle protocol 5, Fortran-contiguous arrays can be + # serialized using out-of-band buffers + bytes_string = pickle.dumps(f_contiguous_array, protocol=5, + buffer_callback=buffers.append) + + assert len(buffers) > 0 + + depickled_f_contiguous_array = pickle.loads(bytes_string, + buffers=buffers) + + assert_equal(f_contiguous_array, depickled_f_contiguous_array) + + def test_non_contiguous_array(self): + non_contiguous_array = np.arange(12).reshape(3, 4)[:, :2] + assert not non_contiguous_array.flags.c_contiguous + assert not non_contiguous_array.flags.f_contiguous + + # make sure non-contiguous arrays can be pickled-depickled + # using any protocol + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + depickled_non_contiguous_array = pickle.loads( + pickle.dumps(non_contiguous_array, protocol=proto)) + + assert_equal(non_contiguous_array, depickled_non_contiguous_array) + + def test_roundtrip(self): + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + carray = np.array([[2, 9], [7, 0], [3, 8]]) + DATA = [ + carray, + np.transpose(carray), + np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int), + ('c', float)]) + ] + + refs = [weakref.ref(a) for a in DATA] + for a in DATA: + assert_equal( + a, pickle.loads(pickle.dumps(a, protocol=proto)), + err_msg="%r" % a) + del a, DATA, carray + break_cycles() + # check for reference leaks (gh-12793) + for ref in refs: + assert ref() is None + + def _loads(self, obj): + return pickle.loads(obj, encoding='latin1') + + # version 0 pickles, using protocol=2 to pickle + # version 0 doesn't have a version field + def test_version0_int8(self): + s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.' 
+ a = np.array([1, 2, 3, 4], dtype=np.int8) + p = self._loads(s) + assert_equal(a, p) + + def test_version0_float32(self): + s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01= g2, [g1[i] >= g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]]) + + def test_mixed(self): + g1 = np.array(["spam", "spa", "spammer", "and eggs"]) + g2 = "spam" + assert_array_equal(g1 == g2, [x == g2 for x in g1]) + assert_array_equal(g1 != g2, [x != g2 for x in g1]) + assert_array_equal(g1 < g2, [x < g2 for x in g1]) + assert_array_equal(g1 > g2, [x > g2 for x in g1]) + assert_array_equal(g1 <= g2, [x <= g2 for x in g1]) + assert_array_equal(g1 >= g2, [x >= g2 for x in g1]) + + def test_unicode(self): + g1 = np.array([u"This", u"is", u"example"]) + g2 = np.array([u"This", u"was", u"example"]) + assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]]) + + +class TestArgmax: + + nan_arr = [ + ([0, 1, 2, 3, np.nan], 4), + ([0, 1, 2, np.nan, 3], 3), + ([np.nan, 0, 1, 2, 3], 0), + ([np.nan, 0, np.nan, 2, 3], 0), + ([0, 1, 2, 3, complex(0, np.nan)], 4), + ([0, 1, 2, 3, complex(np.nan, 0)], 4), + ([0, 1, 2, complex(np.nan, 0), 3], 3), + ([0, 1, 2, complex(0, np.nan), 3], 3), + ([complex(0, np.nan), 0, 1, 2, 3], 0), + ([complex(np.nan, np.nan), 0, 1, 2, 3], 0), + ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0), + ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0), + ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0), + + ([complex(0, 0), complex(0, 2), complex(0, 1)], 1), + ([complex(1, 0), complex(0, 2), complex(0, 1)], 0), + ([complex(1, 0), complex(0, 2), complex(1, 1)], 2), + + ([np.datetime64('1923-04-14T12:43:12'), + np.datetime64('1994-06-21T14:43:15'), + np.datetime64('2001-10-15T04:10:32'), + np.datetime64('1995-11-25T16:02:16'), + np.datetime64('2005-01-04T03:14:12'), + np.datetime64('2041-12-03T14:05:03')], 5), + ([np.datetime64('1935-09-14T04:40:11'), + np.datetime64('1949-10-12T12:32:11'), + np.datetime64('2010-01-03T05:14:12'), + np.datetime64('2015-11-20T12:20:59'), + np.datetime64('1932-09-23T10:10:13'), + np.datetime64('2014-10-10T03:50:30')], 3), + # Assorted tests with NaTs + ([np.datetime64('NaT'), + np.datetime64('NaT'), + np.datetime64('2010-01-03T05:14:12'), + np.datetime64('NaT'), + np.datetime64('2015-09-23T10:10:13'), + np.datetime64('1932-10-10T03:50:30')], 0), + ([np.datetime64('2059-03-14T12:43:12'), + np.datetime64('1996-09-21T14:43:15'), + np.datetime64('NaT'), + np.datetime64('2022-12-25T16:02:16'), + np.datetime64('1963-10-04T03:14:12'), + np.datetime64('2013-05-08T18:15:23')], 2), + ([np.timedelta64(2, 's'), + np.timedelta64(1, 's'), + np.timedelta64('NaT', 's'), + np.timedelta64(3, 's')], 2), + ([np.timedelta64('NaT', 's')] * 3, 0), + + ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35), + timedelta(days=-1, seconds=23)], 0), + ([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5), + timedelta(days=5, seconds=14)], 1), + ([timedelta(days=10, 
seconds=24), timedelta(days=10, seconds=5), + timedelta(days=10, seconds=43)], 2), + + ([False, False, False, False, True], 4), + ([False, False, False, True, False], 3), + ([True, False, False, False, False], 0), + ([True, False, True, False, False], 0), + ] + + def test_all(self): + a = np.random.normal(0, 1, (4, 5, 6, 7, 8)) + for i in range(a.ndim): + amax = a.max(i) + aargmax = a.argmax(i) + axes = list(range(a.ndim)) + axes.remove(i) + assert_(np.all(amax == aargmax.choose(*a.transpose(i,*axes)))) + + def test_combinations(self): + for arr, pos in self.nan_arr: + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, + "invalid value encountered in reduce") + max_val = np.max(arr) + + assert_equal(np.argmax(arr), pos, err_msg="%r" % arr) + assert_equal(arr[np.argmax(arr)], max_val, err_msg="%r" % arr) + + def test_output_shape(self): + # see also gh-616 + a = np.ones((10, 5)) + # Check some simple shape mismatches + out = np.ones(11, dtype=np.int_) + assert_raises(ValueError, a.argmax, -1, out) + + out = np.ones((2, 5), dtype=np.int_) + assert_raises(ValueError, a.argmax, -1, out) + + # these could be relaxed possibly (used to allow even the previous) + out = np.ones((1, 10), dtype=np.int_) + assert_raises(ValueError, a.argmax, -1, out) + + out = np.ones(10, dtype=np.int_) + a.argmax(-1, out=out) + assert_equal(out, a.argmax(-1)) + + @pytest.mark.parametrize('ndim', [0, 1]) + def test_ret_is_out(self, ndim): + a = np.ones((4,) + (3,)*ndim) + out = np.empty((3,)*ndim, dtype=np.intp) + ret = a.argmax(axis=0, out=out) + assert ret is out + + def test_argmax_unicode(self): + d = np.zeros(6031, dtype='= cmin)) + assert_(np.all(x <= cmax)) + + def _clip_type(self, type_group, array_max, + clip_min, clip_max, inplace=False, + expected_min=None, expected_max=None): + if expected_min is None: + expected_min = clip_min + if expected_max is None: + expected_max = clip_max + + for T in np.sctypes[type_group]: + if sys.byteorder == 'little': + byte_orders = ['=', '>'] + else: + byte_orders = ['<', '='] + + for byteorder in byte_orders: + dtype = np.dtype(T).newbyteorder(byteorder) + + x = (np.random.random(1000) * array_max).astype(dtype) + if inplace: + # The tests that call us pass clip_min and clip_max that + # might not fit in the destination dtype. They were written + # assuming the previous unsafe casting, which now must be + # passed explicitly to avoid a warning. 
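+                    # (e.g. clip_min=-120 applied to an unsigned dtype has
+                    # to be cast to that dtype, a value-changing and hence
+                    # "unsafe" cast.)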
+ x.clip(clip_min, clip_max, x, casting='unsafe') + else: + x = x.clip(clip_min, clip_max) + byteorder = '=' + + if x.dtype.byteorder == '|': + byteorder = '|' + assert_equal(x.dtype.byteorder, byteorder) + self._check_range(x, expected_min, expected_max) + return x + + def test_basic(self): + for inplace in [False, True]: + self._clip_type( + 'float', 1024, -12.8, 100.2, inplace=inplace) + self._clip_type( + 'float', 1024, 0, 0, inplace=inplace) + + self._clip_type( + 'int', 1024, -120, 100, inplace=inplace) + self._clip_type( + 'int', 1024, 0, 0, inplace=inplace) + + self._clip_type( + 'uint', 1024, 0, 0, inplace=inplace) + self._clip_type( + 'uint', 1024, -120, 100, inplace=inplace, expected_min=0) + + def test_record_array(self): + rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)], + dtype=[('x', '= 3)) + x = val.clip(min=3) + assert_(np.all(x >= 3)) + x = val.clip(max=4) + assert_(np.all(x <= 4)) + + def test_nan(self): + input_arr = np.array([-2., np.nan, 0.5, 3., 0.25, np.nan]) + result = input_arr.clip(-1, 1) + expected = np.array([-1., np.nan, 0.5, 1., 0.25, np.nan]) + assert_array_equal(result, expected) + + +class TestCompress: + def test_axis(self): + tgt = [[5, 6, 7, 8, 9]] + arr = np.arange(10).reshape(2, 5) + out = np.compress([0, 1], arr, axis=0) + assert_equal(out, tgt) + + tgt = [[1, 3], [6, 8]] + out = np.compress([0, 1, 0, 1, 0], arr, axis=1) + assert_equal(out, tgt) + + def test_truncate(self): + tgt = [[1], [6]] + arr = np.arange(10).reshape(2, 5) + out = np.compress([0, 1], arr, axis=1) + assert_equal(out, tgt) + + def test_flatten(self): + arr = np.arange(10).reshape(2, 5) + out = np.compress([0, 1], arr) + assert_equal(out, 1) + + +class TestPutmask: + def tst_basic(self, x, T, mask, val): + np.putmask(x, mask, val) + assert_equal(x[mask], np.array(val, T)) + + def test_ip_types(self): + unchecked_types = [bytes, str, np.void] + + x = np.random.random(1000)*100 + mask = x < 40 + + for val in [-100, 0, 15]: + for types in np.sctypes.values(): + for T in types: + if T not in unchecked_types: + self.tst_basic(x.copy().astype(T), T, mask, val) + + # Also test string of a length which uses an untypical length + dt = np.dtype("S3") + self.tst_basic(x.astype(dt), dt.type, mask, dt.type(val)[:3]) + + def test_mask_size(self): + assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5) + + @pytest.mark.parametrize('dtype', ('>i4', 'f8'), ('z', '= 2, 3) + + +class TestTake: + def tst_basic(self, x): + ind = list(range(x.shape[0])) + assert_array_equal(x.take(ind, axis=0), x) + + def test_ip_types(self): + unchecked_types = [bytes, str, np.void] + + x = np.random.random(24)*100 + x.shape = 2, 3, 4 + for types in np.sctypes.values(): + for T in types: + if T not in unchecked_types: + self.tst_basic(x.copy().astype(T)) + + # Also test string of a length which uses an untypical length + self.tst_basic(x.astype("S3")) + + def test_raise(self): + x = np.random.random(24)*100 + x.shape = 2, 3, 4 + assert_raises(IndexError, x.take, [0, 1, 2], axis=0) + assert_raises(IndexError, x.take, [-3], axis=0) + assert_array_equal(x.take([-1], axis=0)[0], x[1]) + + def test_clip(self): + x = np.random.random(24)*100 + x.shape = 2, 3, 4 + assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0]) + assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1]) + + def test_wrap(self): + x = np.random.random(24)*100 + x.shape = 2, 3, 4 + assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1]) + assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0]) + 
assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1]) + + @pytest.mark.parametrize('dtype', ('>i4', 'f8'), ('z', ' 16MB + d = np.zeros(4 * 1024 ** 2) + d.tofile(tmp_filename) + assert_equal(os.path.getsize(tmp_filename), d.nbytes) + assert_array_equal(d, np.fromfile(tmp_filename)) + # check offset + with open(tmp_filename, "r+b") as f: + f.seek(d.nbytes) + d.tofile(f) + assert_equal(os.path.getsize(tmp_filename), d.nbytes * 2) + # check append mode (gh-8329) + open(tmp_filename, "w").close() # delete file contents + with open(tmp_filename, "ab") as f: + d.tofile(f) + assert_array_equal(d, np.fromfile(tmp_filename)) + with open(tmp_filename, "ab") as f: + d.tofile(f) + assert_equal(os.path.getsize(tmp_filename), d.nbytes * 2) + + def test_io_open_buffered_fromfile(self, x, tmp_filename): + # gh-6632 + x.tofile(tmp_filename) + with io.open(tmp_filename, 'rb', buffering=-1) as f: + y = np.fromfile(f, dtype=x.dtype) + assert_array_equal(y, x.flat) + + def test_file_position_after_fromfile(self, tmp_filename): + # gh-4118 + sizes = [io.DEFAULT_BUFFER_SIZE//8, + io.DEFAULT_BUFFER_SIZE, + io.DEFAULT_BUFFER_SIZE*8] + + for size in sizes: + with open(tmp_filename, 'wb') as f: + f.seek(size-1) + f.write(b'\0') + + for mode in ['rb', 'r+b']: + err_msg = "%d %s" % (size, mode) + + with open(tmp_filename, mode) as f: + f.read(2) + np.fromfile(f, dtype=np.float64, count=1) + pos = f.tell() + assert_equal(pos, 10, err_msg=err_msg) + + def test_file_position_after_tofile(self, tmp_filename): + # gh-4118 + sizes = [io.DEFAULT_BUFFER_SIZE//8, + io.DEFAULT_BUFFER_SIZE, + io.DEFAULT_BUFFER_SIZE*8] + + for size in sizes: + err_msg = "%d" % (size,) + + with open(tmp_filename, 'wb') as f: + f.seek(size-1) + f.write(b'\0') + f.seek(10) + f.write(b'12') + np.array([0], dtype=np.float64).tofile(f) + pos = f.tell() + assert_equal(pos, 10 + 2 + 8, err_msg=err_msg) + + with open(tmp_filename, 'r+b') as f: + f.read(2) + f.seek(0, 1) # seek between read&write required by ANSI C + np.array([0], dtype=np.float64).tofile(f) + pos = f.tell() + assert_equal(pos, 10, err_msg=err_msg) + + def test_load_object_array_fromfile(self, tmp_filename): + # gh-12300 + with open(tmp_filename, 'w') as f: + # Ensure we have a file with consistent contents + pass + + with open(tmp_filename, 'rb') as f: + assert_raises_regex(ValueError, "Cannot read into object array", + np.fromfile, f, dtype=object) + + assert_raises_regex(ValueError, "Cannot read into object array", + np.fromfile, tmp_filename, dtype=object) + + def test_fromfile_offset(self, x, tmp_filename): + with open(tmp_filename, 'wb') as f: + x.tofile(f) + + with open(tmp_filename, 'rb') as f: + y = np.fromfile(f, dtype=x.dtype, offset=0) + assert_array_equal(y, x.flat) + + with open(tmp_filename, 'rb') as f: + count_items = len(x.flat) // 8 + offset_items = len(x.flat) // 4 + offset_bytes = x.dtype.itemsize * offset_items + y = np.fromfile( + f, dtype=x.dtype, count=count_items, offset=offset_bytes + ) + assert_array_equal( + y, x.flat[offset_items:offset_items+count_items] + ) + + # subsequent seeks should stack + offset_bytes = x.dtype.itemsize + z = np.fromfile(f, dtype=x.dtype, offset=offset_bytes) + assert_array_equal(z, x.flat[offset_items+count_items+1:]) + + with open(tmp_filename, 'wb') as f: + x.tofile(f, sep=",") + + with open(tmp_filename, 'rb') as f: + assert_raises_regex( + TypeError, + "'offset' argument only permitted for binary files", + np.fromfile, tmp_filename, dtype=x.dtype, + sep=",", offset=1) + + @pytest.mark.skipif(IS_PYPY, reason="bug in PyPy's 
PyNumber_AsSsize_t") + def test_fromfile_bad_dup(self, x, tmp_filename): + def dup_str(fd): + return 'abc' + + def dup_bigint(fd): + return 2**68 + + old_dup = os.dup + try: + with open(tmp_filename, 'wb') as f: + x.tofile(f) + for dup, exc in ((dup_str, TypeError), (dup_bigint, OSError)): + os.dup = dup + assert_raises(exc, np.fromfile, f) + finally: + os.dup = old_dup + + def _check_from(self, s, value, filename, **kw): + if 'sep' not in kw: + y = np.frombuffer(s, **kw) + else: + y = np.fromstring(s, **kw) + assert_array_equal(y, value) + + with open(filename, 'wb') as f: + f.write(s) + y = np.fromfile(filename, **kw) + assert_array_equal(y, value) + + @pytest.fixture(params=["period", "comma"]) + def decimal_sep_localization(self, request): + """ + Including this fixture in a test will automatically + execute it with both types of decimal separator. + + So:: + + def test_decimal(decimal_sep_localization): + pass + + is equivalent to the following two tests:: + + def test_decimal_period_separator(): + pass + + def test_decimal_comma_separator(): + with CommaDecimalPointLocale(): + pass + """ + if request.param == "period": + yield + elif request.param == "comma": + with CommaDecimalPointLocale(): + yield + else: + assert False, request.param + + def test_nan(self, tmp_filename, decimal_sep_localization): + self._check_from( + b"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)", + [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], + tmp_filename, + sep=' ') + + def test_inf(self, tmp_filename, decimal_sep_localization): + self._check_from( + b"inf +inf -inf infinity -Infinity iNfInItY -inF", + [np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf], + tmp_filename, + sep=' ') + + def test_numbers(self, tmp_filename, decimal_sep_localization): + self._check_from( + b"1.234 -1.234 .3 .3e55 -123133.1231e+133", + [1.234, -1.234, .3, .3e55, -123133.1231e+133], + tmp_filename, + sep=' ') + + def test_binary(self, tmp_filename): + self._check_from( + b'\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@', + np.array([1, 2, 3, 4]), + tmp_filename, + dtype=' 1 minute on mechanical hard drive + def test_big_binary(self): + """Test workarounds for 32-bit limited fwrite, fseek, and ftell + calls in windows. These normally would hang doing something like this. 
+ See http://projects.scipy.org/numpy/ticket/1660""" + if sys.platform != 'win32': + return + try: + # before workarounds, only up to 2**32-1 worked + fourgbplus = 2**32 + 2**16 + testbytes = np.arange(8, dtype=np.int8) + n = len(testbytes) + flike = tempfile.NamedTemporaryFile() + f = flike.file + np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f) + flike.seek(0) + a = np.fromfile(f, dtype=np.int8) + flike.close() + assert_(len(a) == fourgbplus) + # check only start and end for speed: + assert_((a[:n] == testbytes).all()) + assert_((a[-n:] == testbytes).all()) + except (MemoryError, ValueError): + pass + + def test_string(self, tmp_filename): + self._check_from(b'1,2,3,4', [1., 2., 3., 4.], tmp_filename, sep=',') + + def test_counted_string(self, tmp_filename, decimal_sep_localization): + self._check_from( + b'1,2,3,4', [1., 2., 3., 4.], tmp_filename, count=4, sep=',') + self._check_from( + b'1,2,3,4', [1., 2., 3.], tmp_filename, count=3, sep=',') + self._check_from( + b'1,2,3,4', [1., 2., 3., 4.], tmp_filename, count=-1, sep=',') + + def test_string_with_ws(self, tmp_filename): + self._check_from( + b'1 2 3 4 ', [1, 2, 3, 4], tmp_filename, dtype=int, sep=' ') + + def test_counted_string_with_ws(self, tmp_filename): + self._check_from( + b'1 2 3 4 ', [1, 2, 3], tmp_filename, count=3, dtype=int, + sep=' ') + + def test_ascii(self, tmp_filename, decimal_sep_localization): + self._check_from( + b'1 , 2 , 3 , 4', [1., 2., 3., 4.], tmp_filename, sep=',') + self._check_from( + b'1,2,3,4', [1., 2., 3., 4.], tmp_filename, dtype=float, sep=',') + + def test_malformed(self, tmp_filename, decimal_sep_localization): + with assert_warns(DeprecationWarning): + self._check_from( + b'1.234 1,234', [1.234, 1.], tmp_filename, sep=' ') + + def test_long_sep(self, tmp_filename): + self._check_from( + b'1_x_3_x_4_x_5', [1, 3, 4, 5], tmp_filename, sep='_x_') + + def test_dtype(self, tmp_filename): + v = np.array([1, 2, 3, 4], dtype=np.int_) + self._check_from(b'1,2,3,4', v, tmp_filename, sep=',', dtype=np.int_) + + def test_dtype_bool(self, tmp_filename): + # can't use _check_from because fromstring can't handle True/False + v = np.array([True, False, True, False], dtype=np.bool_) + s = b'1,0,-2.3,0' + with open(tmp_filename, 'wb') as f: + f.write(s) + y = np.fromfile(tmp_filename, sep=',', dtype=np.bool_) + assert_(y.dtype == '?') + assert_array_equal(y, v) + + def test_tofile_sep(self, tmp_filename, decimal_sep_localization): + x = np.array([1.51, 2, 3.51, 4], dtype=float) + with open(tmp_filename, 'w') as f: + x.tofile(f, sep=',') + with open(tmp_filename, 'r') as f: + s = f.read() + #assert_equal(s, '1.51,2.0,3.51,4.0') + y = np.array([float(p) for p in s.split(',')]) + assert_array_equal(x,y) + + def test_tofile_format(self, tmp_filename, decimal_sep_localization): + x = np.array([1.51, 2, 3.51, 4], dtype=float) + with open(tmp_filename, 'w') as f: + x.tofile(f, sep=',', format='%.2f') + with open(tmp_filename, 'r') as f: + s = f.read() + assert_equal(s, '1.51,2.00,3.51,4.00') + + def test_tofile_cleanup(self, tmp_filename): + x = np.zeros((10), dtype=object) + with open(tmp_filename, 'wb') as f: + assert_raises(IOError, lambda: x.tofile(f, sep='')) + # Dup-ed file handle should be closed or remove will fail on Windows OS + os.remove(tmp_filename) + + # Also make sure that we close the Python handle + assert_raises(IOError, lambda: x.tofile(tmp_filename)) + os.remove(tmp_filename) + + def test_fromfile_subarray_binary(self, tmp_filename): + # Test subarray dtypes which are absorbed into the 
shape + x = np.arange(24, dtype="i4").reshape(2, 3, 4) + x.tofile(tmp_filename) + res = np.fromfile(tmp_filename, dtype="(3,4)i4") + assert_array_equal(x, res) + + x_str = x.tobytes() + with assert_warns(DeprecationWarning): + # binary fromstring is deprecated + res = np.fromstring(x_str, dtype="(3,4)i4") + assert_array_equal(x, res) + + def test_parsing_subarray_unsupported(self, tmp_filename): + # We currently do not support parsing subarray dtypes + data = "12,42,13," * 50 + with pytest.raises(ValueError): + expected = np.fromstring(data, dtype="(3,)i", sep=",") + + with open(tmp_filename, "w") as f: + f.write(data) + + with pytest.raises(ValueError): + np.fromfile(tmp_filename, dtype="(3,)i", sep=",") + + def test_read_shorter_than_count_subarray(self, tmp_filename): + # Test that requesting more values does not cause any problems + # in conjunction with subarray dimensions being absorbed into the + # array dimension. + expected = np.arange(511 * 10, dtype="i").reshape(-1, 10) + + binary = expected.tobytes() + with pytest.raises(ValueError): + with pytest.warns(DeprecationWarning): + np.fromstring(binary, dtype="(10,)i", count=10000) + + expected.tofile(tmp_filename) + res = np.fromfile(tmp_filename, dtype="(10,)i", count=10000) + assert_array_equal(res, expected) + + +class TestFromBuffer: + @pytest.mark.parametrize('byteorder', ['<', '>']) + @pytest.mark.parametrize('dtype', [float, int, complex]) + def test_basic(self, byteorder, dtype): + dt = np.dtype(dtype).newbyteorder(byteorder) + x = (np.random.random((4, 7)) * 5).astype(dt) + buf = x.tobytes() + assert_array_equal(np.frombuffer(buf, dtype=dt), x.flat) + + def test_empty(self): + assert_array_equal(np.frombuffer(b''), np.array([])) + + +class TestFlat: + def setup(self): + a0 = np.arange(20.0) + a = a0.reshape(4, 5) + a0.shape = (4, 5) + a.flags.writeable = False + self.a = a + self.b = a[::2, ::2] + self.a0 = a0 + self.b0 = a0[::2, ::2] + + def test_contiguous(self): + testpassed = False + try: + self.a.flat[12] = 100.0 + except ValueError: + testpassed = True + assert_(testpassed) + assert_(self.a.flat[12] == 12.0) + + def test_discontiguous(self): + testpassed = False + try: + self.b.flat[4] = 100.0 + except ValueError: + testpassed = True + assert_(testpassed) + assert_(self.b.flat[4] == 12.0) + + def test___array__(self): + c = self.a.flat.__array__() + d = self.b.flat.__array__() + e = self.a0.flat.__array__() + f = self.b0.flat.__array__() + + assert_(c.flags.writeable is False) + assert_(d.flags.writeable is False) + # for 1.14 all are set to non-writeable on the way to replacing the + # UPDATEIFCOPY array returned for non-contiguous arrays. + assert_(e.flags.writeable is True) + assert_(f.flags.writeable is False) + with assert_warns(DeprecationWarning): + assert_(c.flags.updateifcopy is False) + with assert_warns(DeprecationWarning): + assert_(d.flags.updateifcopy is False) + with assert_warns(DeprecationWarning): + assert_(e.flags.updateifcopy is False) + with assert_warns(DeprecationWarning): + # UPDATEIFCOPY is removed. 
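+            # (its successor, WRITEBACKIFCOPY, is checked without a
+            # deprecation warning just below)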
+ assert_(f.flags.updateifcopy is False) + assert_(c.flags.writebackifcopy is False) + assert_(d.flags.writebackifcopy is False) + assert_(e.flags.writebackifcopy is False) + assert_(f.flags.writebackifcopy is False) + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_refcount(self): + # includes regression test for reference count error gh-13165 + inds = [np.intp(0), np.array([True]*self.a.size), np.array([0]), None] + indtype = np.dtype(np.intp) + rc_indtype = sys.getrefcount(indtype) + for ind in inds: + rc_ind = sys.getrefcount(ind) + for _ in range(100): + try: + self.a.flat[ind] + except IndexError: + pass + assert_(abs(sys.getrefcount(ind) - rc_ind) < 50) + assert_(abs(sys.getrefcount(indtype) - rc_indtype) < 50) + + def test_index_getset(self): + it = np.arange(10).reshape(2, 1, 5).flat + with pytest.raises(AttributeError): + it.index = 10 + + for _ in it: + pass + # Check the value of `.index` is updated correctly (see also gh-19153) + # If the type was incorrect, this would show up on big-endian machines + assert it.index == it.base.size + + +class TestResize: + + @_no_tracing + def test_basic(self): + x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + if IS_PYPY: + x.resize((5, 5), refcheck=False) + else: + x.resize((5, 5)) + assert_array_equal(x.flat[:9], + np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat) + assert_array_equal(x[9:].flat, 0) + + def test_check_reference(self): + x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + y = x + assert_raises(ValueError, x.resize, (5, 1)) + del y # avoid pyflakes unused variable warning. + + @_no_tracing + def test_int_shape(self): + x = np.eye(3) + if IS_PYPY: + x.resize(3, refcheck=False) + else: + x.resize(3) + assert_array_equal(x, np.eye(3)[0,:]) + + def test_none_shape(self): + x = np.eye(3) + x.resize(None) + assert_array_equal(x, np.eye(3)) + x.resize() + assert_array_equal(x, np.eye(3)) + + def test_0d_shape(self): + # to it multiple times to test it does not break alloc cache gh-9216 + for i in range(10): + x = np.empty((1,)) + x.resize(()) + assert_equal(x.shape, ()) + assert_equal(x.size, 1) + x = np.empty(()) + x.resize((1,)) + assert_equal(x.shape, (1,)) + assert_equal(x.size, 1) + + def test_invalid_arguments(self): + assert_raises(TypeError, np.eye(3).resize, 'hi') + assert_raises(ValueError, np.eye(3).resize, -1) + assert_raises(TypeError, np.eye(3).resize, order=1) + assert_raises(TypeError, np.eye(3).resize, refcheck='hi') + + @_no_tracing + def test_freeform_shape(self): + x = np.eye(3) + if IS_PYPY: + x.resize(3, 2, 1, refcheck=False) + else: + x.resize(3, 2, 1) + assert_(x.shape == (3, 2, 1)) + + @_no_tracing + def test_zeros_appended(self): + x = np.eye(3) + if IS_PYPY: + x.resize(2, 3, 3, refcheck=False) + else: + x.resize(2, 3, 3) + assert_array_equal(x[0], np.eye(3)) + assert_array_equal(x[1], np.zeros((3, 3))) + + @_no_tracing + def test_obj_obj(self): + # check memory is initialized on resize, gh-4857 + a = np.ones(10, dtype=[('k', object, 2)]) + if IS_PYPY: + a.resize(15, refcheck=False) + else: + a.resize(15,) + assert_equal(a.shape, (15,)) + assert_array_equal(a['k'][-5:], 0) + assert_array_equal(a['k'][:-5], 1) + + def test_empty_view(self): + # check that sizes containing a zero don't trigger a reallocate for + # already empty arrays + x = np.zeros((10, 0), int) + x_view = x[...] 
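+        # the view already has zero size, so these resizes must succeed
+        # without reallocating the (empty) buffer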
+ x_view.resize((0, 10)) + x_view.resize((0, 100)) + + def test_check_weakref(self): + x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + xref = weakref.ref(x) + assert_raises(ValueError, x.resize, (5, 1)) + del xref # avoid pyflakes unused variable warning. + + +class TestRecord: + def test_field_rename(self): + dt = np.dtype([('f', float), ('i', int)]) + dt.names = ['p', 'q'] + assert_equal(dt.names, ['p', 'q']) + + def test_multiple_field_name_occurrence(self): + def test_dtype_init(): + np.dtype([("A", "f8"), ("B", "f8"), ("A", "f8")]) + + # Error raised when multiple fields have the same name + assert_raises(ValueError, test_dtype_init) + + def test_bytes_fields(self): + # Bytes are not allowed in field names and not recognized in titles + # on Py3 + assert_raises(TypeError, np.dtype, [(b'a', int)]) + assert_raises(TypeError, np.dtype, [(('b', b'a'), int)]) + + dt = np.dtype([((b'a', 'b'), int)]) + assert_raises(TypeError, dt.__getitem__, b'a') + + x = np.array([(1,), (2,), (3,)], dtype=dt) + assert_raises(IndexError, x.__getitem__, b'a') + + y = x[0] + assert_raises(IndexError, y.__getitem__, b'a') + + def test_multiple_field_name_unicode(self): + def test_dtype_unicode(): + np.dtype([("\u20B9", "f8"), ("B", "f8"), ("\u20B9", "f8")]) + + # Error raised when multiple fields have the same name(unicode included) + assert_raises(ValueError, test_dtype_unicode) + + def test_fromarrays_unicode(self): + # A single name string provided to fromarrays() is allowed to be unicode + # on both Python 2 and 3: + x = np.core.records.fromarrays([[0], [1]], names=u'a,b', formats=u'i4,i4') + assert_equal(x['a'][0], 0) + assert_equal(x['b'][0], 1) + + def test_unicode_order(self): + # Test that we can sort with order as a unicode field name in both Python 2 and + # 3: + name = u'b' + x = np.array([1, 3, 2], dtype=[(name, int)]) + x.sort(order=name) + assert_equal(x[u'b'], np.array([1, 2, 3])) + + def test_field_names(self): + # Test unicode and 8-bit / byte strings can be used + a = np.zeros((1,), dtype=[('f1', 'i4'), + ('f2', 'i4'), + ('f3', [('sf1', 'i4')])]) + # byte string indexing fails gracefully + assert_raises(IndexError, a.__setitem__, b'f1', 1) + assert_raises(IndexError, a.__getitem__, b'f1') + assert_raises(IndexError, a['f1'].__setitem__, b'sf1', 1) + assert_raises(IndexError, a['f1'].__getitem__, b'sf1') + b = a.copy() + fn1 = str('f1') + b[fn1] = 1 + assert_equal(b[fn1], 1) + fnn = str('not at all') + assert_raises(ValueError, b.__setitem__, fnn, 1) + assert_raises(ValueError, b.__getitem__, fnn) + b[0][fn1] = 2 + assert_equal(b[fn1], 2) + # Subfield + assert_raises(ValueError, b[0].__setitem__, fnn, 1) + assert_raises(ValueError, b[0].__getitem__, fnn) + # Subfield + fn3 = str('f3') + sfn1 = str('sf1') + b[fn3][sfn1] = 1 + assert_equal(b[fn3][sfn1], 1) + assert_raises(ValueError, b[fn3].__setitem__, fnn, 1) + assert_raises(ValueError, b[fn3].__getitem__, fnn) + # multiple subfields + fn2 = str('f2') + b[fn2] = 3 + + assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3)) + assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2)) + assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,))) + + # non-ascii unicode field indexing is well behaved + assert_raises(ValueError, a.__setitem__, u'\u03e0', 1) + assert_raises(ValueError, a.__getitem__, u'\u03e0') + + def test_record_hash(self): + a = np.array([(1, 2), (1, 2)], dtype='i1,i2') + a.flags.writeable = False + b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')]) + b.flags.writeable = False + c = np.array([(1, 2), (3, 4)], dtype='i1,i2') + 
c.flags.writeable = False
+        assert_(hash(a[0]) == hash(a[1]))
+        assert_(hash(a[0]) == hash(b[0]))
+        assert_(hash(a[0]) != hash(b[1]))
+        assert_(hash(c[0]) == hash(a[0]) and c[0] == a[0])
+
+    def test_record_no_hash(self):
+        a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
+        assert_raises(TypeError, hash, a[0])
+
+    def test_empty_structure_creation(self):
+        # make sure these do not raise errors (gh-5631)
+        np.array([()], dtype={'names': [], 'formats': [],
+                              'offsets': [], 'itemsize': 12})
+        np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
+                                              'offsets': [], 'itemsize': 12})
+
+    def test_multifield_indexing_view(self):
+        a = np.ones(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u4')])
+        v = a[['a', 'c']]
+        assert_(v.base is a)
+        assert_(v.dtype == np.dtype({'names': ['a', 'c'],
+                                     'formats': ['i4', 'u4'],
+                                     'offsets': [0, 8]}))
+        v[:] = (4,5)
+        assert_equal(a[0].item(), (4, 1, 5))
+
+
+class TestView:
+    def test_basic(self):
+        x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
+                     dtype=[('r', np.int8), ('g', np.int8),
+                            ('b', np.int8), ('a', np.int8)])
+        # We must be specific about the endianness here:
+        y = x.view(dtype='<i4')
+        # ... and again without the keyword.
+        z = x.view('<i4')
+        for t in (y, z):
+            assert_equal(t[0], 0x04030201)
+            assert_equal(t[1], 0x08070605)
+
+
+def _mean(a, **args):
+    return a.mean(**args)
+
+
+def _var(a, **args):
+    return a.var(**args)
+
+
+def _std(a, **args):
+    return a.std(**args)
+
+
+class TestStats:
+
+    funcs = [_mean, _var, _std]
+
+    def setup(self):
+        np.random.seed(range(3))
+        self.rmat = np.random.random((4, 5))
+        self.cmat = self.rmat + 1j * self.rmat
+        self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat])
+        self.omat = self.omat.reshape(4, 5)
+
+    def test_ddof_too_big(self):
+        dim = self.rmat.shape[1]
+        for f in [_var, _std]:
+            for ddof in range(dim, dim + 2):
+                with warnings.catch_warnings(record=True) as w:
+                    warnings.simplefilter('always')
+                    res = f(self.rmat, axis=1, ddof=ddof)
+                    assert_(not (res < 0).any())
+                    assert_(len(w) > 0)
+                    assert_(issubclass(w[0].category, RuntimeWarning))
+
+    def test_empty(self):
+        A = np.zeros((0, 3))
+        for f in self.funcs:
+            for axis in [0, None]:
+                with warnings.catch_warnings(record=True) as w:
+                    warnings.simplefilter('always')
+                    assert_(np.isnan(f(A, axis=axis)).all())
+                    assert_(len(w) > 0)
+                    assert_(issubclass(w[0].category, RuntimeWarning))
+            for axis in [1]:
+                with warnings.catch_warnings(record=True) as w:
+                    warnings.simplefilter('always')
+                    assert_equal(f(A, axis=axis), np.zeros([]))
+
+    def test_mean_values(self):
+        for mat in [self.rmat, self.cmat, self.omat]:
+            for axis in [0, 1]:
+                tgt = mat.sum(axis=axis)
+                res = _mean(mat, axis=axis) * mat.shape[axis]
+                assert_almost_equal(res, tgt)
+            for axis in [None]:
+                tgt = mat.sum(axis=axis)
+                res = _mean(mat, axis=axis) * np.prod(mat.shape)
+                assert_almost_equal(res, tgt)
+
+    def test_mean_float16(self):
+        # This fails if the sum inside mean is done in float16 instead
+        # of float32.
+        assert_(_mean(np.ones(100000, dtype='float16')) == 1)
+
+    def test_mean_axis_error(self):
+        # Ensure that AxisError is raised instead of IndexError when axis is
+        # out of bounds, see gh-15817.
+ with assert_raises(np.core._exceptions.AxisError): + np.arange(10).mean(axis=2) + + def test_mean_where(self): + a = np.arange(16).reshape((4, 4)) + wh_full = np.array([[False, True, False, True], + [True, False, True, False], + [True, True, False, False], + [False, False, True, True]]) + wh_partial = np.array([[False], + [True], + [True], + [False]]) + _cases = [(1, True, [1.5, 5.5, 9.5, 13.5]), + (0, wh_full, [6., 5., 10., 9.]), + (1, wh_full, [2., 5., 8.5, 14.5]), + (0, wh_partial, [6., 7., 8., 9.])] + for _ax, _wh, _res in _cases: + assert_allclose(a.mean(axis=_ax, where=_wh), + np.array(_res)) + assert_allclose(np.mean(a, axis=_ax, where=_wh), + np.array(_res)) + + a3d = np.arange(16).reshape((2, 2, 4)) + _wh_partial = np.array([False, True, True, False]) + _res = [[1.5, 5.5], [9.5, 13.5]] + assert_allclose(a3d.mean(axis=2, where=_wh_partial), + np.array(_res)) + assert_allclose(np.mean(a3d, axis=2, where=_wh_partial), + np.array(_res)) + + with pytest.warns(RuntimeWarning) as w: + assert_allclose(a.mean(axis=1, where=wh_partial), + np.array([np.nan, 5.5, 9.5, np.nan])) + with pytest.warns(RuntimeWarning) as w: + assert_equal(a.mean(where=False), np.nan) + with pytest.warns(RuntimeWarning) as w: + assert_equal(np.mean(a, where=False), np.nan) + + def test_var_values(self): + for mat in [self.rmat, self.cmat, self.omat]: + for axis in [0, 1, None]: + msqr = _mean(mat * mat.conj(), axis=axis) + mean = _mean(mat, axis=axis) + tgt = msqr - mean * mean.conjugate() + res = _var(mat, axis=axis) + assert_almost_equal(res, tgt) + + @pytest.mark.parametrize(('complex_dtype', 'ndec'), ( + ('complex64', 6), + ('complex128', 7), + ('clongdouble', 7), + )) + def test_var_complex_values(self, complex_dtype, ndec): + # Test fast-paths for every builtin complex type + for axis in [0, 1, None]: + mat = self.cmat.copy().astype(complex_dtype) + msqr = _mean(mat * mat.conj(), axis=axis) + mean = _mean(mat, axis=axis) + tgt = msqr - mean * mean.conjugate() + res = _var(mat, axis=axis) + assert_almost_equal(res, tgt, decimal=ndec) + + def test_var_dimensions(self): + # _var paths for complex number introduce additions on views that + # increase dimensions. Ensure this generalizes to higher dims + mat = np.stack([self.cmat]*3) + for axis in [0, 1, 2, -1, None]: + msqr = _mean(mat * mat.conj(), axis=axis) + mean = _mean(mat, axis=axis) + tgt = msqr - mean * mean.conjugate() + res = _var(mat, axis=axis) + assert_almost_equal(res, tgt) + + def test_var_complex_byteorder(self): + # Test that var fast-path does not cause failures for complex arrays + # with non-native byteorder + cmat = self.cmat.copy().astype('complex128') + cmat_swapped = cmat.astype(cmat.dtype.newbyteorder()) + assert_almost_equal(cmat.var(), cmat_swapped.var()) + + def test_var_axis_error(self): + # Ensure that AxisError is raised instead of IndexError when axis is + # out of bounds, see gh-15817. 
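+        # np.arange(10) is 1-D, so axis=2 is out of bounds; AxisError
+        # subclasses both ValueError and IndexError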
+ with assert_raises(np.core._exceptions.AxisError): + np.arange(10).var(axis=2) + + def test_var_where(self): + a = np.arange(25).reshape((5, 5)) + wh_full = np.array([[False, True, False, True, True], + [True, False, True, True, False], + [True, True, False, False, True], + [False, True, True, False, True], + [True, False, True, True, False]]) + wh_partial = np.array([[False], + [True], + [True], + [False], + [True]]) + _cases = [(0, True, [50., 50., 50., 50., 50.]), + (1, True, [2., 2., 2., 2., 2.])] + for _ax, _wh, _res in _cases: + assert_allclose(a.var(axis=_ax, where=_wh), + np.array(_res)) + assert_allclose(np.var(a, axis=_ax, where=_wh), + np.array(_res)) + + a3d = np.arange(16).reshape((2, 2, 4)) + _wh_partial = np.array([False, True, True, False]) + _res = [[0.25, 0.25], [0.25, 0.25]] + assert_allclose(a3d.var(axis=2, where=_wh_partial), + np.array(_res)) + assert_allclose(np.var(a3d, axis=2, where=_wh_partial), + np.array(_res)) + + assert_allclose(np.var(a, axis=1, where=wh_full), + np.var(a[wh_full].reshape((5, 3)), axis=1)) + assert_allclose(np.var(a, axis=0, where=wh_partial), + np.var(a[wh_partial[:,0]], axis=0)) + with pytest.warns(RuntimeWarning) as w: + assert_equal(a.var(where=False), np.nan) + with pytest.warns(RuntimeWarning) as w: + assert_equal(np.var(a, where=False), np.nan) + + def test_std_values(self): + for mat in [self.rmat, self.cmat, self.omat]: + for axis in [0, 1, None]: + tgt = np.sqrt(_var(mat, axis=axis)) + res = _std(mat, axis=axis) + assert_almost_equal(res, tgt) + + def test_std_where(self): + a = np.arange(25).reshape((5,5))[::-1] + whf = np.array([[False, True, False, True, True], + [True, False, True, False, True], + [True, True, False, True, False], + [True, False, True, True, False], + [False, True, False, True, True]]) + whp = np.array([[False], + [False], + [True], + [True], + [False]]) + _cases = [ + (0, True, 7.07106781*np.ones((5))), + (1, True, 1.41421356*np.ones((5))), + (0, whf, + np.array([4.0824829 , 8.16496581, 5., 7.39509973, 8.49836586])), + (0, whp, 2.5*np.ones((5))) + ] + for _ax, _wh, _res in _cases: + assert_allclose(a.std(axis=_ax, where=_wh), _res) + assert_allclose(np.std(a, axis=_ax, where=_wh), _res) + + a3d = np.arange(16).reshape((2, 2, 4)) + _wh_partial = np.array([False, True, True, False]) + _res = [[0.5, 0.5], [0.5, 0.5]] + assert_allclose(a3d.std(axis=2, where=_wh_partial), + np.array(_res)) + assert_allclose(np.std(a3d, axis=2, where=_wh_partial), + np.array(_res)) + + assert_allclose(a.std(axis=1, where=whf), + np.std(a[whf].reshape((5,3)), axis=1)) + assert_allclose(np.std(a, axis=1, where=whf), + (a[whf].reshape((5,3))).std(axis=1)) + assert_allclose(a.std(axis=0, where=whp), + np.std(a[whp[:,0]], axis=0)) + assert_allclose(np.std(a, axis=0, where=whp), + (a[whp[:,0]]).std(axis=0)) + with pytest.warns(RuntimeWarning) as w: + assert_equal(a.std(where=False), np.nan) + with pytest.warns(RuntimeWarning) as w: + assert_equal(np.std(a, where=False), np.nan) + + def test_subclass(self): + class TestArray(np.ndarray): + def __new__(cls, data, info): + result = np.array(data) + result = result.view(cls) + result.info = info + return result + + def __array_finalize__(self, obj): + self.info = getattr(obj, "info", '') + + dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba') + res = dat.mean(1) + assert_(res.info == dat.info) + res = dat.std(1) + assert_(res.info == dat.info) + res = dat.var(1) + assert_(res.info == dat.info) + + +class TestVdot: + def test_basic(self): + dt_numeric = np.typecodes['AllFloat'] + 
np.typecodes['AllInteger'] + dt_complex = np.typecodes['Complex'] + + # test real + a = np.eye(3) + for dt in dt_numeric + 'O': + b = a.astype(dt) + res = np.vdot(b, b) + assert_(np.isscalar(res)) + assert_equal(np.vdot(b, b), 3) + + # test complex + a = np.eye(3) * 1j + for dt in dt_complex + 'O': + b = a.astype(dt) + res = np.vdot(b, b) + assert_(np.isscalar(res)) + assert_equal(np.vdot(b, b), 3) + + # test boolean + b = np.eye(3, dtype=bool) + res = np.vdot(b, b) + assert_(np.isscalar(res)) + assert_equal(np.vdot(b, b), True) + + def test_vdot_array_order(self): + a = np.array([[1, 2], [3, 4]], order='C') + b = np.array([[1, 2], [3, 4]], order='F') + res = np.vdot(a, a) + + # integer arrays are exact + assert_equal(np.vdot(a, b), res) + assert_equal(np.vdot(b, a), res) + assert_equal(np.vdot(b, b), res) + + def test_vdot_uncontiguous(self): + for size in [2, 1000]: + # Different sizes match different branches in vdot. + a = np.zeros((size, 2, 2)) + b = np.zeros((size, 2, 2)) + a[:, 0, 0] = np.arange(size) + b[:, 0, 0] = np.arange(size) + 1 + # Make a and b uncontiguous: + a = a[..., 0] + b = b[..., 0] + + assert_equal(np.vdot(a, b), + np.vdot(a.flatten(), b.flatten())) + assert_equal(np.vdot(a, b.copy()), + np.vdot(a.flatten(), b.flatten())) + assert_equal(np.vdot(a.copy(), b), + np.vdot(a.flatten(), b.flatten())) + assert_equal(np.vdot(a.copy('F'), b), + np.vdot(a.flatten(), b.flatten())) + assert_equal(np.vdot(a, b.copy('F')), + np.vdot(a.flatten(), b.flatten())) + + +class TestDot: + def setup(self): + np.random.seed(128) + self.A = np.random.rand(4, 2) + self.b1 = np.random.rand(2, 1) + self.b2 = np.random.rand(2) + self.b3 = np.random.rand(1, 2) + self.b4 = np.random.rand(4) + self.N = 7 + + def test_dotmatmat(self): + A = self.A + res = np.dot(A.transpose(), A) + tgt = np.array([[1.45046013, 0.86323640], + [0.86323640, 0.84934569]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotmatvec(self): + A, b1 = self.A, self.b1 + res = np.dot(A, b1) + tgt = np.array([[0.32114320], [0.04889721], + [0.15696029], [0.33612621]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotmatvec2(self): + A, b2 = self.A, self.b2 + res = np.dot(A, b2) + tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecmat(self): + A, b4 = self.A, self.b4 + res = np.dot(b4, A) + tgt = np.array([1.23495091, 1.12222648]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecmat2(self): + b3, A = self.b3, self.A + res = np.dot(b3, A.transpose()) + tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecmat3(self): + A, b4 = self.A, self.b4 + res = np.dot(A.transpose(), b4) + tgt = np.array([1.23495091, 1.12222648]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecvecouter(self): + b1, b3 = self.b1, self.b3 + res = np.dot(b1, b3) + tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecvecinner(self): + b1, b3 = self.b1, self.b3 + res = np.dot(b3, b1) + tgt = np.array([[ 0.23129668]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotcolumnvect1(self): + b1 = np.ones((3, 1)) + b2 = [5.3] + res = np.dot(b1, b2) + tgt = np.array([5.3, 5.3, 5.3]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotcolumnvect2(self): + b1 = np.ones((3, 1)).transpose() + b2 = [6.2] + res = np.dot(b2, b1) + tgt = 
np.array([6.2, 6.2, 6.2]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecscalar(self): + np.random.seed(100) + b1 = np.random.rand(1, 1) + b2 = np.random.rand(1, 4) + res = np.dot(b1, b2) + tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecscalar2(self): + np.random.seed(100) + b1 = np.random.rand(4, 1) + b2 = np.random.rand(1, 1) + res = np.dot(b1, b2) + tgt = np.array([[0.00256425],[0.00131359],[0.00200324],[ 0.00398638]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_all(self): + dims = [(), (1,), (1, 1)] + dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)] + for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)): + b1 = np.zeros(dim1) + b2 = np.zeros(dim2) + res = np.dot(b1, b2) + tgt = np.zeros(dim) + assert_(res.shape == tgt.shape) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_vecobject(self): + class Vec: + def __init__(self, sequence=None): + if sequence is None: + sequence = [] + self.array = np.array(sequence) + + def __add__(self, other): + out = Vec() + out.array = self.array + other.array + return out + + def __sub__(self, other): + out = Vec() + out.array = self.array - other.array + return out + + def __mul__(self, other): # with scalar + out = Vec(self.array.copy()) + out.array *= other + return out + + def __rmul__(self, other): + return self*other + + U_non_cont = np.transpose([[1., 1.], [1., 2.]]) + U_cont = np.ascontiguousarray(U_non_cont) + x = np.array([Vec([1., 0.]), Vec([0., 1.])]) + zeros = np.array([Vec([0., 0.]), Vec([0., 0.])]) + zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x) + assert_equal(zeros[0].array, zeros_test[0].array) + assert_equal(zeros[1].array, zeros_test[1].array) + + def test_dot_2args(self): + from numpy.core.multiarray import dot + + a = np.array([[1, 2], [3, 4]], dtype=float) + b = np.array([[1, 0], [1, 1]], dtype=float) + c = np.array([[3, 2], [7, 4]], dtype=float) + + d = dot(a, b) + assert_allclose(c, d) + + def test_dot_3args(self): + from numpy.core.multiarray import dot + + np.random.seed(22) + f = np.random.random_sample((1024, 16)) + v = np.random.random_sample((16, 32)) + + r = np.empty((1024, 32)) + for i in range(12): + dot(f, v, r) + if HAS_REFCOUNT: + assert_equal(sys.getrefcount(r), 2) + r2 = dot(f, v, out=None) + assert_array_equal(r2, r) + assert_(r is dot(f, v, out=r)) + + v = v[:, 0].copy() # v.shape == (16,) + r = r[:, 0].copy() # r.shape == (1024,) + r2 = dot(f, v) + assert_(r is dot(f, v, r)) + assert_array_equal(r2, r) + + def test_dot_3args_errors(self): + from numpy.core.multiarray import dot + + np.random.seed(22) + f = np.random.random_sample((1024, 16)) + v = np.random.random_sample((16, 32)) + + r = np.empty((1024, 31)) + assert_raises(ValueError, dot, f, v, r) + + r = np.empty((1024,)) + assert_raises(ValueError, dot, f, v, r) + + r = np.empty((32,)) + assert_raises(ValueError, dot, f, v, r) + + r = np.empty((32, 1024)) + assert_raises(ValueError, dot, f, v, r) + assert_raises(ValueError, dot, f, v, r.T) + + r = np.empty((1024, 64)) + assert_raises(ValueError, dot, f, v, r[:, ::2]) + assert_raises(ValueError, dot, f, v, r[:, :32]) + + r = np.empty((1024, 32), dtype=np.float32) + assert_raises(ValueError, dot, f, v, r) + + r = np.empty((1024, 32), dtype=int) + assert_raises(ValueError, dot, f, v, r) + + def test_dot_array_order(self): + a = np.array([[1, 2], [3, 4]], order='C') + b = np.array([[1, 2], [3, 4]], order='F') + res = np.dot(a, a) + + # 
integer arrays are exact + assert_equal(np.dot(a, b), res) + assert_equal(np.dot(b, a), res) + assert_equal(np.dot(b, b), res) + + def test_accelerate_framework_sgemv_fix(self): + + def aligned_array(shape, align, dtype, order='C'): + d = dtype(0) + N = np.prod(shape) + tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8) + address = tmp.__array_interface__["data"][0] + for offset in range(align): + if (address + offset) % align == 0: + break + tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype) + return tmp.reshape(shape, order=order) + + def as_aligned(arr, align, dtype, order='C'): + aligned = aligned_array(arr.shape, align, dtype, order) + aligned[:] = arr[:] + return aligned + + def assert_dot_close(A, X, desired): + assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7) + + m = aligned_array(100, 15, np.float32) + s = aligned_array((100, 100), 15, np.float32) + np.dot(s, m) # this will always segfault if the bug is present + + testdata = itertools.product((15, 32), (10000,), (200, 89), ('C', 'F')) + for align, m, n, a_order in testdata: + # Calculation in double precision + A_d = np.random.rand(m, n) + X_d = np.random.rand(n) + desired = np.dot(A_d, X_d) + # Calculation with aligned single precision + A_f = as_aligned(A_d, align, np.float32, order=a_order) + X_f = as_aligned(X_d, align, np.float32) + assert_dot_close(A_f, X_f, desired) + # Strided A rows + A_d_2 = A_d[::2] + desired = np.dot(A_d_2, X_d) + A_f_2 = A_f[::2] + assert_dot_close(A_f_2, X_f, desired) + # Strided A columns, strided X vector + A_d_22 = A_d_2[:, ::2] + X_d_2 = X_d[::2] + desired = np.dot(A_d_22, X_d_2) + A_f_22 = A_f_2[:, ::2] + X_f_2 = X_f[::2] + assert_dot_close(A_f_22, X_f_2, desired) + # Check the strides are as expected + if a_order == 'F': + assert_equal(A_f_22.strides, (8, 8 * m)) + else: + assert_equal(A_f_22.strides, (8 * n, 8)) + assert_equal(X_f_2.strides, (8,)) + # Strides in A rows + cols only + X_f_2c = as_aligned(X_f_2, align, np.float32) + assert_dot_close(A_f_22, X_f_2c, desired) + # Strides just in A cols + A_d_12 = A_d[:, ::2] + desired = np.dot(A_d_12, X_d_2) + A_f_12 = A_f[:, ::2] + assert_dot_close(A_f_12, X_f_2c, desired) + # Strides in A cols and X + assert_dot_close(A_f_12, X_f_2, desired) + + + +class MatmulCommon: + """Common tests for '@' operator and numpy.matmul. + + """ + # Should work with these types. Will want to add + # "O" at some point + types = "?bhilqBHILQefdgFDGO" + + def test_exceptions(self): + dims = [ + ((1,), (2,)), # mismatched vector vector + ((2, 1,), (2,)), # mismatched matrix vector + ((2,), (1, 2)), # mismatched vector matrix + ((1, 2), (3, 1)), # mismatched matrix matrix + ((1,), ()), # vector scalar + ((), (1)), # scalar vector + ((1, 1), ()), # matrix scalar + ((), (1, 1)), # scalar matrix + ((2, 2, 1), (3, 1, 2)), # cannot broadcast + ] + + for dt, (dm1, dm2) in itertools.product(self.types, dims): + a = np.ones(dm1, dtype=dt) + b = np.ones(dm2, dtype=dt) + assert_raises(ValueError, self.matmul, a, b) + + def test_shapes(self): + dims = [ + ((1, 1), (2, 1, 1)), # broadcast first argument + ((2, 1, 1), (1, 1)), # broadcast second argument + ((2, 1, 1), (2, 1, 1)), # matrix stack sizes match + ] + + for dt, (dm1, dm2) in itertools.product(self.types, dims): + a = np.ones(dm1, dtype=dt) + b = np.ones(dm2, dtype=dt) + res = self.matmul(a, b) + assert_(res.shape == (2, 1, 1)) + + # vector vector returns scalars. 
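+        # (matmul contracts both 1-D operands, so the result is 0-d)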
+ for dt in self.types: + a = np.ones((2,), dtype=dt) + b = np.ones((2,), dtype=dt) + c = self.matmul(a, b) + assert_(np.array(c).shape == ()) + + def test_result_types(self): + mat = np.ones((1,1)) + vec = np.ones((1,)) + for dt in self.types: + m = mat.astype(dt) + v = vec.astype(dt) + for arg in [(m, v), (v, m), (m, m)]: + res = self.matmul(*arg) + assert_(res.dtype == dt) + + # vector vector returns scalars + if dt != "O": + res = self.matmul(v, v) + assert_(type(res) is np.dtype(dt).type) + + def test_scalar_output(self): + vec1 = np.array([2]) + vec2 = np.array([3, 4]).reshape(1, -1) + tgt = np.array([6, 8]) + for dt in self.types[1:]: + v1 = vec1.astype(dt) + v2 = vec2.astype(dt) + res = self.matmul(v1, v2) + assert_equal(res, tgt) + res = self.matmul(v2.T, v1) + assert_equal(res, tgt) + + # boolean type + vec = np.array([True, True], dtype='?').reshape(1, -1) + res = self.matmul(vec[:, 0], vec) + assert_equal(res, True) + + def test_vector_vector_values(self): + vec1 = np.array([1, 2]) + vec2 = np.array([3, 4]).reshape(-1, 1) + tgt1 = np.array([11]) + tgt2 = np.array([[3, 6], [4, 8]]) + for dt in self.types[1:]: + v1 = vec1.astype(dt) + v2 = vec2.astype(dt) + res = self.matmul(v1, v2) + assert_equal(res, tgt1) + # no broadcast, we must make v1 into a 2d ndarray + res = self.matmul(v2, v1.reshape(1, -1)) + assert_equal(res, tgt2) + + # boolean type + vec = np.array([True, True], dtype='?') + res = self.matmul(vec, vec) + assert_equal(res, True) + + def test_vector_matrix_values(self): + vec = np.array([1, 2]) + mat1 = np.array([[1, 2], [3, 4]]) + mat2 = np.stack([mat1]*2, axis=0) + tgt1 = np.array([7, 10]) + tgt2 = np.stack([tgt1]*2, axis=0) + for dt in self.types[1:]: + v = vec.astype(dt) + m1 = mat1.astype(dt) + m2 = mat2.astype(dt) + res = self.matmul(v, m1) + assert_equal(res, tgt1) + res = self.matmul(v, m2) + assert_equal(res, tgt2) + + # boolean type + vec = np.array([True, False]) + mat1 = np.array([[True, False], [False, True]]) + mat2 = np.stack([mat1]*2, axis=0) + tgt1 = np.array([True, False]) + tgt2 = np.stack([tgt1]*2, axis=0) + + res = self.matmul(vec, mat1) + assert_equal(res, tgt1) + res = self.matmul(vec, mat2) + assert_equal(res, tgt2) + + def test_matrix_vector_values(self): + vec = np.array([1, 2]) + mat1 = np.array([[1, 2], [3, 4]]) + mat2 = np.stack([mat1]*2, axis=0) + tgt1 = np.array([5, 11]) + tgt2 = np.stack([tgt1]*2, axis=0) + for dt in self.types[1:]: + v = vec.astype(dt) + m1 = mat1.astype(dt) + m2 = mat2.astype(dt) + res = self.matmul(m1, v) + assert_equal(res, tgt1) + res = self.matmul(m2, v) + assert_equal(res, tgt2) + + # boolean type + vec = np.array([True, False]) + mat1 = np.array([[True, False], [False, True]]) + mat2 = np.stack([mat1]*2, axis=0) + tgt1 = np.array([True, False]) + tgt2 = np.stack([tgt1]*2, axis=0) + + res = self.matmul(vec, mat1) + assert_equal(res, tgt1) + res = self.matmul(vec, mat2) + assert_equal(res, tgt2) + + def test_matrix_matrix_values(self): + mat1 = np.array([[1, 2], [3, 4]]) + mat2 = np.array([[1, 0], [1, 1]]) + mat12 = np.stack([mat1, mat2], axis=0) + mat21 = np.stack([mat2, mat1], axis=0) + tgt11 = np.array([[7, 10], [15, 22]]) + tgt12 = np.array([[3, 2], [7, 4]]) + tgt21 = np.array([[1, 2], [4, 6]]) + tgt12_21 = np.stack([tgt12, tgt21], axis=0) + tgt11_12 = np.stack((tgt11, tgt12), axis=0) + tgt11_21 = np.stack((tgt11, tgt21), axis=0) + for dt in self.types[1:]: + m1 = mat1.astype(dt) + m2 = mat2.astype(dt) + m12 = mat12.astype(dt) + m21 = mat21.astype(dt) + + # matrix @ matrix + res = self.matmul(m1, m2) + 
assert_equal(res, tgt12) + res = self.matmul(m2, m1) + assert_equal(res, tgt21) + + # stacked @ matrix + res = self.matmul(m12, m1) + assert_equal(res, tgt11_21) + + # matrix @ stacked + res = self.matmul(m1, m12) + assert_equal(res, tgt11_12) + + # stacked @ stacked + res = self.matmul(m12, m21) + assert_equal(res, tgt12_21) + + # boolean type + m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_) + m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_) + m12 = np.stack([m1, m2], axis=0) + m21 = np.stack([m2, m1], axis=0) + tgt11 = m1 + tgt12 = m1 + tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_) + tgt12_21 = np.stack([tgt12, tgt21], axis=0) + tgt11_12 = np.stack((tgt11, tgt12), axis=0) + tgt11_21 = np.stack((tgt11, tgt21), axis=0) + + # matrix @ matrix + res = self.matmul(m1, m2) + assert_equal(res, tgt12) + res = self.matmul(m2, m1) + assert_equal(res, tgt21) + + # stacked @ matrix + res = self.matmul(m12, m1) + assert_equal(res, tgt11_21) + + # matrix @ stacked + res = self.matmul(m1, m12) + assert_equal(res, tgt11_12) + + # stacked @ stacked + res = self.matmul(m12, m21) + assert_equal(res, tgt12_21) + + +class TestMatmul(MatmulCommon): + matmul = np.matmul + + def test_out_arg(self): + a = np.ones((5, 2), dtype=float) + b = np.array([[1, 3], [5, 7]], dtype=float) + tgt = np.dot(a, b) + + # test as positional argument + msg = "out positional argument" + out = np.zeros((5, 2), dtype=float) + self.matmul(a, b, out) + assert_array_equal(out, tgt, err_msg=msg) + + # test as keyword argument + msg = "out keyword argument" + out = np.zeros((5, 2), dtype=float) + self.matmul(a, b, out=out) + assert_array_equal(out, tgt, err_msg=msg) + + # test out with not allowed type cast (safe casting) + msg = "Cannot cast ufunc .* output" + out = np.zeros((5, 2), dtype=np.int32) + assert_raises_regex(TypeError, msg, self.matmul, a, b, out=out) + + # test out with type upcast to complex + out = np.zeros((5, 2), dtype=np.complex128) + c = self.matmul(a, b, out=out) + assert_(c is out) + with suppress_warnings() as sup: + sup.filter(np.ComplexWarning, '') + c = c.astype(tgt.dtype) + assert_array_equal(c, tgt) + + def test_empty_out(self): + # Check that the output cannot be broadcast, so that it cannot be + # size zero when the outer dimensions (iterator size) has size zero. + arr = np.ones((0, 1, 1)) + out = np.ones((1, 1, 1)) + assert self.matmul(arr, arr).shape == (0, 1, 1) + + with pytest.raises(ValueError, match=r"non-broadcastable"): + self.matmul(arr, arr, out=out) + + def test_out_contiguous(self): + a = np.ones((5, 2), dtype=float) + b = np.array([[1, 3], [5, 7]], dtype=float) + v = np.array([1, 3], dtype=float) + tgt = np.dot(a, b) + tgt_mv = np.dot(a, v) + + # test out non-contiguous + out = np.ones((5, 2, 2), dtype=float) + c = self.matmul(a, b, out=out[..., 0]) + assert c.base is out + assert_array_equal(c, tgt) + c = self.matmul(a, v, out=out[:, 0, 0]) + assert_array_equal(c, tgt_mv) + c = self.matmul(v, a.T, out=out[:, 0, 0]) + assert_array_equal(c, tgt_mv) + + # test out contiguous in only last dim + out = np.ones((10, 2), dtype=float) + c = self.matmul(a, b, out=out[::2, :]) + assert_array_equal(c, tgt) + + # test transposes of out, args + out = np.ones((5, 2), dtype=float) + c = self.matmul(b.T, a.T, out=out.T) + assert_array_equal(out, tgt) + + m1 = np.arange(15.).reshape(5, 3) + m2 = np.arange(21.).reshape(3, 7) + m3 = np.arange(30.).reshape(5, 6)[:, ::2] # non-contiguous + vc = np.arange(10.) + vr = np.arange(6.) 
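+    # m0 supplies the zero-sized operands for the size == 0 cases below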
+ m0 = np.zeros((3, 0)) + @pytest.mark.parametrize('args', ( + # matrix-matrix + (m1, m2), (m2.T, m1.T), (m2.T.copy(), m1.T), (m2.T, m1.T.copy()), + # matrix-matrix-transpose, contiguous and non + (m1, m1.T), (m1.T, m1), (m1, m3.T), (m3, m1.T), + (m3, m3.T), (m3.T, m3), + # matrix-matrix non-contiguous + (m3, m2), (m2.T, m3.T), (m2.T.copy(), m3.T), + # vector-matrix, matrix-vector, contiguous + (m1, vr[:3]), (vc[:5], m1), (m1.T, vc[:5]), (vr[:3], m1.T), + # vector-matrix, matrix-vector, vector non-contiguous + (m1, vr[::2]), (vc[::2], m1), (m1.T, vc[::2]), (vr[::2], m1.T), + # vector-matrix, matrix-vector, matrix non-contiguous + (m3, vr[:3]), (vc[:5], m3), (m3.T, vc[:5]), (vr[:3], m3.T), + # vector-matrix, matrix-vector, both non-contiguous + (m3, vr[::2]), (vc[::2], m3), (m3.T, vc[::2]), (vr[::2], m3.T), + # size == 0 + (m0, m0.T), (m0.T, m0), (m1, m0), (m0.T, m1.T), + )) + def test_dot_equivalent(self, args): + r1 = np.matmul(*args) + r2 = np.dot(*args) + assert_equal(r1, r2) + + r3 = np.matmul(args[0].copy(), args[1].copy()) + assert_equal(r1, r3) + + def test_matmul_object(self): + import fractions + + f = np.vectorize(fractions.Fraction) + def random_ints(): + return np.random.randint(1, 1000, size=(10, 3, 3)) + M1 = f(random_ints(), random_ints()) + M2 = f(random_ints(), random_ints()) + + M3 = self.matmul(M1, M2) + + [N1, N2, N3] = [a.astype(float) for a in [M1, M2, M3]] + + assert_allclose(N3, self.matmul(N1, N2)) + + def test_matmul_object_type_scalar(self): + from fractions import Fraction as F + v = np.array([F(2,3), F(5,7)]) + res = self.matmul(v, v) + assert_(type(res) is F) + + def test_matmul_empty(self): + a = np.empty((3, 0), dtype=object) + b = np.empty((0, 3), dtype=object) + c = np.zeros((3, 3)) + assert_array_equal(np.matmul(a, b), c) + + def test_matmul_exception_multiply(self): + # test that matmul fails if `__mul__` is missing + class add_not_multiply(): + def __add__(self, other): + return self + a = np.full((3,3), add_not_multiply()) + with assert_raises(TypeError): + b = np.matmul(a, a) + + def test_matmul_exception_add(self): + # test that matmul fails if `__add__` is missing + class multiply_not_add(): + def __mul__(self, other): + return self + a = np.full((3,3), multiply_not_add()) + with assert_raises(TypeError): + b = np.matmul(a, a) + + def test_matmul_bool(self): + # gh-14439 + a = np.array([[1, 0],[1, 1]], dtype=bool) + assert np.max(a.view(np.uint8)) == 1 + b = np.matmul(a, a) + # matmul with boolean output should always be 0, 1 + assert np.max(b.view(np.uint8)) == 1 + + rg = np.random.default_rng(np.random.PCG64(43)) + d = rg.integers(2, size=4*5, dtype=np.int8) + d = d.reshape(4, 5) > 0 + out1 = np.matmul(d, d.reshape(5, 4)) + out2 = np.dot(d, d.reshape(5, 4)) + assert_equal(out1, out2) + + c = np.matmul(np.zeros((2, 0), dtype=bool), np.zeros(0, dtype=bool)) + assert not np.any(c) + + +class TestMatmulOperator(MatmulCommon): + import operator + matmul = operator.matmul + + def test_array_priority_override(self): + + class A: + __array_priority__ = 1000 + + def __matmul__(self, other): + return "A" + + def __rmatmul__(self, other): + return "A" + + a = A() + b = np.ones(2) + assert_equal(self.matmul(a, b), "A") + assert_equal(self.matmul(b, a), "A") + + def test_matmul_raises(self): + assert_raises(TypeError, self.matmul, np.int8(5), np.int8(5)) + assert_raises(TypeError, self.matmul, np.void(b'abc'), np.void(b'abc')) + assert_raises(ValueError, self.matmul, np.arange(10), np.void(b'abc')) + +def test_matmul_inplace(): + # It would be nice to support 
in-place matmul eventually, but for now + # we don't have a working implementation, so better just to error out + # and nudge people to writing "a = a @ b". + a = np.eye(3) + b = np.eye(3) + assert_raises(TypeError, a.__imatmul__, b) + import operator + assert_raises(TypeError, operator.imatmul, a, b) + assert_raises(TypeError, exec, "a @= b", globals(), locals()) + +def test_matmul_axes(): + a = np.arange(3*4*5).reshape(3, 4, 5) + c = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (1, 2)]) + assert c.shape == (3, 4, 4) + d = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (0, 1)]) + assert d.shape == (4, 4, 3) + e = np.swapaxes(d, 0, 2) + assert_array_equal(e, c) + f = np.matmul(a, np.arange(3), axes=[(1, 0), (0), (0)]) + assert f.shape == (4, 5) + + +class TestInner: + + def test_inner_type_mismatch(self): + c = 1. + A = np.array((1,1), dtype='i,i') + + assert_raises(TypeError, np.inner, c, A) + assert_raises(TypeError, np.inner, A, c) + + def test_inner_scalar_and_vector(self): + for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': + sca = np.array(3, dtype=dt)[()] + vec = np.array([1, 2], dtype=dt) + desired = np.array([3, 6], dtype=dt) + assert_equal(np.inner(vec, sca), desired) + assert_equal(np.inner(sca, vec), desired) + + def test_vecself(self): + # Ticket 844. + # Inner product of a vector with itself segfaults or give + # meaningless result + a = np.zeros(shape=(1, 80), dtype=np.float64) + p = np.inner(a, a) + assert_almost_equal(p, 0, decimal=14) + + def test_inner_product_with_various_contiguities(self): + # github issue 6532 + for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': + # check an inner product involving a matrix transpose + A = np.array([[1, 2], [3, 4]], dtype=dt) + B = np.array([[1, 3], [2, 4]], dtype=dt) + C = np.array([1, 1], dtype=dt) + desired = np.array([4, 6], dtype=dt) + assert_equal(np.inner(A.T, C), desired) + assert_equal(np.inner(C, A.T), desired) + assert_equal(np.inner(B, C), desired) + assert_equal(np.inner(C, B), desired) + # check a matrix product + desired = np.array([[7, 10], [15, 22]], dtype=dt) + assert_equal(np.inner(A, B), desired) + # check the syrk vs. 
gemm paths + desired = np.array([[5, 11], [11, 25]], dtype=dt) + assert_equal(np.inner(A, A), desired) + assert_equal(np.inner(A, A.copy()), desired) + # check an inner product involving an aliased and reversed view + a = np.arange(5).astype(dt) + b = a[::-1] + desired = np.array(10, dtype=dt).item() + assert_equal(np.inner(b, a), desired) + + def test_3d_tensor(self): + for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': + a = np.arange(24).reshape(2,3,4).astype(dt) + b = np.arange(24, 48).reshape(2,3,4).astype(dt) + desired = np.array( + [[[[ 158, 182, 206], + [ 230, 254, 278]], + + [[ 566, 654, 742], + [ 830, 918, 1006]], + + [[ 974, 1126, 1278], + [1430, 1582, 1734]]], + + [[[1382, 1598, 1814], + [2030, 2246, 2462]], + + [[1790, 2070, 2350], + [2630, 2910, 3190]], + + [[2198, 2542, 2886], + [3230, 3574, 3918]]]], + dtype=dt + ) + assert_equal(np.inner(a, b), desired) + assert_equal(np.inner(b, a).transpose(2,3,0,1), desired) + + +class TestAlen: + def test_basic(self): + with pytest.warns(DeprecationWarning): + m = np.array([1, 2, 3]) + assert_equal(np.alen(m), 3) + + m = np.array([[1, 2, 3], [4, 5, 7]]) + assert_equal(np.alen(m), 2) + + m = [1, 2, 3] + assert_equal(np.alen(m), 3) + + m = [[1, 2, 3], [4, 5, 7]] + assert_equal(np.alen(m), 2) + + def test_singleton(self): + with pytest.warns(DeprecationWarning): + assert_equal(np.alen(5), 1) + + +class TestChoose: + def setup(self): + self.x = 2*np.ones((3,), dtype=int) + self.y = 3*np.ones((3,), dtype=int) + self.x2 = 2*np.ones((2, 3), dtype=int) + self.y2 = 3*np.ones((2, 3), dtype=int) + self.ind = [0, 0, 1] + + def test_basic(self): + A = np.choose(self.ind, (self.x, self.y)) + assert_equal(A, [2, 2, 3]) + + def test_broadcast1(self): + A = np.choose(self.ind, (self.x2, self.y2)) + assert_equal(A, [[2, 2, 3], [2, 2, 3]]) + + def test_broadcast2(self): + A = np.choose(self.ind, (self.x, self.y2)) + assert_equal(A, [[2, 2, 3], [2, 2, 3]]) + + @pytest.mark.parametrize("ops", + [(1000, np.array([1], dtype=np.uint8)), + (-1, np.array([1], dtype=np.uint8)), + (1., np.float32(3)), + (1., np.array([3], dtype=np.float32))],) + def test_output_dtype(self, ops): + expected_dt = np.result_type(*ops) + assert(np.choose([0], ops).dtype == expected_dt) + + +class TestRepeat: + def setup(self): + self.m = np.array([1, 2, 3, 4, 5, 6]) + self.m_rect = self.m.reshape((2, 3)) + + def test_basic(self): + A = np.repeat(self.m, [1, 3, 2, 1, 1, 2]) + assert_equal(A, [1, 2, 2, 2, 3, + 3, 4, 5, 6, 6]) + + def test_broadcast1(self): + A = np.repeat(self.m, 2) + assert_equal(A, [1, 1, 2, 2, 3, 3, + 4, 4, 5, 5, 6, 6]) + + def test_axis_spec(self): + A = np.repeat(self.m_rect, [2, 1], axis=0) + assert_equal(A, [[1, 2, 3], + [1, 2, 3], + [4, 5, 6]]) + + A = np.repeat(self.m_rect, [1, 3, 2], axis=1) + assert_equal(A, [[1, 2, 2, 2, 3, 3], + [4, 5, 5, 5, 6, 6]]) + + def test_broadcast2(self): + A = np.repeat(self.m_rect, 2, axis=0) + assert_equal(A, [[1, 2, 3], + [1, 2, 3], + [4, 5, 6], + [4, 5, 6]]) + + A = np.repeat(self.m_rect, 2, axis=1) + assert_equal(A, [[1, 1, 2, 2, 3, 3], + [4, 4, 5, 5, 6, 6]]) + + +# TODO: test for multidimensional +NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4} + + +@pytest.mark.parametrize('dt', [float, Decimal], ids=['float', 'object']) +class TestNeighborhoodIter: + # Simple, 2d tests + def test_simple2d(self, dt): + # Test zero and one padding for simple data type + x = np.array([[0, 1], [2, 3]], dtype=dt) + r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt), + np.array([[0, 0, 0], [0, 1, 
0]], dtype=dt), + np.array([[0, 0, 1], [0, 2, 3]], dtype=dt), + np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 0, -1, 1], x[0], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt), + np.array([[1, 1, 1], [0, 1, 1]], dtype=dt), + np.array([[1, 0, 1], [1, 2, 3]], dtype=dt), + np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 0, -1, 1], x[0], NEIGH_MODE['one']) + assert_array_equal(l, r) + + r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt), + np.array([[4, 4, 4], [0, 1, 4]], dtype=dt), + np.array([[4, 0, 1], [4, 2, 3]], dtype=dt), + np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant']) + assert_array_equal(l, r) + + def test_mirror2d(self, dt): + x = np.array([[0, 1], [2, 3]], dtype=dt) + r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt), + np.array([[0, 1, 1], [0, 1, 1]], dtype=dt), + np.array([[0, 0, 1], [2, 2, 3]], dtype=dt), + np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 0, -1, 1], x[0], NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + # Simple, 1d tests + def test_simple(self, dt): + # Test padding with constant values + x = np.linspace(1, 5, 5).astype(dt) + r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 1], x[0], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 1], x[0], NEIGH_MODE['one']) + assert_array_equal(l, r) + + r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 1], x[4], NEIGH_MODE['constant']) + assert_array_equal(l, r) + + # Test mirror modes + def test_mirror(self, dt): + x = np.linspace(1, 5, 5).astype(dt) + r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5], + [2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt) + l = _multiarray_tests.test_neighborhood_iterator( + x, [-2, 2], x[1], NEIGH_MODE['mirror']) + assert_([i.dtype == dt for i in l]) + assert_array_equal(l, r) + + # Circular mode + def test_circular(self, dt): + x = np.linspace(1, 5, 5).astype(dt) + r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5], + [2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt) + l = _multiarray_tests.test_neighborhood_iterator( + x, [-2, 2], x[0], NEIGH_MODE['circular']) + assert_array_equal(l, r) + + +# Test stacking neighborhood iterators +class TestStackedNeighborhoodIter: + # Simple, 1d test: stacking 2 constant-padded neigh iterators + def test_simple_const(self): + dt = np.float64 + # Test zero and one padding for simple data type + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0], dtype=dt), + np.array([0], dtype=dt), + np.array([1], dtype=dt), + np.array([2], dtype=dt), + np.array([3], dtype=dt), + np.array([0], dtype=dt), + np.array([0], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-2, 4], NEIGH_MODE['zero'], [0, 0], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + r = [np.array([1, 0, 1], dtype=dt), + np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt), + np.array([3, 0, 1], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [-1, 1], NEIGH_MODE['one']) + 
assert_array_equal(l, r) + + # 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and + # mirror padding + def test_simple_mirror(self): + dt = np.float64 + # Stacking zero on top of mirror + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0, 1, 1], dtype=dt), + np.array([1, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 3], dtype=dt), + np.array([3, 3, 0], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['mirror'], [-1, 1], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 0, 0], dtype=dt), + np.array([0, 0, 1], dtype=dt), + np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero: 2nd + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt), + np.array([3, 0, 0], dtype=dt), + np.array([0, 0, 3], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero: 3rd + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 0, 0, 1, 2], dtype=dt), + np.array([0, 0, 1, 2, 3], dtype=dt), + np.array([0, 1, 2, 3, 0], dtype=dt), + np.array([1, 2, 3, 0, 0], dtype=dt), + np.array([2, 3, 0, 0, 3], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + # 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and + # circular padding + def test_simple_circular(self): + dt = np.float64 + # Stacking zero on top of mirror + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0, 3, 1], dtype=dt), + np.array([3, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 1], dtype=dt), + np.array([3, 1, 0], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['circular'], [-1, 1], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([3, 0, 0], dtype=dt), + np.array([0, 0, 1], dtype=dt), + np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['circular']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero: 2nd + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt), + np.array([3, 0, 0], dtype=dt), + np.array([0, 0, 1], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['circular']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero: 3rd + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([3, 0, 0, 1, 2], dtype=dt), + np.array([0, 0, 1, 2, 3], dtype=dt), + np.array([0, 1, 2, 3, 0], dtype=dt), + np.array([1, 2, 3, 0, 0], dtype=dt), + np.array([2, 3, 0, 0, 1], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['circular']) + assert_array_equal(l, r) + + # 4th simple, 
1d test: stacking 2 neigh iterators, but with lower iterator + # being strictly within the array + def test_simple_strict_within(self): + dt = np.float64 + # Stacking zero on top of zero, first neighborhood strictly inside the + # array + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 2, 3, 0], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero, first neighborhood strictly inside the + # array + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 2, 3, 3], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero, first neighborhood strictly inside the + # array + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 2, 3, 1], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['circular']) + assert_array_equal(l, r) + +class TestWarnings: + + def test_complex_warning(self): + x = np.array([1, 2]) + y = np.array([1-2j, 1+2j]) + + with warnings.catch_warnings(): + warnings.simplefilter("error", np.ComplexWarning) + assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y) + assert_equal(x, [1, 2]) + + +class TestMinScalarType: + + def test_usigned_shortshort(self): + dt = np.min_scalar_type(2**8-1) + wanted = np.dtype('uint8') + assert_equal(wanted, dt) + + def test_usigned_short(self): + dt = np.min_scalar_type(2**16-1) + wanted = np.dtype('uint16') + assert_equal(wanted, dt) + + def test_usigned_int(self): + dt = np.min_scalar_type(2**32-1) + wanted = np.dtype('uint32') + assert_equal(wanted, dt) + + def test_usigned_longlong(self): + dt = np.min_scalar_type(2**63-1) + wanted = np.dtype('uint64') + assert_equal(wanted, dt) + + def test_object(self): + dt = np.min_scalar_type(2**64) + wanted = np.dtype('O') + assert_equal(wanted, dt) + + +from numpy.core._internal import _dtype_from_pep3118 + + +class TestPEP3118Dtype: + def _check(self, spec, wanted): + dt = np.dtype(wanted) + actual = _dtype_from_pep3118(spec) + assert_equal(actual, dt, + err_msg="spec %r != dtype %r" % (spec, wanted)) + + def test_native_padding(self): + align = np.dtype('i').alignment + for j in range(8): + if j == 0: + s = 'bi' + else: + s = 'b%dxi' % j + self._check('@'+s, {'f0': ('i1', 0), + 'f1': ('i', align*(1 + j//align))}) + self._check('='+s, {'f0': ('i1', 0), + 'f1': ('i', 1+j)}) + + def test_native_padding_2(self): + # Native padding should work also for structs and sub-arrays + self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)}) + self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)}) + + def test_trailing_padding(self): + # Trailing padding should be included, *and*, the item size + # should match the alignment if in aligned mode + align = np.dtype('i').alignment + size = np.dtype('i').itemsize + + def aligned(n): + return align*(1 + (n-1)//align) + + base = dict(formats=['i'], names=['f0']) + + self._check('ix', dict(itemsize=aligned(size + 1), **base)) + self._check('ixx', dict(itemsize=aligned(size + 2), **base)) + self._check('ixxx', dict(itemsize=aligned(size + 3), **base)) + self._check('ixxxx', dict(itemsize=aligned(size + 4), **base)) + self._check('i7x', dict(itemsize=aligned(size + 7), **base)) + + self._check('^ix', dict(itemsize=size + 1, **base)) + self._check('^ixx', dict(itemsize=size + 2, **base)) + 
self._check('^ixxx', dict(itemsize=size + 3, **base))
+        self._check('^ixxxx', dict(itemsize=size + 4, **base))
+        self._check('^i7x', dict(itemsize=size + 7, **base))
+
+    def test_native_padding_3(self):
+        dt = np.dtype(
+                [('a', 'b'), ('b', 'i'),
+                 ('sub', np.dtype('b,i')), ('c', 'i')],
+                align=True)
+        self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt)
+
+        dt = np.dtype(
+                [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
+                 ('e', 'b'), ('sub', np.dtype('b,i', align=True))])
+        self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt)
+
+    def test_padding_with_array_inside_struct(self):
+        dt = np.dtype(
+                [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)),
+                 ('d', 'i')],
+                align=True)
+        self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt)
+
+    def test_byteorder_inside_struct(self):
+        # The byte order after @T{=i} should be '=', not '@'.
+        # Check this by noting the absence of native alignment.
+        self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),
+                                 'f1': ('i', 5)})
+
+    def test_intra_padding(self):
+        # Natively aligned sub-arrays may require some internal padding
+        align = np.dtype('i').alignment
+        size = np.dtype('i').itemsize
+
+        def aligned(n):
+            return (align*(1 + (n-1)//align))
+
+        self._check('(3)T{ix}', (dict(
+            names=['f0'],
+            formats=['i'],
+            offsets=[0],
+            itemsize=aligned(size + 1)
+        ), (3,)))
+
+    def test_char_vs_string(self):
+        dt = np.dtype('c')
+        self._check('c', dt)
+
+        dt = np.dtype([('f0', 'S1', (4,)), ('f1', 'S4')])
+        self._check('4c4s', dt)
+
+    def test_field_order(self):
+        # gh-9053 - previously, we relied on dictionary key order
+        self._check("(0)I:a:f:b:", [('a', 'I', (0,)), ('b', 'f')])
+        self._check("(0)I:b:f:a:", [('b', 'I', (0,)), ('a', 'f')])
+
+    def test_unnamed_fields(self):
+        self._check('ii', [('f0', 'i'), ('f1', 'i')])
+        self._check('ii:f0:', [('f1', 'i'), ('f0', 'i')])
+
+        self._check('i', 'i')
+        self._check('i:f0:', [('f0', 'i')])
+
+
+class TestNewBufferProtocol:
+    """ Test PEP3118 buffers """
+
+    def _check_roundtrip(self, obj):
+        obj = np.asarray(obj)
+        x = memoryview(obj)
+        y = np.asarray(x)
+        y2 = np.array(x)
+        assert_(not y.flags.owndata)
+        assert_(y2.flags.owndata)
+
+        assert_equal(y.dtype, obj.dtype)
+        assert_equal(y.shape, obj.shape)
+        assert_array_equal(obj, y)
+
+        assert_equal(y2.dtype, obj.dtype)
+        assert_equal(y2.shape, obj.shape)
+        assert_array_equal(obj, y2)
+
+    def test_roundtrip(self):
+        x = np.array([1, 2, 3, 4, 5], dtype='i4')
+        self._check_roundtrip(x)
+
+        x = np.array([[1, 2], [3, 4]], dtype=np.float64)
+        self._check_roundtrip(x)
+
+        x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
+        self._check_roundtrip(x)
+
+        dt = [('a', 'b'),
+              ('b', 'h'),
+              ('c', 'i'),
+              ('d', 'l'),
+              ('dx', 'q'),
+              ('e', 'B'),
+              ('f', 'H'),
+              ('g', 'I'),
+              ('h', 'L'),
+              ('hx', 'Q'),
+              ('i', np.single),
+              ('j', np.double),
+              ('k', np.longdouble),
+              ('ix', np.csingle),
+              ('jx', np.cdouble),
+              ('kx', np.clongdouble),
+              ('l', 'S4'),
+              ('m', 'U4'),
+              ('n', 'V3'),
+              ('o', '?'),
+              ('p', np.half),
+              ]
+        x = np.array(
+                [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+                  b'aaaa', 'bbbb', b'xxx', True, 1.0)],
+                dtype=dt)
+        self._check_roundtrip(x)
+
+        x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))])
+        self._check_roundtrip(x)
+
+        x = np.array([1, 2, 3], dtype='>i2')
+        self._check_roundtrip(x)
+
+        x = np.array([1, 2, 3], dtype='<i2')
+        self._check_roundtrip(x)
+
+        x = np.zeros(4, dtype=dt)
+        self._check_roundtrip(x)
+
+    def test_roundtrip_scalar(self):
+        # Issue #4015.
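+        # even a 0-d array (np.asarray(0)) must expose a valid buffer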
+        self._check_roundtrip(0)
+
+    def test_invalid_buffer_format(self):
+        # datetime64 cannot be used fully in a buffer yet
+        # Should be fixed in the next Numpy major release
+        dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')])
+        a = np.empty(3, dt)
+        assert_raises((ValueError, BufferError), memoryview, a)
+        assert_raises((ValueError, BufferError), memoryview, np.array((3), 'M8[D]'))
+
+    def test_export_simple_1d(self):
+        x = np.array([1, 2, 3, 4, 5], dtype='i')
+        y = memoryview(x)
+        assert_equal(y.format, 'i')
+        assert_equal(y.shape, (5,))
+        assert_equal(y.ndim, 1)
+        assert_equal(y.strides, (4,))
+        assert_equal(y.suboffsets, ())
+        assert_equal(y.itemsize, 4)
+
+    def test_export_simple_nd(self):
+        x = np.array([[1, 2], [3, 4]], dtype=np.float64)
+        y = memoryview(x)
+        assert_equal(y.format, 'd')
+        assert_equal(y.shape, (2, 2))
+        assert_equal(y.ndim, 2)
+        assert_equal(y.strides, (16, 8))
+        assert_equal(y.suboffsets, ())
+        assert_equal(y.itemsize, 8)
+
+    def test_export_discontiguous(self):
+        x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
+        y = memoryview(x)
+        assert_equal(y.format, 'f')
+        assert_equal(y.shape, (3, 3))
+        assert_equal(y.ndim, 2)
+        assert_equal(y.strides, (36, 4))
+        assert_equal(y.suboffsets, ())
+        assert_equal(y.itemsize, 4)
+
+    def test_export_record(self):
+        dt = [('a', 'b'),
+              ('b', 'h'),
+              ('c', 'i'),
+              ('d', 'l'),
+              ('dx', 'q'),
+              ('e', 'B'),
+              ('f', 'H'),
+              ('g', 'I'),
+              ('h', 'L'),
+              ('hx', 'Q'),
+              ('i', np.single),
+              ('j', np.double),
+              ('k', np.longdouble),
+              ('ix', np.csingle),
+              ('jx', np.cdouble),
+              ('kx', np.clongdouble),
+              ('l', 'S4'),
+              ('m', 'U4'),
+              ('n', 'V3'),
+              ('o', '?'),
+              ('p', np.half),
+              ]
+        x = np.array(
+                [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+                  b'aaaa', 'bbbb', b'   ', True, 1.0)],
+                dtype=dt)
+        y = memoryview(x)
+        assert_equal(y.shape, (1,))
+        assert_equal(y.ndim, 1)
+        assert_equal(y.suboffsets, ())
+
+        sz = sum([np.dtype(b).itemsize for a, b in dt])
+        if np.dtype('l').itemsize == 4:
+            assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
+        else:
+            assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
+        # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides
+        if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):
+            assert_equal(y.strides, (sz,))
+        assert_equal(y.itemsize, sz)
+
+    def test_export_subarray(self):
+        x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))])
+        y = memoryview(x)
+        assert_equal(y.format, 'T{(2,2)i:a:}')
+        assert_equal(y.shape, ())
+        assert_equal(y.ndim, 0)
+        assert_equal(y.strides, ())
+        assert_equal(y.suboffsets, ())
+        assert_equal(y.itemsize, 16)
+
+    def test_export_endian(self):
+        x = np.array([1, 2, 3], dtype='>i')
+        y = memoryview(x)
+        if sys.byteorder == 'little':
+            assert_equal(y.format, '>i')
+        else:
+            assert_equal(y.format, 'i')
+
+        x = np.array([1, 2, 3], dtype='<i')
+        y = memoryview(x)
+        if sys.byteorder == 'little':
+            assert_equal(y.format, 'i')
+        else:
+            assert_equal(y.format, '<i')
+
+
+class TestConversion:
+    def test_array_scalar_relational_operation(self):
+        # All integer
+        for dt1 in np.typecodes['AllInteger']:
+            assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,))
+            assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,))
+
+            for dt2 in np.typecodes['AllInteger']:
+                assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2),
+                        "type %s and %s failed" % (dt1, dt2))
+                assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2),
+                        "type %s and %s failed" % (dt1, dt2))
+
+        # Unsigned integers
+        for dt1 in 'BHILQP':
+            assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,))
+            assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,))
+            assert_(-1 != np.array(1,
dtype=dt1), "type %s failed" % (dt1,)) + + # Unsigned vs signed + for dt2 in 'bhilqp': + assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2), + "type %s and %s failed" % (dt1, dt2)) + assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2), + "type %s and %s failed" % (dt1, dt2)) + assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2), + "type %s and %s failed" % (dt1, dt2)) + + # Signed integers and floats + for dt1 in 'bhlqp' + np.typecodes['Float']: + assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) + assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) + assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) + + for dt2 in 'bhlqp' + np.typecodes['Float']: + assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2), + "type %s and %s failed" % (dt1, dt2)) + assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2), + "type %s and %s failed" % (dt1, dt2)) + assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2), + "type %s and %s failed" % (dt1, dt2)) + + def test_to_bool_scalar(self): + assert_equal(bool(np.array([False])), False) + assert_equal(bool(np.array([True])), True) + assert_equal(bool(np.array([[42]])), True) + assert_raises(ValueError, bool, np.array([1, 2])) + + class NotConvertible: + def __bool__(self): + raise NotImplementedError + + assert_raises(NotImplementedError, bool, np.array(NotConvertible())) + assert_raises(NotImplementedError, bool, np.array([NotConvertible()])) + + self_containing = np.array([None]) + self_containing[0] = self_containing + try: + Error = RecursionError + except NameError: + Error = RuntimeError # python < 3.5 + assert_raises(Error, bool, self_containing) # previously stack overflow + self_containing[0] = None # resolve circular reference + + def test_to_int_scalar(self): + # gh-9972 means that these aren't always the same + int_funcs = (int, lambda x: x.__int__()) + for int_func in int_funcs: + assert_equal(int_func(np.array(0)), 0) + assert_equal(int_func(np.array([1])), 1) + assert_equal(int_func(np.array([[42]])), 42) + assert_raises(TypeError, int_func, np.array([1, 2])) + + # gh-9972 + assert_equal(4, int_func(np.array('4'))) + assert_equal(5, int_func(np.bytes_(b'5'))) + assert_equal(6, int_func(np.unicode_(u'6'))) + + class HasTrunc: + def __trunc__(self): + return 3 + assert_equal(3, int_func(np.array(HasTrunc()))) + assert_equal(3, int_func(np.array([HasTrunc()]))) + + class NotConvertible: + def __int__(self): + raise NotImplementedError + assert_raises(NotImplementedError, + int_func, np.array(NotConvertible())) + assert_raises(NotImplementedError, + int_func, np.array([NotConvertible()])) + + +class TestWhere: + def test_basic(self): + dts = [bool, np.int16, np.int32, np.int64, np.double, np.complex128, + np.longdouble, np.clongdouble] + for dt in dts: + c = np.ones(53, dtype=bool) + assert_equal(np.where( c, dt(0), dt(1)), dt(0)) + assert_equal(np.where(~c, dt(0), dt(1)), dt(1)) + assert_equal(np.where(True, dt(0), dt(1)), dt(0)) + assert_equal(np.where(False, dt(0), dt(1)), dt(1)) + d = np.ones_like(c).astype(dt) + e = np.zeros_like(d) + r = d.astype(dt) + c[7] = False + r[7] = e[7] + assert_equal(np.where(c, e, e), e) + assert_equal(np.where(c, d, e), r) + assert_equal(np.where(c, d, e[0]), r) + assert_equal(np.where(c, d[0], e), r) + assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2]) + assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2]) + assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3]) + assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3]) + 
assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2]) + assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3]) + assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3]) + + def test_exotic(self): + # object + assert_array_equal(np.where(True, None, None), np.array(None)) + # zero sized + m = np.array([], dtype=bool).reshape(0, 3) + b = np.array([], dtype=np.float64).reshape(0, 3) + assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3)) + + # object cast + d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313, + 0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013, + 1.267, 0.229, -1.39, 0.487]) + nan = float('NaN') + e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan, + 'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'], + dtype=object) + m = np.array([0, 0, 1, 0, 1, 1, 0, 0, 1, 1, + 0, 1, 1, 0, 1, 1, 0, 1, 0, 0], dtype=bool) + + r = e[:] + r[np.where(m)] = d[np.where(m)] + assert_array_equal(np.where(m, d, e), r) + + r = e[:] + r[np.where(~m)] = d[np.where(~m)] + assert_array_equal(np.where(m, e, d), r) + + assert_array_equal(np.where(m, e, e), e) + + # minimal dtype result with NaN scalar (e.g required by pandas) + d = np.array([1., 2.], dtype=np.float32) + e = float('NaN') + assert_equal(np.where(True, d, e).dtype, np.float32) + e = float('Infinity') + assert_equal(np.where(True, d, e).dtype, np.float32) + e = float('-Infinity') + assert_equal(np.where(True, d, e).dtype, np.float32) + # also check upcast + e = float(1e150) + assert_equal(np.where(True, d, e).dtype, np.float64) + + def test_ndim(self): + c = [True, False] + a = np.zeros((2, 25)) + b = np.ones((2, 25)) + r = np.where(np.array(c)[:,np.newaxis], a, b) + assert_array_equal(r[0], a[0]) + assert_array_equal(r[1], b[0]) + + a = a.T + b = b.T + r = np.where(c, a, b) + assert_array_equal(r[:,0], a[:,0]) + assert_array_equal(r[:,1], b[:,0]) + + def test_dtype_mix(self): + c = np.array([False, True, False, False, False, False, True, False, + False, False, True, False]) + a = np.uint32(1) + b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.], + dtype=np.float64) + r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.], + dtype=np.float64) + assert_equal(np.where(c, a, b), r) + + a = a.astype(np.float32) + b = b.astype(np.int64) + assert_equal(np.where(c, a, b), r) + + # non bool mask + c = c.astype(int) + c[c != 0] = 34242324 + assert_equal(np.where(c, a, b), r) + # invert + tmpmask = c != 0 + c[c == 0] = 41247212 + c[tmpmask] = 0 + assert_equal(np.where(c, b, a), r) + + def test_foreign(self): + c = np.array([False, True, False, False, False, False, True, False, + False, False, True, False]) + r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.], + dtype=np.float64) + a = np.ones(1, dtype='>i4') + b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.], + dtype=np.float64) + assert_equal(np.where(c, a, b), r) + + b = b.astype('>f8') + assert_equal(np.where(c, a, b), r) + + a = a.astype('i4') + assert_equal(np.where(c, a, b), r) + + def test_error(self): + c = [True, True] + a = np.ones((4, 5)) + b = np.ones((5, 5)) + assert_raises(ValueError, np.where, c, a, a) + assert_raises(ValueError, np.where, c[0], a, b) + + def test_string(self): + # gh-4778 check strings are properly filled with nulls + a = np.array("abc") + b = np.array("x" * 753) + assert_equal(np.where(True, a, b), "abc") + assert_equal(np.where(False, b, a), "abc") + + # check native datatype sized strings + a = np.array("abcd") + b = np.array("x" * 8) + 
assert_equal(np.where(True, a, b), "abcd") + assert_equal(np.where(False, b, a), "abcd") + + def test_empty_result(self): + # pass empty where result through an assignment which reads the data of + # empty arrays, error detectable with valgrind, see gh-8922 + x = np.zeros((1, 1)) + ibad = np.vstack(np.where(x == 99.)) + assert_array_equal(ibad, + np.atleast_2d(np.array([[],[]], dtype=np.intp))) + + def test_largedim(self): + # invalid read regression gh-9304 + shape = [10, 2, 3, 4, 5, 6] + np.random.seed(2) + array = np.random.rand(*shape) + + for i in range(10): + benchmark = array.nonzero() + result = array.nonzero() + assert_array_equal(benchmark, result) + + +if not IS_PYPY: + # sys.getsizeof() is not valid on PyPy + class TestSizeOf: + + def test_empty_array(self): + x = np.array([]) + assert_(sys.getsizeof(x) > 0) + + def check_array(self, dtype): + elem_size = dtype(0).itemsize + + for length in [10, 50, 100, 500]: + x = np.arange(length, dtype=dtype) + assert_(sys.getsizeof(x) > length * elem_size) + + def test_array_int32(self): + self.check_array(np.int32) + + def test_array_int64(self): + self.check_array(np.int64) + + def test_array_float32(self): + self.check_array(np.float32) + + def test_array_float64(self): + self.check_array(np.float64) + + def test_view(self): + d = np.ones(100) + assert_(sys.getsizeof(d[...]) < sys.getsizeof(d)) + + def test_reshape(self): + d = np.ones(100) + assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy())) + + @_no_tracing + def test_resize(self): + d = np.ones(100) + old = sys.getsizeof(d) + d.resize(50) + assert_(old > sys.getsizeof(d)) + d.resize(150) + assert_(old < sys.getsizeof(d)) + + def test_error(self): + d = np.ones(100) + assert_raises(TypeError, d.__sizeof__, "a") + + +class TestHashing: + + def test_arrays_not_hashable(self): + x = np.ones(3) + assert_raises(TypeError, hash, x) + + def test_collections_hashable(self): + x = np.array([]) + assert_(not isinstance(x, collections.abc.Hashable)) + + +class TestArrayPriority: + # This will go away when __array_priority__ is settled, meanwhile + # it serves to check unintended changes. + op = operator + binary_ops = [ + op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod, + op.and_, op.or_, op.xor, op.lshift, op.rshift, op.mod, op.gt, + op.ge, op.lt, op.le, op.ne, op.eq + ] + + class Foo(np.ndarray): + __array_priority__ = 100. + + def __new__(cls, *args, **kwargs): + return np.array(*args, **kwargs).view(cls) + + class Bar(np.ndarray): + __array_priority__ = 101. + + def __new__(cls, *args, **kwargs): + return np.array(*args, **kwargs).view(cls) + + class Other: + __array_priority__ = 1000. 
+ + def _all(self, other): + return self.__class__() + + __add__ = __radd__ = _all + __sub__ = __rsub__ = _all + __mul__ = __rmul__ = _all + __pow__ = __rpow__ = _all + __div__ = __rdiv__ = _all + __mod__ = __rmod__ = _all + __truediv__ = __rtruediv__ = _all + __floordiv__ = __rfloordiv__ = _all + __and__ = __rand__ = _all + __xor__ = __rxor__ = _all + __or__ = __ror__ = _all + __lshift__ = __rlshift__ = _all + __rshift__ = __rrshift__ = _all + __eq__ = _all + __ne__ = _all + __gt__ = _all + __ge__ = _all + __lt__ = _all + __le__ = _all + + def test_ndarray_subclass(self): + a = np.array([1, 2]) + b = self.Bar([1, 2]) + for f in self.binary_ops: + msg = repr(f) + assert_(isinstance(f(a, b), self.Bar), msg) + assert_(isinstance(f(b, a), self.Bar), msg) + + def test_ndarray_other(self): + a = np.array([1, 2]) + b = self.Other() + for f in self.binary_ops: + msg = repr(f) + assert_(isinstance(f(a, b), self.Other), msg) + assert_(isinstance(f(b, a), self.Other), msg) + + def test_subclass_subclass(self): + a = self.Foo([1, 2]) + b = self.Bar([1, 2]) + for f in self.binary_ops: + msg = repr(f) + assert_(isinstance(f(a, b), self.Bar), msg) + assert_(isinstance(f(b, a), self.Bar), msg) + + def test_subclass_other(self): + a = self.Foo([1, 2]) + b = self.Other() + for f in self.binary_ops: + msg = repr(f) + assert_(isinstance(f(a, b), self.Other), msg) + assert_(isinstance(f(b, a), self.Other), msg) + + +class TestBytestringArrayNonzero: + + def test_empty_bstring_array_is_falsey(self): + assert_(not np.array([''], dtype=str)) + + def test_whitespace_bstring_array_is_falsey(self): + a = np.array(['spam'], dtype=str) + a[0] = ' \0\0' + assert_(not a) + + def test_all_null_bstring_array_is_falsey(self): + a = np.array(['spam'], dtype=str) + a[0] = '\0\0\0\0' + assert_(not a) + + def test_null_inside_bstring_array_is_truthy(self): + a = np.array(['spam'], dtype=str) + a[0] = ' \0 \0' + assert_(a) + + +class TestUnicodeEncoding: + """ + Tests for encoding related bugs, such as UCS2 vs UCS4, round-tripping + issues, etc + """ + def test_round_trip(self): + """ Tests that GETITEM, SETITEM, and PyArray_Scalar roundtrip """ + # gh-15363 + arr = np.zeros(shape=(), dtype="U1") + for i in range(1, sys.maxunicode + 1): + expected = chr(i) + arr[()] = expected + assert arr[()] == expected + assert arr.item() == expected + + def test_assign_scalar(self): + # gh-3258 + l = np.array(['aa', 'bb']) + l[:] = np.unicode_('cc') + assert_equal(l, ['cc', 'cc']) + + def test_fill_scalar(self): + # gh-7227 + l = np.array(['aa', 'bb']) + l.fill(np.unicode_('cc')) + assert_equal(l, ['cc', 'cc']) + + +class TestUnicodeArrayNonzero: + + def test_empty_ustring_array_is_falsey(self): + assert_(not np.array([''], dtype=np.unicode_)) + + def test_whitespace_ustring_array_is_falsey(self): + a = np.array(['eggs'], dtype=np.unicode_) + a[0] = ' \0\0' + assert_(not a) + + def test_all_null_ustring_array_is_falsey(self): + a = np.array(['eggs'], dtype=np.unicode_) + a[0] = '\0\0\0\0' + assert_(not a) + + def test_null_inside_ustring_array_is_truthy(self): + a = np.array(['eggs'], dtype=np.unicode_) + a[0] = ' \0 \0' + assert_(a) + + +class TestFormat: + + def test_0d(self): + a = np.array(np.pi) + assert_equal('{:0.3g}'.format(a), '3.14') + assert_equal('{:0.3g}'.format(a[()]), '3.14') + + def test_1d_no_format(self): + a = np.array([np.pi]) + assert_equal('{}'.format(a), str(a)) + + def test_1d_format(self): + # until gh-5543, ensure that the behaviour matches what it used to be + a = np.array([np.pi]) + assert_raises(TypeError, 
'{:30}'.format, a)
+
+from numpy.testing import IS_PYPY
+
+class TestCTypes:
+
+    def test_ctypes_is_available(self):
+        test_arr = np.array([[1, 2, 3], [4, 5, 6]])
+
+        assert_equal(ctypes, test_arr.ctypes._ctypes)
+        assert_equal(tuple(test_arr.ctypes.shape), (2, 3))
+
+    def test_ctypes_is_not_available(self):
+        from numpy.core import _internal
+        _internal.ctypes = None
+        try:
+            test_arr = np.array([[1, 2, 3], [4, 5, 6]])
+
+            assert_(isinstance(test_arr.ctypes._ctypes,
+                               _internal._missing_ctypes))
+            assert_equal(tuple(test_arr.ctypes.shape), (2, 3))
+        finally:
+            _internal.ctypes = ctypes
+
+    def _make_readonly(x):
+        x.flags.writeable = False
+        return x
+
+    @pytest.mark.parametrize('arr', [
+        np.array([1, 2, 3]),
+        np.array([['one', 'two'], ['three', 'four']]),
+        np.array((1, 2), dtype='i4,i4'),
+        np.zeros((2,), dtype=
+            np.dtype(dict(
+                formats=['<i4', '<i4'],
+                names=['a', 'b'],
+                offsets=[0, 2],
+                itemsize=6
+            ))
+        ),
+        _make_readonly(np.array([1, 2, 3])),
+    ])
+    def test_ctypes_data_as_holds_reference(self, arr):
+        # gh-9647
+        # create a copy so that pytest does not interfere with the refcounts
+        arr = arr.copy()
+
+        arr_ref = weakref.ref(arr)
+
+        ctypes_ptr = arr.ctypes.data_as(ctypes.c_void_p)
+
+        # `ctypes_ptr` should hold onto `arr`
+        del arr
+        break_cycles()
+        assert_(arr_ref() is not None,
+                "ctypes pointer did not hold onto a reference")
+
+        # but when the `ctypes_ptr` object dies, so should `arr`
+        del ctypes_ptr
+        if IS_PYPY:
+            # PyPy does not recycle objects immediately; trigger a collection
+            break_cycles()
+        assert_(arr_ref() is None)
+
+
+class TestWritebackIfCopy:
+    # all these tests use the WRITEBACKIFCOPY mechanism
+    def test_insert_noncontiguous(self):
+        a = np.arange(6).reshape(2,3).T  # force non-c-contiguous
+        # uses arr_insert
+        np.place(a, a>2, [44, 55])
+        assert_equal(a, np.array([[0, 44], [1, 55], [2, 44]]))
+        # hit one of the failing paths
+        assert_raises(ValueError, np.place, a, a>20, [])
+
+    def test_put_noncontiguous(self):
+        a = np.arange(6).reshape(2,3).T  # force non-c-contiguous
+        np.put(a, [0, 2], [44, 55])
+        assert_equal(a, np.array([[44, 3], [55, 4], [2, 5]]))
+
+    def test_putmask_noncontiguous(self):
+        a = np.arange(6).reshape(2,3).T  # force non-c-contiguous
+        # uses arr_putmask
+        np.putmask(a, a>2, a**2)
+        assert_equal(a, np.array([[0, 9], [1, 16], [2, 25]]))
+
+    def test_take_mode_raise(self):
+        a = np.arange(6, dtype='int')
+        out = np.empty(2, dtype='int')
+        np.take(a, [0, 2], out=out, mode='raise')
+        assert_equal(out, np.array([0, 2]))
+
+    def test_choose_mod_raise(self):
+        a = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1]])
+        out = np.empty((3,3), dtype='int')
+        choices = [-10, 10]
+        np.choose(a, choices, out=out, mode='raise')
+        assert_equal(out, np.array([[ 10, -10,  10],
+                                    [-10,  10, -10],
+                                    [ 10, -10,  10]]))
+
+    def test_flatiter__array__(self):
+        a = np.arange(9).reshape(3,3)
+        b = a.T.flat
+        c = b.__array__()
+        # triggers the WRITEBACKIFCOPY resolution, assuming refcount semantics
+        del c
+
+    def test_dot_out(self):
+        # if HAVE_CBLAS, will use WRITEBACKIFCOPY
+        a = np.arange(9, dtype=float).reshape(3,3)
+        b = np.dot(a, a, out=a)
+        assert_equal(b, np.array([[15, 18, 21], [42, 54, 66], [69, 90, 111]]))
+
+    def test_view_assign(self):
+        from numpy.core._multiarray_tests import npy_create_writebackifcopy, npy_resolve
+
+        arr = np.arange(9).reshape(3, 3).T
+        arr_wb = npy_create_writebackifcopy(arr)
+        assert_(arr_wb.flags.writebackifcopy)
+        assert_(arr_wb.base is arr)
+        arr_wb[...] = -100
+        npy_resolve(arr_wb)
+        # arr changes after resolve, even though we assigned to arr_wb
+        assert_equal(arr, -100)
+        # after resolve, the two arrays no longer reference each other
+        assert_(arr_wb.ctypes.data != 0)
+        assert_equal(arr_wb.base, None)
+        # assigning to arr_wb does not get transferred to arr
+        arr_wb[...]
= 100 + assert_equal(arr, -100) + + @pytest.mark.leaks_references( + reason="increments self in dealloc; ignore since deprecated path.") + def test_dealloc_warning(self): + with suppress_warnings() as sup: + sup.record(RuntimeWarning) + arr = np.arange(9).reshape(3, 3) + v = arr.T + _multiarray_tests.npy_abuse_writebackifcopy(v) + assert len(sup.log) == 1 + + def test_view_discard_refcount(self): + from numpy.core._multiarray_tests import npy_create_writebackifcopy, npy_discard + + arr = np.arange(9).reshape(3, 3).T + orig = arr.copy() + if HAS_REFCOUNT: + arr_cnt = sys.getrefcount(arr) + arr_wb = npy_create_writebackifcopy(arr) + assert_(arr_wb.flags.writebackifcopy) + assert_(arr_wb.base is arr) + arr_wb[...] = -100 + npy_discard(arr_wb) + # arr remains unchanged after discard + assert_equal(arr, orig) + # after discard, the two arrays no longer reference each other + assert_(arr_wb.ctypes.data != 0) + assert_equal(arr_wb.base, None) + if HAS_REFCOUNT: + assert_equal(arr_cnt, sys.getrefcount(arr)) + # assigning to arr_wb does not get transferred to arr + arr_wb[...] = 100 + assert_equal(arr, orig) + + +class TestArange: + def test_infinite(self): + assert_raises_regex( + ValueError, "size exceeded", + np.arange, 0, np.inf + ) + + def test_nan_step(self): + assert_raises_regex( + ValueError, "cannot compute length", + np.arange, 0, 1, np.nan + ) + + def test_zero_step(self): + assert_raises(ZeroDivisionError, np.arange, 0, 10, 0) + assert_raises(ZeroDivisionError, np.arange, 0.0, 10.0, 0.0) + + # empty range + assert_raises(ZeroDivisionError, np.arange, 0, 0, 0) + assert_raises(ZeroDivisionError, np.arange, 0.0, 0.0, 0.0) + + def test_require_range(self): + assert_raises(TypeError, np.arange) + assert_raises(TypeError, np.arange, step=3) + assert_raises(TypeError, np.arange, dtype='int64') + assert_raises(TypeError, np.arange, start=4) + + def test_start_stop_kwarg(self): + keyword_stop = np.arange(stop=3) + keyword_zerotostop = np.arange(start=0, stop=3) + keyword_start_stop = np.arange(start=3, stop=9) + + assert len(keyword_stop) == 3 + assert len(keyword_zerotostop) == 3 + assert len(keyword_start_stop) == 6 + assert_array_equal(keyword_stop, keyword_zerotostop) + + +class TestArrayFinalize: + """ Tests __array_finalize__ """ + + def test_receives_base(self): + # gh-11237 + class SavesBase(np.ndarray): + def __array_finalize__(self, obj): + self.saved_base = self.base + + a = np.array(1).view(SavesBase) + assert_(a.saved_base is a.base) + + def test_bad_finalize(self): + class BadAttributeArray(np.ndarray): + @property + def __array_finalize__(self): + raise RuntimeError("boohoo!") + + with pytest.raises(RuntimeError, match="boohoo!"): + np.arange(10).view(BadAttributeArray) + + def test_lifetime_on_error(self): + # gh-11237 + class RaisesInFinalize(np.ndarray): + def __array_finalize__(self, obj): + # crash, but keep this object alive + raise Exception(self) + + # a plain object can't be weakref'd + class Dummy: pass + + # get a weak reference to an object within an array + obj_arr = np.array(Dummy()) + obj_ref = weakref.ref(obj_arr[()]) + + # get an array that crashed in __array_finalize__ + with assert_raises(Exception) as e: + obj_arr.view(RaisesInFinalize) + + obj_subarray = e.exception.args[0] + del e + assert_(isinstance(obj_subarray, RaisesInFinalize)) + + # reference should still be held by obj_arr + break_cycles() + assert_(obj_ref() is not None, "object should not already be dead") + + del obj_arr + break_cycles() + assert_(obj_ref() is not None, "obj_arr should not hold 
the last reference") + + del obj_subarray + break_cycles() + assert_(obj_ref() is None, "no references should remain") + + +def test_orderconverter_with_nonASCII_unicode_ordering(): + # gh-7475 + a = np.arange(5) + assert_raises(ValueError, a.flatten, order=u'\xe2') + + +def test_equal_override(): + # gh-9153: ndarray.__eq__ uses special logic for structured arrays, which + # did not respect overrides with __array_priority__ or __array_ufunc__. + # The PR fixed this for __array_priority__ and __array_ufunc__ = None. + class MyAlwaysEqual: + def __eq__(self, other): + return "eq" + + def __ne__(self, other): + return "ne" + + class MyAlwaysEqualOld(MyAlwaysEqual): + __array_priority__ = 10000 + + class MyAlwaysEqualNew(MyAlwaysEqual): + __array_ufunc__ = None + + array = np.array([(0, 1), (2, 3)], dtype='i4,i4') + for my_always_equal_cls in MyAlwaysEqualOld, MyAlwaysEqualNew: + my_always_equal = my_always_equal_cls() + assert_equal(my_always_equal == array, 'eq') + assert_equal(array == my_always_equal, 'eq') + assert_equal(my_always_equal != array, 'ne') + assert_equal(array != my_always_equal, 'ne') + + +@pytest.mark.parametrize( + ["fun", "npfun"], + [ + (_multiarray_tests.npy_cabs, np.absolute), + (_multiarray_tests.npy_carg, np.angle) + ] +) +@pytest.mark.parametrize("x", [1, np.inf, -np.inf, np.nan]) +@pytest.mark.parametrize("y", [1, np.inf, -np.inf, np.nan]) +@pytest.mark.parametrize("test_dtype", np.complexfloating.__subclasses__()) +def test_npymath_complex(fun, npfun, x, y, test_dtype): + # Smoketest npymath functions + z = test_dtype(complex(x, y)) + got = fun(z) + expected = npfun(z) + assert_allclose(got, expected) + + +def test_npymath_real(): + # Smoketest npymath functions + from numpy.core._multiarray_tests import ( + npy_log10, npy_cosh, npy_sinh, npy_tan, npy_tanh) + + funcs = {npy_log10: np.log10, + npy_cosh: np.cosh, + npy_sinh: np.sinh, + npy_tan: np.tan, + npy_tanh: np.tanh} + vals = (1, np.inf, -np.inf, np.nan) + types = (np.float32, np.float64, np.longdouble) + + with np.errstate(all='ignore'): + for fun, npfun in funcs.items(): + for x, t in itertools.product(vals, types): + z = t(x) + got = fun(z) + expected = npfun(z) + assert_allclose(got, expected) + +def test_uintalignment_and_alignment(): + # alignment code needs to satisfy these requirements: + # 1. numpy structs match C struct layout + # 2. ufuncs/casting is safe wrt to aligned access + # 3. copy code is safe wrt to "uint alidned" access + # + # Complex types are the main problem, whose alignment may not be the same + # as their "uint alignment". + # + # This test might only fail on certain platforms, where uint64 alignment is + # not equal to complex64 alignment. The second 2 tests will only fail + # for DEBUG=1. + + d1 = np.dtype('u1,c8', align=True) + d2 = np.dtype('u4,c8', align=True) + d3 = np.dtype({'names': ['a', 'b'], 'formats': ['u1', d1]}, align=True) + + assert_equal(np.zeros(1, dtype=d1)['f1'].flags['ALIGNED'], True) + assert_equal(np.zeros(1, dtype=d2)['f1'].flags['ALIGNED'], True) + assert_equal(np.zeros(1, dtype='u1,c8')['f1'].flags['ALIGNED'], False) + + # check that C struct matches numpy struct size + s = _multiarray_tests.get_struct_alignments() + for d, (alignment, size) in zip([d1,d2,d3], s): + assert_equal(d.alignment, alignment) + assert_equal(d.itemsize, size) + + # check that ufuncs don't complain in debug mode + # (this is probably OK if the aligned flag is true above) + src = np.zeros((2,2), dtype=d1)['f1'] # 4-byte aligned, often + np.exp(src) # assert fails? 
+ + # check that copy code doesn't complain in debug mode + dst = np.zeros((2,2), dtype='c8') + dst[:,1] = src[:,1] # assert in lowlevel_strided_loops fails? + +class TestAlignment: + # adapted from scipy._lib.tests.test__util.test__aligned_zeros + # Checks that unusual memory alignments don't trip up numpy. + # In particular, check RELAXED_STRIDES don't trip alignment assertions in + # NDEBUG mode for size-0 arrays (gh-12503) + + def check(self, shape, dtype, order, align): + err_msg = repr((shape, dtype, order, align)) + x = _aligned_zeros(shape, dtype, order, align=align) + if align is None: + align = np.dtype(dtype).alignment + assert_equal(x.__array_interface__['data'][0] % align, 0) + if hasattr(shape, '__len__'): + assert_equal(x.shape, shape, err_msg) + else: + assert_equal(x.shape, (shape,), err_msg) + assert_equal(x.dtype, dtype) + if order == "C": + assert_(x.flags.c_contiguous, err_msg) + elif order == "F": + if x.size > 0: + assert_(x.flags.f_contiguous, err_msg) + elif order is None: + assert_(x.flags.c_contiguous, err_msg) + else: + raise ValueError() + + def test_various_alignments(self): + for align in [1, 2, 3, 4, 8, 12, 16, 32, 64, None]: + for n in [0, 1, 3, 11]: + for order in ["C", "F", None]: + for dtype in list(np.typecodes["All"]) + ['i4,i4,i4']: + if dtype == 'O': + # object dtype can't be misaligned + continue + for shape in [n, (1, 2, 3, n)]: + self.check(shape, np.dtype(dtype), order, align) + + def test_strided_loop_alignments(self): + # particularly test that complex64 and float128 use right alignment + # code-paths, since these are particularly problematic. It is useful to + # turn on USE_DEBUG for this test, so lowlevel-loop asserts are run. + for align in [1, 2, 4, 8, 12, 16, None]: + xf64 = _aligned_zeros(3, np.float64) + + xc64 = _aligned_zeros(3, np.complex64, align=align) + xf128 = _aligned_zeros(3, np.longdouble, align=align) + + # test casting, both to and from misaligned + with suppress_warnings() as sup: + sup.filter(np.ComplexWarning, "Casting complex values") + xc64.astype('f8') + xf64.astype(np.complex64) + test = xc64 + xf64 + + xf128.astype('f8') + xf64.astype(np.longdouble) + test = xf128 + xf64 + + test = xf128 + xc64 + + # test copy, both to and from misaligned + # contig copy + xf64[:] = xf64.copy() + xc64[:] = xc64.copy() + xf128[:] = xf128.copy() + # strided copy + xf64[::2] = xf64[::2].copy() + xc64[::2] = xc64[::2].copy() + xf128[::2] = xf128[::2].copy() + +def test_getfield(): + a = np.arange(32, dtype='uint16') + if sys.byteorder == 'little': + i = 0 + j = 1 + else: + i = 1 + j = 0 + b = a.getfield('int8', i) + assert_equal(b, a) + b = a.getfield('int8', j) + assert_equal(b, 0) + pytest.raises(ValueError, a.getfield, 'uint8', -1) + pytest.raises(ValueError, a.getfield, 'uint8', 16) + pytest.raises(ValueError, a.getfield, 'uint64', 0) diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_nditer.py b/MLPY/Lib/site-packages/numpy/core/tests/test_nditer.py new file mode 100644 index 0000000000000000000000000000000000000000..85dd3cdbe1ff6275c8d938a04562595d5e57c162 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/test_nditer.py @@ -0,0 +1,3274 @@ +import sys +import pytest + +import textwrap +import subprocess + +import numpy as np +import numpy.core._multiarray_tests as _multiarray_tests +from numpy import array, arange, nditer, all +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, assert_raises, + HAS_REFCOUNT, suppress_warnings + ) + + +def iter_multi_index(i): + ret = [] + while not i.finished: + 
ret.append(i.multi_index) + i.iternext() + return ret + +def iter_indices(i): + ret = [] + while not i.finished: + ret.append(i.index) + i.iternext() + return ret + +def iter_iterindices(i): + ret = [] + while not i.finished: + ret.append(i.iterindex) + i.iternext() + return ret + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_iter_refcount(): + # Make sure the iterator doesn't leak + + # Basic + a = arange(6) + dt = np.dtype('f4').newbyteorder() + rc_a = sys.getrefcount(a) + rc_dt = sys.getrefcount(dt) + with nditer(a, [], + [['readwrite', 'updateifcopy']], + casting='unsafe', + op_dtypes=[dt]) as it: + assert_(not it.iterationneedsapi) + assert_(sys.getrefcount(a) > rc_a) + assert_(sys.getrefcount(dt) > rc_dt) + # del 'it' + it = None + assert_equal(sys.getrefcount(a), rc_a) + assert_equal(sys.getrefcount(dt), rc_dt) + + # With a copy + a = arange(6, dtype='f4') + dt = np.dtype('f4') + rc_a = sys.getrefcount(a) + rc_dt = sys.getrefcount(dt) + it = nditer(a, [], + [['readwrite']], + op_dtypes=[dt]) + rc2_a = sys.getrefcount(a) + rc2_dt = sys.getrefcount(dt) + it2 = it.copy() + assert_(sys.getrefcount(a) > rc2_a) + assert_(sys.getrefcount(dt) > rc2_dt) + it = None + assert_equal(sys.getrefcount(a), rc2_a) + assert_equal(sys.getrefcount(dt), rc2_dt) + it2 = None + assert_equal(sys.getrefcount(a), rc_a) + assert_equal(sys.getrefcount(dt), rc_dt) + + del it2 # avoid pyflakes unused variable warning + +def test_iter_best_order(): + # The iterator should always find the iteration order + # with increasing memory addresses + + # Test the ordering for 1-D to 5-D shapes + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: + a = arange(np.prod(shape)) + # Test each combination of positive and negative strides + for dirs in range(2**len(shape)): + dirs_index = [slice(None)]*len(shape) + for bit in range(len(shape)): + if ((2**bit) & dirs): + dirs_index[bit] = slice(None, None, -1) + dirs_index = tuple(dirs_index) + + aview = a.reshape(shape)[dirs_index] + # C-order + i = nditer(aview, [], [['readonly']]) + assert_equal([x for x in i], a) + # Fortran-order + i = nditer(aview.T, [], [['readonly']]) + assert_equal([x for x in i], a) + # Other order + if len(shape) > 2: + i = nditer(aview.swapaxes(0, 1), [], [['readonly']]) + assert_equal([x for x in i], a) + +def test_iter_c_order(): + # Test forcing C order + + # Test the ordering for 1-D to 5-D shapes + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: + a = arange(np.prod(shape)) + # Test each combination of positive and negative strides + for dirs in range(2**len(shape)): + dirs_index = [slice(None)]*len(shape) + for bit in range(len(shape)): + if ((2**bit) & dirs): + dirs_index[bit] = slice(None, None, -1) + dirs_index = tuple(dirs_index) + + aview = a.reshape(shape)[dirs_index] + # C-order + i = nditer(aview, order='C') + assert_equal([x for x in i], aview.ravel(order='C')) + # Fortran-order + i = nditer(aview.T, order='C') + assert_equal([x for x in i], aview.T.ravel(order='C')) + # Other order + if len(shape) > 2: + i = nditer(aview.swapaxes(0, 1), order='C') + assert_equal([x for x in i], + aview.swapaxes(0, 1).ravel(order='C')) + +def test_iter_f_order(): + # Test forcing F order + + # Test the ordering for 1-D to 5-D shapes + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: + a = arange(np.prod(shape)) + # Test each combination of positive and negative strides + for dirs in range(2**len(shape)): + dirs_index = [slice(None)]*len(shape) + for bit 
in range(len(shape)): + if ((2**bit) & dirs): + dirs_index[bit] = slice(None, None, -1) + dirs_index = tuple(dirs_index) + + aview = a.reshape(shape)[dirs_index] + # C-order + i = nditer(aview, order='F') + assert_equal([x for x in i], aview.ravel(order='F')) + # Fortran-order + i = nditer(aview.T, order='F') + assert_equal([x for x in i], aview.T.ravel(order='F')) + # Other order + if len(shape) > 2: + i = nditer(aview.swapaxes(0, 1), order='F') + assert_equal([x for x in i], + aview.swapaxes(0, 1).ravel(order='F')) + +def test_iter_c_or_f_order(): + # Test forcing any contiguous (C or F) order + + # Test the ordering for 1-D to 5-D shapes + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: + a = arange(np.prod(shape)) + # Test each combination of positive and negative strides + for dirs in range(2**len(shape)): + dirs_index = [slice(None)]*len(shape) + for bit in range(len(shape)): + if ((2**bit) & dirs): + dirs_index[bit] = slice(None, None, -1) + dirs_index = tuple(dirs_index) + + aview = a.reshape(shape)[dirs_index] + # C-order + i = nditer(aview, order='A') + assert_equal([x for x in i], aview.ravel(order='A')) + # Fortran-order + i = nditer(aview.T, order='A') + assert_equal([x for x in i], aview.T.ravel(order='A')) + # Other order + if len(shape) > 2: + i = nditer(aview.swapaxes(0, 1), order='A') + assert_equal([x for x in i], + aview.swapaxes(0, 1).ravel(order='A')) + +def test_nditer_multi_index_set(): + # Test the multi_index set + a = np.arange(6).reshape(2, 3) + it = np.nditer(a, flags=['multi_index']) + + # Removes the iteration on two first elements of a[0] + it.multi_index = (0, 2,) + + assert_equal([i for i in it], [2, 3, 4, 5]) + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_nditer_multi_index_set_refcount(): + # Test if the reference count on index variable is decreased + + index = 0 + i = np.nditer(np.array([111, 222, 333, 444]), flags=['multi_index']) + + start_count = sys.getrefcount(index) + i.multi_index = (index,) + end_count = sys.getrefcount(index) + + assert_equal(start_count, end_count) + +def test_iter_best_order_multi_index_1d(): + # The multi-indices should be correct with any reordering + + a = arange(4) + # 1D order + i = nditer(a, ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0,), (1,), (2,), (3,)]) + # 1D reversed order + i = nditer(a[::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(3,), (2,), (1,), (0,)]) + +def test_iter_best_order_multi_index_2d(): + # The multi-indices should be correct with any reordering + + a = arange(6) + # 2D C-order + i = nditer(a.reshape(2, 3), ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]) + # 2D Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F'), ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0, 0), (1, 0), (0, 1), (1, 1), (0, 2), (1, 2)]) + # 2D reversed C-order + i = nditer(a.reshape(2, 3)[::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(1, 0), (1, 1), (1, 2), (0, 0), (0, 1), (0, 2)]) + i = nditer(a.reshape(2, 3)[:, ::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0, 2), (0, 1), (0, 0), (1, 2), (1, 1), (1, 0)]) + i = nditer(a.reshape(2, 3)[::-1, ::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(1, 2), (1, 1), (1, 0), (0, 2), (0, 1), (0, 0)]) + # 2D reversed Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F')[::-1], 
['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(1, 0), (0, 0), (1, 1), (0, 1), (1, 2), (0, 2)]) + i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], + ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0, 2), (1, 2), (0, 1), (1, 1), (0, 0), (1, 0)]) + i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], + ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(1, 2), (0, 2), (1, 1), (0, 1), (1, 0), (0, 0)]) + +def test_iter_best_order_multi_index_3d(): + # The multi-indices should be correct with any reordering + + a = arange(12) + # 3D C-order + i = nditer(a.reshape(2, 3, 2), ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1), + (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1)]) + # 3D Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F'), ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0), + (0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1)]) + # 3D reversed C-order + i = nditer(a.reshape(2, 3, 2)[::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1), + (0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1)]) + i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 2, 0), (0, 2, 1), (0, 1, 0), (0, 1, 1), (0, 0, 0), (0, 0, 1), + (1, 2, 0), (1, 2, 1), (1, 1, 0), (1, 1, 1), (1, 0, 0), (1, 0, 1)]) + i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 0, 1), (0, 0, 0), (0, 1, 1), (0, 1, 0), (0, 2, 1), (0, 2, 0), + (1, 0, 1), (1, 0, 0), (1, 1, 1), (1, 1, 0), (1, 2, 1), (1, 2, 0)]) + # 3D reversed Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], + ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(1, 0, 0), (0, 0, 0), (1, 1, 0), (0, 1, 0), (1, 2, 0), (0, 2, 0), + (1, 0, 1), (0, 0, 1), (1, 1, 1), (0, 1, 1), (1, 2, 1), (0, 2, 1)]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], + ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 2, 0), (1, 2, 0), (0, 1, 0), (1, 1, 0), (0, 0, 0), (1, 0, 0), + (0, 2, 1), (1, 2, 1), (0, 1, 1), (1, 1, 1), (0, 0, 1), (1, 0, 1)]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], + ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1), + (0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0)]) + +def test_iter_best_order_c_index_1d(): + # The C index should be correct with any reordering + + a = arange(4) + # 1D order + i = nditer(a, ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 1, 2, 3]) + # 1D reversed order + i = nditer(a[::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [3, 2, 1, 0]) + +def test_iter_best_order_c_index_2d(): + # The C index should be correct with any reordering + + a = arange(6) + # 2D C-order + i = nditer(a.reshape(2, 3), ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5]) + # 2D Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F'), + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 3, 1, 4, 2, 5]) + # 2D reversed C-order + i = nditer(a.reshape(2, 3)[::-1], ['c_index'], [['readonly']]) + 
assert_equal(iter_indices(i), [3, 4, 5, 0, 1, 2]) + i = nditer(a.reshape(2, 3)[:, ::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [2, 1, 0, 5, 4, 3]) + i = nditer(a.reshape(2, 3)[::-1, ::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0]) + # 2D reversed Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F')[::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [3, 0, 4, 1, 5, 2]) + i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [2, 5, 1, 4, 0, 3]) + i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [5, 2, 4, 1, 3, 0]) + +def test_iter_best_order_c_index_3d(): + # The C index should be correct with any reordering + + a = arange(12) + # 3D C-order + i = nditer(a.reshape(2, 3, 2), ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + # 3D Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F'), + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11]) + # 3D reversed C-order + i = nditer(a.reshape(2, 3, 2)[::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5]) + i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7]) + i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10]) + # 3D reversed Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10]) + +def test_iter_best_order_f_index_1d(): + # The Fortran index should be correct with any reordering + + a = arange(4) + # 1D order + i = nditer(a, ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 1, 2, 3]) + # 1D reversed order + i = nditer(a[::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [3, 2, 1, 0]) + +def test_iter_best_order_f_index_2d(): + # The Fortran index should be correct with any reordering + + a = arange(6) + # 2D C-order + i = nditer(a.reshape(2, 3), ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 2, 4, 1, 3, 5]) + # 2D Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F'), + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5]) + # 2D reversed C-order + i = nditer(a.reshape(2, 3)[::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [1, 3, 5, 0, 2, 4]) + i = nditer(a.reshape(2, 3)[:, ::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [4, 2, 0, 5, 3, 1]) + i = nditer(a.reshape(2, 3)[::-1, ::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [5, 3, 1, 4, 2, 0]) + # 2D reversed Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F')[::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [1, 0, 3, 2, 5, 4]) + i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], + ['f_index'], [['readonly']]) + 
assert_equal(iter_indices(i), [4, 5, 2, 3, 0, 1]) + i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0]) + +def test_iter_best_order_f_index_3d(): + # The Fortran index should be correct with any reordering + + a = arange(12) + # 3D C-order + i = nditer(a.reshape(2, 3, 2), ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11]) + # 3D Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F'), + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + # 3D reversed C-order + i = nditer(a.reshape(2, 3, 2)[::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10]) + i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7]) + i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5]) + # 3D reversed Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5]) + +def test_iter_no_inner_full_coalesce(): + # Check no_inner iterators which coalesce into a single inner loop + + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: + size = np.prod(shape) + a = arange(size) + # Test each combination of forward and backwards indexing + for dirs in range(2**len(shape)): + dirs_index = [slice(None)]*len(shape) + for bit in range(len(shape)): + if ((2**bit) & dirs): + dirs_index[bit] = slice(None, None, -1) + dirs_index = tuple(dirs_index) + + aview = a.reshape(shape)[dirs_index] + # C-order + i = nditer(aview, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 1) + assert_equal(i[0].shape, (size,)) + # Fortran-order + i = nditer(aview.T, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 1) + assert_equal(i[0].shape, (size,)) + # Other order + if len(shape) > 2: + i = nditer(aview.swapaxes(0, 1), + ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 1) + assert_equal(i[0].shape, (size,)) + +def test_iter_no_inner_dim_coalescing(): + # Check no_inner iterators whose dimensions may not coalesce completely + + # Skipping the last element in a dimension prevents coalescing + # with the next-bigger dimension + a = arange(24).reshape(2, 3, 4)[:,:, :-1] + i = nditer(a, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 2) + assert_equal(i[0].shape, (3,)) + a = arange(24).reshape(2, 3, 4)[:, :-1,:] + i = nditer(a, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 2) + assert_equal(i[0].shape, (8,)) + a = arange(24).reshape(2, 3, 4)[:-1,:,:] + i = nditer(a, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 1) + assert_equal(i[0].shape, (12,)) + + # Even with lots of 1-sized dimensions, should still coalesce + a = arange(24).reshape(1, 1, 2, 1, 1, 3, 1, 1, 4, 1, 1) + i = nditer(a, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 1) + assert_equal(i[0].shape, (24,)) + +def test_iter_dim_coalescing(): + # Check 
that the correct number of dimensions are coalesced + + # Tracking a multi-index disables coalescing + a = arange(24).reshape(2, 3, 4) + i = nditer(a, ['multi_index'], [['readonly']]) + assert_equal(i.ndim, 3) + + # A tracked index can allow coalescing if it's compatible with the array + a3d = arange(24).reshape(2, 3, 4) + i = nditer(a3d, ['c_index'], [['readonly']]) + assert_equal(i.ndim, 1) + i = nditer(a3d.swapaxes(0, 1), ['c_index'], [['readonly']]) + assert_equal(i.ndim, 3) + i = nditer(a3d.T, ['c_index'], [['readonly']]) + assert_equal(i.ndim, 3) + i = nditer(a3d.T, ['f_index'], [['readonly']]) + assert_equal(i.ndim, 1) + i = nditer(a3d.T.swapaxes(0, 1), ['f_index'], [['readonly']]) + assert_equal(i.ndim, 3) + + # When C or F order is forced, coalescing may still occur + a3d = arange(24).reshape(2, 3, 4) + i = nditer(a3d, order='C') + assert_equal(i.ndim, 1) + i = nditer(a3d.T, order='C') + assert_equal(i.ndim, 3) + i = nditer(a3d, order='F') + assert_equal(i.ndim, 3) + i = nditer(a3d.T, order='F') + assert_equal(i.ndim, 1) + i = nditer(a3d, order='A') + assert_equal(i.ndim, 1) + i = nditer(a3d.T, order='A') + assert_equal(i.ndim, 1) + +def test_iter_broadcasting(): + # Standard NumPy broadcasting rules + + # 1D with scalar + i = nditer([arange(6), np.int32(2)], ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 6) + assert_equal(i.shape, (6,)) + + # 2D with scalar + i = nditer([arange(6).reshape(2, 3), np.int32(2)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 6) + assert_equal(i.shape, (2, 3)) + # 2D with 1D + i = nditer([arange(6).reshape(2, 3), arange(3)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 6) + assert_equal(i.shape, (2, 3)) + i = nditer([arange(2).reshape(2, 1), arange(3)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 6) + assert_equal(i.shape, (2, 3)) + # 2D with 2D + i = nditer([arange(2).reshape(2, 1), arange(3).reshape(1, 3)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 6) + assert_equal(i.shape, (2, 3)) + + # 3D with scalar + i = nditer([np.int32(2), arange(24).reshape(4, 2, 3)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + # 3D with 1D + i = nditer([arange(3), arange(24).reshape(4, 2, 3)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(3), arange(8).reshape(4, 2, 1)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + # 3D with 2D + i = nditer([arange(6).reshape(2, 3), arange(24).reshape(4, 2, 3)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(2).reshape(2, 1), arange(24).reshape(4, 2, 3)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(3).reshape(1, 3), arange(8).reshape(4, 2, 1)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + # 3D with 3D + i = nditer([arange(2).reshape(1, 2, 1), arange(3).reshape(1, 1, 3), + arange(4).reshape(4, 1, 1)], + ['multi_index'], [['readonly']]*3) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(6).reshape(1, 2, 3), arange(4).reshape(4, 1, 1)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(24).reshape(4, 2, 3), arange(12).reshape(4, 1, 
3)], + ['multi_index'], [['readonly']]*2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + +def test_iter_itershape(): + # Check that allocated outputs work with a specified shape + a = np.arange(6, dtype='i2').reshape(2, 3) + i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], + op_axes=[[0, 1, None], None], + itershape=(-1, -1, 4)) + assert_equal(i.operands[1].shape, (2, 3, 4)) + assert_equal(i.operands[1].strides, (24, 8, 2)) + + i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']], + op_axes=[[0, 1, None], None], + itershape=(-1, -1, 4)) + assert_equal(i.operands[1].shape, (3, 2, 4)) + assert_equal(i.operands[1].strides, (8, 24, 2)) + + i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']], + order='F', + op_axes=[[0, 1, None], None], + itershape=(-1, -1, 4)) + assert_equal(i.operands[1].shape, (3, 2, 4)) + assert_equal(i.operands[1].strides, (2, 6, 12)) + + # If we specify 1 in the itershape, it shouldn't allow broadcasting + # of that dimension to a bigger value + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['writeonly', 'allocate']], + op_axes=[[0, 1, None], None], + itershape=(-1, 1, 4)) + # Test bug that for no op_axes but itershape, they are NULLed correctly + i = np.nditer([np.ones(2), None, None], itershape=(2,)) + +def test_iter_broadcasting_errors(): + # Check that errors are thrown for bad broadcasting shapes + + # 1D with 1D + assert_raises(ValueError, nditer, [arange(2), arange(3)], + [], [['readonly']]*2) + # 2D with 1D + assert_raises(ValueError, nditer, + [arange(6).reshape(2, 3), arange(2)], + [], [['readonly']]*2) + # 2D with 2D + assert_raises(ValueError, nditer, + [arange(6).reshape(2, 3), arange(9).reshape(3, 3)], + [], [['readonly']]*2) + assert_raises(ValueError, nditer, + [arange(6).reshape(2, 3), arange(4).reshape(2, 2)], + [], [['readonly']]*2) + # 3D with 3D + assert_raises(ValueError, nditer, + [arange(36).reshape(3, 3, 4), arange(24).reshape(2, 3, 4)], + [], [['readonly']]*2) + assert_raises(ValueError, nditer, + [arange(8).reshape(2, 4, 1), arange(24).reshape(2, 3, 4)], + [], [['readonly']]*2) + + # Verify that the error message mentions the right shapes + try: + nditer([arange(2).reshape(1, 2, 1), + arange(3).reshape(1, 3), + arange(6).reshape(2, 3)], + [], + [['readonly'], ['readonly'], ['writeonly', 'no_broadcast']]) + raise AssertionError('Should have raised a broadcast error') + except ValueError as e: + msg = str(e) + # The message should contain the shape of the 3rd operand + assert_(msg.find('(2,3)') >= 0, + 'Message "%s" doesn\'t contain operand shape (2,3)' % msg) + # The message should contain the broadcast shape + assert_(msg.find('(1,2,3)') >= 0, + 'Message "%s" doesn\'t contain broadcast shape (1,2,3)' % msg) + + try: + nditer([arange(6).reshape(2, 3), arange(2)], + [], + [['readonly'], ['readonly']], + op_axes=[[0, 1], [0, np.newaxis]], + itershape=(4, 3)) + raise AssertionError('Should have raised a broadcast error') + except ValueError as e: + msg = str(e) + # The message should contain "shape->remappedshape" for each operand + assert_(msg.find('(2,3)->(2,3)') >= 0, + 'Message "%s" doesn\'t contain operand shape (2,3)->(2,3)' % msg) + assert_(msg.find('(2,)->(2,newaxis)') >= 0, + ('Message "%s" doesn\'t contain remapped operand shape' + + '(2,)->(2,newaxis)') % msg) + # The message should contain the itershape parameter + assert_(msg.find('(4,3)') >= 0, + 'Message "%s" doesn\'t contain itershape parameter (4,3)' % msg) + + try: + nditer([np.zeros((2, 
1, 1)), np.zeros((2,))], + [], + [['writeonly', 'no_broadcast'], ['readonly']]) + raise AssertionError('Should have raised a broadcast error') + except ValueError as e: + msg = str(e) + # The message should contain the shape of the bad operand + assert_(msg.find('(2,1,1)') >= 0, + 'Message "%s" doesn\'t contain operand shape (2,1,1)' % msg) + # The message should contain the broadcast shape + assert_(msg.find('(2,1,2)') >= 0, + 'Message "%s" doesn\'t contain the broadcast shape (2,1,2)' % msg) + +def test_iter_flags_errors(): + # Check that bad combinations of flags produce errors + + a = arange(6) + + # Not enough operands + assert_raises(ValueError, nditer, [], [], []) + # Too many operands + assert_raises(ValueError, nditer, [a]*100, [], [['readonly']]*100) + # Bad global flag + assert_raises(ValueError, nditer, [a], ['bad flag'], [['readonly']]) + # Bad op flag + assert_raises(ValueError, nditer, [a], [], [['readonly', 'bad flag']]) + # Bad order parameter + assert_raises(ValueError, nditer, [a], [], [['readonly']], order='G') + # Bad casting parameter + assert_raises(ValueError, nditer, [a], [], [['readonly']], casting='noon') + # op_flags must match ops + assert_raises(ValueError, nditer, [a]*3, [], [['readonly']]*2) + # Cannot track both a C and an F index + assert_raises(ValueError, nditer, a, + ['c_index', 'f_index'], [['readonly']]) + # Inner iteration and multi-indices/indices are incompatible + assert_raises(ValueError, nditer, a, + ['external_loop', 'multi_index'], [['readonly']]) + assert_raises(ValueError, nditer, a, + ['external_loop', 'c_index'], [['readonly']]) + assert_raises(ValueError, nditer, a, + ['external_loop', 'f_index'], [['readonly']]) + # Must specify exactly one of readwrite/readonly/writeonly per operand + assert_raises(ValueError, nditer, a, [], [[]]) + assert_raises(ValueError, nditer, a, [], [['readonly', 'writeonly']]) + assert_raises(ValueError, nditer, a, [], [['readonly', 'readwrite']]) + assert_raises(ValueError, nditer, a, [], [['writeonly', 'readwrite']]) + assert_raises(ValueError, nditer, a, + [], [['readonly', 'writeonly', 'readwrite']]) + # Python scalars are always readonly + assert_raises(TypeError, nditer, 1.5, [], [['writeonly']]) + assert_raises(TypeError, nditer, 1.5, [], [['readwrite']]) + # Array scalars are always readonly + assert_raises(TypeError, nditer, np.int32(1), [], [['writeonly']]) + assert_raises(TypeError, nditer, np.int32(1), [], [['readwrite']]) + # Check readonly array + a.flags.writeable = False + assert_raises(ValueError, nditer, a, [], [['writeonly']]) + assert_raises(ValueError, nditer, a, [], [['readwrite']]) + a.flags.writeable = True + # Multi-indices available only with the multi_index flag + i = nditer(arange(6), [], [['readonly']]) + assert_raises(ValueError, lambda i:i.multi_index, i) + # Index available only with an index flag + assert_raises(ValueError, lambda i:i.index, i) + # GotoCoords and GotoIndex incompatible with buffering or no_inner + + def assign_multi_index(i): + i.multi_index = (0,) + + def assign_index(i): + i.index = 0 + + def assign_iterindex(i): + i.iterindex = 0 + + def assign_iterrange(i): + i.iterrange = (0, 1) + i = nditer(arange(6), ['external_loop']) + assert_raises(ValueError, assign_multi_index, i) + assert_raises(ValueError, assign_index, i) + assert_raises(ValueError, assign_iterindex, i) + assert_raises(ValueError, assign_iterrange, i) + i = nditer(arange(6), ['buffered']) + assert_raises(ValueError, assign_multi_index, i) + assert_raises(ValueError, assign_index, i) + 
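
# [Editorial aside, not part of the vendored NumPy file] The positive
# counterpart of the error checks around here: once the matching tracking
# flag is requested (and neither buffering nor an external loop is active),
# the index property can be read and also assigned to seek the iterator.
import numpy as np
demo_it = np.nditer(np.arange(6), ['c_index'])
demo_it.index = 3          # seeking is allowed because 'c_index' is tracked
assert demo_it.value == 3
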
    assert_raises(ValueError, assign_iterrange, i)
+    # Can't iterate if size is zero
+    assert_raises(ValueError, nditer, np.array([]))
+
+def test_iter_slice():
+    a, b, c = np.arange(3), np.arange(3), np.arange(3.)
+    i = nditer([a, b, c], [], ['readwrite'])
+    with i:
+        i[0:2] = (3, 3)
+        assert_equal(a, [3, 1, 2])
+        assert_equal(b, [3, 1, 2])
+        assert_equal(c, [0, 1, 2])
+        i[1] = 12
+        assert_equal(i[0:2], [3, 12])
+
+def test_iter_assign_mapping():
+    a = np.arange(24, dtype='f8').reshape(2, 3, 4).T
+    it = np.nditer(a, [], [['readwrite', 'updateifcopy']],
+                   casting='same_kind', op_dtypes=[np.dtype('f4')])
+    with it:
+        it.operands[0][...] = 3
+        it.operands[0][...] = 14
+    assert_equal(a, 14)
+    it = np.nditer(a, [], [['readwrite', 'updateifcopy']],
+                   casting='same_kind', op_dtypes=[np.dtype('f4')])
+    with it:
+        x = it.operands[0][-1:1]
+        x[...] = 14
+        it.operands[0][...] = -1234
+    assert_equal(a, -1234)
+    # check for no warnings on dealloc
+    x = None
+    it = None
+
+def test_iter_nbo_align_contig():
+    # Check that byte order, alignment, and contig changes work
+
+    # Byte order change by requesting a specific dtype
+    a = np.arange(6, dtype='f4')
+    au = a.byteswap().newbyteorder()
+    assert_(a.dtype.byteorder != au.dtype.byteorder)
+    i = nditer(au, [], [['readwrite', 'updateifcopy']],
+               casting='equiv',
+               op_dtypes=[np.dtype('f4')])
+    with i:
+        # context manager triggers UPDATEIFCOPY on i at exit
+        assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder)
+        assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder)
+        assert_equal(i.operands[0], a)
+        i.operands[0][:] = 2
+    assert_equal(au, [2]*6)
+    del i  # should not raise a warning
+    # Byte order change by requesting NBO
+    a = np.arange(6, dtype='f4')
+    au = a.byteswap().newbyteorder()
+    assert_(a.dtype.byteorder != au.dtype.byteorder)
+    with nditer(au, [], [['readwrite', 'updateifcopy', 'nbo']],
+                casting='equiv') as i:
+        # context manager triggers UPDATEIFCOPY on i at exit
+        assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder)
+        assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder)
+        assert_equal(i.operands[0], a)
+        i.operands[0][:] = 12345
+        i.operands[0][:] = 2
+    assert_equal(au, [2]*6)
+
+    # Unaligned input
+    a = np.zeros((6*4+1,), dtype='i1')[1:]
+    a.dtype = 'f4'
+    a[:] = np.arange(6, dtype='f4')
+    assert_(not a.flags.aligned)
+    # Without 'aligned', shouldn't copy
+    i = nditer(a, [], [['readonly']])
+    assert_(not i.operands[0].flags.aligned)
+    assert_equal(i.operands[0], a)
+    # With 'aligned', should make a copy
+    with nditer(a, [], [['readwrite', 'updateifcopy', 'aligned']]) as i:
+        assert_(i.operands[0].flags.aligned)
+        # context manager triggers UPDATEIFCOPY on i at exit
+        assert_equal(i.operands[0], a)
+        i.operands[0][:] = 3
+    assert_equal(a, [3]*6)
+
+    # Discontiguous input
+    a = arange(12)
+    # If it is contiguous, shouldn't copy
+    i = nditer(a[:6], [], [['readonly']])
+    assert_(i.operands[0].flags.contiguous)
+    assert_equal(i.operands[0], a[:6])
+    # If it isn't contiguous, should buffer
+    i = nditer(a[::2], ['buffered', 'external_loop'],
+               [['readonly', 'contig']],
+               buffersize=10)
+    assert_(i[0].flags.contiguous)
+    assert_equal(i[0], a[::2])
+
+def test_iter_array_cast():
+    # Check that arrays are cast as requested
+
+    # No cast 'f4' -> 'f4'
+    a = np.arange(6, dtype='f4').reshape(2, 3)
+    i = nditer(a, [], [['readwrite']], op_dtypes=[np.dtype('f4')])
+    with i:
+        assert_equal(i.operands[0], a)
+        assert_equal(i.operands[0].dtype, np.dtype('f4'))
+
+    # Byte-order cast '<f4' -> '>f4'
+    a = np.arange(6, dtype='<f4').reshape(2, 3)
+    with nditer(a, [], [['readwrite', 'updateifcopy']],
+                casting='equiv',
+                op_dtypes=[np.dtype('>f4')]) as i:
assert_equal(i.operands[0], a) + assert_equal(i.operands[0].dtype, np.dtype('>f4')) + + # Safe case 'f4' -> 'f8' + a = np.arange(24, dtype='f4').reshape(2, 3, 4).swapaxes(1, 2) + i = nditer(a, [], [['readonly', 'copy']], + casting='safe', + op_dtypes=[np.dtype('f8')]) + assert_equal(i.operands[0], a) + assert_equal(i.operands[0].dtype, np.dtype('f8')) + # The memory layout of the temporary should match a (a is (48,4,16)) + # except negative strides get flipped to positive strides. + assert_equal(i.operands[0].strides, (96, 8, 32)) + a = a[::-1,:, ::-1] + i = nditer(a, [], [['readonly', 'copy']], + casting='safe', + op_dtypes=[np.dtype('f8')]) + assert_equal(i.operands[0], a) + assert_equal(i.operands[0].dtype, np.dtype('f8')) + assert_equal(i.operands[0].strides, (96, 8, 32)) + + # Same-kind cast 'f8' -> 'f4' -> 'f8' + a = np.arange(24, dtype='f8').reshape(2, 3, 4).T + with nditer(a, [], + [['readwrite', 'updateifcopy']], + casting='same_kind', + op_dtypes=[np.dtype('f4')]) as i: + assert_equal(i.operands[0], a) + assert_equal(i.operands[0].dtype, np.dtype('f4')) + assert_equal(i.operands[0].strides, (4, 16, 48)) + # Check that WRITEBACKIFCOPY is activated at exit + i.operands[0][2, 1, 1] = -12.5 + assert_(a[2, 1, 1] != -12.5) + assert_equal(a[2, 1, 1], -12.5) + + a = np.arange(6, dtype='i4')[::-2] + with nditer(a, [], + [['writeonly', 'updateifcopy']], + casting='unsafe', + op_dtypes=[np.dtype('f4')]) as i: + assert_equal(i.operands[0].dtype, np.dtype('f4')) + # Even though the stride was negative in 'a', it + # becomes positive in the temporary + assert_equal(i.operands[0].strides, (4,)) + i.operands[0][:] = [1, 2, 3] + assert_equal(a, [1, 2, 3]) + +def test_iter_array_cast_errors(): + # Check that invalid casts are caught + + # Need to enable copying for casts to occur + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readonly']], op_dtypes=[np.dtype('f8')]) + # Also need to allow casting for casts to occur + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readonly', 'copy']], casting='no', + op_dtypes=[np.dtype('f8')]) + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readonly', 'copy']], casting='equiv', + op_dtypes=[np.dtype('f8')]) + assert_raises(TypeError, nditer, arange(2, dtype='f8'), [], + [['writeonly', 'updateifcopy']], + casting='no', + op_dtypes=[np.dtype('f4')]) + assert_raises(TypeError, nditer, arange(2, dtype='f8'), [], + [['writeonly', 'updateifcopy']], + casting='equiv', + op_dtypes=[np.dtype('f4')]) + # '<f4' -> '>f4' should not work with casting='no' + assert_raises(TypeError, nditer, arange(2, dtype='<f4'), [], + [['readonly', 'copy']], casting='no', + op_dtypes=[np.dtype('>f4')]) + # 'f4' -> 'f8' is a safe cast, but 'f8' -> 'f4' isn't + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readwrite', 'updateifcopy']], + casting='safe', + op_dtypes=[np.dtype('f8')]) + assert_raises(TypeError, nditer, arange(2, dtype='f8'), [], + [['readwrite', 'updateifcopy']], + casting='safe', + op_dtypes=[np.dtype('f4')]) + # 'f4' -> 'i4' is neither a safe nor a same-kind cast + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readonly', 'copy']], + casting='same_kind', + op_dtypes=[np.dtype('i4')]) + assert_raises(TypeError, nditer, arange(2, dtype='i4'), [], + [['writeonly', 'updateifcopy']], + casting='same_kind', + op_dtypes=[np.dtype('f4')]) + +def test_iter_scalar_cast(): + # Check that scalars are cast as requested + + # No cast 'f4' -> 'f4' + i = nditer(np.float32(2.5), [], [['readonly']], + op_dtypes=[np.dtype('f4')]) + assert_equal(i.dtypes[0], np.dtype('f4')) + 
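+ # (i.value exposes the current element, so the dtype handling can also be + # checked on the element itself)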
assert_equal(i.value.dtype, np.dtype('f4')) + assert_equal(i.value, 2.5) + # Safe cast 'f4' -> 'f8' + i = nditer(np.float32(2.5), [], + [['readonly', 'copy']], + casting='safe', + op_dtypes=[np.dtype('f8')]) + assert_equal(i.dtypes[0], np.dtype('f8')) + assert_equal(i.value.dtype, np.dtype('f8')) + assert_equal(i.value, 2.5) + # Same-kind cast 'f8' -> 'f4' + i = nditer(np.float64(2.5), [], + [['readonly', 'copy']], + casting='same_kind', + op_dtypes=[np.dtype('f4')]) + assert_equal(i.dtypes[0], np.dtype('f4')) + assert_equal(i.value.dtype, np.dtype('f4')) + assert_equal(i.value, 2.5) + # Unsafe cast 'f8' -> 'i4' + i = nditer(np.float64(3.0), [], + [['readonly', 'copy']], + casting='unsafe', + op_dtypes=[np.dtype('i4')]) + assert_equal(i.dtypes[0], np.dtype('i4')) + assert_equal(i.value.dtype, np.dtype('i4')) + assert_equal(i.value, 3) + # Readonly scalars may be cast even without setting COPY or BUFFERED + i = nditer(3, [], [['readonly']], op_dtypes=[np.dtype('f8')]) + assert_equal(i[0].dtype, np.dtype('f8')) + assert_equal(i[0], 3.) + +def test_iter_scalar_cast_errors(): + # Check that invalid casts are caught + + # Need to allow copying/buffering for write casts of scalars to occur + assert_raises(TypeError, nditer, np.float32(2), [], + [['readwrite']], op_dtypes=[np.dtype('f8')]) + assert_raises(TypeError, nditer, 2.5, [], + [['readwrite']], op_dtypes=[np.dtype('f4')]) + # 'f8' -> 'f4' isn't a safe cast if the value would overflow + assert_raises(TypeError, nditer, np.float64(1e60), [], + [['readonly']], + casting='safe', + op_dtypes=[np.dtype('f4')]) + # 'f4' -> 'i4' is neither a safe nor a same-kind cast + assert_raises(TypeError, nditer, np.float32(2), [], + [['readonly']], + casting='same_kind', + op_dtypes=[np.dtype('i4')]) + +def test_iter_object_arrays_basic(): + # Check that object arrays work + + obj = {'a':3,'b':'d'} + a = np.array([[1, 2, 3], None, obj, None], dtype='O') + if HAS_REFCOUNT: + rc = sys.getrefcount(obj) + + # Need to allow references for object arrays + assert_raises(TypeError, nditer, a) + if HAS_REFCOUNT: + assert_equal(sys.getrefcount(obj), rc) + + i = nditer(a, ['refs_ok'], ['readonly']) + vals = [x_[()] for x_ in i] + assert_equal(np.array(vals, dtype='O'), a) + vals, i, x = [None]*3 + if HAS_REFCOUNT: + assert_equal(sys.getrefcount(obj), rc) + + i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'], + ['readonly'], order='C') + assert_(i.iterationneedsapi) + vals = [x_[()] for x_ in i] + assert_equal(np.array(vals, dtype='O'), a.reshape(2, 2).ravel(order='F')) + vals, i, x = [None]*3 + if HAS_REFCOUNT: + assert_equal(sys.getrefcount(obj), rc) + + i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'], + ['readwrite'], order='C') + with i: + for x in i: + x[...] = None + vals, i, x = [None]*3 + if HAS_REFCOUNT: + assert_(sys.getrefcount(obj) == rc-1) + assert_equal(a, np.array([None]*4, dtype='O')) + +def test_iter_object_arrays_conversions(): + # Conversions to/from objects + a = np.arange(6, dtype='O') + i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], + casting='unsafe', op_dtypes='i4') + with i: + for x in i: + x[...] += 1 + assert_equal(a, np.arange(6)+1) + + a = np.arange(6, dtype='i4') + i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], + casting='unsafe', op_dtypes='O') + with i: + for x in i: + x[...] 
+= 1 + assert_equal(a, np.arange(6)+1) + + # Non-contiguous object array + a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'O')]) + a = a['a'] + a[:] = np.arange(6) + i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], + casting='unsafe', op_dtypes='i4') + with i: + for x in i: + x[...] += 1 + assert_equal(a, np.arange(6)+1) + + #Non-contiguous value array + a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'i4')]) + a = a['a'] + a[:] = np.arange(6) + 98172488 + i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], + casting='unsafe', op_dtypes='O') + with i: + ob = i[0][()] + if HAS_REFCOUNT: + rc = sys.getrefcount(ob) + for x in i: + x[...] += 1 + if HAS_REFCOUNT: + assert_(sys.getrefcount(ob) == rc-1) + assert_equal(a, np.arange(6)+98172489) + +def test_iter_common_dtype(): + # Check that the iterator finds a common data type correctly + + i = nditer([array([3], dtype='f4'), array([0], dtype='f8')], + ['common_dtype'], + [['readonly', 'copy']]*2, + casting='safe') + assert_equal(i.dtypes[0], np.dtype('f8')) + assert_equal(i.dtypes[1], np.dtype('f8')) + i = nditer([array([3], dtype='i4'), array([0], dtype='f4')], + ['common_dtype'], + [['readonly', 'copy']]*2, + casting='safe') + assert_equal(i.dtypes[0], np.dtype('f8')) + assert_equal(i.dtypes[1], np.dtype('f8')) + i = nditer([array([3], dtype='f4'), array(0, dtype='f8')], + ['common_dtype'], + [['readonly', 'copy']]*2, + casting='same_kind') + assert_equal(i.dtypes[0], np.dtype('f4')) + assert_equal(i.dtypes[1], np.dtype('f4')) + i = nditer([array([3], dtype='u4'), array(0, dtype='i4')], + ['common_dtype'], + [['readonly', 'copy']]*2, + casting='safe') + assert_equal(i.dtypes[0], np.dtype('u4')) + assert_equal(i.dtypes[1], np.dtype('u4')) + i = nditer([array([3], dtype='u4'), array(-12, dtype='i4')], + ['common_dtype'], + [['readonly', 'copy']]*2, + casting='safe') + assert_equal(i.dtypes[0], np.dtype('i8')) + assert_equal(i.dtypes[1], np.dtype('i8')) + i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'), + array([2j], dtype='c8'), array([9], dtype='f8')], + ['common_dtype'], + [['readonly', 'copy']]*4, + casting='safe') + assert_equal(i.dtypes[0], np.dtype('c16')) + assert_equal(i.dtypes[1], np.dtype('c16')) + assert_equal(i.dtypes[2], np.dtype('c16')) + assert_equal(i.dtypes[3], np.dtype('c16')) + assert_equal(i.value, (3, -12, 2j, 9)) + + # When allocating outputs, other outputs aren't factored in + i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')], [], + [['readonly', 'copy'], + ['writeonly', 'allocate'], + ['writeonly']], + casting='safe') + assert_equal(i.dtypes[0], np.dtype('i4')) + assert_equal(i.dtypes[1], np.dtype('i4')) + assert_equal(i.dtypes[2], np.dtype('c16')) + # But, if common data types are requested, they are + i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')], + ['common_dtype'], + [['readonly', 'copy'], + ['writeonly', 'allocate'], + ['writeonly']], + casting='safe') + assert_equal(i.dtypes[0], np.dtype('c16')) + assert_equal(i.dtypes[1], np.dtype('c16')) + assert_equal(i.dtypes[2], np.dtype('c16')) + +def test_iter_copy_if_overlap(): + # Ensure the iterator makes copies on read/write overlap, if requested + + # Copy not needed, 1 op + for flag in ['readonly', 'writeonly', 'readwrite']: + a = arange(10) + i = nditer([a], ['copy_if_overlap'], [[flag]]) + with i: + assert_(i.operands[0] is a) + + # Copy needed, 2 ops, read-write overlap + x = arange(10) + a = x[1:] + b = x[:-1] + with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i: + assert_(not 
np.shares_memory(*i.operands)) + + # Copy not needed with elementwise, 2 ops, exactly same arrays + x = arange(10) + a = x + b = x + i = nditer([a, b], ['copy_if_overlap'], [['readonly', 'overlap_assume_elementwise'], + ['readwrite', 'overlap_assume_elementwise']]) + with i: + assert_(i.operands[0] is a and i.operands[1] is b) + with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i: + assert_(i.operands[0] is a and not np.shares_memory(i.operands[1], b)) + + # Copy not needed, 2 ops, no overlap + x = arange(10) + a = x[::2] + b = x[1::2] + i = nditer([a, b], ['copy_if_overlap'], [['readonly'], ['writeonly']]) + assert_(i.operands[0] is a and i.operands[1] is b) + + # Copy needed, 2 ops, read-write overlap + x = arange(4, dtype=np.int8) + a = x[3:] + b = x.view(np.int32)[:1] + with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['writeonly']]) as i: + assert_(not np.shares_memory(*i.operands)) + + # Copy needed, 3 ops, read-write overlap + for flag in ['writeonly', 'readwrite']: + x = np.ones([10, 10]) + a = x + b = x.T + c = x + with nditer([a, b, c], ['copy_if_overlap'], + [['readonly'], ['readonly'], [flag]]) as i: + a2, b2, c2 = i.operands + assert_(not np.shares_memory(a2, c2)) + assert_(not np.shares_memory(b2, c2)) + + # Copy not needed, 3 ops, read-only overlap + x = np.ones([10, 10]) + a = x + b = x.T + c = x + i = nditer([a, b, c], ['copy_if_overlap'], + [['readonly'], ['readonly'], ['readonly']]) + a2, b2, c2 = i.operands + assert_(a is a2) + assert_(b is b2) + assert_(c is c2) + + # Copy not needed, 3 ops, read-only overlap + x = np.ones([10, 10]) + a = x + b = np.ones([10, 10]) + c = x.T + i = nditer([a, b, c], ['copy_if_overlap'], + [['readonly'], ['writeonly'], ['readonly']]) + a2, b2, c2 = i.operands + assert_(a is a2) + assert_(b is b2) + assert_(c is c2) + + # Copy not needed, 3 ops, write-only overlap + x = np.arange(7) + a = x[:3] + b = x[3:6] + c = x[4:7] + i = nditer([a, b, c], ['copy_if_overlap'], + [['readonly'], ['writeonly'], ['writeonly']]) + a2, b2, c2 = i.operands + assert_(a is a2) + assert_(b is b2) + assert_(c is c2) + +def test_iter_op_axes(): + # Check that custom axes work + + # Reverse the axes + a = arange(6).reshape(2, 3) + i = nditer([a, a.T], [], [['readonly']]*2, op_axes=[[0, 1], [1, 0]]) + assert_(all([x == y for (x, y) in i])) + a = arange(24).reshape(2, 3, 4) + i = nditer([a.T, a], [], [['readonly']]*2, op_axes=[[2, 1, 0], None]) + assert_(all([x == y for (x, y) in i])) + + # Broadcast 1D to any dimension + a = arange(1, 31).reshape(2, 3, 5) + b = arange(1, 3) + i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [0, -1, -1]]) + assert_equal([x*y for (x, y) in i], (a*b.reshape(2, 1, 1)).ravel()) + b = arange(1, 4) + i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [-1, 0, -1]]) + assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 3, 1)).ravel()) + b = arange(1, 6) + i = nditer([a, b], [], [['readonly']]*2, + op_axes=[None, [np.newaxis, np.newaxis, 0]]) + assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 1, 5)).ravel()) + + # Inner product-style broadcasting + a = arange(24).reshape(2, 3, 4) + b = arange(40).reshape(5, 2, 4) + i = nditer([a, b], ['multi_index'], [['readonly']]*2, + op_axes=[[0, 1, -1, -1], [-1, -1, 0, 1]]) + assert_equal(i.shape, (2, 3, 5, 2)) + + # Matrix product-style broadcasting + a = arange(12).reshape(3, 4) + b = arange(20).reshape(4, 5) + i = nditer([a, b], ['multi_index'], [['readonly']]*2, + op_axes=[[0, -1], [-1, 1]]) + assert_equal(i.shape, (3, 5)) + +def 
test_iter_op_axes_errors(): + # Check that custom axes throw errors for bad inputs + + # Wrong number of items in op_axes + a = arange(6).reshape(2, 3) + assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + op_axes=[[0], [1], [0]]) + # Out of bounds items in op_axes + assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + op_axes=[[2, 1], [0, 1]]) + assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + op_axes=[[0, 1], [2, -1]]) + # Duplicate items in op_axes + assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + op_axes=[[0, 0], [0, 1]]) + assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + op_axes=[[0, 1], [1, 1]]) + + # Different sized arrays in op_axes + assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + op_axes=[[0, 1], [0, 1, 0]]) + + # Non-broadcastable dimensions in the result + assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + op_axes=[[0, 1], [1, 0]]) + +def test_iter_copy(): + # Check that copying the iterator works correctly + a = arange(24).reshape(2, 3, 4) + + # Simple iterator + i = nditer(a) + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + i.iterindex = 3 + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + # Buffered iterator + i = nditer(a, ['buffered', 'ranged'], order='F', buffersize=3) + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + i.iterindex = 3 + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + i.iterrange = (3, 9) + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + i.iterrange = (2, 18) + next(i) + next(i) + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + # Casting iterator + with nditer(a, ['buffered'], order='F', casting='unsafe', + op_dtypes='f8', buffersize=5) as i: + j = i.copy() + assert_equal([x[()] for x in j], a.ravel(order='F')) + + a = arange(24, dtype='<i4').reshape(2, 3, 4).T + with nditer(a, ['buffered'], order='F', casting='unsafe', + op_dtypes='>f8', buffersize=5) as i: + j = i.copy() + assert_equal([x[()] for x in j], a.ravel(order='F')) + +def test_iter_copy_casts_structured(): + # Test a complicated structured dtype for which the casts go + # from structured -> unstructured (any to object), and many other + # casts, which cause this to require all steps in the casting machinery + # one level down as well as the iterator copy (which uses NpyAuxData clone) + in_dtype = np.dtype([("a", np.dtype("i,")), + ("b", np.dtype(">i,<i,>d,S17,>d,(3)f,O,i1"))]) + out_dtype = np.dtype([("a", np.dtype("O")), + ("b", np.dtype(">i,>i,S17,>d,>U3,(3)d,i1,O"))]) + arr = np.ones(1000, dtype=in_dtype) + + it = np.nditer((arr,), ["buffered", "external_loop", "refs_ok"], + op_dtypes=[out_dtype], casting="unsafe") + it_copy = it.copy() + + res1 = next(it) + del it + res2 = next(it_copy) + del it_copy + + expected = arr["a"].astype(out_dtype["a"]) + assert_array_equal(res1["a"], expected) + assert_array_equal(res2["a"], expected) + + for field in in_dtype["b"].names: + # Note that the .base avoids the subarray field + expected = arr["b"][field].astype(out_dtype["b"][field].base) + assert_array_equal(res1["b"][field], expected) + assert_array_equal(res2["b"][field], expected) + + +def test_iter_allocate_output_simple(): + # Check that the iterator will properly allocate outputs + + # Simple case + a = arange(6) + i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')]) + assert_equal(i.operands[1].shape, a.shape) + assert_equal(i.operands[1].dtype, np.dtype('f4')) + +def test_iter_allocate_output_buffered_readwrite(): + # Allocated output with buffering + delay_bufalloc + + a = arange(6) + i = nditer([a, None], ['buffered', 'delay_bufalloc'], + [['readonly'], ['allocate', 'readwrite']]) + with i: + 
i.operands[1][:] = 1 + i.reset() + for x in i: + x[1][...] += x[0][...] + assert_equal(i.operands[1], a+1) + +def test_iter_allocate_output_itorder(): + # The allocated output should match the iteration order + + # C-order input, best iteration order + a = arange(6, dtype='i4').reshape(2, 3) + i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')]) + assert_equal(i.operands[1].shape, a.shape) + assert_equal(i.operands[1].strides, a.strides) + assert_equal(i.operands[1].dtype, np.dtype('f4')) + # F-order input, best iteration order + a = arange(24, dtype='i4').reshape(2, 3, 4).T + i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')]) + assert_equal(i.operands[1].shape, a.shape) + assert_equal(i.operands[1].strides, a.strides) + assert_equal(i.operands[1].dtype, np.dtype('f4')) + # Non-contiguous input, C iteration order + a = arange(24, dtype='i4').reshape(2, 3, 4).swapaxes(0, 1) + i = nditer([a, None], [], + [['readonly'], ['writeonly', 'allocate']], + order='C', + op_dtypes=[None, np.dtype('f4')]) + assert_equal(i.operands[1].shape, a.shape) + assert_equal(i.operands[1].strides, (32, 16, 4)) + assert_equal(i.operands[1].dtype, np.dtype('f4')) + +def test_iter_allocate_output_opaxes(): + # Specifying op_axes should work + + a = arange(24, dtype='i4').reshape(2, 3, 4) + i = nditer([None, a], [], [['writeonly', 'allocate'], ['readonly']], + op_dtypes=[np.dtype('u4'), None], + op_axes=[[1, 2, 0], None]) + assert_equal(i.operands[0].shape, (4, 2, 3)) + assert_equal(i.operands[0].strides, (4, 48, 16)) + assert_equal(i.operands[0].dtype, np.dtype('u4')) + +def test_iter_allocate_output_types_promotion(): + # Check type promotion of automatic outputs + + i = nditer([array([3], dtype='f4'), array([0], dtype='f8'), None], [], + [['readonly']]*2+[['writeonly', 'allocate']]) + assert_equal(i.dtypes[2], np.dtype('f8')) + i = nditer([array([3], dtype='i4'), array([0], dtype='f4'), None], [], + [['readonly']]*2+[['writeonly', 'allocate']]) + assert_equal(i.dtypes[2], np.dtype('f8')) + i = nditer([array([3], dtype='f4'), array(0, dtype='f8'), None], [], + [['readonly']]*2+[['writeonly', 'allocate']]) + assert_equal(i.dtypes[2], np.dtype('f4')) + i = nditer([array([3], dtype='u4'), array(0, dtype='i4'), None], [], + [['readonly']]*2+[['writeonly', 'allocate']]) + assert_equal(i.dtypes[2], np.dtype('u4')) + i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'), None], [], + [['readonly']]*2+[['writeonly', 'allocate']]) + assert_equal(i.dtypes[2], np.dtype('i8')) + +def test_iter_allocate_output_types_byte_order(): + # Verify the rules for byte order changes + + # When there's just one input, the output type exactly matches + a = array([3], dtype='u4').newbyteorder() + i = nditer([a, None], [], + [['readonly'], ['writeonly', 'allocate']]) + assert_equal(i.dtypes[0], i.dtypes[1]) + # With two or more inputs, the output type is in native byte order + i = nditer([a, a, None], [], + [['readonly'], ['readonly'], ['writeonly', 'allocate']]) + assert_(i.dtypes[0] != i.dtypes[2]) + assert_equal(i.dtypes[0].newbyteorder('='), i.dtypes[2]) + +def test_iter_allocate_output_types_scalar(): + # If the inputs are all scalars, the output should be a scalar + + i = nditer([None, 1, 2.3, np.float32(12), np.complex128(3)], [], + [['writeonly', 'allocate']] + [['readonly']]*4) + assert_equal(i.operands[0].dtype, np.dtype('complex128')) + assert_equal(i.operands[0].ndim, 0) + +def test_iter_allocate_output_subtype(): + # 
Make sure that the subtype with priority wins + class MyNDArray(np.ndarray): + __array_priority__ = 15 + + # subclass vs ndarray + a = np.array([[1, 2], [3, 4]]).view(MyNDArray) + b = np.arange(4).reshape(2, 2).T + i = nditer([a, b, None], [], + [['readonly'], ['readonly'], ['writeonly', 'allocate']]) + assert_equal(type(a), type(i.operands[2])) + assert_(type(b) is not type(i.operands[2])) + assert_equal(i.operands[2].shape, (2, 2)) + + # If subtypes are disabled, we should get back an ndarray. + i = nditer([a, b, None], [], + [['readonly'], ['readonly'], + ['writeonly', 'allocate', 'no_subtype']]) + assert_equal(type(b), type(i.operands[2])) + assert_(type(a) is not type(i.operands[2])) + assert_equal(i.operands[2].shape, (2, 2)) + +def test_iter_allocate_output_errors(): + # Check that the iterator will throw errors for bad output allocations + + # Need an input if no output data type is specified + a = arange(6) + assert_raises(TypeError, nditer, [a, None], [], + [['writeonly'], ['writeonly', 'allocate']]) + # Allocated output should be flagged for writing + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['allocate', 'readonly']]) + # Allocated output can't have buffering without delayed bufalloc + assert_raises(ValueError, nditer, [a, None], ['buffered'], + ['allocate', 'readwrite']) + # Must specify at least one input + assert_raises(ValueError, nditer, [None, None], [], + [['writeonly', 'allocate'], + ['writeonly', 'allocate']], + op_dtypes=[np.dtype('f4'), np.dtype('f4')]) + # If using op_axes, must specify all the axes + a = arange(24, dtype='i4').reshape(2, 3, 4) + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')], + op_axes=[None, [0, np.newaxis, 1]]) + # If using op_axes, the axes must be within bounds + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')], + op_axes=[None, [0, 3, 1]]) + # If using op_axes, there can't be duplicates + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')], + op_axes=[None, [0, 2, 1, 0]]) + # Not all axes may be specified if a reduction. If there is a hole + # in op_axes, this is an error. 
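+ # (in the case below the allocated output is mapped on axes 0 and 2 but + # never on axis 1 -- that unmapped axis is the "hole")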
+ a = arange(24, dtype='i4').reshape(2, 3, 4) + assert_raises(ValueError, nditer, [a, None], ["reduce_ok"], + [['readonly'], ['readwrite', 'allocate']], + op_dtypes=[None, np.dtype('f4')], + op_axes=[None, [0, np.newaxis, 2]]) + +def test_iter_remove_axis(): + a = arange(24).reshape(2, 3, 4) + + i = nditer(a, ['multi_index']) + i.remove_axis(1) + assert_equal([x for x in i], a[:, 0,:].ravel()) + + a = a[::-1,:,:] + i = nditer(a, ['multi_index']) + i.remove_axis(0) + assert_equal([x for x in i], a[0,:,:].ravel()) + +def test_iter_remove_multi_index_inner_loop(): + # Check that removing multi-index support works + + a = arange(24).reshape(2, 3, 4) + + i = nditer(a, ['multi_index']) + assert_equal(i.ndim, 3) + assert_equal(i.shape, (2, 3, 4)) + assert_equal(i.itviews[0].shape, (2, 3, 4)) + + # Removing the multi-index tracking causes all dimensions to coalesce + before = [x for x in i] + i.remove_multi_index() + after = [x for x in i] + + assert_equal(before, after) + assert_equal(i.ndim, 1) + assert_raises(ValueError, lambda i:i.shape, i) + assert_equal(i.itviews[0].shape, (24,)) + + # Removing the inner loop means there's just one iteration + i.reset() + assert_equal(i.itersize, 24) + assert_equal(i[0].shape, tuple()) + i.enable_external_loop() + assert_equal(i.itersize, 24) + assert_equal(i[0].shape, (24,)) + assert_equal(i.value, arange(24)) + +def test_iter_iterindex(): + # Make sure iterindex works + + buffersize = 5 + a = arange(24).reshape(4, 3, 2) + for flags in ([], ['buffered']): + i = nditer(a, flags, buffersize=buffersize) + assert_equal(iter_iterindices(i), list(range(24))) + i.iterindex = 2 + assert_equal(iter_iterindices(i), list(range(2, 24))) + + i = nditer(a, flags, order='F', buffersize=buffersize) + assert_equal(iter_iterindices(i), list(range(24))) + i.iterindex = 5 + assert_equal(iter_iterindices(i), list(range(5, 24))) + + i = nditer(a[::-1], flags, order='F', buffersize=buffersize) + assert_equal(iter_iterindices(i), list(range(24))) + i.iterindex = 9 + assert_equal(iter_iterindices(i), list(range(9, 24))) + + i = nditer(a[::-1, ::-1], flags, order='C', buffersize=buffersize) + assert_equal(iter_iterindices(i), list(range(24))) + i.iterindex = 13 + assert_equal(iter_iterindices(i), list(range(13, 24))) + + i = nditer(a[::1, ::-1], flags, buffersize=buffersize) + assert_equal(iter_iterindices(i), list(range(24))) + i.iterindex = 23 + assert_equal(iter_iterindices(i), list(range(23, 24))) + i.reset() + i.iterindex = 2 + assert_equal(iter_iterindices(i), list(range(2, 24))) + +def test_iter_iterrange(): + # Make sure getting and resetting the iterrange works + + buffersize = 5 + a = arange(24, dtype='i4').reshape(4, 3, 2) + a_fort = a.ravel(order='F') + + i = nditer(a, ['ranged'], ['readonly'], order='F', + buffersize=buffersize) + assert_equal(i.iterrange, (0, 24)) + assert_equal([x[()] for x in i], a_fort) + for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]: + i.iterrange = r + assert_equal(i.iterrange, r) + assert_equal([x[()] for x in i], a_fort[r[0]:r[1]]) + + i = nditer(a, ['ranged', 'buffered'], ['readonly'], order='F', + op_dtypes='f8', buffersize=buffersize) + assert_equal(i.iterrange, (0, 24)) + assert_equal([x[()] for x in i], a_fort) + for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]: + i.iterrange = r + assert_equal(i.iterrange, r) + assert_equal([x[()] for x in i], a_fort[r[0]:r[1]]) + + def get_array(i): + val = np.array([], dtype='f8') + for x in i: + val = np.concatenate((val, x)) + return val + + i = nditer(a, ['ranged', 
'buffered', 'external_loop'], + ['readonly'], order='F', + op_dtypes='f8', buffersize=buffersize) + assert_equal(i.iterrange, (0, 24)) + assert_equal(get_array(i), a_fort) + for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]: + i.iterrange = r + assert_equal(i.iterrange, r) + assert_equal(get_array(i), a_fort[r[0]:r[1]]) + +def test_iter_buffering(): + # Test buffering with several buffer sizes and types + arrays = [] + # F-order swapped array + arrays.append(np.arange(24, + dtype='c16').reshape(2, 3, 4).T.newbyteorder().byteswap()) + # Contiguous 1-dimensional array + arrays.append(np.arange(10, dtype='f4')) + # Unaligned array + a = np.zeros((4*16+1,), dtype='i1')[1:] + a.dtype = 'i4' + a[:] = np.arange(16, dtype='i4') + arrays.append(a) + # 4-D F-order array + arrays.append(np.arange(120, dtype='i4').reshape(5, 3, 2, 4).T) + for a in arrays: + for buffersize in (1, 2, 3, 5, 8, 11, 16, 1024): + vals = [] + i = nditer(a, ['buffered', 'external_loop'], + [['readonly', 'nbo', 'aligned']], + order='C', + casting='equiv', + buffersize=buffersize) + while not i.finished: + assert_(i[0].size <= buffersize) + vals.append(i[0].copy()) + i.iternext() + assert_equal(np.concatenate(vals), a.ravel(order='C')) + +def test_iter_write_buffering(): + # Test that buffering of writes is working + + # F-order swapped array + a = np.arange(24).reshape(2, 3, 4).T.newbyteorder().byteswap() + i = nditer(a, ['buffered'], + [['readwrite', 'nbo', 'aligned']], + casting='equiv', + order='C', + buffersize=16) + x = 0 + with i: + while not i.finished: + i[0] = x + x += 1 + i.iternext() + assert_equal(a.ravel(order='C'), np.arange(24)) + +def test_iter_buffering_delayed_alloc(): + # Test that delaying buffer allocation works + + a = np.arange(6) + b = np.arange(1, dtype='f4') + i = nditer([a, b], ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok'], + ['readwrite'], + casting='unsafe', + op_dtypes='f4') + assert_(i.has_delayed_bufalloc) + assert_raises(ValueError, lambda i:i.multi_index, i) + assert_raises(ValueError, lambda i:i[0], i) + assert_raises(ValueError, lambda i:i[0:2], i) + + def assign_iter(i): + i[0] = 0 + assert_raises(ValueError, assign_iter, i) + + i.reset() + assert_(not i.has_delayed_bufalloc) + assert_equal(i.multi_index, (0,)) + with i: + assert_equal(i[0], 0) + i[1] = 1 + assert_equal(i[0:2], [0, 1]) + assert_equal([[x[0][()], x[1][()]] for x in i], list(zip(range(6), [1]*6))) + +def test_iter_buffered_cast_simple(): + # Test that buffering can handle a simple cast + + a = np.arange(10, dtype='f4') + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('f8')], + buffersize=3) + with i: + for v in i: + v[...] *= 2 + + assert_equal(a, 2*np.arange(10, dtype='f4')) + +def test_iter_buffered_cast_byteswapped(): + # Test that buffering can handle a cast which requires swap->cast->swap + + a = np.arange(10, dtype='f4').newbyteorder().byteswap() + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('f8').newbyteorder()], + buffersize=3) + with i: + for v in i: + v[...] 
*= 2 + + assert_equal(a, 2*np.arange(10, dtype='f4')) + + with suppress_warnings() as sup: + sup.filter(np.ComplexWarning) + + a = np.arange(10, dtype='f8').newbyteorder().byteswap() + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='unsafe', + op_dtypes=[np.dtype('c8').newbyteorder()], + buffersize=3) + with i: + for v in i: + v[...] *= 2 + + assert_equal(a, 2*np.arange(10, dtype='f8')) + +def test_iter_buffered_cast_byteswapped_complex(): + # Test that buffering can handle a cast which requires swap->cast->copy + + a = np.arange(10, dtype='c8').newbyteorder().byteswap() + a += 2j + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('c16')], + buffersize=3) + with i: + for v in i: + v[...] *= 2 + assert_equal(a, 2*np.arange(10, dtype='c8') + 4j) + + a = np.arange(10, dtype='c8') + a += 2j + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('c16').newbyteorder()], + buffersize=3) + with i: + for v in i: + v[...] *= 2 + assert_equal(a, 2*np.arange(10, dtype='c8') + 4j) + + a = np.arange(10, dtype=np.clongdouble).newbyteorder().byteswap() + a += 2j + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('c16')], + buffersize=3) + with i: + for v in i: + v[...] *= 2 + assert_equal(a, 2*np.arange(10, dtype=np.clongdouble) + 4j) + + a = np.arange(10, dtype=np.longdouble).newbyteorder().byteswap() + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('f4')], + buffersize=7) + with i: + for v in i: + v[...] *= 2 + assert_equal(a, 2*np.arange(10, dtype=np.longdouble)) + +def test_iter_buffered_cast_structured_type(): + # Tests buffering of structured types + + # simple -> struct type (duplicates the value) + sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')] + a = np.arange(3, dtype='f4') + 0.5 + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt) + vals = [np.array(x) for x in i] + assert_equal(vals[0]['a'], 0.5) + assert_equal(vals[0]['b'], 0) + assert_equal(vals[0]['c'], [[(0.5)]*3]*2) + assert_equal(vals[0]['d'], 0.5) + assert_equal(vals[1]['a'], 1.5) + assert_equal(vals[1]['b'], 1) + assert_equal(vals[1]['c'], [[(1.5)]*3]*2) + assert_equal(vals[1]['d'], 1.5) + assert_equal(vals[0].dtype, np.dtype(sdt)) + + # object -> struct type + sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')] + a = np.zeros((3,), dtype='O') + a[0] = (0.5, 0.5, [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]], 0.5) + a[1] = (1.5, 1.5, [[1.5, 1.5, 1.5], [1.5, 1.5, 1.5]], 1.5) + a[2] = (2.5, 2.5, [[2.5, 2.5, 2.5], [2.5, 2.5, 2.5]], 2.5) + if HAS_REFCOUNT: + rc = sys.getrefcount(a[0]) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt) + vals = [x.copy() for x in i] + assert_equal(vals[0]['a'], 0.5) + assert_equal(vals[0]['b'], 0) + assert_equal(vals[0]['c'], [[(0.5)]*3]*2) + assert_equal(vals[0]['d'], 0.5) + assert_equal(vals[1]['a'], 1.5) + assert_equal(vals[1]['b'], 1) + assert_equal(vals[1]['c'], [[(1.5)]*3]*2) + assert_equal(vals[1]['d'], 1.5) + assert_equal(vals[0].dtype, np.dtype(sdt)) + vals, i, x = [None]*3 + if HAS_REFCOUNT: + assert_equal(sys.getrefcount(a[0]), rc) + + # single-field struct type -> simple + sdt = [('a', 'f4')] + a = np.array([(5.5,), (8,)], dtype=sdt) + i = 
nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes='i4') + assert_equal([x_[()] for x_ in i], [5, 8]) + + # make sure multi-field struct type -> simple doesn't work + sdt = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] + a = np.array([(5.5, 7, 'test'), (8, 10, 11)], dtype=sdt) + assert_raises(TypeError, lambda: ( + nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes='i4'))) + + # struct type -> struct type (field-wise copy) + sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] + sdt2 = [('d', 'u2'), ('a', 'O'), ('b', 'f8')] + a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + assert_equal([np.array(x_) for x_ in i], + [np.array((1, 2, 3), dtype=sdt2), + np.array((4, 5, 6), dtype=sdt2)]) + + +def test_iter_buffered_cast_structured_type_failure_with_cleanup(): + # make sure struct type -> struct type with different + # number of fields fails + sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] + sdt2 = [('b', 'O'), ('a', 'f8')] + a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1) + + for intent in ["readwrite", "readonly", "writeonly"]: + # If the following assert fails, the place where the error is raised + # within nditer may change. That is fine, but it may make sense for + # a new (hard to design) test to replace it. The `simple_arr` is + # designed to require a multi-step cast (due to having fields). + assert np.can_cast(a.dtype, sdt2, casting="unsafe") + simple_arr = np.array([1, 2], dtype="i,i") # requires clean up + with pytest.raises(ValueError): + nditer((simple_arr, a), ['buffered', 'refs_ok'], [intent, intent], + casting='unsafe', op_dtypes=["f,f", sdt2]) + + +def test_buffered_cast_error_paths(): + with pytest.raises(ValueError): + # The input is cast into an `S3` buffer + np.nditer((np.array("a", dtype="S1"),), op_dtypes=["i"], + casting="unsafe", flags=["buffered"]) + + # The `M8[ns]` is cast into the `S3` output + it = np.nditer((np.array(1, dtype="i"),), op_dtypes=["S1"], + op_flags=["writeonly"], casting="unsafe", flags=["buffered"]) + with pytest.raises(ValueError): + with it: + buf = next(it) + buf[...] = "a" # cannot be converted to int. + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="PyPy seems to not hit this.") +def test_buffered_cast_error_paths_unraisable(): + # The following gives an unraisable error. Pytest sometimes captures that + # (depending python and/or pytest version). So with Python>=3.8 this can + # probably be cleaned out in the future to check for + # pytest.PytestUnraisableExceptionWarning: + code = textwrap.dedent(""" + import numpy as np + + it = np.nditer((np.array(1, dtype="i"),), op_dtypes=["S1"], + op_flags=["writeonly"], casting="unsafe", flags=["buffered"]) + buf = next(it) + buf[...] = "a" + del buf, it # Flushing only happens during deallocate right now. 
+ """) + res = subprocess.check_output([sys.executable, "-c", code], + stderr=subprocess.STDOUT, text=True) + assert "ValueError" in res + + +def test_iter_buffered_cast_subarray(): + # Tests buffering of subarrays + + # one element -> many (copies it to all) + sdt1 = [('a', 'f4')] + sdt2 = [('a', 'f8', (3, 2, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + for x, count in zip(i, list(range(6))): + assert_(np.all(x['a'] == count)) + + # one element -> many -> back (copies it to all) + sdt1 = [('a', 'O', (1, 1))] + sdt2 = [('a', 'O', (3, 2, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'][:, 0, 0] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], + casting='unsafe', + op_dtypes=sdt2) + with i: + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_(np.all(x['a'] == count)) + x['a'][0] += 2 + count += 1 + assert_equal(a['a'], np.arange(6).reshape(6, 1, 1)+2) + + # many -> one element -> back (copies just element 0) + sdt1 = [('a', 'O', (3, 2, 2))] + sdt2 = [('a', 'O', (1,))] + a = np.zeros((6,), dtype=sdt1) + a['a'][:, 0, 0, 0] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], + casting='unsafe', + op_dtypes=sdt2) + with i: + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'], count) + x['a'] += 2 + count += 1 + assert_equal(a['a'], np.arange(6).reshape(6, 1, 1, 1)*np.ones((1, 3, 2, 2))+2) + + # many -> one element -> back (copies just element 0) + sdt1 = [('a', 'f8', (3, 2, 2))] + sdt2 = [('a', 'O', (1,))] + a = np.zeros((6,), dtype=sdt1) + a['a'][:, 0, 0, 0] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'], count) + count += 1 + + # many -> one element (copies just element 0) + sdt1 = [('a', 'O', (3, 2, 2))] + sdt2 = [('a', 'f4', (1,))] + a = np.zeros((6,), dtype=sdt1) + a['a'][:, 0, 0, 0] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'], count) + count += 1 + + # many -> matching shape (straightforward copy) + sdt1 = [('a', 'O', (3, 2, 2))] + sdt2 = [('a', 'f4', (3, 2, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6*3*2*2).reshape(6, 3, 2, 2) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'], a[count]['a']) + count += 1 + + # vector -> smaller vector (truncates) + sdt1 = [('a', 'f8', (6,))] + sdt2 = [('a', 'f4', (2,))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6*6).reshape(6, 6) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'], a[count]['a'][:2]) + count += 1 + + # vector -> bigger vector (pads with zeros) + sdt1 = [('a', 'f8', (2,))] + sdt2 = [('a', 'f4', (6,))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6*2).reshape(6, 2) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'][:2], a[count]['a']) + 
assert_equal(x['a'][2:], [0, 0, 0, 0]) + count += 1 + + # vector -> matrix (broadcasts) + sdt1 = [('a', 'f8', (2,))] + sdt2 = [('a', 'f4', (2, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6*2).reshape(6, 2) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'][0], a[count]['a']) + assert_equal(x['a'][1], a[count]['a']) + count += 1 + + # vector -> matrix (broadcasts and zero-pads) + sdt1 = [('a', 'f8', (2, 1))] + sdt2 = [('a', 'f4', (3, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6*2).reshape(6, 2, 1) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'][:2, 0], a[count]['a'][:, 0]) + assert_equal(x['a'][:2, 1], a[count]['a'][:, 0]) + assert_equal(x['a'][2,:], [0, 0]) + count += 1 + + # matrix -> matrix (truncates and zero-pads) + sdt1 = [('a', 'f8', (2, 3))] + sdt2 = [('a', 'f4', (3, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6*2*3).reshape(6, 2, 3) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'][:2, 0], a[count]['a'][:, 0]) + assert_equal(x['a'][:2, 1], a[count]['a'][:, 1]) + assert_equal(x['a'][2,:], [0, 0]) + count += 1 + +def test_iter_buffering_badwriteback(): + # Writing back from a buffer cannot combine elements + + # a needs write buffering, but had a broadcast dimension + a = np.arange(6).reshape(2, 3, 1) + b = np.arange(12).reshape(2, 3, 2) + assert_raises(ValueError, nditer, [a, b], + ['buffered', 'external_loop'], + [['readwrite'], ['writeonly']], + order='C') + + # But if a is readonly, it's fine + nditer([a, b], ['buffered', 'external_loop'], + [['readonly'], ['writeonly']], + order='C') + + # If a has just one element, it's fine too (constant 0 stride, a reduction) + a = np.arange(1).reshape(1, 1, 1) + nditer([a, b], ['buffered', 'external_loop', 'reduce_ok'], + [['readwrite'], ['writeonly']], + order='C') + + # check that it fails on other dimensions too + a = np.arange(6).reshape(1, 3, 2) + assert_raises(ValueError, nditer, [a, b], + ['buffered', 'external_loop'], + [['readwrite'], ['writeonly']], + order='C') + a = np.arange(4).reshape(2, 1, 2) + assert_raises(ValueError, nditer, [a, b], + ['buffered', 'external_loop'], + [['readwrite'], ['writeonly']], + order='C') + +def test_iter_buffering_string(): + # Safe casting disallows shrinking strings + a = np.array(['abc', 'a', 'abcd'], dtype=np.bytes_) + assert_equal(a.dtype, np.dtype('S4')) + assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'], + op_dtypes='S2') + i = nditer(a, ['buffered'], ['readonly'], op_dtypes='S6') + assert_equal(i[0], b'abc') + assert_equal(i[0].dtype, np.dtype('S6')) + + a = np.array(['abc', 'a', 'abcd'], dtype=np.unicode_) + assert_equal(a.dtype, np.dtype('U4')) + assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'], + op_dtypes='U2') + i = nditer(a, ['buffered'], ['readonly'], op_dtypes='U6') + assert_equal(i[0], u'abc') + assert_equal(i[0].dtype, np.dtype('U6')) + +def test_iter_buffering_growinner(): + # Test that the inner loop grows when no buffering is needed + a = np.arange(30) + i = nditer(a, ['buffered', 'growinner', 'external_loop'], + buffersize=5) + # Should end up with just one inner loop here + assert_equal(i[0].size, a.size) + + 
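+# A minimal illustrative sketch, not part of the upstream suite (the helper +# name is hypothetical): without 'growinner' a buffered external loop hands +# out at most buffersize elements per chunk, while 'growinner' lets the chunk +# grow to the whole array when no real buffering (cast/copy) is required. +def _demo_growinner_vs_buffersize(): + a = np.arange(30) + fixed = nditer(a, ['buffered', 'external_loop'], buffersize=5) + assert_(next(fixed).size <= 5) # chunks stay capped by the buffer size + grown = nditer(a, ['buffered', 'growinner', 'external_loop'], buffersize=5) + assert_equal(next(grown).size, a.size) # inner loop grew to cover everything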
+@pytest.mark.slow +def test_iter_buffered_reduce_reuse(): + # large enough array for all views, including negative strides. + a = np.arange(2*3**5)[3**5:3**5+1] + flags = ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok', 'refs_ok'] + op_flags = [('readonly',), ('readwrite', 'allocate')] + op_axes_list = [[(0, 1, 2), (0, 1, -1)], [(0, 1, 2), (0, -1, -1)]] + # wrong dtype to force buffering + op_dtypes = [float, a.dtype] + + def get_params(): + for xs in range(-3**2, 3**2 + 1): + for ys in range(xs, 3**2 + 1): + for op_axes in op_axes_list: + # last stride is reduced and because of that not + # important for this test, as it is the inner stride. + strides = (xs * a.itemsize, ys * a.itemsize, a.itemsize) + arr = np.lib.stride_tricks.as_strided(a, (3, 3, 3), strides) + + for skip in [0, 1]: + yield arr, op_axes, skip + + for arr, op_axes, skip in get_params(): + nditer2 = np.nditer([arr.copy(), None], + op_axes=op_axes, flags=flags, op_flags=op_flags, + op_dtypes=op_dtypes) + with nditer2: + nditer2.operands[-1][...] = 0 + nditer2.reset() + nditer2.iterindex = skip + + for (a2_in, b2_in) in nditer2: + b2_in += a2_in.astype(np.int_) + + comp_res = nditer2.operands[-1] + + for bufsize in range(0, 3**3): + nditer1 = np.nditer([arr, None], + op_axes=op_axes, flags=flags, op_flags=op_flags, + buffersize=bufsize, op_dtypes=op_dtypes) + with nditer1: + nditer1.operands[-1][...] = 0 + nditer1.reset() + nditer1.iterindex = skip + + for (a1_in, b1_in) in nditer1: + b1_in += a1_in.astype(np.int_) + + res = nditer1.operands[-1] + assert_array_equal(res, comp_res) + + +def test_iter_no_broadcast(): + # Test that the no_broadcast flag works + a = np.arange(24).reshape(2, 3, 4) + b = np.arange(6).reshape(2, 3, 1) + c = np.arange(12).reshape(3, 4) + + nditer([a, b, c], [], + [['readonly', 'no_broadcast'], + ['readonly'], ['readonly']]) + assert_raises(ValueError, nditer, [a, b, c], [], + [['readonly'], ['readonly', 'no_broadcast'], ['readonly']]) + assert_raises(ValueError, nditer, [a, b, c], [], + [['readonly'], ['readonly'], ['readonly', 'no_broadcast']]) + + +class TestIterNested: + + def test_basic(self): + # Test nested iteration basic usage + a = arange(12).reshape(2, 3, 2) + + i, j = np.nested_iters(a, [[0], [1, 2]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) + + i, j = np.nested_iters(a, [[0, 1], [2]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) + + i, j = np.nested_iters(a, [[0, 2], [1]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) + + def test_reorder(self): + # Test nested iteration basic usage + a = arange(12).reshape(2, 3, 2) + + # In 'K' order (default), it gets reordered + i, j = np.nested_iters(a, [[0], [2, 1]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) + + i, j = np.nested_iters(a, [[1, 0], [2]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) + + i, j = np.nested_iters(a, [[2, 0], [1]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) + + # In 'C' order, it doesn't + i, j = np.nested_iters(a, [[0], [2, 1]], order='C') + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 2, 4, 1, 3, 5], [6, 8, 10, 7, 9, 11]]) + + i, j = np.nested_iters(a, [[1, 0], [2]], order='C') + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1], [6, 7], [2, 3], [8, 9], [4, 5], [10, 
11]]) + + i, j = np.nested_iters(a, [[2, 0], [1]], order='C') + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 2, 4], [6, 8, 10], [1, 3, 5], [7, 9, 11]]) + + def test_flip_axes(self): + # Test nested iteration with negative axes + a = arange(12).reshape(2, 3, 2)[::-1, ::-1, ::-1] + + # In 'K' order (default), the axes all get flipped + i, j = np.nested_iters(a, [[0], [1, 2]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) + + i, j = np.nested_iters(a, [[0, 1], [2]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) + + i, j = np.nested_iters(a, [[0, 2], [1]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) + + # In 'C' order, flipping axes is disabled + i, j = np.nested_iters(a, [[0], [1, 2]], order='C') + vals = [list(j) for _ in i] + assert_equal(vals, [[11, 10, 9, 8, 7, 6], [5, 4, 3, 2, 1, 0]]) + + i, j = np.nested_iters(a, [[0, 1], [2]], order='C') + vals = [list(j) for _ in i] + assert_equal(vals, [[11, 10], [9, 8], [7, 6], [5, 4], [3, 2], [1, 0]]) + + i, j = np.nested_iters(a, [[0, 2], [1]], order='C') + vals = [list(j) for _ in i] + assert_equal(vals, [[11, 9, 7], [10, 8, 6], [5, 3, 1], [4, 2, 0]]) + + def test_broadcast(self): + # Test nested iteration with broadcasting + a = arange(2).reshape(2, 1) + b = arange(3).reshape(1, 3) + + i, j = np.nested_iters([a, b], [[0], [1]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[[0, 0], [0, 1], [0, 2]], [[1, 0], [1, 1], [1, 2]]]) + + i, j = np.nested_iters([a, b], [[1], [0]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[[0, 0], [1, 0]], [[0, 1], [1, 1]], [[0, 2], [1, 2]]]) + + def test_dtype_copy(self): + # Test nested iteration with a copy to change dtype + + # copy + a = arange(6, dtype='i4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + op_flags=['readonly', 'copy'], + op_dtypes='f8') + assert_equal(j[0].dtype, np.dtype('f8')) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1, 2], [3, 4, 5]]) + vals = None + + # writebackifcopy - using context manager + a = arange(6, dtype='f4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + op_flags=['readwrite', 'updateifcopy'], + casting='same_kind', + op_dtypes='f8') + with i, j: + assert_equal(j[0].dtype, np.dtype('f8')) + for x in i: + for y in j: + y[...] += 1 + assert_equal(a, [[0, 1, 2], [3, 4, 5]]) + assert_equal(a, [[1, 2, 3], [4, 5, 6]]) + + # writebackifcopy - using close() + a = arange(6, dtype='f4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + op_flags=['readwrite', 'updateifcopy'], + casting='same_kind', + op_dtypes='f8') + assert_equal(j[0].dtype, np.dtype('f8')) + for x in i: + for y in j: + y[...] += 1 + assert_equal(a, [[0, 1, 2], [3, 4, 5]]) + i.close() + j.close() + assert_equal(a, [[1, 2, 3], [4, 5, 6]]) + + def test_dtype_buffered(self): + # Test nested iteration with buffering to change dtype + + a = arange(6, dtype='f4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + flags=['buffered'], + op_flags=['readwrite'], + casting='same_kind', + op_dtypes='f8') + assert_equal(j[0].dtype, np.dtype('f8')) + for x in i: + for y in j: + y[...] 
+= 1 + assert_equal(a, [[1, 2, 3], [4, 5, 6]]) + + def test_0d(self): + a = np.arange(12).reshape(2, 3, 2) + i, j = np.nested_iters(a, [[], [1, 0, 2]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]]) + + i, j = np.nested_iters(a, [[1, 0, 2], []]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]]) + + i, j, k = np.nested_iters(a, [[2, 0], [], [1]]) + vals = [] + for x in i: + for y in j: + vals.append([z for z in k]) + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) + + def test_iter_nested_iters_dtype_buffered(self): + # Test nested iteration with buffering to change dtype + + a = arange(6, dtype='f4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + flags=['buffered'], + op_flags=['readwrite'], + casting='same_kind', + op_dtypes='f8') + with i, j: + assert_equal(j[0].dtype, np.dtype('f8')) + for x in i: + for y in j: + y[...] += 1 + assert_equal(a, [[1, 2, 3], [4, 5, 6]]) + +def test_iter_reduction_error(): + + a = np.arange(6) + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[0], [-1]]) + + a = np.arange(6).reshape(2, 3) + assert_raises(ValueError, nditer, [a, None], ['external_loop'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[0, 1], [-1, -1]]) + +def test_iter_reduction(): + # Test doing reductions with the iterator + + a = np.arange(6) + i = nditer([a, None], ['reduce_ok'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[0], [-1]]) + # Need to initialize the output operand to the addition unit + with i: + i.operands[1][...] = 0 + # Do the reduction + for x, y in i: + y[...] += x + # Since no axes were specified, should have allocated a scalar + assert_equal(i.operands[1].ndim, 0) + assert_equal(i.operands[1], np.sum(a)) + + a = np.arange(6).reshape(2, 3) + i = nditer([a, None], ['reduce_ok', 'external_loop'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[0, 1], [-1, -1]]) + # Need to initialize the output operand to the addition unit + with i: + i.operands[1][...] = 0 + # Reduction shape/strides for the output + assert_equal(i[1].shape, (6,)) + assert_equal(i[1].strides, (0,)) + # Do the reduction + for x, y in i: + # Use a for loop instead of ``y[...] += x`` + # (equivalent to ``y[...] = y[...].copy() + x``), + # because y has zero strides we use for the reduction + for j in range(len(y)): + y[j] += x[j] + # Since no axes were specified, should have allocated a scalar + assert_equal(i.operands[1].ndim, 0) + assert_equal(i.operands[1], np.sum(a)) + + # This is a tricky reduction case for the buffering double loop + # to handle + a = np.ones((2, 3, 5)) + it1 = nditer([a, None], ['reduce_ok', 'external_loop'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[None, [0, -1, 1]]) + it2 = nditer([a, None], ['reduce_ok', 'external_loop', + 'buffered', 'delay_bufalloc'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[None, [0, -1, 1]], buffersize=10) + with it1, it2: + it1.operands[1].fill(0) + it2.operands[1].fill(0) + it2.reset() + for x in it1: + x[1][...] += x[0] + for x in it2: + x[1][...] 
+= x[0] + assert_equal(it1.operands[1], it2.operands[1]) + assert_equal(it2.operands[1].sum(), a.size) + +def test_iter_buffering_reduction(): + # Test doing buffered reductions with the iterator + + a = np.arange(6) + b = np.array(0., dtype='f8').byteswap().newbyteorder() + i = nditer([a, b], ['reduce_ok', 'buffered'], + [['readonly'], ['readwrite', 'nbo']], + op_axes=[[0], [-1]]) + with i: + assert_equal(i[1].dtype, np.dtype('f8')) + assert_(i[1].dtype != b.dtype) + # Do the reduction + for x, y in i: + y[...] += x + # Since no axes were specified, should have allocated a scalar + assert_equal(b, np.sum(a)) + + a = np.arange(6).reshape(2, 3) + b = np.array([0, 0], dtype='f8').byteswap().newbyteorder() + i = nditer([a, b], ['reduce_ok', 'external_loop', 'buffered'], + [['readonly'], ['readwrite', 'nbo']], + op_axes=[[0, 1], [0, -1]]) + # Reduction shape/strides for the output + with i: + assert_equal(i[1].shape, (3,)) + assert_equal(i[1].strides, (0,)) + # Do the reduction + for x, y in i: + # Use a for loop instead of ``y[...] += x`` + # (equivalent to ``y[...] = y[...].copy() + x``), + # because y has zero strides we use for the reduction + for j in range(len(y)): + y[j] += x[j] + assert_equal(b, np.sum(a, axis=1)) + + # Iterator inner double loop was wrong on this one + p = np.arange(2) + 1 + it = np.nditer([p, None], + ['delay_bufalloc', 'reduce_ok', 'buffered', 'external_loop'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[-1, 0], [-1, -1]], + itershape=(2, 2)) + with it: + it.operands[1].fill(0) + it.reset() + assert_equal(it[0], [1, 2, 1, 2]) + + # Iterator inner loop should take argument contiguity into account + x = np.ones((7, 13, 8), np.int8)[4:6,1:11:6,1:5].transpose(1, 2, 0) + x[...] = np.arange(x.size).reshape(x.shape) + y_base = np.arange(4*4, dtype=np.int8).reshape(4, 4) + y_base_copy = y_base.copy() + y = y_base[::2,:,None] + + it = np.nditer([y, x], + ['buffered', 'external_loop', 'reduce_ok'], + [['readwrite'], ['readonly']]) + with it: + for a, b in it: + a.fill(2) + + assert_equal(y_base[1::2], y_base_copy[1::2]) + assert_equal(y_base[::2], 2) + +def test_iter_buffering_reduction_reuse_reduce_loops(): + # There was a bug triggering reuse of the reduce loop inappropriately, + # which caused processing to happen in unnecessarily small chunks + # and overran the buffer. 
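+ # (the assertion below pins the expected chunking: two rows of 7 elements + # iterated with buffersize=5 must come through as chunks of 5, 2, 5, 2)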
+ + a = np.zeros((2, 7)) + b = np.zeros((1, 7)) + it = np.nditer([a, b], flags=['reduce_ok', 'external_loop', 'buffered'], + op_flags=[['readonly'], ['readwrite']], + buffersize=5) + + with it: + bufsizes = [x.shape[0] for x, y in it] + assert_equal(bufsizes, [5, 2, 5, 2]) + assert_equal(sum(bufsizes), a.size) + +def test_iter_writemasked_badinput(): + a = np.zeros((2, 3)) + b = np.zeros((3,)) + m = np.array([[True, True, False], [False, True, False]]) + m2 = np.array([True, True, False]) + m3 = np.array([0, 1, 1], dtype='u1') + mbad1 = np.array([0, 1, 1], dtype='i1') + mbad2 = np.array([0, 1, 1], dtype='f4') + + # Need an 'arraymask' if any operand is 'writemasked' + assert_raises(ValueError, nditer, [a, m], [], + [['readwrite', 'writemasked'], ['readonly']]) + + # A 'writemasked' operand must not be readonly + assert_raises(ValueError, nditer, [a, m], [], + [['readonly', 'writemasked'], ['readonly', 'arraymask']]) + + # 'writemasked' and 'arraymask' may not be used together + assert_raises(ValueError, nditer, [a, m], [], + [['readonly'], ['readwrite', 'arraymask', 'writemasked']]) + + # 'arraymask' may only be specified once + assert_raises(ValueError, nditer, [a, m, m2], [], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask'], + ['readonly', 'arraymask']]) + + # An 'arraymask' with nothing 'writemasked' also doesn't make sense + assert_raises(ValueError, nditer, [a, m], [], + [['readwrite'], ['readonly', 'arraymask']]) + + # A writemasked reduction requires a similarly smaller mask + assert_raises(ValueError, nditer, [a, b, m], ['reduce_ok'], + [['readonly'], + ['readwrite', 'writemasked'], + ['readonly', 'arraymask']]) + # But this should work with a smaller/equal mask to the reduction operand + np.nditer([a, b, m2], ['reduce_ok'], + [['readonly'], + ['readwrite', 'writemasked'], + ['readonly', 'arraymask']]) + # The arraymask itself cannot be a reduction + assert_raises(ValueError, nditer, [a, b, m2], ['reduce_ok'], + [['readonly'], + ['readwrite', 'writemasked'], + ['readwrite', 'arraymask']]) + + # A uint8 mask is ok too + np.nditer([a, m3], ['buffered'], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']], + op_dtypes=['f4', None], + casting='same_kind') + # An int8 mask isn't ok + assert_raises(TypeError, np.nditer, [a, mbad1], ['buffered'], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']], + op_dtypes=['f4', None], + casting='same_kind') + # A float32 mask isn't ok + assert_raises(TypeError, np.nditer, [a, mbad2], ['buffered'], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']], + op_dtypes=['f4', None], + casting='same_kind') + +def _is_buffered(iterator): + try: + iterator.itviews + except ValueError: + return True + return False + +@pytest.mark.parametrize("a", + [np.zeros((3,), dtype='f8'), + np.zeros((9876, 3*5), dtype='f8')[::2, :], + np.zeros((4, 312, 124, 3), dtype='f8')[::2, :, ::2, :], + # Also test with the last dimension strided (so it does not fit if + # there is repeated access) + np.zeros((9,), dtype='f8')[::3], + np.zeros((9876, 3*10), dtype='f8')[::2, ::5], + np.zeros((4, 312, 124, 3), dtype='f8')[::2, :, ::2, ::-1]]) +def test_iter_writemasked(a): + # Note, the slicing above is to ensure that nditer cannot combine multiple + # axes into one. The repetition is just to make things a bit more + # interesting. + shape = a.shape + reps = shape[-1] // 3 + msk = np.empty(shape, dtype=bool) + msk[...] = [True, True, False] * reps + + # When buffering is unused, 'writemasked' effectively does nothing. 
# It's up to the user of the iterator to obey the requested semantics.
+    it = np.nditer([a, msk], [],
+                   [['readwrite', 'writemasked'],
+                    ['readonly', 'arraymask']])
+    with it:
+        for x, m in it:
+            x[...] = 1
+    # Because we violated the semantics, all the values became 1
+    assert_equal(a, np.broadcast_to([1, 1, 1] * reps, shape))
+
+    # Even if buffering is enabled, we may still be accessing the array
+    # directly.
+    it = np.nditer([a, msk], ['buffered'],
+                   [['readwrite', 'writemasked'],
+                    ['readonly', 'arraymask']])
+    # @seberg: I honestly don't currently understand why a "buffered"
+    # iterator would end up not using a buffer for the small array here, at
+    # least when "writemasked" is used; that seems confusing.  Check by
+    # testing for actual memory overlap!
+    is_buffered = True
+    with it:
+        for x, m in it:
+            x[...] = 2.5
+            if np.may_share_memory(x, a):
+                is_buffered = False
+
+    if not is_buffered:
+        # Because we violated the semantics, all the values became 2.5
+        assert_equal(a, np.broadcast_to([2.5, 2.5, 2.5] * reps, shape))
+    else:
+        # For large sizes, the iterator may be buffered:
+        assert_equal(a, np.broadcast_to([2.5, 2.5, 1] * reps, shape))
+        a[...] = 2.5
+
+    # If buffering is definitely going to happen, for instance because of
+    # a cast, only the items selected by the mask will be copied back from
+    # the buffer.
+    it = np.nditer([a, msk], ['buffered'],
+                   [['readwrite', 'writemasked'],
+                    ['readonly', 'arraymask']],
+                   op_dtypes=['i8', None],
+                   casting='unsafe')
+    with it:
+        for x, m in it:
+            x[...] = 3
+    # Even though we violated the semantics, only the selected values
+    # were copied back
+    assert_equal(a, np.broadcast_to([3, 3, 2.5] * reps, shape))
+
+def test_iter_writemasked_decref():
+    # force casting (to make it interesting) by using a structured dtype.
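+    # Added sketch (illustrative, not from the original suite) of the masked
+    # copy-back rule that the structured-dtype case below relies on; only
+    # elements whose arraymask entry is True get written back from a buffer:
+    demo = np.zeros(3)
+    demo_mask = np.array([True, False, True])
+    demo_it = np.nditer([demo, demo_mask], ['buffered'],
+                        [['readwrite', 'writemasked'],
+                         ['readonly', 'arraymask']],
+                        op_dtypes=['f4', None], casting='same_kind')
+    with demo_it:
+        for x, m in demo_it:
+            x[...] = 7
+    assert_equal(demo, [7, 0, 7])  # the masked-out middle element is untouched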
arr = np.arange(10000).astype(">i,O")
+    original = arr.copy()
+    mask = np.random.randint(0, 2, size=10000).astype(bool)
+
+    it = np.nditer([arr, mask], ['buffered', "refs_ok"],
+                   [['readwrite', 'writemasked'],
+                    ['readonly', 'arraymask']],
+                   op_dtypes=["<i,O", None])
+
+
+class TestBoolCmp:
+    def test_float(self):
+        # offset for alignment test
+        for i in range(4):
+            assert_array_equal(self.f[i:] > 0, self.ef[i:])
+            assert_array_equal(self.f[i:] - 1 >= 0, self.ef[i:])
+            assert_array_equal(self.f[i:] == 0, ~self.ef[i:])
+            assert_array_equal(-self.f[i:] < 0, self.ef[i:])
+            assert_array_equal(-self.f[i:] + 1 <= 0, self.ef[i:])
+            r = self.f[i:] != 0
+            assert_array_equal(r, self.ef[i:])
+            r2 = self.f[i:] != np.zeros_like(self.f[i:])
+            r3 = 0 != self.f[i:]
+            assert_array_equal(r, r2)
+            assert_array_equal(r, r3)
+            # check bool == 0x1
+            assert_array_equal(r.view(np.int8), r.astype(np.int8))
+            assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
+            assert_array_equal(r3.view(np.int8), r3.astype(np.int8))
+
+            # isnan on amd64 takes the same code path
+            assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:])
+            assert_array_equal(np.isfinite(self.nf[i:]), ~self.ef[i:])
+            assert_array_equal(np.isfinite(self.inff[i:]), ~self.ef[i:])
+            assert_array_equal(np.isinf(self.inff[i:]), self.efnonan[i:])
+            assert_array_equal(np.signbit(self.signf[i:]), self.ef[i:])
+
+    def test_double(self):
+        # offset for alignment test
+        for i in range(2):
+            assert_array_equal(self.d[i:] > 0, self.ed[i:])
+            assert_array_equal(self.d[i:] - 1 >= 0, self.ed[i:])
+            assert_array_equal(self.d[i:] == 0, ~self.ed[i:])
+            assert_array_equal(-self.d[i:] < 0, self.ed[i:])
+            assert_array_equal(-self.d[i:] + 1 <= 0, self.ed[i:])
+            r = self.d[i:] != 0
+            assert_array_equal(r, self.ed[i:])
+            r2 = self.d[i:] != np.zeros_like(self.d[i:])
+            r3 = 0 != self.d[i:]
+            assert_array_equal(r, r2)
+            assert_array_equal(r, r3)
+            # check bool == 0x1
+            assert_array_equal(r.view(np.int8), r.astype(np.int8))
+            assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
+            assert_array_equal(r3.view(np.int8), r3.astype(np.int8))
+
+            # isnan on amd64 takes the same code path
+            assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:])
+            assert_array_equal(np.isfinite(self.nd[i:]), ~self.ed[i:])
+            assert_array_equal(np.isfinite(self.infd[i:]), ~self.ed[i:])
+            assert_array_equal(np.isinf(self.infd[i:]), self.ednonan[i:])
+            assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:])
+
+
+class TestSeterr:
+    def test_default(self):
+        err = np.geterr()
+        assert_equal(err,
+                     dict(divide='warn',
+                          invalid='warn',
+                          over='warn',
+                          under='ignore')
+                     )
+
+    def test_set(self):
+        with np.errstate():
+            err = np.seterr()
+            old = np.seterr(divide='print')
+            assert_(err == old)
+            new = np.seterr()
+            assert_(new['divide'] == 'print')
+            np.seterr(over='raise')
+            assert_(np.geterr()['over'] == 'raise')
+            assert_(new['divide'] == 'print')
+            np.seterr(**old)
+            assert_(np.geterr() == old)
+
+    @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.")
+    def test_divide_err(self):
+        with np.errstate(divide='raise'):
+            with assert_raises(FloatingPointError):
+                np.array([1.]) / np.array([0.])
+
+            np.seterr(divide='ignore')
+            np.array([1.]) / np.array([0.])
+
+    def test_errobj(self):
+        olderrobj = np.geterrobj()
+        self.called = 0
+        try:
+            with warnings.catch_warnings(record=True) as w:
+                warnings.simplefilter("always")
+                with np.errstate(divide='warn'):
+                    np.seterrobj([20000, 1, None])
+                    np.array([1.]) / np.array([0.])
+                assert_equal(len(w), 1)
+
+            def log_err(*args):
+                self.called += 1
+                extobj_err = args
+                assert_(len(extobj_err) == 2)
+                assert_("divide" in extobj_err[0])
+
+            with np.errstate(divide='ignore'):
np.seterrobj([20000, 3, log_err]) + np.array([1.]) / np.array([0.]) + assert_equal(self.called, 1) + + np.seterrobj(olderrobj) + with np.errstate(divide='ignore'): + np.divide(1., 0., extobj=[20000, 3, log_err]) + assert_equal(self.called, 2) + finally: + np.seterrobj(olderrobj) + del self.called + + def test_errobj_noerrmask(self): + # errmask = 0 has a special code path for the default + olderrobj = np.geterrobj() + try: + # set errobj to something non default + np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT, + umath.ERR_DEFAULT + 1, None]) + # call a ufunc + np.isnan(np.array([6])) + # same with the default, lots of times to get rid of possible + # pre-existing stack in the code + for i in range(10000): + np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT, umath.ERR_DEFAULT, + None]) + np.isnan(np.array([6])) + finally: + np.seterrobj(olderrobj) + + +class TestFloatExceptions: + def assert_raises_fpe(self, fpeerr, flop, x, y): + ftype = type(x) + try: + flop(x, y) + assert_(False, + "Type %s did not raise fpe error '%s'." % (ftype, fpeerr)) + except FloatingPointError as exc: + assert_(str(exc).find(fpeerr) >= 0, + "Type %s raised wrong fpe error '%s'." % (ftype, exc)) + + def assert_op_raises_fpe(self, fpeerr, flop, sc1, sc2): + # Check that fpe exception is raised. + # + # Given a floating operation `flop` and two scalar values, check that + # the operation raises the floating point exception specified by + # `fpeerr`. Tests all variants with 0-d array scalars as well. + + self.assert_raises_fpe(fpeerr, flop, sc1, sc2) + self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2) + self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()]) + self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2[()]) + + def test_floating_exceptions(self): + # Test basic arithmetic function errors + with np.errstate(all='raise'): + # Test for all real and complex float types + for typecode in np.typecodes['AllFloat']: + ftype = np.obj2sctype(typecode) + if np.dtype(ftype).kind == 'f': + # Get some extreme values for the type + fi = np.finfo(ftype) + ft_tiny = fi.tiny + ft_max = fi.max + ft_eps = fi.eps + underflow = 'underflow' + divbyzero = 'divide by zero' + else: + # 'c', complex, corresponding real dtype + rtype = type(ftype(0).real) + fi = np.finfo(rtype) + ft_tiny = ftype(fi.tiny) + ft_max = ftype(fi.max) + ft_eps = ftype(fi.eps) + # The complex types raise different exceptions + underflow = '' + divbyzero = '' + overflow = 'overflow' + invalid = 'invalid' + + self.assert_raises_fpe(underflow, + lambda a, b: a/b, ft_tiny, ft_max) + self.assert_raises_fpe(underflow, + lambda a, b: a*b, ft_tiny, ft_tiny) + self.assert_raises_fpe(overflow, + lambda a, b: a*b, ft_max, ftype(2)) + self.assert_raises_fpe(overflow, + lambda a, b: a/b, ft_max, ftype(0.5)) + self.assert_raises_fpe(overflow, + lambda a, b: a+b, ft_max, ft_max*ft_eps) + self.assert_raises_fpe(overflow, + lambda a, b: a-b, -ft_max, ft_max*ft_eps) + self.assert_raises_fpe(overflow, + np.power, ftype(2), ftype(2**fi.nexp)) + self.assert_raises_fpe(divbyzero, + lambda a, b: a/b, ftype(1), ftype(0)) + self.assert_raises_fpe(invalid, + lambda a, b: a/b, ftype(np.inf), ftype(np.inf)) + self.assert_raises_fpe(invalid, + lambda a, b: a/b, ftype(0), ftype(0)) + self.assert_raises_fpe(invalid, + lambda a, b: a-b, ftype(np.inf), ftype(np.inf)) + self.assert_raises_fpe(invalid, + lambda a, b: a+b, ftype(np.inf), ftype(-np.inf)) + self.assert_raises_fpe(invalid, + lambda a, b: a*b, ftype(0), ftype(np.inf)) + + def test_warnings(self): + # test warning code path + with 
warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + with np.errstate(all="warn"): + np.divide(1, 0.) + assert_equal(len(w), 1) + assert_("divide by zero" in str(w[0].message)) + np.array(1e300) * np.array(1e300) + assert_equal(len(w), 2) + assert_("overflow" in str(w[-1].message)) + np.array(np.inf) - np.array(np.inf) + assert_equal(len(w), 3) + assert_("invalid value" in str(w[-1].message)) + np.array(1e-300) * np.array(1e-300) + assert_equal(len(w), 4) + assert_("underflow" in str(w[-1].message)) + + +class TestTypes: + def check_promotion_cases(self, promote_func): + # tests that the scalars get coerced correctly. + b = np.bool_(0) + i8, i16, i32, i64 = np.int8(0), np.int16(0), np.int32(0), np.int64(0) + u8, u16, u32, u64 = np.uint8(0), np.uint16(0), np.uint32(0), np.uint64(0) + f32, f64, fld = np.float32(0), np.float64(0), np.longdouble(0) + c64, c128, cld = np.complex64(0), np.complex128(0), np.clongdouble(0) + + # coercion within the same kind + assert_equal(promote_func(i8, i16), np.dtype(np.int16)) + assert_equal(promote_func(i32, i8), np.dtype(np.int32)) + assert_equal(promote_func(i16, i64), np.dtype(np.int64)) + assert_equal(promote_func(u8, u32), np.dtype(np.uint32)) + assert_equal(promote_func(f32, f64), np.dtype(np.float64)) + assert_equal(promote_func(fld, f32), np.dtype(np.longdouble)) + assert_equal(promote_func(f64, fld), np.dtype(np.longdouble)) + assert_equal(promote_func(c128, c64), np.dtype(np.complex128)) + assert_equal(promote_func(cld, c128), np.dtype(np.clongdouble)) + assert_equal(promote_func(c64, fld), np.dtype(np.clongdouble)) + + # coercion between kinds + assert_equal(promote_func(b, i32), np.dtype(np.int32)) + assert_equal(promote_func(b, u8), np.dtype(np.uint8)) + assert_equal(promote_func(i8, u8), np.dtype(np.int16)) + assert_equal(promote_func(u8, i32), np.dtype(np.int32)) + assert_equal(promote_func(i64, u32), np.dtype(np.int64)) + assert_equal(promote_func(u64, i32), np.dtype(np.float64)) + assert_equal(promote_func(i32, f32), np.dtype(np.float64)) + assert_equal(promote_func(i64, f32), np.dtype(np.float64)) + assert_equal(promote_func(f32, i16), np.dtype(np.float32)) + assert_equal(promote_func(f32, u32), np.dtype(np.float64)) + assert_equal(promote_func(f32, c64), np.dtype(np.complex64)) + assert_equal(promote_func(c128, f32), np.dtype(np.complex128)) + assert_equal(promote_func(cld, f64), np.dtype(np.clongdouble)) + + # coercion between scalars and 1-D arrays + assert_equal(promote_func(np.array([b]), i8), np.dtype(np.int8)) + assert_equal(promote_func(np.array([b]), u8), np.dtype(np.uint8)) + assert_equal(promote_func(np.array([b]), i32), np.dtype(np.int32)) + assert_equal(promote_func(np.array([b]), u32), np.dtype(np.uint32)) + assert_equal(promote_func(np.array([i8]), i64), np.dtype(np.int8)) + assert_equal(promote_func(u64, np.array([i32])), np.dtype(np.int32)) + assert_equal(promote_func(i64, np.array([u32])), np.dtype(np.uint32)) + assert_equal(promote_func(np.int32(-1), np.array([u64])), + np.dtype(np.float64)) + assert_equal(promote_func(f64, np.array([f32])), np.dtype(np.float32)) + assert_equal(promote_func(fld, np.array([f32])), np.dtype(np.float32)) + assert_equal(promote_func(np.array([f64]), fld), np.dtype(np.float64)) + assert_equal(promote_func(fld, np.array([c64])), + np.dtype(np.complex64)) + assert_equal(promote_func(c64, np.array([f64])), + np.dtype(np.complex128)) + assert_equal(promote_func(np.complex64(3j), np.array([f64])), + np.dtype(np.complex128)) + + # coercion between scalars and 1-D arrays, 
where + # the scalar has greater kind than the array + assert_equal(promote_func(np.array([b]), f64), np.dtype(np.float64)) + assert_equal(promote_func(np.array([b]), i64), np.dtype(np.int64)) + assert_equal(promote_func(np.array([b]), u64), np.dtype(np.uint64)) + assert_equal(promote_func(np.array([i8]), f64), np.dtype(np.float64)) + assert_equal(promote_func(np.array([u16]), f64), np.dtype(np.float64)) + + # uint and int are treated as the same "kind" for + # the purposes of array-scalar promotion. + assert_equal(promote_func(np.array([u16]), i32), np.dtype(np.uint16)) + + # float and complex are treated as the same "kind" for + # the purposes of array-scalar promotion, so that you can do + # (0j + float32array) to get a complex64 array instead of + # a complex128 array. + assert_equal(promote_func(np.array([f32]), c128), + np.dtype(np.complex64)) + + def test_coercion(self): + def res_type(a, b): + return np.add(a, b).dtype + + self.check_promotion_cases(res_type) + + # Use-case: float/complex scalar * bool/int8 array + # shouldn't narrow the float/complex type + for a in [np.array([True, False]), np.array([-3, 12], dtype=np.int8)]: + b = 1.234 * a + assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype) + b = np.longdouble(1.234) * a + assert_equal(b.dtype, np.dtype(np.longdouble), + "array type %s" % a.dtype) + b = np.float64(1.234) * a + assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype) + b = np.float32(1.234) * a + assert_equal(b.dtype, np.dtype('f4'), "array type %s" % a.dtype) + b = np.float16(1.234) * a + assert_equal(b.dtype, np.dtype('f2'), "array type %s" % a.dtype) + + b = 1.234j * a + assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype) + b = np.clongdouble(1.234j) * a + assert_equal(b.dtype, np.dtype(np.clongdouble), + "array type %s" % a.dtype) + b = np.complex128(1.234j) * a + assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype) + b = np.complex64(1.234j) * a + assert_equal(b.dtype, np.dtype('c8'), "array type %s" % a.dtype) + + # The following use-case is problematic, and to resolve its + # tricky side-effects requires more changes. 
+
+        #
+        # Use-case: (1-t)*a, where 't' is a boolean array and 'a' is
+        # a float32, shouldn't promote to float64
+        #
+        # a = np.array([1.0, 1.5], dtype=np.float32)
+        # t = np.array([True, False])
+        # b = t*a
+        # assert_equal(b, [1.0, 0.0])
+        # assert_equal(b.dtype, np.dtype('f4'))
+        # b = (1-t)*a
+        # assert_equal(b, [0.0, 1.5])
+        # assert_equal(b.dtype, np.dtype('f4'))
+        #
+        # Probably ~t (bitwise negation) is more proper to use here,
+        # but this is arguably less intuitive to understand at a glance, and
+        # would fail if 't' is actually an integer array instead of boolean:
+        #
+        # b = (~t)*a
+        # assert_equal(b, [0.0, 1.5])
+        # assert_equal(b.dtype, np.dtype('f4'))
+
+    def test_result_type(self):
+        self.check_promotion_cases(np.result_type)
+        assert_(np.result_type(None) == np.dtype(None))
+
+    def test_promote_types_endian(self):
+        # promote_types should always return native-endian types
+        assert_equal(np.promote_types('<i8', '<i8'), np.dtype('i8'))
+        assert_equal(np.promote_types('>i8', '>i8'), np.dtype('i8'))
+
+        assert_equal(np.promote_types('>i8', '>U16'), np.dtype('U21'))
+        assert_equal(np.promote_types('<i8', '<U16'), np.dtype('U21'))
+        assert_equal(np.promote_types('>U16', '>i8'), np.dtype('U21'))
+        assert_equal(np.promote_types('<U16', '<i8'), np.dtype('U21'))
+
+        assert_equal(np.promote_types('<S5', '<U8'), np.dtype('U8'))
+        assert_equal(np.promote_types('>S5', '>U8'), np.dtype('U8'))
+        assert_equal(np.promote_types('<U8', '<S5'), np.dtype('U8'))
+        assert_equal(np.promote_types('>U8', '>S5'), np.dtype('U8'))
+        assert_equal(np.promote_types('<U5', '<U8'), np.dtype('U8'))
+        assert_equal(np.promote_types('>U8', '>U5'), np.dtype('U8'))
+
+        assert_equal(np.promote_types('<M8', '<M8'), np.dtype('M8'))
+        assert_equal(np.promote_types('>M8', '>M8'), np.dtype('M8'))
+        assert_equal(np.promote_types('<m8', '<m8'), np.dtype('m8'))
+        assert_equal(np.promote_types('>m8', '>m8'), np.dtype('m8'))
+
+    def test_can_cast_and_promote_usertypes(self):
+        # The rational type defines safe casting for signed integers,
+        # boolean. Rational itself *does* cast safely to double.
+        # (rational does not actually cast to all signed integers, e.g.
+        # int64 can be both long and longlong and it registers only the first)
+        valid_types = ["int8", "int16", "int32", "int64", "bool"]
+        invalid_types = "BHILQP" + "FDG" + "mM" + "f" + "V"
+
+        rational_dt = np.dtype(rational)
+        for numpy_dtype in valid_types:
+            numpy_dtype = np.dtype(numpy_dtype)
+            assert np.can_cast(numpy_dtype, rational_dt)
+            assert np.promote_types(numpy_dtype, rational_dt) is rational_dt
+
+        for numpy_dtype in invalid_types:
+            numpy_dtype = np.dtype(numpy_dtype)
+            assert not np.can_cast(numpy_dtype, rational_dt)
+            with pytest.raises(TypeError):
+                np.promote_types(numpy_dtype, rational_dt)
+
+        double_dt = np.dtype("double")
+        assert np.can_cast(rational_dt, double_dt)
+        assert np.promote_types(double_dt, rational_dt) is double_dt
+
+    @pytest.mark.parametrize("swap", ["", "swap"])
+    @pytest.mark.parametrize("string_dtype", ["U", "S"])
+    def test_promote_types_strings(self, swap, string_dtype):
+        if swap == "swap":
+            promote_types = lambda a, b: np.promote_types(b, a)
+        else:
+            promote_types = np.promote_types
+
+        S = string_dtype
+
+        # Promote numeric with unsized string:
+        assert_equal(promote_types('bool', S), np.dtype(S+'5'))
+        assert_equal(promote_types('b', S), np.dtype(S+'4'))
+        assert_equal(promote_types('u1', S), np.dtype(S+'3'))
+        assert_equal(promote_types('u2', S), np.dtype(S+'5'))
+        assert_equal(promote_types('u4', S), np.dtype(S+'10'))
+        assert_equal(promote_types('u8', S), np.dtype(S+'20'))
+        assert_equal(promote_types('i1', S), np.dtype(S+'4'))
+        assert_equal(promote_types('i2', S), np.dtype(S+'6'))
+        assert_equal(promote_types('i4', S), np.dtype(S+'11'))
+        assert_equal(promote_types('i8', S), np.dtype(S+'21'))
+        # Promote numeric with sized string:
+        assert_equal(promote_types('bool', S+'1'), np.dtype(S+'5'))
+        assert_equal(promote_types('bool', S+'30'), np.dtype(S+'30'))
+        assert_equal(promote_types('b', S+'1'), np.dtype(S+'4'))
assert_equal(promote_types('b', S+'30'), np.dtype(S+'30'))
+        assert_equal(promote_types('u1', S+'1'), np.dtype(S+'3'))
+        assert_equal(promote_types('u1', S+'30'), np.dtype(S+'30'))
+        assert_equal(promote_types('u2', S+'1'), np.dtype(S+'5'))
+        assert_equal(promote_types('u2', S+'30'), np.dtype(S+'30'))
+        assert_equal(promote_types('u4', S+'1'), np.dtype(S+'10'))
+        assert_equal(promote_types('u4', S+'30'), np.dtype(S+'30'))
+        assert_equal(promote_types('u8', S+'1'), np.dtype(S+'20'))
+        assert_equal(promote_types('u8', S+'30'), np.dtype(S+'30'))
+        # Promote with object:
+        assert_equal(promote_types('O', S+'30'), np.dtype('O'))
+
+    @pytest.mark.parametrize(["dtype1", "dtype2"],
+                             [[np.dtype("V6"), np.dtype("V10")],
+                              [np.dtype([("name1", "i8")]),
+                               np.dtype([("name2", "i8")])],
+                              [np.dtype("i8,i8"), np.dtype("i4,i4")],
+                              ])
+    def test_invalid_void_promotion(self, dtype1, dtype2):
+        # Mainly test structured void promotion, which currently allows
+        # byte-swapping, but nothing else:
+        with pytest.raises(TypeError):
+            np.promote_types(dtype1, dtype2)
+
+    @pytest.mark.parametrize(["dtype1", "dtype2"],
+                             [[np.dtype("V10"), np.dtype("V10")],
+                              [np.dtype([("name1", "<i8")]),
+                               np.dtype([("name1", ">i8")])],
+                              [np.dtype("i8,i8"), np.dtype("i8,>i8")],
+                              ])
+    def test_valid_void_promotion(self, dtype1, dtype2):
+        assert np.promote_types(dtype1, dtype2) is dtype1
+
+    @pytest.mark.parametrize("dtype",
+                             list(np.typecodes["All"]) +
+                             ["i,i", "S3", "S100", "U3", "U100", rational])
+    def test_promote_identical_types_metadata(self, dtype):
+        # The same type passed in twice to promote types always
+        # preserves metadata
+        metadata = {1: 1}
+        dtype = np.dtype(dtype, metadata=metadata)
+
+        res = np.promote_types(dtype, dtype)
+        assert res.metadata == dtype.metadata
+
+        # byte-swapping preserves and makes the dtype native:
+        dtype = dtype.newbyteorder()
+        if dtype.isnative:
+            # The type does not have byte swapping
+            return
+
+        res = np.promote_types(dtype, dtype)
+        if res.char in "?bhilqpBHILQPefdgFDGOmM" or dtype.type is rational:
+            # Metadata is lost for simple promotions (they create a new dtype)
+            assert res.metadata is None
+        else:
+            assert res.metadata == metadata
+        if dtype.kind != "V":
+            # the result is native (except for structured void)
+            assert res.isnative
+
+    @pytest.mark.slow
+    @pytest.mark.filterwarnings('ignore:Promotion of numbers:FutureWarning')
+    @pytest.mark.parametrize(["dtype1", "dtype2"],
+                             itertools.product(
+                                 list(np.typecodes["All"]) +
+                                 ["i,i", "S3", "S100", "U3", "U100", rational],
+                                 repeat=2))
+    def test_promote_types_metadata(self, dtype1, dtype2):
+        """Metadata handling in promotion does not appear formalized
+        right now in NumPy. This test should thus be considered to
+        document behaviour, rather than test the correct definition of it.
+
+        This test is very ugly, it was useful for rewriting part of the
+        promotion, but probably should eventually be replaced/deleted
+        (i.e. when metadata handling in promotion is better defined).
+        """
+        metadata1 = {1: 1}
+        metadata2 = {2: 2}
+        dtype1 = np.dtype(dtype1, metadata=metadata1)
+        dtype2 = np.dtype(dtype2, metadata=metadata2)
+
+        try:
+            res = np.promote_types(dtype1, dtype2)
+        except TypeError:
+            # Promotion failed, this test only checks metadata
+            return
+
+        if res.char in "?bhilqpBHILQPefdgFDGOmM" or res.type is rational:
+            # All simple types lose metadata (due to using promotion table):
+            assert res.metadata is None
+        elif res == dtype1:
+            # If the result matches one of the inputs, it is usually
+            # returned unchanged:
+            assert res is dtype1
+        elif res == dtype2:
+            # dtype1 may have been cast to the same type/kind as dtype2.
+            # If the resulting dtype is identical we currently pick the cast
+            # version of dtype1, which lost the metadata:
+            if np.promote_types(dtype1, dtype2.kind) == dtype2:
+                assert res.metadata is None
+            else:
+                assert res.metadata == metadata2
+        else:
+            assert res.metadata is None
+
+        # Try again for the byteswapped version
+        dtype1 = dtype1.newbyteorder()
+        assert dtype1.metadata == metadata1
+        res_bs = np.promote_types(dtype1, dtype2)
+        if res_bs.names is not None:
+            # Structured promotion doesn't remove byteswap:
+            assert res_bs.newbyteorder() == res
+        else:
+            assert res_bs == res
+        assert res_bs.metadata == res.metadata
+
+    def test_can_cast(self):
+        assert_(np.can_cast(np.int32, np.int64))
+        assert_(np.can_cast(np.float64, complex))
+        assert_(not np.can_cast(complex, float))
+
+        assert_(np.can_cast('i8', 'f8'))
+        assert_(not np.can_cast('i8', 'f4'))
+        assert_(np.can_cast('i4', 'S11'))
+
+        assert_(np.can_cast('i8', 'i8', 'no'))
+        assert_(not np.can_cast('<i8', '>i8', 'no'))
+
+        assert_(np.can_cast('<i8', '>i8', 'equiv'))
+        assert_(not np.can_cast('<i4', '>i8', 'equiv'))
+
+        assert_(np.can_cast('<i4', '>i8', 'safe'))
+        assert_(not np.can_cast('<i8', '>i4', 'safe'))
+
+        assert_(np.can_cast('<i8', '>i4', 'same_kind'))
+        assert_(not np.can_cast('<i8', '>u4', 'same_kind'))
+
+        assert_(np.can_cast('<i8', '>u4', 'unsafe'))
+
+        assert_(np.can_cast('bool', 'S5'))
+        assert_(not np.can_cast('bool', 'S4'))
+
+        assert_(np.can_cast('b', 'S4'))
+        assert_(not np.can_cast('b', 'S3'))
+
+        assert_(np.can_cast('u1', 'S3'))
+        assert_(not np.can_cast('u1', 'S2'))
+        assert_(np.can_cast('u2', 'S5'))
+        assert_(not np.can_cast('u2', 'S4'))
+        assert_(np.can_cast('u4', 'S10'))
+        assert_(not np.can_cast('u4', 'S9'))
+        assert_(np.can_cast('u8', 'S20'))
+        assert_(not np.can_cast('u8', 'S19'))
+
+        assert_(np.can_cast('i1', 'S4'))
+        assert_(not np.can_cast('i1', 'S3'))
+        assert_(np.can_cast('i2', 'S6'))
+        assert_(not np.can_cast('i2', 'S5'))
+        assert_(np.can_cast('i4', 'S11'))
+        assert_(not np.can_cast('i4', 'S10'))
+        assert_(np.can_cast('i8', 'S21'))
+        assert_(not np.can_cast('i8', 'S20'))
+
+        assert_(np.can_cast('bool', 'S5'))
+        assert_(not np.can_cast('bool', 'S4'))
+
+        assert_(np.can_cast('b', 'U4'))
+        assert_(not np.can_cast('b', 'U3'))
+
+        assert_(np.can_cast('u1', 'U3'))
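+        # Added aside (an observation, not part of the original test): the
+        # string sizes checked here are the longest possible decimal
+        # rendering of each integer type, plus one slot for a '-' sign;
+        # e.g. the largest unsigned 64-bit value is 20 digits wide, hence
+        # 'S20'/'U20' above and 'S21'/'U21' for the signed variant:
+        assert len(str(2**64 - 1)) == 20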
assert_(not np.can_cast('u1', 'U2')) + assert_(np.can_cast('u2', 'U5')) + assert_(not np.can_cast('u2', 'U4')) + assert_(np.can_cast('u4', 'U10')) + assert_(not np.can_cast('u4', 'U9')) + assert_(np.can_cast('u8', 'U20')) + assert_(not np.can_cast('u8', 'U19')) + + assert_(np.can_cast('i1', 'U4')) + assert_(not np.can_cast('i1', 'U3')) + assert_(np.can_cast('i2', 'U6')) + assert_(not np.can_cast('i2', 'U5')) + assert_(np.can_cast('i4', 'U11')) + assert_(not np.can_cast('i4', 'U10')) + assert_(np.can_cast('i8', 'U21')) + assert_(not np.can_cast('i8', 'U20')) + + assert_raises(TypeError, np.can_cast, 'i4', None) + assert_raises(TypeError, np.can_cast, None, 'i4') + + # Also test keyword arguments + assert_(np.can_cast(from_=np.int32, to=np.int64)) + + def test_can_cast_simple_to_structured(self): + # Non-structured can only be cast to structured in 'unsafe' mode. + assert_(not np.can_cast('i4', 'i4,i4')) + assert_(not np.can_cast('i4', 'i4,i2')) + assert_(np.can_cast('i4', 'i4,i4', casting='unsafe')) + assert_(np.can_cast('i4', 'i4,i2', casting='unsafe')) + # Even if there is just a single field which is OK. + assert_(not np.can_cast('i2', [('f1', 'i4')])) + assert_(not np.can_cast('i2', [('f1', 'i4')], casting='same_kind')) + assert_(np.can_cast('i2', [('f1', 'i4')], casting='unsafe')) + # It should be the same for recursive structured or subarrays. + assert_(not np.can_cast('i2', [('f1', 'i4,i4')])) + assert_(np.can_cast('i2', [('f1', 'i4,i4')], casting='unsafe')) + assert_(not np.can_cast('i2', [('f1', '(2,3)i4')])) + assert_(np.can_cast('i2', [('f1', '(2,3)i4')], casting='unsafe')) + + def test_can_cast_structured_to_simple(self): + # Need unsafe casting for structured to simple. + assert_(not np.can_cast([('f1', 'i4')], 'i4')) + assert_(np.can_cast([('f1', 'i4')], 'i4', casting='unsafe')) + assert_(np.can_cast([('f1', 'i4')], 'i2', casting='unsafe')) + # Since it is unclear what is being cast, multiple fields to + # single should not work even for unsafe casting. + assert_(not np.can_cast('i4,i4', 'i4', casting='unsafe')) + # But a single field inside a single field is OK. + assert_(not np.can_cast([('f1', [('x', 'i4')])], 'i4')) + assert_(np.can_cast([('f1', [('x', 'i4')])], 'i4', casting='unsafe')) + # And a subarray is fine too - it will just take the first element + # (arguably not very consistently; might also take the first field). + assert_(not np.can_cast([('f0', '(3,)i4')], 'i4')) + assert_(np.can_cast([('f0', '(3,)i4')], 'i4', casting='unsafe')) + # But a structured subarray with multiple fields should fail. 
+ assert_(not np.can_cast([('f0', ('i4,i4'), (2,))], 'i4', + casting='unsafe')) + + def test_can_cast_values(self): + # gh-5917 + for dt in np.sctypes['int'] + np.sctypes['uint']: + ii = np.iinfo(dt) + assert_(np.can_cast(ii.min, dt)) + assert_(np.can_cast(ii.max, dt)) + assert_(not np.can_cast(ii.min - 1, dt)) + assert_(not np.can_cast(ii.max + 1, dt)) + + for dt in np.sctypes['float']: + fi = np.finfo(dt) + assert_(np.can_cast(fi.min, dt)) + assert_(np.can_cast(fi.max, dt)) + + +# Custom exception class to test exception propagation in fromiter +class NIterError(Exception): + pass + + +class TestFromiter: + def makegen(self): + return (x**2 for x in range(24)) + + def test_types(self): + ai32 = np.fromiter(self.makegen(), np.int32) + ai64 = np.fromiter(self.makegen(), np.int64) + af = np.fromiter(self.makegen(), float) + assert_(ai32.dtype == np.dtype(np.int32)) + assert_(ai64.dtype == np.dtype(np.int64)) + assert_(af.dtype == np.dtype(float)) + + def test_lengths(self): + expected = np.array(list(self.makegen())) + a = np.fromiter(self.makegen(), int) + a20 = np.fromiter(self.makegen(), int, 20) + assert_(len(a) == len(expected)) + assert_(len(a20) == 20) + assert_raises(ValueError, np.fromiter, + self.makegen(), int, len(expected) + 10) + + def test_values(self): + expected = np.array(list(self.makegen())) + a = np.fromiter(self.makegen(), int) + a20 = np.fromiter(self.makegen(), int, 20) + assert_(np.alltrue(a == expected, axis=0)) + assert_(np.alltrue(a20 == expected[:20], axis=0)) + + def load_data(self, n, eindex): + # Utility method for the issue 2592 tests. + # Raise an exception at the desired index in the iterator. + for e in range(n): + if e == eindex: + raise NIterError('error at index %s' % eindex) + yield e + + def test_2592(self): + # Test iteration exceptions are correctly raised. + count, eindex = 10, 5 + assert_raises(NIterError, np.fromiter, + self.load_data(count, eindex), dtype=int, count=count) + + def test_2592_edge(self): + # Test iter. exceptions, edge case (exception at end of iterator). 
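+        # Added illustration (not part of the original suite): any exception
+        # raised by the underlying iterable propagates out of np.fromiter,
+        # e.g. a ZeroDivisionError from a generator expression:
+        gen = (1 / (i - 2) for i in range(5))  # raises at i == 2
+        assert_raises(ZeroDivisionError, np.fromiter, gen, dtype=float, count=5)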
+ count = 10 + eindex = count-1 + assert_raises(NIterError, np.fromiter, + self.load_data(count, eindex), dtype=int, count=count) + + +class TestNonzero: + def test_nonzero_trivial(self): + assert_equal(np.count_nonzero(np.array([])), 0) + assert_equal(np.count_nonzero(np.array([], dtype='?')), 0) + assert_equal(np.nonzero(np.array([])), ([],)) + + assert_equal(np.count_nonzero(np.array([0])), 0) + assert_equal(np.count_nonzero(np.array([0], dtype='?')), 0) + assert_equal(np.nonzero(np.array([0])), ([],)) + + assert_equal(np.count_nonzero(np.array([1])), 1) + assert_equal(np.count_nonzero(np.array([1], dtype='?')), 1) + assert_equal(np.nonzero(np.array([1])), ([0],)) + + def test_nonzero_zerod(self): + assert_equal(np.count_nonzero(np.array(0)), 0) + assert_equal(np.count_nonzero(np.array(0, dtype='?')), 0) + with assert_warns(DeprecationWarning): + assert_equal(np.nonzero(np.array(0)), ([],)) + + assert_equal(np.count_nonzero(np.array(1)), 1) + assert_equal(np.count_nonzero(np.array(1, dtype='?')), 1) + with assert_warns(DeprecationWarning): + assert_equal(np.nonzero(np.array(1)), ([0],)) + + def test_nonzero_onedim(self): + x = np.array([1, 0, 2, -1, 0, 0, 8]) + assert_equal(np.count_nonzero(x), 4) + assert_equal(np.count_nonzero(x), 4) + assert_equal(np.nonzero(x), ([0, 2, 3, 6],)) + + # x = np.array([(1, 2), (0, 0), (1, 1), (-1, 3), (0, 7)], + # dtype=[('a', 'i4'), ('b', 'i2')]) + x = np.array([(1, 2, -5, -3), (0, 0, 2, 7), (1, 1, 0, 1), (-1, 3, 1, 0), (0, 7, 0, 4)], + dtype=[('a', 'i4'), ('b', 'i2'), ('c', 'i1'), ('d', 'i8')]) + assert_equal(np.count_nonzero(x['a']), 3) + assert_equal(np.count_nonzero(x['b']), 4) + assert_equal(np.count_nonzero(x['c']), 3) + assert_equal(np.count_nonzero(x['d']), 4) + assert_equal(np.nonzero(x['a']), ([0, 2, 3],)) + assert_equal(np.nonzero(x['b']), ([0, 2, 3, 4],)) + + def test_nonzero_twodim(self): + x = np.array([[0, 1, 0], [2, 0, 3]]) + assert_equal(np.count_nonzero(x.astype('i1')), 3) + assert_equal(np.count_nonzero(x.astype('i2')), 3) + assert_equal(np.count_nonzero(x.astype('i4')), 3) + assert_equal(np.count_nonzero(x.astype('i8')), 3) + assert_equal(np.nonzero(x), ([0, 1, 1], [1, 0, 2])) + + x = np.eye(3) + assert_equal(np.count_nonzero(x.astype('i1')), 3) + assert_equal(np.count_nonzero(x.astype('i2')), 3) + assert_equal(np.count_nonzero(x.astype('i4')), 3) + assert_equal(np.count_nonzero(x.astype('i8')), 3) + assert_equal(np.nonzero(x), ([0, 1, 2], [0, 1, 2])) + + x = np.array([[(0, 1), (0, 0), (1, 11)], + [(1, 1), (1, 0), (0, 0)], + [(0, 0), (1, 5), (0, 1)]], dtype=[('a', 'f4'), ('b', 'u1')]) + assert_equal(np.count_nonzero(x['a']), 4) + assert_equal(np.count_nonzero(x['b']), 5) + assert_equal(np.nonzero(x['a']), ([0, 1, 1, 2], [2, 0, 1, 1])) + assert_equal(np.nonzero(x['b']), ([0, 0, 1, 2, 2], [0, 2, 0, 1, 2])) + + assert_(not x['a'].T.flags.aligned) + assert_equal(np.count_nonzero(x['a'].T), 4) + assert_equal(np.count_nonzero(x['b'].T), 5) + assert_equal(np.nonzero(x['a'].T), ([0, 1, 1, 2], [1, 1, 2, 0])) + assert_equal(np.nonzero(x['b'].T), ([0, 0, 1, 2, 2], [0, 1, 2, 0, 2])) + + def test_sparse(self): + # test special sparse condition boolean code path + for i in range(20): + c = np.zeros(200, dtype=bool) + c[i::20] = True + assert_equal(np.nonzero(c)[0], np.arange(i, 200 + i, 20)) + + c = np.zeros(400, dtype=bool) + c[10 + i:20 + i] = True + c[20 + i*2] = True + assert_equal(np.nonzero(c)[0], + np.concatenate((np.arange(10 + i, 20 + i), [20 + i*2]))) + + def test_return_type(self): + class C(np.ndarray): + pass + + for view in (C, 
np.ndarray): + for nd in range(1, 4): + shape = tuple(range(2, 2+nd)) + x = np.arange(np.prod(shape)).reshape(shape).view(view) + for nzx in (np.nonzero(x), x.nonzero()): + for nzx_i in nzx: + assert_(type(nzx_i) is np.ndarray) + assert_(nzx_i.flags.writeable) + + def test_count_nonzero_axis(self): + # Basic check of functionality + m = np.array([[0, 1, 7, 0, 0], [3, 0, 0, 2, 19]]) + + expected = np.array([1, 1, 1, 1, 1]) + assert_equal(np.count_nonzero(m, axis=0), expected) + + expected = np.array([2, 3]) + assert_equal(np.count_nonzero(m, axis=1), expected) + + assert_raises(ValueError, np.count_nonzero, m, axis=(1, 1)) + assert_raises(TypeError, np.count_nonzero, m, axis='foo') + assert_raises(np.AxisError, np.count_nonzero, m, axis=3) + assert_raises(TypeError, np.count_nonzero, + m, axis=np.array([[1], [2]])) + + def test_count_nonzero_axis_all_dtypes(self): + # More thorough test that the axis argument is respected + # for all dtypes and responds correctly when presented with + # either integer or tuple arguments for axis + msg = "Mismatch for dtype: %s" + + def assert_equal_w_dt(a, b, err_msg): + assert_equal(a.dtype, b.dtype, err_msg=err_msg) + assert_equal(a, b, err_msg=err_msg) + + for dt in np.typecodes['All']: + err_msg = msg % (np.dtype(dt).name,) + + if dt != 'V': + if dt != 'M': + m = np.zeros((3, 3), dtype=dt) + n = np.ones(1, dtype=dt) + + m[0, 0] = n[0] + m[1, 0] = n[0] + + else: # np.zeros doesn't work for np.datetime64 + m = np.array(['1970-01-01'] * 9) + m = m.reshape((3, 3)) + + m[0, 0] = '1970-01-12' + m[1, 0] = '1970-01-12' + m = m.astype(dt) + + expected = np.array([2, 0, 0], dtype=np.intp) + assert_equal_w_dt(np.count_nonzero(m, axis=0), + expected, err_msg=err_msg) + + expected = np.array([1, 1, 0], dtype=np.intp) + assert_equal_w_dt(np.count_nonzero(m, axis=1), + expected, err_msg=err_msg) + + expected = np.array(2) + assert_equal(np.count_nonzero(m, axis=(0, 1)), + expected, err_msg=err_msg) + assert_equal(np.count_nonzero(m, axis=None), + expected, err_msg=err_msg) + assert_equal(np.count_nonzero(m), + expected, err_msg=err_msg) + + if dt == 'V': + # There are no 'nonzero' objects for np.void, so the testing + # setup is slightly different for this dtype + m = np.array([np.void(1)] * 6).reshape((2, 3)) + + expected = np.array([0, 0, 0], dtype=np.intp) + assert_equal_w_dt(np.count_nonzero(m, axis=0), + expected, err_msg=err_msg) + + expected = np.array([0, 0], dtype=np.intp) + assert_equal_w_dt(np.count_nonzero(m, axis=1), + expected, err_msg=err_msg) + + expected = np.array(0) + assert_equal(np.count_nonzero(m, axis=(0, 1)), + expected, err_msg=err_msg) + assert_equal(np.count_nonzero(m, axis=None), + expected, err_msg=err_msg) + assert_equal(np.count_nonzero(m), + expected, err_msg=err_msg) + + def test_count_nonzero_axis_consistent(self): + # Check that the axis behaviour for valid axes in + # non-special cases is consistent (and therefore + # correct) by checking it against an integer array + # that is then casted to the generic object dtype + from itertools import combinations, permutations + + axis = (0, 1, 2, 3) + size = (5, 5, 5, 5) + msg = "Mismatch for axis: %s" + + rng = np.random.RandomState(1234) + m = rng.randint(-100, 100, size=size) + n = m.astype(object) + + for length in range(len(axis)): + for combo in combinations(axis, length): + for perm in permutations(combo): + assert_equal( + np.count_nonzero(m, axis=perm), + np.count_nonzero(n, axis=perm), + err_msg=msg % (perm,)) + + def test_countnonzero_axis_empty(self): + a = np.array([[0, 0, 1], 
[1, 0, 1]]) + assert_equal(np.count_nonzero(a, axis=()), a.astype(bool)) + + def test_countnonzero_keepdims(self): + a = np.array([[0, 0, 1, 0], + [0, 3, 5, 0], + [7, 9, 2, 0]]) + assert_equal(np.count_nonzero(a, axis=0, keepdims=True), + [[1, 2, 3, 0]]) + assert_equal(np.count_nonzero(a, axis=1, keepdims=True), + [[1], [2], [3]]) + assert_equal(np.count_nonzero(a, keepdims=True), + [[6]]) + + def test_array_method(self): + # Tests that the array method + # call to nonzero works + m = np.array([[1, 0, 0], [4, 0, 6]]) + tgt = [[0, 1, 1], [0, 0, 2]] + + assert_equal(m.nonzero(), tgt) + + def test_nonzero_invalid_object(self): + # gh-9295 + a = np.array([np.array([1, 2]), 3], dtype=object) + assert_raises(ValueError, np.nonzero, a) + + class BoolErrors: + def __bool__(self): + raise ValueError("Not allowed") + + assert_raises(ValueError, np.nonzero, np.array([BoolErrors()])) + + def test_nonzero_sideeffect_safety(self): + # gh-13631 + class FalseThenTrue: + _val = False + def __bool__(self): + try: + return self._val + finally: + self._val = True + + class TrueThenFalse: + _val = True + def __bool__(self): + try: + return self._val + finally: + self._val = False + + # result grows on the second pass + a = np.array([True, FalseThenTrue()]) + assert_raises(RuntimeError, np.nonzero, a) + + a = np.array([[True], [FalseThenTrue()]]) + assert_raises(RuntimeError, np.nonzero, a) + + # result shrinks on the second pass + a = np.array([False, TrueThenFalse()]) + assert_raises(RuntimeError, np.nonzero, a) + + a = np.array([[False], [TrueThenFalse()]]) + assert_raises(RuntimeError, np.nonzero, a) + + def test_nonzero_exception_safe(self): + # gh-13930 + + class ThrowsAfter: + def __init__(self, iters): + self.iters_left = iters + + def __bool__(self): + if self.iters_left == 0: + raise ValueError("called `iters` times") + + self.iters_left -= 1 + return True + + """ + Test that a ValueError is raised instead of a SystemError + + If the __bool__ function is called after the error state is set, + Python (cpython) will raise a SystemError. + """ + + # assert that an exception in first pass is handled correctly + a = np.array([ThrowsAfter(5)]*10) + assert_raises(ValueError, np.nonzero, a) + + # raise exception in second pass for 1-dimensional loop + a = np.array([ThrowsAfter(15)]*10) + assert_raises(ValueError, np.nonzero, a) + + # raise exception in second pass for n-dimensional loop + a = np.array([[ThrowsAfter(15)]]*10) + assert_raises(ValueError, np.nonzero, a) + + def test_structured_threadsafety(self): + # Nonzero (and some other functions) should be threadsafe for + # structured datatypes, see gh-15387. This test can behave randomly. 
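+        # Added sketch (illustrative, not from the original test): the
+        # deeply nested dtype constructed below pairs two single-field
+        # wrappers around a float64, so each element spans 16 bytes:
+        demo_dt = np.dtype([("", np.dtype([("", np.dtype([("", "f8")]))]))] * 2)
+        assert demo_dt.itemsize == 16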
+ from concurrent.futures import ThreadPoolExecutor + + # Create a deeply nested dtype to make a failure more likely: + dt = np.dtype([("", "f8")]) + dt = np.dtype([("", dt)]) + dt = np.dtype([("", dt)] * 2) + # The array should be large enough to likely run into threading issues + arr = np.random.uniform(size=(5000, 4)).view(dt)[:, 0] + def func(arr): + arr.nonzero() + + tpe = ThreadPoolExecutor(max_workers=8) + futures = [tpe.submit(func, arr) for _ in range(10)] + for f in futures: + f.result() + + assert arr.dtype is dt + + +class TestIndex: + def test_boolean(self): + a = rand(3, 5, 8) + V = rand(5, 8) + g1 = randint(0, 5, size=15) + g2 = randint(0, 8, size=15) + V[g1, g2] = -V[g1, g2] + assert_((np.array([a[0][V > 0], a[1][V > 0], a[2][V > 0]]) == a[:, V > 0]).all()) + + def test_boolean_edgecase(self): + a = np.array([], dtype='int32') + b = np.array([], dtype='bool') + c = a[b] + assert_equal(c, []) + assert_equal(c.dtype, np.dtype('int32')) + + +class TestBinaryRepr: + def test_zero(self): + assert_equal(np.binary_repr(0), '0') + + def test_positive(self): + assert_equal(np.binary_repr(10), '1010') + assert_equal(np.binary_repr(12522), + '11000011101010') + assert_equal(np.binary_repr(10736848), + '101000111101010011010000') + + def test_negative(self): + assert_equal(np.binary_repr(-1), '-1') + assert_equal(np.binary_repr(-10), '-1010') + assert_equal(np.binary_repr(-12522), + '-11000011101010') + assert_equal(np.binary_repr(-10736848), + '-101000111101010011010000') + + def test_sufficient_width(self): + assert_equal(np.binary_repr(0, width=5), '00000') + assert_equal(np.binary_repr(10, width=7), '0001010') + assert_equal(np.binary_repr(-5, width=7), '1111011') + + def test_neg_width_boundaries(self): + # see gh-8670 + + # Ensure that the example in the issue does not + # break before proceeding to a more thorough test. + assert_equal(np.binary_repr(-128, width=8), '10000000') + + for width in range(1, 11): + num = -2**(width - 1) + exp = '1' + (width - 1) * '0' + assert_equal(np.binary_repr(num, width=width), exp) + + def test_large_neg_int64(self): + # See gh-14289. 
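+        # Added check (illustrative): width=64 requests the 64-bit two's
+        # complement, and 2**64 - 2**62 == 3 * 2**62, i.e. '11' followed by
+        # 62 zero bits:
+        assert_equal(format(2**64 - 2**62, 'b'), '11' + '0' * 62)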
+ assert_equal(np.binary_repr(np.int64(-2**62), width=64), + '11' + '0'*62) + + +class TestBaseRepr: + def test_base3(self): + assert_equal(np.base_repr(3**5, 3), '100000') + + def test_positive(self): + assert_equal(np.base_repr(12, 10), '12') + assert_equal(np.base_repr(12, 10, 4), '000012') + assert_equal(np.base_repr(12, 4), '30') + assert_equal(np.base_repr(3731624803700888, 36), '10QR0ROFCEW') + + def test_negative(self): + assert_equal(np.base_repr(-12, 10), '-12') + assert_equal(np.base_repr(-12, 10, 4), '-000012') + assert_equal(np.base_repr(-12, 4), '-30') + + def test_base_range(self): + with assert_raises(ValueError): + np.base_repr(1, 1) + with assert_raises(ValueError): + np.base_repr(1, 37) + + +class TestArrayComparisons: + def test_array_equal(self): + res = np.array_equal(np.array([1, 2]), np.array([1, 2])) + assert_(res) + assert_(type(res) is bool) + res = np.array_equal(np.array([1, 2]), np.array([1, 2, 3])) + assert_(not res) + assert_(type(res) is bool) + res = np.array_equal(np.array([1, 2]), np.array([3, 4])) + assert_(not res) + assert_(type(res) is bool) + res = np.array_equal(np.array([1, 2]), np.array([1, 3])) + assert_(not res) + assert_(type(res) is bool) + res = np.array_equal(np.array(['a'], dtype='S1'), np.array(['a'], dtype='S1')) + assert_(res) + assert_(type(res) is bool) + res = np.array_equal(np.array([('a', 1)], dtype='S1,u4'), + np.array([('a', 1)], dtype='S1,u4')) + assert_(res) + assert_(type(res) is bool) + + def test_array_equal_equal_nan(self): + # Test array_equal with equal_nan kwarg + a1 = np.array([1, 2, np.nan]) + a2 = np.array([1, np.nan, 2]) + a3 = np.array([1, 2, np.inf]) + + # equal_nan=False by default + assert_(not np.array_equal(a1, a1)) + assert_(np.array_equal(a1, a1, equal_nan=True)) + assert_(not np.array_equal(a1, a2, equal_nan=True)) + # nan's not conflated with inf's + assert_(not np.array_equal(a1, a3, equal_nan=True)) + # 0-D arrays + a = np.array(np.nan) + assert_(not np.array_equal(a, a)) + assert_(np.array_equal(a, a, equal_nan=True)) + # Non-float dtype - equal_nan should have no effect + a = np.array([1, 2, 3], dtype=int) + assert_(np.array_equal(a, a)) + assert_(np.array_equal(a, a, equal_nan=True)) + # Multi-dimensional array + a = np.array([[0, 1], [np.nan, 1]]) + assert_(not np.array_equal(a, a)) + assert_(np.array_equal(a, a, equal_nan=True)) + # Complex values + a, b = [np.array([1 + 1j])]*2 + a.real, b.imag = np.nan, np.nan + assert_(not np.array_equal(a, b, equal_nan=False)) + assert_(np.array_equal(a, b, equal_nan=True)) + + def test_none_compares_elementwise(self): + a = np.array([None, 1, None], dtype=object) + assert_equal(a == None, [True, False, True]) + assert_equal(a != None, [False, True, False]) + + a = np.ones(3) + assert_equal(a == None, [False, False, False]) + assert_equal(a != None, [True, True, True]) + + def test_array_equiv(self): + res = np.array_equiv(np.array([1, 2]), np.array([1, 2])) + assert_(res) + assert_(type(res) is bool) + res = np.array_equiv(np.array([1, 2]), np.array([1, 2, 3])) + assert_(not res) + assert_(type(res) is bool) + res = np.array_equiv(np.array([1, 2]), np.array([3, 4])) + assert_(not res) + assert_(type(res) is bool) + res = np.array_equiv(np.array([1, 2]), np.array([1, 3])) + assert_(not res) + assert_(type(res) is bool) + + res = np.array_equiv(np.array([1, 1]), np.array([1])) + assert_(res) + assert_(type(res) is bool) + res = np.array_equiv(np.array([1, 1]), np.array([[1], [1]])) + assert_(res) + assert_(type(res) is bool) + res = np.array_equiv(np.array([1, 
2]), np.array([2])) + assert_(not res) + assert_(type(res) is bool) + res = np.array_equiv(np.array([1, 2]), np.array([[1], [2]])) + assert_(not res) + assert_(type(res) is bool) + res = np.array_equiv(np.array([1, 2]), np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])) + assert_(not res) + assert_(type(res) is bool) + + @pytest.mark.parametrize("dtype", ["V0", "V3", "V10"]) + def test_compare_unstructured_voids(self, dtype): + zeros = np.zeros(3, dtype=dtype) + + assert_array_equal(zeros, zeros) + assert not (zeros != zeros).any() + + if dtype == "V0": + # Can't test != of actually different data + return + + nonzeros = np.array([b"1", b"2", b"3"], dtype=dtype) + + assert not (zeros == nonzeros).any() + assert (zeros != nonzeros).all() + + +def assert_array_strict_equal(x, y): + assert_array_equal(x, y) + # Check flags, 32 bit arches typically don't provide 16 byte alignment + if ((x.dtype.alignment <= 8 or + np.intp().dtype.itemsize != 4) and + sys.platform != 'win32'): + assert_(x.flags == y.flags) + else: + assert_(x.flags.owndata == y.flags.owndata) + assert_(x.flags.writeable == y.flags.writeable) + assert_(x.flags.c_contiguous == y.flags.c_contiguous) + assert_(x.flags.f_contiguous == y.flags.f_contiguous) + assert_(x.flags.writebackifcopy == y.flags.writebackifcopy) + # check endianness + assert_(x.dtype.isnative == y.dtype.isnative) + + +class TestClip: + def setup(self): + self.nr = 5 + self.nc = 3 + + def fastclip(self, a, m, M, out=None, casting=None): + if out is None: + if casting is None: + return a.clip(m, M) + else: + return a.clip(m, M, casting=casting) + else: + if casting is None: + return a.clip(m, M, out) + else: + return a.clip(m, M, out, casting=casting) + + def clip(self, a, m, M, out=None): + # use slow-clip + selector = np.less(a, m) + 2*np.greater(a, M) + return selector.choose((a, m, M), out=out) + + # Handy functions + def _generate_data(self, n, m): + return randn(n, m) + + def _generate_data_complex(self, n, m): + return randn(n, m) + 1.j * rand(n, m) + + def _generate_flt_data(self, n, m): + return (randn(n, m)).astype(np.float32) + + def _neg_byteorder(self, a): + a = np.asarray(a) + if sys.byteorder == 'little': + a = a.astype(a.dtype.newbyteorder('>')) + else: + a = a.astype(a.dtype.newbyteorder('<')) + return a + + def _generate_non_native_data(self, n, m): + data = randn(n, m) + data = self._neg_byteorder(data) + assert_(not data.dtype.isnative) + return data + + def _generate_int_data(self, n, m): + return (10 * rand(n, m)).astype(np.int64) + + def _generate_int32_data(self, n, m): + return (10 * rand(n, m)).astype(np.int32) + + # Now the real test cases + + @pytest.mark.parametrize("dtype", '?bhilqpBHILQPefdgFDGO') + def test_ones_pathological(self, dtype): + # for preservation of behavior described in + # gh-12519; amin > amax behavior may still change + # in the future + arr = np.ones(10, dtype=dtype) + expected = np.zeros(10, dtype=dtype) + actual = np.clip(arr, 1, 0) + if dtype == 'O': + assert actual.tolist() == expected.tolist() + else: + assert_equal(actual, expected) + + def test_simple_double(self): + # Test native double input with scalar min/max. + a = self._generate_data(self.nr, self.nc) + m = 0.1 + M = 0.6 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_simple_int(self): + # Test native int input with scalar min/max. 
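+        # Added sketch (not from the original suite): the slow reference
+        # ``clip`` above encodes each element as 0 (keep), 1 (below min) or
+        # 2 (above max) and resolves the result with choose():
+        sel = np.less([-5, 0, 5], -2) + 2 * np.greater([-5, 0, 5], 4)
+        assert_array_equal(sel, [1, 0, 2])
+        assert_array_equal(sel.choose(([-5, 0, 5], -2, 4)), [-2, 0, 4])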
+ a = self._generate_int_data(self.nr, self.nc) + a = a.astype(int) + m = -2 + M = 4 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_array_double(self): + # Test native double input with array min/max. + a = self._generate_data(self.nr, self.nc) + m = np.zeros(a.shape) + M = m + 0.5 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_simple_nonnative(self): + # Test non native double input with scalar min/max. + # Test native double input with non native double scalar min/max. + a = self._generate_non_native_data(self.nr, self.nc) + m = -0.5 + M = 0.6 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_equal(ac, act) + + # Test native double input with non native double scalar min/max. + a = self._generate_data(self.nr, self.nc) + m = -0.5 + M = self._neg_byteorder(0.6) + assert_(not M.dtype.isnative) + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_equal(ac, act) + + def test_simple_complex(self): + # Test native complex input with native double scalar min/max. + # Test native input with complex double scalar min/max. + a = 3 * self._generate_data_complex(self.nr, self.nc) + m = -0.5 + M = 1. + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + # Test native input with complex double scalar min/max. + a = 3 * self._generate_data(self.nr, self.nc) + m = -0.5 + 1.j + M = 1. + 2.j + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_clip_complex(self): + # Address Issue gh-5354 for clipping complex arrays + # Test native complex input without explicit min/max + # ie, either min=None or max=None + a = np.ones(10, dtype=complex) + m = a.min() + M = a.max() + am = self.fastclip(a, m, None) + aM = self.fastclip(a, None, M) + assert_array_strict_equal(am, a) + assert_array_strict_equal(aM, a) + + def test_clip_non_contig(self): + # Test clip for non contiguous native input and native scalar min/max. + a = self._generate_data(self.nr * 2, self.nc * 3) + a = a[::2, ::3] + assert_(not a.flags['F_CONTIGUOUS']) + assert_(not a.flags['C_CONTIGUOUS']) + ac = self.fastclip(a, -1.6, 1.7) + act = self.clip(a, -1.6, 1.7) + assert_array_strict_equal(ac, act) + + def test_simple_out(self): + # Test native double input with scalar min/max. + a = self._generate_data(self.nr, self.nc) + m = -0.5 + M = 0.6 + ac = np.zeros(a.shape) + act = np.zeros(a.shape) + self.fastclip(a, m, M, ac) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + @pytest.mark.parametrize("casting", [None, "unsafe"]) + def test_simple_int32_inout(self, casting): + # Test native int32 input with double min/max and int32 out. + a = self._generate_int32_data(self.nr, self.nc) + m = np.float64(0) + M = np.float64(2) + ac = np.zeros(a.shape, dtype=np.int32) + act = ac.copy() + if casting is None: + with assert_warns(DeprecationWarning): + # NumPy 1.17.0, 2018-02-24 - casting is unsafe + self.fastclip(a, m, M, ac, casting=casting) + else: + # explicitly passing "unsafe" will silence warning + self.fastclip(a, m, M, ac, casting=casting) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_simple_int64_out(self): + # Test native int32 input with int32 scalar min/max and int64 out. 
+ a = self._generate_int32_data(self.nr, self.nc) + m = np.int32(-1) + M = np.int32(1) + ac = np.zeros(a.shape, dtype=np.int64) + act = ac.copy() + self.fastclip(a, m, M, ac) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_simple_int64_inout(self): + # Test native int32 input with double array min/max and int32 out. + a = self._generate_int32_data(self.nr, self.nc) + m = np.zeros(a.shape, np.float64) + M = np.float64(1) + ac = np.zeros(a.shape, dtype=np.int32) + act = ac.copy() + with assert_warns(DeprecationWarning): + # NumPy 1.17.0, 2018-02-24 - casting is unsafe + self.fastclip(a, m, M, ac) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_simple_int32_out(self): + # Test native double input with scalar min/max and int out. + a = self._generate_data(self.nr, self.nc) + m = -1.0 + M = 2.0 + ac = np.zeros(a.shape, dtype=np.int32) + act = ac.copy() + with assert_warns(DeprecationWarning): + # NumPy 1.17.0, 2018-02-24 - casting is unsafe + self.fastclip(a, m, M, ac) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_simple_inplace_01(self): + # Test native double input with array min/max in-place. + a = self._generate_data(self.nr, self.nc) + ac = a.copy() + m = np.zeros(a.shape) + M = 1.0 + self.fastclip(a, m, M, a) + self.clip(a, m, M, ac) + assert_array_strict_equal(a, ac) + + def test_simple_inplace_02(self): + # Test native double input with scalar min/max in-place. + a = self._generate_data(self.nr, self.nc) + ac = a.copy() + m = -0.5 + M = 0.6 + self.fastclip(a, m, M, a) + self.clip(ac, m, M, ac) + assert_array_strict_equal(a, ac) + + def test_noncontig_inplace(self): + # Test non contiguous double input with double scalar min/max in-place. + a = self._generate_data(self.nr * 2, self.nc * 3) + a = a[::2, ::3] + assert_(not a.flags['F_CONTIGUOUS']) + assert_(not a.flags['C_CONTIGUOUS']) + ac = a.copy() + m = -0.5 + M = 0.6 + self.fastclip(a, m, M, a) + self.clip(ac, m, M, ac) + assert_array_equal(a, ac) + + def test_type_cast_01(self): + # Test native double input with scalar min/max. + a = self._generate_data(self.nr, self.nc) + m = -0.5 + M = 0.6 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_02(self): + # Test native int32 input with int32 scalar min/max. + a = self._generate_int_data(self.nr, self.nc) + a = a.astype(np.int32) + m = -2 + M = 4 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_03(self): + # Test native int32 input with float64 scalar min/max. + a = self._generate_int32_data(self.nr, self.nc) + m = -2 + M = 4 + ac = self.fastclip(a, np.float64(m), np.float64(M)) + act = self.clip(a, np.float64(m), np.float64(M)) + assert_array_strict_equal(ac, act) + + def test_type_cast_04(self): + # Test native int32 input with float32 scalar min/max. + a = self._generate_int32_data(self.nr, self.nc) + m = np.float32(-2) + M = np.float32(4) + act = self.fastclip(a, m, M) + ac = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_05(self): + # Test native int32 with double arrays min/max. + a = self._generate_int_data(self.nr, self.nc) + m = -0.5 + M = 1. + ac = self.fastclip(a, m * np.zeros(a.shape), M) + act = self.clip(a, m * np.zeros(a.shape), M) + assert_array_strict_equal(ac, act) + + def test_type_cast_06(self): + # Test native with NON native scalar min/max. 
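+        # Added illustration: byte-swapping changes only the storage order,
+        # never the value, which is what makes the comparison below valid:
+        swapped = np.array(0.5).astype(np.dtype('f8').newbyteorder())
+        assert_(not swapped.dtype.isnative)
+        assert_equal(swapped, 0.5)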
+ a = self._generate_data(self.nr, self.nc) + m = 0.5 + m_s = self._neg_byteorder(m) + M = 1. + act = self.clip(a, m_s, M) + ac = self.fastclip(a, m_s, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_07(self): + # Test NON native with native array min/max. + a = self._generate_data(self.nr, self.nc) + m = -0.5 * np.ones(a.shape) + M = 1. + a_s = self._neg_byteorder(a) + assert_(not a_s.dtype.isnative) + act = a_s.clip(m, M) + ac = self.fastclip(a_s, m, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_08(self): + # Test NON native with native scalar min/max. + a = self._generate_data(self.nr, self.nc) + m = -0.5 + M = 1. + a_s = self._neg_byteorder(a) + assert_(not a_s.dtype.isnative) + ac = self.fastclip(a_s, m, M) + act = a_s.clip(m, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_09(self): + # Test native with NON native array min/max. + a = self._generate_data(self.nr, self.nc) + m = -0.5 * np.ones(a.shape) + M = 1. + m_s = self._neg_byteorder(m) + assert_(not m_s.dtype.isnative) + ac = self.fastclip(a, m_s, M) + act = self.clip(a, m_s, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_10(self): + # Test native int32 with float min/max and float out for output argument. + a = self._generate_int_data(self.nr, self.nc) + b = np.zeros(a.shape, dtype=np.float32) + m = np.float32(-0.5) + M = np.float32(1) + act = self.clip(a, m, M, out=b) + ac = self.fastclip(a, m, M, out=b) + assert_array_strict_equal(ac, act) + + def test_type_cast_11(self): + # Test non native with native scalar, min/max, out non native + a = self._generate_non_native_data(self.nr, self.nc) + b = a.copy() + b = b.astype(b.dtype.newbyteorder('>')) + bt = b.copy() + m = -0.5 + M = 1. + self.fastclip(a, m, M, out=b) + self.clip(a, m, M, out=bt) + assert_array_strict_equal(b, bt) + + def test_type_cast_12(self): + # Test native int32 input and min/max and float out + a = self._generate_int_data(self.nr, self.nc) + b = np.zeros(a.shape, dtype=np.float32) + m = np.int32(0) + M = np.int32(1) + act = self.clip(a, m, M, out=b) + ac = self.fastclip(a, m, M, out=b) + assert_array_strict_equal(ac, act) + + def test_clip_with_out_simple(self): + # Test native double input with scalar min/max + a = self._generate_data(self.nr, self.nc) + m = -0.5 + M = 0.6 + ac = np.zeros(a.shape) + act = np.zeros(a.shape) + self.fastclip(a, m, M, ac) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_clip_with_out_simple2(self): + # Test native int32 input with double min/max and int32 out + a = self._generate_int32_data(self.nr, self.nc) + m = np.float64(0) + M = np.float64(2) + ac = np.zeros(a.shape, dtype=np.int32) + act = ac.copy() + with assert_warns(DeprecationWarning): + # NumPy 1.17.0, 2018-02-24 - casting is unsafe + self.fastclip(a, m, M, ac) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_clip_with_out_simple_int32(self): + # Test native int32 input with int32 scalar min/max and int64 out + a = self._generate_int32_data(self.nr, self.nc) + m = np.int32(-1) + M = np.int32(1) + ac = np.zeros(a.shape, dtype=np.int64) + act = ac.copy() + self.fastclip(a, m, M, ac) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_clip_with_out_array_int32(self): + # Test native int32 input with double array min/max and int32 out + a = self._generate_int32_data(self.nr, self.nc) + m = np.zeros(a.shape, np.float64) + M = np.float64(1) + ac = np.zeros(a.shape, dtype=np.int32) + act = ac.copy() + with 
assert_warns(DeprecationWarning): + # NumPy 1.17.0, 2018-02-24 - casting is unsafe + self.fastclip(a, m, M, ac) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_clip_with_out_array_outint32(self): + # Test native double input with scalar min/max and int out + a = self._generate_data(self.nr, self.nc) + m = -1.0 + M = 2.0 + ac = np.zeros(a.shape, dtype=np.int32) + act = ac.copy() + with assert_warns(DeprecationWarning): + # NumPy 1.17.0, 2018-02-24 - casting is unsafe + self.fastclip(a, m, M, ac) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_clip_with_out_transposed(self): + # Test that the out argument works when transposed + a = np.arange(16).reshape(4, 4) + out = np.empty_like(a).T + a.clip(4, 10, out=out) + expected = self.clip(a, 4, 10) + assert_array_equal(out, expected) + + def test_clip_with_out_memory_overlap(self): + # Test that the out argument works when it has memory overlap + a = np.arange(16).reshape(4, 4) + ac = a.copy() + a[:-1].clip(4, 10, out=a[1:]) + expected = self.clip(ac[:-1], 4, 10) + assert_array_equal(a[1:], expected) + + def test_clip_inplace_array(self): + # Test native double input with array min/max + a = self._generate_data(self.nr, self.nc) + ac = a.copy() + m = np.zeros(a.shape) + M = 1.0 + self.fastclip(a, m, M, a) + self.clip(a, m, M, ac) + assert_array_strict_equal(a, ac) + + def test_clip_inplace_simple(self): + # Test native double input with scalar min/max + a = self._generate_data(self.nr, self.nc) + ac = a.copy() + m = -0.5 + M = 0.6 + self.fastclip(a, m, M, a) + self.clip(a, m, M, ac) + assert_array_strict_equal(a, ac) + + def test_clip_func_takes_out(self): + # Ensure that the clip() function takes an out=argument. + a = self._generate_data(self.nr, self.nc) + ac = a.copy() + m = -0.5 + M = 0.6 + a2 = np.clip(a, m, M, out=a) + self.clip(a, m, M, ac) + assert_array_strict_equal(a2, ac) + assert_(a2 is a) + + def test_clip_nan(self): + d = np.arange(7.) 
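+        # These assertions document deprecated behaviour: a scalar nan bound
+        # used to be ignored, so the data comes back unchanged (hence the
+        # DeprecationWarning). The underlying ufuncs, by contrast, propagate
+        # nan -- an illustrative sketch, not part of the original test:
+        #   >>> import numpy as np
+        #   >>> np.maximum(np.arange(3.), np.nan)
+        #   array([nan, nan, nan])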
+ with assert_warns(DeprecationWarning): + assert_equal(d.clip(min=np.nan), d) + with assert_warns(DeprecationWarning): + assert_equal(d.clip(max=np.nan), d) + with assert_warns(DeprecationWarning): + assert_equal(d.clip(min=np.nan, max=np.nan), d) + with assert_warns(DeprecationWarning): + assert_equal(d.clip(min=-2, max=np.nan), d) + with assert_warns(DeprecationWarning): + assert_equal(d.clip(min=np.nan, max=10), d) + + def test_object_clip(self): + a = np.arange(10, dtype=object) + actual = np.clip(a, 1, 5) + expected = np.array([1, 1, 2, 3, 4, 5, 5, 5, 5, 5]) + assert actual.tolist() == expected.tolist() + + def test_clip_all_none(self): + a = np.arange(10, dtype=object) + with assert_raises_regex(ValueError, 'max or min'): + np.clip(a, None, None) + + def test_clip_invalid_casting(self): + a = np.arange(10, dtype=object) + with assert_raises_regex(ValueError, + 'casting must be one of'): + self.fastclip(a, 1, 8, casting="garbage") + + @pytest.mark.parametrize("amin, amax", [ + # two scalars + (1, 0), + # mix scalar and array + (1, np.zeros(10)), + # two arrays + (np.ones(10), np.zeros(10)), + ]) + def test_clip_value_min_max_flip(self, amin, amax): + a = np.arange(10, dtype=np.int64) + # requirement from ufunc_docstrings.py + expected = np.minimum(np.maximum(a, amin), amax) + actual = np.clip(a, amin, amax) + assert_equal(actual, expected) + + @pytest.mark.parametrize("arr, amin, amax, exp", [ + # for a bug in npy_ObjectClip, based on a + # case produced by hypothesis + (np.zeros(10, dtype=np.int64), + 0, + -2**64+1, + np.full(10, -2**64+1, dtype=object)), + # for bugs in NPY_TIMEDELTA_MAX, based on a case + # produced by hypothesis + (np.zeros(10, dtype='m8') - 1, + 0, + 0, + np.zeros(10, dtype='m8')), + ]) + def test_clip_problem_cases(self, arr, amin, amax, exp): + actual = np.clip(arr, amin, amax) + assert_equal(actual, exp) + + @pytest.mark.xfail(reason="no scalar nan propagation yet", + raises=AssertionError, + strict=True) + @pytest.mark.parametrize("arr, amin, amax", [ + # problematic scalar nan case from hypothesis + (np.zeros(10, dtype=np.int64), + np.array(np.nan), + np.zeros(10, dtype=np.int32)), + ]) + @pytest.mark.filterwarnings("ignore::DeprecationWarning") + def test_clip_scalar_nan_propagation(self, arr, amin, amax): + # enforcement of scalar nan propagation for comparisons + # called through clip() + expected = np.minimum(np.maximum(arr, amin), amax) + actual = np.clip(arr, amin, amax) + assert_equal(actual, expected) + + @pytest.mark.xfail(reason="propagation doesn't match spec") + @pytest.mark.parametrize("arr, amin, amax", [ + (np.array([1] * 10, dtype='m8'), + np.timedelta64('NaT'), + np.zeros(10, dtype=np.int32)), + ]) + @pytest.mark.filterwarnings("ignore::DeprecationWarning") + def test_NaT_propagation(self, arr, amin, amax): + # NOTE: the expected function spec doesn't + # propagate NaT, but clip() now does + expected = np.minimum(np.maximum(arr, amin), amax) + actual = np.clip(arr, amin, amax) + assert_equal(actual, expected) + + @given(data=st.data(), shape=hynp.array_shapes()) + def test_clip_property(self, data, shape): + """A property-based test using Hypothesis. + + This aims for maximum generality: it could in principle generate *any* + valid inputs to np.clip, and in practice generates much more varied + inputs than human testers come up with. 
+
+        Because many of the inputs have tricky dependencies - compatible dtypes
+        and mutually-broadcastable shapes - we use the `st.data()` strategy to
+        draw values *inside* the test function, from strategies we construct
+        based on previous values. An alternative would be to define a custom
+        strategy with `@st.composite`, but until we have duplicated code,
+        keeping it inline is fine.
+
+        That accounts for most of the function; the actual test is just three
+        lines to calculate and compare actual vs expected results!
+        """
+        # Our base array and bounds should not need to be of the same type as
+        # long as they are all compatible - so we allow any int or float type.
+        dtype_strategy = hynp.integer_dtypes() | hynp.floating_dtypes()
+
+        # The following line is a total hack to disable the varied-dtypes
+        # component of this test, because result != expected if dtypes can vary.
+        dtype_strategy = st.just(data.draw(dtype_strategy))
+
+        # Generate an arbitrary array of the chosen shape and dtype
+        # This is the value that we clip.
+        arr = data.draw(hynp.arrays(dtype=dtype_strategy, shape=shape))
+
+        # Generate shapes for the bounds which can be broadcast with each other
+        # and with the base shape. Below, we might decide to use scalar bounds,
+        # but it's clearer to generate these shapes unconditionally in advance.
+        in_shapes, result_shape = data.draw(
+            hynp.mutually_broadcastable_shapes(
+                num_shapes=2,
+                base_shape=shape,
+                # Commenting out the min_dims line allows zero-dimensional arrays,
+                # and zero-dimensional arrays containing NaN make the test fail.
+                min_dims=1
+            )
+        )
+        amin = data.draw(
+            dtype_strategy.flatmap(hynp.from_dtype)
+            | hynp.arrays(dtype=dtype_strategy, shape=in_shapes[0])
+        )
+        amax = data.draw(
+            dtype_strategy.flatmap(hynp.from_dtype)
+            | hynp.arrays(dtype=dtype_strategy, shape=in_shapes[1])
+        )
+        # If we allow either bound to be a scalar `nan`, the test will fail -
+        # so we just "assume" that away (if it is, this raises a special
+        # exception and Hypothesis will try again with different inputs)
+        assume(not np.isscalar(amin) or not np.isnan(amin))
+        assume(not np.isscalar(amax) or not np.isnan(amax))
+
+        # Then calculate our result and expected result and check that they're
+        # equal! See gh-12519 for discussion deciding on this property.
+        result = np.clip(arr, amin, amax)
+        expected = np.minimum(amax, np.maximum(arr, amin))
+        assert_array_equal(result, expected)
+
+
+class TestAllclose:
+    rtol = 1e-5
+    atol = 1e-8
+
+    def setup(self):
+        self.olderr = np.seterr(invalid='ignore')
+
+    def teardown(self):
+        np.seterr(**self.olderr)
+
+    def tst_allclose(self, x, y):
+        assert_(np.allclose(x, y), "%s and %s not close" % (x, y))
+
+    def tst_not_allclose(self, x, y):
+        assert_(not np.allclose(x, y), "%s and %s shouldn't be close" % (x, y))
+
+    def test_ip_allclose(self):
+        # Parametric test factory.
+        arr = np.array([100, 1000])
+        aran = np.arange(125).reshape((5, 5, 5))
+
+        atol = self.atol
+        rtol = self.rtol
+
+        data = [([1, 0], [1, 0]),
+                ([atol], [0]),
+                ([1], [1+rtol+atol]),
+                (arr, arr + arr*rtol),
+                (arr, arr + arr*rtol + atol*2),
+                (aran, aran + aran*rtol),
+                (np.inf, np.inf),
+                (np.inf, [np.inf])]
+
+        for (x, y) in data:
+            self.tst_allclose(x, y)
+
+    def test_ip_not_allclose(self):
+        # Parametric test factory.
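+        # The cases below probe the documented tolerance rule
+        # |x - y| <= atol + rtol * |y| (note it is asymmetric in y), so e.g.
+        # [atol*2] vs [0] must fail even though [atol] vs [0] passes above.
+        # Illustrative sketch with the default tolerances (not part of the
+        # original test):
+        #   >>> import numpy as np
+        #   >>> np.allclose([1e-8], [0.0]), np.allclose([2e-8], [0.0])
+        #   (True, False)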
+ aran = np.arange(125).reshape((5, 5, 5)) + + atol = self.atol + rtol = self.rtol + + data = [([np.inf, 0], [1, np.inf]), + ([np.inf, 0], [1, 0]), + ([np.inf, np.inf], [1, np.inf]), + ([np.inf, np.inf], [1, 0]), + ([-np.inf, 0], [np.inf, 0]), + ([np.nan, 0], [np.nan, 0]), + ([atol*2], [0]), + ([1], [1+rtol+atol*2]), + (aran, aran + aran*atol + atol*2), + (np.array([np.inf, 1]), np.array([0, np.inf]))] + + for (x, y) in data: + self.tst_not_allclose(x, y) + + def test_no_parameter_modification(self): + x = np.array([np.inf, 1]) + y = np.array([0, np.inf]) + np.allclose(x, y) + assert_array_equal(x, np.array([np.inf, 1])) + assert_array_equal(y, np.array([0, np.inf])) + + def test_min_int(self): + # Could make problems because of abs(min_int) == min_int + min_int = np.iinfo(np.int_).min + a = np.array([min_int], dtype=np.int_) + assert_(np.allclose(a, a)) + + def test_equalnan(self): + x = np.array([1.0, np.nan]) + assert_(np.allclose(x, x, equal_nan=True)) + + def test_return_class_is_ndarray(self): + # Issue gh-6475 + # Check that allclose does not preserve subtypes + class Foo(np.ndarray): + def __new__(cls, *args, **kwargs): + return np.array(*args, **kwargs).view(cls) + + a = Foo([1]) + assert_(type(np.allclose(a, a)) is bool) + + +class TestIsclose: + rtol = 1e-5 + atol = 1e-8 + + def setup(self): + atol = self.atol + rtol = self.rtol + arr = np.array([100, 1000]) + aran = np.arange(125).reshape((5, 5, 5)) + + self.all_close_tests = [ + ([1, 0], [1, 0]), + ([atol], [0]), + ([1], [1 + rtol + atol]), + (arr, arr + arr*rtol), + (arr, arr + arr*rtol + atol), + (aran, aran + aran*rtol), + (np.inf, np.inf), + (np.inf, [np.inf]), + ([np.inf, -np.inf], [np.inf, -np.inf]), + ] + self.none_close_tests = [ + ([np.inf, 0], [1, np.inf]), + ([np.inf, -np.inf], [1, 0]), + ([np.inf, np.inf], [1, -np.inf]), + ([np.inf, np.inf], [1, 0]), + ([np.nan, 0], [np.nan, -np.inf]), + ([atol*2], [0]), + ([1], [1 + rtol + atol*2]), + (aran, aran + rtol*1.1*aran + atol*1.1), + (np.array([np.inf, 1]), np.array([0, np.inf])), + ] + self.some_close_tests = [ + ([np.inf, 0], [np.inf, atol*2]), + ([atol, 1, 1e6*(1 + 2*rtol) + atol], [0, np.nan, 1e6]), + (np.arange(3), [0, 1, 2.1]), + (np.nan, [np.nan, np.nan, np.nan]), + ([0], [atol, np.inf, -np.inf, np.nan]), + (0, [atol, np.inf, -np.inf, np.nan]), + ] + self.some_close_results = [ + [True, False], + [True, False, False], + [True, True, False], + [False, False, False], + [True, False, False, False], + [True, False, False, False], + ] + + def test_ip_isclose(self): + self.setup() + tests = self.some_close_tests + results = self.some_close_results + for (x, y), result in zip(tests, results): + assert_array_equal(np.isclose(x, y), result) + + def tst_all_isclose(self, x, y): + assert_(np.all(np.isclose(x, y)), "%s and %s not close" % (x, y)) + + def tst_none_isclose(self, x, y): + msg = "%s and %s shouldn't be close" + assert_(not np.any(np.isclose(x, y)), msg % (x, y)) + + def tst_isclose_allclose(self, x, y): + msg = "isclose.all() and allclose aren't same for %s and %s" + msg2 = "isclose and allclose aren't same for %s and %s" + if np.isscalar(x) and np.isscalar(y): + assert_(np.isclose(x, y) == np.allclose(x, y), msg=msg2 % (x, y)) + else: + assert_array_equal(np.isclose(x, y).all(), np.allclose(x, y), msg % (x, y)) + + def test_ip_all_isclose(self): + self.setup() + for (x, y) in self.all_close_tests: + self.tst_all_isclose(x, y) + + def test_ip_none_isclose(self): + self.setup() + for (x, y) in self.none_close_tests: + self.tst_none_isclose(x, y) + + def 
test_ip_isclose_allclose(self): + self.setup() + tests = (self.all_close_tests + self.none_close_tests + + self.some_close_tests) + for (x, y) in tests: + self.tst_isclose_allclose(x, y) + + def test_equal_nan(self): + assert_array_equal(np.isclose(np.nan, np.nan, equal_nan=True), [True]) + arr = np.array([1.0, np.nan]) + assert_array_equal(np.isclose(arr, arr, equal_nan=True), [True, True]) + + def test_masked_arrays(self): + # Make sure to test the output type when arguments are interchanged. + + x = np.ma.masked_where([True, True, False], np.arange(3)) + assert_(type(x) is type(np.isclose(2, x))) + assert_(type(x) is type(np.isclose(x, 2))) + + x = np.ma.masked_where([True, True, False], [np.nan, np.inf, np.nan]) + assert_(type(x) is type(np.isclose(np.inf, x))) + assert_(type(x) is type(np.isclose(x, np.inf))) + + x = np.ma.masked_where([True, True, False], [np.nan, np.nan, np.nan]) + y = np.isclose(np.nan, x, equal_nan=True) + assert_(type(x) is type(y)) + # Ensure that the mask isn't modified... + assert_array_equal([True, True, False], y.mask) + y = np.isclose(x, np.nan, equal_nan=True) + assert_(type(x) is type(y)) + # Ensure that the mask isn't modified... + assert_array_equal([True, True, False], y.mask) + + x = np.ma.masked_where([True, True, False], [np.nan, np.nan, np.nan]) + y = np.isclose(x, x, equal_nan=True) + assert_(type(x) is type(y)) + # Ensure that the mask isn't modified... + assert_array_equal([True, True, False], y.mask) + + def test_scalar_return(self): + assert_(np.isscalar(np.isclose(1, 1))) + + def test_no_parameter_modification(self): + x = np.array([np.inf, 1]) + y = np.array([0, np.inf]) + np.isclose(x, y) + assert_array_equal(x, np.array([np.inf, 1])) + assert_array_equal(y, np.array([0, np.inf])) + + def test_non_finite_scalar(self): + # GH7014, when two scalars are compared the output should also be a + # scalar + assert_(np.isclose(np.inf, -np.inf) is np.False_) + assert_(np.isclose(0, np.inf) is np.False_) + assert_(type(np.isclose(0, np.inf)) is np.bool_) + + def test_timedelta(self): + # Allclose currently works for timedelta64 as long as `atol` is + # an integer or also a timedelta64 + a = np.array([[1, 2, 3, "NaT"]], dtype="m8[ns]") + assert np.isclose(a, a, atol=0, equal_nan=True).all() + assert np.isclose(a, a, atol=np.timedelta64(1, "ns"), equal_nan=True).all() + assert np.allclose(a, a, atol=0, equal_nan=True) + assert np.allclose(a, a, atol=np.timedelta64(1, "ns"), equal_nan=True) + + +class TestStdVar: + def setup(self): + self.A = np.array([1, -1, 1, -1]) + self.real_var = 1 + + def test_basic(self): + assert_almost_equal(np.var(self.A), self.real_var) + assert_almost_equal(np.std(self.A)**2, self.real_var) + + def test_scalars(self): + assert_equal(np.var(1), 0) + assert_equal(np.std(1), 0) + + def test_ddof1(self): + assert_almost_equal(np.var(self.A, ddof=1), + self.real_var*len(self.A)/float(len(self.A)-1)) + assert_almost_equal(np.std(self.A, ddof=1)**2, + self.real_var*len(self.A)/float(len(self.A)-1)) + + def test_ddof2(self): + assert_almost_equal(np.var(self.A, ddof=2), + self.real_var*len(self.A)/float(len(self.A)-2)) + assert_almost_equal(np.std(self.A, ddof=2)**2, + self.real_var*len(self.A)/float(len(self.A)-2)) + + def test_out_scalar(self): + d = np.arange(10) + out = np.array(0.) 
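+        # Reductions over all axes accept a 0-d array as `out=` and return
+        # that same object, which is what each block below verifies.
+        # Illustrative sketch (not part of the original test):
+        #   >>> import numpy as np
+        #   >>> out = np.array(0.)
+        #   >>> np.mean(np.arange(10), out=out) is out
+        #   True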
+ r = np.std(d, out=out) + assert_(r is out) + assert_array_equal(r, out) + r = np.var(d, out=out) + assert_(r is out) + assert_array_equal(r, out) + r = np.mean(d, out=out) + assert_(r is out) + assert_array_equal(r, out) + + +class TestStdVarComplex: + def test_basic(self): + A = np.array([1, 1.j, -1, -1.j]) + real_var = 1 + assert_almost_equal(np.var(A), real_var) + assert_almost_equal(np.std(A)**2, real_var) + + def test_scalars(self): + assert_equal(np.var(1j), 0) + assert_equal(np.std(1j), 0) + + +class TestCreationFuncs: + # Test ones, zeros, empty and full. + + def setup(self): + dtypes = {np.dtype(tp) for tp in itertools.chain(*np.sctypes.values())} + # void, bytes, str + variable_sized = {tp for tp in dtypes if tp.str.endswith('0')} + self.dtypes = sorted(dtypes - variable_sized | + {np.dtype(tp.str.replace("0", str(i))) + for tp in variable_sized for i in range(1, 10)}, + key=lambda dtype: dtype.str) + self.orders = {'C': 'c_contiguous', 'F': 'f_contiguous'} + self.ndims = 10 + + def check_function(self, func, fill_value=None): + par = ((0, 1, 2), + range(self.ndims), + self.orders, + self.dtypes) + fill_kwarg = {} + if fill_value is not None: + fill_kwarg = {'fill_value': fill_value} + + for size, ndims, order, dtype in itertools.product(*par): + shape = ndims * [size] + + # do not fill void type + if fill_kwarg and dtype.str.startswith('|V'): + continue + + arr = func(shape, order=order, dtype=dtype, + **fill_kwarg) + + assert_equal(arr.dtype, dtype) + assert_(getattr(arr.flags, self.orders[order])) + + if fill_value is not None: + if dtype.str.startswith('|S'): + val = str(fill_value) + else: + val = fill_value + assert_equal(arr, dtype.type(val)) + + def test_zeros(self): + self.check_function(np.zeros) + + def test_ones(self): + self.check_function(np.ones) + + def test_empty(self): + self.check_function(np.empty) + + def test_full(self): + self.check_function(np.full, 0) + self.check_function(np.full, 1) + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_for_reference_leak(self): + # Make sure we have an object for reference + dim = 1 + beg = sys.getrefcount(dim) + np.zeros([dim]*10) + assert_(sys.getrefcount(dim) == beg) + np.ones([dim]*10) + assert_(sys.getrefcount(dim) == beg) + np.empty([dim]*10) + assert_(sys.getrefcount(dim) == beg) + np.full([dim]*10, 0) + assert_(sys.getrefcount(dim) == beg) + + +class TestLikeFuncs: + '''Test ones_like, zeros_like, empty_like and full_like''' + + def setup(self): + self.data = [ + # Array scalars + (np.array(3.), None), + (np.array(3), 'f8'), + # 1D arrays + (np.arange(6, dtype='f4'), None), + (np.arange(6), 'c16'), + # 2D C-layout arrays + (np.arange(6).reshape(2, 3), None), + (np.arange(6).reshape(3, 2), 'i1'), + # 2D F-layout arrays + (np.arange(6).reshape((2, 3), order='F'), None), + (np.arange(6).reshape((3, 2), order='F'), 'i1'), + # 3D C-layout arrays + (np.arange(24).reshape(2, 3, 4), None), + (np.arange(24).reshape(4, 3, 2), 'f4'), + # 3D F-layout arrays + (np.arange(24).reshape((2, 3, 4), order='F'), None), + (np.arange(24).reshape((4, 3, 2), order='F'), 'f4'), + # 3D non-C/F-layout arrays + (np.arange(24).reshape(2, 3, 4).swapaxes(0, 1), None), + (np.arange(24).reshape(4, 3, 2).swapaxes(0, 1), '?'), + ] + self.shapes = [(), (5,), (5,6,), (5,6,7,)] + + def compare_array_value(self, dz, value, fill_value): + if value is not None: + if fill_value: + try: + z = dz.dtype.type(value) + except OverflowError: + pass + else: + assert_(np.all(dz == z)) + else: + assert_(np.all(dz == value)) + + 
def check_like_function(self, like_function, value, fill_value=False): + if fill_value: + fill_kwarg = {'fill_value': value} + else: + fill_kwarg = {} + for d, dtype in self.data: + # default (K) order, dtype + dz = like_function(d, dtype=dtype, **fill_kwarg) + assert_equal(dz.shape, d.shape) + assert_equal(np.array(dz.strides)*d.dtype.itemsize, + np.array(d.strides)*dz.dtype.itemsize) + assert_equal(d.flags.c_contiguous, dz.flags.c_contiguous) + assert_equal(d.flags.f_contiguous, dz.flags.f_contiguous) + if dtype is None: + assert_equal(dz.dtype, d.dtype) + else: + assert_equal(dz.dtype, np.dtype(dtype)) + self.compare_array_value(dz, value, fill_value) + + # C order, default dtype + dz = like_function(d, order='C', dtype=dtype, **fill_kwarg) + assert_equal(dz.shape, d.shape) + assert_(dz.flags.c_contiguous) + if dtype is None: + assert_equal(dz.dtype, d.dtype) + else: + assert_equal(dz.dtype, np.dtype(dtype)) + self.compare_array_value(dz, value, fill_value) + + # F order, default dtype + dz = like_function(d, order='F', dtype=dtype, **fill_kwarg) + assert_equal(dz.shape, d.shape) + assert_(dz.flags.f_contiguous) + if dtype is None: + assert_equal(dz.dtype, d.dtype) + else: + assert_equal(dz.dtype, np.dtype(dtype)) + self.compare_array_value(dz, value, fill_value) + + # A order + dz = like_function(d, order='A', dtype=dtype, **fill_kwarg) + assert_equal(dz.shape, d.shape) + if d.flags.f_contiguous: + assert_(dz.flags.f_contiguous) + else: + assert_(dz.flags.c_contiguous) + if dtype is None: + assert_equal(dz.dtype, d.dtype) + else: + assert_equal(dz.dtype, np.dtype(dtype)) + self.compare_array_value(dz, value, fill_value) + + # Test the 'shape' parameter + for s in self.shapes: + for o in 'CFA': + sz = like_function(d, dtype=dtype, shape=s, order=o, + **fill_kwarg) + assert_equal(sz.shape, s) + if dtype is None: + assert_equal(sz.dtype, d.dtype) + else: + assert_equal(sz.dtype, np.dtype(dtype)) + if o == 'C' or (o == 'A' and d.flags.c_contiguous): + assert_(sz.flags.c_contiguous) + elif o == 'F' or (o == 'A' and d.flags.f_contiguous): + assert_(sz.flags.f_contiguous) + self.compare_array_value(sz, value, fill_value) + + if (d.ndim != len(s)): + assert_equal(np.argsort(like_function(d, dtype=dtype, + shape=s, order='K', + **fill_kwarg).strides), + np.argsort(np.empty(s, dtype=dtype, + order='C').strides)) + else: + assert_equal(np.argsort(like_function(d, dtype=dtype, + shape=s, order='K', + **fill_kwarg).strides), + np.argsort(d.strides)) + + # Test the 'subok' parameter + class MyNDArray(np.ndarray): + pass + + a = np.array([[1, 2], [3, 4]]).view(MyNDArray) + + b = like_function(a, **fill_kwarg) + assert_(type(b) is MyNDArray) + + b = like_function(a, subok=False, **fill_kwarg) + assert_(type(b) is not MyNDArray) + + def test_ones_like(self): + self.check_like_function(np.ones_like, 1) + + def test_zeros_like(self): + self.check_like_function(np.zeros_like, 0) + + def test_empty_like(self): + self.check_like_function(np.empty_like, None) + + def test_filled_like(self): + self.check_like_function(np.full_like, 0, True) + self.check_like_function(np.full_like, 1, True) + self.check_like_function(np.full_like, 1000, True) + self.check_like_function(np.full_like, 123.456, True) + self.check_like_function(np.full_like, np.inf, True) + + +class TestCorrelate: + def _setup(self, dt): + self.x = np.array([1, 2, 3, 4, 5], dtype=dt) + self.xs = np.arange(1, 20)[::3] + self.y = np.array([-1, -2, -3], dtype=dt) + self.z1 = np.array([-3., -8., -14., -20., -26., -14., -5.], dtype=dt) + self.z1_4 = 
np.array([-2., -5., -8., -11., -14., -5.], dtype=dt) + self.z1r = np.array([-15., -22., -22., -16., -10., -4., -1.], dtype=dt) + self.z2 = np.array([-5., -14., -26., -20., -14., -8., -3.], dtype=dt) + self.z2r = np.array([-1., -4., -10., -16., -22., -22., -15.], dtype=dt) + self.zs = np.array([-3., -14., -30., -48., -66., -84., + -102., -54., -19.], dtype=dt) + + def test_float(self): + self._setup(float) + z = np.correlate(self.x, self.y, 'full') + assert_array_almost_equal(z, self.z1) + z = np.correlate(self.x, self.y[:-1], 'full') + assert_array_almost_equal(z, self.z1_4) + z = np.correlate(self.y, self.x, 'full') + assert_array_almost_equal(z, self.z2) + z = np.correlate(self.x[::-1], self.y, 'full') + assert_array_almost_equal(z, self.z1r) + z = np.correlate(self.y, self.x[::-1], 'full') + assert_array_almost_equal(z, self.z2r) + z = np.correlate(self.xs, self.y, 'full') + assert_array_almost_equal(z, self.zs) + + def test_object(self): + self._setup(Decimal) + z = np.correlate(self.x, self.y, 'full') + assert_array_almost_equal(z, self.z1) + z = np.correlate(self.y, self.x, 'full') + assert_array_almost_equal(z, self.z2) + + def test_no_overwrite(self): + d = np.ones(100) + k = np.ones(3) + np.correlate(d, k) + assert_array_equal(d, np.ones(100)) + assert_array_equal(k, np.ones(3)) + + def test_complex(self): + x = np.array([1, 2, 3, 4+1j], dtype=complex) + y = np.array([-1, -2j, 3+1j], dtype=complex) + r_z = np.array([3-1j, 6, 8+1j, 11+5j, -5+8j, -4-1j], dtype=complex) + r_z = r_z[::-1].conjugate() + z = np.correlate(y, x, mode='full') + assert_array_almost_equal(z, r_z) + + def test_zero_size(self): + with pytest.raises(ValueError): + np.correlate(np.array([]), np.ones(1000), mode='full') + with pytest.raises(ValueError): + np.correlate(np.ones(1000), np.array([]), mode='full') + + def test_mode(self): + d = np.ones(100) + k = np.ones(3) + default_mode = np.correlate(d, k, mode='valid') + with assert_warns(DeprecationWarning): + valid_mode = np.correlate(d, k, mode='v') + assert_array_equal(valid_mode, default_mode) + # integer mode + with assert_raises(ValueError): + np.correlate(d, k, mode=-1) + assert_array_equal(np.correlate(d, k, mode=0), valid_mode) + # illegal arguments + with assert_raises(TypeError): + np.correlate(d, k, mode=None) + + +class TestConvolve: + def test_object(self): + d = [1.] * 100 + k = [1.] * 3 + assert_array_almost_equal(np.convolve(d, k)[2:-2], np.full(98, 3)) + + def test_no_overwrite(self): + d = np.ones(100) + k = np.ones(3) + np.convolve(d, k) + assert_array_equal(d, np.ones(100)) + assert_array_equal(k, np.ones(3)) + + def test_mode(self): + d = np.ones(100) + k = np.ones(3) + default_mode = np.convolve(d, k, mode='full') + with assert_warns(DeprecationWarning): + full_mode = np.convolve(d, k, mode='f') + assert_array_equal(full_mode, default_mode) + # integer mode + with assert_raises(ValueError): + np.convolve(d, k, mode=-1) + assert_array_equal(np.convolve(d, k, mode=2), full_mode) + # illegal arguments + with assert_raises(TypeError): + np.convolve(d, k, mode=None) + + +class TestArgwhere: + + @pytest.mark.parametrize('nd', [0, 1, 2]) + def test_nd(self, nd): + # get an nd array with multiple elements in every dimension + x = np.empty((2,)*nd, bool) + + # none + x[...] = False + assert_equal(np.argwhere(x).shape, (0, nd)) + + # only one + x[...] = False + x.flat[0] = True + assert_equal(np.argwhere(x).shape, (1, nd)) + + # all but one + x[...] = True + x.flat[0] = False + assert_equal(np.argwhere(x).shape, (x.size - 1, nd)) + + # all + x[...] 
= True + assert_equal(np.argwhere(x).shape, (x.size, nd)) + + def test_2D(self): + x = np.arange(6).reshape((2, 3)) + assert_array_equal(np.argwhere(x > 1), + [[0, 2], + [1, 0], + [1, 1], + [1, 2]]) + + def test_list(self): + assert_equal(np.argwhere([4, 0, 2, 1, 3]), [[0], [2], [3], [4]]) + + +class TestStringFunction: + + def test_set_string_function(self): + a = np.array([1]) + np.set_string_function(lambda x: "FOO", repr=True) + assert_equal(repr(a), "FOO") + np.set_string_function(None, repr=True) + assert_equal(repr(a), "array([1])") + + np.set_string_function(lambda x: "FOO", repr=False) + assert_equal(str(a), "FOO") + np.set_string_function(None, repr=False) + assert_equal(str(a), "[1]") + + +class TestRoll: + def test_roll1d(self): + x = np.arange(10) + xr = np.roll(x, 2) + assert_equal(xr, np.array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])) + + def test_roll2d(self): + x2 = np.reshape(np.arange(10), (2, 5)) + x2r = np.roll(x2, 1) + assert_equal(x2r, np.array([[9, 0, 1, 2, 3], [4, 5, 6, 7, 8]])) + + x2r = np.roll(x2, 1, axis=0) + assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]])) + + x2r = np.roll(x2, 1, axis=1) + assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]])) + + # Roll multiple axes at once. + x2r = np.roll(x2, 1, axis=(0, 1)) + assert_equal(x2r, np.array([[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]])) + + x2r = np.roll(x2, (1, 0), axis=(0, 1)) + assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]])) + + x2r = np.roll(x2, (-1, 0), axis=(0, 1)) + assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]])) + + x2r = np.roll(x2, (0, 1), axis=(0, 1)) + assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]])) + + x2r = np.roll(x2, (0, -1), axis=(0, 1)) + assert_equal(x2r, np.array([[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]])) + + x2r = np.roll(x2, (1, 1), axis=(0, 1)) + assert_equal(x2r, np.array([[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]])) + + x2r = np.roll(x2, (-1, -1), axis=(0, 1)) + assert_equal(x2r, np.array([[6, 7, 8, 9, 5], [1, 2, 3, 4, 0]])) + + # Roll the same axis multiple times. + x2r = np.roll(x2, 1, axis=(0, 0)) + assert_equal(x2r, np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])) + + x2r = np.roll(x2, 1, axis=(1, 1)) + assert_equal(x2r, np.array([[3, 4, 0, 1, 2], [8, 9, 5, 6, 7]])) + + # Roll more than one turn in either direction. 
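+        # Shifts wrap modulo the axis length, so along an axis of length 5 a
+        # shift of 6 or -4 is the same as a shift of 1 (6 % 5 == -4 % 5 == 1),
+        # which the next two checks rely on. Illustrative one-liner (not part
+        # of the original test):
+        #   >>> import numpy as np
+        #   >>> bool((np.roll(np.arange(5), 6) == np.roll(np.arange(5), 1)).all())
+        #   True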
+        x2r = np.roll(x2, 6, axis=1)
+        assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
+
+        x2r = np.roll(x2, -4, axis=1)
+        assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
+
+    def test_roll_empty(self):
+        x = np.array([])
+        assert_equal(np.roll(x, 1), np.array([]))
+
+
+class TestRollaxis:
+
+    # expected shape indexed by (axis, start) for array of
+    # shape (1, 2, 3, 4)
+    tgtshape = {(0, 0): (1, 2, 3, 4), (0, 1): (1, 2, 3, 4),
+                (0, 2): (2, 1, 3, 4), (0, 3): (2, 3, 1, 4),
+                (0, 4): (2, 3, 4, 1),
+                (1, 0): (2, 1, 3, 4), (1, 1): (1, 2, 3, 4),
+                (1, 2): (1, 2, 3, 4), (1, 3): (1, 3, 2, 4),
+                (1, 4): (1, 3, 4, 2),
+                (2, 0): (3, 1, 2, 4), (2, 1): (1, 3, 2, 4),
+                (2, 2): (1, 2, 3, 4), (2, 3): (1, 2, 3, 4),
+                (2, 4): (1, 2, 4, 3),
+                (3, 0): (4, 1, 2, 3), (3, 1): (1, 4, 2, 3),
+                (3, 2): (1, 2, 4, 3), (3, 3): (1, 2, 3, 4),
+                (3, 4): (1, 2, 3, 4)}
+
+    def test_exceptions(self):
+        a = np.arange(1*2*3*4).reshape(1, 2, 3, 4)
+        assert_raises(np.AxisError, np.rollaxis, a, -5, 0)
+        assert_raises(np.AxisError, np.rollaxis, a, 0, -5)
+        assert_raises(np.AxisError, np.rollaxis, a, 4, 0)
+        assert_raises(np.AxisError, np.rollaxis, a, 0, 5)
+
+    def test_results(self):
+        a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
+        aind = np.indices(a.shape)
+        assert_(a.flags['OWNDATA'])
+        for (i, j) in self.tgtshape:
+            # positive axis, positive start
+            res = np.rollaxis(a, axis=i, start=j)
+            i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
+            assert_(np.all(res[i0, i1, i2, i3] == a))
+            assert_(res.shape == self.tgtshape[(i, j)], str((i, j)))
+            assert_(not res.flags['OWNDATA'])
+
+            # negative axis, positive start
+            ip = i + 1
+            res = np.rollaxis(a, axis=-ip, start=j)
+            i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
+            assert_(np.all(res[i0, i1, i2, i3] == a))
+            assert_(res.shape == self.tgtshape[(4 - ip, j)])
+            assert_(not res.flags['OWNDATA'])
+
+            # positive axis, negative start
+            jp = j + 1 if j < 4 else j
+            res = np.rollaxis(a, axis=i, start=-jp)
+            i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
+            assert_(np.all(res[i0, i1, i2, i3] == a))
+            assert_(res.shape == self.tgtshape[(i, 4 - jp)])
+            assert_(not res.flags['OWNDATA'])
+
+            # negative axis, negative start
+            ip = i + 1
+            jp = j + 1 if j < 4 else j
+            res = np.rollaxis(a, axis=-ip, start=-jp)
+            i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
+            assert_(np.all(res[i0, i1, i2, i3] == a))
+            assert_(res.shape == self.tgtshape[(4 - ip, 4 - jp)])
+            assert_(not res.flags['OWNDATA'])
+
+
+class TestMoveaxis:
+    def test_move_to_end(self):
+        x = np.random.randn(5, 6, 7)
+        for source, expected in [(0, (6, 7, 5)),
+                                 (1, (5, 7, 6)),
+                                 (2, (5, 6, 7)),
+                                 (-1, (5, 6, 7))]:
+            actual = np.moveaxis(x, source, -1).shape
+            # use assert_equal, not assert_(actual, expected): the latter only
+            # truth-tests `actual` and treats `expected` as a message.
+            assert_equal(actual, expected)
+
+    def test_move_new_position(self):
+        x = np.random.randn(1, 2, 3, 4)
+        for source, destination, expected in [
+            (0, 1, (2, 1, 3, 4)),
+            (1, 2, (1, 3, 2, 4)),
+            (1, -1, (1, 3, 4, 2)),
+        ]:
+            actual = np.moveaxis(x, source, destination).shape
+            assert_equal(actual, expected)
+
+    def test_preserve_order(self):
+        x = np.zeros((1, 2, 3, 4))
+        for source, destination in [
+            (0, 0),
+            (3, -1),
+            (-1, 3),
+            ([0, -1], [0, -1]),
+            ([2, 0], [2, 0]),
+            (range(4), range(4)),
+        ]:
+            actual = np.moveaxis(x, source, destination).shape
+            assert_equal(actual, (1, 2, 3, 4))
+
+    def test_move_multiples(self):
+        x = np.zeros((0, 1, 2, 3))
+        for source, destination, expected in [
+            ([0, 1], [2, 3], (2, 3, 0, 1)),
+            ([2, 3], [0, 1], (2, 3, 0, 1)),
+            ([0, 1, 2], [2, 3, 0], (2, 3, 0, 1)),
+            ([3, 0], [1, 0], (0, 3, 1, 2)),
+            ([0, 3], [0, 1], (0, 3, 1, 2)),
+        ]:
+            actual = np.moveaxis(x, source, destination).shape
+            assert_equal(actual, expected)
+
+    def test_errors(self):
+        x = np.random.randn(1, 2, 3)
+        assert_raises_regex(np.AxisError, 'source.*out of bounds',
+                            np.moveaxis, x, 3, 0)
+        assert_raises_regex(np.AxisError, 'source.*out of bounds',
+                            np.moveaxis, x, -4, 0)
+        assert_raises_regex(np.AxisError, 'destination.*out of bounds',
+                            np.moveaxis, x, 0, 5)
+        assert_raises_regex(ValueError, 'repeated axis in `source`',
+                            np.moveaxis, x, [0, 0], [0, 1])
+        assert_raises_regex(ValueError, 'repeated axis in `destination`',
+                            np.moveaxis, x, [0, 1], [1, 1])
+        assert_raises_regex(ValueError, 'must have the same number',
+                            np.moveaxis, x, 0, [0, 1])
+        assert_raises_regex(ValueError, 'must have the same number',
+                            np.moveaxis, x, [0, 1], [0])
+
+    def test_array_likes(self):
+        x = np.ma.zeros((1, 2, 3))
+        result = np.moveaxis(x, 0, 0)
+        assert_equal(x.shape, result.shape)
+        assert_(isinstance(result, np.ma.MaskedArray))
+
+        x = [1, 2, 3]
+        result = np.moveaxis(x, 0, 0)
+        assert_equal(x, list(result))
+        assert_(isinstance(result, np.ndarray))
+
+
+class TestCross:
+    def test_2x2(self):
+        u = [1, 2]
+        v = [3, 4]
+        z = -2
+        cp = np.cross(u, v)
+        assert_equal(cp, z)
+        cp = np.cross(v, u)
+        assert_equal(cp, -z)
+
+    def test_2x3(self):
+        u = [1, 2]
+        v = [3, 4, 5]
+        z = np.array([10, -5, -2])
+        cp = np.cross(u, v)
+        assert_equal(cp, z)
+        cp = np.cross(v, u)
+        assert_equal(cp, -z)
+
+    def test_3x3(self):
+        u = [1, 2, 3]
+        v = [4, 5, 6]
+        z = np.array([-3, 6, -3])
+        cp = np.cross(u, v)
+        assert_equal(cp, z)
+        cp = np.cross(v, u)
+        assert_equal(cp, -z)
+
+    def test_broadcasting(self):
+        # Ticket #2624 (Trac #2032)
+        u = np.tile([1, 2], (11, 1))
+        v = np.tile([3, 4], (11, 1))
+        z = -2
+        assert_equal(np.cross(u, v), z)
+        assert_equal(np.cross(v, u), -z)
+        assert_equal(np.cross(u, u), 0)
+
+        u = np.tile([1, 2], (11, 1)).T
+        v = np.tile([3, 4, 5], (11, 1))
+        z = np.tile([10, -5, -2], (11, 1))
+        assert_equal(np.cross(u, v, axisa=0), z)
+        assert_equal(np.cross(v, u.T), -z)
+        assert_equal(np.cross(v, v), 0)
+
+        u = np.tile([1, 2, 3], (11, 1)).T
+        v = np.tile([3, 4], (11, 1)).T
+        z = np.tile([-12, 9, -2], (11, 1))
+        assert_equal(np.cross(u, v, axisa=0, axisb=0), z)
+        assert_equal(np.cross(v.T, u.T), -z)
+        assert_equal(np.cross(u.T, u.T), 0)
+
+        u = np.tile([1, 2, 3], (5, 1))
+        v = np.tile([4, 5, 6], (5, 1)).T
+        z = np.tile([-3, 6, -3], (5, 1))
+        assert_equal(np.cross(u, v, axisb=0), z)
+        assert_equal(np.cross(v.T, u), -z)
+        assert_equal(np.cross(u, u), 0)
+
+    def test_broadcasting_shapes(self):
+        u = np.ones((2, 1, 3))
+        v = np.ones((5, 3))
+        assert_equal(np.cross(u, v).shape, (2, 5, 3))
+        u = np.ones((10, 3, 5))
+        v = np.ones((2, 5))
+        assert_equal(np.cross(u, v, axisa=1, axisb=0).shape, (10, 5, 3))
+        assert_raises(np.AxisError, np.cross, u, v, axisa=1, axisb=2)
+        assert_raises(np.AxisError, np.cross, u, v, axisa=3, axisb=0)
+        u = np.ones((10, 3, 5, 7))
+        v = np.ones((5, 7, 2))
+        assert_equal(np.cross(u, v, axisa=1, axisc=2).shape, (10, 5, 3, 7))
+        assert_raises(np.AxisError, np.cross, u, v, axisa=-5, axisb=2)
+        assert_raises(np.AxisError, np.cross, u, v, axisa=1, axisb=-4)
+        # gh-5885
+        u = np.ones((3, 4, 2))
+        for axisc in range(-2, 2):
+            assert_equal(np.cross(u, u, axisc=axisc).shape, (3, 4))
+
+
+def test_outer_out_param():
+    arr1 = np.ones((5,))
+    arr2 = np.ones((2,))
+    arr3 = np.linspace(-2, 2, 5)
+    out1 = np.ndarray(shape=(5,5))
+    out2 = np.ndarray(shape=(2, 5))
+    res1 = np.outer(arr1, arr3, out1)
+    assert_equal(res1, out1)
+    assert_equal(np.outer(arr2, arr3, out2), out2)
+
+
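+# For context before the tests below (illustrative sketch, not upstream code):
+# np.indices(shape) returns one index grid per dimension, stacked into an
+# array of shape (len(shape),) + shape, e.g.:
+#   >>> import numpy as np
+#   >>> np.indices((2, 3)).shape
+#   (2, 2, 3)
+#   >>> np.indices((2, 3))[1]          # column indices
+#   array([[0, 1, 2],
+#          [0, 1, 2]])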
+class TestIndices: + + def test_simple(self): + [x, y] = np.indices((4, 3)) + assert_array_equal(x, np.array([[0, 0, 0], + [1, 1, 1], + [2, 2, 2], + [3, 3, 3]])) + assert_array_equal(y, np.array([[0, 1, 2], + [0, 1, 2], + [0, 1, 2], + [0, 1, 2]])) + + def test_single_input(self): + [x] = np.indices((4,)) + assert_array_equal(x, np.array([0, 1, 2, 3])) + + [x] = np.indices((4,), sparse=True) + assert_array_equal(x, np.array([0, 1, 2, 3])) + + def test_scalar_input(self): + assert_array_equal([], np.indices(())) + assert_array_equal([], np.indices((), sparse=True)) + assert_array_equal([[]], np.indices((0,))) + assert_array_equal([[]], np.indices((0,), sparse=True)) + + def test_sparse(self): + [x, y] = np.indices((4,3), sparse=True) + assert_array_equal(x, np.array([[0], [1], [2], [3]])) + assert_array_equal(y, np.array([[0, 1, 2]])) + + @pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32, np.float64]) + @pytest.mark.parametrize("dims", [(), (0,), (4, 3)]) + def test_return_type(self, dtype, dims): + inds = np.indices(dims, dtype=dtype) + assert_(inds.dtype == dtype) + + for arr in np.indices(dims, dtype=dtype, sparse=True): + assert_(arr.dtype == dtype) + + +class TestRequire: + flag_names = ['C', 'C_CONTIGUOUS', 'CONTIGUOUS', + 'F', 'F_CONTIGUOUS', 'FORTRAN', + 'A', 'ALIGNED', + 'W', 'WRITEABLE', + 'O', 'OWNDATA'] + + def generate_all_false(self, dtype): + arr = np.zeros((2, 2), [('junk', 'i1'), ('a', dtype)]) + arr.setflags(write=False) + a = arr['a'] + assert_(not a.flags['C']) + assert_(not a.flags['F']) + assert_(not a.flags['O']) + assert_(not a.flags['W']) + assert_(not a.flags['A']) + return a + + def set_and_check_flag(self, flag, dtype, arr): + if dtype is None: + dtype = arr.dtype + b = np.require(arr, dtype, [flag]) + assert_(b.flags[flag]) + assert_(b.dtype == dtype) + + # a further call to np.require ought to return the same array + # unless OWNDATA is specified. 
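+        # Requiring 'O' (OWNDATA) may force a copy of an array that does not
+        # own its data, so identity (`c is b`) can only be asserted for the
+        # other flags; for 'O' the flag itself is re-checked instead.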
+ c = np.require(b, None, [flag]) + if flag[0] != 'O': + assert_(c is b) + else: + assert_(c.flags[flag]) + + def test_require_each(self): + + id = ['f8', 'i4'] + fd = [None, 'f8', 'c16'] + for idtype, fdtype, flag in itertools.product(id, fd, self.flag_names): + a = self.generate_all_false(idtype) + self.set_and_check_flag(flag, fdtype, a) + + def test_unknown_requirement(self): + a = self.generate_all_false('f8') + assert_raises(KeyError, np.require, a, None, 'Q') + + def test_non_array_input(self): + a = np.require([1, 2, 3, 4], 'i4', ['C', 'A', 'O']) + assert_(a.flags['O']) + assert_(a.flags['C']) + assert_(a.flags['A']) + assert_(a.dtype == 'i4') + assert_equal(a, [1, 2, 3, 4]) + + def test_C_and_F_simul(self): + a = self.generate_all_false('f8') + assert_raises(ValueError, np.require, a, None, ['C', 'F']) + + def test_ensure_array(self): + class ArraySubclass(np.ndarray): + pass + + a = ArraySubclass((2, 2)) + b = np.require(a, None, ['E']) + assert_(type(b) is np.ndarray) + + def test_preserve_subtype(self): + class ArraySubclass(np.ndarray): + pass + + for flag in self.flag_names: + a = ArraySubclass((2, 2)) + self.set_and_check_flag(flag, None, a) + + +class TestBroadcast: + def test_broadcast_in_args(self): + # gh-5881 + arrs = [np.empty((6, 7)), np.empty((5, 6, 1)), np.empty((7,)), + np.empty((5, 1, 7))] + mits = [np.broadcast(*arrs), + np.broadcast(np.broadcast(*arrs[:0]), np.broadcast(*arrs[0:])), + np.broadcast(np.broadcast(*arrs[:1]), np.broadcast(*arrs[1:])), + np.broadcast(np.broadcast(*arrs[:2]), np.broadcast(*arrs[2:])), + np.broadcast(arrs[0], np.broadcast(*arrs[1:-1]), arrs[-1])] + for mit in mits: + assert_equal(mit.shape, (5, 6, 7)) + assert_equal(mit.ndim, 3) + assert_equal(mit.nd, 3) + assert_equal(mit.numiter, 4) + for a, ia in zip(arrs, mit.iters): + assert_(a is ia.base) + + def test_broadcast_single_arg(self): + # gh-6899 + arrs = [np.empty((5, 6, 7))] + mit = np.broadcast(*arrs) + assert_equal(mit.shape, (5, 6, 7)) + assert_equal(mit.ndim, 3) + assert_equal(mit.nd, 3) + assert_equal(mit.numiter, 1) + assert_(arrs[0] is mit.iters[0].base) + + def test_number_of_arguments(self): + arr = np.empty((5,)) + for j in range(35): + arrs = [arr] * j + if j > 32: + assert_raises(ValueError, np.broadcast, *arrs) + else: + mit = np.broadcast(*arrs) + assert_equal(mit.numiter, j) + + def test_broadcast_error_kwargs(self): + #gh-13455 + arrs = [np.empty((5, 6, 7))] + mit = np.broadcast(*arrs) + mit2 = np.broadcast(*arrs, **{}) + assert_equal(mit.shape, mit2.shape) + assert_equal(mit.ndim, mit2.ndim) + assert_equal(mit.nd, mit2.nd) + assert_equal(mit.numiter, mit2.numiter) + assert_(mit.iters[0].base is mit2.iters[0].base) + + assert_raises(ValueError, np.broadcast, 1, **{'x': 1}) + +class TestKeepdims: + + class sub_array(np.ndarray): + def sum(self, axis=None, dtype=None, out=None): + return np.ndarray.sum(self, axis, dtype, out, keepdims=True) + + def test_raise(self): + sub_class = self.sub_array + x = np.arange(30).view(sub_class) + assert_raises(TypeError, np.sum, x, keepdims=True) + + +class TestTensordot: + + def test_zero_dimension(self): + # Test resolution to issue #5663 + a = np.ndarray((3,0)) + b = np.ndarray((0,4)) + td = np.tensordot(a, b, (1, 0)) + assert_array_equal(td, np.dot(a, b)) + assert_array_equal(td, np.einsum('ij,jk', a, b)) + + def test_zero_dimensional(self): + # gh-12130 + arr_0d = np.array(1) + ret = np.tensordot(arr_0d, arr_0d, ([], [])) # contracting no axes is well defined + assert_array_equal(ret, arr_0d) diff --git 
a/MLPY/Lib/site-packages/numpy/core/tests/test_numerictypes.py b/MLPY/Lib/site-packages/numpy/core/tests/test_numerictypes.py new file mode 100644 index 0000000000000000000000000000000000000000..fdd91161db132e20e8b5179b9895f0a3af1165b5 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/test_numerictypes.py @@ -0,0 +1,554 @@ +import sys +import itertools + +import pytest +import numpy as np +from numpy.testing import assert_, assert_equal, assert_raises, IS_PYPY + +# This is the structure of the table used for plain objects: +# +# +-+-+-+ +# |x|y|z| +# +-+-+-+ + +# Structure of a plain array description: +Pdescr = [ + ('x', 'i4', (2,)), + ('y', 'f8', (2, 2)), + ('z', 'u1')] + +# A plain list of tuples with values for testing: +PbufferT = [ + # x y z + ([3, 2], [[6., 4.], [6., 4.]], 8), + ([4, 3], [[7., 5.], [7., 5.]], 9), + ] + + +# This is the structure of the table used for nested objects (DON'T PANIC!): +# +# +-+---------------------------------+-----+----------+-+-+ +# |x|Info |color|info |y|z| +# | +-----+--+----------------+----+--+ +----+-----+ | | +# | |value|y2|Info2 |name|z2| |Name|Value| | | +# | | | +----+-----+--+--+ | | | | | | | +# | | | |name|value|y3|z3| | | | | | | | +# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+ +# + +# The corresponding nested array description: +Ndescr = [ + ('x', 'i4', (2,)), + ('Info', [ + ('value', 'c16'), + ('y2', 'f8'), + ('Info2', [ + ('name', 'S2'), + ('value', 'c16', (2,)), + ('y3', 'f8', (2,)), + ('z3', 'u4', (2,))]), + ('name', 'S2'), + ('z2', 'b1')]), + ('color', 'S2'), + ('info', [ + ('Name', 'U8'), + ('Value', 'c16')]), + ('y', 'f8', (2, 2)), + ('z', 'u1')] + +NbufferT = [ + # x Info color info y z + # value y2 Info2 name z2 Name Value + # name value y3 z3 + ([3, 2], (6j, 6., (b'nn', [6j, 4j], [6., 4.], [1, 2]), b'NN', True), b'cc', (u'NN', 6j), [[6., 4.], [6., 4.]], 8), + ([4, 3], (7j, 7., (b'oo', [7j, 5j], [7., 5.], [2, 1]), b'OO', False), b'dd', (u'OO', 7j), [[7., 5.], [7., 5.]], 9), + ] + + +byteorder = {'little':'<', 'big':'>'}[sys.byteorder] + +def normalize_descr(descr): + "Normalize a description adding the platform byteorder." 
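+    # For example (illustrative, assuming a little-endian platform), this
+    # turns [('x', 'i4', (2,)), ('z', 'u1')] into
+    # [('x', '<i4', (2,)), ('z', '|u1')]: multi-byte types gain the platform
+    # prefix, while one-byte types get '|' (byte order not applicable).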
+ + out = [] + for item in descr: + dtype = item[1] + if isinstance(dtype, str): + if dtype[0] not in ['|', '<', '>']: + onebyte = dtype[1:] == "1" + if onebyte or dtype[0] in ['S', 'V', 'b']: + dtype = "|" + dtype + else: + dtype = byteorder + dtype + if len(item) > 2 and np.prod(item[2]) > 1: + nitem = (item[0], dtype, item[2]) + else: + nitem = (item[0], dtype) + out.append(nitem) + elif isinstance(dtype, list): + l = normalize_descr(dtype) + out.append((item[0], l)) + else: + raise ValueError("Expected a str or list and got %s" % + (type(item))) + return out + + +############################################################ +# Creation tests +############################################################ + +class CreateZeros: + """Check the creation of heterogeneous arrays zero-valued""" + + def test_zeros0D(self): + """Check creation of 0-dimensional objects""" + h = np.zeros((), dtype=self._descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) + assert_(h.dtype.fields['x'][0].name[:4] == 'void') + assert_(h.dtype.fields['x'][0].char == 'V') + assert_(h.dtype.fields['x'][0].type == np.void) + # A small check that data is ok + assert_equal(h['z'], np.zeros((), dtype='u1')) + + def test_zerosSD(self): + """Check creation of single-dimensional objects""" + h = np.zeros((2,), dtype=self._descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) + assert_(h.dtype['y'].name[:4] == 'void') + assert_(h.dtype['y'].char == 'V') + assert_(h.dtype['y'].type == np.void) + # A small check that data is ok + assert_equal(h['z'], np.zeros((2,), dtype='u1')) + + def test_zerosMD(self): + """Check creation of multi-dimensional objects""" + h = np.zeros((2, 3), dtype=self._descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) + assert_(h.dtype['z'].name == 'uint8') + assert_(h.dtype['z'].char == 'B') + assert_(h.dtype['z'].type == np.uint8) + # A small check that data is ok + assert_equal(h['z'], np.zeros((2, 3), dtype='u1')) + + +class TestCreateZerosPlain(CreateZeros): + """Check the creation of heterogeneous arrays zero-valued (plain)""" + _descr = Pdescr + +class TestCreateZerosNested(CreateZeros): + """Check the creation of heterogeneous arrays zero-valued (nested)""" + _descr = Ndescr + + +class CreateValues: + """Check the creation of heterogeneous arrays with values""" + + def test_tuple(self): + """Check creation from tuples""" + h = np.array(self._buffer, dtype=self._descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) + if self.multiple_rows: + assert_(h.shape == (2,)) + else: + assert_(h.shape == ()) + + def test_list_of_tuple(self): + """Check creation from list of tuples""" + h = np.array([self._buffer], dtype=self._descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) + if self.multiple_rows: + assert_(h.shape == (1, 2)) + else: + assert_(h.shape == (1,)) + + def test_list_of_list_of_tuple(self): + """Check creation from list of list of tuples""" + h = np.array([[self._buffer]], dtype=self._descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) + if self.multiple_rows: + assert_(h.shape == (1, 1, 2)) + else: + assert_(h.shape == (1, 1)) + + +class TestCreateValuesPlainSingle(CreateValues): + """Check the creation of heterogeneous arrays (plain, single row)""" + _descr = Pdescr + multiple_rows = 0 + _buffer = PbufferT[0] + +class TestCreateValuesPlainMultiple(CreateValues): + """Check the creation of heterogeneous arrays (plain, multiple rows)""" + _descr = Pdescr + multiple_rows = 1 + _buffer = PbufferT + +class 
TestCreateValuesNestedSingle(CreateValues):
+    """Check the creation of heterogeneous arrays (nested, single row)"""
+    _descr = Ndescr
+    multiple_rows = 0
+    _buffer = NbufferT[0]
+
+class TestCreateValuesNestedMultiple(CreateValues):
+    """Check the creation of heterogeneous arrays (nested, multiple rows)"""
+    _descr = Ndescr
+    multiple_rows = 1
+    _buffer = NbufferT
+
+
+############################################################
+# Reading tests
+############################################################
+
+class ReadValuesPlain:
+    """Check the reading of values in heterogeneous arrays (plain)"""
+
+    def test_access_fields(self):
+        h = np.array(self._buffer, dtype=self._descr)
+        if not self.multiple_rows:
+            assert_(h.shape == ())
+            assert_equal(h['x'], np.array(self._buffer[0], dtype='i4'))
+            assert_equal(h['y'], np.array(self._buffer[1], dtype='f8'))
+            assert_equal(h['z'], np.array(self._buffer[2], dtype='u1'))
+        else:
+            assert_(len(h) == 2)
+            assert_equal(h['x'], np.array([self._buffer[0][0],
+                                           self._buffer[1][0]], dtype='i4'))
+            assert_equal(h['y'], np.array([self._buffer[0][1],
+                                           self._buffer[1][1]], dtype='f8'))
+            assert_equal(h['z'], np.array([self._buffer[0][2],
+                                           self._buffer[1][2]], dtype='u1'))
+
+
+class TestReadValuesPlainSingle(ReadValuesPlain):
+    """Check the values of heterogeneous arrays (plain, single row)"""
+    _descr = Pdescr
+    multiple_rows = 0
+    _buffer = PbufferT[0]
+
+class TestReadValuesPlainMultiple(ReadValuesPlain):
+    """Check the values of heterogeneous arrays (plain, multiple rows)"""
+    _descr = Pdescr
+    multiple_rows = 1
+    _buffer = PbufferT
+
+class ReadValuesNested:
+    """Check the reading of values in heterogeneous arrays (nested)"""
+
+    def test_access_top_fields(self):
+        """Check reading the top fields of a nested array"""
+        h = np.array(self._buffer, dtype=self._descr)
+        if not self.multiple_rows:
+            assert_(h.shape == ())
+            assert_equal(h['x'], np.array(self._buffer[0], dtype='i4'))
+            assert_equal(h['y'], np.array(self._buffer[4], dtype='f8'))
+            assert_equal(h['z'], np.array(self._buffer[5], dtype='u1'))
+        else:
+            assert_(len(h) == 2)
+            assert_equal(h['x'], np.array([self._buffer[0][0],
+                                           self._buffer[1][0]], dtype='i4'))
+            assert_equal(h['y'], np.array([self._buffer[0][4],
+                                           self._buffer[1][4]], dtype='f8'))
+            assert_equal(h['z'], np.array([self._buffer[0][5],
+                                           self._buffer[1][5]], dtype='u1'))
+
+    def test_nested1_accessors(self):
+        """Check reading the nested fields of a nested array (1st level)"""
+        h = np.array(self._buffer, dtype=self._descr)
+        if not self.multiple_rows:
+            assert_equal(h['Info']['value'],
+                         np.array(self._buffer[1][0], dtype='c16'))
+            assert_equal(h['Info']['y2'],
+                         np.array(self._buffer[1][1], dtype='f8'))
+            assert_equal(h['info']['Name'],
+                         np.array(self._buffer[3][0], dtype='U2'))
+            assert_equal(h['info']['Value'],
+                         np.array(self._buffer[3][1], dtype='c16'))
+        else:
+            assert_equal(h['Info']['value'],
+                         np.array([self._buffer[0][1][0],
+                                   self._buffer[1][1][0]],
+                                  dtype='c16'))
+            assert_equal(h['Info']['y2'],
+                         np.array([self._buffer[0][1][1],
+                                   self._buffer[1][1][1]],
+                                  dtype='f8'))
+            assert_equal(h['info']['Name'],
+                         np.array([self._buffer[0][3][0],
+                                   self._buffer[1][3][0]],
+                                  dtype='U2'))
+            assert_equal(h['info']['Value'],
+                         np.array([self._buffer[0][3][1],
+                                   self._buffer[1][3][1]],
+                                  dtype='c16'))
+
+    def test_nested2_accessors(self):
+        """Check reading the nested fields of a nested array (2nd level)"""
+        h = np.array(self._buffer, dtype=self._descr)
+        if not self.multiple_rows:
+            assert_equal(h['Info']['Info2']['value'],
+
np.array(self._buffer[1][2][1], dtype='c16')) + assert_equal(h['Info']['Info2']['z3'], + np.array(self._buffer[1][2][3], dtype='u4')) + else: + assert_equal(h['Info']['Info2']['value'], + np.array([self._buffer[0][1][2][1], + self._buffer[1][1][2][1]], + dtype='c16')) + assert_equal(h['Info']['Info2']['z3'], + np.array([self._buffer[0][1][2][3], + self._buffer[1][1][2][3]], + dtype='u4')) + + def test_nested1_descriptor(self): + """Check access nested descriptors of a nested array (1st level)""" + h = np.array(self._buffer, dtype=self._descr) + assert_(h.dtype['Info']['value'].name == 'complex128') + assert_(h.dtype['Info']['y2'].name == 'float64') + assert_(h.dtype['info']['Name'].name == 'str256') + assert_(h.dtype['info']['Value'].name == 'complex128') + + def test_nested2_descriptor(self): + """Check access nested descriptors of a nested array (2nd level)""" + h = np.array(self._buffer, dtype=self._descr) + assert_(h.dtype['Info']['Info2']['value'].name == 'void256') + assert_(h.dtype['Info']['Info2']['z3'].name == 'void64') + + +class TestReadValuesNestedSingle(ReadValuesNested): + """Check the values of heterogeneous arrays (nested, single row)""" + _descr = Ndescr + multiple_rows = False + _buffer = NbufferT[0] + +class TestReadValuesNestedMultiple(ReadValuesNested): + """Check the values of heterogeneous arrays (nested, multiple rows)""" + _descr = Ndescr + multiple_rows = True + _buffer = NbufferT + +class TestEmptyField: + def test_assign(self): + a = np.arange(10, dtype=np.float32) + a.dtype = [("int", "<0i4"), ("float", "<2f4")] + assert_(a['int'].shape == (5, 0)) + assert_(a['float'].shape == (5, 2)) + +class TestCommonType: + def test_scalar_loses1(self): + res = np.find_common_type(['f4', 'f4', 'i2'], ['f8']) + assert_(res == 'f4') + + def test_scalar_loses2(self): + res = np.find_common_type(['f4', 'f4'], ['i8']) + assert_(res == 'f4') + + def test_scalar_wins(self): + res = np.find_common_type(['f4', 'f4', 'i2'], ['c8']) + assert_(res == 'c8') + + def test_scalar_wins2(self): + res = np.find_common_type(['u4', 'i4', 'i4'], ['f4']) + assert_(res == 'f8') + + def test_scalar_wins3(self): # doesn't go up to 'f16' on purpose + res = np.find_common_type(['u8', 'i8', 'i8'], ['f8']) + assert_(res == 'f8') + +class TestMultipleFields: + def setup(self): + self.ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8') + + def _bad_call(self): + return self.ary['f0', 'f1'] + + def test_no_tuple(self): + assert_raises(IndexError, self._bad_call) + + def test_return(self): + res = self.ary[['f0', 'f2']].tolist() + assert_(res == [(1, 3), (5, 7)]) + + +class TestIsSubDType: + # scalar types can be promoted into dtypes + wrappers = [np.dtype, lambda x: x] + + def test_both_abstract(self): + assert_(np.issubdtype(np.floating, np.inexact)) + assert_(not np.issubdtype(np.inexact, np.floating)) + + def test_same(self): + for cls in (np.float32, np.int32): + for w1, w2 in itertools.product(self.wrappers, repeat=2): + assert_(np.issubdtype(w1(cls), w2(cls))) + + def test_subclass(self): + # note we cannot promote floating to a dtype, as it would turn into a + # concrete type + for w in self.wrappers: + assert_(np.issubdtype(w(np.float32), np.floating)) + assert_(np.issubdtype(w(np.float64), np.floating)) + + def test_subclass_backwards(self): + for w in self.wrappers: + assert_(not np.issubdtype(np.floating, w(np.float32))) + assert_(not np.issubdtype(np.floating, w(np.float64))) + + def test_sibling_class(self): + for w1, w2 in itertools.product(self.wrappers, repeat=2): + 
assert_(not np.issubdtype(w1(np.float32), w2(np.float64))) + assert_(not np.issubdtype(w1(np.float64), w2(np.float32))) + + def test_nondtype_nonscalartype(self): + # See gh-14619 and gh-9505 which introduced the deprecation to fix + # this. These tests are directly taken from gh-9505 + assert not np.issubdtype(np.float32, 'float64') + assert not np.issubdtype(np.float32, 'f8') + assert not np.issubdtype(np.int32, str) + assert not np.issubdtype(np.int32, 'int64') + assert not np.issubdtype(np.str_, 'void') + # for the following the correct spellings are + # np.integer, np.floating, or np.complexfloating respectively: + assert not np.issubdtype(np.int8, int) # np.int8 is never np.int_ + assert not np.issubdtype(np.float32, float) + assert not np.issubdtype(np.complex64, complex) + assert not np.issubdtype(np.float32, "float") + assert not np.issubdtype(np.float64, "f") + + # Test the same for the correct first datatype and abstract one + # in the case of int, float, complex: + assert np.issubdtype(np.float64, 'float64') + assert np.issubdtype(np.float64, 'f8') + assert np.issubdtype(np.str_, str) + assert np.issubdtype(np.int64, 'int64') + assert np.issubdtype(np.void, 'void') + assert np.issubdtype(np.int8, np.integer) + assert np.issubdtype(np.float32, np.floating) + assert np.issubdtype(np.complex64, np.complexfloating) + assert np.issubdtype(np.float64, "float") + assert np.issubdtype(np.float32, "f") + + +class TestSctypeDict: + def test_longdouble(self): + assert_(np.sctypeDict['f8'] is not np.longdouble) + assert_(np.sctypeDict['c16'] is not np.clongdouble) + + +class TestBitName: + def test_abstract(self): + assert_raises(ValueError, np.core.numerictypes.bitname, np.floating) + + +class TestMaximumSctype: + + # note that parametrizing with sctype['int'] and similar would skip types + # with the same size (gh-11923) + + @pytest.mark.parametrize('t', [np.byte, np.short, np.intc, np.int_, np.longlong]) + def test_int(self, t): + assert_equal(np.maximum_sctype(t), np.sctypes['int'][-1]) + + @pytest.mark.parametrize('t', [np.ubyte, np.ushort, np.uintc, np.uint, np.ulonglong]) + def test_uint(self, t): + assert_equal(np.maximum_sctype(t), np.sctypes['uint'][-1]) + + @pytest.mark.parametrize('t', [np.half, np.single, np.double, np.longdouble]) + def test_float(self, t): + assert_equal(np.maximum_sctype(t), np.sctypes['float'][-1]) + + @pytest.mark.parametrize('t', [np.csingle, np.cdouble, np.clongdouble]) + def test_complex(self, t): + assert_equal(np.maximum_sctype(t), np.sctypes['complex'][-1]) + + @pytest.mark.parametrize('t', [np.bool_, np.object_, np.unicode_, np.bytes_, np.void]) + def test_other(self, t): + assert_equal(np.maximum_sctype(t), t) + + +class Test_sctype2char: + # This function is old enough that we're really just documenting the quirks + # at this point. 
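+    # For reference (illustrative, not part of the original comment):
+    # sctype2char maps a scalar type to its one-character typecode, e.g.
+    #   >>> import numpy as np
+    #   >>> np.sctype2char(np.double)
+    #   'd'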
+
+    def test_scalar_type(self):
+        assert_equal(np.sctype2char(np.double), 'd')
+        assert_equal(np.sctype2char(np.int_), 'l')
+        assert_equal(np.sctype2char(np.unicode_), 'U')
+        assert_equal(np.sctype2char(np.bytes_), 'S')
+
+    def test_other_type(self):
+        assert_equal(np.sctype2char(float), 'd')
+        assert_equal(np.sctype2char(list), 'O')
+        assert_equal(np.sctype2char(np.ndarray), 'O')
+
+    def test_third_party_scalar_type(self):
+        from numpy.core._rational_tests import rational
+        assert_raises(KeyError, np.sctype2char, rational)
+        assert_raises(KeyError, np.sctype2char, rational(1))
+
+    def test_array_instance(self):
+        assert_equal(np.sctype2char(np.array([1.0, 2.0])), 'd')
+
+    def test_abstract_type(self):
+        assert_raises(KeyError, np.sctype2char, np.floating)
+
+    def test_non_type(self):
+        assert_raises(ValueError, np.sctype2char, 1)
+
+@pytest.mark.parametrize("rep, expected", [
+    (np.int32, True),
+    (list, False),
+    (1.1, False),
+    (str, True),
+    (np.dtype(np.float64), True),
+    (np.dtype((np.int16, (3, 4))), True),
+    (np.dtype([('a', np.int8)]), True),
+    ])
+def test_issctype(rep, expected):
+    # ensure proper identification of scalar
+    # data-types by issctype()
+    actual = np.issctype(rep)
+    assert_equal(actual, expected)
+
+
+@pytest.mark.skipif(sys.flags.optimize > 1,
+                    reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1")
+@pytest.mark.xfail(IS_PYPY,
+                   reason="PyPy cannot modify tp_doc after PyType_Ready")
+class TestDocStrings:
+    def test_platform_dependent_aliases(self):
+        if np.int64 is np.int_:
+            assert_('int64' in np.int_.__doc__)
+        elif np.int64 is np.longlong:
+            assert_('int64' in np.longlong.__doc__)
+
+
+class TestScalarTypeNames:
+    # gh-9799
+
+    numeric_types = [
+        np.byte, np.short, np.intc, np.int_, np.longlong,
+        np.ubyte, np.ushort, np.uintc, np.uint, np.ulonglong,
+        np.half, np.single, np.double, np.longdouble,
+        np.csingle, np.cdouble, np.clongdouble,
+    ]
+
+    def test_names_are_unique(self):
+        # none of the above may be aliases for each other
+        assert len(set(self.numeric_types)) == len(self.numeric_types)
+
+        # names must be unique
+        names = [t.__name__ for t in self.numeric_types]
+        assert len(set(names)) == len(names)
+
+    @pytest.mark.parametrize('t', numeric_types)
+    def test_names_reflect_attributes(self, t):
+        """ Test that names correspond to where the type is under ``np.`` """
+        assert getattr(np, t.__name__) is t
+
+    @pytest.mark.parametrize('t', numeric_types)
+    def test_names_are_understood_by_dtype(self, t):
+        """ Test that the dtype constructor maps names back to the type """
+        assert np.dtype(t.__name__).type is t
diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_overrides.py b/MLPY/Lib/site-packages/numpy/core/tests/test_overrides.py
new file mode 100644
index 0000000000000000000000000000000000000000..3a6a3eee0577da06c07e8369470bc6a2a1142ed5
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/core/tests/test_overrides.py
@@ -0,0 +1,584 @@
+import inspect
+import sys
+import os
+import tempfile
+from io import StringIO
+from unittest import mock
+
+import numpy as np
+from numpy.testing import (
+    assert_, assert_equal, assert_raises, assert_raises_regex)
+from numpy.core.overrides import (
+    _get_implementing_args, array_function_dispatch,
+    verify_matching_signatures, ARRAY_FUNCTION_ENABLED)
+from numpy.compat import pickle
+import pytest
+
+
+requires_array_function = pytest.mark.skipif(
+    not ARRAY_FUNCTION_ENABLED,
+    reason="__array_function__ dispatch not enabled.")
+
+
+def _return_not_implemented(self, *args, **kwargs):
+
return NotImplemented + + +# need to define this at the top level to test pickling +@array_function_dispatch(lambda array: (array,)) +def dispatched_one_arg(array): + """Docstring.""" + return 'original' + + +@array_function_dispatch(lambda array1, array2: (array1, array2)) +def dispatched_two_arg(array1, array2): + """Docstring.""" + return 'original' + + +class TestGetImplementingArgs: + + def test_ndarray(self): + array = np.array(1) + + args = _get_implementing_args([array]) + assert_equal(list(args), [array]) + + args = _get_implementing_args([array, array]) + assert_equal(list(args), [array]) + + args = _get_implementing_args([array, 1]) + assert_equal(list(args), [array]) + + args = _get_implementing_args([1, array]) + assert_equal(list(args), [array]) + + def test_ndarray_subclasses(self): + + class OverrideSub(np.ndarray): + __array_function__ = _return_not_implemented + + class NoOverrideSub(np.ndarray): + pass + + array = np.array(1).view(np.ndarray) + override_sub = np.array(1).view(OverrideSub) + no_override_sub = np.array(1).view(NoOverrideSub) + + args = _get_implementing_args([array, override_sub]) + assert_equal(list(args), [override_sub, array]) + + args = _get_implementing_args([array, no_override_sub]) + assert_equal(list(args), [no_override_sub, array]) + + args = _get_implementing_args( + [override_sub, no_override_sub]) + assert_equal(list(args), [override_sub, no_override_sub]) + + def test_ndarray_and_duck_array(self): + + class Other: + __array_function__ = _return_not_implemented + + array = np.array(1) + other = Other() + + args = _get_implementing_args([other, array]) + assert_equal(list(args), [other, array]) + + args = _get_implementing_args([array, other]) + assert_equal(list(args), [array, other]) + + def test_ndarray_subclass_and_duck_array(self): + + class OverrideSub(np.ndarray): + __array_function__ = _return_not_implemented + + class Other: + __array_function__ = _return_not_implemented + + array = np.array(1) + subarray = np.array(1).view(OverrideSub) + other = Other() + + assert_equal(_get_implementing_args([array, subarray, other]), + [subarray, array, other]) + assert_equal(_get_implementing_args([array, other, subarray]), + [subarray, array, other]) + + def test_many_duck_arrays(self): + + class A: + __array_function__ = _return_not_implemented + + class B(A): + __array_function__ = _return_not_implemented + + class C(A): + __array_function__ = _return_not_implemented + + class D: + __array_function__ = _return_not_implemented + + a = A() + b = B() + c = C() + d = D() + + assert_equal(_get_implementing_args([1]), []) + assert_equal(_get_implementing_args([a]), [a]) + assert_equal(_get_implementing_args([a, 1]), [a]) + assert_equal(_get_implementing_args([a, a, a]), [a]) + assert_equal(_get_implementing_args([a, d, a]), [a, d]) + assert_equal(_get_implementing_args([a, b]), [b, a]) + assert_equal(_get_implementing_args([b, a]), [b, a]) + assert_equal(_get_implementing_args([a, b, c]), [b, c, a]) + assert_equal(_get_implementing_args([a, c, b]), [c, b, a]) + + def test_too_many_duck_arrays(self): + namespace = dict(__array_function__=_return_not_implemented) + types = [type('A' + str(i), (object,), namespace) for i in range(33)] + relevant_args = [t() for t in types] + + actual = _get_implementing_args(relevant_args[:32]) + assert_equal(actual, relevant_args[:32]) + + with assert_raises_regex(TypeError, 'distinct argument types'): + _get_implementing_args(relevant_args) + + +class TestNDArrayArrayFunction: + + @requires_array_function + def 
test_method(self): + + class Other: + __array_function__ = _return_not_implemented + + class NoOverrideSub(np.ndarray): + pass + + class OverrideSub(np.ndarray): + __array_function__ = _return_not_implemented + + array = np.array([1]) + other = Other() + no_override_sub = array.view(NoOverrideSub) + override_sub = array.view(OverrideSub) + + result = array.__array_function__(func=dispatched_two_arg, + types=(np.ndarray,), + args=(array, 1.), kwargs={}) + assert_equal(result, 'original') + + result = array.__array_function__(func=dispatched_two_arg, + types=(np.ndarray, Other), + args=(array, other), kwargs={}) + assert_(result is NotImplemented) + + result = array.__array_function__(func=dispatched_two_arg, + types=(np.ndarray, NoOverrideSub), + args=(array, no_override_sub), + kwargs={}) + assert_equal(result, 'original') + + result = array.__array_function__(func=dispatched_two_arg, + types=(np.ndarray, OverrideSub), + args=(array, override_sub), + kwargs={}) + assert_equal(result, 'original') + + with assert_raises_regex(TypeError, 'no implementation found'): + np.concatenate((array, other)) + + expected = np.concatenate((array, array)) + result = np.concatenate((array, no_override_sub)) + assert_equal(result, expected.view(NoOverrideSub)) + result = np.concatenate((array, override_sub)) + assert_equal(result, expected.view(OverrideSub)) + + def test_no_wrapper(self): + # This shouldn't happen unless a user intentionally calls + # __array_function__ with invalid arguments, but check that we raise + # an appropriate error all the same. + array = np.array(1) + func = lambda x: x + with assert_raises_regex(AttributeError, '_implementation'): + array.__array_function__(func=func, types=(np.ndarray,), + args=(array,), kwargs={}) + + +@requires_array_function +class TestArrayFunctionDispatch: + + def test_pickle(self): + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + roundtripped = pickle.loads( + pickle.dumps(dispatched_one_arg, protocol=proto)) + assert_(roundtripped is dispatched_one_arg) + + def test_name_and_docstring(self): + assert_equal(dispatched_one_arg.__name__, 'dispatched_one_arg') + if sys.flags.optimize < 2: + assert_equal(dispatched_one_arg.__doc__, 'Docstring.') + + def test_interface(self): + + class MyArray: + def __array_function__(self, func, types, args, kwargs): + return (self, func, types, args, kwargs) + + original = MyArray() + (obj, func, types, args, kwargs) = dispatched_one_arg(original) + assert_(obj is original) + assert_(func is dispatched_one_arg) + assert_equal(set(types), {MyArray}) + # assert_equal uses the overloaded np.iscomplexobj() internally + assert_(args == (original,)) + assert_equal(kwargs, {}) + + def test_not_implemented(self): + + class MyArray: + def __array_function__(self, func, types, args, kwargs): + return NotImplemented + + array = MyArray() + with assert_raises_regex(TypeError, 'no implementation found'): + dispatched_one_arg(array) + + +@requires_array_function +class TestVerifyMatchingSignatures: + + def test_verify_matching_signatures(self): + + verify_matching_signatures(lambda x: 0, lambda x: 0) + verify_matching_signatures(lambda x=None: 0, lambda x=None: 0) + verify_matching_signatures(lambda x=1: 0, lambda x=None: 0) + + with assert_raises(RuntimeError): + verify_matching_signatures(lambda a: 0, lambda b: 0) + with assert_raises(RuntimeError): + verify_matching_signatures(lambda x: 0, lambda x=None: 0) + with assert_raises(RuntimeError): + verify_matching_signatures(lambda x=None: 0, lambda y=None: 0) + with 
assert_raises(RuntimeError): + verify_matching_signatures(lambda x=1: 0, lambda y=1: 0) + + def test_array_function_dispatch(self): + + with assert_raises(RuntimeError): + @array_function_dispatch(lambda x: (x,)) + def f(y): + pass + + # should not raise + @array_function_dispatch(lambda x: (x,), verify=False) + def f(y): + pass + + +def _new_duck_type_and_implements(): + """Create a duck array type and implements functions.""" + HANDLED_FUNCTIONS = {} + + class MyArray: + def __array_function__(self, func, types, args, kwargs): + if func not in HANDLED_FUNCTIONS: + return NotImplemented + if not all(issubclass(t, MyArray) for t in types): + return NotImplemented + return HANDLED_FUNCTIONS[func](*args, **kwargs) + + def implements(numpy_function): + """Register an __array_function__ implementations.""" + def decorator(func): + HANDLED_FUNCTIONS[numpy_function] = func + return func + return decorator + + return (MyArray, implements) + + +@requires_array_function +class TestArrayFunctionImplementation: + + def test_one_arg(self): + MyArray, implements = _new_duck_type_and_implements() + + @implements(dispatched_one_arg) + def _(array): + return 'myarray' + + assert_equal(dispatched_one_arg(1), 'original') + assert_equal(dispatched_one_arg(MyArray()), 'myarray') + + def test_optional_args(self): + MyArray, implements = _new_duck_type_and_implements() + + @array_function_dispatch(lambda array, option=None: (array,)) + def func_with_option(array, option='default'): + return option + + @implements(func_with_option) + def my_array_func_with_option(array, new_option='myarray'): + return new_option + + # we don't need to implement every option on __array_function__ + # implementations + assert_equal(func_with_option(1), 'default') + assert_equal(func_with_option(1, option='extra'), 'extra') + assert_equal(func_with_option(MyArray()), 'myarray') + with assert_raises(TypeError): + func_with_option(MyArray(), option='extra') + + # but new options on implementations can't be used + result = my_array_func_with_option(MyArray(), new_option='yes') + assert_equal(result, 'yes') + with assert_raises(TypeError): + func_with_option(MyArray(), new_option='no') + + def test_not_implemented(self): + MyArray, implements = _new_duck_type_and_implements() + + @array_function_dispatch(lambda array: (array,), module='my') + def func(array): + return array + + array = np.array(1) + assert_(func(array) is array) + assert_equal(func.__module__, 'my') + + with assert_raises_regex( + TypeError, "no implementation found for 'my.func'"): + func(MyArray()) + + +class TestNDArrayMethods: + + def test_repr(self): + # gh-12162: should still be defined even if __array_function__ doesn't + # implement np.array_repr() + + class MyArray(np.ndarray): + def __array_function__(*args, **kwargs): + return NotImplemented + + array = np.array(1).view(MyArray) + assert_equal(repr(array), 'MyArray(1)') + assert_equal(str(array), '1') + + +class TestNumPyFunctions: + + def test_set_module(self): + assert_equal(np.sum.__module__, 'numpy') + assert_equal(np.char.equal.__module__, 'numpy.char') + assert_equal(np.fft.fft.__module__, 'numpy.fft') + assert_equal(np.linalg.solve.__module__, 'numpy.linalg') + + def test_inspect_sum(self): + signature = inspect.signature(np.sum) + assert_('axis' in signature.parameters) + + @requires_array_function + def test_override_sum(self): + MyArray, implements = _new_duck_type_and_implements() + + @implements(np.sum) + def _(array): + return 'yes' + + assert_equal(np.sum(MyArray()), 'yes') + + 
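
test_override_sum above is the dispatch protocol in miniature: a duck type advertises `__array_function__`, NumPy hands it the public function plus the set of argument types it collected, and the registered implementation wins over NumPy's own. A self-contained sketch of that same mechanism outside the test harness (the `Tagged` class and `HANDLED` registry are illustrative names, not part of this suite):

import numpy as np

# Registry mapping NumPy functions to replacement implementations.
HANDLED = {}

class Tagged:
    # NumPy consults this method instead of coercing Tagged to ndarray.
    def __array_function__(self, func, types, args, kwargs):
        if func not in HANDLED:
            return NotImplemented  # makes np.<func> raise TypeError
        return HANDLED[func](*args, **kwargs)

def tagged_sum(arr, *args, **kwargs):
    return 'tagged-sum'

HANDLED[np.sum] = tagged_sum

print(np.sum(Tagged()))   # 'tagged-sum': dispatched to the override
print(np.sum([1, 2, 3]))  # 6: plain sequences take NumPy's normal path

Returning NotImplemented rather than raising is what lets several duck types negotiate, exactly as the _get_implementing_args tests earlier in this file exercise.
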
@requires_array_function + def test_sum_on_mock_array(self): + + # We need a proxy for mocks because __array_function__ is only looked + # up in the class dict + class ArrayProxy: + def __init__(self, value): + self.value = value + def __array_function__(self, *args, **kwargs): + return self.value.__array_function__(*args, **kwargs) + def __array__(self, *args, **kwargs): + return self.value.__array__(*args, **kwargs) + + proxy = ArrayProxy(mock.Mock(spec=ArrayProxy)) + proxy.value.__array_function__.return_value = 1 + result = np.sum(proxy) + assert_equal(result, 1) + proxy.value.__array_function__.assert_called_once_with( + np.sum, (ArrayProxy,), (proxy,), {}) + proxy.value.__array__.assert_not_called() + + @requires_array_function + def test_sum_forwarding_implementation(self): + + class MyArray(np.ndarray): + + def sum(self, axis, out): + return 'summed' + + def __array_function__(self, func, types, args, kwargs): + return super().__array_function__(func, types, args, kwargs) + + # note: the internal implementation of np.sum() calls the .sum() method + array = np.array(1).view(MyArray) + assert_equal(np.sum(array), 'summed') + + +class TestArrayLike: + def setup(self): + class MyArray(): + def __init__(self, function=None): + self.function = function + + def __array_function__(self, func, types, args, kwargs): + try: + my_func = getattr(self, func.__name__) + except AttributeError: + return NotImplemented + return my_func(*args, **kwargs) + + self.MyArray = MyArray + + class MyNoArrayFunctionArray(): + def __init__(self, function=None): + self.function = function + + self.MyNoArrayFunctionArray = MyNoArrayFunctionArray + + def add_method(self, name, arr_class, enable_value_error=False): + def _definition(*args, **kwargs): + # Check that `like=` isn't propagated downstream + assert 'like' not in kwargs + + if enable_value_error and 'value_error' in kwargs: + raise ValueError + + return arr_class(getattr(arr_class, name)) + setattr(arr_class, name, _definition) + + def func_args(*args, **kwargs): + return args, kwargs + + @requires_array_function + def test_array_like_not_implemented(self): + self.add_method('array', self.MyArray) + + ref = self.MyArray.array() + + with assert_raises_regex(TypeError, 'no implementation found'): + array_like = np.asarray(1, like=ref) + + _array_tests = [ + ('array', *func_args((1,))), + ('asarray', *func_args((1,))), + ('asanyarray', *func_args((1,))), + ('ascontiguousarray', *func_args((2, 3))), + ('asfortranarray', *func_args((2, 3))), + ('require', *func_args((np.arange(6).reshape(2, 3),), + requirements=['A', 'F'])), + ('empty', *func_args((1,))), + ('full', *func_args((1,), 2)), + ('ones', *func_args((1,))), + ('zeros', *func_args((1,))), + ('arange', *func_args(3)), + ('frombuffer', *func_args(b'\x00' * 8, dtype=int)), + ('fromiter', *func_args(range(3), dtype=int)), + ('fromstring', *func_args('1,2', dtype=int, sep=',')), + ('loadtxt', *func_args(lambda: StringIO('0 1\n2 3'))), + ('genfromtxt', *func_args(lambda: StringIO(u'1,2.1'), + dtype=[('int', 'i8'), ('float', 'f8')], + delimiter=',')), + ] + + @pytest.mark.parametrize('function, args, kwargs', _array_tests) + @pytest.mark.parametrize('numpy_ref', [True, False]) + @requires_array_function + def test_array_like(self, function, args, kwargs, numpy_ref): + self.add_method('array', self.MyArray) + self.add_method(function, self.MyArray) + np_func = getattr(np, function) + my_func = getattr(self.MyArray, function) + + if numpy_ref is True: + ref = np.array(1) + else: + ref = self.MyArray.array() + 
+ like_args = tuple(a() if callable(a) else a for a in args) + array_like = np_func(*like_args, **kwargs, like=ref) + + if numpy_ref is True: + assert type(array_like) is np.ndarray + + np_args = tuple(a() if callable(a) else a for a in args) + np_arr = np_func(*np_args, **kwargs) + + # Special-case np.empty to ensure values match + if function == "empty": + np_arr.fill(1) + array_like.fill(1) + + assert_equal(array_like, np_arr) + else: + assert type(array_like) is self.MyArray + assert array_like.function is my_func + + @pytest.mark.parametrize('function, args, kwargs', _array_tests) + @pytest.mark.parametrize('ref', [1, [1], "MyNoArrayFunctionArray"]) + @requires_array_function + def test_no_array_function_like(self, function, args, kwargs, ref): + self.add_method('array', self.MyNoArrayFunctionArray) + self.add_method(function, self.MyNoArrayFunctionArray) + np_func = getattr(np, function) + + # Instantiate ref if it's the MyNoArrayFunctionArray class + if ref == "MyNoArrayFunctionArray": + ref = self.MyNoArrayFunctionArray.array() + + like_args = tuple(a() if callable(a) else a for a in args) + + with assert_raises_regex(TypeError, + 'The `like` argument must be an array-like that implements'): + np_func(*like_args, **kwargs, like=ref) + + @pytest.mark.parametrize('numpy_ref', [True, False]) + def test_array_like_fromfile(self, numpy_ref): + self.add_method('array', self.MyArray) + self.add_method("fromfile", self.MyArray) + + if numpy_ref is True: + ref = np.array(1) + else: + ref = self.MyArray.array() + + data = np.random.random(5) + + with tempfile.TemporaryDirectory() as tmpdir: + fname = os.path.join(tmpdir, "testfile") + data.tofile(fname) + + array_like = np.fromfile(fname, like=ref) + if numpy_ref is True: + assert type(array_like) is np.ndarray + np_res = np.fromfile(fname, like=ref) + assert_equal(np_res, data) + assert_equal(array_like, np_res) + else: + assert type(array_like) is self.MyArray + assert array_like.function is self.MyArray.fromfile + + @requires_array_function + def test_exception_handling(self): + self.add_method('array', self.MyArray, enable_value_error=True) + + ref = self.MyArray.array() + + with assert_raises(TypeError): + # Raises the error about `value_error` being invalid first + np.array(1, value_error=True, like=ref) diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_print.py b/MLPY/Lib/site-packages/numpy/core/tests/test_print.py new file mode 100644 index 0000000000000000000000000000000000000000..5c90ebc8bb5ef514149fb7f3506a9b51ffd2321c --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/test_print.py @@ -0,0 +1,200 @@ +import sys + +import pytest + +import numpy as np +from numpy.testing import assert_, assert_equal +from numpy.core.tests._locales import CommaDecimalPointLocale + + +from io import StringIO + +_REF = {np.inf: 'inf', -np.inf: '-inf', np.nan: 'nan'} + + +@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble]) +def test_float_types(tp): + """ Check formatting. + + This is only for the str function, and only for simple types. + The precision of np.float32 and np.longdouble aren't the same as the + python float precision. 
+ + """ + for x in [0, 1, -1, 1e20]: + assert_equal(str(tp(x)), str(float(x)), + err_msg='Failed str formatting for type %s' % tp) + + if tp(1e16).itemsize > 4: + assert_equal(str(tp(1e16)), str(float('1e16')), + err_msg='Failed str formatting for type %s' % tp) + else: + ref = '1e+16' + assert_equal(str(tp(1e16)), ref, + err_msg='Failed str formatting for type %s' % tp) + + +@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble]) +def test_nan_inf_float(tp): + """ Check formatting of nan & inf. + + This is only for the str function, and only for simple types. + The precision of np.float32 and np.longdouble aren't the same as the + python float precision. + + """ + for x in [np.inf, -np.inf, np.nan]: + assert_equal(str(tp(x)), _REF[x], + err_msg='Failed str formatting for type %s' % tp) + + +@pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble]) +def test_complex_types(tp): + """Check formatting of complex types. + + This is only for the str function, and only for simple types. + The precision of np.float32 and np.longdouble aren't the same as the + python float precision. + + """ + for x in [0, 1, -1, 1e20]: + assert_equal(str(tp(x)), str(complex(x)), + err_msg='Failed str formatting for type %s' % tp) + assert_equal(str(tp(x*1j)), str(complex(x*1j)), + err_msg='Failed str formatting for type %s' % tp) + assert_equal(str(tp(x + x*1j)), str(complex(x + x*1j)), + err_msg='Failed str formatting for type %s' % tp) + + if tp(1e16).itemsize > 8: + assert_equal(str(tp(1e16)), str(complex(1e16)), + err_msg='Failed str formatting for type %s' % tp) + else: + ref = '(1e+16+0j)' + assert_equal(str(tp(1e16)), ref, + err_msg='Failed str formatting for type %s' % tp) + + +@pytest.mark.parametrize('dtype', [np.complex64, np.cdouble, np.clongdouble]) +def test_complex_inf_nan(dtype): + """Check inf/nan formatting of complex types.""" + TESTS = { + complex(np.inf, 0): "(inf+0j)", + complex(0, np.inf): "infj", + complex(-np.inf, 0): "(-inf+0j)", + complex(0, -np.inf): "-infj", + complex(np.inf, 1): "(inf+1j)", + complex(1, np.inf): "(1+infj)", + complex(-np.inf, 1): "(-inf+1j)", + complex(1, -np.inf): "(1-infj)", + complex(np.nan, 0): "(nan+0j)", + complex(0, np.nan): "nanj", + complex(-np.nan, 0): "(nan+0j)", + complex(0, -np.nan): "nanj", + complex(np.nan, 1): "(nan+1j)", + complex(1, np.nan): "(1+nanj)", + complex(-np.nan, 1): "(nan+1j)", + complex(1, -np.nan): "(1+nanj)", + } + for c, s in TESTS.items(): + assert_equal(str(dtype(c)), s) + + +# print tests +def _test_redirected_print(x, tp, ref=None): + file = StringIO() + file_tp = StringIO() + stdout = sys.stdout + try: + sys.stdout = file_tp + print(tp(x)) + sys.stdout = file + if ref: + print(ref) + else: + print(x) + finally: + sys.stdout = stdout + + assert_equal(file.getvalue(), file_tp.getvalue(), + err_msg='print failed for type%s' % tp) + + +@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble]) +def test_float_type_print(tp): + """Check formatting when using print """ + for x in [0, 1, -1, 1e20]: + _test_redirected_print(float(x), tp) + + for x in [np.inf, -np.inf, np.nan]: + _test_redirected_print(float(x), tp, _REF[x]) + + if tp(1e16).itemsize > 4: + _test_redirected_print(float(1e16), tp) + else: + ref = '1e+16' + _test_redirected_print(float(1e16), tp, ref) + + +@pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble]) +def test_complex_type_print(tp): + """Check formatting when using print """ + # We do not create complex with inf/nan directly because the feature is + # 
missing in python < 2.6
+    for x in [0, 1, -1, 1e20]:
+        _test_redirected_print(complex(x), tp)
+
+    if tp(1e16).itemsize > 8:
+        _test_redirected_print(complex(1e16), tp)
+    else:
+        ref = '(1e+16+0j)'
+        _test_redirected_print(complex(1e16), tp, ref)
+
+    _test_redirected_print(complex(np.inf, 1), tp, '(inf+1j)')
+    _test_redirected_print(complex(-np.inf, 1), tp, '(-inf+1j)')
+    _test_redirected_print(complex(-np.nan, 1), tp, '(nan+1j)')
+
+
+def test_scalar_format():
+    """Test the str.format method with NumPy scalar types"""
+    tests = [('{0}', True, np.bool_),
+             ('{0}', False, np.bool_),
+             ('{0:d}', 130, np.uint8),
+             ('{0:d}', 50000, np.uint16),
+             ('{0:d}', 3000000000, np.uint32),
+             ('{0:d}', 15000000000000000000, np.uint64),
+             ('{0:d}', -120, np.int8),
+             ('{0:d}', -30000, np.int16),
+             ('{0:d}', -2000000000, np.int32),
+             ('{0:d}', -7000000000000000000, np.int64),
+             ('{0:g}', 1.5, np.float16),
+             ('{0:g}', 1.5, np.float32),
+             ('{0:g}', 1.5, np.float64),
+             ('{0:g}', 1.5, np.longdouble),
+             ('{0:g}', 1.5+0.5j, np.complex64),
+             ('{0:g}', 1.5+0.5j, np.complex128),
+             ('{0:g}', 1.5+0.5j, np.clongdouble)]
+
+    for (fmat, val, valtype) in tests:
+        try:
+            assert_equal(fmat.format(val), fmat.format(valtype(val)),
+                         "failed with val %s, type %s" % (val, valtype))
+        except ValueError as e:
+            assert_(False,
+                    "format raised exception (fmt='%s', val=%s, type=%s, exc='%s')" %
+                    (fmat, repr(val), repr(valtype), str(e)))
+
+
+#
+# Locale tests: scalar types formatting should be independent of the locale
+#
+
+class TestCommaDecimalPointLocale(CommaDecimalPointLocale):
+
+    def test_locale_single(self):
+        assert_equal(str(np.float32(1.2)), str(float(1.2)))
+
+    def test_locale_double(self):
+        assert_equal(str(np.double(1.2)), str(float(1.2)))
+
+    def test_locale_longdouble(self):
+        assert_equal(str(np.longdouble('1.2')), str(float(1.2)))
diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_protocols.py b/MLPY/Lib/site-packages/numpy/core/tests/test_protocols.py
new file mode 100644
index 0000000000000000000000000000000000000000..e519a844fbdf81475b25c17684331d17d91833ac
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/core/tests/test_protocols.py
@@ -0,0 +1,44 @@
+import pytest
+import warnings
+import numpy as np
+
+
+@pytest.mark.filterwarnings("error")
+def test_getattr_warning():
+    # issue gh-14735: make sure we clear only getattr errors, and let warnings
+    # through
+    class Wrapper:
+        def __init__(self, array):
+            self.array = array
+
+        def __len__(self):
+            return len(self.array)
+
+        def __getitem__(self, item):
+            return type(self)(self.array[item])
+
+        def __getattr__(self, name):
+            if name.startswith("__array_"):
+                warnings.warn("object got converted", UserWarning, stacklevel=1)
+
+            return getattr(self.array, name)
+
+        def __repr__(self):
+            return "<Wrapper({self.array})>".format(self=self)
+
+    array = Wrapper(np.arange(10))
+    with pytest.raises(UserWarning, match="object got converted"):
+        np.asarray(array)
+
+
+def test_array_called():
+    class Wrapper:
+        val = '0' * 100
+        def __array__(self, result=None):
+            return np.array([self.val], dtype=object)
+
+
+    wrapped = Wrapper()
+    arr = np.array(wrapped, dtype=str)
+    assert arr.dtype == 'U100'
+    assert arr[0] == Wrapper.val
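
Both tests in test_protocols.py revolve around the `__array__` conversion hook: `np.asarray`/`np.array` call it to obtain an ndarray from a foreign object and then cast the result to any requested dtype, which is how the object array above ends up as 'U100'. A minimal consumer-side sketch, assuming only the documented protocol (the `Reading` class is illustrative, not from the suite):

import numpy as np

class Reading:
    """Duck type that converts to an ndarray via the __array__ protocol."""
    def __init__(self, values):
        self._values = values

    def __array__(self, dtype=None):
        # Called by np.asarray()/np.array(); dtype may or may not be passed,
        # so a None default keeps both call styles working.
        return np.asarray(self._values, dtype=dtype)

r = Reading([1.0, 2.0, 3.0])
print(np.asarray(r).dtype)     # float64, taken from __array__'s result
print(np.array(r, dtype=int))  # [1 2 3], cast to the requested dtype
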
diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_records.py b/MLPY/Lib/site-packages/numpy/core/tests/test_records.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b5bc27de33d9d96194c7cc908ad35aca8fc30e6
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/core/tests/test_records.py
@@ -0,0 +1,520 @@
+import collections.abc
+import textwrap
+from io import BytesIO
+from os import path
+from pathlib import Path
+import pytest
+
+import numpy as np
+from numpy.testing import (
+    assert_, assert_equal, assert_array_equal, assert_array_almost_equal,
+    assert_raises, temppath,
+    )
+from numpy.compat import pickle
+
+
+class TestFromrecords:
+    def test_fromrecords(self):
+        r = np.rec.fromrecords([[456, 'dbe', 1.2], [2, 'de', 1.3]],
+                               names='col1,col2,col3')
+        assert_equal(r[0].item(), (456, 'dbe', 1.2))
+        assert_equal(r['col1'].dtype.kind, 'i')
+        assert_equal(r['col2'].dtype.kind, 'U')
+        assert_equal(r['col2'].dtype.itemsize, 12)
+        assert_equal(r['col3'].dtype.kind, 'f')
+
+    def test_fromrecords_0len(self):
+        """ Verify fromrecords works with a 0-length input """
+        dtype = [('a', float), ('b', float)]
+        r = np.rec.fromrecords([], dtype=dtype)
+        assert_equal(r.shape, (0,))
+
+    def test_fromrecords_2d(self):
+        data = [
+            [(1, 2), (3, 4), (5, 6)],
+            [(6, 5), (4, 3), (2, 1)]
+        ]
+        expected_a = [[1, 3, 5], [6, 4, 2]]
+        expected_b = [[2, 4, 6], [5, 3, 1]]
+
+        # try with dtype
+        r1 = np.rec.fromrecords(data, dtype=[('a', int), ('b', int)])
+        assert_equal(r1['a'], expected_a)
+        assert_equal(r1['b'], expected_b)
+
+        # try with names
+        r2 = np.rec.fromrecords(data, names=['a', 'b'])
+        assert_equal(r2['a'], expected_a)
+        assert_equal(r2['b'], expected_b)
+
+        assert_equal(r1, r2)
+
+    def test_method_array(self):
+        r = np.rec.array(b'abcdefg' * 100, formats='i2,a3,i4', shape=3, byteorder='big')
+        assert_equal(r[1].item(), (25444, b'efg', 1633837924))
+
+    def test_method_array2(self):
+        r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'),
+                         (6, 66, 'f'), (7, 77, 'g')], formats='u1,f4,a1')
+        assert_equal(r[1].item(), (2, 22.0, b'b'))
+
+    def test_recarray_slices(self):
+        r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'),
+                         (6, 66, 'f'), (7, 77, 'g')], formats='u1,f4,a1')
+        assert_equal(r[1::2][1].item(), (4, 44.0, b'd'))
+
+    def test_recarray_fromarrays(self):
+        x1 = np.array([1, 2, 3, 4])
+        x2 = np.array(['a', 'dd', 'xyz', '12'])
+        x3 = np.array([1.1, 2, 3, 4])
+        r = np.rec.fromarrays([x1, x2, x3], names='a,b,c')
+        assert_equal(r[1].item(), (2, 'dd', 2.0))
+        x1[1] = 34
+        assert_equal(r.a, np.array([1, 2, 3, 4]))
+
+    def test_recarray_fromfile(self):
+        data_dir = path.join(path.dirname(__file__), 'data')
+        filename = path.join(data_dir, 'recarray_from_file.fits')
+        fd = open(filename, 'rb')
+        fd.seek(2880 * 2)
+        r1 = np.rec.fromfile(fd, formats='f8,i4,a5', shape=3, byteorder='big')
+        fd.seek(2880 * 2)
+        r2 = np.rec.array(fd, formats='f8,i4,a5', shape=3, byteorder='big')
+        fd.seek(2880 * 2)
+        bytes_array = BytesIO()
+        bytes_array.write(fd.read())
+        bytes_array.seek(0)
+        r3 = np.rec.fromfile(bytes_array, formats='f8,i4,a5', shape=3, byteorder='big')
+        fd.close()
+        assert_equal(r1, r2)
+        assert_equal(r2, r3)
+
+    def test_recarray_from_obj(self):
+        count = 10
+        a = np.zeros(count, dtype='O')
+        b = np.zeros(count, dtype='f8')
+        c = np.zeros(count, dtype='f8')
+        for i in range(len(a)):
+            a[i] = list(range(1, 10))
+
+        mine = np.rec.fromarrays([a, b, c], names='date,data1,data2')
+        for i in range(len(a)):
+            assert_((mine.date[i] == list(range(1, 10))))
+            assert_((mine.data1[i] == 0.0))
+            assert_((mine.data2[i] == 0.0))
+
+    def test_recarray_repr(self):
+        a = np.array([(1, 0.1), (2, 0.2)],
+                     dtype=[('foo', '<i4'), ('bar', '<f8')])
diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_regression.py b/MLPY/Lib/site-packages/numpy/core/tests/test_regression.py
new file mode 100644
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/core/tests/test_regression.py
+class TestRegression:
+    def test_endian_bool_indexing(self):
+        # Ticket #105
+        a = np.arange(10., dtype='>f8')
+        b = np.arange(10., dtype='<f8')
+        xa = np.where((a > 2) & (a < 6))
+        xb = np.where((b > 2) & (b < 6))
+        ya = ((a > 2) & (a < 6))
+        yb = ((b > 2) & (b < 6))
+        assert_array_almost_equal(xa, ya.nonzero())
+        assert_array_almost_equal(xb, yb.nonzero())
+        assert_(np.all(a[ya] > 0.5))
+        assert_(np.all(b[yb] > 0.5))
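
The endian regression tests here (Tickets #105, #111, GitHub issue #369, and the byteswap tests further down) all pin the same invariant: results must depend on array values, never on whether the dtype stores them big-endian ('>') or little-endian ('<'). A short sketch of that invariant and of the byteswap()/newbyteorder() pair these tests lean on (the values are illustrative):

import numpy as np

big = np.arange(5.0, dtype='>f8')     # big-endian storage
little = np.arange(5.0, dtype='<f8')  # little-endian storage

# Comparisons and where() must be byte-order agnostic.
assert np.array_equal(big == little, np.ones(5, dtype=bool))
assert np.array_equal(np.where(big > 2)[0], np.where(little > 2)[0])

# byteswap() swaps the raw bytes; newbyteorder() only relabels the dtype,
# so applying both yields an equal array with the opposite byte order.
swapped = big.byteswap().newbyteorder()
assert swapped.dtype == np.dtype('<f8')
assert np.array_equal(swapped, big)
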
+    def test_endian_where(self):
+        # GitHub issue #369
+        net = np.zeros(3, dtype='>f4')
+        net[1] = 0.00458849
+        net[2] = 0.605202
+        max_net = net.max()
+        test = np.where(net <= 0., max_net, net)
+        correct = np.array([ 0.60520202, 0.00458849, 0.60520202])
+        assert_array_almost_equal(test, correct)
+
+    def test_endian_recarray(self):
+        # Ticket #2185
+        dt = np.dtype([
+            ('head', '>u4'),
+            ('data', '>u4', 2),
+            ])
+        buf = np.recarray(1, dtype=dt)
+        buf[0]['head'] = 1
+        buf[0]['data'][:] = [1, 1]
+
+        h = buf[0]['head']
+        d = buf[0]['data'][0]
+        buf[0]['head'] = h
+        buf[0]['data'][0] = d
+        assert_(buf[0]['head'] == 1)
+
+    def test_mem_dot(self):
+        # Ticket #106
+        x = np.random.randn(0, 1)
+        y = np.random.randn(10, 1)
+        # Dummy array to detect bad memory access:
+        _z = np.ones(10)
+        _dummy = np.empty((0, 10))
+        z = np.lib.stride_tricks.as_strided(_z, _dummy.shape, _dummy.strides)
+        np.dot(x, np.transpose(y), out=z)
+        assert_equal(_z, np.ones(10))
+        # Do the same for the built-in dot:
+        np.core.multiarray.dot(x, np.transpose(y), out=z)
+        assert_equal(_z, np.ones(10))
+
+    def test_arange_endian(self):
+        # Ticket #111
+        ref = np.arange(10)
+        x = np.arange(10, dtype='<f8')
+        assert_array_equal(ref, x)
+        x = np.arange(10, dtype='>f8')
+        assert_array_equal(ref, x)
+
+    def test_void_copyswap(self):
+        dt = np.dtype([('one', '<i4'), ('two', '<i4')])
+        x = np.array((1, 2), dtype=dt)
+        x = x.byteswap()
+        assert_(x['one'] > 1 and x['two'] > 2)
+
+    def test_method_args(self):
+        # Make sure methods and functions have same default axis
+        # keyword and arguments
+        funcs1 = ['argmax', 'argmin', 'sum', ('product', 'prod'),
+                  ('sometrue', 'any'),
+                  ('alltrue', 'all'), 'cumsum', ('cumproduct', 'cumprod'),
+                  'ptp', 'cumprod', 'prod', 'std', 'var', 'mean',
+                  'round', 'min', 'max', 'argsort', 'sort']
+        funcs2 = ['compress', 'take', 'repeat']
+
+        for func in funcs1:
+            arr = np.random.rand(8, 7)
+            arr2 = arr.copy()
+            if isinstance(func, tuple):
+                func_meth = func[1]
+                func = func[0]
+            else:
+                func_meth = func
+            res1 = getattr(arr, func_meth)()
+            res2 = getattr(np, func)(arr2)
+            if res1 is None:
+                res1 = arr
+
+            if res1.dtype.kind in 'uib':
+                assert_((res1 == res2).all(), func)
+            else:
+                assert_(abs(res1-res2).max() < 1e-8, func)
+
+        for func in funcs2:
+            arr1 = np.random.rand(8, 7)
+            arr2 = np.random.rand(8, 7)
+            res1 = None
+            if func == 'compress':
+                arr1 = arr1.ravel()
+                res1 = getattr(arr2, func)(arr1)
+            else:
+                arr2 = (15*arr2).astype(int).ravel()
+            if res1 is None:
+                res1 = getattr(arr1, func)(arr2)
+            res2 = getattr(np, func)(arr1, arr2)
+            assert_(abs(res1-res2).max() < 1e-8, func)
+
+    def test_mem_lexsort_strings(self):
+        # Ticket #298
+        lst = ['abc', 'cde', 'fgh']
+        np.lexsort((lst,))
+
+    def test_fancy_index(self):
+        # Ticket #302
+        x = np.array([1, 2])[np.array([0])]
+        assert_equal(x.shape, (1,))
+
+    def test_recarray_copy(self):
+        # Ticket #312
+        dt = [('x', np.int16), ('y', np.float64)]
+        ra = np.array([(1, 2.3)], dtype=dt)
+        rb = np.rec.array(ra, dtype=dt)
+        rb['x'] = 2.
+ assert_(ra['x'] != rb['x']) + + def test_rec_fromarray(self): + # Ticket #322 + x1 = np.array([[1, 2], [3, 4], [5, 6]]) + x2 = np.array(['a', 'dd', 'xyz']) + x3 = np.array([1.1, 2, 3]) + np.rec.fromarrays([x1, x2, x3], formats="(2,)i4,a3,f8") + + def test_object_array_assign(self): + x = np.empty((2, 2), object) + x.flat[2] = (1, 2, 3) + assert_equal(x.flat[2], (1, 2, 3)) + + def test_ndmin_float64(self): + # Ticket #324 + x = np.array([1, 2, 3], dtype=np.float64) + assert_equal(np.array(x, dtype=np.float32, ndmin=2).ndim, 2) + assert_equal(np.array(x, dtype=np.float64, ndmin=2).ndim, 2) + + def test_ndmin_order(self): + # Issue #465 and related checks + assert_(np.array([1, 2], order='C', ndmin=3).flags.c_contiguous) + assert_(np.array([1, 2], order='F', ndmin=3).flags.f_contiguous) + assert_(np.array(np.ones((2, 2), order='F'), ndmin=3).flags.f_contiguous) + assert_(np.array(np.ones((2, 2), order='C'), ndmin=3).flags.c_contiguous) + + def test_mem_axis_minimization(self): + # Ticket #327 + data = np.arange(5) + data = np.add.outer(data, data) + + def test_mem_float_imag(self): + # Ticket #330 + np.float64(1.0).imag + + def test_dtype_tuple(self): + # Ticket #334 + assert_(np.dtype('i4') == np.dtype(('i4', ()))) + + def test_dtype_posttuple(self): + # Ticket #335 + np.dtype([('col1', '()i4')]) + + def test_numeric_carray_compare(self): + # Ticket #341 + assert_equal(np.array(['X'], 'c'), b'X') + + def test_string_array_size(self): + # Ticket #342 + assert_raises(ValueError, + np.array, [['X'], ['X', 'X', 'X']], '|S1') + + def test_dtype_repr(self): + # Ticket #344 + dt1 = np.dtype(('uint32', 2)) + dt2 = np.dtype(('uint32', (2,))) + assert_equal(dt1.__repr__(), dt2.__repr__()) + + def test_reshape_order(self): + # Make sure reshape order works. + a = np.arange(6).reshape(2, 3, order='F') + assert_equal(a, [[0, 2, 4], [1, 3, 5]]) + a = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) + b = a[:, 1] + assert_equal(b.reshape(2, 2, order='F'), [[2, 6], [4, 8]]) + + def test_reshape_zero_strides(self): + # Issue #380, test reshaping of zero strided arrays + a = np.ones(1) + a = np.lib.stride_tricks.as_strided(a, shape=(5,), strides=(0,)) + assert_(a.reshape(5, 1).strides[0] == 0) + + def test_reshape_zero_size(self): + # GitHub Issue #2700, setting shape failed for 0-sized arrays + a = np.ones((0, 2)) + a.shape = (-1, 2) + + # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides. + # With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous. + @pytest.mark.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max, + reason="Using relaxed stride checking") + def test_reshape_trailing_ones_strides(self): + # GitHub issue gh-2949, bad strides for trailing ones of new shape + a = np.zeros(12, dtype=np.int32)[::2] # not contiguous + strides_c = (16, 8, 8, 8) + strides_f = (8, 24, 48, 48) + assert_equal(a.reshape(3, 2, 1, 1).strides, strides_c) + assert_equal(a.reshape(3, 2, 1, 1, order='F').strides, strides_f) + assert_equal(np.array(0, dtype=np.int32).reshape(1, 1).strides, (4, 4)) + + def test_repeat_discont(self): + # Ticket #352 + a = np.arange(12).reshape(4, 3)[:, 2] + assert_equal(a.repeat(3), [2, 2, 2, 5, 5, 5, 8, 8, 8, 11, 11, 11]) + + def test_array_index(self): + # Make sure optimization is not called in this case. 
+ a = np.array([1, 2, 3]) + a2 = np.array([[1, 2, 3]]) + assert_equal(a[np.where(a == 3)], a2[np.where(a2 == 3)]) + + def test_object_argmax(self): + a = np.array([1, 2, 3], dtype=object) + assert_(a.argmax() == 2) + + def test_recarray_fields(self): + # Ticket #372 + dt0 = np.dtype([('f0', 'i4'), ('f1', 'i4')]) + dt1 = np.dtype([('f0', 'i8'), ('f1', 'i8')]) + for a in [np.array([(1, 2), (3, 4)], "i4,i4"), + np.rec.array([(1, 2), (3, 4)], "i4,i4"), + np.rec.array([(1, 2), (3, 4)]), + np.rec.fromarrays([(1, 2), (3, 4)], "i4,i4"), + np.rec.fromarrays([(1, 2), (3, 4)])]: + assert_(a.dtype in [dt0, dt1]) + + def test_random_shuffle(self): + # Ticket #374 + a = np.arange(5).reshape((5, 1)) + b = a.copy() + np.random.shuffle(b) + assert_equal(np.sort(b, axis=0), a) + + def test_refcount_vdot(self): + # Changeset #3443 + _assert_valid_refcount(np.vdot) + + def test_startswith(self): + ca = np.char.array(['Hi', 'There']) + assert_equal(ca.startswith('H'), [True, False]) + + def test_noncommutative_reduce_accumulate(self): + # Ticket #413 + tosubtract = np.arange(5) + todivide = np.array([2.0, 0.5, 0.25]) + assert_equal(np.subtract.reduce(tosubtract), -10) + assert_equal(np.divide.reduce(todivide), 16.0) + assert_array_equal(np.subtract.accumulate(tosubtract), + np.array([0, -1, -3, -6, -10])) + assert_array_equal(np.divide.accumulate(todivide), + np.array([2., 4., 16.])) + + def test_convolve_empty(self): + # Convolve should raise an error for empty input array. + assert_raises(ValueError, np.convolve, [], [1]) + assert_raises(ValueError, np.convolve, [1], []) + + def test_multidim_byteswap(self): + # Ticket #449 + r = np.array([(1, (0, 1, 2))], dtype="i2,3i2") + assert_array_equal(r.byteswap(), + np.array([(256, (0, 256, 512))], r.dtype)) + + def test_string_NULL(self): + # Changeset 3557 + assert_equal(np.array("a\x00\x0b\x0c\x00").item(), + 'a\x00\x0b\x0c') + + def test_junk_in_string_fields_of_recarray(self): + # Ticket #483 + r = np.array([[b'abc']], dtype=[('var1', '|S20')]) + assert_(asbytes(r['var1'][0][0]) == b'abc') + + def test_take_output(self): + # Ensure that 'take' honours output parameter. + x = np.arange(12).reshape((3, 4)) + a = np.take(x, [0, 2], axis=1) + b = np.zeros_like(a) + np.take(x, [0, 2], axis=1, out=b) + assert_array_equal(a, b) + + def test_take_object_fail(self): + # Issue gh-3001 + d = 123. 
+        a = np.array([d, 1], dtype=object)
+        if HAS_REFCOUNT:
+            ref_d = sys.getrefcount(d)
+        try:
+            a.take([0, 100])
+        except IndexError:
+            pass
+        if HAS_REFCOUNT:
+            assert_(ref_d == sys.getrefcount(d))
+
+    def test_array_str_64bit(self):
+        # Ticket #501
+        s = np.array([1, np.nan], dtype=np.float64)
+        with np.errstate(all='raise'):
+            np.array_str(s)  # Should succeed
+
+    def test_frompyfunc_endian(self):
+        # Ticket #503
+        from math import radians
+        uradians = np.frompyfunc(radians, 1, 1)
+        big_endian = np.array([83.4, 83.5], dtype='>f8')
+        little_endian = np.array([83.4, 83.5], dtype='<f8')
+        assert_almost_equal(uradians(big_endian).astype(float),
+                            uradians(little_endian).astype(float))
+
+    def test_object_casting(self):
+        # This used to trigger the object-type version of
+        # the bitwise_or operation, because float64 -> object
+        # casting succeeds
+        def rs():
+            x = np.ones([484, 286])
+            y = np.zeros([484, 286])
+            x |= y
+
+        assert_raises(TypeError, rs)
+
+    def test_unicode_scalar(self):
+        # Ticket #600
+        x = np.array(["DROND", "DROND1"], dtype="U6")
+        el = x[1]
+        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+            new = pickle.loads(pickle.dumps(el, protocol=proto))
+            assert_equal(new, el)
+
+    def test_arange_non_native_dtype(self):
+        # Ticket #616
+        for T in ('>f4', '<f4'):
+            dt = np.dtype(T)
+            assert_equal(np.arange(0, dtype=dt).dtype, dt)
+            assert_equal(np.arange(0.5, dtype=dt).dtype, dt)
+            assert_equal(np.arange(5, dtype=dt).dtype, dt)
+
+    def test_bool_flat_indexing_invalid_nr_elements(self):
+        s = np.ones(10, dtype=float)
+        x = np.ones((3, 3), dtype=float)
+
+        def ia(x, s, v):
+            x[(s > 0)] = v
+
+        assert_raises(IndexError, ia, x, s, np.zeros(9, dtype=float))
+        assert_raises(IndexError, ia, x, s, np.zeros(11, dtype=float))
+
+        # Old special case (different code path):
+        assert_raises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float))
+        assert_raises(ValueError, ia, x.flat, s, np.zeros(11, dtype=float))
+
+    def test_mem_scalar_indexing(self):
+        # Ticket #603
+        x = np.array([0], dtype=float)
+        index = np.array(0, dtype=np.int32)
+        x[index]
+
+    def test_binary_repr_0_width(self):
+        assert_equal(np.binary_repr(0, width=3), '000')
+
+    def test_fromstring(self):
+        assert_equal(np.fromstring("12:09:09", dtype=int, sep=":"),
+                     [12, 9, 9])
+
+    def test_searchsorted_variable_length(self):
+        x = np.array(['a', 'aa', 'b'])
+        y = np.array(['d', 'e'])
+        assert_equal(x.searchsorted(y), [3, 3])
+
+    def test_string_argsort_with_zeros(self):
+        # Check argsort for strings containing zeros.
+        x = np.frombuffer(b"\x00\x02\x00\x01", dtype="|S2")
+        assert_array_equal(x.argsort(kind='m'), np.array([1, 0]))
+        assert_array_equal(x.argsort(kind='q'), np.array([1, 0]))
+
+    def test_string_sort_with_zeros(self):
+        # Check sort for strings containing zeros.
+        x = np.frombuffer(b"\x00\x02\x00\x01", dtype="|S2")
+        y = np.frombuffer(b"\x00\x01\x00\x02", dtype="|S2")
+        assert_array_equal(np.sort(x, kind="q"), y)
+
+    def test_copy_detection_zero_dim(self):
+        # Ticket #658
+        np.indices((0, 3, 4)).T.reshape(-1, 3)
+
+    def test_flat_byteorder(self):
+        # Ticket #657
+        x = np.arange(10)
+        assert_array_equal(x.astype('>i4'), x.astype('<i4').flat[:])
+        assert_array_equal(x.astype('>i4').flat[:], x.astype('<i4'))
+
+    def test_flat_index_byteswap(self):
+        for dt in (np.dtype('<i4'), np.dtype('>i4')):
+            x = np.array([-1, 0, 1], dtype=dt)
+            assert_equal(x.flat[0].dtype, x[0].dtype)
+
+    def test_copy_detection_corner_case(self):
+        # Ticket #658
+        np.indices((0, 3, 4)).T.reshape(-1, 3)
+
+    # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.
+    # With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous,
+    # 0-sized reshape itself is tested elsewhere.
+    @pytest.mark.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max,
+                        reason="Using relaxed stride checking")
+    def test_copy_detection_corner_case2(self):
+        # Ticket #771: strides are not set correctly when reshaping 0-sized
+        # arrays
+        b = np.indices((0, 3, 4)).T.reshape(-1, 3)
+        assert_equal(b.strides, (3 * b.itemsize, b.itemsize))
+
+    def test_object_array_refcounting(self):
+        # Ticket #633
+        if not hasattr(sys, 'getrefcount'):
+            return
+
+        # NB.
this is probably CPython-specific + + cnt = sys.getrefcount + + a = object() + b = object() + c = object() + + cnt0_a = cnt(a) + cnt0_b = cnt(b) + cnt0_c = cnt(c) + + # -- 0d -> 1-d broadcast slice assignment + + arr = np.zeros(5, dtype=np.object_) + + arr[:] = a + assert_equal(cnt(a), cnt0_a + 5) + + arr[:] = b + assert_equal(cnt(a), cnt0_a) + assert_equal(cnt(b), cnt0_b + 5) + + arr[:2] = c + assert_equal(cnt(b), cnt0_b + 3) + assert_equal(cnt(c), cnt0_c + 2) + + del arr + + # -- 1-d -> 2-d broadcast slice assignment + + arr = np.zeros((5, 2), dtype=np.object_) + arr0 = np.zeros(2, dtype=np.object_) + + arr0[0] = a + assert_(cnt(a) == cnt0_a + 1) + arr0[1] = b + assert_(cnt(b) == cnt0_b + 1) + + arr[:, :] = arr0 + assert_(cnt(a) == cnt0_a + 6) + assert_(cnt(b) == cnt0_b + 6) + + arr[:, 0] = None + assert_(cnt(a) == cnt0_a + 1) + + del arr, arr0 + + # -- 2-d copying + flattening + + arr = np.zeros((5, 2), dtype=np.object_) + + arr[:, 0] = a + arr[:, 1] = b + assert_(cnt(a) == cnt0_a + 5) + assert_(cnt(b) == cnt0_b + 5) + + arr2 = arr.copy() + assert_(cnt(a) == cnt0_a + 10) + assert_(cnt(b) == cnt0_b + 10) + + arr2 = arr[:, 0].copy() + assert_(cnt(a) == cnt0_a + 10) + assert_(cnt(b) == cnt0_b + 5) + + arr2 = arr.flatten() + assert_(cnt(a) == cnt0_a + 10) + assert_(cnt(b) == cnt0_b + 10) + + del arr, arr2 + + # -- concatenate, repeat, take, choose + + arr1 = np.zeros((5, 1), dtype=np.object_) + arr2 = np.zeros((5, 1), dtype=np.object_) + + arr1[...] = a + arr2[...] = b + assert_(cnt(a) == cnt0_a + 5) + assert_(cnt(b) == cnt0_b + 5) + + tmp = np.concatenate((arr1, arr2)) + assert_(cnt(a) == cnt0_a + 5 + 5) + assert_(cnt(b) == cnt0_b + 5 + 5) + + tmp = arr1.repeat(3, axis=0) + assert_(cnt(a) == cnt0_a + 5 + 3*5) + + tmp = arr1.take([1, 2, 3], axis=0) + assert_(cnt(a) == cnt0_a + 5 + 3) + + x = np.array([[0], [1], [0], [1], [1]], int) + tmp = x.choose(arr1, arr2) + assert_(cnt(a) == cnt0_a + 5 + 2) + assert_(cnt(b) == cnt0_b + 5 + 3) + + del tmp # Avoid pyflakes unused variable warning + + def test_mem_custom_float_to_array(self): + # Ticket 702 + class MyFloat: + def __float__(self): + return 1.0 + + tmp = np.atleast_1d([MyFloat()]) + tmp.astype(float) # Should succeed + + def test_object_array_refcount_self_assign(self): + # Ticket #711 + class VictimObject: + deleted = False + + def __del__(self): + self.deleted = True + + d = VictimObject() + arr = np.zeros(5, dtype=np.object_) + arr[:] = d + del d + arr[:] = arr # refcount of 'd' might hit zero here + assert_(not arr[0].deleted) + arr[:] = arr # trying to induce a segfault by doing it again... + assert_(not arr[0].deleted) + + def test_mem_fromiter_invalid_dtype_string(self): + x = [1, 2, 3] + assert_raises(ValueError, + np.fromiter, [xi for xi in x], dtype='S') + + def test_reduce_big_object_array(self): + # Ticket #713 + oldsize = np.setbufsize(10*16) + a = np.array([None]*161, object) + assert_(not np.any(a)) + np.setbufsize(oldsize) + + def test_mem_0d_array_index(self): + # Ticket #714 + np.zeros(10)[np.array(0)] + + def test_nonnative_endian_fill(self): + # Non-native endian arrays were incorrectly filled with scalars + # before r5034. 
+        if sys.byteorder == 'little':
+            dtype = np.dtype('>i4')
+        else:
+            dtype = np.dtype('<i4')
+        x = np.empty([1], dtype=dtype)
+        x.fill(1)
+        assert_equal(x, np.array([1], dtype=dtype))
+
+    def test_blasdot_uninitialized_memory(self):
+        # Ticket #950
+        for m in [0, 1, 2]:
+            for n in [0, 1, 2]:
+                for k in range(3):
+                    # Try to ensure that x->data contains non-zero floats
+                    x = np.array([123456789e199], dtype=np.float64)
+                    if IS_PYPY:
+                        x.resize((m, 0), refcheck=False)
+                    else:
+                        x.resize((m, 0))
+                    y = np.array([123456789e199], dtype=np.float64)
+                    if IS_PYPY:
+                        y.resize((0, n), refcheck=False)
+                    else:
+                        y.resize((0, n))
+
+                    # `dot` should just return zero (m, n) matrix
+                    z = np.dot(x, y)
+                    assert_(np.all(z == 0))
+                    assert_(z.shape == (m, n))
+
+    def test_zeros(self):
+        # Regression test for #1061.
+        # Set a size which cannot fit into a 64 bits signed integer
+        sz = 2 ** 64
+        with assert_raises_regex(ValueError,
+                                 'Maximum allowed dimension exceeded'):
+            np.empty(sz)
+
+    def test_huge_arange(self):
+        # Regression test for #1062.
+        # Set a size which cannot fit into a 64 bits signed integer
+        sz = 2 ** 64
+        with assert_raises_regex(ValueError,
+                                 'Maximum allowed size exceeded'):
+            np.arange(sz)
+            assert_(np.size == sz)
+
+    def test_fromiter_bytes(self):
+        # Ticket #1058
+        a = np.fromiter(list(range(10)), dtype='b')
+        b = np.fromiter(list(range(10)), dtype='B')
+        assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
+        assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
+
+    def test_array_from_sequence_scalar_array(self):
+        # Ticket #1078: segfaults when creating an array with a sequence of
+        # 0d arrays.
+        a = np.array((np.ones(2), np.array(2)), dtype=object)
+        assert_equal(a.shape, (2,))
+        assert_equal(a.dtype, np.dtype(object))
+        assert_equal(a[0], np.ones(2))
+        assert_equal(a[1], np.array(2))
+
+        a = np.array(((1,), np.array(1)), dtype=object)
+        assert_equal(a.shape, (2,))
+        assert_equal(a.dtype, np.dtype(object))
+        assert_equal(a[0], (1,))
+        assert_equal(a[1], np.array(1))
+
+    def test_array_from_sequence_scalar_array2(self):
+        # Ticket #1081: weird array with strange input...
+        t = np.array([np.array([]), np.array(0, object)], dtype=object)
+        assert_equal(t.shape, (2,))
+        assert_equal(t.dtype, np.dtype(object))
+
+    def test_array_too_big(self):
+        # Ticket #1080.
+        assert_raises(ValueError, np.zeros, [975]*7, np.int8)
+        assert_raises(ValueError, np.zeros, [26244]*5, np.int8)
+
+    def test_dtype_keyerrors_(self):
+        # Ticket #1106.
+        dt = np.dtype([('f1', np.uint)])
+        assert_raises(KeyError, dt.__getitem__, "f2")
+        assert_raises(IndexError, dt.__getitem__, 1)
+        assert_raises(TypeError, dt.__getitem__, 0.0)
+
+    def test_lexsort_buffer_length(self):
+        # Ticket #1217, don't segfault.
+        a = np.ones(100, dtype=np.int8)
+        b = np.ones(100, dtype=np.int32)
+        i = np.lexsort((a[::-1], b))
+        assert_equal(i, np.arange(100, dtype=int))
+
+    def test_object_array_to_fixed_string(self):
+        # Ticket #1235.
+        a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_)
+        b = np.array(a, dtype=(np.str_, 8))
+        assert_equal(a, b)
+        c = np.array(a, dtype=(np.str_, 5))
+        assert_equal(c, np.array(['abcde', 'ijklm']))
+        d = np.array(a, dtype=(np.str_, 12))
+        assert_equal(a, d)
+        e = np.empty((2, ), dtype=(np.str_, 8))
+        e[:] = a[:]
+        assert_equal(a, e)
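
test_object_array_to_fixed_string above and the unicode-cast tests just below probe NumPy's fixed-width string casting rules: casting to a '(np.str_, n)' dtype pads or truncates each element to n code points, while a 'U' -> 'S' cast must encode element-wise as ASCII and therefore rejects any code point above 127 with UnicodeEncodeError. A small illustration under those rules (the values are illustrative):

import numpy as np

u = np.array(['abc', 'xyz'], dtype='U3')
print(u.astype('S3'))        # [b'abc' b'xyz']: pure-ASCII data round-trips

try:
    np.array(['\u03a3'], dtype='U1').astype('S1')
except UnicodeEncodeError as exc:
    print('non-ASCII code point rejected:', exc)

# Fixed-width unicode targets truncate rather than fail:
o = np.array(['abcdefgh'], dtype=object)
print(np.array(o, dtype=(np.str_, 5)))   # ['abcde']
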
+    def test_unicode_to_string_cast(self):
+        # Ticket #1240.
+        a = np.array([[u'abc', u'\u03a3'],
+                      [u'asdf', u'erw']],
+                     dtype='U')
+        assert_raises(UnicodeEncodeError, np.array, a, 'S4')
+
+    def test_unicode_to_string_cast_error(self):
+        # gh-15790
+        a = np.array([u'\x80'] * 129, dtype='U3')
+        assert_raises(UnicodeEncodeError, np.array, a, 'S')
+        b = a.reshape(3, 43)[:-1, :-1]
+        assert_raises(UnicodeEncodeError, np.array, b, 'S')
+
+    def test_mixed_string_unicode_array_creation(self):
+        a = np.array(['1234', u'123'])
+        assert_(a.itemsize == 16)
+        a = np.array([u'123', '1234'])
+        assert_(a.itemsize == 16)
+        a = np.array(['1234', u'123', '12345'])
+        assert_(a.itemsize == 20)
+        a = np.array([u'123', '1234', u'12345'])
+        assert_(a.itemsize == 20)
+        a = np.array([u'123', '1234', u'1234'])
+        assert_(a.itemsize == 16)
+
+    def test_misaligned_objects_segfault(self):
+        # Ticket #1198 and #1267
+        a1 = np.zeros((10,), dtype='O,c')
+        a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10')
+        a1['f0'] = a2
+        repr(a1)
+        np.argmax(a1['f0'])
+        a1['f0'][1] = "FOO"
+        a1['f0'] = "FOO"
+        np.array(a1['f0'], dtype='S')
+        np.nonzero(a1['f0'])
+        a1.sort()
+        copy.deepcopy(a1)
+
+    def test_misaligned_scalars_segfault(self):
+        # Ticket #1267
+        s1 = np.array(('a', 'Foo'), dtype='c,O')
+        s2 = np.array(('b', 'Bar'), dtype='c,O')
+        s1['f1'] = s2['f1']
+        s1['f1'] = 'Baz'
+
+    def test_misaligned_dot_product_objects(self):
+        # Ticket #1267
+        # This didn't require a fix, but it's worth testing anyway, because
+        # it may fail if .dot stops enforcing the arrays to be BEHAVED
+        a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c')
+        b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c')
+        np.dot(a['f0'], b['f0'])
+
+    def test_byteswap_complex_scalar(self):
+        # Ticket #1259 and gh-441
+        for dtype in [np.dtype('<'+t) for t in np.typecodes['Complex']]:
+            z = np.array([2.2-1.1j], dtype)
+            x = z[0]  # always native-endian
+            y = x.byteswap()
+            if x.dtype.byteorder == z.dtype.byteorder:
+                # little-endian machine
+                assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype.newbyteorder()))
+            else:
+                # big-endian machine
+                assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype))
+            # double check real and imaginary parts:
+            assert_equal(x.real, y.real.byteswap())
+            assert_equal(x.imag, y.imag.byteswap())
+
+    def test_structured_arrays_with_objects1(self):
+        # Ticket #1299
+        stra = 'aaaa'
+        strb = 'bbbb'
+        x = np.array([[(0, stra), (1, strb)]], 'i8,O')
+        x[x.nonzero()] = x.ravel()[:1]
+        assert_(x[0, 1] == x[0, 0])
+
+    @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+    def test_structured_arrays_with_objects2(self):
+        # Ticket #1299 second test
+        stra = 'aaaa'
+        strb = 'bbbb'
+        numb = sys.getrefcount(strb)
+        numa = sys.getrefcount(stra)
+        x = np.array([[(0, stra), (1, strb)]], 'i8,O')
+        x[x.nonzero()] = x.ravel()[:1]
+        assert_(sys.getrefcount(strb) == numb)
+        assert_(sys.getrefcount(stra) == numa + 2)
+
+    def test_duplicate_title_and_name(self):
+        # Ticket #1254
+        dtspec = [(('a', 'a'), 'i'), ('b', 'i')]
+        assert_raises(ValueError, np.dtype, dtspec)
+
+    def test_signed_integer_division_overflow(self):
+        # Ticket #1317.
+        def test_type(t):
+            min = np.array([np.iinfo(t).min])
+            min //= -1
+
+        with np.errstate(divide="ignore"):
+            for t in (np.int8, np.int16, np.int32, np.int64, int):
+                test_type(t)
+
+    def test_buffer_hashlib(self):
+        from hashlib import sha256
+
+        x = np.array([1, 2, 3], dtype=np.dtype('<i4'))
+
+    def test_0d_string_scalar(self):
+        np.asarray('x', '>c')
+
+    def test_log1p_compiler_shenanigans(self):
+        # Check if log1p is behaving on 32 bit intel systems.
+ assert_(np.isfinite(np.log1p(np.exp2(-53)))) + + def test_fromiter_comparison(self): + a = np.fromiter(list(range(10)), dtype='b') + b = np.fromiter(list(range(10)), dtype='B') + assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) + assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) + + def test_fromstring_crash(self): + # Ticket #1345: the following should not cause a crash + with assert_warns(DeprecationWarning): + np.fromstring(b'aa, aa, 1.0', sep=',') + + def test_ticket_1539(self): + dtypes = [x for x in np.sctypeDict.values() + if (issubclass(x, np.number) + and not issubclass(x, np.timedelta64))] + a = np.array([], np.bool_) # not x[0] because it is unordered + failures = [] + + for x in dtypes: + b = a.astype(x) + for y in dtypes: + c = a.astype(y) + try: + np.dot(b, c) + except TypeError: + failures.append((x, y)) + if failures: + raise AssertionError("Failures: %r" % failures) + + def test_ticket_1538(self): + x = np.finfo(np.float32) + for name in 'eps epsneg max min resolution tiny'.split(): + assert_equal(type(getattr(x, name)), np.float32, + err_msg=name) + + def test_ticket_1434(self): + # Check that the out= argument in var and std has an effect + data = np.array(((1, 2, 3), (4, 5, 6), (7, 8, 9))) + out = np.zeros((3,)) + + ret = data.var(axis=1, out=out) + assert_(ret is out) + assert_array_equal(ret, data.var(axis=1)) + + ret = data.std(axis=1, out=out) + assert_(ret is out) + assert_array_equal(ret, data.std(axis=1)) + + def test_complex_nan_maximum(self): + cnan = complex(0, np.nan) + assert_equal(np.maximum(1, cnan), cnan) + + def test_subclass_int_tuple_assignment(self): + # ticket #1563 + class Subclass(np.ndarray): + def __new__(cls, i): + return np.ones((i,)).view(cls) + + x = Subclass(5) + x[(0,)] = 2 # shouldn't raise an exception + assert_equal(x[0], 2) + + def test_ufunc_no_unnecessary_views(self): + # ticket #1548 + class Subclass(np.ndarray): + pass + x = np.array([1, 2, 3]).view(Subclass) + y = np.add(x, x, x) + assert_equal(id(x), id(y)) + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_take_refcount(self): + # ticket #939 + a = np.arange(16, dtype=float) + a.shape = (4, 4) + lut = np.ones((5 + 3, 4), float) + rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype) + c1 = sys.getrefcount(rgba) + try: + lut.take(a, axis=0, mode='clip', out=rgba) + except TypeError: + pass + c2 = sys.getrefcount(rgba) + assert_equal(c1, c2) + + def test_fromfile_tofile_seeks(self): + # On Python 3, tofile/fromfile used to get (#1610) the Python + # file handle out of sync + f0 = tempfile.NamedTemporaryFile() + f = f0.file + f.write(np.arange(255, dtype='u1').tobytes()) + + f.seek(20) + ret = np.fromfile(f, count=4, dtype='u1') + assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1')) + assert_equal(f.tell(), 24) + + f.seek(40) + np.array([1, 2, 3], dtype='u1').tofile(f) + assert_equal(f.tell(), 43) + + f.seek(40) + data = f.read(3) + assert_equal(data, b"\x01\x02\x03") + + f.seek(80) + f.read(4) + data = np.fromfile(f, dtype='u1', count=4) + assert_equal(data, np.array([84, 85, 86, 87], dtype='u1')) + + f.close() + + def test_complex_scalar_warning(self): + for tp in [np.csingle, np.cdouble, np.clongdouble]: + x = tp(1+2j) + assert_warns(np.ComplexWarning, float, x) + with suppress_warnings() as sup: + sup.filter(np.ComplexWarning) + assert_equal(float(x), float(x.real)) + + def test_complex_scalar_complex_cast(self): + for tp in [np.csingle, np.cdouble, np.clongdouble]: + x = tp(1+2j) + 
assert_equal(complex(x), 1+2j) + + def test_complex_boolean_cast(self): + # Ticket #2218 + for tp in [np.csingle, np.cdouble, np.clongdouble]: + x = np.array([0, 0+0.5j, 0.5+0j], dtype=tp) + assert_equal(x.astype(bool), np.array([0, 1, 1], dtype=bool)) + assert_(np.any(x)) + assert_(np.all(x[1:])) + + def test_uint_int_conversion(self): + x = 2**64 - 1 + assert_equal(int(np.uint64(x)), x) + + def test_duplicate_field_names_assign(self): + ra = np.fromiter(((i*3, i*2) for i in range(10)), dtype='i8,f8') + ra.dtype.names = ('f1', 'f2') + repr(ra) # should not cause a segmentation fault + assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1')) + + def test_eq_string_and_object_array(self): + # From e-mail thread "__eq__ with str and object" (Keith Goodman) + a1 = np.array(['a', 'b'], dtype=object) + a2 = np.array(['a', 'c']) + assert_array_equal(a1 == a2, [True, False]) + assert_array_equal(a2 == a1, [True, False]) + + def test_nonzero_byteswap(self): + a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32) + a.dtype = np.float32 + assert_equal(a.nonzero()[0], [1]) + a = a.byteswap().newbyteorder() + assert_equal(a.nonzero()[0], [1]) # [0] if nonzero() ignores swap + + def test_find_common_type_boolean(self): + # Ticket #1695 + assert_(np.find_common_type([], ['?', '?']) == '?') + + def test_empty_mul(self): + a = np.array([1.]) + a[1:1] *= 2 + assert_equal(a, [1.]) + + def test_array_side_effect(self): + # The second use of itemsize was throwing an exception because in + # ctors.c, discover_itemsize was calling PyObject_Length without + # checking the return code. This failed to get the length of the + # number 2, and the exception hung around until something checked + # PyErr_Occurred() and returned an error. + assert_equal(np.dtype('S10').itemsize, 10) + np.array([['abc', 2], ['long ', '0123456789']], dtype=np.string_) + assert_equal(np.dtype('S10').itemsize, 10) + + def test_any_float(self): + # all and any for floats + a = np.array([0.1, 0.9]) + assert_(np.any(a)) + assert_(np.all(a)) + + def test_large_float_sum(self): + a = np.arange(10000, dtype='f') + assert_equal(a.sum(dtype='d'), a.astype('d').sum()) + + def test_ufunc_casting_out(self): + a = np.array(1.0, dtype=np.float32) + b = np.array(1.0, dtype=np.float64) + c = np.array(1.0, dtype=np.float32) + np.add(a, b, out=c) + assert_equal(c, 2.0) + + def test_array_scalar_contiguous(self): + # Array scalars are both C and Fortran contiguous + assert_(np.array(1.0).flags.c_contiguous) + assert_(np.array(1.0).flags.f_contiguous) + assert_(np.array(np.float32(1.0)).flags.c_contiguous) + assert_(np.array(np.float32(1.0)).flags.f_contiguous) + + def test_squeeze_contiguous(self): + # Similar to GitHub issue #387 + a = np.zeros((1, 2)).squeeze() + b = np.zeros((2, 2, 2), order='F')[:, :, ::2].squeeze() + assert_(a.flags.c_contiguous) + assert_(a.flags.f_contiguous) + assert_(b.flags.f_contiguous) + + def test_squeeze_axis_handling(self): + # Issue #10779 + # Ensure proper handling of objects + # that don't support axis specification + # when squeezing + + class OldSqueeze(np.ndarray): + + def __new__(cls, + input_array): + obj = np.asarray(input_array).view(cls) + return obj + + # it is perfectly reasonable that prior + # to numpy version 1.7.0 a subclass of ndarray + # might have been created that did not expect + # squeeze to have an axis argument + # NOTE: this example is somewhat artificial; + # it is designed to simulate an old API + # expectation to guard against regression + def squeeze(self): + return 
super().squeeze() + + oldsqueeze = OldSqueeze(np.array([[1],[2],[3]])) + + # if no axis argument is specified the old API + # expectation should give the correct result + assert_equal(np.squeeze(oldsqueeze), + np.array([1,2,3])) + + # likewise, axis=None should work perfectly well + # with the old API expectation + assert_equal(np.squeeze(oldsqueeze, axis=None), + np.array([1,2,3])) + + # however, specification of any particular axis + # should raise a TypeError in the context of the + # old API specification, even when using a valid + # axis specification like 1 for this array + with assert_raises(TypeError): + # this would silently succeed for array + # subclasses / objects that did not support + # squeeze axis argument handling before fixing + # Issue #10779 + np.squeeze(oldsqueeze, axis=1) + + # check for the same behavior when using an invalid + # axis specification -- in this case axis=0 does not + # have size 1, but the priority should be to raise + # a TypeError for the axis argument and NOT a + # ValueError for squeezing a non-empty dimension + with assert_raises(TypeError): + np.squeeze(oldsqueeze, axis=0) + + # the new API knows how to handle the axis + # argument and will return a ValueError if + # attempting to squeeze an axis that is not + # of length 1 + with assert_raises(ValueError): + np.squeeze(np.array([[1],[2],[3]]), axis=0) + + def test_reduce_contiguous(self): + # GitHub issue #387 + a = np.add.reduce(np.zeros((2, 1, 2)), (0, 1)) + b = np.add.reduce(np.zeros((2, 1, 2)), 1) + assert_(a.flags.c_contiguous) + assert_(a.flags.f_contiguous) + assert_(b.flags.c_contiguous) + + def test_object_array_self_reference(self): + # Object arrays with references to themselves can cause problems + a = np.array(0, dtype=object) + a[()] = a + assert_raises(RecursionError, int, a) + assert_raises(RecursionError, float, a) + a[()] = None + + def test_object_array_circular_reference(self): + # Test the same for a circular reference. + a = np.array(0, dtype=object) + b = np.array(0, dtype=object) + a[()] = b + b[()] = a + assert_raises(RecursionError, int, a) + # NumPy has no tp_traverse currently, so circular references + # cannot be detected. So resolve it: + a[()] = None + + # This was causing a to become like the above + a = np.array(0, dtype=object) + a[...] 
+= 1 + assert_equal(a, 1) + + def test_object_array_nested(self): + # but is fine with a reference to a different array + a = np.array(0, dtype=object) + b = np.array(0, dtype=object) + a[()] = b + assert_equal(int(a), int(0)) + assert_equal(float(a), float(0)) + + def test_object_array_self_copy(self): + # An object array being copied into itself DECREF'ed before INCREF'ing + # causing segmentation faults (gh-3787) + a = np.array(object(), dtype=object) + np.copyto(a, a) + if HAS_REFCOUNT: + assert_(sys.getrefcount(a[()]) == 2) + a[()].__class__ # will segfault if object was deleted + + def test_zerosize_accumulate(self): + "Ticket #1733" + x = np.array([[42, 0]], dtype=np.uint32) + assert_equal(np.add.accumulate(x[:-1, 0]), []) + + def test_objectarray_setfield(self): + # Setfield should not overwrite Object fields with non-Object data + x = np.array([1, 2, 3], dtype=object) + assert_raises(TypeError, x.setfield, 4, np.int32, 0) + + def test_setting_rank0_string(self): + "Ticket #1736" + s1 = b"hello1" + s2 = b"hello2" + a = np.zeros((), dtype="S10") + a[()] = s1 + assert_equal(a, np.array(s1)) + a[()] = np.array(s2) + assert_equal(a, np.array(s2)) + + a = np.zeros((), dtype='f4') + a[()] = 3 + assert_equal(a, np.array(3)) + a[()] = np.array(4) + assert_equal(a, np.array(4)) + + def test_string_astype(self): + "Ticket #1748" + s1 = b'black' + s2 = b'white' + s3 = b'other' + a = np.array([[s1], [s2], [s3]]) + assert_equal(a.dtype, np.dtype('S5')) + b = a.astype(np.dtype('S0')) + assert_equal(b.dtype, np.dtype('S5')) + + def test_ticket_1756(self): + # Ticket #1756 + s = b'0123456789abcdef' + a = np.array([s]*5) + for i in range(1, 17): + a1 = np.array(a, "|S%d" % i) + a2 = np.array([s[:i]]*5) + assert_equal(a1, a2) + + def test_fields_strides(self): + "gh-2355" + r = np.frombuffer(b'abcdefghijklmnop'*4*3, dtype='i4,(2,3)u2') + assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2]) + assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1']) + assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()]) + assert_equal(r[0:3:2]['f1'][0].strides, r[0:3:2][0]['f1'].strides) + + def test_alignment_update(self): + # Check that alignment flag is updated on stride setting + a = np.arange(10) + assert_(a.flags.aligned) + a.strides = 3 + assert_(not a.flags.aligned) + + def test_ticket_1770(self): + "Should not segfault on python 3k" + import numpy as np + try: + a = np.zeros((1,), dtype=[('f1', 'f')]) + a['f1'] = 1 + a['f2'] = 1 + except ValueError: + pass + except Exception: + raise AssertionError + + def test_ticket_1608(self): + "x.flat shouldn't modify data" + x = np.array([[1, 2], [3, 4]]).T + np.array(x.flat) + assert_equal(x, [[1, 3], [2, 4]]) + + def test_pickle_string_overwrite(self): + import re + + data = np.array([1], dtype='b') + blob = pickle.dumps(data, protocol=1) + data = pickle.loads(blob) + + # Check that loads does not clobber interned strings + s = re.sub("a(.)", "\x01\\1", "a_") + assert_equal(s[0], "\x01") + data[0] = 0xbb + s = re.sub("a(.)", "\x01\\1", "a_") + assert_equal(s[0], "\x01") + + def test_pickle_bytes_overwrite(self): + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + data = np.array([1], dtype='b') + data = pickle.loads(pickle.dumps(data, protocol=proto)) + data[0] = 0xdd + bytestring = "\x01 ".encode('ascii') + assert_equal(bytestring[0:1], '\x01'.encode('ascii')) + + def test_pickle_py2_array_latin1_hack(self): + # Check that unpickling hacks in Py3 that support + # encoding='latin1' work correctly. 
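+        # Illustrative aside (these two sanity checks are an addition, not
+        # part of the original test): latin1 maps every byte 0x00-0xff to
+        # the code point with the same ordinal, which is why it can decode
+        # a Python 2 str pickle losslessly:
+        assert b'\x81'.decode('latin1') == '\x81'
+        assert '\x81'.encode('latin1') == b'\x81'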
+ + # Python2 output for pickle.dumps(numpy.array([129], dtype='b')) + data = (b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\n" + b"tp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'i1'\np8\n" + b"I0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nNNNI-1\nI-1\nI0\ntp12\nbI00\nS'\\x81'\n" + b"p13\ntp14\nb.") + # This should work: + result = pickle.loads(data, encoding='latin1') + assert_array_equal(result, np.array([129], dtype='b')) + # Should not segfault: + assert_raises(Exception, pickle.loads, data, encoding='koi8-r') + + def test_pickle_py2_scalar_latin1_hack(self): + # Check that scalar unpickling hack in Py3 that supports + # encoding='latin1' work correctly. + + # Python2 output for pickle.dumps(...) + datas = [ + # (original, python2_pickle, koi8r_validity) + (np.unicode_('\u6bd2'), + (b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n" + b"(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\nI0\n" + b"tp6\nbS'\\xd2k\\x00\\x00'\np7\ntp8\nRp9\n."), + 'invalid'), + + (np.float64(9e123), + (b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'f8'\n" + b"p2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI-1\nI-1\nI0\ntp6\n" + b"bS'O\\x81\\xb7Z\\xaa:\\xabY'\np7\ntp8\nRp9\n."), + 'invalid'), + + (np.bytes_(b'\x9c'), # different 8-bit code point in KOI8-R vs latin1 + (b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'S1'\np2\n" + b"I0\nI1\ntp3\nRp4\n(I3\nS'|'\np5\nNNNI1\nI1\nI0\ntp6\nbS'\\x9c'\np7\n" + b"tp8\nRp9\n."), + 'different'), + ] + for original, data, koi8r_validity in datas: + result = pickle.loads(data, encoding='latin1') + assert_equal(result, original) + + # Decoding under non-latin1 encoding (e.g.) KOI8-R can + # produce bad results, but should not segfault. + if koi8r_validity == 'different': + # Unicode code points happen to lie within latin1, + # but are different in koi8-r, resulting to silent + # bogus results + result = pickle.loads(data, encoding='koi8-r') + assert_(result != original) + elif koi8r_validity == 'invalid': + # Unicode code points outside latin1, so results + # to an encoding exception + assert_raises(ValueError, pickle.loads, data, encoding='koi8-r') + else: + raise ValueError(koi8r_validity) + + def test_structured_type_to_object(self): + a_rec = np.array([(0, 1), (3, 2)], dtype='i4,i8') + a_obj = np.empty((2,), dtype=object) + a_obj[0] = (0, 1) + a_obj[1] = (3, 2) + # astype records -> object + assert_equal(a_rec.astype(object), a_obj) + # '=' records -> object + b = np.empty_like(a_obj) + b[...] = a_rec + assert_equal(b, a_obj) + # '=' object -> records + b = np.empty_like(a_rec) + b[...] = a_obj + assert_equal(b, a_rec) + + def test_assign_obj_listoflists(self): + # Ticket # 1870 + # The inner list should get assigned to the object elements + a = np.zeros(4, dtype=object) + b = a.copy() + a[0] = [1] + a[1] = [2] + a[2] = [3] + a[3] = [4] + b[...] = [[1], [2], [3], [4]] + assert_equal(a, b) + # The first dimension should get broadcast + a = np.zeros((2, 2), dtype=object) + a[...] 
= [[1, 2]] + assert_equal(a, [[1, 2], [1, 2]]) + + @pytest.mark.slow_pypy + def test_memoryleak(self): + # Ticket #1917 - ensure that array data doesn't leak + for i in range(1000): + # 100MB times 1000 would give 100GB of memory usage if it leaks + a = np.empty((100000000,), dtype='i1') + del a + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_ufunc_reduce_memoryleak(self): + a = np.arange(6) + acnt = sys.getrefcount(a) + np.add.reduce(a) + assert_equal(sys.getrefcount(a), acnt) + + def test_search_sorted_invalid_arguments(self): + # Ticket #2021, should not segfault. + x = np.arange(0, 4, dtype='datetime64[D]') + assert_raises(TypeError, x.searchsorted, 1) + + def test_string_truncation(self): + # Ticket #1990 - Data can be truncated in creation of an array from a + # mixed sequence of numeric values and strings (gh-2583) + for val in [True, 1234, 123.4, complex(1, 234)]: + for tostr, dtype in [(asunicode, "U"), (asbytes, "S")]: + b = np.array([val, tostr('xx')], dtype=dtype) + assert_equal(tostr(b[0]), tostr(val)) + b = np.array([tostr('xx'), val], dtype=dtype) + assert_equal(tostr(b[1]), tostr(val)) + + # test also with longer strings + b = np.array([val, tostr('xxxxxxxxxx')], dtype=dtype) + assert_equal(tostr(b[0]), tostr(val)) + b = np.array([tostr('xxxxxxxxxx'), val], dtype=dtype) + assert_equal(tostr(b[1]), tostr(val)) + + def test_string_truncation_ucs2(self): + # Ticket #2081. Python compiled with two byte unicode + # can lead to truncation if itemsize is not properly + # adjusted for NumPy's four byte unicode. + a = np.array(['abcd']) + assert_equal(a.dtype.itemsize, 16) + + def test_unique_stable(self): + # Ticket #2063 must always choose stable sort for argsort to + # get consistent results + v = np.array(([0]*5 + [1]*6 + [2]*6)*4) + res = np.unique(v, return_index=True) + tgt = (np.array([0, 1, 2]), np.array([ 0, 5, 11])) + assert_equal(res, tgt) + + def test_unicode_alloc_dealloc_match(self): + # Ticket #1578, the mismatch only showed up when running + # python-debug for python versions >= 2.7, and then as + # a core dump and error message. + a = np.array(['abc'], dtype=np.unicode_)[0] + del a + + def test_refcount_error_in_clip(self): + # Ticket #1588 + a = np.zeros((2,), dtype='>i2').clip(min=0) + x = a + a + # This used to segfault: + y = str(x) + # Check the final string: + assert_(y == "[0 0]") + + def test_searchsorted_wrong_dtype(self): + # Ticket #2189, it used to segfault, so we check that it raises the + # proper exception. + a = np.array([('a', 1)], dtype='S1, int') + assert_raises(TypeError, np.searchsorted, a, 1.2) + # Ticket #2066, similar problem: + dtype = np.format_parser(['i4', 'i4'], [], []) + a = np.recarray((2,), dtype) + a[...] 
= [(1, 2), (3, 4)]
+        assert_raises(TypeError, np.searchsorted, a, 1)
+
+    def test_complex64_alignment(self):
+        # Issue gh-2668 (trac 2076), segfault on sparc due to misalignment
+        dtt = np.complex64
+        arr = np.arange(10, dtype=dtt)
+        # 2D array
+        arr2 = np.reshape(arr, (2, 5))
+        # Fortran write followed by (C or F) read caused bus error
+        data_str = arr2.tobytes('F')
+        data_back = np.ndarray(arr2.shape,
+                               arr2.dtype,
+                               buffer=data_str,
+                               order='F')
+        assert_array_equal(arr2, data_back)
+
+    def test_structured_count_nonzero(self):
+        arr = np.array([0, 1]).astype('i4, (2)i4')[:1]
+        count = np.count_nonzero(arr)
+        assert_equal(count, 0)
+
+    def test_copymodule_preserves_f_contiguity(self):
+        a = np.empty((2, 2), order='F')
+        b = copy.copy(a)
+        c = copy.deepcopy(a)
+        assert_(b.flags.fortran)
+        assert_(b.flags.f_contiguous)
+        assert_(c.flags.fortran)
+        assert_(c.flags.f_contiguous)
+
+    def test_fortran_order_buffer(self):
+        import numpy as np
+        a = np.array([['Hello', 'Foob']], dtype='U5', order='F')
+        arr = np.ndarray(shape=[1, 2, 5], dtype='U1', buffer=a)
+        arr2 = np.array([[[u'H', u'e', u'l', u'l', u'o'],
+                          [u'F', u'o', u'o', u'b', u'']]])
+        assert_array_equal(arr, arr2)
+
+    def test_assign_from_sequence_error(self):
+        # Ticket #4024.
+        arr = np.array([1, 2, 3])
+        assert_raises(ValueError, arr.__setitem__, slice(None), [9, 9])
+        arr.__setitem__(slice(None), [9])
+        assert_equal(arr, [9, 9, 9])
+
+    def test_format_on_flex_array_element(self):
+        # Ticket #4369.
+        dt = np.dtype([('date', '<M8[D]'), ('val', '<f8')])
+        arr = np.array([('2000-01-01', 1)], dt)
+        formatted = '{0}'.format(arr[0])
+        assert_equal(formatted, str(arr[0]))
+
+    def test_pickle_module(self):
+        # Check which module the pickle stream references
+        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+            s = pickle.dumps(np.arange(10), protocol=proto)
+            if proto >= 5:
+                # unpickling ndarray goes through _frombuffer for protocol 5
+                assert b'numpy.core.numeric' in s
+            else:
+                assert b'numpy.core.multiarray' in s
+
+    def test_object_casting_errors(self):
+        # gh-11993 update to ValueError (see gh-16909), since strings can in
+        # principle be converted to complex, but this string cannot.
+        arr = np.array(['AAAAA', 18465886.0, 18465886.0], dtype=object)
+        assert_raises(ValueError, arr.astype, 'c8')
+
+    def test_eff1d_casting(self):
+        # gh-12711
+        x = np.array([1, 2, 4, 7, 0], dtype=np.int16)
+        res = np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))
+        assert_equal(res, [-99, 1, 2, 3, -7, 88, 99])
+
+        # The use of safe casting means, that 1<<20 is cast unsafely, an
+        # error may be better, but currently there is no mechanism for it.
+        res = np.ediff1d(x, to_begin=(1<<20), to_end=(1<<20))
+        assert_equal(res, [0, 1, 2, 3, -7, 0])
+
+    def test_pickle_datetime64_array(self):
+        # gh-12745 (would fail with pickle5 installed)
+        d = np.datetime64('2015-07-04 12:59:59.50', 'ns')
+        arr = np.array([d])
+        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+            dumped = pickle.dumps(arr, protocol=proto)
+            assert_equal(pickle.loads(dumped), arr)
+
+    def test_bad_array_interface(self):
+        class T:
+            __array_interface__ = {}
+
+        with assert_raises(ValueError):
+            np.array([T()])
+
+    def test_2d__array__shape(self):
+        class T:
+            def __array__(self):
+                return np.ndarray(shape=(0,0))
+
+            # Make sure __array__ is used instead of Sequence methods.
+            def __iter__(self):
+                return iter([])
+
+            def __getitem__(self, idx):
+                raise AssertionError("__getitem__ was called")
+
+            def __len__(self):
+                return 0
+
+
+        t = T()
+        # gh-13659, would raise in broadcasting [x=t for x in result]
+        arr = np.array([t])
+        assert arr.shape == (1, 0, 0)
+
+    @pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason='overflows 32-bit python')
+    @pytest.mark.skipif(sys.platform == 'win32' and sys.version_info[:2] < (3, 8),
+                        reason='overflows on windows, fixed in bpo-16865')
+    def test_to_ctypes(self):
+        #gh-14214
+        arr = np.zeros((2 ** 31 + 1,), 'b')
+        assert arr.size * arr.itemsize > 2 ** 31
+        c_arr = np.ctypeslib.as_ctypes(arr)
+        assert_equal(c_arr._length_, arr.size)
+
+    def test_complex_conversion_error(self):
+        # gh-17068
+        with pytest.raises(TypeError, match=r"Unable to convert dtype.*"):
+            complex(np.array("now", np.datetime64))
+
+    def test__array_interface__descr(self):
+        # gh-17068
+        dt = np.dtype(dict(names=['a', 'b'],
+                           offsets=[0, 0],
+                           formats=[np.int64, np.int64]))
+        descr = np.array((1, 1), dtype=dt).__array_interface__['descr']
+        assert descr == [('', '|V8')]  # instead of [(b'', '|V8')]
+
+    @pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason='overflows 32-bit python')
+    @requires_memory(free_bytes=9e9)
+    def test_dot_big_stride(self):
+        # gh-17111
+        # blas stride = stride//itemsize > int32 max
+        int32_max = np.iinfo(np.int32).max
+        n = int32_max + 3
+        a = np.empty([n], dtype=np.float32)
+        b = a[::n-1]
+        b[...] = 1
+        assert b.strides[0] > int32_max * b.dtype.itemsize
+        assert np.dot(b, b) == 2.0
+
+    def test_frompyfunc_name(self):
+        # name conversion was failing for python 3 strings
+        # resulting in the default '?' name. Also test utf-8
+        # encoding using non-ascii name.
+        def cassé(x):
+            return x
+
+        f = np.frompyfunc(cassé, 1, 1)
+        assert str(f) == "<ufunc 'cassé (vectorized)'>"
+
+    @pytest.mark.parametrize("operation", [
+        'add', 'subtract', 'multiply', 'floor_divide',
+        'conjugate', 'fmod', 'square', 'reciprocal',
+        'power', 'absolute', 'negative', 'positive',
+        'greater', 'greater_equal', 'less',
+        'less_equal', 'equal', 'not_equal', 'logical_and',
+        'logical_not', 'logical_or', 'bitwise_and', 'bitwise_or',
+        'bitwise_xor', 'invert', 'left_shift', 'right_shift',
+        'gcd', 'lcm'
+        ]
+    )
+    @pytest.mark.parametrize("order", [
+        ('b->', 'B->'),
+        ('h->', 'H->'),
+        ('i->', 'I->'),
+        ('l->', 'L->'),
+        ('q->', 'Q->'),
+        ]
+    )
+    def test_ufunc_order(self, operation, order):
+        # gh-18075
+        # Ensure signed types before unsigned
+        def get_idx(string, str_lst):
+            for i, s in enumerate(str_lst):
+                if string in s:
+                    return i
+            raise ValueError(f"{string} not in list")
+        types = getattr(np, operation).types
+        assert get_idx(order[0], types) < get_idx(order[1], types), (
+            f"Unexpected types order of ufunc in {operation}"
+            f"for {order}.
Possible fix: Use signed before unsigned" + "in generate_umath.py") diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_scalar_ctors.py b/MLPY/Lib/site-packages/numpy/core/tests/test_scalar_ctors.py new file mode 100644 index 0000000000000000000000000000000000000000..39e17249fe5d18f7b29f042a1551549052553694 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/test_scalar_ctors.py @@ -0,0 +1,115 @@ +""" +Test the scalar constructors, which also do type-coercion +""" +import pytest + +import numpy as np +from numpy.testing import ( + assert_equal, assert_almost_equal, assert_warns, + ) + +class TestFromString: + def test_floating(self): + # Ticket #640, floats from string + fsingle = np.single('1.234') + fdouble = np.double('1.234') + flongdouble = np.longdouble('1.234') + assert_almost_equal(fsingle, 1.234) + assert_almost_equal(fdouble, 1.234) + assert_almost_equal(flongdouble, 1.234) + + def test_floating_overflow(self): + """ Strings containing an unrepresentable float overflow """ + fhalf = np.half('1e10000') + assert_equal(fhalf, np.inf) + fsingle = np.single('1e10000') + assert_equal(fsingle, np.inf) + fdouble = np.double('1e10000') + assert_equal(fdouble, np.inf) + flongdouble = assert_warns(RuntimeWarning, np.longdouble, '1e10000') + assert_equal(flongdouble, np.inf) + + fhalf = np.half('-1e10000') + assert_equal(fhalf, -np.inf) + fsingle = np.single('-1e10000') + assert_equal(fsingle, -np.inf) + fdouble = np.double('-1e10000') + assert_equal(fdouble, -np.inf) + flongdouble = assert_warns(RuntimeWarning, np.longdouble, '-1e10000') + assert_equal(flongdouble, -np.inf) + + +class TestExtraArgs: + def test_superclass(self): + # try both positional and keyword arguments + s = np.str_(b'\\x61', encoding='unicode-escape') + assert s == 'a' + s = np.str_(b'\\x61', 'unicode-escape') + assert s == 'a' + + # previously this would return '\\xx' + with pytest.raises(UnicodeDecodeError): + np.str_(b'\\xx', encoding='unicode-escape') + with pytest.raises(UnicodeDecodeError): + np.str_(b'\\xx', 'unicode-escape') + + # superclass fails, but numpy succeeds + assert np.bytes_(-2) == b'-2' + + def test_datetime(self): + dt = np.datetime64('2000-01', ('M', 2)) + assert np.datetime_data(dt) == ('M', 2) + + with pytest.raises(TypeError): + np.datetime64('2000', garbage=True) + + def test_bool(self): + with pytest.raises(TypeError): + np.bool_(False, garbage=True) + + def test_void(self): + with pytest.raises(TypeError): + np.void(b'test', garbage=True) + + +class TestFromInt: + def test_intp(self): + # Ticket #99 + assert_equal(1024, np.intp(1024)) + + def test_uint64_from_negative(self): + assert_equal(np.uint64(-2), np.uint64(18446744073709551614)) + + +int_types = [np.byte, np.short, np.intc, np.int_, np.longlong] +uint_types = [np.ubyte, np.ushort, np.uintc, np.uint, np.ulonglong] +float_types = [np.half, np.single, np.double, np.longdouble] +cfloat_types = [np.csingle, np.cdouble, np.clongdouble] + + +class TestArrayFromScalar: + """ gh-15467 """ + + def _do_test(self, t1, t2): + x = t1(2) + arr = np.array(x, dtype=t2) + # type should be preserved exactly + if t2 is None: + assert arr.dtype.type is t1 + else: + assert arr.dtype.type is t2 + + @pytest.mark.parametrize('t1', int_types + uint_types) + @pytest.mark.parametrize('t2', int_types + uint_types + [None]) + def test_integers(self, t1, t2): + return self._do_test(t1, t2) + + @pytest.mark.parametrize('t1', float_types) + @pytest.mark.parametrize('t2', float_types + [None]) + def test_reals(self, t1, t2): + return self._do_test(t1, 
t2) + + @pytest.mark.parametrize('t1', cfloat_types) + @pytest.mark.parametrize('t2', cfloat_types + [None]) + def test_complex(self, t1, t2): + return self._do_test(t1, t2) diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_scalar_methods.py b/MLPY/Lib/site-packages/numpy/core/tests/test_scalar_methods.py new file mode 100644 index 0000000000000000000000000000000000000000..9a4e1373b0229b5486187e1eef19290ffb285172 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/test_scalar_methods.py @@ -0,0 +1,104 @@ +""" +Test the scalar constructors, which also do type-coercion +""" +import fractions +import platform + +import pytest +import numpy as np + +from numpy.testing import assert_equal, assert_raises + + +class TestAsIntegerRatio: + # derived in part from the cpython test "test_floatasratio" + + @pytest.mark.parametrize("ftype", [ + np.half, np.single, np.double, np.longdouble]) + @pytest.mark.parametrize("f, ratio", [ + (0.875, (7, 8)), + (-0.875, (-7, 8)), + (0.0, (0, 1)), + (11.5, (23, 2)), + ]) + def test_small(self, ftype, f, ratio): + assert_equal(ftype(f).as_integer_ratio(), ratio) + + @pytest.mark.parametrize("ftype", [ + np.half, np.single, np.double, np.longdouble]) + def test_simple_fractions(self, ftype): + R = fractions.Fraction + assert_equal(R(0, 1), + R(*ftype(0.0).as_integer_ratio())) + assert_equal(R(5, 2), + R(*ftype(2.5).as_integer_ratio())) + assert_equal(R(1, 2), + R(*ftype(0.5).as_integer_ratio())) + assert_equal(R(-2100, 1), + R(*ftype(-2100.0).as_integer_ratio())) + + @pytest.mark.parametrize("ftype", [ + np.half, np.single, np.double, np.longdouble]) + def test_errors(self, ftype): + assert_raises(OverflowError, ftype('inf').as_integer_ratio) + assert_raises(OverflowError, ftype('-inf').as_integer_ratio) + assert_raises(ValueError, ftype('nan').as_integer_ratio) + + def test_against_known_values(self): + R = fractions.Fraction + assert_equal(R(1075, 512), + R(*np.half(2.1).as_integer_ratio())) + assert_equal(R(-1075, 512), + R(*np.half(-2.1).as_integer_ratio())) + assert_equal(R(4404019, 2097152), + R(*np.single(2.1).as_integer_ratio())) + assert_equal(R(-4404019, 2097152), + R(*np.single(-2.1).as_integer_ratio())) + assert_equal(R(4728779608739021, 2251799813685248), + R(*np.double(2.1).as_integer_ratio())) + assert_equal(R(-4728779608739021, 2251799813685248), + R(*np.double(-2.1).as_integer_ratio())) + # longdouble is platform dependent + + @pytest.mark.parametrize("ftype, frac_vals, exp_vals", [ + # dtype test cases generated using hypothesis + # first five generated cases per dtype + (np.half, [0.0, 0.01154830649280303, 0.31082276347447274, + 0.527350517124794, 0.8308562335072596], + [0, 1, 0, -8, 12]), + (np.single, [0.0, 0.09248576989263226, 0.8160498218131407, + 0.17389442853722373, 0.7956044195067877], + [0, 12, 10, 17, -26]), + (np.double, [0.0, 0.031066908499895136, 0.5214135908877832, + 0.45780736035689296, 0.5906586745934036], + [0, -801, 51, 194, -653]), + pytest.param( + np.longdouble, + [0.0, 0.20492557202724854, 0.4277180662199366, 0.9888085019891495, + 0.9620175814461964], + [0, -7400, 14266, -7822, -8721], + marks=[ + pytest.mark.skipif( + np.finfo(np.double) == np.finfo(np.longdouble), + reason="long double is same as double"), + pytest.mark.skipif( + platform.machine().startswith("ppc"), + reason="IBM double double"), + ] + ) + ]) + def test_roundtrip(self, ftype, frac_vals, exp_vals): + for frac, exp in zip(frac_vals, exp_vals): + f = np.ldexp(ftype(frac), exp) + assert f.dtype == ftype + n, d = f.as_integer_ratio() + + try: + # 
workaround for gh-9968 + nf = np.longdouble(str(n)) + df = np.longdouble(str(d)) + except (OverflowError, RuntimeWarning): + # the values may not fit in any float type + pytest.skip("longdouble too small on this platform") + + assert_equal(nf / df, f, "{}/{}".format(n, d)) diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_scalarbuffer.py b/MLPY/Lib/site-packages/numpy/core/tests/test_scalarbuffer.py new file mode 100644 index 0000000000000000000000000000000000000000..8aa03e151b2d5faf038ca8a31a06574ccbe49b62 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/test_scalarbuffer.py @@ -0,0 +1,154 @@ +""" +Test scalar buffer interface adheres to PEP 3118 +""" +import numpy as np +from numpy.core._rational_tests import rational +from numpy.core._multiarray_tests import get_buffer_info +import pytest + +from numpy.testing import assert_, assert_equal, assert_raises + +# PEP3118 format strings for native (standard alignment and byteorder) types +scalars_and_codes = [ + (np.bool_, '?'), + (np.byte, 'b'), + (np.short, 'h'), + (np.intc, 'i'), + (np.int_, 'l'), + (np.longlong, 'q'), + (np.ubyte, 'B'), + (np.ushort, 'H'), + (np.uintc, 'I'), + (np.uint, 'L'), + (np.ulonglong, 'Q'), + (np.half, 'e'), + (np.single, 'f'), + (np.double, 'd'), + (np.longdouble, 'g'), + (np.csingle, 'Zf'), + (np.cdouble, 'Zd'), + (np.clongdouble, 'Zg'), +] +scalars_only, codes_only = zip(*scalars_and_codes) + + +class TestScalarPEP3118: + + @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only) + def test_scalar_match_array(self, scalar): + x = scalar() + a = np.array([], dtype=np.dtype(scalar)) + mv_x = memoryview(x) + mv_a = memoryview(a) + assert_equal(mv_x.format, mv_a.format) + + @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only) + def test_scalar_dim(self, scalar): + x = scalar() + mv_x = memoryview(x) + assert_equal(mv_x.itemsize, np.dtype(scalar).itemsize) + assert_equal(mv_x.ndim, 0) + assert_equal(mv_x.shape, ()) + assert_equal(mv_x.strides, ()) + assert_equal(mv_x.suboffsets, ()) + + @pytest.mark.parametrize('scalar, code', scalars_and_codes, ids=codes_only) + def test_scalar_code_and_properties(self, scalar, code): + x = scalar() + expected = dict(strides=(), itemsize=x.dtype.itemsize, ndim=0, + shape=(), format=code, readonly=True) + + mv_x = memoryview(x) + print(mv_x.readonly, self._as_dict(mv_x)) + assert self._as_dict(mv_x) == expected + + @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only) + def test_scalar_buffers_readonly(self, scalar): + x = scalar() + with pytest.raises(BufferError, match="scalar buffer is readonly"): + get_buffer_info(x, ["WRITABLE"]) + + def test_void_scalar_structured_data(self): + dt = np.dtype([('name', np.unicode_, 16), ('grades', np.float64, (2,))]) + x = np.array(('ndarray_scalar', (1.2, 3.0)), dtype=dt)[()] + assert_(isinstance(x, np.void)) + mv_x = memoryview(x) + expected_size = 16 * np.dtype((np.unicode_, 1)).itemsize + expected_size += 2 * np.dtype(np.float64).itemsize + assert_equal(mv_x.itemsize, expected_size) + assert_equal(mv_x.ndim, 0) + assert_equal(mv_x.shape, ()) + assert_equal(mv_x.strides, ()) + assert_equal(mv_x.suboffsets, ()) + + # check scalar format string against ndarray format string + a = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt) + assert_(isinstance(a, np.ndarray)) + mv_a = memoryview(a) + assert_equal(mv_x.itemsize, mv_a.itemsize) + assert_equal(mv_x.format, mv_a.format) + + # Check that we do not allow writeable buffer export (technically + # we could allow it sometimes 
here...)
+        with pytest.raises(BufferError, match="scalar buffer is readonly"):
+            get_buffer_info(x, ["WRITABLE"])
+
+    def _as_dict(self, m):
+        return dict(strides=m.strides, shape=m.shape, itemsize=m.itemsize,
+                    ndim=m.ndim, format=m.format, readonly=m.readonly)
+
+    def test_datetime_memoryview(self):
+        # gh-11656
+        # Values verified with v1.13.3, shape is not () as in test_scalar_dim
+
+        dt1 = np.datetime64('2016-01-01')
+        dt2 = np.datetime64('2017-01-01')
+        expected = dict(strides=(1,), itemsize=1, ndim=1, shape=(8,),
+                        format='B', readonly=True)
+        v = memoryview(dt1)
+        assert self._as_dict(v) == expected
+
+        v = memoryview(dt2 - dt1)
+        assert self._as_dict(v) == expected
+
+        dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')])
+        a = np.empty(1, dt)
+        # Fails to create a PEP 3118 valid buffer
+        assert_raises((ValueError, BufferError), memoryview, a[0])
+
+        # Check that we do not allow writeable buffer export
+        with pytest.raises(BufferError, match="scalar buffer is readonly"):
+            get_buffer_info(dt1, ["WRITABLE"])
+
+    @pytest.mark.parametrize('s', [
+        pytest.param("\x32\x32", id="ascii"),
+        pytest.param("\uFE0F\uFE0F", id="basic multilingual"),
+        pytest.param("\U0001f4bb\U0001f4bb", id="non-BMP"),
+        ])
+    def test_str_ucs4(self, s):
+        s = np.str_(s)  # only our subclass implements the buffer protocol
+
+        # all the same, characters always encode as ucs4
+        expected = dict(strides=(), itemsize=8, ndim=0, shape=(), format='2w',
+                        readonly=True)
+
+        v = memoryview(s)
+        assert self._as_dict(v) == expected
+
+        # integers of the platform-appropriate endianness
+        code_points = np.frombuffer(v, dtype='i4')
+
+        assert_equal(code_points, [ord(c) for c in s])
+
+        # Check that we do not allow writeable buffer export
+        with pytest.raises(BufferError, match="scalar buffer is readonly"):
+            get_buffer_info(s, ["WRITABLE"])
+
+    def test_user_scalar_fails_buffer(self):
+        r = rational(1)
+        with assert_raises(TypeError):
+            memoryview(r)
+
+        # Check that we do not allow writeable buffer export
+        with pytest.raises(BufferError, match="scalar buffer is readonly"):
+            get_buffer_info(r, ["WRITABLE"])
\ No newline at end of file
diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_scalarinherit.py b/MLPY/Lib/site-packages/numpy/core/tests/test_scalarinherit.py
new file mode 100644
index 0000000000000000000000000000000000000000..c0657907a98a59779c4ffade763855b1a207364e
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/core/tests/test_scalarinherit.py
@@ -0,0 +1,99 @@
+# -*- coding: utf-8 -*-
+""" Test printing of scalar types.
+
+"""
+import pytest
+
+import numpy as np
+from numpy.testing import assert_, assert_raises
+
+
+class A:
+    pass
+class B(A, np.float64):
+    pass
+
+class C(B):
+    pass
+class D(C, B):
+    pass
+
+class B0(np.float64, A):
+    pass
+class C0(B0):
+    pass
+
+class HasNew:
+    def __new__(cls, *args, **kwargs):
+        return cls, args, kwargs
+
+class B1(np.float64, HasNew):
+    pass
+
+
+class TestInherit:
+    def test_init(self):
+        x = B(1.0)
+        assert_(str(x) == '1.0')
+        y = C(2.0)
+        assert_(str(y) == '2.0')
+        z = D(3.0)
+        assert_(str(z) == '3.0')
+
+    def test_init2(self):
+        x = B0(1.0)
+        assert_(str(x) == '1.0')
+        y = C0(2.0)
+        assert_(str(y) == '2.0')
+
+    def test_gh_15395(self):
+        # HasNew is the second base, so `np.float64` should have priority
+        x = B1(1.0)
+        assert_(str(x) == '1.0')
+
+        # previously caused RecursionError!?
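+        # (presumably float64.__new__ rejects the extra positional argument;
+        # the point is that a TypeError surfaces instead of the call bouncing
+        # between the two bases' __new__ implementations)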
+ with pytest.raises(TypeError): + B1(1.0, 2.0) + + +class TestCharacter: + def test_char_radd(self): + # GH issue 9620, reached gentype_add and raise TypeError + np_s = np.string_('abc') + np_u = np.unicode_('abc') + s = b'def' + u = u'def' + assert_(np_s.__radd__(np_s) is NotImplemented) + assert_(np_s.__radd__(np_u) is NotImplemented) + assert_(np_s.__radd__(s) is NotImplemented) + assert_(np_s.__radd__(u) is NotImplemented) + assert_(np_u.__radd__(np_s) is NotImplemented) + assert_(np_u.__radd__(np_u) is NotImplemented) + assert_(np_u.__radd__(s) is NotImplemented) + assert_(np_u.__radd__(u) is NotImplemented) + assert_(s + np_s == b'defabc') + assert_(u + np_u == u'defabc') + + class MyStr(str, np.generic): + # would segfault + pass + + with assert_raises(TypeError): + # Previously worked, but gave completely wrong result + ret = s + MyStr('abc') + + class MyBytes(bytes, np.generic): + # would segfault + pass + + ret = s + MyBytes(b'abc') + assert(type(ret) is type(s)) + assert ret == b"defabc" + + def test_char_repeat(self): + np_s = np.string_('abc') + np_u = np.unicode_('abc') + res_s = b'abc' * 5 + res_u = u'abc' * 5 + assert_(np_s * 5 == res_s) + assert_(np_u * 5 == res_u) diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_scalarmath.py b/MLPY/Lib/site-packages/numpy/core/tests/test_scalarmath.py new file mode 100644 index 0000000000000000000000000000000000000000..8340c530392ed158260fe9215a991884cbfaa498 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/test_scalarmath.py @@ -0,0 +1,824 @@ +import contextlib +import sys +import warnings +import itertools +import operator +import platform +import pytest +from hypothesis import given, settings, Verbosity, assume +from hypothesis.strategies import sampled_from + +import numpy as np +from numpy.testing import ( + assert_, assert_equal, assert_raises, assert_almost_equal, + assert_array_equal, IS_PYPY, suppress_warnings, _gen_alignment_data, + assert_warns, assert_raises_regex, + ) + +types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc, + np.int_, np.uint, np.longlong, np.ulonglong, + np.single, np.double, np.longdouble, np.csingle, + np.cdouble, np.clongdouble] + +floating_types = np.floating.__subclasses__() +complex_floating_types = np.complexfloating.__subclasses__() + + +# This compares scalarmath against ufuncs. + +class TestTypes: + def test_types(self): + for atype in types: + a = atype(1) + assert_(a == 1, "error with %r: got %r" % (atype, a)) + + def test_type_add(self): + # list of types + for k, atype in enumerate(types): + a_scalar = atype(3) + a_array = np.array([3], dtype=atype) + for l, btype in enumerate(types): + b_scalar = btype(1) + b_array = np.array([1], dtype=btype) + c_scalar = a_scalar + b_scalar + c_array = a_array + b_array + # It was comparing the type numbers, but the new ufunc + # function-finding mechanism finds the lowest function + # to which both inputs can be cast - which produces 'l' + # when you do 'q' + 'b'. The old function finding mechanism + # skipped ahead based on the first argument, but that + # does not produce properly symmetric results... 
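+                # (hence the dtype comparison below rather than a typecode
+                # comparison: e.g. on 64-bit Linux 'l' and 'q' are different
+                # characters for the same 8-byte integer dtype)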
+ assert_equal(c_scalar.dtype, c_array.dtype, + "error with types (%d/'%c' + %d/'%c')" % + (k, np.dtype(atype).char, l, np.dtype(btype).char)) + + def test_type_create(self): + for k, atype in enumerate(types): + a = np.array([1, 2, 3], atype) + b = atype([1, 2, 3]) + assert_equal(a, b) + + def test_leak(self): + # test leak of scalar objects + # a leak would show up in valgrind as still-reachable of ~2.6MB + for i in range(200000): + np.add(1, 1) + + +class TestBaseMath: + def test_blocked(self): + # test alignments offsets for simd instructions + # alignments for vz + 2 * (vs - 1) + 1 + for dt, sz in [(np.float32, 11), (np.float64, 7), (np.int32, 11)]: + for out, inp1, inp2, msg in _gen_alignment_data(dtype=dt, + type='binary', + max_size=sz): + exp1 = np.ones_like(inp1) + inp1[...] = np.ones_like(inp1) + inp2[...] = np.zeros_like(inp2) + assert_almost_equal(np.add(inp1, inp2), exp1, err_msg=msg) + assert_almost_equal(np.add(inp1, 2), exp1 + 2, err_msg=msg) + assert_almost_equal(np.add(1, inp2), exp1, err_msg=msg) + + np.add(inp1, inp2, out=out) + assert_almost_equal(out, exp1, err_msg=msg) + + inp2[...] += np.arange(inp2.size, dtype=dt) + 1 + assert_almost_equal(np.square(inp2), + np.multiply(inp2, inp2), err_msg=msg) + # skip true divide for ints + if dt != np.int32: + assert_almost_equal(np.reciprocal(inp2), + np.divide(1, inp2), err_msg=msg) + + inp1[...] = np.ones_like(inp1) + np.add(inp1, 2, out=out) + assert_almost_equal(out, exp1 + 2, err_msg=msg) + inp2[...] = np.ones_like(inp2) + np.add(2, inp2, out=out) + assert_almost_equal(out, exp1 + 2, err_msg=msg) + + def test_lower_align(self): + # check data that is not aligned to element size + # i.e doubles are aligned to 4 bytes on i386 + d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64) + o = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64) + assert_almost_equal(d + d, d * 2) + np.add(d, d, out=o) + np.add(np.ones_like(d), d, out=o) + np.add(d, np.ones_like(d), out=o) + np.add(np.ones_like(d), d) + np.add(d, np.ones_like(d)) + + +class TestPower: + def test_small_types(self): + for t in [np.int8, np.int16, np.float16]: + a = t(3) + b = a ** 4 + assert_(b == 81, "error with %r: got %r" % (t, b)) + + def test_large_types(self): + for t in [np.int32, np.int64, np.float32, np.float64, np.longdouble]: + a = t(51) + b = a ** 4 + msg = "error with %r: got %r" % (t, b) + if np.issubdtype(t, np.integer): + assert_(b == 6765201, msg) + else: + assert_almost_equal(b, 6765201, err_msg=msg) + + def test_integers_to_negative_integer_power(self): + # Note that the combination of uint64 with a signed integer + # has common type np.float64. The other combinations should all + # raise a ValueError for integer ** negative integer. + exp = [np.array(-1, dt)[()] for dt in 'bhilq'] + + # 1 ** -1 possible special case + base = [np.array(1, dt)[()] for dt in 'bhilqBHILQ'] + for i1, i2 in itertools.product(base, exp): + if i1.dtype != np.uint64: + assert_raises(ValueError, operator.pow, i1, i2) + else: + res = operator.pow(i1, i2) + assert_(res.dtype.type is np.float64) + assert_almost_equal(res, 1.) + + # -1 ** -1 possible special case + base = [np.array(-1, dt)[()] for dt in 'bhilq'] + for i1, i2 in itertools.product(base, exp): + if i1.dtype != np.uint64: + assert_raises(ValueError, operator.pow, i1, i2) + else: + res = operator.pow(i1, i2) + assert_(res.dtype.type is np.float64) + assert_almost_equal(res, -1.) 
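+        # Illustration (an added sanity check, not in the original test):
+        # uint64 is exempt above because no signed integer type can hold
+        # every uint64 value, so mixing it with a signed exponent promotes
+        # to float64:
+        assert np.result_type(np.uint64, np.int64) == np.float64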
+ + # 2 ** -1 perhaps generic + base = [np.array(2, dt)[()] for dt in 'bhilqBHILQ'] + for i1, i2 in itertools.product(base, exp): + if i1.dtype != np.uint64: + assert_raises(ValueError, operator.pow, i1, i2) + else: + res = operator.pow(i1, i2) + assert_(res.dtype.type is np.float64) + assert_almost_equal(res, .5) + + def test_mixed_types(self): + typelist = [np.int8, np.int16, np.float16, + np.float32, np.float64, np.int8, + np.int16, np.int32, np.int64] + for t1 in typelist: + for t2 in typelist: + a = t1(3) + b = t2(2) + result = a**b + msg = ("error with %r and %r:" + "got %r, expected %r") % (t1, t2, result, 9) + if np.issubdtype(np.dtype(result), np.integer): + assert_(result == 9, msg) + else: + assert_almost_equal(result, 9, err_msg=msg) + + def test_modular_power(self): + # modular power is not implemented, so ensure it errors + a = 5 + b = 4 + c = 10 + expected = pow(a, b, c) # noqa: F841 + for t in (np.int32, np.float32, np.complex64): + # note that 3-operand power only dispatches on the first argument + assert_raises(TypeError, operator.pow, t(a), b, c) + assert_raises(TypeError, operator.pow, np.array(t(a)), b, c) + + +def floordiv_and_mod(x, y): + return (x // y, x % y) + + +def _signs(dt): + if dt in np.typecodes['UnsignedInteger']: + return (+1,) + else: + return (+1, -1) + + +class TestModulus: + + def test_modulus_basic(self): + dt = np.typecodes['AllInteger'] + np.typecodes['Float'] + for op in [floordiv_and_mod, divmod]: + for dt1, dt2 in itertools.product(dt, dt): + for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)): + fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s' + msg = fmt % (op.__name__, dt1, dt2, sg1, sg2) + a = np.array(sg1*71, dtype=dt1)[()] + b = np.array(sg2*19, dtype=dt2)[()] + div, rem = op(a, b) + assert_equal(div*b + rem, a, err_msg=msg) + if sg2 == -1: + assert_(b < rem <= 0, msg) + else: + assert_(b > rem >= 0, msg) + + def test_float_modulus_exact(self): + # test that float results are exact for small integers. This also + # holds for the same integers scaled by powers of two. + nlst = list(range(-127, 0)) + plst = list(range(1, 128)) + dividend = nlst + [0] + plst + divisor = nlst + plst + arg = list(itertools.product(dividend, divisor)) + tgt = list(divmod(*t) for t in arg) + + a, b = np.array(arg, dtype=int).T + # convert exact integer results from Python to float so that + # signed zero can be used, it is checked. 
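+        # (for example, 0 // -2 is plain 0 in exact integer arithmetic, but
+        # the float reference value below must carry the sign: -0.0)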
+        tgtdiv, tgtrem = np.array(tgt, dtype=float).T
+        tgtdiv = np.where((tgtdiv == 0.0) & ((b < 0) ^ (a < 0)), -0.0, tgtdiv)
+        tgtrem = np.where((tgtrem == 0.0) & (b < 0), -0.0, tgtrem)
+
+        for op in [floordiv_and_mod, divmod]:
+            for dt in np.typecodes['Float']:
+                msg = 'op: %s, dtype: %s' % (op.__name__, dt)
+                fa = a.astype(dt)
+                fb = b.astype(dt)
+                # use list comprehension so a_ and b_ are scalars
+                div, rem = zip(*[op(a_, b_) for a_, b_ in zip(fa, fb)])
+                assert_equal(div, tgtdiv, err_msg=msg)
+                assert_equal(rem, tgtrem, err_msg=msg)
+
+    def test_float_modulus_roundoff(self):
+        # gh-6127
+        dt = np.typecodes['Float']
+        for op in [floordiv_and_mod, divmod]:
+            for dt1, dt2 in itertools.product(dt, dt):
+                for sg1, sg2 in itertools.product((+1, -1), (+1, -1)):
+                    fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
+                    msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
+                    a = np.array(sg1*78*6e-8, dtype=dt1)[()]
+                    b = np.array(sg2*6e-8, dtype=dt2)[()]
+                    div, rem = op(a, b)
+                    # Equal assertion should hold when fmod is used
+                    assert_equal(div*b + rem, a, err_msg=msg)
+                    if sg2 == -1:
+                        assert_(b < rem <= 0, msg)
+                    else:
+                        assert_(b > rem >= 0, msg)
+
+    def test_float_modulus_corner_cases(self):
+        # Check remainder magnitude.
+        for dt in np.typecodes['Float']:
+            b = np.array(1.0, dtype=dt)
+            a = np.nextafter(np.array(0.0, dtype=dt), -b)
+            rem = operator.mod(a, b)
+            assert_(rem <= b, 'dt: %s' % dt)
+            rem = operator.mod(-a, -b)
+            assert_(rem >= -b, 'dt: %s' % dt)
+
+        # Check nans, inf
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "invalid value encountered in remainder")
+            sup.filter(RuntimeWarning, "divide by zero encountered in remainder")
+            sup.filter(RuntimeWarning, "divide by zero encountered in floor_divide")
+            sup.filter(RuntimeWarning, "divide by zero encountered in divmod")
+            sup.filter(RuntimeWarning, "invalid value encountered in divmod")
+            for dt in np.typecodes['Float']:
+                fone = np.array(1.0, dtype=dt)
+                fzer = np.array(0.0, dtype=dt)
+                finf = np.array(np.inf, dtype=dt)
+                fnan = np.array(np.nan, dtype=dt)
+                rem = operator.mod(fone, fzer)
+                assert_(np.isnan(rem), 'dt: %s' % dt)
+                # MSVC 2008 returns NaN here, so disable the check.
+                #rem = operator.mod(fone, finf)
+                #assert_(rem == fone, 'dt: %s' % dt)
+                rem = operator.mod(fone, fnan)
+                assert_(np.isnan(rem), 'dt: %s' % dt)
+                rem = operator.mod(finf, fone)
+                assert_(np.isnan(rem), 'dt: %s' % dt)
+                for op in [floordiv_and_mod, divmod]:
+                    div, mod = op(fone, fzer)
+                    assert_(np.isinf(div))
+                    assert_(np.isnan(mod))
+
+    def test_inplace_floordiv_handling(self):
+        # issue gh-12927
+        # this only applies to in-place floordiv //=, because the output type
+        # promotes to float which does not fit
+        a = np.array([1, 2], np.int64)
+        b = np.array([1, 2], np.uint64)
+        pattern = 'could not be coerced to provided output parameter'
+        with assert_raises_regex(TypeError, pattern):
+            a //= b
+
+
+class TestComplexDivision:
+    def test_zero_division(self):
+        with np.errstate(all="ignore"):
+            for t in [np.complex64, np.complex128]:
+                a = t(0.0)
+                b = t(1.0)
+                assert_(np.isinf(b/a))
+                b = t(complex(np.inf, np.inf))
+                assert_(np.isinf(b/a))
+                b = t(complex(np.inf, np.nan))
+                assert_(np.isinf(b/a))
+                b = t(complex(np.nan, np.inf))
+                assert_(np.isinf(b/a))
+                b = t(complex(np.nan, np.nan))
+                assert_(np.isnan(b/a))
+                b = t(0.)
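+                # 0/0: with a zero numerator none of the inf cases above
+                # apply, so the quotient is nan rather than inf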
+ assert_(np.isnan(b/a)) + + def test_signed_zeros(self): + with np.errstate(all="ignore"): + for t in [np.complex64, np.complex128]: + # tupled (numerator, denominator, expected) + # for testing as expected == numerator/denominator + data = ( + (( 0.0,-1.0), ( 0.0, 1.0), (-1.0,-0.0)), + (( 0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)), + (( 0.0,-1.0), (-0.0,-1.0), ( 1.0, 0.0)), + (( 0.0,-1.0), (-0.0, 1.0), (-1.0, 0.0)), + (( 0.0, 1.0), ( 0.0,-1.0), (-1.0, 0.0)), + (( 0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)), + ((-0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)), + ((-0.0, 1.0), ( 0.0,-1.0), (-1.0,-0.0)) + ) + for cases in data: + n = cases[0] + d = cases[1] + ex = cases[2] + result = t(complex(n[0], n[1])) / t(complex(d[0], d[1])) + # check real and imag parts separately to avoid comparison + # in array context, which does not account for signed zeros + assert_equal(result.real, ex[0]) + assert_equal(result.imag, ex[1]) + + def test_branches(self): + with np.errstate(all="ignore"): + for t in [np.complex64, np.complex128]: + # tupled (numerator, denominator, expected) + # for testing as expected == numerator/denominator + data = list() + + # trigger branch: real(fabs(denom)) > imag(fabs(denom)) + # followed by else condition as neither are == 0 + data.append((( 2.0, 1.0), ( 2.0, 1.0), (1.0, 0.0))) + + # trigger branch: real(fabs(denom)) > imag(fabs(denom)) + # followed by if condition as both are == 0 + # is performed in test_zero_division(), so this is skipped + + # trigger else if branch: real(fabs(denom)) < imag(fabs(denom)) + data.append((( 1.0, 2.0), ( 1.0, 2.0), (1.0, 0.0))) + + for cases in data: + n = cases[0] + d = cases[1] + ex = cases[2] + result = t(complex(n[0], n[1])) / t(complex(d[0], d[1])) + # check real and imag parts separately to avoid comparison + # in array context, which does not account for signed zeros + assert_equal(result.real, ex[0]) + assert_equal(result.imag, ex[1]) + + +class TestConversion: + def test_int_from_long(self): + l = [1e6, 1e12, 1e18, -1e6, -1e12, -1e18] + li = [10**6, 10**12, 10**18, -10**6, -10**12, -10**18] + for T in [None, np.float64, np.int64]: + a = np.array(l, dtype=T) + assert_equal([int(_m) for _m in a], li) + + a = np.array(l[:3], dtype=np.uint64) + assert_equal([int(_m) for _m in a], li[:3]) + + def test_iinfo_long_values(self): + for code in 'bBhH': + res = np.array(np.iinfo(code).max + 1, dtype=code) + tgt = np.iinfo(code).min + assert_(res == tgt) + + for code in np.typecodes['AllInteger']: + res = np.array(np.iinfo(code).max, dtype=code) + tgt = np.iinfo(code).max + assert_(res == tgt) + + for code in np.typecodes['AllInteger']: + res = np.dtype(code).type(np.iinfo(code).max) + tgt = np.iinfo(code).max + assert_(res == tgt) + + def test_int_raise_behaviour(self): + def overflow_error_func(dtype): + dtype(np.iinfo(dtype).max + 1) + + for code in [np.int_, np.uint, np.longlong, np.ulonglong]: + assert_raises(OverflowError, overflow_error_func, code) + + def test_int_from_infinite_longdouble(self): + # gh-627 + x = np.longdouble(np.inf) + assert_raises(OverflowError, int, x) + with suppress_warnings() as sup: + sup.record(np.ComplexWarning) + x = np.clongdouble(np.inf) + assert_raises(OverflowError, int, x) + assert_equal(len(sup.log), 1) + + @pytest.mark.skipif(not IS_PYPY, reason="Test is PyPy only (gh-9972)") + def test_int_from_infinite_longdouble___int__(self): + x = np.longdouble(np.inf) + assert_raises(OverflowError, x.__int__) + with suppress_warnings() as sup: + sup.record(np.ComplexWarning) + x = np.clongdouble(np.inf) + 
assert_raises(OverflowError, x.__int__) + assert_equal(len(sup.log), 1) + + @pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble), + reason="long double is same as double") + @pytest.mark.skipif(platform.machine().startswith("ppc"), + reason="IBM double double") + def test_int_from_huge_longdouble(self): + # Produce a longdouble that would overflow a double, + # use exponent that avoids bug in Darwin pow function. + exp = np.finfo(np.double).maxexp - 1 + huge_ld = 2 * 1234 * np.longdouble(2) ** exp + huge_i = 2 * 1234 * 2 ** exp + assert_(huge_ld != np.inf) + assert_equal(int(huge_ld), huge_i) + + def test_int_from_longdouble(self): + x = np.longdouble(1.5) + assert_equal(int(x), 1) + x = np.longdouble(-10.5) + assert_equal(int(x), -10) + + def test_numpy_scalar_relational_operators(self): + # All integer + for dt1 in np.typecodes['AllInteger']: + assert_(1 > np.array(0, dtype=dt1)[()], "type %s failed" % (dt1,)) + assert_(not 1 < np.array(0, dtype=dt1)[()], "type %s failed" % (dt1,)) + + for dt2 in np.typecodes['AllInteger']: + assert_(np.array(1, dtype=dt1)[()] > np.array(0, dtype=dt2)[()], + "type %s and %s failed" % (dt1, dt2)) + assert_(not np.array(1, dtype=dt1)[()] < np.array(0, dtype=dt2)[()], + "type %s and %s failed" % (dt1, dt2)) + + #Unsigned integers + for dt1 in 'BHILQP': + assert_(-1 < np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,)) + assert_(not -1 > np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,)) + assert_(-1 != np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,)) + + #unsigned vs signed + for dt2 in 'bhilqp': + assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()], + "type %s and %s failed" % (dt1, dt2)) + assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()], + "type %s and %s failed" % (dt1, dt2)) + assert_(np.array(1, dtype=dt1)[()] != np.array(-1, dtype=dt2)[()], + "type %s and %s failed" % (dt1, dt2)) + + #Signed integers and floats + for dt1 in 'bhlqp' + np.typecodes['Float']: + assert_(1 > np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,)) + assert_(not 1 < np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,)) + assert_(-1 == np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,)) + + for dt2 in 'bhlqp' + np.typecodes['Float']: + assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()], + "type %s and %s failed" % (dt1, dt2)) + assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()], + "type %s and %s failed" % (dt1, dt2)) + assert_(np.array(-1, dtype=dt1)[()] == np.array(-1, dtype=dt2)[()], + "type %s and %s failed" % (dt1, dt2)) + + def test_scalar_comparison_to_none(self): + # Scalars should just return False and not give a warnings. + # The comparisons are flagged by pep8, ignore that. + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', FutureWarning) + assert_(not np.float32(1) == None) + assert_(not np.str_('test') == None) + # This is dubious (see below): + assert_(not np.datetime64('NaT') == None) + + assert_(np.float32(1) != None) + assert_(np.str_('test') != None) + # This is dubious (see below): + assert_(np.datetime64('NaT') != None) + assert_(len(w) == 0) + + # For documentation purposes, this is why the datetime is dubious. + # At the time of deprecation this was no behaviour change, but + # it has to be considered when the deprecations are done. 
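+        # (note the asymmetry: the == operator above special-cases None and
+        # returns False, whereas the np.equal ufunc below does not)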
+ assert_(np.equal(np.datetime64('NaT'), None)) + + +#class TestRepr: +# def test_repr(self): +# for t in types: +# val = t(1197346475.0137341) +# val_repr = repr(val) +# val2 = eval(val_repr) +# assert_equal( val, val2 ) + + +class TestRepr: + def _test_type_repr(self, t): + finfo = np.finfo(t) + last_fraction_bit_idx = finfo.nexp + finfo.nmant + last_exponent_bit_idx = finfo.nexp + storage_bytes = np.dtype(t).itemsize*8 + # could add some more types to the list below + for which in ['small denorm', 'small norm']: + # Values from https://en.wikipedia.org/wiki/IEEE_754 + constr = np.array([0x00]*storage_bytes, dtype=np.uint8) + if which == 'small denorm': + byte = last_fraction_bit_idx // 8 + bytebit = 7-(last_fraction_bit_idx % 8) + constr[byte] = 1 << bytebit + elif which == 'small norm': + byte = last_exponent_bit_idx // 8 + bytebit = 7-(last_exponent_bit_idx % 8) + constr[byte] = 1 << bytebit + else: + raise ValueError('hmm') + val = constr.view(t)[0] + val_repr = repr(val) + val2 = t(eval(val_repr)) + if not (val2 == 0 and val < 1e-100): + assert_equal(val, val2) + + def test_float_repr(self): + # long double test cannot work, because eval goes through a python + # float + for t in [np.float32, np.float64]: + self._test_type_repr(t) + + +if not IS_PYPY: + # sys.getsizeof() is not valid on PyPy + class TestSizeOf: + + def test_equal_nbytes(self): + for type in types: + x = type(0) + assert_(sys.getsizeof(x) > x.nbytes) + + def test_error(self): + d = np.float32() + assert_raises(TypeError, d.__sizeof__, "a") + + +class TestMultiply: + def test_seq_repeat(self): + # Test that basic sequences get repeated when multiplied with + # numpy integers. And errors are raised when multiplied with others. + # Some of this behaviour may be controversial and could be open for + # change. + accepted_types = set(np.typecodes["AllInteger"]) + deprecated_types = {'?'} + forbidden_types = ( + set(np.typecodes["All"]) - accepted_types - deprecated_types) + forbidden_types -= {'V'} # can't default-construct void scalars + + for seq_type in (list, tuple): + seq = seq_type([1, 2, 3]) + for numpy_type in accepted_types: + i = np.dtype(numpy_type).type(2) + assert_equal(seq * i, seq * int(i)) + assert_equal(i * seq, int(i) * seq) + + for numpy_type in deprecated_types: + i = np.dtype(numpy_type).type() + assert_equal( + assert_warns(DeprecationWarning, operator.mul, seq, i), + seq * int(i)) + assert_equal( + assert_warns(DeprecationWarning, operator.mul, i, seq), + int(i) * seq) + + for numpy_type in forbidden_types: + i = np.dtype(numpy_type).type() + assert_raises(TypeError, operator.mul, seq, i) + assert_raises(TypeError, operator.mul, i, seq) + + def test_no_seq_repeat_basic_array_like(self): + # Test that an array-like which does not know how to be multiplied + # does not attempt sequence repeat (raise TypeError). + # See also gh-7428. + class ArrayLike: + def __init__(self, arr): + self.arr = arr + def __array__(self): + return self.arr + + # Test for simple ArrayLike above and memoryviews (original report) + for arr_like in (ArrayLike(np.ones(3)), memoryview(np.ones(3))): + assert_array_equal(arr_like * np.float32(3.), np.full(3, 3.)) + assert_array_equal(np.float32(3.) 
* arr_like, np.full(3, 3.))
+            assert_array_equal(arr_like * np.int_(3), np.full(3, 3))
+            assert_array_equal(np.int_(3) * arr_like, np.full(3, 3))
+
+
+class TestNegative:
+    def test_exceptions(self):
+        a = np.ones((), dtype=np.bool_)[()]
+        assert_raises(TypeError, operator.neg, a)
+
+    def test_result(self):
+        types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning)
+            for dt in types:
+                a = np.ones((), dtype=dt)[()]
+                assert_equal(operator.neg(a) + a, 0)
+
+
+class TestSubtract:
+    def test_exceptions(self):
+        a = np.ones((), dtype=np.bool_)[()]
+        assert_raises(TypeError, operator.sub, a, a)
+
+    def test_result(self):
+        types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning)
+            for dt in types:
+                a = np.ones((), dtype=dt)[()]
+                assert_equal(operator.sub(a, a), 0)
+
+
+class TestAbs:
+    def _test_abs_func(self, absfunc, test_dtype):
+        x = test_dtype(-1.5)
+        assert_equal(absfunc(x), 1.5)
+        x = test_dtype(0.0)
+        res = absfunc(x)
+        # assert_equal() checks zero signedness
+        assert_equal(res, 0.0)
+        x = test_dtype(-0.0)
+        res = absfunc(x)
+        assert_equal(res, 0.0)
+
+        x = test_dtype(np.finfo(test_dtype).max)
+        assert_equal(absfunc(x), x.real)
+
+        x = test_dtype(np.finfo(test_dtype).tiny)
+        assert_equal(absfunc(x), x.real)
+
+        x = test_dtype(np.finfo(test_dtype).min)
+        assert_equal(absfunc(x), -x.real)
+
+    @pytest.mark.parametrize("dtype", floating_types + complex_floating_types)
+    def test_builtin_abs(self, dtype):
+        self._test_abs_func(abs, dtype)
+
+    @pytest.mark.parametrize("dtype", floating_types + complex_floating_types)
+    def test_numpy_abs(self, dtype):
+        self._test_abs_func(np.abs, dtype)
+
+class TestBitShifts:
+
+    @pytest.mark.parametrize('type_code', np.typecodes['AllInteger'])
+    @pytest.mark.parametrize('op',
+        [operator.rshift, operator.lshift], ids=['>>', '<<'])
+    def test_shift_all_bits(self, type_code, op):
+        """ Shifts where the shift amount is the width of the type or wider """
+        # gh-2449
+        dt = np.dtype(type_code)
+        nbits = dt.itemsize * 8
+        for val in [5, -5]:
+            for shift in [nbits, nbits + 4]:
+                val_scl = dt.type(val)
+                shift_scl = dt.type(shift)
+                res_scl = op(val_scl, shift_scl)
+                if val_scl < 0 and op is operator.rshift:
+                    # sign bit is preserved
+                    assert_equal(res_scl, -1)
+                else:
+                    assert_equal(res_scl, 0)
+
+                # Result on scalars should be the same as on arrays
+                val_arr = np.array([val]*32, dtype=dt)
+                shift_arr = np.array([shift]*32, dtype=dt)
+                res_arr = op(val_arr, shift_arr)
+                assert_equal(res_arr, res_scl)
+
+
+class TestHash:
+    @pytest.mark.parametrize("type_code", np.typecodes['AllInteger'])
+    def test_integer_hashes(self, type_code):
+        scalar = np.dtype(type_code).type
+        for i in range(128):
+            assert hash(i) == hash(scalar(i))
+
+    @pytest.mark.parametrize("type_code", np.typecodes['AllFloat'])
+    def test_float_and_complex_hashes(self, type_code):
+        scalar = np.dtype(type_code).type
+        for val in [np.pi, np.inf, 3, 6.]:
+            numpy_val = scalar(val)
+            # Cast back to Python, in case the NumPy scalar has less precision
+            if numpy_val.dtype.kind == 'c':
+                val = complex(numpy_val)
+            else:
+                val = float(numpy_val)
+            assert val == numpy_val
+            print(repr(numpy_val), repr(val))
+            assert hash(val) == hash(numpy_val)
+
+        if hash(float(np.nan)) != hash(float(np.nan)):
+            # If Python distinguishes different NaNs we do so too (gh-18833)
+            assert hash(scalar(np.nan)) != hash(scalar(np.nan))
+
+    @pytest.mark.parametrize("type_code",
np.typecodes['Complex']) + def test_complex_hashes(self, type_code): + # Test some complex valued hashes specifically: + scalar = np.dtype(type_code).type + for val in [np.pi+1j, np.inf-3j, 3j, 6.+1j]: + numpy_val = scalar(val) + assert hash(complex(numpy_val)) == hash(numpy_val) + + +@contextlib.contextmanager +def recursionlimit(n): + o = sys.getrecursionlimit() + try: + sys.setrecursionlimit(n) + yield + finally: + sys.setrecursionlimit(o) + + +objecty_things = [object(), None] +reasonable_operators_for_scalars = [ + operator.lt, operator.le, operator.eq, operator.ne, operator.ge, + operator.gt, operator.add, operator.floordiv, operator.mod, + operator.mul, operator.matmul, operator.pow, operator.sub, + operator.truediv, +] + + +@given(sampled_from(objecty_things), + sampled_from(reasonable_operators_for_scalars), + sampled_from(types)) +@settings(verbosity=Verbosity.verbose) +def test_operator_object_left(o, op, type_): + try: + with recursionlimit(200): + op(o, type_(1)) + except TypeError: + pass + + +@given(sampled_from(objecty_things), + sampled_from(reasonable_operators_for_scalars), + sampled_from(types)) +def test_operator_object_right(o, op, type_): + try: + with recursionlimit(200): + op(type_(1), o) + except TypeError: + pass + + +@given(sampled_from(reasonable_operators_for_scalars), + sampled_from(types), + sampled_from(types)) +def test_operator_scalars(op, type1, type2): + try: + op(type1(1), type2(1)) + except TypeError: + pass + + +@pytest.mark.parametrize("op", reasonable_operators_for_scalars) +def test_longdouble_inf_loop(op): + try: + op(np.longdouble(3), None) + except TypeError: + pass + try: + op(None, np.longdouble(3)) + except TypeError: + pass + + +@pytest.mark.parametrize("op", reasonable_operators_for_scalars) +def test_clongdouble_inf_loop(op): + if op in {operator.mod} and False: + pytest.xfail("The modulo operator is known to be broken") + try: + op(np.clongdouble(3), None) + except TypeError: + pass + try: + op(None, np.longdouble(3)) + except TypeError: + pass diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_scalarprint.py b/MLPY/Lib/site-packages/numpy/core/tests/test_scalarprint.py new file mode 100644 index 0000000000000000000000000000000000000000..bea32a320325a8e125eea7c36b065c1750db4807 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/test_scalarprint.py @@ -0,0 +1,382 @@ +# -*- coding: utf-8 -*- +""" Test printing of scalar types. + +""" +import code +import platform +import pytest +import sys + +from tempfile import TemporaryFile +import numpy as np +from numpy.testing import assert_, assert_equal, assert_raises + +class TestRealScalars: + def test_str(self): + svals = [0.0, -0.0, 1, -1, np.inf, -np.inf, np.nan] + styps = [np.float16, np.float32, np.float64, np.longdouble] + wanted = [ + ['0.0', '0.0', '0.0', '0.0' ], + ['-0.0', '-0.0', '-0.0', '-0.0'], + ['1.0', '1.0', '1.0', '1.0' ], + ['-1.0', '-1.0', '-1.0', '-1.0'], + ['inf', 'inf', 'inf', 'inf' ], + ['-inf', '-inf', '-inf', '-inf'], + ['nan', 'nan', 'nan', 'nan']] + + for wants, val in zip(wanted, svals): + for want, styp in zip(wants, styps): + msg = 'for str({}({}))'.format(np.dtype(styp).name, repr(val)) + assert_equal(str(styp(val)), want, err_msg=msg) + + def test_scalar_cutoffs(self): + # test that both the str and repr of np.float64 behaves + # like python floats in python3. 
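+        # (CPython 3 prints the shortest decimal string that round-trips to
+        # the same double, e.g. str(0.1) == '0.1'; np.float64 should agree
+        # with it digit for digit)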
+ def check(v): + assert_equal(str(np.float64(v)), str(v)) + assert_equal(str(np.float64(v)), repr(v)) + assert_equal(repr(np.float64(v)), repr(v)) + assert_equal(repr(np.float64(v)), str(v)) + + # check we use the same number of significant digits + check(1.12345678901234567890) + check(0.0112345678901234567890) + + # check switch from scientific output to positional and back + check(1e-5) + check(1e-4) + check(1e15) + check(1e16) + + def test_py2_float_print(self): + # gh-10753 + # In python2, the python float type implements an obsolete method + # tp_print, which overrides tp_repr and tp_str when using "print" to + # output to a "real file" (ie, not a StringIO). Make sure we don't + # inherit it. + x = np.double(0.1999999999999) + with TemporaryFile('r+t') as f: + print(x, file=f) + f.seek(0) + output = f.read() + assert_equal(output, str(x) + '\n') + # In python2 the value float('0.1999999999999') prints with reduced + # precision as '0.2', but we want numpy's np.double('0.1999999999999') + # to print the unique value, '0.1999999999999'. + + # gh-11031 + # Only in the python2 interactive shell and when stdout is a "real" + # file, the output of the last command is printed to stdout without + # Py_PRINT_RAW (unlike the print statement) so `>>> x` and `>>> print + # x` are potentially different. Make sure they are the same. The only + # way I found to get prompt-like output is using an actual prompt from + # the 'code' module. Again, must use tempfile to get a "real" file. + + # dummy user-input which enters one line and then ctrl-Ds. + def userinput(): + yield 'np.sqrt(2)' + raise EOFError + gen = userinput() + input_func = lambda prompt="": next(gen) + + with TemporaryFile('r+t') as fo, TemporaryFile('r+t') as fe: + orig_stdout, orig_stderr = sys.stdout, sys.stderr + sys.stdout, sys.stderr = fo, fe + + code.interact(local={'np': np}, readfunc=input_func, banner='') + + sys.stdout, sys.stderr = orig_stdout, orig_stderr + + fo.seek(0) + capture = fo.read().strip() + + assert_equal(capture, repr(np.sqrt(2))) + + def test_dragon4(self): + # these tests are adapted from Ryan Juckett's dragon4 implementation, + # see dragon4.c for details. 
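+ # The helpers below wrap np.format_float_positional / + # np.format_float_scientific with the input pre-cast to float32 or float64, + # so e.g. fsci32('10.234') is expected to give the shortest scientific + # form '1.0234e+01', as the asserts that follow spell out.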
+ + fpos32 = lambda x, **k: np.format_float_positional(np.float32(x), **k) + fsci32 = lambda x, **k: np.format_float_scientific(np.float32(x), **k) + fpos64 = lambda x, **k: np.format_float_positional(np.float64(x), **k) + fsci64 = lambda x, **k: np.format_float_scientific(np.float64(x), **k) + + preckwd = lambda prec: {'unique': False, 'precision': prec} + + assert_equal(fpos32('1.0'), "1.") + assert_equal(fsci32('1.0'), "1.e+00") + assert_equal(fpos32('10.234'), "10.234") + assert_equal(fpos32('-10.234'), "-10.234") + assert_equal(fsci32('10.234'), "1.0234e+01") + assert_equal(fsci32('-10.234'), "-1.0234e+01") + assert_equal(fpos32('1000.0'), "1000.") + assert_equal(fpos32('1.0', precision=0), "1.") + assert_equal(fsci32('1.0', precision=0), "1.e+00") + assert_equal(fpos32('10.234', precision=0), "10.") + assert_equal(fpos32('-10.234', precision=0), "-10.") + assert_equal(fsci32('10.234', precision=0), "1.e+01") + assert_equal(fsci32('-10.234', precision=0), "-1.e+01") + assert_equal(fpos32('10.234', precision=2), "10.23") + assert_equal(fsci32('-10.234', precision=2), "-1.02e+01") + assert_equal(fsci64('9.9999999999999995e-08', **preckwd(16)), + '9.9999999999999995e-08') + assert_equal(fsci64('9.8813129168249309e-324', **preckwd(16)), + '9.8813129168249309e-324') + assert_equal(fsci64('9.9999999999999694e-311', **preckwd(16)), + '9.9999999999999694e-311') + + + # test rounding + # 3.1415927410 is closest float32 to np.pi + assert_equal(fpos32('3.14159265358979323846', **preckwd(10)), + "3.1415927410") + assert_equal(fsci32('3.14159265358979323846', **preckwd(10)), + "3.1415927410e+00") + assert_equal(fpos64('3.14159265358979323846', **preckwd(10)), + "3.1415926536") + assert_equal(fsci64('3.14159265358979323846', **preckwd(10)), + "3.1415926536e+00") + # 299792448 is closest float32 to 299792458 + assert_equal(fpos32('299792458.0', **preckwd(5)), "299792448.00000") + assert_equal(fsci32('299792458.0', **preckwd(5)), "2.99792e+08") + assert_equal(fpos64('299792458.0', **preckwd(5)), "299792458.00000") + assert_equal(fsci64('299792458.0', **preckwd(5)), "2.99792e+08") + + assert_equal(fpos32('3.14159265358979323846', **preckwd(25)), + "3.1415927410125732421875000") + assert_equal(fpos64('3.14159265358979323846', **preckwd(50)), + "3.14159265358979311599796346854418516159057617187500") + assert_equal(fpos64('3.14159265358979323846'), "3.141592653589793") + + + # smallest numbers + assert_equal(fpos32(0.5**(126 + 23), unique=False, precision=149), + "0.00000000000000000000000000000000000000000000140129846432" + "4817070923729583289916131280261941876515771757068283889791" + "08268586060148663818836212158203125") + + assert_equal(fpos64(5e-324, unique=False, precision=1074), + "0.00000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000049406564584124654417656" + "8792868221372365059802614324764425585682500675507270208751" + "8652998363616359923797965646954457177309266567103559397963" + "9877479601078187812630071319031140452784581716784898210368" + "8718636056998730723050006387409153564984387312473397273169" + "6151400317153853980741262385655911710266585566867681870395" + "6031062493194527159149245532930545654440112748012970999954" + "1931989409080416563324524757147869014726780159355238611550" + 
"1348035264934720193790268107107491703332226844753335720832" + "4319360923828934583680601060115061698097530783422773183292" + "4790498252473077637592724787465608477820373446969953364701" + "7972677717585125660551199131504891101451037862738167250955" + "8373897335989936648099411642057026370902792427675445652290" + "87538682506419718265533447265625") + + # largest numbers + f32x = np.finfo(np.float32).max + assert_equal(fpos32(f32x, **preckwd(0)), + "340282346638528859811704183484516925440.") + assert_equal(fpos64(np.finfo(np.float64).max, **preckwd(0)), + "1797693134862315708145274237317043567980705675258449965989" + "1747680315726078002853876058955863276687817154045895351438" + "2464234321326889464182768467546703537516986049910576551282" + "0762454900903893289440758685084551339423045832369032229481" + "6580855933212334827479782620414472316873817718091929988125" + "0404026184124858368.") + # Warning: In unique mode only the integer digits necessary for + # uniqueness are computed, the rest are 0. + assert_equal(fpos32(f32x), + "340282350000000000000000000000000000000.") + + # Further tests of zero-padding vs rounding in different combinations + # of unique, fractional, precision, min_digits + # precision can only reduce digits, not add them. + # min_digits can only extend digits, not reduce them. + assert_equal(fpos32(f32x, unique=True, fractional=True, precision=0), + "340282350000000000000000000000000000000.") + assert_equal(fpos32(f32x, unique=True, fractional=True, precision=4), + "340282350000000000000000000000000000000.") + assert_equal(fpos32(f32x, unique=True, fractional=True, min_digits=0), + "340282346638528859811704183484516925440.") + assert_equal(fpos32(f32x, unique=True, fractional=True, min_digits=4), + "340282346638528859811704183484516925440.0000") + assert_equal(fpos32(f32x, unique=True, fractional=True, + min_digits=4, precision=4), + "340282346638528859811704183484516925440.0000") + assert_raises(ValueError, fpos32, f32x, unique=True, fractional=False, + precision=0) + assert_equal(fpos32(f32x, unique=True, fractional=False, precision=4), + "340300000000000000000000000000000000000.") + assert_equal(fpos32(f32x, unique=True, fractional=False, precision=20), + "340282350000000000000000000000000000000.") + assert_equal(fpos32(f32x, unique=True, fractional=False, min_digits=4), + "340282350000000000000000000000000000000.") + assert_equal(fpos32(f32x, unique=True, fractional=False, + min_digits=20), + "340282346638528859810000000000000000000.") + assert_equal(fpos32(f32x, unique=True, fractional=False, + min_digits=15), + "340282346638529000000000000000000000000.") + assert_equal(fpos32(f32x, unique=False, fractional=False, precision=4), + "340300000000000000000000000000000000000.") + # test that unique rounding is preserved when precision is supplied + # but no extra digits need to be printed (gh-18609) + a = np.float64.fromhex('-1p-97') + assert_equal(fsci64(a, unique=True), '-6.310887241768095e-30') + assert_equal(fsci64(a, unique=False, precision=15), + '-6.310887241768094e-30') + assert_equal(fsci64(a, unique=True, precision=15), + '-6.310887241768095e-30') + assert_equal(fsci64(a, unique=True, min_digits=15), + '-6.310887241768095e-30') + assert_equal(fsci64(a, unique=True, precision=15, min_digits=15), + '-6.310887241768095e-30') + # adds/remove digits in unique mode with unbiased rnding + assert_equal(fsci64(a, unique=True, precision=14), + '-6.31088724176809e-30') + assert_equal(fsci64(a, unique=True, min_digits=16), + '-6.3108872417680944e-30') + 
assert_equal(fsci64(a, unique=True, precision=16), + '-6.310887241768095e-30') + assert_equal(fsci64(a, unique=True, min_digits=14), + '-6.310887241768095e-30') + # test min_digits in unique mode with different rounding cases + assert_equal(fsci64('1e120', min_digits=3), '1.000e+120') + assert_equal(fsci64('1e100', min_digits=3), '1.000e+100') + + # test trailing zeros + assert_equal(fpos32('1.0', unique=False, precision=3), "1.000") + assert_equal(fpos64('1.0', unique=False, precision=3), "1.000") + assert_equal(fsci32('1.0', unique=False, precision=3), "1.000e+00") + assert_equal(fsci64('1.0', unique=False, precision=3), "1.000e+00") + assert_equal(fpos32('1.5', unique=False, precision=3), "1.500") + assert_equal(fpos64('1.5', unique=False, precision=3), "1.500") + assert_equal(fsci32('1.5', unique=False, precision=3), "1.500e+00") + assert_equal(fsci64('1.5', unique=False, precision=3), "1.500e+00") + # gh-10713 + assert_equal(fpos64('324', unique=False, precision=5, + fractional=False), "324.00") + + + def test_dragon4_interface(self): + tps = [np.float16, np.float32, np.float64] + if hasattr(np, 'float128'): + tps.append(np.float128) + + fpos = np.format_float_positional + fsci = np.format_float_scientific + + for tp in tps: + # test padding + assert_equal(fpos(tp('1.0'), pad_left=4, pad_right=4), " 1. ") + assert_equal(fpos(tp('-1.0'), pad_left=4, pad_right=4), " -1. ") + assert_equal(fpos(tp('-10.2'), + pad_left=4, pad_right=4), " -10.2 ") + + # test exp_digits + assert_equal(fsci(tp('1.23e1'), exp_digits=5), "1.23e+00001") + + # test fixed (non-unique) mode + assert_equal(fpos(tp('1.0'), unique=False, precision=4), "1.0000") + assert_equal(fsci(tp('1.0'), unique=False, precision=4), + "1.0000e+00") + + # test trimming + # trim of 'k' or '.' only affects non-unique mode, since unique + # mode will not output trailing 0s. + assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='k'), + "1.0000") + + assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='.'), + "1.") + assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='.'), + "1.2" if tp != np.float16 else "1.2002") + + assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='0'), + "1.0") + assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='0'), + "1.2" if tp != np.float16 else "1.2002") + assert_equal(fpos(tp('1.'), trim='0'), "1.0") + + assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='-'), + "1") + assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='-'), + "1.2" if tp != np.float16 else "1.2002") + assert_equal(fpos(tp('1.'), trim='-'), "1") + + @pytest.mark.skipif(not platform.machine().startswith("ppc64"), + reason="only applies to ppc float128 values") + def test_ppc64_ibm_double_double128(self): + # check that the precision decreases once we get into the subnormal + # range. Unlike float64, this starts around 1e-292 instead of 1e-308, + # which happens when the first double is normal and the second is + # subnormal. 
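+ # (Rough arithmetic behind that claim: IBM double-double stores a value as + # the sum of two float64s, the low part about 2**-53 times the high part, + # so the low part goes subnormal near 1e-308 * 2**53 ~= 1e-292 rather than + # at float64's own ~1e-308 limit.)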
+ x = np.float128('2.123123123123123123123123123123123e-286') + got = [str(x/np.float128('2e' + str(i))) for i in range(0,40)] + expected = [ + "1.06156156156156156156156156156157e-286", + "1.06156156156156156156156156156158e-287", + "1.06156156156156156156156156156159e-288", + "1.0615615615615615615615615615616e-289", + "1.06156156156156156156156156156157e-290", + "1.06156156156156156156156156156156e-291", + "1.0615615615615615615615615615616e-292", + "1.0615615615615615615615615615615e-293", + "1.061561561561561561561561561562e-294", + "1.06156156156156156156156156155e-295", + "1.0615615615615615615615615616e-296", + "1.06156156156156156156156156e-297", + "1.06156156156156156156156157e-298", + "1.0615615615615615615615616e-299", + "1.06156156156156156156156e-300", + "1.06156156156156156156155e-301", + "1.0615615615615615615616e-302", + "1.061561561561561561562e-303", + "1.06156156156156156156e-304", + "1.0615615615615615618e-305", + "1.06156156156156156e-306", + "1.06156156156156157e-307", + "1.0615615615615616e-308", + "1.06156156156156e-309", + "1.06156156156157e-310", + "1.0615615615616e-311", + "1.06156156156e-312", + "1.06156156154e-313", + "1.0615615616e-314", + "1.06156156e-315", + "1.06156155e-316", + "1.061562e-317", + "1.06156e-318", + "1.06155e-319", + "1.0617e-320", + "1.06e-321", + "1.04e-322", + "1e-323", + "0.0", + "0.0"] + assert_equal(got, expected) + + # Note: we follow glibc behavior, but it (or gcc) might not be right. + # In particular we can get two values that print the same but are not + # equal: + a = np.float128('2')/np.float128('3') + b = np.float128(str(a)) + assert_equal(str(a), str(b)) + assert_(a != b) + + def test_float32_roundtrip(self): + # gh-9360 + x = np.float32(1024 - 2**-14) + y = np.float32(1024 - 2**-13) + assert_(repr(x) != repr(y)) + assert_equal(np.float32(repr(x)), x) + assert_equal(np.float32(repr(y)), y) + + def test_float64_vs_python(self): + # gh-2643, gh-6136, gh-6908 + assert_equal(repr(np.float64(0.1)), repr(0.1)) + assert_(repr(np.float64(0.20000000000000004)) != repr(0.2)) diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_shape_base.py b/MLPY/Lib/site-packages/numpy/core/tests/test_shape_base.py new file mode 100644 index 0000000000000000000000000000000000000000..8fe43d87ba6cdf307cc1b99f60dd1c42ac3e8b33 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/test_shape_base.py @@ -0,0 +1,762 @@ +import pytest +import numpy as np +from numpy.core import ( + array, arange, atleast_1d, atleast_2d, atleast_3d, block, vstack, hstack, + newaxis, concatenate, stack + ) +from numpy.core.shape_base import (_block_dispatcher, _block_setup, + _block_concatenate, _block_slicing) +from numpy.testing import ( + assert_, assert_raises, assert_array_equal, assert_equal, + assert_raises_regex, assert_warns, IS_PYPY + ) + + +class TestAtleast1d: + def test_0D_array(self): + a = array(1) + b = array(2) + res = [atleast_1d(a), atleast_1d(b)] + desired = [array([1]), array([2])] + assert_array_equal(res, desired) + + def test_1D_array(self): + a = array([1, 2]) + b = array([2, 3]) + res = [atleast_1d(a), atleast_1d(b)] + desired = [array([1, 2]), array([2, 3])] + assert_array_equal(res, desired) + + def test_2D_array(self): + a = array([[1, 2], [1, 2]]) + b = array([[2, 3], [2, 3]]) + res = [atleast_1d(a), atleast_1d(b)] + desired = [a, b] + assert_array_equal(res, desired) + + def test_3D_array(self): + a = array([[1, 2], [1, 2]]) + b = array([[2, 3], [2, 3]]) + a = array([a, a]) + b = array([b, b]) + res = [atleast_1d(a), atleast_1d(b)] + desired = 
[a, b] + assert_array_equal(res, desired) + + def test_r1array(self): + """ Test to make sure equivalent Travis O's r1array function + """ + assert_(atleast_1d(3).shape == (1,)) + assert_(atleast_1d(3j).shape == (1,)) + assert_(atleast_1d(3.0).shape == (1,)) + assert_(atleast_1d([[2, 3], [4, 5]]).shape == (2, 2)) + + +class TestAtleast2d: + def test_0D_array(self): + a = array(1) + b = array(2) + res = [atleast_2d(a), atleast_2d(b)] + desired = [array([[1]]), array([[2]])] + assert_array_equal(res, desired) + + def test_1D_array(self): + a = array([1, 2]) + b = array([2, 3]) + res = [atleast_2d(a), atleast_2d(b)] + desired = [array([[1, 2]]), array([[2, 3]])] + assert_array_equal(res, desired) + + def test_2D_array(self): + a = array([[1, 2], [1, 2]]) + b = array([[2, 3], [2, 3]]) + res = [atleast_2d(a), atleast_2d(b)] + desired = [a, b] + assert_array_equal(res, desired) + + def test_3D_array(self): + a = array([[1, 2], [1, 2]]) + b = array([[2, 3], [2, 3]]) + a = array([a, a]) + b = array([b, b]) + res = [atleast_2d(a), atleast_2d(b)] + desired = [a, b] + assert_array_equal(res, desired) + + def test_r2array(self): + """ Test to make sure equivalent Travis O's r2array function + """ + assert_(atleast_2d(3).shape == (1, 1)) + assert_(atleast_2d([3j, 1]).shape == (1, 2)) + assert_(atleast_2d([[[3, 1], [4, 5]], [[3, 5], [1, 2]]]).shape == (2, 2, 2)) + + +class TestAtleast3d: + def test_0D_array(self): + a = array(1) + b = array(2) + res = [atleast_3d(a), atleast_3d(b)] + desired = [array([[[1]]]), array([[[2]]])] + assert_array_equal(res, desired) + + def test_1D_array(self): + a = array([1, 2]) + b = array([2, 3]) + res = [atleast_3d(a), atleast_3d(b)] + desired = [array([[[1], [2]]]), array([[[2], [3]]])] + assert_array_equal(res, desired) + + def test_2D_array(self): + a = array([[1, 2], [1, 2]]) + b = array([[2, 3], [2, 3]]) + res = [atleast_3d(a), atleast_3d(b)] + desired = [a[:,:, newaxis], b[:,:, newaxis]] + assert_array_equal(res, desired) + + def test_3D_array(self): + a = array([[1, 2], [1, 2]]) + b = array([[2, 3], [2, 3]]) + a = array([a, a]) + b = array([b, b]) + res = [atleast_3d(a), atleast_3d(b)] + desired = [a, b] + assert_array_equal(res, desired) + + +class TestHstack: + def test_non_iterable(self): + assert_raises(TypeError, hstack, 1) + + def test_empty_input(self): + assert_raises(ValueError, hstack, ()) + + def test_0D_array(self): + a = array(1) + b = array(2) + res = hstack([a, b]) + desired = array([1, 2]) + assert_array_equal(res, desired) + + def test_1D_array(self): + a = array([1]) + b = array([2]) + res = hstack([a, b]) + desired = array([1, 2]) + assert_array_equal(res, desired) + + def test_2D_array(self): + a = array([[1], [2]]) + b = array([[1], [2]]) + res = hstack([a, b]) + desired = array([[1, 1], [2, 2]]) + assert_array_equal(res, desired) + + def test_generator(self): + with assert_warns(FutureWarning): + hstack((np.arange(3) for _ in range(2))) + with assert_warns(FutureWarning): + hstack(map(lambda x: x, np.ones((3, 2)))) + + +class TestVstack: + def test_non_iterable(self): + assert_raises(TypeError, vstack, 1) + + def test_empty_input(self): + assert_raises(ValueError, vstack, ()) + + def test_0D_array(self): + a = array(1) + b = array(2) + res = vstack([a, b]) + desired = array([[1], [2]]) + assert_array_equal(res, desired) + + def test_1D_array(self): + a = array([1]) + b = array([2]) + res = vstack([a, b]) + desired = array([[1], [2]]) + assert_array_equal(res, desired) + + def test_2D_array(self): + a = array([[1], [2]]) + b = array([[1], 
[2]]) + res = vstack([a, b]) + desired = array([[1], [2], [1], [2]]) + assert_array_equal(res, desired) + + def test_2D_array2(self): + a = array([1, 2]) + b = array([1, 2]) + res = vstack([a, b]) + desired = array([[1, 2], [1, 2]]) + assert_array_equal(res, desired) + + def test_generator(self): + with assert_warns(FutureWarning): + vstack((np.arange(3) for _ in range(2))) + + +class TestConcatenate: + def test_returns_copy(self): + a = np.eye(3) + b = np.concatenate([a]) + b[0, 0] = 2 + assert b[0, 0] != a[0, 0] + + def test_exceptions(self): + # test axis must be in bounds + for ndim in [1, 2, 3]: + a = np.ones((1,)*ndim) + np.concatenate((a, a), axis=0) # OK + assert_raises(np.AxisError, np.concatenate, (a, a), axis=ndim) + assert_raises(np.AxisError, np.concatenate, (a, a), axis=-(ndim + 1)) + + # Scalars cannot be concatenated + assert_raises(ValueError, concatenate, (0,)) + assert_raises(ValueError, concatenate, (np.array(0),)) + + # dimensionality must match + assert_raises_regex( + ValueError, + r"all the input arrays must have same number of dimensions, but " + r"the array at index 0 has 1 dimension\(s\) and the array at " + r"index 1 has 2 dimension\(s\)", + np.concatenate, (np.zeros(1), np.zeros((1, 1)))) + + # test shapes must match except for concatenation axis + a = np.ones((1, 2, 3)) + b = np.ones((2, 2, 3)) + axis = list(range(3)) + for i in range(3): + np.concatenate((a, b), axis=axis[0]) # OK + assert_raises_regex( + ValueError, + "all the input array dimensions for the concatenation axis " + "must match exactly, but along dimension {}, the array at " + "index 0 has size 1 and the array at index 1 has size 2" + .format(i), + np.concatenate, (a, b), axis=axis[1]) + assert_raises(ValueError, np.concatenate, (a, b), axis=axis[2]) + a = np.moveaxis(a, -1, 0) + b = np.moveaxis(b, -1, 0) + axis.append(axis.pop(0)) + + # No arrays to concatenate raises ValueError + assert_raises(ValueError, concatenate, ()) + + def test_concatenate_axis_None(self): + a = np.arange(4, dtype=np.float64).reshape((2, 2)) + b = list(range(3)) + c = ['x'] + r = np.concatenate((a, a), axis=None) + assert_equal(r.dtype, a.dtype) + assert_equal(r.ndim, 1) + r = np.concatenate((a, b), axis=None) + assert_equal(r.size, a.size + len(b)) + assert_equal(r.dtype, a.dtype) + r = np.concatenate((a, b, c), axis=None, dtype="U") + d = array(['0.0', '1.0', '2.0', '3.0', + '0', '1', '2', 'x']) + assert_array_equal(r, d) + + out = np.zeros(a.size + len(b)) + r = np.concatenate((a, b), axis=None) + rout = np.concatenate((a, b), axis=None, out=out) + assert_(out is rout) + assert_equal(r, rout) + + def test_large_concatenate_axis_None(self): + # When no axis is given, concatenate uses flattened versions. + # This also had a bug with many arrays (see gh-5979). 
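+ # (axis=None ravels every input before joining, so concatenating the 99 + # zero-dimensional pieces of x below should reproduce x itself.)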
+ x = np.arange(1, 100) + r = np.concatenate(x, None) + assert_array_equal(x, r) + + # This should probably be deprecated: + r = np.concatenate(x, 100) # axis is >= MAXDIMS + assert_array_equal(x, r) + + def test_concatenate(self): + # Test concatenate function + # One sequence returns unmodified (but as array) + r4 = list(range(4)) + assert_array_equal(concatenate((r4,)), r4) + # Any sequence + assert_array_equal(concatenate((tuple(r4),)), r4) + assert_array_equal(concatenate((array(r4),)), r4) + # 1D default concatenation + r3 = list(range(3)) + assert_array_equal(concatenate((r4, r3)), r4 + r3) + # Mixed sequence types + assert_array_equal(concatenate((tuple(r4), r3)), r4 + r3) + assert_array_equal(concatenate((array(r4), r3)), r4 + r3) + # Explicit axis specification + assert_array_equal(concatenate((r4, r3), 0), r4 + r3) + # Including negative + assert_array_equal(concatenate((r4, r3), -1), r4 + r3) + # 2D + a23 = array([[10, 11, 12], [13, 14, 15]]) + a13 = array([[0, 1, 2]]) + res = array([[10, 11, 12], [13, 14, 15], [0, 1, 2]]) + assert_array_equal(concatenate((a23, a13)), res) + assert_array_equal(concatenate((a23, a13), 0), res) + assert_array_equal(concatenate((a23.T, a13.T), 1), res.T) + assert_array_equal(concatenate((a23.T, a13.T), -1), res.T) + # Arrays must match shape + assert_raises(ValueError, concatenate, (a23.T, a13.T), 0) + # 3D + res = arange(2 * 3 * 7).reshape((2, 3, 7)) + a0 = res[..., :4] + a1 = res[..., 4:6] + a2 = res[..., 6:] + assert_array_equal(concatenate((a0, a1, a2), 2), res) + assert_array_equal(concatenate((a0, a1, a2), -1), res) + assert_array_equal(concatenate((a0.T, a1.T, a2.T), 0), res.T) + + out = res.copy() + rout = concatenate((a0, a1, a2), 2, out=out) + assert_(out is rout) + assert_equal(res, rout) + + @pytest.mark.skipif(IS_PYPY, reason="PYPY handles sq_concat, nb_add differently than cpython") + def test_operator_concat(self): + import operator + a = array([1, 2]) + b = array([3, 4]) + n = [1,2] + res = array([1, 2, 3, 4]) + assert_raises(TypeError, operator.concat, a, b) + assert_raises(TypeError, operator.concat, a, n) + assert_raises(TypeError, operator.concat, n, a) + assert_raises(TypeError, operator.concat, a, 1) + assert_raises(TypeError, operator.concat, 1, a) + + def test_bad_out_shape(self): + a = array([1, 2]) + b = array([3, 4]) + + assert_raises(ValueError, concatenate, (a, b), out=np.empty(5)) + assert_raises(ValueError, concatenate, (a, b), out=np.empty((4,1))) + assert_raises(ValueError, concatenate, (a, b), out=np.empty((1,4))) + concatenate((a, b), out=np.empty(4)) + + @pytest.mark.parametrize("axis", [None, 0]) + @pytest.mark.parametrize("out_dtype", ["c8", "f4", "f8", ">f8", "i8", "S4"]) + @pytest.mark.parametrize("casting", + ['no', 'equiv', 'safe', 'same_kind', 'unsafe']) + def test_out_and_dtype(self, axis, out_dtype, casting): + # Compare usage of `out=out` with `dtype=out.dtype` + out = np.empty(4, dtype=out_dtype) + to_concat = (array([1.1, 2.2]), array([3.3, 4.4])) + + if not np.can_cast(to_concat[0], out_dtype, casting=casting): + with assert_raises(TypeError): + concatenate(to_concat, out=out, axis=axis, casting=casting) + with assert_raises(TypeError): + concatenate(to_concat, dtype=out.dtype, + axis=axis, casting=casting) + else: + res_out = concatenate(to_concat, out=out, + axis=axis, casting=casting) + res_dtype = concatenate(to_concat, dtype=out.dtype, + axis=axis, casting=casting) + assert res_out is out + assert_array_equal(out, res_dtype) + assert res_dtype.dtype == out_dtype + + with 
assert_raises(TypeError): + concatenate(to_concat, out=out, dtype=out_dtype, axis=axis) + + @pytest.mark.parametrize("axis", [None, 0]) + @pytest.mark.parametrize("string_dt", ["S", "U", "S0", "U0"]) + @pytest.mark.parametrize("arrs", + [([0.],), ([0.], [1]), ([0], ["string"], [1.])]) + def test_dtype_with_promotion(self, arrs, string_dt, axis): + # Note that U0 and S0 should be deprecated eventually and changed to + # actually give the empty string result (together with `np.array`) + res = np.concatenate(arrs, axis=axis, dtype=string_dt, casting="unsafe") + # The actual dtype should be identical to a cast (of a double array): + assert res.dtype == np.array(1.).astype(string_dt).dtype + + @pytest.mark.parametrize("axis", [None, 0]) + def test_string_dtype_does_not_inspect(self, axis): + with pytest.raises(TypeError): + np.concatenate(([None], [1]), dtype="S", axis=axis) + with pytest.raises(TypeError): + np.concatenate(([None], [1]), dtype="U", axis=axis) + + @pytest.mark.parametrize("axis", [None, 0]) + def test_subarray_error(self, axis): + with pytest.raises(TypeError, match=".*subarray dtype"): + np.concatenate(([1], [1]), dtype="(2,)i", axis=axis) + + +def test_stack(): + # non-iterable input + assert_raises(TypeError, stack, 1) + + # 0d input + for input_ in [(1, 2, 3), + [np.int32(1), np.int32(2), np.int32(3)], + [np.array(1), np.array(2), np.array(3)]]: + assert_array_equal(stack(input_), [1, 2, 3]) + # 1d input examples + a = np.array([1, 2, 3]) + b = np.array([4, 5, 6]) + r1 = array([[1, 2, 3], [4, 5, 6]]) + assert_array_equal(np.stack((a, b)), r1) + assert_array_equal(np.stack((a, b), axis=1), r1.T) + # all input types + assert_array_equal(np.stack(list([a, b])), r1) + assert_array_equal(np.stack(array([a, b])), r1) + # all shapes for 1d input + arrays = [np.random.randn(3) for _ in range(10)] + axes = [0, 1, -1, -2] + expected_shapes = [(10, 3), (3, 10), (3, 10), (10, 3)] + for axis, expected_shape in zip(axes, expected_shapes): + assert_equal(np.stack(arrays, axis).shape, expected_shape) + assert_raises_regex(np.AxisError, 'out of bounds', stack, arrays, axis=2) + assert_raises_regex(np.AxisError, 'out of bounds', stack, arrays, axis=-3) + # all shapes for 2d input + arrays = [np.random.randn(3, 4) for _ in range(10)] + axes = [0, 1, 2, -1, -2, -3] + expected_shapes = [(10, 3, 4), (3, 10, 4), (3, 4, 10), + (3, 4, 10), (3, 10, 4), (10, 3, 4)] + for axis, expected_shape in zip(axes, expected_shapes): + assert_equal(np.stack(arrays, axis).shape, expected_shape) + # empty arrays + assert_(stack([[], [], []]).shape == (3, 0)) + assert_(stack([[], [], []], axis=1).shape == (0, 3)) + # out + out = np.zeros_like(r1) + np.stack((a, b), out=out) + assert_array_equal(out, r1) + # edge cases + assert_raises_regex(ValueError, 'need at least one array', stack, []) + assert_raises_regex(ValueError, 'must have the same shape', + stack, [1, np.arange(3)]) + assert_raises_regex(ValueError, 'must have the same shape', + stack, [np.arange(3), 1]) + assert_raises_regex(ValueError, 'must have the same shape', + stack, [np.arange(3), 1], axis=1) + assert_raises_regex(ValueError, 'must have the same shape', + stack, [np.zeros((3, 3)), np.zeros(3)], axis=1) + assert_raises_regex(ValueError, 'must have the same shape', + stack, [np.arange(2), np.arange(3)]) + # generator is deprecated + with assert_warns(FutureWarning): + result = stack((x for x in range(3))) + assert_array_equal(result, np.array([0, 1, 2])) + + +class TestBlock: + @pytest.fixture(params=['block', 'force_concatenate', 'force_slicing']) 
+ def block(self, request): + # blocking small arrays and large arrays go through different paths. + # the algorithm is triggered depending on the number of element + # copies required. + # We define a test fixture that forces most tests to go through + # both code paths. + # Ultimately, this should be removed if a single algorithm is found + # to be faster for both small and large arrays. + def _block_force_concatenate(arrays): + arrays, list_ndim, result_ndim, _ = _block_setup(arrays) + return _block_concatenate(arrays, list_ndim, result_ndim) + + def _block_force_slicing(arrays): + arrays, list_ndim, result_ndim, _ = _block_setup(arrays) + return _block_slicing(arrays, list_ndim, result_ndim) + + if request.param == 'force_concatenate': + return _block_force_concatenate + elif request.param == 'force_slicing': + return _block_force_slicing + elif request.param == 'block': + return block + else: + raise ValueError('Unknown blocking request. There is a typo in the tests.') + + def test_returns_copy(self, block): + a = np.eye(3) + b = block(a) + b[0, 0] = 2 + assert b[0, 0] != a[0, 0] + + def test_block_total_size_estimate(self, block): + _, _, _, total_size = _block_setup([1]) + assert total_size == 1 + + _, _, _, total_size = _block_setup([[1]]) + assert total_size == 1 + + _, _, _, total_size = _block_setup([[1, 1]]) + assert total_size == 2 + + _, _, _, total_size = _block_setup([[1], [1]]) + assert total_size == 2 + + _, _, _, total_size = _block_setup([[1, 2], [3, 4]]) + assert total_size == 4 + + def test_block_simple_row_wise(self, block): + a_2d = np.ones((2, 2)) + b_2d = 2 * a_2d + desired = np.array([[1, 1, 2, 2], + [1, 1, 2, 2]]) + result = block([a_2d, b_2d]) + assert_equal(desired, result) + + def test_block_simple_column_wise(self, block): + a_2d = np.ones((2, 2)) + b_2d = 2 * a_2d + expected = np.array([[1, 1], + [1, 1], + [2, 2], + [2, 2]]) + result = block([[a_2d], [b_2d]]) + assert_equal(expected, result) + + def test_block_with_1d_arrays_row_wise(self, block): + # # # 1-D vectors are treated as row arrays + a = np.array([1, 2, 3]) + b = np.array([2, 3, 4]) + expected = np.array([1, 2, 3, 2, 3, 4]) + result = block([a, b]) + assert_equal(expected, result) + + def test_block_with_1d_arrays_multiple_rows(self, block): + a = np.array([1, 2, 3]) + b = np.array([2, 3, 4]) + expected = np.array([[1, 2, 3, 2, 3, 4], + [1, 2, 3, 2, 3, 4]]) + result = block([[a, b], [a, b]]) + assert_equal(expected, result) + + def test_block_with_1d_arrays_column_wise(self, block): + # # # 1-D vectors are treated as row arrays + a_1d = np.array([1, 2, 3]) + b_1d = np.array([2, 3, 4]) + expected = np.array([[1, 2, 3], + [2, 3, 4]]) + result = block([[a_1d], [b_1d]]) + assert_equal(expected, result) + + def test_block_mixed_1d_and_2d(self, block): + a_2d = np.ones((2, 2)) + b_1d = np.array([2, 2]) + result = block([[a_2d], [b_1d]]) + expected = np.array([[1, 1], + [1, 1], + [2, 2]]) + assert_equal(expected, result) + + def test_block_complicated(self, block): + # a bit more complicated + one_2d = np.array([[1, 1, 1]]) + two_2d = np.array([[2, 2, 2]]) + three_2d = np.array([[3, 3, 3, 3, 3, 3]]) + four_1d = np.array([4, 4, 4, 4, 4, 4]) + five_0d = np.array(5) + six_1d = np.array([6, 6, 6, 6, 6]) + zero_2d = np.zeros((2, 6)) + + expected = np.array([[1, 1, 1, 2, 2, 2], + [3, 3, 3, 3, 3, 3], + [4, 4, 4, 4, 4, 4], + [5, 6, 6, 6, 6, 6], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0]]) + + result = block([[one_2d, two_2d], + [three_2d], + [four_1d], + [five_0d, six_1d], + [zero_2d]]) + assert_equal(result, 
expected) + + def test_nested(self, block): + one = np.array([1, 1, 1]) + two = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]]) + three = np.array([3, 3, 3]) + four = np.array([4, 4, 4]) + five = np.array(5) + six = np.array([6, 6, 6, 6, 6]) + zero = np.zeros((2, 6)) + + result = block([ + [ + block([ + [one], + [three], + [four] + ]), + two + ], + [five, six], + [zero] + ]) + expected = np.array([[1, 1, 1, 2, 2, 2], + [3, 3, 3, 2, 2, 2], + [4, 4, 4, 2, 2, 2], + [5, 6, 6, 6, 6, 6], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0]]) + + assert_equal(result, expected) + + def test_3d(self, block): + a000 = np.ones((2, 2, 2), int) * 1 + + a100 = np.ones((3, 2, 2), int) * 2 + a010 = np.ones((2, 3, 2), int) * 3 + a001 = np.ones((2, 2, 3), int) * 4 + + a011 = np.ones((2, 3, 3), int) * 5 + a101 = np.ones((3, 2, 3), int) * 6 + a110 = np.ones((3, 3, 2), int) * 7 + + a111 = np.ones((3, 3, 3), int) * 8 + + result = block([ + [ + [a000, a001], + [a010, a011], + ], + [ + [a100, a101], + [a110, a111], + ] + ]) + expected = array([[[1, 1, 4, 4, 4], + [1, 1, 4, 4, 4], + [3, 3, 5, 5, 5], + [3, 3, 5, 5, 5], + [3, 3, 5, 5, 5]], + + [[1, 1, 4, 4, 4], + [1, 1, 4, 4, 4], + [3, 3, 5, 5, 5], + [3, 3, 5, 5, 5], + [3, 3, 5, 5, 5]], + + [[2, 2, 6, 6, 6], + [2, 2, 6, 6, 6], + [7, 7, 8, 8, 8], + [7, 7, 8, 8, 8], + [7, 7, 8, 8, 8]], + + [[2, 2, 6, 6, 6], + [2, 2, 6, 6, 6], + [7, 7, 8, 8, 8], + [7, 7, 8, 8, 8], + [7, 7, 8, 8, 8]], + + [[2, 2, 6, 6, 6], + [2, 2, 6, 6, 6], + [7, 7, 8, 8, 8], + [7, 7, 8, 8, 8], + [7, 7, 8, 8, 8]]]) + + assert_array_equal(result, expected) + + def test_block_with_mismatched_shape(self, block): + a = np.array([0, 0]) + b = np.eye(2) + assert_raises(ValueError, block, [a, b]) + assert_raises(ValueError, block, [b, a]) + + to_block = [[np.ones((2,3)), np.ones((2,2))], + [np.ones((2,2)), np.ones((2,2))]] + assert_raises(ValueError, block, to_block) + def test_no_lists(self, block): + assert_equal(block(1), np.array(1)) + assert_equal(block(np.eye(3)), np.eye(3)) + + def test_invalid_nesting(self, block): + msg = 'depths are mismatched' + assert_raises_regex(ValueError, msg, block, [1, [2]]) + assert_raises_regex(ValueError, msg, block, [1, []]) + assert_raises_regex(ValueError, msg, block, [[1], 2]) + assert_raises_regex(ValueError, msg, block, [[], 2]) + assert_raises_regex(ValueError, msg, block, [ + [[1], [2]], + [[3, 4]], + [5] # missing brackets + ]) + + def test_empty_lists(self, block): + assert_raises_regex(ValueError, 'empty', block, []) + assert_raises_regex(ValueError, 'empty', block, [[]]) + assert_raises_regex(ValueError, 'empty', block, [[1], []]) + + def test_tuple(self, block): + assert_raises_regex(TypeError, 'tuple', block, ([1, 2], [3, 4])) + assert_raises_regex(TypeError, 'tuple', block, [(1, 2), (3, 4)]) + + def test_different_ndims(self, block): + a = 1. + b = 2 * np.ones((1, 2)) + c = 3 * np.ones((1, 1, 3)) + + result = block([a, b, c]) + expected = np.array([[[1., 2., 2., 3., 3., 3.]]]) + + assert_equal(result, expected) + + def test_different_ndims_depths(self, block): + a = 1. 
+ b = 2 * np.ones((1, 2)) + c = 3 * np.ones((1, 2, 3)) + + result = block([[a, b], [c]]) + expected = np.array([[[1., 2., 2.], + [3., 3., 3.], + [3., 3., 3.]]]) + + assert_equal(result, expected) + + def test_block_memory_order(self, block): + # 3D + arr_c = np.zeros((3,)*3, order='C') + arr_f = np.zeros((3,)*3, order='F') + + b_c = [[[arr_c, arr_c], + [arr_c, arr_c]], + [[arr_c, arr_c], + [arr_c, arr_c]]] + + b_f = [[[arr_f, arr_f], + [arr_f, arr_f]], + [[arr_f, arr_f], + [arr_f, arr_f]]] + + assert block(b_c).flags['C_CONTIGUOUS'] + assert block(b_f).flags['F_CONTIGUOUS'] + + arr_c = np.zeros((3, 3), order='C') + arr_f = np.zeros((3, 3), order='F') + # 2D + b_c = [[arr_c, arr_c], + [arr_c, arr_c]] + + b_f = [[arr_f, arr_f], + [arr_f, arr_f]] + + assert block(b_c).flags['C_CONTIGUOUS'] + assert block(b_f).flags['F_CONTIGUOUS'] + + +def test_block_dispatcher(): + class ArrayLike: + pass + a = ArrayLike() + b = ArrayLike() + c = ArrayLike() + assert_equal(list(_block_dispatcher(a)), [a]) + assert_equal(list(_block_dispatcher([a])), [a]) + assert_equal(list(_block_dispatcher([a, b])), [a, b]) + assert_equal(list(_block_dispatcher([[a], [b, [c]]])), [a, b, c]) + # don't recurse into non-lists + assert_equal(list(_block_dispatcher((a, b))), [(a, b)]) diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_simd.py b/MLPY/Lib/site-packages/numpy/core/tests/test_simd.py new file mode 100644 index 0000000000000000000000000000000000000000..4de54771030c2e8bf705620f22b81e4de32a525e --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/test_simd.py @@ -0,0 +1,975 @@ +# NOTE: Please avoid the use of numpy.testing since NPYV intrinsics +# may be involved in their functionality. +import pytest, math, re +import itertools +from numpy.core._simd import targets +from numpy.core._multiarray_umath import __cpu_baseline__ + +class _Test_Utility: + # submodule of the desired SIMD extension, e.g. targets["AVX512F"] + npyv = None + # the current data type suffix e.g. 's8' + sfx = None + # target name can be 'baseline' or one or more of CPU features + target_name = None + + def __getattr__(self, attr): + """ + To call NPV intrinsics without the attribute 'npyv' and + auto suffixing intrinsics according to class attribute 'sfx' + """ + return getattr(self.npyv, attr + "_" + self.sfx) + + def _data(self, start=None, count=None, reverse=False): + """ + Create list of consecutive numbers according to number of vector's lanes. 
+ """ + if start is None: + start = 1 + if count is None: + count = self.nlanes + rng = range(start, start + count) + if reverse: + rng = reversed(rng) + if self._is_fp(): + return [x / 1.0 for x in rng] + return list(rng) + + def _is_unsigned(self): + return self.sfx[0] == 'u' + + def _is_signed(self): + return self.sfx[0] == 's' + + def _is_fp(self): + return self.sfx[0] == 'f' + + def _scalar_size(self): + return int(self.sfx[1:]) + + def _int_clip(self, seq): + if self._is_fp(): + return seq + max_int = self._int_max() + min_int = self._int_min() + return [min(max(v, min_int), max_int) for v in seq] + + def _int_max(self): + if self._is_fp(): + return None + max_u = self._to_unsigned(self.setall(-1))[0] + if self._is_signed(): + return max_u // 2 + return max_u + + def _int_min(self): + if self._is_fp(): + return None + if self._is_unsigned(): + return 0 + return -(self._int_max() + 1) + + def _true_mask(self): + max_unsig = getattr(self.npyv, "setall_u" + self.sfx[1:])(-1) + return max_unsig[0] + + def _to_unsigned(self, vector): + if isinstance(vector, (list, tuple)): + return getattr(self.npyv, "load_u" + self.sfx[1:])(vector) + else: + sfx = vector.__name__.replace("npyv_", "") + if sfx[0] == "b": + cvt_intrin = "cvt_u{0}_b{0}" + else: + cvt_intrin = "reinterpret_u{0}_{1}" + return getattr(self.npyv, cvt_intrin.format(sfx[1:], sfx))(vector) + + def _pinfinity(self): + v = self.npyv.setall_u32(0x7f800000) + return self.npyv.reinterpret_f32_u32(v)[0] + + def _ninfinity(self): + v = self.npyv.setall_u32(0xff800000) + return self.npyv.reinterpret_f32_u32(v)[0] + + def _nan(self): + v = self.npyv.setall_u32(0x7fc00000) + return self.npyv.reinterpret_f32_u32(v)[0] + + def _cpu_features(self): + target = self.target_name + if target == "baseline": + target = __cpu_baseline__ + else: + target = target.split('__') # multi-target separator + return ' '.join(target) + +class _SIMD_BOOL(_Test_Utility): + """ + To test all boolean vector types at once + """ + def _data(self, start=None, count=None, reverse=False): + nlanes = getattr(self.npyv, "nlanes_u" + self.sfx[1:]) + true_mask = self._true_mask() + rng = range(nlanes) + if reverse: + rng = reversed(rng) + return [true_mask if x % 2 else 0 for x in rng] + + def _load_b(self, data): + len_str = self.sfx[1:] + load = getattr(self.npyv, "load_u" + len_str) + cvt = getattr(self.npyv, f"cvt_b{len_str}_u{len_str}") + return cvt(load(data)) + + def test_operators_logical(self): + """ + Logical operations for boolean types. 
+ Test intrinsics: + npyv_xor_##SFX, npyv_and_##SFX, npyv_or_##SFX, npyv_not_##SFX + """ + data_a = self._data() + data_b = self._data(reverse=True) + vdata_a = self._load_b(data_a) + vdata_b = self._load_b(data_b) + + data_and = [a & b for a, b in zip(data_a, data_b)] + vand = getattr(self, "and")(vdata_a, vdata_b) + assert vand == data_and + + data_or = [a | b for a, b in zip(data_a, data_b)] + vor = getattr(self, "or")(vdata_a, vdata_b) + assert vor == data_or + + data_xor = [a ^ b for a, b in zip(data_a, data_b)] + vxor = getattr(self, "xor")(vdata_a, vdata_b) + assert vxor == data_xor + + vnot = getattr(self, "not")(vdata_a) + assert vnot == data_b + + def test_tobits(self): + data2bits = lambda data: sum([int(x != 0) << i for i, x in enumerate(data, 0)]) + for data in (self._data(), self._data(reverse=True)): + vdata = self._load_b(data) + data_bits = data2bits(data) + tobits = bin(self.tobits(vdata)) + assert tobits == bin(data_bits) + +class _SIMD_INT(_Test_Utility): + """ + To test all integer vector types at once + """ + def test_operators_shift(self): + if self.sfx in ("u8", "s8"): + return + + data_a = self._data(self._int_max() - self.nlanes) + data_b = self._data(self._int_min(), reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + + for count in range(self._scalar_size()): + # load to cast + data_shl_a = self.load([a << count for a in data_a]) + # left shift + shl = self.shl(vdata_a, count) + assert shl == data_shl_a + # load to cast + data_shr_a = self.load([a >> count for a in data_a]) + # right shift + shr = self.shr(vdata_a, count) + assert shr == data_shr_a + + # shift by zero or max or out-range immediate constant is not applicable and illogical + for count in range(1, self._scalar_size()): + # load to cast + data_shl_a = self.load([a << count for a in data_a]) + # left shift by an immediate constant + shli = self.shli(vdata_a, count) + assert shli == data_shl_a + # load to cast + data_shr_a = self.load([a >> count for a in data_a]) + # right shift by an immediate constant + shri = self.shri(vdata_a, count) + assert shri == data_shr_a + + def test_arithmetic_subadd_saturated(self): + if self.sfx in ("u32", "s32", "u64", "s64"): + return + + data_a = self._data(self._int_max() - self.nlanes) + data_b = self._data(self._int_min(), reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + + data_adds = self._int_clip([a + b for a, b in zip(data_a, data_b)]) + adds = self.adds(vdata_a, vdata_b) + assert adds == data_adds + + data_subs = self._int_clip([a - b for a, b in zip(data_a, data_b)]) + subs = self.subs(vdata_a, vdata_b) + assert subs == data_subs + + def test_math_max_min(self): + data_a = self._data() + data_b = self._data(self.nlanes) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + + data_max = [max(a, b) for a, b in zip(data_a, data_b)] + simd_max = self.max(vdata_a, vdata_b) + assert simd_max == data_max + + data_min = [min(a, b) for a, b in zip(data_a, data_b)] + simd_min = self.min(vdata_a, vdata_b) + assert simd_min == data_min + +class _SIMD_FP32(_Test_Utility): + """ + To only test single precision + """ + def test_conversions(self): + """ + Round to nearest even integer, assume CPU control register is set to rounding. + Test intrinsics: + npyv_round_s32_##SFX + """ + features = self._cpu_features() + if not self.npyv.simd_f64 and re.match(r".*(NEON|ASIMD)", features): + # very costly to emulate nearest even on Armv7 + # instead we round halves to up. e.g. 
0.5 -> 1, -0.5 -> -1 + _round = lambda v: int(v + (0.5 if v >= 0 else -0.5)) + else: + _round = round + vdata_a = self.load(self._data()) + vdata_a = self.sub(vdata_a, self.setall(0.5)) + data_round = [_round(x) for x in vdata_a] + vround = self.round_s32(vdata_a) + assert vround == data_round + +class _SIMD_FP64(_Test_Utility): + """ + To only test double precision + """ + def test_conversions(self): + """ + Round to nearest even integer, assume CPU control register is set to rounding. + Test intrinsics: + npyv_round_s32_##SFX + """ + vdata_a = self.load(self._data()) + vdata_a = self.sub(vdata_a, self.setall(0.5)) + vdata_b = self.mul(vdata_a, self.setall(-1.5)) + data_round = [round(x) for x in list(vdata_a) + list(vdata_b)] + vround = self.round_s32(vdata_a, vdata_b) + assert vround == data_round + +class _SIMD_FP(_Test_Utility): + """ + To test all float vector types at once + """ + def test_arithmetic_fused(self): + vdata_a, vdata_b, vdata_c = [self.load(self._data())]*3 + vdata_cx2 = self.add(vdata_c, vdata_c) + # multiply and add, a*b + c + data_fma = self.load([a * b + c for a, b, c in zip(vdata_a, vdata_b, vdata_c)]) + fma = self.muladd(vdata_a, vdata_b, vdata_c) + assert fma == data_fma + # multiply and subtract, a*b - c + fms = self.mulsub(vdata_a, vdata_b, vdata_c) + data_fms = self.sub(data_fma, vdata_cx2) + assert fms == data_fms + # negate multiply and add, -(a*b) + c + nfma = self.nmuladd(vdata_a, vdata_b, vdata_c) + data_nfma = self.sub(vdata_cx2, data_fma) + assert nfma == data_nfma + # negate multiply and subtract, -(a*b) - c + nfms = self.nmulsub(vdata_a, vdata_b, vdata_c) + data_nfms = self.mul(data_fma, self.setall(-1)) + assert nfms == data_nfms + + def test_abs(self): + pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan() + data = self._data() + vdata = self.load(self._data()) + + abs_cases = ((-0, 0), (ninf, pinf), (pinf, pinf), (nan, nan)) + for case, desired in abs_cases: + data_abs = [desired]*self.nlanes + vabs = self.abs(self.setall(case)) + assert vabs == pytest.approx(data_abs, nan_ok=True) + + vabs = self.abs(self.mul(vdata, self.setall(-1))) + assert vabs == data + + def test_sqrt(self): + pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan() + data = self._data() + vdata = self.load(self._data()) + + sqrt_cases = ((-0.0, -0.0), (0.0, 0.0), (-1.0, nan), (ninf, nan), (pinf, pinf)) + for case, desired in sqrt_cases: + data_sqrt = [desired]*self.nlanes + sqrt = self.sqrt(self.setall(case)) + assert sqrt == pytest.approx(data_sqrt, nan_ok=True) + + data_sqrt = self.load([math.sqrt(x) for x in data]) # load to truncate precision + sqrt = self.sqrt(vdata) + assert sqrt == data_sqrt + + def test_square(self): + pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan() + data = self._data() + vdata = self.load(self._data()) + # square + square_cases = ((nan, nan), (pinf, pinf), (ninf, pinf)) + for case, desired in square_cases: + data_square = [desired]*self.nlanes + square = self.square(self.setall(case)) + assert square == pytest.approx(data_square, nan_ok=True) + + data_square = [x*x for x in data] + square = self.square(vdata) + assert square == data_square + + def test_max(self): + """ + Test intrinsics: + npyv_max_##SFX + npyv_maxp_##SFX + """ + data_a = self._data() + data_b = self._data(self.nlanes) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + data_max = [max(a, b) for a, b in zip(data_a, data_b)] + _max = self.max(vdata_a, vdata_b) + assert _max == data_max + maxp = self.maxp(vdata_a, vdata_b) + 
assert maxp == data_max + # test IEEE standards + pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan() + max_cases = ((nan, nan, nan), (nan, 10, 10), (10, nan, 10), + (pinf, pinf, pinf), (pinf, 10, pinf), (10, pinf, pinf), + (ninf, ninf, ninf), (ninf, 10, 10), (10, ninf, 10), + (10, 0, 10), (10, -10, 10)) + for case_operand1, case_operand2, desired in max_cases: + data_max = [desired]*self.nlanes + vdata_a = self.setall(case_operand1) + vdata_b = self.setall(case_operand2) + maxp = self.maxp(vdata_a, vdata_b) + assert maxp == pytest.approx(data_max, nan_ok=True) + if nan in (case_operand1, case_operand2, desired): + continue + _max = self.max(vdata_a, vdata_b) + assert _max == data_max + + def test_min(self): + """ + Test intrinsics: + npyv_min_##SFX + npyv_minp_##SFX + """ + data_a = self._data() + data_b = self._data(self.nlanes) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + data_min = [min(a, b) for a, b in zip(data_a, data_b)] + _min = self.min(vdata_a, vdata_b) + assert _min == data_min + minp = self.minp(vdata_a, vdata_b) + assert minp == data_min + # test IEEE standards + pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan() + min_cases = ((nan, nan, nan), (nan, 10, 10), (10, nan, 10), + (pinf, pinf, pinf), (pinf, 10, 10), (10, pinf, 10), + (ninf, ninf, ninf), (ninf, 10, ninf), (10, ninf, ninf), + (10, 0, 0), (10, -10, -10)) + for case_operand1, case_operand2, desired in min_cases: + data_min = [desired]*self.nlanes + vdata_a = self.setall(case_operand1) + vdata_b = self.setall(case_operand2) + minp = self.minp(vdata_a, vdata_b) + assert minp == pytest.approx(data_min, nan_ok=True) + if nan in (case_operand1, case_operand2, desired): + continue + _min = self.min(vdata_a, vdata_b) + assert _min == data_min + + def test_reciprocal(self): + pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan() + data = self._data() + vdata = self.load(self._data()) + + recip_cases = ((nan, nan), (pinf, 0.0), (ninf, -0.0), (0.0, pinf), (-0.0, ninf)) + for case, desired in recip_cases: + data_recip = [desired]*self.nlanes + recip = self.recip(self.setall(case)) + assert recip == pytest.approx(data_recip, nan_ok=True) + + data_recip = self.load([1/x for x in data]) # load to truncate precision + recip = self.recip(vdata) + assert recip == data_recip + + def test_special_cases(self): + """ + Compare Not NaN. 
Test intrinsics: + npyv_notnan_##SFX + """ + nnan = self.notnan(self.setall(self._nan())) + assert nnan == [0]*self.nlanes + +class _SIMD_ALL(_Test_Utility): + """ + To test all vector types at once + """ + def test_memory_load(self): + data = self._data() + # unaligned load + load_data = self.load(data) + assert load_data == data + # aligned load + loada_data = self.loada(data) + assert loada_data == data + # stream load + loads_data = self.loads(data) + assert loads_data == data + # load lower part + loadl = self.loadl(data) + loadl_half = list(loadl)[:self.nlanes//2] + data_half = data[:self.nlanes//2] + assert loadl_half == data_half + assert loadl != data # detect overflow + + def test_memory_store(self): + data = self._data() + vdata = self.load(data) + # unaligned store + store = [0] * self.nlanes + self.store(store, vdata) + assert store == data + # aligned store + store_a = [0] * self.nlanes + self.storea(store_a, vdata) + assert store_a == data + # stream store + store_s = [0] * self.nlanes + self.stores(store_s, vdata) + assert store_s == data + # store lower part + store_l = [0] * self.nlanes + self.storel(store_l, vdata) + assert store_l[:self.nlanes//2] == data[:self.nlanes//2] + assert store_l != vdata # detect overflow + # store higher part + store_h = [0] * self.nlanes + self.storeh(store_h, vdata) + assert store_h[:self.nlanes//2] == data[self.nlanes//2:] + assert store_h != vdata # detect overflow + + def test_memory_partial_load(self): + if self.sfx in ("u8", "s8", "u16", "s16"): + return + + data = self._data() + lanes = list(range(1, self.nlanes + 1)) + lanes += [self.nlanes**2, self.nlanes**4] # test out of range + for n in lanes: + load_till = self.load_till(data, n, 15) + data_till = data[:n] + [15] * (self.nlanes-n) + assert load_till == data_till + load_tillz = self.load_tillz(data, n) + data_tillz = data[:n] + [0] * (self.nlanes-n) + assert load_tillz == data_tillz + + def test_memory_partial_store(self): + if self.sfx in ("u8", "s8", "u16", "s16"): + return + + data = self._data() + data_rev = self._data(reverse=True) + vdata = self.load(data) + lanes = list(range(1, self.nlanes + 1)) + lanes += [self.nlanes**2, self.nlanes**4] + for n in lanes: + data_till = data_rev.copy() + data_till[:n] = data[:n] + store_till = self._data(reverse=True) + self.store_till(store_till, n, vdata) + assert store_till == data_till + + def test_memory_noncont_load(self): + if self.sfx in ("u8", "s8", "u16", "s16"): + return + + for stride in range(1, 64): + data = self._data(count=stride*self.nlanes) + data_stride = data[::stride] + loadn = self.loadn(data, stride) + assert loadn == data_stride + + for stride in range(-64, 0): + data = self._data(stride, -stride*self.nlanes) + data_stride = self.load(data[::stride]) # cast unsigned + loadn = self.loadn(data, stride) + assert loadn == data_stride + + def test_memory_noncont_partial_load(self): + if self.sfx in ("u8", "s8", "u16", "s16"): + return + + lanes = list(range(1, self.nlanes + 1)) + lanes += [self.nlanes**2, self.nlanes**4] + for stride in range(1, 64): + data = self._data(count=stride*self.nlanes) + data_stride = data[::stride] + for n in lanes: + data_stride_till = data_stride[:n] + [15] * (self.nlanes-n) + loadn_till = self.loadn_till(data, stride, n, 15) + assert loadn_till == data_stride_till + data_stride_tillz = data_stride[:n] + [0] * (self.nlanes-n) + loadn_tillz = self.loadn_tillz(data, stride, n) + assert loadn_tillz == data_stride_tillz + + for stride in range(-64, 0): + data = self._data(stride, 
-stride*self.nlanes) + data_stride = list(self.load(data[::stride])) # cast unsigned + for n in lanes: + data_stride_till = data_stride[:n] + [15] * (self.nlanes-n) + loadn_till = self.loadn_till(data, stride, n, 15) + assert loadn_till == data_stride_till + data_stride_tillz = data_stride[:n] + [0] * (self.nlanes-n) + loadn_tillz = self.loadn_tillz(data, stride, n) + assert loadn_tillz == data_stride_tillz + + def test_memory_noncont_store(self): + if self.sfx in ("u8", "s8", "u16", "s16"): + return + + vdata = self.load(self._data()) + for stride in range(1, 64): + data = [15] * stride * self.nlanes + data[::stride] = vdata + storen = [15] * stride * self.nlanes + storen += [127]*64 + self.storen(storen, stride, vdata) + assert storen[:-64] == data + assert storen[-64:] == [127]*64 # detect overflow + + for stride in range(-64, 0): + data = [15] * -stride * self.nlanes + data[::stride] = vdata + storen = [127]*64 + storen += [15] * -stride * self.nlanes + self.storen(storen, stride, vdata) + assert storen[64:] == data + assert storen[:64] == [127]*64 # detect overflow + + def test_memory_noncont_partial_store(self): + if self.sfx in ("u8", "s8", "u16", "s16"): + return + + data = self._data() + vdata = self.load(data) + lanes = list(range(1, self.nlanes + 1)) + lanes += [self.nlanes**2, self.nlanes**4] + for stride in range(1, 64): + for n in lanes: + data_till = [15] * stride * self.nlanes + data_till[::stride] = data[:n] + [15] * (self.nlanes-n) + storen_till = [15] * stride * self.nlanes + storen_till += [127]*64 + self.storen_till(storen_till, stride, n, vdata) + assert storen_till[:-64] == data_till + assert storen_till[-64:] == [127]*64 # detect overflow + + for stride in range(-64, 0): + for n in lanes: + data_till = [15] * -stride * self.nlanes + data_till[::stride] = data[:n] + [15] * (self.nlanes-n) + storen_till = [127]*64 + storen_till += [15] * -stride * self.nlanes + self.storen_till(storen_till, stride, n, vdata) + assert storen_till[64:] == data_till + assert storen_till[:64] == [127]*64 # detect overflow + + def test_misc(self): + broadcast_zero = self.zero() + assert broadcast_zero == [0] * self.nlanes + for i in range(1, 10): + broadcasti = self.setall(i) + assert broadcasti == [i] * self.nlanes + + data_a, data_b = self._data(), self._data(reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + + # py level of npyv_set_* don't support ignoring the extra specified lanes or + # fill non-specified lanes with zero. + vset = self.set(*data_a) + assert vset == data_a + # py level of npyv_setf_* don't support ignoring the extra specified lanes or + # fill non-specified lanes with the specified scalar. 
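+ # (In the C API the fill scalar is the first argument of npyv_setf_##SFX; + # with a full complement of lanes supplied below, the 10 should go unused.)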
+ vsetf = self.setf(10, *data_a) + assert vsetf == data_a + + # We're testing the sanity of _simd's type-vector, + # reinterpret* intrinsics itself are tested via compiler + # during the build of _simd module + sfxes = ["u8", "s8", "u16", "s16", "u32", "s32", "u64", "s64", "f32"] + if self.npyv.simd_f64: + sfxes.append("f64") + for sfx in sfxes: + vec_name = getattr(self, "reinterpret_" + sfx)(vdata_a).__name__ + assert vec_name == "npyv_" + sfx + + # select & mask operations + select_a = self.select(self.cmpeq(self.zero(), self.zero()), vdata_a, vdata_b) + assert select_a == data_a + select_b = self.select(self.cmpneq(self.zero(), self.zero()), vdata_a, vdata_b) + assert select_b == data_b + + # cleanup intrinsic is only used with AVX for + # zeroing registers to avoid the AVX-SSE transition penalty, + # so nothing to test here + self.npyv.cleanup() + + def test_reorder(self): + data_a, data_b = self._data(), self._data(reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + # lower half part + data_a_lo = data_a[:self.nlanes//2] + data_b_lo = data_b[:self.nlanes//2] + # higher half part + data_a_hi = data_a[self.nlanes//2:] + data_b_hi = data_b[self.nlanes//2:] + # combine two lower parts + combinel = self.combinel(vdata_a, vdata_b) + assert combinel == data_a_lo + data_b_lo + # combine two higher parts + combineh = self.combineh(vdata_a, vdata_b) + assert combineh == data_a_hi + data_b_hi + # combine x2 + combine = self.combine(vdata_a, vdata_b) + assert combine == (data_a_lo + data_b_lo, data_a_hi + data_b_hi) + # zip(interleave) + data_zipl = [v for p in zip(data_a_lo, data_b_lo) for v in p] + data_ziph = [v for p in zip(data_a_hi, data_b_hi) for v in p] + vzip = self.zip(vdata_a, vdata_b) + assert vzip == (data_zipl, data_ziph) + + def test_reorder_rev64(self): + # Reverse elements of each 64-bit lane + ssize = self._scalar_size() + if ssize == 64: + return + data_rev64 = [ + y for x in range(0, self.nlanes, 64//ssize) + for y in reversed(range(x, x + 64//ssize)) + ] + rev64 = self.rev64(self.load(range(self.nlanes))) + assert rev64 == data_rev64 + + def test_operators_comparison(self): + if self._is_fp(): + data_a = self._data() + else: + data_a = self._data(self._int_max() - self.nlanes) + data_b = self._data(self._int_min(), reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + + mask_true = self._true_mask() + def to_bool(vector): + return [lane == mask_true for lane in vector] + # equal + data_eq = [a == b for a, b in zip(data_a, data_b)] + cmpeq = to_bool(self.cmpeq(vdata_a, vdata_b)) + assert cmpeq == data_eq + # not equal + data_neq = [a != b for a, b in zip(data_a, data_b)] + cmpneq = to_bool(self.cmpneq(vdata_a, vdata_b)) + assert cmpneq == data_neq + # greater than + data_gt = [a > b for a, b in zip(data_a, data_b)] + cmpgt = to_bool(self.cmpgt(vdata_a, vdata_b)) + assert cmpgt == data_gt + # greater than and equal + data_ge = [a >= b for a, b in zip(data_a, data_b)] + cmpge = to_bool(self.cmpge(vdata_a, vdata_b)) + assert cmpge == data_ge + # less than + data_lt = [a < b for a, b in zip(data_a, data_b)] + cmplt = to_bool(self.cmplt(vdata_a, vdata_b)) + assert cmplt == data_lt + # less than and equal + data_le = [a <= b for a, b in zip(data_a, data_b)] + cmple = to_bool(self.cmple(vdata_a, vdata_b)) + assert cmple == data_le + + def test_operators_logical(self): + if self._is_fp(): + data_a = self._data() + else: + data_a = self._data(self._int_max() - self.nlanes) + data_b = self._data(self._int_min(), reverse=True) + vdata_a, vdata_b 
= self.load(data_a), self.load(data_b)
+
+        if self._is_fp():
+            data_cast_a = self._to_unsigned(vdata_a)
+            data_cast_b = self._to_unsigned(vdata_b)
+            cast, cast_data = self._to_unsigned, self._to_unsigned
+        else:
+            data_cast_a, data_cast_b = data_a, data_b
+            cast, cast_data = lambda a: a, self.load
+
+        data_xor = cast_data([a ^ b for a, b in zip(data_cast_a, data_cast_b)])
+        vxor = cast(self.xor(vdata_a, vdata_b))
+        assert vxor == data_xor
+
+        data_or = cast_data([a | b for a, b in zip(data_cast_a, data_cast_b)])
+        vor = cast(getattr(self, "or")(vdata_a, vdata_b))
+        assert vor == data_or
+
+        data_and = cast_data([a & b for a, b in zip(data_cast_a, data_cast_b)])
+        vand = cast(getattr(self, "and")(vdata_a, vdata_b))
+        assert vand == data_and
+
+        data_not = cast_data([~a for a in data_cast_a])
+        vnot = cast(getattr(self, "not")(vdata_a))
+        assert vnot == data_not
+
+    def test_conversion_boolean(self):
+        bsfx = "b" + self.sfx[1:]
+        to_boolean = getattr(self.npyv, "cvt_%s_%s" % (bsfx, self.sfx))
+        from_boolean = getattr(self.npyv, "cvt_%s_%s" % (self.sfx, bsfx))
+
+        false_vb = to_boolean(self.setall(0))
+        true_vb = self.cmpeq(self.setall(0), self.setall(0))
+        assert false_vb != true_vb
+
+        false_vsfx = from_boolean(false_vb)
+        true_vsfx = from_boolean(true_vb)
+        assert false_vsfx != true_vsfx
+
+    def test_conversion_expand(self):
+        """
+        Test expand intrinsics:
+            npyv_expand_u16_u8
+            npyv_expand_u32_u16
+        """
+        if self.sfx not in ("u8", "u16"):
+            return
+        totype = self.sfx[0]+str(int(self.sfx[1:])*2)
+        expand = getattr(self.npyv, f"expand_{totype}_{self.sfx}")
+        # data close enough to the edge to detect any deviation
+        data = self._data(self._int_max() - self.nlanes)
+        vdata = self.load(data)
+        edata = expand(vdata)
+        # lower half part
+        data_lo = data[:self.nlanes//2]
+        # higher half part
+        data_hi = data[self.nlanes//2:]
+        assert edata == (data_lo, data_hi)
+
+    def test_arithmetic_subadd(self):
+        if self._is_fp():
+            data_a = self._data()
+        else:
+            data_a = self._data(self._int_max() - self.nlanes)
+        data_b = self._data(self._int_min(), reverse=True)
+        vdata_a, vdata_b = self.load(data_a), self.load(data_b)
+
+        # non-saturated
+        data_add = self.load([a + b for a, b in zip(data_a, data_b)])  # load to cast
+        add = self.add(vdata_a, vdata_b)
+        assert add == data_add
+        data_sub = self.load([a - b for a, b in zip(data_a, data_b)])
+        sub = self.sub(vdata_a, vdata_b)
+        assert sub == data_sub
+
+    def test_arithmetic_mul(self):
+        if self.sfx in ("u64", "s64"):
+            return
+
+        if self._is_fp():
+            data_a = self._data()
+        else:
+            data_a = self._data(self._int_max() - self.nlanes)
+        data_b = self._data(self._int_min(), reverse=True)
+        vdata_a, vdata_b = self.load(data_a), self.load(data_b)
+
+        data_mul = self.load([a * b for a, b in zip(data_a, data_b)])
+        mul = self.mul(vdata_a, vdata_b)
+        assert mul == data_mul
+
+    def test_arithmetic_div(self):
+        if not self._is_fp():
+            return
+
+        data_a, data_b = self._data(), self._data(reverse=True)
+        vdata_a, vdata_b = self.load(data_a), self.load(data_b)
+
+        # load to truncate f64 to the precision of f32
+        data_div = self.load([a / b for a, b in zip(data_a, data_b)])
+        div = self.div(vdata_a, vdata_b)
+        assert div == data_div
+
+    def test_arithmetic_intdiv(self):
+        """
+        Test integer division intrinsics:
+            npyv_divisor_##sfx
+            npyv_divc_##sfx
+        """
+        if self._is_fp():
+            return
+
+        def trunc_div(a, d):
+            """
+            Divide towards zero; works with large integers > 2^53
+            and wraps around on overflow the same way C does.
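+            For example, trunc_div(-7, 2) == -3, whereas Python's floor
+            division gives -7 // 2 == -4.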
+ """ + if d == -1 and a == int_min: + return a + sign_a, sign_d = a < 0, d < 0 + if a == 0 or sign_a == sign_d: + return a // d + return (a + sign_d - sign_a) // d + 1 + + int_min = self._int_min() if self._is_signed() else 1 + int_max = self._int_max() + rdata = ( + 0, 1, self.nlanes, int_max-self.nlanes, + int_min, int_min//2 + 1 + ) + divisors = (1, 2, 9, 13, self.nlanes, int_min, int_max, int_max//2) + + for x, d in itertools.product(rdata, divisors): + data = self._data(x) + vdata = self.load(data) + data_divc = [trunc_div(a, d) for a in data] + divisor = self.divisor(d) + divc = self.divc(vdata, divisor) + assert divc == data_divc + + if not self._is_signed(): + return + + safe_neg = lambda x: -x-1 if -x > int_max else -x + # test round divison for signed integers + for x, d in itertools.product(rdata, divisors): + d_neg = safe_neg(d) + data = self._data(x) + data_neg = [safe_neg(a) for a in data] + vdata = self.load(data) + vdata_neg = self.load(data_neg) + divisor = self.divisor(d) + divisor_neg = self.divisor(d_neg) + + # round towards zero + data_divc = [trunc_div(a, d_neg) for a in data] + divc = self.divc(vdata, divisor_neg) + assert divc == data_divc + data_divc = [trunc_div(a, d) for a in data_neg] + divc = self.divc(vdata_neg, divisor) + assert divc == data_divc + + # test truncate sign if the dividend is zero + vzero = self.zero() + for d in (-1, -10, -100, int_min//2, int_min): + divisor = self.divisor(d) + divc = self.divc(vzero, divisor) + assert divc == vzero + + # test overflow + vmin = self.setall(int_min) + divisor = self.divisor(-1) + divc = self.divc(vmin, divisor) + assert divc == vmin + + def test_arithmetic_reduce_sum(self): + """ + Test reduce sum intrinsics: + npyv_sum_##sfx + """ + if self.sfx not in ("u32", "u64", "f32", "f64"): + return + # reduce sum + data = self._data() + vdata = self.load(data) + + data_sum = sum(data) + vsum = self.sum(vdata) + assert vsum == data_sum + + def test_arithmetic_reduce_sumup(self): + """ + Test extend reduce sum intrinsics: + npyv_sumup_##sfx + """ + if self.sfx not in ("u8", "u16"): + return + rdata = (0, self.nlanes, self._int_min(), self._int_max()-self.nlanes) + for r in rdata: + data = self._data(r) + vdata = self.load(data) + data_sum = sum(data) + vsum = self.sumup(vdata) + assert vsum == data_sum + + def test_mask_conditional(self): + """ + Conditional addition and subtraction for all supported data types. 
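+        ifadd(mask, a, b, c) computes a + b in the lanes where the mask is
+        set and returns c elsewhere; ifsub is the analogous subtraction.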
+        Test intrinsics:
+            npyv_ifadd_##SFX, npyv_ifsub_##SFX
+        """
+        vdata_a = self.load(self._data())
+        vdata_b = self.load(self._data(reverse=True))
+        true_mask = self.cmpeq(self.zero(), self.zero())
+        false_mask = self.cmpneq(self.zero(), self.zero())
+
+        data_sub = self.sub(vdata_b, vdata_a)
+        ifsub = self.ifsub(true_mask, vdata_b, vdata_a, vdata_b)
+        assert ifsub == data_sub
+        ifsub = self.ifsub(false_mask, vdata_a, vdata_b, vdata_b)
+        assert ifsub == vdata_b
+
+        data_add = self.add(vdata_b, vdata_a)
+        ifadd = self.ifadd(true_mask, vdata_b, vdata_a, vdata_b)
+        assert ifadd == data_add
+        ifadd = self.ifadd(false_mask, vdata_a, vdata_b, vdata_b)
+        assert ifadd == vdata_b
+
+bool_sfx = ("b8", "b16", "b32", "b64")
+int_sfx = ("u8", "s8", "u16", "s16", "u32", "s32", "u64", "s64")
+fp_sfx = ("f32", "f64")
+all_sfx = int_sfx + fp_sfx
+tests_registry = {
+    bool_sfx: _SIMD_BOOL,
+    int_sfx : _SIMD_INT,
+    fp_sfx  : _SIMD_FP,
+    ("f32",): _SIMD_FP32,
+    ("f64",): _SIMD_FP64,
+    all_sfx : _SIMD_ALL
+}
+for target_name, npyv in targets.items():
+    simd_width = npyv.simd if npyv else ''
+    pretty_name = target_name.split('__')  # multi-target separator
+    if len(pretty_name) > 1:
+        # multi-target
+        pretty_name = f"({' '.join(pretty_name)})"
+    else:
+        pretty_name = pretty_name[0]
+
+    skip = ""
+    skip_sfx = dict()
+    if not npyv:
+        skip = f"target '{pretty_name}' isn't supported by current machine"
+    elif not npyv.simd:
+        skip = f"target '{pretty_name}' isn't supported by NPYV"
+    elif not npyv.simd_f64:
+        skip_sfx["f64"] = f"target '{pretty_name}' doesn't support double-precision"
+
+    for sfxes, cls in tests_registry.items():
+        for sfx in sfxes:
+            skip_m = skip_sfx.get(sfx, skip)
+            inhr = (cls,)
+            attr = dict(npyv=targets[target_name], sfx=sfx, target_name=target_name)
+            tcls = type(f"Test{cls.__name__}_{simd_width}_{target_name}_{sfx}", inhr, attr)
+            if skip_m:
+                pytest.mark.skip(reason=skip_m)(tcls)
+            globals()[tcls.__name__] = tcls
diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_simd_module.py b/MLPY/Lib/site-packages/numpy/core/tests/test_simd_module.py
new file mode 100644
index 0000000000000000000000000000000000000000..4b7e65c83e7c98028e9ad3403007d850988583d1
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/core/tests/test_simd_module.py
@@ -0,0 +1,97 @@
+import pytest
+from numpy.core._simd import targets
+"""
+This testing unit only checks the sanity of the common functionality,
+so all we need is to take one submodule that represents any of the
+enabled SIMD extensions and run the tests on it, plus a second
+submodule, which is required only for the single check related to
+mixing data types across submodules.
+""" +npyvs = [npyv_mod for npyv_mod in targets.values() if npyv_mod and npyv_mod.simd] +npyv, npyv2 = (npyvs + [None, None])[:2] + +unsigned_sfx = ["u8", "u16", "u32", "u64"] +signed_sfx = ["s8", "s16", "s32", "s64"] +fp_sfx = ["f32"] +if npyv and npyv.simd_f64: + fp_sfx.append("f64") + +int_sfx = unsigned_sfx + signed_sfx +all_sfx = unsigned_sfx + int_sfx + +@pytest.mark.skipif(not npyv, reason="could not find any SIMD extension with NPYV support") +class Test_SIMD_MODULE: + + @pytest.mark.parametrize('sfx', all_sfx) + def test_num_lanes(self, sfx): + nlanes = getattr(npyv, "nlanes_" + sfx) + vector = getattr(npyv, "setall_" + sfx)(1) + assert len(vector) == nlanes + + @pytest.mark.parametrize('sfx', all_sfx) + def test_type_name(self, sfx): + vector = getattr(npyv, "setall_" + sfx)(1) + assert vector.__name__ == "npyv_" + sfx + + def test_raises(self): + a, b = [npyv.setall_u32(1)]*2 + for sfx in all_sfx: + vcb = lambda intrin: getattr(npyv, f"{intrin}_{sfx}") + pytest.raises(TypeError, vcb("add"), a) + pytest.raises(TypeError, vcb("add"), a, b, a) + pytest.raises(TypeError, vcb("setall")) + pytest.raises(TypeError, vcb("setall"), [1]) + pytest.raises(TypeError, vcb("load"), 1) + pytest.raises(ValueError, vcb("load"), [1]) + pytest.raises(ValueError, vcb("store"), [1], getattr(npyv, f"reinterpret_{sfx}_u32")(a)) + + @pytest.mark.skipif(not npyv2, reason=( + "could not find a second SIMD extension with NPYV support" + )) + def test_nomix(self): + # mix among submodules isn't allowed + a = npyv.setall_u32(1) + a2 = npyv2.setall_u32(1) + pytest.raises(TypeError, npyv.add_u32, a2, a2) + pytest.raises(TypeError, npyv2.add_u32, a, a) + + @pytest.mark.parametrize('sfx', unsigned_sfx) + def test_unsigned_overflow(self, sfx): + nlanes = getattr(npyv, "nlanes_" + sfx) + maxu = (1 << int(sfx[1:])) - 1 + maxu_72 = (1 << 72) - 1 + lane = getattr(npyv, "setall_" + sfx)(maxu_72)[0] + assert lane == maxu + lanes = getattr(npyv, "load_" + sfx)([maxu_72] * nlanes) + assert lanes == [maxu] * nlanes + lane = getattr(npyv, "setall_" + sfx)(-1)[0] + assert lane == maxu + lanes = getattr(npyv, "load_" + sfx)([-1] * nlanes) + assert lanes == [maxu] * nlanes + + @pytest.mark.parametrize('sfx', signed_sfx) + def test_signed_overflow(self, sfx): + nlanes = getattr(npyv, "nlanes_" + sfx) + maxs_72 = (1 << 71) - 1 + lane = getattr(npyv, "setall_" + sfx)(maxs_72)[0] + assert lane == -1 + lanes = getattr(npyv, "load_" + sfx)([maxs_72] * nlanes) + assert lanes == [-1] * nlanes + mins_72 = -1 << 71 + lane = getattr(npyv, "setall_" + sfx)(mins_72)[0] + assert lane == 0 + lanes = getattr(npyv, "load_" + sfx)([mins_72] * nlanes) + assert lanes == [0] * nlanes + + def test_truncate_f32(self): + f32 = npyv.setall_f32(0.1)[0] + assert f32 != 0.1 + assert round(f32, 1) == 0.1 + + def test_compare(self): + data_range = range(0, npyv.nlanes_u32) + vdata = npyv.load_u32(data_range) + assert vdata == list(data_range) + assert vdata == tuple(data_range) + for i in data_range: + assert vdata[i] == data_range[i] diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_ufunc.py b/MLPY/Lib/site-packages/numpy/core/tests/test_ufunc.py new file mode 100644 index 0000000000000000000000000000000000000000..c5937c41b85dc5b6a83f79f3157b97bfc3cd04a7 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/test_ufunc.py @@ -0,0 +1,2265 @@ +import warnings +import itertools +import sys + +import pytest + +import numpy as np +import numpy.core._umath_tests as umt +import numpy.linalg._umath_linalg as uml +import 
numpy.core._operand_flag_tests as opflag_tests +import numpy.core._rational_tests as _rational_tests +from numpy.testing import ( + assert_, assert_equal, assert_raises, assert_array_equal, + assert_almost_equal, assert_array_almost_equal, assert_no_warnings, + assert_allclose, HAS_REFCOUNT, + ) +from numpy.compat import pickle + + +UNARY_UFUNCS = [obj for obj in np.core.umath.__dict__.values() + if isinstance(obj, np.ufunc)] +UNARY_OBJECT_UFUNCS = [uf for uf in UNARY_UFUNCS if "O->O" in uf.types] + + +class TestUfuncKwargs: + def test_kwarg_exact(self): + assert_raises(TypeError, np.add, 1, 2, castingx='safe') + assert_raises(TypeError, np.add, 1, 2, dtypex=int) + assert_raises(TypeError, np.add, 1, 2, extobjx=[4096]) + assert_raises(TypeError, np.add, 1, 2, outx=None) + assert_raises(TypeError, np.add, 1, 2, sigx='ii->i') + assert_raises(TypeError, np.add, 1, 2, signaturex='ii->i') + assert_raises(TypeError, np.add, 1, 2, subokx=False) + assert_raises(TypeError, np.add, 1, 2, wherex=[True]) + + def test_sig_signature(self): + assert_raises(TypeError, np.add, 1, 2, sig='ii->i', + signature='ii->i') + + def test_sig_dtype(self): + assert_raises(TypeError, np.add, 1, 2, sig='ii->i', + dtype=int) + assert_raises(TypeError, np.add, 1, 2, signature='ii->i', + dtype=int) + + def test_extobj_refcount(self): + # Should not segfault with USE_DEBUG. + assert_raises(TypeError, np.add, 1, 2, extobj=[4096], parrot=True) + + +class TestUfuncGenericLoops: + """Test generic loops. + + The loops to be tested are: + + PyUFunc_ff_f_As_dd_d + PyUFunc_ff_f + PyUFunc_dd_d + PyUFunc_gg_g + PyUFunc_FF_F_As_DD_D + PyUFunc_DD_D + PyUFunc_FF_F + PyUFunc_GG_G + PyUFunc_OO_O + PyUFunc_OO_O_method + PyUFunc_f_f_As_d_d + PyUFunc_d_d + PyUFunc_f_f + PyUFunc_g_g + PyUFunc_F_F_As_D_D + PyUFunc_F_F + PyUFunc_D_D + PyUFunc_G_G + PyUFunc_O_O + PyUFunc_O_O_method + PyUFunc_On_Om + + Where: + + f -- float + d -- double + g -- long double + F -- complex float + D -- complex double + G -- complex long double + O -- python object + + It is difficult to assure that each of these loops is entered from the + Python level as the special cased loops are a moving target and the + corresponding types are architecture dependent. We probably need to + define C level testing ufuncs to get at them. For the time being, I've + just looked at the signatures registered in the build directory to find + relevant functions. 
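+
+    The names encode the loop signatures using the letters above: e.g.
+    PyUFunc_ff_f consumes two single-precision floats and produces one,
+    while the _As_dd_d suffix marks a loop that upcasts to double,
+    computes in double precision, and casts the result back.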
+ + """ + np_dtypes = [ + (np.single, np.single), (np.single, np.double), + (np.csingle, np.csingle), (np.csingle, np.cdouble), + (np.double, np.double), (np.longdouble, np.longdouble), + (np.cdouble, np.cdouble), (np.clongdouble, np.clongdouble)] + + @pytest.mark.parametrize('input_dtype,output_dtype', np_dtypes) + def test_unary_PyUFunc(self, input_dtype, output_dtype, f=np.exp, x=0, y=1): + xs = np.full(10, input_dtype(x), dtype=output_dtype) + ys = f(xs)[::2] + assert_allclose(ys, y) + assert_equal(ys.dtype, output_dtype) + + def f2(x, y): + return x**y + + @pytest.mark.parametrize('input_dtype,output_dtype', np_dtypes) + def test_binary_PyUFunc(self, input_dtype, output_dtype, f=f2, x=0, y=1): + xs = np.full(10, input_dtype(x), dtype=output_dtype) + ys = f(xs, xs)[::2] + assert_allclose(ys, y) + assert_equal(ys.dtype, output_dtype) + + # class to use in testing object method loops + class foo: + def conjugate(self): + return np.bool_(1) + + def logical_xor(self, obj): + return np.bool_(1) + + def test_unary_PyUFunc_O_O(self): + x = np.ones(10, dtype=object) + assert_(np.all(np.abs(x) == 1)) + + def test_unary_PyUFunc_O_O_method_simple(self, foo=foo): + x = np.full(10, foo(), dtype=object) + assert_(np.all(np.conjugate(x) == True)) + + def test_binary_PyUFunc_OO_O(self): + x = np.ones(10, dtype=object) + assert_(np.all(np.add(x, x) == 2)) + + def test_binary_PyUFunc_OO_O_method(self, foo=foo): + x = np.full(10, foo(), dtype=object) + assert_(np.all(np.logical_xor(x, x))) + + def test_binary_PyUFunc_On_Om_method(self, foo=foo): + x = np.full((10, 2, 3), foo(), dtype=object) + assert_(np.all(np.logical_xor(x, x))) + + def test_python_complex_conjugate(self): + # The conjugate ufunc should fall back to calling the method: + arr = np.array([1+2j, 3-4j], dtype="O") + assert isinstance(arr[0], complex) + res = np.conjugate(arr) + assert res.dtype == np.dtype("O") + assert_array_equal(res, np.array([1-2j, 3+4j], dtype="O")) + + @pytest.mark.parametrize("ufunc", UNARY_OBJECT_UFUNCS) + def test_unary_PyUFunc_O_O_method_full(self, ufunc): + """Compare the result of the object loop with non-object one""" + val = np.float64(np.pi/4) + + class MyFloat(np.float64): + def __getattr__(self, attr): + try: + return super().__getattr__(attr) + except AttributeError: + return lambda: getattr(np.core.umath, attr)(val) + + num_arr = np.array([val], dtype=np.float64) + obj_arr = np.array([MyFloat(val)], dtype="O") + + with np.errstate(all="raise"): + try: + res_num = ufunc(num_arr) + except Exception as exc: + with assert_raises(type(exc)): + ufunc(obj_arr) + else: + res_obj = ufunc(obj_arr) + assert_array_almost_equal(res_num.astype("O"), res_obj) + + +def _pickleable_module_global(): + pass + + +class TestUfunc: + def test_pickle(self): + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + assert_(pickle.loads(pickle.dumps(np.sin, + protocol=proto)) is np.sin) + + # Check that ufunc not defined in the top level numpy namespace + # such as numpy.core._rational_tests.test_add can also be pickled + res = pickle.loads(pickle.dumps(_rational_tests.test_add, + protocol=proto)) + assert_(res is _rational_tests.test_add) + + def test_pickle_withstring(self): + astring = (b"cnumpy.core\n_ufunc_reconstruct\np0\n" + b"(S'numpy.core.umath'\np1\nS'cos'\np2\ntp3\nRp4\n.") + assert_(pickle.loads(astring) is np.cos) + + def test_pickle_name_is_qualname(self): + # This tests that a simplification of our ufunc pickle code will + # lead to allowing qualnames as names. 
Future ufuncs should + # possible add a specific qualname, or a hook into pickling instead + # (dask+numba may benefit). + _pickleable_module_global.ufunc = umt._pickleable_module_global_ufunc + obj = pickle.loads(pickle.dumps(_pickleable_module_global.ufunc)) + assert obj is umt._pickleable_module_global_ufunc + + def test_reduceat_shifting_sum(self): + L = 6 + x = np.arange(L) + idx = np.array(list(zip(np.arange(L - 2), np.arange(L - 2) + 2))).ravel() + assert_array_equal(np.add.reduceat(x, idx)[::2], [1, 3, 5, 7]) + + def test_all_ufunc(self): + """Try to check presence and results of all ufuncs. + + The list of ufuncs comes from generate_umath.py and is as follows: + + ===== ==== ============= =============== ======================== + done args function types notes + ===== ==== ============= =============== ======================== + n 1 conjugate nums + O + n 1 absolute nums + O complex -> real + n 1 negative nums + O + n 1 sign nums + O -> int + n 1 invert bool + ints + O flts raise an error + n 1 degrees real + M cmplx raise an error + n 1 radians real + M cmplx raise an error + n 1 arccos flts + M + n 1 arccosh flts + M + n 1 arcsin flts + M + n 1 arcsinh flts + M + n 1 arctan flts + M + n 1 arctanh flts + M + n 1 cos flts + M + n 1 sin flts + M + n 1 tan flts + M + n 1 cosh flts + M + n 1 sinh flts + M + n 1 tanh flts + M + n 1 exp flts + M + n 1 expm1 flts + M + n 1 log flts + M + n 1 log10 flts + M + n 1 log1p flts + M + n 1 sqrt flts + M real x < 0 raises error + n 1 ceil real + M + n 1 trunc real + M + n 1 floor real + M + n 1 fabs real + M + n 1 rint flts + M + n 1 isnan flts -> bool + n 1 isinf flts -> bool + n 1 isfinite flts -> bool + n 1 signbit real -> bool + n 1 modf real -> (frac, int) + n 1 logical_not bool + nums + M -> bool + n 2 left_shift ints + O flts raise an error + n 2 right_shift ints + O flts raise an error + n 2 add bool + nums + O boolean + is || + n 2 subtract bool + nums + O boolean - is ^ + n 2 multiply bool + nums + O boolean * is & + n 2 divide nums + O + n 2 floor_divide nums + O + n 2 true_divide nums + O bBhH -> f, iIlLqQ -> d + n 2 fmod nums + M + n 2 power nums + O + n 2 greater bool + nums + O -> bool + n 2 greater_equal bool + nums + O -> bool + n 2 less bool + nums + O -> bool + n 2 less_equal bool + nums + O -> bool + n 2 equal bool + nums + O -> bool + n 2 not_equal bool + nums + O -> bool + n 2 logical_and bool + nums + M -> bool + n 2 logical_or bool + nums + M -> bool + n 2 logical_xor bool + nums + M -> bool + n 2 maximum bool + nums + O + n 2 minimum bool + nums + O + n 2 bitwise_and bool + ints + O flts raise an error + n 2 bitwise_or bool + ints + O flts raise an error + n 2 bitwise_xor bool + ints + O flts raise an error + n 2 arctan2 real + M + n 2 remainder ints + real + O + n 2 hypot real + M + ===== ==== ============= =============== ======================== + + Types other than those listed will be accepted, but they are cast to + the smallest compatible type for which the function is defined. 
The + casting rules are: + + bool -> int8 -> float32 + ints -> double + + """ + pass + + # from include/numpy/ufuncobject.h + size_inferred = 2 + can_ignore = 4 + def test_signature0(self): + # the arguments to test_signature are: nin, nout, core_signature + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 2, 1, "(i),(i)->()") + assert_equal(enabled, 1) + assert_equal(num_dims, (1, 1, 0)) + assert_equal(ixs, (0, 0)) + assert_equal(flags, (self.size_inferred,)) + assert_equal(sizes, (-1,)) + + def test_signature1(self): + # empty core signature; treat as plain ufunc (with trivial core) + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 2, 1, "(),()->()") + assert_equal(enabled, 0) + assert_equal(num_dims, (0, 0, 0)) + assert_equal(ixs, ()) + assert_equal(flags, ()) + assert_equal(sizes, ()) + + def test_signature2(self): + # more complicated names for variables + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 2, 1, "(i1,i2),(J_1)->(_kAB)") + assert_equal(enabled, 1) + assert_equal(num_dims, (2, 1, 1)) + assert_equal(ixs, (0, 1, 2, 3)) + assert_equal(flags, (self.size_inferred,)*4) + assert_equal(sizes, (-1, -1, -1, -1)) + + def test_signature3(self): + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 2, 1, u"(i1, i12), (J_1)->(i12, i2)") + assert_equal(enabled, 1) + assert_equal(num_dims, (2, 1, 2)) + assert_equal(ixs, (0, 1, 2, 1, 3)) + assert_equal(flags, (self.size_inferred,)*4) + assert_equal(sizes, (-1, -1, -1, -1)) + + def test_signature4(self): + # matrix_multiply signature from _umath_tests + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 2, 1, "(n,k),(k,m)->(n,m)") + assert_equal(enabled, 1) + assert_equal(num_dims, (2, 2, 2)) + assert_equal(ixs, (0, 1, 1, 2, 0, 2)) + assert_equal(flags, (self.size_inferred,)*3) + assert_equal(sizes, (-1, -1, -1)) + + def test_signature5(self): + # matmul signature from _umath_tests + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 2, 1, "(n?,k),(k,m?)->(n?,m?)") + assert_equal(enabled, 1) + assert_equal(num_dims, (2, 2, 2)) + assert_equal(ixs, (0, 1, 1, 2, 0, 2)) + assert_equal(flags, (self.size_inferred | self.can_ignore, + self.size_inferred, + self.size_inferred | self.can_ignore)) + assert_equal(sizes, (-1, -1, -1)) + + def test_signature6(self): + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 1, 1, "(3)->()") + assert_equal(enabled, 1) + assert_equal(num_dims, (1, 0)) + assert_equal(ixs, (0,)) + assert_equal(flags, (0,)) + assert_equal(sizes, (3,)) + + def test_signature7(self): + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 3, 1, "(3),(03,3),(n)->(9)") + assert_equal(enabled, 1) + assert_equal(num_dims, (1, 2, 1, 1)) + assert_equal(ixs, (0, 0, 0, 1, 2)) + assert_equal(flags, (0, self.size_inferred, 0)) + assert_equal(sizes, (3, -1, 9)) + + def test_signature8(self): + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 3, 1, "(3?),(3?,3?),(n)->(9)") + assert_equal(enabled, 1) + assert_equal(num_dims, (1, 2, 1, 1)) + assert_equal(ixs, (0, 0, 0, 1, 2)) + assert_equal(flags, (self.can_ignore, self.size_inferred, 0)) + assert_equal(sizes, (3, -1, 9)) + + def test_signature_failure_extra_parenthesis(self): + with assert_raises(ValueError): + umt.test_signature(2, 1, "((i)),(i)->()") + + def test_signature_failure_mismatching_parenthesis(self): + with assert_raises(ValueError): + umt.test_signature(2, 1, "(i),)i(->()") + + def test_signature_failure_signature_missing_input_arg(self): + with assert_raises(ValueError): + 
umt.test_signature(2, 1, "(i),->()") + + def test_signature_failure_signature_missing_output_arg(self): + with assert_raises(ValueError): + umt.test_signature(2, 2, "(i),(i)->()") + + def test_get_signature(self): + assert_equal(umt.inner1d.signature, "(i),(i)->()") + + def test_forced_sig(self): + a = 0.5*np.arange(3, dtype='f8') + assert_equal(np.add(a, 0.5), [0.5, 1, 1.5]) + with pytest.warns(DeprecationWarning): + assert_equal(np.add(a, 0.5, sig='i', casting='unsafe'), [0, 0, 1]) + assert_equal(np.add(a, 0.5, sig='ii->i', casting='unsafe'), [0, 0, 1]) + with pytest.warns(DeprecationWarning): + assert_equal(np.add(a, 0.5, sig=('i4',), casting='unsafe'), + [0, 0, 1]) + assert_equal(np.add(a, 0.5, sig=('i4', 'i4', 'i4'), + casting='unsafe'), [0, 0, 1]) + + b = np.zeros((3,), dtype='f8') + np.add(a, 0.5, out=b) + assert_equal(b, [0.5, 1, 1.5]) + b[:] = 0 + with pytest.warns(DeprecationWarning): + np.add(a, 0.5, sig='i', out=b, casting='unsafe') + assert_equal(b, [0, 0, 1]) + b[:] = 0 + np.add(a, 0.5, sig='ii->i', out=b, casting='unsafe') + assert_equal(b, [0, 0, 1]) + b[:] = 0 + with pytest.warns(DeprecationWarning): + np.add(a, 0.5, sig=('i4',), out=b, casting='unsafe') + assert_equal(b, [0, 0, 1]) + b[:] = 0 + np.add(a, 0.5, sig=('i4', 'i4', 'i4'), out=b, casting='unsafe') + assert_equal(b, [0, 0, 1]) + + def test_signature_all_None(self): + # signature all None, is an acceptable alternative (since 1.21) + # to not providing a signature. + res1 = np.add([3], [4], sig=(None, None, None)) + res2 = np.add([3], [4]) + assert_array_equal(res1, res2) + res1 = np.maximum([3], [4], sig=(None, None, None)) + res2 = np.maximum([3], [4]) + assert_array_equal(res1, res2) + + with pytest.raises(TypeError): + # special case, that would be deprecated anyway, so errors: + np.add(3, 4, signature=(None,)) + + def test_signature_dtype_type(self): + # Since that will be the normal behaviour (past NumPy 1.21) + # we do support the types already: + float_dtype = type(np.dtype(np.float64)) + np.add(3, 4, signature=(float_dtype, float_dtype, None)) + + @pytest.mark.parametrize("casting", ["unsafe", "same_kind", "safe"]) + def test_partial_signature_mismatch(self, casting): + # If the second argument matches already, no need to specify it: + res = np.ldexp(np.float32(1.), np.int_(2), dtype="d") + assert res.dtype == "d" + res = np.ldexp(np.float32(1.), np.int_(2), signature=(None, None, "d")) + assert res.dtype == "d" + + # ldexp only has a loop for long input as second argument, overriding + # the output cannot help with that (no matter the casting) + with pytest.raises(TypeError): + np.ldexp(1., np.uint64(3), dtype="d") + with pytest.raises(TypeError): + np.ldexp(1., np.uint64(3), signature=(None, None, "d")) + + def test_use_output_signature_for_all_arguments(self): + # Test that providing only `dtype=` or `signature=(None, None, dtype)` + # is sufficient if falling back to a homogeneous signature works. + # In this case, the `intp, intp -> intp` loop is chosen. + res = np.power(1.5, 2.8, dtype=np.intp, casting="unsafe") + assert res == 1 # the cast happens first. + res = np.power(1.5, 2.8, signature=(None, None, np.intp), + casting="unsafe") + assert res == 1 + with pytest.raises(TypeError): + # the unsafe casting would normally cause errors though: + np.power(1.5, 2.8, dtype=np.intp) + + def test_signature_errors(self): + with pytest.raises(TypeError, + match="the signature object to ufunc must be a string or"): + np.add(3, 4, signature=123.) 
# neither a string nor a tuple + + with pytest.raises(ValueError): + # bad symbols that do not translate to dtypes + np.add(3, 4, signature="%^->#") + + with pytest.raises(ValueError): + np.add(3, 4, signature=b"ii-i") # incomplete and byte string + + with pytest.raises(ValueError): + np.add(3, 4, signature="ii>i") # incomplete string + + with pytest.raises(ValueError): + np.add(3, 4, signature=(None, "f8")) # bad length + + with pytest.raises(UnicodeDecodeError): + np.add(3, 4, signature=b"\xff\xff->i") + + def test_forced_dtype_times(self): + # Signatures only set the type numbers (not the actual loop dtypes) + # so using `M` in a signature/dtype should generally work: + a = np.array(['2010-01-02', '1999-03-14', '1833-03'], dtype='>M8[D]') + np.maximum(a, a, dtype="M") + np.maximum.reduce(a, dtype="M") + + arr = np.arange(10, dtype="m8[s]") + np.add(arr, arr, dtype="m") + np.maximum(arr, arr, dtype="m") + + def test_true_divide(self): + a = np.array(10) + b = np.array(20) + tgt = np.array(0.5) + + for tc in 'bhilqBHILQefdgFDG': + dt = np.dtype(tc) + aa = a.astype(dt) + bb = b.astype(dt) + + # Check result value and dtype. + for x, y in itertools.product([aa, -aa], [bb, -bb]): + + # Check with no output type specified + if tc in 'FDG': + tgt = complex(x)/complex(y) + else: + tgt = float(x)/float(y) + + res = np.true_divide(x, y) + rtol = max(np.finfo(res).resolution, 1e-15) + assert_allclose(res, tgt, rtol=rtol) + + if tc in 'bhilqBHILQ': + assert_(res.dtype.name == 'float64') + else: + assert_(res.dtype.name == dt.name ) + + # Check with output type specified. This also checks for the + # incorrect casts in issue gh-3484 because the unary '-' does + # not change types, even for unsigned types, Hence casts in the + # ufunc from signed to unsigned and vice versa will lead to + # errors in the values. + for tcout in 'bhilqBHILQ': + dtout = np.dtype(tcout) + assert_raises(TypeError, np.true_divide, x, y, dtype=dtout) + + for tcout in 'efdg': + dtout = np.dtype(tcout) + if tc in 'FDG': + # Casting complex to float is not allowed + assert_raises(TypeError, np.true_divide, x, y, dtype=dtout) + else: + tgt = float(x)/float(y) + rtol = max(np.finfo(dtout).resolution, 1e-15) + atol = max(np.finfo(dtout).tiny, 3e-308) + # Some test values result in invalid for float16. 
+ with np.errstate(invalid='ignore'): + res = np.true_divide(x, y, dtype=dtout) + if not np.isfinite(res) and tcout == 'e': + continue + assert_allclose(res, tgt, rtol=rtol, atol=atol) + assert_(res.dtype.name == dtout.name) + + for tcout in 'FDG': + dtout = np.dtype(tcout) + tgt = complex(x)/complex(y) + rtol = max(np.finfo(dtout).resolution, 1e-15) + atol = max(np.finfo(dtout).tiny, 3e-308) + res = np.true_divide(x, y, dtype=dtout) + if not np.isfinite(res): + continue + assert_allclose(res, tgt, rtol=rtol, atol=atol) + assert_(res.dtype.name == dtout.name) + + # Check booleans + a = np.ones((), dtype=np.bool_) + res = np.true_divide(a, a) + assert_(res == 1.0) + assert_(res.dtype.name == 'float64') + res = np.true_divide(~a, a) + assert_(res == 0.0) + assert_(res.dtype.name == 'float64') + + def test_sum_stability(self): + a = np.ones(500, dtype=np.float32) + assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 4) + + a = np.ones(500, dtype=np.float64) + assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 13) + + def test_sum(self): + for dt in (int, np.float16, np.float32, np.float64, np.longdouble): + for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127, + 128, 1024, 1235): + tgt = dt(v * (v + 1) / 2) + d = np.arange(1, v + 1, dtype=dt) + + # warning if sum overflows, which it does in float16 + overflow = not np.isfinite(tgt) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + assert_almost_equal(np.sum(d), tgt) + assert_equal(len(w), 1 * overflow) + + assert_almost_equal(np.sum(d[::-1]), tgt) + assert_equal(len(w), 2 * overflow) + + d = np.ones(500, dtype=dt) + assert_almost_equal(np.sum(d[::2]), 250.) + assert_almost_equal(np.sum(d[1::2]), 250.) + assert_almost_equal(np.sum(d[::3]), 167.) + assert_almost_equal(np.sum(d[1::3]), 167.) + assert_almost_equal(np.sum(d[::-2]), 250.) + assert_almost_equal(np.sum(d[-1::-2]), 250.) + assert_almost_equal(np.sum(d[::-3]), 167.) + assert_almost_equal(np.sum(d[-1::-3]), 167.) + # sum with first reduction entry != 0 + d = np.ones((1,), dtype=dt) + d += d + assert_almost_equal(d, 2.) + + def test_sum_complex(self): + for dt in (np.complex64, np.complex128, np.clongdouble): + for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127, + 128, 1024, 1235): + tgt = dt(v * (v + 1) / 2) - dt((v * (v + 1) / 2) * 1j) + d = np.empty(v, dtype=dt) + d.real = np.arange(1, v + 1) + d.imag = -np.arange(1, v + 1) + assert_almost_equal(np.sum(d), tgt) + assert_almost_equal(np.sum(d[::-1]), tgt) + + d = np.ones(500, dtype=dt) + 1j + assert_almost_equal(np.sum(d[::2]), 250. + 250j) + assert_almost_equal(np.sum(d[1::2]), 250. + 250j) + assert_almost_equal(np.sum(d[::3]), 167. + 167j) + assert_almost_equal(np.sum(d[1::3]), 167. + 167j) + assert_almost_equal(np.sum(d[::-2]), 250. + 250j) + assert_almost_equal(np.sum(d[-1::-2]), 250. + 250j) + assert_almost_equal(np.sum(d[::-3]), 167. + 167j) + assert_almost_equal(np.sum(d[-1::-3]), 167. + 167j) + # sum with first reduction entry != 0 + d = np.ones((1,), dtype=dt) + 1j + d += d + assert_almost_equal(d, 2. + 2j) + + def test_sum_initial(self): + # Integer, single axis + assert_equal(np.sum([3], initial=2), 5) + + # Floating point + assert_almost_equal(np.sum([0.2], initial=0.1), 0.3) + + # Multiple non-adjacent axes + assert_equal(np.sum(np.ones((2, 3, 5), dtype=np.int64), axis=(0, 2), initial=2), + [12, 12, 12]) + + def test_sum_where(self): + # More extensive tests done in test_reduction_with_where. + assert_equal(np.sum([[1., 2.], [3., 4.]], where=[True, False]), 4.) 
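+        # With axis=0 the masked-out second column keeps the initial
+        # value: [1. + 3. + 5., 5.] == [9., 5.]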
+ assert_equal(np.sum([[1., 2.], [3., 4.]], axis=0, initial=5., + where=[True, False]), [9., 5.]) + + def test_inner1d(self): + a = np.arange(6).reshape((2, 3)) + assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1)) + a = np.arange(6) + assert_array_equal(umt.inner1d(a, a), np.sum(a*a)) + + def test_broadcast(self): + msg = "broadcast" + a = np.arange(4).reshape((2, 1, 2)) + b = np.arange(4).reshape((1, 2, 2)) + assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg) + msg = "extend & broadcast loop dimensions" + b = np.arange(4).reshape((2, 2)) + assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg) + # Broadcast in core dimensions should fail + a = np.arange(8).reshape((4, 2)) + b = np.arange(4).reshape((4, 1)) + assert_raises(ValueError, umt.inner1d, a, b) + # Extend core dimensions should fail + a = np.arange(8).reshape((4, 2)) + b = np.array(7) + assert_raises(ValueError, umt.inner1d, a, b) + # Broadcast should fail + a = np.arange(2).reshape((2, 1, 1)) + b = np.arange(3).reshape((3, 1, 1)) + assert_raises(ValueError, umt.inner1d, a, b) + + # Writing to a broadcasted array with overlap should warn, gh-2705 + a = np.arange(2) + b = np.arange(4).reshape((2, 2)) + u, v = np.broadcast_arrays(a, b) + assert_equal(u.strides[0], 0) + x = u + v + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + u += v + assert_equal(len(w), 1) + assert_(x[0, 0] != u[0, 0]) + + # Output reduction should not be allowed. + # See gh-15139 + a = np.arange(6).reshape(3, 2) + b = np.ones(2) + out = np.empty(()) + assert_raises(ValueError, umt.inner1d, a, b, out) + out2 = np.empty(3) + c = umt.inner1d(a, b, out2) + assert_(c is out2) + + def test_out_broadcasts(self): + # For ufuncs and gufuncs (not for reductions), we currently allow + # the output to cause broadcasting of the input arrays. + # both along dimensions with shape 1 and dimensions which do not + # exist at all in the inputs. + arr = np.arange(3).reshape(1, 3) + out = np.empty((5, 4, 3)) + np.add(arr, arr, out=out) + assert (out == np.arange(3) * 2).all() + + # The same holds for gufuncs (gh-16484) + umt.inner1d(arr, arr, out=out) + # the result would be just a scalar `5`, but is broadcast fully: + assert (out == 5).all() + + def test_type_cast(self): + msg = "type cast" + a = np.arange(6, dtype='short').reshape((2, 3)) + assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1), + err_msg=msg) + msg = "type cast on one argument" + a = np.arange(6).reshape((2, 3)) + b = a + 0.1 + assert_array_almost_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), + err_msg=msg) + + def test_endian(self): + msg = "big endian" + a = np.arange(6, dtype='>i4').reshape((2, 3)) + assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1), + err_msg=msg) + msg = "little endian" + a = np.arange(6, dtype='()' + inner1d = umt.inner1d + a = np.arange(27.).reshape((3, 3, 3)) + b = np.arange(10., 19.).reshape((3, 1, 3)) + # basic tests on inputs (outputs tested below with matrix_multiply). + c = inner1d(a, b) + assert_array_equal(c, (a * b).sum(-1)) + # default + c = inner1d(a, b, axes=[(-1,), (-1,), ()]) + assert_array_equal(c, (a * b).sum(-1)) + # integers ok for single axis. + c = inner1d(a, b, axes=[-1, -1, ()]) + assert_array_equal(c, (a * b).sum(-1)) + # mix fine + c = inner1d(a, b, axes=[(-1,), -1, ()]) + assert_array_equal(c, (a * b).sum(-1)) + # can omit last axis. 
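+        # (allowed because the output of inner1d has no core dimensions)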
+ c = inner1d(a, b, axes=[-1, -1]) + assert_array_equal(c, (a * b).sum(-1)) + # can pass in other types of integer (with __index__ protocol) + c = inner1d(a, b, axes=[np.int8(-1), np.array(-1, dtype=np.int32)]) + assert_array_equal(c, (a * b).sum(-1)) + # swap some axes + c = inner1d(a, b, axes=[0, 0]) + assert_array_equal(c, (a * b).sum(0)) + c = inner1d(a, b, axes=[0, 2]) + assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1)) + # Check errors for improperly constructed axes arguments. + # should have list. + assert_raises(TypeError, inner1d, a, b, axes=-1) + # needs enough elements + assert_raises(ValueError, inner1d, a, b, axes=[-1]) + # should pass in indices. + assert_raises(TypeError, inner1d, a, b, axes=[-1.0, -1.0]) + assert_raises(TypeError, inner1d, a, b, axes=[(-1.0,), -1]) + assert_raises(TypeError, inner1d, a, b, axes=[None, 1]) + # cannot pass an index unless there is only one dimension + # (output is wrong in this case) + assert_raises(TypeError, inner1d, a, b, axes=[-1, -1, -1]) + # or pass in generally the wrong number of axes + assert_raises(ValueError, inner1d, a, b, axes=[-1, -1, (-1,)]) + assert_raises(ValueError, inner1d, a, b, axes=[-1, (-2, -1), ()]) + # axes need to have same length. + assert_raises(ValueError, inner1d, a, b, axes=[0, 1]) + + # matrix_multiply signature: '(m,n),(n,p)->(m,p)' + mm = umt.matrix_multiply + a = np.arange(12).reshape((2, 3, 2)) + b = np.arange(8).reshape((2, 2, 2, 1)) + 1 + # Sanity check. + c = mm(a, b) + assert_array_equal(c, np.matmul(a, b)) + # Default axes. + c = mm(a, b, axes=[(-2, -1), (-2, -1), (-2, -1)]) + assert_array_equal(c, np.matmul(a, b)) + # Default with explicit axes. + c = mm(a, b, axes=[(1, 2), (2, 3), (2, 3)]) + assert_array_equal(c, np.matmul(a, b)) + # swap some axes. + c = mm(a, b, axes=[(0, -1), (1, 2), (-2, -1)]) + assert_array_equal(c, np.matmul(a.transpose(1, 0, 2), + b.transpose(0, 3, 1, 2))) + # Default with output array. + c = np.empty((2, 2, 3, 1)) + d = mm(a, b, out=c, axes=[(1, 2), (2, 3), (2, 3)]) + assert_(c is d) + assert_array_equal(c, np.matmul(a, b)) + # Transposed output array + c = np.empty((1, 2, 2, 3)) + d = mm(a, b, out=c, axes=[(-2, -1), (-2, -1), (3, 0)]) + assert_(c is d) + assert_array_equal(c, np.matmul(a, b).transpose(3, 0, 1, 2)) + # Check errors for improperly constructed axes arguments. + # wrong argument + assert_raises(TypeError, mm, a, b, axis=1) + # axes should be list + assert_raises(TypeError, mm, a, b, axes=1) + assert_raises(TypeError, mm, a, b, axes=((-2, -1), (-2, -1), (-2, -1))) + # list needs to have right length + assert_raises(ValueError, mm, a, b, axes=[]) + assert_raises(ValueError, mm, a, b, axes=[(-2, -1)]) + # list should contain tuples for multiple axes + assert_raises(TypeError, mm, a, b, axes=[-1, -1, -1]) + assert_raises(TypeError, mm, a, b, axes=[(-2, -1), (-2, -1), -1]) + assert_raises(TypeError, + mm, a, b, axes=[[-2, -1], [-2, -1], [-2, -1]]) + assert_raises(TypeError, + mm, a, b, axes=[(-2, -1), (-2, -1), [-2, -1]]) + assert_raises(TypeError, mm, a, b, axes=[(-2, -1), (-2, -1), None]) + # tuples should not have duplicated values + assert_raises(ValueError, mm, a, b, axes=[(-2, -1), (-2, -1), (-2, -2)]) + # arrays should have enough axes. + z = np.zeros((2, 2)) + assert_raises(ValueError, mm, z, z[0]) + assert_raises(ValueError, mm, z, z, out=z[:, 0]) + assert_raises(ValueError, mm, z[1], z, axes=[0, 1]) + assert_raises(ValueError, mm, z, z, out=z[0], axes=[0, 1]) + # Regular ufuncs should not accept axes. 
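+        # (np.add has no core dimensions for axes to map onto)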
+ assert_raises(TypeError, np.add, 1., 1., axes=[0]) + # should be able to deal with bad unrelated kwargs. + assert_raises(TypeError, mm, z, z, axes=[0, 1], parrot=True) + + def test_axis_argument(self): + # inner1d signature: '(i),(i)->()' + inner1d = umt.inner1d + a = np.arange(27.).reshape((3, 3, 3)) + b = np.arange(10., 19.).reshape((3, 1, 3)) + c = inner1d(a, b) + assert_array_equal(c, (a * b).sum(-1)) + c = inner1d(a, b, axis=-1) + assert_array_equal(c, (a * b).sum(-1)) + out = np.zeros_like(c) + d = inner1d(a, b, axis=-1, out=out) + assert_(d is out) + assert_array_equal(d, c) + c = inner1d(a, b, axis=0) + assert_array_equal(c, (a * b).sum(0)) + # Sanity checks on innerwt and cumsum. + a = np.arange(6).reshape((2, 3)) + b = np.arange(10, 16).reshape((2, 3)) + w = np.arange(20, 26).reshape((2, 3)) + assert_array_equal(umt.innerwt(a, b, w, axis=0), + np.sum(a * b * w, axis=0)) + assert_array_equal(umt.cumsum(a, axis=0), np.cumsum(a, axis=0)) + assert_array_equal(umt.cumsum(a, axis=-1), np.cumsum(a, axis=-1)) + out = np.empty_like(a) + b = umt.cumsum(a, out=out, axis=0) + assert_(out is b) + assert_array_equal(b, np.cumsum(a, axis=0)) + b = umt.cumsum(a, out=out, axis=1) + assert_(out is b) + assert_array_equal(b, np.cumsum(a, axis=-1)) + # Check errors. + # Cannot pass in both axis and axes. + assert_raises(TypeError, inner1d, a, b, axis=0, axes=[0, 0]) + # Not an integer. + assert_raises(TypeError, inner1d, a, b, axis=[0]) + # more than 1 core dimensions. + mm = umt.matrix_multiply + assert_raises(TypeError, mm, a, b, axis=1) + # Output wrong size in axis. + out = np.empty((1, 2, 3), dtype=a.dtype) + assert_raises(ValueError, umt.cumsum, a, out=out, axis=0) + # Regular ufuncs should not accept axis. + assert_raises(TypeError, np.add, 1., 1., axis=0) + + def test_keepdims_argument(self): + # inner1d signature: '(i),(i)->()' + inner1d = umt.inner1d + a = np.arange(27.).reshape((3, 3, 3)) + b = np.arange(10., 19.).reshape((3, 1, 3)) + c = inner1d(a, b) + assert_array_equal(c, (a * b).sum(-1)) + c = inner1d(a, b, keepdims=False) + assert_array_equal(c, (a * b).sum(-1)) + c = inner1d(a, b, keepdims=True) + assert_array_equal(c, (a * b).sum(-1, keepdims=True)) + out = np.zeros_like(c) + d = inner1d(a, b, keepdims=True, out=out) + assert_(d is out) + assert_array_equal(d, c) + # Now combined with axis and axes. 
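+        # (a single axis is shorthand for using that core axis in every operand)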
+ c = inner1d(a, b, axis=-1, keepdims=False) + assert_array_equal(c, (a * b).sum(-1, keepdims=False)) + c = inner1d(a, b, axis=-1, keepdims=True) + assert_array_equal(c, (a * b).sum(-1, keepdims=True)) + c = inner1d(a, b, axis=0, keepdims=False) + assert_array_equal(c, (a * b).sum(0, keepdims=False)) + c = inner1d(a, b, axis=0, keepdims=True) + assert_array_equal(c, (a * b).sum(0, keepdims=True)) + c = inner1d(a, b, axes=[(-1,), (-1,), ()], keepdims=False) + assert_array_equal(c, (a * b).sum(-1)) + c = inner1d(a, b, axes=[(-1,), (-1,), (-1,)], keepdims=True) + assert_array_equal(c, (a * b).sum(-1, keepdims=True)) + c = inner1d(a, b, axes=[0, 0], keepdims=False) + assert_array_equal(c, (a * b).sum(0)) + c = inner1d(a, b, axes=[0, 0, 0], keepdims=True) + assert_array_equal(c, (a * b).sum(0, keepdims=True)) + c = inner1d(a, b, axes=[0, 2], keepdims=False) + assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1)) + c = inner1d(a, b, axes=[0, 2], keepdims=True) + assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1, + keepdims=True)) + c = inner1d(a, b, axes=[0, 2, 2], keepdims=True) + assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1, + keepdims=True)) + c = inner1d(a, b, axes=[0, 2, 0], keepdims=True) + assert_array_equal(c, (a * b.transpose(2, 0, 1)).sum(0, keepdims=True)) + # Hardly useful, but should work. + c = inner1d(a, b, axes=[0, 2, 1], keepdims=True) + assert_array_equal(c, (a.transpose(1, 0, 2) * b.transpose(0, 2, 1)) + .sum(1, keepdims=True)) + # Check with two core dimensions. + a = np.eye(3) * np.arange(4.)[:, np.newaxis, np.newaxis] + expected = uml.det(a) + c = uml.det(a, keepdims=False) + assert_array_equal(c, expected) + c = uml.det(a, keepdims=True) + assert_array_equal(c, expected[:, np.newaxis, np.newaxis]) + a = np.eye(3) * np.arange(4.)[:, np.newaxis, np.newaxis] + expected_s, expected_l = uml.slogdet(a) + cs, cl = uml.slogdet(a, keepdims=False) + assert_array_equal(cs, expected_s) + assert_array_equal(cl, expected_l) + cs, cl = uml.slogdet(a, keepdims=True) + assert_array_equal(cs, expected_s[:, np.newaxis, np.newaxis]) + assert_array_equal(cl, expected_l[:, np.newaxis, np.newaxis]) + # Sanity check on innerwt. + a = np.arange(6).reshape((2, 3)) + b = np.arange(10, 16).reshape((2, 3)) + w = np.arange(20, 26).reshape((2, 3)) + assert_array_equal(umt.innerwt(a, b, w, keepdims=True), + np.sum(a * b * w, axis=-1, keepdims=True)) + assert_array_equal(umt.innerwt(a, b, w, axis=0, keepdims=True), + np.sum(a * b * w, axis=0, keepdims=True)) + # Check errors. + # Not a boolean + assert_raises(TypeError, inner1d, a, b, keepdims='true') + # More than 1 core dimension, and core output dimensions. + mm = umt.matrix_multiply + assert_raises(TypeError, mm, a, b, keepdims=True) + assert_raises(TypeError, mm, a, b, keepdims=False) + # Regular ufuncs should not accept keepdims. 
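+        # (there are no removed core dimensions whose place could be kept)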
+ assert_raises(TypeError, np.add, 1., 1., keepdims=False) + + def test_innerwt(self): + a = np.arange(6).reshape((2, 3)) + b = np.arange(10, 16).reshape((2, 3)) + w = np.arange(20, 26).reshape((2, 3)) + assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1)) + a = np.arange(100, 124).reshape((2, 3, 4)) + b = np.arange(200, 224).reshape((2, 3, 4)) + w = np.arange(300, 324).reshape((2, 3, 4)) + assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1)) + + def test_innerwt_empty(self): + """Test generalized ufunc with zero-sized operands""" + a = np.array([], dtype='f8') + b = np.array([], dtype='f8') + w = np.array([], dtype='f8') + assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1)) + + def test_cross1d(self): + """Test with fixed-sized signature.""" + a = np.eye(3) + assert_array_equal(umt.cross1d(a, a), np.zeros((3, 3))) + out = np.zeros((3, 3)) + result = umt.cross1d(a[0], a, out) + assert_(result is out) + assert_array_equal(result, np.vstack((np.zeros(3), a[2], -a[1]))) + assert_raises(ValueError, umt.cross1d, np.eye(4), np.eye(4)) + assert_raises(ValueError, umt.cross1d, a, np.arange(4.)) + # Wrong output core dimension. + assert_raises(ValueError, umt.cross1d, a, np.arange(3.), np.zeros((3, 4))) + # Wrong output broadcast dimension (see gh-15139). + assert_raises(ValueError, umt.cross1d, a, np.arange(3.), np.zeros(3)) + + def test_can_ignore_signature(self): + # Comparing the effects of ? in signature: + # matrix_multiply: (m,n),(n,p)->(m,p) # all must be there. + # matmul: (m?,n),(n,p?)->(m?,p?) # allow missing m, p. + mat = np.arange(12).reshape((2, 3, 2)) + single_vec = np.arange(2) + col_vec = single_vec[:, np.newaxis] + col_vec_array = np.arange(8).reshape((2, 2, 2, 1)) + 1 + # matrix @ single column vector with proper dimension + mm_col_vec = umt.matrix_multiply(mat, col_vec) + # matmul does the same thing + matmul_col_vec = umt.matmul(mat, col_vec) + assert_array_equal(matmul_col_vec, mm_col_vec) + # matrix @ vector without dimension making it a column vector. + # matrix multiply fails -> missing core dim. + assert_raises(ValueError, umt.matrix_multiply, mat, single_vec) + # matmul mimicker passes, and returns a vector. + matmul_col = umt.matmul(mat, single_vec) + assert_array_equal(matmul_col, mm_col_vec.squeeze()) + # Now with a column array: same as for column vector, + # broadcasting sensibly. 
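+        # (col_vec_array's extra leading axes become loop dimensions that
+        # broadcast against those of mat)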
+ mm_col_vec = umt.matrix_multiply(mat, col_vec_array) + matmul_col_vec = umt.matmul(mat, col_vec_array) + assert_array_equal(matmul_col_vec, mm_col_vec) + # As above, but for row vector + single_vec = np.arange(3) + row_vec = single_vec[np.newaxis, :] + row_vec_array = np.arange(24).reshape((4, 2, 1, 1, 3)) + 1 + # row vector @ matrix + mm_row_vec = umt.matrix_multiply(row_vec, mat) + matmul_row_vec = umt.matmul(row_vec, mat) + assert_array_equal(matmul_row_vec, mm_row_vec) + # single row vector @ matrix + assert_raises(ValueError, umt.matrix_multiply, single_vec, mat) + matmul_row = umt.matmul(single_vec, mat) + assert_array_equal(matmul_row, mm_row_vec.squeeze()) + # row vector array @ matrix + mm_row_vec = umt.matrix_multiply(row_vec_array, mat) + matmul_row_vec = umt.matmul(row_vec_array, mat) + assert_array_equal(matmul_row_vec, mm_row_vec) + # Now for vector combinations + # row vector @ column vector + col_vec = row_vec.T + col_vec_array = row_vec_array.swapaxes(-2, -1) + mm_row_col_vec = umt.matrix_multiply(row_vec, col_vec) + matmul_row_col_vec = umt.matmul(row_vec, col_vec) + assert_array_equal(matmul_row_col_vec, mm_row_col_vec) + # single row vector @ single col vector + assert_raises(ValueError, umt.matrix_multiply, single_vec, single_vec) + matmul_row_col = umt.matmul(single_vec, single_vec) + assert_array_equal(matmul_row_col, mm_row_col_vec.squeeze()) + # row vector array @ matrix + mm_row_col_array = umt.matrix_multiply(row_vec_array, col_vec_array) + matmul_row_col_array = umt.matmul(row_vec_array, col_vec_array) + assert_array_equal(matmul_row_col_array, mm_row_col_array) + # Finally, check that things are *not* squeezed if one gives an + # output. + out = np.zeros_like(mm_row_col_array) + out = umt.matrix_multiply(row_vec_array, col_vec_array, out=out) + assert_array_equal(out, mm_row_col_array) + out[:] = 0 + out = umt.matmul(row_vec_array, col_vec_array, out=out) + assert_array_equal(out, mm_row_col_array) + # And check one cannot put missing dimensions back. + out = np.zeros_like(mm_row_col_vec) + assert_raises(ValueError, umt.matrix_multiply, single_vec, single_vec, + out) + # But fine for matmul, since it is just a broadcast. 
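+        # (the missing m? and p? core dimensions never enter the result, so
+        # the scalar output simply broadcasts into out's shape)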
+ out = umt.matmul(single_vec, single_vec, out) + assert_array_equal(out, mm_row_col_vec.squeeze()) + + def test_matrix_multiply(self): + self.compare_matrix_multiply_results(np.int64) + self.compare_matrix_multiply_results(np.double) + + def test_matrix_multiply_umath_empty(self): + res = umt.matrix_multiply(np.ones((0, 10)), np.ones((10, 0))) + assert_array_equal(res, np.zeros((0, 0))) + res = umt.matrix_multiply(np.ones((10, 0)), np.ones((0, 10))) + assert_array_equal(res, np.zeros((10, 10))) + + def compare_matrix_multiply_results(self, tp): + d1 = np.array(np.random.rand(2, 3, 4), dtype=tp) + d2 = np.array(np.random.rand(2, 3, 4), dtype=tp) + msg = "matrix multiply on type %s" % d1.dtype.name + + def permute_n(n): + if n == 1: + return ([0],) + ret = () + base = permute_n(n-1) + for perm in base: + for i in range(n): + new = perm + [n-1] + new[n-1] = new[i] + new[i] = n-1 + ret += (new,) + return ret + + def slice_n(n): + if n == 0: + return ((),) + ret = () + base = slice_n(n-1) + for sl in base: + ret += (sl+(slice(None),),) + ret += (sl+(slice(0, 1),),) + return ret + + def broadcastable(s1, s2): + return s1 == s2 or s1 == 1 or s2 == 1 + + permute_3 = permute_n(3) + slice_3 = slice_n(3) + ((slice(None, None, -1),)*3,) + + ref = True + for p1 in permute_3: + for p2 in permute_3: + for s1 in slice_3: + for s2 in slice_3: + a1 = d1.transpose(p1)[s1] + a2 = d2.transpose(p2)[s2] + ref = ref and a1.base is not None + ref = ref and a2.base is not None + if (a1.shape[-1] == a2.shape[-2] and + broadcastable(a1.shape[0], a2.shape[0])): + assert_array_almost_equal( + umt.matrix_multiply(a1, a2), + np.sum(a2[..., np.newaxis].swapaxes(-3, -1) * + a1[..., np.newaxis,:], axis=-1), + err_msg=msg + ' %s %s' % (str(a1.shape), + str(a2.shape))) + + assert_equal(ref, True, err_msg="reference check") + + def test_euclidean_pdist(self): + a = np.arange(12, dtype=float).reshape(4, 3) + out = np.empty((a.shape[0] * (a.shape[0] - 1) // 2,), dtype=a.dtype) + umt.euclidean_pdist(a, out) + b = np.sqrt(np.sum((a[:, None] - a)**2, axis=-1)) + b = b[~np.tri(a.shape[0], dtype=bool)] + assert_almost_equal(out, b) + # An output array is required to determine p with signature (n,d)->(p) + assert_raises(ValueError, umt.euclidean_pdist, a) + + def test_cumsum(self): + a = np.arange(10) + result = umt.cumsum(a) + assert_array_equal(result, a.cumsum()) + + def test_object_logical(self): + a = np.array([3, None, True, False, "test", ""], dtype=object) + assert_equal(np.logical_or(a, None), + np.array([x or None for x in a], dtype=object)) + assert_equal(np.logical_or(a, True), + np.array([x or True for x in a], dtype=object)) + assert_equal(np.logical_or(a, 12), + np.array([x or 12 for x in a], dtype=object)) + assert_equal(np.logical_or(a, "blah"), + np.array([x or "blah" for x in a], dtype=object)) + + assert_equal(np.logical_and(a, None), + np.array([x and None for x in a], dtype=object)) + assert_equal(np.logical_and(a, True), + np.array([x and True for x in a], dtype=object)) + assert_equal(np.logical_and(a, 12), + np.array([x and 12 for x in a], dtype=object)) + assert_equal(np.logical_and(a, "blah"), + np.array([x and "blah" for x in a], dtype=object)) + + assert_equal(np.logical_not(a), + np.array([not x for x in a], dtype=object)) + + assert_equal(np.logical_or.reduce(a), 3) + assert_equal(np.logical_and.reduce(a), None) + + def test_object_comparison(self): + class HasComparisons: + def __eq__(self, other): + return '==' + + arr0d = np.array(HasComparisons()) + assert_equal(arr0d == arr0d, True) + 
assert_equal(np.equal(arr0d, arr0d), True) # normal behavior is a cast + + arr1d = np.array([HasComparisons()]) + assert_equal(arr1d == arr1d, np.array([True])) + assert_equal(np.equal(arr1d, arr1d), np.array([True])) # normal behavior is a cast + assert_equal(np.equal(arr1d, arr1d, dtype=object), np.array(['=='])) + + def test_object_array_reduction(self): + # Reductions on object arrays + a = np.array(['a', 'b', 'c'], dtype=object) + assert_equal(np.sum(a), 'abc') + assert_equal(np.max(a), 'c') + assert_equal(np.min(a), 'a') + a = np.array([True, False, True], dtype=object) + assert_equal(np.sum(a), 2) + assert_equal(np.prod(a), 0) + assert_equal(np.any(a), True) + assert_equal(np.all(a), False) + assert_equal(np.max(a), True) + assert_equal(np.min(a), False) + assert_equal(np.array([[1]], dtype=object).sum(), 1) + assert_equal(np.array([[[1, 2]]], dtype=object).sum((0, 1)), [1, 2]) + assert_equal(np.array([1], dtype=object).sum(initial=1), 2) + assert_equal(np.array([[1], [2, 3]], dtype=object) + .sum(initial=[0], where=[False, True]), [0, 2, 3]) + + def test_object_array_accumulate_inplace(self): + # Checks that in-place accumulates work, see also gh-7402 + arr = np.ones(4, dtype=object) + arr[:] = [[1] for i in range(4)] + # Twice reproduced also for tuples: + np.add.accumulate(arr, out=arr) + np.add.accumulate(arr, out=arr) + assert_array_equal(arr, + np.array([[1]*i for i in [1, 3, 6, 10]], dtype=object), + ) + + # And the same if the axis argument is used + arr = np.ones((2, 4), dtype=object) + arr[0, :] = [[2] for i in range(4)] + np.add.accumulate(arr, out=arr, axis=-1) + np.add.accumulate(arr, out=arr, axis=-1) + assert_array_equal(arr[0, :], + np.array([[2]*i for i in [1, 3, 6, 10]], dtype=object), + ) + + def test_object_array_reduceat_inplace(self): + # Checks that in-place reduceats work, see also gh-7465 + arr = np.empty(4, dtype=object) + arr[:] = [[1] for i in range(4)] + out = np.empty(4, dtype=object) + out[:] = [[1] for i in range(4)] + np.add.reduceat(arr, np.arange(4), out=arr) + np.add.reduceat(arr, np.arange(4), out=arr) + assert_array_equal(arr, out) + + # And the same if the axis argument is used + arr = np.ones((2, 4), dtype=object) + arr[0, :] = [[2] for i in range(4)] + out = np.ones((2, 4), dtype=object) + out[0, :] = [[2] for i in range(4)] + np.add.reduceat(arr, np.arange(4), out=arr, axis=-1) + np.add.reduceat(arr, np.arange(4), out=arr, axis=-1) + assert_array_equal(arr, out) + + def test_zerosize_reduction(self): + # Test with default dtype and object dtype + for a in [[], np.array([], dtype=object)]: + assert_equal(np.sum(a), 0) + assert_equal(np.prod(a), 1) + assert_equal(np.any(a), False) + assert_equal(np.all(a), True) + assert_raises(ValueError, np.max, a) + assert_raises(ValueError, np.min, a) + + def test_axis_out_of_bounds(self): + a = np.array([False, False]) + assert_raises(np.AxisError, a.all, axis=1) + a = np.array([False, False]) + assert_raises(np.AxisError, a.all, axis=-2) + + a = np.array([False, False]) + assert_raises(np.AxisError, a.any, axis=1) + a = np.array([False, False]) + assert_raises(np.AxisError, a.any, axis=-2) + + def test_scalar_reduction(self): + # The functions 'sum', 'prod', etc allow specifying axis=0 + # even for scalars + assert_equal(np.sum(3, axis=0), 3) + assert_equal(np.prod(3.5, axis=0), 3.5) + assert_equal(np.any(True, axis=0), True) + assert_equal(np.all(False, axis=0), False) + assert_equal(np.max(3, axis=0), 3) + assert_equal(np.min(2.5, axis=0), 2.5) + + # Check scalar behaviour for ufuncs without an 
identity + assert_equal(np.power.reduce(3), 3) + + # Make sure that scalars are coming out from this operation + assert_(type(np.prod(np.float32(2.5), axis=0)) is np.float32) + assert_(type(np.sum(np.float32(2.5), axis=0)) is np.float32) + assert_(type(np.max(np.float32(2.5), axis=0)) is np.float32) + assert_(type(np.min(np.float32(2.5), axis=0)) is np.float32) + + # check if scalars/0-d arrays get cast + assert_(type(np.any(0, axis=0)) is np.bool_) + + # assert that 0-d arrays get wrapped + class MyArray(np.ndarray): + pass + a = np.array(1).view(MyArray) + assert_(type(np.any(a)) is MyArray) + + def test_casting_out_param(self): + # Test that it's possible to do casts on output + a = np.ones((200, 100), np.int64) + b = np.ones((200, 100), np.int64) + c = np.ones((200, 100), np.float64) + np.add(a, b, out=c) + assert_equal(c, 2) + + a = np.zeros(65536) + b = np.zeros(65536, dtype=np.float32) + np.subtract(a, 0, out=b) + assert_equal(b, 0) + + def test_where_param(self): + # Test that the where= ufunc parameter works with regular arrays + a = np.arange(7) + b = np.ones(7) + c = np.zeros(7) + np.add(a, b, out=c, where=(a % 2 == 1)) + assert_equal(c, [0, 2, 0, 4, 0, 6, 0]) + + a = np.arange(4).reshape(2, 2) + 2 + np.power(a, [2, 3], out=a, where=[[0, 1], [1, 0]]) + assert_equal(a, [[2, 27], [16, 5]]) + # Broadcasting the where= parameter + np.subtract(a, 2, out=a, where=[True, False]) + assert_equal(a, [[0, 27], [14, 5]]) + + def test_where_param_buffer_output(self): + # This test is temporarily skipped because it requires + # adding masking features to the nditer to work properly + + # With casting on output + a = np.ones(10, np.int64) + b = np.ones(10, np.int64) + c = 1.5 * np.ones(10, np.float64) + np.add(a, b, out=c, where=[1, 0, 0, 1, 0, 0, 1, 1, 1, 0]) + assert_equal(c, [2, 1.5, 1.5, 2, 1.5, 1.5, 2, 2, 2, 1.5]) + + def test_where_param_alloc(self): + # With casting and allocated output + a = np.array([1], dtype=np.int64) + m = np.array([True], dtype=bool) + assert_equal(np.sqrt(a, where=m), [1]) + + # No casting and allocated output + a = np.array([1], dtype=np.float64) + m = np.array([True], dtype=bool) + assert_equal(np.sqrt(a, where=m), [1]) + + def test_where_with_broadcasting(self): + # See gh-17198 + a = np.random.random((5000, 4)) + b = np.random.random((5000, 1)) + + where = a > 0.3 + out = np.full_like(a, 0) + np.less(a, b, where=where, out=out) + b_where = np.broadcast_to(b, a.shape)[where] + assert_array_equal((a[where] < b_where), out[where].astype(bool)) + assert not out[~where].any() # outside mask, out remains all 0 + + def check_identityless_reduction(self, a): + # np.minimum.reduce is an identityless reduction + + # Verify that it sees the zero at various positions + a[...] = 1 + a[1, 0, 0] = 0 + assert_equal(np.minimum.reduce(a, axis=None), 0) + assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1]) + assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1]) + assert_equal(np.minimum.reduce(a, axis=(1, 2)), [1, 0]) + assert_equal(np.minimum.reduce(a, axis=0), + [[0, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]) + assert_equal(np.minimum.reduce(a, axis=1), + [[1, 1, 1, 1], [0, 1, 1, 1]]) + assert_equal(np.minimum.reduce(a, axis=2), + [[1, 1, 1], [0, 1, 1]]) + assert_equal(np.minimum.reduce(a, axis=()), a) + + a[...] 
= 1 + a[0, 1, 0] = 0 + assert_equal(np.minimum.reduce(a, axis=None), 0) + assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1]) + assert_equal(np.minimum.reduce(a, axis=(0, 2)), [1, 0, 1]) + assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1]) + assert_equal(np.minimum.reduce(a, axis=0), + [[1, 1, 1, 1], [0, 1, 1, 1], [1, 1, 1, 1]]) + assert_equal(np.minimum.reduce(a, axis=1), + [[0, 1, 1, 1], [1, 1, 1, 1]]) + assert_equal(np.minimum.reduce(a, axis=2), + [[1, 0, 1], [1, 1, 1]]) + assert_equal(np.minimum.reduce(a, axis=()), a) + + a[...] = 1 + a[0, 0, 1] = 0 + assert_equal(np.minimum.reduce(a, axis=None), 0) + assert_equal(np.minimum.reduce(a, axis=(0, 1)), [1, 0, 1, 1]) + assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1]) + assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1]) + assert_equal(np.minimum.reduce(a, axis=0), + [[1, 0, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]) + assert_equal(np.minimum.reduce(a, axis=1), + [[1, 0, 1, 1], [1, 1, 1, 1]]) + assert_equal(np.minimum.reduce(a, axis=2), + [[0, 1, 1], [1, 1, 1]]) + assert_equal(np.minimum.reduce(a, axis=()), a) + + def test_identityless_reduction_corder(self): + a = np.empty((2, 3, 4), order='C') + self.check_identityless_reduction(a) + + def test_identityless_reduction_forder(self): + a = np.empty((2, 3, 4), order='F') + self.check_identityless_reduction(a) + + def test_identityless_reduction_otherorder(self): + a = np.empty((2, 4, 3), order='C').swapaxes(1, 2) + self.check_identityless_reduction(a) + + def test_identityless_reduction_noncontig(self): + a = np.empty((3, 5, 4), order='C').swapaxes(1, 2) + a = a[1:, 1:, 1:] + self.check_identityless_reduction(a) + + def test_identityless_reduction_noncontig_unaligned(self): + a = np.empty((3*4*5*8 + 1,), dtype='i1') + a = a[1:].view(dtype='f8') + a.shape = (3, 4, 5) + a = a[1:, 1:, 1:] + self.check_identityless_reduction(a) + + def test_initial_reduction(self): + # np.minimum.reduce is an identityless reduction + + # For cases like np.maximum(np.abs(...), initial=0) + # More generally, a supremum over non-negative numbers. + assert_equal(np.maximum.reduce([], initial=0), 0) + + # For cases like reduction of an empty array over the reals. + assert_equal(np.minimum.reduce([], initial=np.inf), np.inf) + assert_equal(np.maximum.reduce([], initial=-np.inf), -np.inf) + + # Random tests + assert_equal(np.minimum.reduce([5], initial=4), 4) + assert_equal(np.maximum.reduce([4], initial=5), 5) + assert_equal(np.maximum.reduce([5], initial=4), 5) + assert_equal(np.minimum.reduce([4], initial=5), 4) + + # Check initial=None raises ValueError for both types of ufunc reductions + assert_raises(ValueError, np.minimum.reduce, [], initial=None) + assert_raises(ValueError, np.add.reduce, [], initial=None) + + # Check that np._NoValue gives default behavior. 
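+        # (np._NoValue is the sentinel used when initial= is not passed, so
+        # the call below is expected to behave like a plain np.add.reduce([])
+        # and return the additive identity, 0.)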
+        assert_equal(np.add.reduce([], initial=np._NoValue), 0)
+
+        # Check that initial kwarg behaves as intended for dtype=object
+        a = np.array([10], dtype=object)
+        res = np.add.reduce(a, initial=5)
+        assert_equal(res, 15)
+
+    @pytest.mark.parametrize('axis', (0, 1, None))
+    @pytest.mark.parametrize('where', (np.array([False, True, True]),
+                                       np.array([[True], [False], [True]]),
+                                       np.array([[True, False, False],
+                                                 [False, True, False],
+                                                 [False, True, True]])))
+    def test_reduction_with_where(self, axis, where):
+        a = np.arange(9.).reshape(3, 3)
+        a_copy = a.copy()
+        a_check = np.zeros_like(a)
+        np.positive(a, out=a_check, where=where)
+
+        res = np.add.reduce(a, axis=axis, where=where)
+        check = a_check.sum(axis)
+        assert_equal(res, check)
+        # Check we do not overwrite elements of a internally.
+        assert_array_equal(a, a_copy)
+
+    @pytest.mark.parametrize(('axis', 'where'),
+                             ((0, np.array([True, False, True])),
+                              (1, [True, True, False]),
+                              (None, True)))
+    @pytest.mark.parametrize('initial', (-np.inf, 5.))
+    def test_reduction_with_where_and_initial(self, axis, where, initial):
+        a = np.arange(9.).reshape(3, 3)
+        a_copy = a.copy()
+        a_check = np.full(a.shape, -np.inf)
+        np.positive(a, out=a_check, where=where)
+
+        res = np.maximum.reduce(a, axis=axis, where=where, initial=initial)
+        check = a_check.max(axis, initial=initial)
+        assert_equal(res, check)
+
+    def test_reduction_where_initial_needed(self):
+        a = np.arange(9.).reshape(3, 3)
+        m = [False, True, False]
+        assert_raises(ValueError, np.maximum.reduce, a, where=m)
+
+    def test_identityless_reduction_nonreorderable(self):
+        a = np.array([[8.0, 2.0, 2.0], [1.0, 0.5, 0.25]])
+
+        res = np.divide.reduce(a, axis=0)
+        assert_equal(res, [8.0, 4.0, 8.0])
+
+        res = np.divide.reduce(a, axis=1)
+        assert_equal(res, [2.0, 8.0])
+
+        res = np.divide.reduce(a, axis=())
+        assert_equal(res, a)
+
+        assert_raises(ValueError, np.divide.reduce, a, axis=(0, 1))
+
+    def test_reduce_zero_axis(self):
+        # If we have an n x m array and do a reduction with axis=1, then we
+        # are doing n reductions, each of which takes an m-element array. For
+        # a reduction operation without an identity:
+        #     n > 0, m > 0: fine
+        #     n = 0, m > 0: fine, doing 0 reductions of m-element arrays
+        #     n > 0, m = 0: can't reduce a 0-element array, ValueError
+        #     n = 0, m = 0: can't reduce a 0-element array, ValueError (for
+        #         consistency with the above case)
+        # This test doesn't actually look at return values; it just checks
+        # that we get an error in exactly those cases where we expect one,
+        # and assumes the calculations themselves are done correctly.
+
+        def ok(f, *args, **kwargs):
+            f(*args, **kwargs)
+
+        def err(f, *args, **kwargs):
+            assert_raises(ValueError, f, *args, **kwargs)
+
+        def t(expect, func, n, m):
+            expect(func, np.zeros((n, m)), axis=1)
+            expect(func, np.zeros((m, n)), axis=0)
+            expect(func, np.zeros((n // 2, n // 2, m)), axis=2)
+            expect(func, np.zeros((n // 2, m, n // 2)), axis=1)
+            expect(func, np.zeros((n, m // 2, m // 2)), axis=(1, 2))
+            expect(func, np.zeros((m // 2, n, m // 2)), axis=(0, 2))
+            expect(func, np.zeros((m // 3, m // 3, m // 3,
+                                   n // 2, n // 2)),
+                   axis=(0, 1, 2))
+            # Check what happens if the inner (resp.
outer) dimensions are a + # mix of zero and non-zero: + expect(func, np.zeros((10, m, n)), axis=(0, 1)) + expect(func, np.zeros((10, n, m)), axis=(0, 2)) + expect(func, np.zeros((m, 10, n)), axis=0) + expect(func, np.zeros((10, m, n)), axis=1) + expect(func, np.zeros((10, n, m)), axis=2) + + # np.maximum is just an arbitrary ufunc with no reduction identity + assert_equal(np.maximum.identity, None) + t(ok, np.maximum.reduce, 30, 30) + t(ok, np.maximum.reduce, 0, 30) + t(err, np.maximum.reduce, 30, 0) + t(err, np.maximum.reduce, 0, 0) + err(np.maximum.reduce, []) + np.maximum.reduce(np.zeros((0, 0)), axis=()) + + # all of the combinations are fine for a reduction that has an + # identity + t(ok, np.add.reduce, 30, 30) + t(ok, np.add.reduce, 0, 30) + t(ok, np.add.reduce, 30, 0) + t(ok, np.add.reduce, 0, 0) + np.add.reduce([]) + np.add.reduce(np.zeros((0, 0)), axis=()) + + # OTOH, accumulate always makes sense for any combination of n and m, + # because it maps an m-element array to an m-element array. These + # tests are simpler because accumulate doesn't accept multiple axes. + for uf in (np.maximum, np.add): + uf.accumulate(np.zeros((30, 0)), axis=0) + uf.accumulate(np.zeros((0, 30)), axis=0) + uf.accumulate(np.zeros((30, 30)), axis=0) + uf.accumulate(np.zeros((0, 0)), axis=0) + + def test_safe_casting(self): + # In old versions of numpy, in-place operations used the 'unsafe' + # casting rules. In versions >= 1.10, 'same_kind' is the + # default and an exception is raised instead of a warning. + # when 'same_kind' is not satisfied. + a = np.array([1, 2, 3], dtype=int) + # Non-in-place addition is fine + assert_array_equal(assert_no_warnings(np.add, a, 1.1), + [2.1, 3.1, 4.1]) + assert_raises(TypeError, np.add, a, 1.1, out=a) + + def add_inplace(a, b): + a += b + + assert_raises(TypeError, add_inplace, a, 1.1) + # Make sure that explicitly overriding the exception is allowed: + assert_no_warnings(np.add, a, 1.1, out=a, casting="unsafe") + assert_array_equal(a, [2, 3, 4]) + + def test_ufunc_custom_out(self): + # Test ufunc with built in input types and custom output type + + a = np.array([0, 1, 2], dtype='i8') + b = np.array([0, 1, 2], dtype='i8') + c = np.empty(3, dtype=_rational_tests.rational) + + # Output must be specified so numpy knows what + # ufunc signature to look for + result = _rational_tests.test_add(a, b, c) + target = np.array([0, 2, 4], dtype=_rational_tests.rational) + assert_equal(result, target) + + # no output type should raise TypeError + with assert_raises(TypeError): + _rational_tests.test_add(a, b) + + def test_operand_flags(self): + a = np.arange(16, dtype='l').reshape(4, 4) + b = np.arange(9, dtype='l').reshape(3, 3) + opflag_tests.inplace_add(a[:-1, :-1], b) + assert_equal(a, np.array([[0, 2, 4, 3], [7, 9, 11, 7], + [14, 16, 18, 11], [12, 13, 14, 15]], dtype='l')) + + a = np.array(0) + opflag_tests.inplace_add(a, 3) + assert_equal(a, 3) + opflag_tests.inplace_add(a, [3, 4]) + assert_equal(a, 10) + + def test_struct_ufunc(self): + import numpy.core._struct_ufunc_tests as struct_ufunc + + a = np.array([(1, 2, 3)], dtype='u8,u8,u8') + b = np.array([(1, 2, 3)], dtype='u8,u8,u8') + + result = struct_ufunc.add_triplet(a, b) + assert_equal(result, np.array([(2, 4, 6)], dtype='u8,u8,u8')) + assert_raises(RuntimeError, struct_ufunc.register_fail) + + def test_custom_ufunc(self): + a = np.array( + [_rational_tests.rational(1, 2), + _rational_tests.rational(1, 3), + _rational_tests.rational(1, 4)], + dtype=_rational_tests.rational) + b = np.array( + 
[_rational_tests.rational(1, 2), + _rational_tests.rational(1, 3), + _rational_tests.rational(1, 4)], + dtype=_rational_tests.rational) + + result = _rational_tests.test_add_rationals(a, b) + expected = np.array( + [_rational_tests.rational(1), + _rational_tests.rational(2, 3), + _rational_tests.rational(1, 2)], + dtype=_rational_tests.rational) + assert_equal(result, expected) + + def test_custom_ufunc_forced_sig(self): + # gh-9351 - looking for a non-first userloop would previously hang + with assert_raises(TypeError): + np.multiply(_rational_tests.rational(1), 1, + signature=(_rational_tests.rational, int, None)) + + def test_custom_array_like(self): + + class MyThing: + __array_priority__ = 1000 + + rmul_count = 0 + getitem_count = 0 + + def __init__(self, shape): + self.shape = shape + + def __len__(self): + return self.shape[0] + + def __getitem__(self, i): + MyThing.getitem_count += 1 + if not isinstance(i, tuple): + i = (i,) + if len(i) > self.ndim: + raise IndexError("boo") + + return MyThing(self.shape[len(i):]) + + def __rmul__(self, other): + MyThing.rmul_count += 1 + return self + + np.float64(5)*MyThing((3, 3)) + assert_(MyThing.rmul_count == 1, MyThing.rmul_count) + assert_(MyThing.getitem_count <= 2, MyThing.getitem_count) + + def test_inplace_fancy_indexing(self): + + a = np.arange(10) + np.add.at(a, [2, 5, 2], 1) + assert_equal(a, [0, 1, 4, 3, 4, 6, 6, 7, 8, 9]) + + a = np.arange(10) + b = np.array([100, 100, 100]) + np.add.at(a, [2, 5, 2], b) + assert_equal(a, [0, 1, 202, 3, 4, 105, 6, 7, 8, 9]) + + a = np.arange(9).reshape(3, 3) + b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]]) + np.add.at(a, (slice(None), [1, 2, 1]), b) + assert_equal(a, [[0, 201, 102], [3, 404, 205], [6, 607, 308]]) + + a = np.arange(27).reshape(3, 3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, (slice(None), slice(None), [1, 2, 1]), b) + assert_equal(a, + [[[0, 401, 202], + [3, 404, 205], + [6, 407, 208]], + + [[9, 410, 211], + [12, 413, 214], + [15, 416, 217]], + + [[18, 419, 220], + [21, 422, 223], + [24, 425, 226]]]) + + a = np.arange(9).reshape(3, 3) + b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]]) + np.add.at(a, ([1, 2, 1], slice(None)), b) + assert_equal(a, [[0, 1, 2], [403, 404, 405], [206, 207, 208]]) + + a = np.arange(27).reshape(3, 3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, (slice(None), [1, 2, 1], slice(None)), b) + assert_equal(a, + [[[0, 1, 2], + [203, 404, 605], + [106, 207, 308]], + + [[9, 10, 11], + [212, 413, 614], + [115, 216, 317]], + + [[18, 19, 20], + [221, 422, 623], + [124, 225, 326]]]) + + a = np.arange(9).reshape(3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, (0, [1, 2, 1]), b) + assert_equal(a, [[0, 401, 202], [3, 4, 5], [6, 7, 8]]) + + a = np.arange(27).reshape(3, 3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, ([1, 2, 1], 0, slice(None)), b) + assert_equal(a, + [[[0, 1, 2], + [3, 4, 5], + [6, 7, 8]], + + [[209, 410, 611], + [12, 13, 14], + [15, 16, 17]], + + [[118, 219, 320], + [21, 22, 23], + [24, 25, 26]]]) + + a = np.arange(27).reshape(3, 3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, (slice(None), slice(None), slice(None)), b) + assert_equal(a, + [[[100, 201, 302], + [103, 204, 305], + [106, 207, 308]], + + [[109, 210, 311], + [112, 213, 314], + [115, 216, 317]], + + [[118, 219, 320], + [121, 222, 323], + [124, 225, 326]]]) + + a = np.arange(10) + np.negative.at(a, [2, 5, 2]) + assert_equal(a, [0, 1, 2, 3, 4, -5, 6, 7, 8, 9]) + + # Test 0-dim array + a = np.array(0) + np.add.at(a, (), 1) + 
assert_equal(a, 1) + + assert_raises(IndexError, np.add.at, a, 0, 1) + assert_raises(IndexError, np.add.at, a, [], 1) + + # Test mixed dtypes + a = np.arange(10) + np.power.at(a, [1, 2, 3, 2], 3.5) + assert_equal(a, np.array([0, 1, 4414, 46, 4, 5, 6, 7, 8, 9])) + + # Test boolean indexing and boolean ufuncs + a = np.arange(10) + index = a % 2 == 0 + np.equal.at(a, index, [0, 2, 4, 6, 8]) + assert_equal(a, [1, 1, 1, 3, 1, 5, 1, 7, 1, 9]) + + # Test unary operator + a = np.arange(10, dtype='u4') + np.invert.at(a, [2, 5, 2]) + assert_equal(a, [0, 1, 2, 3, 4, 5 ^ 0xffffffff, 6, 7, 8, 9]) + + # Test empty subspace + orig = np.arange(4) + a = orig[:, None][:, 0:0] + np.add.at(a, [0, 1], 3) + assert_array_equal(orig, np.arange(4)) + + # Test with swapped byte order + index = np.array([1, 2, 1], np.dtype('i').newbyteorder()) + values = np.array([1, 2, 3, 4], np.dtype('f').newbyteorder()) + np.add.at(values, index, 3) + assert_array_equal(values, [1, 8, 6, 4]) + + # Test exception thrown + values = np.array(['a', 1], dtype=object) + assert_raises(TypeError, np.add.at, values, [0, 1], 1) + assert_array_equal(values, np.array(['a', 1], dtype=object)) + + # Test multiple output ufuncs raise error, gh-5665 + assert_raises(ValueError, np.modf.at, np.arange(10), [1]) + + def test_reduce_arguments(self): + f = np.add.reduce + d = np.ones((5,2), dtype=int) + o = np.ones((2,), dtype=d.dtype) + r = o * 5 + assert_equal(f(d), r) + # a, axis=0, dtype=None, out=None, keepdims=False + assert_equal(f(d, axis=0), r) + assert_equal(f(d, 0), r) + assert_equal(f(d, 0, dtype=None), r) + assert_equal(f(d, 0, dtype='i'), r) + assert_equal(f(d, 0, 'i'), r) + assert_equal(f(d, 0, None), r) + assert_equal(f(d, 0, None, out=None), r) + assert_equal(f(d, 0, None, out=o), r) + assert_equal(f(d, 0, None, o), r) + assert_equal(f(d, 0, None, None), r) + assert_equal(f(d, 0, None, None, keepdims=False), r) + assert_equal(f(d, 0, None, None, True), r.reshape((1,) + r.shape)) + assert_equal(f(d, 0, None, None, False, 0), r) + assert_equal(f(d, 0, None, None, False, initial=0), r) + assert_equal(f(d, 0, None, None, False, 0, True), r) + assert_equal(f(d, 0, None, None, False, 0, where=True), r) + # multiple keywords + assert_equal(f(d, axis=0, dtype=None, out=None, keepdims=False), r) + assert_equal(f(d, 0, dtype=None, out=None, keepdims=False), r) + assert_equal(f(d, 0, None, out=None, keepdims=False), r) + assert_equal(f(d, 0, None, out=None, keepdims=False, initial=0, + where=True), r) + + # too little + assert_raises(TypeError, f) + # too much + assert_raises(TypeError, f, d, 0, None, None, False, 0, True, 1) + # invalid axis + assert_raises(TypeError, f, d, "invalid") + assert_raises(TypeError, f, d, axis="invalid") + assert_raises(TypeError, f, d, axis="invalid", dtype=None, + keepdims=True) + # invalid dtype + assert_raises(TypeError, f, d, 0, "invalid") + assert_raises(TypeError, f, d, dtype="invalid") + assert_raises(TypeError, f, d, dtype="invalid", out=None) + # invalid out + assert_raises(TypeError, f, d, 0, None, "invalid") + assert_raises(TypeError, f, d, out="invalid") + assert_raises(TypeError, f, d, out="invalid", dtype=None) + # keepdims boolean, no invalid value + # assert_raises(TypeError, f, d, 0, None, None, "invalid") + # assert_raises(TypeError, f, d, keepdims="invalid", axis=0, dtype=None) + # invalid mix + assert_raises(TypeError, f, d, 0, keepdims="invalid", dtype="invalid", + out=None) + + # invalid keyword + assert_raises(TypeError, f, d, axis=0, dtype=None, invalid=0) + assert_raises(TypeError, f, d, 
invalid=0) + assert_raises(TypeError, f, d, 0, keepdims=True, invalid="invalid", + out=None) + assert_raises(TypeError, f, d, axis=0, dtype=None, keepdims=True, + out=None, invalid=0) + assert_raises(TypeError, f, d, axis=0, dtype=None, + out=None, invalid=0) + + def test_structured_equal(self): + # https://github.com/numpy/numpy/issues/4855 + + class MyA(np.ndarray): + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + return getattr(ufunc, method)(*(input.view(np.ndarray) + for input in inputs), **kwargs) + a = np.arange(12.).reshape(4,3) + ra = a.view(dtype=('f8,f8,f8')).squeeze() + mra = ra.view(MyA) + + target = np.array([ True, False, False, False], dtype=bool) + assert_equal(np.all(target == (mra == ra[0])), True) + + def test_scalar_equal(self): + # Scalar comparisons should always work, without deprecation warnings. + # even when the ufunc fails. + a = np.array(0.) + b = np.array('a') + assert_(a != b) + assert_(b != a) + assert_(not (a == b)) + assert_(not (b == a)) + + def test_NotImplemented_not_returned(self): + # See gh-5964 and gh-2091. Some of these functions are not operator + # related and were fixed for other reasons in the past. + binary_funcs = [ + np.power, np.add, np.subtract, np.multiply, np.divide, + np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or, + np.bitwise_xor, np.left_shift, np.right_shift, np.fmax, + np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2, + np.logical_and, np.logical_or, np.logical_xor, np.maximum, + np.minimum, np.mod, + np.greater, np.greater_equal, np.less, np.less_equal, + np.equal, np.not_equal] + + a = np.array('1') + b = 1 + c = np.array([1., 2.]) + for f in binary_funcs: + assert_raises(TypeError, f, a, b) + assert_raises(TypeError, f, c, a) + + @pytest.mark.parametrize("ufunc", + [np.logical_and, np.logical_or]) # logical_xor object loop is bad + @pytest.mark.parametrize("signature", + [(None, None, object), (object, None, None), + (None, object, None)]) + def test_logical_ufuncs_object_signatures(self, ufunc, signature): + a = np.array([True, None, False], dtype=object) + res = ufunc(a, a, signature=signature) + assert res.dtype == object + + @pytest.mark.parametrize("ufunc", + [np.logical_and, np.logical_or, np.logical_xor]) + @pytest.mark.parametrize("signature", + [(bool, None, object), (object, None, bool), + (None, object, bool)]) + def test_logical_ufuncs_mixed_object_signatures(self, ufunc, signature): + # Most mixed signatures fail (except those with bool out, e.g. `OO->?`) + a = np.array([True, None, False]) + with pytest.raises(TypeError): + ufunc(a, a, signature=signature) + + def test_reduce_noncontig_output(self): + # Check that reduction deals with non-contiguous output arrays + # appropriately. 
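+        # (Here "non-contiguous" means the output below is a strided view,
+        # y = y_base[::2, :], so the reduction must write through rows 0 and
+        # 2 of y_base while leaving rows 1 and 3 untouched.)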
+        #
+        # gh-8036
+
+        x = np.arange(7*13*8, dtype=np.int16).reshape(7, 13, 8)
+        x = x[4:6,1:11:6,1:5].transpose(1, 2, 0)
+        y_base = np.arange(4*4, dtype=np.int16).reshape(4, 4)
+        y = y_base[::2,:]
+
+        y_base_copy = y_base.copy()
+
+        r0 = np.add.reduce(x, out=y.copy(), axis=2)
+        r1 = np.add.reduce(x, out=y, axis=2)
+
+        # The results should match, and y_base shouldn't get clobbered
+        assert_equal(r0, r1)
+        assert_equal(y_base[1,:], y_base_copy[1,:])
+        assert_equal(y_base[3,:], y_base_copy[3,:])
+
+    @pytest.mark.parametrize('out_shape',
+                             [(), (1,), (3,), (1, 1), (1, 3), (4, 3)])
+    @pytest.mark.parametrize('keepdims', [True, False])
+    @pytest.mark.parametrize('f_reduce', [np.add.reduce, np.minimum.reduce])
+    def test_reduce_wrong_dimension_output(self, f_reduce, keepdims, out_shape):
+        # Test that we're not incorrectly broadcasting dimensions.
+        # See gh-15144 (failed for np.add.reduce previously).
+        a = np.arange(12.).reshape(4, 3)
+        out = np.empty(out_shape, a.dtype)
+
+        correct_out = f_reduce(a, axis=0, keepdims=keepdims)
+        if out_shape != correct_out.shape:
+            with assert_raises(ValueError):
+                f_reduce(a, axis=0, out=out, keepdims=keepdims)
+        else:
+            check = f_reduce(a, axis=0, out=out, keepdims=keepdims)
+            assert_(check is out)
+            assert_array_equal(check, correct_out)
+
+    def test_reduce_output_does_not_broadcast_input(self):
+        # Test that the output shape cannot broadcast an input dimension
+        # (it never can add dimensions, but it might expand an existing one)
+        a = np.ones((1, 10))
+        out_correct = np.empty((1, 1))
+        out_incorrect = np.empty((3, 1))
+        np.add.reduce(a, axis=-1, out=out_correct, keepdims=True)
+        np.add.reduce(a, axis=-1, out=out_correct[:, 0], keepdims=False)
+        with assert_raises(ValueError):
+            np.add.reduce(a, axis=-1, out=out_incorrect, keepdims=True)
+        with assert_raises(ValueError):
+            np.add.reduce(a, axis=-1, out=out_incorrect[:, 0], keepdims=False)
+
+    def test_reduce_output_subclass_ok(self):
+        class MyArr(np.ndarray):
+            pass
+
+        out = np.empty(())
+        np.add.reduce(np.ones(5), out=out)  # no subclass, all fine
+        out = out.view(MyArr)
+        assert np.add.reduce(np.ones(5), out=out) is out
+        assert type(np.add.reduce(out)) is MyArr
+
+    def test_no_doc_string(self):
+        # gh-9337
+        assert_('\n' not in umt.inner1d_no_doc.__doc__)
+
+    def test_invalid_args(self):
+        # gh-7961
+        exc = pytest.raises(TypeError, np.sqrt, None)
+        # minimally check the exception text
+        assert exc.match('loop of ufunc does not support')
+
+    @pytest.mark.parametrize('nat', [np.datetime64('nat'), np.timedelta64('nat')])
+    def test_nat_is_not_finite(self, nat):
+        try:
+            assert not np.isfinite(nat)
+        except TypeError:
+            pass  # ok, just not implemented
+
+    @pytest.mark.parametrize('nat', [np.datetime64('nat'), np.timedelta64('nat')])
+    def test_nat_is_nan(self, nat):
+        try:
+            assert np.isnan(nat)
+        except TypeError:
+            pass  # ok, just not implemented
+
+    @pytest.mark.parametrize('nat', [np.datetime64('nat'), np.timedelta64('nat')])
+    def test_nat_is_not_inf(self, nat):
+        try:
+            assert not np.isinf(nat)
+        except TypeError:
+            pass  # ok, just not implemented
+
+
+@pytest.mark.parametrize('ufunc', [getattr(np, x) for x in dir(np)
+                                   if isinstance(getattr(np, x), np.ufunc)])
+def test_ufunc_types(ufunc):
+    '''
+    Check that all ufuncs return the correct type. Avoid
+    object and boolean types since many operations are not defined
+    for them.
+
+    Choose the shape so even dot and matmul will succeed.
+    '''
+    for typ in ufunc.types:
+        # types is a list of strings like ii->i
+        if 'O' in typ or '?' in typ:
+            continue
+        inp, out = typ.split('->')
+        args = [np.ones((3, 3), t) for t in inp]
+        with warnings.catch_warnings(record=True):
+            warnings.filterwarnings("always")
+            res = ufunc(*args)
+        if isinstance(res, tuple):
+            outs = tuple(out)
+            assert len(res) == len(outs)
+            for r, t in zip(res, outs):
+                assert r.dtype == np.dtype(t)
+        else:
+            assert res.dtype == np.dtype(out)
+
+@pytest.mark.parametrize('ufunc', [getattr(np, x) for x in dir(np)
+                                   if isinstance(getattr(np, x), np.ufunc)])
+def test_ufunc_noncontiguous(ufunc):
+    '''
+    Check that contiguous and non-contiguous calls to ufuncs
+    have the same results for values in range(1, 7)
+    '''
+    for typ in ufunc.types:
+        # types is a list of strings like ii->i
+        if any(set('O?mM') & set(typ)):
+            # bool, object, datetime are too irregular for this simple test
+            continue
+        inp, out = typ.split('->')
+        args_c = [np.empty(6, t) for t in inp]
+        args_n = [np.empty(18, t)[::3] for t in inp]
+        for a in args_c:
+            a.flat = range(1, 7)
+        for a in args_n:
+            a.flat = range(1, 7)
+        with warnings.catch_warnings(record=True):
+            warnings.filterwarnings("always")
+            res_c = ufunc(*args_c)
+            res_n = ufunc(*args_n)
+        if len(out) == 1:
+            res_c = (res_c,)
+            res_n = (res_n,)
+        for c_ar, n_ar in zip(res_c, res_n):
+            dt = c_ar.dtype
+            if np.issubdtype(dt, np.floating):
+                # for floating-point results allow a small tolerance in
+                # comparisons, since different algorithms (libm vs.
+                # intrinsics) can be used for different input strides
+                res_eps = np.finfo(dt).eps
+                tol = 2*res_eps
+                assert_allclose(res_c, res_n, atol=tol, rtol=tol)
+            else:
+                assert_equal(c_ar, n_ar)
+
+
+@pytest.mark.parametrize('ufunc', [np.sign, np.equal])
+def test_ufunc_warn_with_nan(ufunc):
+    # issue gh-15127
+    # test that calling certain ufuncs with a non-standard `nan` value does
+    # not emit a warning
+    # `b` holds a 64 bit signaling nan: the most significant bit of the
+    # significand is zero.
+    b = np.array([0x7ff0000000000001], 'i8').view('f8')
+    assert np.isnan(b)
+    if ufunc.nin == 1:
+        ufunc(b)
+    elif ufunc.nin == 2:
+        ufunc(b, b.copy())
+    else:
+        raise ValueError('ufunc with more than 2 inputs')
+
+
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+def test_ufunc_casterrors():
+    # Tests that casting errors are correctly reported and buffers are
+    # cleared.
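+    # (np.BUFSIZE is the size, in elements, of the internal buffers ufuncs
+    # use for chunked casting; placing the offending element more than one
+    # buffer's worth into the array exercises cleanup part-way through.)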
+ # The following array can be added to itself as an object array, but + # the result cannot be cast to an integer output: + value = 123 # relies on python cache (leak-check will still find it) + arr = np.array([value] * int(np.BUFSIZE * 1.5) + + ["string"] + + [value] * int(1.5 * np.BUFSIZE), dtype=object) + out = np.ones(len(arr), dtype=np.intp) + + count = sys.getrefcount(value) + with pytest.raises(ValueError): + # Output casting failure: + np.add(arr, arr, out=out, casting="unsafe") + + assert count == sys.getrefcount(value) + # output is unchanged after the error, this shows that the iteration + # was aborted (this is not necessarily defined behaviour) + assert out[-1] == 1 + + with pytest.raises(ValueError): + # Input casting failure: + np.add(arr, arr, out=out, dtype=np.intp, casting="unsafe") + + assert count == sys.getrefcount(value) + # output is unchanged after the error, this shows that the iteration + # was aborted (this is not necessarily defined behaviour) + assert out[-1] == 1 + + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +@pytest.mark.parametrize("offset", + [0, np.BUFSIZE//2, int(1.5*np.BUFSIZE)]) +def test_reduce_casterrors(offset): + # Test reporting of casting errors in reductions, we test various + # offsets to where the casting error will occur, since these may occur + # at different places during the reduction procedure. For example + # the first item may be special. + value = 123 # relies on python cache (leak-check will still find it) + arr = np.array([value] * offset + + ["string"] + + [value] * int(1.5 * np.BUFSIZE), dtype=object) + out = np.array(-1, dtype=np.intp) + + count = sys.getrefcount(value) + with pytest.raises(ValueError): + # This is an unsafe cast, but we currently always allow that: + np.add.reduce(arr, dtype=np.intp, out=out) + assert count == sys.getrefcount(value) + # If an error occurred during casting, the operation is done at most until + # the error occurs (the result of which would be `value * offset`) and -1 + # if the error happened immediately. 
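+    # (So the partial result checked below can be anywhere from -1, the
+    # initial value of out, up to just under value * offset.)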
+ # This does not define behaviour, the output is invalid and thus undefined + assert out[()] < value * offset diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_umath.py b/MLPY/Lib/site-packages/numpy/core/tests/test_umath.py new file mode 100644 index 0000000000000000000000000000000000000000..bc1a3fd62567163f1e1dcce74ba06521bc7843a1 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/test_umath.py @@ -0,0 +1,3643 @@ +import platform +import warnings +import fnmatch +import itertools +import pytest +import sys +import os +from fractions import Fraction +from functools import reduce + +import numpy.core.umath as ncu +from numpy.core import _umath_tests as ncu_tests +import numpy as np +from numpy.testing import ( + assert_, assert_equal, assert_raises, assert_raises_regex, + assert_array_equal, assert_almost_equal, assert_array_almost_equal, + assert_array_max_ulp, assert_allclose, assert_no_warnings, suppress_warnings, + _gen_alignment_data, assert_array_almost_equal_nulp, assert_warns + ) + +def get_glibc_version(): + try: + ver = os.confstr('CS_GNU_LIBC_VERSION').rsplit(' ')[1] + except Exception as inst: + ver = '0.0' + + return ver + + +glibcver = get_glibc_version() +glibc_newerthan_2_17 = pytest.mark.xfail( + glibcver != '0.0' and glibcver < '2.17', + reason="Older glibc versions may not raise appropriate FP exceptions") + +def on_powerpc(): + """ True if we are running on a Power PC platform.""" + return platform.processor() == 'powerpc' or \ + platform.machine().startswith('ppc') + + +def bad_arcsinh(): + """The blocklisted trig functions are not accurate on aarch64 for + complex256. Rather than dig through the actual problem skip the + test. This should be fixed when we can move past glibc2.17 + which is the version in manylinux2014 + """ + x = 1.78e-10 + v1 = np.arcsinh(np.float128(x)) + v2 = np.arcsinh(np.complex256(x)).real + # The eps for float128 is 1-e33, so this is way bigger + return abs((v1 / v2) - 1.0) > 1e-23 + +if platform.machine() == 'aarch64' and bad_arcsinh(): + skip_longcomplex_msg = ('Trig functions of np.longcomplex values known to be ' + 'inaccurate on aarch64 for some compilation ' + 'configurations, should be fixed by building on a ' + 'platform using glibc>2.17') +else: + skip_longcomplex_msg = '' + + +class _FilterInvalids: + def setup(self): + self.olderr = np.seterr(invalid='ignore') + + def teardown(self): + np.seterr(**self.olderr) + + +class TestConstants: + def test_pi(self): + assert_allclose(ncu.pi, 3.141592653589793, 1e-15) + + def test_e(self): + assert_allclose(ncu.e, 2.718281828459045, 1e-15) + + def test_euler_gamma(self): + assert_allclose(ncu.euler_gamma, 0.5772156649015329, 1e-15) + + +class TestOut: + def test_out_subok(self): + for subok in (True, False): + a = np.array(0.5) + o = np.empty(()) + + r = np.add(a, 2, o, subok=subok) + assert_(r is o) + r = np.add(a, 2, out=o, subok=subok) + assert_(r is o) + r = np.add(a, 2, out=(o,), subok=subok) + assert_(r is o) + + d = np.array(5.7) + o1 = np.empty(()) + o2 = np.empty((), dtype=np.int32) + + r1, r2 = np.frexp(d, o1, None, subok=subok) + assert_(r1 is o1) + r1, r2 = np.frexp(d, None, o2, subok=subok) + assert_(r2 is o2) + r1, r2 = np.frexp(d, o1, o2, subok=subok) + assert_(r1 is o1) + assert_(r2 is o2) + + r1, r2 = np.frexp(d, out=(o1, None), subok=subok) + assert_(r1 is o1) + r1, r2 = np.frexp(d, out=(None, o2), subok=subok) + assert_(r2 is o2) + r1, r2 = np.frexp(d, out=(o1, o2), subok=subok) + assert_(r1 is o1) + assert_(r2 is o2) + + with assert_raises(TypeError): + # 
Out argument must be tuple, since there are multiple outputs. + r1, r2 = np.frexp(d, out=o1, subok=subok) + + assert_raises(TypeError, np.add, a, 2, o, o, subok=subok) + assert_raises(TypeError, np.add, a, 2, o, out=o, subok=subok) + assert_raises(TypeError, np.add, a, 2, None, out=o, subok=subok) + assert_raises(ValueError, np.add, a, 2, out=(o, o), subok=subok) + assert_raises(ValueError, np.add, a, 2, out=(), subok=subok) + assert_raises(TypeError, np.add, a, 2, [], subok=subok) + assert_raises(TypeError, np.add, a, 2, out=[], subok=subok) + assert_raises(TypeError, np.add, a, 2, out=([],), subok=subok) + o.flags.writeable = False + assert_raises(ValueError, np.add, a, 2, o, subok=subok) + assert_raises(ValueError, np.add, a, 2, out=o, subok=subok) + assert_raises(ValueError, np.add, a, 2, out=(o,), subok=subok) + + def test_out_wrap_subok(self): + class ArrayWrap(np.ndarray): + __array_priority__ = 10 + + def __new__(cls, arr): + return np.asarray(arr).view(cls).copy() + + def __array_wrap__(self, arr, context): + return arr.view(type(self)) + + for subok in (True, False): + a = ArrayWrap([0.5]) + + r = np.add(a, 2, subok=subok) + if subok: + assert_(isinstance(r, ArrayWrap)) + else: + assert_(type(r) == np.ndarray) + + r = np.add(a, 2, None, subok=subok) + if subok: + assert_(isinstance(r, ArrayWrap)) + else: + assert_(type(r) == np.ndarray) + + r = np.add(a, 2, out=None, subok=subok) + if subok: + assert_(isinstance(r, ArrayWrap)) + else: + assert_(type(r) == np.ndarray) + + r = np.add(a, 2, out=(None,), subok=subok) + if subok: + assert_(isinstance(r, ArrayWrap)) + else: + assert_(type(r) == np.ndarray) + + d = ArrayWrap([5.7]) + o1 = np.empty((1,)) + o2 = np.empty((1,), dtype=np.int32) + + r1, r2 = np.frexp(d, o1, subok=subok) + if subok: + assert_(isinstance(r2, ArrayWrap)) + else: + assert_(type(r2) == np.ndarray) + + r1, r2 = np.frexp(d, o1, None, subok=subok) + if subok: + assert_(isinstance(r2, ArrayWrap)) + else: + assert_(type(r2) == np.ndarray) + + r1, r2 = np.frexp(d, None, o2, subok=subok) + if subok: + assert_(isinstance(r1, ArrayWrap)) + else: + assert_(type(r1) == np.ndarray) + + r1, r2 = np.frexp(d, out=(o1, None), subok=subok) + if subok: + assert_(isinstance(r2, ArrayWrap)) + else: + assert_(type(r2) == np.ndarray) + + r1, r2 = np.frexp(d, out=(None, o2), subok=subok) + if subok: + assert_(isinstance(r1, ArrayWrap)) + else: + assert_(type(r1) == np.ndarray) + + with assert_raises(TypeError): + # Out argument must be tuple, since there are multiple outputs. + r1, r2 = np.frexp(d, out=o1, subok=subok) + + +class TestComparisons: + def test_ignore_object_identity_in_equal(self): + # Check comparing identical objects whose comparison + # is not a simple boolean, e.g., arrays that are compared elementwise. + a = np.array([np.array([1, 2, 3]), None], dtype=object) + assert_raises(ValueError, np.equal, a, a) + + # Check error raised when comparing identical non-comparable objects. + class FunkyType: + def __eq__(self, other): + raise TypeError("I won't compare") + + a = np.array([FunkyType()]) + assert_raises(TypeError, np.equal, a, a) + + # Check identity doesn't override comparison mismatch. + a = np.array([np.nan], dtype=object) + assert_equal(np.equal(a, a), [False]) + + def test_ignore_object_identity_in_not_equal(self): + # Check comparing identical objects whose comparison + # is not a simple boolean, e.g., arrays that are compared elementwise. 
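+        # (For the array below, element 0 is itself an array, so an
+        # elementwise != yields an array whose truth value is ambiguous,
+        # and np.not_equal is expected to raise ValueError.)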
+        a = np.array([np.array([1, 2, 3]), None], dtype=object)
+        assert_raises(ValueError, np.not_equal, a, a)
+
+        # Check error raised when comparing identical non-comparable objects.
+        class FunkyType:
+            def __ne__(self, other):
+                raise TypeError("I won't compare")
+
+        a = np.array([FunkyType()])
+        assert_raises(TypeError, np.not_equal, a, a)
+
+        # Check identity doesn't override comparison mismatch.
+        a = np.array([np.nan], dtype=object)
+        assert_equal(np.not_equal(a, a), [True])
+
+
+class TestAdd:
+    def test_reduce_alignment(self):
+        # gh-9876
+        # make sure arrays with weird strides work with the optimizations in
+        # pairwise_sum_@TYPE@. On x86, the 'b' field will count as aligned at a
+        # 4 byte offset, even though its itemsize is 8.
+        a = np.zeros(2, dtype=[('a', np.int32), ('b', np.float64)])
+        a['a'] = -1
+        assert_equal(a['b'].sum(), 0)
+
+
+class TestDivision:
+    def test_division_int(self):
+        # int division should follow Python
+        x = np.array([5, 10, 90, 100, -5, -10, -90, -100, -120])
+        if 5 / 10 == 0.5:
+            assert_equal(x / 100, [0.05, 0.1, 0.9, 1,
+                                   -0.05, -0.1, -0.9, -1, -1.2])
+        else:
+            assert_equal(x / 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])
+        assert_equal(x // 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])
+        assert_equal(x % 100, [5, 10, 90, 0, 95, 90, 10, 0, 80])
+
+    @pytest.mark.parametrize("dtype,ex_val", itertools.product(
+        np.sctypes['int'] + np.sctypes['uint'], (
+            (
+                # dividend
+                "np.arange(fo.max-lsize, fo.max, dtype=dtype),"
+                # divisors
+                "np.arange(lsize, dtype=dtype),"
+                # scalar divisors
+                "range(15)"
+            ),
+            (
+                # dividend
+                "np.arange(fo.min, fo.min+lsize, dtype=dtype),"
+                # divisors
+                "np.arange(lsize//-2, lsize//2, dtype=dtype),"
+                # scalar divisors
+                "range(fo.min, fo.min + 15)"
+            ), (
+                # dividend
+                "np.arange(fo.max-lsize, fo.max, dtype=dtype),"
+                # divisors
+                "np.arange(lsize, dtype=dtype),"
+                # scalar divisors
+                "[1,3,9,13,neg, fo.min+1, fo.min//2, fo.max//3, fo.max//4]"
+            )
+        )
+    ))
+    def test_division_int_boundary(self, dtype, ex_val):
+        fo = np.iinfo(dtype)
+        neg = -1 if fo.min < 0 else 1
+        # Large enough to test SIMD loops and remainder elements
+        lsize = 512 + 7
+        a, b, divisors = eval(ex_val)
+        a_lst, b_lst = a.tolist(), b.tolist()
+
+        c_div = lambda n, d: (
+            0 if d == 0 or (n and n == fo.min and d == -1) else n//d
+        )
+        with np.errstate(divide='ignore'):
+            ac = a.copy()
+            ac //= b
+            div_ab = a // b
+        div_lst = [c_div(x, y) for x, y in zip(a_lst, b_lst)]
+
+        msg = "Integer arrays floor division check (//)"
+        assert all(div_ab == div_lst), msg
+        msg_eq = "Integer arrays floor division check (//=)"
+        assert all(ac == div_lst), msg_eq
+
+        for divisor in divisors:
+            ac = a.copy()
+            with np.errstate(divide='ignore'):
+                div_a = a // divisor
+                ac //= divisor
+            div_lst = [c_div(i, divisor) for i in a_lst]
+
+            assert all(div_a == div_lst), msg
+            assert all(ac == div_lst), msg_eq
+
+        with np.errstate(divide='raise'):
+            if 0 in b or (fo.min and -1 in b and fo.min in a):
+                # Verify the overflow / divide-by-zero cases
+                with pytest.raises(FloatingPointError):
+                    a // b
+            else:
+                a // b
+            if fo.min and fo.min in a:
+                with pytest.raises(FloatingPointError):
+                    a // -1
+            elif fo.min:
+                a // -1
+            with pytest.raises(FloatingPointError):
+                a // 0
+            with pytest.raises(FloatingPointError):
+                ac = a.copy()
+                ac //= 0
+
+        np.array([], dtype=dtype) // 0
+
+    @pytest.mark.parametrize("dtype,ex_val", itertools.product(
+        np.sctypes['int'] + np.sctypes['uint'], (
+            "np.array([fo.max, 1, 2, 1, 1, 2, 3], dtype=dtype)",
+            "np.array([fo.min, 1, -2, 1, 1, 2, -3], dtype=dtype)",
+            "np.arange(fo.min, fo.min+(100*10),
10, dtype=dtype)", + "np.arange(fo.max-(100*7), fo.max, 7, dtype=dtype)", + ) + )) + def test_division_int_reduce(self, dtype, ex_val): + fo = np.iinfo(dtype) + a = eval(ex_val) + lst = a.tolist() + c_div = lambda n, d: ( + 0 if d == 0 or (n and n == fo.min and d == -1) else n//d + ) + + with np.errstate(divide='ignore'): + div_a = np.floor_divide.reduce(a) + div_lst = reduce(c_div, lst) + msg = "Reduce floor integer division check" + assert div_a == div_lst, msg + + with np.errstate(divide='raise'): + with pytest.raises(FloatingPointError): + np.floor_divide.reduce(np.arange(-100, 100, dtype=dtype)) + if fo.min: + with pytest.raises(FloatingPointError): + np.floor_divide.reduce( + np.array([fo.min, 1, -1], dtype=dtype) + ) + + @pytest.mark.parametrize( + "dividend,divisor,quotient", + [(np.timedelta64(2,'Y'), np.timedelta64(2,'M'), 12), + (np.timedelta64(2,'Y'), np.timedelta64(-2,'M'), -12), + (np.timedelta64(-2,'Y'), np.timedelta64(2,'M'), -12), + (np.timedelta64(-2,'Y'), np.timedelta64(-2,'M'), 12), + (np.timedelta64(2,'M'), np.timedelta64(-2,'Y'), -1), + (np.timedelta64(2,'Y'), np.timedelta64(0,'M'), 0), + (np.timedelta64(2,'Y'), 2, np.timedelta64(1,'Y')), + (np.timedelta64(2,'Y'), -2, np.timedelta64(-1,'Y')), + (np.timedelta64(-2,'Y'), 2, np.timedelta64(-1,'Y')), + (np.timedelta64(-2,'Y'), -2, np.timedelta64(1,'Y')), + (np.timedelta64(-2,'Y'), -2, np.timedelta64(1,'Y')), + (np.timedelta64(-2,'Y'), -3, np.timedelta64(0,'Y')), + (np.timedelta64(-2,'Y'), 0, np.timedelta64('Nat','Y')), + ]) + def test_division_int_timedelta(self, dividend, divisor, quotient): + # If either divisor is 0 or quotient is Nat, check for division by 0 + if divisor and (isinstance(quotient, int) or not np.isnat(quotient)): + msg = "Timedelta floor division check" + assert dividend // divisor == quotient, msg + + # Test for arrays as well + msg = "Timedelta arrays floor division check" + dividend_array = np.array([dividend]*5) + quotient_array = np.array([quotient]*5) + assert all(dividend_array // divisor == quotient_array), msg + else: + with np.errstate(divide='raise', invalid='raise'): + with pytest.raises(FloatingPointError): + dividend // divisor + + def test_division_complex(self): + # check that implementation is correct + msg = "Complex division implementation check" + x = np.array([1. + 1.*1j, 1. + .5*1j, 1. 
+ 2.*1j], dtype=np.complex128)
+        assert_almost_equal(x**2/x, x, err_msg=msg)
+        # check overflow, underflow
+        msg = "Complex division overflow/underflow check"
+        x = np.array([1.e+110, 1.e-110], dtype=np.complex128)
+        y = x**2/x
+        assert_almost_equal(y/x, [1, 1], err_msg=msg)
+
+    def test_zero_division_complex(self):
+        with np.errstate(invalid="ignore", divide="ignore"):
+            x = np.array([0.0], dtype=np.complex128)
+            y = 1.0/x
+            assert_(np.isinf(y)[0])
+            y = complex(np.inf, np.nan)/x
+            assert_(np.isinf(y)[0])
+            y = complex(np.nan, np.inf)/x
+            assert_(np.isinf(y)[0])
+            y = complex(np.inf, np.inf)/x
+            assert_(np.isinf(y)[0])
+            y = 0.0/x
+            assert_(np.isnan(y)[0])
+
+    def test_floor_division_complex(self):
+        # check that implementation is correct
+        msg = "Complex floor division implementation check"
+        x = np.array([.9 + 1j, -.1 + 1j, .9 + .5*1j, .9 + 2.*1j], dtype=np.complex128)
+        y = np.array([0., -1., 0., 0.], dtype=np.complex128)
+        assert_equal(np.floor_divide(x**2, x), y, err_msg=msg)
+        # check overflow, underflow
+        msg = "Complex floor division overflow/underflow check"
+        x = np.array([1.e+110, 1.e-110], dtype=np.complex128)
+        y = np.floor_divide(x**2, x)
+        assert_equal(y, [1.e+110, 0], err_msg=msg)
+
+    def test_floor_division_signed_zero(self):
+        # Check that the sign bit is correctly set when dividing positive and
+        # negative zero by one.
+        x = np.zeros(10)
+        assert_equal(np.signbit(x//1), 0)
+        assert_equal(np.signbit((-x)//1), 1)
+
+    @pytest.mark.parametrize('dtype', np.typecodes['Float'])
+    def test_floor_division_errors(self, dtype):
+        fnan = np.array(np.nan, dtype=dtype)
+        fone = np.array(1.0, dtype=dtype)
+        fzer = np.array(0.0, dtype=dtype)
+        finf = np.array(np.inf, dtype=dtype)
+        # divide by zero error check
+        with np.errstate(divide='raise', invalid='ignore'):
+            assert_raises(FloatingPointError, np.floor_divide, fone, fzer)
+        with np.errstate(invalid='raise'):
+            assert_raises(FloatingPointError, np.floor_divide, fnan, fone)
+            assert_raises(FloatingPointError, np.floor_divide, fone, fnan)
+            assert_raises(FloatingPointError, np.floor_divide, fnan, fzer)
+
+    @pytest.mark.parametrize('dtype', np.typecodes['Float'])
+    def test_floor_division_corner_cases(self, dtype):
+        # test corner cases like 1.0//0.0 for errors and return vals
+        x = np.zeros(10, dtype=dtype)
+        y = np.ones(10, dtype=dtype)
+        fnan = np.array(np.nan, dtype=dtype)
+        fone = np.array(1.0, dtype=dtype)
+        fzer = np.array(0.0, dtype=dtype)
+        finf = np.array(np.inf, dtype=dtype)
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "invalid value encountered in floor_divide")
+            div = np.floor_divide(fnan, fone)
+            assert np.isnan(div), "dtype: %s, div: %s" % (dtype, div)
+            div = np.floor_divide(fone, fnan)
+            assert np.isnan(div), "dtype: %s, div: %s" % (dtype, div)
+            div = np.floor_divide(fnan, fzer)
+            assert np.isnan(div), "dtype: %s, div: %s" % (dtype, div)
+        # verify 1.0//0.0 computations return inf
+        with np.errstate(divide='ignore'):
+            z = np.floor_divide(y, x)
+            assert_(np.isinf(z).all())
+
+def floor_divide_and_remainder(x, y):
+    return (np.floor_divide(x, y), np.remainder(x, y))
+
+
+def _signs(dt):
+    if dt in np.typecodes['UnsignedInteger']:
+        return (+1,)
+    else:
+        return (+1, -1)
+
+
+class TestRemainder:
+
+    def test_remainder_basic(self):
+        dt = np.typecodes['AllInteger'] + np.typecodes['Float']
+        for op in [floor_divide_and_remainder, np.divmod]:
+            for dt1, dt2 in itertools.product(dt, dt):
+                for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)):
+                    fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
+                    msg = fmt %
(op.__name__, dt1, dt2, sg1, sg2) + a = np.array(sg1*71, dtype=dt1) + b = np.array(sg2*19, dtype=dt2) + div, rem = op(a, b) + assert_equal(div*b + rem, a, err_msg=msg) + if sg2 == -1: + assert_(b < rem <= 0, msg) + else: + assert_(b > rem >= 0, msg) + + def test_float_remainder_exact(self): + # test that float results are exact for small integers. This also + # holds for the same integers scaled by powers of two. + nlst = list(range(-127, 0)) + plst = list(range(1, 128)) + dividend = nlst + [0] + plst + divisor = nlst + plst + arg = list(itertools.product(dividend, divisor)) + tgt = list(divmod(*t) for t in arg) + + a, b = np.array(arg, dtype=int).T + # convert exact integer results from Python to float so that + # signed zero can be used, it is checked. + tgtdiv, tgtrem = np.array(tgt, dtype=float).T + tgtdiv = np.where((tgtdiv == 0.0) & ((b < 0) ^ (a < 0)), -0.0, tgtdiv) + tgtrem = np.where((tgtrem == 0.0) & (b < 0), -0.0, tgtrem) + + for op in [floor_divide_and_remainder, np.divmod]: + for dt in np.typecodes['Float']: + msg = 'op: %s, dtype: %s' % (op.__name__, dt) + fa = a.astype(dt) + fb = b.astype(dt) + div, rem = op(fa, fb) + assert_equal(div, tgtdiv, err_msg=msg) + assert_equal(rem, tgtrem, err_msg=msg) + + def test_float_remainder_roundoff(self): + # gh-6127 + dt = np.typecodes['Float'] + for op in [floor_divide_and_remainder, np.divmod]: + for dt1, dt2 in itertools.product(dt, dt): + for sg1, sg2 in itertools.product((+1, -1), (+1, -1)): + fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s' + msg = fmt % (op.__name__, dt1, dt2, sg1, sg2) + a = np.array(sg1*78*6e-8, dtype=dt1) + b = np.array(sg2*6e-8, dtype=dt2) + div, rem = op(a, b) + # Equal assertion should hold when fmod is used + assert_equal(div*b + rem, a, err_msg=msg) + if sg2 == -1: + assert_(b < rem <= 0, msg) + else: + assert_(b > rem >= 0, msg) + + @pytest.mark.parametrize('dtype', np.typecodes['Float']) + def test_float_divmod_errors(self, dtype): + # Check valid errors raised for divmod and remainder + fzero = np.array(0.0, dtype=dtype) + fone = np.array(1.0, dtype=dtype) + finf = np.array(np.inf, dtype=dtype) + fnan = np.array(np.nan, dtype=dtype) + # since divmod is combination of both remainder and divide + # ops it will set both dividebyzero and invalid flags + with np.errstate(divide='raise', invalid='ignore'): + assert_raises(FloatingPointError, np.divmod, fone, fzero) + with np.errstate(divide='ignore', invalid='raise'): + assert_raises(FloatingPointError, np.divmod, fone, fzero) + with np.errstate(invalid='raise'): + assert_raises(FloatingPointError, np.divmod, fzero, fzero) + with np.errstate(invalid='raise'): + assert_raises(FloatingPointError, np.divmod, finf, finf) + with np.errstate(divide='ignore', invalid='raise'): + assert_raises(FloatingPointError, np.divmod, finf, fzero) + with np.errstate(divide='raise', invalid='ignore'): + assert_raises(FloatingPointError, np.divmod, finf, fzero) + + @pytest.mark.parametrize('dtype', np.typecodes['Float']) + @pytest.mark.parametrize('fn', [np.fmod, np.remainder]) + def test_float_remainder_errors(self, dtype, fn): + fzero = np.array(0.0, dtype=dtype) + fone = np.array(1.0, dtype=dtype) + finf = np.array(np.inf, dtype=dtype) + fnan = np.array(np.nan, dtype=dtype) + with np.errstate(invalid='raise'): + assert_raises(FloatingPointError, fn, fone, fzero) + assert_raises(FloatingPointError, fn, fnan, fzero) + assert_raises(FloatingPointError, fn, fone, fnan) + assert_raises(FloatingPointError, fn, fnan, fone) + + def test_float_remainder_overflow(self): + a = 
np.finfo(np.float64).tiny + with np.errstate(over='ignore', invalid='ignore'): + div, mod = np.divmod(4, a) + np.isinf(div) + assert_(mod == 0) + with np.errstate(over='raise', invalid='ignore'): + assert_raises(FloatingPointError, np.divmod, 4, a) + with np.errstate(invalid='raise', over='ignore'): + assert_raises(FloatingPointError, np.divmod, 4, a) + + def test_float_divmod_corner_cases(self): + # check nan cases + for dt in np.typecodes['Float']: + fnan = np.array(np.nan, dtype=dt) + fone = np.array(1.0, dtype=dt) + fzer = np.array(0.0, dtype=dt) + finf = np.array(np.inf, dtype=dt) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in divmod") + sup.filter(RuntimeWarning, "divide by zero encountered in divmod") + div, rem = np.divmod(fone, fzer) + assert(np.isinf(div)), 'dt: %s, div: %s' % (dt, rem) + assert(np.isnan(rem)), 'dt: %s, rem: %s' % (dt, rem) + div, rem = np.divmod(fzer, fzer) + assert(np.isnan(rem)), 'dt: %s, rem: %s' % (dt, rem) + assert_(np.isnan(div)), 'dt: %s, rem: %s' % (dt, rem) + div, rem = np.divmod(finf, finf) + assert(np.isnan(div)), 'dt: %s, rem: %s' % (dt, rem) + assert(np.isnan(rem)), 'dt: %s, rem: %s' % (dt, rem) + div, rem = np.divmod(finf, fzer) + assert(np.isinf(div)), 'dt: %s, rem: %s' % (dt, rem) + assert(np.isnan(rem)), 'dt: %s, rem: %s' % (dt, rem) + div, rem = np.divmod(fnan, fone) + assert(np.isnan(rem)), "dt: %s, rem: %s" % (dt, rem) + assert(np.isnan(div)), "dt: %s, rem: %s" % (dt, rem) + div, rem = np.divmod(fone, fnan) + assert(np.isnan(rem)), "dt: %s, rem: %s" % (dt, rem) + assert(np.isnan(div)), "dt: %s, rem: %s" % (dt, rem) + div, rem = np.divmod(fnan, fzer) + assert(np.isnan(rem)), "dt: %s, rem: %s" % (dt, rem) + assert(np.isnan(div)), "dt: %s, rem: %s" % (dt, rem) + + def test_float_remainder_corner_cases(self): + # Check remainder magnitude. + for dt in np.typecodes['Float']: + fone = np.array(1.0, dtype=dt) + fzer = np.array(0.0, dtype=dt) + fnan = np.array(np.nan, dtype=dt) + b = np.array(1.0, dtype=dt) + a = np.nextafter(np.array(0.0, dtype=dt), -b) + rem = np.remainder(a, b) + assert_(rem <= b, 'dt: %s' % dt) + rem = np.remainder(-a, -b) + assert_(rem >= -b, 'dt: %s' % dt) + + # Check nans, inf + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in remainder") + sup.filter(RuntimeWarning, "invalid value encountered in fmod") + for dt in np.typecodes['Float']: + fone = np.array(1.0, dtype=dt) + fzer = np.array(0.0, dtype=dt) + finf = np.array(np.inf, dtype=dt) + fnan = np.array(np.nan, dtype=dt) + rem = np.remainder(fone, fzer) + assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) + # MSVC 2008 returns NaN here, so disable the check. 
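+                # (IEEE-754 remainder/fmod with a finite dividend and an
+                # infinite divisor should return the dividend unchanged,
+                # which is what the disabled lines below would assert.)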
+ #rem = np.remainder(fone, finf) + #assert_(rem == fone, 'dt: %s, rem: %s' % (dt, rem)) + rem = np.remainder(finf, fone) + fmod = np.fmod(finf, fone) + assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod)) + assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) + rem = np.remainder(finf, finf) + fmod = np.fmod(finf, fone) + assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) + assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod)) + rem = np.remainder(finf, fzer) + fmod = np.fmod(finf, fzer) + assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) + assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod)) + rem = np.remainder(fone, fnan) + fmod = np.fmod(fone, fnan) + assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) + assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod)) + rem = np.remainder(fnan, fzer) + fmod = np.fmod(fnan, fzer) + assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) + assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, rem)) + rem = np.remainder(fnan, fone) + fmod = np.fmod(fnan, fone) + assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) + assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, rem)) + + +class TestCbrt: + def test_cbrt_scalar(self): + assert_almost_equal((np.cbrt(np.float32(-2.5)**3)), -2.5) + + def test_cbrt(self): + x = np.array([1., 2., -3., np.inf, -np.inf]) + assert_almost_equal(np.cbrt(x**3), x) + + assert_(np.isnan(np.cbrt(np.nan))) + assert_equal(np.cbrt(np.inf), np.inf) + assert_equal(np.cbrt(-np.inf), -np.inf) + + +class TestPower: + def test_power_float(self): + x = np.array([1., 2., 3.]) + assert_equal(x**0, [1., 1., 1.]) + assert_equal(x**1, x) + assert_equal(x**2, [1., 4., 9.]) + y = x.copy() + y **= 2 + assert_equal(y, [1., 4., 9.]) + assert_almost_equal(x**(-1), [1., 0.5, 1./3]) + assert_almost_equal(x**(0.5), [1., ncu.sqrt(2), ncu.sqrt(3)]) + + for out, inp, msg in _gen_alignment_data(dtype=np.float32, + type='unary', + max_size=11): + exp = [ncu.sqrt(i) for i in inp] + assert_almost_equal(inp**(0.5), exp, err_msg=msg) + np.sqrt(inp, out=out) + assert_equal(out, exp, err_msg=msg) + + for out, inp, msg in _gen_alignment_data(dtype=np.float64, + type='unary', + max_size=7): + exp = [ncu.sqrt(i) for i in inp] + assert_almost_equal(inp**(0.5), exp, err_msg=msg) + np.sqrt(inp, out=out) + assert_equal(out, exp, err_msg=msg) + + def test_power_complex(self): + x = np.array([1+2j, 2+3j, 3+4j]) + assert_equal(x**0, [1., 1., 1.]) + assert_equal(x**1, x) + assert_almost_equal(x**2, [-3+4j, -5+12j, -7+24j]) + assert_almost_equal(x**3, [(1+2j)**3, (2+3j)**3, (3+4j)**3]) + assert_almost_equal(x**4, [(1+2j)**4, (2+3j)**4, (3+4j)**4]) + assert_almost_equal(x**(-1), [1/(1+2j), 1/(2+3j), 1/(3+4j)]) + assert_almost_equal(x**(-2), [1/(1+2j)**2, 1/(2+3j)**2, 1/(3+4j)**2]) + assert_almost_equal(x**(-3), [(-11+2j)/125, (-46-9j)/2197, + (-117-44j)/15625]) + assert_almost_equal(x**(0.5), [ncu.sqrt(1+2j), ncu.sqrt(2+3j), + ncu.sqrt(3+4j)]) + norm = 1./((x**14)[0]) + assert_almost_equal(x**14 * norm, + [i * norm for i in [-76443+16124j, 23161315+58317492j, + 5583548873 + 2465133864j]]) + + # Ticket #836 + def assert_complex_equal(x, y): + assert_array_equal(x.real, y.real) + assert_array_equal(x.imag, y.imag) + + for z in [complex(0, np.inf), complex(1, np.inf)]: + z = np.array([z], dtype=np.complex_) + with np.errstate(invalid="ignore"): + assert_complex_equal(z**1, z) + assert_complex_equal(z**2, z*z) + assert_complex_equal(z**3, z*z*z) + + def test_power_zero(self): + # ticket #1271 + zero = np.array([0j]) + one = np.array([1+0j]) + cnan = 
np.array([complex(np.nan, np.nan)]) + # FIXME cinf not tested. + #cinf = np.array([complex(np.inf, 0)]) + + def assert_complex_equal(x, y): + x, y = np.asarray(x), np.asarray(y) + assert_array_equal(x.real, y.real) + assert_array_equal(x.imag, y.imag) + + # positive powers + for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]: + assert_complex_equal(np.power(zero, p), zero) + + # zero power + assert_complex_equal(np.power(zero, 0), one) + with np.errstate(invalid="ignore"): + assert_complex_equal(np.power(zero, 0+1j), cnan) + + # negative power + for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]: + assert_complex_equal(np.power(zero, -p), cnan) + assert_complex_equal(np.power(zero, -1+0.2j), cnan) + + def test_fast_power(self): + x = np.array([1, 2, 3], np.int16) + res = x**2.0 + assert_((x**2.00001).dtype is res.dtype) + assert_array_equal(res, [1, 4, 9]) + # check the inplace operation on the casted copy doesn't mess with x + assert_(not np.may_share_memory(res, x)) + assert_array_equal(x, [1, 2, 3]) + + # Check that the fast path ignores 1-element not 0-d arrays + res = x ** np.array([[[2]]]) + assert_equal(res.shape, (1, 1, 3)) + + def test_integer_power(self): + a = np.array([15, 15], 'i8') + b = np.power(a, a) + assert_equal(b, [437893890380859375, 437893890380859375]) + + def test_integer_power_with_integer_zero_exponent(self): + dtypes = np.typecodes['Integer'] + for dt in dtypes: + arr = np.arange(-10, 10, dtype=dt) + assert_equal(np.power(arr, 0), np.ones_like(arr)) + + dtypes = np.typecodes['UnsignedInteger'] + for dt in dtypes: + arr = np.arange(10, dtype=dt) + assert_equal(np.power(arr, 0), np.ones_like(arr)) + + def test_integer_power_of_1(self): + dtypes = np.typecodes['AllInteger'] + for dt in dtypes: + arr = np.arange(10, dtype=dt) + assert_equal(np.power(1, arr), np.ones_like(arr)) + + def test_integer_power_of_zero(self): + dtypes = np.typecodes['AllInteger'] + for dt in dtypes: + arr = np.arange(1, 10, dtype=dt) + assert_equal(np.power(0, arr), np.zeros_like(arr)) + + def test_integer_to_negative_power(self): + dtypes = np.typecodes['Integer'] + for dt in dtypes: + a = np.array([0, 1, 2, 3], dtype=dt) + b = np.array([0, 1, 2, -3], dtype=dt) + one = np.array(1, dtype=dt) + minusone = np.array(-1, dtype=dt) + assert_raises(ValueError, np.power, a, b) + assert_raises(ValueError, np.power, a, minusone) + assert_raises(ValueError, np.power, one, b) + assert_raises(ValueError, np.power, one, minusone) + + +class TestFloat_power: + def test_type_conversion(self): + arg_type = '?bhilBHILefdgFDG' + res_type = 'ddddddddddddgDDG' + for dtin, dtout in zip(arg_type, res_type): + msg = "dtin: %s, dtout: %s" % (dtin, dtout) + arg = np.ones(1, dtype=dtin) + res = np.float_power(arg, arg) + assert_(res.dtype.name == np.dtype(dtout).name, msg) + + +class TestLog2: + def test_log2_values(self): + x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] + y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + for dt in ['f', 'd', 'g']: + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_almost_equal(np.log2(xf), yf) + + def test_log2_ints(self): + # a good log2 implementation should provide this, + # might fail on OS with bad libm + for i in range(1, 65): + v = np.log2(2.**i) + assert_equal(v, float(i), err_msg='at exponent %d' % i) + + def test_log2_special(self): + assert_equal(np.log2(1.), 0.) 
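+        # (These mirror the C99 special values for log2:
+        # log2(1) = +0, log2(+inf) = +inf, log2(nan) = nan.)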
+ assert_equal(np.log2(np.inf), np.inf) + assert_(np.isnan(np.log2(np.nan))) + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_(np.isnan(np.log2(-1.))) + assert_(np.isnan(np.log2(-np.inf))) + assert_equal(np.log2(0.), -np.inf) + assert_(w[0].category is RuntimeWarning) + assert_(w[1].category is RuntimeWarning) + assert_(w[2].category is RuntimeWarning) + + +class TestExp2: + def test_exp2_values(self): + x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] + y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + for dt in ['f', 'd', 'g']: + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_almost_equal(np.exp2(yf), xf) + + +class TestLogAddExp2(_FilterInvalids): + # Need test for intermediate precisions + def test_logaddexp2_values(self): + x = [1, 2, 3, 4, 5] + y = [5, 4, 3, 2, 1] + z = [6, 6, 6, 6, 6] + for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]): + xf = np.log2(np.array(x, dtype=dt)) + yf = np.log2(np.array(y, dtype=dt)) + zf = np.log2(np.array(z, dtype=dt)) + assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec_) + + def test_logaddexp2_range(self): + x = [1000000, -1000000, 1000200, -1000200] + y = [1000200, -1000200, 1000000, -1000000] + z = [1000200, -1000000, 1000200, -1000000] + for dt in ['f', 'd', 'g']: + logxf = np.array(x, dtype=dt) + logyf = np.array(y, dtype=dt) + logzf = np.array(z, dtype=dt) + assert_almost_equal(np.logaddexp2(logxf, logyf), logzf) + + def test_inf(self): + inf = np.inf + x = [inf, -inf, inf, -inf, inf, 1, -inf, 1] + y = [inf, inf, -inf, -inf, 1, inf, 1, -inf] + z = [inf, inf, inf, -inf, inf, inf, 1, 1] + with np.errstate(invalid='raise'): + for dt in ['f', 'd', 'g']: + logxf = np.array(x, dtype=dt) + logyf = np.array(y, dtype=dt) + logzf = np.array(z, dtype=dt) + assert_equal(np.logaddexp2(logxf, logyf), logzf) + + def test_nan(self): + assert_(np.isnan(np.logaddexp2(np.nan, np.inf))) + assert_(np.isnan(np.logaddexp2(np.inf, np.nan))) + assert_(np.isnan(np.logaddexp2(np.nan, 0))) + assert_(np.isnan(np.logaddexp2(0, np.nan))) + assert_(np.isnan(np.logaddexp2(np.nan, np.nan))) + + def test_reduce(self): + assert_equal(np.logaddexp2.identity, -np.inf) + assert_equal(np.logaddexp2.reduce([]), -np.inf) + assert_equal(np.logaddexp2.reduce([-np.inf]), -np.inf) + assert_equal(np.logaddexp2.reduce([-np.inf, 0]), 0) + + +class TestLog: + def test_log_values(self): + x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] + y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + for dt in ['f', 'd', 'g']: + log2_ = 0.69314718055994530943 + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt)*log2_ + assert_almost_equal(np.log(xf), yf) + + # test aliasing(issue #17761) + x = np.array([2, 0.937500, 3, 0.947500, 1.054697]) + xf = np.log(x) + assert_almost_equal(np.log(x, out=x), xf) + + def test_log_strides(self): + np.random.seed(42) + strides = np.array([-4,-3,-2,-1,1,2,3,4]) + sizes = np.arange(2,100) + for ii in sizes: + x_f64 = np.float64(np.random.uniform(low=0.01, high=100.0,size=ii)) + x_special = x_f64.copy() + x_special[3:-1:4] = 1.0 + y_true = np.log(x_f64) + y_special = np.log(x_special) + for jj in strides: + assert_array_almost_equal_nulp(np.log(x_f64[::jj]), y_true[::jj], nulp=2) + assert_array_almost_equal_nulp(np.log(x_special[::jj]), y_special[::jj], nulp=2) + +class TestExp: + def test_exp_values(self): + x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] + y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + for dt in ['f', 'd', 'g']: + log2_ = 0.69314718055994530943 + xf = np.array(x, dtype=dt) + yf = 
np.array(y, dtype=dt)*log2_ + assert_almost_equal(np.exp(yf), xf) + + def test_exp_strides(self): + np.random.seed(42) + strides = np.array([-4,-3,-2,-1,1,2,3,4]) + sizes = np.arange(2,100) + for ii in sizes: + x_f64 = np.float64(np.random.uniform(low=0.01, high=709.1,size=ii)) + y_true = np.exp(x_f64) + for jj in strides: + assert_array_almost_equal_nulp(np.exp(x_f64[::jj]), y_true[::jj], nulp=2) + +class TestSpecialFloats: + def test_exp_values(self): + x = [np.nan, np.nan, np.inf, 0.] + y = [np.nan, -np.nan, np.inf, -np.inf] + for dt in ['f', 'd', 'g']: + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_equal(np.exp(yf), xf) + + # Older version of glibc may not raise the correct FP exceptions + # See: https://github.com/numpy/numpy/issues/19192 + @glibc_newerthan_2_17 + def test_exp_exceptions(self): + with np.errstate(over='raise'): + assert_raises(FloatingPointError, np.exp, np.float32(100.)) + assert_raises(FloatingPointError, np.exp, np.float32(1E19)) + assert_raises(FloatingPointError, np.exp, np.float64(800.)) + assert_raises(FloatingPointError, np.exp, np.float64(1E19)) + + with np.errstate(under='raise'): + assert_raises(FloatingPointError, np.exp, np.float32(-1000.)) + assert_raises(FloatingPointError, np.exp, np.float32(-1E19)) + assert_raises(FloatingPointError, np.exp, np.float64(-1000.)) + assert_raises(FloatingPointError, np.exp, np.float64(-1E19)) + + def test_log_values(self): + with np.errstate(all='ignore'): + x = [np.nan, np.nan, np.inf, np.nan, -np.inf, np.nan] + y = [np.nan, -np.nan, np.inf, -np.inf, 0., -1.0] + for dt in ['f', 'd', 'g']: + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_equal(np.log(yf), xf) + + with np.errstate(divide='raise'): + assert_raises(FloatingPointError, np.log, np.float32(0.)) + + with np.errstate(invalid='raise'): + assert_raises(FloatingPointError, np.log, np.float32(-np.inf)) + assert_raises(FloatingPointError, np.log, np.float32(-1.0)) + + # See https://github.com/numpy/numpy/issues/18005 + with assert_no_warnings(): + a = np.array(1e9, dtype='float32') + np.log(a) + + def test_sincos_values(self): + with np.errstate(all='ignore'): + x = [np.nan, np.nan, np.nan, np.nan] + y = [np.nan, -np.nan, np.inf, -np.inf] + for dt in ['f', 'd', 'g']: + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_equal(np.sin(yf), xf) + assert_equal(np.cos(yf), xf) + + with np.errstate(invalid='raise'): + assert_raises(FloatingPointError, np.sin, np.float32(-np.inf)) + assert_raises(FloatingPointError, np.sin, np.float32(np.inf)) + assert_raises(FloatingPointError, np.cos, np.float32(-np.inf)) + assert_raises(FloatingPointError, np.cos, np.float32(np.inf)) + + def test_sqrt_values(self): + with np.errstate(all='ignore'): + x = [np.nan, np.nan, np.inf, np.nan, 0.] + y = [np.nan, -np.nan, np.inf, -np.inf, 0.] 
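+            # Illustrative extra check (editor's addition): IEEE-754 sqrt
+            # preserves the sign of zero, so -0.0 maps to -0.0, not +0.0.
+            assert_(np.signbit(np.sqrt(-0.0)))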
+ for dt in ['f', 'd', 'g']: + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_equal(np.sqrt(yf), xf) + + #with np.errstate(invalid='raise'): + # for dt in ['f', 'd', 'g']: + # assert_raises(FloatingPointError, np.sqrt, np.array(-100., dtype=dt)) + + def test_abs_values(self): + x = [np.nan, np.nan, np.inf, np.inf, 0., 0., 1.0, 1.0] + y = [np.nan, -np.nan, np.inf, -np.inf, 0., -0., -1.0, 1.0] + for dt in ['f', 'd', 'g']: + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_equal(np.abs(yf), xf) + + def test_square_values(self): + x = [np.nan, np.nan, np.inf, np.inf] + y = [np.nan, -np.nan, np.inf, -np.inf] + with np.errstate(all='ignore'): + for dt in ['f', 'd', 'g']: + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_equal(np.square(yf), xf) + + with np.errstate(over='raise'): + assert_raises(FloatingPointError, np.square, np.array(1E32, dtype='f')) + assert_raises(FloatingPointError, np.square, np.array(1E200, dtype='d')) + + def test_reciprocal_values(self): + with np.errstate(all='ignore'): + x = [np.nan, np.nan, 0.0, -0.0, np.inf, -np.inf] + y = [np.nan, -np.nan, np.inf, -np.inf, 0., -0.] + for dt in ['f', 'd', 'g']: + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_equal(np.reciprocal(yf), xf) + + with np.errstate(divide='raise'): + for dt in ['f', 'd', 'g']: + assert_raises(FloatingPointError, np.reciprocal, np.array(-0.0, dtype=dt)) + +class TestFPClass: + @pytest.mark.parametrize("stride", [-4,-2,-1,1,2,4]) + def test_fpclass(self, stride): + arr_f64 = np.array([np.nan, -np.nan, np.inf, -np.inf, -1.0, 1.0, -0.0, 0.0, 2.2251e-308, -2.2251e-308], dtype='d') + arr_f32 = np.array([np.nan, -np.nan, np.inf, -np.inf, -1.0, 1.0, -0.0, 0.0, 1.4013e-045, -1.4013e-045], dtype='f') + nan = np.array([True, True, False, False, False, False, False, False, False, False]) + inf = np.array([False, False, True, True, False, False, False, False, False, False]) + sign = np.array([False, True, False, True, True, False, True, False, False, True]) + finite = np.array([False, False, False, False, True, True, True, True, True, True]) + assert_equal(np.isnan(arr_f32[::stride]), nan[::stride]) + assert_equal(np.isnan(arr_f64[::stride]), nan[::stride]) + assert_equal(np.isinf(arr_f32[::stride]), inf[::stride]) + assert_equal(np.isinf(arr_f64[::stride]), inf[::stride]) + assert_equal(np.signbit(arr_f32[::stride]), sign[::stride]) + assert_equal(np.signbit(arr_f64[::stride]), sign[::stride]) + assert_equal(np.isfinite(arr_f32[::stride]), finite[::stride]) + assert_equal(np.isfinite(arr_f64[::stride]), finite[::stride]) + +class TestLDExp: + @pytest.mark.parametrize("stride", [-4,-2,-1,1,2,4]) + @pytest.mark.parametrize("dtype", ['f', 'd']) + def test_ldexp(self, dtype, stride): + mant = np.array([0.125, 0.25, 0.5, 1., 1., 2., 4., 8.], dtype=dtype) + exp = np.array([3, 2, 1, 0, 0, -1, -2, -3], dtype='i') + out = np.zeros(8, dtype=dtype) + assert_equal(np.ldexp(mant[::stride], exp[::stride], out=out[::stride]), np.ones(8, dtype=dtype)[::stride]) + assert_equal(out[::stride], np.ones(8, dtype=dtype)[::stride]) + +class TestFRExp: + @pytest.mark.parametrize("stride", [-4,-2,-1,1,2,4]) + @pytest.mark.parametrize("dtype", ['f', 'd']) + @pytest.mark.skipif(not sys.platform.startswith('linux'), + reason="np.frexp gives different answers for NAN/INF on windows and linux") + def test_frexp(self, dtype, stride): + arr = np.array([np.nan, np.nan, np.inf, -np.inf, 0.0, -0.0, 1.0, -1.0], dtype=dtype) + mant_true = np.array([np.nan, np.nan, np.inf, -np.inf, 0.0, 
-0.0, 0.5, -0.5], dtype=dtype) + exp_true = np.array([0, 0, 0, 0, 0, 0, 1, 1], dtype='i') + out_mant = np.ones(8, dtype=dtype) + out_exp = 2*np.ones(8, dtype='i') + mant, exp = np.frexp(arr[::stride], out=(out_mant[::stride], out_exp[::stride])) + assert_equal(mant_true[::stride], mant) + assert_equal(exp_true[::stride], exp) + assert_equal(out_mant[::stride], mant_true[::stride]) + assert_equal(out_exp[::stride], exp_true[::stride]) + +# func : [maxulperror, low, high] +avx_ufuncs = {'sqrt' :[1, 0., 100.], + 'absolute' :[0, -100., 100.], + 'reciprocal' :[1, 1., 100.], + 'square' :[1, -100., 100.], + 'rint' :[0, -100., 100.], + 'floor' :[0, -100., 100.], + 'ceil' :[0, -100., 100.], + 'trunc' :[0, -100., 100.]} + +class TestAVXUfuncs: + def test_avx_based_ufunc(self): + strides = np.array([-4,-3,-2,-1,1,2,3,4]) + np.random.seed(42) + for func, prop in avx_ufuncs.items(): + maxulperr = prop[0] + minval = prop[1] + maxval = prop[2] + # various array sizes to ensure masking in AVX is tested + for size in range(1,32): + myfunc = getattr(np, func) + x_f32 = np.float32(np.random.uniform(low=minval, high=maxval, + size=size)) + x_f64 = np.float64(x_f32) + x_f128 = np.longdouble(x_f32) + y_true128 = myfunc(x_f128) + if maxulperr == 0: + assert_equal(myfunc(x_f32), np.float32(y_true128)) + assert_equal(myfunc(x_f64), np.float64(y_true128)) + else: + assert_array_max_ulp(myfunc(x_f32), np.float32(y_true128), + maxulp=maxulperr) + assert_array_max_ulp(myfunc(x_f64), np.float64(y_true128), + maxulp=maxulperr) + # various strides to test gather instruction + if size > 1: + y_true32 = myfunc(x_f32) + y_true64 = myfunc(x_f64) + for jj in strides: + assert_equal(myfunc(x_f64[::jj]), y_true64[::jj]) + assert_equal(myfunc(x_f32[::jj]), y_true32[::jj]) + +class TestAVXFloat32Transcendental: + def test_exp_float32(self): + np.random.seed(42) + x_f32 = np.float32(np.random.uniform(low=0.0,high=88.1,size=1000000)) + x_f64 = np.float64(x_f32) + assert_array_max_ulp(np.exp(x_f32), np.float32(np.exp(x_f64)), maxulp=3) + + def test_log_float32(self): + np.random.seed(42) + x_f32 = np.float32(np.random.uniform(low=0.0,high=1000,size=1000000)) + x_f64 = np.float64(x_f32) + assert_array_max_ulp(np.log(x_f32), np.float32(np.log(x_f64)), maxulp=4) + + def test_sincos_float32(self): + np.random.seed(42) + N = 1000000 + M = np.int_(N/20) + index = np.random.randint(low=0, high=N, size=M) + x_f32 = np.float32(np.random.uniform(low=-100.,high=100.,size=N)) + # test coverage for elements > 117435.992f for which glibc is used + x_f32[index] = np.float32(10E+10*np.random.rand(M)) + x_f64 = np.float64(x_f32) + assert_array_max_ulp(np.sin(x_f32), np.float32(np.sin(x_f64)), maxulp=2) + assert_array_max_ulp(np.cos(x_f32), np.float32(np.cos(x_f64)), maxulp=2) + # test aliasing(issue #17761) + tx_f32 = x_f32.copy() + assert_array_max_ulp(np.sin(x_f32, out=x_f32), np.float32(np.sin(x_f64)), maxulp=2) + assert_array_max_ulp(np.cos(tx_f32, out=tx_f32), np.float32(np.cos(x_f64)), maxulp=2) + + def test_strided_float32(self): + np.random.seed(42) + strides = np.array([-4,-3,-2,-1,1,2,3,4]) + sizes = np.arange(2,100) + for ii in sizes: + x_f32 = np.float32(np.random.uniform(low=0.01,high=88.1,size=ii)) + x_f32_large = x_f32.copy() + x_f32_large[3:-1:4] = 120000.0 + exp_true = np.exp(x_f32) + log_true = np.log(x_f32) + sin_true = np.sin(x_f32_large) + cos_true = np.cos(x_f32_large) + for jj in strides: + assert_array_almost_equal_nulp(np.exp(x_f32[::jj]), exp_true[::jj], nulp=2) + assert_array_almost_equal_nulp(np.log(x_f32[::jj]), 
log_true[::jj], nulp=2) + assert_array_almost_equal_nulp(np.sin(x_f32_large[::jj]), sin_true[::jj], nulp=2) + assert_array_almost_equal_nulp(np.cos(x_f32_large[::jj]), cos_true[::jj], nulp=2) + +class TestLogAddExp(_FilterInvalids): + def test_logaddexp_values(self): + x = [1, 2, 3, 4, 5] + y = [5, 4, 3, 2, 1] + z = [6, 6, 6, 6, 6] + for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]): + xf = np.log(np.array(x, dtype=dt)) + yf = np.log(np.array(y, dtype=dt)) + zf = np.log(np.array(z, dtype=dt)) + assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec_) + + def test_logaddexp_range(self): + x = [1000000, -1000000, 1000200, -1000200] + y = [1000200, -1000200, 1000000, -1000000] + z = [1000200, -1000000, 1000200, -1000000] + for dt in ['f', 'd', 'g']: + logxf = np.array(x, dtype=dt) + logyf = np.array(y, dtype=dt) + logzf = np.array(z, dtype=dt) + assert_almost_equal(np.logaddexp(logxf, logyf), logzf) + + def test_inf(self): + inf = np.inf + x = [inf, -inf, inf, -inf, inf, 1, -inf, 1] + y = [inf, inf, -inf, -inf, 1, inf, 1, -inf] + z = [inf, inf, inf, -inf, inf, inf, 1, 1] + with np.errstate(invalid='raise'): + for dt in ['f', 'd', 'g']: + logxf = np.array(x, dtype=dt) + logyf = np.array(y, dtype=dt) + logzf = np.array(z, dtype=dt) + assert_equal(np.logaddexp(logxf, logyf), logzf) + + def test_nan(self): + assert_(np.isnan(np.logaddexp(np.nan, np.inf))) + assert_(np.isnan(np.logaddexp(np.inf, np.nan))) + assert_(np.isnan(np.logaddexp(np.nan, 0))) + assert_(np.isnan(np.logaddexp(0, np.nan))) + assert_(np.isnan(np.logaddexp(np.nan, np.nan))) + + def test_reduce(self): + assert_equal(np.logaddexp.identity, -np.inf) + assert_equal(np.logaddexp.reduce([]), -np.inf) + + +class TestLog1p: + def test_log1p(self): + assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2)) + assert_almost_equal(ncu.log1p(1e-6), ncu.log(1+1e-6)) + + def test_special(self): + with np.errstate(invalid="ignore", divide="ignore"): + assert_equal(ncu.log1p(np.nan), np.nan) + assert_equal(ncu.log1p(np.inf), np.inf) + assert_equal(ncu.log1p(-1.), -np.inf) + assert_equal(ncu.log1p(-2.), np.nan) + assert_equal(ncu.log1p(-np.inf), np.nan) + + +class TestExpm1: + def test_expm1(self): + assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2)-1) + assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6)-1) + + def test_special(self): + assert_equal(ncu.expm1(np.inf), np.inf) + assert_equal(ncu.expm1(0.), 0.) + assert_equal(ncu.expm1(-0.), -0.) + assert_equal(ncu.expm1(np.inf), np.inf) + assert_equal(ncu.expm1(-np.inf), -1.) 
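+
+    # Editor's addition (illustrative, not from upstream): expm1 exists to
+    # dodge the cancellation in exp(x) - 1 for tiny x -- exp(1e-20) rounds
+    # to exactly 1.0, so the naive form returns 0.0, while expm1(x) ~= x.
+    def test_expm1_tiny_argument_sketch(self):
+        # the naive form underflows to exactly zero...
+        assert_equal(ncu.exp(1e-20) - 1.0, 0.0)
+        # ...while expm1 keeps full relative precision
+        assert_allclose(ncu.expm1(1e-20), 1e-20, rtol=1e-12)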
+ + def test_complex(self): + x = np.asarray(1e-12) + assert_allclose(x, ncu.expm1(x)) + x = x.astype(np.complex128) + assert_allclose(x, ncu.expm1(x)) + + +class TestHypot: + def test_simple(self): + assert_almost_equal(ncu.hypot(1, 1), ncu.sqrt(2)) + assert_almost_equal(ncu.hypot(0, 0), 0) + + def test_reduce(self): + assert_almost_equal(ncu.hypot.reduce([3.0, 4.0]), 5.0) + assert_almost_equal(ncu.hypot.reduce([3.0, 4.0, 0]), 5.0) + assert_almost_equal(ncu.hypot.reduce([9.0, 12.0, 20.0]), 25.0) + assert_equal(ncu.hypot.reduce([]), 0.0) + + +def assert_hypot_isnan(x, y): + with np.errstate(invalid='ignore'): + assert_(np.isnan(ncu.hypot(x, y)), + "hypot(%s, %s) is %s, not nan" % (x, y, ncu.hypot(x, y))) + + +def assert_hypot_isinf(x, y): + with np.errstate(invalid='ignore'): + assert_(np.isinf(ncu.hypot(x, y)), + "hypot(%s, %s) is %s, not inf" % (x, y, ncu.hypot(x, y))) + + +class TestHypotSpecialValues: + def test_nan_outputs(self): + assert_hypot_isnan(np.nan, np.nan) + assert_hypot_isnan(np.nan, 1) + + def test_nan_outputs2(self): + assert_hypot_isinf(np.nan, np.inf) + assert_hypot_isinf(np.inf, np.nan) + assert_hypot_isinf(np.inf, 0) + assert_hypot_isinf(0, np.inf) + assert_hypot_isinf(np.inf, np.inf) + assert_hypot_isinf(np.inf, 23.0) + + def test_no_fpe(self): + assert_no_warnings(ncu.hypot, np.inf, 0) + + +def assert_arctan2_isnan(x, y): + assert_(np.isnan(ncu.arctan2(x, y)), "arctan(%s, %s) is %s, not nan" % (x, y, ncu.arctan2(x, y))) + + +def assert_arctan2_ispinf(x, y): + assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) > 0), "arctan(%s, %s) is %s, not +inf" % (x, y, ncu.arctan2(x, y))) + + +def assert_arctan2_isninf(x, y): + assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) < 0), "arctan(%s, %s) is %s, not -inf" % (x, y, ncu.arctan2(x, y))) + + +def assert_arctan2_ispzero(x, y): + assert_((ncu.arctan2(x, y) == 0 and not np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not +0" % (x, y, ncu.arctan2(x, y))) + + +def assert_arctan2_isnzero(x, y): + assert_((ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not -0" % (x, y, ncu.arctan2(x, y))) + + +class TestArctan2SpecialValues: + def test_one_one(self): + # atan2(1, 1) returns pi/4. + assert_almost_equal(ncu.arctan2(1, 1), 0.25 * np.pi) + assert_almost_equal(ncu.arctan2(-1, 1), -0.25 * np.pi) + assert_almost_equal(ncu.arctan2(1, -1), 0.75 * np.pi) + + def test_zero_nzero(self): + # atan2(+-0, -0) returns +-pi. + assert_almost_equal(ncu.arctan2(np.PZERO, np.NZERO), np.pi) + assert_almost_equal(ncu.arctan2(np.NZERO, np.NZERO), -np.pi) + + def test_zero_pzero(self): + # atan2(+-0, +0) returns +-0. + assert_arctan2_ispzero(np.PZERO, np.PZERO) + assert_arctan2_isnzero(np.NZERO, np.PZERO) + + def test_zero_negative(self): + # atan2(+-0, x) returns +-pi for x < 0. + assert_almost_equal(ncu.arctan2(np.PZERO, -1), np.pi) + assert_almost_equal(ncu.arctan2(np.NZERO, -1), -np.pi) + + def test_zero_positive(self): + # atan2(+-0, x) returns +-0 for x > 0. + assert_arctan2_ispzero(np.PZERO, 1) + assert_arctan2_isnzero(np.NZERO, 1) + + def test_positive_zero(self): + # atan2(y, +-0) returns +pi/2 for y > 0. + assert_almost_equal(ncu.arctan2(1, np.PZERO), 0.5 * np.pi) + assert_almost_equal(ncu.arctan2(1, np.NZERO), 0.5 * np.pi) + + def test_negative_zero(self): + # atan2(y, +-0) returns -pi/2 for y < 0. 
+ assert_almost_equal(ncu.arctan2(-1, np.PZERO), -0.5 * np.pi) + assert_almost_equal(ncu.arctan2(-1, np.NZERO), -0.5 * np.pi) + + def test_any_ninf(self): + # atan2(+-y, -infinity) returns +-pi for finite y > 0. + assert_almost_equal(ncu.arctan2(1, np.NINF), np.pi) + assert_almost_equal(ncu.arctan2(-1, np.NINF), -np.pi) + + def test_any_pinf(self): + # atan2(+-y, +infinity) returns +-0 for finite y > 0. + assert_arctan2_ispzero(1, np.inf) + assert_arctan2_isnzero(-1, np.inf) + + def test_inf_any(self): + # atan2(+-infinity, x) returns +-pi/2 for finite x. + assert_almost_equal(ncu.arctan2( np.inf, 1), 0.5 * np.pi) + assert_almost_equal(ncu.arctan2(-np.inf, 1), -0.5 * np.pi) + + def test_inf_ninf(self): + # atan2(+-infinity, -infinity) returns +-3*pi/4. + assert_almost_equal(ncu.arctan2( np.inf, -np.inf), 0.75 * np.pi) + assert_almost_equal(ncu.arctan2(-np.inf, -np.inf), -0.75 * np.pi) + + def test_inf_pinf(self): + # atan2(+-infinity, +infinity) returns +-pi/4. + assert_almost_equal(ncu.arctan2( np.inf, np.inf), 0.25 * np.pi) + assert_almost_equal(ncu.arctan2(-np.inf, np.inf), -0.25 * np.pi) + + def test_nan_any(self): + # atan2(nan, x) returns nan for any x, including inf + assert_arctan2_isnan(np.nan, np.inf) + assert_arctan2_isnan(np.inf, np.nan) + assert_arctan2_isnan(np.nan, np.nan) + + +class TestLdexp: + def _check_ldexp(self, tp): + assert_almost_equal(ncu.ldexp(np.array(2., np.float32), + np.array(3, tp)), 16.) + assert_almost_equal(ncu.ldexp(np.array(2., np.float64), + np.array(3, tp)), 16.) + assert_almost_equal(ncu.ldexp(np.array(2., np.longdouble), + np.array(3, tp)), 16.) + + def test_ldexp(self): + # The default Python int type should work + assert_almost_equal(ncu.ldexp(2., 3), 16.) + # The following int types should all be accepted + self._check_ldexp(np.int8) + self._check_ldexp(np.int16) + self._check_ldexp(np.int32) + self._check_ldexp('i') + self._check_ldexp('l') + + def test_ldexp_overflow(self): + # silence warning emitted on overflow + with np.errstate(over="ignore"): + imax = np.iinfo(np.dtype('l')).max + imin = np.iinfo(np.dtype('l')).min + assert_equal(ncu.ldexp(2., imax), np.inf) + assert_equal(ncu.ldexp(2., imin), 0) + + +class TestMaximum(_FilterInvalids): + def test_reduce(self): + dflt = np.typecodes['AllFloat'] + dint = np.typecodes['AllInteger'] + seq1 = np.arange(11) + seq2 = seq1[::-1] + func = np.maximum.reduce + for dt in dint: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 10) + assert_equal(func(tmp2), 10) + for dt in dflt: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 10) + assert_equal(func(tmp2), 10) + tmp1[::2] = np.nan + tmp2[::2] = np.nan + assert_equal(func(tmp1), np.nan) + assert_equal(func(tmp2), np.nan) + + def test_reduce_complex(self): + assert_equal(np.maximum.reduce([1, 2j]), 1) + assert_equal(np.maximum.reduce([1+3j, 2j]), 1+3j) + + def test_float_nans(self): + nan = np.nan + arg1 = np.array([0, nan, nan]) + arg2 = np.array([nan, 0, nan]) + out = np.array([nan, nan, nan]) + assert_equal(np.maximum(arg1, arg2), out) + + def test_object_nans(self): + # Multiple checks to give this a chance to + # fail if cmp is used instead of rich compare. + # Failure cannot be guaranteed. 
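+        # Editor's note (illustrative): object arrays fall back to Python's
+        # rich comparison, and any ordering test against nan is False, so
+        # maximum picks the other operand instead of propagating the nan.
+        assert_(not (float('nan') > 1.0))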
+ for i in range(1): + x = np.array(float('nan'), object) + y = 1.0 + z = np.array(float('nan'), object) + assert_(np.maximum(x, y) == 1.0) + assert_(np.maximum(z, y) == 1.0) + + def test_complex_nans(self): + nan = np.nan + for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]: + arg1 = np.array([0, cnan, cnan], dtype=complex) + arg2 = np.array([cnan, 0, cnan], dtype=complex) + out = np.array([nan, nan, nan], dtype=complex) + assert_equal(np.maximum(arg1, arg2), out) + + def test_object_array(self): + arg1 = np.arange(5, dtype=object) + arg2 = arg1 + 1 + assert_equal(np.maximum(arg1, arg2), arg2) + + def test_strided_array(self): + arr1 = np.array([-4.0, 1.0, 10.0, 0.0, np.nan, -np.nan, np.inf, -np.inf]) + arr2 = np.array([-2.0,-1.0, np.nan, 1.0, 0.0, np.nan, 1.0, -3.0]) + maxtrue = np.array([-2.0, 1.0, np.nan, 1.0, np.nan, np.nan, np.inf, -3.0]) + out = np.ones(8) + out_maxtrue = np.array([-2.0, 1.0, 1.0, 10.0, 1.0, 1.0, np.nan, 1.0]) + assert_equal(np.maximum(arr1,arr2), maxtrue) + assert_equal(np.maximum(arr1[::2],arr2[::2]), maxtrue[::2]) + assert_equal(np.maximum(arr1[:4:], arr2[::2]), np.array([-2.0, np.nan, 10.0, 1.0])) + assert_equal(np.maximum(arr1[::3], arr2[:3:]), np.array([-2.0, 0.0, np.nan])) + assert_equal(np.maximum(arr1[:6:2], arr2[::3], out=out[::3]), np.array([-2.0, 10., np.nan])) + assert_equal(out, out_maxtrue) + + +class TestMinimum(_FilterInvalids): + def test_reduce(self): + dflt = np.typecodes['AllFloat'] + dint = np.typecodes['AllInteger'] + seq1 = np.arange(11) + seq2 = seq1[::-1] + func = np.minimum.reduce + for dt in dint: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 0) + assert_equal(func(tmp2), 0) + for dt in dflt: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 0) + assert_equal(func(tmp2), 0) + tmp1[::2] = np.nan + tmp2[::2] = np.nan + assert_equal(func(tmp1), np.nan) + assert_equal(func(tmp2), np.nan) + + def test_reduce_complex(self): + assert_equal(np.minimum.reduce([1, 2j]), 2j) + assert_equal(np.minimum.reduce([1+3j, 2j]), 2j) + + def test_float_nans(self): + nan = np.nan + arg1 = np.array([0, nan, nan]) + arg2 = np.array([nan, 0, nan]) + out = np.array([nan, nan, nan]) + assert_equal(np.minimum(arg1, arg2), out) + + def test_object_nans(self): + # Multiple checks to give this a chance to + # fail if cmp is used instead of rich compare. + # Failure cannot be guaranteed. 
+ for i in range(1): + x = np.array(float('nan'), object) + y = 1.0 + z = np.array(float('nan'), object) + assert_(np.minimum(x, y) == 1.0) + assert_(np.minimum(z, y) == 1.0) + + def test_complex_nans(self): + nan = np.nan + for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]: + arg1 = np.array([0, cnan, cnan], dtype=complex) + arg2 = np.array([cnan, 0, cnan], dtype=complex) + out = np.array([nan, nan, nan], dtype=complex) + assert_equal(np.minimum(arg1, arg2), out) + + def test_object_array(self): + arg1 = np.arange(5, dtype=object) + arg2 = arg1 + 1 + assert_equal(np.minimum(arg1, arg2), arg1) + + def test_strided_array(self): + arr1 = np.array([-4.0, 1.0, 10.0, 0.0, np.nan, -np.nan, np.inf, -np.inf]) + arr2 = np.array([-2.0,-1.0, np.nan, 1.0, 0.0, np.nan, 1.0, -3.0]) + mintrue = np.array([-4.0, -1.0, np.nan, 0.0, np.nan, np.nan, 1.0, -np.inf]) + out = np.ones(8) + out_mintrue = np.array([-4.0, 1.0, 1.0, 1.0, 1.0, 1.0, np.nan, 1.0]) + assert_equal(np.minimum(arr1,arr2), mintrue) + assert_equal(np.minimum(arr1[::2],arr2[::2]), mintrue[::2]) + assert_equal(np.minimum(arr1[:4:], arr2[::2]), np.array([-4.0, np.nan, 0.0, 0.0])) + assert_equal(np.minimum(arr1[::3], arr2[:3:]), np.array([-4.0, -1.0, np.nan])) + assert_equal(np.minimum(arr1[:6:2], arr2[::3], out=out[::3]), np.array([-4.0, 1.0, np.nan])) + assert_equal(out, out_mintrue) + +class TestFmax(_FilterInvalids): + def test_reduce(self): + dflt = np.typecodes['AllFloat'] + dint = np.typecodes['AllInteger'] + seq1 = np.arange(11) + seq2 = seq1[::-1] + func = np.fmax.reduce + for dt in dint: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 10) + assert_equal(func(tmp2), 10) + for dt in dflt: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 10) + assert_equal(func(tmp2), 10) + tmp1[::2] = np.nan + tmp2[::2] = np.nan + assert_equal(func(tmp1), 9) + assert_equal(func(tmp2), 9) + + def test_reduce_complex(self): + assert_equal(np.fmax.reduce([1, 2j]), 1) + assert_equal(np.fmax.reduce([1+3j, 2j]), 1+3j) + + def test_float_nans(self): + nan = np.nan + arg1 = np.array([0, nan, nan]) + arg2 = np.array([nan, 0, nan]) + out = np.array([0, 0, nan]) + assert_equal(np.fmax(arg1, arg2), out) + + def test_complex_nans(self): + nan = np.nan + for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]: + arg1 = np.array([0, cnan, cnan], dtype=complex) + arg2 = np.array([cnan, 0, cnan], dtype=complex) + out = np.array([0, 0, nan], dtype=complex) + assert_equal(np.fmax(arg1, arg2), out) + + +class TestFmin(_FilterInvalids): + def test_reduce(self): + dflt = np.typecodes['AllFloat'] + dint = np.typecodes['AllInteger'] + seq1 = np.arange(11) + seq2 = seq1[::-1] + func = np.fmin.reduce + for dt in dint: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 0) + assert_equal(func(tmp2), 0) + for dt in dflt: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 0) + assert_equal(func(tmp2), 0) + tmp1[::2] = np.nan + tmp2[::2] = np.nan + assert_equal(func(tmp1), 1) + assert_equal(func(tmp2), 1) + + def test_reduce_complex(self): + assert_equal(np.fmin.reduce([1, 2j]), 2j) + assert_equal(np.fmin.reduce([1+3j, 2j]), 2j) + + def test_float_nans(self): + nan = np.nan + arg1 = np.array([0, nan, nan]) + arg2 = np.array([nan, 0, nan]) + out = np.array([0, 0, nan]) + assert_equal(np.fmin(arg1, arg2), out) + + def test_complex_nans(self): + nan = np.nan + for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]: + arg1 = np.array([0, cnan, 
cnan], dtype=complex)
+            arg2 = np.array([cnan, 0, cnan], dtype=complex)
+            out = np.array([0, 0, nan], dtype=complex)
+            assert_equal(np.fmin(arg1, arg2), out)
+
+
+class TestBool:
+    def test_exceptions(self):
+        a = np.ones(1, dtype=np.bool_)
+        assert_raises(TypeError, np.negative, a)
+        assert_raises(TypeError, np.positive, a)
+        assert_raises(TypeError, np.subtract, a, a)
+
+    def test_truth_table_logical(self):
+        # 2, 3 and 4 serve as true values
+        input1 = [0, 0, 3, 2]
+        input2 = [0, 4, 0, 2]
+
+        typecodes = (np.typecodes['AllFloat']
+                     + np.typecodes['AllInteger']
+                     + '?')     # boolean
+        for dtype in map(np.dtype, typecodes):
+            arg1 = np.asarray(input1, dtype=dtype)
+            arg2 = np.asarray(input2, dtype=dtype)
+
+            # OR
+            out = [False, True, True, True]
+            for func in (np.logical_or, np.maximum):
+                assert_equal(func(arg1, arg2).astype(bool), out)
+            # AND
+            out = [False, False, False, True]
+            for func in (np.logical_and, np.minimum):
+                assert_equal(func(arg1, arg2).astype(bool), out)
+            # XOR
+            out = [False, True, True, False]
+            for func in (np.logical_xor, np.not_equal):
+                assert_equal(func(arg1, arg2).astype(bool), out)
+
+    def test_truth_table_bitwise(self):
+        arg1 = [False, False, True, True]
+        arg2 = [False, True, False, True]
+
+        out = [False, True, True, True]
+        assert_equal(np.bitwise_or(arg1, arg2), out)
+
+        out = [False, False, False, True]
+        assert_equal(np.bitwise_and(arg1, arg2), out)
+
+        out = [False, True, True, False]
+        assert_equal(np.bitwise_xor(arg1, arg2), out)
+
+    def test_reduce(self):
+        none = np.array([0, 0, 0, 0], bool)
+        some = np.array([1, 0, 1, 1], bool)
+        every = np.array([1, 1, 1, 1], bool)
+        empty = np.array([], bool)
+
+        arrs = [none, some, every, empty]
+
+        for arr in arrs:
+            assert_equal(np.logical_and.reduce(arr), all(arr))
+
+        for arr in arrs:
+            assert_equal(np.logical_or.reduce(arr), any(arr))
+
+        for arr in arrs:
+            assert_equal(np.logical_xor.reduce(arr), arr.sum() % 2 == 1)
+
+
+class TestBitwiseUFuncs:
+
+    bitwise_types = [np.dtype(c) for c in '?'
+ 'bBhHiIlLqQ' + 'O'] + + def test_values(self): + for dt in self.bitwise_types: + zeros = np.array([0], dtype=dt) + ones = np.array([-1], dtype=dt) + msg = "dt = '%s'" % dt.char + + assert_equal(np.bitwise_not(zeros), ones, err_msg=msg) + assert_equal(np.bitwise_not(ones), zeros, err_msg=msg) + + assert_equal(np.bitwise_or(zeros, zeros), zeros, err_msg=msg) + assert_equal(np.bitwise_or(zeros, ones), ones, err_msg=msg) + assert_equal(np.bitwise_or(ones, zeros), ones, err_msg=msg) + assert_equal(np.bitwise_or(ones, ones), ones, err_msg=msg) + + assert_equal(np.bitwise_xor(zeros, zeros), zeros, err_msg=msg) + assert_equal(np.bitwise_xor(zeros, ones), ones, err_msg=msg) + assert_equal(np.bitwise_xor(ones, zeros), ones, err_msg=msg) + assert_equal(np.bitwise_xor(ones, ones), zeros, err_msg=msg) + + assert_equal(np.bitwise_and(zeros, zeros), zeros, err_msg=msg) + assert_equal(np.bitwise_and(zeros, ones), zeros, err_msg=msg) + assert_equal(np.bitwise_and(ones, zeros), zeros, err_msg=msg) + assert_equal(np.bitwise_and(ones, ones), ones, err_msg=msg) + + def test_types(self): + for dt in self.bitwise_types: + zeros = np.array([0], dtype=dt) + ones = np.array([-1], dtype=dt) + msg = "dt = '%s'" % dt.char + + assert_(np.bitwise_not(zeros).dtype == dt, msg) + assert_(np.bitwise_or(zeros, zeros).dtype == dt, msg) + assert_(np.bitwise_xor(zeros, zeros).dtype == dt, msg) + assert_(np.bitwise_and(zeros, zeros).dtype == dt, msg) + + def test_identity(self): + assert_(np.bitwise_or.identity == 0, 'bitwise_or') + assert_(np.bitwise_xor.identity == 0, 'bitwise_xor') + assert_(np.bitwise_and.identity == -1, 'bitwise_and') + + def test_reduction(self): + binary_funcs = (np.bitwise_or, np.bitwise_xor, np.bitwise_and) + + for dt in self.bitwise_types: + zeros = np.array([0], dtype=dt) + ones = np.array([-1], dtype=dt) + for f in binary_funcs: + msg = "dt: '%s', f: '%s'" % (dt, f) + assert_equal(f.reduce(zeros), zeros, err_msg=msg) + assert_equal(f.reduce(ones), ones, err_msg=msg) + + # Test empty reduction, no object dtype + for dt in self.bitwise_types[:-1]: + # No object array types + empty = np.array([], dtype=dt) + for f in binary_funcs: + msg = "dt: '%s', f: '%s'" % (dt, f) + tgt = np.array(f.identity, dtype=dt) + res = f.reduce(empty) + assert_equal(res, tgt, err_msg=msg) + assert_(res.dtype == tgt.dtype, msg) + + # Empty object arrays use the identity. Note that the types may + # differ, the actual type used is determined by the assign_identity + # function and is not the same as the type returned by the identity + # method. 
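+        # Illustrative extra check (editor's addition): the identity is cast
+        # to the array dtype, so bitwise_and's -1 becomes 255 for uint8.
+        assert_equal(np.bitwise_and.reduce(np.array([], dtype=np.uint8)), 255)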
+        for f in binary_funcs:
+            msg = "dt: '%s'" % (f,)
+            empty = np.array([], dtype=object)
+            tgt = f.identity
+            res = f.reduce(empty)
+            assert_equal(res, tgt, err_msg=msg)
+
+        # Non-empty object arrays do not use the identity
+        for f in binary_funcs:
+            msg = "dt: '%s'" % (f,)
+            btype = np.array([True], dtype=object)
+            assert_(type(f.reduce(btype)) is bool, msg)
+
+
+class TestInt:
+    def test_logical_not(self):
+        x = np.ones(10, dtype=np.int16)
+        o = np.ones(10 * 2, dtype=bool)
+        tgt = o.copy()
+        tgt[::2] = False
+        os = o[::2]
+        assert_array_equal(np.logical_not(x, out=os), False)
+        assert_array_equal(o, tgt)
+
+
+class TestFloatingPoint:
+    def test_floating_point(self):
+        assert_equal(ncu.FLOATING_POINT_SUPPORT, 1)
+
+
+class TestDegrees:
+    def test_degrees(self):
+        assert_almost_equal(ncu.degrees(np.pi), 180.0)
+        assert_almost_equal(ncu.degrees(-0.5*np.pi), -90.0)
+
+
+class TestRadians:
+    def test_radians(self):
+        assert_almost_equal(ncu.radians(180.0), np.pi)
+        assert_almost_equal(ncu.radians(-90.0), -0.5*np.pi)
+
+
+class TestHeaviside:
+    def test_heaviside(self):
+        x = np.array([[-30.0, -0.1, 0.0, 0.2], [7.5, np.nan, np.inf, -np.inf]])
+        expectedhalf = np.array([[0.0, 0.0, 0.5, 1.0], [1.0, np.nan, 1.0, 0.0]])
+        expected1 = expectedhalf.copy()
+        expected1[0, 2] = 1
+
+        h = ncu.heaviside(x, 0.5)
+        assert_equal(h, expectedhalf)
+
+        h = ncu.heaviside(x, 1.0)
+        assert_equal(h, expected1)
+
+        x = x.astype(np.float32)
+
+        h = ncu.heaviside(x, np.float32(0.5))
+        assert_equal(h, expectedhalf.astype(np.float32))
+
+        h = ncu.heaviside(x, np.float32(1.0))
+        assert_equal(h, expected1.astype(np.float32))
+
+
+class TestSign:
+    def test_sign(self):
+        a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0])
+        out = np.zeros(a.shape)
+        tgt = np.array([1., -1., np.nan, 0.0, 1.0, -1.0])
+
+        with np.errstate(invalid='ignore'):
+            res = ncu.sign(a)
+            assert_equal(res, tgt)
+            res = ncu.sign(a, out)
+            assert_equal(res, tgt)
+            assert_equal(out, tgt)
+
+    def test_sign_dtype_object(self):
+        # In reference to github issue #6229
+
+        foo = np.array([-.1, 0, .1])
+        a = np.sign(foo.astype(object))
+        b = np.sign(foo)
+
+        assert_array_equal(a, b)
+
+    def test_sign_dtype_nan_object(self):
+        # In reference to github issue #6229
+        def test_nan():
+            foo = np.array([np.nan])
+            # FIXME: a not used
+            a = np.sign(foo.astype(object))
+
+        assert_raises(TypeError, test_nan)
+
+class TestMinMax:
+    def test_minmax_blocked(self):
+        # simd tests on max/min, test all alignments, slow but important
+        # for 2 * vz + 2 * (vs - 1) + 1 (unrolled once)
+        for dt, sz in [(np.float32, 15), (np.float64, 7)]:
+            for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary',
+                                                     max_size=sz):
+                for i in range(inp.size):
+                    inp[:] = np.arange(inp.size, dtype=dt)
+                    inp[i] = np.nan
+                    emsg = lambda: '%r\n%s' % (inp, msg)
+                    with suppress_warnings() as sup:
+                        sup.filter(RuntimeWarning,
+                                   "invalid value encountered in reduce")
+                        assert_(np.isnan(inp.max()), msg=emsg)
+                        assert_(np.isnan(inp.min()), msg=emsg)
+
+                    inp[i] = 1e10
+                    assert_equal(inp.max(), 1e10, err_msg=msg)
+                    inp[i] = -1e10
+                    assert_equal(inp.min(), -1e10, err_msg=msg)
+
+    def test_lower_align(self):
+        # check data that is not aligned to element size
+        # i.e. doubles are aligned to 4 bytes on i386
+        d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
+        assert_equal(d.max(), d[0])
+        assert_equal(d.min(), d[0])
+
+    def test_reduce_reorder(self):
+        # gh 10370, 11029 Some compilers reorder the call to npy_getfloatstatus
+        # and put it before the call to an intrinsic function that causes
+        # 
invalid status to be set. Also make sure warnings are not emitted + for n in (2, 4, 8, 16, 32): + for dt in (np.float32, np.float16, np.complex64): + for r in np.diagflat(np.array([np.nan] * n, dtype=dt)): + assert_equal(np.min(r), np.nan) + + def test_minimize_no_warns(self): + a = np.minimum(np.nan, 1) + assert_equal(a, np.nan) + + +class TestAbsoluteNegative: + def test_abs_neg_blocked(self): + # simd tests on abs, test all alignments for vz + 2 * (vs - 1) + 1 + for dt, sz in [(np.float32, 11), (np.float64, 5)]: + for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary', + max_size=sz): + tgt = [ncu.absolute(i) for i in inp] + np.absolute(inp, out=out) + assert_equal(out, tgt, err_msg=msg) + assert_((out >= 0).all()) + + tgt = [-1*(i) for i in inp] + np.negative(inp, out=out) + assert_equal(out, tgt, err_msg=msg) + + for v in [np.nan, -np.inf, np.inf]: + for i in range(inp.size): + d = np.arange(inp.size, dtype=dt) + inp[:] = -d + inp[i] = v + d[i] = -v if v == -np.inf else v + assert_array_equal(np.abs(inp), d, err_msg=msg) + np.abs(inp, out=out) + assert_array_equal(out, d, err_msg=msg) + + assert_array_equal(-inp, -1*inp, err_msg=msg) + d = -1 * inp + np.negative(inp, out=out) + assert_array_equal(out, d, err_msg=msg) + + def test_lower_align(self): + # check data that is not aligned to element size + # i.e doubles are aligned to 4 bytes on i386 + d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64) + assert_equal(np.abs(d), d) + assert_equal(np.negative(d), -d) + np.negative(d, out=d) + np.negative(np.ones_like(d), out=d) + np.abs(d, out=d) + np.abs(np.ones_like(d), out=d) + + +class TestPositive: + def test_valid(self): + valid_dtypes = [int, float, complex, object] + for dtype in valid_dtypes: + x = np.arange(5, dtype=dtype) + result = np.positive(x) + assert_equal(x, result, err_msg=str(dtype)) + + def test_invalid(self): + with assert_raises(TypeError): + np.positive(True) + with assert_raises(TypeError): + np.positive(np.datetime64('2000-01-01')) + with assert_raises(TypeError): + np.positive(np.array(['foo'], dtype=str)) + with assert_raises(TypeError): + np.positive(np.array(['bar'], dtype=object)) + + +class TestSpecialMethods: + def test_wrap(self): + + class with_wrap: + def __array__(self): + return np.zeros(1) + + def __array_wrap__(self, arr, context): + r = with_wrap() + r.arr = arr + r.context = context + return r + + a = with_wrap() + x = ncu.minimum(a, a) + assert_equal(x.arr, np.zeros(1)) + func, args, i = x.context + assert_(func is ncu.minimum) + assert_equal(len(args), 2) + assert_equal(args[0], a) + assert_equal(args[1], a) + assert_equal(i, 0) + + def test_wrap_and_prepare_out(self): + # Calling convention for out should not affect how special methods are + # called + + class StoreArrayPrepareWrap(np.ndarray): + _wrap_args = None + _prepare_args = None + def __new__(cls): + return np.zeros(()).view(cls) + def __array_wrap__(self, obj, context): + self._wrap_args = context[1] + return obj + def __array_prepare__(self, obj, context): + self._prepare_args = context[1] + return obj + @property + def args(self): + # We need to ensure these are fetched at the same time, before + # any other ufuncs are called by the assertions + return (self._prepare_args, self._wrap_args) + def __repr__(self): + return "a" # for short test output + + def do_test(f_call, f_expected): + a = StoreArrayPrepareWrap() + f_call(a) + p, w = a.args + expected = f_expected(a) + try: + assert_equal(p, expected) + assert_equal(w, expected) + except AssertionError as e: + # 
assert_equal produces truly useless error messages + raise AssertionError("\n".join([ + "Bad arguments passed in ufunc call", + " expected: {}".format(expected), + " __array_prepare__ got: {}".format(p), + " __array_wrap__ got: {}".format(w) + ])) + + # method not on the out argument + do_test(lambda a: np.add(a, 0), lambda a: (a, 0)) + do_test(lambda a: np.add(a, 0, None), lambda a: (a, 0)) + do_test(lambda a: np.add(a, 0, out=None), lambda a: (a, 0)) + do_test(lambda a: np.add(a, 0, out=(None,)), lambda a: (a, 0)) + + # method on the out argument + do_test(lambda a: np.add(0, 0, a), lambda a: (0, 0, a)) + do_test(lambda a: np.add(0, 0, out=a), lambda a: (0, 0, a)) + do_test(lambda a: np.add(0, 0, out=(a,)), lambda a: (0, 0, a)) + + def test_wrap_with_iterable(self): + # test fix for bug #1026: + + class with_wrap(np.ndarray): + __array_priority__ = 10 + + def __new__(cls): + return np.asarray(1).view(cls).copy() + + def __array_wrap__(self, arr, context): + return arr.view(type(self)) + + a = with_wrap() + x = ncu.multiply(a, (1, 2, 3)) + assert_(isinstance(x, with_wrap)) + assert_array_equal(x, np.array((1, 2, 3))) + + def test_priority_with_scalar(self): + # test fix for bug #826: + + class A(np.ndarray): + __array_priority__ = 10 + + def __new__(cls): + return np.asarray(1.0, 'float64').view(cls).copy() + + a = A() + x = np.float64(1)*a + assert_(isinstance(x, A)) + assert_array_equal(x, np.array(1)) + + def test_old_wrap(self): + + class with_wrap: + def __array__(self): + return np.zeros(1) + + def __array_wrap__(self, arr): + r = with_wrap() + r.arr = arr + return r + + a = with_wrap() + x = ncu.minimum(a, a) + assert_equal(x.arr, np.zeros(1)) + + def test_priority(self): + + class A: + def __array__(self): + return np.zeros(1) + + def __array_wrap__(self, arr, context): + r = type(self)() + r.arr = arr + r.context = context + return r + + class B(A): + __array_priority__ = 20. + + class C(A): + __array_priority__ = 40. + + x = np.zeros(1) + a = A() + b = B() + c = C() + f = ncu.minimum + assert_(type(f(x, x)) is np.ndarray) + assert_(type(f(x, a)) is A) + assert_(type(f(x, b)) is B) + assert_(type(f(x, c)) is C) + assert_(type(f(a, x)) is A) + assert_(type(f(b, x)) is B) + assert_(type(f(c, x)) is C) + + assert_(type(f(a, a)) is A) + assert_(type(f(a, b)) is B) + assert_(type(f(b, a)) is B) + assert_(type(f(b, b)) is B) + assert_(type(f(b, c)) is C) + assert_(type(f(c, b)) is C) + assert_(type(f(c, c)) is C) + + assert_(type(ncu.exp(a) is A)) + assert_(type(ncu.exp(b) is B)) + assert_(type(ncu.exp(c) is C)) + + def test_failing_wrap(self): + + class A: + def __array__(self): + return np.zeros(2) + + def __array_wrap__(self, arr, context): + raise RuntimeError + + a = A() + assert_raises(RuntimeError, ncu.maximum, a, a) + assert_raises(RuntimeError, ncu.maximum.reduce, a) + + def test_failing_out_wrap(self): + + singleton = np.array([1.0]) + + class Ok(np.ndarray): + def __array_wrap__(self, obj): + return singleton + + class Bad(np.ndarray): + def __array_wrap__(self, obj): + raise RuntimeError + + ok = np.empty(1).view(Ok) + bad = np.empty(1).view(Bad) + # double-free (segfault) of "ok" if "bad" raises an exception + for i in range(10): + assert_raises(RuntimeError, ncu.frexp, 1, ok, bad) + + def test_none_wrap(self): + # Tests that issue #8507 is resolved. 
Previously, this would segfault + + class A: + def __array__(self): + return np.zeros(1) + + def __array_wrap__(self, arr, context=None): + return None + + a = A() + assert_equal(ncu.maximum(a, a), None) + + def test_default_prepare(self): + + class with_wrap: + __array_priority__ = 10 + + def __array__(self): + return np.zeros(1) + + def __array_wrap__(self, arr, context): + return arr + + a = with_wrap() + x = ncu.minimum(a, a) + assert_equal(x, np.zeros(1)) + assert_equal(type(x), np.ndarray) + + def test_prepare(self): + + class with_prepare(np.ndarray): + __array_priority__ = 10 + + def __array_prepare__(self, arr, context): + # make sure we can return a new + return np.array(arr).view(type=with_prepare) + + a = np.array(1).view(type=with_prepare) + x = np.add(a, a) + assert_equal(x, np.array(2)) + assert_equal(type(x), with_prepare) + + def test_prepare_out(self): + + class with_prepare(np.ndarray): + __array_priority__ = 10 + + def __array_prepare__(self, arr, context): + return np.array(arr).view(type=with_prepare) + + a = np.array([1]).view(type=with_prepare) + x = np.add(a, a, a) + # Returned array is new, because of the strange + # __array_prepare__ above + assert_(not np.shares_memory(x, a)) + assert_equal(x, np.array([2])) + assert_equal(type(x), with_prepare) + + def test_failing_prepare(self): + + class A: + def __array__(self): + return np.zeros(1) + + def __array_prepare__(self, arr, context=None): + raise RuntimeError + + a = A() + assert_raises(RuntimeError, ncu.maximum, a, a) + + def test_array_too_many_args(self): + + class A: + def __array__(self, dtype, context): + return np.zeros(1) + + a = A() + assert_raises_regex(TypeError, '2 required positional', np.sum, a) + + def test_ufunc_override(self): + # check override works even with instance with high priority. + class A: + def __array_ufunc__(self, func, method, *inputs, **kwargs): + return self, func, method, inputs, kwargs + + class MyNDArray(np.ndarray): + __array_priority__ = 100 + + a = A() + b = np.array([1]).view(MyNDArray) + res0 = np.multiply(a, b) + res1 = np.multiply(b, b, out=a) + + # self + assert_equal(res0[0], a) + assert_equal(res1[0], a) + assert_equal(res0[1], np.multiply) + assert_equal(res1[1], np.multiply) + assert_equal(res0[2], '__call__') + assert_equal(res1[2], '__call__') + assert_equal(res0[3], (a, b)) + assert_equal(res1[3], (b, b)) + assert_equal(res0[4], {}) + assert_equal(res1[4], {'out': (a,)}) + + def test_ufunc_override_mro(self): + + # Some multi arg functions for testing. + def tres_mul(a, b, c): + return a * b * c + + def quatro_mul(a, b, c, d): + return a * b * c * d + + # Make these into ufuncs. 
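+        # Editor's note (illustrative): np.frompyfunc(func, nin, nout) wraps
+        # a Python callable as an object-dtype ufunc, e.g.
+        # np.frompyfunc(tres_mul, 3, 1)(2, 3, 4) == 24, which is what lets
+        # these helpers take part in __array_ufunc__ dispatch below.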
+ three_mul_ufunc = np.frompyfunc(tres_mul, 3, 1) + four_mul_ufunc = np.frompyfunc(quatro_mul, 4, 1) + + class A: + def __array_ufunc__(self, func, method, *inputs, **kwargs): + return "A" + + class ASub(A): + def __array_ufunc__(self, func, method, *inputs, **kwargs): + return "ASub" + + class B: + def __array_ufunc__(self, func, method, *inputs, **kwargs): + return "B" + + class C: + def __init__(self): + self.count = 0 + + def __array_ufunc__(self, func, method, *inputs, **kwargs): + self.count += 1 + return NotImplemented + + class CSub(C): + def __array_ufunc__(self, func, method, *inputs, **kwargs): + self.count += 1 + return NotImplemented + + a = A() + a_sub = ASub() + b = B() + c = C() + + # Standard + res = np.multiply(a, a_sub) + assert_equal(res, "ASub") + res = np.multiply(a_sub, b) + assert_equal(res, "ASub") + + # With 1 NotImplemented + res = np.multiply(c, a) + assert_equal(res, "A") + assert_equal(c.count, 1) + # Check our counter works, so we can trust tests below. + res = np.multiply(c, a) + assert_equal(c.count, 2) + + # Both NotImplemented. + c = C() + c_sub = CSub() + assert_raises(TypeError, np.multiply, c, c_sub) + assert_equal(c.count, 1) + assert_equal(c_sub.count, 1) + c.count = c_sub.count = 0 + assert_raises(TypeError, np.multiply, c_sub, c) + assert_equal(c.count, 1) + assert_equal(c_sub.count, 1) + c.count = 0 + assert_raises(TypeError, np.multiply, c, c) + assert_equal(c.count, 1) + c.count = 0 + assert_raises(TypeError, np.multiply, 2, c) + assert_equal(c.count, 1) + + # Ternary testing. + assert_equal(three_mul_ufunc(a, 1, 2), "A") + assert_equal(three_mul_ufunc(1, a, 2), "A") + assert_equal(three_mul_ufunc(1, 2, a), "A") + + assert_equal(three_mul_ufunc(a, a, 6), "A") + assert_equal(three_mul_ufunc(a, 2, a), "A") + assert_equal(three_mul_ufunc(a, 2, b), "A") + assert_equal(three_mul_ufunc(a, 2, a_sub), "ASub") + assert_equal(three_mul_ufunc(a, a_sub, 3), "ASub") + c.count = 0 + assert_equal(three_mul_ufunc(c, a_sub, 3), "ASub") + assert_equal(c.count, 1) + c.count = 0 + assert_equal(three_mul_ufunc(1, a_sub, c), "ASub") + assert_equal(c.count, 0) + + c.count = 0 + assert_equal(three_mul_ufunc(a, b, c), "A") + assert_equal(c.count, 0) + c_sub.count = 0 + assert_equal(three_mul_ufunc(a, b, c_sub), "A") + assert_equal(c_sub.count, 0) + assert_equal(three_mul_ufunc(1, 2, b), "B") + + assert_raises(TypeError, three_mul_ufunc, 1, 2, c) + assert_raises(TypeError, three_mul_ufunc, c_sub, 2, c) + assert_raises(TypeError, three_mul_ufunc, c_sub, 2, 3) + + # Quaternary testing. 
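+        # Editor's note (illustrative): candidates are tried left to right,
+        # except that a subclass's __array_ufunc__ is consulted before its
+        # superclass's, which is why ASub wins over A in the mixed cases.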
+ assert_equal(four_mul_ufunc(a, 1, 2, 3), "A") + assert_equal(four_mul_ufunc(1, a, 2, 3), "A") + assert_equal(four_mul_ufunc(1, 1, a, 3), "A") + assert_equal(four_mul_ufunc(1, 1, 2, a), "A") + + assert_equal(four_mul_ufunc(a, b, 2, 3), "A") + assert_equal(four_mul_ufunc(1, a, 2, b), "A") + assert_equal(four_mul_ufunc(b, 1, a, 3), "B") + assert_equal(four_mul_ufunc(a_sub, 1, 2, a), "ASub") + assert_equal(four_mul_ufunc(a, 1, 2, a_sub), "ASub") + + c = C() + c_sub = CSub() + assert_raises(TypeError, four_mul_ufunc, 1, 2, 3, c) + assert_equal(c.count, 1) + c.count = 0 + assert_raises(TypeError, four_mul_ufunc, 1, 2, c_sub, c) + assert_equal(c_sub.count, 1) + assert_equal(c.count, 1) + c2 = C() + c.count = c_sub.count = 0 + assert_raises(TypeError, four_mul_ufunc, 1, c, c_sub, c2) + assert_equal(c_sub.count, 1) + assert_equal(c.count, 1) + assert_equal(c2.count, 0) + c.count = c2.count = c_sub.count = 0 + assert_raises(TypeError, four_mul_ufunc, c2, c, c_sub, c) + assert_equal(c_sub.count, 1) + assert_equal(c.count, 0) + assert_equal(c2.count, 1) + + def test_ufunc_override_methods(self): + + class A: + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + return self, ufunc, method, inputs, kwargs + + # __call__ + a = A() + with assert_raises(TypeError): + np.multiply.__call__(1, a, foo='bar', answer=42) + res = np.multiply.__call__(1, a, subok='bar', where=42) + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], '__call__') + assert_equal(res[3], (1, a)) + assert_equal(res[4], {'subok': 'bar', 'where': 42}) + + # __call__, wrong args + assert_raises(TypeError, np.multiply, a) + assert_raises(TypeError, np.multiply, a, a, a, a) + assert_raises(TypeError, np.multiply, a, a, sig='a', signature='a') + assert_raises(TypeError, ncu_tests.inner1d, a, a, axis=0, axes=[0, 0]) + + # reduce, positional args + res = np.multiply.reduce(a, 'axis0', 'dtype0', 'out0', 'keep0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'reduce') + assert_equal(res[3], (a,)) + assert_equal(res[4], {'dtype':'dtype0', + 'out': ('out0',), + 'keepdims': 'keep0', + 'axis': 'axis0'}) + + # reduce, kwargs + res = np.multiply.reduce(a, axis='axis0', dtype='dtype0', out='out0', + keepdims='keep0', initial='init0', + where='where0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'reduce') + assert_equal(res[3], (a,)) + assert_equal(res[4], {'dtype':'dtype0', + 'out': ('out0',), + 'keepdims': 'keep0', + 'axis': 'axis0', + 'initial': 'init0', + 'where': 'where0'}) + + # reduce, output equal to None removed, but not other explicit ones, + # even if they are at their default value. + res = np.multiply.reduce(a, 0, None, None, False) + assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False}) + res = np.multiply.reduce(a, out=None, axis=0, keepdims=True) + assert_equal(res[4], {'axis': 0, 'keepdims': True}) + res = np.multiply.reduce(a, None, out=(None,), dtype=None) + assert_equal(res[4], {'axis': None, 'dtype': None}) + res = np.multiply.reduce(a, 0, None, None, False, 2, True) + assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False, + 'initial': 2, 'where': True}) + # np._NoValue ignored for initial + res = np.multiply.reduce(a, 0, None, None, False, + np._NoValue, True) + assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False, + 'where': True}) + # None kept for initial, True for where. 
+ res = np.multiply.reduce(a, 0, None, None, False, None, True) + assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False, + 'initial': None, 'where': True}) + + # reduce, wrong args + assert_raises(ValueError, np.multiply.reduce, a, out=()) + assert_raises(ValueError, np.multiply.reduce, a, out=('out0', 'out1')) + assert_raises(TypeError, np.multiply.reduce, a, 'axis0', axis='axis0') + + # accumulate, pos args + res = np.multiply.accumulate(a, 'axis0', 'dtype0', 'out0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'accumulate') + assert_equal(res[3], (a,)) + assert_equal(res[4], {'dtype':'dtype0', + 'out': ('out0',), + 'axis': 'axis0'}) + + # accumulate, kwargs + res = np.multiply.accumulate(a, axis='axis0', dtype='dtype0', + out='out0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'accumulate') + assert_equal(res[3], (a,)) + assert_equal(res[4], {'dtype':'dtype0', + 'out': ('out0',), + 'axis': 'axis0'}) + + # accumulate, output equal to None removed. + res = np.multiply.accumulate(a, 0, None, None) + assert_equal(res[4], {'axis': 0, 'dtype': None}) + res = np.multiply.accumulate(a, out=None, axis=0, dtype='dtype1') + assert_equal(res[4], {'axis': 0, 'dtype': 'dtype1'}) + res = np.multiply.accumulate(a, None, out=(None,), dtype=None) + assert_equal(res[4], {'axis': None, 'dtype': None}) + + # accumulate, wrong args + assert_raises(ValueError, np.multiply.accumulate, a, out=()) + assert_raises(ValueError, np.multiply.accumulate, a, + out=('out0', 'out1')) + assert_raises(TypeError, np.multiply.accumulate, a, + 'axis0', axis='axis0') + + # reduceat, pos args + res = np.multiply.reduceat(a, [4, 2], 'axis0', 'dtype0', 'out0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'reduceat') + assert_equal(res[3], (a, [4, 2])) + assert_equal(res[4], {'dtype':'dtype0', + 'out': ('out0',), + 'axis': 'axis0'}) + + # reduceat, kwargs + res = np.multiply.reduceat(a, [4, 2], axis='axis0', dtype='dtype0', + out='out0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'reduceat') + assert_equal(res[3], (a, [4, 2])) + assert_equal(res[4], {'dtype':'dtype0', + 'out': ('out0',), + 'axis': 'axis0'}) + + # reduceat, output equal to None removed. 
+        res = np.multiply.reduceat(a, [4, 2], 0, None, None)
+        assert_equal(res[4], {'axis': 0, 'dtype': None})
+        res = np.multiply.reduceat(a, [4, 2], axis=None, out=None, dtype='dt')
+        assert_equal(res[4], {'axis': None, 'dtype': 'dt'})
+        res = np.multiply.reduceat(a, [4, 2], None, None, out=(None,))
+        assert_equal(res[4], {'axis': None, 'dtype': None})
+
+        # reduceat, wrong args
+        assert_raises(ValueError, np.multiply.reduceat, a, [4, 2], out=())
+        assert_raises(ValueError, np.multiply.reduceat, a, [4, 2],
+                      out=('out0', 'out1'))
+        assert_raises(TypeError, np.multiply.reduceat, a, [4, 2],
+                      'axis0', axis='axis0')
+
+        # outer
+        res = np.multiply.outer(a, 42)
+        assert_equal(res[0], a)
+        assert_equal(res[1], np.multiply)
+        assert_equal(res[2], 'outer')
+        assert_equal(res[3], (a, 42))
+        assert_equal(res[4], {})
+
+        # outer, wrong args
+        assert_raises(TypeError, np.multiply.outer, a)
+        assert_raises(TypeError, np.multiply.outer, a, a, a, a)
+        assert_raises(TypeError, np.multiply.outer, a, a, sig='a', signature='a')
+
+        # at
+        res = np.multiply.at(a, [4, 2], 'b0')
+        assert_equal(res[0], a)
+        assert_equal(res[1], np.multiply)
+        assert_equal(res[2], 'at')
+        assert_equal(res[3], (a, [4, 2], 'b0'))
+
+        # at, wrong args
+        assert_raises(TypeError, np.multiply.at, a)
+        assert_raises(TypeError, np.multiply.at, a, a, a, a)
+
+    def test_ufunc_override_out(self):
+
+        class A:
+            def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+                return kwargs
+
+        class B:
+            def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+                return kwargs
+
+        a = A()
+        b = B()
+        res0 = np.multiply(a, b, 'out_arg')
+        res1 = np.multiply(a, b, out='out_arg')
+        res2 = np.multiply(2, b, 'out_arg')
+        res3 = np.multiply(3, b, out='out_arg')
+        res4 = np.multiply(a, 4, 'out_arg')
+        res5 = np.multiply(a, 5, out='out_arg')
+
+        assert_equal(res0['out'][0], 'out_arg')
+        assert_equal(res1['out'][0], 'out_arg')
+        assert_equal(res2['out'][0], 'out_arg')
+        assert_equal(res3['out'][0], 'out_arg')
+        assert_equal(res4['out'][0], 'out_arg')
+        assert_equal(res5['out'][0], 'out_arg')
+
+        # ufuncs with multiple outputs: modf and frexp.
+        res6 = np.modf(a, 'out0', 'out1')
+        res7 = np.frexp(a, 'out0', 'out1')
+        assert_equal(res6['out'][0], 'out0')
+        assert_equal(res6['out'][1], 'out1')
+        assert_equal(res7['out'][0], 'out0')
+        assert_equal(res7['out'][1], 'out1')
+
+        # While we're at it, check that default output is never passed on.
+        assert_(np.sin(a, None) == {})
+        assert_(np.sin(a, out=None) == {})
+        assert_(np.sin(a, out=(None,)) == {})
+        assert_(np.modf(a, None) == {})
+        assert_(np.modf(a, None, None) == {})
+        assert_(np.modf(a, out=(None, None)) == {})
+        with assert_raises(TypeError):
+            # Out argument must be tuple, since there are multiple outputs.
+            np.modf(a, out=None)
+
+        # don't give the output both positionally and via out=, and don't
+        # pass too many arguments; the wrong number of entries in the out
+        # tuple is an error too.
+        assert_raises(TypeError, np.multiply, a, b, 'one', out='two')
+        assert_raises(TypeError, np.multiply, a, b, 'one', 'two')
+        assert_raises(ValueError, np.multiply, a, b, out=('one', 'two'))
+        assert_raises(TypeError, np.multiply, a, out=())
+        assert_raises(TypeError, np.modf, a, 'one', out=('two', 'three'))
+        assert_raises(TypeError, np.modf, a, 'one', 'two', 'three')
+        assert_raises(ValueError, np.modf, a, out=('one', 'two', 'three'))
+        assert_raises(ValueError, np.modf, a, out=('one',))
+
+    def test_ufunc_override_exception(self):
+
+        class A:
+            def __array_ufunc__(self, *a, **kwargs):
+                raise ValueError("oops")
+
+        a = A()
+        assert_raises(ValueError, np.negative, 1, out=a)
+        assert_raises(ValueError, np.negative, a)
+        assert_raises(ValueError, np.divide, 1., a)
+
+    def test_ufunc_override_not_implemented(self):
+
+        class A:
+            def __array_ufunc__(self, *args, **kwargs):
+                return NotImplemented
+
+        msg = ("operand type(s) all returned NotImplemented from "
+               "__array_ufunc__(<ufunc 'negative'>, '__call__', <*>): 'A'")
+        with assert_raises_regex(TypeError, fnmatch.translate(msg)):
+            np.negative(A())
+
+        msg = ("operand type(s) all returned NotImplemented from "
+               "__array_ufunc__(<ufunc 'add'>, '__call__', <*>, <object *>, "
+               "out=(1,)): 'A', 'object', 'int'")
+        with assert_raises_regex(TypeError, fnmatch.translate(msg)):
+            np.add(A(), object(), out=1)
+
+    def test_ufunc_override_disabled(self):
+
+        class OptOut:
+            __array_ufunc__ = None
+
+        opt_out = OptOut()
+
+        # ufuncs always raise
+        msg = "operand 'OptOut' does not support ufuncs"
+        with assert_raises_regex(TypeError, msg):
+            np.add(opt_out, 1)
+        with assert_raises_regex(TypeError, msg):
+            np.add(1, opt_out)
+        with assert_raises_regex(TypeError, msg):
+            np.negative(opt_out)
+
+        # opt-outs still hold even when other arguments have pathological
+        # __array_ufunc__ implementations
+
+        class GreedyArray:
+            def __array_ufunc__(self, *args, **kwargs):
+                return self
+
+        greedy = GreedyArray()
+        assert_(np.negative(greedy) is greedy)
+        with assert_raises_regex(TypeError, msg):
+            np.add(greedy, opt_out)
+        with assert_raises_regex(TypeError, msg):
+            np.add(greedy, 1, out=opt_out)
+
+    def test_gufunc_override(self):
+        # gufuncs are just ufunc instances, but follow a different path,
+        # so check __array_ufunc__ overrides them properly.
+        class A:
+            def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+                return self, ufunc, method, inputs, kwargs
+
+        inner1d = ncu_tests.inner1d
+        a = A()
+        res = inner1d(a, a)
+        assert_equal(res[0], a)
+        assert_equal(res[1], inner1d)
+        assert_equal(res[2], '__call__')
+        assert_equal(res[3], (a, a))
+        assert_equal(res[4], {})
+
+        res = inner1d(1, 1, out=a)
+        assert_equal(res[0], a)
+        assert_equal(res[1], inner1d)
+        assert_equal(res[2], '__call__')
+        assert_equal(res[3], (1, 1))
+        assert_equal(res[4], {'out': (a,)})
+
+        # wrong number of arguments in the tuple is an error too.
+        assert_raises(TypeError, inner1d, a, out='two')
+        assert_raises(TypeError, inner1d, a, a, 'one', out='two')
+        assert_raises(TypeError, inner1d, a, a, 'one', 'two')
+        assert_raises(ValueError, inner1d, a, a, out=('one', 'two'))
+        assert_raises(ValueError, inner1d, a, a, out=())
+
+    def test_ufunc_override_with_super(self):
+        # NOTE: this class is used in doc/source/user/basics.subclassing.rst
+        # if you make any changes here, do update it there too.
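+        # The pattern: unwrap any A inputs/outputs to plain ndarray views,
+        # delegate to super().__array_ufunc__, then re-wrap the results as A
+        # and record which argument positions held A instances in an `info`
+        # dict attached to the first result.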
+ class A(np.ndarray): + def __array_ufunc__(self, ufunc, method, *inputs, out=None, **kwargs): + args = [] + in_no = [] + for i, input_ in enumerate(inputs): + if isinstance(input_, A): + in_no.append(i) + args.append(input_.view(np.ndarray)) + else: + args.append(input_) + + outputs = out + out_no = [] + if outputs: + out_args = [] + for j, output in enumerate(outputs): + if isinstance(output, A): + out_no.append(j) + out_args.append(output.view(np.ndarray)) + else: + out_args.append(output) + kwargs['out'] = tuple(out_args) + else: + outputs = (None,) * ufunc.nout + + info = {} + if in_no: + info['inputs'] = in_no + if out_no: + info['outputs'] = out_no + + results = super().__array_ufunc__(ufunc, method, + *args, **kwargs) + if results is NotImplemented: + return NotImplemented + + if method == 'at': + if isinstance(inputs[0], A): + inputs[0].info = info + return + + if ufunc.nout == 1: + results = (results,) + + results = tuple((np.asarray(result).view(A) + if output is None else output) + for result, output in zip(results, outputs)) + if results and isinstance(results[0], A): + results[0].info = info + + return results[0] if len(results) == 1 else results + + class B: + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + if any(isinstance(input_, A) for input_ in inputs): + return "A!" + else: + return NotImplemented + + d = np.arange(5.) + # 1 input, 1 output + a = np.arange(5.).view(A) + b = np.sin(a) + check = np.sin(d) + assert_(np.all(check == b)) + assert_equal(b.info, {'inputs': [0]}) + b = np.sin(d, out=(a,)) + assert_(np.all(check == b)) + assert_equal(b.info, {'outputs': [0]}) + assert_(b is a) + a = np.arange(5.).view(A) + b = np.sin(a, out=a) + assert_(np.all(check == b)) + assert_equal(b.info, {'inputs': [0], 'outputs': [0]}) + + # 1 input, 2 outputs + a = np.arange(5.).view(A) + b1, b2 = np.modf(a) + assert_equal(b1.info, {'inputs': [0]}) + b1, b2 = np.modf(d, out=(None, a)) + assert_(b2 is a) + assert_equal(b1.info, {'outputs': [1]}) + a = np.arange(5.).view(A) + b = np.arange(5.).view(A) + c1, c2 = np.modf(a, out=(a, b)) + assert_(c1 is a) + assert_(c2 is b) + assert_equal(c1.info, {'inputs': [0], 'outputs': [0, 1]}) + + # 2 input, 1 output + a = np.arange(5.).view(A) + b = np.arange(5.).view(A) + c = np.add(a, b, out=a) + assert_(c is a) + assert_equal(c.info, {'inputs': [0, 1], 'outputs': [0]}) + # some tests with a non-ndarray subclass + a = np.arange(5.) + b = B() + assert_(a.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented) + assert_(b.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented) + assert_raises(TypeError, np.add, a, b) + a = a.view(A) + assert_(a.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented) + assert_(b.__array_ufunc__(np.add, '__call__', a, b) == "A!") + assert_(np.add(a, b) == "A!") + # regression check for gh-9102 -- tests ufunc.reduce implicitly. 
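+        # ndarray.any and ndarray.max funnel through np.logical_or.reduce
+        # and np.maximum.reduce, so these method calls exercise the override
+        # with method='reduce' without calling it directly.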
+        d = np.array([[1, 2, 3], [1, 2, 3]])
+        a = d.view(A)
+        c = a.any()
+        check = d.any()
+        assert_equal(c, check)
+        assert_equal(c.info, {'inputs': [0]})
+        c = a.max()
+        check = d.max()
+        assert_equal(c, check)
+        assert_equal(c.info, {'inputs': [0]})
+        b = np.array(0).view(A)
+        c = a.max(out=b)
+        assert_equal(c, check)
+        assert_(c is b)
+        assert_equal(c.info, {'inputs': [0], 'outputs': [0]})
+        check = a.max(axis=0)
+        b = np.zeros_like(check).view(A)
+        c = a.max(axis=0, out=b)
+        assert_equal(c, check)
+        assert_(c is b)
+        assert_equal(c.info, {'inputs': [0], 'outputs': [0]})
+        # simple explicit tests of reduce, accumulate, reduceat
+        check = np.add.reduce(d, axis=1)
+        c = np.add.reduce(a, axis=1)
+        assert_equal(c, check)
+        assert_equal(c.info, {'inputs': [0]})
+        b = np.zeros_like(c)
+        c = np.add.reduce(a, 1, None, b)
+        assert_equal(c, check)
+        assert_(c is b)
+        assert_equal(c.info, {'inputs': [0], 'outputs': [0]})
+        check = np.add.accumulate(d, axis=0)
+        c = np.add.accumulate(a, axis=0)
+        assert_equal(c, check)
+        assert_equal(c.info, {'inputs': [0]})
+        b = np.zeros_like(c)
+        c = np.add.accumulate(a, 0, None, b)
+        assert_equal(c, check)
+        assert_(c is b)
+        assert_equal(c.info, {'inputs': [0], 'outputs': [0]})
+        indices = [0, 2, 1]
+        check = np.add.reduceat(d, indices, axis=1)
+        c = np.add.reduceat(a, indices, axis=1)
+        assert_equal(c, check)
+        assert_equal(c.info, {'inputs': [0]})
+        b = np.zeros_like(c)
+        c = np.add.reduceat(a, indices, 1, None, b)
+        assert_equal(c, check)
+        assert_(c is b)
+        assert_equal(c.info, {'inputs': [0], 'outputs': [0]})
+        # and a few tests for at
+        d = np.array([[1, 2, 3], [1, 2, 3]])
+        check = d.copy()
+        a = d.copy().view(A)
+        np.add.at(check, ([0, 1], [0, 2]), 1.)
+        np.add.at(a, ([0, 1], [0, 2]), 1.)
+        assert_equal(a, check)
+        assert_equal(a.info, {'inputs': [0]})
+        b = np.array(1.).view(A)
+        a = d.copy().view(A)
+        np.add.at(a, ([0, 1], [0, 2]), b)
+        assert_equal(a, check)
+        assert_equal(a.info, {'inputs': [0, 2]})
+
+
+class TestChoose:
+    def test_mixed(self):
+        c = np.array([True, True])
+        a = np.array([True, True])
+        assert_equal(np.choose(c, (a, 1)), np.array([1, 1]))
+
+
+class TestRationalFunctions:
+    def test_lcm(self):
+        self._test_lcm_inner(np.int16)
+        self._test_lcm_inner(np.uint16)
+
+    def test_lcm_object(self):
+        self._test_lcm_inner(np.object_)
+
+    def test_gcd(self):
+        self._test_gcd_inner(np.int16)
+        self._test_gcd_inner(np.uint16)
+
+    def test_gcd_object(self):
+        self._test_gcd_inner(np.object_)
+
+    def _test_lcm_inner(self, dtype):
+        # basic use
+        a = np.array([12, 120], dtype=dtype)
+        b = np.array([20, 200], dtype=dtype)
+        assert_equal(np.lcm(a, b), [60, 600])
+
+        if not issubclass(dtype, np.unsignedinteger):
+            # negatives are ignored
+            a = np.array([12, -12, 12, -12], dtype=dtype)
+            b = np.array([20, 20, -20, -20], dtype=dtype)
+            assert_equal(np.lcm(a, b), [60]*4)
+
+        # reduce
+        a = np.array([3, 12, 20], dtype=dtype)
+        assert_equal(np.lcm.reduce([3, 12, 20]), 60)
+
+        # broadcasting, and a test including 0
+        a = np.arange(6).astype(dtype)
+        b = 20
+        assert_equal(np.lcm(a, b), [0, 20, 20, 60, 20, 20])
+
+    def _test_gcd_inner(self, dtype):
+        # basic use
+        a = np.array([12, 120], dtype=dtype)
+        b = np.array([20, 200], dtype=dtype)
+        assert_equal(np.gcd(a, b), [4, 40])
+
+        if not issubclass(dtype, np.unsignedinteger):
+            # negatives are ignored
+            a = np.array([12, -12, 12, -12], dtype=dtype)
+            b = np.array([20, 20, -20, -20], dtype=dtype)
+            assert_equal(np.gcd(a, b), [4]*4)
+
+        # reduce
+        a = np.array([15, 25, 35], dtype=dtype)
+        assert_equal(np.gcd.reduce(a), 5)
+
+        # broadcasting, and a test including 0
+        a = 
np.arange(6).astype(dtype) + b = 20 + assert_equal(np.gcd(a, b), [20, 1, 2, 1, 4, 5]) + + def test_lcm_overflow(self): + # verify that we don't overflow when a*b does overflow + big = np.int32(np.iinfo(np.int32).max // 11) + a = 2*big + b = 5*big + assert_equal(np.lcm(a, b), 10*big) + + def test_gcd_overflow(self): + for dtype in (np.int32, np.int64): + # verify that we don't overflow when taking abs(x) + # not relevant for lcm, where the result is unrepresentable anyway + a = dtype(np.iinfo(dtype).min) # negative power of two + q = -(a // 4) + assert_equal(np.gcd(a, q*3), q) + assert_equal(np.gcd(a, -q*3), q) + + def test_decimal(self): + from decimal import Decimal + a = np.array([1, 1, -1, -1]) * Decimal('0.20') + b = np.array([1, -1, 1, -1]) * Decimal('0.12') + + assert_equal(np.gcd(a, b), 4*[Decimal('0.04')]) + assert_equal(np.lcm(a, b), 4*[Decimal('0.60')]) + + def test_float(self): + # not well-defined on float due to rounding errors + assert_raises(TypeError, np.gcd, 0.3, 0.4) + assert_raises(TypeError, np.lcm, 0.3, 0.4) + + def test_builtin_long(self): + # sanity check that array coercion is alright for builtin longs + assert_equal(np.array(2**200).item(), 2**200) + + # expressed as prime factors + a = np.array(2**100 * 3**5) + b = np.array([2**100 * 5**7, 2**50 * 3**10]) + assert_equal(np.gcd(a, b), [2**100, 2**50 * 3**5]) + assert_equal(np.lcm(a, b), [2**100 * 3**5 * 5**7, 2**100 * 3**10]) + + assert_equal(np.gcd(2**100, 3**100), 1) + + +class TestRoundingFunctions: + + def test_object_direct(self): + """ test direct implementation of these magic methods """ + class C: + def __floor__(self): + return 1 + def __ceil__(self): + return 2 + def __trunc__(self): + return 3 + + arr = np.array([C(), C()]) + assert_equal(np.floor(arr), [1, 1]) + assert_equal(np.ceil(arr), [2, 2]) + assert_equal(np.trunc(arr), [3, 3]) + + def test_object_indirect(self): + """ test implementations via __float__ """ + class C: + def __float__(self): + return -2.5 + + arr = np.array([C(), C()]) + assert_equal(np.floor(arr), [-3, -3]) + assert_equal(np.ceil(arr), [-2, -2]) + with pytest.raises(TypeError): + np.trunc(arr) # consistent with math.trunc + + def test_fraction(self): + f = Fraction(-4, 3) + assert_equal(np.floor(f), -2) + assert_equal(np.ceil(f), -1) + assert_equal(np.trunc(f), -1) + + +class TestComplexFunctions: + funcs = [np.arcsin, np.arccos, np.arctan, np.arcsinh, np.arccosh, + np.arctanh, np.sin, np.cos, np.tan, np.exp, + np.exp2, np.log, np.sqrt, np.log10, np.log2, + np.log1p] + + def test_it(self): + for f in self.funcs: + if f is np.arccosh: + x = 1.5 + else: + x = .5 + fr = f(x) + fz = f(complex(x)) + assert_almost_equal(fz.real, fr, err_msg='real part %s' % f) + assert_almost_equal(fz.imag, 0., err_msg='imag part %s' % f) + + def test_precisions_consistent(self): + z = 1 + 1j + for f in self.funcs: + fcf = f(np.csingle(z)) + fcd = f(np.cdouble(z)) + fcl = f(np.clongdouble(z)) + assert_almost_equal(fcf, fcd, decimal=6, err_msg='fch-fcd %s' % f) + assert_almost_equal(fcl, fcd, decimal=15, err_msg='fch-fcl %s' % f) + + def test_branch_cuts(self): + # check branch cuts and continuity on them + _check_branch_cut(np.log, -0.5, 1j, 1, -1, True) + _check_branch_cut(np.log2, -0.5, 1j, 1, -1, True) + _check_branch_cut(np.log10, -0.5, 1j, 1, -1, True) + _check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True) + _check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True) + + _check_branch_cut(np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True) + _check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True) + 
_check_branch_cut(np.arctan, [0-2j, 2j], [1, 1], -1, 1, True) + + _check_branch_cut(np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True) + _check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True) + _check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True) + + # check against bogus branch cuts: assert continuity between quadrants + _check_branch_cut(np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1) + _check_branch_cut(np.arccos, [0-2j, 2j], [ 1, 1], 1, 1) + _check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1) + + _check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1) + _check_branch_cut(np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1) + _check_branch_cut(np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1) + + def test_branch_cuts_complex64(self): + # check branch cuts and continuity on them + _check_branch_cut(np.log, -0.5, 1j, 1, -1, True, np.complex64) + _check_branch_cut(np.log2, -0.5, 1j, 1, -1, True, np.complex64) + _check_branch_cut(np.log10, -0.5, 1j, 1, -1, True, np.complex64) + _check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True, np.complex64) + _check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True, np.complex64) + + _check_branch_cut(np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64) + _check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64) + _check_branch_cut(np.arctan, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64) + + _check_branch_cut(np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64) + _check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True, np.complex64) + _check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64) + + # check against bogus branch cuts: assert continuity between quadrants + _check_branch_cut(np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64) + _check_branch_cut(np.arccos, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64) + _check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1, False, np.complex64) + + _check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1, False, np.complex64) + _check_branch_cut(np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1, False, np.complex64) + _check_branch_cut(np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1, False, np.complex64) + + def test_against_cmath(self): + import cmath + + points = [-1-1j, -1+1j, +1-1j, +1+1j] + name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan', + 'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'} + atol = 4*np.finfo(complex).eps + for func in self.funcs: + fname = func.__name__.split('.')[-1] + cname = name_map.get(fname, fname) + try: + cfunc = getattr(cmath, cname) + except AttributeError: + continue + for p in points: + a = complex(func(np.complex_(p))) + b = cfunc(p) + assert_(abs(a - b) < atol, "%s %s: %s; cmath: %s" % (fname, p, a, b)) + + @pytest.mark.parametrize('dtype', [np.complex64, np.complex_, np.longcomplex]) + def test_loss_of_precision(self, dtype): + """Check loss of precision in complex arc* functions""" + + # Check against known-good functions + + info = np.finfo(dtype) + real_dtype = dtype(0.).real.dtype + eps = info.eps + + def check(x, rtol): + x = x.astype(real_dtype) + + z = x.astype(dtype) + d = np.absolute(np.arcsinh(x)/np.arcsinh(z).real - 1) + assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), + 'arcsinh')) + + z = (1j*x).astype(dtype) + d = np.absolute(np.arcsinh(x)/np.arcsin(z).imag - 1) + assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), + 'arcsin')) + + z = x.astype(dtype) + d = np.absolute(np.arctanh(x)/np.arctanh(z).real - 1) + assert_(np.all(d < rtol), (np.argmax(d), 
x[np.argmax(d)], d.max(), + 'arctanh')) + + z = (1j*x).astype(dtype) + d = np.absolute(np.arctanh(x)/np.arctan(z).imag - 1) + assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), + 'arctan')) + + # The switchover was chosen as 1e-3; hence there can be up to + # ~eps/1e-3 of relative cancellation error before it + + x_series = np.logspace(-20, -3.001, 200) + x_basic = np.logspace(-2.999, 0, 10, endpoint=False) + + if dtype is np.longcomplex: + # It's not guaranteed that the system-provided arc functions + # are accurate down to a few epsilons. (Eg. on Linux 64-bit) + # So, give more leeway for long complex tests here: + # Can use 2.1 for > Ubuntu LTS Trusty (2014), glibc = 2.19. + if skip_longcomplex_msg: + pytest.skip(skip_longcomplex_msg) + check(x_series, 50.0*eps) + else: + check(x_series, 2.1*eps) + check(x_basic, 2.0*eps/1e-3) + + # Check a few points + + z = np.array([1e-5*(1+1j)], dtype=dtype) + p = 9.999999999333333333e-6 + 1.000000000066666666e-5j + d = np.absolute(1-np.arctanh(z)/p) + assert_(np.all(d < 1e-15)) + + p = 1.0000000000333333333e-5 + 9.999999999666666667e-6j + d = np.absolute(1-np.arcsinh(z)/p) + assert_(np.all(d < 1e-15)) + + p = 9.999999999333333333e-6j + 1.000000000066666666e-5 + d = np.absolute(1-np.arctan(z)/p) + assert_(np.all(d < 1e-15)) + + p = 1.0000000000333333333e-5j + 9.999999999666666667e-6 + d = np.absolute(1-np.arcsin(z)/p) + assert_(np.all(d < 1e-15)) + + # Check continuity across switchover points + + def check(func, z0, d=1): + z0 = np.asarray(z0, dtype=dtype) + zp = z0 + abs(z0) * d * eps * 2 + zm = z0 - abs(z0) * d * eps * 2 + assert_(np.all(zp != zm), (zp, zm)) + + # NB: the cancellation error at the switchover is at least eps + good = (abs(func(zp) - func(zm)) < 2*eps) + assert_(np.all(good), (func, z0[~good])) + + for func in (np.arcsinh, np.arcsinh, np.arcsin, np.arctanh, np.arctan): + pts = [rp+1j*ip for rp in (-1e-3, 0, 1e-3) for ip in(-1e-3, 0, 1e-3) + if rp != 0 or ip != 0] + check(func, pts, 1) + check(func, pts, 1j) + check(func, pts, 1+1j) + + +class TestAttributes: + def test_attributes(self): + add = ncu.add + assert_equal(add.__name__, 'add') + assert_(add.ntypes >= 18) # don't fail if types added + assert_('ii->i' in add.types) + assert_equal(add.nin, 2) + assert_equal(add.nout, 1) + assert_equal(add.identity, 0) + + def test_doc(self): + # don't bother checking the long list of kwargs, which are likely to + # change + assert_(ncu.add.__doc__.startswith( + "add(x1, x2, /, out=None, *, where=True")) + assert_(ncu.frexp.__doc__.startswith( + "frexp(x[, out1, out2], / [, out=(None, None)], *, where=True")) + + +class TestSubclass: + + def test_subclass_op(self): + + class simple(np.ndarray): + def __new__(subtype, shape): + self = np.ndarray.__new__(subtype, shape, dtype=object) + self.fill(0) + return self + + a = simple((3, 4)) + assert_equal(a+a, a) + + +class TestFrompyfunc: + + def test_identity(self): + def mul(a, b): + return a * b + + # with identity=value + mul_ufunc = np.frompyfunc(mul, nin=2, nout=1, identity=1) + assert_equal(mul_ufunc.reduce([2, 3, 4]), 24) + assert_equal(mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1)), 1) + assert_equal(mul_ufunc.reduce([]), 1) + + # with identity=None (reorderable) + mul_ufunc = np.frompyfunc(mul, nin=2, nout=1, identity=None) + assert_equal(mul_ufunc.reduce([2, 3, 4]), 24) + assert_equal(mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1)), 1) + assert_raises(ValueError, lambda: mul_ufunc.reduce([])) + + # with no identity (not reorderable) + mul_ufunc = np.frompyfunc(mul, nin=2, 
nout=1) + assert_equal(mul_ufunc.reduce([2, 3, 4]), 24) + assert_raises(ValueError, lambda: mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1))) + assert_raises(ValueError, lambda: mul_ufunc.reduce([])) + + +def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False, + dtype=complex): + """ + Check for a branch cut in a function. + + Assert that `x0` lies on a branch cut of function `f` and `f` is + continuous from the direction `dx`. + + Parameters + ---------- + f : func + Function to check + x0 : array-like + Point on branch cut + dx : array-like + Direction to check continuity in + re_sign, im_sign : {1, -1} + Change of sign of the real or imaginary part expected + sig_zero_ok : bool + Whether to check if the branch cut respects signed zero (if applicable) + dtype : dtype + Dtype to check (should be complex) + + """ + x0 = np.atleast_1d(x0).astype(dtype) + dx = np.atleast_1d(dx).astype(dtype) + + if np.dtype(dtype).char == 'F': + scale = np.finfo(dtype).eps * 1e2 + atol = np.float32(1e-2) + else: + scale = np.finfo(dtype).eps * 1e3 + atol = 1e-4 + + y0 = f(x0) + yp = f(x0 + dx*scale*np.absolute(x0)/np.absolute(dx)) + ym = f(x0 - dx*scale*np.absolute(x0)/np.absolute(dx)) + + assert_(np.all(np.absolute(y0.real - yp.real) < atol), (y0, yp)) + assert_(np.all(np.absolute(y0.imag - yp.imag) < atol), (y0, yp)) + assert_(np.all(np.absolute(y0.real - ym.real*re_sign) < atol), (y0, ym)) + assert_(np.all(np.absolute(y0.imag - ym.imag*im_sign) < atol), (y0, ym)) + + if sig_zero_ok: + # check that signed zeros also work as a displacement + jr = (x0.real == 0) & (dx.real != 0) + ji = (x0.imag == 0) & (dx.imag != 0) + if np.any(jr): + x = x0[jr] + x.real = np.NZERO + ym = f(x) + assert_(np.all(np.absolute(y0[jr].real - ym.real*re_sign) < atol), (y0[jr], ym)) + assert_(np.all(np.absolute(y0[jr].imag - ym.imag*im_sign) < atol), (y0[jr], ym)) + + if np.any(ji): + x = x0[ji] + x.imag = np.NZERO + ym = f(x) + assert_(np.all(np.absolute(y0[ji].real - ym.real*re_sign) < atol), (y0[ji], ym)) + assert_(np.all(np.absolute(y0[ji].imag - ym.imag*im_sign) < atol), (y0[ji], ym)) + +def test_copysign(): + assert_(np.copysign(1, -1) == -1) + with np.errstate(divide="ignore"): + assert_(1 / np.copysign(0, -1) < 0) + assert_(1 / np.copysign(0, 1) > 0) + assert_(np.signbit(np.copysign(np.nan, -1))) + assert_(not np.signbit(np.copysign(np.nan, 1))) + +def _test_nextafter(t): + one = t(1) + two = t(2) + zero = t(0) + eps = np.finfo(t).eps + assert_(np.nextafter(one, two) - one == eps) + assert_(np.nextafter(one, zero) - one < 0) + assert_(np.isnan(np.nextafter(np.nan, one))) + assert_(np.isnan(np.nextafter(one, np.nan))) + assert_(np.nextafter(one, one) == one) + +def test_nextafter(): + return _test_nextafter(np.float64) + + +def test_nextafterf(): + return _test_nextafter(np.float32) + + +@pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble), + reason="long double is same as double") +@pytest.mark.xfail(condition=platform.machine().startswith("ppc64"), + reason="IBM double double") +def test_nextafterl(): + return _test_nextafter(np.longdouble) + + +def test_nextafter_0(): + for t, direction in itertools.product(np.sctypes['float'], (1, -1)): + tiny = np.finfo(t).tiny + assert_(0. 
< direction * np.nextafter(t(0), t(direction)) < tiny) + assert_equal(np.nextafter(t(0), t(direction)) / t(2.1), direction * 0.0) + +def _test_spacing(t): + one = t(1) + eps = np.finfo(t).eps + nan = t(np.nan) + inf = t(np.inf) + with np.errstate(invalid='ignore'): + assert_(np.spacing(one) == eps) + assert_(np.isnan(np.spacing(nan))) + assert_(np.isnan(np.spacing(inf))) + assert_(np.isnan(np.spacing(-inf))) + assert_(np.spacing(t(1e30)) != 0) + +def test_spacing(): + return _test_spacing(np.float64) + +def test_spacingf(): + return _test_spacing(np.float32) + + +@pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble), + reason="long double is same as double") +@pytest.mark.xfail(condition=platform.machine().startswith("ppc64"), + reason="IBM double double") +def test_spacingl(): + return _test_spacing(np.longdouble) + +def test_spacing_gfortran(): + # Reference from this fortran file, built with gfortran 4.3.3 on linux + # 32bits: + # PROGRAM test_spacing + # INTEGER, PARAMETER :: SGL = SELECTED_REAL_KIND(p=6, r=37) + # INTEGER, PARAMETER :: DBL = SELECTED_REAL_KIND(p=13, r=200) + # + # WRITE(*,*) spacing(0.00001_DBL) + # WRITE(*,*) spacing(1.0_DBL) + # WRITE(*,*) spacing(1000._DBL) + # WRITE(*,*) spacing(10500._DBL) + # + # WRITE(*,*) spacing(0.00001_SGL) + # WRITE(*,*) spacing(1.0_SGL) + # WRITE(*,*) spacing(1000._SGL) + # WRITE(*,*) spacing(10500._SGL) + # END PROGRAM + ref = {np.float64: [1.69406589450860068E-021, + 2.22044604925031308E-016, + 1.13686837721616030E-013, + 1.81898940354585648E-012], + np.float32: [9.09494702E-13, + 1.19209290E-07, + 6.10351563E-05, + 9.76562500E-04]} + + for dt, dec_ in zip([np.float32, np.float64], (10, 20)): + x = np.array([1e-5, 1, 1000, 10500], dtype=dt) + assert_array_almost_equal(np.spacing(x), ref[dt], decimal=dec_) + +def test_nextafter_vs_spacing(): + # XXX: spacing does not handle long double yet + for t in [np.float32, np.float64]: + for _f in [1, 1e-5, 1000]: + f = t(_f) + f1 = t(_f + 1) + assert_(np.nextafter(f, f1) - f == np.spacing(f)) + +def test_pos_nan(): + """Check np.nan is a positive nan.""" + assert_(np.signbit(np.nan) == 0) + +def test_reduceat(): + """Test bug in reduceat when structured arrays are not copied.""" + db = np.dtype([('name', 'S11'), ('time', np.int64), ('value', np.float32)]) + a = np.empty([100], dtype=db) + a['name'] = 'Simple' + a['time'] = 10 + a['value'] = 100 + indx = [0, 7, 15, 25] + + h2 = [] + val1 = indx[0] + for val2 in indx[1:]: + h2.append(np.add.reduce(a['value'][val1:val2])) + val1 = val2 + h2.append(np.add.reduce(a['value'][val1:])) + h2 = np.array(h2) + + # test buffered -- this should work + h1 = np.add.reduceat(a['value'], indx) + assert_array_almost_equal(h1, h2) + + # This is when the error occurs. 
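+    # np.setbufsize(32) shrinks the ufunc buffer so reduceat takes the
+    # unbuffered path over the unaligned structured field, which is where
+    # the missing copy described in the docstring used to surface.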
+ # test no buffer + np.setbufsize(32) + h1 = np.add.reduceat(a['value'], indx) + np.setbufsize(np.UFUNC_BUFSIZE_DEFAULT) + assert_array_almost_equal(h1, h2) + +def test_reduceat_empty(): + """Reduceat should work with empty arrays""" + indices = np.array([], 'i4') + x = np.array([], 'f8') + result = np.add.reduceat(x, indices) + assert_equal(result.dtype, x.dtype) + assert_equal(result.shape, (0,)) + # Another case with a slightly different zero-sized shape + x = np.ones((5, 2)) + result = np.add.reduceat(x, [], axis=0) + assert_equal(result.dtype, x.dtype) + assert_equal(result.shape, (0, 2)) + result = np.add.reduceat(x, [], axis=1) + assert_equal(result.dtype, x.dtype) + assert_equal(result.shape, (5, 0)) + +def test_complex_nan_comparisons(): + nans = [complex(np.nan, 0), complex(0, np.nan), complex(np.nan, np.nan)] + fins = [complex(1, 0), complex(-1, 0), complex(0, 1), complex(0, -1), + complex(1, 1), complex(-1, -1), complex(0, 0)] + + with np.errstate(invalid='ignore'): + for x in nans + fins: + x = np.array([x]) + for y in nans + fins: + y = np.array([y]) + + if np.isfinite(x) and np.isfinite(y): + continue + + assert_equal(x < y, False, err_msg="%r < %r" % (x, y)) + assert_equal(x > y, False, err_msg="%r > %r" % (x, y)) + assert_equal(x <= y, False, err_msg="%r <= %r" % (x, y)) + assert_equal(x >= y, False, err_msg="%r >= %r" % (x, y)) + assert_equal(x == y, False, err_msg="%r == %r" % (x, y)) + + +def test_rint_big_int(): + # np.rint bug for large integer values on Windows 32-bit and MKL + # https://github.com/numpy/numpy/issues/6685 + val = 4607998452777363968 + # This is exactly representable in floating point + assert_equal(val, int(float(val))) + # Rint should not change the value + assert_equal(val, np.rint(val)) + +@pytest.mark.parametrize('ftype', [np.float32, np.float64]) +def test_memoverlap_accumulate(ftype): + # Reproduces bug https://github.com/numpy/numpy/issues/15597 + arr = np.array([0.61, 0.60, 0.77, 0.41, 0.19], dtype=ftype) + out_max = np.array([0.61, 0.61, 0.77, 0.77, 0.77], dtype=ftype) + out_min = np.array([0.61, 0.60, 0.60, 0.41, 0.19], dtype=ftype) + assert_equal(np.maximum.accumulate(arr), out_max) + assert_equal(np.minimum.accumulate(arr), out_min) + +def test_signaling_nan_exceptions(): + with assert_no_warnings(): + a = np.ndarray(shape=(), dtype='float32', buffer=b'\x00\xe0\xbf\xff') + np.isnan(a) + +@pytest.mark.parametrize("arr", [ + np.arange(2), + np.matrix([0, 1]), + np.matrix([[0, 1], [2, 5]]), + ]) +def test_outer_subclass_preserve(arr): + # for gh-8661 + class foo(np.ndarray): pass + actual = np.multiply.outer(arr.view(foo), arr.view(foo)) + assert actual.__class__.__name__ == 'foo' + +def test_outer_bad_subclass(): + class BadArr1(np.ndarray): + def __array_finalize__(self, obj): + # The outer call reshapes to 3 dims, try to do a bad reshape. + if self.ndim == 3: + self.shape = self.shape + (1,) + + def __array_prepare__(self, obj, context=None): + return obj + + class BadArr2(np.ndarray): + def __array_finalize__(self, obj): + if isinstance(obj, BadArr2): + # outer inserts 1-sized dims. In that case disturb them. 
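+                # (flipping the shape corrupts the dims that outer just
+                # inserted, so the np.add.outer call below must fail with
+                # a TypeError instead of returning a malformed array)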
+ if self.shape[-1] == 1: + self.shape = self.shape[::-1] + + def __array_prepare__(self, obj, context=None): + return obj + + for cls in [BadArr1, BadArr2]: + arr = np.ones((2, 3)).view(cls) + with assert_raises(TypeError) as a: + # The first array gets reshaped (not the second one) + np.add.outer(arr, [1, 2]) + + # This actually works, since we only see the reshaping error: + arr = np.ones((2, 3)).view(cls) + assert type(np.add.outer([1, 2], arr)) is cls + +def test_outer_exceeds_maxdims(): + deep = np.ones((1,) * 17) + with assert_raises(ValueError): + np.add.outer(deep, deep) + diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_umath_accuracy.py b/MLPY/Lib/site-packages/numpy/core/tests/test_umath_accuracy.py new file mode 100644 index 0000000000000000000000000000000000000000..c49418370e9518809c720dbb0f9cb54409076bdb --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/core/tests/test_umath_accuracy.py @@ -0,0 +1,60 @@ +import numpy as np +import platform +from os import path +import sys +import pytest +from ctypes import c_longlong, c_double, c_float, c_int, cast, pointer, POINTER +from numpy.testing import assert_array_max_ulp +from numpy.core._multiarray_umath import __cpu_features__ + +IS_AVX = __cpu_features__.get('AVX512F', False) or \ + (__cpu_features__.get('FMA3', False) and __cpu_features__.get('AVX2', False)) +runtest = sys.platform.startswith('linux') and IS_AVX +platform_skip = pytest.mark.skipif(not runtest, + reason="avoid testing inconsistent platform " + "library implementations") + +# convert string to hex function taken from: +# https://stackoverflow.com/questions/1592158/convert-hex-to-float # +def convert(s, datatype="np.float32"): + i = int(s, 16) # convert from hex to a Python int + if (datatype == "np.float64"): + cp = pointer(c_longlong(i)) # make this into a c long long integer + fp = cast(cp, POINTER(c_double)) # cast the int pointer to a double pointer + else: + cp = pointer(c_int(i)) # make this into a c integer + fp = cast(cp, POINTER(c_float)) # cast the int pointer to a float pointer + + return fp.contents.value # dereference the pointer, get the float + +str_to_float = np.vectorize(convert) +files = ['umath-validation-set-exp.csv', + 'umath-validation-set-log.csv', + 'umath-validation-set-sin.csv', + 'umath-validation-set-cos.csv'] + +class TestAccuracy: + @platform_skip + def test_validate_transcendentals(self): + with np.errstate(all='ignore'): + for filename in files: + data_dir = path.join(path.dirname(__file__), 'data') + filepath = path.join(data_dir, filename) + with open(filepath) as fid: + file_without_comments = (r for r in fid if not r[0] in ('$', '#')) + data = np.genfromtxt(file_without_comments, + dtype=('|S39','|S39','|S39',int), + names=('type','input','output','ulperr'), + delimiter=',', + skip_header=1) + npname = path.splitext(filename)[0].split('-')[3] + npfunc = getattr(np, npname) + for datatype in np.unique(data['type']): + data_subset = data[data['type'] == datatype] + inval = np.array(str_to_float(data_subset['input'].astype(str), data_subset['type'].astype(str)), dtype=eval(datatype)) + outval = np.array(str_to_float(data_subset['output'].astype(str), data_subset['type'].astype(str)), dtype=eval(datatype)) + perm = np.random.permutation(len(inval)) + inval = inval[perm] + outval = outval[perm] + maxulperr = data_subset['ulperr'].max() + assert_array_max_ulp(npfunc(inval), outval, maxulperr) diff --git a/MLPY/Lib/site-packages/numpy/core/tests/test_umath_complex.py 
b/MLPY/Lib/site-packages/numpy/core/tests/test_umath_complex.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d2ee372fd41444566deafd064c5bbd12bf5c27d
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/core/tests/test_umath_complex.py
@@ -0,0 +1,624 @@
+import sys
+import platform
+import pytest
+
+import numpy as np
+# import the c-extension module directly since _arg is not exported via umath
+import numpy.core._multiarray_umath as ncu
+from numpy.testing import (
+    assert_raises, assert_equal, assert_array_equal, assert_almost_equal, assert_array_max_ulp
+    )
+
+# TODO: branch cuts (use Pauli code)
+# TODO: conj 'symmetry'
+# TODO: FPU exceptions
+
+# At least on Windows the results of many complex functions are not conforming
+# to the C99 standard. See ticket 1574.
+# Ditto for Solaris (ticket 1642) and OS X on PowerPC.
+# FIXME: this will probably change when we require full C99 compatibility
+with np.errstate(all='ignore'):
+    functions_seem_flaky = ((np.exp(complex(np.inf, 0)).imag != 0)
+                            or (np.log(complex(np.NZERO, 0)).imag != np.pi))
+# TODO: replace with a check on whether platform-provided C99 funcs are used
+xfail_complex_tests = (not sys.platform.startswith('linux') or functions_seem_flaky)
+
+# TODO: this can become an xfail once the generator functions are gone.
+platform_skip = pytest.mark.skipif(xfail_complex_tests,
+                                   reason="Inadequate C99 complex support")
+
+
+class TestCexp:
+    def test_simple(self):
+        check = check_complex_value
+        f = np.exp
+
+        check(f, 1, 0, np.exp(1), 0, False)
+        check(f, 0, 1, np.cos(1), np.sin(1), False)
+
+        ref = np.exp(1) * complex(np.cos(1), np.sin(1))
+        check(f, 1, 1, ref.real, ref.imag, False)
+
+    @platform_skip
+    def test_special_values(self):
+        # C99: Section G 6.3.1
+
+        check = check_complex_value
+        f = np.exp
+
+        # cexp(+-0 + 0i) is 1 + 0i
+        check(f, np.PZERO, 0, 1, 0, False)
+        check(f, np.NZERO, 0, 1, 0, False)
+
+        # cexp(x + infi) is nan + nani for finite x and raises 'invalid' FPU
+        # exception
+        check(f, 1, np.inf, np.nan, np.nan)
+        check(f, -1, np.inf, np.nan, np.nan)
+        check(f, 0, np.inf, np.nan, np.nan)
+
+        # cexp(inf + 0i) is inf + 0i
+        check(f, np.inf, 0, np.inf, 0)
+
+        # cexp(-inf + yi) is +0 * (cos(y) + i sin(y)) for finite y
+        check(f, -np.inf, 1, np.PZERO, np.PZERO)
+        check(f, -np.inf, 0.75 * np.pi, np.NZERO, np.PZERO)
+
+        # cexp(inf + yi) is +inf * (cos(y) + i sin(y)) for finite y
+        check(f, np.inf, 1, np.inf, np.inf)
+        check(f, np.inf, 0.75 * np.pi, -np.inf, np.inf)
+
+        # cexp(-inf + inf i) is +-0 +- 0i (signs unspecified)
+        def _check_ninf_inf(dummy):
+            msgform = "cexp(-inf, inf) is (%f, %f), expected (+-0, +-0)"
+            with np.errstate(invalid='ignore'):
+                z = f(np.array(complex(-np.inf, np.inf)))
+                if z.real != 0 or z.imag != 0:
+                    raise AssertionError(msgform % (z.real, z.imag))
+
+        _check_ninf_inf(None)
+
+        # cexp(inf + inf i) is +-inf + NaNi and raises the 'invalid' FPU
+        # exception.
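+        # C99 leaves the sign of the real part unspecified for this case, so
+        # the helper below only checks isinf/isnan rather than exact values.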
+        def _check_inf_inf(dummy):
+            msgform = "cexp(inf, inf) is (%f, %f), expected (+-inf, nan)"
+            with np.errstate(invalid='ignore'):
+                z = f(np.array(complex(np.inf, np.inf)))
+                if not np.isinf(z.real) or not np.isnan(z.imag):
+                    raise AssertionError(msgform % (z.real, z.imag))
+
+        _check_inf_inf(None)
+
+        # cexp(-inf + nan i) is +-0 +- 0i
+        def _check_ninf_nan(dummy):
+            msgform = "cexp(-inf, nan) is (%f, %f), expected (+-0, +-0)"
+            with np.errstate(invalid='ignore'):
+                z = f(np.array(complex(-np.inf, np.nan)))
+                if z.real != 0 or z.imag != 0:
+                    raise AssertionError(msgform % (z.real, z.imag))
+
+        _check_ninf_nan(None)
+
+        # cexp(inf + nan i) is +-inf + nan
+        def _check_inf_nan(dummy):
+            msgform = "cexp(inf, nan) is (%f, %f), expected (+-inf, nan)"
+            with np.errstate(invalid='ignore'):
+                z = f(np.array(complex(np.inf, np.nan)))
+                if not np.isinf(z.real) or not np.isnan(z.imag):
+                    raise AssertionError(msgform % (z.real, z.imag))
+
+        _check_inf_nan(None)
+
+        # cexp(nan + yi) is nan + nani for y != 0 (optional: raises invalid FPU
+        # ex)
+        check(f, np.nan, 1, np.nan, np.nan)
+        check(f, np.nan, -1, np.nan, np.nan)
+
+        check(f, np.nan, np.inf, np.nan, np.nan)
+        check(f, np.nan, -np.inf, np.nan, np.nan)
+
+        # cexp(nan + nani) is nan + nani
+        check(f, np.nan, np.nan, np.nan, np.nan)
+
+    # TODO: this can become an xfail once the generator functions are gone.
+    @pytest.mark.skip(reason="cexp(nan + 0I) is wrong on most platforms")
+    def test_special_values2(self):
+        # XXX: most implementations get it wrong here (including glibc <= 2.10)
+        # cexp(nan + 0i) is nan + 0i
+        check = check_complex_value
+        f = np.exp
+
+        check(f, np.nan, 0, np.nan, 0)
+
+class TestClog:
+    def test_simple(self):
+        x = np.array([1+0j, 1+2j])
+        y_r = np.log(np.abs(x)) + 1j * np.angle(x)
+        y = np.log(x)
+        for i in range(len(x)):
+            assert_almost_equal(y[i], y_r[i])
+
+    @platform_skip
+    @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.")
+    def test_special_values(self):
+        xl = []
+        yl = []
+
+        # From C99 std (Sec 6.3.2)
+        # XXX: check exceptions raised
+        # --- raise for invalid fails.
+
+        # clog(-0 + i0) returns -inf + i pi and raises the 'divide-by-zero'
+        # floating-point exception.
+        with np.errstate(divide='raise'):
+            x = np.array([np.NZERO], dtype=complex)
+            y = complex(-np.inf, np.pi)
+            assert_raises(FloatingPointError, np.log, x)
+        with np.errstate(divide='ignore'):
+            assert_almost_equal(np.log(x), y)
+
+        xl.append(x)
+        yl.append(y)
+
+        # clog(+0 + i0) returns -inf + i0 and raises the 'divide-by-zero'
+        # floating-point exception.
+        with np.errstate(divide='raise'):
+            x = np.array([0], dtype=complex)
+            y = complex(-np.inf, 0)
+            assert_raises(FloatingPointError, np.log, x)
+        with np.errstate(divide='ignore'):
+            assert_almost_equal(np.log(x), y)
+
+        xl.append(x)
+        yl.append(y)
+
+        # clog(x + i inf) returns +inf + i pi/2, for finite x.
+        x = np.array([complex(1, np.inf)], dtype=complex)
+        y = complex(np.inf, 0.5 * np.pi)
+        assert_almost_equal(np.log(x), y)
+        xl.append(x)
+        yl.append(y)
+
+        x = np.array([complex(-1, np.inf)], dtype=complex)
+        assert_almost_equal(np.log(x), y)
+        xl.append(x)
+        yl.append(y)
+
+        # clog(x + iNaN) returns NaN + iNaN and optionally raises the
+        # 'invalid' floating-point exception, for finite x.
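+        # The assert_raises for the 'invalid' exception is commented out
+        # below because not every libm raises it here; only the returned
+        # value is checked.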
+        with np.errstate(invalid='raise'):
+            x = np.array([complex(1., np.nan)], dtype=complex)
+            y = complex(np.nan, np.nan)
+            #assert_raises(FloatingPointError, np.log, x)
+        with np.errstate(invalid='ignore'):
+            assert_almost_equal(np.log(x), y)
+
+        xl.append(x)
+        yl.append(y)
+
+        with np.errstate(invalid='raise'):
+            x = np.array([np.inf + 1j * np.nan], dtype=complex)
+            #assert_raises(FloatingPointError, np.log, x)
+        with np.errstate(invalid='ignore'):
+            assert_almost_equal(np.log(x), y)
+
+        xl.append(x)
+        yl.append(y)
+
+        # clog(-inf + iy) returns +inf + i pi, for finite positive-signed y.
+        x = np.array([-np.inf + 1j], dtype=complex)
+        y = complex(np.inf, np.pi)
+        assert_almost_equal(np.log(x), y)
+        xl.append(x)
+        yl.append(y)
+
+        # clog(+inf + iy) returns +inf + i0, for finite positive-signed y.
+        x = np.array([np.inf + 1j], dtype=complex)
+        y = complex(np.inf, 0)
+        assert_almost_equal(np.log(x), y)
+        xl.append(x)
+        yl.append(y)
+
+        # clog(-inf + i inf) returns +inf + i 3pi/4.
+        x = np.array([complex(-np.inf, np.inf)], dtype=complex)
+        y = complex(np.inf, 0.75 * np.pi)
+        assert_almost_equal(np.log(x), y)
+        xl.append(x)
+        yl.append(y)
+
+        # clog(+inf + i inf) returns +inf + i pi/4.
+        x = np.array([complex(np.inf, np.inf)], dtype=complex)
+        y = complex(np.inf, 0.25 * np.pi)
+        assert_almost_equal(np.log(x), y)
+        xl.append(x)
+        yl.append(y)
+
+        # clog(+/- inf + iNaN) returns +inf + iNaN.
+        x = np.array([complex(np.inf, np.nan)], dtype=complex)
+        y = complex(np.inf, np.nan)
+        assert_almost_equal(np.log(x), y)
+        xl.append(x)
+        yl.append(y)
+
+        x = np.array([complex(-np.inf, np.nan)], dtype=complex)
+        assert_almost_equal(np.log(x), y)
+        xl.append(x)
+        yl.append(y)
+
+        # clog(NaN + iy) returns NaN + iNaN and optionally raises the
+        # 'invalid' floating-point exception, for finite y.
+        x = np.array([complex(np.nan, 1)], dtype=complex)
+        y = complex(np.nan, np.nan)
+        assert_almost_equal(np.log(x), y)
+        xl.append(x)
+        yl.append(y)
+
+        # clog(NaN + i inf) returns +inf + iNaN.
+        x = np.array([complex(np.nan, np.inf)], dtype=complex)
+        y = complex(np.inf, np.nan)
+        assert_almost_equal(np.log(x), y)
+        xl.append(x)
+        yl.append(y)
+
+        # clog(NaN + iNaN) returns NaN + iNaN.
+        x = np.array([complex(np.nan, np.nan)], dtype=complex)
+        y = complex(np.nan, np.nan)
+        assert_almost_equal(np.log(x), y)
+        xl.append(x)
+        yl.append(y)
+
+        # clog(conj(z)) = conj(clog(z)).
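+        # xl/yl collected every special-value input and expected output
+        # above, so conjugate symmetry is checked against all of them at
+        # once; divide errors are ignored since the inputs include zeros.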
+ xa = np.array(xl, dtype=complex) + ya = np.array(yl, dtype=complex) + with np.errstate(divide='ignore'): + for i in range(len(xa)): + assert_almost_equal(np.log(xa[i].conj()), ya[i].conj()) + + +class TestCsqrt: + + def test_simple(self): + # sqrt(1) + check_complex_value(np.sqrt, 1, 0, 1, 0) + + # sqrt(1i) + rres = 0.5*np.sqrt(2) + ires = rres + check_complex_value(np.sqrt, 0, 1, rres, ires, False) + + # sqrt(-1) + check_complex_value(np.sqrt, -1, 0, 0, 1) + + def test_simple_conjugate(self): + ref = np.conj(np.sqrt(complex(1, 1))) + + def f(z): + return np.sqrt(np.conj(z)) + + check_complex_value(f, 1, 1, ref.real, ref.imag, False) + + #def test_branch_cut(self): + # _check_branch_cut(f, -1, 0, 1, -1) + + @platform_skip + def test_special_values(self): + # C99: Sec G 6.4.2 + + check = check_complex_value + f = np.sqrt + + # csqrt(+-0 + 0i) is 0 + 0i + check(f, np.PZERO, 0, 0, 0) + check(f, np.NZERO, 0, 0, 0) + + # csqrt(x + infi) is inf + infi for any x (including NaN) + check(f, 1, np.inf, np.inf, np.inf) + check(f, -1, np.inf, np.inf, np.inf) + + check(f, np.PZERO, np.inf, np.inf, np.inf) + check(f, np.NZERO, np.inf, np.inf, np.inf) + check(f, np.inf, np.inf, np.inf, np.inf) + check(f, -np.inf, np.inf, np.inf, np.inf) + check(f, -np.nan, np.inf, np.inf, np.inf) + + # csqrt(x + nani) is nan + nani for any finite x + check(f, 1, np.nan, np.nan, np.nan) + check(f, -1, np.nan, np.nan, np.nan) + check(f, 0, np.nan, np.nan, np.nan) + + # csqrt(-inf + yi) is +0 + infi for any finite y > 0 + check(f, -np.inf, 1, np.PZERO, np.inf) + + # csqrt(inf + yi) is +inf + 0i for any finite y > 0 + check(f, np.inf, 1, np.inf, np.PZERO) + + # csqrt(-inf + nani) is nan +- infi (both +i infi are valid) + def _check_ninf_nan(dummy): + msgform = "csqrt(-inf, nan) is (%f, %f), expected (nan, +-inf)" + z = np.sqrt(np.array(complex(-np.inf, np.nan))) + #Fixme: ugly workaround for isinf bug. 
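+            # (the errstate guard keeps the isnan/isinf checks from raising
+            # a floating-point 'invalid' error on nan input on some
+            # platforms)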
+            with np.errstate(invalid='ignore'):
+                if not (np.isnan(z.real) and np.isinf(z.imag)):
+                    raise AssertionError(msgform % (z.real, z.imag))
+
+        _check_ninf_nan(None)
+
+        # csqrt(+inf + nani) is inf + nani
+        check(f, np.inf, np.nan, np.inf, np.nan)
+
+        # csqrt(nan + yi) is nan + nani for any finite y (infinite handled in x
+        # + nani)
+        check(f, np.nan, 0, np.nan, np.nan)
+        check(f, np.nan, 1, np.nan, np.nan)
+        check(f, np.nan, np.nan, np.nan, np.nan)
+
+        # XXX: check for conj(csqrt(z)) == csqrt(conj(z)) (need to fix branch
+        # cuts first)
+
+class TestCpow:
+    def setup(self):
+        self.olderr = np.seterr(invalid='ignore')
+
+    def teardown(self):
+        np.seterr(**self.olderr)
+
+    def test_simple(self):
+        x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan])
+        y_r = x ** 2
+        y = np.power(x, 2)
+        for i in range(len(x)):
+            assert_almost_equal(y[i], y_r[i])
+
+    def test_scalar(self):
+        x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan])
+        y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3])
+        lx = list(range(len(x)))
+
+        # Hardcode the expected `builtins.complex` values,
+        # as complex exponentiation is broken as of bpo-44698
+        p_r = [
+            1+0j,
+            0.20787957635076193+0j,
+            0.35812203996480685+0.6097119028618724j,
+            0.12659112128185032+0.48847676699581527j,
+            complex(np.inf, np.nan),
+            complex(np.nan, np.nan),
+        ]
+
+        n_r = [x[i] ** y[i] for i in lx]
+        for i in lx:
+            assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i)
+
+    def test_array(self):
+        x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan])
+        y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3])
+        lx = list(range(len(x)))
+
+        # Hardcode the expected `builtins.complex` values,
+        # as complex exponentiation is broken as of bpo-44698
+        p_r = [
+            1+0j,
+            0.20787957635076193+0j,
+            0.35812203996480685+0.6097119028618724j,
+            0.12659112128185032+0.48847676699581527j,
+            complex(np.inf, np.nan),
+            complex(np.nan, np.nan),
+        ]
+
+        n_r = x ** y
+        for i in lx:
+            assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i)
+
+class TestCabs:
+    def setup(self):
+        self.olderr = np.seterr(invalid='ignore')
+
+    def teardown(self):
+        np.seterr(**self.olderr)
+
+    def test_simple(self):
+        x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan])
+        y_r = np.array([np.sqrt(2.), 2, np.sqrt(5), np.inf, np.nan])
+        y = np.abs(x)
+        for i in range(len(x)):
+            assert_almost_equal(y[i], y_r[i])
+
+    def test_fabs(self):
+        # Test that np.abs(x +- 0j) == np.abs(x) (as mandated by C99 for cabs)
+        x = np.array([1+0j], dtype=complex)
+        assert_array_equal(np.abs(x), np.real(x))
+
+        x = np.array([complex(1, np.NZERO)], dtype=complex)
+        assert_array_equal(np.abs(x), np.real(x))
+
+        x = np.array([complex(np.inf, np.NZERO)], dtype=complex)
+        assert_array_equal(np.abs(x), np.real(x))
+
+        x = np.array([complex(np.nan, np.NZERO)], dtype=complex)
+        assert_array_equal(np.abs(x), np.real(x))
+
+    def test_cabs_inf_nan(self):
+        x, y = [], []
+
+        # cabs(+-nan + nani) returns nan
+        x.append(np.nan)
+        y.append(np.nan)
+        check_real_value(np.abs, np.nan, np.nan, np.nan)
+
+        x.append(np.nan)
+        y.append(-np.nan)
+        check_real_value(np.abs, -np.nan, np.nan, np.nan)
+
+        # According to the C99 standard, if exactly one of the real/imag parts
+        # is inf and the other is nan, then cabs should return inf
+        x.append(np.inf)
+        y.append(np.nan)
+        check_real_value(np.abs, np.inf, np.nan, np.inf)
+
+        x.append(-np.inf)
+        y.append(np.nan)
+        check_real_value(np.abs, -np.inf, np.nan, np.inf)
+
+        # cabs(conj(z)) == conj(cabs(z)) (= cabs(z))
+        def f(a):
+            return np.abs(np.conj(a))
+
+        def g(a, b):
+            return np.abs(complex(a, b))
+
+        xa = np.array(x, dtype=complex)
+        for i in range(len(xa)):
+            ref = g(x[i], y[i])
+            check_real_value(f, x[i], y[i], ref)
+
+class TestCarg:
+    def test_simple(self):
+        check_real_value(ncu._arg, 1, 0, 0, False)
+        check_real_value(ncu._arg, 0, 1, 0.5*np.pi, False)
+
+        check_real_value(ncu._arg, 1, 1, 0.25*np.pi, False)
+        check_real_value(ncu._arg, np.PZERO, np.PZERO, np.PZERO)
+
+    # TODO: this can become an xfail once the generator functions are gone.
+    @pytest.mark.skip(
+        reason="Complex arithmetic with signed zero fails on most platforms")
+    def test_zero(self):
+        # carg(-0 +- 0i) returns +- pi
+        check_real_value(ncu._arg, np.NZERO, np.PZERO, np.pi, False)
+        check_real_value(ncu._arg, np.NZERO, np.NZERO, -np.pi, False)
+
+        # carg(+0 +- 0i) returns +- 0
+        check_real_value(ncu._arg, np.PZERO, np.PZERO, np.PZERO)
+        check_real_value(ncu._arg, np.PZERO, np.NZERO, np.NZERO)
+
+        # carg(x +- 0i) returns +- 0 for x > 0
+        check_real_value(ncu._arg, 1, np.PZERO, np.PZERO, False)
+        check_real_value(ncu._arg, 1, np.NZERO, np.NZERO, False)
+
+        # carg(x +- 0i) returns +- pi for x < 0
+        check_real_value(ncu._arg, -1, np.PZERO, np.pi, False)
+        check_real_value(ncu._arg, -1, np.NZERO, -np.pi, False)
+
+        # carg(+- 0 + yi) returns pi/2 for y > 0
+        check_real_value(ncu._arg, np.PZERO, 1, 0.5 * np.pi, False)
+        check_real_value(ncu._arg, np.NZERO, 1, 0.5 * np.pi, False)
+
+        # carg(+- 0 + yi) returns -pi/2 for y < 0
+        check_real_value(ncu._arg, np.PZERO, -1, -0.5 * np.pi, False)
+        check_real_value(ncu._arg, np.NZERO, -1, -0.5 * np.pi, False)
+
+    #def test_branch_cuts(self):
+    #    _check_branch_cut(ncu._arg, -1, 1j, -1, 1)
+
+    def test_special_values(self):
+        # carg(-np.inf +- yi) returns +-pi for finite y > 0
+        check_real_value(ncu._arg, -np.inf, 1, np.pi, False)
+        check_real_value(ncu._arg, -np.inf, -1, -np.pi, False)
+
+        # carg(np.inf +- yi) returns +-0 for finite y > 0
+        check_real_value(ncu._arg, np.inf, 1, np.PZERO, False)
+        check_real_value(ncu._arg, np.inf, -1, np.NZERO, False)
+
+        # carg(x +- np.infi) returns +-pi/2 for finite x
+        check_real_value(ncu._arg, 1, np.inf, 0.5 * np.pi, False)
+        check_real_value(ncu._arg, 1, -np.inf, -0.5 * np.pi, False)
+
+        # carg(-np.inf +- np.infi) returns +-3pi/4
+        check_real_value(ncu._arg, -np.inf, np.inf, 0.75 * np.pi, False)
+        check_real_value(ncu._arg, -np.inf, -np.inf, -0.75 * np.pi, False)
+
+        # carg(np.inf +- np.infi) returns +-pi/4
+        check_real_value(ncu._arg, np.inf, np.inf, 0.25 * np.pi, False)
+        check_real_value(ncu._arg, np.inf, -np.inf, -0.25 * np.pi, False)
+
+        # carg(x + yi) returns np.nan if x or y is nan
+        check_real_value(ncu._arg, np.nan, 0, np.nan, False)
+        check_real_value(ncu._arg, 0, np.nan, np.nan, False)
+
+        check_real_value(ncu._arg, np.nan, np.inf, np.nan, False)
+        check_real_value(ncu._arg, np.inf, np.nan, np.nan, False)
+
+
+def check_real_value(f, x1, y1, x, exact=True):
+    z1 = np.array([complex(x1, y1)])
+    if exact:
+        assert_equal(f(z1), x)
+    else:
+        assert_almost_equal(f(z1), x)
+
+
+def check_complex_value(f, x1, y1, x2, y2, exact=True):
+    z1 = np.array([complex(x1, y1)])
+    z2 = complex(x2, y2)
+    with np.errstate(invalid='ignore'):
+        if exact:
+            assert_equal(f(z1), z2)
+        else:
+            assert_almost_equal(f(z1), z2)
+
+class TestSpecialComplexAVX:
+    @pytest.mark.parametrize("stride", [-4,-2,-1,1,2,4])
+    @pytest.mark.parametrize("astype", [np.complex64, np.complex128])
+    def test_array(self, stride, astype):
+        arr = np.array([complex(np.nan, np.nan),
+                        complex(np.nan, np.inf),
+                        complex(np.inf, np.nan),
+                        complex(np.inf, np.inf),
+                        complex(0., np.inf),
+                        complex(np.inf, 0.),
+                        complex(0., 0.),
+                        complex(0., np.nan),
+                        complex(np.nan, 0.)], dtype=astype)
+        abs_true = np.array([np.nan, np.inf, np.inf, np.inf, np.inf, np.inf,
+                             0., np.nan, np.nan], dtype=arr.real.dtype)
+        sq_true = np.array([complex(np.nan, np.nan),
+                            complex(np.nan, np.nan),
+                            complex(np.nan, np.nan),
+                            complex(np.nan, np.inf),
+                            complex(-np.inf, np.nan),
+                            complex(np.inf, np.nan),
+                            complex(0., 0.),
+                            complex(np.nan, np.nan),
+                            complex(np.nan, np.nan)], dtype=astype)
+        assert_equal(np.abs(arr[::stride]), abs_true[::stride])
+        with np.errstate(invalid='ignore'):
+            assert_equal(np.square(arr[::stride]), sq_true[::stride])
+
+class TestComplexAbsoluteAVX:
+    @pytest.mark.parametrize("arraysize", [1,2,3,4,5,6,7,8,9,10,11,13,15,17,18,19])
+    @pytest.mark.parametrize("stride", [-4,-3,-2,-1,1,2,3,4])
+    @pytest.mark.parametrize("astype", [np.complex64, np.complex128])
+    # test to ensure masking and strides work as intended in the AVX implementation
+    def test_array(self, arraysize, stride, astype):
+        arr = np.ones(arraysize, dtype=astype)
+        abs_true = np.ones(arraysize, dtype=arr.real.dtype)
+        assert_equal(np.abs(arr[::stride]), abs_true[::stride])
+
+# Testcase taken as is from https://github.com/numpy/numpy/issues/16660
+class TestComplexAbsoluteMixedDTypes:
+    @pytest.mark.parametrize("stride", [-4,-3,-2,-1,1,2,3,4])
+    @pytest.mark.parametrize("astype", [np.complex64, np.complex128])
+    @pytest.mark.parametrize("func", ['abs', 'square', 'conjugate'])
+
+    def test_array(self, stride, astype, func):
+        dtype = [('template_id', 'U')
+        uni_arr2 = str_arr.astype('>> _lib = np.ctypeslib.load_library('libmystuff', '.')   #doctest: +SKIP
+
+Our result type: an ndarray that must be of type double, 1-dimensional,
+and C-contiguous in memory:
+
+>>> array_1d_double = np.ctypeslib.ndpointer(
+...                      dtype=np.double,
+...                      ndim=1, flags='CONTIGUOUS')    #doctest: +SKIP
+
+Our C-function typically takes an array and updates its values
+in-place. For example::
+
+    void foo_func(double* x, int length)
+    {
+        int i;
+        for (i = 0; i < length; i++) {
+            x[i] = i*i;
+        }
+    }
+
+We wrap it using:
+
+>>> _lib.foo_func.restype = None    #doctest: +SKIP
+>>> _lib.foo_func.argtypes = [array_1d_double, c_int]    #doctest: +SKIP
+
+Then, we're ready to call ``foo_func``:
+
+>>> out = np.empty(15, dtype=np.double)
+>>> _lib.foo_func(out, len(out))    #doctest: +SKIP
+
+"""
+__all__ = ['load_library', 'ndpointer', 'c_intp', 'as_ctypes', 'as_array',
+           'as_ctypes_type']
+
+import os
+from numpy import (
+    integer, ndarray, dtype as _dtype, asarray, frombuffer
+)
+from numpy.core.multiarray import _flagdict, flagsobj
+
+try:
+    import ctypes
+except ImportError:
+    ctypes = None
+
+if ctypes is None:
+    def _dummy(*args, **kwds):
+        """
+        Dummy object that raises an ImportError if ctypes is not available.
+
+        Raises
+        ------
+        ImportError
+            If ctypes is not available.
+
+        """
+        raise ImportError("ctypes is not available.")
+    load_library = _dummy
+    as_ctypes = _dummy
+    as_array = _dummy
+    from numpy import intp as c_intp
+    _ndptr_base = object
+else:
+    import numpy.core._internal as nic
+    c_intp = nic._getintp_ctype()
+    del nic
+    _ndptr_base = ctypes.c_void_p
+
+    # Adapted from Albert Strasheim
+    def load_library(libname, loader_path):
+        """
+        It is possible to load a library using
+        >>> lib = ctypes.cdll[<full_path_name>] # doctest: +SKIP
+
+        But there are cross-platform considerations, such as library file extensions,
+        plus the fact Windows will just load the first library it finds with that name.
+ NumPy supplies the load_library function as a convenience. + + Parameters + ---------- + libname : str + Name of the library, which can have 'lib' as a prefix, + but without an extension. + loader_path : str + Where the library can be found. + + Returns + ------- + ctypes.cdll[libpath] : library object + A ctypes library object + + Raises + ------ + OSError + If there is no library with the expected extension, or the + library is defective and cannot be loaded. + """ + if ctypes.__version__ < '1.0.1': + import warnings + warnings.warn("All features of ctypes interface may not work " + "with ctypes < 1.0.1", stacklevel=2) + + ext = os.path.splitext(libname)[1] + if not ext: + # Try to load library with platform-specific name, otherwise + # default to libname.[so|pyd]. Sometimes, these files are built + # erroneously on non-linux platforms. + from numpy.distutils.misc_util import get_shared_lib_extension + so_ext = get_shared_lib_extension() + libname_ext = [libname + so_ext] + # mac, windows and linux >= py3.2 shared library and loadable + # module have different extensions so try both + so_ext2 = get_shared_lib_extension(is_python_ext=True) + if not so_ext2 == so_ext: + libname_ext.insert(0, libname + so_ext2) + else: + libname_ext = [libname] + + loader_path = os.path.abspath(loader_path) + if not os.path.isdir(loader_path): + libdir = os.path.dirname(loader_path) + else: + libdir = loader_path + + for ln in libname_ext: + libpath = os.path.join(libdir, ln) + if os.path.exists(libpath): + try: + return ctypes.cdll[libpath] + except OSError: + ## defective lib file + raise + ## if no successful return in the libname_ext loop: + raise OSError("no file with expected extension") + + +def _num_fromflags(flaglist): + num = 0 + for val in flaglist: + num += _flagdict[val] + return num + +_flagnames = ['C_CONTIGUOUS', 'F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE', + 'OWNDATA', 'UPDATEIFCOPY', 'WRITEBACKIFCOPY'] +def _flags_fromnum(num): + res = [] + for key in _flagnames: + value = _flagdict[key] + if (num & value): + res.append(key) + return res + + +class _ndptr(_ndptr_base): + @classmethod + def from_param(cls, obj): + if not isinstance(obj, ndarray): + raise TypeError("argument must be an ndarray") + if cls._dtype_ is not None \ + and obj.dtype != cls._dtype_: + raise TypeError("array must have data type %s" % cls._dtype_) + if cls._ndim_ is not None \ + and obj.ndim != cls._ndim_: + raise TypeError("array must have %d dimension(s)" % cls._ndim_) + if cls._shape_ is not None \ + and obj.shape != cls._shape_: + raise TypeError("array must have shape %s" % str(cls._shape_)) + if cls._flags_ is not None \ + and ((obj.flags.num & cls._flags_) != cls._flags_): + raise TypeError("array must have flags %s" % + _flags_fromnum(cls._flags_)) + return obj.ctypes + + +class _concrete_ndptr(_ndptr): + """ + Like _ndptr, but with `_shape_` and `_dtype_` specified. + + Notably, this means the pointer has enough information to reconstruct + the array, which is not generally true. + """ + def _check_retval_(self): + """ + This method is called when this class is used as the .restype + attribute for a shared-library function, to automatically wrap the + pointer into an array. + """ + return self.contents + + @property + def contents(self): + """ + Get an ndarray viewing the data pointed to by this pointer. 
+
+        This mirrors the `contents` attribute of a normal ctypes pointer.
+        """
+        full_dtype = _dtype((self._dtype_, self._shape_))
+        full_ctype = ctypes.c_char * full_dtype.itemsize
+        buffer = ctypes.cast(self, ctypes.POINTER(full_ctype)).contents
+        return frombuffer(buffer, dtype=full_dtype).squeeze(axis=0)
+
+
+# Factory for an array-checking class with from_param defined for
+# use with ctypes argtypes mechanism
+_pointer_type_cache = {}
+def ndpointer(dtype=None, ndim=None, shape=None, flags=None):
+    """
+    Array-checking restype/argtypes.
+
+    An ndpointer instance is used to describe an ndarray in restypes
+    and argtypes specifications.  This approach is more flexible than
+    using, for example, ``POINTER(c_double)``, since several restrictions
+    can be specified, which are verified upon calling the ctypes function.
+    These include data type, number of dimensions, shape and flags.  If a
+    given array does not satisfy the specified restrictions,
+    a ``TypeError`` is raised.
+
+    Parameters
+    ----------
+    dtype : data-type, optional
+        Array data-type.
+    ndim : int, optional
+        Number of array dimensions.
+    shape : tuple of ints, optional
+        Array shape.
+    flags : str or tuple of str
+        Array flags; may be one or more of:
+
+        - C_CONTIGUOUS / C / CONTIGUOUS
+        - F_CONTIGUOUS / F / FORTRAN
+        - OWNDATA / O
+        - WRITEABLE / W
+        - ALIGNED / A
+        - WRITEBACKIFCOPY / X
+        - UPDATEIFCOPY / U
+
+    Returns
+    -------
+    klass : ndpointer type object
+        A type object, which is an ``_ndptr`` instance containing
+        dtype, ndim, shape and flags information.
+
+    Raises
+    ------
+    TypeError
+        If a given array does not satisfy the specified restrictions.
+
+    Examples
+    --------
+    >>> clib.somefunc.argtypes = [np.ctypeslib.ndpointer(dtype=np.float64,
+    ...                                                  ndim=1,
+    ...                                                  flags='C_CONTIGUOUS')]
+    ... #doctest: +SKIP
+    >>> clib.somefunc(np.array([1, 2, 3], dtype=np.float64))
+    ...
#doctest: +SKIP + + """ + + # normalize dtype to an Optional[dtype] + if dtype is not None: + dtype = _dtype(dtype) + + # normalize flags to an Optional[int] + num = None + if flags is not None: + if isinstance(flags, str): + flags = flags.split(',') + elif isinstance(flags, (int, integer)): + num = flags + flags = _flags_fromnum(num) + elif isinstance(flags, flagsobj): + num = flags.num + flags = _flags_fromnum(num) + if num is None: + try: + flags = [x.strip().upper() for x in flags] + except Exception as e: + raise TypeError("invalid flags specification") from e + num = _num_fromflags(flags) + + # normalize shape to an Optional[tuple] + if shape is not None: + try: + shape = tuple(shape) + except TypeError: + # single integer -> 1-tuple + shape = (shape,) + + cache_key = (dtype, ndim, shape, num) + + try: + return _pointer_type_cache[cache_key] + except KeyError: + pass + + # produce a name for the new type + if dtype is None: + name = 'any' + elif dtype.names is not None: + name = str(id(dtype)) + else: + name = dtype.str + if ndim is not None: + name += "_%dd" % ndim + if shape is not None: + name += "_"+"x".join(str(x) for x in shape) + if flags is not None: + name += "_"+"_".join(flags) + + if dtype is not None and shape is not None: + base = _concrete_ndptr + else: + base = _ndptr + + klass = type("ndpointer_%s"%name, (base,), + {"_dtype_": dtype, + "_shape_" : shape, + "_ndim_" : ndim, + "_flags_" : num}) + _pointer_type_cache[cache_key] = klass + return klass + + +if ctypes is not None: + def _ctype_ndarray(element_type, shape): + """ Create an ndarray of the given element type and shape """ + for dim in shape[::-1]: + element_type = dim * element_type + # prevent the type name include np.ctypeslib + element_type.__module__ = None + return element_type + + + def _get_scalar_type_map(): + """ + Return a dictionary mapping native endian scalar dtype to ctypes types + """ + ct = ctypes + simple_types = [ + ct.c_byte, ct.c_short, ct.c_int, ct.c_long, ct.c_longlong, + ct.c_ubyte, ct.c_ushort, ct.c_uint, ct.c_ulong, ct.c_ulonglong, + ct.c_float, ct.c_double, + ct.c_bool, + ] + return {_dtype(ctype): ctype for ctype in simple_types} + + + _scalar_type_map = _get_scalar_type_map() + + + def _ctype_from_dtype_scalar(dtype): + # swapping twice ensure that `=` is promoted to <, >, or | + dtype_with_endian = dtype.newbyteorder('S').newbyteorder('S') + dtype_native = dtype.newbyteorder('=') + try: + ctype = _scalar_type_map[dtype_native] + except KeyError as e: + raise NotImplementedError( + "Converting {!r} to a ctypes type".format(dtype) + ) from None + + if dtype_with_endian.byteorder == '>': + ctype = ctype.__ctype_be__ + elif dtype_with_endian.byteorder == '<': + ctype = ctype.__ctype_le__ + + return ctype + + + def _ctype_from_dtype_subarray(dtype): + element_dtype, shape = dtype.subdtype + ctype = _ctype_from_dtype(element_dtype) + return _ctype_ndarray(ctype, shape) + + + def _ctype_from_dtype_structured(dtype): + # extract offsets of each field + field_data = [] + for name in dtype.names: + field_dtype, offset = dtype.fields[name][:2] + field_data.append((offset, name, _ctype_from_dtype(field_dtype))) + + # ctypes doesn't care about field order + field_data = sorted(field_data, key=lambda f: f[0]) + + if len(field_data) > 1 and all(offset == 0 for offset, name, ctype in field_data): + # union, if multiple fields all at address 0 + size = 0 + _fields_ = [] + for offset, name, ctype in field_data: + _fields_.append((name, ctype)) + size = max(size, ctypes.sizeof(ctype)) + + # pad to 
the right size + if dtype.itemsize != size: + _fields_.append(('', ctypes.c_char * dtype.itemsize)) + + # we inserted manual padding, so always `_pack_` + return type('union', (ctypes.Union,), dict( + _fields_=_fields_, + _pack_=1, + __module__=None, + )) + else: + last_offset = 0 + _fields_ = [] + for offset, name, ctype in field_data: + padding = offset - last_offset + if padding < 0: + raise NotImplementedError("Overlapping fields") + if padding > 0: + _fields_.append(('', ctypes.c_char * padding)) + + _fields_.append((name, ctype)) + last_offset = offset + ctypes.sizeof(ctype) + + + padding = dtype.itemsize - last_offset + if padding > 0: + _fields_.append(('', ctypes.c_char * padding)) + + # we inserted manual padding, so always `_pack_` + return type('struct', (ctypes.Structure,), dict( + _fields_=_fields_, + _pack_=1, + __module__=None, + )) + + + def _ctype_from_dtype(dtype): + if dtype.fields is not None: + return _ctype_from_dtype_structured(dtype) + elif dtype.subdtype is not None: + return _ctype_from_dtype_subarray(dtype) + else: + return _ctype_from_dtype_scalar(dtype) + + + def as_ctypes_type(dtype): + r""" + Convert a dtype into a ctypes type. + + Parameters + ---------- + dtype : dtype + The dtype to convert + + Returns + ------- + ctype + A ctype scalar, union, array, or struct + + Raises + ------ + NotImplementedError + If the conversion is not possible + + Notes + ----- + This function does not losslessly round-trip in either direction. + + ``np.dtype(as_ctypes_type(dt))`` will: + + - insert padding fields + - reorder fields to be sorted by offset + - discard field titles + + ``as_ctypes_type(np.dtype(ctype))`` will: + + - discard the class names of `ctypes.Structure`\ s and + `ctypes.Union`\ s + - convert single-element `ctypes.Union`\ s into single-element + `ctypes.Structure`\ s + - insert padding fields + + """ + return _ctype_from_dtype(_dtype(dtype)) + + + def as_array(obj, shape=None): + """ + Create a numpy array from a ctypes array or POINTER. + + The numpy array shares the memory with the ctypes object. + + The shape parameter must be given if converting from a ctypes POINTER. + The shape parameter is ignored if converting from a ctypes array + """ + if isinstance(obj, ctypes._Pointer): + # convert pointers to an array of the desired shape + if shape is None: + raise TypeError( + 'as_array() requires a shape argument when called on a ' + 'pointer') + p_arr_type = ctypes.POINTER(_ctype_ndarray(obj._type_, shape)) + obj = ctypes.cast(obj, p_arr_type).contents + + return asarray(obj) + + + def as_ctypes(obj): + """Create and return a ctypes object from a numpy array. 
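+
+        A hedged sketch; the returned ctypes object shares the array's memory:
+
+        >>> arr = np.zeros((2, 3), dtype=np.int32) # doctest: +SKIP
+        >>> c_arr = np.ctypeslib.as_ctypes(arr) # doctest: +SKIP
+        >>> c_arr[0][0] = 99 # doctest: +SKIP
+        >>> arr[0, 0] # the write is visible through the ndarray # doctest: +SKIP
+        99
+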
Actually + anything that exposes the __array_interface__ is accepted.""" + ai = obj.__array_interface__ + if ai["strides"]: + raise TypeError("strided arrays not supported") + if ai["version"] != 3: + raise TypeError("only __array_interface__ version 3 supported") + addr, readonly = ai["data"] + if readonly: + raise TypeError("readonly arrays unsupported") + + # can't use `_dtype((ai["typestr"], ai["shape"]))` here, as it overflows + # dtype.itemsize (gh-14214) + ctype_scalar = as_ctypes_type(ai["typestr"]) + result_type = _ctype_ndarray(ctype_scalar, ai["shape"]) + result = result_type.from_address(addr) + result.__keep = obj + return result diff --git a/MLPY/Lib/site-packages/numpy/ctypeslib.pyi b/MLPY/Lib/site-packages/numpy/ctypeslib.pyi new file mode 100644 index 0000000000000000000000000000000000000000..68fbeb93b562448bc3f6ef99c32357be1da1eb21 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/ctypeslib.pyi @@ -0,0 +1,14 @@ +from typing import List, Type +from ctypes import _SimpleCData + +__all__: List[str] + +# TODO: Update the `npt.mypy_plugin` such that it substitutes `c_intp` for +# a specific `_SimpleCData[int]` subclass (e.g. `ctypes.c_long`) +c_intp: Type[_SimpleCData[int]] + +def load_library(libname, loader_path): ... +def ndpointer(dtype=..., ndim=..., shape=..., flags=...): ... +def as_ctypes(obj): ... +def as_array(obj, shape=...): ... +def as_ctypes_type(dtype): ... diff --git a/MLPY/Lib/site-packages/numpy/distutils/__config__.py b/MLPY/Lib/site-packages/numpy/distutils/__config__.py new file mode 100644 index 0000000000000000000000000000000000000000..d3923fc4b16ca1581c79b4283de3bd9e477fb6c8 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/__config__.py @@ -0,0 +1,98 @@ +# This file is generated by numpy's setup.py +# It contains system_info results at the time of building this package. +__all__ = ["get_info","show"] + + +import os +import sys + +extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs') + +if sys.platform == 'win32' and os.path.isdir(extra_dll_dir): + if sys.version_info >= (3, 8): + os.add_dll_directory(extra_dll_dir) + else: + os.environ.setdefault('PATH', '') + os.environ['PATH'] += os.pathsep + extra_dll_dir + +blas_mkl_info={} +blis_info={} +openblas_info={'library_dirs': ['D:\\a\\1\\s\\numpy\\build\\openblas_info'], 'libraries': ['openblas_info'], 'language': 'f77', 'define_macros': [('HAVE_CBLAS', None)]} +blas_opt_info={'library_dirs': ['D:\\a\\1\\s\\numpy\\build\\openblas_info'], 'libraries': ['openblas_info'], 'language': 'f77', 'define_macros': [('HAVE_CBLAS', None)]} +lapack_mkl_info={} +openblas_lapack_info={'library_dirs': ['D:\\a\\1\\s\\numpy\\build\\openblas_lapack_info'], 'libraries': ['openblas_lapack_info'], 'language': 'f77', 'define_macros': [('HAVE_CBLAS', None)]} +lapack_opt_info={'library_dirs': ['D:\\a\\1\\s\\numpy\\build\\openblas_lapack_info'], 'libraries': ['openblas_lapack_info'], 'language': 'f77', 'define_macros': [('HAVE_CBLAS', None)]} + +def get_info(name): + g = globals() + return g.get(name, g.get(name + "_info", {})) + +def show(): + """ + Show libraries in the system on which NumPy was built. + + Print information about various resources (libraries, library + directories, include directories, etc.) in the system on which + NumPy was built. + + See Also + -------- + get_include : Returns the directory containing NumPy C + header files. + + Notes + ----- + Classes specifying the information to be printed are defined + in the `numpy.distutils.system_info` module. 
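+
+    The companion ``get_info`` helper defined above simply looks a name up
+    in this module's globals, falling back to ``name + "_info"``. A hedged
+    sketch (the values reflect this particular build):
+
+    >>> from numpy.distutils.__config__ import get_info # doctest: +SKIP
+    >>> get_info('blas_opt')['language'] # doctest: +SKIP
+    'f77'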
+ + Information may include: + + * ``language``: language used to write the libraries (mostly + C or f77) + * ``libraries``: names of libraries found in the system + * ``library_dirs``: directories containing the libraries + * ``include_dirs``: directories containing library header files + * ``src_dirs``: directories containing library source files + * ``define_macros``: preprocessor macros used by + ``distutils.setup`` + * ``baseline``: minimum CPU features required + * ``found``: dispatched features supported in the system + * ``not found``: dispatched features that are not supported + in the system + + Examples + -------- + >>> import numpy as np + >>> np.show_config() + blas_opt_info: + language = c + define_macros = [('HAVE_CBLAS', None)] + libraries = ['openblas', 'openblas'] + library_dirs = ['/usr/local/lib'] + """ + from numpy.core._multiarray_umath import ( + __cpu_features__, __cpu_baseline__, __cpu_dispatch__ + ) + for name,info_dict in globals().items(): + if name[0] == "_" or type(info_dict) is not type({}): continue + print(name + ":") + if not info_dict: + print(" NOT AVAILABLE") + for k,v in info_dict.items(): + v = str(v) + if k == "sources" and len(v) > 200: + v = v[:60] + " ...\n... " + v[-60:] + print(" %s = %s" % (k,v)) + + features_found, features_not_found = [], [] + for feature in __cpu_dispatch__: + if __cpu_features__[feature]: + features_found.append(feature) + else: + features_not_found.append(feature) + + print("Supported SIMD extensions in this NumPy install:") + print(" baseline = %s" % (','.join(__cpu_baseline__))) + print(" found = %s" % (','.join(features_found))) + print(" not found = %s" % (','.join(features_not_found))) + diff --git a/MLPY/Lib/site-packages/numpy/distutils/__init__.py b/MLPY/Lib/site-packages/numpy/distutils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..506520ad8ccb76307fd50ac42b9ba2dab550e0ca --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/__init__.py @@ -0,0 +1,51 @@ +""" +An enhanced distutils, providing support for Fortran compilers, for BLAS, +LAPACK and other common libraries for numerical computing, and more. + +Public submodules are:: + + misc_util + system_info + cpu_info + log + exec_command + +For details, please see the *Packaging* and *NumPy Distutils User Guide* +sections of the NumPy Reference Guide. + +For configuring the preference for and location of libraries like BLAS and +LAPACK, and for setting include paths and similar build options, please see +``site.cfg.example`` in the root of the NumPy repository or sdist. + +""" + +# Must import local ccompiler ASAP in order to get +# customized CCompiler.spawn effective. +from . import ccompiler +from . import unixccompiler + +from .npy_pkg_config import * + +# If numpy is installed, add distutils.test() +try: + from . import __config__ + # Normally numpy is installed if the above import works, but an interrupted + # in-place build could also have left a __config__.py. In that case the + # next import may still fail, so keep it inside the try block. 
+ from numpy._pytesttester import PytestTester + test = PytestTester(__name__) + del PytestTester +except ImportError: + pass + + +def customized_fcompiler(plat=None, compiler=None): + from numpy.distutils.fcompiler import new_fcompiler + c = new_fcompiler(plat=plat, compiler=compiler) + c.customize() + return c + +def customized_ccompiler(plat=None, compiler=None, verbose=1): + c = ccompiler.new_compiler(plat=plat, compiler=compiler, verbose=verbose) + c.customize('') + return c diff --git a/MLPY/Lib/site-packages/numpy/distutils/__init__.pyi b/MLPY/Lib/site-packages/numpy/distutils/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..48c64650efee3596c29b5e86e5079382e8ee0966 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/__init__.pyi @@ -0,0 +1,4 @@ +from typing import Any + +# TODO: remove when the full numpy namespace is defined +def __getattr__(name: str) -> Any: ... diff --git a/MLPY/Lib/site-packages/numpy/distutils/__pycache__/__config__.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/__config__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d95387cd812c93d8a93787189490996531446c4f Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/__config__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3d20379d6d386c71a644a0fba7c05d345d5598d Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/__pycache__/_shell_utils.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/_shell_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a539357cb1e19f3ea918260eccf70deb0d14c911 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/_shell_utils.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/__pycache__/ccompiler.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/ccompiler.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a36dcab33561c5bb4025b5e7e1af913e41da831 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/ccompiler.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/__pycache__/ccompiler_opt.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/ccompiler_opt.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b37ed68f726de31d962367214f93c986c52e5940 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/ccompiler_opt.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/__pycache__/conv_template.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/conv_template.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97357c837674e69f29f930df58271d8bf3f238c4 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/conv_template.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/__pycache__/core.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/core.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..f4bc7b8573080f4f9397b0f5046169ad6f2211dd Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/core.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/__pycache__/cpuinfo.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/cpuinfo.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc13c3d8879ab97fe4c530f78ce35ad467534e3d Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/cpuinfo.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/__pycache__/exec_command.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/exec_command.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d587ed257cf1bf9334fcf5272df90cf193d62141 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/exec_command.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/__pycache__/extension.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/extension.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49ac3ef92d57be060db177b3d815728bdab52684 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/extension.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/__pycache__/from_template.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/from_template.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2db2af8ae52ebcf4639e9b65ba1688b1df848724 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/from_template.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/__pycache__/intelccompiler.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/intelccompiler.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19ce61b44364bcd6f88c8fe93009961c504bf018 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/intelccompiler.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/__pycache__/lib2def.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/lib2def.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d8a35a42bbc5134f25a6886e01c2616918510c67 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/lib2def.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/__pycache__/line_endings.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/line_endings.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97c7296da27c64df1954c21c9e1cb4fbeaf9c823 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/line_endings.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/__pycache__/log.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/log.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3b3b088243c0992522323be121886a87019e769 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/log.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/__pycache__/mingw32ccompiler.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/mingw32ccompiler.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..7095fc5cde6982ed51d63235d97ce55588ff93c2 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/mingw32ccompiler.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/__pycache__/misc_util.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/misc_util.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b307ecbc352c81f051cc71add2cdb80e25dc42a8 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/misc_util.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/__pycache__/msvc9compiler.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/msvc9compiler.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee3801e6a43163ace1694a4cacfbb00b336d646b Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/msvc9compiler.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/__pycache__/msvccompiler.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/msvccompiler.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9698f4b7f10035abb6e03f18589cb353069bcd80 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/msvccompiler.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/__pycache__/npy_pkg_config.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/npy_pkg_config.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91967f855b7dae650bab3a3e27e428c316d3577c Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/npy_pkg_config.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/__pycache__/numpy_distribution.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/numpy_distribution.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f94d51f69152c63812b592c5ccba0dd95890687a Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/numpy_distribution.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/__pycache__/pathccompiler.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/pathccompiler.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f2e3dbb6c406059915c79ce40e63ab13189d54ae Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/pathccompiler.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/__pycache__/setup.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/setup.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..82efb8878094d22762853d20ea27ea6e6388b259 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/setup.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/__pycache__/system_info.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/system_info.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e1ca177b1aa8d5ea2017a509e5615bc5b72fc86a Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/system_info.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/__pycache__/unixccompiler.cpython-39.pyc 
b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/unixccompiler.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..434746208ccdbfc42838cd30f290d501a035eff3 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/__pycache__/unixccompiler.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/_shell_utils.py b/MLPY/Lib/site-packages/numpy/distutils/_shell_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ca5316d87ad8142763e1856b8df8f0477e29346d --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/_shell_utils.py @@ -0,0 +1,91 @@ +""" +Helper functions for interacting with the shell, and consuming shell-style +parameters provided in config files. +""" +import os +import shlex +import subprocess +try: + from shlex import quote +except ImportError: + from pipes import quote + +__all__ = ['WindowsParser', 'PosixParser', 'NativeParser'] + + +class CommandLineParser: + """ + An object that knows how to split and join command-line arguments. + + It must be true that ``argv == split(join(argv))`` for all ``argv``. + The reverse neednt be true - `join(split(cmd))` may result in the addition + or removal of unnecessary escaping. + """ + @staticmethod + def join(argv): + """ Join a list of arguments into a command line string """ + raise NotImplementedError + + @staticmethod + def split(cmd): + """ Split a command line string into a list of arguments """ + raise NotImplementedError + + +class WindowsParser: + """ + The parsing behavior used by `subprocess.call("string")` on Windows, which + matches the Microsoft C/C++ runtime. + + Note that this is _not_ the behavior of cmd. + """ + @staticmethod + def join(argv): + # note that list2cmdline is specific to the windows syntax + return subprocess.list2cmdline(argv) + + @staticmethod + def split(cmd): + import ctypes # guarded import for systems without ctypes + try: + ctypes.windll + except AttributeError: + raise NotImplementedError + + # Windows has special parsing rules for the executable (no quotes), + # that we do not care about - insert a dummy element + if not cmd: + return [] + cmd = 'dummy ' + cmd + + CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW + CommandLineToArgvW.restype = ctypes.POINTER(ctypes.c_wchar_p) + CommandLineToArgvW.argtypes = (ctypes.c_wchar_p, ctypes.POINTER(ctypes.c_int)) + + nargs = ctypes.c_int() + lpargs = CommandLineToArgvW(cmd, ctypes.byref(nargs)) + args = [lpargs[i] for i in range(nargs.value)] + assert not ctypes.windll.kernel32.LocalFree(lpargs) + + # strip the element we inserted + assert args[0] == "dummy" + return args[1:] + + +class PosixParser: + """ + The parsing behavior used by `subprocess.call("string", shell=True)` on Posix. 
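+
+    A quick doctest-style sketch of both directions:
+
+    >>> PosixParser.split("gcc -I 'dir with spaces' -O2")
+    ['gcc', '-I', 'dir with spaces', '-O2']
+    >>> PosixParser.join(['dir with spaces'])
+    "'dir with spaces'"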
+ """ + @staticmethod + def join(argv): + return ' '.join(quote(arg) for arg in argv) + + @staticmethod + def split(cmd): + return shlex.split(cmd, posix=True) + + +if os.name == 'nt': + NativeParser = WindowsParser +elif os.name == 'posix': + NativeParser = PosixParser diff --git a/MLPY/Lib/site-packages/numpy/distutils/ccompiler.py b/MLPY/Lib/site-packages/numpy/distutils/ccompiler.py new file mode 100644 index 0000000000000000000000000000000000000000..e655aac3d0189fc65d80c4cb92f34b79610e7de8 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/ccompiler.py @@ -0,0 +1,795 @@ +import os +import re +import sys +import shlex +import time +import subprocess +from copy import copy +from distutils import ccompiler +from distutils.ccompiler import ( + compiler_class, gen_lib_options, get_default_compiler, new_compiler, + CCompiler +) +from distutils.errors import ( + DistutilsExecError, DistutilsModuleError, DistutilsPlatformError, + CompileError, UnknownFileError +) +from distutils.sysconfig import customize_compiler +from distutils.version import LooseVersion + +from numpy.distutils import log +from numpy.distutils.exec_command import ( + filepath_from_subprocess_output, forward_bytes_to_stdout +) +from numpy.distutils.misc_util import cyg2win32, is_sequence, mingw32, \ + get_num_build_jobs, \ + _commandline_dep_string + +# globals for parallel build management +import threading + +_job_semaphore = None +_global_lock = threading.Lock() +_processing_files = set() + + +def _needs_build(obj, cc_args, extra_postargs, pp_opts): + """ + Check if an objects needs to be rebuild based on its dependencies + + Parameters + ---------- + obj : str + object file + + Returns + ------- + bool + """ + # defined in unixcompiler.py + dep_file = obj + '.d' + if not os.path.exists(dep_file): + return True + + # dep_file is a makefile containing 'object: dependencies' + # formatted like posix shell (spaces escaped, \ line continuations) + # the last line contains the compiler commandline arguments as some + # projects may compile an extension multiple times with different + # arguments + with open(dep_file, "r") as f: + lines = f.readlines() + + cmdline =_commandline_dep_string(cc_args, extra_postargs, pp_opts) + last_cmdline = lines[-1] + if last_cmdline != cmdline: + return True + + contents = ''.join(lines[:-1]) + deps = [x for x in shlex.split(contents, posix=True) + if x != "\n" and not x.endswith(":")] + + try: + t_obj = os.stat(obj).st_mtime + + # check if any of the dependencies is newer than the object + # the dependencies includes the source used to create the object + for f in deps: + if os.stat(f).st_mtime > t_obj: + return True + except OSError: + # no object counts as newer (shouldn't happen if dep_file exists) + return True + + return False + + +def replace_method(klass, method_name, func): + # Py3k does not have unbound method anymore, MethodType does not work + m = lambda self, *args, **kw: func(self, *args, **kw) + setattr(klass, method_name, m) + + +###################################################################### +## Method that subclasses may redefine. But don't call this method, +## it i private to CCompiler class and may return unexpected +## results if used elsewhere. So, you have been warned.. + +def CCompiler_find_executables(self): + """ + Does nothing here, but is called by the get_version method and can be + overridden by subclasses. In particular it is redefined in the `FCompiler` + class where more documentation can be found. 
+ + """ + pass + + +replace_method(CCompiler, 'find_executables', CCompiler_find_executables) + + +# Using customized CCompiler.spawn. +def CCompiler_spawn(self, cmd, display=None): + """ + Execute a command in a sub-process. + + Parameters + ---------- + cmd : str + The command to execute. + display : str or sequence of str, optional + The text to add to the log file kept by `numpy.distutils`. + If not given, `display` is equal to `cmd`. + + Returns + ------- + None + + Raises + ------ + DistutilsExecError + If the command failed, i.e. the exit status was not 0. + + """ + if display is None: + display = cmd + if is_sequence(display): + display = ' '.join(list(display)) + log.info(display) + try: + if self.verbose: + subprocess.check_output(cmd) + else: + subprocess.check_output(cmd, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as exc: + o = exc.output + s = exc.returncode + except OSError: + # OSError doesn't have the same hooks for the exception + # output, but exec_command() historically would use an + # empty string for EnvironmentError (base class for + # OSError) + o = b'' + # status previously used by exec_command() for parent + # of OSError + s = 127 + else: + # use a convenience return here so that any kind of + # caught exception will execute the default code after the + # try / except block, which handles various exceptions + return None + + if is_sequence(cmd): + cmd = ' '.join(list(cmd)) + + if self.verbose: + forward_bytes_to_stdout(o) + + if re.search(b'Too many open files', o): + msg = '\nTry rerunning setup command until build succeeds.' + else: + msg = '' + raise DistutilsExecError('Command "%s" failed with exit status %d%s' % + (cmd, s, msg)) + +replace_method(CCompiler, 'spawn', CCompiler_spawn) + +def CCompiler_object_filenames(self, source_filenames, strip_dir=0, output_dir=''): + """ + Return the name of the object files for the given source files. + + Parameters + ---------- + source_filenames : list of str + The list of paths to source files. Paths can be either relative or + absolute, this is handled transparently. + strip_dir : bool, optional + Whether to strip the directory from the returned paths. If True, + the file name prepended by `output_dir` is returned. Default is False. + output_dir : str, optional + If given, this path is prepended to the returned paths to the + object files. + + Returns + ------- + obj_names : list of str + The list of paths to the object files corresponding to the source + files in `source_filenames`. + + """ + if output_dir is None: + output_dir = '' + obj_names = [] + for src_name in source_filenames: + base, ext = os.path.splitext(os.path.normpath(src_name)) + base = os.path.splitdrive(base)[1] # Chop off the drive + base = base[os.path.isabs(base):] # If abs, chop off leading / + if base.startswith('..'): + # Resolve starting relative path components, middle ones + # (if any) have been handled by os.path.normpath above. 
+ i = base.rfind('..')+2 + d = base[:i] + d = os.path.basename(os.path.abspath(d)) + base = d + base[i:] + if ext not in self.src_extensions: + raise UnknownFileError("unknown file type '%s' (from '%s')" % (ext, src_name)) + if strip_dir: + base = os.path.basename(base) + obj_name = os.path.join(output_dir, base + self.obj_extension) + obj_names.append(obj_name) + return obj_names + +replace_method(CCompiler, 'object_filenames', CCompiler_object_filenames) + +def CCompiler_compile(self, sources, output_dir=None, macros=None, + include_dirs=None, debug=0, extra_preargs=None, + extra_postargs=None, depends=None): + """ + Compile one or more source files. + + Please refer to the Python distutils API reference for more details. + + Parameters + ---------- + sources : list of str + A list of filenames + output_dir : str, optional + Path to the output directory. + macros : list of tuples + A list of macro definitions. + include_dirs : list of str, optional + The directories to add to the default include file search path for + this compilation only. + debug : bool, optional + Whether or not to output debug symbols in or alongside the object + file(s). + extra_preargs, extra_postargs : ? + Extra pre- and post-arguments. + depends : list of str, optional + A list of file names that all targets depend on. + + Returns + ------- + objects : list of str + A list of object file names, one per source file `sources`. + + Raises + ------ + CompileError + If compilation fails. + + """ + # This method is effective only with Python >=2.3 distutils. + # Any changes here should be applied also to fcompiler.compile + # method to support pre Python 2.3 distutils. + global _job_semaphore + + jobs = get_num_build_jobs() + + # setup semaphore to not exceed number of compile jobs when parallelized at + # extension level (python >= 3.5) + with _global_lock: + if _job_semaphore is None: + _job_semaphore = threading.Semaphore(jobs) + + if not sources: + return [] + from numpy.distutils.fcompiler import (FCompiler, is_f_file, + has_f90_header) + if isinstance(self, FCompiler): + display = [] + for fc in ['f77', 'f90', 'fix']: + fcomp = getattr(self, 'compiler_'+fc) + if fcomp is None: + continue + display.append("Fortran %s compiler: %s" % (fc, ' '.join(fcomp))) + display = '\n'.join(display) + else: + ccomp = self.compiler_so + display = "C compiler: %s\n" % (' '.join(ccomp),) + log.info(display) + macros, objects, extra_postargs, pp_opts, build = \ + self._setup_compile(output_dir, macros, include_dirs, sources, + depends, extra_postargs) + cc_args = self._get_cc_args(pp_opts, debug, extra_preargs) + display = "compile options: '%s'" % (' '.join(cc_args)) + if extra_postargs: + display += "\nextra options: '%s'" % (' '.join(extra_postargs)) + log.info(display) + + def single_compile(args): + obj, (src, ext) = args + if not _needs_build(obj, cc_args, extra_postargs, pp_opts): + return + + # check if we are currently already processing the same object + # happens when using the same source in multiple extensions + while True: + # need explicit lock as there is no atomic check and add with GIL + with _global_lock: + # file not being worked on, start working + if obj not in _processing_files: + _processing_files.add(obj) + break + # wait for the processing to end + time.sleep(0.1) + + try: + # retrieve slot from our #job semaphore and build + with _job_semaphore: + self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts) + finally: + # register being done processing + with _global_lock: + 
_processing_files.remove(obj) + + + if isinstance(self, FCompiler): + objects_to_build = list(build.keys()) + f77_objects, other_objects = [], [] + for obj in objects: + if obj in objects_to_build: + src, ext = build[obj] + if self.compiler_type=='absoft': + obj = cyg2win32(obj) + src = cyg2win32(src) + if is_f_file(src) and not has_f90_header(src): + f77_objects.append((obj, (src, ext))) + else: + other_objects.append((obj, (src, ext))) + + # f77 objects can be built in parallel + build_items = f77_objects + # build f90 modules serial, module files are generated during + # compilation and may be used by files later in the list so the + # ordering is important + for o in other_objects: + single_compile(o) + else: + build_items = build.items() + + if len(build) > 1 and jobs > 1: + # build parallel + import multiprocessing.pool + pool = multiprocessing.pool.ThreadPool(jobs) + pool.map(single_compile, build_items) + pool.close() + else: + # build serial + for o in build_items: + single_compile(o) + + # Return *all* object filenames, not just the ones we just built. + return objects + +replace_method(CCompiler, 'compile', CCompiler_compile) + +def CCompiler_customize_cmd(self, cmd, ignore=()): + """ + Customize compiler using distutils command. + + Parameters + ---------- + cmd : class instance + An instance inheriting from `distutils.cmd.Command`. + ignore : sequence of str, optional + List of `CCompiler` commands (without ``'set_'``) that should not be + altered. Strings that are checked for are: + ``('include_dirs', 'define', 'undef', 'libraries', 'library_dirs', + 'rpath', 'link_objects')``. + + Returns + ------- + None + + """ + log.info('customize %s using %s' % (self.__class__.__name__, + cmd.__class__.__name__)) + + if hasattr(self, 'compiler') and 'clang' in self.compiler[0]: + # clang defaults to a non-strict floating error point model. + # Since NumPy and most Python libs give warnings for these, override: + self.compiler.append('-ffp-exception-behavior=strict') + + def allow(attr): + return getattr(cmd, attr, None) is not None and attr not in ignore + + if allow('include_dirs'): + self.set_include_dirs(cmd.include_dirs) + if allow('define'): + for (name, value) in cmd.define: + self.define_macro(name, value) + if allow('undef'): + for macro in cmd.undef: + self.undefine_macro(macro) + if allow('libraries'): + self.set_libraries(self.libraries + cmd.libraries) + if allow('library_dirs'): + self.set_library_dirs(self.library_dirs + cmd.library_dirs) + if allow('rpath'): + self.set_runtime_library_dirs(cmd.rpath) + if allow('link_objects'): + self.set_link_objects(cmd.link_objects) + +replace_method(CCompiler, 'customize_cmd', CCompiler_customize_cmd) + +def _compiler_to_string(compiler): + props = [] + mx = 0 + keys = list(compiler.executables.keys()) + for key in ['version', 'libraries', 'library_dirs', + 'object_switch', 'compile_switch', + 'include_dirs', 'define', 'undef', 'rpath', 'link_objects']: + if key not in keys: + keys.append(key) + for key in keys: + if hasattr(compiler, key): + v = getattr(compiler, key) + mx = max(mx, len(key)) + props.append((key, repr(v))) + fmt = '%-' + repr(mx+1) + 's = %s' + lines = [fmt % prop for prop in props] + return '\n'.join(lines) + +def CCompiler_show_customization(self): + """ + Print the compiler customizations to stdout. + + Parameters + ---------- + None + + Returns + ------- + None + + Notes + ----- + Printing is only done if the distutils log threshold is < 2. 
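+
+    A hedged usage sketch:
+
+    >>> from numpy.distutils.ccompiler import new_compiler # doctest: +SKIP
+    >>> cc = new_compiler() # doctest: +SKIP
+    >>> cc.customize(None) # doctest: +SKIP
+    >>> cc.show_customization() # doctest: +SKIP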
+ + """ + try: + self.get_version() + except Exception: + pass + if log._global_log.threshold<2: + print('*'*80) + print(self.__class__) + print(_compiler_to_string(self)) + print('*'*80) + +replace_method(CCompiler, 'show_customization', CCompiler_show_customization) + +def CCompiler_customize(self, dist, need_cxx=0): + """ + Do any platform-specific customization of a compiler instance. + + This method calls `distutils.sysconfig.customize_compiler` for + platform-specific customization, as well as optionally remove a flag + to suppress spurious warnings in case C++ code is being compiled. + + Parameters + ---------- + dist : object + This parameter is not used for anything. + need_cxx : bool, optional + Whether or not C++ has to be compiled. If so (True), the + ``"-Wstrict-prototypes"`` option is removed to prevent spurious + warnings. Default is False. + + Returns + ------- + None + + Notes + ----- + All the default options used by distutils can be extracted with:: + + from distutils import sysconfig + sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS', + 'CCSHARED', 'LDSHARED', 'SO') + + """ + # See FCompiler.customize for suggested usage. + log.info('customize %s' % (self.__class__.__name__)) + customize_compiler(self) + if need_cxx: + # In general, distutils uses -Wstrict-prototypes, but this option is + # not valid for C++ code, only for C. Remove it if it's there to + # avoid a spurious warning on every compilation. + try: + self.compiler_so.remove('-Wstrict-prototypes') + except (AttributeError, ValueError): + pass + + if hasattr(self, 'compiler') and 'cc' in self.compiler[0]: + if not self.compiler_cxx: + if self.compiler[0].startswith('gcc'): + a, b = 'gcc', 'g++' + else: + a, b = 'cc', 'c++' + self.compiler_cxx = [self.compiler[0].replace(a, b)]\ + + self.compiler[1:] + else: + if hasattr(self, 'compiler'): + log.warn("#### %s #######" % (self.compiler,)) + if not hasattr(self, 'compiler_cxx'): + log.warn('Missing compiler_cxx fix for ' + self.__class__.__name__) + + + # check if compiler supports gcc style automatic dependencies + # run on every extension so skip for known good compilers + if hasattr(self, 'compiler') and ('gcc' in self.compiler[0] or + 'g++' in self.compiler[0] or + 'clang' in self.compiler[0]): + self._auto_depends = True + elif os.name == 'posix': + import tempfile + import shutil + tmpdir = tempfile.mkdtemp() + try: + fn = os.path.join(tmpdir, "file.c") + with open(fn, "w") as f: + f.write("int a;\n") + self.compile([fn], output_dir=tmpdir, + extra_preargs=['-MMD', '-MF', fn + '.d']) + self._auto_depends = True + except CompileError: + self._auto_depends = False + finally: + shutil.rmtree(tmpdir) + + return + +replace_method(CCompiler, 'customize', CCompiler_customize) + +def simple_version_match(pat=r'[-.\d]+', ignore='', start=''): + """ + Simple matching of version numbers, for use in CCompiler and FCompiler. + + Parameters + ---------- + pat : str, optional + A regular expression matching version numbers. + Default is ``r'[-.\\d]+'``. + ignore : str, optional + A regular expression matching patterns to skip. + Default is ``''``, in which case nothing is skipped. + start : str, optional + A regular expression matching the start of where to start looking + for version numbers. + Default is ``''``, in which case searching is started at the + beginning of the version string given to `matcher`. + + Returns + ------- + matcher : callable + A function that is appropriate to use as the ``.version_match`` + attribute of a `CCompiler` class. 
`matcher` takes a single parameter, + a version string. + + """ + def matcher(self, version_string): + # version string may appear in the second line, so getting rid + # of new lines: + version_string = version_string.replace('\n', ' ') + pos = 0 + if start: + m = re.match(start, version_string) + if not m: + return None + pos = m.end() + while True: + m = re.search(pat, version_string[pos:]) + if not m: + return None + if ignore and re.match(ignore, m.group(0)): + pos = m.end() + continue + break + return m.group(0) + return matcher + +def CCompiler_get_version(self, force=False, ok_status=[0]): + """ + Return compiler version, or None if compiler is not available. + + Parameters + ---------- + force : bool, optional + If True, force a new determination of the version, even if the + compiler already has a version attribute. Default is False. + ok_status : list of int, optional + The list of status values returned by the version look-up process + for which a version string is returned. If the status value is not + in `ok_status`, None is returned. Default is ``[0]``. + + Returns + ------- + version : str or None + Version string, in the format of `distutils.version.LooseVersion`. + + """ + if not force and hasattr(self, 'version'): + return self.version + self.find_executables() + try: + version_cmd = self.version_cmd + except AttributeError: + return None + if not version_cmd or not version_cmd[0]: + return None + try: + matcher = self.version_match + except AttributeError: + try: + pat = self.version_pattern + except AttributeError: + return None + def matcher(version_string): + m = re.match(pat, version_string) + if not m: + return None + version = m.group('version') + return version + + try: + output = subprocess.check_output(version_cmd, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as exc: + output = exc.output + status = exc.returncode + except OSError: + # match the historical returns for a parent + # exception class caught by exec_command() + status = 127 + output = b'' + else: + # output isn't actually a filepath but we do this + # for now to match previous distutils behavior + output = filepath_from_subprocess_output(output) + status = 0 + + version = None + if status in ok_status: + version = matcher(output) + if version: + version = LooseVersion(version) + self.version = version + return version + +replace_method(CCompiler, 'get_version', CCompiler_get_version) + +def CCompiler_cxx_compiler(self): + """ + Return the C++ compiler. + + Parameters + ---------- + None + + Returns + ------- + cxx : class instance + The C++ compiler, as a `CCompiler` instance. 
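+
+        A hedged sketch, reusing a ``cc`` instance from ``new_compiler()``:
+
+        >>> cxx = cc.cxx_compiler() # doctest: +SKIP
+        >>> cxx.compiler_so[0] # typically 'g++' when cc wraps gcc # doctest: +SKIP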
+ + """ + if self.compiler_type in ('msvc', 'intelw', 'intelemw'): + return self + + cxx = copy(self) + cxx.compiler_so = [cxx.compiler_cxx[0]] + cxx.compiler_so[1:] + if sys.platform.startswith('aix') and 'ld_so_aix' in cxx.linker_so[0]: + # AIX needs the ld_so_aix script included with Python + cxx.linker_so = [cxx.linker_so[0], cxx.compiler_cxx[0]] \ + + cxx.linker_so[2:] + else: + cxx.linker_so = [cxx.compiler_cxx[0]] + cxx.linker_so[1:] + return cxx + +replace_method(CCompiler, 'cxx_compiler', CCompiler_cxx_compiler) + +compiler_class['intel'] = ('intelccompiler', 'IntelCCompiler', + "Intel C Compiler for 32-bit applications") +compiler_class['intele'] = ('intelccompiler', 'IntelItaniumCCompiler', + "Intel C Itanium Compiler for Itanium-based applications") +compiler_class['intelem'] = ('intelccompiler', 'IntelEM64TCCompiler', + "Intel C Compiler for 64-bit applications") +compiler_class['intelw'] = ('intelccompiler', 'IntelCCompilerW', + "Intel C Compiler for 32-bit applications on Windows") +compiler_class['intelemw'] = ('intelccompiler', 'IntelEM64TCCompilerW', + "Intel C Compiler for 64-bit applications on Windows") +compiler_class['pathcc'] = ('pathccompiler', 'PathScaleCCompiler', + "PathScale Compiler for SiCortex-based applications") +ccompiler._default_compilers += (('linux.*', 'intel'), + ('linux.*', 'intele'), + ('linux.*', 'intelem'), + ('linux.*', 'pathcc'), + ('nt', 'intelw'), + ('nt', 'intelemw')) + +if sys.platform == 'win32': + compiler_class['mingw32'] = ('mingw32ccompiler', 'Mingw32CCompiler', + "Mingw32 port of GNU C Compiler for Win32"\ + "(for MSC built Python)") + if mingw32(): + # On windows platforms, we want to default to mingw32 (gcc) + # because msvc can't build blitz stuff. + log.info('Setting mingw32 as default compiler for nt.') + ccompiler._default_compilers = (('nt', 'mingw32'),) \ + + ccompiler._default_compilers + + +_distutils_new_compiler = new_compiler +def new_compiler (plat=None, + compiler=None, + verbose=None, + dry_run=0, + force=0): + # Try first C compilers from numpy.distutils. + if verbose is None: + verbose = log.get_threshold() <= log.INFO + if plat is None: + plat = os.name + try: + if compiler is None: + compiler = get_default_compiler(plat) + (module_name, class_name, long_description) = compiler_class[compiler] + except KeyError: + msg = "don't know how to compile C/C++ code on platform '%s'" % plat + if compiler is not None: + msg = msg + " with '%s' compiler" % compiler + raise DistutilsPlatformError(msg) + module_name = "numpy.distutils." 
+ module_name + try: + __import__ (module_name) + except ImportError as e: + msg = str(e) + log.info('%s in numpy.distutils; trying from distutils', + str(msg)) + module_name = module_name[6:] + try: + __import__(module_name) + except ImportError as e: + msg = str(e) + raise DistutilsModuleError("can't compile C/C++ code: unable to load module '%s'" % \ + module_name) + try: + module = sys.modules[module_name] + klass = vars(module)[class_name] + except KeyError: + raise DistutilsModuleError(("can't compile C/C++ code: unable to find class '%s' " + + "in module '%s'") % (class_name, module_name)) + compiler = klass(None, dry_run, force) + compiler.verbose = verbose + log.debug('new_compiler returns %s' % (klass)) + return compiler + +ccompiler.new_compiler = new_compiler + +_distutils_gen_lib_options = gen_lib_options +def gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries): + # the version of this function provided by CPython allows the following + # to return lists, which are unpacked automatically: + # - compiler.runtime_library_dir_option + # our version extends the behavior to: + # - compiler.library_dir_option + # - compiler.library_option + # - compiler.find_library_file + r = _distutils_gen_lib_options(compiler, library_dirs, + runtime_library_dirs, libraries) + lib_opts = [] + for i in r: + if is_sequence(i): + lib_opts.extend(list(i)) + else: + lib_opts.append(i) + return lib_opts +ccompiler.gen_lib_options = gen_lib_options + +# Also fix up the various compiler modules, which do +# from distutils.ccompiler import gen_lib_options +# Don't bother with mwerks, as we don't support Classic Mac. +for _cc in ['msvc9', 'msvc', '_msvc', 'bcpp', 'cygwinc', 'emxc', 'unixc']: + _m = sys.modules.get('distutils.' + _cc + 'compiler') + if _m is not None: + setattr(_m, 'gen_lib_options', gen_lib_options) + diff --git a/MLPY/Lib/site-packages/numpy/distutils/ccompiler_opt.py b/MLPY/Lib/site-packages/numpy/distutils/ccompiler_opt.py new file mode 100644 index 0000000000000000000000000000000000000000..959874f01f8803772441a99d5d9bd05db7ee4c98 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/ccompiler_opt.py @@ -0,0 +1,2572 @@ +"""Provides the `CCompilerOpt` class, used for handling the CPU/hardware +optimization, starting from parsing the command arguments, to managing the +relation between the CPU baseline and dispatch-able features, +also generating the required C headers and ending with compiling +the sources with proper compiler's flags. + +`CCompilerOpt` doesn't provide runtime detection for the CPU features, +instead only focuses on the compiler side, but it creates abstract C headers +that can be used later for the final runtime dispatching process.""" + +import sys, io, os, re, textwrap, pprint, inspect, atexit, subprocess + +class _Config: + """An abstract class holds all configurable attributes of `CCompilerOpt`, + these class attributes can be used to change the default behavior + of `CCompilerOpt` in order to fit other requirements. + + Attributes + ---------- + conf_nocache : bool + Set True to disable memory and file cache. + Default is False. + + conf_noopt : bool + Set True to forces the optimization to be disabled, + in this case `CCompilerOpt` tends to generate all + expected headers in order to 'not' break the build. + Default is False. + + conf_cache_factors : list + Add extra factors to the primary caching factors. 
+        The caching factors are used to determine whether anything has
+        changed that requires the cache to be discarded and rebuilt.
+        The primary factors are the arguments of `CCompilerOpt` and
+        `CCompiler`'s properties (type, flags, etc.).
+        Default is a list of two items: the time of last modification
+        of `ccompiler_opt` and the value of the attribute "conf_noopt".
+
+    conf_tmp_path : str
+        The path of the temporary directory. Default is an auto-created
+        temporary directory via ``tempfile.mkdtemp()``.
+
+    conf_check_path : str
+        The path of the testing files. Each added CPU feature must have a
+        **C** source file that contains at least one intrinsic or
+        instruction related to this feature, so it can be tested against
+        the compiler.
+        Default is ``./distutils/checks``.
+
+    conf_target_groups : dict
+        Extra tokens that can be reached from dispatch-able sources through
+        the special mark ``@targets``. Default is an empty dictionary.
+
+        **Notes**:
+            - case-insensitive for tokens and group names
+            - the sign '#' must appear at the beginning of a group name,
+              and only within ``@targets``
+
+        **Example**:
+        .. code-block:: console
+
+            $ echo "@targets #avx_group other_tokens" > group_inside.c
+
+        >>> CCompilerOpt.conf_target_groups["avx_group"] = \\
+        "$werror $maxopt avx2 avx512f avx512_skx"
+        >>> cco = CCompilerOpt(cc_instance)
+        >>> cco.try_dispatch(["group_inside.c"])
+
+    conf_c_prefix : str
+        The prefix of public C definitions. Default is ``"NPY_"``.
+
+    conf_c_prefix_ : str
+        The prefix of internal C definitions. Default is ``"NPY__"``.
+
+    conf_cc_flags : dict
+        Nested dictionaries defining several compiler flags linked to some
+        major functions; the main key represents the compiler name and the
+        sub-keys represent flag names. The default already covers all
+        supported **C** compilers.
+
+        Sub-keys explained as follows:
+
+        "native": str or None
+            used by the argument option `native` to detect what the
+            current machine supports via the compiler.
+        "werror": str or None
+            used to treat warnings as errors when testing CPU features
+            against the compiler, and also for the target policy `$werror`
+            via dispatch-able sources.
+        "maxopt": str or None
+            used for the target policy '$maxopt'; the value should contain
+            the maximum optimization level accepted by the compiler,
+            e.g. `'-O3'` for gcc.
+
+        **Notes**:
+            * case-sensitive for compiler names and flags
+            * use a space to separate multiple flags
+            * every flag is tested against the compiler, and it is skipped
+              if it is not applicable.
+
+    conf_min_features : dict
+        A dictionary defining the CPU features used for the argument
+        option `'min'`; the key represents the CPU architecture name,
+        e.g. `'x86'`. The default values provide a best effort across a
+        wide range of user platforms.
+
+        **Note**: case-sensitive for architecture names.
+
+    conf_features : dict
+        Nested dictionaries used for identifying the CPU features. The
+        primary key is a feature name or a group name that gathers several
+        features. The default values cover all supported features but
+        leave out the major options like "flags"; these undefined options
+        are filled in by the method `conf_features_partial()`. The default
+        value covers almost all CPU features for *X86*, *IBM/Power64*
+        and *ARM 7/8*.
+
+        Sub-keys explained as follows:
+
+        "implies" : str or list, optional
+            List of CPU feature names implied by this feature; each name
+            must be defined within `conf_features`.
+            Default is None.
+
+        "flags": str or list, optional
+            List of compiler flags. Default is None.
+
+        "detect": str or list, optional
+            List of CPU feature names that are required to be detected at
+            runtime. By default, it is the feature name, or the features
+            in "group" if that is specified.
+
+        "implies_detect": bool, optional
+            If True, all "detect" of implied features will be combined.
+            Default is True. See `feature_detect()`.
+
+        "group": str or list, optional
+            Same as "implies" but doesn't require the feature name to be
+            defined within `conf_features`.
+
+        "interest": int, required
+            a key used for sorting CPU features.
+
+        "headers": str or list, optional
+            the C header file(s) of the intrinsics.
+
+        "disable": str, optional
+            force-disable the feature; the string value should contain the
+            reason for disabling it.
+
+        "autovec": bool or None, optional
+            True or False to declare that the CPU feature can be
+            auto-vectorized by the compiler.
+            By default (None), treated as True if the feature has at least
+            one applicable flag. See `feature_can_autovec()`.
+
+        "extra_checks": str or list, optional
+            Extra test case names for the CPU feature that need to be
+            tested against the compiler.
+
+            Each test case must have a C file named ``extra_xxxx.c``, where
+            ``xxxx`` is the case name in lower case, under
+            'conf_check_path'. It should contain at least one intrinsic or
+            function related to the test case.
+
+            If the compiler is able to compile the C file successfully,
+            then `CCompilerOpt` will add a C ``#define`` for it into the
+            main dispatch header, e.g. ``#define {conf_c_prefix}_XXXX``,
+            where ``XXXX`` is the case name in upper case.
+
+    **NOTES**:
+        * a space can be used as a separator for options that support
+          "str or list"
+        * all values are case-sensitive, and feature names must be in
+          upper case.
+        * if flags aren't applicable, they are skipped rather than
+          disabling the CPU feature
+        * the CPU feature will be disabled if the compiler fails to
+          compile the test file
+    """
+    conf_nocache = False
+    conf_noopt = False
+    conf_cache_factors = None
+    conf_tmp_path = None
+    conf_check_path = os.path.join(
+        os.path.dirname(os.path.realpath(__file__)), "checks"
+    )
+    conf_target_groups = {}
+    conf_c_prefix = 'NPY_'
+    conf_c_prefix_ = 'NPY__'
+    conf_cc_flags = dict(
+        gcc = dict(
+            # native should always fail on arm and ppc64,
+            # native usually works only with x86
+            native = '-march=native',
+            opt = '-O3',
+            werror = '-Werror'
+        ),
+        clang = dict(
+            native = '-march=native',
+            opt = "-O3",
+            # One of the following flags needs to be applicable for Clang to
+            # guarantee the sanity of the testing process, however in certain
+            # cases `-Werror` gets skipped during the availability test due to
+            # "unused arguments" warnings.
+ # see https://github.com/numpy/numpy/issues/19624 + werror = '-Werror-implicit-function-declaration -Werror' + ), + icc = dict( + native = '-xHost', + opt = '-O3', + werror = '-Werror' + ), + iccw = dict( + native = '/QxHost', + opt = '/O3', + werror = '/Werror' + ), + msvc = dict( + native = None, + opt = '/O2', + werror = '/WX' + ) + ) + conf_min_features = dict( + x86 = "SSE SSE2", + x64 = "SSE SSE2 SSE3", + ppc64 = '', # play it safe + ppc64le = "VSX VSX2", + armhf = '', # play it safe + aarch64 = "NEON NEON_FP16 NEON_VFPV4 ASIMD" + ) + conf_features = dict( + # X86 + SSE = dict( + interest=1, headers="xmmintrin.h", + # enabling SSE without SSE2 is useless also + # it's non-optional for x86_64 + implies="SSE2" + ), + SSE2 = dict(interest=2, implies="SSE", headers="emmintrin.h"), + SSE3 = dict(interest=3, implies="SSE2", headers="pmmintrin.h"), + SSSE3 = dict(interest=4, implies="SSE3", headers="tmmintrin.h"), + SSE41 = dict(interest=5, implies="SSSE3", headers="smmintrin.h"), + POPCNT = dict(interest=6, implies="SSE41", headers="popcntintrin.h"), + SSE42 = dict(interest=7, implies="POPCNT"), + AVX = dict( + interest=8, implies="SSE42", headers="immintrin.h", + implies_detect=False + ), + XOP = dict(interest=9, implies="AVX", headers="x86intrin.h"), + FMA4 = dict(interest=10, implies="AVX", headers="x86intrin.h"), + F16C = dict(interest=11, implies="AVX"), + FMA3 = dict(interest=12, implies="F16C"), + AVX2 = dict(interest=13, implies="F16C"), + AVX512F = dict( + interest=20, implies="FMA3 AVX2", implies_detect=False, + extra_checks="AVX512F_REDUCE" + ), + AVX512CD = dict(interest=21, implies="AVX512F"), + AVX512_KNL = dict( + interest=40, implies="AVX512CD", group="AVX512ER AVX512PF", + detect="AVX512_KNL", implies_detect=False + ), + AVX512_KNM = dict( + interest=41, implies="AVX512_KNL", + group="AVX5124FMAPS AVX5124VNNIW AVX512VPOPCNTDQ", + detect="AVX512_KNM", implies_detect=False + ), + AVX512_SKX = dict( + interest=42, implies="AVX512CD", group="AVX512VL AVX512BW AVX512DQ", + detect="AVX512_SKX", implies_detect=False, + extra_checks="AVX512BW_MASK AVX512DQ_MASK" + ), + AVX512_CLX = dict( + interest=43, implies="AVX512_SKX", group="AVX512VNNI", + detect="AVX512_CLX" + ), + AVX512_CNL = dict( + interest=44, implies="AVX512_SKX", group="AVX512IFMA AVX512VBMI", + detect="AVX512_CNL", implies_detect=False + ), + AVX512_ICL = dict( + interest=45, implies="AVX512_CLX AVX512_CNL", + group="AVX512VBMI2 AVX512BITALG AVX512VPOPCNTDQ", + detect="AVX512_ICL", implies_detect=False + ), + # IBM/Power + ## Power7/ISA 2.06 + VSX = dict(interest=1, headers="altivec.h", extra_checks="VSX_ASM"), + ## Power8/ISA 2.07 + VSX2 = dict(interest=2, implies="VSX", implies_detect=False), + ## Power9/ISA 3.00 + VSX3 = dict(interest=3, implies="VSX2", implies_detect=False), + # ARM + NEON = dict(interest=1, headers="arm_neon.h"), + NEON_FP16 = dict(interest=2, implies="NEON"), + ## FMA + NEON_VFPV4 = dict(interest=3, implies="NEON_FP16"), + ## Advanced SIMD + ASIMD = dict(interest=4, implies="NEON_FP16 NEON_VFPV4", implies_detect=False), + ## ARMv8.2 half-precision & vector arithm + ASIMDHP = dict(interest=5, implies="ASIMD"), + ## ARMv8.2 dot product + ASIMDDP = dict(interest=6, implies="ASIMD"), + ## ARMv8.2 Single & half-precision Multiply + ASIMDFHM = dict(interest=7, implies="ASIMDHP"), + ) + def conf_features_partial(self): + """Return a dictionary of supported CPU features by the platform, + and accumulate the rest of undefined options in `conf_features`, + the returned dict has same rules and notes in 
+ class attribute `conf_features`, also its override + any options that been set in 'conf_features'. + """ + if self.cc_noopt: + # optimization is disabled + return {} + + on_x86 = self.cc_on_x86 or self.cc_on_x64 + is_unix = self.cc_is_gcc or self.cc_is_clang + + if on_x86 and is_unix: return dict( + SSE = dict(flags="-msse"), + SSE2 = dict(flags="-msse2"), + SSE3 = dict(flags="-msse3"), + SSSE3 = dict(flags="-mssse3"), + SSE41 = dict(flags="-msse4.1"), + POPCNT = dict(flags="-mpopcnt"), + SSE42 = dict(flags="-msse4.2"), + AVX = dict(flags="-mavx"), + F16C = dict(flags="-mf16c"), + XOP = dict(flags="-mxop"), + FMA4 = dict(flags="-mfma4"), + FMA3 = dict(flags="-mfma"), + AVX2 = dict(flags="-mavx2"), + AVX512F = dict(flags="-mavx512f"), + AVX512CD = dict(flags="-mavx512cd"), + AVX512_KNL = dict(flags="-mavx512er -mavx512pf"), + AVX512_KNM = dict( + flags="-mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq" + ), + AVX512_SKX = dict(flags="-mavx512vl -mavx512bw -mavx512dq"), + AVX512_CLX = dict(flags="-mavx512vnni"), + AVX512_CNL = dict(flags="-mavx512ifma -mavx512vbmi"), + AVX512_ICL = dict( + flags="-mavx512vbmi2 -mavx512bitalg -mavx512vpopcntdq" + ) + ) + if on_x86 and self.cc_is_icc: return dict( + SSE = dict(flags="-msse"), + SSE2 = dict(flags="-msse2"), + SSE3 = dict(flags="-msse3"), + SSSE3 = dict(flags="-mssse3"), + SSE41 = dict(flags="-msse4.1"), + POPCNT = {}, + SSE42 = dict(flags="-msse4.2"), + AVX = dict(flags="-mavx"), + F16C = {}, + XOP = dict(disable="Intel Compiler doesn't support it"), + FMA4 = dict(disable="Intel Compiler doesn't support it"), + # Intel Compiler doesn't support AVX2 or FMA3 independently + FMA3 = dict( + implies="F16C AVX2", flags="-march=core-avx2" + ), + AVX2 = dict(implies="FMA3", flags="-march=core-avx2"), + # Intel Compiler doesn't support AVX512F or AVX512CD independently + AVX512F = dict( + implies="AVX2 AVX512CD", flags="-march=common-avx512" + ), + AVX512CD = dict( + implies="AVX2 AVX512F", flags="-march=common-avx512" + ), + AVX512_KNL = dict(flags="-xKNL"), + AVX512_KNM = dict(flags="-xKNM"), + AVX512_SKX = dict(flags="-xSKYLAKE-AVX512"), + AVX512_CLX = dict(flags="-xCASCADELAKE"), + AVX512_CNL = dict(flags="-xCANNONLAKE"), + AVX512_ICL = dict(flags="-xICELAKE-CLIENT"), + ) + if on_x86 and self.cc_is_iccw: return dict( + SSE = dict(flags="/arch:SSE"), + SSE2 = dict(flags="/arch:SSE2"), + SSE3 = dict(flags="/arch:SSE3"), + SSSE3 = dict(flags="/arch:SSSE3"), + SSE41 = dict(flags="/arch:SSE4.1"), + POPCNT = {}, + SSE42 = dict(flags="/arch:SSE4.2"), + AVX = dict(flags="/arch:AVX"), + F16C = {}, + XOP = dict(disable="Intel Compiler doesn't support it"), + FMA4 = dict(disable="Intel Compiler doesn't support it"), + # Intel Compiler doesn't support FMA3 or AVX2 independently + FMA3 = dict( + implies="F16C AVX2", flags="/arch:CORE-AVX2" + ), + AVX2 = dict( + implies="FMA3", flags="/arch:CORE-AVX2" + ), + # Intel Compiler doesn't support AVX512F or AVX512CD independently + AVX512F = dict( + implies="AVX2 AVX512CD", flags="/Qx:COMMON-AVX512" + ), + AVX512CD = dict( + implies="AVX2 AVX512F", flags="/Qx:COMMON-AVX512" + ), + AVX512_KNL = dict(flags="/Qx:KNL"), + AVX512_KNM = dict(flags="/Qx:KNM"), + AVX512_SKX = dict(flags="/Qx:SKYLAKE-AVX512"), + AVX512_CLX = dict(flags="/Qx:CASCADELAKE"), + AVX512_CNL = dict(flags="/Qx:CANNONLAKE"), + AVX512_ICL = dict(flags="/Qx:ICELAKE-CLIENT") + ) + if on_x86 and self.cc_is_msvc: return dict( + SSE = dict(flags="/arch:SSE"), + SSE2 = dict(flags="/arch:SSE2"), + SSE3 = {}, + SSSE3 = {}, + SSE41 = {}, + POPCNT = 
dict(headers="nmmintrin.h"), + SSE42 = {}, + AVX = dict(flags="/arch:AVX"), + F16C = {}, + XOP = dict(headers="ammintrin.h"), + FMA4 = dict(headers="ammintrin.h"), + # MSVC doesn't support FMA3 or AVX2 independently + FMA3 = dict( + implies="F16C AVX2", flags="/arch:AVX2" + ), + AVX2 = dict( + implies="F16C FMA3", flags="/arch:AVX2" + ), + # MSVC doesn't support AVX512F or AVX512CD independently, + # always generate instructions belong to (VL/VW/DQ) + AVX512F = dict( + implies="AVX2 AVX512CD AVX512_SKX", flags="/arch:AVX512" + ), + AVX512CD = dict( + implies="AVX512F AVX512_SKX", flags="/arch:AVX512" + ), + AVX512_KNL = dict( + disable="MSVC compiler doesn't support it" + ), + AVX512_KNM = dict( + disable="MSVC compiler doesn't support it" + ), + AVX512_SKX = dict(flags="/arch:AVX512"), + AVX512_CLX = {}, + AVX512_CNL = {}, + AVX512_ICL = {} + ) + + on_power = self.cc_on_ppc64le or self.cc_on_ppc64 + if on_power: + partial = dict( + VSX = dict( + implies=("VSX2" if self.cc_on_ppc64le else ""), + flags="-mvsx" + ), + VSX2 = dict( + flags="-mcpu=power8", implies_detect=False + ), + VSX3 = dict( + flags="-mcpu=power9 -mtune=power9", implies_detect=False + ) + ) + if self.cc_is_clang: + partial["VSX"]["flags"] = "-maltivec -mvsx" + partial["VSX2"]["flags"] = "-mpower8-vector" + partial["VSX3"]["flags"] = "-mpower9-vector" + + return partial + + if self.cc_on_aarch64 and is_unix: return dict( + NEON = dict( + implies="NEON_FP16 NEON_VFPV4 ASIMD", autovec=True + ), + NEON_FP16 = dict( + implies="NEON NEON_VFPV4 ASIMD", autovec=True + ), + NEON_VFPV4 = dict( + implies="NEON NEON_FP16 ASIMD", autovec=True + ), + ASIMD = dict( + implies="NEON NEON_FP16 NEON_VFPV4", autovec=True + ), + ASIMDHP = dict( + flags="-march=armv8.2-a+fp16" + ), + ASIMDDP = dict( + flags="-march=armv8.2-a+dotprod" + ), + ASIMDFHM = dict( + flags="-march=armv8.2-a+fp16fml" + ), + ) + if self.cc_on_armhf and is_unix: return dict( + NEON = dict( + flags="-mfpu=neon" + ), + NEON_FP16 = dict( + flags="-mfpu=neon-fp16 -mfp16-format=ieee" + ), + NEON_VFPV4 = dict( + flags="-mfpu=neon-vfpv4", + ), + ASIMD = dict( + flags="-mfpu=neon-fp-armv8 -march=armv8-a+simd", + ), + ASIMDHP = dict( + flags="-march=armv8.2-a+fp16" + ), + ASIMDDP = dict( + flags="-march=armv8.2-a+dotprod", + ), + ASIMDFHM = dict( + flags="-march=armv8.2-a+fp16fml" + ) + ) + # TODO: ARM MSVC + return {} + + def __init__(self): + if self.conf_tmp_path is None: + import tempfile, shutil + tmp = tempfile.mkdtemp() + def rm_temp(): + try: + shutil.rmtree(tmp) + except IOError: + pass + atexit.register(rm_temp) + self.conf_tmp_path = tmp + + if self.conf_cache_factors is None: + self.conf_cache_factors = [ + os.path.getmtime(__file__), + self.conf_nocache + ] + +class _Distutils: + """A helper class that provides a collection of fundamental methods + implemented in a top of Python and NumPy Distutils. + + The idea behind this class is to gather all methods that it may + need to override in case of reuse 'CCompilerOpt' in environment + different than of what NumPy has. + + Parameters + ---------- + ccompiler : `CCompiler` + The generate instance that returned from `distutils.ccompiler.new_compiler()`. 
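+
+    **Example** (an illustrative sketch; ``cc`` stands for any instance
+    returned by `distutils.ccompiler.new_compiler()`):
+
+    .. code-block:: python
+
+        helper = _Distutils(cc)
+        # tuple of (platform, compiler, extra_args), also used
+        # as a cache factor by '_Cache'
+        platform, compiler, extra_args = helper.dist_info()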
+ """ + def __init__(self, ccompiler): + self._ccompiler = ccompiler + + def dist_compile(self, sources, flags, ccompiler=None, **kwargs): + """Wrap CCompiler.compile()""" + assert(isinstance(sources, list)) + assert(isinstance(flags, list)) + flags = kwargs.pop("extra_postargs", []) + flags + if not ccompiler: + ccompiler = self._ccompiler + return ccompiler.compile(sources, extra_postargs=flags, **kwargs) + + def dist_test(self, source, flags, macros=[]): + """Return True if 'CCompiler.compile()' able to compile + a source file with certain flags. + """ + assert(isinstance(source, str)) + from distutils.errors import CompileError + cc = self._ccompiler; + bk_spawn = getattr(cc, 'spawn', None) + if bk_spawn: + cc_type = getattr(self._ccompiler, "compiler_type", "") + if cc_type in ("msvc",): + setattr(cc, 'spawn', self._dist_test_spawn_paths) + else: + setattr(cc, 'spawn', self._dist_test_spawn) + test = False + try: + self.dist_compile( + [source], flags, macros=macros, output_dir=self.conf_tmp_path + ) + test = True + except CompileError as e: + self.dist_log(str(e), stderr=True) + if bk_spawn: + setattr(cc, 'spawn', bk_spawn) + return test + + def dist_info(self): + """ + Return a tuple containing info about (platform, compiler, extra_args), + required by the abstract class '_CCompiler' for discovering the + platform environment. This is also used as a cache factor in order + to detect any changes happening from outside. + """ + if hasattr(self, "_dist_info"): + return self._dist_info + + cc_type = getattr(self._ccompiler, "compiler_type", '') + if cc_type in ("intelem", "intelemw"): + platform = "x86_64" + elif cc_type in ("intel", "intelw", "intele"): + platform = "x86" + else: + from distutils.util import get_platform + platform = get_platform() + + cc_info = getattr(self._ccompiler, "compiler", getattr(self._ccompiler, "compiler_so", '')) + if not cc_type or cc_type == "unix": + if hasattr(cc_info, "__iter__"): + compiler = cc_info[0] + else: + compiler = str(cc_info) + else: + compiler = cc_type + + if hasattr(cc_info, "__iter__") and len(cc_info) > 1: + extra_args = ' '.join(cc_info[1:]) + else: + extra_args = os.environ.get("CFLAGS", "") + extra_args += os.environ.get("CPPFLAGS", "") + + self._dist_info = (platform, compiler, extra_args) + return self._dist_info + + @staticmethod + def dist_error(*args): + """Raise a compiler error""" + from distutils.errors import CompileError + raise CompileError(_Distutils._dist_str(*args)) + + @staticmethod + def dist_fatal(*args): + """Raise a distutils error""" + from distutils.errors import DistutilsError + raise DistutilsError(_Distutils._dist_str(*args)) + + @staticmethod + def dist_log(*args, stderr=False): + """Print a console message""" + from numpy.distutils import log + out = _Distutils._dist_str(*args) + if stderr: + log.warn(out) + else: + log.info(out) + + @staticmethod + def dist_load_module(name, path): + """Load a module from file, required by the abstract class '_Cache'.""" + from numpy.compat import npy_load_module + try: + return npy_load_module(name, path) + except Exception as e: + _Distutils.dist_log(e, stderr=True) + return None + + @staticmethod + def _dist_str(*args): + """Return a string to print by log and errors.""" + def to_str(arg): + if not isinstance(arg, str) and hasattr(arg, '__iter__'): + ret = [] + for a in arg: + ret.append(to_str(a)) + return '('+ ' '.join(ret) + ')' + return str(arg) + + stack = inspect.stack()[2] + start = "CCompilerOpt.%s[%d] : " % (stack.function, stack.lineno) + out = ' '.join([ + 
to_str(a) + for a in (*args,) + ]) + return start + out + + def _dist_test_spawn_paths(self, cmd, display=None): + """ + Fix msvc SDK ENV path same as distutils do + without it we get c1: fatal error C1356: unable to find mspdbcore.dll + """ + if not hasattr(self._ccompiler, "_paths"): + self._dist_test_spawn(cmd) + return + old_path = os.getenv("path") + try: + os.environ["path"] = self._ccompiler._paths + self._dist_test_spawn(cmd) + finally: + os.environ["path"] = old_path + + _dist_warn_regex = re.compile( + # intel and msvc compilers don't raise + # fatal errors when flags are wrong or unsupported + ".*(" + "warning D9002|" # msvc, it should be work with any language. + "invalid argument for option" # intel + ").*" + ) + @staticmethod + def _dist_test_spawn(cmd, display=None): + from distutils.errors import CompileError + try: + o = subprocess.check_output(cmd, stderr=subprocess.STDOUT, + universal_newlines=True) + if o and re.match(_Distutils._dist_warn_regex, o): + _Distutils.dist_error( + "Flags in command", cmd ,"aren't supported by the compiler" + ", output -> \n%s" % o + ) + except subprocess.CalledProcessError as exc: + o = exc.output + s = exc.returncode + except OSError: + o = b'' + s = 127 + else: + return None + _Distutils.dist_error( + "Command", cmd, "failed with exit status %d output -> \n%s" % ( + s, o + )) + +_share_cache = {} +class _Cache: + """An abstract class handles caching functionality, provides two + levels of caching, in-memory by share instances attributes among + each other and by store attributes into files. + + **Note**: + any attributes that start with ``_`` or ``conf_`` will be ignored. + + Parameters + ---------- + cache_path: str or None + The path of cache file, if None then cache in file will disabled. + + *factors: + The caching factors that need to utilize next to `conf_cache_factors`. + + Attributes + ---------- + cache_private: set + Hold the attributes that need be skipped from "in-memory cache". + + cache_infile: bool + Utilized during initializing this class, to determine if the cache was able + to loaded from the specified cache path in 'cache_path'. 
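+
+    **Example** (a sketch of how the `me` decorator can be applied;
+    ``expensive_probe`` and ``run_probe`` are hypothetical names):
+
+    .. code-block:: python
+
+        class MyOpt(CCompilerOpt):
+            @_Cache.me
+            def expensive_probe(self, arg):
+                # evaluated once per distinct 'arg', afterwards served
+                # from the in-memory dict 'cache_me'
+                return run_probe(arg)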
+ """ + + # skip attributes from cache + _cache_ignore = re.compile("^(_|conf_)") + + def __init__(self, cache_path=None, *factors): + self.cache_me = {} + self.cache_private = set() + self.cache_infile = False + self._cache_path = None + + if self.conf_nocache: + self.dist_log("cache is disabled by `Config`") + return + + self._cache_hash = self.cache_hash(*factors, *self.conf_cache_factors) + self._cache_path = cache_path + if cache_path: + if os.path.exists(cache_path): + self.dist_log("load cache from file ->", cache_path) + cache_mod = self.dist_load_module("cache", cache_path) + if not cache_mod: + self.dist_log( + "unable to load the cache file as a module", + stderr=True + ) + elif not hasattr(cache_mod, "hash") or \ + not hasattr(cache_mod, "data"): + self.dist_log("invalid cache file", stderr=True) + elif self._cache_hash == cache_mod.hash: + self.dist_log("hit the file cache") + for attr, val in cache_mod.data.items(): + setattr(self, attr, val) + self.cache_infile = True + else: + self.dist_log("miss the file cache") + + if not self.cache_infile: + other_cache = _share_cache.get(self._cache_hash) + if other_cache: + self.dist_log("hit the memory cache") + for attr, val in other_cache.__dict__.items(): + if attr in other_cache.cache_private or \ + re.match(self._cache_ignore, attr): + continue + setattr(self, attr, val) + + _share_cache[self._cache_hash] = self + atexit.register(self.cache_flush) + + def __del__(self): + for h, o in _share_cache.items(): + if o == self: + _share_cache.pop(h) + break + + def cache_flush(self): + """ + Force update the cache. + """ + if not self._cache_path: + return + # TODO: don't write if the cache doesn't change + self.dist_log("write cache to path ->", self._cache_path) + cdict = self.__dict__.copy() + for attr in self.__dict__.keys(): + if re.match(self._cache_ignore, attr): + cdict.pop(attr) + + d = os.path.dirname(self._cache_path) + if not os.path.exists(d): + os.makedirs(d) + + repr_dict = pprint.pformat(cdict, compact=True) + with open(self._cache_path, "w") as f: + f.write(textwrap.dedent("""\ + # AUTOGENERATED DON'T EDIT + # Please make changes to the code generator \ + (distutils/ccompiler_opt.py) + hash = {} + data = \\ + """).format(self._cache_hash)) + f.write(repr_dict) + + def cache_hash(self, *factors): + # is there a built-in non-crypto hash? + # sdbm + chash = 0 + for f in factors: + for char in str(f): + chash = ord(char) + (chash << 6) + (chash << 16) - chash + chash &= 0xFFFFFFFF + return chash + + @staticmethod + def me(cb): + """ + A static method that can be treated as a decorator to + dynamically cache certain methods. + """ + def cache_wrap_me(self, *args, **kwargs): + # good for normal args + cache_key = str(( + cb.__name__, *args, *kwargs.keys(), *kwargs.values() + )) + if cache_key in self.cache_me: + return self.cache_me[cache_key] + ccb = cb(self, *args, **kwargs) + self.cache_me[cache_key] = ccb + return ccb + return cache_wrap_me + +class _CCompiler: + """A helper class for `CCompilerOpt` containing all utilities that + related to the fundamental compiler's functions. 
+ + Attributes + ---------- + cc_on_x86 : bool + True when the target architecture is 32-bit x86 + cc_on_x64 : bool + True when the target architecture is 64-bit x86 + cc_on_ppc64 : bool + True when the target architecture is 64-bit big-endian PowerPC + cc_on_armhf : bool + True when the target architecture is 32-bit ARMv7+ + cc_on_aarch64 : bool + True when the target architecture is 64-bit Armv8-a+ + cc_on_noarch : bool + True when the target architecture is unknown or not supported + cc_is_gcc : bool + True if the compiler is GNU or + if the compiler is unknown + cc_is_clang : bool + True if the compiler is Clang + cc_is_icc : bool + True if the compiler is Intel compiler (unix like) + cc_is_iccw : bool + True if the compiler is Intel compiler (msvc like) + cc_is_nocc : bool + True if the compiler isn't supported directly, + Note: that cause a fail-back to gcc + cc_has_debug : bool + True if the compiler has debug flags + cc_has_native : bool + True if the compiler has native flags + cc_noopt : bool + True if the compiler has definition 'DISABLE_OPT*', + or 'cc_on_noarch' is True + cc_march : str + The target architecture name, or "unknown" if + the architecture isn't supported + cc_name : str + The compiler name, or "unknown" if the compiler isn't supported + cc_flags : dict + Dictionary containing the initialized flags of `_Config.conf_cc_flags` + """ + def __init__(self): + if hasattr(self, "cc_is_cached"): + return + # attr regex + detect_arch = ( + ("cc_on_x64", ".*(x|x86_|amd)64.*"), + ("cc_on_x86", ".*(win32|x86|i386|i686).*"), + ("cc_on_ppc64le", ".*(powerpc|ppc)64(el|le).*"), + ("cc_on_ppc64", ".*(powerpc|ppc)64.*"), + ("cc_on_aarch64", ".*(aarch64|arm64).*"), + ("cc_on_armhf", ".*arm.*"), + # undefined platform + ("cc_on_noarch", ""), + ) + detect_compiler = ( + ("cc_is_gcc", r".*(gcc|gnu\-g).*"), + ("cc_is_clang", ".*clang.*"), + ("cc_is_iccw", ".*(intelw|intelemw|iccw).*"), # intel msvc like + ("cc_is_icc", ".*(intel|icc).*"), # intel unix like + ("cc_is_msvc", ".*msvc.*"), + # undefined compiler will be treat it as gcc + ("cc_is_nocc", ""), + ) + detect_args = ( + ("cc_has_debug", ".*(O0|Od|ggdb|coverage|debug:full).*"), + ("cc_has_native", ".*(-march=native|-xHost|/QxHost).*"), + # in case if the class run with -DNPY_DISABLE_OPTIMIZATION + ("cc_noopt", ".*DISABLE_OPT.*"), + ) + + dist_info = self.dist_info() + platform, compiler_info, extra_args = dist_info + # set False to all attrs + for section in (detect_arch, detect_compiler, detect_args): + for attr, rgex in section: + setattr(self, attr, False) + + for detect, searchin in ((detect_arch, platform), (detect_compiler, compiler_info)): + for attr, rgex in detect: + if rgex and not re.match(rgex, searchin, re.IGNORECASE): + continue + setattr(self, attr, True) + break + + for attr, rgex in detect_args: + if rgex and not re.match(rgex, extra_args, re.IGNORECASE): + continue + setattr(self, attr, True) + + if self.cc_on_noarch: + self.dist_log( + "unable to detect CPU architecture which lead to disable the optimization. " + f"check dist_info:<<\n{dist_info}\n>>", + stderr=True + ) + self.cc_noopt = True + + if self.conf_noopt: + self.dist_log("Optimization is disabled by the Config", stderr=True) + self.cc_noopt = True + + if self.cc_is_nocc: + """ + mingw can be treated as a gcc, and also xlc even if it based on clang, + but still has the same gcc optimization flags. + """ + self.dist_log( + "unable to detect compiler type which leads to treating it as GCC. 
" + "this is a normal behavior if you're using gcc-like compiler such as MinGW or IBM/XLC." + f"check dist_info:<<\n{dist_info}\n>>", + stderr=True + ) + self.cc_is_gcc = True + + self.cc_march = "unknown" + for arch in ("x86", "x64", "ppc64", "ppc64le", "armhf", "aarch64"): + if getattr(self, "cc_on_" + arch): + self.cc_march = arch + break + + self.cc_name = "unknown" + for name in ("gcc", "clang", "iccw", "icc", "msvc"): + if getattr(self, "cc_is_" + name): + self.cc_name = name + break + + self.cc_flags = {} + compiler_flags = self.conf_cc_flags.get(self.cc_name) + if compiler_flags is None: + self.dist_fatal( + "undefined flag for compiler '%s', " + "leave an empty dict instead" % self.cc_name + ) + for name, flags in compiler_flags.items(): + self.cc_flags[name] = nflags = [] + if flags: + assert(isinstance(flags, str)) + flags = flags.split() + for f in flags: + if self.cc_test_flags([f]): + nflags.append(f) + + self.cc_is_cached = True + + @_Cache.me + def cc_test_flags(self, flags): + """ + Returns True if the compiler supports 'flags'. + """ + assert(isinstance(flags, list)) + self.dist_log("testing flags", flags) + test_path = os.path.join(self.conf_check_path, "test_flags.c") + test = self.dist_test(test_path, flags) + if not test: + self.dist_log("testing failed", stderr=True) + return test + + def cc_normalize_flags(self, flags): + """ + Remove the conflicts that caused due gathering implied features flags. + + Parameters + ---------- + 'flags' list, compiler flags + flags should be sorted from the lowest to the highest interest. + + Returns + ------- + list, filtered from any conflicts. + + Examples + -------- + >>> self.cc_normalize_flags(['-march=armv8.2-a+fp16', '-march=armv8.2-a+dotprod']) + ['armv8.2-a+fp16+dotprod'] + + >>> self.cc_normalize_flags( + ['-msse', '-msse2', '-msse3', '-mssse3', '-msse4.1', '-msse4.2', '-mavx', '-march=core-avx2'] + ) + ['-march=core-avx2'] + """ + assert(isinstance(flags, list)) + if self.cc_is_gcc or self.cc_is_clang or self.cc_is_icc: + return self._cc_normalize_unix(flags) + + if self.cc_is_msvc or self.cc_is_iccw: + return self._cc_normalize_win(flags) + return flags + + _cc_normalize_unix_mrgx = re.compile( + # 1- to check the highest of + r"^(-mcpu=|-march=|-x[A-Z0-9\-])" + ) + _cc_normalize_unix_frgx = re.compile( + # 2- to remove any flags starts with + # -march, -mcpu, -x(INTEL) and '-m' without '=' + r"^(?!(-mcpu=|-march=|-x[A-Z0-9\-]))(?!-m[a-z0-9\-\.]*.$)" + ) + _cc_normalize_unix_krgx = re.compile( + # 3- keep only the highest of + r"^(-mfpu|-mtune)" + ) + _cc_normalize_arch_ver = re.compile( + r"[0-9.]" + ) + def _cc_normalize_unix(self, flags): + def ver_flags(f): + # arch ver subflag + # -march=armv8.2-a+fp16fml + tokens = f.split('+') + ver = float('0' + ''.join( + re.findall(self._cc_normalize_arch_ver, tokens[0]) + )) + return ver, tokens[0], tokens[1:] + + if len(flags) <= 1: + return flags + # get the highest matched flag + for i, cur_flag in enumerate(reversed(flags)): + if not re.match(self._cc_normalize_unix_mrgx, cur_flag): + continue + lower_flags = flags[:-(i+1)] + upper_flags = flags[-i:] + filterd = list(filter( + self._cc_normalize_unix_frgx.search, lower_flags + )) + # gather subflags + ver, arch, subflags = ver_flags(cur_flag) + if ver > 0 and len(subflags) > 0: + for xflag in lower_flags: + xver, _, xsubflags = ver_flags(xflag) + if ver == xver: + subflags = xsubflags + subflags + cur_flag = arch + '+' + '+'.join(subflags) + + flags = filterd + [cur_flag] + if i > 0: + flags += upper_flags + break + + # to 
remove overridable flags + final_flags = [] + matched = set() + for f in reversed(flags): + match = re.match(self._cc_normalize_unix_krgx, f) + if not match: + pass + elif match[0] in matched: + continue + else: + matched.add(match[0]) + final_flags.insert(0, f) + return final_flags + + _cc_normalize_win_frgx = re.compile( + r"^(?!(/arch\:|/Qx\:))" + ) + _cc_normalize_win_mrgx = re.compile( + r"^(/arch|/Qx:)" + ) + def _cc_normalize_win(self, flags): + for i, f in enumerate(reversed(flags)): + if not re.match(self._cc_normalize_win_mrgx, f): + continue + i += 1 + return list(filter( + self._cc_normalize_win_frgx.search, flags[:-i] + )) + flags[-i:] + return flags + +class _Feature: + """A helper class for `CCompilerOpt` that managing CPU features. + + Attributes + ---------- + feature_supported : dict + Dictionary containing all CPU features that supported + by the platform, according to the specified values in attribute + `_Config.conf_features` and `_Config.conf_features_partial()` + + feature_min : set + The minimum support of CPU features, according to + the specified values in attribute `_Config.conf_min_features`. + """ + def __init__(self): + if hasattr(self, "feature_is_cached"): + return + self.feature_supported = pfeatures = self.conf_features_partial() + for feature_name in list(pfeatures.keys()): + feature = pfeatures[feature_name] + cfeature = self.conf_features[feature_name] + feature.update({ + k:v for k,v in cfeature.items() if k not in feature + }) + disabled = feature.get("disable") + if disabled is not None: + pfeatures.pop(feature_name) + self.dist_log( + "feature '%s' is disabled," % feature_name, + disabled, stderr=True + ) + continue + # list is used internally for these options + for option in ( + "implies", "group", "detect", "headers", "flags", "extra_checks" + ) : + oval = feature.get(option) + if isinstance(oval, str): + feature[option] = oval.split() + + self.feature_min = set() + min_f = self.conf_min_features.get(self.cc_march, "") + for F in min_f.upper().split(): + if F in self.feature_supported: + self.feature_min.add(F) + + self.feature_is_cached = True + + def feature_names(self, names=None, force_flags=None, macros=[]): + """ + Returns a set of CPU feature names that supported by platform and the **C** compiler. + + Parameters + ---------- + names: sequence or None, optional + Specify certain CPU features to test it against the **C** compiler. + if None(default), it will test all current supported features. + **Note**: feature names must be in upper-case. + + force_flags: list or None, optional + If None(default), default compiler flags for every CPU feature will + be used during the test. + + macros : list of tuples, optional + A list of C macro definitions. + """ + assert( + names is None or ( + not isinstance(names, str) and + hasattr(names, "__iter__") + ) + ) + assert(force_flags is None or isinstance(force_flags, list)) + if names is None: + names = self.feature_supported.keys() + supported_names = set() + for f in names: + if self.feature_is_supported( + f, force_flags=force_flags, macros=macros + ): + supported_names.add(f) + return supported_names + + def feature_is_exist(self, name): + """ + Returns True if a certain feature is exist and covered within + `_Config.conf_features`. + + Parameters + ---------- + 'name': str + feature name in uppercase. + """ + assert(name.isupper()) + return name in self.conf_features + + def feature_sorted(self, names, reverse=False): + """ + Sort a list of CPU features ordered by the lowest interest. 
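+        For example, with the interest values defined in `conf_features`
+        (SSE2=2, SSE41=5, AVX2=13):
+
+        >>> self.feature_sorted(["AVX2", "SSE2", "SSE41"])
+        ['SSE2', 'SSE41', 'AVX2']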
+ + Parameters + ---------- + 'names': sequence + sequence of supported feature names in uppercase. + 'reverse': bool, optional + If true, the sorted features is reversed. (highest interest) + + Returns + ------- + list, sorted CPU features + """ + def sort_cb(k): + if isinstance(k, str): + return self.feature_supported[k]["interest"] + # multiple features + rank = max([self.feature_supported[f]["interest"] for f in k]) + # FIXME: that's not a safe way to increase the rank for + # multi targets + rank += len(k) -1 + return rank + return sorted(names, reverse=reverse, key=sort_cb) + + def feature_implies(self, names, keep_origins=False): + """ + Return a set of CPU features that implied by 'names' + + Parameters + ---------- + names: str or sequence of str + CPU feature name(s) in uppercase. + + keep_origins: bool + if False(default) then the returned set will not contain any + features from 'names'. This case happens only when two features + imply each other. + + Examples + -------- + >>> self.feature_implies("SSE3") + {'SSE', 'SSE2'} + >>> self.feature_implies("SSE2") + {'SSE'} + >>> self.feature_implies("SSE2", keep_origins=True) + # 'SSE2' found here since 'SSE' and 'SSE2' imply each other + {'SSE', 'SSE2'} + """ + def get_implies(name, _caller=set()): + implies = set() + d = self.feature_supported[name] + for i in d.get("implies", []): + implies.add(i) + if i in _caller: + # infinity recursive guard since + # features can imply each other + continue + _caller.add(name) + implies = implies.union(get_implies(i, _caller)) + return implies + + if isinstance(names, str): + implies = get_implies(names) + names = [names] + else: + assert(hasattr(names, "__iter__")) + implies = set() + for n in names: + implies = implies.union(get_implies(n)) + if not keep_origins: + implies.difference_update(names) + return implies + + def feature_implies_c(self, names): + """same as feature_implies() but combining 'names'""" + if isinstance(names, str): + names = set((names,)) + else: + names = set(names) + return names.union(self.feature_implies(names)) + + def feature_ahead(self, names): + """ + Return list of features in 'names' after remove any + implied features and keep the origins. + + Parameters + ---------- + 'names': sequence + sequence of CPU feature names in uppercase. + + Returns + ------- + list of CPU features sorted as-is 'names' + + Examples + -------- + >>> self.feature_ahead(["SSE2", "SSE3", "SSE41"]) + ["SSE41"] + # assume AVX2 and FMA3 implies each other and AVX2 + # is the highest interest + >>> self.feature_ahead(["SSE2", "SSE3", "SSE41", "AVX2", "FMA3"]) + ["AVX2"] + # assume AVX2 and FMA3 don't implies each other + >>> self.feature_ahead(["SSE2", "SSE3", "SSE41", "AVX2", "FMA3"]) + ["AVX2", "FMA3"] + """ + assert( + not isinstance(names, str) + and hasattr(names, '__iter__') + ) + implies = self.feature_implies(names, keep_origins=True) + ahead = [n for n in names if n not in implies] + if len(ahead) == 0: + # return the highest interested feature + # if all features imply each other + ahead = self.feature_sorted(names, reverse=True)[:1] + return ahead + + def feature_untied(self, names): + """ + same as 'feature_ahead()' but if both features implied each other + and keep the highest interest. + + Parameters + ---------- + 'names': sequence + sequence of CPU feature names in uppercase. 
+ + Returns + ------- + list of CPU features sorted as-is 'names' + + Examples + -------- + >>> self.feature_untied(["SSE2", "SSE3", "SSE41"]) + ["SSE2", "SSE3", "SSE41"] + # assume AVX2 and FMA3 implies each other + >>> self.feature_untied(["SSE2", "SSE3", "SSE41", "FMA3", "AVX2"]) + ["SSE2", "SSE3", "SSE41", "AVX2"] + """ + assert( + not isinstance(names, str) + and hasattr(names, '__iter__') + ) + final = [] + for n in names: + implies = self.feature_implies(n) + tied = [ + nn for nn in final + if nn in implies and n in self.feature_implies(nn) + ] + if tied: + tied = self.feature_sorted(tied + [n]) + if n not in tied[1:]: + continue + final.remove(tied[:1][0]) + final.append(n) + return final + + def feature_get_til(self, names, keyisfalse): + """ + same as `feature_implies_c()` but stop collecting implied + features when feature's option that provided through + parameter 'keyisfalse' is False, also sorting the returned + features. + """ + def til(tnames): + # sort from highest to lowest interest then cut if "key" is False + tnames = self.feature_implies_c(tnames) + tnames = self.feature_sorted(tnames, reverse=True) + for i, n in enumerate(tnames): + if not self.feature_supported[n].get(keyisfalse, True): + tnames = tnames[:i+1] + break + return tnames + + if isinstance(names, str) or len(names) <= 1: + names = til(names) + # normalize the sort + names.reverse() + return names + + names = self.feature_ahead(names) + names = {t for n in names for t in til(n)} + return self.feature_sorted(names) + + def feature_detect(self, names): + """ + Return a list of CPU features that required to be detected + sorted from the lowest to highest interest. + """ + names = self.feature_get_til(names, "implies_detect") + detect = [] + for n in names: + d = self.feature_supported[n] + detect += d.get("detect", d.get("group", [n])) + return detect + + @_Cache.me + def feature_flags(self, names): + """ + Return a list of CPU features flags sorted from the lowest + to highest interest. + """ + names = self.feature_sorted(self.feature_implies_c(names)) + flags = [] + for n in names: + d = self.feature_supported[n] + f = d.get("flags", []) + if not f or not self.cc_test_flags(f): + continue + flags += f + return self.cc_normalize_flags(flags) + + @_Cache.me + def feature_test(self, name, force_flags=None, macros=[]): + """ + Test a certain CPU feature against the compiler through its own + check file. + + Parameters + ---------- + name: str + Supported CPU feature name. + + force_flags: list or None, optional + If None(default), the returned flags from `feature_flags()` + will be used. + + macros : list of tuples, optional + A list of C macro definitions. + """ + if force_flags is None: + force_flags = self.feature_flags(name) + + self.dist_log( + "testing feature '%s' with flags (%s)" % ( + name, ' '.join(force_flags) + )) + # Each CPU feature must have C source code contains at + # least one intrinsic or instruction related to this feature. + test_path = os.path.join( + self.conf_check_path, "cpu_%s.c" % name.lower() + ) + if not os.path.exists(test_path): + self.dist_fatal("feature test file is not exist", test_path) + + test = self.dist_test( + test_path, force_flags + self.cc_flags["werror"], macros=macros + ) + if not test: + self.dist_log("testing failed", stderr=True) + return test + + @_Cache.me + def feature_is_supported(self, name, force_flags=None, macros=[]): + """ + Check if a certain CPU feature is supported by the platform and compiler. 
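+        Note that the test also covers all implied features, e.g. testing
+        ``SSE3`` compiles the check files of ``SSE`` and ``SSE2`` as well.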
+ + Parameters + ---------- + name: str + CPU feature name in uppercase. + + force_flags: list or None, optional + If None(default), default compiler flags for every CPU feature will + be used during test. + + macros : list of tuples, optional + A list of C macro definitions. + """ + assert(name.isupper()) + assert(force_flags is None or isinstance(force_flags, list)) + + supported = name in self.feature_supported + if supported: + for impl in self.feature_implies(name): + if not self.feature_test(impl, force_flags, macros=macros): + return False + if not self.feature_test(name, force_flags, macros=macros): + return False + return supported + + @_Cache.me + def feature_can_autovec(self, name): + """ + check if the feature can be auto-vectorized by the compiler + """ + assert(isinstance(name, str)) + d = self.feature_supported[name] + can = d.get("autovec", None) + if can is None: + valid_flags = [ + self.cc_test_flags([f]) for f in d.get("flags", []) + ] + can = valid_flags and any(valid_flags) + return can + + @_Cache.me + def feature_extra_checks(self, name): + """ + Return a list of supported extra checks after testing them against + the compiler. + + Parameters + ---------- + names: str + CPU feature name in uppercase. + """ + assert isinstance(name, str) + d = self.feature_supported[name] + extra_checks = d.get("extra_checks", []) + if not extra_checks: + return [] + + self.dist_log("Testing extra checks for feature '%s'" % name, extra_checks) + flags = self.feature_flags(name) + available = [] + not_available = [] + for chk in extra_checks: + test_path = os.path.join( + self.conf_check_path, "extra_%s.c" % chk.lower() + ) + if not os.path.exists(test_path): + self.dist_fatal("extra check file does not exist", test_path) + + is_supported = self.dist_test(test_path, flags + self.cc_flags["werror"]) + if is_supported: + available.append(chk) + else: + not_available.append(chk) + + if not_available: + self.dist_log("testing failed for checks", not_available, stderr=True) + return available + + + def feature_c_preprocessor(self, feature_name, tabs=0): + """ + Generate C preprocessor definitions and include headers of a CPU feature. + + Parameters + ---------- + 'feature_name': str + CPU feature name in uppercase. + 'tabs': int + if > 0, align the generated strings to the right depend on number of tabs. + + Returns + ------- + str, generated C preprocessor + + Examples + -------- + >>> self.feature_c_preprocessor("SSE3") + /** SSE3 **/ + #define NPY_HAVE_SSE3 1 + #include + """ + assert(feature_name.isupper()) + feature = self.feature_supported.get(feature_name) + assert(feature is not None) + + prepr = [ + "/** %s **/" % feature_name, + "#define %sHAVE_%s 1" % (self.conf_c_prefix, feature_name) + ] + prepr += [ + "#include <%s>" % h for h in feature.get("headers", []) + ] + + extra_defs = feature.get("group", []) + extra_defs += self.feature_extra_checks(feature_name) + for edef in extra_defs: + # Guard extra definitions in case of duplicate with + # another feature + prepr += [ + "#ifndef %sHAVE_%s" % (self.conf_c_prefix, edef), + "\t#define %sHAVE_%s 1" % (self.conf_c_prefix, edef), + "#endif", + ] + + if tabs > 0: + prepr = [('\t'*tabs) + l for l in prepr] + return '\n'.join(prepr) + +class _Parse: + """A helper class that parsing main arguments of `CCompilerOpt`, + also parsing configuration statements in dispatch-able sources. + + Parameters + ---------- + cpu_baseline: str or None + minimal set of required CPU features or special options. 
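+        e.g. ``"min"``, ``"sse sse2 avx2"`` or ``"max -avx512f"``.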
+ + cpu_dispatch: str or None + dispatched set of additional CPU features or special options. + + Special options can be: + - **MIN**: Enables the minimum CPU features that utilized via `_Config.conf_min_features` + - **MAX**: Enables all supported CPU features by the Compiler and platform. + - **NATIVE**: Enables all CPU features that supported by the current machine. + - **NONE**: Enables nothing + - **Operand +/-**: remove or add features, useful with options **MAX**, **MIN** and **NATIVE**. + NOTE: operand + is only added for nominal reason. + + NOTES: + - Case-insensitive among all CPU features and special options. + - Comma or space can be used as a separator. + - If the CPU feature is not supported by the user platform or compiler, + it will be skipped rather than raising a fatal error. + - Any specified CPU features to 'cpu_dispatch' will be skipped if its part of CPU baseline features + - 'cpu_baseline' force enables implied features. + + Attributes + ---------- + parse_baseline_names : list + Final CPU baseline's feature names(sorted from low to high) + parse_baseline_flags : list + Compiler flags of baseline features + parse_dispatch_names : list + Final CPU dispatch-able feature names(sorted from low to high) + parse_target_groups : dict + Dictionary containing initialized target groups that configured + through class attribute `conf_target_groups`. + + The key is represent the group name and value is a tuple + contains three items : + - bool, True if group has the 'baseline' option. + - list, list of CPU features. + - list, list of extra compiler flags. + + """ + def __init__(self, cpu_baseline, cpu_dispatch): + self._parse_policies = dict( + # POLICY NAME, (HAVE, NOT HAVE, [DEB]) + KEEP_BASELINE = ( + None, self._parse_policy_not_keepbase, + [] + ), + KEEP_SORT = ( + self._parse_policy_keepsort, + self._parse_policy_not_keepsort, + [] + ), + MAXOPT = ( + self._parse_policy_maxopt, None, + [] + ), + WERROR = ( + self._parse_policy_werror, None, + [] + ), + AUTOVEC = ( + self._parse_policy_autovec, None, + ["MAXOPT"] + ) + ) + if hasattr(self, "parse_is_cached"): + return + + self.parse_baseline_names = [] + self.parse_baseline_flags = [] + self.parse_dispatch_names = [] + self.parse_target_groups = {} + + if self.cc_noopt: + # skip parsing baseline and dispatch args and keep parsing target groups + cpu_baseline = cpu_dispatch = None + + self.dist_log("check requested baseline") + if cpu_baseline is not None: + cpu_baseline = self._parse_arg_features("cpu_baseline", cpu_baseline) + baseline_names = self.feature_names(cpu_baseline) + self.parse_baseline_flags = self.feature_flags(baseline_names) + self.parse_baseline_names = self.feature_sorted( + self.feature_implies_c(baseline_names) + ) + + self.dist_log("check requested dispatch-able features") + if cpu_dispatch is not None: + cpu_dispatch_ = self._parse_arg_features("cpu_dispatch", cpu_dispatch) + cpu_dispatch = { + f for f in cpu_dispatch_ + if f not in self.parse_baseline_names + } + conflict_baseline = cpu_dispatch_.difference(cpu_dispatch) + self.parse_dispatch_names = self.feature_sorted( + self.feature_names(cpu_dispatch) + ) + if len(conflict_baseline) > 0: + self.dist_log( + "skip features", conflict_baseline, "since its part of baseline" + ) + + self.dist_log("initialize targets groups") + for group_name, tokens in self.conf_target_groups.items(): + self.dist_log("parse target group", group_name) + GROUP_NAME = group_name.upper() + if not tokens or not tokens.strip(): + # allow empty groups, useful in case if 
there's a need + # to disable certain group since '_parse_target_tokens()' + # requires at least one valid target + self.parse_target_groups[GROUP_NAME] = ( + False, [], [] + ) + continue + has_baseline, features, extra_flags = \ + self._parse_target_tokens(tokens) + self.parse_target_groups[GROUP_NAME] = ( + has_baseline, features, extra_flags + ) + + self.parse_is_cached = True + + def parse_targets(self, source): + """ + Fetch and parse configuration statements that required for + defining the targeted CPU features, statements should be declared + in the top of source in between **C** comment and start + with a special mark **@targets**. + + Configuration statements are sort of keywords representing + CPU features names, group of statements and policies, combined + together to determine the required optimization. + + Parameters + ---------- + source: str + the path of **C** source file. + + Returns + ------- + - bool, True if group has the 'baseline' option + - list, list of CPU features + - list, list of extra compiler flags + """ + self.dist_log("looking for '@targets' inside -> ", source) + # get lines between /*@targets and */ + with open(source) as fd: + tokens = "" + max_to_reach = 1000 # good enough, isn't? + start_with = "@targets" + start_pos = -1 + end_with = "*/" + end_pos = -1 + for current_line, line in enumerate(fd): + if current_line == max_to_reach: + self.dist_fatal("reached the max of lines") + break + if start_pos == -1: + start_pos = line.find(start_with) + if start_pos == -1: + continue + start_pos += len(start_with) + tokens += line + end_pos = line.find(end_with) + if end_pos != -1: + end_pos += len(tokens) - len(line) + break + + if start_pos == -1: + self.dist_fatal("expected to find '%s' within a C comment" % start_with) + if end_pos == -1: + self.dist_fatal("expected to end with '%s'" % end_with) + + tokens = tokens[start_pos:end_pos] + return self._parse_target_tokens(tokens) + + _parse_regex_arg = re.compile(r'\s|,|([+-])') + def _parse_arg_features(self, arg_name, req_features): + if not isinstance(req_features, str): + self.dist_fatal("expected a string in '%s'" % arg_name) + + final_features = set() + # space and comma can be used as a separator + tokens = list(filter(None, re.split(self._parse_regex_arg, req_features))) + append = True # append is the default + for tok in tokens: + if tok[0] in ("#", "$"): + self.dist_fatal( + arg_name, "target groups and policies " + "aren't allowed from arguments, " + "only from dispatch-able sources" + ) + if tok == '+': + append = True + continue + if tok == '-': + append = False + continue + + TOK = tok.upper() # we use upper-case internally + features_to = set() + if TOK == "NONE": + pass + elif TOK == "NATIVE": + native = self.cc_flags["native"] + if not native: + self.dist_fatal(arg_name, + "native option isn't supported by the compiler" + ) + features_to = self.feature_names( + force_flags=native, macros=[("DETECT_FEATURES", 1)] + ) + elif TOK == "MAX": + features_to = self.feature_supported.keys() + elif TOK == "MIN": + features_to = self.feature_min + else: + if TOK in self.feature_supported: + features_to.add(TOK) + else: + if not self.feature_is_exist(TOK): + self.dist_fatal(arg_name, + ", '%s' isn't a known feature or option" % tok + ) + if append: + final_features = final_features.union(features_to) + else: + final_features = final_features.difference(features_to) + + append = True # back to default + + return final_features + + _parse_regex_target = re.compile(r'\s|[*,/]|([()])') + def 
_parse_target_tokens(self, tokens): + assert(isinstance(tokens, str)) + final_targets = [] # to keep it sorted as specified + extra_flags = [] + has_baseline = False + + skipped = set() + policies = set() + multi_target = None + + tokens = list(filter(None, re.split(self._parse_regex_target, tokens))) + if not tokens: + self.dist_fatal("expected one token at least") + + for tok in tokens: + TOK = tok.upper() + ch = tok[0] + if ch in ('+', '-'): + self.dist_fatal( + "+/- are 'not' allowed from target's groups or @targets, " + "only from cpu_baseline and cpu_dispatch parms" + ) + elif ch == '$': + if multi_target is not None: + self.dist_fatal( + "policies aren't allowed inside multi-target '()'" + ", only CPU features" + ) + policies.add(self._parse_token_policy(TOK)) + elif ch == '#': + if multi_target is not None: + self.dist_fatal( + "target groups aren't allowed inside multi-target '()'" + ", only CPU features" + ) + has_baseline, final_targets, extra_flags = \ + self._parse_token_group(TOK, has_baseline, final_targets, extra_flags) + elif ch == '(': + if multi_target is not None: + self.dist_fatal("unclosed multi-target, missing ')'") + multi_target = set() + elif ch == ')': + if multi_target is None: + self.dist_fatal("multi-target opener '(' wasn't found") + targets = self._parse_multi_target(multi_target) + if targets is None: + skipped.add(tuple(multi_target)) + else: + if len(targets) == 1: + targets = targets[0] + if targets and targets not in final_targets: + final_targets.append(targets) + multi_target = None # back to default + else: + if TOK == "BASELINE": + if multi_target is not None: + self.dist_fatal("baseline isn't allowed inside multi-target '()'") + has_baseline = True + continue + + if multi_target is not None: + multi_target.add(TOK) + continue + + if not self.feature_is_exist(TOK): + self.dist_fatal("invalid target name '%s'" % TOK) + + is_enabled = ( + TOK in self.parse_baseline_names or + TOK in self.parse_dispatch_names + ) + if is_enabled: + if TOK not in final_targets: + final_targets.append(TOK) + continue + + skipped.add(TOK) + + if multi_target is not None: + self.dist_fatal("unclosed multi-target, missing ')'") + if skipped: + self.dist_log( + "skip targets", skipped, + "not part of baseline or dispatch-able features" + ) + + final_targets = self.feature_untied(final_targets) + + # add polices dependencies + for p in list(policies): + _, _, deps = self._parse_policies[p] + for d in deps: + if d in policies: + continue + self.dist_log( + "policy '%s' force enables '%s'" % ( + p, d + )) + policies.add(d) + + # release policies filtrations + for p, (have, nhave, _) in self._parse_policies.items(): + func = None + if p in policies: + func = have + self.dist_log("policy '%s' is ON" % p) + else: + func = nhave + if not func: + continue + has_baseline, final_targets, extra_flags = func( + has_baseline, final_targets, extra_flags + ) + + return has_baseline, final_targets, extra_flags + + def _parse_token_policy(self, token): + """validate policy token""" + if len(token) <= 1 or token[-1:] == token[0]: + self.dist_fatal("'$' must stuck in the begin of policy name") + token = token[1:] + if token not in self._parse_policies: + self.dist_fatal( + "'%s' is an invalid policy name, available policies are" % token, + self._parse_policies.keys() + ) + return token + + def _parse_token_group(self, token, has_baseline, final_targets, extra_flags): + """validate group token""" + if len(token) <= 1 or token[-1:] == token[0]: + self.dist_fatal("'#' must stuck in the begin of 
group name") + + token = token[1:] + ghas_baseline, gtargets, gextra_flags = self.parse_target_groups.get( + token, (False, None, []) + ) + if gtargets is None: + self.dist_fatal( + "'%s' is an invalid target group name, " % token + \ + "available target groups are", + self.parse_target_groups.keys() + ) + if ghas_baseline: + has_baseline = True + # always keep sorting as specified + final_targets += [f for f in gtargets if f not in final_targets] + extra_flags += [f for f in gextra_flags if f not in extra_flags] + return has_baseline, final_targets, extra_flags + + def _parse_multi_target(self, targets): + """validate multi targets that defined between parentheses()""" + # remove any implied features and keep the origins + if not targets: + self.dist_fatal("empty multi-target '()'") + if not all([ + self.feature_is_exist(tar) for tar in targets + ]) : + self.dist_fatal("invalid target name in multi-target", targets) + if not all([ + ( + tar in self.parse_baseline_names or + tar in self.parse_dispatch_names + ) + for tar in targets + ]) : + return None + targets = self.feature_ahead(targets) + if not targets: + return None + # force sort multi targets, so it can be comparable + targets = self.feature_sorted(targets) + targets = tuple(targets) # hashable + return targets + + def _parse_policy_not_keepbase(self, has_baseline, final_targets, extra_flags): + """skip all baseline features""" + skipped = [] + for tar in final_targets[:]: + is_base = False + if isinstance(tar, str): + is_base = tar in self.parse_baseline_names + else: + # multi targets + is_base = all([ + f in self.parse_baseline_names + for f in tar + ]) + if is_base: + skipped.append(tar) + final_targets.remove(tar) + + if skipped: + self.dist_log("skip baseline features", skipped) + + return has_baseline, final_targets, extra_flags + + def _parse_policy_keepsort(self, has_baseline, final_targets, extra_flags): + """leave a notice that $keep_sort is on""" + self.dist_log( + "policy 'keep_sort' is on, dispatch-able targets", final_targets, "\n" + "are 'not' sorted depend on the highest interest but" + "as specified in the dispatch-able source or the extra group" + ) + return has_baseline, final_targets, extra_flags + + def _parse_policy_not_keepsort(self, has_baseline, final_targets, extra_flags): + """sorted depend on the highest interest""" + final_targets = self.feature_sorted(final_targets, reverse=True) + return has_baseline, final_targets, extra_flags + + def _parse_policy_maxopt(self, has_baseline, final_targets, extra_flags): + """append the compiler optimization flags""" + if self.cc_has_debug: + self.dist_log("debug mode is detected, policy 'maxopt' is skipped.") + elif self.cc_noopt: + self.dist_log("optimization is disabled, policy 'maxopt' is skipped.") + else: + flags = self.cc_flags["opt"] + if not flags: + self.dist_log( + "current compiler doesn't support optimization flags, " + "policy 'maxopt' is skipped", stderr=True + ) + else: + extra_flags += flags + return has_baseline, final_targets, extra_flags + + def _parse_policy_werror(self, has_baseline, final_targets, extra_flags): + """force warnings to treated as errors""" + flags = self.cc_flags["werror"] + if not flags: + self.dist_log( + "current compiler doesn't support werror flags, " + "warnings will 'not' treated as errors", stderr=True + ) + else: + self.dist_log("compiler warnings are treated as errors") + extra_flags += flags + return has_baseline, final_targets, extra_flags + + def _parse_policy_autovec(self, has_baseline, final_targets, extra_flags): 
+ """skip features that has no auto-vectorized support by compiler""" + skipped = [] + for tar in final_targets[:]: + if isinstance(tar, str): + can = self.feature_can_autovec(tar) + else: # multiple target + can = all([ + self.feature_can_autovec(t) + for t in tar + ]) + if not can: + final_targets.remove(tar) + skipped.append(tar) + + if skipped: + self.dist_log("skip non auto-vectorized features", skipped) + + return has_baseline, final_targets, extra_flags + +class CCompilerOpt(_Config, _Distutils, _Cache, _CCompiler, _Feature, _Parse): + """ + A helper class for `CCompiler` aims to provide extra build options + to effectively control of compiler optimizations that are directly + related to CPU features. + """ + def __init__(self, ccompiler, cpu_baseline="min", cpu_dispatch="max", cache_path=None): + _Config.__init__(self) + _Distutils.__init__(self, ccompiler) + _Cache.__init__(self, cache_path, self.dist_info(), cpu_baseline, cpu_dispatch) + _CCompiler.__init__(self) + _Feature.__init__(self) + if not self.cc_noopt and self.cc_has_native: + self.dist_log( + "native flag is specified through environment variables. " + "force cpu-baseline='native'" + ) + cpu_baseline = "native" + _Parse.__init__(self, cpu_baseline, cpu_dispatch) + # keep the requested features untouched, need it later for report + # and trace purposes + self._requested_baseline = cpu_baseline + self._requested_dispatch = cpu_dispatch + # key is the dispatch-able source and value is a tuple + # contains two items (has_baseline[boolean], dispatched-features[list]) + self.sources_status = getattr(self, "sources_status", {}) + # every instance should has a separate one + self.cache_private.add("sources_status") + # set it at the end to make sure the cache writing was done after init + # this class + self.hit_cache = hasattr(self, "hit_cache") + + def is_cached(self): + """ + Returns True if the class loaded from the cache file + """ + return self.cache_infile and self.hit_cache + + def cpu_baseline_flags(self): + """ + Returns a list of final CPU baseline compiler flags + """ + return self.parse_baseline_flags + + def cpu_baseline_names(self): + """ + return a list of final CPU baseline feature names + """ + return self.parse_baseline_names + + def cpu_dispatch_names(self): + """ + return a list of final CPU dispatch feature names + """ + return self.parse_dispatch_names + + def try_dispatch(self, sources, src_dir=None, ccompiler=None, **kwargs): + """ + Compile one or more dispatch-able sources and generates object files, + also generates abstract C config headers and macros that + used later for the final runtime dispatching process. + + The mechanism behind it is to takes each source file that specified + in 'sources' and branching it into several files depend on + special configuration statements that must be declared in the + top of each source which contains targeted CPU features, + then it compiles every branched source with the proper compiler flags. + + Parameters + ---------- + sources : list + Must be a list of dispatch-able sources file paths, + and configuration statements must be declared inside + each file. + + src_dir : str + Path of parent directory for the generated headers and wrapped sources. + If None(default) the files will generated in-place. + + ccompiler: CCompiler + Distutils `CCompiler` instance to be used for compilation. + If None (default), the provided instance during the initialization + will be used instead. 
+
+        **kwargs : any
+            Arguments to pass on to `CCompiler.compile()`
+
+        Returns
+        -------
+        list : generated object files
+
+        Raises
+        ------
+        CompileError
+            Raised by `CCompiler.compile()` on compile failure.
+        DistutilsError
+            Raised for errors found while checking the sanity of the
+            configuration statements.
+
+        See Also
+        --------
+        parse_targets :
+            Parsing the configuration statements of dispatch-able sources.
+        """
+        to_compile = {}
+        baseline_flags = self.cpu_baseline_flags()
+        include_dirs = kwargs.setdefault("include_dirs", [])
+
+        for src in sources:
+            output_dir = os.path.dirname(src)
+            if src_dir:
+                if not output_dir.startswith(src_dir):
+                    output_dir = os.path.join(src_dir, output_dir)
+                if output_dir not in include_dirs:
+                    # To allow including the generated config header(*.dispatch.h)
+                    # by the dispatch-able sources
+                    include_dirs.append(output_dir)
+
+            has_baseline, targets, extra_flags = self.parse_targets(src)
+            nochange = self._generate_config(output_dir, src, targets, has_baseline)
+            for tar in targets:
+                tar_src = self._wrap_target(output_dir, src, tar, nochange=nochange)
+                flags = tuple(extra_flags + self.feature_flags(tar))
+                to_compile.setdefault(flags, []).append(tar_src)
+
+            if has_baseline:
+                flags = tuple(extra_flags + baseline_flags)
+                to_compile.setdefault(flags, []).append(src)
+
+            self.sources_status[src] = (has_baseline, targets)
+
+        # For these reasons, the sources are compiled in a separate loop:
+        # - Gathering all sources with the same flags to benefit from
+        #   parallel compiling as much as possible.
+        # - To generate all config headers of the dispatch-able sources
+        #   before the compilation, in case there are dependency
+        #   relationships among them.
+        objects = []
+        for flags, srcs in to_compile.items():
+            objects += self.dist_compile(
+                srcs, list(flags), ccompiler=ccompiler, **kwargs
+            )
+        return objects
+
+    def generate_dispatch_header(self, header_path):
+        """
+        Generate the dispatch header which contains the #definitions and headers
+        of the platform-specific instruction sets for the enabled CPU baseline
+        and dispatch-able features.
+
+        It's highly recommended to take a look at the generated header
+        as well as the source files generated via `try_dispatch()`
+        in order to get the full picture.
+        """
+        self.dist_log("generate CPU dispatch header: (%s)" % header_path)
+
+        baseline_names = self.cpu_baseline_names()
+        dispatch_names = self.cpu_dispatch_names()
+        baseline_len = len(baseline_names)
+        dispatch_len = len(dispatch_names)
+
+        header_dir = os.path.dirname(header_path)
+        if not os.path.exists(header_dir):
+            self.dist_log(
+                f"dispatch header dir {header_dir} does not exist, creating it",
+                stderr=True
+            )
+            os.makedirs(header_dir)
+
+        with open(header_path, 'w') as f:
+            baseline_calls = ' \\\n'.join([
+                (
+                    "\t%sWITH_CPU_EXPAND_(MACRO_TO_CALL(%s, __VA_ARGS__))"
+                ) % (self.conf_c_prefix, f)
+                for f in baseline_names
+            ])
+            dispatch_calls = ' \\\n'.join([
+                (
+                    "\t%sWITH_CPU_EXPAND_(MACRO_TO_CALL(%s, __VA_ARGS__))"
+                ) % (self.conf_c_prefix, f)
+                for f in dispatch_names
+            ])
+            f.write(textwrap.dedent("""\
+                /*
+                 * AUTOGENERATED DON'T EDIT
+                 * Please make changes to the code generator (distutils/ccompiler_opt.py)
+                */
+                #define {pfx}WITH_CPU_BASELINE "{baseline_str}"
+                #define {pfx}WITH_CPU_DISPATCH "{dispatch_str}"
+                #define {pfx}WITH_CPU_BASELINE_N {baseline_len}
+                #define {pfx}WITH_CPU_DISPATCH_N {dispatch_len}
+                #define {pfx}WITH_CPU_EXPAND_(X) X
+                #define {pfx}WITH_CPU_BASELINE_CALL(MACRO_TO_CALL, ...) \\
+                {baseline_calls}
+                #define {pfx}WITH_CPU_DISPATCH_CALL(MACRO_TO_CALL, ...) \\
+                {dispatch_calls}
+            """).format(
+                pfx=self.conf_c_prefix, baseline_str=" ".join(baseline_names),
+                dispatch_str=" ".join(dispatch_names), baseline_len=baseline_len,
+                dispatch_len=dispatch_len, baseline_calls=baseline_calls,
+                dispatch_calls=dispatch_calls
+            ))
+            baseline_pre = ''
+            for name in baseline_names:
+                baseline_pre += self.feature_c_preprocessor(name, tabs=1) + '\n'
+
+            dispatch_pre = ''
+            for name in dispatch_names:
+                dispatch_pre += textwrap.dedent("""\
+                #ifdef {pfx}CPU_TARGET_{name}
+                {pre}
+                #endif /*{pfx}CPU_TARGET_{name}*/
+                """).format(
+                    pfx=self.conf_c_prefix_, name=name, pre=self.feature_c_preprocessor(
+                    name, tabs=1
+                ))
+
+            f.write(textwrap.dedent("""\
+            /******* baseline features *******/
+            {baseline_pre}
+            /******* dispatch features *******/
+            {dispatch_pre}
+            """).format(
+                pfx=self.conf_c_prefix_, baseline_pre=baseline_pre,
+                dispatch_pre=dispatch_pre
+            ))
+
+    def report(self, full=False):
+        report = []
+        platform_rows = []
+        baseline_rows = []
+        dispatch_rows = []
+        report.append(("Platform", platform_rows))
+        report.append(("", ""))
+        report.append(("CPU baseline", baseline_rows))
+        report.append(("", ""))
+        report.append(("CPU dispatch", dispatch_rows))
+
+        ########## platform ##########
+        platform_rows.append(("Architecture", (
+            "unsupported" if self.cc_on_noarch else self.cc_march)
+        ))
+        platform_rows.append(("Compiler", (
+            "unix-like" if self.cc_is_nocc else self.cc_name)
+        ))
+        ########## baseline ##########
+        if self.cc_noopt:
+            baseline_rows.append(("Requested", "optimization disabled"))
+        else:
+            baseline_rows.append(("Requested", repr(self._requested_baseline)))
+
+        baseline_names = self.cpu_baseline_names()
+        baseline_rows.append((
+            "Enabled", (' '.join(baseline_names) if baseline_names else "none")
+        ))
+        baseline_flags = self.cpu_baseline_flags()
+        baseline_rows.append((
+            "Flags", (' '.join(baseline_flags) if baseline_flags else "none")
+        ))
+        extra_checks = []
+        for name in baseline_names:
+            extra_checks += self.feature_extra_checks(name)
+        baseline_rows.append((
+            "Extra checks", (' '.join(extra_checks) if extra_checks else "none")
+        ))
+
+        ########## dispatch ##########
+        if self.cc_noopt:
+            dispatch_rows.append(("Requested", "optimization disabled"))
+        else:
+            dispatch_rows.append(("Requested", repr(self._requested_dispatch)))
+
+        dispatch_names = self.cpu_dispatch_names()
+        dispatch_rows.append((
+            "Enabled", (' '.join(dispatch_names) if dispatch_names else "none")
+        ))
+        ########## Generated ##########
+        # TODO:
+        # - collect object names from 'try_dispatch()',
+        #   then get the size of each object and print it
+        # - give more details about the features that were not
+        #   generated due to lack of compiler support
+        # - find a better output design.
+        #
+        target_sources = {}
+        for source, (_, targets) in self.sources_status.items():
+            for tar in targets:
+                target_sources.setdefault(tar, []).append(source)
+
+        if not full or not target_sources:
+            generated = ""
+            for tar in self.feature_sorted(target_sources):
+                sources = target_sources[tar]
+                name = tar if isinstance(tar, str) else '(%s)' % ' '.join(tar)
+                generated += name + "[%d] " % len(sources)
+            dispatch_rows.append(("Generated", generated[:-1] if generated else "none"))
+        else:
+            dispatch_rows.append(("Generated", ''))
+            for tar in self.feature_sorted(target_sources):
+                sources = target_sources[tar]
+                pretty_name = tar if isinstance(tar, str) else '(%s)' % ' '.join(tar)
+                flags = ' '.join(self.feature_flags(tar))
+                implies = ' '.join(self.feature_sorted(self.feature_implies(tar)))
+                detect = ' '.join(self.feature_detect(tar))
+                extra_checks = []
+                for name in ((tar,) if isinstance(tar, str) else tar):
+                    extra_checks += self.feature_extra_checks(name)
+                extra_checks = (' '.join(extra_checks) if extra_checks else "none")
+
+                dispatch_rows.append(('', ''))
+                dispatch_rows.append((pretty_name, implies))
+                dispatch_rows.append(("Flags", flags))
+                dispatch_rows.append(("Extra checks", extra_checks))
+                dispatch_rows.append(("Detect", detect))
+                for src in sources:
+                    dispatch_rows.append(("", src))
+
+        ###############################
+        # TODO: add support for 'markdown' format
+        text = []
+        secs_len = [len(secs) for secs, _ in report]
+        cols_len = [len(col) for _, rows in report for col, _ in rows]
+        tab = ' ' * 2
+        pad = max(max(secs_len), max(cols_len))
+        for sec, rows in report:
+            if not sec:
+                text.append("") # empty line
+                continue
+            sec += ' ' * (pad - len(sec))
+            text.append(sec + tab + ': ')
+            for col, val in rows:
+                col += ' ' * (pad - len(col))
+                text.append(tab + col + ': ' + val)
+
+        return '\n'.join(text)
+
+    def _wrap_target(self, output_dir, dispatch_src, target, nochange=False):
+        assert(isinstance(target, (str, tuple)))
+        if isinstance(target, str):
+            ext_name = target_name = target
+        else:
+            # multi-target
+            ext_name = '.'.join(target)
+            target_name = '__'.join(target)
+
+        wrap_path = os.path.join(output_dir, os.path.basename(dispatch_src))
+        wrap_path = "{0}.{2}{1}".format(*os.path.splitext(wrap_path), ext_name.lower())
+        if nochange and os.path.exists(wrap_path):
+            return wrap_path
+
+        self.dist_log("wrap dispatch-able target -> ", wrap_path)
+        # sorting for readability
+        features = self.feature_sorted(self.feature_implies_c(target))
+        target_join = "#define %sCPU_TARGET_" % self.conf_c_prefix_
+        target_defs = [target_join + f for f in features]
+        target_defs = '\n'.join(target_defs)
+
+        with open(wrap_path, "w") as fd:
+            fd.write(textwrap.dedent("""\
+            /**
+             * AUTOGENERATED DON'T EDIT
+             * Please make changes to the code generator \
+             (distutils/ccompiler_opt.py)
+            */
+            #define {pfx}CPU_TARGET_MODE
+            #define {pfx}CPU_TARGET_CURRENT {target_name}
+            {target_defs}
+            #include "{path}"
+            """).format(
+                pfx=self.conf_c_prefix_, target_name=target_name,
+                path=os.path.abspath(dispatch_src), target_defs=target_defs
+            ))
+        return wrap_path
+
+    def _generate_config(self, output_dir, dispatch_src, targets, has_baseline=False):
+        config_path = os.path.basename(dispatch_src)
+        config_path = os.path.splitext(config_path)[0] + '.h'
+        config_path = os.path.join(output_dir, config_path)
+        # check if targets didn't change to avoid recompiling
+        cache_hash = self.cache_hash(targets, has_baseline)
+        try:
+            with open(config_path) as f:
+                last_hash = f.readline().split("cache_hash:")
+                if len(last_hash) == 2 and int(last_hash[1]) == cache_hash:
+                    return True
+        except IOError:
+            pass
+
+        self.dist_log("generate dispatched config -> ", config_path)
+        dispatch_calls = []
+        for tar in targets:
+            if isinstance(tar, str):
+                target_name = tar
+            else: # multi target
+                target_name = '__'.join([t for t in tar])
+            req_detect = self.feature_detect(tar)
+            req_detect = '&&'.join([
+                "CHK(%s)" % f for f in req_detect
+            ])
+            dispatch_calls.append(
+                "\t%sCPU_DISPATCH_EXPAND_(CB((%s), %s, __VA_ARGS__))" % (
+                self.conf_c_prefix_, req_detect, target_name
+            ))
+        dispatch_calls = ' \\\n'.join(dispatch_calls)
+
+        if has_baseline:
+            baseline_calls = (
+                "\t%sCPU_DISPATCH_EXPAND_(CB(__VA_ARGS__))"
+            ) % self.conf_c_prefix_
+        else:
+            baseline_calls = ''
+
+        with open(config_path, "w") as fd:
+            fd.write(textwrap.dedent("""\
+            // cache_hash:{cache_hash}
+            /**
+             * AUTOGENERATED DON'T EDIT
+             * Please make changes to the code generator (distutils/ccompiler_opt.py)
+            */
+            #ifndef {pfx}CPU_DISPATCH_EXPAND_
+                #define {pfx}CPU_DISPATCH_EXPAND_(X) X
+            #endif
+            #undef {pfx}CPU_DISPATCH_BASELINE_CALL
+            #undef {pfx}CPU_DISPATCH_CALL
+            #define {pfx}CPU_DISPATCH_BASELINE_CALL(CB, ...) \\
+            {baseline_calls}
+            #define {pfx}CPU_DISPATCH_CALL(CHK, CB, ...) \\
+            {dispatch_calls}
+            """).format(
+                pfx=self.conf_c_prefix_, baseline_calls=baseline_calls,
+                dispatch_calls=dispatch_calls, cache_hash=cache_hash
+            ))
+        return False
+
+def new_ccompiler_opt(compiler, dispatch_hpath, **kwargs):
+    """
+    Create a new instance of 'CCompilerOpt' and generate the dispatch header
+    which contains the #definitions and headers of the platform-specific
+    instruction sets for the enabled CPU baseline and dispatch-able features.
+
+    Parameters
+    ----------
+    compiler : CCompiler instance
+    dispatch_hpath : str
+        path of the dispatch header
+
+    **kwargs: passed as-is to `CCompilerOpt(...)`
+
+    Returns
+    -------
+    new instance of CCompilerOpt
+    """
+    opt = CCompilerOpt(compiler, **kwargs)
+    if not os.path.exists(dispatch_hpath) or not opt.is_cached():
+        opt.generate_dispatch_header(dispatch_hpath)
+    return opt
diff --git a/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_asimd.c b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_asimd.c
new file mode 100644
index 0000000000000000000000000000000000000000..3ffdccf80e0721c3f021701243377d680818c3ff
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_asimd.c
@@ -0,0 +1,25 @@
+#ifdef _MSC_VER
+    #include <Intrin.h>
+#endif
+#include <arm_neon.h>
+
+int main(void)
+{
+    float32x4_t v1 = vdupq_n_f32(1.0f), v2 = vdupq_n_f32(2.0f);
+    /* MAXMIN */
+    int ret = (int)vgetq_lane_f32(vmaxnmq_f32(v1, v2), 0);
+    ret += (int)vgetq_lane_f32(vminnmq_f32(v1, v2), 0);
+    /* ROUNDING */
+    ret += (int)vgetq_lane_f32(vrndq_f32(v1), 0);
+#ifdef __aarch64__
+    {
+        float64x2_t vd1 = vdupq_n_f64(1.0), vd2 = vdupq_n_f64(2.0);
+        /* MAXMIN */
+        ret += (int)vgetq_lane_f64(vmaxnmq_f64(vd1, vd2), 0);
+        ret += (int)vgetq_lane_f64(vminnmq_f64(vd1, vd2), 0);
+        /* ROUNDING */
+        ret += (int)vgetq_lane_f64(vrndq_f64(vd1), 0);
+    }
+#endif
+    return ret;
+}
diff --git a/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_asimddp.c b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_asimddp.c
new file mode 100644
index 0000000000000000000000000000000000000000..4bd6870a24f85750f1982f482ff40eca96ad592c
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_asimddp.c
@@ -0,0 +1,18 @@
+#ifdef _MSC_VER
+    #include <Intrin.h>
+#endif
+#include <arm_neon.h>
+
+int main(void)
+{
+    uint8x16_t v1 = vdupq_n_u8((unsigned char)1), v2 = vdupq_n_u8((unsigned char)2);
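+    /* vdotq_u32 computes four-element dot products of the u8 lanes and
+     * accumulates them into the u32 lanes; getting this intrinsic to
+     * compile is what proves dot-product (AsimdDP) support here. */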
+    uint32x4_t va = vdupq_n_u32(3);
+    int ret = (int)vgetq_lane_u32(vdotq_u32(va, v1, v2), 0);
+#ifdef __aarch64__
+    ret += (int)vgetq_lane_u32(vdotq_laneq_u32(va, v1, v2, 0), 0);
+#endif
+    return ret;
+}
diff --git a/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_asimdfhm.c b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_asimdfhm.c
new file mode 100644
index 0000000000000000000000000000000000000000..9f262f20b53dfdedcc297795ecb8ec1473ca22ce
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_asimdfhm.c
@@ -0,0 +1,17 @@
+#ifdef _MSC_VER
+    #include <Intrin.h>
+#endif
+#include <arm_neon.h>
+
+int main(void)
+{
+    float16x8_t vhp = vdupq_n_f16((float16_t)1);
+    float16x4_t vlhp = vdup_n_f16((float16_t)1);
+    float32x4_t vf = vdupq_n_f32(1.0f);
+    float32x2_t vlf = vdup_n_f32(1.0f);
+
+    int ret = (int)vget_lane_f32(vfmlal_low_u32(vlf, vlhp, vlhp), 0);
+    ret += (int)vgetq_lane_f32(vfmlslq_high_u32(vf, vhp, vhp), 0);
+
+    return ret;
+}
diff --git a/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_asimdhp.c b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_asimdhp.c
new file mode 100644
index 0000000000000000000000000000000000000000..a8e563933f421878338d8d803174c4cad6df7b89
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_asimdhp.c
@@ -0,0 +1,14 @@
+#ifdef _MSC_VER
+    #include <Intrin.h>
+#endif
+#include <arm_neon.h>
+
+int main(void)
+{
+    float16x8_t vhp = vdupq_n_f16((float16_t)-1);
+    float16x4_t vlhp = vdup_n_f16((float16_t)-1);
+
+    int ret = (int)vgetq_lane_f16(vabdq_f16(vhp, vhp), 0);
+    ret += (int)vget_lane_f16(vabd_f16(vlhp, vlhp), 0);
+    return ret;
+}
diff --git a/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_avx.c b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_avx.c
new file mode 100644
index 0000000000000000000000000000000000000000..544ce2a6f1b5655eef3ae5771c1497c3634fc898
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_avx.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #ifndef __AVX__
+        #error "HOST/ARCH doesn't support AVX"
+    #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+    __m256 a = _mm256_add_ps(_mm256_loadu_ps((const float*)argv[argc-1]), _mm256_loadu_ps((const float*)argv[1]));
+    return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a));
+}
diff --git a/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_avx2.c b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_avx2.c
new file mode 100644
index 0000000000000000000000000000000000000000..aabe6ee789bee3fca8ff4f17b5b274e47af6f856
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_avx2.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #ifndef __AVX2__
+        #error "HOST/ARCH doesn't support AVX2"
+    #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+    __m256i a = _mm256_abs_epi16(_mm256_loadu_si256((const __m256i*)argv[argc-1]));
+    return _mm_cvtsi128_si32(_mm256_castsi256_si128(a));
+}
diff --git a/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_avx512_clx.c b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_avx512_clx.c
new file mode 100644
index 0000000000000000000000000000000000000000..eeeb07de6b5e1c1e664c0d3a3eaeb9052b344892
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_avx512_clx.c
@@ -0,0 +1,22 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #ifndef __AVX512VNNI__
+        #error "HOST/ARCH doesn't support CascadeLake AVX512 features"
+    #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+    /* VNNI */
+    __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
+    a = _mm512_dpbusd_epi32(a, _mm512_setzero_si512(), a);
+    return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
+}
diff --git a/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_avx512_cnl.c b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_avx512_cnl.c
new file mode 100644
index 0000000000000000000000000000000000000000..c15c34dfdca21aa703b256ad709777c73344bf89
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_avx512_cnl.c
@@ -0,0 +1,24 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #if !defined(__AVX512VBMI__) || !defined(__AVX512IFMA__)
+        #error "HOST/ARCH doesn't support CannonLake AVX512 features"
+    #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+    __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
+    /* IFMA */
+    a = _mm512_madd52hi_epu64(a, a, _mm512_setzero_si512());
+    /* VBMI */
+    a = _mm512_permutex2var_epi8(a, _mm512_setzero_si512(), a);
+    return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
+}
diff --git a/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_avx512_icl.c b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_avx512_icl.c
new file mode 100644
index 0000000000000000000000000000000000000000..816a68cac0faa45da7f26ef14cb46830e53eeb13
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_avx512_icl.c
@@ -0,0 +1,26 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #if !defined(__AVX512VBMI2__) || !defined(__AVX512BITALG__) || !defined(__AVX512VPOPCNTDQ__)
+        #error "HOST/ARCH doesn't support IceLake AVX512 features"
+    #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+    __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
+    /* VBMI2 */
+    a = _mm512_shrdv_epi64(a, a, _mm512_setzero_si512());
+    /* BITALG */
+    a = _mm512_popcnt_epi8(a);
+    /* VPOPCNTDQ */
+    a = _mm512_popcnt_epi64(a);
+    return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
+}
diff --git a/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_avx512_knl.c b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_avx512_knl.c
new file mode 100644
index 0000000000000000000000000000000000000000..91f7dd1dc77beb31a4179664e47efea158575eb4
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_avx512_knl.c
@@ -0,0 +1,25 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #if !defined(__AVX512ER__) || !defined(__AVX512PF__)
+        #error "HOST/ARCH doesn't support Knights Landing AVX512 features"
+    #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+    int base[128];
+    __m512d ad = _mm512_loadu_pd((const __m512d*)argv[argc-1]);
+    /* ER */
+    __m512i a = _mm512_castpd_si512(_mm512_exp2a23_pd(ad));
+    /* PF */
+    _mm512_mask_prefetch_i64scatter_pd(base, _mm512_cmpeq_epi64_mask(a, a), a, 1, _MM_HINT_T1);
+    return base[0];
+}
diff --git a/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_avx512_knm.c b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_avx512_knm.c
new file mode 100644
index 0000000000000000000000000000000000000000..edfa06cbd2977c144bbcc1941ecb4ab447c93ead
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_avx512_knm.c
@@ -0,0 +1,32 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
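+     * Note: the 4FMAPS/4VNNIW pair tested below is specific to the
+     * Knights Mill Xeon Phi parts, hence the dedicated "KNM" group.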
+     */
+    #if !defined(__AVX5124FMAPS__) || !defined(__AVX5124VNNIW__) || !defined(__AVX512VPOPCNTDQ__)
+        #error "HOST/ARCH doesn't support Knights Mill AVX512 features"
+    #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+    __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
+    __m512 b = _mm512_loadu_ps((const __m512*)argv[argc-2]);
+
+    /* 4FMAPS */
+    b = _mm512_4fmadd_ps(b, b, b, b, b, NULL);
+    /* 4VNNIW */
+    a = _mm512_4dpwssd_epi32(a, a, a, a, a, NULL);
+    /* VPOPCNTDQ */
+    a = _mm512_popcnt_epi64(a);
+
+    a = _mm512_add_epi32(a, _mm512_castps_si512(b));
+    return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
+}
diff --git a/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_avx512_skx.c b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_avx512_skx.c
new file mode 100644
index 0000000000000000000000000000000000000000..fd007cc7e81684dfdac69b4475fc9150d247e147
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_avx512_skx.c
@@ -0,0 +1,26 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #if !defined(__AVX512VL__) || !defined(__AVX512BW__) || !defined(__AVX512DQ__)
+        #error "HOST/ARCH doesn't support SkyLake AVX512 features"
+    #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+    __m512i aa = _mm512_abs_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1]));
+    /* VL */
+    __m256i a = _mm256_abs_epi64(_mm512_extracti64x4_epi64(aa, 1));
+    /* DQ */
+    __m512i b = _mm512_broadcast_i32x8(a);
+    /* BW */
+    b = _mm512_abs_epi16(b);
+    return _mm_cvtsi128_si32(_mm512_castsi512_si128(b));
+}
diff --git a/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_avx512cd.c b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_avx512cd.c
new file mode 100644
index 0000000000000000000000000000000000000000..09e546007c33442e39efd1a1e27d18e87259ec95
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_avx512cd.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #ifndef __AVX512CD__
+        #error "HOST/ARCH doesn't support AVX512CD"
+    #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+    __m512i a = _mm512_lzcnt_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1]));
+    return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
+}
diff --git a/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_avx512f.c b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_avx512f.c
new file mode 100644
index 0000000000000000000000000000000000000000..cbe502d3b25e5159e083a148313168eeb2a72a80
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_avx512f.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #ifndef __AVX512F__
+        #error "HOST/ARCH doesn't support AVX512F"
+    #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+    __m512i a = _mm512_abs_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1]));
+    return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
+}
diff --git a/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_f16c.c b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_f16c.c
new file mode 100644
index 0000000000000000000000000000000000000000..b359595a9a0ff22ebdde100018286f7c947a83ab
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_f16c.c
@@ -0,0 +1,22 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #ifndef __F16C__
+        #error "HOST/ARCH doesn't support F16C"
+    #endif
+#endif
+
+#include <emmintrin.h>
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+    __m128 a = _mm_cvtph_ps(_mm_loadu_si128((const __m128i*)argv[argc-1]));
+    __m256 a8 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*)argv[argc-2]));
+    return (int)(_mm_cvtss_f32(a) + _mm_cvtss_f32(_mm256_castps256_ps128(a8)));
+}
diff --git a/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_fma3.c b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_fma3.c
new file mode 100644
index 0000000000000000000000000000000000000000..03bf0f470eed36bbc581bab269feb3addc48bd3e
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_fma3.c
@@ -0,0 +1,24 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
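+     * Note: __AVX2__ is accepted as a proxy in the check below because
+     * this compiler enables FMA3 whenever AVX2 code generation is enabled.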
+     */
+    #if !defined(__FMA__) && !defined(__AVX2__)
+        #error "HOST/ARCH doesn't support FMA3"
+    #endif
+#endif
+
+#include <xmmintrin.h>
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+    __m256 a = _mm256_loadu_ps((const float*)argv[argc-1]);
+    a = _mm256_fmadd_ps(a, a, a);
+    return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a));
+}
diff --git a/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_fma4.c b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_fma4.c
new file mode 100644
index 0000000000000000000000000000000000000000..76e03bb916b76fa9b4367421c009632636dbb7f4
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_fma4.c
@@ -0,0 +1,13 @@
+#include <immintrin.h>
+#ifdef _MSC_VER
+    #include <ammintrin.h>
+#else
+    #include <x86intrin.h>
+#endif
+
+int main(int argc, char **argv)
+{
+    __m256 a = _mm256_loadu_ps((const float*)argv[argc-1]);
+    a = _mm256_macc_ps(a, a, a);
+    return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a));
+}
diff --git a/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_neon.c b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_neon.c
new file mode 100644
index 0000000000000000000000000000000000000000..0cc8201e391c76644d7ff304b35298ef5d0045df
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_neon.c
@@ -0,0 +1,15 @@
+#ifdef _MSC_VER
+    #include <Intrin.h>
+#endif
+#include <arm_neon.h>
+
+int main(void)
+{
+    float32x4_t v1 = vdupq_n_f32(1.0f), v2 = vdupq_n_f32(2.0f);
+    int ret = (int)vgetq_lane_f32(vmulq_f32(v1, v2), 0);
+#ifdef __aarch64__
+    float64x2_t vd1 = vdupq_n_f64(1.0), vd2 = vdupq_n_f64(2.0);
+    ret += (int)vgetq_lane_f64(vmulq_f64(vd1, vd2), 0);
+#endif
+    return ret;
+}
diff --git a/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_neon_fp16.c b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_neon_fp16.c
new file mode 100644
index 0000000000000000000000000000000000000000..7df2d88647f50108f3942a5f9d1b00a57659d1e5
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_neon_fp16.c
@@ -0,0 +1,11 @@
+#ifdef _MSC_VER
+    #include <Intrin.h>
+#endif
+#include <arm_neon.h>
+
+int main(void)
+{
+    short z4[] = {0, 0, 0, 0, 0, 0, 0, 0};
+    float32x4_t v_z4 = vcvt_f32_f16((float16x4_t)vld1_s16((const short*)z4));
+    return (int)vgetq_lane_f32(v_z4, 0);
+}
diff --git a/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_neon_vfpv4.c b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_neon_vfpv4.c
new file mode 100644
index 0000000000000000000000000000000000000000..7a26605deb11156fa8e30d8254e873bf219e71ad
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_neon_vfpv4.c
@@ -0,0 +1,19 @@
+#ifdef _MSC_VER
+    #include <Intrin.h>
+#endif
+#include <arm_neon.h>
+
+int main(void)
+{
+    float32x4_t v1 = vdupq_n_f32(1.0f);
+    float32x4_t v2 = vdupq_n_f32(2.0f);
+    float32x4_t v3 = vdupq_n_f32(3.0f);
+    int ret = (int)vgetq_lane_f32(vfmaq_f32(v1, v2, v3), 0);
+#ifdef __aarch64__
+    float64x2_t vd1 = vdupq_n_f64(1.0);
+    float64x2_t vd2 = vdupq_n_f64(2.0);
+    float64x2_t vd3 = vdupq_n_f64(3.0);
+    ret += (int)vgetq_lane_f64(vfmaq_f64(vd1, vd2, vd3), 0);
+#endif
+    return ret;
+}
diff --git a/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_popcnt.c b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_popcnt.c
new file mode 100644
index 0000000000000000000000000000000000000000..3e54fe8068fcc69de2f764073e5951142105c20a
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_popcnt.c
@@ -0,0 +1,32 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #if !defined(__SSE4_2__) && !defined(__POPCNT__)
+        #error "HOST/ARCH doesn't support POPCNT"
+    #endif
+#endif
+
+#ifdef _MSC_VER
+    #include <nmmintrin.h>
+#else
+    #include <popcntintrin.h>
+#endif
+
+int main(int argc, char **argv)
+{
+    // To make sure the popcnt instructions are generated
+    // and tested against the assembler
+    unsigned long long a = *((unsigned long long*)argv[argc-1]);
+    unsigned int b = *((unsigned int*)argv[argc-2]);
+
+#if defined(_M_X64) || defined(__x86_64__)
+    a = _mm_popcnt_u64(a);
+#endif
+    b = _mm_popcnt_u32(b);
+    return (int)a + b;
+}
diff --git a/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_sse.c b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_sse.c
new file mode 100644
index 0000000000000000000000000000000000000000..063ccf738c0f62159cfd9262447a869492d9ac97
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_sse.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #ifndef __SSE__
+        #error "HOST/ARCH doesn't support SSE"
+    #endif
+#endif
+
+#include <xmmintrin.h>
+
+int main(void)
+{
+    __m128 a = _mm_add_ps(_mm_setzero_ps(), _mm_setzero_ps());
+    return (int)_mm_cvtss_f32(a);
+}
diff --git a/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_sse2.c b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_sse2.c
new file mode 100644
index 0000000000000000000000000000000000000000..d88c89122b3b94129f35f04fb83191c052e85d59
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_sse2.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #ifndef __SSE2__
+        #error "HOST/ARCH doesn't support SSE2"
+    #endif
+#endif
+
+#include <emmintrin.h>
+
+int main(void)
+{
+    __m128i a = _mm_add_epi16(_mm_setzero_si128(), _mm_setzero_si128());
+    return _mm_cvtsi128_si32(a);
+}
diff --git a/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_sse3.c b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_sse3.c
new file mode 100644
index 0000000000000000000000000000000000000000..54c5bbaa111a58c6cffc181648e35e5f86ebdb9f
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_sse3.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #ifndef __SSE3__
+        #error "HOST/ARCH doesn't support SSE3"
+    #endif
+#endif
+
+#include <pmmintrin.h>
+
+int main(void)
+{
+    __m128 a = _mm_hadd_ps(_mm_setzero_ps(), _mm_setzero_ps());
+    return (int)_mm_cvtss_f32(a);
+}
diff --git a/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_sse41.c b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_sse41.c
new file mode 100644
index 0000000000000000000000000000000000000000..a0be9920451b8c3fa0fee24dcf6405332e028d07
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_sse41.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #ifndef __SSE4_1__
+        #error "HOST/ARCH doesn't support SSE41"
+    #endif
+#endif
+
+#include <smmintrin.h>
+
+int main(void)
+{
+    __m128 a = _mm_floor_ps(_mm_setzero_ps());
+    return (int)_mm_cvtss_f32(a);
+}
diff --git a/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_sse42.c b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_sse42.c
new file mode 100644
index 0000000000000000000000000000000000000000..d3da06ab3dbeda8ac3232b65f000598aeaa9fbb8
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_sse42.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #ifndef __SSE4_2__
+        #error "HOST/ARCH doesn't support SSE42"
+    #endif
+#endif
+
+#include <smmintrin.h>
+
+int main(void)
+{
+    __m128 a = _mm_hadd_ps(_mm_setzero_ps(), _mm_setzero_ps());
+    return (int)_mm_cvtss_f32(a);
+}
diff --git a/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_ssse3.c b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_ssse3.c
new file mode 100644
index 0000000000000000000000000000000000000000..ad91d6a91050fea9c32310db5eba9c1259a40aea
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_ssse3.c
@@ -0,0 +1,22 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
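+     * Note: the probe below uses _mm_hadd_epi16, the integer horizontal
+     * add introduced by SSSE3 (SSE3 only provides the float variants).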
+     */
+    #ifndef __SSSE3__
+        #error "HOST/ARCH doesn't support SSSE3"
+    #endif
+#endif
+
+#include <tmmintrin.h>
+
+int main(void)
+{
+    __m128i a = _mm_hadd_epi16(_mm_setzero_si128(), _mm_setzero_si128());
+    return (int)_mm_cvtsi128_si32(a);
+}
diff --git a/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_vsx.c b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_vsx.c
new file mode 100644
index 0000000000000000000000000000000000000000..9064998af7405eef530febe1c26a6bad1fb5e4b2
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_vsx.c
@@ -0,0 +1,21 @@
+#ifndef __VSX__
+    #error "VSX is not supported"
+#endif
+#include <altivec.h>
+
+#if (defined(__GNUC__) && !defined(vec_xl)) || (defined(__clang__) && !defined(__IBMC__))
+    #define vsx_ld vec_vsx_ld
+    #define vsx_st vec_vsx_st
+#else
+    #define vsx_ld vec_xl
+    #define vsx_st vec_xst
+#endif
+
+int main(void)
+{
+    unsigned int zout[4];
+    unsigned int z4[] = {0, 0, 0, 0};
+    __vector unsigned int v_z4 = vsx_ld(0, z4);
+    vsx_st(v_z4, 0, zout);
+    return zout[0];
+}
diff --git a/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_vsx2.c b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_vsx2.c
new file mode 100644
index 0000000000000000000000000000000000000000..006a1938b0bb7586ad1395198413e4715e37bf88
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_vsx2.c
@@ -0,0 +1,13 @@
+#ifndef __VSX__
+    #error "VSX is not supported"
+#endif
+#include <altivec.h>
+
+typedef __vector unsigned long long v_uint64x2;
+
+int main(void)
+{
+    v_uint64x2 z2 = (v_uint64x2){0, 0};
+    z2 = (v_uint64x2)vec_cmpeq(z2, z2);
+    return (int)vec_extract(z2, 0);
+}
diff --git a/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_vsx3.c b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_vsx3.c
new file mode 100644
index 0000000000000000000000000000000000000000..dd255f8f34beb3c01ce335a3ac852297bc3b9470
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_vsx3.c
@@ -0,0 +1,13 @@
+#ifndef __VSX__
+    #error "VSX is not supported"
+#endif
+#include <altivec.h>
+
+typedef __vector unsigned int v_uint32x4;
+
+int main(void)
+{
+    v_uint32x4 z4 = (v_uint32x4){0, 0, 0, 0};
+    z4 = vec_absd(z4, z4);
+    return (int)vec_extract(z4, 0);
+}
diff --git a/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_xop.c b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_xop.c
new file mode 100644
index 0000000000000000000000000000000000000000..5097ca30b2b5f71b9253cada27da230fbd724e6c
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/checks/cpu_xop.c
@@ -0,0 +1,12 @@
+#include <immintrin.h>
+#ifdef _MSC_VER
+    #include <ammintrin.h>
+#else
+    #include <x86intrin.h>
+#endif
+
+int main(void)
+{
+    __m128i a = _mm_comge_epu32(_mm_setzero_si128(), _mm_setzero_si128());
+    return _mm_cvtsi128_si32(a);
+}
diff --git a/MLPY/Lib/site-packages/numpy/distutils/checks/extra_avx512bw_mask.c b/MLPY/Lib/site-packages/numpy/distutils/checks/extra_avx512bw_mask.c
new file mode 100644
index 0000000000000000000000000000000000000000..121f574c1b7ddb4ef86d4ba2081a74c5a7c68309
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/checks/extra_avx512bw_mask.c
@@ -0,0 +1,18 @@
+#include <immintrin.h>
+/**
+ * Test BW mask operations due to:
+ * - MSVC has supported them since VS2019, see
+ *   https://developercommunity.visualstudio.com/content/problem/518298/missing-avx512bw-mask-intrinsics.html
+ * - Clang >= v8.0
+ * - GCC >= v7.1
+ */
+int main(void)
+{
+    __mmask64 m64 = _mm512_cmpeq_epi8_mask(_mm512_set1_epi8((char)1), _mm512_set1_epi8((char)1));
+    m64 = _kor_mask64(m64, m64);
+    m64 = _kxor_mask64(m64, m64);
+    m64 = _cvtu64_mask64(_cvtmask64_u64(m64));
+    m64 = _mm512_kunpackd(m64, m64);
+    m64 = (__mmask64)_mm512_kunpackw((__mmask32)m64, (__mmask32)m64);
+    return (int)_cvtmask64_u64(m64);
+}
diff --git a/MLPY/Lib/site-packages/numpy/distutils/checks/extra_avx512dq_mask.c b/MLPY/Lib/site-packages/numpy/distutils/checks/extra_avx512dq_mask.c
new file mode 100644
index 0000000000000000000000000000000000000000..6e3c7a7c36642eb2a4ec30bfc61e51ff325d722e
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/checks/extra_avx512dq_mask.c
@@ -0,0 +1,16 @@
+#include <immintrin.h>
+/**
+ * Test DQ mask operations due to:
+ * - MSVC has supported them since VS2019, see
+ *   https://developercommunity.visualstudio.com/content/problem/518298/missing-avx512bw-mask-intrinsics.html
+ * - Clang >= v8.0
+ * - GCC >= v7.1
+ */
+int main(void)
+{
+    __mmask8 m8 = _mm512_cmpeq_epi64_mask(_mm512_set1_epi64(1), _mm512_set1_epi64(1));
+    m8 = _kor_mask8(m8, m8);
+    m8 = _kxor_mask8(m8, m8);
+    m8 = _cvtu32_mask8(_cvtmask8_u32(m8));
+    return (int)_cvtmask8_u32(m8);
+}
diff --git a/MLPY/Lib/site-packages/numpy/distutils/checks/extra_avx512f_reduce.c b/MLPY/Lib/site-packages/numpy/distutils/checks/extra_avx512f_reduce.c
new file mode 100644
index 0000000000000000000000000000000000000000..539b386ac4e74ef2401f11ee2585e5bd3e9d129f
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/checks/extra_avx512f_reduce.c
@@ -0,0 +1,41 @@
+#include <immintrin.h>
+/**
+ * The following intrinsics don't have direct native support but compilers
+ * tend to emulate them.
+ * They're usually supported by gcc >= 7.1, clang >= 4 and icc >= 19
+ */
+int main(void)
+{
+    __m512 one_ps = _mm512_set1_ps(1.0f);
+    __m512d one_pd = _mm512_set1_pd(1.0);
+    __m512i one_i64 = _mm512_set1_epi64(1);
+    // add
+    float sum_ps = _mm512_reduce_add_ps(one_ps);
+    double sum_pd = _mm512_reduce_add_pd(one_pd);
+    int sum_int = (int)_mm512_reduce_add_epi64(one_i64);
+    sum_int += (int)_mm512_reduce_add_epi32(one_i64);
+    // mul
+    sum_ps += _mm512_reduce_mul_ps(one_ps);
+    sum_pd += _mm512_reduce_mul_pd(one_pd);
+    sum_int += (int)_mm512_reduce_mul_epi64(one_i64);
+    sum_int += (int)_mm512_reduce_mul_epi32(one_i64);
+    // min
+    sum_ps += _mm512_reduce_min_ps(one_ps);
+    sum_pd += _mm512_reduce_min_pd(one_pd);
+    sum_int += (int)_mm512_reduce_min_epi32(one_i64);
+    sum_int += (int)_mm512_reduce_min_epu32(one_i64);
+    sum_int += (int)_mm512_reduce_min_epi64(one_i64);
+    // max
+    sum_ps += _mm512_reduce_max_ps(one_ps);
+    sum_pd += _mm512_reduce_max_pd(one_pd);
+    sum_int += (int)_mm512_reduce_max_epi32(one_i64);
+    sum_int += (int)_mm512_reduce_max_epu32(one_i64);
+    sum_int += (int)_mm512_reduce_max_epi64(one_i64);
+    // and
+    sum_int += (int)_mm512_reduce_and_epi32(one_i64);
+    sum_int += (int)_mm512_reduce_and_epi64(one_i64);
+    // or
+    sum_int += (int)_mm512_reduce_or_epi32(one_i64);
+    sum_int += (int)_mm512_reduce_or_epi64(one_i64);
+    return (int)sum_ps + (int)sum_pd + sum_int;
+}
diff --git a/MLPY/Lib/site-packages/numpy/distutils/checks/extra_vsx_asm.c b/MLPY/Lib/site-packages/numpy/distutils/checks/extra_vsx_asm.c
new file mode 100644
index 0000000000000000000000000000000000000000..2b44c7a7fb96f86ffd3653698da091bfc68f36cc
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/checks/extra_vsx_asm.c
@@ -0,0 +1,36 @@
+/**
+ * Testing ASM VSX register number fixer '%x'
+ *
+ * Old versions of Clang don't support %x in the inline asm template,
+ * which fixes the register number when using any of the register constraints wa, wd, wf.
+ *
+ * xref:
+ * - https://bugs.llvm.org/show_bug.cgi?id=31837
+ * - https://gcc.gnu.org/onlinedocs/gcc/Machine-Constraints.html
+ */
+#ifndef __VSX__
+    #error "VSX is not supported"
+#endif
+#include <altivec.h>
+
+#if (defined(__GNUC__) && !defined(vec_xl)) || (defined(__clang__) && !defined(__IBMC__))
+    #define vsx_ld vec_vsx_ld
+    #define vsx_st vec_vsx_st
+#else
+    #define vsx_ld vec_xl
+    #define vsx_st vec_xst
+#endif
+
+int main(void)
+{
+    float z4[] = {0, 0, 0, 0};
+    signed int zout[] = {0, 0, 0, 0};
+
+    __vector float vz4 = vsx_ld(0, z4);
+    __vector signed int asm_ret = vsx_ld(0, zout);
+
+    __asm__ ("xvcvspsxws %x0,%x1" : "=wa" (asm_ret) : "wa" (vz4));
+
+    vsx_st(asm_ret, 0, zout);
+    return zout[0];
+}
diff --git a/MLPY/Lib/site-packages/numpy/distutils/checks/test_flags.c b/MLPY/Lib/site-packages/numpy/distutils/checks/test_flags.c
new file mode 100644
index 0000000000000000000000000000000000000000..2fcd23daf81229b35933db3f745fa6156aba7039
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/checks/test_flags.c
@@ -0,0 +1 @@
+int test_flags;
diff --git a/MLPY/Lib/site-packages/numpy/distutils/command/__init__.py b/MLPY/Lib/site-packages/numpy/distutils/command/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3af55f092e6f8b3b04bb2b9f4505e8cc3fff2308
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/command/__init__.py
@@ -0,0 +1,35 @@
+"""distutils.command
+
+Package containing implementation of all the standard Distutils
+commands.
+
+"""
+
+__revision__ = "$Id: __init__.py,v 1.3 2005/05/16 11:08:49 pearu Exp $"
+
+distutils_all = [ #'build_py',
+                  'clean',
+                  'install_clib',
+                  'install_scripts',
+                  'bdist',
+                  'bdist_dumb',
+                  'bdist_wininst',
+                ]
+
+__import__('distutils.command', globals(), locals(), distutils_all)
+
+__all__ = ['build',
+           'config_compiler',
+           'config',
+           'build_src',
+           'build_py',
+           'build_ext',
+           'build_clib',
+           'build_scripts',
+           'install',
+           'install_data',
+           'install_headers',
+           'install_lib',
+           'bdist_rpm',
+           'sdist',
+          ] + distutils_all
diff --git a/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/__init__.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c048cbdef0217c44a076a569cb78ae1c8d1e9c5d
Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/__init__.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/autodist.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/autodist.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..99b2cddc766bd5bf863d80a4ca837a118bd9ef86
Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/autodist.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/bdist_rpm.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/bdist_rpm.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d77e454316e680e5b7a4551bdcb68b61e7df221a
Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/bdist_rpm.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/build.cpython-39.pyc
b/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/build.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e68b29dcfecf3a50c0669188fc06fade05e1139c Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/build.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/build_clib.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/build_clib.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67f33d42e5863e18cc9c9faa45c5524d203ab23f Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/build_clib.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/build_ext.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/build_ext.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89aefc9cf9a32f771f76361d062e65c3df97eb5e Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/build_ext.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/build_py.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/build_py.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..26bca409711552159d7eb88dc50f0cadc0ff6b07 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/build_py.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/build_scripts.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/build_scripts.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67b5aec8eef86726e9a36101504969d57f241442 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/build_scripts.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/build_src.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/build_src.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58b24344cda98b2a94a88afce891fb1b512817c7 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/build_src.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/config.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/config.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..51a8501d750691a7e07b112be02a702670ee4ec7 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/config.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/config_compiler.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/config_compiler.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0e196cb04b5c375b94c41cef99d29a14f3e8817 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/config_compiler.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/develop.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/develop.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c57063660289846210c325247c21a7e4faf8f90 Binary files /dev/null and 
b/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/develop.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/egg_info.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/egg_info.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d7dbfe37c71f1833e46343956622ce3ec99bdb77
Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/egg_info.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/install.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/install.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..94cf59301e9fd72347b251483818aee18f98b632
Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/install.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/install_clib.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/install_clib.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3f95c2f5385295f1bd41bd76a4ca6e6b9a08d3af
Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/install_clib.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/install_data.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/install_data.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cd7cbcfa1d25fd907d19706a484b24f24d260df9
Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/install_data.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/install_headers.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/install_headers.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..72d285815c9236472457b1b1fccae01bc1d4d0c6
Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/install_headers.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/sdist.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/sdist.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b6688ac97cef45c6aceaec1a6ad63aaa50d9539c
Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/command/__pycache__/sdist.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/numpy/distutils/command/autodist.py b/MLPY/Lib/site-packages/numpy/distutils/command/autodist.py
new file mode 100644
index 0000000000000000000000000000000000000000..815c86216d432daffeb47b8f71f19527c0c6b0d4
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/command/autodist.py
@@ -0,0 +1,156 @@
+"""This module implements additional tests à la autoconf which can be useful.
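+
+Each helper takes a distutils ``config`` command instance and try-compiles a
+tiny C snippet, returning the accepted keyword or a boolean. A minimal usage
+sketch (assuming ``cmd`` is the configured ``config`` command instance that
+distutils passes around during ``setup.py config``):
+
+    from numpy.distutils.command.autodist import check_inline
+    inline_kw = check_inline(cmd)  # e.g. 'inline', '__inline__' or ''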
+
+"""
+import textwrap
+
+# We put them here since they could be easily reused outside numpy.distutils
+
+def check_inline(cmd):
+    """Return the inline identifier (may be empty)."""
+    cmd._check_compiler()
+    body = textwrap.dedent("""
+        #ifndef __cplusplus
+        static %(inline)s int static_func (void)
+        {
+            return 0;
+        }
+        %(inline)s int nostatic_func (void)
+        {
+            return 0;
+        }
+        #endif""")
+
+    for kw in ['inline', '__inline__', '__inline']:
+        st = cmd.try_compile(body % {'inline': kw}, None, None)
+        if st:
+            return kw
+
+    return ''
+
+
+def check_restrict(cmd):
+    """Return the restrict identifier (may be empty)."""
+    cmd._check_compiler()
+    body = textwrap.dedent("""
+        static int static_func (char * %(restrict)s a)
+        {
+            return 0;
+        }
+        """)
+
+    for kw in ['restrict', '__restrict__', '__restrict']:
+        st = cmd.try_compile(body % {'restrict': kw}, None, None)
+        if st:
+            return kw
+
+    return ''
+
+
+def check_compiler_gcc(cmd):
+    """Check if the compiler is GCC."""
+
+    cmd._check_compiler()
+    body = textwrap.dedent("""
+        int
+        main()
+        {
+        #if (! defined __GNUC__)
+            #error gcc required
+        #endif
+            return 0;
+        }
+        """)
+    return cmd.try_compile(body, None, None)
+
+
+def check_gcc_version_at_least(cmd, major, minor=0, patchlevel=0):
+    """
+    Check that the gcc version is at least the specified version."""
+
+    cmd._check_compiler()
+    version = '.'.join([str(major), str(minor), str(patchlevel)])
+    body = textwrap.dedent("""
+        int
+        main()
+        {
+        #if (! defined __GNUC__) || (__GNUC__ < %(major)d) || \\
+            (__GNUC__ == %(major)d && __GNUC_MINOR__ < %(minor)d) || \\
+            (__GNUC__ == %(major)d && __GNUC_MINOR__ == %(minor)d && __GNUC_PATCHLEVEL__ < %(patchlevel)d)
+            #error gcc >= %(version)s required
+        #endif
+            return 0;
+        }
+        """)
+    kw = {'version': version, 'major': major, 'minor': minor,
+          'patchlevel': patchlevel}
+
+    return cmd.try_compile(body % kw, None, None)
+
+
+def check_gcc_function_attribute(cmd, attribute, name):
+    """Return True if the given function attribute is supported."""
+    cmd._check_compiler()
+    body = textwrap.dedent("""
+        #pragma GCC diagnostic error "-Wattributes"
+        #pragma clang diagnostic error "-Wattributes"
+
+        int %s %s(void* unused)
+        {
+            return 0;
+        }
+
+        int
+        main()
+        {
+            return 0;
+        }
+        """) % (attribute, name)
+    return cmd.try_compile(body, None, None) != 0
+
+
+def check_gcc_function_attribute_with_intrinsics(cmd, attribute, name, code,
+                                                 include):
+    """Return True if the given function attribute is supported with
+    intrinsics."""
+    cmd._check_compiler()
+    body = textwrap.dedent("""
+        #include<%s>
+        int %s %s(void)
+        {
+            %s;
+            return 0;
+        }
+
+        int
+        main()
+        {
+            return 0;
+        }
+        """) % (include, attribute, name, code)
+    return cmd.try_compile(body, None, None) != 0
+
+
+def check_gcc_variable_attribute(cmd, attribute):
+    """Return True if the given variable attribute is supported."""
+    cmd._check_compiler()
+    body = textwrap.dedent("""
+        #pragma GCC diagnostic error "-Wattributes"
+        #pragma clang diagnostic error "-Wattributes"
+
+        int %s foo;
+
+        int
+        main()
+        {
+            return 0;
+        }
+        """) % (attribute, )
+    return cmd.try_compile(body, None, None) != 0
diff --git a/MLPY/Lib/site-packages/numpy/distutils/command/bdist_rpm.py b/MLPY/Lib/site-packages/numpy/distutils/command/bdist_rpm.py
new file mode 100644
index 0000000000000000000000000000000000000000..341e38a9d5c580a30098cfbc9cf5c388b1eccc3e
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/command/bdist_rpm.py
@@ -0,0 +1,22 @@
+import os
+import sys
+if 'setuptools' in sys.modules:
+    from setuptools.command.bdist_rpm import bdist_rpm as old_bdist_rpm
+else:
+    from distutils.command.bdist_rpm import bdist_rpm as old_bdist_rpm
import bdist_rpm as old_bdist_rpm + +class bdist_rpm(old_bdist_rpm): + + def _make_spec_file(self): + spec_file = old_bdist_rpm._make_spec_file(self) + + # Replace hardcoded setup.py script name + # with the real setup script name. + setup_py = os.path.basename(sys.argv[0]) + if setup_py == 'setup.py': + return spec_file + new_spec_file = [] + for line in spec_file: + line = line.replace('setup.py', setup_py) + new_spec_file.append(line) + return new_spec_file diff --git a/MLPY/Lib/site-packages/numpy/distutils/command/build.py b/MLPY/Lib/site-packages/numpy/distutils/command/build.py new file mode 100644 index 0000000000000000000000000000000000000000..72a9df6bf50343466faad51e731b7937c2682169 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/command/build.py @@ -0,0 +1,61 @@ +import os +import sys +from distutils.command.build import build as old_build +from distutils.util import get_platform +from numpy.distutils.command.config_compiler import show_fortran_compilers + +class build(old_build): + + sub_commands = [('config_cc', lambda *args: True), + ('config_fc', lambda *args: True), + ('build_src', old_build.has_ext_modules), + ] + old_build.sub_commands + + user_options = old_build.user_options + [ + ('fcompiler=', None, + "specify the Fortran compiler type"), + ('warn-error', None, + "turn all warnings into errors (-Werror)"), + ('cpu-baseline=', None, + "specify a list of enabled baseline CPU optimizations"), + ('cpu-dispatch=', None, + "specify a list of dispatched CPU optimizations"), + ('disable-optimization', None, + "disable CPU optimized code(dispatch,simd,fast...)"), + ('simd-test=', None, + "specify a list of CPU optimizations to be tested against NumPy SIMD interface"), + ] + + help_options = old_build.help_options + [ + ('help-fcompiler', None, "list available Fortran compilers", + show_fortran_compilers), + ] + + def initialize_options(self): + old_build.initialize_options(self) + self.fcompiler = None + self.warn_error = False + self.cpu_baseline = "min" + self.cpu_dispatch = "max -xop -fma4" # drop AMD legacy features by default + self.disable_optimization = False + """ + the '_simd' module is a very large. Adding more dispatched features + will increase binary size and compile time. By default we minimize + the targeted features to those most commonly used by the NumPy SIMD interface(NPYV), + NOTE: any specified features will be ignored if they're: + - part of the baseline(--cpu-baseline) + - not part of dispatch-able features(--cpu-dispatch) + - not supported by compiler or platform + """ + self.simd_test = "BASELINE SSE2 SSE42 XOP FMA4 (FMA3 AVX2) AVX512F AVX512_SKX VSX VSX2 VSX3 NEON ASIMD" + + def finalize_options(self): + build_scripts = self.build_scripts + old_build.finalize_options(self) + plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2]) + if build_scripts is None: + self.build_scripts = os.path.join(self.build_base, + 'scripts' + plat_specifier) + + def run(self): + old_build.run(self) diff --git a/MLPY/Lib/site-packages/numpy/distutils/command/build_clib.py b/MLPY/Lib/site-packages/numpy/distutils/command/build_clib.py new file mode 100644 index 0000000000000000000000000000000000000000..e8ce94055d402c14d225b63d35a29cac99921bfb --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/command/build_clib.py @@ -0,0 +1,431 @@ +""" Modified version of build_clib that handles fortran source files. 
+""" +import os +from glob import glob +import shutil +from distutils.command.build_clib import build_clib as old_build_clib +from distutils.errors import DistutilsSetupError, DistutilsError, \ + DistutilsFileError + +from numpy.distutils import log +from distutils.dep_util import newer_group +from numpy.distutils.misc_util import ( + filter_sources, get_lib_source_files, get_numpy_include_dirs, + has_cxx_sources, has_f_sources, is_sequence +) +from numpy.distutils.ccompiler_opt import new_ccompiler_opt + +# Fix Python distutils bug sf #1718574: +_l = old_build_clib.user_options +for _i in range(len(_l)): + if _l[_i][0] in ['build-clib', 'build-temp']: + _l[_i] = (_l[_i][0] + '=',) + _l[_i][1:] +# + + +class build_clib(old_build_clib): + + description = "build C/C++/F libraries used by Python extensions" + + user_options = old_build_clib.user_options + [ + ('fcompiler=', None, + "specify the Fortran compiler type"), + ('inplace', 'i', 'Build in-place'), + ('parallel=', 'j', + "number of parallel jobs"), + ('warn-error', None, + "turn all warnings into errors (-Werror)"), + ('cpu-baseline=', None, + "specify a list of enabled baseline CPU optimizations"), + ('cpu-dispatch=', None, + "specify a list of dispatched CPU optimizations"), + ('disable-optimization', None, + "disable CPU optimized code(dispatch,simd,fast...)"), + ] + + boolean_options = old_build_clib.boolean_options + \ + ['inplace', 'warn-error', 'disable-optimization'] + + def initialize_options(self): + old_build_clib.initialize_options(self) + self.fcompiler = None + self.inplace = 0 + self.parallel = None + self.warn_error = None + self.cpu_baseline = None + self.cpu_dispatch = None + self.disable_optimization = None + + + def finalize_options(self): + if self.parallel: + try: + self.parallel = int(self.parallel) + except ValueError as e: + raise ValueError("--parallel/-j argument must be an integer") from e + old_build_clib.finalize_options(self) + self.set_undefined_options('build', + ('parallel', 'parallel'), + ('warn_error', 'warn_error'), + ('cpu_baseline', 'cpu_baseline'), + ('cpu_dispatch', 'cpu_dispatch'), + ('disable_optimization', 'disable_optimization') + ) + + def have_f_sources(self): + for (lib_name, build_info) in self.libraries: + if has_f_sources(build_info.get('sources', [])): + return True + return False + + def have_cxx_sources(self): + for (lib_name, build_info) in self.libraries: + if has_cxx_sources(build_info.get('sources', [])): + return True + return False + + def run(self): + if not self.libraries: + return + + # Make sure that library sources are complete. + languages = [] + + # Make sure that extension sources are complete. 
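For context, this is the shape of the `libraries` entries that `build_clib` consumes: each is a `(name, build_info)` tuple whose `'sources'` list may mix C, C++ and Fortran files. A minimal, hypothetical `setup.py` sketch (package and file names are illustrative, not taken from this tree):

```python
from numpy.distutils.core import setup

setup(
    name='example_pkg',                     # hypothetical project name
    libraries=[
        ('fortran_helpers', {               # consumed by build_clib
            'sources': ['src/helpers.f90',  # triggers the Fortran path
                        'src/cbits.c'],     # compiled by the C compiler
            'language': 'f90',              # forces requiref90 handling
        }),
    ],
)
```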
+ self.run_command('build_src') + + for (lib_name, build_info) in self.libraries: + l = build_info.get('language', None) + if l and l not in languages: + languages.append(l) + + from distutils.ccompiler import new_compiler + self.compiler = new_compiler(compiler=self.compiler, + dry_run=self.dry_run, + force=self.force) + self.compiler.customize(self.distribution, + need_cxx=self.have_cxx_sources()) + + if self.warn_error: + self.compiler.compiler.append('-Werror') + self.compiler.compiler_so.append('-Werror') + + libraries = self.libraries + self.libraries = None + self.compiler.customize_cmd(self) + self.libraries = libraries + + self.compiler.show_customization() + + if not self.disable_optimization: + dispatch_hpath = os.path.join("numpy", "distutils", "include", "npy_cpu_dispatch_config.h") + dispatch_hpath = os.path.join(self.get_finalized_command("build_src").build_src, dispatch_hpath) + opt_cache_path = os.path.abspath( + os.path.join(self.build_temp, 'ccompiler_opt_cache_clib.py') + ) + if hasattr(self, "compiler_opt"): + # By default `CCompilerOpt` update the cache at the exit of + # the process, which may lead to duplicate building + # (see build_extension()/force_rebuild) if run() called + # multiple times within the same os process/thread without + # giving the chance the previous instances of `CCompilerOpt` + # to update the cache. + self.compiler_opt.cache_flush() + + self.compiler_opt = new_ccompiler_opt( + compiler=self.compiler, dispatch_hpath=dispatch_hpath, + cpu_baseline=self.cpu_baseline, cpu_dispatch=self.cpu_dispatch, + cache_path=opt_cache_path + ) + def report(copt): + log.info("\n########### CLIB COMPILER OPTIMIZATION ###########") + log.info(copt.report(full=True)) + + import atexit + atexit.register(report, self.compiler_opt) + + if self.have_f_sources(): + from numpy.distutils.fcompiler import new_fcompiler + self._f_compiler = new_fcompiler(compiler=self.fcompiler, + verbose=self.verbose, + dry_run=self.dry_run, + force=self.force, + requiref90='f90' in languages, + c_compiler=self.compiler) + if self._f_compiler is not None: + self._f_compiler.customize(self.distribution) + + libraries = self.libraries + self.libraries = None + self._f_compiler.customize_cmd(self) + self.libraries = libraries + + self._f_compiler.show_customization() + else: + self._f_compiler = None + + self.build_libraries(self.libraries) + + if self.inplace: + for l in self.distribution.installed_libraries: + libname = self.compiler.library_filename(l.name) + source = os.path.join(self.build_clib, libname) + target = os.path.join(l.target_dir, libname) + self.mkpath(l.target_dir) + shutil.copy(source, target) + + def get_source_files(self): + self.check_library_list(self.libraries) + filenames = [] + for lib in self.libraries: + filenames.extend(get_lib_source_files(lib)) + return filenames + + def build_libraries(self, libraries): + for (lib_name, build_info) in libraries: + self.build_a_library(build_info, lib_name, libraries) + + def build_a_library(self, build_info, lib_name, libraries): + # default compilers + compiler = self.compiler + fcompiler = self._f_compiler + + sources = build_info.get('sources') + if sources is None or not is_sequence(sources): + raise DistutilsSetupError(("in 'libraries' option (library '%s'), " + + "'sources' must be present and must be " + + "a list of source filenames") % lib_name) + sources = list(sources) + + c_sources, cxx_sources, f_sources, fmodule_sources \ + = filter_sources(sources) + requiref90 = not not fmodule_sources or \ + 
build_info.get('language', 'c') == 'f90' + + # save source type information so that build_ext can use it. + source_languages = [] + if c_sources: + source_languages.append('c') + if cxx_sources: + source_languages.append('c++') + if requiref90: + source_languages.append('f90') + elif f_sources: + source_languages.append('f77') + build_info['source_languages'] = source_languages + + lib_file = compiler.library_filename(lib_name, + output_dir=self.build_clib) + depends = sources + build_info.get('depends', []) + + force_rebuild = self.force + if not self.disable_optimization and not self.compiler_opt.is_cached(): + log.debug("Detected changes on compiler optimizations") + force_rebuild = True + if not (force_rebuild or newer_group(depends, lib_file, 'newer')): + log.debug("skipping '%s' library (up-to-date)", lib_name) + return + else: + log.info("building '%s' library", lib_name) + + config_fc = build_info.get('config_fc', {}) + if fcompiler is not None and config_fc: + log.info('using additional config_fc from setup script ' + 'for fortran compiler: %s' + % (config_fc,)) + from numpy.distutils.fcompiler import new_fcompiler + fcompiler = new_fcompiler(compiler=fcompiler.compiler_type, + verbose=self.verbose, + dry_run=self.dry_run, + force=self.force, + requiref90=requiref90, + c_compiler=self.compiler) + if fcompiler is not None: + dist = self.distribution + base_config_fc = dist.get_option_dict('config_fc').copy() + base_config_fc.update(config_fc) + fcompiler.customize(base_config_fc) + + # check availability of Fortran compilers + if (f_sources or fmodule_sources) and fcompiler is None: + raise DistutilsError("library %s has Fortran sources" + " but no Fortran compiler found" % (lib_name)) + + if fcompiler is not None: + fcompiler.extra_f77_compile_args = build_info.get( + 'extra_f77_compile_args') or [] + fcompiler.extra_f90_compile_args = build_info.get( + 'extra_f90_compile_args') or [] + + macros = build_info.get('macros') + if macros is None: + macros = [] + include_dirs = build_info.get('include_dirs') + if include_dirs is None: + include_dirs = [] + extra_postargs = build_info.get('extra_compiler_args') or [] + + include_dirs.extend(get_numpy_include_dirs()) + # where compiled F90 module files are: + module_dirs = build_info.get('module_dirs') or [] + module_build_dir = os.path.dirname(lib_file) + if requiref90: + self.mkpath(module_build_dir) + + if compiler.compiler_type == 'msvc': + # this hack works around the msvc compiler attributes + # problem, msvc uses its own convention :( + c_sources += cxx_sources + cxx_sources = [] + + # filtering C dispatch-table sources when optimization is not disabled, + # otherwise treated as normal sources. 
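The up-to-date test above relies on `distutils.dep_util.newer_group`; a small standalone illustration of the check as it is called here, with hypothetical paths:

```python
from distutils.dep_util import newer_group

depends = ['src/solver.f', 'src/solver.h']   # hypothetical inputs
lib_file = 'build/temp/libflib.a'            # hypothetical target
# the 'newer' policy treats missing dependencies as modified, matching
# the call in build_a_library()
if newer_group(depends, lib_file, 'newer'):
    print('rebuilding', lib_file)
```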
+        copt_c_sources = []
+        copt_cxx_sources = []
+        copt_baseline_flags = []
+        copt_macros = []
+        if not self.disable_optimization:
+            bsrc_dir = self.get_finalized_command("build_src").build_src
+            dispatch_hpath = os.path.join("numpy", "distutils", "include")
+            dispatch_hpath = os.path.join(bsrc_dir, dispatch_hpath)
+            include_dirs.append(dispatch_hpath)
+
+            copt_build_src = None if self.inplace else bsrc_dir
+            for _srcs, _dst, _ext in (
+                ((c_sources,), copt_c_sources, ('.dispatch.c',)),
+                ((c_sources, cxx_sources), copt_cxx_sources,
+                    ('.dispatch.cpp', '.dispatch.cxx'))
+            ):
+                for _src in _srcs:
+                    _dst += [
+                        _src.pop(_src.index(s))
+                        for s in _src[:] if s.endswith(_ext)
+                    ]
+            copt_baseline_flags = self.compiler_opt.cpu_baseline_flags()
+        else:
+            copt_macros.append(("NPY_DISABLE_OPTIMIZATION", 1))
+
+        objects = []
+        if copt_cxx_sources:
+            log.info("compiling C++ dispatch-able sources")
+            # dispatch-able C++ sources need the C++ compiler, which is
+            # derived from the C compiler before it is used here
+            cxx_compiler = compiler.cxx_compiler()
+            objects += self.compiler_opt.try_dispatch(
+                copt_cxx_sources,
+                output_dir=self.build_temp,
+                src_dir=copt_build_src,
+                macros=macros + copt_macros,
+                include_dirs=include_dirs,
+                debug=self.debug,
+                extra_postargs=extra_postargs,
+                ccompiler=cxx_compiler
+            )
+
+        if copt_c_sources:
+            log.info("compiling C dispatch-able sources")
+            objects += self.compiler_opt.try_dispatch(copt_c_sources,
+                                                      output_dir=self.build_temp,
+                                                      src_dir=copt_build_src,
+                                                      macros=macros + copt_macros,
+                                                      include_dirs=include_dirs,
+                                                      debug=self.debug,
+                                                      extra_postargs=extra_postargs)
+
+        if c_sources:
+            log.info("compiling C sources")
+            objects += compiler.compile(c_sources,
+                                        output_dir=self.build_temp,
+                                        macros=macros + copt_macros,
+                                        include_dirs=include_dirs,
+                                        debug=self.debug,
+                                        extra_postargs=extra_postargs + copt_baseline_flags)
+
+        if cxx_sources:
+            log.info("compiling C++ sources")
+            cxx_compiler = compiler.cxx_compiler()
+            cxx_objects = cxx_compiler.compile(cxx_sources,
+                                               output_dir=self.build_temp,
+                                               macros=macros + copt_macros,
+                                               include_dirs=include_dirs,
+                                               debug=self.debug,
+                                               extra_postargs=extra_postargs + copt_baseline_flags)
+            objects.extend(cxx_objects)
+
+        if f_sources or fmodule_sources:
+            extra_postargs = []
+            f_objects = []
+
+            if requiref90:
+                if fcompiler.module_dir_switch is None:
+                    existing_modules = glob('*.mod')
+                extra_postargs += fcompiler.module_options(
+                    module_dirs, module_build_dir)
+
+            if fmodule_sources:
+                log.info("compiling Fortran 90 module sources")
+                f_objects += fcompiler.compile(fmodule_sources,
+                                               output_dir=self.build_temp,
+                                               macros=macros,
+                                               include_dirs=include_dirs,
+                                               debug=self.debug,
+                                               extra_postargs=extra_postargs)
+
+            if requiref90 and self._f_compiler.module_dir_switch is None:
+                # move new compiled F90 module files to module_build_dir
+                for f in glob('*.mod'):
+                    if f in existing_modules:
+                        continue
+                    t = os.path.join(module_build_dir, f)
+                    if os.path.abspath(f) == os.path.abspath(t):
+                        continue
+                    if os.path.isfile(t):
+                        os.remove(t)
+                    try:
+                        self.move_file(f, module_build_dir)
+                    except DistutilsFileError:
+                        log.warn('failed to move %r to %r'
+                                 % (f, module_build_dir))
+
+            if f_sources:
+                log.info("compiling Fortran sources")
+                f_objects += fcompiler.compile(f_sources,
+                                               output_dir=self.build_temp,
+                                               macros=macros,
+                                               include_dirs=include_dirs,
+                                               debug=self.debug,
+                                               extra_postargs=extra_postargs)
+        else:
+            f_objects = []
+
+        if f_objects and not fcompiler.can_ccompiler_link(compiler):
+            # Default linker cannot link Fortran object files, and results
+            # need to be wrapped later. Instead of creating a real static
+            # library, just keep track of the object files.
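To make the fallback concrete, here is a self-contained sketch (illustrative names only) of the round trip the code below performs together with `_process_unlinkable_fobjects` in build_ext: object paths are written to plain-text `.fobjects`/`.cobjects` lists instead of a real archive, then expanded back at link time:

```python
import os

def write_fake_lib(build_dir, name, f_objects, c_objects):
    # build_clib side: record object paths instead of archiving them
    for ext, objs in (('.fobjects', f_objects), ('.cobjects', c_objects)):
        with open(os.path.join(build_dir, name + ext), 'w') as fh:
            fh.write('\n'.join(os.path.abspath(o) for o in objs))

def read_fake_lib(build_dir, name):
    # build_ext side: expand the lists back into linkable object paths
    objects = []
    for ext in ('.fobjects', '.cobjects'):
        with open(os.path.join(build_dir, name + ext)) as fh:
            objects.extend(fh.read().splitlines())
    return objects
```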
+ listfn = os.path.join(self.build_clib, + lib_name + '.fobjects') + with open(listfn, 'w') as f: + f.write("\n".join(os.path.abspath(obj) for obj in f_objects)) + + listfn = os.path.join(self.build_clib, + lib_name + '.cobjects') + with open(listfn, 'w') as f: + f.write("\n".join(os.path.abspath(obj) for obj in objects)) + + # create empty "library" file for dependency tracking + lib_fname = os.path.join(self.build_clib, + lib_name + compiler.static_lib_extension) + with open(lib_fname, 'wb') as f: + pass + else: + # assume that default linker is suitable for + # linking Fortran object files + objects.extend(f_objects) + compiler.create_static_lib(objects, lib_name, + output_dir=self.build_clib, + debug=self.debug) + + # fix library dependencies + clib_libraries = build_info.get('libraries', []) + for lname, binfo in libraries: + if lname in clib_libraries: + clib_libraries.extend(binfo.get('libraries', [])) + if clib_libraries: + build_info['libraries'] = clib_libraries diff --git a/MLPY/Lib/site-packages/numpy/distutils/command/build_ext.py b/MLPY/Lib/site-packages/numpy/distutils/command/build_ext.py new file mode 100644 index 0000000000000000000000000000000000000000..2e42121d8e788e64bccb38729729e91981c4eb04 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/command/build_ext.py @@ -0,0 +1,708 @@ +""" Modified version of build_ext that handles fortran source files. + +""" +import os +import subprocess +from glob import glob + +from distutils.dep_util import newer_group +from distutils.command.build_ext import build_ext as old_build_ext +from distutils.errors import DistutilsFileError, DistutilsSetupError,\ + DistutilsError +from distutils.file_util import copy_file + +from numpy.distutils import log +from numpy.distutils.exec_command import filepath_from_subprocess_output +from numpy.distutils.system_info import combine_paths +from numpy.distutils.misc_util import ( + filter_sources, get_ext_source_files, get_numpy_include_dirs, + has_cxx_sources, has_f_sources, is_sequence +) +from numpy.distutils.command.config_compiler import show_fortran_compilers +from numpy.distutils.ccompiler_opt import new_ccompiler_opt, CCompilerOpt + +class build_ext (old_build_ext): + + description = "build C/C++/F extensions (compile/link to build directory)" + + user_options = old_build_ext.user_options + [ + ('fcompiler=', None, + "specify the Fortran compiler type"), + ('parallel=', 'j', + "number of parallel jobs"), + ('warn-error', None, + "turn all warnings into errors (-Werror)"), + ('cpu-baseline=', None, + "specify a list of enabled baseline CPU optimizations"), + ('cpu-dispatch=', None, + "specify a list of dispatched CPU optimizations"), + ('disable-optimization', None, + "disable CPU optimized code(dispatch,simd,fast...)"), + ('simd-test=', None, + "specify a list of CPU optimizations to be tested against NumPy SIMD interface"), + ] + + help_options = old_build_ext.help_options + [ + ('help-fcompiler', None, "list available Fortran compilers", + show_fortran_compilers), + ] + + boolean_options = old_build_ext.boolean_options + ['warn-error', 'disable-optimization'] + + def initialize_options(self): + old_build_ext.initialize_options(self) + self.fcompiler = None + self.parallel = None + self.warn_error = None + self.cpu_baseline = None + self.cpu_dispatch = None + self.disable_optimization = None + self.simd_test = None + + def finalize_options(self): + if self.parallel: + try: + self.parallel = int(self.parallel) + except ValueError as e: + raise ValueError("--parallel/-j argument must 
be an integer") from e + + # Ensure that self.include_dirs and self.distribution.include_dirs + # refer to the same list object. finalize_options will modify + # self.include_dirs, but self.distribution.include_dirs is used + # during the actual build. + # self.include_dirs is None unless paths are specified with + # --include-dirs. + # The include paths will be passed to the compiler in the order: + # numpy paths, --include-dirs paths, Python include path. + if isinstance(self.include_dirs, str): + self.include_dirs = self.include_dirs.split(os.pathsep) + incl_dirs = self.include_dirs or [] + if self.distribution.include_dirs is None: + self.distribution.include_dirs = [] + self.include_dirs = self.distribution.include_dirs + self.include_dirs.extend(incl_dirs) + + old_build_ext.finalize_options(self) + self.set_undefined_options('build', + ('parallel', 'parallel'), + ('warn_error', 'warn_error'), + ('cpu_baseline', 'cpu_baseline'), + ('cpu_dispatch', 'cpu_dispatch'), + ('disable_optimization', 'disable_optimization'), + ('simd_test', 'simd_test') + ) + CCompilerOpt.conf_target_groups["simd_test"] = self.simd_test + + def run(self): + if not self.extensions: + return + + # Make sure that extension sources are complete. + self.run_command('build_src') + + if self.distribution.has_c_libraries(): + if self.inplace: + if self.distribution.have_run.get('build_clib'): + log.warn('build_clib already run, it is too late to ' + 'ensure in-place build of build_clib') + build_clib = self.distribution.get_command_obj( + 'build_clib') + else: + build_clib = self.distribution.get_command_obj( + 'build_clib') + build_clib.inplace = 1 + build_clib.ensure_finalized() + build_clib.run() + self.distribution.have_run['build_clib'] = 1 + + else: + self.run_command('build_clib') + build_clib = self.get_finalized_command('build_clib') + self.library_dirs.append(build_clib.build_clib) + else: + build_clib = None + + # Not including C libraries to the list of + # extension libraries automatically to prevent + # bogus linking commands. Extensions must + # explicitly specify the C libraries that they use. + + from distutils.ccompiler import new_compiler + from numpy.distutils.fcompiler import new_fcompiler + + compiler_type = self.compiler + # Initialize C compiler: + self.compiler = new_compiler(compiler=compiler_type, + verbose=self.verbose, + dry_run=self.dry_run, + force=self.force) + self.compiler.customize(self.distribution) + self.compiler.customize_cmd(self) + + if self.warn_error: + self.compiler.compiler.append('-Werror') + self.compiler.compiler_so.append('-Werror') + + self.compiler.show_customization() + + if not self.disable_optimization: + dispatch_hpath = os.path.join("numpy", "distutils", "include", "npy_cpu_dispatch_config.h") + dispatch_hpath = os.path.join(self.get_finalized_command("build_src").build_src, dispatch_hpath) + opt_cache_path = os.path.abspath( + os.path.join(self.build_temp, 'ccompiler_opt_cache_ext.py') + ) + if hasattr(self, "compiler_opt"): + # By default `CCompilerOpt` update the cache at the exit of + # the process, which may lead to duplicate building + # (see build_extension()/force_rebuild) if run() called + # multiple times within the same os process/thread without + # giving the chance the previous instances of `CCompilerOpt` + # to update the cache. 
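The flush below guards against a stale on-disk cache when `run()` executes more than once in a single process. The optimization report a few lines further down is emitted through `atexit`; the pattern, sketched with a hypothetical stand-in for `CCompilerOpt`:

```python
import atexit

class _FakeCompilerOpt:
    # hypothetical stand-in, only to show the report-at-exit pattern
    def report(self, full=True):
        return 'baseline: SSE2; dispatch: AVX2 AVX512F'  # illustrative

def report(copt):
    print('########### EXT COMPILER OPTIMIZATION ###########')
    print(copt.report(full=True))

atexit.register(report, _FakeCompilerOpt())  # runs once, at interpreter exit
```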
+ self.compiler_opt.cache_flush() + + self.compiler_opt = new_ccompiler_opt( + compiler=self.compiler, dispatch_hpath=dispatch_hpath, + cpu_baseline=self.cpu_baseline, cpu_dispatch=self.cpu_dispatch, + cache_path=opt_cache_path + ) + def report(copt): + log.info("\n########### EXT COMPILER OPTIMIZATION ###########") + log.info(copt.report(full=True)) + + import atexit + atexit.register(report, self.compiler_opt) + + # Setup directory for storing generated extra DLL files on Windows + self.extra_dll_dir = os.path.join(self.build_temp, '.libs') + if not os.path.isdir(self.extra_dll_dir): + os.makedirs(self.extra_dll_dir) + + # Create mapping of libraries built by build_clib: + clibs = {} + if build_clib is not None: + for libname, build_info in build_clib.libraries or []: + if libname in clibs and clibs[libname] != build_info: + log.warn('library %r defined more than once,' + ' overwriting build_info\n%s... \nwith\n%s...' + % (libname, repr(clibs[libname])[:300], repr(build_info)[:300])) + clibs[libname] = build_info + # .. and distribution libraries: + for libname, build_info in self.distribution.libraries or []: + if libname in clibs: + # build_clib libraries have a precedence before distribution ones + continue + clibs[libname] = build_info + + # Determine if C++/Fortran 77/Fortran 90 compilers are needed. + # Update extension libraries, library_dirs, and macros. + all_languages = set() + for ext in self.extensions: + ext_languages = set() + c_libs = [] + c_lib_dirs = [] + macros = [] + for libname in ext.libraries: + if libname in clibs: + binfo = clibs[libname] + c_libs += binfo.get('libraries', []) + c_lib_dirs += binfo.get('library_dirs', []) + for m in binfo.get('macros', []): + if m not in macros: + macros.append(m) + + for l in clibs.get(libname, {}).get('source_languages', []): + ext_languages.add(l) + if c_libs: + new_c_libs = ext.libraries + c_libs + log.info('updating extension %r libraries from %r to %r' + % (ext.name, ext.libraries, new_c_libs)) + ext.libraries = new_c_libs + ext.library_dirs = ext.library_dirs + c_lib_dirs + if macros: + log.info('extending extension %r defined_macros with %r' + % (ext.name, macros)) + ext.define_macros = ext.define_macros + macros + + # determine extension languages + if has_f_sources(ext.sources): + ext_languages.add('f77') + if has_cxx_sources(ext.sources): + ext_languages.add('c++') + l = ext.language or self.compiler.detect_language(ext.sources) + if l: + ext_languages.add(l) + # reset language attribute for choosing proper linker + if 'c++' in ext_languages: + ext_language = 'c++' + elif 'f90' in ext_languages: + ext_language = 'f90' + elif 'f77' in ext_languages: + ext_language = 'f77' + else: + ext_language = 'c' # default + if l and l != ext_language and ext.language: + log.warn('resetting extension %r language from %r to %r.' 
% + (ext.name, l, ext_language)) + ext.language = ext_language + # global language + all_languages.update(ext_languages) + + need_f90_compiler = 'f90' in all_languages + need_f77_compiler = 'f77' in all_languages + need_cxx_compiler = 'c++' in all_languages + + # Initialize C++ compiler: + if need_cxx_compiler: + self._cxx_compiler = new_compiler(compiler=compiler_type, + verbose=self.verbose, + dry_run=self.dry_run, + force=self.force) + compiler = self._cxx_compiler + compiler.customize(self.distribution, need_cxx=need_cxx_compiler) + compiler.customize_cmd(self) + compiler.show_customization() + self._cxx_compiler = compiler.cxx_compiler() + else: + self._cxx_compiler = None + + # Initialize Fortran 77 compiler: + if need_f77_compiler: + ctype = self.fcompiler + self._f77_compiler = new_fcompiler(compiler=self.fcompiler, + verbose=self.verbose, + dry_run=self.dry_run, + force=self.force, + requiref90=False, + c_compiler=self.compiler) + fcompiler = self._f77_compiler + if fcompiler: + ctype = fcompiler.compiler_type + fcompiler.customize(self.distribution) + if fcompiler and fcompiler.get_version(): + fcompiler.customize_cmd(self) + fcompiler.show_customization() + else: + self.warn('f77_compiler=%s is not available.' % + (ctype)) + self._f77_compiler = None + else: + self._f77_compiler = None + + # Initialize Fortran 90 compiler: + if need_f90_compiler: + ctype = self.fcompiler + self._f90_compiler = new_fcompiler(compiler=self.fcompiler, + verbose=self.verbose, + dry_run=self.dry_run, + force=self.force, + requiref90=True, + c_compiler=self.compiler) + fcompiler = self._f90_compiler + if fcompiler: + ctype = fcompiler.compiler_type + fcompiler.customize(self.distribution) + if fcompiler and fcompiler.get_version(): + fcompiler.customize_cmd(self) + fcompiler.show_customization() + else: + self.warn('f90_compiler=%s is not available.' % + (ctype)) + self._f90_compiler = None + else: + self._f90_compiler = None + + # Build extensions + self.build_extensions() + + # Copy over any extra DLL files + # FIXME: In the case where there are more than two packages, + # we blindly assume that both packages need all of the libraries, + # resulting in a larger wheel than is required. This should be fixed, + # but it's so rare that I won't bother to handle it. + pkg_roots = { + self.get_ext_fullname(ext.name).split('.')[0] + for ext in self.extensions + } + for pkg_root in pkg_roots: + shared_lib_dir = os.path.join(pkg_root, '.libs') + if not self.inplace: + shared_lib_dir = os.path.join(self.build_lib, shared_lib_dir) + for fn in os.listdir(self.extra_dll_dir): + if not os.path.isdir(shared_lib_dir): + os.makedirs(shared_lib_dir) + if not fn.lower().endswith('.dll'): + continue + runtime_lib = os.path.join(self.extra_dll_dir, fn) + copy_file(runtime_lib, shared_lib_dir) + + def swig_sources(self, sources, extensions=None): + # Do nothing. Swig sources have been handled in build_src command. 
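Returning to the DLL-copying loop above: the destination `.libs` directory is derived from the first component of each extension's dotted name. In isolation, with hypothetical names:

```python
ext_names = ['mypkg.core._ext', 'mypkg.linalg._lapack']  # hypothetical
pkg_roots = {name.split('.')[0] for name in ext_names}
assert pkg_roots == {'mypkg'}  # generated DLLs land in mypkg/.libs
```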
+ return sources + + def build_extension(self, ext): + sources = ext.sources + if sources is None or not is_sequence(sources): + raise DistutilsSetupError( + ("in 'ext_modules' option (extension '%s'), " + + "'sources' must be present and must be " + + "a list of source filenames") % ext.name) + sources = list(sources) + + if not sources: + return + + fullname = self.get_ext_fullname(ext.name) + if self.inplace: + modpath = fullname.split('.') + package = '.'.join(modpath[0:-1]) + base = modpath[-1] + build_py = self.get_finalized_command('build_py') + package_dir = build_py.get_package_dir(package) + ext_filename = os.path.join(package_dir, + self.get_ext_filename(base)) + else: + ext_filename = os.path.join(self.build_lib, + self.get_ext_filename(fullname)) + depends = sources + ext.depends + + force_rebuild = self.force + if not self.disable_optimization and not self.compiler_opt.is_cached(): + log.debug("Detected changes on compiler optimizations") + force_rebuild = True + if not (force_rebuild or newer_group(depends, ext_filename, 'newer')): + log.debug("skipping '%s' extension (up-to-date)", ext.name) + return + else: + log.info("building '%s' extension", ext.name) + + extra_args = ext.extra_compile_args or [] + macros = ext.define_macros[:] + for undef in ext.undef_macros: + macros.append((undef,)) + + c_sources, cxx_sources, f_sources, fmodule_sources = \ + filter_sources(ext.sources) + + if self.compiler.compiler_type == 'msvc': + if cxx_sources: + # Needed to compile kiva.agg._agg extension. + extra_args.append('/Zm1000') + # this hack works around the msvc compiler attributes + # problem, msvc uses its own convention :( + c_sources += cxx_sources + cxx_sources = [] + + # Set Fortran/C++ compilers for compilation and linking. + if ext.language == 'f90': + fcompiler = self._f90_compiler + elif ext.language == 'f77': + fcompiler = self._f77_compiler + else: # in case ext.language is c++, for instance + fcompiler = self._f90_compiler or self._f77_compiler + if fcompiler is not None: + fcompiler.extra_f77_compile_args = (ext.extra_f77_compile_args or []) if hasattr( + ext, 'extra_f77_compile_args') else [] + fcompiler.extra_f90_compile_args = (ext.extra_f90_compile_args or []) if hasattr( + ext, 'extra_f90_compile_args') else [] + cxx_compiler = self._cxx_compiler + + # check for the availability of required compilers + if cxx_sources and cxx_compiler is None: + raise DistutilsError("extension %r has C++ sources" + "but no C++ compiler found" % (ext.name)) + if (f_sources or fmodule_sources) and fcompiler is None: + raise DistutilsError("extension %r has Fortran sources " + "but no Fortran compiler found" % (ext.name)) + if ext.language in ['f77', 'f90'] and fcompiler is None: + self.warn("extension %r has Fortran libraries " + "but no Fortran linker found, using default linker" % (ext.name)) + if ext.language == 'c++' and cxx_compiler is None: + self.warn("extension %r has C++ libraries " + "but no C++ linker found, using default linker" % (ext.name)) + + kws = {'depends': ext.depends} + output_dir = self.build_temp + + include_dirs = ext.include_dirs + get_numpy_include_dirs() + + # filtering C dispatch-table sources when optimization is not disabled, + # otherwise treated as normal sources. 
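The filtering below routes sources named `*.dispatch.c` (and `.dispatch.cpp`/`.dispatch.cxx`) to `CCompilerOpt.try_dispatch`, while ordinary sources continue to the regular compile calls. Its effect in isolation, with hypothetical file names:

```python
c_sources = ['umath.c', 'loops.dispatch.c', 'common.c']  # hypothetical
copt_c_sources = [s for s in c_sources if s.endswith('.dispatch.c')]
c_sources = [s for s in c_sources if not s.endswith('.dispatch.c')]
assert copt_c_sources == ['loops.dispatch.c']
assert c_sources == ['umath.c', 'common.c']
```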
+ copt_c_sources = [] + copt_cxx_sources = [] + copt_baseline_flags = [] + copt_macros = [] + if not self.disable_optimization: + bsrc_dir = self.get_finalized_command("build_src").build_src + dispatch_hpath = os.path.join("numpy", "distutils", "include") + dispatch_hpath = os.path.join(bsrc_dir, dispatch_hpath) + include_dirs.append(dispatch_hpath) + + copt_build_src = None if self.inplace else bsrc_dir + for _srcs, _dst, _ext in ( + ((c_sources,), copt_c_sources, ('.dispatch.c',)), + ((c_sources, cxx_sources), copt_cxx_sources, + ('.dispatch.cpp', '.dispatch.cxx')) + ): + for _src in _srcs: + _dst += [ + _src.pop(_src.index(s)) + for s in _src[:] if s.endswith(_ext) + ] + copt_baseline_flags = self.compiler_opt.cpu_baseline_flags() + else: + copt_macros.append(("NPY_DISABLE_OPTIMIZATION", 1)) + + c_objects = [] + if copt_cxx_sources: + log.info("compiling C++ dispatch-able sources") + c_objects += self.compiler_opt.try_dispatch( + copt_cxx_sources, + output_dir=output_dir, + src_dir=copt_build_src, + macros=macros + copt_macros, + include_dirs=include_dirs, + debug=self.debug, + extra_postargs=extra_args, + ccompiler=cxx_compiler, + **kws + ) + if copt_c_sources: + log.info("compiling C dispatch-able sources") + c_objects += self.compiler_opt.try_dispatch(copt_c_sources, + output_dir=output_dir, + src_dir=copt_build_src, + macros=macros + copt_macros, + include_dirs=include_dirs, + debug=self.debug, + extra_postargs=extra_args, + **kws) + if c_sources: + log.info("compiling C sources") + c_objects += self.compiler.compile(c_sources, + output_dir=output_dir, + macros=macros + copt_macros, + include_dirs=include_dirs, + debug=self.debug, + extra_postargs=extra_args + copt_baseline_flags, + **kws) + if cxx_sources: + log.info("compiling C++ sources") + c_objects += cxx_compiler.compile(cxx_sources, + output_dir=output_dir, + macros=macros + copt_macros, + include_dirs=include_dirs, + debug=self.debug, + extra_postargs=extra_args + copt_baseline_flags, + **kws) + + extra_postargs = [] + f_objects = [] + if fmodule_sources: + log.info("compiling Fortran 90 module sources") + module_dirs = ext.module_dirs[:] + module_build_dir = os.path.join( + self.build_temp, os.path.dirname( + self.get_ext_filename(fullname))) + + self.mkpath(module_build_dir) + if fcompiler.module_dir_switch is None: + existing_modules = glob('*.mod') + extra_postargs += fcompiler.module_options( + module_dirs, module_build_dir) + f_objects += fcompiler.compile(fmodule_sources, + output_dir=self.build_temp, + macros=macros, + include_dirs=include_dirs, + debug=self.debug, + extra_postargs=extra_postargs, + depends=ext.depends) + + if fcompiler.module_dir_switch is None: + for f in glob('*.mod'): + if f in existing_modules: + continue + t = os.path.join(module_build_dir, f) + if os.path.abspath(f) == os.path.abspath(t): + continue + if os.path.isfile(t): + os.remove(t) + try: + self.move_file(f, module_build_dir) + except DistutilsFileError: + log.warn('failed to move %r to %r' % + (f, module_build_dir)) + if f_sources: + log.info("compiling Fortran sources") + f_objects += fcompiler.compile(f_sources, + output_dir=self.build_temp, + macros=macros, + include_dirs=include_dirs, + debug=self.debug, + extra_postargs=extra_postargs, + depends=ext.depends) + + if f_objects and not fcompiler.can_ccompiler_link(self.compiler): + unlinkable_fobjects = f_objects + objects = c_objects + else: + unlinkable_fobjects = [] + objects = c_objects + f_objects + + if ext.extra_objects: + objects.extend(ext.extra_objects) + extra_args = 
ext.extra_link_args or [] + libraries = self.get_libraries(ext)[:] + library_dirs = ext.library_dirs[:] + + linker = self.compiler.link_shared_object + # Always use system linker when using MSVC compiler. + if self.compiler.compiler_type in ('msvc', 'intelw', 'intelemw'): + # expand libraries with fcompiler libraries as we are + # not using fcompiler linker + self._libs_with_msvc_and_fortran( + fcompiler, libraries, library_dirs) + + elif ext.language in ['f77', 'f90'] and fcompiler is not None: + linker = fcompiler.link_shared_object + if ext.language == 'c++' and cxx_compiler is not None: + linker = cxx_compiler.link_shared_object + + if fcompiler is not None: + objects, libraries = self._process_unlinkable_fobjects( + objects, libraries, + fcompiler, library_dirs, + unlinkable_fobjects) + + linker(objects, ext_filename, + libraries=libraries, + library_dirs=library_dirs, + runtime_library_dirs=ext.runtime_library_dirs, + extra_postargs=extra_args, + export_symbols=self.get_export_symbols(ext), + debug=self.debug, + build_temp=self.build_temp, + target_lang=ext.language) + + def _add_dummy_mingwex_sym(self, c_sources): + build_src = self.get_finalized_command("build_src").build_src + build_clib = self.get_finalized_command("build_clib").build_clib + objects = self.compiler.compile([os.path.join(build_src, + "gfortran_vs2003_hack.c")], + output_dir=self.build_temp) + self.compiler.create_static_lib( + objects, "_gfortran_workaround", output_dir=build_clib, debug=self.debug) + + def _process_unlinkable_fobjects(self, objects, libraries, + fcompiler, library_dirs, + unlinkable_fobjects): + libraries = list(libraries) + objects = list(objects) + unlinkable_fobjects = list(unlinkable_fobjects) + + # Expand possible fake static libraries to objects; + # make sure to iterate over a copy of the list as + # "fake" libraries will be removed as they are + # enountered + for lib in libraries[:]: + for libdir in library_dirs: + fake_lib = os.path.join(libdir, lib + '.fobjects') + if os.path.isfile(fake_lib): + # Replace fake static library + libraries.remove(lib) + with open(fake_lib, 'r') as f: + unlinkable_fobjects.extend(f.read().splitlines()) + + # Expand C objects + c_lib = os.path.join(libdir, lib + '.cobjects') + with open(c_lib, 'r') as f: + objects.extend(f.read().splitlines()) + + # Wrap unlinkable objects to a linkable one + if unlinkable_fobjects: + fobjects = [os.path.abspath(obj) for obj in unlinkable_fobjects] + wrapped = fcompiler.wrap_unlinkable_objects( + fobjects, output_dir=self.build_temp, + extra_dll_dir=self.extra_dll_dir) + objects.extend(wrapped) + + return objects, libraries + + def _libs_with_msvc_and_fortran(self, fcompiler, c_libraries, + c_library_dirs): + if fcompiler is None: + return + + for libname in c_libraries: + if libname.startswith('msvc'): + continue + fileexists = False + for libdir in c_library_dirs or []: + libfile = os.path.join(libdir, '%s.lib' % (libname)) + if os.path.isfile(libfile): + fileexists = True + break + if fileexists: + continue + # make g77-compiled static libs available to MSVC + fileexists = False + for libdir in c_library_dirs: + libfile = os.path.join(libdir, 'lib%s.a' % (libname)) + if os.path.isfile(libfile): + # copy libname.a file to name.lib so that MSVC linker + # can find it + libfile2 = os.path.join(self.build_temp, libname + '.lib') + copy_file(libfile, libfile2) + if self.build_temp not in c_library_dirs: + c_library_dirs.append(self.build_temp) + fileexists = True + break + if fileexists: + continue + log.warn('could not find 
library %r in directories %s' + % (libname, c_library_dirs)) + + # Always use system linker when using MSVC compiler. + f_lib_dirs = [] + for dir in fcompiler.library_dirs: + # correct path when compiling in Cygwin but with normal Win + # Python + if dir.startswith('/usr/lib'): + try: + dir = subprocess.check_output(['cygpath', '-w', dir]) + except (OSError, subprocess.CalledProcessError): + pass + else: + dir = filepath_from_subprocess_output(dir) + f_lib_dirs.append(dir) + c_library_dirs.extend(f_lib_dirs) + + # make g77-compiled static libs available to MSVC + for lib in fcompiler.libraries: + if not lib.startswith('msvc'): + c_libraries.append(lib) + p = combine_paths(f_lib_dirs, 'lib' + lib + '.a') + if p: + dst_name = os.path.join(self.build_temp, lib + '.lib') + if not os.path.isfile(dst_name): + copy_file(p[0], dst_name) + if self.build_temp not in c_library_dirs: + c_library_dirs.append(self.build_temp) + + def get_source_files(self): + self.check_extensions_list(self.extensions) + filenames = [] + for ext in self.extensions: + filenames.extend(get_ext_source_files(ext)) + return filenames + + def get_outputs(self): + self.check_extensions_list(self.extensions) + + outputs = [] + for ext in self.extensions: + if not ext.sources: + continue + fullname = self.get_ext_fullname(ext.name) + outputs.append(os.path.join(self.build_lib, + self.get_ext_filename(fullname))) + return outputs diff --git a/MLPY/Lib/site-packages/numpy/distutils/command/build_py.py b/MLPY/Lib/site-packages/numpy/distutils/command/build_py.py new file mode 100644 index 0000000000000000000000000000000000000000..c4efde5042df4975a5b7888af9fb3ad9bd59ce5d --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/command/build_py.py @@ -0,0 +1,31 @@ +from distutils.command.build_py import build_py as old_build_py +from numpy.distutils.misc_util import is_string + +class build_py(old_build_py): + + def run(self): + build_src = self.get_finalized_command('build_src') + if build_src.py_modules_dict and self.packages is None: + self.packages = list(build_src.py_modules_dict.keys ()) + old_build_py.run(self) + + def find_package_modules(self, package, package_dir): + modules = old_build_py.find_package_modules(self, package, package_dir) + + # Find build_src generated *.py files. + build_src = self.get_finalized_command('build_src') + modules += build_src.py_modules_dict.get(package, []) + + return modules + + def find_modules(self): + old_py_modules = self.py_modules[:] + new_py_modules = [_m for _m in self.py_modules if is_string(_m)] + self.py_modules[:] = new_py_modules + modules = old_build_py.find_modules(self) + self.py_modules[:] = old_py_modules + + return modules + + # XXX: Fix find_source_files for item in py_modules such that item is 3-tuple + # and item[2] is source file. diff --git a/MLPY/Lib/site-packages/numpy/distutils/command/build_scripts.py b/MLPY/Lib/site-packages/numpy/distutils/command/build_scripts.py new file mode 100644 index 0000000000000000000000000000000000000000..9ea703e4486c2f0f72a0f0b22fa263ac3d41a333 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/command/build_scripts.py @@ -0,0 +1,49 @@ +""" Modified version of build_scripts that handles building scripts from functions. 
+ +""" +from distutils.command.build_scripts import build_scripts as old_build_scripts +from numpy.distutils import log +from numpy.distutils.misc_util import is_string + +class build_scripts(old_build_scripts): + + def generate_scripts(self, scripts): + new_scripts = [] + func_scripts = [] + for script in scripts: + if is_string(script): + new_scripts.append(script) + else: + func_scripts.append(script) + if not func_scripts: + return new_scripts + + build_dir = self.build_dir + self.mkpath(build_dir) + for func in func_scripts: + script = func(build_dir) + if not script: + continue + if is_string(script): + log.info(" adding '%s' to scripts" % (script,)) + new_scripts.append(script) + else: + [log.info(" adding '%s' to scripts" % (s,)) for s in script] + new_scripts.extend(list(script)) + return new_scripts + + def run (self): + if not self.scripts: + return + + self.scripts = self.generate_scripts(self.scripts) + # Now make sure that the distribution object has this list of scripts. + # setuptools' develop command requires that this be a list of filenames, + # not functions. + self.distribution.scripts = self.scripts + + return old_build_scripts.run(self) + + def get_source_files(self): + from numpy.distutils.misc_util import get_script_files + return get_script_files(self.scripts) diff --git a/MLPY/Lib/site-packages/numpy/distutils/command/build_src.py b/MLPY/Lib/site-packages/numpy/distutils/command/build_src.py new file mode 100644 index 0000000000000000000000000000000000000000..18671e9bfd792b9ef41d041b9f65c526548f2fe8 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/command/build_src.py @@ -0,0 +1,773 @@ +""" Build swig and f2py sources. +""" +import os +import re +import sys +import shlex +import copy + +from distutils.command import build_ext +from distutils.dep_util import newer_group, newer +from distutils.util import get_platform +from distutils.errors import DistutilsError, DistutilsSetupError + + +# this import can't be done here, as it uses numpy stuff only available +# after it's installed +#import numpy.f2py +from numpy.distutils import log +from numpy.distutils.misc_util import ( + fortran_ext_match, appendpath, is_string, is_sequence, get_cmd + ) +from numpy.distutils.from_template import process_file as process_f_file +from numpy.distutils.conv_template import process_file as process_c_file + +def subst_vars(target, source, d): + """Substitute any occurrence of @foo@ by d['foo'] from source file into + target.""" + var = re.compile('@([a-zA-Z_]+)@') + with open(source, 'r') as fs: + with open(target, 'w') as ft: + for l in fs: + m = var.search(l) + if m: + ft.write(l.replace('@%s@' % m.group(1), d[m.group(1)])) + else: + ft.write(l) + +class build_src(build_ext.build_ext): + + description = "build sources from SWIG, F2PY files or a function" + + user_options = [ + ('build-src=', 'd', "directory to \"build\" sources to"), + ('f2py-opts=', None, "list of f2py command line options"), + ('swig=', None, "path to the SWIG executable"), + ('swig-opts=', None, "list of SWIG command line options"), + ('swig-cpp', None, "make SWIG create C++ files (default is autodetected from sources)"), + ('f2pyflags=', None, "additional flags to f2py (use --f2py-opts= instead)"), # obsolete + ('swigflags=', None, "additional flags to swig (use --swig-opts= instead)"), # obsolete + ('force', 'f', "forcibly build everything (ignore file timestamps)"), + ('inplace', 'i', + "ignore build-lib and put compiled extensions into the source " + + "directory alongside your pure Python 
modules"), + ('verbose-cfg', None, + "change logging level from WARN to INFO which will show all " + + "compiler output") + ] + + boolean_options = ['force', 'inplace', 'verbose-cfg'] + + help_options = [] + + def initialize_options(self): + self.extensions = None + self.package = None + self.py_modules = None + self.py_modules_dict = None + self.build_src = None + self.build_lib = None + self.build_base = None + self.force = None + self.inplace = None + self.package_dir = None + self.f2pyflags = None # obsolete + self.f2py_opts = None + self.swigflags = None # obsolete + self.swig_opts = None + self.swig_cpp = None + self.swig = None + self.verbose_cfg = None + + def finalize_options(self): + self.set_undefined_options('build', + ('build_base', 'build_base'), + ('build_lib', 'build_lib'), + ('force', 'force')) + if self.package is None: + self.package = self.distribution.ext_package + self.extensions = self.distribution.ext_modules + self.libraries = self.distribution.libraries or [] + self.py_modules = self.distribution.py_modules or [] + self.data_files = self.distribution.data_files or [] + + if self.build_src is None: + plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2]) + self.build_src = os.path.join(self.build_base, 'src'+plat_specifier) + + # py_modules_dict is used in build_py.find_package_modules + self.py_modules_dict = {} + + if self.f2pyflags: + if self.f2py_opts: + log.warn('ignoring --f2pyflags as --f2py-opts already used') + else: + self.f2py_opts = self.f2pyflags + self.f2pyflags = None + if self.f2py_opts is None: + self.f2py_opts = [] + else: + self.f2py_opts = shlex.split(self.f2py_opts) + + if self.swigflags: + if self.swig_opts: + log.warn('ignoring --swigflags as --swig-opts already used') + else: + self.swig_opts = self.swigflags + self.swigflags = None + + if self.swig_opts is None: + self.swig_opts = [] + else: + self.swig_opts = shlex.split(self.swig_opts) + + # use options from build_ext command + build_ext = self.get_finalized_command('build_ext') + if self.inplace is None: + self.inplace = build_ext.inplace + if self.swig_cpp is None: + self.swig_cpp = build_ext.swig_cpp + for c in ['swig', 'swig_opt']: + o = '--'+c.replace('_', '-') + v = getattr(build_ext, c, None) + if v: + if getattr(self, c): + log.warn('both build_src and build_ext define %s option' % (o)) + else: + log.info('using "%s=%s" option from build_ext command' % (o, v)) + setattr(self, c, v) + + def run(self): + log.info("build_src") + if not (self.extensions or self.libraries): + return + self.build_sources() + + def build_sources(self): + + if self.inplace: + self.get_package_dir = \ + self.get_finalized_command('build_py').get_package_dir + + self.build_py_modules_sources() + + for libname_info in self.libraries: + self.build_library_sources(*libname_info) + + if self.extensions: + self.check_extensions_list(self.extensions) + + for ext in self.extensions: + self.build_extension_sources(ext) + + self.build_data_files_sources() + self.build_npy_pkg_config() + + def build_data_files_sources(self): + if not self.data_files: + return + log.info('building data_files sources') + from numpy.distutils.misc_util import get_data_files + new_data_files = [] + for data in self.data_files: + if isinstance(data, str): + new_data_files.append(data) + elif isinstance(data, tuple): + d, files = data + if self.inplace: + build_dir = self.get_package_dir('.'.join(d.split(os.sep))) + else: + build_dir = os.path.join(self.build_src, d) + funcs = [f for f in files if hasattr(f, 
'__call__')]
+                files = [f for f in files if not hasattr(f, '__call__')]
+                for f in funcs:
+                    if f.__code__.co_argcount == 1:
+                        s = f(build_dir)
+                    else:
+                        s = f()
+                    if s is not None:
+                        if isinstance(s, list):
+                            files.extend(s)
+                        elif isinstance(s, str):
+                            files.append(s)
+                        else:
+                            raise TypeError(repr(s))
+                filenames = get_data_files((d, files))
+                new_data_files.append((d, filenames))
+            else:
+                raise TypeError(repr(data))
+        self.data_files[:] = new_data_files
+
+
+    def _build_npy_pkg_config(self, info, gd):
+        template, install_dir, subst_dict = info
+        template_dir = os.path.dirname(template)
+        for k, v in gd.items():
+            subst_dict[k] = v
+
+        if self.inplace == 1:
+            generated_dir = os.path.join(template_dir, install_dir)
+        else:
+            generated_dir = os.path.join(self.build_src, template_dir,
+                                         install_dir)
+        generated = os.path.basename(os.path.splitext(template)[0])
+        generated_path = os.path.join(generated_dir, generated)
+        if not os.path.exists(generated_dir):
+            os.makedirs(generated_dir)
+
+        subst_vars(generated_path, template, subst_dict)
+
+        # Where to install relative to the install prefix
+        full_install_dir = os.path.join(template_dir, install_dir)
+        return full_install_dir, generated_path
+
+    def build_npy_pkg_config(self):
+        log.info('build_src: building npy-pkg config files')
+
+        # XXX: another ugly workaround to circumvent distutils brain damage.
+        # We need the install prefix here, but finalizing the options of the
+        # install command when only building sources causes an error.
+        # Instead, we copy the install command instance and finalize the
+        # copy so that it does not disrupt how distutils wants to do things
+        # with the original install command instance.
+        install_cmd = copy.copy(get_cmd('install'))
+        if install_cmd.finalized != 1:
+            install_cmd.finalize_options()
+        build_npkg = False
+        if self.inplace == 1:
+            top_prefix = '.'
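`_build_npy_pkg_config` above fills npy-pkg-config templates via `subst_vars`, which substitutes the first `@var@` token found on each line. A minimal standalone demonstration of that substitution rule:

```python
import re

def subst_line(line, d):
    # mirrors subst_vars(): only the first @var@ on a line is replaced
    m = re.compile('@([a-zA-Z_]+)@').search(line)
    return line.replace('@%s@' % m.group(1), d[m.group(1)]) if m else line

print(subst_line('prefix=@prefix@', {'prefix': '/usr/local'}))
# prints: prefix=/usr/local
```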
+ build_npkg = True + elif hasattr(install_cmd, 'install_libbase'): + top_prefix = install_cmd.install_libbase + build_npkg = True + + if build_npkg: + for pkg, infos in self.distribution.installed_pkg_config.items(): + pkg_path = self.distribution.package_dir[pkg] + prefix = os.path.join(os.path.abspath(top_prefix), pkg_path) + d = {'prefix': prefix} + for info in infos: + install_dir, generated = self._build_npy_pkg_config(info, d) + self.distribution.data_files.append((install_dir, + [generated])) + + def build_py_modules_sources(self): + if not self.py_modules: + return + log.info('building py_modules sources') + new_py_modules = [] + for source in self.py_modules: + if is_sequence(source) and len(source)==3: + package, module_base, source = source + if self.inplace: + build_dir = self.get_package_dir(package) + else: + build_dir = os.path.join(self.build_src, + os.path.join(*package.split('.'))) + if hasattr(source, '__call__'): + target = os.path.join(build_dir, module_base + '.py') + source = source(target) + if source is None: + continue + modules = [(package, module_base, source)] + if package not in self.py_modules_dict: + self.py_modules_dict[package] = [] + self.py_modules_dict[package] += modules + else: + new_py_modules.append(source) + self.py_modules[:] = new_py_modules + + def build_library_sources(self, lib_name, build_info): + sources = list(build_info.get('sources', [])) + + if not sources: + return + + log.info('building library "%s" sources' % (lib_name)) + + sources = self.generate_sources(sources, (lib_name, build_info)) + + sources = self.template_sources(sources, (lib_name, build_info)) + + sources, h_files = self.filter_h_files(sources) + + if h_files: + log.info('%s - nothing done with h_files = %s', + self.package, h_files) + + #for f in h_files: + # self.distribution.headers.append((lib_name,f)) + + build_info['sources'] = sources + return + + def build_extension_sources(self, ext): + + sources = list(ext.sources) + + log.info('building extension "%s" sources' % (ext.name)) + + fullname = self.get_ext_fullname(ext.name) + + modpath = fullname.split('.') + package = '.'.join(modpath[0:-1]) + + if self.inplace: + self.ext_target_dir = self.get_package_dir(package) + + sources = self.generate_sources(sources, ext) + sources = self.template_sources(sources, ext) + sources = self.swig_sources(sources, ext) + sources = self.f2py_sources(sources, ext) + sources = self.pyrex_sources(sources, ext) + + sources, py_files = self.filter_py_files(sources) + + if package not in self.py_modules_dict: + self.py_modules_dict[package] = [] + modules = [] + for f in py_files: + module = os.path.splitext(os.path.basename(f))[0] + modules.append((package, module, f)) + self.py_modules_dict[package] += modules + + sources, h_files = self.filter_h_files(sources) + + if h_files: + log.info('%s - nothing done with h_files = %s', + package, h_files) + #for f in h_files: + # self.distribution.headers.append((package,f)) + + ext.sources = sources + + def generate_sources(self, sources, extension): + new_sources = [] + func_sources = [] + for source in sources: + if is_string(source): + new_sources.append(source) + else: + func_sources.append(source) + if not func_sources: + return new_sources + if self.inplace and not is_sequence(extension): + build_dir = self.ext_target_dir + else: + if is_sequence(extension): + name = extension[0] + # if 'include_dirs' not in extension[1]: + # extension[1]['include_dirs'] = [] + # incl_dirs = extension[1]['include_dirs'] + else: + name = extension.name 
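`generate_sources` (continuing below) also accepts callables in a sources list: each is invoked as `func(extension, build_dir)` and may return a path, a sequence of paths, or None. A hypothetical generator to illustrate the protocol:

```python
import os

def make_config_h(extension, build_dir):
    # called by generate_sources(); the returned path joins the sources
    target = os.path.join(build_dir, 'config.h')
    with open(target, 'w') as fh:
        fh.write('#define EXAMPLE_FEATURE 1\n')
    return target

# hypothetical usage:
#   Extension('mypkg._ext', sources=['ext.c', make_config_h])
```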
+ # incl_dirs = extension.include_dirs + #if self.build_src not in incl_dirs: + # incl_dirs.append(self.build_src) + build_dir = os.path.join(*([self.build_src] + +name.split('.')[:-1])) + self.mkpath(build_dir) + + if self.verbose_cfg: + new_level = log.INFO + else: + new_level = log.WARN + old_level = log.set_threshold(new_level) + + for func in func_sources: + source = func(extension, build_dir) + if not source: + continue + if is_sequence(source): + [log.info(" adding '%s' to sources." % (s,)) for s in source] + new_sources.extend(source) + else: + log.info(" adding '%s' to sources." % (source,)) + new_sources.append(source) + log.set_threshold(old_level) + return new_sources + + def filter_py_files(self, sources): + return self.filter_files(sources, ['.py']) + + def filter_h_files(self, sources): + return self.filter_files(sources, ['.h', '.hpp', '.inc']) + + def filter_files(self, sources, exts = []): + new_sources = [] + files = [] + for source in sources: + (base, ext) = os.path.splitext(source) + if ext in exts: + files.append(source) + else: + new_sources.append(source) + return new_sources, files + + def template_sources(self, sources, extension): + new_sources = [] + if is_sequence(extension): + depends = extension[1].get('depends') + include_dirs = extension[1].get('include_dirs') + else: + depends = extension.depends + include_dirs = extension.include_dirs + for source in sources: + (base, ext) = os.path.splitext(source) + if ext == '.src': # Template file + if self.inplace: + target_dir = os.path.dirname(base) + else: + target_dir = appendpath(self.build_src, os.path.dirname(base)) + self.mkpath(target_dir) + target_file = os.path.join(target_dir, os.path.basename(base)) + if (self.force or newer_group([source] + depends, target_file)): + if _f_pyf_ext_match(base): + log.info("from_template:> %s" % (target_file)) + outstr = process_f_file(source) + else: + log.info("conv_template:> %s" % (target_file)) + outstr = process_c_file(source) + with open(target_file, 'w') as fid: + fid.write(outstr) + if _header_ext_match(target_file): + d = os.path.dirname(target_file) + if d not in include_dirs: + log.info(" adding '%s' to include_dirs." % (d)) + include_dirs.append(d) + new_sources.append(target_file) + else: + new_sources.append(source) + return new_sources + + def pyrex_sources(self, sources, extension): + """Pyrex not supported; this remains for Cython support (see below)""" + new_sources = [] + ext_name = extension.name.split('.')[-1] + for source in sources: + (base, ext) = os.path.splitext(source) + if ext == '.pyx': + target_file = self.generate_a_pyrex_source(base, ext_name, + source, + extension) + new_sources.append(target_file) + else: + new_sources.append(source) + return new_sources + + def generate_a_pyrex_source(self, base, ext_name, source, extension): + """Pyrex is not supported, but some projects monkeypatch this method. + + That allows compiling Cython code, see gh-6955. + This method will remain here for compatibility reasons. 
+ """ + return [] + + def f2py_sources(self, sources, extension): + new_sources = [] + f2py_sources = [] + f_sources = [] + f2py_targets = {} + target_dirs = [] + ext_name = extension.name.split('.')[-1] + skip_f2py = 0 + + for source in sources: + (base, ext) = os.path.splitext(source) + if ext == '.pyf': # F2PY interface file + if self.inplace: + target_dir = os.path.dirname(base) + else: + target_dir = appendpath(self.build_src, os.path.dirname(base)) + if os.path.isfile(source): + name = get_f2py_modulename(source) + if name != ext_name: + raise DistutilsSetupError('mismatch of extension names: %s ' + 'provides %r but expected %r' % ( + source, name, ext_name)) + target_file = os.path.join(target_dir, name+'module.c') + else: + log.debug(' source %s does not exist: skipping f2py\'ing.' \ + % (source)) + name = ext_name + skip_f2py = 1 + target_file = os.path.join(target_dir, name+'module.c') + if not os.path.isfile(target_file): + log.warn(' target %s does not exist:\n '\ + 'Assuming %smodule.c was generated with '\ + '"build_src --inplace" command.' \ + % (target_file, name)) + target_dir = os.path.dirname(base) + target_file = os.path.join(target_dir, name+'module.c') + if not os.path.isfile(target_file): + raise DistutilsSetupError("%r missing" % (target_file,)) + log.info(' Yes! Using %r as up-to-date target.' \ + % (target_file)) + target_dirs.append(target_dir) + f2py_sources.append(source) + f2py_targets[source] = target_file + new_sources.append(target_file) + elif fortran_ext_match(ext): + f_sources.append(source) + else: + new_sources.append(source) + + if not (f2py_sources or f_sources): + return new_sources + + for d in target_dirs: + self.mkpath(d) + + f2py_options = extension.f2py_options + self.f2py_opts + + if self.distribution.libraries: + for name, build_info in self.distribution.libraries: + if name in extension.libraries: + f2py_options.extend(build_info.get('f2py_options', [])) + + log.info("f2py options: %s" % (f2py_options)) + + if f2py_sources: + if len(f2py_sources) != 1: + raise DistutilsSetupError( + 'only one .pyf file is allowed per extension module but got'\ + ' more: %r' % (f2py_sources,)) + source = f2py_sources[0] + target_file = f2py_targets[source] + target_dir = os.path.dirname(target_file) or '.' 
+ depends = [source] + extension.depends + if (self.force or newer_group(depends, target_file, 'newer')) \ + and not skip_f2py: + log.info("f2py: %s" % (source)) + import numpy.f2py + numpy.f2py.run_main(f2py_options + + ['--build-dir', target_dir, source]) + else: + log.debug(" skipping '%s' f2py interface (up-to-date)" % (source)) + else: + #XXX TODO: --inplace support for sdist command + if is_sequence(extension): + name = extension[0] + else: name = extension.name + target_dir = os.path.join(*([self.build_src] + +name.split('.')[:-1])) + target_file = os.path.join(target_dir, ext_name + 'module.c') + new_sources.append(target_file) + depends = f_sources + extension.depends + if (self.force or newer_group(depends, target_file, 'newer')) \ + and not skip_f2py: + log.info("f2py:> %s" % (target_file)) + self.mkpath(target_dir) + import numpy.f2py + numpy.f2py.run_main(f2py_options + ['--lower', + '--build-dir', target_dir]+\ + ['-m', ext_name]+f_sources) + else: + log.debug(" skipping f2py fortran files for '%s' (up-to-date)"\ + % (target_file)) + + if not os.path.isfile(target_file): + raise DistutilsError("f2py target file %r not generated" % (target_file,)) + + build_dir = os.path.join(self.build_src, target_dir) + target_c = os.path.join(build_dir, 'fortranobject.c') + target_h = os.path.join(build_dir, 'fortranobject.h') + log.info(" adding '%s' to sources." % (target_c)) + new_sources.append(target_c) + if build_dir not in extension.include_dirs: + log.info(" adding '%s' to include_dirs." % (build_dir)) + extension.include_dirs.append(build_dir) + + if not skip_f2py: + import numpy.f2py + d = os.path.dirname(numpy.f2py.__file__) + source_c = os.path.join(d, 'src', 'fortranobject.c') + source_h = os.path.join(d, 'src', 'fortranobject.h') + if newer(source_c, target_c) or newer(source_h, target_h): + self.mkpath(os.path.dirname(target_c)) + self.copy_file(source_c, target_c) + self.copy_file(source_h, target_h) + else: + if not os.path.isfile(target_c): + raise DistutilsSetupError("f2py target_c file %r not found" % (target_c,)) + if not os.path.isfile(target_h): + raise DistutilsSetupError("f2py target_h file %r not found" % (target_h,)) + + for name_ext in ['-f2pywrappers.f', '-f2pywrappers2.f90']: + filename = os.path.join(target_dir, ext_name + name_ext) + if os.path.isfile(filename): + log.info(" adding '%s' to sources." % (filename)) + f_sources.append(filename) + + return new_sources + f_sources + + def swig_sources(self, sources, extension): + # Assuming SWIG 1.3.14 or later. 
See compatibility note in + # http://www.swig.org/Doc1.3/Python.html#Python_nn6 + + new_sources = [] + swig_sources = [] + swig_targets = {} + target_dirs = [] + py_files = [] # swig generated .py files + target_ext = '.c' + if '-c++' in extension.swig_opts: + typ = 'c++' + is_cpp = True + extension.swig_opts.remove('-c++') + elif self.swig_cpp: + typ = 'c++' + is_cpp = True + else: + typ = None + is_cpp = False + skip_swig = 0 + ext_name = extension.name.split('.')[-1] + + for source in sources: + (base, ext) = os.path.splitext(source) + if ext == '.i': # SWIG interface file + # the code below assumes that the sources list + # contains not more than one .i SWIG interface file + if self.inplace: + target_dir = os.path.dirname(base) + py_target_dir = self.ext_target_dir + else: + target_dir = appendpath(self.build_src, os.path.dirname(base)) + py_target_dir = target_dir + if os.path.isfile(source): + name = get_swig_modulename(source) + if name != ext_name[1:]: + raise DistutilsSetupError( + 'mismatch of extension names: %s provides %r' + ' but expected %r' % (source, name, ext_name[1:])) + if typ is None: + typ = get_swig_target(source) + is_cpp = typ=='c++' + else: + typ2 = get_swig_target(source) + if typ2 is None: + log.warn('source %r does not define swig target, assuming %s swig target' \ + % (source, typ)) + elif typ!=typ2: + log.warn('expected %r but source %r defines %r swig target' \ + % (typ, source, typ2)) + if typ2=='c++': + log.warn('resetting swig target to c++ (some targets may have .c extension)') + is_cpp = True + else: + log.warn('assuming that %r has c++ swig target' % (source)) + if is_cpp: + target_ext = '.cpp' + target_file = os.path.join(target_dir, '%s_wrap%s' \ + % (name, target_ext)) + else: + log.warn(' source %s does not exist: skipping swig\'ing.' \ + % (source)) + name = ext_name[1:] + skip_swig = 1 + target_file = _find_swig_target(target_dir, name) + if not os.path.isfile(target_file): + log.warn(' target %s does not exist:\n '\ + 'Assuming %s_wrap.{c,cpp} was generated with '\ + '"build_src --inplace" command.' \ + % (target_file, name)) + target_dir = os.path.dirname(base) + target_file = _find_swig_target(target_dir, name) + if not os.path.isfile(target_file): + raise DistutilsSetupError("%r missing" % (target_file,)) + log.warn(' Yes! Using %r as up-to-date target.' 
\ + % (target_file)) + target_dirs.append(target_dir) + new_sources.append(target_file) + py_files.append(os.path.join(py_target_dir, name+'.py')) + swig_sources.append(source) + swig_targets[source] = new_sources[-1] + else: + new_sources.append(source) + + if not swig_sources: + return new_sources + + if skip_swig: + return new_sources + py_files + + for d in target_dirs: + self.mkpath(d) + + swig = self.swig or self.find_swig() + swig_cmd = [swig, "-python"] + extension.swig_opts + if is_cpp: + swig_cmd.append('-c++') + for d in extension.include_dirs: + swig_cmd.append('-I'+d) + for source in swig_sources: + target = swig_targets[source] + depends = [source] + extension.depends + if self.force or newer_group(depends, target, 'newer'): + log.info("%s: %s" % (os.path.basename(swig) \ + + (is_cpp and '++' or ''), source)) + self.spawn(swig_cmd + self.swig_opts \ + + ["-o", target, '-outdir', py_target_dir, source]) + else: + log.debug(" skipping '%s' swig interface (up-to-date)" \ + % (source)) + + return new_sources + py_files + +_f_pyf_ext_match = re.compile(r'.*\.(f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match +_header_ext_match = re.compile(r'.*\.(inc|h|hpp)\Z', re.I).match + +#### SWIG related auxiliary functions #### +_swig_module_name_match = re.compile(r'\s*%module\s*(.*\(\s*package\s*=\s*"(?P[\w_]+)".*\)|)\s*(?P[\w_]+)', + re.I).match +_has_c_header = re.compile(r'-\*-\s*c\s*-\*-', re.I).search +_has_cpp_header = re.compile(r'-\*-\s*c\+\+\s*-\*-', re.I).search + +def get_swig_target(source): + with open(source, 'r') as f: + result = None + line = f.readline() + if _has_cpp_header(line): + result = 'c++' + if _has_c_header(line): + result = 'c' + return result + +def get_swig_modulename(source): + with open(source, 'r') as f: + name = None + for line in f: + m = _swig_module_name_match(line) + if m: + name = m.group('name') + break + return name + +def _find_swig_target(target_dir, name): + for ext in ['.cpp', '.c']: + target = os.path.join(target_dir, '%s_wrap%s' % (name, ext)) + if os.path.isfile(target): + break + return target + +#### F2PY related auxiliary functions #### + +_f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P[\w_]+)', + re.I).match +_f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P[\w_]*?' + r'__user__[\w_]*)', re.I).match + +def get_f2py_modulename(source): + name = None + with open(source) as f: + for line in f: + m = _f2py_module_name_match(line) + if m: + if _f2py_user_module_name_match(line): # skip *__user__* names + continue + name = m.group('name') + break + return name + +########################################## diff --git a/MLPY/Lib/site-packages/numpy/distutils/command/config.py b/MLPY/Lib/site-packages/numpy/distutils/command/config.py new file mode 100644 index 0000000000000000000000000000000000000000..e2435046df2ee688d2bb569403406e84da7f22b3 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/command/config.py @@ -0,0 +1,517 @@ +# Added Fortran compiler support to config. Currently useful only for +# try_compile call. try_run works but is untested for most of Fortran +# compilers (they must define linker_exe first). 
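+#
+# Illustrative use of the added Fortran support (a sketch, not part of this
+# module; ``dist`` is a distutils Distribution instance):
+#
+#     config_cmd = dist.get_command_obj('config')
+#     have_f77 = config_cmd.try_compile('      program t\n      end\n',
+#                                       lang='f77')
+#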
+# Pearu Peterson
+import os
+import signal
+import subprocess
+import sys
+import textwrap
+import warnings
+
+from distutils.command.config import config as old_config
+from distutils.command.config import LANG_EXT
+from distutils import log
+from distutils.file_util import copy_file
+from distutils.ccompiler import CompileError, LinkError
+import distutils
+from numpy.distutils.exec_command import filepath_from_subprocess_output
+from numpy.distutils.mingw32ccompiler import generate_manifest
+from numpy.distutils.command.autodist import (check_gcc_function_attribute,
+                                              check_gcc_function_attribute_with_intrinsics,
+                                              check_gcc_variable_attribute,
+                                              check_gcc_version_at_least,
+                                              check_inline,
+                                              check_restrict,
+                                              check_compiler_gcc)
+
+LANG_EXT['f77'] = '.f'
+LANG_EXT['f90'] = '.f90'
+
+class config(old_config):
+    old_config.user_options += [
+        ('fcompiler=', None, "specify the Fortran compiler type"),
+        ]
+
+    def initialize_options(self):
+        self.fcompiler = None
+        old_config.initialize_options(self)
+
+    def _check_compiler (self):
+        old_config._check_compiler(self)
+        from numpy.distutils.fcompiler import FCompiler, new_fcompiler
+
+        if sys.platform == 'win32' and (self.compiler.compiler_type in
+                                        ('msvc', 'intelw', 'intelemw')):
+            # XXX: hack to circumvent a python 2.6 bug with msvc9compiler:
+            # initialize() calls query_vcvarsall, which throws an IOError, and
+            # causes an error along the way without much information. We try to
+            # catch it here, hoping it is early enough, and print a helpful
+            # message instead of Error: None.
+            if not self.compiler.initialized:
+                try:
+                    self.compiler.initialize()
+                except IOError as e:
+                    msg = textwrap.dedent("""\
+                        Could not initialize compiler instance: do you have Visual Studio
+                        installed? If you are trying to build with MinGW, please use "python setup.py
+                        build -c mingw32" instead. If you have Visual Studio installed, check it is
+                        correctly installed, and the right version (VS 2008 for python 2.6, 2.7 and 3.2,
+                        VS 2010 for >= 3.3).
+
+                        Original exception was: %s, and the Compiler class was %s
+                        ============================================================================""") \
+                        % (e, self.compiler.__class__.__name__)
+                    print(textwrap.dedent("""\
+                        ============================================================================"""))
+                    raise distutils.errors.DistutilsPlatformError(msg) from e
+
+            # After MSVC is initialized, add an explicit /MANIFEST to linker
+            # flags. See issues gh-4245 and gh-4101 for details. Also
+            # relevant are issues 4431 and 16296 on the Python bug tracker.
+ from distutils import msvc9compiler + if msvc9compiler.get_build_version() >= 10: + for ldflags in [self.compiler.ldflags_shared, + self.compiler.ldflags_shared_debug]: + if '/MANIFEST' not in ldflags: + ldflags.append('/MANIFEST') + + if not isinstance(self.fcompiler, FCompiler): + self.fcompiler = new_fcompiler(compiler=self.fcompiler, + dry_run=self.dry_run, force=1, + c_compiler=self.compiler) + if self.fcompiler is not None: + self.fcompiler.customize(self.distribution) + if self.fcompiler.get_version(): + self.fcompiler.customize_cmd(self) + self.fcompiler.show_customization() + + def _wrap_method(self, mth, lang, args): + from distutils.ccompiler import CompileError + from distutils.errors import DistutilsExecError + save_compiler = self.compiler + if lang in ['f77', 'f90']: + self.compiler = self.fcompiler + if self.compiler is None: + raise CompileError('%s compiler is not set' % (lang,)) + try: + ret = mth(*((self,)+args)) + except (DistutilsExecError, CompileError) as e: + self.compiler = save_compiler + raise CompileError from e + self.compiler = save_compiler + return ret + + def _compile (self, body, headers, include_dirs, lang): + src, obj = self._wrap_method(old_config._compile, lang, + (body, headers, include_dirs, lang)) + # _compile in unixcompiler.py sometimes creates .d dependency files. + # Clean them up. + self.temp_files.append(obj + '.d') + return src, obj + + def _link (self, body, + headers, include_dirs, + libraries, library_dirs, lang): + if self.compiler.compiler_type=='msvc': + libraries = (libraries or [])[:] + library_dirs = (library_dirs or [])[:] + if lang in ['f77', 'f90']: + lang = 'c' # always use system linker when using MSVC compiler + if self.fcompiler: + for d in self.fcompiler.library_dirs or []: + # correct path when compiling in Cygwin but with + # normal Win Python + if d.startswith('/usr/lib'): + try: + d = subprocess.check_output(['cygpath', + '-w', d]) + except (OSError, subprocess.CalledProcessError): + pass + else: + d = filepath_from_subprocess_output(d) + library_dirs.append(d) + for libname in self.fcompiler.libraries or []: + if libname not in libraries: + libraries.append(libname) + for libname in libraries: + if libname.startswith('msvc'): continue + fileexists = False + for libdir in library_dirs or []: + libfile = os.path.join(libdir, '%s.lib' % (libname)) + if os.path.isfile(libfile): + fileexists = True + break + if fileexists: continue + # make g77-compiled static libs available to MSVC + fileexists = False + for libdir in library_dirs: + libfile = os.path.join(libdir, 'lib%s.a' % (libname)) + if os.path.isfile(libfile): + # copy libname.a file to name.lib so that MSVC linker + # can find it + libfile2 = os.path.join(libdir, '%s.lib' % (libname)) + copy_file(libfile, libfile2) + self.temp_files.append(libfile2) + fileexists = True + break + if fileexists: continue + log.warn('could not find library %r in directories %s' \ + % (libname, library_dirs)) + elif self.compiler.compiler_type == 'mingw32': + generate_manifest(self) + return self._wrap_method(old_config._link, lang, + (body, headers, include_dirs, + libraries, library_dirs, lang)) + + def check_header(self, header, include_dirs=None, library_dirs=None, lang='c'): + self._check_compiler() + return self.try_compile( + "/* we need a dummy line to make distutils happy */", + [header], include_dirs) + + def check_decl(self, symbol, + headers=None, include_dirs=None): + self._check_compiler() + body = textwrap.dedent(""" + int main(void) + { + #ifndef %s + (void) %s; + 
#endif + ; + return 0; + }""") % (symbol, symbol) + + return self.try_compile(body, headers, include_dirs) + + def check_macro_true(self, symbol, + headers=None, include_dirs=None): + self._check_compiler() + body = textwrap.dedent(""" + int main(void) + { + #if %s + #else + #error false or undefined macro + #endif + ; + return 0; + }""") % (symbol,) + + return self.try_compile(body, headers, include_dirs) + + def check_type(self, type_name, headers=None, include_dirs=None, + library_dirs=None): + """Check type availability. Return True if the type can be compiled, + False otherwise""" + self._check_compiler() + + # First check the type can be compiled + body = textwrap.dedent(r""" + int main(void) { + if ((%(name)s *) 0) + return 0; + if (sizeof (%(name)s)) + return 0; + } + """) % {'name': type_name} + + st = False + try: + try: + self._compile(body % {'type': type_name}, + headers, include_dirs, 'c') + st = True + except distutils.errors.CompileError: + st = False + finally: + self._clean() + + return st + + def check_type_size(self, type_name, headers=None, include_dirs=None, library_dirs=None, expected=None): + """Check size of a given type.""" + self._check_compiler() + + # First check the type can be compiled + body = textwrap.dedent(r""" + typedef %(type)s npy_check_sizeof_type; + int main (void) + { + static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) >= 0)]; + test_array [0] = 0 + + ; + return 0; + } + """) + self._compile(body % {'type': type_name}, + headers, include_dirs, 'c') + self._clean() + + if expected: + body = textwrap.dedent(r""" + typedef %(type)s npy_check_sizeof_type; + int main (void) + { + static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) == %(size)s)]; + test_array [0] = 0 + + ; + return 0; + } + """) + for size in expected: + try: + self._compile(body % {'type': type_name, 'size': size}, + headers, include_dirs, 'c') + self._clean() + return size + except CompileError: + pass + + # this fails to *compile* if size > sizeof(type) + body = textwrap.dedent(r""" + typedef %(type)s npy_check_sizeof_type; + int main (void) + { + static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) <= %(size)s)]; + test_array [0] = 0 + + ; + return 0; + } + """) + + # The principle is simple: we first find low and high bounds of size + # for the type, where low/high are looked up on a log scale. Then, we + # do a binary search to find the exact size between low and high + low = 0 + mid = 0 + while True: + try: + self._compile(body % {'type': type_name, 'size': mid}, + headers, include_dirs, 'c') + self._clean() + break + except CompileError: + #log.info("failure to test for bound %d" % mid) + low = mid + 1 + mid = 2 * mid + 1 + + high = mid + # Binary search: + while low != high: + mid = (high - low) // 2 + low + try: + self._compile(body % {'type': type_name, 'size': mid}, + headers, include_dirs, 'c') + self._clean() + high = mid + except CompileError: + low = mid + 1 + return low + + def check_func(self, func, + headers=None, include_dirs=None, + libraries=None, library_dirs=None, + decl=False, call=False, call_args=None): + # clean up distutils's config a bit: add void to main(), and + # return a value. + self._check_compiler() + body = [] + if decl: + if type(decl) == str: + body.append(decl) + else: + body.append("int %s (void);" % func) + # Handle MSVC intrinsics: force MS compiler to make a function call. 
+ # Useful to test for some functions when built with optimization on, to + # avoid build error because the intrinsic and our 'fake' test + # declaration do not match. + body.append("#ifdef _MSC_VER") + body.append("#pragma function(%s)" % func) + body.append("#endif") + body.append("int main (void) {") + if call: + if call_args is None: + call_args = '' + body.append(" %s(%s);" % (func, call_args)) + else: + body.append(" %s;" % func) + body.append(" return 0;") + body.append("}") + body = '\n'.join(body) + "\n" + + return self.try_link(body, headers, include_dirs, + libraries, library_dirs) + + def check_funcs_once(self, funcs, + headers=None, include_dirs=None, + libraries=None, library_dirs=None, + decl=False, call=False, call_args=None): + """Check a list of functions at once. + + This is useful to speed up things, since all the functions in the funcs + list will be put in one compilation unit. + + Arguments + --------- + funcs : seq + list of functions to test + include_dirs : seq + list of header paths + libraries : seq + list of libraries to link the code snippet to + library_dirs : seq + list of library paths + decl : dict + for every (key, value), the declaration in the value will be + used for function in key. If a function is not in the + dictionary, no declaration will be used. + call : dict + for every item (f, value), if the value is True, a call will be + done to the function f. + """ + self._check_compiler() + body = [] + if decl: + for f, v in decl.items(): + if v: + body.append("int %s (void);" % f) + + # Handle MS intrinsics. See check_func for more info. + body.append("#ifdef _MSC_VER") + for func in funcs: + body.append("#pragma function(%s)" % func) + body.append("#endif") + + body.append("int main (void) {") + if call: + for f in funcs: + if f in call and call[f]: + if not (call_args and f in call_args and call_args[f]): + args = '' + else: + args = call_args[f] + body.append(" %s(%s);" % (f, args)) + else: + body.append(" %s;" % f) + else: + for f in funcs: + body.append(" %s;" % f) + body.append(" return 0;") + body.append("}") + body = '\n'.join(body) + "\n" + + return self.try_link(body, headers, include_dirs, + libraries, library_dirs) + + def check_inline(self): + """Return the inline keyword recognized by the compiler, empty string + otherwise.""" + return check_inline(self) + + def check_restrict(self): + """Return the restrict keyword recognized by the compiler, empty string + otherwise.""" + return check_restrict(self) + + def check_compiler_gcc(self): + """Return True if the C compiler is gcc""" + return check_compiler_gcc(self) + + def check_gcc_function_attribute(self, attribute, name): + return check_gcc_function_attribute(self, attribute, name) + + def check_gcc_function_attribute_with_intrinsics(self, attribute, name, + code, include): + return check_gcc_function_attribute_with_intrinsics(self, attribute, + name, code, include) + + def check_gcc_variable_attribute(self, attribute): + return check_gcc_variable_attribute(self, attribute) + + def check_gcc_version_at_least(self, major, minor=0, patchlevel=0): + """Return True if the GCC version is greater than or equal to the + specified version.""" + return check_gcc_version_at_least(self, major, minor, patchlevel) + + def get_output(self, body, headers=None, include_dirs=None, + libraries=None, library_dirs=None, + lang="c", use_tee=None): + """Try to compile, link to an executable, and run a program + built from 'body' and 'headers'. Returns the exit status code + of the program and its output. 
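+
+        Deprecated; avoid configuration checks that must run an executable
+        on the target machine (see the DeprecationWarning emitted below).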
+ """ + # 2008-11-16, RemoveMe + warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n" + "Usage of get_output is deprecated: please do not \n" + "use it anymore, and avoid configuration checks \n" + "involving running executable on the target machine.\n" + "+++++++++++++++++++++++++++++++++++++++++++++++++\n", + DeprecationWarning, stacklevel=2) + self._check_compiler() + exitcode, output = 255, '' + try: + grabber = GrabStdout() + try: + src, obj, exe = self._link(body, headers, include_dirs, + libraries, library_dirs, lang) + grabber.restore() + except Exception: + output = grabber.data + grabber.restore() + raise + exe = os.path.join('.', exe) + try: + # specify cwd arg for consistency with + # historic usage pattern of exec_command() + # also, note that exe appears to be a string, + # which exec_command() handled, but we now + # use a list for check_output() -- this assumes + # that exe is always a single command + output = subprocess.check_output([exe], cwd='.') + except subprocess.CalledProcessError as exc: + exitstatus = exc.returncode + output = '' + except OSError: + # preserve the EnvironmentError exit status + # used historically in exec_command() + exitstatus = 127 + output = '' + else: + output = filepath_from_subprocess_output(output) + if hasattr(os, 'WEXITSTATUS'): + exitcode = os.WEXITSTATUS(exitstatus) + if os.WIFSIGNALED(exitstatus): + sig = os.WTERMSIG(exitstatus) + log.error('subprocess exited with signal %d' % (sig,)) + if sig == signal.SIGINT: + # control-C + raise KeyboardInterrupt + else: + exitcode = exitstatus + log.info("success!") + except (CompileError, LinkError): + log.info("failure.") + self._clean() + return exitcode, output + +class GrabStdout: + + def __init__(self): + self.sys_stdout = sys.stdout + self.data = '' + sys.stdout = self + + def write (self, data): + self.sys_stdout.write(data) + self.data += data + + def flush (self): + self.sys_stdout.flush() + + def restore(self): + sys.stdout = self.sys_stdout diff --git a/MLPY/Lib/site-packages/numpy/distutils/command/config_compiler.py b/MLPY/Lib/site-packages/numpy/distutils/command/config_compiler.py new file mode 100644 index 0000000000000000000000000000000000000000..83a8a39b1d082faae48ce6c605bda49dd183e363 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/command/config_compiler.py @@ -0,0 +1,126 @@ +from distutils.core import Command +from numpy.distutils import log + +#XXX: Linker flags + +def show_fortran_compilers(_cache=None): + # Using cache to prevent infinite recursion. + if _cache: + return + elif _cache is None: + _cache = [] + _cache.append(1) + from numpy.distutils.fcompiler import show_fcompilers + import distutils.core + dist = distutils.core._setup_distribution + show_fcompilers(dist) + +class config_fc(Command): + """ Distutils command to hold user specified options + to Fortran compilers. + + config_fc command is used by the FCompiler.customize() method. 
+ """ + + description = "specify Fortran 77/Fortran 90 compiler information" + + user_options = [ + ('fcompiler=', None, "specify Fortran compiler type"), + ('f77exec=', None, "specify F77 compiler command"), + ('f90exec=', None, "specify F90 compiler command"), + ('f77flags=', None, "specify F77 compiler flags"), + ('f90flags=', None, "specify F90 compiler flags"), + ('opt=', None, "specify optimization flags"), + ('arch=', None, "specify architecture specific optimization flags"), + ('debug', 'g', "compile with debugging information"), + ('noopt', None, "compile without optimization"), + ('noarch', None, "compile without arch-dependent optimization"), + ] + + help_options = [ + ('help-fcompiler', None, "list available Fortran compilers", + show_fortran_compilers), + ] + + boolean_options = ['debug', 'noopt', 'noarch'] + + def initialize_options(self): + self.fcompiler = None + self.f77exec = None + self.f90exec = None + self.f77flags = None + self.f90flags = None + self.opt = None + self.arch = None + self.debug = None + self.noopt = None + self.noarch = None + + def finalize_options(self): + log.info('unifing config_fc, config, build_clib, build_ext, build commands --fcompiler options') + build_clib = self.get_finalized_command('build_clib') + build_ext = self.get_finalized_command('build_ext') + config = self.get_finalized_command('config') + build = self.get_finalized_command('build') + cmd_list = [self, config, build_clib, build_ext, build] + for a in ['fcompiler']: + l = [] + for c in cmd_list: + v = getattr(c, a) + if v is not None: + if not isinstance(v, str): v = v.compiler_type + if v not in l: l.append(v) + if not l: v1 = None + else: v1 = l[0] + if len(l)>1: + log.warn(' commands have different --%s options: %s'\ + ', using first in list as default' % (a, l)) + if v1: + for c in cmd_list: + if getattr(c, a) is None: setattr(c, a, v1) + + def run(self): + # Do nothing. + return + +class config_cc(Command): + """ Distutils command to hold user specified options + to C/C++ compilers. + """ + + description = "specify C/C++ compiler information" + + user_options = [ + ('compiler=', None, "specify C/C++ compiler type"), + ] + + def initialize_options(self): + self.compiler = None + + def finalize_options(self): + log.info('unifing config_cc, config, build_clib, build_ext, build commands --compiler options') + build_clib = self.get_finalized_command('build_clib') + build_ext = self.get_finalized_command('build_ext') + config = self.get_finalized_command('config') + build = self.get_finalized_command('build') + cmd_list = [self, config, build_clib, build_ext, build] + for a in ['compiler']: + l = [] + for c in cmd_list: + v = getattr(c, a) + if v is not None: + if not isinstance(v, str): v = v.compiler_type + if v not in l: l.append(v) + if not l: v1 = None + else: v1 = l[0] + if len(l)>1: + log.warn(' commands have different --%s options: %s'\ + ', using first in list as default' % (a, l)) + if v1: + for c in cmd_list: + if getattr(c, a) is None: setattr(c, a, v1) + return + + def run(self): + # Do nothing. 
+ return diff --git a/MLPY/Lib/site-packages/numpy/distutils/command/develop.py b/MLPY/Lib/site-packages/numpy/distutils/command/develop.py new file mode 100644 index 0000000000000000000000000000000000000000..5cef66ce42387601d9364051f63ad92308a7e1c3 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/command/develop.py @@ -0,0 +1,15 @@ +""" Override the develop command from setuptools so we can ensure that our +generated files (from build_src or build_scripts) are properly converted to real +files with filenames. + +""" +from setuptools.command.develop import develop as old_develop + +class develop(old_develop): + __doc__ = old_develop.__doc__ + def install_for_development(self): + # Build sources in-place, too. + self.reinitialize_command('build_src', inplace=1) + # Make sure scripts are built. + self.run_command('build_scripts') + old_develop.install_for_development(self) diff --git a/MLPY/Lib/site-packages/numpy/distutils/command/egg_info.py b/MLPY/Lib/site-packages/numpy/distutils/command/egg_info.py new file mode 100644 index 0000000000000000000000000000000000000000..c59836d23161ba0653e8350d979e2365f629a38e --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/command/egg_info.py @@ -0,0 +1,25 @@ +import sys + +from setuptools.command.egg_info import egg_info as _egg_info + +class egg_info(_egg_info): + def run(self): + if 'sdist' in sys.argv: + import warnings + import textwrap + msg = textwrap.dedent(""" + `build_src` is being run, this may lead to missing + files in your sdist! You want to use distutils.sdist + instead of the setuptools version: + + from distutils.command.sdist import sdist + cmdclass={'sdist': sdist}" + + See numpy's setup.py or gh-7131 for details.""") + warnings.warn(msg, UserWarning, stacklevel=2) + + # We need to ensure that build_src has been executed in order to give + # setuptools' egg_info command real filenames instead of functions which + # generate files. + self.run_command("build_src") + _egg_info.run(self) diff --git a/MLPY/Lib/site-packages/numpy/distutils/command/install.py b/MLPY/Lib/site-packages/numpy/distutils/command/install.py new file mode 100644 index 0000000000000000000000000000000000000000..2f7c15f50681f451be7638789905246a78d3842a --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/command/install.py @@ -0,0 +1,79 @@ +import sys +if 'setuptools' in sys.modules: + import setuptools.command.install as old_install_mod + have_setuptools = True +else: + import distutils.command.install as old_install_mod + have_setuptools = False +from distutils.file_util import write_file + +old_install = old_install_mod.install + +class install(old_install): + + # Always run install_clib - the command is cheap, so no need to bypass it; + # but it's not run by setuptools -- so it's run again in install_data + sub_commands = old_install.sub_commands + [ + ('install_clib', lambda x: True) + ] + + def finalize_options (self): + old_install.finalize_options(self) + self.install_lib = self.install_libbase + + def setuptools_run(self): + """ The setuptools version of the .run() method. + + We must pull in the entire code so we can override the level used in the + _getframe() call since we wrap this call by one more level. + """ + from distutils.command.install import install as distutils_install + + # Explicit request for old-style install? Just do it + if self.old_and_unmanageable or self.single_version_externally_managed: + return distutils_install.run(self) + + # Attempt to detect whether we were called from setup() or by another + # command. 
If we were called by setup(), our caller will be the + # 'run_command' method in 'distutils.dist', and *its* caller will be + # the 'run_commands' method. If we were called any other way, our + # immediate caller *might* be 'run_command', but it won't have been + # called by 'run_commands'. This is slightly kludgy, but seems to + # work. + # + caller = sys._getframe(3) + caller_module = caller.f_globals.get('__name__', '') + caller_name = caller.f_code.co_name + + if caller_module != 'distutils.dist' or caller_name!='run_commands': + # We weren't called from the command line or setup(), so we + # should run in backward-compatibility mode to support bdist_* + # commands. + distutils_install.run(self) + else: + self.do_egg_install() + + def run(self): + if not have_setuptools: + r = old_install.run(self) + else: + r = self.setuptools_run() + if self.record: + # bdist_rpm fails when INSTALLED_FILES contains + # paths with spaces. Such paths must be enclosed + # with double-quotes. + with open(self.record, 'r') as f: + lines = [] + need_rewrite = False + for l in f: + l = l.rstrip() + if ' ' in l: + need_rewrite = True + l = '"%s"' % (l) + lines.append(l) + if need_rewrite: + self.execute(write_file, + (self.record, lines), + "re-writing list of installed files to '%s'" % + self.record) + return r diff --git a/MLPY/Lib/site-packages/numpy/distutils/command/install_clib.py b/MLPY/Lib/site-packages/numpy/distutils/command/install_clib.py new file mode 100644 index 0000000000000000000000000000000000000000..1a0569ae5d60c9129ec4309291056923f496700f --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/command/install_clib.py @@ -0,0 +1,40 @@ +import os +from distutils.core import Command +from distutils.ccompiler import new_compiler +from numpy.distutils.misc_util import get_cmd + +class install_clib(Command): + description = "Command to install installable C libraries" + + user_options = [] + + def initialize_options(self): + self.install_dir = None + self.outfiles = [] + + def finalize_options(self): + self.set_undefined_options('install', ('install_lib', 'install_dir')) + + def run (self): + build_clib_cmd = get_cmd("build_clib") + if not build_clib_cmd.build_clib: + # can happen if the user specified `--skip-build` + build_clib_cmd.finalize_options() + build_dir = build_clib_cmd.build_clib + + # We need the compiler to get the library name -> filename association + if not build_clib_cmd.compiler: + compiler = new_compiler(compiler=None) + compiler.customize(self.distribution) + else: + compiler = build_clib_cmd.compiler + + for l in self.distribution.installed_libraries: + target_dir = os.path.join(self.install_dir, l.target_dir) + name = compiler.library_filename(l.name) + source = os.path.join(build_dir, name) + self.mkpath(target_dir) + self.outfiles.append(self.copy_file(source, target_dir)[0]) + + def get_outputs(self): + return self.outfiles diff --git a/MLPY/Lib/site-packages/numpy/distutils/command/install_data.py b/MLPY/Lib/site-packages/numpy/distutils/command/install_data.py new file mode 100644 index 0000000000000000000000000000000000000000..0bc0ce422bad47d5b69963b9906b68d7e971ac33 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/command/install_data.py @@ -0,0 +1,24 @@ +import sys +have_setuptools = ('setuptools' in sys.modules) + +from distutils.command.install_data import install_data as old_install_data + +#data installer with improved intelligence over distutils +#data files are copied into the project directory instead +#of willy-nilly +class install_data 
(old_install_data): + + def run(self): + old_install_data.run(self) + + if have_setuptools: + # Run install_clib again, since setuptools does not run sub-commands + # of install automatically + self.run_command('install_clib') + + def finalize_options (self): + self.set_undefined_options('install', + ('install_lib', 'install_dir'), + ('root', 'root'), + ('force', 'force'), + ) diff --git a/MLPY/Lib/site-packages/numpy/distutils/command/install_headers.py b/MLPY/Lib/site-packages/numpy/distutils/command/install_headers.py new file mode 100644 index 0000000000000000000000000000000000000000..131a7e5882ce8fffe589e3979a8b924a06c62b81 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/command/install_headers.py @@ -0,0 +1,25 @@ +import os +from distutils.command.install_headers import install_headers as old_install_headers + +class install_headers (old_install_headers): + + def run (self): + headers = self.distribution.headers + if not headers: + return + + prefix = os.path.dirname(self.install_dir) + for header in headers: + if isinstance(header, tuple): + # Kind of a hack, but I don't know where else to change this... + if header[0] == 'numpy.core': + header = ('numpy', header[1]) + if os.path.splitext(header[1])[1] == '.inc': + continue + d = os.path.join(*([prefix]+header[0].split('.'))) + header = header[1] + else: + d = self.install_dir + self.mkpath(d) + (out, _) = self.copy_file(header, d) + self.outfiles.append(out) diff --git a/MLPY/Lib/site-packages/numpy/distutils/command/sdist.py b/MLPY/Lib/site-packages/numpy/distutils/command/sdist.py new file mode 100644 index 0000000000000000000000000000000000000000..ed0e75cacf5e667499a1d3dee6eac4223c579c46 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/command/sdist.py @@ -0,0 +1,27 @@ +import sys +if 'setuptools' in sys.modules: + from setuptools.command.sdist import sdist as old_sdist +else: + from distutils.command.sdist import sdist as old_sdist + +from numpy.distutils.misc_util import get_data_files + +class sdist(old_sdist): + + def add_defaults (self): + old_sdist.add_defaults(self) + + dist = self.distribution + + if dist.has_data_files(): + for data in dist.data_files: + self.filelist.extend(get_data_files(data)) + + if dist.has_headers(): + headers = [] + for h in dist.headers: + if isinstance(h, str): headers.append(h) + else: headers.append(h[1]) + self.filelist.extend(headers) + + return diff --git a/MLPY/Lib/site-packages/numpy/distutils/conv_template.py b/MLPY/Lib/site-packages/numpy/distutils/conv_template.py new file mode 100644 index 0000000000000000000000000000000000000000..223c4a2751e558e6a9b9507f5bc54963de53dbb5 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/conv_template.py @@ -0,0 +1,329 @@ +#!/usr/bin/env python3 +""" +takes templated file .xxx.src and produces .xxx file where .xxx is +.i or .c or .h, using the following template rules + +/**begin repeat -- on a line by itself marks the start of a repeated code + segment +/**end repeat**/ -- on a line by itself marks it's end + +After the /**begin repeat and before the */, all the named templates are placed +these should all have the same number of replacements + +Repeat blocks can be nested, with each nested block labeled with its depth, +i.e. +/**begin repeat1 + *.... + */ +/**end repeat1**/ + +When using nested loops, you can optionally exclude particular +combinations of the variables using (inside the comment portion of the inner loop): + + :exclude: var1=value1, var2=value2, ... 
+ +This will exclude the pattern where var1 is value1 and var2 is value2 when +the result is being generated. + + +In the main body each replace will use one entry from the list of named replacements + + Note that all #..# forms in a block must have the same number of + comma-separated entries. + +Example: + + An input file containing + + /**begin repeat + * #a = 1,2,3# + * #b = 1,2,3# + */ + + /**begin repeat1 + * #c = ted, jim# + */ + @a@, @b@, @c@ + /**end repeat1**/ + + /**end repeat**/ + + produces + + line 1 "template.c.src" + + /* + ********************************************************************* + ** This file was autogenerated from a template DO NOT EDIT!!** + ** Changes should be made to the original source (.src) file ** + ********************************************************************* + */ + + #line 9 + 1, 1, ted + + #line 9 + 1, 1, jim + + #line 9 + 2, 2, ted + + #line 9 + 2, 2, jim + + #line 9 + 3, 3, ted + + #line 9 + 3, 3, jim + +""" + +__all__ = ['process_str', 'process_file'] + +import os +import sys +import re + +# names for replacement that are already global. +global_names = {} + +# header placed at the front of head processed file +header =\ +""" +/* + ***************************************************************************** + ** This file was autogenerated from a template DO NOT EDIT!!!! ** + ** Changes should be made to the original source (.src) file ** + ***************************************************************************** + */ + +""" +# Parse string for repeat loops +def parse_structure(astr, level): + """ + The returned line number is from the beginning of the string, starting + at zero. Returns an empty list if no loops found. + + """ + if level == 0 : + loopbeg = "/**begin repeat" + loopend = "/**end repeat**/" + else : + loopbeg = "/**begin repeat%d" % level + loopend = "/**end repeat%d**/" % level + + ind = 0 + line = 0 + spanlist = [] + while True: + start = astr.find(loopbeg, ind) + if start == -1: + break + start2 = astr.find("*/", start) + start2 = astr.find("\n", start2) + fini1 = astr.find(loopend, start2) + fini2 = astr.find("\n", fini1) + line += astr.count("\n", ind, start2+1) + spanlist.append((start, start2+1, fini1, fini2+1, line)) + line += astr.count("\n", start2+1, fini2) + ind = fini2 + spanlist.sort() + return spanlist + + +def paren_repl(obj): + torep = obj.group(1) + numrep = obj.group(2) + return ','.join([torep]*int(numrep)) + +parenrep = re.compile(r"\(([^)]*)\)\*(\d+)") +plainrep = re.compile(r"([^*]+)\*(\d+)") +def parse_values(astr): + # replaces all occurrences of '(a,b,c)*4' in astr + # with 'a,b,c,a,b,c,a,b,c,a,b,c'. Empty braces generate + # empty values, i.e., ()*4 yields ',,,'. The result is + # split at ',' and a list of values returned. + astr = parenrep.sub(paren_repl, astr) + # replaces occurrences of xxx*3 with xxx, xxx, xxx + astr = ','.join([plainrep.sub(paren_repl, x.strip()) + for x in astr.split(',')]) + return astr.split(',') + + +stripast = re.compile(r"\n\s*\*?") +named_re = re.compile(r"#\s*(\w*)\s*=([^#]*)#") +exclude_vars_re = re.compile(r"(\w*)=(\w*)") +exclude_re = re.compile(":exclude:") +def parse_loop_header(loophead) : + """Find all named replacements in the header + + Returns a list of dictionaries, one for each loop iteration, + where each key is a name to be substituted and the corresponding + value is the replacement string. + + Also return a list of exclusions. The exclusions are dictionaries + of key value pairs. There can be more than one exclusion. 
+ [{'var1':'value1', 'var2', 'value2'[,...]}, ...] + + """ + # Strip out '\n' and leading '*', if any, in continuation lines. + # This should not effect code previous to this change as + # continuation lines were not allowed. + loophead = stripast.sub("", loophead) + # parse out the names and lists of values + names = [] + reps = named_re.findall(loophead) + nsub = None + for rep in reps: + name = rep[0] + vals = parse_values(rep[1]) + size = len(vals) + if nsub is None : + nsub = size + elif nsub != size : + msg = "Mismatch in number of values, %d != %d\n%s = %s" + raise ValueError(msg % (nsub, size, name, vals)) + names.append((name, vals)) + + + # Find any exclude variables + excludes = [] + + for obj in exclude_re.finditer(loophead): + span = obj.span() + # find next newline + endline = loophead.find('\n', span[1]) + substr = loophead[span[1]:endline] + ex_names = exclude_vars_re.findall(substr) + excludes.append(dict(ex_names)) + + # generate list of dictionaries, one for each template iteration + dlist = [] + if nsub is None : + raise ValueError("No substitution variables found") + for i in range(nsub): + tmp = {name: vals[i] for name, vals in names} + dlist.append(tmp) + return dlist + +replace_re = re.compile(r"@(\w+)@") +def parse_string(astr, env, level, line) : + lineno = "#line %d\n" % line + + # local function for string replacement, uses env + def replace(match): + name = match.group(1) + try : + val = env[name] + except KeyError: + msg = 'line %d: no definition of key "%s"'%(line, name) + raise ValueError(msg) from None + return val + + code = [lineno] + struct = parse_structure(astr, level) + if struct : + # recurse over inner loops + oldend = 0 + newlevel = level + 1 + for sub in struct: + pref = astr[oldend:sub[0]] + head = astr[sub[0]:sub[1]] + text = astr[sub[1]:sub[2]] + oldend = sub[3] + newline = line + sub[4] + code.append(replace_re.sub(replace, pref)) + try : + envlist = parse_loop_header(head) + except ValueError as e: + msg = "line %d: %s" % (newline, e) + raise ValueError(msg) + for newenv in envlist : + newenv.update(env) + newcode = parse_string(text, newenv, newlevel, newline) + code.extend(newcode) + suff = astr[oldend:] + code.append(replace_re.sub(replace, suff)) + else : + # replace keys + code.append(replace_re.sub(replace, astr)) + code.append('\n') + return ''.join(code) + +def process_str(astr): + code = [header] + code.extend(parse_string(astr, global_names, 0, 1)) + return ''.join(code) + + +include_src_re = re.compile(r"(\n|\A)#include\s*['\"]" + r"(?P[\w\d./\\]+[.]src)['\"]", re.I) + +def resolve_includes(source): + d = os.path.dirname(source) + with open(source) as fid: + lines = [] + for line in fid: + m = include_src_re.match(line) + if m: + fn = m.group('name') + if not os.path.isabs(fn): + fn = os.path.join(d, fn) + if os.path.isfile(fn): + lines.extend(resolve_includes(fn)) + else: + lines.append(line) + else: + lines.append(line) + return lines + +def process_file(source): + lines = resolve_includes(source) + sourcefile = os.path.normcase(source).replace("\\", "\\\\") + try: + code = process_str(''.join(lines)) + except ValueError as e: + raise ValueError('In "%s" loop at %s' % (sourcefile, e)) from None + return '#line 1 "%s"\n%s' % (sourcefile, code) + + +def unique_key(adict): + # this obtains a unique key given a dictionary + # currently it works by appending together n of the letters of the + # current keys and increasing n until a unique key is found + # -- not particularly quick + allkeys = list(adict.keys()) + done = False + n = 1 
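+    # e.g. with keys ['ab', 'b']: n=1 gives 'ab' (already a key), n=2
+    # gives 'abb', which is unique.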
+ while not done: + newkey = "".join([x[:n] for x in allkeys]) + if newkey in allkeys: + n += 1 + else: + done = True + return newkey + + +def main(): + try: + file = sys.argv[1] + except IndexError: + fid = sys.stdin + outfile = sys.stdout + else: + fid = open(file, 'r') + (base, ext) = os.path.splitext(file) + newname = base + outfile = open(newname, 'w') + + allstr = fid.read() + try: + writestr = process_str(allstr) + except ValueError as e: + raise ValueError("In %s loop at %s" % (file, e)) from None + + outfile.write(writestr) + +if __name__ == "__main__": + main() diff --git a/MLPY/Lib/site-packages/numpy/distutils/core.py b/MLPY/Lib/site-packages/numpy/distutils/core.py new file mode 100644 index 0000000000000000000000000000000000000000..923546eac2c7c42e503af86fd1ce5987a48c2581 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/core.py @@ -0,0 +1,215 @@ +import sys +from distutils.core import Distribution + +if 'setuptools' in sys.modules: + have_setuptools = True + from setuptools import setup as old_setup + # easy_install imports math, it may be picked up from cwd + from setuptools.command import easy_install + try: + # very old versions of setuptools don't have this + from setuptools.command import bdist_egg + except ImportError: + have_setuptools = False +else: + from distutils.core import setup as old_setup + have_setuptools = False + +import warnings +import distutils.core +import distutils.dist + +from numpy.distutils.extension import Extension +from numpy.distutils.numpy_distribution import NumpyDistribution +from numpy.distutils.command import config, config_compiler, \ + build, build_py, build_ext, build_clib, build_src, build_scripts, \ + sdist, install_data, install_headers, install, bdist_rpm, \ + install_clib +from numpy.distutils.misc_util import is_sequence, is_string + +numpy_cmdclass = {'build': build.build, + 'build_src': build_src.build_src, + 'build_scripts': build_scripts.build_scripts, + 'config_cc': config_compiler.config_cc, + 'config_fc': config_compiler.config_fc, + 'config': config.config, + 'build_ext': build_ext.build_ext, + 'build_py': build_py.build_py, + 'build_clib': build_clib.build_clib, + 'sdist': sdist.sdist, + 'install_data': install_data.install_data, + 'install_headers': install_headers.install_headers, + 'install_clib': install_clib.install_clib, + 'install': install.install, + 'bdist_rpm': bdist_rpm.bdist_rpm, + } +if have_setuptools: + # Use our own versions of develop and egg_info to ensure that build_src is + # handled appropriately. + from numpy.distutils.command import develop, egg_info + numpy_cmdclass['bdist_egg'] = bdist_egg.bdist_egg + numpy_cmdclass['develop'] = develop.develop + numpy_cmdclass['easy_install'] = easy_install.easy_install + numpy_cmdclass['egg_info'] = egg_info.egg_info + +def _dict_append(d, **kws): + for k, v in kws.items(): + if k not in d: + d[k] = v + continue + dv = d[k] + if isinstance(dv, tuple): + d[k] = dv + tuple(v) + elif isinstance(dv, list): + d[k] = dv + list(v) + elif isinstance(dv, dict): + _dict_append(dv, **v) + elif is_string(dv): + d[k] = dv + v + else: + raise TypeError(repr(type(dv))) + +def _command_line_ok(_cache=None): + """ Return True if command line does not contain any + help or display requests. 
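+    (i.e. none of ``--help``, ``-h``, or the Distribution display options
+    such as ``--version`` appear on the command line).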
+ """ + if _cache: + return _cache[0] + elif _cache is None: + _cache = [] + ok = True + display_opts = ['--'+n for n in Distribution.display_option_names] + for o in Distribution.display_options: + if o[1]: + display_opts.append('-'+o[1]) + for arg in sys.argv: + if arg.startswith('--help') or arg=='-h' or arg in display_opts: + ok = False + break + _cache.append(ok) + return ok + +def get_distribution(always=False): + dist = distutils.core._setup_distribution + # XXX Hack to get numpy installable with easy_install. + # The problem is easy_install runs it's own setup(), which + # sets up distutils.core._setup_distribution. However, + # when our setup() runs, that gets overwritten and lost. + # We can't use isinstance, as the DistributionWithoutHelpCommands + # class is local to a function in setuptools.command.easy_install + if dist is not None and \ + 'DistributionWithoutHelpCommands' in repr(dist): + dist = None + if always and dist is None: + dist = NumpyDistribution() + return dist + +def setup(**attr): + + cmdclass = numpy_cmdclass.copy() + + new_attr = attr.copy() + if 'cmdclass' in new_attr: + cmdclass.update(new_attr['cmdclass']) + new_attr['cmdclass'] = cmdclass + + if 'configuration' in new_attr: + # To avoid calling configuration if there are any errors + # or help request in command in the line. + configuration = new_attr.pop('configuration') + + old_dist = distutils.core._setup_distribution + old_stop = distutils.core._setup_stop_after + distutils.core._setup_distribution = None + distutils.core._setup_stop_after = "commandline" + try: + dist = setup(**new_attr) + finally: + distutils.core._setup_distribution = old_dist + distutils.core._setup_stop_after = old_stop + if dist.help or not _command_line_ok(): + # probably displayed help, skip running any commands + return dist + + # create setup dictionary and append to new_attr + config = configuration() + if hasattr(config, 'todict'): + config = config.todict() + _dict_append(new_attr, **config) + + # Move extension source libraries to libraries + libraries = [] + for ext in new_attr.get('ext_modules', []): + new_libraries = [] + for item in ext.libraries: + if is_sequence(item): + lib_name, build_info = item + _check_append_ext_library(libraries, lib_name, build_info) + new_libraries.append(lib_name) + elif is_string(item): + new_libraries.append(item) + else: + raise TypeError("invalid description of extension module " + "library %r" % (item,)) + ext.libraries = new_libraries + if libraries: + if 'libraries' not in new_attr: + new_attr['libraries'] = [] + for item in libraries: + _check_append_library(new_attr['libraries'], item) + + # sources in ext_modules or libraries may contain header files + if ('ext_modules' in new_attr or 'libraries' in new_attr) \ + and 'headers' not in new_attr: + new_attr['headers'] = [] + + # Use our custom NumpyDistribution class instead of distutils' one + new_attr['distclass'] = NumpyDistribution + + return old_setup(**new_attr) + +def _check_append_library(libraries, item): + for libitem in libraries: + if is_sequence(libitem): + if is_sequence(item): + if item[0]==libitem[0]: + if item[1] is libitem[1]: + return + warnings.warn("[0] libraries list contains %r with" + " different build_info" % (item[0],), + stacklevel=2) + break + else: + if item==libitem[0]: + warnings.warn("[1] libraries list contains %r with" + " no build_info" % (item[0],), + stacklevel=2) + break + else: + if is_sequence(item): + if item[0]==libitem: + warnings.warn("[2] libraries list contains %r with" + " no 
build_info" % (item[0],), + stacklevel=2) + break + else: + if item==libitem: + return + libraries.append(item) + +def _check_append_ext_library(libraries, lib_name, build_info): + for item in libraries: + if is_sequence(item): + if item[0]==lib_name: + if item[1] is build_info: + return + warnings.warn("[3] libraries list contains %r with" + " different build_info" % (lib_name,), + stacklevel=2) + break + elif item==lib_name: + warnings.warn("[4] libraries list contains %r with" + " no build_info" % (lib_name,), + stacklevel=2) + break + libraries.append((lib_name, build_info)) diff --git a/MLPY/Lib/site-packages/numpy/distutils/cpuinfo.py b/MLPY/Lib/site-packages/numpy/distutils/cpuinfo.py new file mode 100644 index 0000000000000000000000000000000000000000..be4c100e08c77589805fdbb5a99d4231146dfc71 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/cpuinfo.py @@ -0,0 +1,683 @@ +#!/usr/bin/env python3 +""" +cpuinfo + +Copyright 2002 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy (BSD style) license. See LICENSE.txt that came with +this distribution for specifics. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +Pearu Peterson + +""" +__all__ = ['cpu'] + +import os +import platform +import re +import sys +import types +import warnings + +from subprocess import getstatusoutput + + +def getoutput(cmd, successful_status=(0,), stacklevel=1): + try: + status, output = getstatusoutput(cmd) + except EnvironmentError as e: + warnings.warn(str(e), UserWarning, stacklevel=stacklevel) + return False, "" + if os.WIFEXITED(status) and os.WEXITSTATUS(status) in successful_status: + return True, output + return False, output + +def command_info(successful_status=(0,), stacklevel=1, **kw): + info = {} + for key in kw: + ok, output = getoutput(kw[key], successful_status=successful_status, + stacklevel=stacklevel+1) + if ok: + info[key] = output.strip() + return info + +def command_by_line(cmd, successful_status=(0,), stacklevel=1): + ok, output = getoutput(cmd, successful_status=successful_status, + stacklevel=stacklevel+1) + if not ok: + return + for line in output.splitlines(): + yield line.strip() + +def key_value_from_command(cmd, sep, successful_status=(0,), + stacklevel=1): + d = {} + for line in command_by_line(cmd, successful_status=successful_status, + stacklevel=stacklevel+1): + l = [s.strip() for s in line.split(sep, 1)] + if len(l) == 2: + d[l[0]] = l[1] + return d + +class CPUInfoBase: + """Holds CPU information and provides methods for requiring + the availability of various CPU features. 
+ """ + + def _try_call(self, func): + try: + return func() + except Exception: + pass + + def __getattr__(self, name): + if not name.startswith('_'): + if hasattr(self, '_'+name): + attr = getattr(self, '_'+name) + if isinstance(attr, types.MethodType): + return lambda func=self._try_call,attr=attr : func(attr) + else: + return lambda : None + raise AttributeError(name) + + def _getNCPUs(self): + return 1 + + def __get_nbits(self): + abits = platform.architecture()[0] + nbits = re.compile(r'(\d+)bit').search(abits).group(1) + return nbits + + def _is_32bit(self): + return self.__get_nbits() == '32' + + def _is_64bit(self): + return self.__get_nbits() == '64' + +class LinuxCPUInfo(CPUInfoBase): + + info = None + + def __init__(self): + if self.info is not None: + return + info = [ {} ] + ok, output = getoutput('uname -m') + if ok: + info[0]['uname_m'] = output.strip() + try: + fo = open('/proc/cpuinfo') + except EnvironmentError as e: + warnings.warn(str(e), UserWarning, stacklevel=2) + else: + for line in fo: + name_value = [s.strip() for s in line.split(':', 1)] + if len(name_value) != 2: + continue + name, value = name_value + if not info or name in info[-1]: # next processor + info.append({}) + info[-1][name] = value + fo.close() + self.__class__.info = info + + def _not_impl(self): pass + + # Athlon + + def _is_AMD(self): + return self.info[0]['vendor_id']=='AuthenticAMD' + + def _is_AthlonK6_2(self): + return self._is_AMD() and self.info[0]['model'] == '2' + + def _is_AthlonK6_3(self): + return self._is_AMD() and self.info[0]['model'] == '3' + + def _is_AthlonK6(self): + return re.match(r'.*?AMD-K6', self.info[0]['model name']) is not None + + def _is_AthlonK7(self): + return re.match(r'.*?AMD-K7', self.info[0]['model name']) is not None + + def _is_AthlonMP(self): + return re.match(r'.*?Athlon\(tm\) MP\b', + self.info[0]['model name']) is not None + + def _is_AMD64(self): + return self.is_AMD() and self.info[0]['family'] == '15' + + def _is_Athlon64(self): + return re.match(r'.*?Athlon\(tm\) 64\b', + self.info[0]['model name']) is not None + + def _is_AthlonHX(self): + return re.match(r'.*?Athlon HX\b', + self.info[0]['model name']) is not None + + def _is_Opteron(self): + return re.match(r'.*?Opteron\b', + self.info[0]['model name']) is not None + + def _is_Hammer(self): + return re.match(r'.*?Hammer\b', + self.info[0]['model name']) is not None + + # Alpha + + def _is_Alpha(self): + return self.info[0]['cpu']=='Alpha' + + def _is_EV4(self): + return self.is_Alpha() and self.info[0]['cpu model'] == 'EV4' + + def _is_EV5(self): + return self.is_Alpha() and self.info[0]['cpu model'] == 'EV5' + + def _is_EV56(self): + return self.is_Alpha() and self.info[0]['cpu model'] == 'EV56' + + def _is_PCA56(self): + return self.is_Alpha() and self.info[0]['cpu model'] == 'PCA56' + + # Intel + + #XXX + _is_i386 = _not_impl + + def _is_Intel(self): + return self.info[0]['vendor_id']=='GenuineIntel' + + def _is_i486(self): + return self.info[0]['cpu']=='i486' + + def _is_i586(self): + return self.is_Intel() and self.info[0]['cpu family'] == '5' + + def _is_i686(self): + return self.is_Intel() and self.info[0]['cpu family'] == '6' + + def _is_Celeron(self): + return re.match(r'.*?Celeron', + self.info[0]['model name']) is not None + + def _is_Pentium(self): + return re.match(r'.*?Pentium', + self.info[0]['model name']) is not None + + def _is_PentiumII(self): + return re.match(r'.*?Pentium.*?II\b', + self.info[0]['model name']) is not None + + def _is_PentiumPro(self): + return 
re.match(r'.*?PentiumPro\b', + self.info[0]['model name']) is not None + + def _is_PentiumMMX(self): + return re.match(r'.*?Pentium.*?MMX\b', + self.info[0]['model name']) is not None + + def _is_PentiumIII(self): + return re.match(r'.*?Pentium.*?III\b', + self.info[0]['model name']) is not None + + def _is_PentiumIV(self): + return re.match(r'.*?Pentium.*?(IV|4)\b', + self.info[0]['model name']) is not None + + def _is_PentiumM(self): + return re.match(r'.*?Pentium.*?M\b', + self.info[0]['model name']) is not None + + def _is_Prescott(self): + return self.is_PentiumIV() and self.has_sse3() + + def _is_Nocona(self): + return (self.is_Intel() + and (self.info[0]['cpu family'] == '6' + or self.info[0]['cpu family'] == '15') + and (self.has_sse3() and not self.has_ssse3()) + and re.match(r'.*?\blm\b', self.info[0]['flags']) is not None) + + def _is_Core2(self): + return (self.is_64bit() and self.is_Intel() and + re.match(r'.*?Core\(TM\)2\b', + self.info[0]['model name']) is not None) + + def _is_Itanium(self): + return re.match(r'.*?Itanium\b', + self.info[0]['family']) is not None + + def _is_XEON(self): + return re.match(r'.*?XEON\b', + self.info[0]['model name'], re.IGNORECASE) is not None + + _is_Xeon = _is_XEON + + # Varia + + def _is_singleCPU(self): + return len(self.info) == 1 + + def _getNCPUs(self): + return len(self.info) + + def _has_fdiv_bug(self): + return self.info[0]['fdiv_bug']=='yes' + + def _has_f00f_bug(self): + return self.info[0]['f00f_bug']=='yes' + + def _has_mmx(self): + return re.match(r'.*?\bmmx\b', self.info[0]['flags']) is not None + + def _has_sse(self): + return re.match(r'.*?\bsse\b', self.info[0]['flags']) is not None + + def _has_sse2(self): + return re.match(r'.*?\bsse2\b', self.info[0]['flags']) is not None + + def _has_sse3(self): + return re.match(r'.*?\bpni\b', self.info[0]['flags']) is not None + + def _has_ssse3(self): + return re.match(r'.*?\bssse3\b', self.info[0]['flags']) is not None + + def _has_3dnow(self): + return re.match(r'.*?\b3dnow\b', self.info[0]['flags']) is not None + + def _has_3dnowext(self): + return re.match(r'.*?\b3dnowext\b', self.info[0]['flags']) is not None + +class IRIXCPUInfo(CPUInfoBase): + info = None + + def __init__(self): + if self.info is not None: + return + info = key_value_from_command('sysconf', sep=' ', + successful_status=(0, 1)) + self.__class__.info = info + + def _not_impl(self): pass + + def _is_singleCPU(self): + return self.info.get('NUM_PROCESSORS') == '1' + + def _getNCPUs(self): + return int(self.info.get('NUM_PROCESSORS', 1)) + + def __cputype(self, n): + return self.info.get('PROCESSORS').split()[0].lower() == 'r%s' % (n) + def _is_r2000(self): return self.__cputype(2000) + def _is_r3000(self): return self.__cputype(3000) + def _is_r3900(self): return self.__cputype(3900) + def _is_r4000(self): return self.__cputype(4000) + def _is_r4100(self): return self.__cputype(4100) + def _is_r4300(self): return self.__cputype(4300) + def _is_r4400(self): return self.__cputype(4400) + def _is_r4600(self): return self.__cputype(4600) + def _is_r4650(self): return self.__cputype(4650) + def _is_r5000(self): return self.__cputype(5000) + def _is_r6000(self): return self.__cputype(6000) + def _is_r8000(self): return self.__cputype(8000) + def _is_r10000(self): return self.__cputype(10000) + def _is_r12000(self): return self.__cputype(12000) + def _is_rorion(self): return self.__cputype('orion') + + def get_ip(self): + try: return self.info.get('MACHINE') + except Exception: pass + def __machine(self, n): + return 
self.info.get('MACHINE').lower() == 'ip%s' % (n) + def _is_IP19(self): return self.__machine(19) + def _is_IP20(self): return self.__machine(20) + def _is_IP21(self): return self.__machine(21) + def _is_IP22(self): return self.__machine(22) + def _is_IP22_4k(self): return self.__machine(22) and self._is_r4000() + def _is_IP22_5k(self): return self.__machine(22) and self._is_r5000() + def _is_IP24(self): return self.__machine(24) + def _is_IP25(self): return self.__machine(25) + def _is_IP26(self): return self.__machine(26) + def _is_IP27(self): return self.__machine(27) + def _is_IP28(self): return self.__machine(28) + def _is_IP30(self): return self.__machine(30) + def _is_IP32(self): return self.__machine(32) + def _is_IP32_5k(self): return self.__machine(32) and self._is_r5000() + def _is_IP32_10k(self): return self.__machine(32) and self._is_r10000() + + +class DarwinCPUInfo(CPUInfoBase): + info = None + + def __init__(self): + if self.info is not None: + return + info = command_info(arch='arch', + machine='machine') + info['sysctl_hw'] = key_value_from_command('sysctl hw', sep='=') + self.__class__.info = info + + def _not_impl(self): pass + + def _getNCPUs(self): + return int(self.info['sysctl_hw'].get('hw.ncpu', 1)) + + def _is_Power_Macintosh(self): + return self.info['sysctl_hw']['hw.machine']=='Power Macintosh' + + def _is_i386(self): + return self.info['arch']=='i386' + def _is_ppc(self): + return self.info['arch']=='ppc' + + def __machine(self, n): + return self.info['machine'] == 'ppc%s'%n + def _is_ppc601(self): return self.__machine(601) + def _is_ppc602(self): return self.__machine(602) + def _is_ppc603(self): return self.__machine(603) + def _is_ppc603e(self): return self.__machine('603e') + def _is_ppc604(self): return self.__machine(604) + def _is_ppc604e(self): return self.__machine('604e') + def _is_ppc620(self): return self.__machine(620) + def _is_ppc630(self): return self.__machine(630) + def _is_ppc740(self): return self.__machine(740) + def _is_ppc7400(self): return self.__machine(7400) + def _is_ppc7450(self): return self.__machine(7450) + def _is_ppc750(self): return self.__machine(750) + def _is_ppc403(self): return self.__machine(403) + def _is_ppc505(self): return self.__machine(505) + def _is_ppc801(self): return self.__machine(801) + def _is_ppc821(self): return self.__machine(821) + def _is_ppc823(self): return self.__machine(823) + def _is_ppc860(self): return self.__machine(860) + + +class SunOSCPUInfo(CPUInfoBase): + + info = None + + def __init__(self): + if self.info is not None: + return + info = command_info(arch='arch', + mach='mach', + uname_i='uname_i', + isainfo_b='isainfo -b', + isainfo_n='isainfo -n', + ) + info['uname_X'] = key_value_from_command('uname -X', sep='=') + for line in command_by_line('psrinfo -v 0'): + m = re.match(r'\s*The (?P
<p>
[\w\d]+) processor operates at', line) + if m: + info['processor'] = m.group('p') + break + self.__class__.info = info + + def _not_impl(self): pass + + def _is_i386(self): + return self.info['isainfo_n']=='i386' + def _is_sparc(self): + return self.info['isainfo_n']=='sparc' + def _is_sparcv9(self): + return self.info['isainfo_n']=='sparcv9' + + def _getNCPUs(self): + return int(self.info['uname_X'].get('NumCPU', 1)) + + def _is_sun4(self): + return self.info['arch']=='sun4' + + def _is_SUNW(self): + return re.match(r'SUNW', self.info['uname_i']) is not None + def _is_sparcstation5(self): + return re.match(r'.*SPARCstation-5', self.info['uname_i']) is not None + def _is_ultra1(self): + return re.match(r'.*Ultra-1', self.info['uname_i']) is not None + def _is_ultra250(self): + return re.match(r'.*Ultra-250', self.info['uname_i']) is not None + def _is_ultra2(self): + return re.match(r'.*Ultra-2', self.info['uname_i']) is not None + def _is_ultra30(self): + return re.match(r'.*Ultra-30', self.info['uname_i']) is not None + def _is_ultra4(self): + return re.match(r'.*Ultra-4', self.info['uname_i']) is not None + def _is_ultra5_10(self): + return re.match(r'.*Ultra-5_10', self.info['uname_i']) is not None + def _is_ultra5(self): + return re.match(r'.*Ultra-5', self.info['uname_i']) is not None + def _is_ultra60(self): + return re.match(r'.*Ultra-60', self.info['uname_i']) is not None + def _is_ultra80(self): + return re.match(r'.*Ultra-80', self.info['uname_i']) is not None + def _is_ultraenterprice(self): + return re.match(r'.*Ultra-Enterprise', self.info['uname_i']) is not None + def _is_ultraenterprice10k(self): + return re.match(r'.*Ultra-Enterprise-10000', self.info['uname_i']) is not None + def _is_sunfire(self): + return re.match(r'.*Sun-Fire', self.info['uname_i']) is not None + def _is_ultra(self): + return re.match(r'.*Ultra', self.info['uname_i']) is not None + + def _is_cpusparcv7(self): + return self.info['processor']=='sparcv7' + def _is_cpusparcv8(self): + return self.info['processor']=='sparcv8' + def _is_cpusparcv9(self): + return self.info['processor']=='sparcv9' + +class Win32CPUInfo(CPUInfoBase): + + info = None + pkey = r"HARDWARE\DESCRIPTION\System\CentralProcessor" + # XXX: what does the value of + # HKEY_LOCAL_MACHINE\HARDWARE\DESCRIPTION\System\CentralProcessor\0 + # mean? + + def __init__(self): + if self.info is not None: + return + info = [] + try: + #XXX: Bad style to use so long `try:...except:...`. Fix it! 
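+            # Typical registry layout (an assumption; not verified by this
+            # module): each numbered subkey under CentralProcessor is one
+            # logical processor, and its "Identifier" value is a string
+            # such as "x86 Family 6 Model 58 Stepping 9", which the regex
+            # below parses into Family/Model/Stepping integers.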
+            import winreg
+
+            prgx = re.compile(r"family\s+(?P<FML>\d+)\s+model\s+(?P<MDL>\d+)"
+                              r"\s+stepping\s+(?P<STP>\d+)", re.IGNORECASE)
+            chnd=winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, self.pkey)
+            pnum=0
+            while True:
+                try:
+                    proc=winreg.EnumKey(chnd, pnum)
+                except winreg.error:
+                    break
+                else:
+                    pnum+=1
+                    info.append({"Processor":proc})
+                    phnd=winreg.OpenKey(chnd, proc)
+                    pidx=0
+                    while True:
+                        try:
+                            name, value, vtpe=winreg.EnumValue(phnd, pidx)
+                        except winreg.error:
+                            break
+                        else:
+                            pidx=pidx+1
+                            info[-1][name]=value
+                            if name=="Identifier":
+                                srch=prgx.search(value)
+                                if srch:
+                                    info[-1]["Family"]=int(srch.group("FML"))
+                                    info[-1]["Model"]=int(srch.group("MDL"))
+                                    info[-1]["Stepping"]=int(srch.group("STP"))
+        except Exception as e:
+            print(e, '(ignoring)')
+        self.__class__.info = info
+
+    def _not_impl(self): pass
+
+    # Athlon
+
+    def _is_AMD(self):
+        return self.info[0]['VendorIdentifier']=='AuthenticAMD'
+
+    def _is_Am486(self):
+        return self.is_AMD() and self.info[0]['Family']==4
+
+    def _is_Am5x86(self):
+        return self.is_AMD() and self.info[0]['Family']==4
+
+    def _is_AMDK5(self):
+        return self.is_AMD() and self.info[0]['Family']==5 \
+               and self.info[0]['Model'] in [0, 1, 2, 3]
+
+    def _is_AMDK6(self):
+        return self.is_AMD() and self.info[0]['Family']==5 \
+               and self.info[0]['Model'] in [6, 7]
+
+    def _is_AMDK6_2(self):
+        return self.is_AMD() and self.info[0]['Family']==5 \
+               and self.info[0]['Model']==8
+
+    def _is_AMDK6_3(self):
+        return self.is_AMD() and self.info[0]['Family']==5 \
+               and self.info[0]['Model']==9
+
+    def _is_AMDK7(self):
+        return self.is_AMD() and self.info[0]['Family'] == 6
+
+    # To reliably distinguish between the different types of AMD64 chips
+    # (Athlon64, Operton, Athlon64 X2, Semperon, Turion 64, etc.) would
+    # require looking at the 'brand' from cpuid
+
+    def _is_AMD64(self):
+        return self.is_AMD() and self.info[0]['Family'] == 15
+
+    # Intel
+
+    def _is_Intel(self):
+        return self.info[0]['VendorIdentifier']=='GenuineIntel'
+
+    def _is_i386(self):
+        return self.info[0]['Family']==3
+
+    def _is_i486(self):
+        return self.info[0]['Family']==4
+
+    def _is_i586(self):
+        return self.is_Intel() and self.info[0]['Family']==5
+
+    def _is_i686(self):
+        return self.is_Intel() and self.info[0]['Family']==6
+
+    def _is_Pentium(self):
+        return self.is_Intel() and self.info[0]['Family']==5
+
+    def _is_PentiumMMX(self):
+        return self.is_Intel() and self.info[0]['Family']==5 \
+               and self.info[0]['Model']==4
+
+    def _is_PentiumPro(self):
+        return self.is_Intel() and self.info[0]['Family']==6 \
+               and self.info[0]['Model']==1
+
+    def _is_PentiumII(self):
+        return self.is_Intel() and self.info[0]['Family']==6 \
+               and self.info[0]['Model'] in [3, 5, 6]
+
+    def _is_PentiumIII(self):
+        return self.is_Intel() and self.info[0]['Family']==6 \
+               and self.info[0]['Model'] in [7, 8, 9, 10, 11]
+
+    def _is_PentiumIV(self):
+        return self.is_Intel() and self.info[0]['Family']==15
+
+    def _is_PentiumM(self):
+        return self.is_Intel() and self.info[0]['Family'] == 6 \
+               and self.info[0]['Model'] in [9, 13, 14]
+
+    def _is_Core2(self):
+        return self.is_Intel() and self.info[0]['Family'] == 6 \
+               and self.info[0]['Model'] in [15, 16, 17]
+
+    # Varia
+
+    def _is_singleCPU(self):
+        return len(self.info) == 1
+
+    def _getNCPUs(self):
+        return len(self.info)
+
+    def _has_mmx(self):
+        if self.is_Intel():
+            return (self.info[0]['Family']==5 and self.info[0]['Model']==4) \
+                   or (self.info[0]['Family'] in [6, 15])
+        elif self.is_AMD():
+            return self.info[0]['Family'] in [5, 6, 15]
+        else:
+            return False
+
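+    # The _has_* probes below infer instruction-set support from
+    # Family/Model tables rather than from CPUID feature bits, so they
+    # are heuristics and may misclassify later processor generations.
+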
def _has_sse(self): + if self.is_Intel(): + return ((self.info[0]['Family']==6 and + self.info[0]['Model'] in [7, 8, 9, 10, 11]) + or self.info[0]['Family']==15) + elif self.is_AMD(): + return ((self.info[0]['Family']==6 and + self.info[0]['Model'] in [6, 7, 8, 10]) + or self.info[0]['Family']==15) + else: + return False + + def _has_sse2(self): + if self.is_Intel(): + return self.is_Pentium4() or self.is_PentiumM() \ + or self.is_Core2() + elif self.is_AMD(): + return self.is_AMD64() + else: + return False + + def _has_3dnow(self): + return self.is_AMD() and self.info[0]['Family'] in [5, 6, 15] + + def _has_3dnowext(self): + return self.is_AMD() and self.info[0]['Family'] in [6, 15] + +if sys.platform.startswith('linux'): # variations: linux2,linux-i386 (any others?) + cpuinfo = LinuxCPUInfo +elif sys.platform.startswith('irix'): + cpuinfo = IRIXCPUInfo +elif sys.platform == 'darwin': + cpuinfo = DarwinCPUInfo +elif sys.platform.startswith('sunos'): + cpuinfo = SunOSCPUInfo +elif sys.platform.startswith('win32'): + cpuinfo = Win32CPUInfo +elif sys.platform.startswith('cygwin'): + cpuinfo = LinuxCPUInfo +#XXX: other OS's. Eg. use _winreg on Win32. Or os.uname on unices. +else: + cpuinfo = CPUInfoBase + +cpu = cpuinfo() + +#if __name__ == "__main__": +# +# cpu.is_blaa() +# cpu.is_Intel() +# cpu.is_Alpha() +# +# print('CPU information:'), +# for name in dir(cpuinfo): +# if name[0]=='_' and name[1]!='_': +# r = getattr(cpu,name[1:])() +# if r: +# if r!=1: +# print('%s=%s' %(name[1:],r)) +# else: +# print(name[1:]), +# print() diff --git a/MLPY/Lib/site-packages/numpy/distutils/exec_command.py b/MLPY/Lib/site-packages/numpy/distutils/exec_command.py new file mode 100644 index 0000000000000000000000000000000000000000..b58e425208f1c1f5461b6d8a40f39e25534c1d62 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/exec_command.py @@ -0,0 +1,316 @@ +""" +exec_command + +Implements exec_command function that is (almost) equivalent to +commands.getstatusoutput function but on NT, DOS systems the +returned status is actually correct (though, the returned status +values may be different by a factor). In addition, exec_command +takes keyword arguments for (re-)defining environment variables. + +Provides functions: + + exec_command --- execute command in a specified directory and + in the modified environment. + find_executable --- locate a command using info from environment + variable PATH. Equivalent to posix `which` + command. + +Author: Pearu Peterson +Created: 11 January 2003 + +Requires: Python 2.x + +Successfully tested on: + +======== ============ ================================================= +os.name sys.platform comments +======== ============ ================================================= +posix linux2 Debian (sid) Linux, Python 2.1.3+, 2.2.3+, 2.3.3 + PyCrust 0.9.3, Idle 1.0.2 +posix linux2 Red Hat 9 Linux, Python 2.1.3, 2.2.2, 2.3.2 +posix sunos5 SunOS 5.9, Python 2.2, 2.3.2 +posix darwin Darwin 7.2.0, Python 2.3 +nt win32 Windows Me + Python 2.3(EE), Idle 1.0, PyCrust 0.7.2 + Python 2.1.1 Idle 0.8 +nt win32 Windows 98, Python 2.1.1. Idle 0.8 +nt win32 Cygwin 98-4.10, Python 2.1.1(MSC) - echo tests + fail i.e. redefining environment variables may + not work. FIXED: don't use cygwin echo! + Comment: also `cmd /c echo` will not work + but redefining environment variables do work. 
+posix cygwin Cygwin 98-4.10, Python 2.3.3(cygming special) +nt win32 Windows XP, Python 2.3.3 +======== ============ ================================================= + +Known bugs: + +* Tests, that send messages to stderr, fail when executed from MSYS prompt + because the messages are lost at some point. + +""" +__all__ = ['exec_command', 'find_executable'] + +import os +import sys +import subprocess +import locale +import warnings + +from numpy.distutils.misc_util import is_sequence, make_temp_file +from numpy.distutils import log + +def filepath_from_subprocess_output(output): + """ + Convert `bytes` in the encoding used by a subprocess into a filesystem-appropriate `str`. + + Inherited from `exec_command`, and possibly incorrect. + """ + mylocale = locale.getpreferredencoding(False) + if mylocale is None: + mylocale = 'ascii' + output = output.decode(mylocale, errors='replace') + output = output.replace('\r\n', '\n') + # Another historical oddity + if output[-1:] == '\n': + output = output[:-1] + return output + + +def forward_bytes_to_stdout(val): + """ + Forward bytes from a subprocess call to the console, without attempting to + decode them. + + The assumption is that the subprocess call already returned bytes in + a suitable encoding. + """ + if hasattr(sys.stdout, 'buffer'): + # use the underlying binary output if there is one + sys.stdout.buffer.write(val) + elif hasattr(sys.stdout, 'encoding'): + # round-trip the encoding if necessary + sys.stdout.write(val.decode(sys.stdout.encoding)) + else: + # make a best-guess at the encoding + sys.stdout.write(val.decode('utf8', errors='replace')) + + +def temp_file_name(): + # 2019-01-30, 1.17 + warnings.warn('temp_file_name is deprecated since NumPy v1.17, use ' + 'tempfile.mkstemp instead', DeprecationWarning, stacklevel=1) + fo, name = make_temp_file() + fo.close() + return name + +def get_pythonexe(): + pythonexe = sys.executable + if os.name in ['nt', 'dos']: + fdir, fn = os.path.split(pythonexe) + fn = fn.upper().replace('PYTHONW', 'PYTHON') + pythonexe = os.path.join(fdir, fn) + assert os.path.isfile(pythonexe), '%r is not a file' % (pythonexe,) + return pythonexe + +def find_executable(exe, path=None, _cache={}): + """Return full path of a executable or None. + + Symbolic links are not followed. 
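+
+    A usage sketch (the returned path is illustrative)::
+
+        >>> from numpy.distutils.exec_command import find_executable
+        >>> find_executable('gfortran')  # doctest: +SKIP
+        '/usr/bin/gfortran'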
+ """ + key = exe, path + try: + return _cache[key] + except KeyError: + pass + log.debug('find_executable(%r)' % exe) + orig_exe = exe + + if path is None: + path = os.environ.get('PATH', os.defpath) + if os.name=='posix': + realpath = os.path.realpath + else: + realpath = lambda a:a + + if exe.startswith('"'): + exe = exe[1:-1] + + suffixes = [''] + if os.name in ['nt', 'dos', 'os2']: + fn, ext = os.path.splitext(exe) + extra_suffixes = ['.exe', '.com', '.bat'] + if ext.lower() not in extra_suffixes: + suffixes = extra_suffixes + + if os.path.isabs(exe): + paths = [''] + else: + paths = [ os.path.abspath(p) for p in path.split(os.pathsep) ] + + for path in paths: + fn = os.path.join(path, exe) + for s in suffixes: + f_ext = fn+s + if not os.path.islink(f_ext): + f_ext = realpath(f_ext) + if os.path.isfile(f_ext) and os.access(f_ext, os.X_OK): + log.info('Found executable %s' % f_ext) + _cache[key] = f_ext + return f_ext + + log.warn('Could not locate executable %s' % orig_exe) + return None + +############################################################ + +def _preserve_environment( names ): + log.debug('_preserve_environment(%r)' % (names)) + env = {name: os.environ.get(name) for name in names} + return env + +def _update_environment( **env ): + log.debug('_update_environment(...)') + for name, value in env.items(): + os.environ[name] = value or '' + +def exec_command(command, execute_in='', use_shell=None, use_tee=None, + _with_python = 1, **env ): + """ + Return (status,output) of executed command. + + .. deprecated:: 1.17 + Use subprocess.Popen instead + + Parameters + ---------- + command : str + A concatenated string of executable and arguments. + execute_in : str + Before running command ``cd execute_in`` and after ``cd -``. + use_shell : {bool, None}, optional + If True, execute ``sh -c command``. Default None (True) + use_tee : {bool, None}, optional + If True use tee. Default None (True) + + + Returns + ------- + res : str + Both stdout and stderr messages. + + Notes + ----- + On NT, DOS systems the returned status is correct for external commands. + Wild cards will not work for non-posix systems or when use_shell=0. + + """ + # 2019-01-30, 1.17 + warnings.warn('exec_command is deprecated since NumPy v1.17, use ' + 'subprocess.Popen instead', DeprecationWarning, stacklevel=1) + log.debug('exec_command(%r,%s)' % (command, + ','.join(['%s=%r'%kv for kv in env.items()]))) + + if use_tee is None: + use_tee = os.name=='posix' + if use_shell is None: + use_shell = os.name=='posix' + execute_in = os.path.abspath(execute_in) + oldcwd = os.path.abspath(os.getcwd()) + + if __name__[-12:] == 'exec_command': + exec_dir = os.path.dirname(os.path.abspath(__file__)) + elif os.path.isfile('exec_command.py'): + exec_dir = os.path.abspath('.') + else: + exec_dir = os.path.abspath(sys.argv[0]) + if os.path.isfile(exec_dir): + exec_dir = os.path.dirname(exec_dir) + + if oldcwd!=execute_in: + os.chdir(execute_in) + log.debug('New cwd: %s' % execute_in) + else: + log.debug('Retaining cwd: %s' % oldcwd) + + oldenv = _preserve_environment( list(env.keys()) ) + _update_environment( **env ) + + try: + st = _exec_command(command, + use_shell=use_shell, + use_tee=use_tee, + **env) + finally: + if oldcwd!=execute_in: + os.chdir(oldcwd) + log.debug('Restored cwd to %s' % oldcwd) + _update_environment(**oldenv) + + return st + + +def _exec_command(command, use_shell=None, use_tee = None, **env): + """ + Internal workhorse for exec_command(). 
+ """ + if use_shell is None: + use_shell = os.name=='posix' + if use_tee is None: + use_tee = os.name=='posix' + + if os.name == 'posix' and use_shell: + # On POSIX, subprocess always uses /bin/sh, override + sh = os.environ.get('SHELL', '/bin/sh') + if is_sequence(command): + command = [sh, '-c', ' '.join(command)] + else: + command = [sh, '-c', command] + use_shell = False + + elif os.name == 'nt' and is_sequence(command): + # On Windows, join the string for CreateProcess() ourselves as + # subprocess does it a bit differently + command = ' '.join(_quote_arg(arg) for arg in command) + + # Inherit environment by default + env = env or None + try: + # universal_newlines is set to False so that communicate() + # will return bytes. We need to decode the output ourselves + # so that Python will not raise a UnicodeDecodeError when + # it encounters an invalid character; rather, we simply replace it + proc = subprocess.Popen(command, shell=use_shell, env=env, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=False) + except EnvironmentError: + # Return 127, as os.spawn*() and /bin/sh do + return 127, '' + + text, err = proc.communicate() + mylocale = locale.getpreferredencoding(False) + if mylocale is None: + mylocale = 'ascii' + text = text.decode(mylocale, errors='replace') + text = text.replace('\r\n', '\n') + # Another historical oddity + if text[-1:] == '\n': + text = text[:-1] + + if use_tee and text: + print(text) + return proc.returncode, text + + +def _quote_arg(arg): + """ + Quote the argument for safe use in a shell command line. + """ + # If there is a quote in the string, assume relevants parts of the + # string are already quoted (e.g. '-I"C:\\Program Files\\..."') + if '"' not in arg and ' ' in arg: + return '"%s"' % arg + return arg + +############################################################ diff --git a/MLPY/Lib/site-packages/numpy/distutils/extension.py b/MLPY/Lib/site-packages/numpy/distutils/extension.py new file mode 100644 index 0000000000000000000000000000000000000000..be2dd453009420a44efdb95f1ee5e6ecb3c87cc4 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/extension.py @@ -0,0 +1,103 @@ +"""distutils.extension + +Provides the Extension class, used to describe C/C++ extension +modules in setup scripts. + +Overridden to support f2py. + +""" +import re +from distutils.extension import Extension as old_Extension + + +cxx_ext_re = re.compile(r'.*\.(cpp|cxx|cc)\Z', re.I).match +fortran_pyf_ext_re = re.compile(r'.*\.(f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match + + +class Extension(old_Extension): + """ + Parameters + ---------- + name : str + Extension name. + sources : list of str + List of source file locations relative to the top directory of + the package. + extra_compile_args : list of str + Extra command line arguments to pass to the compiler. + extra_f77_compile_args : list of str + Extra command line arguments to pass to the fortran77 compiler. + extra_f90_compile_args : list of str + Extra command line arguments to pass to the fortran90 compiler. 
+ """ + def __init__( + self, name, sources, + include_dirs=None, + define_macros=None, + undef_macros=None, + library_dirs=None, + libraries=None, + runtime_library_dirs=None, + extra_objects=None, + extra_compile_args=None, + extra_link_args=None, + export_symbols=None, + swig_opts=None, + depends=None, + language=None, + f2py_options=None, + module_dirs=None, + extra_f77_compile_args=None, + extra_f90_compile_args=None,): + + old_Extension.__init__( + self, name, [], + include_dirs=include_dirs, + define_macros=define_macros, + undef_macros=undef_macros, + library_dirs=library_dirs, + libraries=libraries, + runtime_library_dirs=runtime_library_dirs, + extra_objects=extra_objects, + extra_compile_args=extra_compile_args, + extra_link_args=extra_link_args, + export_symbols=export_symbols) + + # Avoid assert statements checking that sources contains strings: + self.sources = sources + + # Python 2.4 distutils new features + self.swig_opts = swig_opts or [] + # swig_opts is assumed to be a list. Here we handle the case where it + # is specified as a string instead. + if isinstance(self.swig_opts, str): + import warnings + msg = "swig_opts is specified as a string instead of a list" + warnings.warn(msg, SyntaxWarning, stacklevel=2) + self.swig_opts = self.swig_opts.split() + + # Python 2.3 distutils new features + self.depends = depends or [] + self.language = language + + # numpy_distutils features + self.f2py_options = f2py_options or [] + self.module_dirs = module_dirs or [] + self.extra_f77_compile_args = extra_f77_compile_args or [] + self.extra_f90_compile_args = extra_f90_compile_args or [] + + return + + def has_cxx_sources(self): + for source in self.sources: + if cxx_ext_re(str(source)): + return True + return False + + def has_f2py_sources(self): + for source in self.sources: + if fortran_pyf_ext_re(source): + return True + return False + +# class Extension diff --git a/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__init__.py b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..423496415ac59ed664b89b42cc0fdff8a1ecbe8a --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__init__.py @@ -0,0 +1,1022 @@ +"""numpy.distutils.fcompiler + +Contains FCompiler, an abstract base class that defines the interface +for the numpy.distutils Fortran compiler abstraction model. + +Terminology: + +To be consistent, where the term 'executable' is used, it means the single +file, like 'gcc', that is executed, and should be a string. In contrast, +'command' means the entire command line, like ['gcc', '-c', 'file.c'], and +should be a list. + +But note that FCompiler.executables is actually a dictionary of commands. 
+ +""" +__all__ = ['FCompiler', 'new_fcompiler', 'show_fcompilers', + 'dummy_fortran_file'] + +import os +import sys +import re + +from distutils.sysconfig import get_python_lib +from distutils.fancy_getopt import FancyGetopt +from distutils.errors import DistutilsModuleError, \ + DistutilsExecError, CompileError, LinkError, DistutilsPlatformError +from distutils.util import split_quoted, strtobool + +from numpy.distutils.ccompiler import CCompiler, gen_lib_options +from numpy.distutils import log +from numpy.distutils.misc_util import is_string, all_strings, is_sequence, \ + make_temp_file, get_shared_lib_extension +from numpy.distutils.exec_command import find_executable +from numpy.distutils import _shell_utils + +from .environment import EnvironmentConfig + +__metaclass__ = type + +class CompilerNotFound(Exception): + pass + +def flaglist(s): + if is_string(s): + return split_quoted(s) + else: + return s + +def str2bool(s): + if is_string(s): + return strtobool(s) + return bool(s) + +def is_sequence_of_strings(seq): + return is_sequence(seq) and all_strings(seq) + +class FCompiler(CCompiler): + """Abstract base class to define the interface that must be implemented + by real Fortran compiler classes. + + Methods that subclasses may redefine: + + update_executables(), find_executables(), get_version() + get_flags(), get_flags_opt(), get_flags_arch(), get_flags_debug() + get_flags_f77(), get_flags_opt_f77(), get_flags_arch_f77(), + get_flags_debug_f77(), get_flags_f90(), get_flags_opt_f90(), + get_flags_arch_f90(), get_flags_debug_f90(), + get_flags_fix(), get_flags_linker_so() + + DON'T call these methods (except get_version) after + constructing a compiler instance or inside any other method. + All methods, except update_executables() and find_executables(), + may call the get_version() method. + + After constructing a compiler instance, always call customize(dist=None) + method that finalizes compiler construction and makes the following + attributes available: + compiler_f77 + compiler_f90 + compiler_fix + linker_so + archiver + ranlib + libraries + library_dirs + """ + + # These are the environment variables and distutils keys used. + # Each configuration description is + # (, , , , ) + # The hook names are handled by the self._environment_hook method. + # - names starting with 'self.' call methods in this class + # - names starting with 'exe.' return the key in the executables dict + # - names like 'flags.YYY' return self.get_flag_YYY() + # convert is either None or a function to convert a string to the + # appropriate type used. 
+ + distutils_vars = EnvironmentConfig( + distutils_section='config_fc', + noopt = (None, None, 'noopt', str2bool, False), + noarch = (None, None, 'noarch', str2bool, False), + debug = (None, None, 'debug', str2bool, False), + verbose = (None, None, 'verbose', str2bool, False), + ) + + command_vars = EnvironmentConfig( + distutils_section='config_fc', + compiler_f77 = ('exe.compiler_f77', 'F77', 'f77exec', None, False), + compiler_f90 = ('exe.compiler_f90', 'F90', 'f90exec', None, False), + compiler_fix = ('exe.compiler_fix', 'F90', 'f90exec', None, False), + version_cmd = ('exe.version_cmd', None, None, None, False), + linker_so = ('exe.linker_so', 'LDSHARED', 'ldshared', None, False), + linker_exe = ('exe.linker_exe', 'LD', 'ld', None, False), + archiver = (None, 'AR', 'ar', None, False), + ranlib = (None, 'RANLIB', 'ranlib', None, False), + ) + + flag_vars = EnvironmentConfig( + distutils_section='config_fc', + f77 = ('flags.f77', 'F77FLAGS', 'f77flags', flaglist, True), + f90 = ('flags.f90', 'F90FLAGS', 'f90flags', flaglist, True), + free = ('flags.free', 'FREEFLAGS', 'freeflags', flaglist, True), + fix = ('flags.fix', None, None, flaglist, False), + opt = ('flags.opt', 'FOPT', 'opt', flaglist, True), + opt_f77 = ('flags.opt_f77', None, None, flaglist, False), + opt_f90 = ('flags.opt_f90', None, None, flaglist, False), + arch = ('flags.arch', 'FARCH', 'arch', flaglist, False), + arch_f77 = ('flags.arch_f77', None, None, flaglist, False), + arch_f90 = ('flags.arch_f90', None, None, flaglist, False), + debug = ('flags.debug', 'FDEBUG', 'fdebug', flaglist, True), + debug_f77 = ('flags.debug_f77', None, None, flaglist, False), + debug_f90 = ('flags.debug_f90', None, None, flaglist, False), + flags = ('self.get_flags', 'FFLAGS', 'fflags', flaglist, True), + linker_so = ('flags.linker_so', 'LDFLAGS', 'ldflags', flaglist, True), + linker_exe = ('flags.linker_exe', 'LDFLAGS', 'ldflags', flaglist, True), + ar = ('flags.ar', 'ARFLAGS', 'arflags', flaglist, True), + ) + + language_map = {'.f': 'f77', + '.for': 'f77', + '.F': 'f77', # XXX: needs preprocessor + '.ftn': 'f77', + '.f77': 'f77', + '.f90': 'f90', + '.F90': 'f90', # XXX: needs preprocessor + '.f95': 'f90', + } + language_order = ['f90', 'f77'] + + + # These will be set by the subclass + + compiler_type = None + compiler_aliases = () + version_pattern = None + + possible_executables = [] + executables = { + 'version_cmd': ["f77", "-v"], + 'compiler_f77': ["f77"], + 'compiler_f90': ["f90"], + 'compiler_fix': ["f90", "-fixed"], + 'linker_so': ["f90", "-shared"], + 'linker_exe': ["f90"], + 'archiver': ["ar", "-cr"], + 'ranlib': None, + } + + # If compiler does not support compiling Fortran 90 then it can + # suggest using another compiler. For example, gnu would suggest + # gnu95 compiler type when there are F90 sources. + suggested_f90_compiler = None + + compile_switch = "-c" + object_switch = "-o " # Ending space matters! It will be stripped + # but if it is missing then object_switch + # will be prefixed to object file name by + # string concatenation. + library_switch = "-o " # Ditto! + + # Switch to specify where module files are created and searched + # for USE statement. Normally it is a string and also here ending + # space matters. See above. + module_dir_switch = None + + # Switch to specify where module files are searched for USE statement. 
+ module_include_switch = '-I' + + pic_flags = [] # Flags to create position-independent code + + src_extensions = ['.for', '.ftn', '.f77', '.f', '.f90', '.f95', '.F', '.F90', '.FOR'] + obj_extension = ".o" + + shared_lib_extension = get_shared_lib_extension() + static_lib_extension = ".a" # or .lib + static_lib_format = "lib%s%s" # or %s%s + shared_lib_format = "%s%s" + exe_extension = "" + + _exe_cache = {} + + _executable_keys = ['version_cmd', 'compiler_f77', 'compiler_f90', + 'compiler_fix', 'linker_so', 'linker_exe', 'archiver', + 'ranlib'] + + # This will be set by new_fcompiler when called in + # command/{build_ext.py, build_clib.py, config.py} files. + c_compiler = None + + # extra_{f77,f90}_compile_args are set by build_ext.build_extension method + extra_f77_compile_args = [] + extra_f90_compile_args = [] + + def __init__(self, *args, **kw): + CCompiler.__init__(self, *args, **kw) + self.distutils_vars = self.distutils_vars.clone(self._environment_hook) + self.command_vars = self.command_vars.clone(self._environment_hook) + self.flag_vars = self.flag_vars.clone(self._environment_hook) + self.executables = self.executables.copy() + for e in self._executable_keys: + if e not in self.executables: + self.executables[e] = None + + # Some methods depend on .customize() being called first, so + # this keeps track of whether that's happened yet. + self._is_customised = False + + def __copy__(self): + obj = self.__new__(self.__class__) + obj.__dict__.update(self.__dict__) + obj.distutils_vars = obj.distutils_vars.clone(obj._environment_hook) + obj.command_vars = obj.command_vars.clone(obj._environment_hook) + obj.flag_vars = obj.flag_vars.clone(obj._environment_hook) + obj.executables = obj.executables.copy() + return obj + + def copy(self): + return self.__copy__() + + # Use properties for the attributes used by CCompiler. Setting them + # as attributes from the self.executables dictionary is error-prone, + # so we get them from there each time. + def _command_property(key): + def fget(self): + assert self._is_customised + return self.executables[key] + return property(fget=fget) + version_cmd = _command_property('version_cmd') + compiler_f77 = _command_property('compiler_f77') + compiler_f90 = _command_property('compiler_f90') + compiler_fix = _command_property('compiler_fix') + linker_so = _command_property('linker_so') + linker_exe = _command_property('linker_exe') + archiver = _command_property('archiver') + ranlib = _command_property('ranlib') + + # Make our terminology consistent. + def set_executable(self, key, value): + self.set_command(key, value) + + def set_commands(self, **kw): + for k, v in kw.items(): + self.set_command(k, v) + + def set_command(self, key, value): + if not key in self._executable_keys: + raise ValueError( + "unknown executable '%s' for class %s" % + (key, self.__class__.__name__)) + if is_string(value): + value = split_quoted(value) + assert value is None or is_sequence_of_strings(value[1:]), (key, value) + self.executables[key] = value + + ###################################################################### + ## Methods that subclasses may redefine. But don't call these methods! + ## They are private to FCompiler class and may return unexpected + ## results if used elsewhere. So, you have been warned.. + + def find_executables(self): + """Go through the self.executables dictionary, and attempt to + find and assign appropriate executables. 
+
+        Executable names are looked for in the environment (environment
+        variables, the distutils.cfg, and command line), the 0th-element of
+        the command list, and the self.possible_executables list.
+
+        Also, if the 0th element is "<F77>" or "<F90>", the Fortran 77
+        or the Fortran 90 compiler executable is used, unless overridden
+        by an environment setting.
+
+        Subclasses should call this if overridden.
+        """
+        assert self._is_customised
+        exe_cache = self._exe_cache
+        def cached_find_executable(exe):
+            if exe in exe_cache:
+                return exe_cache[exe]
+            fc_exe = find_executable(exe)
+            exe_cache[exe] = exe_cache[fc_exe] = fc_exe
+            return fc_exe
+        def verify_command_form(name, value):
+            if value is not None and not is_sequence_of_strings(value):
+                raise ValueError(
+                    "%s value %r is invalid in class %s" %
+                    (name, value, self.__class__.__name__))
+        def set_exe(exe_key, f77=None, f90=None):
+            cmd = self.executables.get(exe_key, None)
+            if not cmd:
+                return None
+            # Note that we get cmd[0] here if the environment doesn't
+            # have anything set
+            exe_from_environ = getattr(self.command_vars, exe_key)
+            if not exe_from_environ:
+                possibles = [f90, f77] + self.possible_executables
+            else:
+                possibles = [exe_from_environ] + self.possible_executables
+
+            seen = set()
+            unique_possibles = []
+            for e in possibles:
+                if e == '<F77>':
+                    e = f77
+                elif e == '<F90>':
+                    e = f90
+                if not e or e in seen:
+                    continue
+                seen.add(e)
+                unique_possibles.append(e)
+
+            for exe in unique_possibles:
+                fc_exe = cached_find_executable(exe)
+                if fc_exe:
+                    cmd[0] = fc_exe
+                    return fc_exe
+            self.set_command(exe_key, None)
+            return None
+
+        ctype = self.compiler_type
+        f90 = set_exe('compiler_f90')
+        if not f90:
+            f77 = set_exe('compiler_f77')
+            if f77:
+                log.warn('%s: no Fortran 90 compiler found' % ctype)
+            else:
+                raise CompilerNotFound('%s: f90 nor f77' % ctype)
+        else:
+            f77 = set_exe('compiler_f77', f90=f90)
+            if not f77:
+                log.warn('%s: no Fortran 77 compiler found' % ctype)
+            set_exe('compiler_fix', f90=f90)
+
+        set_exe('linker_so', f77=f77, f90=f90)
+        set_exe('linker_exe', f77=f77, f90=f90)
+        set_exe('version_cmd', f77=f77, f90=f90)
+        set_exe('archiver')
+        set_exe('ranlib')
+
+    def update_executables(self):
+        """Called at the beginning of customisation. Subclasses should
+        override this if they need to set up the executables dictionary.
+
+        Note that self.find_executables() is run afterwards, so the
+        self.executables dictionary values can contain <F77> or <F90> as
+        the command, which will be replaced by the found F77 or F90
+        compiler.
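+
+        A sketch of an override (the flag is illustrative)::
+
+            def update_executables(self):
+                self.executables['compiler_f90'] = ['<F90>', '-ffree-form']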
+ """ + pass + + def get_flags(self): + """List of flags common to all compiler types.""" + return [] + self.pic_flags + + def _get_command_flags(self, key): + cmd = self.executables.get(key, None) + if cmd is None: + return [] + return cmd[1:] + + def get_flags_f77(self): + """List of Fortran 77 specific flags.""" + return self._get_command_flags('compiler_f77') + def get_flags_f90(self): + """List of Fortran 90 specific flags.""" + return self._get_command_flags('compiler_f90') + def get_flags_free(self): + """List of Fortran 90 free format specific flags.""" + return [] + def get_flags_fix(self): + """List of Fortran 90 fixed format specific flags.""" + return self._get_command_flags('compiler_fix') + def get_flags_linker_so(self): + """List of linker flags to build a shared library.""" + return self._get_command_flags('linker_so') + def get_flags_linker_exe(self): + """List of linker flags to build an executable.""" + return self._get_command_flags('linker_exe') + def get_flags_ar(self): + """List of archiver flags. """ + return self._get_command_flags('archiver') + def get_flags_opt(self): + """List of architecture independent compiler flags.""" + return [] + def get_flags_arch(self): + """List of architecture dependent compiler flags.""" + return [] + def get_flags_debug(self): + """List of compiler flags to compile with debugging information.""" + return [] + + get_flags_opt_f77 = get_flags_opt_f90 = get_flags_opt + get_flags_arch_f77 = get_flags_arch_f90 = get_flags_arch + get_flags_debug_f77 = get_flags_debug_f90 = get_flags_debug + + def get_libraries(self): + """List of compiler libraries.""" + return self.libraries[:] + def get_library_dirs(self): + """List of compiler library directories.""" + return self.library_dirs[:] + + def get_version(self, force=False, ok_status=[0]): + assert self._is_customised + version = CCompiler.get_version(self, force=force, ok_status=ok_status) + if version is None: + raise CompilerNotFound() + return version + + + ############################################################ + + ## Public methods: + + def customize(self, dist = None): + """Customize Fortran compiler. + + This method gets Fortran compiler specific information from + (i) class definition, (ii) environment, (iii) distutils config + files, and (iv) command line (later overrides earlier). + + This method should be always called after constructing a + compiler instance. But not in __init__ because Distribution + instance is needed for (iii) and (iv). + """ + log.info('customize %s' % (self.__class__.__name__)) + + self._is_customised = True + + self.distutils_vars.use_distribution(dist) + self.command_vars.use_distribution(dist) + self.flag_vars.use_distribution(dist) + + self.update_executables() + + # find_executables takes care of setting the compiler commands, + # version_cmd, linker_so, linker_exe, ar, and ranlib + self.find_executables() + + noopt = self.distutils_vars.get('noopt', False) + noarch = self.distutils_vars.get('noarch', noopt) + debug = self.distutils_vars.get('debug', False) + + f77 = self.command_vars.compiler_f77 + f90 = self.command_vars.compiler_f90 + + f77flags = [] + f90flags = [] + freeflags = [] + fixflags = [] + + if f77: + f77 = _shell_utils.NativeParser.split(f77) + f77flags = self.flag_vars.f77 + if f90: + f90 = _shell_utils.NativeParser.split(f90) + f90flags = self.flag_vars.f90 + freeflags = self.flag_vars.free + # XXX Assuming that free format is default for f90 compiler. 
+ fix = self.command_vars.compiler_fix + # NOTE: this and similar examples are probably just + # excluding --coverage flag when F90 = gfortran --coverage + # instead of putting that flag somewhere more appropriate + # this and similar examples where a Fortran compiler + # environment variable has been customized by CI or a user + # should perhaps eventually be more thoroughly tested and more + # robustly handled + if fix: + fix = _shell_utils.NativeParser.split(fix) + fixflags = self.flag_vars.fix + f90flags + + oflags, aflags, dflags = [], [], [] + # examine get_flags__ for extra flags + # only add them if the method is different from get_flags_ + def get_flags(tag, flags): + # note that self.flag_vars. calls self.get_flags_() + flags.extend(getattr(self.flag_vars, tag)) + this_get = getattr(self, 'get_flags_' + tag) + for name, c, flagvar in [('f77', f77, f77flags), + ('f90', f90, f90flags), + ('f90', fix, fixflags)]: + t = '%s_%s' % (tag, name) + if c and this_get is not getattr(self, 'get_flags_' + t): + flagvar.extend(getattr(self.flag_vars, t)) + if not noopt: + get_flags('opt', oflags) + if not noarch: + get_flags('arch', aflags) + if debug: + get_flags('debug', dflags) + + fflags = self.flag_vars.flags + dflags + oflags + aflags + + if f77: + self.set_commands(compiler_f77=f77+f77flags+fflags) + if f90: + self.set_commands(compiler_f90=f90+freeflags+f90flags+fflags) + if fix: + self.set_commands(compiler_fix=fix+fixflags+fflags) + + + #XXX: Do we need LDSHARED->SOSHARED, LDFLAGS->SOFLAGS + linker_so = self.linker_so + if linker_so: + linker_so_flags = self.flag_vars.linker_so + if sys.platform.startswith('aix'): + python_lib = get_python_lib(standard_lib=1) + ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix') + python_exp = os.path.join(python_lib, 'config', 'python.exp') + linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp] + self.set_commands(linker_so=linker_so+linker_so_flags) + + linker_exe = self.linker_exe + if linker_exe: + linker_exe_flags = self.flag_vars.linker_exe + self.set_commands(linker_exe=linker_exe+linker_exe_flags) + + ar = self.command_vars.archiver + if ar: + arflags = self.flag_vars.ar + self.set_commands(archiver=[ar]+arflags) + + self.set_library_dirs(self.get_library_dirs()) + self.set_libraries(self.get_libraries()) + + def dump_properties(self): + """Print out the attributes of a compiler instance.""" + props = [] + for key in list(self.executables.keys()) + \ + ['version', 'libraries', 'library_dirs', + 'object_switch', 'compile_switch']: + if hasattr(self, key): + v = getattr(self, key) + props.append((key, None, '= '+repr(v))) + props.sort() + + pretty_printer = FancyGetopt(props) + for l in pretty_printer.generate_help("%s instance properties:" \ + % (self.__class__.__name__)): + if l[:4]==' --': + l = ' ' + l[4:] + print(l) + + ################### + + def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): + """Compile 'src' to product 'obj'.""" + src_flags = {} + if is_f_file(src) and not has_f90_header(src): + flavor = ':f77' + compiler = self.compiler_f77 + src_flags = get_f77flags(src) + extra_compile_args = self.extra_f77_compile_args or [] + elif is_free_format(src): + flavor = ':f90' + compiler = self.compiler_f90 + if compiler is None: + raise DistutilsExecError('f90 not supported by %s needed for %s'\ + % (self.__class__.__name__, src)) + extra_compile_args = self.extra_f90_compile_args or [] + else: + flavor = ':fix' + compiler = self.compiler_fix + if compiler is None: + raise DistutilsExecError('f90 (fixed) 
not supported by %s needed for %s'\ + % (self.__class__.__name__, src)) + extra_compile_args = self.extra_f90_compile_args or [] + if self.object_switch[-1]==' ': + o_args = [self.object_switch.strip(), obj] + else: + o_args = [self.object_switch.strip()+obj] + + assert self.compile_switch.strip() + s_args = [self.compile_switch, src] + + if extra_compile_args: + log.info('extra %s options: %r' \ + % (flavor[1:], ' '.join(extra_compile_args))) + + extra_flags = src_flags.get(self.compiler_type, []) + if extra_flags: + log.info('using compile options from source: %r' \ + % ' '.join(extra_flags)) + + command = compiler + cc_args + extra_flags + s_args + o_args \ + + extra_postargs + extra_compile_args + + display = '%s: %s' % (os.path.basename(compiler[0]) + flavor, + src) + try: + self.spawn(command, display=display) + except DistutilsExecError as e: + msg = str(e) + raise CompileError(msg) from None + + def module_options(self, module_dirs, module_build_dir): + options = [] + if self.module_dir_switch is not None: + if self.module_dir_switch[-1]==' ': + options.extend([self.module_dir_switch.strip(), module_build_dir]) + else: + options.append(self.module_dir_switch.strip()+module_build_dir) + else: + print('XXX: module_build_dir=%r option ignored' % (module_build_dir)) + print('XXX: Fix module_dir_switch for ', self.__class__.__name__) + if self.module_include_switch is not None: + for d in [module_build_dir]+module_dirs: + options.append('%s%s' % (self.module_include_switch, d)) + else: + print('XXX: module_dirs=%r option ignored' % (module_dirs)) + print('XXX: Fix module_include_switch for ', self.__class__.__name__) + return options + + def library_option(self, lib): + return "-l" + lib + def library_dir_option(self, dir): + return "-L" + dir + + def link(self, target_desc, objects, + output_filename, output_dir=None, libraries=None, + library_dirs=None, runtime_library_dirs=None, + export_symbols=None, debug=0, extra_preargs=None, + extra_postargs=None, build_temp=None, target_lang=None): + objects, output_dir = self._fix_object_args(objects, output_dir) + libraries, library_dirs, runtime_library_dirs = \ + self._fix_lib_args(libraries, library_dirs, runtime_library_dirs) + + lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs, + libraries) + if is_string(output_dir): + output_filename = os.path.join(output_dir, output_filename) + elif output_dir is not None: + raise TypeError("'output_dir' must be a string or None") + + if self._need_link(objects, output_filename): + if self.library_switch[-1]==' ': + o_args = [self.library_switch.strip(), output_filename] + else: + o_args = [self.library_switch.strip()+output_filename] + + if is_string(self.objects): + ld_args = objects + [self.objects] + else: + ld_args = objects + self.objects + ld_args = ld_args + lib_opts + o_args + if debug: + ld_args[:0] = ['-g'] + if extra_preargs: + ld_args[:0] = extra_preargs + if extra_postargs: + ld_args.extend(extra_postargs) + self.mkpath(os.path.dirname(output_filename)) + if target_desc == CCompiler.EXECUTABLE: + linker = self.linker_exe[:] + else: + linker = self.linker_so[:] + command = linker + ld_args + try: + self.spawn(command) + except DistutilsExecError as e: + msg = str(e) + raise LinkError(msg) from None + else: + log.debug("skipping %s (up-to-date)", output_filename) + + def _environment_hook(self, name, hook_name): + if hook_name is None: + return None + if is_string(hook_name): + if hook_name.startswith('self.'): + hook_name = hook_name[5:] + hook = getattr(self, 
hook_name) + return hook() + elif hook_name.startswith('exe.'): + hook_name = hook_name[4:] + var = self.executables[hook_name] + if var: + return var[0] + else: + return None + elif hook_name.startswith('flags.'): + hook_name = hook_name[6:] + hook = getattr(self, 'get_flags_' + hook_name) + return hook() + else: + return hook_name() + + def can_ccompiler_link(self, ccompiler): + """ + Check if the given C compiler can link objects produced by + this compiler. + """ + return True + + def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir): + """ + Convert a set of object files that are not compatible with the default + linker, to a file that is compatible. + + Parameters + ---------- + objects : list + List of object files to include. + output_dir : str + Output directory to place generated object files. + extra_dll_dir : str + Output directory to place extra DLL files that need to be + included on Windows. + + Returns + ------- + converted_objects : list of str + List of converted object files. + Note that the number of output files is not necessarily + the same as inputs. + + """ + raise NotImplementedError() + + ## class FCompiler + +_default_compilers = ( + # sys.platform mappings + ('win32', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95', + 'intelvem', 'intelem', 'flang')), + ('cygwin.*', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95')), + ('linux.*', ('gnu95', 'intel', 'lahey', 'pg', 'nv', 'absoft', 'nag', 'vast', 'compaq', + 'intele', 'intelem', 'gnu', 'g95', 'pathf95', 'nagfor', 'fujitsu')), + ('darwin.*', ('gnu95', 'nag', 'absoft', 'ibm', 'intel', 'gnu', 'g95', 'pg')), + ('sunos.*', ('sun', 'gnu', 'gnu95', 'g95')), + ('irix.*', ('mips', 'gnu', 'gnu95',)), + ('aix.*', ('ibm', 'gnu', 'gnu95',)), + # os.name mappings + ('posix', ('gnu', 'gnu95',)), + ('nt', ('gnu', 'gnu95',)), + ('mac', ('gnu95', 'gnu', 'pg')), + ) + +fcompiler_class = None +fcompiler_aliases = None + +def load_all_fcompiler_classes(): + """Cache all the FCompiler classes found in modules in the + numpy.distutils.fcompiler package. + """ + from glob import glob + global fcompiler_class, fcompiler_aliases + if fcompiler_class is not None: + return + pys = os.path.join(os.path.dirname(__file__), '*.py') + fcompiler_class = {} + fcompiler_aliases = {} + for fname in glob(pys): + module_name, ext = os.path.splitext(os.path.basename(fname)) + module_name = 'numpy.distutils.fcompiler.' + module_name + __import__ (module_name) + module = sys.modules[module_name] + if hasattr(module, 'compilers'): + for cname in module.compilers: + klass = getattr(module, cname) + desc = (klass.compiler_type, klass, klass.description) + fcompiler_class[klass.compiler_type] = desc + for alias in klass.compiler_aliases: + if alias in fcompiler_aliases: + raise ValueError("alias %r defined for both %s and %s" + % (alias, klass.__name__, + fcompiler_aliases[alias][1].__name__)) + fcompiler_aliases[alias] = desc + +def _find_existing_fcompiler(compiler_types, + osname=None, platform=None, + requiref90=False, + c_compiler=None): + from numpy.distutils.core import get_distribution + dist = get_distribution(always=True) + for compiler_type in compiler_types: + v = None + try: + c = new_fcompiler(plat=platform, compiler=compiler_type, + c_compiler=c_compiler) + c.customize(dist) + v = c.get_version() + if requiref90 and c.compiler_f90 is None: + v = None + new_compiler = c.suggested_f90_compiler + if new_compiler: + log.warn('Trying %r compiler as suggested by %r ' + 'compiler for f90 support.' 
% (compiler_type, + new_compiler)) + c = new_fcompiler(plat=platform, compiler=new_compiler, + c_compiler=c_compiler) + c.customize(dist) + v = c.get_version() + if v is not None: + compiler_type = new_compiler + if requiref90 and c.compiler_f90 is None: + raise ValueError('%s does not support compiling f90 codes, ' + 'skipping.' % (c.__class__.__name__)) + except DistutilsModuleError: + log.debug("_find_existing_fcompiler: compiler_type='%s' raised DistutilsModuleError", compiler_type) + except CompilerNotFound: + log.debug("_find_existing_fcompiler: compiler_type='%s' not found", compiler_type) + if v is not None: + return compiler_type + return None + +def available_fcompilers_for_platform(osname=None, platform=None): + if osname is None: + osname = os.name + if platform is None: + platform = sys.platform + matching_compiler_types = [] + for pattern, compiler_type in _default_compilers: + if re.match(pattern, platform) or re.match(pattern, osname): + for ct in compiler_type: + if ct not in matching_compiler_types: + matching_compiler_types.append(ct) + if not matching_compiler_types: + matching_compiler_types.append('gnu') + return matching_compiler_types + +def get_default_fcompiler(osname=None, platform=None, requiref90=False, + c_compiler=None): + """Determine the default Fortran compiler to use for the given + platform.""" + matching_compiler_types = available_fcompilers_for_platform(osname, + platform) + log.info("get_default_fcompiler: matching types: '%s'", + matching_compiler_types) + compiler_type = _find_existing_fcompiler(matching_compiler_types, + osname=osname, + platform=platform, + requiref90=requiref90, + c_compiler=c_compiler) + return compiler_type + +# Flag to avoid rechecking for Fortran compiler every time +failed_fcompilers = set() + +def new_fcompiler(plat=None, + compiler=None, + verbose=0, + dry_run=0, + force=0, + requiref90=False, + c_compiler = None): + """Generate an instance of some FCompiler subclass for the supplied + platform/compiler combination. + """ + global failed_fcompilers + fcompiler_key = (plat, compiler) + if fcompiler_key in failed_fcompilers: + return None + + load_all_fcompiler_classes() + if plat is None: + plat = os.name + if compiler is None: + compiler = get_default_fcompiler(plat, requiref90=requiref90, + c_compiler=c_compiler) + if compiler in fcompiler_class: + module_name, klass, long_description = fcompiler_class[compiler] + elif compiler in fcompiler_aliases: + module_name, klass, long_description = fcompiler_aliases[compiler] + else: + msg = "don't know how to compile Fortran code on platform '%s'" % plat + if compiler is not None: + msg = msg + " with '%s' compiler." % compiler + msg = msg + " Supported compilers are: %s)" \ + % (','.join(fcompiler_class.keys())) + log.warn(msg) + failed_fcompilers.add(fcompiler_key) + return None + + compiler = klass(verbose=verbose, dry_run=dry_run, force=force) + compiler.c_compiler = c_compiler + return compiler + +def show_fcompilers(dist=None): + """Print list of available compilers (used by the "--help-fcompiler" + option to "config_fc"). 
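+
+    Typically reached from the setup command line::
+
+        python setup.py config_fc --help-fcompiler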
+ """ + if dist is None: + from distutils.dist import Distribution + from numpy.distutils.command.config_compiler import config_fc + dist = Distribution() + dist.script_name = os.path.basename(sys.argv[0]) + dist.script_args = ['config_fc'] + sys.argv[1:] + try: + dist.script_args.remove('--help-fcompiler') + except ValueError: + pass + dist.cmdclass['config_fc'] = config_fc + dist.parse_config_files() + dist.parse_command_line() + compilers = [] + compilers_na = [] + compilers_ni = [] + if not fcompiler_class: + load_all_fcompiler_classes() + platform_compilers = available_fcompilers_for_platform() + for compiler in platform_compilers: + v = None + log.set_verbosity(-2) + try: + c = new_fcompiler(compiler=compiler, verbose=dist.verbose) + c.customize(dist) + v = c.get_version() + except (DistutilsModuleError, CompilerNotFound) as e: + log.debug("show_fcompilers: %s not found" % (compiler,)) + log.debug(repr(e)) + + if v is None: + compilers_na.append(("fcompiler="+compiler, None, + fcompiler_class[compiler][2])) + else: + c.dump_properties() + compilers.append(("fcompiler="+compiler, None, + fcompiler_class[compiler][2] + ' (%s)' % v)) + + compilers_ni = list(set(fcompiler_class.keys()) - set(platform_compilers)) + compilers_ni = [("fcompiler="+fc, None, fcompiler_class[fc][2]) + for fc in compilers_ni] + + compilers.sort() + compilers_na.sort() + compilers_ni.sort() + pretty_printer = FancyGetopt(compilers) + pretty_printer.print_help("Fortran compilers found:") + pretty_printer = FancyGetopt(compilers_na) + pretty_printer.print_help("Compilers available for this " + "platform, but not found:") + if compilers_ni: + pretty_printer = FancyGetopt(compilers_ni) + pretty_printer.print_help("Compilers not available on this platform:") + print("For compiler details, run 'config_fc --verbose' setup command.") + + +def dummy_fortran_file(): + fo, name = make_temp_file(suffix='.f') + fo.write(" subroutine dummy()\n end\n") + fo.close() + return name[:-2] + + +is_f_file = re.compile(r'.*\.(for|ftn|f77|f)\Z', re.I).match +_has_f_header = re.compile(r'-\*-\s*fortran\s*-\*-', re.I).search +_has_f90_header = re.compile(r'-\*-\s*f90\s*-\*-', re.I).search +_has_fix_header = re.compile(r'-\*-\s*fix\s*-\*-', re.I).search +_free_f90_start = re.compile(r'[^c*!]\s*[^\s\d\t]', re.I).match + +def is_free_format(file): + """Check if file is in free format Fortran.""" + # f90 allows both fixed and free format, assuming fixed unless + # signs of free format are detected. + result = 0 + with open(file, encoding='latin1') as f: + line = f.readline() + n = 10000 # the number of non-comment lines to scan for hints + if _has_f_header(line) or _has_fix_header(line): + n = 0 + elif _has_f90_header(line): + n = 0 + result = 1 + while n>0 and line: + line = line.rstrip() + if line and line[0]!='!': + n -= 1 + if (line[0]!='\t' and _free_f90_start(line[:5])) or line[-1:]=='&': + result = 1 + break + line = f.readline() + return result + +def has_f90_header(src): + with open(src, encoding='latin1') as f: + line = f.readline() + return _has_f90_header(line) or _has_fix_header(line) + +_f77flags_re = re.compile(r'(c|)f77flags\s*\(\s*(?P\w+)\s*\)\s*=\s*(?P.*)', re.I) +def get_f77flags(src): + """ + Search the first 20 lines of fortran 77 code for line pattern + `CF77FLAGS()=` + Return a dictionary {:}. 
+ """ + flags = {} + with open(src, encoding='latin1') as f: + i = 0 + for line in f: + i += 1 + if i>20: break + m = _f77flags_re.match(line) + if not m: continue + fcname = m.group('fcname').strip() + fflags = m.group('fflags').strip() + flags[fcname] = split_quoted(fflags) + return flags + +# TODO: implement get_f90flags and use it in _compile similarly to get_f77flags + +if __name__ == '__main__': + show_fcompilers() diff --git a/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..53e57e4eb125261539631d6e486b27f3c0c32a69 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/absoft.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/absoft.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6df618bb5ab86e3ce05b0f3d69a82519c967173c Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/absoft.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/compaq.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/compaq.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..80624ebaaf2750651bf5d5f7fb89d50b8c3779c6 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/compaq.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/environment.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/environment.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7619c9d04f2b77815ed31a3fe1ba4f5900e45f36 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/environment.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/fujitsu.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/fujitsu.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c5abf879ecfe4870d166d166ee95020602fd090e Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/fujitsu.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/g95.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/g95.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c0d58ff022a7889011b04b8419640f07ae8a23b9 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/g95.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/gnu.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/gnu.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b8cb16e6df08e96a53032e13220a3edd7da16a3 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/gnu.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/hpux.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/hpux.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..dde4b3661a80751896c9bae2952114fd440c3e16 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/hpux.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/ibm.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/ibm.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..24fda4d012297fc9540a9aa03810faf01eaf6745 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/ibm.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/intel.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/intel.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..660d05e14dfad8841ee048199cbe47c9f4bcc320 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/intel.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/lahey.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/lahey.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..76207c27520e0623012f138acb498d09259d8621 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/lahey.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/mips.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/mips.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..08ac5d68be6d3930f879911c4628f05727874175 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/mips.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/nag.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/nag.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0579d551dbcb249a5b77c74c6f4ff6ef6b0d3e4e Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/nag.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/none.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/none.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc132faded4a38a110e9645b9dba102813af49b5 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/none.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/nv.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/nv.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..86a3b6692deca77b2d9b96dbef2e58f5aeabe871 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/nv.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/pathf95.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/pathf95.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..815f529ef4185e94124bc78bf3f21d23935edaa6 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/pathf95.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/pg.cpython-39.pyc 
b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/pg.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e2d7511dc2bb3376415b30372ff2736f3342dd73 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/pg.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/sun.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/sun.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..819d08e3b78ba703cc9e8d4b8207592fe7452cfe Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/sun.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/vast.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/vast.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98ae78adb90199eca6e733f5f7b8a94ced224096 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/__pycache__/vast.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/fcompiler/absoft.py b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/absoft.py new file mode 100644 index 0000000000000000000000000000000000000000..082cda2de414f35138c00293b365274f3e0027b3 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/absoft.py @@ -0,0 +1,156 @@ + +# http://www.absoft.com/literature/osxuserguide.pdf +# http://www.absoft.com/documentation.html + +# Notes: +# - when using -g77 then use -DUNDERSCORE_G77 to compile f2py +# generated extension modules (works for f2py v2.45.241_1936 and up) +import os + +from numpy.distutils.cpuinfo import cpu +from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file +from numpy.distutils.misc_util import cyg2win32 + +compilers = ['AbsoftFCompiler'] + +class AbsoftFCompiler(FCompiler): + + compiler_type = 'absoft' + description = 'Absoft Corp Fortran Compiler' + #version_pattern = r'FORTRAN 77 Compiler (?P<version>[^\s*,]*).*?Absoft Corp' + version_pattern = r'(f90:.*?(Absoft Pro FORTRAN Version|FORTRAN 77 Compiler|Absoft Fortran Compiler Version|Copyright Absoft Corporation.*?Version))'+\ + r' (?P<version>[^\s*,]*)(.*?Absoft Corp|)' + + # on windows: f90 -V -c dummy.f + # f90: Copyright Absoft Corporation 1994-1998 mV2; Cray Research, Inc. 1994-1996 CF90 (2.x.x.x f36t87) Version 2.3 Wed Apr 19, 2006 13:05:16 + + # samt5735(8)$ f90 -V -c dummy.f + # f90: Copyright Absoft Corporation 1994-2002; Absoft Pro FORTRAN Version 8.0 + # Note that fink installs g77 as f77, so need to use f90 for detection. + + executables = { + 'version_cmd' : None, # set by update_executables + 'compiler_f77' : ["f77"], + 'compiler_fix' : ["f90"], + 'compiler_f90' : ["f90"], + 'linker_so' : ["<F90>"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + if os.name=='nt': + library_switch = '/out:' #No space after /out:! + + module_dir_switch = None + module_include_switch = '-p' + + def update_executables(self): + f = cyg2win32(dummy_fortran_file()) + self.executables['version_cmd'] = ['<F90>', '-V', '-c', + f+'.f', '-o', f+'.o'] + + def get_flags_linker_so(self): + if os.name=='nt': + opt = ['/dll'] + # The "-K shared" switches are being left in for pre-9.0 versions + # of Absoft though I don't think versions earlier than 9 can + # actually be used to build shared libraries. In fact, version + # 8 of Absoft doesn't recognize "-K shared" and will fail.
+ elif self.get_version() >= '9.0': + opt = ['-shared'] + else: + opt = ["-K", "shared"] + return opt + + def library_dir_option(self, dir): + if os.name=='nt': + return ['-link', '/PATH:%s' % (dir)] + return "-L" + dir + + def library_option(self, lib): + if os.name=='nt': + return '%s.lib' % (lib) + return "-l" + lib + + def get_library_dirs(self): + opt = FCompiler.get_library_dirs(self) + d = os.environ.get('ABSOFT') + if d: + if self.get_version() >= '10.0': + # use shared libraries, the static libraries were not compiled -fPIC + prefix = 'sh' + else: + prefix = '' + if cpu.is_64bit(): + suffix = '64' + else: + suffix = '' + opt.append(os.path.join(d, '%slib%s' % (prefix, suffix))) + return opt + + def get_libraries(self): + opt = FCompiler.get_libraries(self) + if self.get_version() >= '11.0': + opt.extend(['af90math', 'afio', 'af77math', 'amisc']) + elif self.get_version() >= '10.0': + opt.extend(['af90math', 'afio', 'af77math', 'U77']) + elif self.get_version() >= '8.0': + opt.extend(['f90math', 'fio', 'f77math', 'U77']) + else: + opt.extend(['fio', 'f90math', 'fmath', 'U77']) + if os.name =='nt': + opt.append('COMDLG32') + return opt + + def get_flags(self): + opt = FCompiler.get_flags(self) + if os.name != 'nt': + opt.extend(['-s']) + if self.get_version(): + if self.get_version()>='8.2': + opt.append('-fpic') + return opt + + def get_flags_f77(self): + opt = FCompiler.get_flags_f77(self) + opt.extend(['-N22', '-N90', '-N110']) + v = self.get_version() + if os.name == 'nt': + if v and v>='8.0': + opt.extend(['-f', '-N15']) + else: + opt.append('-f') + if v: + if v<='4.6': + opt.append('-B108') + else: + # Though -N15 is undocumented, it works with + # Absoft 8.0 on Linux + opt.append('-N15') + return opt + + def get_flags_f90(self): + opt = FCompiler.get_flags_f90(self) + opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX", + "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"]) + if self.get_version(): + if self.get_version()>'4.6': + opt.extend(["-YDEALLOC=ALL"]) + return opt + + def get_flags_fix(self): + opt = FCompiler.get_flags_fix(self) + opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX", + "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"]) + opt.extend(["-f", "fixed"]) + return opt + + def get_flags_opt(self): + opt = ['-O'] + return opt + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='absoft').get_version()) diff --git a/MLPY/Lib/site-packages/numpy/distutils/fcompiler/compaq.py b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/compaq.py new file mode 100644 index 0000000000000000000000000000000000000000..d381bb90f3f52521a5bf8b45f8dc8793b2ffa0d8 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/compaq.py @@ -0,0 +1,120 @@ + +#http://www.compaq.com/fortran/docs/ +import os +import sys + +from numpy.distutils.fcompiler import FCompiler +from distutils.errors import DistutilsPlatformError + +compilers = ['CompaqFCompiler'] +if os.name != 'posix' or sys.platform[:6] == 'cygwin' : + # Otherwise we'd get a false positive on posix systems with + # case-insensitive filesystems (like darwin), because we'll pick + # up /bin/df + compilers.append('CompaqVisualFCompiler') + +class CompaqFCompiler(FCompiler): + + compiler_type = 'compaq' + description = 'Compaq Fortran Compiler' + version_pattern = r'Compaq Fortran (?P<version>[^\s]*).*' + + if sys.platform[:5]=='linux': + fc_exe = 'fort' + else: + fc_exe = 'f90' + +
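+ # Editor's note (sketch, not upstream code): entries such as '<F77>',
+ # '<F90>' and '<FC>' in the `executables` dicts of these modules are
+ # placeholders that FCompiler resolves to the detected compiler command
+ # once the instance is customized, e.g.:
+ #   >>> from numpy.distutils.fcompiler import new_fcompiler
+ #   >>> fc = new_fcompiler(compiler='compaq')  # assumes the toolchain is installed
+ #   >>> fc.customize(); fc.version_cmd         # '<F90>' replaced by the real 'fort'/'f90'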
executables = { + 'version_cmd' : ['<F90>', "-version"], + 'compiler_f77' : [fc_exe, "-f77rtl", "-fixed"], + 'compiler_fix' : [fc_exe, "-fixed"], + 'compiler_f90' : [fc_exe], + 'linker_so' : ['<F90>'], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + module_dir_switch = '-module ' # not tested + module_include_switch = '-I' + + def get_flags(self): + return ['-assume no2underscore', '-nomixed_str_len_arg'] + def get_flags_debug(self): + return ['-g', '-check bounds'] + def get_flags_opt(self): + return ['-O4', '-align dcommons', '-assume bigarrays', + '-assume nozsize', '-math_library fast'] + def get_flags_arch(self): + return ['-arch host', '-tune host'] + def get_flags_linker_so(self): + if sys.platform[:5]=='linux': + return ['-shared'] + return ['-shared', '-Wl,-expect_unresolved,*'] + +class CompaqVisualFCompiler(FCompiler): + + compiler_type = 'compaqv' + description = 'DIGITAL or Compaq Visual Fortran Compiler' + version_pattern = (r'(DIGITAL|Compaq) Visual Fortran Optimizing Compiler' + r' Version (?P<version>[^\s]*).*') + + compile_switch = '/compile_only' + object_switch = '/object:' + library_switch = '/OUT:' #No space after /OUT:! + + static_lib_extension = ".lib" + static_lib_format = "%s%s" + module_dir_switch = '/module:' + module_include_switch = '/I' + + ar_exe = 'lib.exe' + fc_exe = 'DF' + + if sys.platform=='win32': + from numpy.distutils.msvccompiler import MSVCCompiler + + try: + m = MSVCCompiler() + m.initialize() + ar_exe = m.lib + except DistutilsPlatformError: + pass + except AttributeError as e: + if '_MSVCCompiler__root' in str(e): + print('Ignoring "%s" (I think it is msvccompiler.py bug)' % (e)) + else: + raise + except IOError as e: + if not "vcvarsall.bat" in str(e): + print("Unexpected IOError in", __file__) + raise + except ValueError as e: + if not "'path'" in str(e): + print("Unexpected ValueError in", __file__) + raise + + executables = { + 'version_cmd' : ['<F90>', "/what"], + 'compiler_f77' : [fc_exe, "/f77rtl", "/fixed"], + 'compiler_fix' : [fc_exe, "/fixed"], + 'compiler_f90' : [fc_exe], + 'linker_so' : ['<F90>'], + 'archiver' : [ar_exe, "/OUT:"], + 'ranlib' : None + } + + def get_flags(self): + return ['/nologo', '/MD', '/WX', '/iface=(cref,nomixed_str_len_arg)', + '/names:lowercase', '/assume:underscore'] + def get_flags_opt(self): + return ['/Ox', '/fast', '/optimize:5', '/unroll:0', '/math_library:fast'] + def get_flags_arch(self): + return ['/threads'] + def get_flags_debug(self): + return ['/debug'] + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='compaq').get_version()) diff --git a/MLPY/Lib/site-packages/numpy/distutils/fcompiler/environment.py b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/environment.py new file mode 100644 index 0000000000000000000000000000000000000000..defa2db226d51ecce028ce286e5dbc2bd1bab2a4 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/environment.py @@ -0,0 +1,88 @@ +import os +from distutils.dist import Distribution + +__metaclass__ = type + +class EnvironmentConfig: + def __init__(self, distutils_section='ALL', **kw): + self._distutils_section = distutils_section + self._conf_keys = kw + self._conf = None + self._hook_handler = None + + def dump_variable(self, name): + conf_desc = self._conf_keys[name] + hook, envvar, confvar, convert, append = conf_desc + if not convert: + convert = lambda x : x + print('%s.%s:' % (self._distutils_section, name)) + v = self._hook_handler(name, hook) +
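+ # Each conf_desc is a 5-tuple (hook, envvar, confvar, convert, append):
+ # `hook` asks the owning FCompiler for the base value, `envvar`/`confvar`
+ # name an environment variable and a config-file key that may override it,
+ # `convert` post-processes the raw string, and `append` decides whether
+ # environment flags extend or replace the hook value (see _get_var below).
+ # Illustrative flow (editorial sketch): FFLAGS="-g -O0" in the environment
+ # is converted to a list and, with append=True, extended onto the
+ # compiler's own flag list rather than replacing it.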
print(' hook : %s' % (convert(v),)) + if envvar: + v = os.environ.get(envvar, None) + print(' environ: %s' % (convert(v),)) + if confvar and self._conf: + v = self._conf.get(confvar, (None, None))[1] + print(' config : %s' % (convert(v),)) + + def dump_variables(self): + for name in self._conf_keys: + self.dump_variable(name) + + def __getattr__(self, name): + try: + conf_desc = self._conf_keys[name] + except KeyError: + raise AttributeError( + f"'EnvironmentConfig' object has no attribute '{name}'" + ) from None + + return self._get_var(name, conf_desc) + + def get(self, name, default=None): + try: + conf_desc = self._conf_keys[name] + except KeyError: + return default + var = self._get_var(name, conf_desc) + if var is None: + var = default + return var + + def _get_var(self, name, conf_desc): + hook, envvar, confvar, convert, append = conf_desc + if convert is None: + convert = lambda x: x + var = self._hook_handler(name, hook) + if envvar is not None: + envvar_contents = os.environ.get(envvar) + if envvar_contents is not None: + envvar_contents = convert(envvar_contents) + if var and append: + if os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '1') == '1': + var.extend(envvar_contents) + else: + # NPY_DISTUTILS_APPEND_FLAGS was explicitly set to 0 + # to keep old (overwrite flags rather than append to + # them) behavior + var = envvar_contents + else: + var = envvar_contents + if confvar is not None and self._conf: + if confvar in self._conf: + source, confvar_contents = self._conf[confvar] + var = convert(confvar_contents) + return var + + + def clone(self, hook_handler): + ec = self.__class__(distutils_section=self._distutils_section, + **self._conf_keys) + ec._hook_handler = hook_handler + return ec + + def use_distribution(self, dist): + if isinstance(dist, Distribution): + self._conf = dist.get_option_dict(self._distutils_section) + else: + self._conf = dist diff --git a/MLPY/Lib/site-packages/numpy/distutils/fcompiler/fujitsu.py b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/fujitsu.py new file mode 100644 index 0000000000000000000000000000000000000000..21562509368738567ad339d092155fe40672132e --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/fujitsu.py @@ -0,0 +1,46 @@ +""" +fujitsu + +Supports Fujitsu compiler function. +This compiler is developed by Fujitsu and is used in A64FX on Fugaku. 
+""" +from numpy.distutils.fcompiler import FCompiler + +compilers = ['FujitsuFCompiler'] + +class FujitsuFCompiler(FCompiler): + compiler_type = 'fujitsu' + description = 'Fujitsu Fortran Compiler' + + possible_executables = ['frt'] + version_pattern = r'frt \(FRT\) (?P[a-z\d.]+)' + # $ frt --version + # frt (FRT) x.x.x yyyymmdd + + executables = { + 'version_cmd' : ["", "--version"], + 'compiler_f77' : ["frt", "-Fixed"], + 'compiler_fix' : ["frt", "-Fixed"], + 'compiler_f90' : ["frt"], + 'linker_so' : ["frt", "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + pic_flags = ['-KPIC'] + module_dir_switch = '-M' + module_include_switch = '-I' + + def get_flags_opt(self): + return ['-O3'] + def get_flags_debug(self): + return ['-g'] + def runtime_library_dir_option(self, dir): + return f'-Wl,-rpath={dir}' + def get_libraries(self): + return ['fj90f', 'fj90i', 'fjsrcinfo'] + +if __name__ == '__main__': + from distutils import log + from numpy.distutils import customized_fcompiler + log.set_verbosity(2) + print(customized_fcompiler('fujitsu').get_version()) diff --git a/MLPY/Lib/site-packages/numpy/distutils/fcompiler/g95.py b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/g95.py new file mode 100644 index 0000000000000000000000000000000000000000..847cb0c04bad6000908d01e7911f8fb9ac76c027 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/g95.py @@ -0,0 +1,42 @@ +# http://g95.sourceforge.net/ +from numpy.distutils.fcompiler import FCompiler + +compilers = ['G95FCompiler'] + +class G95FCompiler(FCompiler): + compiler_type = 'g95' + description = 'G95 Fortran Compiler' + +# version_pattern = r'G95 \((GCC (?P[\d.]+)|.*?) \(g95!\) (?P.*)\).*' + # $ g95 --version + # G95 (GCC 4.0.3 (g95!) May 22 2006) + + version_pattern = r'G95 \((GCC (?P[\d.]+)|.*?) \(g95 (?P.*)!\) (?P.*)\).*' + # $ g95 --version + # G95 (GCC 4.0.3 (g95 0.90!) 
Aug 22 2006) + + executables = { + 'version_cmd' : ["<F90>", "--version"], + 'compiler_f77' : ["g95", "-ffixed-form"], + 'compiler_fix' : ["g95", "-ffixed-form"], + 'compiler_f90' : ["g95"], + 'linker_so' : ["<F90>", "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + pic_flags = ['-fpic'] + module_dir_switch = '-fmod=' + module_include_switch = '-I' + + def get_flags(self): + return ['-fno-second-underscore'] + def get_flags_opt(self): + return ['-O'] + def get_flags_debug(self): + return ['-g'] + +if __name__ == '__main__': + from distutils import log + from numpy.distutils import customized_fcompiler + log.set_verbosity(2) + print(customized_fcompiler('g95').get_version()) diff --git a/MLPY/Lib/site-packages/numpy/distutils/fcompiler/gnu.py b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/gnu.py new file mode 100644 index 0000000000000000000000000000000000000000..b0337829920d025b02b775d0a3dd73e2f91d1355 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/gnu.py @@ -0,0 +1,550 @@ +import re +import os +import sys +import warnings +import platform +import tempfile +import hashlib +import base64 +import subprocess +from subprocess import Popen, PIPE, STDOUT +from numpy.distutils.exec_command import filepath_from_subprocess_output +from numpy.distutils.fcompiler import FCompiler +from distutils.version import LooseVersion + +compilers = ['GnuFCompiler', 'Gnu95FCompiler'] + +TARGET_R = re.compile(r"Target: ([a-zA-Z0-9_\-]*)") + +# XXX: handle cross compilation + + +def is_win64(): + return sys.platform == "win32" and platform.architecture()[0] == "64bit" + + +class GnuFCompiler(FCompiler): + compiler_type = 'gnu' + compiler_aliases = ('g77', ) + description = 'GNU Fortran 77 compiler' + + def gnu_version_match(self, version_string): + """Handle the different versions of GNU fortran compilers""" + # Strip warning(s) that may be emitted by gfortran + while version_string.startswith('gfortran: warning'): + version_string =\ + version_string[version_string.find('\n') + 1:].strip() + + # Gfortran versions from after 2010 will output a simple string + # (usually "x.y", "x.y.z" or "x.y.z-q") for ``-dumpversion``; older + # gfortrans may still return long version strings (``-dumpversion`` was + # an alias for ``--version``) + if len(version_string) <= 20: + # Try to find a valid version string + m = re.search(r'([0-9.]+)', version_string) + if m: + # g77 provides a longer version string that starts with GNU + # Fortran + if version_string.startswith('GNU Fortran'): + return ('g77', m.group(1)) + + # gfortran only outputs a version string such as #.#.#, so check + # if the match is at the start of the string + elif m.start() == 0: + return ('gfortran', m.group(1)) + else: + # Output probably from --version, try harder: + m = re.search(r'GNU Fortran\s+95.*?([0-9-.]+)', version_string) + if m: + return ('gfortran', m.group(1)) + m = re.search( + r'GNU Fortran.*?\-?([0-9-.]+\.[0-9-.]+)', version_string) + if m: + v = m.group(1) + if v.startswith('0') or v.startswith('2') or v.startswith('3'): + # the '0' is for early g77's + return ('g77', v) + else: + # at some point in the 4.x series, the ' 95' was dropped + # from the version string + return ('gfortran', v) + + # If still nothing, raise an error to make the problem easy to find.
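+ # Illustrative inputs for the branches above (editorial note, not
+ # upstream code):
+ #   "4.8.5"                           -> ('gfortran', '4.8.5')  via -dumpversion
+ #   "GNU Fortran 95 (GCC) 4.1.2 ..."  -> ('gfortran', '4.1.2')
+ #   "GNU Fortran (GCC) 3.3.3 ..."     -> ('g77', '3.3.3')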
+ err = 'A valid Fortran version was not found in this string:\n' + raise ValueError(err + version_string) + + def version_match(self, version_string): + v = self.gnu_version_match(version_string) + if not v or v[0] != 'g77': + return None + return v[1] + + possible_executables = ['g77', 'f77'] + executables = { + 'version_cmd' : [None, "-dumpversion"], + 'compiler_f77' : [None, "-g", "-Wall", "-fno-second-underscore"], + 'compiler_f90' : None, # Use --fcompiler=gnu95 for f90 codes + 'compiler_fix' : None, + 'linker_so' : [None, "-g", "-Wall"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"], + 'linker_exe' : [None, "-g", "-Wall"] + } + module_dir_switch = None + module_include_switch = None + + # Cygwin: f771: warning: -fPIC ignored for target (all code is + # position independent) + if os.name != 'nt' and sys.platform != 'cygwin': + pic_flags = ['-fPIC'] + + # use -mno-cygwin for g77 when Python is not Cygwin-Python + if sys.platform == 'win32': + for key in ['version_cmd', 'compiler_f77', 'linker_so', 'linker_exe']: + executables[key].append('-mno-cygwin') + + g2c = 'g2c' + suggested_f90_compiler = 'gnu95' + + def get_flags_linker_so(self): + opt = self.linker_so[1:] + if sys.platform == 'darwin': + target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', None) + # If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value + # and leave it alone. But, distutils will complain if the + # environment's value is different from the one in the Python + # Makefile used to build Python. We let disutils handle this + # error checking. + if not target: + # If MACOSX_DEPLOYMENT_TARGET is not set in the environment, + # we try to get it first from sysconfig and then + # fall back to setting it to 10.9 This is a reasonable default + # even when using the official Python dist and those derived + # from it. + import sysconfig + target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET') + if not target: + target = '10.9' + s = f'Env. variable MACOSX_DEPLOYMENT_TARGET set to {target}' + warnings.warn(s, stacklevel=2) + os.environ['MACOSX_DEPLOYMENT_TARGET'] = str(target) + opt.extend(['-undefined', 'dynamic_lookup', '-bundle']) + else: + opt.append("-shared") + if sys.platform.startswith('sunos'): + # SunOS often has dynamically loaded symbols defined in the + # static library libg2c.a The linker doesn't like this. To + # ignore the problem, use the -mimpure-text flag. It isn't + # the safest thing, but seems to work. 'man gcc' says: + # ".. Instead of using -mimpure-text, you should compile all + # source code with -fpic or -fPIC." 
+ opt.append('-mimpure-text') + return opt + + def get_libgcc_dir(self): + try: + output = subprocess.check_output(self.compiler_f77 + + ['-print-libgcc-file-name']) + except (OSError, subprocess.CalledProcessError): + pass + else: + output = filepath_from_subprocess_output(output) + return os.path.dirname(output) + return None + + def get_libgfortran_dir(self): + if sys.platform[:5] == 'linux': + libgfortran_name = 'libgfortran.so' + elif sys.platform == 'darwin': + libgfortran_name = 'libgfortran.dylib' + else: + libgfortran_name = None + + libgfortran_dir = None + if libgfortran_name: + find_lib_arg = ['-print-file-name={0}'.format(libgfortran_name)] + try: + output = subprocess.check_output( + self.compiler_f77 + find_lib_arg) + except (OSError, subprocess.CalledProcessError): + pass + else: + output = filepath_from_subprocess_output(output) + libgfortran_dir = os.path.dirname(output) + return libgfortran_dir + + def get_library_dirs(self): + opt = [] + if sys.platform[:5] != 'linux': + d = self.get_libgcc_dir() + if d: + # if windows and not cygwin, libg2c lies in a different folder + if sys.platform == 'win32' and not d.startswith('/usr/lib'): + d = os.path.normpath(d) + path = os.path.join(d, "lib%s.a" % self.g2c) + if not os.path.exists(path): + root = os.path.join(d, *((os.pardir, ) * 4)) + d2 = os.path.abspath(os.path.join(root, 'lib')) + path = os.path.join(d2, "lib%s.a" % self.g2c) + if os.path.exists(path): + opt.append(d2) + opt.append(d) + # For Macports / Linux, libgfortran and libgcc are not co-located + lib_gfortran_dir = self.get_libgfortran_dir() + if lib_gfortran_dir: + opt.append(lib_gfortran_dir) + return opt + + def get_libraries(self): + opt = [] + d = self.get_libgcc_dir() + if d is not None: + g2c = self.g2c + '-pic' + f = self.static_lib_format % (g2c, self.static_lib_extension) + if not os.path.isfile(os.path.join(d, f)): + g2c = self.g2c + else: + g2c = self.g2c + + if g2c is not None: + opt.append(g2c) + c_compiler = self.c_compiler + if sys.platform == 'win32' and c_compiler and \ + c_compiler.compiler_type == 'msvc': + opt.append('gcc') + if sys.platform == 'darwin': + opt.append('cc_dynamic') + return opt + + def get_flags_debug(self): + return ['-g'] + + def get_flags_opt(self): + v = self.get_version() + if v and v <= '3.3.3': + # With this compiler version building Fortran BLAS/LAPACK + # with -O3 caused failures in lib.lapack heevr,syevr tests. 
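+ # (Editorial note) i.e. gfortran <= 3.3.3 gets ['-O2', '-funroll-loops'],
+ # while any newer release gets ['-O3', '-funroll-loops'].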
+ opt = ['-O2'] + else: + opt = ['-O3'] + opt.append('-funroll-loops') + return opt + + def _c_arch_flags(self): + """ Return detected arch flags from CFLAGS """ + import sysconfig + try: + cflags = sysconfig.get_config_vars()['CFLAGS'] + except KeyError: + return [] + arch_re = re.compile(r"-arch\s+(\w+)") + arch_flags = [] + for arch in arch_re.findall(cflags): + arch_flags += ['-arch', arch] + return arch_flags + + def get_flags_arch(self): + return [] + + def runtime_library_dir_option(self, dir): + if sys.platform == 'win32': + # Linux/Solaris/Unix support RPATH, Windows does not + raise NotImplementedError + + # TODO: could use -Xlinker here, if it's supported + assert "," not in dir + + if sys.platform == 'darwin': + return f'-Wl,-rpath,{dir}' + elif sys.platform[:3] == 'aix': + # AIX RPATH is called LIBPATH + return f'-Wl,-blibpath:{dir}' + else: + return f'-Wl,-rpath={dir}' + + +class Gnu95FCompiler(GnuFCompiler): + compiler_type = 'gnu95' + compiler_aliases = ('gfortran', ) + description = 'GNU Fortran 95 compiler' + + def version_match(self, version_string): + v = self.gnu_version_match(version_string) + if not v or v[0] != 'gfortran': + return None + v = v[1] + if LooseVersion(v) >= "4": + # gcc-4 series releases do not support -mno-cygwin option + pass + else: + # use -mno-cygwin flag for gfortran when Python is not + # Cygwin-Python + if sys.platform == 'win32': + for key in [ + 'version_cmd', 'compiler_f77', 'compiler_f90', + 'compiler_fix', 'linker_so', 'linker_exe' + ]: + self.executables[key].append('-mno-cygwin') + return v + + possible_executables = ['gfortran', 'f95'] + executables = { + 'version_cmd' : ["<F90>", "-dumpversion"], + 'compiler_f77' : [None, "-Wall", "-g", "-ffixed-form", + "-fno-second-underscore"], + 'compiler_f90' : [None, "-Wall", "-g", + "-fno-second-underscore"], + 'compiler_fix' : [None, "-Wall", "-g","-ffixed-form", + "-fno-second-underscore"], + 'linker_so' : ["<F90>", "-Wall", "-g"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"], + 'linker_exe' : [None, "-Wall"] + } + + module_dir_switch = '-J' + module_include_switch = '-I' + + if sys.platform[:3] == 'aix': + executables['linker_so'].append('-lpthread') + if platform.architecture()[0][:2] == '64': + for key in ['compiler_f77', 'compiler_f90','compiler_fix','linker_so', 'linker_exe']: + executables[key].append('-maix64') + + g2c = 'gfortran' + + def _universal_flags(self, cmd): + """Return a list of -arch flags for every supported architecture.""" + if not sys.platform == 'darwin': + return [] + arch_flags = [] + # get arches the C compiler gets.
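+ # (Editorial sketch) on a universal-binary Python, CFLAGS typically
+ # carries pairs such as "-arch i386 -arch x86_64"; the lines below keep
+ # only those arches that the Fortran compiler can also target, as probed
+ # by _can_target() at the bottom of this file.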
+ c_archs = self._c_arch_flags() + if "i386" in c_archs: + c_archs[c_archs.index("i386")] = "i686" + # check the arches the Fortran compiler supports, and compare with + # arch flags from C compiler + for arch in ["ppc", "i686", "x86_64", "ppc64"]: + if _can_target(cmd, arch) and arch in c_archs: + arch_flags.extend(["-arch", arch]) + return arch_flags + + def get_flags(self): + flags = GnuFCompiler.get_flags(self) + arch_flags = self._universal_flags(self.compiler_f90) + if arch_flags: + flags[:0] = arch_flags + return flags + + def get_flags_linker_so(self): + flags = GnuFCompiler.get_flags_linker_so(self) + arch_flags = self._universal_flags(self.linker_so) + if arch_flags: + flags[:0] = arch_flags + return flags + + def get_library_dirs(self): + opt = GnuFCompiler.get_library_dirs(self) + if sys.platform == 'win32': + c_compiler = self.c_compiler + if c_compiler and c_compiler.compiler_type == "msvc": + target = self.get_target() + if target: + d = os.path.normpath(self.get_libgcc_dir()) + root = os.path.join(d, *((os.pardir, ) * 4)) + path = os.path.join(root, "lib") + mingwdir = os.path.normpath(path) + if os.path.exists(os.path.join(mingwdir, "libmingwex.a")): + opt.append(mingwdir) + # For Macports / Linux, libgfortran and libgcc are not co-located + lib_gfortran_dir = self.get_libgfortran_dir() + if lib_gfortran_dir: + opt.append(lib_gfortran_dir) + return opt + + def get_libraries(self): + opt = GnuFCompiler.get_libraries(self) + if sys.platform == 'darwin': + opt.remove('cc_dynamic') + if sys.platform == 'win32': + c_compiler = self.c_compiler + if c_compiler and c_compiler.compiler_type == "msvc": + if "gcc" in opt: + i = opt.index("gcc") + opt.insert(i + 1, "mingwex") + opt.insert(i + 1, "mingw32") + c_compiler = self.c_compiler + if c_compiler and c_compiler.compiler_type == "msvc": + return [] + else: + pass + return opt + + def get_target(self): + try: + output = subprocess.check_output(self.compiler_f77 + ['-v']) + except (OSError, subprocess.CalledProcessError): + pass + else: + output = filepath_from_subprocess_output(output) + m = TARGET_R.search(output) + if m: + return m.group(1) + return "" + + def _hash_files(self, filenames): + h = hashlib.sha1() + for fn in filenames: + with open(fn, 'rb') as f: + while True: + block = f.read(131072) + if not block: + break + h.update(block) + text = base64.b32encode(h.digest()) + text = text.decode('ascii') + return text.rstrip('=') + + def _link_wrapper_lib(self, objects, output_dir, extra_dll_dir, + chained_dlls, is_archive): + """Create a wrapper shared library for the given objects + + Return an MSVC-compatible lib + """ + + c_compiler = self.c_compiler + if c_compiler.compiler_type != "msvc": + raise ValueError("This method only supports MSVC") + + object_hash = self._hash_files(list(objects) + list(chained_dlls)) + + if is_win64(): + tag = 'win_amd64' + else: + tag = 'win32' + + basename = 'lib' + os.path.splitext( + os.path.basename(objects[0]))[0][:8] + root_name = basename + '.' 
+ object_hash + '.gfortran-' + tag + dll_name = root_name + '.dll' + def_name = root_name + '.def' + lib_name = root_name + '.lib' + dll_path = os.path.join(extra_dll_dir, dll_name) + def_path = os.path.join(output_dir, def_name) + lib_path = os.path.join(output_dir, lib_name) + + if os.path.isfile(lib_path): + # Nothing to do + return lib_path, dll_path + + if is_archive: + objects = (["-Wl,--whole-archive"] + list(objects) + + ["-Wl,--no-whole-archive"]) + self.link_shared_object( + objects, + dll_name, + output_dir=extra_dll_dir, + extra_postargs=list(chained_dlls) + [ + '-Wl,--allow-multiple-definition', + '-Wl,--output-def,' + def_path, + '-Wl,--export-all-symbols', + '-Wl,--enable-auto-import', + '-static', + '-mlong-double-64', + ]) + + # No PowerPC! + if is_win64(): + specifier = '/MACHINE:X64' + else: + specifier = '/MACHINE:X86' + + # MSVC specific code + lib_args = ['/def:' + def_path, '/OUT:' + lib_path, specifier] + if not c_compiler.initialized: + c_compiler.initialize() + c_compiler.spawn([c_compiler.lib] + lib_args) + + return lib_path, dll_path + + def can_ccompiler_link(self, compiler): + # MSVC cannot link objects compiled by GNU fortran + return compiler.compiler_type not in ("msvc", ) + + def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir): + """ + Convert a set of object files that are not compatible with the default + linker, to a file that is compatible. + """ + if self.c_compiler.compiler_type == "msvc": + # Compile a DLL and return the lib for the DLL as + # the object. Also keep track of previous DLLs that + # we have compiled so that we can link against them. + + # If there are .a archives, assume they are self-contained + # static libraries, and build separate DLLs for each + archives = [] + plain_objects = [] + for obj in objects: + if obj.lower().endswith('.a'): + archives.append(obj) + else: + plain_objects.append(obj) + + chained_libs = [] + chained_dlls = [] + for archive in archives[::-1]: + lib, dll = self._link_wrapper_lib( + [archive], + output_dir, + extra_dll_dir, + chained_dlls=chained_dlls, + is_archive=True) + chained_libs.insert(0, lib) + chained_dlls.insert(0, dll) + + if not plain_objects: + return chained_libs + + lib, dll = self._link_wrapper_lib( + plain_objects, + output_dir, + extra_dll_dir, + chained_dlls=chained_dlls, + is_archive=False) + return [lib] + chained_libs + else: + raise ValueError("Unsupported C compiler") + + +def _can_target(cmd, arch): + """Return true if the architecture supports the -arch flag""" + newcmd = cmd[:] + fid, filename = tempfile.mkstemp(suffix=".f") + os.close(fid) + try: + d = os.path.dirname(filename) + output = os.path.splitext(filename)[0] + ".o" + try: + newcmd.extend(["-arch", arch, "-c", filename]) + p = Popen(newcmd, stderr=STDOUT, stdout=PIPE, cwd=d) + p.communicate() + return p.returncode == 0 + finally: + if os.path.exists(output): + os.remove(output) + finally: + os.remove(filename) + return False + + +if __name__ == '__main__': + from distutils import log + from numpy.distutils import customized_fcompiler + log.set_verbosity(2) + + print(customized_fcompiler('gnu').get_version()) + try: + print(customized_fcompiler('g95').get_version()) + except Exception as e: + print(e) diff --git a/MLPY/Lib/site-packages/numpy/distutils/fcompiler/hpux.py b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/hpux.py new file mode 100644 index 0000000000000000000000000000000000000000..66ad243c3cb5b54d78392eda27a58a332f397f0f --- /dev/null +++ 
b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/hpux.py @@ -0,0 +1,41 @@ +from numpy.distutils.fcompiler import FCompiler + +compilers = ['HPUXFCompiler'] + +class HPUXFCompiler(FCompiler): + + compiler_type = 'hpux' + description = 'HP Fortran 90 Compiler' + version_pattern = r'HP F90 (?P<version>[^\s*,]*)' + + executables = { + 'version_cmd' : ["f90", "+version"], + 'compiler_f77' : ["f90"], + 'compiler_fix' : ["f90"], + 'compiler_f90' : ["f90"], + 'linker_so' : ["ld", "-b"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + module_dir_switch = None #XXX: fix me + module_include_switch = None #XXX: fix me + pic_flags = ['+Z'] + def get_flags(self): + return self.pic_flags + ['+ppu', '+DD64'] + def get_flags_opt(self): + return ['-O3'] + def get_libraries(self): + return ['m'] + def get_library_dirs(self): + opt = ['/usr/lib/hpux64'] + return opt + def get_version(self, force=0, ok_status=[256, 0, 1]): + # XXX status==256 may indicate 'unrecognized option' or + # 'no input file'. So, version_cmd needs more work. + return FCompiler.get_version(self, force, ok_status) + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(10) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='hpux').get_version()) diff --git a/MLPY/Lib/site-packages/numpy/distutils/fcompiler/ibm.py b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/ibm.py new file mode 100644 index 0000000000000000000000000000000000000000..2060bfcb0a0c7fe4e9abe05c9f818777640c1b4e --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/ibm.py @@ -0,0 +1,97 @@ +import os +import re +import sys +import subprocess + +from numpy.distutils.fcompiler import FCompiler +from numpy.distutils.exec_command import find_executable +from numpy.distutils.misc_util import make_temp_file +from distutils import log + +compilers = ['IBMFCompiler'] + +class IBMFCompiler(FCompiler): + compiler_type = 'ibm' + description = 'IBM XL Fortran Compiler' + version_pattern = r'(xlf\(1\)\s*|)IBM XL Fortran ((Advanced Edition |)Version |Enterprise Edition V|for AIX, V)(?P<version>[^\s*]*)' + #IBM XL Fortran Enterprise Edition V10.1 for AIX \nVersion: 10.01.0000.0004 + + executables = { + 'version_cmd' : ["<F77>", "-qversion"], + 'compiler_f77' : ["xlf"], + 'compiler_fix' : ["xlf90", "-qfixed"], + 'compiler_f90' : ["xlf90"], + 'linker_so' : ["xlf95"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + def get_version(self,*args,**kwds): + version = FCompiler.get_version(self,*args,**kwds) + + if version is None and sys.platform.startswith('aix'): + # use lslpp to find out xlf version + lslpp = find_executable('lslpp') + xlf = find_executable('xlf') + if os.path.exists(xlf) and os.path.exists(lslpp): + try: + o = subprocess.check_output([lslpp, '-Lc', 'xlfcmp']) + except (OSError, subprocess.CalledProcessError): + pass + else: + m = re.search(r'xlfcmp:(?P<version>\d+([.]\d+)+)', o) + if m: version = m.group('version') + + xlf_dir = '/etc/opt/ibmcmp/xlf' + if version is None and os.path.isdir(xlf_dir): + # linux: + # If the output of xlf does not contain version info + # (that's the case with xlf 8.1, for instance) then + # let's try another method: + l = sorted(os.listdir(xlf_dir)) + l.reverse() + l = [d for d in l if os.path.isfile(os.path.join(xlf_dir, d, 'xlf.cfg'))] + if l: + from distutils.version import LooseVersion + self.version = version = LooseVersion(l[0]) + return version + + def get_flags(self): + return ['-qextname'] + + def get_flags_debug(self): + return ['-g'] + + def
get_flags_linker_so(self): + opt = [] + if sys.platform=='darwin': + opt.append('-Wl,-bundle,-flat_namespace,-undefined,suppress') + else: + opt.append('-bshared') + version = self.get_version(ok_status=[0, 40]) + if version is not None: + if sys.platform.startswith('aix'): + xlf_cfg = '/etc/xlf.cfg' + else: + xlf_cfg = '/etc/opt/ibmcmp/xlf/%s/xlf.cfg' % version + fo, new_cfg = make_temp_file(suffix='_xlf.cfg') + log.info('Creating '+new_cfg) + with open(xlf_cfg, 'r') as fi: + crt1_match = re.compile(r'\s*crt\s*=\s*(?P<path>.*)/crt1.o').match + for line in fi: + m = crt1_match(line) + if m: + fo.write('crt = %s/bundle1.o\n' % (m.group('path'))) + else: + fo.write(line) + fo.close() + opt.append('-F'+new_cfg) + return opt + + def get_flags_opt(self): + return ['-O3'] + +if __name__ == '__main__': + from numpy.distutils import customized_fcompiler + log.set_verbosity(2) + print(customized_fcompiler(compiler='ibm').get_version()) diff --git a/MLPY/Lib/site-packages/numpy/distutils/fcompiler/intel.py b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/intel.py new file mode 100644 index 0000000000000000000000000000000000000000..af7e104e324fa12acc40a6073d51dc194873772c --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/intel.py @@ -0,0 +1,210 @@ +# http://developer.intel.com/software/products/compilers/flin/ +import sys + +from numpy.distutils.ccompiler import simple_version_match +from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file + +compilers = ['IntelFCompiler', 'IntelVisualFCompiler', + 'IntelItaniumFCompiler', 'IntelItaniumVisualFCompiler', + 'IntelEM64VisualFCompiler', 'IntelEM64TFCompiler'] + + +def intel_version_match(type): + # Match against the important stuff in the version string + return simple_version_match(start=r'Intel.*?Fortran.*?(?:%s).*?Version' % (type,)) + + +class BaseIntelFCompiler(FCompiler): + def update_executables(self): + f = dummy_fortran_file() + self.executables['version_cmd'] = ['<FC>', '-FI', '-V', '-c', + f + '.f', '-o', f + '.o'] + + def runtime_library_dir_option(self, dir): + # TODO: could use -Xlinker here, if it's supported + assert "," not in dir + + return '-Wl,-rpath=%s' % dir + + +class IntelFCompiler(BaseIntelFCompiler): + + compiler_type = 'intel' + compiler_aliases = ('ifort',) + description = 'Intel Fortran Compiler for 32-bit apps' + version_match = intel_version_match('32-bit|IA-32') + + possible_executables = ['ifort', 'ifc'] + + executables = { + 'version_cmd' : None, # set by update_executables + 'compiler_f77' : [None, "-72", "-w90", "-w95"], + 'compiler_f90' : [None], + 'compiler_fix' : [None, "-FI"], + 'linker_so' : ["<F90>", "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + pic_flags = ['-fPIC'] + module_dir_switch = '-module ' # Don't remove ending space!
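+ # (Editorial note) a trailing space in module_dir_switch makes
+ # numpy.distutils emit the switch and the directory as two separate
+ # arguments, roughly: ifort -fPIC -module <build_dir> -I<include_dir> -c x.f90;
+ # without the space the switch and directory would be fused into one token.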
+ module_include_switch = '-I' + + def get_flags_free(self): + return ['-FR'] + + def get_flags(self): + return ['-fPIC'] + + def get_flags_opt(self): # Scipy test failures with -O2 + v = self.get_version() + mpopt = 'openmp' if v and v < '15' else 'qopenmp' + return ['-fp-model', 'strict', '-O1', + '-assume', 'minus0', '-{}'.format(mpopt)] + + def get_flags_arch(self): + return [] + + def get_flags_linker_so(self): + opt = FCompiler.get_flags_linker_so(self) + v = self.get_version() + if v and v >= '8.0': + opt.append('-nofor_main') + if sys.platform == 'darwin': + # Here, it's -dynamiclib + try: + idx = opt.index('-shared') + opt.remove('-shared') + except ValueError: + idx = 0 + opt[idx:idx] = ['-dynamiclib', '-Wl,-undefined,dynamic_lookup'] + return opt + + +class IntelItaniumFCompiler(IntelFCompiler): + compiler_type = 'intele' + compiler_aliases = () + description = 'Intel Fortran Compiler for Itanium apps' + + version_match = intel_version_match('Itanium|IA-64') + + possible_executables = ['ifort', 'efort', 'efc'] + + executables = { + 'version_cmd' : None, + 'compiler_f77' : [None, "-FI", "-w90", "-w95"], + 'compiler_fix' : [None, "-FI"], + 'compiler_f90' : [None], + 'linker_so' : ['<F90>', "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + +class IntelEM64TFCompiler(IntelFCompiler): + compiler_type = 'intelem' + compiler_aliases = () + description = 'Intel Fortran Compiler for 64-bit apps' + + version_match = intel_version_match('EM64T-based|Intel\\(R\\) 64|64|IA-64|64-bit') + + possible_executables = ['ifort', 'efort', 'efc'] + + executables = { + 'version_cmd' : None, + 'compiler_f77' : [None, "-FI"], + 'compiler_fix' : [None, "-FI"], + 'compiler_f90' : [None], + 'linker_so' : ['<F90>', "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + +# Is there no difference in the version string between the above compilers +# and the Visual compilers? + + +class IntelVisualFCompiler(BaseIntelFCompiler): + compiler_type = 'intelv' + description = 'Intel Visual Fortran Compiler for 32-bit apps' + version_match = intel_version_match('32-bit|IA-32') + + def update_executables(self): + f = dummy_fortran_file() + self.executables['version_cmd'] = ['<FC>', '/FI', '/c', + f + '.f', '/o', f + '.o'] + + ar_exe = 'lib.exe' + possible_executables = ['ifort', 'ifl'] + + executables = { + 'version_cmd' : None, + 'compiler_f77' : [None], + 'compiler_fix' : [None], + 'compiler_f90' : [None], + 'linker_so' : [None], + 'archiver' : [ar_exe, "/verbose", "/OUT:"], + 'ranlib' : None + } + + compile_switch = '/c ' + object_switch = '/Fo' # No space after /Fo! + library_switch = '/OUT:' # No space after /OUT:!
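+ # (Editorial note) conversely, switches written without a trailing space,
+ # e.g. '/Fo' and '/OUT:', are fused directly to their argument, producing
+ # single tokens such as /OUT:x.lib.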
+ module_dir_switch = '/module:' # No space after /module: + module_include_switch = '/I' + + def get_flags(self): + opt = ['/nologo', '/MD', '/nbs', '/names:lowercase', '/assume:underscore'] + return opt + + def get_flags_free(self): + return [] + + def get_flags_debug(self): + return ['/4Yb', '/d2'] + + def get_flags_opt(self): + return ['/O1', '/assume:minus0'] # Scipy test failures with /O2 + + def get_flags_arch(self): + return ["/arch:IA32", "/QaxSSE3"] + + def runtime_library_dir_option(self, dir): + raise NotImplementedError + + +class IntelItaniumVisualFCompiler(IntelVisualFCompiler): + compiler_type = 'intelev' + description = 'Intel Visual Fortran Compiler for Itanium apps' + + version_match = intel_version_match('Itanium') + + possible_executables = ['efl'] # XXX this is a wild guess + ar_exe = IntelVisualFCompiler.ar_exe + + executables = { + 'version_cmd' : None, + 'compiler_f77' : [None, "-FI", "-w90", "-w95"], + 'compiler_fix' : [None, "-FI", "-4L72", "-w"], + 'compiler_f90' : [None], + 'linker_so' : ['<F90>', "-shared"], + 'archiver' : [ar_exe, "/verbose", "/OUT:"], + 'ranlib' : None + } + + +class IntelEM64VisualFCompiler(IntelVisualFCompiler): + compiler_type = 'intelvem' + description = 'Intel Visual Fortran Compiler for 64-bit apps' + + version_match = simple_version_match(start=r'Intel\(R\).*?64,') + + def get_flags_arch(self): + return [] + + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='intel').get_version()) diff --git a/MLPY/Lib/site-packages/numpy/distutils/fcompiler/lahey.py b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/lahey.py new file mode 100644 index 0000000000000000000000000000000000000000..d99c25fb1c1bb71847c929e1b46d36f879bc3fc5 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/lahey.py @@ -0,0 +1,45 @@ +import os + +from numpy.distutils.fcompiler import FCompiler + +compilers = ['LaheyFCompiler'] + +class LaheyFCompiler(FCompiler): + + compiler_type = 'lahey' + description = 'Lahey/Fujitsu Fortran 95 Compiler' + version_pattern = r'Lahey/Fujitsu Fortran 95 Compiler Release (?P<version>[^\s*]*)' + + executables = { + 'version_cmd' : ["<F90>", "--version"], + 'compiler_f77' : ["lf95", "--fix"], + 'compiler_fix' : ["lf95", "--fix"], + 'compiler_f90' : ["lf95"], + 'linker_so' : ["lf95", "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + module_dir_switch = None #XXX Fix me + module_include_switch = None #XXX Fix me + + def get_flags_opt(self): + return ['-O'] + def get_flags_debug(self): + return ['-g', '--chk', '--chkglobal'] + def get_library_dirs(self): + opt = [] + d = os.environ.get('LAHEY') + if d: + opt.append(os.path.join(d, 'lib')) + return opt + def get_libraries(self): + opt = [] + opt.extend(['fj9f6', 'fj9i6', 'fj9ipp', 'fj9e6']) + return opt + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='lahey').get_version()) diff --git a/MLPY/Lib/site-packages/numpy/distutils/fcompiler/mips.py b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/mips.py new file mode 100644 index 0000000000000000000000000000000000000000..67d2f8908c001d71c2f1aaf03cb84a304880b455 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/mips.py @@ -0,0 +1,54 @@ +from numpy.distutils.cpuinfo import cpu +from numpy.distutils.fcompiler import FCompiler + +compilers = ['MIPSFCompiler'] + +class
MIPSFCompiler(FCompiler): + + compiler_type = 'mips' + description = 'MIPSpro Fortran Compiler' + version_pattern = r'MIPSpro Compilers: Version (?P<version>[^\s*,]*)' + + executables = { + 'version_cmd' : ["<F90>", "-version"], + 'compiler_f77' : ["f77", "-f77"], + 'compiler_fix' : ["f90", "-fixedform"], + 'compiler_f90' : ["f90"], + 'linker_so' : ["f90", "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : None + } + module_dir_switch = None #XXX: fix me + module_include_switch = None #XXX: fix me + pic_flags = ['-KPIC'] + + def get_flags(self): + return self.pic_flags + ['-n32'] + def get_flags_opt(self): + return ['-O3'] + def get_flags_arch(self): + opt = [] + for a in '19 20 21 22_4k 22_5k 24 25 26 27 28 30 32_5k 32_10k'.split(): + if getattr(cpu, 'is_IP%s'%a)(): + opt.append('-TARG:platform=IP%s' % a) + break + return opt + def get_flags_arch_f77(self): + r = None + if cpu.is_r10000(): r = 10000 + elif cpu.is_r12000(): r = 12000 + elif cpu.is_r8000(): r = 8000 + elif cpu.is_r5000(): r = 5000 + elif cpu.is_r4000(): r = 4000 + if r is not None: + return ['r%s' % (r)] + return [] + def get_flags_arch_f90(self): + r = self.get_flags_arch_f77() + if r: + r[0] = '-' + r[0] + return r + +if __name__ == '__main__': + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='mips').get_version()) diff --git a/MLPY/Lib/site-packages/numpy/distutils/fcompiler/nag.py b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/nag.py new file mode 100644 index 0000000000000000000000000000000000000000..87e96dc4e95d2f6ae4d76865d27a346d88b0c333 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/nag.py @@ -0,0 +1,82 @@ +import sys +import re +from numpy.distutils.fcompiler import FCompiler + +compilers = ['NAGFCompiler', 'NAGFORCompiler'] + +class BaseNAGFCompiler(FCompiler): + version_pattern = r'NAG.* Release (?P<version>[^(\s]*)' + + def version_match(self, version_string): + m = re.search(self.version_pattern, version_string) + if m: + return m.group('version') + else: + return None + + def get_flags_linker_so(self): + return ["-Wl,-shared"] + def get_flags_opt(self): + return ['-O4'] + def get_flags_arch(self): + return [] + +class NAGFCompiler(BaseNAGFCompiler): + + compiler_type = 'nag' + description = 'NAGWare Fortran 95 Compiler' + + executables = { + 'version_cmd' : ["<F90>", "-V"], + 'compiler_f77' : ["f95", "-fixed"], + 'compiler_fix' : ["f95", "-fixed"], + 'compiler_f90' : ["f95"], + 'linker_so' : ["<F90>"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + def get_flags_linker_so(self): + if sys.platform == 'darwin': + return ['-unsharedf95', '-Wl,-bundle,-flat_namespace,-undefined,suppress'] + return BaseNAGFCompiler.get_flags_linker_so(self) + def get_flags_arch(self): + version = self.get_version() + if version and version < '5.1': + return ['-target=native'] + else: + return BaseNAGFCompiler.get_flags_arch(self) + def get_flags_debug(self): + return ['-g', '-gline', '-g90', '-nan', '-C'] + +class NAGFORCompiler(BaseNAGFCompiler): + + compiler_type = 'nagfor' + description = 'NAG Fortran Compiler' + + executables = { + 'version_cmd' : ["nagfor", "-V"], + 'compiler_f77' : ["nagfor", "-fixed"], + 'compiler_fix' : ["nagfor", "-fixed"], + 'compiler_f90' : ["nagfor"], + 'linker_so' : ["nagfor"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + def get_flags_debug(self): + version = self.get_version() + if version and version > '6.1': + return ['-g', '-u', '-nan', '-C=all', '-thread_safe', + '-kind=unique', '-Warn=allocation', '-Warn=subnormal'] + else: +
return ['-g', '-nan', '-C=all', '-u', '-thread_safe'] + + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + compiler = customized_fcompiler(compiler='nagfor') + print(compiler.get_version()) + print(compiler.get_flags_debug()) diff --git a/MLPY/Lib/site-packages/numpy/distutils/fcompiler/none.py b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/none.py new file mode 100644 index 0000000000000000000000000000000000000000..1279101219b3f259a2a3c7093c1c71e51c3745eb --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/none.py @@ -0,0 +1,28 @@ +from numpy.distutils.fcompiler import FCompiler +from numpy.distutils import customized_fcompiler + +compilers = ['NoneFCompiler'] + +class NoneFCompiler(FCompiler): + + compiler_type = 'none' + description = 'Fake Fortran compiler' + + executables = {'compiler_f77': None, + 'compiler_f90': None, + 'compiler_fix': None, + 'linker_so': None, + 'linker_exe': None, + 'archiver': None, + 'ranlib': None, + 'version_cmd': None, + } + + def find_executables(self): + pass + + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + print(customized_fcompiler(compiler='none').get_version()) diff --git a/MLPY/Lib/site-packages/numpy/distutils/fcompiler/nv.py b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/nv.py new file mode 100644 index 0000000000000000000000000000000000000000..4e8bd07033ec8652af1323fcbaf913e930556050 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/nv.py @@ -0,0 +1,55 @@ +import sys + +from numpy.distutils.fcompiler import FCompiler + +compilers = ['NVHPCFCompiler'] + +class NVHPCFCompiler(FCompiler): + """ NVIDIA High Performance Computing (HPC) SDK Fortran Compiler + + https://developer.nvidia.com/hpc-sdk + + Since august 2020 the NVIDIA HPC SDK includes the compilers formerly known as The Portland Group compilers, + https://www.pgroup.com/index.htm. + See also `numpy.distutils.fcompiler.pg`. 
+ """ + + compiler_type = 'nv' + description = 'NVIDIA HPC SDK' + version_pattern = r'\s*(nvfortran|(pg(f77|f90|fortran)) \(aka nvfortran\)) (?P[\d.-]+).*' + + executables = { + 'version_cmd': ["", "-V"], + 'compiler_f77': ["nvfortran"], + 'compiler_fix': ["nvfortran", "-Mfixed"], + 'compiler_f90': ["nvfortran"], + 'linker_so': [""], + 'archiver': ["ar", "-cr"], + 'ranlib': ["ranlib"] + } + pic_flags = ['-fpic'] + + module_dir_switch = '-module ' + module_include_switch = '-I' + + def get_flags(self): + opt = ['-Minform=inform', '-Mnosecond_underscore'] + return self.pic_flags + opt + + def get_flags_opt(self): + return ['-fast'] + + def get_flags_debug(self): + return ['-g'] + + def get_flags_linker_so(self): + return ["-shared", '-fpic'] + + def runtime_library_dir_option(self, dir): + return '-R%s' % dir + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='nv').get_version()) diff --git a/MLPY/Lib/site-packages/numpy/distutils/fcompiler/pathf95.py b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/pathf95.py new file mode 100644 index 0000000000000000000000000000000000000000..8efc32a90678b3d54d25f5b6cd82528116fb4d03 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/pathf95.py @@ -0,0 +1,33 @@ +from numpy.distutils.fcompiler import FCompiler + +compilers = ['PathScaleFCompiler'] + +class PathScaleFCompiler(FCompiler): + + compiler_type = 'pathf95' + description = 'PathScale Fortran Compiler' + version_pattern = r'PathScale\(TM\) Compiler Suite: Version (?P[\d.]+)' + + executables = { + 'version_cmd' : ["pathf95", "-version"], + 'compiler_f77' : ["pathf95", "-fixedform"], + 'compiler_fix' : ["pathf95", "-fixedform"], + 'compiler_f90' : ["pathf95"], + 'linker_so' : ["pathf95", "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + pic_flags = ['-fPIC'] + module_dir_switch = '-module ' # Don't remove ending space! 
+ module_include_switch = '-I' + + def get_flags_opt(self): + return ['-O3'] + def get_flags_debug(self): + return ['-g'] + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='pathf95').get_version()) diff --git a/MLPY/Lib/site-packages/numpy/distutils/fcompiler/pg.py b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/pg.py new file mode 100644 index 0000000000000000000000000000000000000000..90686849cc6982b244c9de8a2aeee13fdbb51d40 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/pg.py @@ -0,0 +1,128 @@ +# http://www.pgroup.com +import sys + +from numpy.distutils.fcompiler import FCompiler +from sys import platform +from os.path import join, dirname, normpath + +compilers = ['PGroupFCompiler', 'PGroupFlangCompiler'] + + +class PGroupFCompiler(FCompiler): + + compiler_type = 'pg' + description = 'Portland Group Fortran Compiler' + version_pattern = r'\s*pg(f77|f90|hpf|fortran) (?P[\d.-]+).*' + + if platform == 'darwin': + executables = { + 'version_cmd': ["", "-V"], + 'compiler_f77': ["pgfortran", "-dynamiclib"], + 'compiler_fix': ["pgfortran", "-Mfixed", "-dynamiclib"], + 'compiler_f90': ["pgfortran", "-dynamiclib"], + 'linker_so': ["libtool"], + 'archiver': ["ar", "-cr"], + 'ranlib': ["ranlib"] + } + pic_flags = [''] + else: + executables = { + 'version_cmd': ["", "-V"], + 'compiler_f77': ["pgfortran"], + 'compiler_fix': ["pgfortran", "-Mfixed"], + 'compiler_f90': ["pgfortran"], + 'linker_so': [""], + 'archiver': ["ar", "-cr"], + 'ranlib': ["ranlib"] + } + pic_flags = ['-fpic'] + + module_dir_switch = '-module ' + module_include_switch = '-I' + + def get_flags(self): + opt = ['-Minform=inform', '-Mnosecond_underscore'] + return self.pic_flags + opt + + def get_flags_opt(self): + return ['-fast'] + + def get_flags_debug(self): + return ['-g'] + + if platform == 'darwin': + def get_flags_linker_so(self): + return ["-dynamic", '-undefined', 'dynamic_lookup'] + + else: + def get_flags_linker_so(self): + return ["-shared", '-fpic'] + + def runtime_library_dir_option(self, dir): + return '-R%s' % dir + + +import functools + +class PGroupFlangCompiler(FCompiler): + compiler_type = 'flang' + description = 'Portland Group Fortran LLVM Compiler' + version_pattern = r'\s*(flang|clang) version (?P[\d.-]+).*' + + ar_exe = 'lib.exe' + possible_executables = ['flang'] + + executables = { + 'version_cmd': ["", "--version"], + 'compiler_f77': ["flang"], + 'compiler_fix': ["flang"], + 'compiler_f90': ["flang"], + 'linker_so': [None], + 'archiver': [ar_exe, "/verbose", "/OUT:"], + 'ranlib': None + } + + library_switch = '/OUT:' # No space after /OUT:! + module_dir_switch = '-module ' # Don't remove ending space! 
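+    # The archiver here is MSVC's lib.exe, so library_switch uses the
+    # MSVC-style '/OUT:' prefix with no separating space, while Fortran
+    # module handling keeps the GNU-style two-argument '-module ' form.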
+ + def get_libraries(self): + opt = FCompiler.get_libraries(self) + opt.extend(['flang', 'flangrti', 'ompstub']) + return opt + + @functools.lru_cache(maxsize=128) + def get_library_dirs(self): + """List of compiler library directories.""" + opt = FCompiler.get_library_dirs(self) + flang_dir = dirname(self.executables['compiler_f77'][0]) + opt.append(normpath(join(flang_dir, '..', 'lib'))) + + return opt + + def get_flags(self): + return [] + + def get_flags_free(self): + return [] + + def get_flags_debug(self): + return ['-g'] + + def get_flags_opt(self): + return ['-O3'] + + def get_flags_arch(self): + return [] + + def runtime_library_dir_option(self, dir): + raise NotImplementedError + + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + if 'flang' in sys.argv: + print(customized_fcompiler(compiler='flang').get_version()) + else: + print(customized_fcompiler(compiler='pg').get_version()) diff --git a/MLPY/Lib/site-packages/numpy/distutils/fcompiler/sun.py b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/sun.py new file mode 100644 index 0000000000000000000000000000000000000000..621b1cb196ea76622134c75cfa93f6f38775e840 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/sun.py @@ -0,0 +1,51 @@ +from numpy.distutils.ccompiler import simple_version_match +from numpy.distutils.fcompiler import FCompiler + +compilers = ['SunFCompiler'] + +class SunFCompiler(FCompiler): + + compiler_type = 'sun' + description = 'Sun or Forte Fortran 95 Compiler' + # ex: + # f90: Sun WorkShop 6 update 2 Fortran 95 6.2 Patch 111690-10 2003/08/28 + version_match = simple_version_match( + start=r'f9[05]: (Sun|Forte|WorkShop).*Fortran 95') + + executables = { + 'version_cmd' : ["", "-V"], + 'compiler_f77' : ["f90"], + 'compiler_fix' : ["f90", "-fixed"], + 'compiler_f90' : ["f90"], + 'linker_so' : ["", "-Bdynamic", "-G"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + module_dir_switch = '-moddir=' + module_include_switch = '-M' + pic_flags = ['-xcode=pic32'] + + def get_flags_f77(self): + ret = ["-ftrap=%none"] + if (self.get_version() or '') >= '7': + ret.append("-f77") + else: + ret.append("-fixed") + return ret + def get_opt(self): + return ['-fast', '-dalign'] + def get_arch(self): + return ['-xtarget=generic'] + def get_libraries(self): + opt = [] + opt.extend(['fsu', 'sunmath', 'mvec']) + return opt + + def runtime_library_dir_option(self, dir): + return '-R%s' % dir + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='sun').get_version()) diff --git a/MLPY/Lib/site-packages/numpy/distutils/fcompiler/vast.py b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/vast.py new file mode 100644 index 0000000000000000000000000000000000000000..b087e7b67ca67074dedca824821641bbb9d4286b --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/fcompiler/vast.py @@ -0,0 +1,52 @@ +import os + +from numpy.distutils.fcompiler.gnu import GnuFCompiler + +compilers = ['VastFCompiler'] + +class VastFCompiler(GnuFCompiler): + compiler_type = 'vast' + compiler_aliases = () + description = 'Pacific-Sierra Research Fortran 90 Compiler' + version_pattern = (r'\s*Pacific-Sierra Research vf90 ' + r'(Personal|Professional)\s+(?P[^\s]*)') + + # VAST f90 does not support -o with -c. 
So, object files are created + # to the current directory and then moved to build directory + object_switch = ' && function _mvfile { mv -v `basename $1` $1 ; } && _mvfile ' + + executables = { + 'version_cmd' : ["vf90", "-v"], + 'compiler_f77' : ["g77"], + 'compiler_fix' : ["f90", "-Wv,-ya"], + 'compiler_f90' : ["f90"], + 'linker_so' : [""], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + module_dir_switch = None #XXX Fix me + module_include_switch = None #XXX Fix me + + def find_executables(self): + pass + + def get_version_cmd(self): + f90 = self.compiler_f90[0] + d, b = os.path.split(f90) + vf90 = os.path.join(d, 'v'+b) + return vf90 + + def get_flags_arch(self): + vast_version = self.get_version() + gnu = GnuFCompiler() + gnu.customize(None) + self.version = gnu.get_version() + opt = GnuFCompiler.get_flags_arch(self) + self.version = vast_version + return opt + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='vast').get_version()) diff --git a/MLPY/Lib/site-packages/numpy/distutils/from_template.py b/MLPY/Lib/site-packages/numpy/distutils/from_template.py new file mode 100644 index 0000000000000000000000000000000000000000..4788985924065ca171383be72c913d68e56e279c --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/from_template.py @@ -0,0 +1,261 @@ +#!/usr/bin/env python3 +""" + +process_file(filename) + + takes templated file .xxx.src and produces .xxx file where .xxx + is .pyf .f90 or .f using the following template rules: + + '<..>' denotes a template. + + All function and subroutine blocks in a source file with names that + contain '<..>' will be replicated according to the rules in '<..>'. + + The number of comma-separated words in '<..>' will determine the number of + replicates. + + '<..>' may have two different forms, named and short. For example, + + named: + where anywhere inside a block '
<p>
' will be replaced with + 'd', 's', 'z', and 'c' for each replicate of the block. + + <_c> is already defined: <_c=s,d,c,z> + <_t> is already defined: <_t=real,double precision,complex,double complex> + + short: + , a short form of the named, useful when no
<p>
appears inside + a block. + + In general, '<..>' contains a comma separated list of arbitrary + expressions. If these expression must contain a comma|leftarrow|rightarrow, + then prepend the comma|leftarrow|rightarrow with a backslash. + + If an expression matches '\\' then it will be replaced + by -th expression. + + Note that all '<..>' forms in a block must have the same number of + comma-separated entries. + + Predefined named template rules: + + + + + + +""" +__all__ = ['process_str', 'process_file'] + +import os +import sys +import re + +routine_start_re = re.compile(r'(\n|\A)(( (\$|\*))|)\s*(subroutine|function)\b', re.I) +routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)', re.I) +function_start_re = re.compile(r'\n (\$|\*)\s*function\b', re.I) + +def parse_structure(astr): + """ Return a list of tuples for each function or subroutine each + tuple is the start and end of a subroutine or function to be + expanded. + """ + + spanlist = [] + ind = 0 + while True: + m = routine_start_re.search(astr, ind) + if m is None: + break + start = m.start() + if function_start_re.match(astr, start, m.end()): + while True: + i = astr.rfind('\n', ind, start) + if i==-1: + break + start = i + if astr[i:i+7]!='\n $': + break + start += 1 + m = routine_end_re.search(astr, m.end()) + ind = end = m and m.end()-1 or len(astr) + spanlist.append((start, end)) + return spanlist + +template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>") +named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>") +list_re = re.compile(r"<\s*((.*?))\s*>") + +def find_repl_patterns(astr): + reps = named_re.findall(astr) + names = {} + for rep in reps: + name = rep[0].strip() or unique_key(names) + repl = rep[1].replace(r'\,', '@comma@') + thelist = conv(repl) + names[name] = thelist + return names + +def find_and_remove_repl_patterns(astr): + names = find_repl_patterns(astr) + astr = re.subn(named_re, '', astr)[0] + return astr, names + +item_re = re.compile(r"\A\\(?P\d+)\Z") +def conv(astr): + b = astr.split(',') + l = [x.strip() for x in b] + for i in range(len(l)): + m = item_re.match(l[i]) + if m: + j = int(m.group('index')) + l[i] = l[j] + return ','.join(l) + +def unique_key(adict): + """ Obtain a unique key given a dictionary.""" + allkeys = list(adict.keys()) + done = False + n = 1 + while not done: + newkey = '__l%s' % (n) + if newkey in allkeys: + n += 1 + else: + done = True + return newkey + + +template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z') +def expand_sub(substr, names): + substr = substr.replace(r'\>', '@rightarrow@') + substr = substr.replace(r'\<', '@leftarrow@') + lnames = find_repl_patterns(substr) + substr = named_re.sub(r"<\1>", substr) # get rid of definition templates + + def listrepl(mobj): + thelist = conv(mobj.group(1).replace(r'\,', '@comma@')) + if template_name_re.match(thelist): + return "<%s>" % (thelist) + name = None + for key in lnames.keys(): # see if list is already in dictionary + if lnames[key] == thelist: + name = key + if name is None: # this list is not in the dictionary yet + name = unique_key(lnames) + lnames[name] = thelist + return "<%s>" % name + + substr = list_re.sub(listrepl, substr) # convert all lists to named templates + # newnames are constructed as needed + + numsubs = None + base_rule = None + rules = {} + for r in template_re.findall(substr): + if r not in rules: + thelist = lnames.get(r, names.get(r, None)) + if thelist is None: + raise ValueError('No replicates found for <%s>' % (r)) + if r not in names and not thelist.startswith('_'): + names[r] 
= thelist + rule = [i.replace('@comma@', ',') for i in thelist.split(',')] + num = len(rule) + + if numsubs is None: + numsubs = num + rules[r] = rule + base_rule = r + elif num == numsubs: + rules[r] = rule + else: + print("Mismatch in number of replacements (base <%s=%s>)" + " for <%s=%s>. Ignoring." % + (base_rule, ','.join(rules[base_rule]), r, thelist)) + if not rules: + return substr + + def namerepl(mobj): + name = mobj.group(1) + return rules.get(name, (k+1)*[name])[k] + + newstr = '' + for k in range(numsubs): + newstr += template_re.sub(namerepl, substr) + '\n\n' + + newstr = newstr.replace('@rightarrow@', '>') + newstr = newstr.replace('@leftarrow@', '<') + return newstr + +def process_str(allstr): + newstr = allstr + writestr = '' + + struct = parse_structure(newstr) + + oldend = 0 + names = {} + names.update(_special_names) + for sub in struct: + cleanedstr, defs = find_and_remove_repl_patterns(newstr[oldend:sub[0]]) + writestr += cleanedstr + names.update(defs) + writestr += expand_sub(newstr[sub[0]:sub[1]], names) + oldend = sub[1] + writestr += newstr[oldend:] + + return writestr + +include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P[\w\d./\\]+\.src)['\"]", re.I) + +def resolve_includes(source): + d = os.path.dirname(source) + with open(source) as fid: + lines = [] + for line in fid: + m = include_src_re.match(line) + if m: + fn = m.group('name') + if not os.path.isabs(fn): + fn = os.path.join(d, fn) + if os.path.isfile(fn): + lines.extend(resolve_includes(fn)) + else: + lines.append(line) + else: + lines.append(line) + return lines + +def process_file(source): + lines = resolve_includes(source) + return process_str(''.join(lines)) + +_special_names = find_repl_patterns(''' +<_c=s,d,c,z> +<_t=real,double precision,complex,double complex> + + + + + +''') + +def main(): + try: + file = sys.argv[1] + except IndexError: + fid = sys.stdin + outfile = sys.stdout + else: + fid = open(file, 'r') + (base, ext) = os.path.splitext(file) + newname = base + outfile = open(newname, 'w') + + allstr = fid.read() + writestr = process_str(allstr) + outfile.write(writestr) + + +if __name__ == "__main__": + main() diff --git a/MLPY/Lib/site-packages/numpy/distutils/intelccompiler.py b/MLPY/Lib/site-packages/numpy/distutils/intelccompiler.py new file mode 100644 index 0000000000000000000000000000000000000000..35d92f84389b54b720e97c06044b88944f534d2b --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/intelccompiler.py @@ -0,0 +1,111 @@ +import platform + +from distutils.unixccompiler import UnixCCompiler +from numpy.distutils.exec_command import find_executable +from numpy.distutils.ccompiler import simple_version_match +if platform.system() == 'Windows': + from numpy.distutils.msvc9compiler import MSVCCompiler + + +class IntelCCompiler(UnixCCompiler): + """A modified Intel compiler compatible with a GCC-built Python.""" + compiler_type = 'intel' + cc_exe = 'icc' + cc_args = 'fPIC' + + def __init__(self, verbose=0, dry_run=0, force=0): + UnixCCompiler.__init__(self, verbose, dry_run, force) + + v = self.get_version() + mpopt = 'openmp' if v and v < '15' else 'qopenmp' + self.cc_exe = ('icc -fPIC -fp-model strict -O3 ' + '-fomit-frame-pointer -{}').format(mpopt) + compiler = self.cc_exe + + if platform.system() == 'Darwin': + shared_flag = '-Wl,-undefined,dynamic_lookup' + else: + shared_flag = '-shared' + self.set_executables(compiler=compiler, + compiler_so=compiler, + compiler_cxx=compiler, + archiver='xiar' + ' cru', + linker_exe=compiler + ' -shared-intel', + 
linker_so=compiler + ' ' + shared_flag + + ' -shared-intel') + + +class IntelItaniumCCompiler(IntelCCompiler): + compiler_type = 'intele' + + # On Itanium, the Intel Compiler used to be called ecc, let's search for + # it (now it's also icc, so ecc is last in the search). + for cc_exe in map(find_executable, ['icc', 'ecc']): + if cc_exe: + break + + +class IntelEM64TCCompiler(UnixCCompiler): + """ + A modified Intel x86_64 compiler compatible with a 64bit GCC-built Python. + """ + compiler_type = 'intelem' + cc_exe = 'icc -m64' + cc_args = '-fPIC' + + def __init__(self, verbose=0, dry_run=0, force=0): + UnixCCompiler.__init__(self, verbose, dry_run, force) + + v = self.get_version() + mpopt = 'openmp' if v and v < '15' else 'qopenmp' + self.cc_exe = ('icc -std=c99 -m64 -fPIC -fp-model strict -O3 ' + '-fomit-frame-pointer -{}').format(mpopt) + compiler = self.cc_exe + + if platform.system() == 'Darwin': + shared_flag = '-Wl,-undefined,dynamic_lookup' + else: + shared_flag = '-shared' + self.set_executables(compiler=compiler, + compiler_so=compiler, + compiler_cxx=compiler, + archiver='xiar' + ' cru', + linker_exe=compiler + ' -shared-intel', + linker_so=compiler + ' ' + shared_flag + + ' -shared-intel') + + +if platform.system() == 'Windows': + class IntelCCompilerW(MSVCCompiler): + """ + A modified Intel compiler compatible with an MSVC-built Python. + """ + compiler_type = 'intelw' + compiler_cxx = 'icl' + + def __init__(self, verbose=0, dry_run=0, force=0): + MSVCCompiler.__init__(self, verbose, dry_run, force) + version_match = simple_version_match(start=r'Intel\(R\).*?32,') + self.__version = version_match + + def initialize(self, plat_name=None): + MSVCCompiler.initialize(self, plat_name) + self.cc = self.find_exe('icl.exe') + self.lib = self.find_exe('xilib') + self.linker = self.find_exe('xilink') + self.compile_options = ['/nologo', '/O3', '/MD', '/W3', + '/Qstd=c99'] + self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', + '/Qstd=c99', '/Z7', '/D_DEBUG'] + + class IntelEM64TCCompilerW(IntelCCompilerW): + """ + A modified Intel x86_64 compiler compatible with + a 64bit MSVC-built Python. + """ + compiler_type = 'intelemw' + + def __init__(self, verbose=0, dry_run=0, force=0): + MSVCCompiler.__init__(self, verbose, dry_run, force) + version_match = simple_version_match(start=r'Intel\(R\).*?64,') + self.__version = version_match diff --git a/MLPY/Lib/site-packages/numpy/distutils/lib2def.py b/MLPY/Lib/site-packages/numpy/distutils/lib2def.py new file mode 100644 index 0000000000000000000000000000000000000000..c40ca8b39b23ed646495a80c42319d290a8b96c6 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/lib2def.py @@ -0,0 +1,116 @@ +import re +import sys +import subprocess + +__doc__ = """This module generates a DEF file from the symbols in +an MSVC-compiled DLL import library. It correctly discriminates between +data and functions. The data is collected from the output of the program +nm(1). 
+ +Usage: + python lib2def.py [libname.lib] [output.def] +or + python lib2def.py [libname.lib] > output.def + +libname.lib defaults to python.lib and output.def defaults to stdout + +Author: Robert Kern +Last Update: April 30, 1999 +""" + +__version__ = '0.1a' + +py_ver = "%d%d" % tuple(sys.version_info[:2]) + +DEFAULT_NM = ['nm', '-Cs'] + +DEF_HEADER = """LIBRARY python%s.dll +;CODE PRELOAD MOVEABLE DISCARDABLE +;DATA PRELOAD SINGLE + +EXPORTS +""" % py_ver +# the header of the DEF file + +FUNC_RE = re.compile(r"^(.*) in python%s\.dll" % py_ver, re.MULTILINE) +DATA_RE = re.compile(r"^_imp__(.*) in python%s\.dll" % py_ver, re.MULTILINE) + +def parse_cmd(): + """Parses the command-line arguments. + +libfile, deffile = parse_cmd()""" + if len(sys.argv) == 3: + if sys.argv[1][-4:] == '.lib' and sys.argv[2][-4:] == '.def': + libfile, deffile = sys.argv[1:] + elif sys.argv[1][-4:] == '.def' and sys.argv[2][-4:] == '.lib': + deffile, libfile = sys.argv[1:] + else: + print("I'm assuming that your first argument is the library") + print("and the second is the DEF file.") + elif len(sys.argv) == 2: + if sys.argv[1][-4:] == '.def': + deffile = sys.argv[1] + libfile = 'python%s.lib' % py_ver + elif sys.argv[1][-4:] == '.lib': + deffile = None + libfile = sys.argv[1] + else: + libfile = 'python%s.lib' % py_ver + deffile = None + return libfile, deffile + +def getnm(nm_cmd=['nm', '-Cs', 'python%s.lib' % py_ver], shell=True): + """Returns the output of nm_cmd via a pipe. + +nm_output = getnm(nm_cmd = 'nm -Cs py_lib')""" + p = subprocess.Popen(nm_cmd, shell=shell, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, universal_newlines=True) + nm_output, nm_err = p.communicate() + if p.returncode != 0: + raise RuntimeError('failed to run "%s": "%s"' % ( + ' '.join(nm_cmd), nm_err)) + return nm_output + +def parse_nm(nm_output): + """Returns a tuple of lists: dlist for the list of data +symbols and flist for the list of function symbols. + +dlist, flist = parse_nm(nm_output)""" + data = DATA_RE.findall(nm_output) + func = FUNC_RE.findall(nm_output) + + flist = [] + for sym in data: + if sym in func and (sym[:2] == 'Py' or sym[:3] == '_Py' or sym[:4] == 'init'): + flist.append(sym) + + dlist = [] + for sym in data: + if sym not in flist and (sym[:2] == 'Py' or sym[:3] == '_Py'): + dlist.append(sym) + + dlist.sort() + flist.sort() + return dlist, flist + +def output_def(dlist, flist, header, file = sys.stdout): + """Outputs the final DEF file to a file defaulting to stdout. 
+ +output_def(dlist, flist, header, file = sys.stdout)""" + for data_sym in dlist: + header = header + '\t%s DATA\n' % data_sym + header = header + '\n' # blank line + for func_sym in flist: + header = header + '\t%s\n' % func_sym + file.write(header) + +if __name__ == '__main__': + libfile, deffile = parse_cmd() + if deffile is None: + deffile = sys.stdout + else: + deffile = open(deffile, 'w') + nm_cmd = DEFAULT_NM + [str(libfile)] + nm_output = getnm(nm_cmd, shell=False) + dlist, flist = parse_nm(nm_output) + output_def(dlist, flist, DEF_HEADER, deffile) diff --git a/MLPY/Lib/site-packages/numpy/distutils/line_endings.py b/MLPY/Lib/site-packages/numpy/distutils/line_endings.py new file mode 100644 index 0000000000000000000000000000000000000000..f5d0dadef6b442456ebbf394d67bfd5c3b3b1950 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/line_endings.py @@ -0,0 +1,77 @@ +""" Functions for converting from DOS to UNIX line endings + +""" +import os +import re +import sys + + +def dos2unix(file): + "Replace CRLF with LF in argument files. Print names of changed files." + if os.path.isdir(file): + print(file, "Directory!") + return + + with open(file, "rb") as fp: + data = fp.read() + if '\0' in data: + print(file, "Binary!") + return + + newdata = re.sub("\r\n", "\n", data) + if newdata != data: + print('dos2unix:', file) + with open(file, "wb") as f: + f.write(newdata) + return file + else: + print(file, 'ok') + +def dos2unix_one_dir(modified_files, dir_name, file_names): + for file in file_names: + full_path = os.path.join(dir_name, file) + file = dos2unix(full_path) + if file is not None: + modified_files.append(file) + +def dos2unix_dir(dir_name): + modified_files = [] + os.path.walk(dir_name, dos2unix_one_dir, modified_files) + return modified_files +#---------------------------------- + +def unix2dos(file): + "Replace LF with CRLF in argument files. Print names of changed files." 
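+    # The two-step substitution below first collapses every CRLF to LF,
+    # then expands every LF back to CRLF, so files with mixed line
+    # endings come out uniformly DOS-terminated.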
+ if os.path.isdir(file): + print(file, "Directory!") + return + + with open(file, "rb") as fp: + data = fp.read() + if '\0' in data: + print(file, "Binary!") + return + newdata = re.sub("\r\n", "\n", data) + newdata = re.sub("\n", "\r\n", newdata) + if newdata != data: + print('unix2dos:', file) + with open(file, "wb") as f: + f.write(newdata) + return file + else: + print(file, 'ok') + +def unix2dos_one_dir(modified_files, dir_name, file_names): + for file in file_names: + full_path = os.path.join(dir_name, file) + unix2dos(full_path) + if file is not None: + modified_files.append(file) + +def unix2dos_dir(dir_name): + modified_files = [] + os.path.walk(dir_name, unix2dos_one_dir, modified_files) + return modified_files + +if __name__ == "__main__": + dos2unix_dir(sys.argv[1]) diff --git a/MLPY/Lib/site-packages/numpy/distutils/log.py b/MLPY/Lib/site-packages/numpy/distutils/log.py new file mode 100644 index 0000000000000000000000000000000000000000..7e755bc660bf26d2c388958554602291b4d9a5a8 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/log.py @@ -0,0 +1,89 @@ +# Colored log +import sys +from distutils.log import * # noqa: F403 +from distutils.log import Log as old_Log +from distutils.log import _global_log + +from numpy.distutils.misc_util import (red_text, default_text, cyan_text, + green_text, is_sequence, is_string) + + +def _fix_args(args,flag=1): + if is_string(args): + return args.replace('%', '%%') + if flag and is_sequence(args): + return tuple([_fix_args(a, flag=0) for a in args]) + return args + + +class Log(old_Log): + def _log(self, level, msg, args): + if level >= self.threshold: + if args: + msg = msg % _fix_args(args) + if 0: + if msg.startswith('copying ') and msg.find(' -> ') != -1: + return + if msg.startswith('byte-compiling '): + return + print(_global_color_map[level](msg)) + sys.stdout.flush() + + def good(self, msg, *args): + """ + If we log WARN messages, log this message as a 'nice' anti-warn + message. + + """ + if WARN >= self.threshold: + if args: + print(green_text(msg % _fix_args(args))) + else: + print(green_text(msg)) + sys.stdout.flush() + + +_global_log.__class__ = Log + +good = _global_log.good + +def set_threshold(level, force=False): + prev_level = _global_log.threshold + if prev_level > DEBUG or force: + # If we're running at DEBUG, don't change the threshold, as there's + # likely a good reason why we're running at this level. + _global_log.threshold = level + if level <= DEBUG: + info('set_threshold: setting threshold to DEBUG level,' + ' it can be changed only with force argument') + else: + info('set_threshold: not changing threshold from DEBUG level' + ' %s to %s' % (prev_level, level)) + return prev_level + +def get_threshold(): + return _global_log.threshold + +def set_verbosity(v, force=False): + prev_level = _global_log.threshold + if v < 0: + set_threshold(ERROR, force) + elif v == 0: + set_threshold(WARN, force) + elif v == 1: + set_threshold(INFO, force) + elif v >= 2: + set_threshold(DEBUG, force) + return {FATAL:-2,ERROR:-1,WARN:0,INFO:1,DEBUG:2}.get(prev_level, 1) + + +_global_color_map = { + DEBUG:cyan_text, + INFO:default_text, + WARN:red_text, + ERROR:red_text, + FATAL:red_text +} + +# don't use INFO,.. flags in set_verbosity, these flags are for set_threshold. 
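+# A minimal usage sketch (hypothetical demo, not part of the original
+# module) exercising the helpers defined above:
+if __name__ == '__main__':
+    prev = set_verbosity(2)            # raise the threshold to DEBUG
+    info('previous verbosity was %s', prev)
+    good('printed in green while WARN messages are enabled')
+    set_verbosity(prev, force=True)    # restore the previous verbosity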
+set_verbosity(0, force=True) diff --git a/MLPY/Lib/site-packages/numpy/distutils/mingw/gfortran_vs2003_hack.c b/MLPY/Lib/site-packages/numpy/distutils/mingw/gfortran_vs2003_hack.c new file mode 100644 index 0000000000000000000000000000000000000000..d21fce58b73ad52ff583bdf0bdd153c497063e80 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/mingw/gfortran_vs2003_hack.c @@ -0,0 +1,6 @@ +int _get_output_format(void) +{ + return 0; +} + +int _imp____lc_codepage = 0; diff --git a/MLPY/Lib/site-packages/numpy/distutils/mingw32ccompiler.py b/MLPY/Lib/site-packages/numpy/distutils/mingw32ccompiler.py new file mode 100644 index 0000000000000000000000000000000000000000..ed27b67b28be6594f8703b93d45aeb01c5ebc994 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/mingw32ccompiler.py @@ -0,0 +1,657 @@ +""" +Support code for building Python extensions on Windows. + + # NT stuff + # 1. Make sure libpython.a exists for gcc. If not, build it. + # 2. Force windows to use gcc (we're struggling with MSVC and g77 support) + # 3. Force windows to use g77 + +""" +import os +import platform +import sys +import subprocess +import re +import textwrap + +# Overwrite certain distutils.ccompiler functions: +import numpy.distutils.ccompiler # noqa: F401 +from numpy.distutils import log +# NT stuff +# 1. Make sure libpython.a exists for gcc. If not, build it. +# 2. Force windows to use gcc (we're struggling with MSVC and g77 support) +# --> this is done in numpy/distutils/ccompiler.py +# 3. Force windows to use g77 + +import distutils.cygwinccompiler +from distutils.version import StrictVersion +from distutils.unixccompiler import UnixCCompiler +from distutils.msvccompiler import get_build_version as get_build_msvc_version +from distutils.errors import UnknownFileError +from numpy.distutils.misc_util import (msvc_runtime_library, + msvc_runtime_version, + msvc_runtime_major, + get_build_architecture) + +def get_msvcr_replacement(): + """Replacement for outdated version of get_msvcr from cygwinccompiler""" + msvcr = msvc_runtime_library() + return [] if msvcr is None else [msvcr] + +# monkey-patch cygwinccompiler with our updated version from misc_util +# to avoid getting an exception raised on Python 3.5 +distutils.cygwinccompiler.get_msvcr = get_msvcr_replacement + +# Useful to generate table of symbols from a dll +_START = re.compile(r'\[Ordinal/Name Pointer\] Table') +_TABLE = re.compile(r'^\s+\[([\s*[0-9]*)\] ([a-zA-Z0-9_]*)') + +# the same as cygwin plus some additional parameters +class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler): + """ A modified MingW32 compiler compatible with an MSVC built Python. + + """ + + compiler_type = 'mingw32' + + def __init__ (self, + verbose=0, + dry_run=0, + force=0): + + distutils.cygwinccompiler.CygwinCCompiler.__init__ (self, verbose, + dry_run, force) + + # we need to support 3.2 which doesn't match the standard + # get_versions methods regex + if self.gcc_version is None: + try: + out_string = subprocess.check_output(['gcc', '-dumpversion']) + except (OSError, CalledProcessError): + out_string = "" # ignore failures to match old behavior + result = re.search(r'(\d+\.\d+)', out_string) + if result: + self.gcc_version = StrictVersion(result.group(1)) + + # A real mingw32 doesn't need to specify a different entry point, + # but cygwin 2.91.57 in no-cygwin-mode needs it. 
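+        # entry_point computed below is only consumed further down, in
+        # the linker_so command line used for gcc <= 3.0.0 toolchains.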
+ if self.gcc_version <= "2.91.57": + entry_point = '--entry _DllMain@12' + else: + entry_point = '' + + if self.linker_dll == 'dllwrap': + # Commented out '--driver-name g++' part that fixes weird + # g++.exe: g++: No such file or directory + # error (mingw 1.0 in Enthon24 tree, gcc-3.4.5). + # If the --driver-name part is required for some environment + # then make the inclusion of this part specific to that + # environment. + self.linker = 'dllwrap' # --driver-name g++' + elif self.linker_dll == 'gcc': + self.linker = 'g++' + + # **changes: eric jones 4/11/01 + # 1. Check for import library on Windows. Build if it doesn't exist. + + build_import_library() + + # Check for custom msvc runtime library on Windows. Build if it doesn't exist. + msvcr_success = build_msvcr_library() + msvcr_dbg_success = build_msvcr_library(debug=True) + if msvcr_success or msvcr_dbg_success: + # add preprocessor statement for using customized msvcr lib + self.define_macro('NPY_MINGW_USE_CUSTOM_MSVCR') + + # Define the MSVC version as hint for MinGW + msvcr_version = msvc_runtime_version() + if msvcr_version: + self.define_macro('__MSVCRT_VERSION__', '0x%04i' % msvcr_version) + + # MS_WIN64 should be defined when building for amd64 on windows, + # but python headers define it only for MS compilers, which has all + # kind of bad consequences, like using Py_ModuleInit4 instead of + # Py_ModuleInit4_64, etc... So we add it here + if get_build_architecture() == 'AMD64': + if self.gcc_version < "4.0": + self.set_executables( + compiler='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0 -Wall', + compiler_so='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0' + ' -Wall -Wstrict-prototypes', + linker_exe='gcc -g -mno-cygwin', + linker_so='gcc -g -mno-cygwin -shared') + else: + # gcc-4 series releases do not support -mno-cygwin option + self.set_executables( + compiler='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall', + compiler_so='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall -Wstrict-prototypes', + linker_exe='gcc -g', + linker_so='gcc -g -shared') + else: + if self.gcc_version <= "3.0.0": + self.set_executables( + compiler='gcc -mno-cygwin -O2 -w', + compiler_so='gcc -mno-cygwin -mdll -O2 -w' + ' -Wstrict-prototypes', + linker_exe='g++ -mno-cygwin', + linker_so='%s -mno-cygwin -mdll -static %s' % + (self.linker, entry_point)) + elif self.gcc_version < "4.0": + self.set_executables( + compiler='gcc -mno-cygwin -O2 -Wall', + compiler_so='gcc -mno-cygwin -O2 -Wall' + ' -Wstrict-prototypes', + linker_exe='g++ -mno-cygwin', + linker_so='g++ -mno-cygwin -shared') + else: + # gcc-4 series releases do not support -mno-cygwin option + self.set_executables(compiler='gcc -O2 -Wall', + compiler_so='gcc -O2 -Wall -Wstrict-prototypes', + linker_exe='g++ ', + linker_so='g++ -shared') + # added for python2.3 support + # we can't pass it through set_executables because pre 2.2 would fail + self.compiler_cxx = ['g++'] + + # Maybe we should also append -mthreads, but then the finished dlls + # need another dll (mingwm10.dll see Mingw32 docs) (-mthreads: Support + # thread-safe exception handling on `Mingw32') + + # no additional libraries needed + #self.dll_libraries=[] + return + + # __init__ () + + def link(self, + target_desc, + objects, + output_filename, + output_dir, + libraries, + library_dirs, + runtime_library_dirs, + export_symbols = None, + debug=0, + extra_preargs=None, + extra_postargs=None, + build_temp=None, + target_lang=None): + # Include the appropriate MSVC runtime library if Python was built + # with MSVC >= 7.0 (MinGW standard is msvcrt) + 
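+        # Note: the argument tuple assembled below is sliced to the arity
+        # of whichever base-class link() is selected (CygwinCCompiler for
+        # gcc < 3.0.0, UnixCCompiler otherwise), since the two signatures
+        # take different numbers of parameters.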
runtime_library = msvc_runtime_library() + if runtime_library: + if not libraries: + libraries = [] + libraries.append(runtime_library) + args = (self, + target_desc, + objects, + output_filename, + output_dir, + libraries, + library_dirs, + runtime_library_dirs, + None, #export_symbols, we do this in our def-file + debug, + extra_preargs, + extra_postargs, + build_temp, + target_lang) + if self.gcc_version < "3.0.0": + func = distutils.cygwinccompiler.CygwinCCompiler.link + else: + func = UnixCCompiler.link + func(*args[:func.__code__.co_argcount]) + return + + def object_filenames (self, + source_filenames, + strip_dir=0, + output_dir=''): + if output_dir is None: output_dir = '' + obj_names = [] + for src_name in source_filenames: + # use normcase to make sure '.rc' is really '.rc' and not '.RC' + (base, ext) = os.path.splitext (os.path.normcase(src_name)) + + # added these lines to strip off windows drive letters + # without it, .o files are placed next to .c files + # instead of the build directory + drv, base = os.path.splitdrive(base) + if drv: + base = base[1:] + + if ext not in (self.src_extensions + ['.rc', '.res']): + raise UnknownFileError( + "unknown file type '%s' (from '%s')" % \ + (ext, src_name)) + if strip_dir: + base = os.path.basename (base) + if ext == '.res' or ext == '.rc': + # these need to be compiled to object files + obj_names.append (os.path.join (output_dir, + base + ext + self.obj_extension)) + else: + obj_names.append (os.path.join (output_dir, + base + self.obj_extension)) + return obj_names + + # object_filenames () + + +def find_python_dll(): + # We can't do much here: + # - find it in the virtualenv (sys.prefix) + # - find it in python main dir (sys.base_prefix, if in a virtualenv) + # - sys.real_prefix is main dir for virtualenvs in Python 2.7 + # - in system32, + # - ortherwise (Sxs), I don't know how to get it. + stems = [sys.prefix] + if hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix: + stems.append(sys.base_prefix) + elif hasattr(sys, 'real_prefix') and sys.real_prefix != sys.prefix: + stems.append(sys.real_prefix) + + sub_dirs = ['', 'lib', 'bin'] + # generate possible combinations of directory trees and sub-directories + lib_dirs = [] + for stem in stems: + for folder in sub_dirs: + lib_dirs.append(os.path.join(stem, folder)) + + # add system directory as well + if 'SYSTEMROOT' in os.environ: + lib_dirs.append(os.path.join(os.environ['SYSTEMROOT'], 'System32')) + + # search in the file system for possible candidates + major_version, minor_version = tuple(sys.version_info[:2]) + implementation = platform.python_implementation() + if implementation == 'CPython': + dllname = f'python{major_version}{minor_version}.dll' + elif implementation == 'PyPy': + dllname = f'libpypy{major_version}-c.dll' + else: + dllname = 'Unknown platform {implementation}' + print("Looking for %s" % dllname) + for folder in lib_dirs: + dll = os.path.join(folder, dllname) + if os.path.exists(dll): + return dll + + raise ValueError("%s not found in %s" % (dllname, lib_dirs)) + +def dump_table(dll): + st = subprocess.check_output(["objdump.exe", "-p", dll]) + return st.split(b'\n') + +def generate_def(dll, dfile): + """Given a dll file location, get all its exported symbols and dump them + into the given def file. 
+ + The .def file will be overwritten""" + dump = dump_table(dll) + for i in range(len(dump)): + if _START.match(dump[i].decode()): + break + else: + raise ValueError("Symbol table not found") + + syms = [] + for j in range(i+1, len(dump)): + m = _TABLE.match(dump[j].decode()) + if m: + syms.append((int(m.group(1).strip()), m.group(2))) + else: + break + + if len(syms) == 0: + log.warn('No symbols found in %s' % dll) + + with open(dfile, 'w') as d: + d.write('LIBRARY %s\n' % os.path.basename(dll)) + d.write(';CODE PRELOAD MOVEABLE DISCARDABLE\n') + d.write(';DATA PRELOAD SINGLE\n') + d.write('\nEXPORTS\n') + for s in syms: + #d.write('@%d %s\n' % (s[0], s[1])) + d.write('%s\n' % s[1]) + +def find_dll(dll_name): + + arch = {'AMD64' : 'amd64', + 'Intel' : 'x86'}[get_build_architecture()] + + def _find_dll_in_winsxs(dll_name): + # Walk through the WinSxS directory to find the dll. + winsxs_path = os.path.join(os.environ.get('WINDIR', r'C:\WINDOWS'), + 'winsxs') + if not os.path.exists(winsxs_path): + return None + for root, dirs, files in os.walk(winsxs_path): + if dll_name in files and arch in root: + return os.path.join(root, dll_name) + return None + + def _find_dll_in_path(dll_name): + # First, look in the Python directory, then scan PATH for + # the given dll name. + for path in [sys.prefix] + os.environ['PATH'].split(';'): + filepath = os.path.join(path, dll_name) + if os.path.exists(filepath): + return os.path.abspath(filepath) + + return _find_dll_in_winsxs(dll_name) or _find_dll_in_path(dll_name) + +def build_msvcr_library(debug=False): + if os.name != 'nt': + return False + + # If the version number is None, then we couldn't find the MSVC runtime at + # all, because we are running on a Python distribution which is customed + # compiled; trust that the compiler is the same as the one available to us + # now, and that it is capable of linking with the correct runtime without + # any extra options. 
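+    # Overall flow: locate the msvcr DLL, dump its exported symbols into
+    # a .def file via generate_def(), then have dlltool build a GNU
+    # import library from that definition file.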
+ msvcr_ver = msvc_runtime_major() + if msvcr_ver is None: + log.debug('Skip building import library: ' + 'Runtime is not compiled with MSVC') + return False + + # Skip using a custom library for versions < MSVC 8.0 + if msvcr_ver < 80: + log.debug('Skip building msvcr library:' + ' custom functionality not present') + return False + + msvcr_name = msvc_runtime_library() + if debug: + msvcr_name += 'd' + + # Skip if custom library already exists + out_name = "lib%s.a" % msvcr_name + out_file = os.path.join(sys.prefix, 'libs', out_name) + if os.path.isfile(out_file): + log.debug('Skip building msvcr library: "%s" exists' % + (out_file,)) + return True + + # Find the msvcr dll + msvcr_dll_name = msvcr_name + '.dll' + dll_file = find_dll(msvcr_dll_name) + if not dll_file: + log.warn('Cannot build msvcr library: "%s" not found' % + msvcr_dll_name) + return False + + def_name = "lib%s.def" % msvcr_name + def_file = os.path.join(sys.prefix, 'libs', def_name) + + log.info('Building msvcr library: "%s" (from %s)' \ + % (out_file, dll_file)) + + # Generate a symbol definition file from the msvcr dll + generate_def(dll_file, def_file) + + # Create a custom mingw library for the given symbol definitions + cmd = ['dlltool', '-d', def_file, '-l', out_file] + retcode = subprocess.call(cmd) + + # Clean up symbol definitions + os.remove(def_file) + + return (not retcode) + +def build_import_library(): + if os.name != 'nt': + return + + arch = get_build_architecture() + if arch == 'AMD64': + return _build_import_library_amd64() + elif arch == 'Intel': + return _build_import_library_x86() + else: + raise ValueError("Unhandled arch %s" % arch) + +def _check_for_import_lib(): + """Check if an import library for the Python runtime already exists.""" + major_version, minor_version = tuple(sys.version_info[:2]) + + # patterns for the file name of the library itself + patterns = ['libpython%d%d.a', + 'libpython%d%d.dll.a', + 'libpython%d.%d.dll.a'] + + # directory trees that may contain the library + stems = [sys.prefix] + if hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix: + stems.append(sys.base_prefix) + elif hasattr(sys, 'real_prefix') and sys.real_prefix != sys.prefix: + stems.append(sys.real_prefix) + + # possible subdirectories within those trees where it is placed + sub_dirs = ['libs', 'lib'] + + # generate a list of candidate locations + candidates = [] + for pat in patterns: + filename = pat % (major_version, minor_version) + for stem_dir in stems: + for folder in sub_dirs: + candidates.append(os.path.join(stem_dir, folder, filename)) + + # test the filesystem to see if we can find any of these + for fullname in candidates: + if os.path.isfile(fullname): + # already exists, in location given + return (True, fullname) + + # needs to be built, preferred location given first + return (False, candidates[0]) + +def _build_import_library_amd64(): + out_exists, out_file = _check_for_import_lib() + if out_exists: + log.debug('Skip building import library: "%s" exists', out_file) + return + + # get the runtime dll for which we are building import library + dll_file = find_python_dll() + log.info('Building import library (arch=AMD64): "%s" (from %s)' % + (out_file, dll_file)) + + # generate symbol list from this library + def_name = "python%d%d.def" % tuple(sys.version_info[:2]) + def_file = os.path.join(sys.prefix, 'libs', def_name) + generate_def(dll_file, def_file) + + # generate import library from this symbol list + cmd = ['dlltool', '-d', def_file, '-l', out_file] + subprocess.check_call(cmd) 
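+# For orientation, the .def files produced by generate_def() above take
+# this overall shape (illustrative sketch; the symbol names below are
+# examples only):
+#
+#   LIBRARY python39.dll
+#   ;CODE PRELOAD MOVEABLE DISCARDABLE
+#   ;DATA PRELOAD SINGLE
+#
+#   EXPORTS
+#   PyList_New
+#   PyLong_FromLong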
+ +def _build_import_library_x86(): + """ Build the import libraries for Mingw32-gcc on Windows + """ + out_exists, out_file = _check_for_import_lib() + if out_exists: + log.debug('Skip building import library: "%s" exists', out_file) + return + + lib_name = "python%d%d.lib" % tuple(sys.version_info[:2]) + lib_file = os.path.join(sys.prefix, 'libs', lib_name) + if not os.path.isfile(lib_file): + # didn't find library file in virtualenv, try base distribution, too, + # and use that instead if found there. for Python 2.7 venvs, the base + # directory is in attribute real_prefix instead of base_prefix. + if hasattr(sys, 'base_prefix'): + base_lib = os.path.join(sys.base_prefix, 'libs', lib_name) + elif hasattr(sys, 'real_prefix'): + base_lib = os.path.join(sys.real_prefix, 'libs', lib_name) + else: + base_lib = '' # os.path.isfile('') == False + + if os.path.isfile(base_lib): + lib_file = base_lib + else: + log.warn('Cannot build import library: "%s" not found', lib_file) + return + log.info('Building import library (ARCH=x86): "%s"', out_file) + + from numpy.distutils import lib2def + + def_name = "python%d%d.def" % tuple(sys.version_info[:2]) + def_file = os.path.join(sys.prefix, 'libs', def_name) + nm_output = lib2def.getnm( + lib2def.DEFAULT_NM + [lib_file], shell=False) + dlist, flist = lib2def.parse_nm(nm_output) + with open(def_file, 'w') as fid: + lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, fid) + + dll_name = find_python_dll () + + cmd = ["dlltool", + "--dllname", dll_name, + "--def", def_file, + "--output-lib", out_file] + status = subprocess.check_output(cmd) + if status: + log.warn('Failed to build import library for gcc. Linking will fail.') + return + +#===================================== +# Dealing with Visual Studio MANIFESTS +#===================================== + +# Functions to deal with visual studio manifests. Manifest are a mechanism to +# enforce strong DLL versioning on windows, and has nothing to do with +# distutils MANIFEST. manifests are XML files with version info, and used by +# the OS loader; they are necessary when linking against a DLL not in the +# system path; in particular, official python 2.6 binary is built against the +# MS runtime 9 (the one from VS 2008), which is not available on most windows +# systems; python 2.6 installer does install it in the Win SxS (Side by side) +# directory, but this requires the manifest for this to work. This is a big +# mess, thanks MS for a wonderful system. + +# XXX: ideally, we should use exactly the same version as used by python. I +# submitted a patch to get this version, but it was only included for python +# 2.6.1 and above. So for versions below, we use a "best guess". +_MSVCRVER_TO_FULLVER = {} +if sys.platform == 'win32': + try: + import msvcrt + # I took one version in my SxS directory: no idea if it is the good + # one, and we can't retrieve it from python + _MSVCRVER_TO_FULLVER['80'] = "8.0.50727.42" + _MSVCRVER_TO_FULLVER['90'] = "9.0.21022.8" + # Value from msvcrt.CRT_ASSEMBLY_VERSION under Python 3.3.0 + # on Windows XP: + _MSVCRVER_TO_FULLVER['100'] = "10.0.30319.460" + # Python 3.7 uses 1415, but get_build_version returns 140 ?? + _MSVCRVER_TO_FULLVER['140'] = "14.15.26726.0" + if hasattr(msvcrt, "CRT_ASSEMBLY_VERSION"): + major, minor, rest = msvcrt.CRT_ASSEMBLY_VERSION.split(".", 2) + _MSVCRVER_TO_FULLVER[major + minor] = msvcrt.CRT_ASSEMBLY_VERSION + del major, minor, rest + except ImportError: + # If we are here, means python was not built with MSVC. 
Not sure what + # to do in that case: manifest building will fail, but it should not be + # used in that case anyway + log.warn('Cannot import msvcrt: using manifest will not be possible') + +def msvc_manifest_xml(maj, min): + """Given a major and minor version of the MSVCR, returns the + corresponding XML file.""" + try: + fullver = _MSVCRVER_TO_FULLVER[str(maj * 10 + min)] + except KeyError: + raise ValueError("Version %d,%d of MSVCRT not supported yet" % + (maj, min)) from None + # Don't be fooled, it looks like an XML, but it is not. In particular, it + # should not have any space before starting, and its size should be + # divisible by 4, most likely for alignment constraints when the xml is + # embedded in the binary... + # This template was copied directly from the python 2.6 binary (using + # strings.exe from mingw on python.exe). + template = textwrap.dedent("""\ + + + + + + + + + + + + + + """) + + return template % {'fullver': fullver, 'maj': maj, 'min': min} + +def manifest_rc(name, type='dll'): + """Return the rc file used to generate the res file which will be embedded + as manifest for given manifest file name, of given type ('dll' or + 'exe'). + + Parameters + ---------- + name : str + name of the manifest file to embed + type : str {'dll', 'exe'} + type of the binary which will embed the manifest + + """ + if type == 'dll': + rctype = 2 + elif type == 'exe': + rctype = 1 + else: + raise ValueError("Type %s not supported" % type) + + return """\ +#include "winuser.h" +%d RT_MANIFEST %s""" % (rctype, name) + +def check_embedded_msvcr_match_linked(msver): + """msver is the ms runtime version used for the MANIFEST.""" + # check msvcr major version are the same for linking and + # embedding + maj = msvc_runtime_major() + if maj: + if not maj == int(msver): + raise ValueError( + "Discrepancy between linked msvcr " \ + "(%d) and the one about to be embedded " \ + "(%d)" % (int(msver), maj)) + +def configtest_name(config): + base = os.path.basename(config._gen_temp_sourcefile("yo", [], "c")) + return os.path.splitext(base)[0] + +def manifest_name(config): + # Get configest name (including suffix) + root = configtest_name(config) + exext = config.compiler.exe_extension + return root + exext + ".manifest" + +def rc_name(config): + # Get configtest name (including suffix) + root = configtest_name(config) + return root + ".rc" + +def generate_manifest(config): + msver = get_build_msvc_version() + if msver is not None: + if msver >= 8: + check_embedded_msvcr_match_linked(msver) + ma = int(msver) + mi = int((msver - ma) * 10) + # Write the manifest file + manxml = msvc_manifest_xml(ma, mi) + man = open(manifest_name(config), "w") + config.temp_files.append(manifest_name(config)) + man.write(manxml) + man.close() diff --git a/MLPY/Lib/site-packages/numpy/distutils/misc_util.py b/MLPY/Lib/site-packages/numpy/distutils/misc_util.py new file mode 100644 index 0000000000000000000000000000000000000000..f96385ca8abf2ea55302d9aa5eda4a7c06133757 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/misc_util.py @@ -0,0 +1,2417 @@ +import os +import re +import sys +import copy +import glob +import atexit +import tempfile +import subprocess +import shutil +import multiprocessing +import textwrap +import importlib.util +from threading import local as tlocal + +import distutils +from distutils.errors import DistutilsError + +# stores temporary directory of each thread to only create one per thread +_tdata = tlocal() + +# store all created temporary directories so they can be deleted on exit 
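+# (removed recursively by the clean_up_temporary_directory hook that is
+# registered with atexit below)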
+_tmpdirs = [] +def clean_up_temporary_directory(): + if _tmpdirs is not None: + for d in _tmpdirs: + try: + shutil.rmtree(d) + except OSError: + pass + +atexit.register(clean_up_temporary_directory) + +from numpy.compat import npy_load_module + +__all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict', + 'dict_append', 'appendpath', 'generate_config_py', + 'get_cmd', 'allpath', 'get_mathlibs', + 'terminal_has_colors', 'red_text', 'green_text', 'yellow_text', + 'blue_text', 'cyan_text', 'cyg2win32', 'mingw32', 'all_strings', + 'has_f_sources', 'has_cxx_sources', 'filter_sources', + 'get_dependencies', 'is_local_src_dir', 'get_ext_source_files', + 'get_script_files', 'get_lib_source_files', 'get_data_files', + 'dot_join', 'get_frame', 'minrelpath', 'njoin', + 'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language', + 'quote_args', 'get_build_architecture', 'get_info', 'get_pkg_info', + 'get_num_build_jobs'] + +class InstallableLib: + """ + Container to hold information on an installable library. + + Parameters + ---------- + name : str + Name of the installed library. + build_info : dict + Dictionary holding build information. + target_dir : str + Absolute path specifying where to install the library. + + See Also + -------- + Configuration.add_installed_library + + Notes + ----- + The three parameters are stored as attributes with the same names. + + """ + def __init__(self, name, build_info, target_dir): + self.name = name + self.build_info = build_info + self.target_dir = target_dir + + +def get_num_build_jobs(): + """ + Get number of parallel build jobs set by the --parallel command line + argument of setup.py + If the command did not receive a setting the environment variable + NPY_NUM_BUILD_JOBS is checked. If that is unset, return the number of + processors on the system, with a maximum of 8 (to prevent + overloading the system if there a lot of CPUs). + + Returns + ------- + out : int + number of parallel jobs that can be run + + """ + from numpy.distutils.core import get_distribution + try: + cpu_count = len(os.sched_getaffinity(0)) + except AttributeError: + cpu_count = multiprocessing.cpu_count() + cpu_count = min(cpu_count, 8) + envjobs = int(os.environ.get("NPY_NUM_BUILD_JOBS", cpu_count)) + dist = get_distribution() + # may be None during configuration + if dist is None: + return envjobs + + # any of these three may have the job set, take the largest + cmdattr = (getattr(dist.get_command_obj('build'), 'parallel', None), + getattr(dist.get_command_obj('build_ext'), 'parallel', None), + getattr(dist.get_command_obj('build_clib'), 'parallel', None)) + if all(x is None for x in cmdattr): + return envjobs + else: + return max(x for x in cmdattr if x is not None) + +def quote_args(args): + # don't used _nt_quote_args as it does not check if + # args items already have quotes or not. + args = list(args) + for i in range(len(args)): + a = args[i] + if ' ' in a and a[0] not in '"\'': + args[i] = '"%s"' % (a) + return args + +def allpath(name): + "Convert a /-separated pathname to one using the OS's path separator." 
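+    # e.g. allpath('build/src/module.c') -> 'build\\src\\module.c' on
+    # Windows; relative paths come back unchanged on POSIX.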
+ splitted = name.split('/') + return os.path.join(*splitted) + +def rel_path(path, parent_path): + """Return path relative to parent_path.""" + # Use realpath to avoid issues with symlinked dirs (see gh-7707) + pd = os.path.realpath(os.path.abspath(parent_path)) + apath = os.path.realpath(os.path.abspath(path)) + if len(apath) < len(pd): + return path + if apath == pd: + return '' + if pd == apath[:len(pd)]: + assert apath[len(pd)] in [os.sep], repr((path, apath[len(pd)])) + path = apath[len(pd)+1:] + return path + +def get_path_from_frame(frame, parent_path=None): + """Return path of the module given a frame object from the call stack. + + Returned path is relative to parent_path when given, + otherwise it is absolute path. + """ + + # First, try to find if the file name is in the frame. + try: + caller_file = eval('__file__', frame.f_globals, frame.f_locals) + d = os.path.dirname(os.path.abspath(caller_file)) + except NameError: + # __file__ is not defined, so let's try __name__. We try this second + # because setuptools spoofs __name__ to be '__main__' even though + # sys.modules['__main__'] might be something else, like easy_install(1). + caller_name = eval('__name__', frame.f_globals, frame.f_locals) + __import__(caller_name) + mod = sys.modules[caller_name] + if hasattr(mod, '__file__'): + d = os.path.dirname(os.path.abspath(mod.__file__)) + else: + # we're probably running setup.py as execfile("setup.py") + # (likely we're building an egg) + d = os.path.abspath('.') + + if parent_path is not None: + d = rel_path(d, parent_path) + + return d or '.' + +def njoin(*path): + """Join two or more pathname components + + - convert a /-separated pathname to one using the OS's path separator. + - resolve `..` and `.` from path. + + Either passing n arguments as in njoin('a','b'), or a sequence + of n names as in njoin(['a','b']) is handled, or a mixture of such arguments. + """ + paths = [] + for p in path: + if is_sequence(p): + # njoin(['a', 'b'], 'c') + paths.append(njoin(*p)) + else: + assert is_string(p) + paths.append(p) + path = paths + if not path: + # njoin() + joined = '' + else: + # njoin('a', 'b') + joined = os.path.join(*path) + if os.path.sep != '/': + joined = joined.replace('/', os.path.sep) + return minrelpath(joined) + +def get_mathlibs(path=None): + """Return the MATHLIB line from numpyconfig.h + """ + if path is not None: + config_file = os.path.join(path, '_numpyconfig.h') + else: + # Look for the file in each of the numpy include directories. + dirs = get_numpy_include_dirs() + for path in dirs: + fn = os.path.join(path, '_numpyconfig.h') + if os.path.exists(fn): + config_file = fn + break + else: + raise DistutilsError('_numpyconfig.h not found in numpy include ' + 'dirs %r' % (dirs,)) + + with open(config_file) as fid: + mathlibs = [] + s = '#define MATHLIB' + for line in fid: + if line.startswith(s): + value = line[len(s):].strip() + if value: + mathlibs.extend(value.split(',')) + return mathlibs + +def minrelpath(path): + """Resolve `..` and '.' from path. + """ + if not is_string(path): + return path + if '.' 
not in path: + return path + l = path.split(os.sep) + while l: + try: + i = l.index('.', 1) + except ValueError: + break + del l[i] + j = 1 + while l: + try: + i = l.index('..', j) + except ValueError: + break + if l[i-1]=='..': + j += 1 + else: + del l[i], l[i-1] + j = 1 + if not l: + return '' + return os.sep.join(l) + +def sorted_glob(fileglob): + """sorts output of python glob for https://bugs.python.org/issue30461 + to allow extensions to have reproducible build results""" + return sorted(glob.glob(fileglob)) + +def _fix_paths(paths, local_path, include_non_existing): + assert is_sequence(paths), repr(type(paths)) + new_paths = [] + assert not is_string(paths), repr(paths) + for n in paths: + if is_string(n): + if '*' in n or '?' in n: + p = sorted_glob(n) + p2 = sorted_glob(njoin(local_path, n)) + if p2: + new_paths.extend(p2) + elif p: + new_paths.extend(p) + else: + if include_non_existing: + new_paths.append(n) + print('could not resolve pattern in %r: %r' % + (local_path, n)) + else: + n2 = njoin(local_path, n) + if os.path.exists(n2): + new_paths.append(n2) + else: + if os.path.exists(n): + new_paths.append(n) + elif include_non_existing: + new_paths.append(n) + if not os.path.exists(n): + print('non-existing path in %r: %r' % + (local_path, n)) + + elif is_sequence(n): + new_paths.extend(_fix_paths(n, local_path, include_non_existing)) + else: + new_paths.append(n) + return [minrelpath(p) for p in new_paths] + +def gpaths(paths, local_path='', include_non_existing=True): + """Apply glob to paths and prepend local_path if needed. + """ + if is_string(paths): + paths = (paths,) + return _fix_paths(paths, local_path, include_non_existing) + +def make_temp_file(suffix='', prefix='', text=True): + if not hasattr(_tdata, 'tempdir'): + _tdata.tempdir = tempfile.mkdtemp() + _tmpdirs.append(_tdata.tempdir) + fid, name = tempfile.mkstemp(suffix=suffix, + prefix=prefix, + dir=_tdata.tempdir, + text=text) + fo = os.fdopen(fid, 'w') + return fo, name + +# Hooks for colored terminal output. 
+# See also https://web.archive.org/web/20100314204946/http://www.livinglogic.de/Python/ansistyle
+def terminal_has_colors():
+    if sys.platform=='cygwin' and 'USE_COLOR' not in os.environ:
+        # Avoid importing curses that causes illegal operation
+        # with a message:
+        #  PYTHON2 caused an invalid page fault in
+        #  module CYGNURSES7.DLL as 015f:18bbfc28
+        # Details: Python 2.3.3 [GCC 3.3.1 (cygming special)]
+        #          ssh to Win32 machine from debian
+        #          curses.version is 2.2
+        #          CYGWIN_98-4.10, release 1.5.7(0.109/3/2))
+        return 0
+    if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty():
+        try:
+            import curses
+            curses.setupterm()
+            if (curses.tigetnum("colors") >= 0
+                    and curses.tigetnum("pairs") >= 0
+                    and ((curses.tigetstr("setf") is not None
+                          and curses.tigetstr("setb") is not None)
+                         or (curses.tigetstr("setaf") is not None
+                             and curses.tigetstr("setab") is not None)
+                         or curses.tigetstr("scp") is not None)):
+                return 1
+        except Exception:
+            pass
+    return 0
+
+if terminal_has_colors():
+    _colour_codes = dict(black=0, red=1, green=2, yellow=3,
+                         blue=4, magenta=5, cyan=6, white=7, default=9)
+    def colour_text(s, fg=None, bg=None, bold=False):
+        seq = []
+        if bold:
+            seq.append('1')
+        if fg:
+            fgcode = 30 + _colour_codes.get(fg.lower(), 0)
+            seq.append(str(fgcode))
+        if bg:
+            bgcode = 40 + _colour_codes.get(bg.lower(), 7)
+            seq.append(str(bgcode))
+        if seq:
+            return '\x1b[%sm%s\x1b[0m' % (';'.join(seq), s)
+        else:
+            return s
+else:
+    def colour_text(s, fg=None, bg=None):
+        return s
+
+def default_text(s):
+    return colour_text(s, 'default')
+def red_text(s):
+    return colour_text(s, 'red')
+def green_text(s):
+    return colour_text(s, 'green')
+def yellow_text(s):
+    return colour_text(s, 'yellow')
+def cyan_text(s):
+    return colour_text(s, 'cyan')
+def blue_text(s):
+    return colour_text(s, 'blue')
+
+#########################
+
+def cyg2win32(path):
+    if sys.platform=='cygwin' and path.startswith('/cygdrive'):
+        path = path[10] + ':' + os.path.normcase(path[11:])
+    return path
+
+def mingw32():
+    """Return true when using mingw32 environment.
+    """
+    if sys.platform=='win32':
+        if os.environ.get('OSTYPE', '')=='msys':
+            return True
+        if os.environ.get('MSYSTEM', '')=='MINGW32':
+            return True
+    return False
+
+def msvc_runtime_version():
+    "Return version of MSVC runtime library, as defined by __MSC_VER__ macro"
+    msc_pos = sys.version.find('MSC v.')
+    if msc_pos != -1:
+        msc_ver = int(sys.version[msc_pos+6:msc_pos+10])
+    else:
+        msc_ver = None
+    return msc_ver
+
+def msvc_runtime_library():
+    "Return name of MSVC runtime library if Python was built with MSVC >= 7"
+    ver = msvc_runtime_major()
+    if ver:
+        if ver < 140:
+            return "msvcr%i" % ver
+        else:
+            return "vcruntime%i" % ver
+    else:
+        return None
+
+def msvc_runtime_major():
+    "Return major version of MSVC runtime coded like get_build_msvc_version"
+    major = {1300:  70,  # MSVC 7.0
+             1310:  71,  # MSVC 7.1
+             1400:  80,  # MSVC 8
+             1500:  90,  # MSVC 9 (aka 2008)
+             1600: 100,  # MSVC 10 (aka 2010)
+             1900: 140,  # MSVC 14 (aka 2015)
+             }.get(msvc_runtime_version(), None)
+    return major
+
+#########################
+
+#XXX need support for .C that is also C++
+cxx_ext_match = re.compile(r'.*\.(cpp|cxx|cc)\Z', re.I).match
+fortran_ext_match = re.compile(r'.*\.(f90|f95|f77|for|ftn|f)\Z', re.I).match
+f90_ext_match = re.compile(r'.*\.(f90|f95)\Z', re.I).match
+f90_module_name_match = re.compile(r'\s*module\s*(?P<name>[\w_]+)', re.I).match
+def _get_f90_modules(source):
+    """Return a list of Fortran f90 module names that the
+    given source file defines.
+ """ + if not f90_ext_match(source): + return [] + modules = [] + with open(source, 'r') as f: + for line in f: + m = f90_module_name_match(line) + if m: + name = m.group('name') + modules.append(name) + # break # XXX can we assume that there is one module per file? + return modules + +def is_string(s): + return isinstance(s, str) + +def all_strings(lst): + """Return True if all items in lst are string objects. """ + for item in lst: + if not is_string(item): + return False + return True + +def is_sequence(seq): + if is_string(seq): + return False + try: + len(seq) + except Exception: + return False + return True + +def is_glob_pattern(s): + return is_string(s) and ('*' in s or '?' in s) + +def as_list(seq): + if is_sequence(seq): + return list(seq) + else: + return [seq] + +def get_language(sources): + # not used in numpy/scipy packages, use build_ext.detect_language instead + """Determine language value (c,f77,f90) from sources """ + language = None + for source in sources: + if isinstance(source, str): + if f90_ext_match(source): + language = 'f90' + break + elif fortran_ext_match(source): + language = 'f77' + return language + +def has_f_sources(sources): + """Return True if sources contains Fortran files """ + for source in sources: + if fortran_ext_match(source): + return True + return False + +def has_cxx_sources(sources): + """Return True if sources contains C++ files """ + for source in sources: + if cxx_ext_match(source): + return True + return False + +def filter_sources(sources): + """Return four lists of filenames containing + C, C++, Fortran, and Fortran 90 module sources, + respectively. + """ + c_sources = [] + cxx_sources = [] + f_sources = [] + fmodule_sources = [] + for source in sources: + if fortran_ext_match(source): + modules = _get_f90_modules(source) + if modules: + fmodule_sources.append(source) + else: + f_sources.append(source) + elif cxx_ext_match(source): + cxx_sources.append(source) + else: + c_sources.append(source) + return c_sources, cxx_sources, f_sources, fmodule_sources + + +def _get_headers(directory_list): + # get *.h files from list of directories + headers = [] + for d in directory_list: + head = sorted_glob(os.path.join(d, "*.h")) #XXX: *.hpp files?? + headers.extend(head) + return headers + +def _get_directories(list_of_sources): + # get unique directories from list of sources. + direcs = [] + for f in list_of_sources: + d = os.path.split(f) + if d[0] != '' and not d[0] in direcs: + direcs.append(d[0]) + return direcs + +def _commandline_dep_string(cc_args, extra_postargs, pp_opts): + """ + Return commandline representation used to determine if a file needs + to be recompiled + """ + cmdline = 'commandline: ' + cmdline += ' '.join(cc_args) + cmdline += ' '.join(extra_postargs) + cmdline += ' '.join(pp_opts) + '\n' + return cmdline + + +def get_dependencies(sources): + #XXX scan sources for include statements + return _get_headers(_get_directories(sources)) + +def is_local_src_dir(directory): + """Return true if directory is local directory. 
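+
+    A directory counts as "local" when it lies under the current working
+    directory and is not inside a ``build`` subtree, which is what the
+    checks below implement.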
+ """ + if not is_string(directory): + return False + abs_dir = os.path.abspath(directory) + c = os.path.commonprefix([os.getcwd(), abs_dir]) + new_dir = abs_dir[len(c):].split(os.sep) + if new_dir and not new_dir[0]: + new_dir = new_dir[1:] + if new_dir and new_dir[0]=='build': + return False + new_dir = os.sep.join(new_dir) + return os.path.isdir(new_dir) + +def general_source_files(top_path): + pruned_directories = {'CVS':1, '.svn':1, 'build':1} + prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$') + for dirpath, dirnames, filenames in os.walk(top_path, topdown=True): + pruned = [ d for d in dirnames if d not in pruned_directories ] + dirnames[:] = pruned + for f in filenames: + if not prune_file_pat.search(f): + yield os.path.join(dirpath, f) + +def general_source_directories_files(top_path): + """Return a directory name relative to top_path and + files contained. + """ + pruned_directories = ['CVS', '.svn', 'build'] + prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$') + for dirpath, dirnames, filenames in os.walk(top_path, topdown=True): + pruned = [ d for d in dirnames if d not in pruned_directories ] + dirnames[:] = pruned + for d in dirnames: + dpath = os.path.join(dirpath, d) + rpath = rel_path(dpath, top_path) + files = [] + for f in os.listdir(dpath): + fn = os.path.join(dpath, f) + if os.path.isfile(fn) and not prune_file_pat.search(fn): + files.append(fn) + yield rpath, files + dpath = top_path + rpath = rel_path(dpath, top_path) + filenames = [os.path.join(dpath, f) for f in os.listdir(dpath) \ + if not prune_file_pat.search(f)] + files = [f for f in filenames if os.path.isfile(f)] + yield rpath, files + + +def get_ext_source_files(ext): + # Get sources and any include files in the same directory. + filenames = [] + sources = [_m for _m in ext.sources if is_string(_m)] + filenames.extend(sources) + filenames.extend(get_dependencies(sources)) + for d in ext.depends: + if is_local_src_dir(d): + filenames.extend(list(general_source_files(d))) + elif os.path.isfile(d): + filenames.append(d) + return filenames + +def get_script_files(scripts): + scripts = [_m for _m in scripts if is_string(_m)] + return scripts + +def get_lib_source_files(lib): + filenames = [] + sources = lib[1].get('sources', []) + sources = [_m for _m in sources if is_string(_m)] + filenames.extend(sources) + filenames.extend(get_dependencies(sources)) + depends = lib[1].get('depends', []) + for d in depends: + if is_local_src_dir(d): + filenames.extend(list(general_source_files(d))) + elif os.path.isfile(d): + filenames.append(d) + return filenames + +def get_shared_lib_extension(is_python_ext=False): + """Return the correct file extension for shared libraries. + + Parameters + ---------- + is_python_ext : bool, optional + Whether the shared library is a Python extension. Default is False. + + Returns + ------- + so_ext : str + The shared library extension. + + Notes + ----- + For Python shared libs, `so_ext` will typically be '.so' on Linux and OS X, + and '.pyd' on Windows. For Python >= 3.2 `so_ext` has a tag prepended on + POSIX systems according to PEP 3149. For Python 3.2 this is implemented on + Linux, but not on OS X. 
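+
+    Examples
+    --------
+    A sketch of a typical value (the result is platform-dependent, hence
+    the skipped doctest):
+
+    >>> get_shared_lib_extension(is_python_ext=False)  #doctest: +SKIP
+    '.so'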
+
+    """
+    confvars = distutils.sysconfig.get_config_vars()
+    # SO is deprecated in 3.3.1, use EXT_SUFFIX instead
+    so_ext = confvars.get('EXT_SUFFIX', None)
+    if so_ext is None:
+        so_ext = confvars.get('SO', '')
+
+    if not is_python_ext:
+        # hardcode known values, config vars (including SHLIB_SUFFIX) are
+        # unreliable (see #3182)
+        # darwin, windows and debug linux are wrong in 3.3.1 and older
+        if (sys.platform.startswith('linux') or
+                sys.platform.startswith('gnukfreebsd')):
+            so_ext = '.so'
+        elif sys.platform.startswith('darwin'):
+            so_ext = '.dylib'
+        elif sys.platform.startswith('win'):
+            so_ext = '.dll'
+        else:
+            # fall back to config vars for unknown platforms
+            # fix long extension for Python >=3.2, see PEP 3149.
+            if 'SOABI' in confvars:
+                # Does nothing unless SOABI config var exists
+                so_ext = so_ext.replace('.' + confvars.get('SOABI'), '', 1)
+
+    return so_ext
+
+def get_data_files(data):
+    if is_string(data):
+        return [data]
+    sources = data[1]
+    filenames = []
+    for s in sources:
+        if hasattr(s, '__call__'):
+            continue
+        if is_local_src_dir(s):
+            filenames.extend(list(general_source_files(s)))
+        elif is_string(s):
+            if os.path.isfile(s):
+                filenames.append(s)
+            else:
+                print('Not existing data file:', s)
+        else:
+            raise TypeError(repr(s))
+    return filenames
+
+def dot_join(*args):
+    return '.'.join([a for a in args if a])
+
+def get_frame(level=0):
+    """Return frame object from call stack with given level.
+    """
+    try:
+        return sys._getframe(level+1)
+    except AttributeError:
+        frame = sys.exc_info()[2].tb_frame
+        for _ in range(level+1):
+            frame = frame.f_back
+        return frame
+
+
+######################
+
+class Configuration:
+
+    _list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs',
+                  'libraries', 'headers', 'scripts', 'py_modules',
+                  'installed_libraries', 'define_macros']
+    _dict_keys = ['package_dir', 'installed_pkg_config']
+    _extra_keys = ['name', 'version']
+
+    numpy_include_dirs = []
+
+    def __init__(self,
+                 package_name=None,
+                 parent_name=None,
+                 top_path=None,
+                 package_path=None,
+                 caller_level=1,
+                 setup_name='setup.py',
+                 **attrs):
+        """Construct configuration instance of a package.
+
+        package_name -- name of the package
+                        Ex.: 'distutils'
+        parent_name -- name of the parent package
+                       Ex.: 'numpy'
+        top_path -- directory of the toplevel package
+                    Ex.: the directory where the numpy package source sits
+        package_path -- directory of package. Will be computed by magic from the
+                        directory of the caller module if not specified
+                        Ex.: the directory where numpy.distutils is
+        caller_level -- frame level to caller namespace, internal parameter.
+        """
+        self.name = dot_join(parent_name, package_name)
+        self.version = None
+
+        caller_frame = get_frame(caller_level)
+        self.local_path = get_path_from_frame(caller_frame, top_path)
+        # local_path -- directory of a file (usually setup.py) that
+        #               defines a configuration() function.
+ if top_path is None: + top_path = self.local_path + self.local_path = '' + if package_path is None: + package_path = self.local_path + elif os.path.isdir(njoin(self.local_path, package_path)): + package_path = njoin(self.local_path, package_path) + if not os.path.isdir(package_path or '.'): + raise ValueError("%r is not a directory" % (package_path,)) + self.top_path = top_path + self.package_path = package_path + # this is the relative path in the installed package + self.path_in_package = os.path.join(*self.name.split('.')) + + self.list_keys = self._list_keys[:] + self.dict_keys = self._dict_keys[:] + + for n in self.list_keys: + v = copy.copy(attrs.get(n, [])) + setattr(self, n, as_list(v)) + + for n in self.dict_keys: + v = copy.copy(attrs.get(n, {})) + setattr(self, n, v) + + known_keys = self.list_keys + self.dict_keys + self.extra_keys = self._extra_keys[:] + for n in attrs.keys(): + if n in known_keys: + continue + a = attrs[n] + setattr(self, n, a) + if isinstance(a, list): + self.list_keys.append(n) + elif isinstance(a, dict): + self.dict_keys.append(n) + else: + self.extra_keys.append(n) + + if os.path.exists(njoin(package_path, '__init__.py')): + self.packages.append(self.name) + self.package_dir[self.name] = package_path + + self.options = dict( + ignore_setup_xxx_py = False, + assume_default_configuration = False, + delegate_options_to_subpackages = False, + quiet = False, + ) + + caller_instance = None + for i in range(1, 3): + try: + f = get_frame(i) + except ValueError: + break + try: + caller_instance = eval('self', f.f_globals, f.f_locals) + break + except NameError: + pass + if isinstance(caller_instance, self.__class__): + if caller_instance.options['delegate_options_to_subpackages']: + self.set_options(**caller_instance.options) + + self.setup_name = setup_name + + def todict(self): + """ + Return a dictionary compatible with the keyword arguments of distutils + setup function. + + Examples + -------- + >>> setup(**config.todict()) #doctest: +SKIP + """ + + self._optimize_data_files() + d = {} + known_keys = self.list_keys + self.dict_keys + self.extra_keys + for n in known_keys: + a = getattr(self, n) + if a: + d[n] = a + return d + + def info(self, message): + if not self.options['quiet']: + print(message) + + def warn(self, message): + sys.stderr.write('Warning: %s\n' % (message,)) + + def set_options(self, **options): + """ + Configure Configuration instance. 
+ + The following options are available: + - ignore_setup_xxx_py + - assume_default_configuration + - delegate_options_to_subpackages + - quiet + + """ + for key, value in options.items(): + if key in self.options: + self.options[key] = value + else: + raise ValueError('Unknown option: '+key) + + def get_distribution(self): + """Return the distutils distribution object for self.""" + from numpy.distutils.core import get_distribution + return get_distribution() + + def _wildcard_get_subpackage(self, subpackage_name, + parent_name, + caller_level = 1): + l = subpackage_name.split('.') + subpackage_path = njoin([self.local_path]+l) + dirs = [_m for _m in sorted_glob(subpackage_path) if os.path.isdir(_m)] + config_list = [] + for d in dirs: + if not os.path.isfile(njoin(d, '__init__.py')): + continue + if 'build' in d.split(os.sep): + continue + n = '.'.join(d.split(os.sep)[-len(l):]) + c = self.get_subpackage(n, + parent_name = parent_name, + caller_level = caller_level+1) + config_list.extend(c) + return config_list + + def _get_configuration_from_setup_py(self, setup_py, + subpackage_name, + subpackage_path, + parent_name, + caller_level = 1): + # In case setup_py imports local modules: + sys.path.insert(0, os.path.dirname(setup_py)) + try: + setup_name = os.path.splitext(os.path.basename(setup_py))[0] + n = dot_join(self.name, subpackage_name, setup_name) + setup_module = npy_load_module('_'.join(n.split('.')), + setup_py, + ('.py', 'U', 1)) + if not hasattr(setup_module, 'configuration'): + if not self.options['assume_default_configuration']: + self.warn('Assuming default configuration '\ + '(%s does not define configuration())'\ + % (setup_module)) + config = Configuration(subpackage_name, parent_name, + self.top_path, subpackage_path, + caller_level = caller_level + 1) + else: + pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1])) + args = (pn,) + if setup_module.configuration.__code__.co_argcount > 1: + args = args + (self.top_path,) + config = setup_module.configuration(*args) + if config.name!=dot_join(parent_name, subpackage_name): + self.warn('Subpackage %r configuration returned as %r' % \ + (dot_join(parent_name, subpackage_name), config.name)) + finally: + del sys.path[0] + return config + + def get_subpackage(self,subpackage_name, + subpackage_path=None, + parent_name=None, + caller_level = 1): + """Return list of subpackage configurations. + + Parameters + ---------- + subpackage_name : str or None + Name of the subpackage to get the configuration. '*' in + subpackage_name is handled as a wildcard. + subpackage_path : str + If None, then the path is assumed to be the local path plus the + subpackage_name. If a setup.py file is not found in the + subpackage_path, then a default configuration is used. + parent_name : str + Parent name. 
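+
+        Returns
+        -------
+        config_list : list
+            List of `Configuration` instances for the matched subpackage(s);
+            empty when nothing was found.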
+        """
+        if subpackage_name is None:
+            if subpackage_path is None:
+                raise ValueError(
+                    "either subpackage_name or subpackage_path must be specified")
+            subpackage_name = os.path.basename(subpackage_path)
+
+        # handle wildcards
+        l = subpackage_name.split('.')
+        if subpackage_path is None and '*' in subpackage_name:
+            return self._wildcard_get_subpackage(subpackage_name,
+                                                 parent_name,
+                                                 caller_level = caller_level+1)
+        assert '*' not in subpackage_name, repr((subpackage_name, subpackage_path, parent_name))
+        if subpackage_path is None:
+            subpackage_path = njoin([self.local_path] + l)
+        else:
+            subpackage_path = njoin([subpackage_path] + l[:-1])
+            subpackage_path = self.paths([subpackage_path])[0]
+        setup_py = njoin(subpackage_path, self.setup_name)
+        if not self.options['ignore_setup_xxx_py']:
+            if not os.path.isfile(setup_py):
+                setup_py = njoin(subpackage_path,
+                                 'setup_%s.py' % (subpackage_name))
+        if not os.path.isfile(setup_py):
+            if not self.options['assume_default_configuration']:
+                self.warn('Assuming default configuration '\
+                          '(%s/{setup_%s,setup}.py was not found)' \
+                          % (os.path.dirname(setup_py), subpackage_name))
+            config = Configuration(subpackage_name, parent_name,
+                                   self.top_path, subpackage_path,
+                                   caller_level = caller_level+1)
+        else:
+            config = self._get_configuration_from_setup_py(
+                setup_py,
+                subpackage_name,
+                subpackage_path,
+                parent_name,
+                caller_level = caller_level + 1)
+        if config:
+            return [config]
+        else:
+            return []
+
+    def add_subpackage(self,subpackage_name,
+                       subpackage_path=None,
+                       standalone = False):
+        """Add a sub-package to the current Configuration instance.
+
+        This is useful in a setup.py script for adding sub-packages to a
+        package.
+
+        Parameters
+        ----------
+        subpackage_name : str
+            name of the subpackage
+        subpackage_path : str
+            if given, the subpackage path such that the subpackage is in
+            subpackage_path / subpackage_name. If None, the subpackage is
+            assumed to be located in the local path / subpackage_name.
+        standalone : bool
+        """
+
+        if standalone:
+            parent_name = None
+        else:
+            parent_name = self.name
+        config_list = self.get_subpackage(subpackage_name, subpackage_path,
+                                          parent_name = parent_name,
+                                          caller_level = 2)
+        if not config_list:
+            self.warn('No configuration returned, assuming unavailable.')
+        for config in config_list:
+            d = config
+            if isinstance(config, Configuration):
+                d = config.todict()
+            assert isinstance(d, dict), repr(type(d))
+
+            self.info('Appending %s configuration to %s' \
+                      % (d.get('name'), self.name))
+            self.dict_append(**d)
+
+        dist = self.get_distribution()
+        if dist is not None:
+            self.warn('distutils distribution has been initialized,'\
+                      ' it may be too late to add a subpackage '+ subpackage_name)
+
+    def add_data_dir(self, data_path):
+        """Recursively add files under data_path to data_files list.
+
+        Recursively add files under data_path to the list of data_files to be
+        installed (and distributed). The data_path can be either a relative
+        path-name, or an absolute path-name, or a 2-tuple where the first
+        argument shows where in the install directory the data directory
+        should be installed to.
+
+        Parameters
+        ----------
+        data_path : seq or str
+            Argument can be either
+
+                * 2-sequence (<datadir suffix>, <path to data directory>)
+                * path to data directory where python datadir suffix defaults
+                  to package dir.
+
+        Notes
+        -----
+        Rules for installation paths::
+
+            foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar
+            (gun, foo/bar) -> parent/gun
+            foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b
+            (gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun
+            (gun/*, foo/*) -> parent/gun/a, parent/gun/b
+            /foo/bar -> (bar, /foo/bar) -> parent/bar
+            (gun, /foo/bar) -> parent/gun
+            (fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar
+
+        Examples
+        --------
+        For example suppose the source directory contains fun/foo.dat and
+        fun/bar/car.dat:
+
+        >>> self.add_data_dir('fun')                        #doctest: +SKIP
+        >>> self.add_data_dir(('sun', 'fun'))               #doctest: +SKIP
+        >>> self.add_data_dir(('gun', '/full/path/to/fun')) #doctest: +SKIP
+
+        Will install data-files to the locations::
+
+            <package install directory>/
+              fun/
+                foo.dat
+                bar/
+                  car.dat
+              sun/
+                foo.dat
+                bar/
+                  car.dat
+              gun/
+                foo.dat
+                car.dat
+
+        """
+        if is_sequence(data_path):
+            d, data_path = data_path
+        else:
+            d = None
+        if is_sequence(data_path):
+            [self.add_data_dir((d, p)) for p in data_path]
+            return
+        if not is_string(data_path):
+            raise TypeError("not a string: %r" % (data_path,))
+        if d is None:
+            if os.path.isabs(data_path):
+                return self.add_data_dir((os.path.basename(data_path), data_path))
+            return self.add_data_dir((data_path, data_path))
+        paths = self.paths(data_path, include_non_existing=False)
+        if is_glob_pattern(data_path):
+            if is_glob_pattern(d):
+                pattern_list = allpath(d).split(os.sep)
+                pattern_list.reverse()
+                # /a/*//b/ -> /a/*/b
+                rl = list(range(len(pattern_list)-1)); rl.reverse()
+                for i in rl:
+                    if not pattern_list[i]:
+                        del pattern_list[i]
+                #
+                for path in paths:
+                    if not os.path.isdir(path):
+                        print('Not a directory, skipping', path)
+                        continue
+                    rpath = rel_path(path, self.local_path)
+                    path_list = rpath.split(os.sep)
+                    path_list.reverse()
+                    target_list = []
+                    i = 0
+                    for s in pattern_list:
+                        if is_glob_pattern(s):
+                            if i>=len(path_list):
+                                raise ValueError('cannot fill pattern %r with %r' \
+                                                 % (d, path))
+                            target_list.append(path_list[i])
+                        else:
+                            assert s==path_list[i], repr((s, path_list[i], data_path, d, path, rpath))
+                            target_list.append(s)
+                        i += 1
+                    if path_list[i:]:
+                        self.warn('mismatch of pattern_list=%s and path_list=%s'\
+                                  % (pattern_list, path_list))
+                    target_list.reverse()
+                    self.add_data_dir((os.sep.join(target_list), path))
+            else:
+                for path in paths:
+                    self.add_data_dir((d, path))
+            return
+        assert not is_glob_pattern(d), repr(d)
+
+        dist = self.get_distribution()
+        if dist is not None and dist.data_files is not None:
+            data_files = dist.data_files
+        else:
+            data_files = self.data_files
+
+        for path in paths:
+            for d1, f in list(general_source_directories_files(path)):
+                target_path = os.path.join(self.path_in_package, d, d1)
+                data_files.append((target_path, f))
+
+    def _optimize_data_files(self):
+        data_dict = {}
+        for p, files in self.data_files:
+            if p not in data_dict:
+                data_dict[p] = set()
+            for f in files:
+                data_dict[p].add(f)
+        self.data_files[:] = [(p, list(files)) for p, files in data_dict.items()]
+
+    def add_data_files(self,*files):
+        """Add data files to configuration data_files.
+
+        Parameters
+        ----------
+        files : sequence
+            Argument(s) can be either
+
+                * 2-sequence (<datadir prefix>, <path to data file(s)>)
+                * paths to data files where python datadir prefix defaults
+                  to package dir.
+
+        Notes
+        -----
+        The form of each element of the files sequence is very flexible
+        allowing many combinations of where to get the files from the package
+        and where they should ultimately be installed on the system. The most
+        basic usage is for an element of the files argument sequence to be a
+        simple filename. This will cause that file from the local path to be
+        installed to the installation path of the self.name package (package
+        path). The file argument can also be a relative path in which case the
+        entire relative path will be installed into the package directory.
+        Finally, the file can be an absolute path name in which case the file
+        will be found at the absolute path name but installed to the package
+        path.
+
+        This basic behavior can be augmented by passing a 2-tuple in as the
+        file argument. The first element of the tuple should specify the
+        relative path (under the package install directory) where the
+        remaining sequence of files should be installed to (it has nothing to
+        do with the file-names in the source distribution). The second element
+        of the tuple is the sequence of files that should be installed. The
+        files in this sequence can be filenames, relative paths, or absolute
+        paths. For absolute paths the file will be installed in the top-level
+        package installation directory (regardless of the first argument).
+        Filenames and relative path names will be installed in the package
+        install directory under the path name given as the first element of
+        the tuple.
+
+        Rules for installation paths:
+
+        #. file.txt -> (., file.txt) -> parent/file.txt
+        #. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt
+        #. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt
+        #. ``*``.txt -> parent/a.txt, parent/b.txt
+        #. foo/``*``.txt -> parent/foo/a.txt, parent/foo/b.txt
+        #. ``*/*.txt`` -> (``*``, ``*``/``*``.txt) -> parent/c/a.txt, parent/d/b.txt
+        #. (sun, file.txt) -> parent/sun/file.txt
+        #. (sun, bar/file.txt) -> parent/sun/file.txt
+        #. (sun, /foo/bar/file.txt) -> parent/sun/file.txt
+        #. (sun, ``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt
+        #. (sun, bar/``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt
+        #. (sun/``*``, ``*``/``*``.txt) -> parent/sun/c/a.txt, parent/d/b.txt
+
+        An additional feature is that the path to a data-file can actually be
+        a function that takes no arguments and returns the actual path(s) to
+        the data-files. This is useful when the data files are generated while
+        building the package.
+
+        Examples
+        --------
+        Add files to the list of data_files to be included with the package.
+
+        >>> self.add_data_files('foo.dat',
+        ...     ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']),
+        ...     'bar/cat.dat',
+        ...     '/full/path/to/can.dat') #doctest: +SKIP
+
+        will install these data files to::
+
+            <package install directory>/
+             foo.dat
+             fun/
+               gun.dat
+               nun/
+                 pun.dat
+             sun.dat
+             bar/
+               cat.dat
+             can.dat
+
+        where <package install directory> is the package (or sub-package)
+        directory such as '/usr/lib/python2.4/site-packages/mypackage'
+        ('C:\\Python2.4\\Lib\\site-packages\\mypackage') or
+        '/usr/lib/python2.4/site-packages/mypackage/mysubpackage'
+        ('C:\\Python2.4\\Lib\\site-packages\\mypackage\\mysubpackage').
+
+        """
+
+        if len(files)>1:
+            for f in files:
+                self.add_data_files(f)
+            return
+        assert len(files)==1
+        if is_sequence(files[0]):
+            d, files = files[0]
+        else:
+            d = None
+        if is_string(files):
+            filepat = files
+        elif is_sequence(files):
+            if len(files)==1:
+                filepat = files[0]
+            else:
+                for f in files:
+                    self.add_data_files((d, f))
+                return
+        else:
+            raise TypeError(repr(type(files)))
+
+        if d is None:
+            if hasattr(filepat, '__call__'):
+                d = ''
+            elif os.path.isabs(filepat):
+                d = ''
+            else:
+                d = os.path.dirname(filepat)
+            self.add_data_files((d, files))
+            return
+
+        paths = self.paths(filepat, include_non_existing=False)
+        if is_glob_pattern(filepat):
+            if is_glob_pattern(d):
+                pattern_list = d.split(os.sep)
+                pattern_list.reverse()
+                for path in paths:
+                    path_list = path.split(os.sep)
+                    path_list.reverse()
+                    path_list.pop() # filename
+                    target_list = []
+                    i = 0
+                    for s in pattern_list:
+                        if is_glob_pattern(s):
+                            target_list.append(path_list[i])
+                            i += 1
+                        else:
+                            target_list.append(s)
+                    target_list.reverse()
+                    self.add_data_files((os.sep.join(target_list), path))
+            else:
+                self.add_data_files((d, paths))
+            return
+        assert not is_glob_pattern(d), repr((d, filepat))
+
+        dist = self.get_distribution()
+        if dist is not None and dist.data_files is not None:
+            data_files = dist.data_files
+        else:
+            data_files = self.data_files
+
+        data_files.append((os.path.join(self.path_in_package, d), paths))
+
+    ### XXX Implement add_py_modules
+
+    def add_define_macros(self, macros):
+        """Add define macros to configuration
+
+        Add the given sequence of macro name and value duples to the beginning
+        of the define_macros list. This list will be visible to all extension
+        modules of the current package.
+        """
+        dist = self.get_distribution()
+        if dist is not None:
+            if not hasattr(dist, 'define_macros'):
+                dist.define_macros = []
+            dist.define_macros.extend(macros)
+        else:
+            self.define_macros.extend(macros)
+
+
+    def add_include_dirs(self,*paths):
+        """Add paths to configuration include directories.
+
+        Add the given sequence of paths to the beginning of the include_dirs
+        list. This list will be visible to all extension modules of the
+        current package.
+        """
+        include_dirs = self.paths(paths)
+        dist = self.get_distribution()
+        if dist is not None:
+            if dist.include_dirs is None:
+                dist.include_dirs = []
+            dist.include_dirs.extend(include_dirs)
+        else:
+            self.include_dirs.extend(include_dirs)
+
+    def add_headers(self,*files):
+        """Add installable headers to configuration.
+
+        Add the given sequence of files to the beginning of the headers list.
+        By default, headers will be installed under the
+        <python include>/<self.name.replace('.','/')>/ directory. If an item
+        of files is a tuple, then its first argument specifies the actual
+        installation location relative to the <python include> path.
+
+        Parameters
+        ----------
+        files : str or seq
+            Argument(s) can be either:
+
+                * 2-sequence (<includedir suffix>, <path to header file(s)>)
+                * path(s) to header file(s) where python includedir suffix will
+                  default to package name.
+        """
+        headers = []
+        for path in files:
+            if is_string(path):
+                [headers.append((self.name, p)) for p in self.paths(path)]
+            else:
+                if not isinstance(path, (tuple, list)) or len(path) != 2:
+                    raise TypeError(repr(path))
+                [headers.append((path[0], p)) for p in self.paths(path[1])]
+        dist = self.get_distribution()
+        if dist is not None:
+            if dist.headers is None:
+                dist.headers = []
+            dist.headers.extend(headers)
+        else:
+            self.headers.extend(headers)
+
+    def paths(self,*paths,**kws):
+        """Apply glob to paths and prepend local_path if needed.
+
+        Applies glob.glob(...) to each path in the sequence (if needed) and
+        prepends the local_path if needed. Because this is called on all
+        source lists, this allows wildcard characters to be specified in lists
+        of sources for extension modules and libraries and scripts and allows
+        path-names to be relative to the source directory.
+
+        """
+        include_non_existing = kws.get('include_non_existing', True)
+        return gpaths(paths,
+                      local_path = self.local_path,
+                      include_non_existing=include_non_existing)
+
+    def _fix_paths_dict(self, kw):
+        for k in kw.keys():
+            v = kw[k]
+            if k in ['sources', 'depends', 'include_dirs', 'library_dirs',
+                     'module_dirs', 'extra_objects']:
+                new_v = self.paths(v)
+                kw[k] = new_v
+
+    def add_extension(self,name,sources,**kw):
+        """Add extension to configuration.
+
+        Create and add an Extension instance to the ext_modules list. This
+        method also takes the following optional keyword arguments that are
+        passed on to the Extension constructor.
+
+        Parameters
+        ----------
+        name : str
+            name of the extension
+        sources : seq
+            list of the sources. The list of sources may contain functions
+            (called source generators) which must take an extension instance
+            and a build directory as inputs and return a source file or list of
+            source files or None. If None is returned then no sources are
+            generated. If the Extension instance has no sources after
+            processing all source generators, then no extension module is
+            built.
+        include_dirs :
+        define_macros :
+        undef_macros :
+        library_dirs :
+        libraries :
+        runtime_library_dirs :
+        extra_objects :
+        extra_compile_args :
+        extra_link_args :
+        extra_f77_compile_args :
+        extra_f90_compile_args :
+        export_symbols :
+        swig_opts :
+        depends :
+            The depends list contains paths to files or directories that the
+            sources of the extension module depend on. If any path in the
+            depends list is newer than the extension module, then the module
+            will be rebuilt.
+        language :
+        f2py_options :
+        module_dirs :
+        extra_info : dict or list
+            dict or list of dict of keywords to be appended to keywords.
+
+        Notes
+        -----
+        The self.paths(...) method is applied to all lists that may contain
+        paths.
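+
+        Examples
+        --------
+        A hypothetical extension built from one local C source (the names
+        here are illustrative, not from an actual package):
+
+        >>> config.add_extension('_helper', sources=['_helpermodule.c'],
+        ...                      libraries=['m'])  #doctest: +SKIP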
+ """ + ext_args = copy.copy(kw) + ext_args['name'] = dot_join(self.name, name) + ext_args['sources'] = sources + + if 'extra_info' in ext_args: + extra_info = ext_args['extra_info'] + del ext_args['extra_info'] + if isinstance(extra_info, dict): + extra_info = [extra_info] + for info in extra_info: + assert isinstance(info, dict), repr(info) + dict_append(ext_args,**info) + + self._fix_paths_dict(ext_args) + + # Resolve out-of-tree dependencies + libraries = ext_args.get('libraries', []) + libnames = [] + ext_args['libraries'] = [] + for libname in libraries: + if isinstance(libname, tuple): + self._fix_paths_dict(libname[1]) + + # Handle library names of the form libname@relative/path/to/library + if '@' in libname: + lname, lpath = libname.split('@', 1) + lpath = os.path.abspath(njoin(self.local_path, lpath)) + if os.path.isdir(lpath): + c = self.get_subpackage(None, lpath, + caller_level = 2) + if isinstance(c, Configuration): + c = c.todict() + for l in [l[0] for l in c.get('libraries', [])]: + llname = l.split('__OF__', 1)[0] + if llname == lname: + c.pop('name', None) + dict_append(ext_args,**c) + break + continue + libnames.append(libname) + + ext_args['libraries'] = libnames + ext_args['libraries'] + ext_args['define_macros'] = \ + self.define_macros + ext_args.get('define_macros', []) + + from numpy.distutils.core import Extension + ext = Extension(**ext_args) + self.ext_modules.append(ext) + + dist = self.get_distribution() + if dist is not None: + self.warn('distutils distribution has been initialized,'\ + ' it may be too late to add an extension '+name) + return ext + + def add_library(self,name,sources,**build_info): + """ + Add library to configuration. + + Parameters + ---------- + name : str + Name of the extension. + sources : sequence + List of the sources. The list of sources may contain functions + (called source generators) which must take an extension instance + and a build directory as inputs and return a source file or list of + source files or None. If None is returned then no sources are + generated. If the Extension instance has no sources after + processing all source generators, then no extension module is + built. + build_info : dict, optional + The following keys are allowed: + + * depends + * macros + * include_dirs + * extra_compiler_args + * extra_f77_compile_args + * extra_f90_compile_args + * f2py_options + * language + + """ + self._add_library(name, sources, None, build_info) + + dist = self.get_distribution() + if dist is not None: + self.warn('distutils distribution has been initialized,'\ + ' it may be too late to add a library '+ name) + + def _add_library(self, name, sources, install_dir, build_info): + """Common implementation for add_library and add_installed_library. Do + not use directly""" + build_info = copy.copy(build_info) + build_info['sources'] = sources + + # Sometimes, depends is not set up to an empty list by default, and if + # depends is not given to add_library, distutils barfs (#1134) + if not 'depends' in build_info: + build_info['depends'] = [] + + self._fix_paths_dict(build_info) + + # Add to libraries list so that it is build with build_clib + self.libraries.append((name, build_info)) + + def add_installed_library(self, name, sources, install_dir, build_info=None): + """ + Similar to add_library, but the specified library is installed. 
+
+        Most C libraries used with `distutils` are only used to build python
+        extensions, but libraries built through this method will be installed
+        so that they can be reused by third-party packages.
+
+        Parameters
+        ----------
+        name : str
+            Name of the installed library.
+        sources : sequence
+            List of the library's source files. See `add_library` for details.
+        install_dir : str
+            Path to install the library, relative to the current sub-package.
+        build_info : dict, optional
+            The following keys are allowed:
+
+                * depends
+                * macros
+                * include_dirs
+                * extra_compiler_args
+                * extra_f77_compile_args
+                * extra_f90_compile_args
+                * f2py_options
+                * language
+
+        Returns
+        -------
+        None
+
+        See Also
+        --------
+        add_library, add_npy_pkg_config, get_info
+
+        Notes
+        -----
+        The best way to encode the options required to link against the
+        specified C libraries is to use a "libname.ini" file, and use
+        `get_info` to retrieve the required options (see `add_npy_pkg_config`
+        for more information).
+
+        """
+        if not build_info:
+            build_info = {}
+
+        install_dir = os.path.join(self.package_path, install_dir)
+        self._add_library(name, sources, install_dir, build_info)
+        self.installed_libraries.append(InstallableLib(name, build_info, install_dir))
+
+    def add_npy_pkg_config(self, template, install_dir, subst_dict=None):
+        """
+        Generate and install a npy-pkg config file from a template.
+
+        The config file generated from `template` is installed in the
+        given install directory, using `subst_dict` for variable substitution.
+
+        Parameters
+        ----------
+        template : str
+            The path of the template, relative to the current package path.
+        install_dir : str
+            Where to install the npy-pkg config file, relative to the current
+            package path.
+        subst_dict : dict, optional
+            If given, any string of the form ``@key@`` will be replaced by
+            ``subst_dict[key]`` in the template file when installed. The install
+            prefix is always available through the variable ``@prefix@``, since the
+            install prefix is not easy to get reliably from setup.py.
+
+        See Also
+        --------
+        add_installed_library, get_info
+
+        Notes
+        -----
+        This works for both standard installs and in-place builds, i.e. the
+        ``@prefix@`` refers to the source directory for in-place builds.
+
+        Examples
+        --------
+        ::
+
+            config.add_npy_pkg_config('foo.ini.in', 'lib', {'foo': bar})
+
+        Assuming the foo.ini.in file has the following content::
+
+            [meta]
+            Name=@foo@
+            Version=1.0
+            Description=dummy description
+
+            [default]
+            Cflags=-I@prefix@/include
+            Libs=
+
+        The generated file will have the following content::
+
+            [meta]
+            Name=bar
+            Version=1.0
+            Description=dummy description
+
+            [default]
+            Cflags=-Iprefix_dir/include
+            Libs=
+
+        and will be installed as foo.ini in the 'lib' subpath.
+
+        When cross-compiling with numpy distutils, it might be necessary to
+        use modified npy-pkg-config files. Using the default/generated files
+        will link with the host libraries (i.e. libnpymath.a). For
+        cross-compilation you of course need to link with target libraries,
+        while using the host Python installation.
+
+        You can copy out the numpy/core/lib/npy-pkg-config directory, add a
+        pkgdir value to the .ini files and set the NPY_PKG_CONFIG_PATH
+        environment variable to point to the directory with the modified
+        npy-pkg-config files.
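+
+        For example (a sketch; the directory below is hypothetical)::
+
+            os.environ['NPY_PKG_CONFIG_PATH'] = '/opt/cross/npy-pkg-config'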
+ + Example npymath.ini modified for cross-compilation:: + + [meta] + Name=npymath + Description=Portable, core math library implementing C99 standard + Version=0.1 + + [variables] + pkgname=numpy.core + pkgdir=/build/arm-linux-gnueabi/sysroot/usr/lib/python3.7/site-packages/numpy/core + prefix=${pkgdir} + libdir=${prefix}/lib + includedir=${prefix}/include + + [default] + Libs=-L${libdir} -lnpymath + Cflags=-I${includedir} + Requires=mlib + + [msvc] + Libs=/LIBPATH:${libdir} npymath.lib + Cflags=/INCLUDE:${includedir} + Requires=mlib + + """ + if subst_dict is None: + subst_dict = {} + template = os.path.join(self.package_path, template) + + if self.name in self.installed_pkg_config: + self.installed_pkg_config[self.name].append((template, install_dir, + subst_dict)) + else: + self.installed_pkg_config[self.name] = [(template, install_dir, + subst_dict)] + + + def add_scripts(self,*files): + """Add scripts to configuration. + + Add the sequence of files to the beginning of the scripts list. + Scripts will be installed under the /bin/ directory. + + """ + scripts = self.paths(files) + dist = self.get_distribution() + if dist is not None: + if dist.scripts is None: + dist.scripts = [] + dist.scripts.extend(scripts) + else: + self.scripts.extend(scripts) + + def dict_append(self,**dict): + for key in self.list_keys: + a = getattr(self, key) + a.extend(dict.get(key, [])) + for key in self.dict_keys: + a = getattr(self, key) + a.update(dict.get(key, {})) + known_keys = self.list_keys + self.dict_keys + self.extra_keys + for key in dict.keys(): + if key not in known_keys: + a = getattr(self, key, None) + if a and a==dict[key]: continue + self.warn('Inheriting attribute %r=%r from %r' \ + % (key, dict[key], dict.get('name', '?'))) + setattr(self, key, dict[key]) + self.extra_keys.append(key) + elif key in self.extra_keys: + self.info('Ignoring attempt to set %r (from %r to %r)' \ + % (key, getattr(self, key), dict[key])) + elif key in known_keys: + # key is already processed above + pass + else: + raise ValueError("Don't know about key=%r" % (key)) + + def __str__(self): + from pprint import pformat + known_keys = self.list_keys + self.dict_keys + self.extra_keys + s = '<'+5*'-' + '\n' + s += 'Configuration of '+self.name+':\n' + known_keys.sort() + for k in known_keys: + a = getattr(self, k, None) + if a: + s += '%s = %s\n' % (k, pformat(a)) + s += 5*'-' + '>' + return s + + def get_config_cmd(self): + """ + Returns the numpy.distutils config command instance. + """ + cmd = get_cmd('config') + cmd.ensure_finalized() + cmd.dump_source = 0 + cmd.noisy = 0 + old_path = os.environ.get('PATH') + if old_path: + path = os.pathsep.join(['.', old_path]) + os.environ['PATH'] = path + return cmd + + def get_build_temp_dir(self): + """ + Return a path to a temporary directory where temporary files should be + placed. + """ + cmd = get_cmd('build') + cmd.ensure_finalized() + return cmd.build_temp + + def have_f77c(self): + """Check for availability of Fortran 77 compiler. + + Use it inside source generating function to ensure that + setup distribution instance has been initialized. + + Notes + ----- + True if a Fortran 77 compiler is available (because a simple Fortran 77 + code was able to be compiled successfully). + """ + simple_fortran_subroutine = ''' + subroutine simple + end + ''' + config_cmd = self.get_config_cmd() + flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f77') + return flag + + def have_f90c(self): + """Check for availability of Fortran 90 compiler. 
+
+        Use it inside source generating function to ensure that
+        setup distribution instance has been initialized.
+
+        Notes
+        -----
+        True if a Fortran 90 compiler is available (because a simple Fortran
+        90 code was able to be compiled successfully)
+        """
+        simple_fortran_subroutine = '''
+        subroutine simple
+        end
+        '''
+        config_cmd = self.get_config_cmd()
+        flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f90')
+        return flag
+
+    def append_to(self, extlib):
+        """Append libraries, include_dirs to extension or library item.
+        """
+        if is_sequence(extlib):
+            lib_name, build_info = extlib
+            dict_append(build_info,
+                        libraries=self.libraries,
+                        include_dirs=self.include_dirs)
+        else:
+            from numpy.distutils.core import Extension
+            assert isinstance(extlib, Extension), repr(extlib)
+            extlib.libraries.extend(self.libraries)
+            extlib.include_dirs.extend(self.include_dirs)
+
+    def _get_svn_revision(self, path):
+        """Return path's SVN revision number.
+        """
+        try:
+            output = subprocess.check_output(['svnversion'], cwd=path)
+        except (subprocess.CalledProcessError, OSError):
+            pass
+        else:
+            m = re.match(rb'(?P<revision>\d+)', output)
+            if m:
+                return int(m.group('revision'))
+
+        if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK', None):
+            entries = njoin(path, '_svn', 'entries')
+        else:
+            entries = njoin(path, '.svn', 'entries')
+        if os.path.isfile(entries):
+            with open(entries) as f:
+                fstr = f.read()
+            if fstr[:5] == '<?xml':  # pre 1.4
+                m = re.search(r'revision="(?P<revision>\d+)"', fstr)
+                if m:
+                    return int(m.group('revision'))
+            else:  # non-xml entries file --- check to be sure that
+                m = re.search(r'dir[\n\r]+(?P<revision>\d+)', fstr)
+                if m:
+                    return int(m.group('revision'))
+        return None
+
+    def _get_hg_revision(self, path):
+        """Return path's Mercurial revision number.
+        """
+        try:
+            output = subprocess.check_output(
+                ['hg', 'identify', '--num'], cwd=path)
+        except (subprocess.CalledProcessError, OSError):
+            pass
+        else:
+            m = re.match(rb'(?P<revision>\d+)', output)
+            if m:
+                return int(m.group('revision'))
+
+        branch_fn = njoin(path, '.hg', 'branch')
+        branch_cache_fn = njoin(path, '.hg', 'branch.cache')
+
+        if os.path.isfile(branch_fn):
+            branch0 = None
+            with open(branch_fn) as f:
+                revision0 = f.read().strip()
+
+            branch_map = {}
+            with open(branch_cache_fn, 'r') as f:
+                for line in f:
+                    branch1, revision1 = line.split()[:2]
+                    if revision1==revision0:
+                        branch0 = branch1
+                    try:
+                        revision1 = int(revision1)
+                    except ValueError:
+                        continue
+                    branch_map[branch1] = revision1
+
+            return branch_map.get(branch0)
+
+        return None
+
+
+    def get_version(self, version_file=None, version_variable=None):
+        """Try to get version string of a package.
+
+        Return a version string of the current package or None if the version
+        information could not be detected.
+
+        Notes
+        -----
+        This method scans files named
+        __version__.py, <packagename>_version.py, version.py, and
+        __svn_version__.py for string variables version, __version__, and
+        <packagename>_version, until a version number is found.
+        """
+        version = getattr(self, 'version', None)
+        if version is not None:
+            return version
+
+        # Get version from version file.
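+        # The loop below imports each candidate file as a throwaway module
+        # and reads the candidate variables off it in order, stopping at the
+        # first one that is set.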
+        if version_file is None:
+            files = ['__version__.py',
+                     self.name.split('.')[-1]+'_version.py',
+                     'version.py',
+                     '__svn_version__.py',
+                     '__hg_version__.py']
+        else:
+            files = [version_file]
+        if version_variable is None:
+            version_vars = ['version',
+                            '__version__',
+                            self.name.split('.')[-1]+'_version']
+        else:
+            version_vars = [version_variable]
+        for f in files:
+            fn = njoin(self.local_path, f)
+            if os.path.isfile(fn):
+                info = ('.py', 'U', 1)
+                name = os.path.splitext(os.path.basename(fn))[0]
+                n = dot_join(self.name, name)
+                try:
+                    version_module = npy_load_module('_'.join(n.split('.')),
+                                                     fn, info)
+                except ImportError as e:
+                    self.warn(str(e))
+                    version_module = None
+                if version_module is None:
+                    continue
+
+                for a in version_vars:
+                    version = getattr(version_module, a, None)
+                    if version is not None:
+                        break
+
+                # Try the versioneer module
+                try:
+                    version = version_module.get_versions()['version']
+                except AttributeError:
+                    pass
+
+                if version is not None:
+                    break
+
+        if version is not None:
+            self.version = version
+            return version
+
+        # Get version as SVN or Mercurial revision number
+        revision = self._get_svn_revision(self.local_path)
+        if revision is None:
+            revision = self._get_hg_revision(self.local_path)
+
+        if revision is not None:
+            version = str(revision)
+            self.version = version
+
+        return version
+
+    def make_svn_version_py(self, delete=True):
+        """Appends a data function to the data_files list that will generate
+        a __svn_version__.py file in the current package directory.
+
+        Generate the package __svn_version__.py file from the SVN revision
+        number. It will be removed after python exits, but will be available
+        when sdist, etc. commands are executed.
+
+        Notes
+        -----
+        If __svn_version__.py existed before, nothing is done.
+
+        This is intended for working with source directories that are
+        in an SVN repository.
+        """
+        target = njoin(self.local_path, '__svn_version__.py')
+        revision = self._get_svn_revision(self.local_path)
+        if os.path.isfile(target) or revision is None:
+            return
+        else:
+            def generate_svn_version_py():
+                if not os.path.isfile(target):
+                    version = str(revision)
+                    self.info('Creating %s (version=%r)' % (target, version))
+                    with open(target, 'w') as f:
+                        f.write('version = %r\n' % (version))
+
+                def rm_file(f=target, p=self.info):
+                    if delete:
+                        try: os.remove(f); p('removed '+f)
+                        except OSError: pass
+                        try: os.remove(f+'c'); p('removed '+f+'c')
+                        except OSError: pass
+
+                atexit.register(rm_file)
+
+                return target
+
+            self.add_data_files(('', generate_svn_version_py()))
+
+    def make_hg_version_py(self, delete=True):
+        """Appends a data function to the data_files list that will generate
+        a __hg_version__.py file in the current package directory.
+
+        Generate the package __hg_version__.py file from the Mercurial
+        revision. It will be removed after python exits, but will be available
+        when sdist, etc. commands are executed.
+
+        Notes
+        -----
+        If __hg_version__.py existed before, nothing is done.
+
+        This is intended for working with source directories that are
+        in a Mercurial repository.
+ """ + target = njoin(self.local_path, '__hg_version__.py') + revision = self._get_hg_revision(self.local_path) + if os.path.isfile(target) or revision is None: + return + else: + def generate_hg_version_py(): + if not os.path.isfile(target): + version = str(revision) + self.info('Creating %s (version=%r)' % (target, version)) + with open(target, 'w') as f: + f.write('version = %r\n' % (version)) + + def rm_file(f=target,p=self.info): + if delete: + try: os.remove(f); p('removed '+f) + except OSError: pass + try: os.remove(f+'c'); p('removed '+f+'c') + except OSError: pass + + atexit.register(rm_file) + + return target + + self.add_data_files(('', generate_hg_version_py())) + + def make_config_py(self,name='__config__'): + """Generate package __config__.py file containing system_info + information used during building the package. + + This file is installed to the + package installation directory. + + """ + self.py_modules.append((self.name, name, generate_config_py)) + + def get_info(self,*names): + """Get resources information. + + Return information (from system_info.get_info) for all of the names in + the argument list in a single dictionary. + """ + from .system_info import get_info, dict_append + info_dict = {} + for a in names: + dict_append(info_dict,**get_info(a)) + return info_dict + + +def get_cmd(cmdname, _cache={}): + if cmdname not in _cache: + import distutils.core + dist = distutils.core._setup_distribution + if dist is None: + from distutils.errors import DistutilsInternalError + raise DistutilsInternalError( + 'setup distribution instance not initialized') + cmd = dist.get_command_obj(cmdname) + _cache[cmdname] = cmd + return _cache[cmdname] + +def get_numpy_include_dirs(): + # numpy_include_dirs are set by numpy/core/setup.py, otherwise [] + include_dirs = Configuration.numpy_include_dirs[:] + if not include_dirs: + import numpy + include_dirs = [ numpy.get_include() ] + # else running numpy/core/setup.py + return include_dirs + +def get_npy_pkg_dir(): + """Return the path where to find the npy-pkg-config directory. + + If the NPY_PKG_CONFIG_PATH environment variable is set, the value of that + is returned. Otherwise, a path inside the location of the numpy module is + returned. + + The NPY_PKG_CONFIG_PATH can be useful when cross-compiling, maintaining + customized npy-pkg-config .ini files for the cross-compilation + environment, and using them when cross-compiling. + + """ + d = os.environ.get('NPY_PKG_CONFIG_PATH') + if d is not None: + return d + spec = importlib.util.find_spec('numpy') + d = os.path.join(os.path.dirname(spec.origin), + 'core', 'lib', 'npy-pkg-config') + return d + +def get_pkg_info(pkgname, dirs=None): + """ + Return library info for the given package. + + Parameters + ---------- + pkgname : str + Name of the package (should match the name of the .ini file, without + the extension, e.g. foo for the file foo.ini). + dirs : sequence, optional + If given, should be a sequence of additional directories where to look + for npy-pkg-config files. Those directories are searched prior to the + NumPy directory. + + Returns + ------- + pkginfo : class instance + The `LibraryInfo` instance containing the build information. + + Raises + ------ + PkgNotFound + If the package is not found. 
+
+    See Also
+    --------
+    Configuration.add_npy_pkg_config, Configuration.add_installed_library,
+    get_info
+
+    """
+    from numpy.distutils.npy_pkg_config import read_config
+
+    if dirs:
+        dirs.append(get_npy_pkg_dir())
+    else:
+        dirs = [get_npy_pkg_dir()]
+    return read_config(pkgname, dirs)
+
+def get_info(pkgname, dirs=None):
+    """
+    Return an info dict for a given C library.
+
+    The info dict contains the necessary options to use the C library.
+
+    Parameters
+    ----------
+    pkgname : str
+        Name of the package (should match the name of the .ini file, without
+        the extension, e.g. foo for the file foo.ini).
+    dirs : sequence, optional
+        If given, should be a sequence of additional directories where to look
+        for npy-pkg-config files. Those directories are searched prior to the
+        NumPy directory.
+
+    Returns
+    -------
+    info : dict
+        The dictionary with build information.
+
+    Raises
+    ------
+    PkgNotFound
+        If the package is not found.
+
+    See Also
+    --------
+    Configuration.add_npy_pkg_config, Configuration.add_installed_library,
+    get_pkg_info
+
+    Examples
+    --------
+    To get the necessary information for the npymath library from NumPy:
+
+    >>> npymath_info = np.distutils.misc_util.get_info('npymath')
+    >>> npymath_info #doctest: +SKIP
+    {'define_macros': [], 'libraries': ['npymath'], 'library_dirs':
+    ['.../numpy/core/lib'], 'include_dirs': ['.../numpy/core/include']}
+
+    This info dict can then be used as input to a `Configuration` instance::
+
+      config.add_extension('foo', sources=['foo.c'], extra_info=npymath_info)
+
+    """
+    from numpy.distutils.npy_pkg_config import parse_flags
+    pkg_info = get_pkg_info(pkgname, dirs)
+
+    # Translate LibraryInfo instance into a build_info dict
+    info = parse_flags(pkg_info.cflags())
+    for k, v in parse_flags(pkg_info.libs()).items():
+        info[k].extend(v)
+
+    # add_extension's extra_info argument is strict about its format
+    info['define_macros'] = info['macros']
+    del info['macros']
+    del info['ignored']
+
+    return info
+
+def is_bootstrapping():
+    import builtins
+
+    try:
+        builtins.__NUMPY_SETUP__
+        return True
+    except AttributeError:
+        return False
+
+
+#########################
+
+def default_config_dict(name = None, parent_name = None, local_path=None):
+    """Return a configuration dictionary for usage in
+    configuration() function defined in file setup_<name>.py.
+ """ + import warnings + warnings.warn('Use Configuration(%r,%r,top_path=%r) instead of '\ + 'deprecated default_config_dict(%r,%r,%r)' + % (name, parent_name, local_path, + name, parent_name, local_path, + ), stacklevel=2) + c = Configuration(name, parent_name, local_path) + return c.todict() + + +def dict_append(d, **kws): + for k, v in kws.items(): + if k in d: + ov = d[k] + if isinstance(ov, str): + d[k] = v + else: + d[k].extend(v) + else: + d[k] = v + +def appendpath(prefix, path): + if os.path.sep != '/': + prefix = prefix.replace('/', os.path.sep) + path = path.replace('/', os.path.sep) + drive = '' + if os.path.isabs(path): + drive = os.path.splitdrive(prefix)[0] + absprefix = os.path.splitdrive(os.path.abspath(prefix))[1] + pathdrive, path = os.path.splitdrive(path) + d = os.path.commonprefix([absprefix, path]) + if os.path.join(absprefix[:len(d)], absprefix[len(d):]) != absprefix \ + or os.path.join(path[:len(d)], path[len(d):]) != path: + # Handle invalid paths + d = os.path.dirname(d) + subpath = path[len(d):] + if os.path.isabs(subpath): + subpath = subpath[1:] + else: + subpath = path + return os.path.normpath(njoin(drive + prefix, subpath)) + +def generate_config_py(target): + """Generate config.py file containing system_info information + used during building the package. + + Usage: + config['py_modules'].append((packagename, '__config__',generate_config_py)) + """ + from numpy.distutils.system_info import system_info + from distutils.dir_util import mkpath + mkpath(os.path.dirname(target)) + with open(target, 'w') as f: + f.write('# This file is generated by numpy\'s %s\n' % (os.path.basename(sys.argv[0]))) + f.write('# It contains system_info results at the time of building this package.\n') + f.write('__all__ = ["get_info","show"]\n\n') + + # For gfortran+msvc combination, extra shared libraries may exist + f.write(textwrap.dedent(""" + import os + import sys + + extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs') + + if sys.platform == 'win32' and os.path.isdir(extra_dll_dir): + if sys.version_info >= (3, 8): + os.add_dll_directory(extra_dll_dir) + else: + os.environ.setdefault('PATH', '') + os.environ['PATH'] += os.pathsep + extra_dll_dir + + """)) + + for k, i in system_info.saved_results.items(): + f.write('%s=%r\n' % (k, i)) + f.write(textwrap.dedent(r''' + def get_info(name): + g = globals() + return g.get(name, g.get(name + "_info", {})) + + def show(): + """ + Show libraries in the system on which NumPy was built. + + Print information about various resources (libraries, library + directories, include directories, etc.) in the system on which + NumPy was built. + + See Also + -------- + get_include : Returns the directory containing NumPy C + header files. + + Notes + ----- + Classes specifying the information to be printed are defined + in the `numpy.distutils.system_info` module. 
+ + Information may include: + + * ``language``: language used to write the libraries (mostly + C or f77) + * ``libraries``: names of libraries found in the system + * ``library_dirs``: directories containing the libraries + * ``include_dirs``: directories containing library header files + * ``src_dirs``: directories containing library source files + * ``define_macros``: preprocessor macros used by + ``distutils.setup`` + * ``baseline``: minimum CPU features required + * ``found``: dispatched features supported in the system + * ``not found``: dispatched features that are not supported + in the system + + Examples + -------- + >>> import numpy as np + >>> np.show_config() + blas_opt_info: + language = c + define_macros = [('HAVE_CBLAS', None)] + libraries = ['openblas', 'openblas'] + library_dirs = ['/usr/local/lib'] + """ + from numpy.core._multiarray_umath import ( + __cpu_features__, __cpu_baseline__, __cpu_dispatch__ + ) + for name,info_dict in globals().items(): + if name[0] == "_" or type(info_dict) is not type({}): continue + print(name + ":") + if not info_dict: + print(" NOT AVAILABLE") + for k,v in info_dict.items(): + v = str(v) + if k == "sources" and len(v) > 200: + v = v[:60] + " ...\n... " + v[-60:] + print(" %s = %s" % (k,v)) + + features_found, features_not_found = [], [] + for feature in __cpu_dispatch__: + if __cpu_features__[feature]: + features_found.append(feature) + else: + features_not_found.append(feature) + + print("Supported SIMD extensions in this NumPy install:") + print(" baseline = %s" % (','.join(__cpu_baseline__))) + print(" found = %s" % (','.join(features_found))) + print(" not found = %s" % (','.join(features_not_found))) + + ''')) + + return target + +def msvc_version(compiler): + """Return version major and minor of compiler instance if it is + MSVC, raise an exception otherwise.""" + if not compiler.compiler_type == "msvc": + raise ValueError("Compiler instance is not msvc (%s)"\ + % compiler.compiler_type) + return compiler._MSVCCompiler__version + +def get_build_architecture(): + # Importing distutils.msvccompiler triggers a warning on non-Windows + # systems, so delay the import to here. + from distutils.msvccompiler import get_build_architecture + return get_build_architecture() diff --git a/MLPY/Lib/site-packages/numpy/distutils/msvc9compiler.py b/MLPY/Lib/site-packages/numpy/distutils/msvc9compiler.py new file mode 100644 index 0000000000000000000000000000000000000000..1e44d9379dedcd799aa2897cdce3300e6796f170 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/msvc9compiler.py @@ -0,0 +1,63 @@ +import os +from distutils.msvc9compiler import MSVCCompiler as _MSVCCompiler + +from .system_info import platform_bits + + +def _merge(old, new): + """Concatenate two environment paths avoiding repeats. + + Here `old` is the environment string before the base class initialize + function is called and `new` is the string after the call. The new string + will be a fixed string if it is not obtained from the current environment, + or the same as the old string if obtained from the same environment. The aim + here is not to append the new string if it is already contained in the old + string so as to limit the growth of the environment string. + + Parameters + ---------- + old : string + Previous environment string. + new : string + New environment string. + + Returns + ------- + ret : string + Updated environment string. + + """ + if not old: + return new + if new in old: + return old + + # Neither new nor old is empty. Give old priority. 
+ return ';'.join([old, new]) + + +class MSVCCompiler(_MSVCCompiler): + def __init__(self, verbose=0, dry_run=0, force=0): + _MSVCCompiler.__init__(self, verbose, dry_run, force) + + def initialize(self, plat_name=None): + # The 'lib' and 'include' variables may be overwritten + # by MSVCCompiler.initialize, so save them for later merge. + environ_lib = os.getenv('lib') + environ_include = os.getenv('include') + _MSVCCompiler.initialize(self, plat_name) + + # Merge current and previous values of 'lib' and 'include' + os.environ['lib'] = _merge(environ_lib, os.environ['lib']) + os.environ['include'] = _merge(environ_include, os.environ['include']) + + # msvc9 building for 32 bits requires SSE2 to work around a + # compiler bug. + if platform_bits == 32: + self.compile_options += ['/arch:SSE2'] + self.compile_options_debug += ['/arch:SSE2'] + + def manifest_setup_ldargs(self, output_filename, build_temp, ld_args): + ld_args.append('/MANIFEST') + _MSVCCompiler.manifest_setup_ldargs(self, output_filename, + build_temp, ld_args) diff --git a/MLPY/Lib/site-packages/numpy/distutils/msvccompiler.py b/MLPY/Lib/site-packages/numpy/distutils/msvccompiler.py new file mode 100644 index 0000000000000000000000000000000000000000..17e8ba07453955f09097163d5ed3ce49077d9d2c --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/msvccompiler.py @@ -0,0 +1,58 @@ +import os +from distutils.msvccompiler import MSVCCompiler as _MSVCCompiler + +from .system_info import platform_bits + + +def _merge(old, new): + """Concatenate two environment paths avoiding repeats. + + Here `old` is the environment string before the base class initialize + function is called and `new` is the string after the call. The new string + will be a fixed string if it is not obtained from the current environment, + or the same as the old string if obtained from the same environment. The aim + here is not to append the new string if it is already contained in the old + string so as to limit the growth of the environment string. + + Parameters + ---------- + old : string + Previous environment string. + new : string + New environment string. + + Returns + ------- + ret : string + Updated environment string. + + """ + if new in old: + return old + if not old: + return new + + # Neither new nor old is empty. Give old priority. + return ';'.join([old, new]) + + +class MSVCCompiler(_MSVCCompiler): + def __init__(self, verbose=0, dry_run=0, force=0): + _MSVCCompiler.__init__(self, verbose, dry_run, force) + + def initialize(self): + # The 'lib' and 'include' variables may be overwritten + # by MSVCCompiler.initialize, so save them for later merge. + environ_lib = os.getenv('lib', '') + environ_include = os.getenv('include', '') + _MSVCCompiler.initialize(self) + + # Merge current and previous values of 'lib' and 'include' + os.environ['lib'] = _merge(environ_lib, os.environ['lib']) + os.environ['include'] = _merge(environ_include, os.environ['include']) + + # msvc9 building for 32 bits requires SSE2 to work around a + # compiler bug. 
+ if platform_bits == 32: + self.compile_options += ['/arch:SSE2'] + self.compile_options_debug += ['/arch:SSE2'] diff --git a/MLPY/Lib/site-packages/numpy/distutils/npy_pkg_config.py b/MLPY/Lib/site-packages/numpy/distutils/npy_pkg_config.py new file mode 100644 index 0000000000000000000000000000000000000000..6655ae3937bac31797283498d26676c231ad98b1 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/npy_pkg_config.py @@ -0,0 +1,437 @@ +import sys +import re +import os + +from configparser import RawConfigParser + +__all__ = ['FormatError', 'PkgNotFound', 'LibraryInfo', 'VariableSet', + 'read_config', 'parse_flags'] + +_VAR = re.compile(r'\$\{([a-zA-Z0-9_-]+)\}') + +class FormatError(IOError): + """ + Exception thrown when there is a problem parsing a configuration file. + + """ + def __init__(self, msg): + self.msg = msg + + def __str__(self): + return self.msg + +class PkgNotFound(IOError): + """Exception raised when a package can not be located.""" + def __init__(self, msg): + self.msg = msg + + def __str__(self): + return self.msg + +def parse_flags(line): + """ + Parse a line from a config file containing compile flags. + + Parameters + ---------- + line : str + A single line containing one or more compile flags. + + Returns + ------- + d : dict + Dictionary of parsed flags, split into relevant categories. + These categories are the keys of `d`: + + * 'include_dirs' + * 'library_dirs' + * 'libraries' + * 'macros' + * 'ignored' + + """ + d = {'include_dirs': [], 'library_dirs': [], 'libraries': [], + 'macros': [], 'ignored': []} + + flags = (' ' + line).split(' -') + for flag in flags: + flag = '-' + flag + if len(flag) > 0: + if flag.startswith('-I'): + d['include_dirs'].append(flag[2:].strip()) + elif flag.startswith('-L'): + d['library_dirs'].append(flag[2:].strip()) + elif flag.startswith('-l'): + d['libraries'].append(flag[2:].strip()) + elif flag.startswith('-D'): + d['macros'].append(flag[2:].strip()) + else: + d['ignored'].append(flag) + + return d + +def _escape_backslash(val): + return val.replace('\\', '\\\\') + +class LibraryInfo: + """ + Object containing build information about a library. + + Parameters + ---------- + name : str + The library name. + description : str + Description of the library. + version : str + Version string. + sections : dict + The sections of the configuration file for the library. The keys are + the section headers, the values the text under each header. + vars : class instance + A `VariableSet` instance, which contains ``(name, value)`` pairs for + variables defined in the configuration file for the library. + requires : sequence, optional + The required libraries for the library to be installed. + + Notes + ----- + All input parameters (except "sections" which is a method) are available as + attributes of the same name. + + """ + def __init__(self, name, description, version, sections, vars, requires=None): + self.name = name + self.description = description + if requires: + self.requires = requires + else: + self.requires = [] + self.version = version + self._sections = sections + self.vars = vars + + def sections(self): + """ + Return the section headers of the config file. + + Parameters + ---------- + None + + Returns + ------- + keys : list of str + The list of section headers. 
+ + """ + return list(self._sections.keys()) + + def cflags(self, section="default"): + val = self.vars.interpolate(self._sections[section]['cflags']) + return _escape_backslash(val) + + def libs(self, section="default"): + val = self.vars.interpolate(self._sections[section]['libs']) + return _escape_backslash(val) + + def __str__(self): + m = ['Name: %s' % self.name, 'Description: %s' % self.description] + if self.requires: + m.append('Requires:') + else: + m.append('Requires: %s' % ",".join(self.requires)) + m.append('Version: %s' % self.version) + + return "\n".join(m) + +class VariableSet: + """ + Container object for the variables defined in a config file. + + `VariableSet` can be used as a plain dictionary, with the variable names + as keys. + + Parameters + ---------- + d : dict + Dict of items in the "variables" section of the configuration file. + + """ + def __init__(self, d): + self._raw_data = dict([(k, v) for k, v in d.items()]) + + self._re = {} + self._re_sub = {} + + self._init_parse() + + def _init_parse(self): + for k, v in self._raw_data.items(): + self._init_parse_var(k, v) + + def _init_parse_var(self, name, value): + self._re[name] = re.compile(r'\$\{%s\}' % name) + self._re_sub[name] = value + + def interpolate(self, value): + # Brute force: we keep interpolating until there is no '${var}' anymore + # or until interpolated string is equal to input string + def _interpolate(value): + for k in self._re.keys(): + value = self._re[k].sub(self._re_sub[k], value) + return value + while _VAR.search(value): + nvalue = _interpolate(value) + if nvalue == value: + break + value = nvalue + + return value + + def variables(self): + """ + Return the list of variable names. + + Parameters + ---------- + None + + Returns + ------- + names : list of str + The names of all variables in the `VariableSet` instance. 
+ + """ + return list(self._raw_data.keys()) + + # Emulate a dict to set/get variables values + def __getitem__(self, name): + return self._raw_data[name] + + def __setitem__(self, name, value): + self._raw_data[name] = value + self._init_parse_var(name, value) + +def parse_meta(config): + if not config.has_section('meta'): + raise FormatError("No meta section found !") + + d = dict(config.items('meta')) + + for k in ['name', 'description', 'version']: + if not k in d: + raise FormatError("Option %s (section [meta]) is mandatory, " + "but not found" % k) + + if not 'requires' in d: + d['requires'] = [] + + return d + +def parse_variables(config): + if not config.has_section('variables'): + raise FormatError("No variables section found !") + + d = {} + + for name, value in config.items("variables"): + d[name] = value + + return VariableSet(d) + +def parse_sections(config): + return meta_d, r + +def pkg_to_filename(pkg_name): + return "%s.ini" % pkg_name + +def parse_config(filename, dirs=None): + if dirs: + filenames = [os.path.join(d, filename) for d in dirs] + else: + filenames = [filename] + + config = RawConfigParser() + + n = config.read(filenames) + if not len(n) >= 1: + raise PkgNotFound("Could not find file(s) %s" % str(filenames)) + + # Parse meta and variables sections + meta = parse_meta(config) + + vars = {} + if config.has_section('variables'): + for name, value in config.items("variables"): + vars[name] = _escape_backslash(value) + + # Parse "normal" sections + secs = [s for s in config.sections() if not s in ['meta', 'variables']] + sections = {} + + requires = {} + for s in secs: + d = {} + if config.has_option(s, "requires"): + requires[s] = config.get(s, 'requires') + + for name, value in config.items(s): + d[name] = value + sections[s] = d + + return meta, vars, sections, requires + +def _read_config_imp(filenames, dirs=None): + def _read_config(f): + meta, vars, sections, reqs = parse_config(f, dirs) + # recursively add sections and variables of required libraries + for rname, rvalue in reqs.items(): + nmeta, nvars, nsections, nreqs = _read_config(pkg_to_filename(rvalue)) + + # Update var dict for variables not in 'top' config file + for k, v in nvars.items(): + if not k in vars: + vars[k] = v + + # Update sec dict + for oname, ovalue in nsections[rname].items(): + if ovalue: + sections[rname][oname] += ' %s' % ovalue + + return meta, vars, sections, reqs + + meta, vars, sections, reqs = _read_config(filenames) + + # FIXME: document this. If pkgname is defined in the variables section, and + # there is no pkgdir variable defined, pkgdir is automatically defined to + # the path of pkgname. This requires the package to be imported to work + if not 'pkgdir' in vars and "pkgname" in vars: + pkgname = vars["pkgname"] + if not pkgname in sys.modules: + raise ValueError("You should import %s to get information on %s" % + (pkgname, meta["name"])) + + mod = sys.modules[pkgname] + vars["pkgdir"] = _escape_backslash(os.path.dirname(mod.__file__)) + + return LibraryInfo(name=meta["name"], description=meta["description"], + version=meta["version"], sections=sections, vars=VariableSet(vars)) + +# Trivial cache to cache LibraryInfo instances creation. To be really +# efficient, the cache should be handled in read_config, since a same file can +# be parsed many time outside LibraryInfo creation, but I doubt this will be a +# problem in practice +_CACHE = {} +def read_config(pkgname, dirs=None): + """ + Return library info for a package from its configuration file. 
+def read_config(pkgname, dirs=None):
+    """
+    Return library info for a package from its configuration file.
+
+    Parameters
+    ----------
+    pkgname : str
+        Name of the package (should match the name of the .ini file, without
+        the extension, e.g. foo for the file foo.ini).
+    dirs : sequence, optional
+        If given, should be a sequence of directories - usually including
+        the NumPy base directory - where to look for npy-pkg-config files.
+
+    Returns
+    -------
+    pkginfo : class instance
+        The `LibraryInfo` instance containing the build information.
+
+    Raises
+    ------
+    PkgNotFound
+        If the package is not found.
+
+    See Also
+    --------
+    misc_util.get_info, misc_util.get_pkg_info
+
+    Examples
+    --------
+    >>> npymath_info = np.distutils.npy_pkg_config.read_config('npymath')
+    >>> type(npymath_info)
+    <class 'numpy.distutils.npy_pkg_config.LibraryInfo'>
+    >>> print(npymath_info)
+    Name: npymath
+    Description: Portable, core math library implementing C99 standard
+    Requires:
+    Version: 0.1 #random
+
+    """
+    try:
+        return _CACHE[pkgname]
+    except KeyError:
+        v = _read_config_imp(pkg_to_filename(pkgname), dirs)
+        _CACHE[pkgname] = v
+        return v
+
+# TODO:
+#   - implement version comparison (modversion + atleast)
+
+# pkg-config simple emulator - useful for debugging, and maybe later to query
+# the system
+if __name__ == '__main__':
+    from optparse import OptionParser
+    import glob
+
+    parser = OptionParser()
+    parser.add_option("--cflags", dest="cflags", action="store_true",
+                      help="output all preprocessor and compiler flags")
+    parser.add_option("--libs", dest="libs", action="store_true",
+                      help="output all linker flags")
+    parser.add_option("--use-section", dest="section",
+                      help="use this section instead of default for options")
+    parser.add_option("--version", dest="version", action="store_true",
+                      help="output version")
+    parser.add_option("--atleast-version", dest="min_version",
+                      help="minimal version")
+    parser.add_option("--list-all", dest="list_all", action="store_true",
+                      help="list all packages found in the current directory")
+    parser.add_option("--define-variable", dest="define_variable",
+                      help="replace variable with the given value")
+
+    (options, args) = parser.parse_args(sys.argv)
+
+    if len(args) < 2:
+        raise ValueError("Expected a package name on the command line")
+
+    if options.list_all:
+        files = glob.glob("*.ini")
+        for f in files:
+            info = read_config(os.path.splitext(f)[0])
+            print("%s\t%s - %s" % (info.name, info.name, info.description))
+
+    pkg_name = args[1]
+    d = os.environ.get('NPY_PKG_CONFIG_PATH')
+    if d:
+        info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.', d])
+    else:
+        info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.'])
+
+    if options.section:
+        section = options.section
+    else:
+        section = "default"
+
+    if options.define_variable:
+        m = re.search(r'([\S]+)=([\S]+)', options.define_variable)
+        if not m:
+            raise ValueError("--define-variable option should be of "
+                             "the form --define-variable=foo=bar")
+        else:
+            name = m.group(1)
+            value = m.group(2)
+            info.vars[name] = value
+
+    if options.cflags:
+        print(info.cflags(section))
+    if options.libs:
+        print(info.libs(section))
+    if options.version:
+        print(info.version)
+    if options.min_version:
+        # NB: plain string comparison, not a real version comparison (see TODO)
+        print(info.version >= options.min_version)
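One caveat worth spelling out: as the TODO above notes, the emulator's `--atleast-version` check is a plain string comparison, which misorders version numbers:

```python
# Why the TODO above matters: lexicographic comparison is wrong for
# version strings.
assert '0.9' > '0.10'          # string order, not version order

# A numeric-aware comparison (distutils' LooseVersion, deprecated in
# newer Pythons but available alongside this module) gets it right:
from distutils.version import LooseVersion
assert LooseVersion('0.10') > LooseVersion('0.9')
```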
diff --git a/MLPY/Lib/site-packages/numpy/distutils/numpy_distribution.py b/MLPY/Lib/site-packages/numpy/distutils/numpy_distribution.py
new file mode 100644
index 0000000000000000000000000000000000000000..bbfd8895ca80fc241b4589bf3cb1a3cd0db7409d
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/numpy_distribution.py
@@ -0,0 +1,17 @@
+# XXX: Handle setuptools ?
+from distutils.core import Distribution
+
+# This class is used because we add new files (sconscripts, and so on) with the
+# scons command
+class NumpyDistribution(Distribution):
+    def __init__(self, attrs=None):
+        # A list of (sconscripts, pre_hook, post_hook, src, parent_names)
+        self.scons_data = []
+        # A list of installable libraries
+        self.installed_libraries = []
+        # A dict of pkg_config files to generate/install
+        self.installed_pkg_config = {}
+        Distribution.__init__(self, attrs)
+
+    def has_scons_scripts(self):
+        return bool(self.scons_data)
diff --git a/MLPY/Lib/site-packages/numpy/distutils/pathccompiler.py b/MLPY/Lib/site-packages/numpy/distutils/pathccompiler.py
new file mode 100644
index 0000000000000000000000000000000000000000..c4d6ce9246e19185ed7917b196ae1b61be96ca81
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/pathccompiler.py
@@ -0,0 +1,21 @@
+from distutils.unixccompiler import UnixCCompiler
+
+class PathScaleCCompiler(UnixCCompiler):
+
+    """
+    PathScale compiler compatible with a gcc-built Python.
+    """
+
+    compiler_type = 'pathcc'
+    cc_exe = 'pathcc'
+    cxx_exe = 'pathCC'
+
+    def __init__(self, verbose=0, dry_run=0, force=0):
+        UnixCCompiler.__init__(self, verbose, dry_run, force)
+        cc_compiler = self.cc_exe
+        cxx_compiler = self.cxx_exe
+        self.set_executables(compiler=cc_compiler,
+                             compiler_so=cc_compiler,
+                             compiler_cxx=cxx_compiler,
+                             linker_exe=cc_compiler,
+                             linker_so=cc_compiler + ' -shared')
diff --git a/MLPY/Lib/site-packages/numpy/distutils/setup.py b/MLPY/Lib/site-packages/numpy/distutils/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..c0dd08c324561fd959eaf6042ca0d8df3ee97dc4
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/setup.py
@@ -0,0 +1,17 @@
+#!/usr/bin/env python3
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('distutils', parent_package, top_path)
+    config.add_subpackage('command')
+    config.add_subpackage('fcompiler')
+    config.add_subpackage('tests')
+    config.add_data_files('site.cfg')
+    config.add_data_files('mingw/gfortran_vs2003_hack.c')
+    config.add_data_dir('checks')
+    config.add_data_files('*.pyi')
+    config.make_config_py()
+    return config
+
+if __name__ == '__main__':
+    from numpy.distutils.core import setup
+    setup(configuration=configuration)
diff --git a/MLPY/Lib/site-packages/numpy/distutils/system_info.py b/MLPY/Lib/site-packages/numpy/distutils/system_info.py
new file mode 100644
index 0000000000000000000000000000000000000000..639b2c6df5ae9340efff200740301a7961fbcba1
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/distutils/system_info.py
@@ -0,0 +1,3120 @@
+#!/usr/bin/env python3
+"""
+This file defines a set of system_info classes for getting
+information about various resources (libraries, library directories,
+include directories, etc.) in the system. Usage:
+    info_dict = get_info(<name>)
+  where <name> is a string 'atlas', 'x11', 'fftw', 'lapack', 'blas',
+  'lapack_src', 'blas_src', etc. For a complete list of allowed names,
+  see the definition of the get_info() function below.
+
+  The returned info_dict is a dictionary which is compatible with
+  distutils.setup keyword arguments. If info_dict == {}, then the
+  requested resource is not available (system_info could not find it).
+
+  Several *_info classes specify an environment variable to specify
+  the locations of software. When setting the corresponding environment
+  variable to 'None' then the software will be ignored, even when it
+ +Global parameters: + system_info.search_static_first - search static libraries (.a) + in precedence to shared ones (.so, .sl) if enabled. + system_info.verbosity - output the results to stdout if enabled. + +The file 'site.cfg' is looked for in + +1) Directory of main setup.py file being run. +2) Home directory of user running the setup.py file as ~/.numpy-site.cfg +3) System wide directory (location of this file...) + +The first one found is used to get system configuration options The +format is that used by ConfigParser (i.e., Windows .INI style). The +section ALL is not intended for general use. + +Appropriate defaults are used if nothing is specified. + +The order of finding the locations of resources is the following: + 1. environment variable + 2. section in site.cfg + 3. DEFAULT section in site.cfg + 4. System default search paths (see ``default_*`` variables below). +Only the first complete match is returned. + +Currently, the following classes are available, along with their section names: + + Numeric_info:Numeric + _numpy_info:Numeric + _pkg_config_info:None + accelerate_info:accelerate + agg2_info:agg2 + amd_info:amd + atlas_3_10_blas_info:atlas + atlas_3_10_blas_threads_info:atlas + atlas_3_10_info:atlas + atlas_3_10_threads_info:atlas + atlas_blas_info:atlas + atlas_blas_threads_info:atlas + atlas_info:atlas + atlas_threads_info:atlas + blas64__opt_info:ALL # usage recommended (general ILP64 BLAS, 64_ symbol suffix) + blas_ilp64_opt_info:ALL # usage recommended (general ILP64 BLAS) + blas_ilp64_plain_opt_info:ALL # usage recommended (general ILP64 BLAS, no symbol suffix) + blas_info:blas + blas_mkl_info:mkl + blas_opt_info:ALL # usage recommended + blas_src_info:blas_src + blis_info:blis + boost_python_info:boost_python + dfftw_info:fftw + dfftw_threads_info:fftw + djbfft_info:djbfft + f2py_info:ALL + fft_opt_info:ALL + fftw2_info:fftw + fftw3_info:fftw3 + fftw_info:fftw + fftw_threads_info:fftw + flame_info:flame + freetype2_info:freetype2 + gdk_2_info:gdk_2 + gdk_info:gdk + gdk_pixbuf_2_info:gdk_pixbuf_2 + gdk_pixbuf_xlib_2_info:gdk_pixbuf_xlib_2 + gdk_x11_2_info:gdk_x11_2 + gtkp_2_info:gtkp_2 + gtkp_x11_2_info:gtkp_x11_2 + lapack64__opt_info:ALL # usage recommended (general ILP64 LAPACK, 64_ symbol suffix) + lapack_atlas_3_10_info:atlas + lapack_atlas_3_10_threads_info:atlas + lapack_atlas_info:atlas + lapack_atlas_threads_info:atlas + lapack_ilp64_opt_info:ALL # usage recommended (general ILP64 LAPACK) + lapack_ilp64_plain_opt_info:ALL # usage recommended (general ILP64 LAPACK, no symbol suffix) + lapack_info:lapack + lapack_mkl_info:mkl + lapack_opt_info:ALL # usage recommended + lapack_src_info:lapack_src + mkl_info:mkl + numarray_info:numarray + numerix_info:numerix + numpy_info:numpy + openblas64__info:openblas64_ + openblas64__lapack_info:openblas64_ + openblas_clapack_info:openblas + openblas_ilp64_info:openblas_ilp64 + openblas_ilp64_lapack_info:openblas_ilp64 + openblas_info:openblas + openblas_lapack_info:openblas + sfftw_info:fftw + sfftw_threads_info:fftw + system_info:ALL + umfpack_info:umfpack + wx_info:wx + x11_info:x11 + xft_info:xft + +Note that blas_opt_info and lapack_opt_info honor the NPY_BLAS_ORDER +and NPY_LAPACK_ORDER environment variables to determine the order in which +specific BLAS and LAPACK libraries are searched for. + +This search (or autodetection) can be bypassed by defining the environment +variables NPY_BLAS_LIBS and NPY_LAPACK_LIBS, which should then contain the +exact linker flags to use (language will be set to F77). 
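A sketch of how these ordering and bypass variables would be set before running `setup.py`; every path and library name below is illustrative:

```python
# Illustrative settings for the ordering and bypass variables described
# above; the paths and library names here are made up.
import os

# Restrict the BLAS search to these implementations, in this order:
os.environ['NPY_BLAS_ORDER'] = 'openblas,blis'
# Search everything except the reference LAPACK:
os.environ['NPY_LAPACK_ORDER'] = '^lapack'

# Or bypass detection entirely with explicit linker flags:
os.environ['NPY_BLAS_LIBS'] = '-L/opt/blas/lib -lblas'
os.environ['NPY_CBLAS_LIBS'] = '-lcblas'
os.environ['NPY_LAPACK_LIBS'] = '-L/opt/lapack/lib -llapack'
```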
Building against +Netlib BLAS/LAPACK or stub files, in order to be able to switch BLAS and LAPACK +implementations at runtime. If using this to build NumPy itself, it is +recommended to also define NPY_CBLAS_LIBS (assuming your BLAS library has a +CBLAS interface) to enable CBLAS usage for matrix multiplication (unoptimized +otherwise). + +Example: +---------- +[DEFAULT] +# default section +library_dirs = /usr/lib:/usr/local/lib:/opt/lib +include_dirs = /usr/include:/usr/local/include:/opt/include +src_dirs = /usr/local/src:/opt/src +# search static libraries (.a) in preference to shared ones (.so) +search_static_first = 0 + +[fftw] +libraries = rfftw, fftw + +[atlas] +library_dirs = /usr/lib/3dnow:/usr/lib/3dnow/atlas +# for overriding the names of the atlas libraries +libraries = lapack, f77blas, cblas, atlas + +[x11] +library_dirs = /usr/X11R6/lib +include_dirs = /usr/X11R6/include +---------- + +Note that the ``libraries`` key is the default setting for libraries. + +Authors: + Pearu Peterson , February 2002 + David M. Cooke , April 2002 + +Copyright 2002 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy (BSD style) license. See LICENSE.txt that came with +this distribution for specifics. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. + +""" +import sys +import os +import re +import copy +import warnings +import subprocess +import textwrap + +from glob import glob +from functools import reduce +from configparser import NoOptionError +from configparser import RawConfigParser as ConfigParser +# It seems that some people are importing ConfigParser from here so is +# good to keep its class name. Use of RawConfigParser is needed in +# order to be able to load path names with percent in them, like +# `feature%2Fcool` which is common on git flow branch names. + +from distutils.errors import DistutilsError +from distutils.dist import Distribution +import sysconfig +from numpy.distutils import log +from distutils.util import get_platform + +from numpy.distutils.exec_command import ( + find_executable, filepath_from_subprocess_output, + ) +from numpy.distutils.misc_util import (is_sequence, is_string, + get_shared_lib_extension) +from numpy.distutils.command.config import config as cmd_config +from numpy.distutils import customized_ccompiler as _customized_ccompiler +from numpy.distutils import _shell_utils +import distutils.ccompiler +import tempfile +import shutil + +__all__ = ['system_info'] + +# Determine number of bits +import platform +_bits = {'32bit': 32, '64bit': 64} +platform_bits = _bits[platform.architecture()[0]] + + +global_compiler = None + +def customized_ccompiler(): + global global_compiler + if not global_compiler: + global_compiler = _customized_ccompiler() + return global_compiler + + +def _c_string_literal(s): + """ + Convert a python string into a literal suitable for inclusion into C code + """ + # only these three characters are forbidden in C strings + s = s.replace('\\', r'\\') + s = s.replace('"', r'\"') + s = s.replace('\n', r'\n') + return '"{}"'.format(s) + + +def libpaths(paths, bits): + """Return a list of library paths valid on 32 or 64 bit systems. + + Inputs: + paths : sequence + A sequence of strings (typically paths) + bits : int + An integer, the only valid values are 32 or 64. A ValueError exception + is raised otherwise. 
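Returning briefly to `_c_string_literal` defined above, its escaping rules can be spot-checked in isolation (re-implemented here so the snippet is self-contained):

```python
# Spot-check of _c_string_literal(): backslashes, double quotes and
# newlines are the only characters that need escaping inside a C
# string literal.
def _c_string_literal(s):
    s = s.replace('\\', r'\\')
    s = s.replace('"', r'\"')
    s = s.replace('\n', r'\n')
    return '"{}"'.format(s)

assert _c_string_literal('dir\\sub') == r'"dir\\sub"'
assert _c_string_literal('say "hi"') == r'"say \"hi\""'
assert _c_string_literal('a\nb') == r'"a\nb"'
```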
+ + Examples: + + Consider a list of directories + >>> paths = ['/usr/X11R6/lib','/usr/X11/lib','/usr/lib'] + + For a 32-bit platform, this is already valid: + >>> np.distutils.system_info.libpaths(paths,32) + ['/usr/X11R6/lib', '/usr/X11/lib', '/usr/lib'] + + On 64 bits, we prepend the '64' postfix + >>> np.distutils.system_info.libpaths(paths,64) + ['/usr/X11R6/lib64', '/usr/X11R6/lib', '/usr/X11/lib64', '/usr/X11/lib', + '/usr/lib64', '/usr/lib'] + """ + if bits not in (32, 64): + raise ValueError("Invalid bit size in libpaths: 32 or 64 only") + + # Handle 32bit case + if bits == 32: + return paths + + # Handle 64bit case + out = [] + for p in paths: + out.extend([p + '64', p]) + + return out + + +if sys.platform == 'win32': + default_lib_dirs = ['C:\\', + os.path.join(sysconfig.get_config_var('exec_prefix'), + 'libs')] + default_runtime_dirs = [] + default_include_dirs = [] + default_src_dirs = ['.'] + default_x11_lib_dirs = [] + default_x11_include_dirs = [] + _include_dirs = [ + 'include', + 'include/suitesparse', + ] + _lib_dirs = [ + 'lib', + ] + + _include_dirs = [d.replace('/', os.sep) for d in _include_dirs] + _lib_dirs = [d.replace('/', os.sep) for d in _lib_dirs] + def add_system_root(library_root): + """Add a package manager root to the include directories""" + global default_lib_dirs + global default_include_dirs + + library_root = os.path.normpath(library_root) + + default_lib_dirs.extend( + os.path.join(library_root, d) for d in _lib_dirs) + default_include_dirs.extend( + os.path.join(library_root, d) for d in _include_dirs) + + # VCpkg is the de-facto package manager on windows for C/C++ + # libraries. If it is on the PATH, then we append its paths here. + vcpkg = shutil.which('vcpkg') + if vcpkg: + vcpkg_dir = os.path.dirname(vcpkg) + if platform.architecture()[0] == '32bit': + specifier = 'x86' + else: + specifier = 'x64' + + vcpkg_installed = os.path.join(vcpkg_dir, 'installed') + for vcpkg_root in [ + os.path.join(vcpkg_installed, specifier + '-windows'), + os.path.join(vcpkg_installed, specifier + '-windows-static'), + ]: + add_system_root(vcpkg_root) + + # Conda is another popular package manager that provides libraries + conda = shutil.which('conda') + if conda: + conda_dir = os.path.dirname(conda) + add_system_root(os.path.join(conda_dir, '..', 'Library')) + add_system_root(os.path.join(conda_dir, 'Library')) + +else: + default_lib_dirs = libpaths(['/usr/local/lib', '/opt/lib', '/usr/lib', + '/opt/local/lib', '/sw/lib'], platform_bits) + default_runtime_dirs = [] + default_include_dirs = ['/usr/local/include', + '/opt/include', + # path of umfpack under macports + '/opt/local/include/ufsparse', + '/opt/local/include', '/sw/include', + '/usr/include/suitesparse'] + default_src_dirs = ['.', '/usr/local/src', '/opt/src', '/sw/src'] + + default_x11_lib_dirs = libpaths(['/usr/X11R6/lib', '/usr/X11/lib', + '/usr/lib'], platform_bits) + default_x11_include_dirs = ['/usr/X11R6/include', '/usr/X11/include'] + + if os.path.exists('/usr/lib/X11'): + globbed_x11_dir = glob('/usr/lib/*/libX11.so') + if globbed_x11_dir: + x11_so_dir = os.path.split(globbed_x11_dir[0])[0] + default_x11_lib_dirs.extend([x11_so_dir, '/usr/lib/X11']) + default_x11_include_dirs.extend(['/usr/lib/X11/include', + '/usr/include/X11']) + + with open(os.devnull, 'w') as tmp: + try: + p = subprocess.Popen(["gcc", "-print-multiarch"], stdout=subprocess.PIPE, + stderr=tmp) + except (OSError, DistutilsError): + # OSError if gcc is not installed, or SandboxViolation (DistutilsError + # subclass) if an old 
setuptools bug is triggered (see gh-3160).
+            pass
+        else:
+            triplet = str(p.communicate()[0].decode().strip())
+            if p.returncode == 0:
+                # gcc supports the "-print-multiarch" option
+                default_x11_lib_dirs += [os.path.join("/usr/lib/", triplet)]
+                default_lib_dirs += [os.path.join("/usr/lib/", triplet)]
+
+
+if os.path.join(sys.prefix, 'lib') not in default_lib_dirs:
+    default_lib_dirs.insert(0, os.path.join(sys.prefix, 'lib'))
+    default_include_dirs.append(os.path.join(sys.prefix, 'include'))
+    default_src_dirs.append(os.path.join(sys.prefix, 'src'))
+
+default_lib_dirs = [_m for _m in default_lib_dirs if os.path.isdir(_m)]
+default_runtime_dirs = [_m for _m in default_runtime_dirs if os.path.isdir(_m)]
+default_include_dirs = [_m for _m in default_include_dirs if os.path.isdir(_m)]
+default_src_dirs = [_m for _m in default_src_dirs if os.path.isdir(_m)]
+
+so_ext = get_shared_lib_extension()
+
+
+def get_standard_file(fname):
+    """Returns a list of files named 'fname' from
+    1) the system-wide directory (directory-location of this module)
+    2) the user's HOME directory (os.environ['HOME'])
+    3) the local directory
+    """
+    # System-wide file
+    filenames = []
+    try:
+        f = __file__
+    except NameError:
+        f = sys.argv[0]
+    else:
+        sysfile = os.path.join(os.path.split(os.path.abspath(f))[0],
+                               fname)
+        if os.path.isfile(sysfile):
+            filenames.append(sysfile)
+
+    # Home directory
+    # And look for the user config file
+    try:
+        f = os.path.expanduser('~')
+    except KeyError:
+        pass
+    else:
+        user_file = os.path.join(f, fname)
+        if os.path.isfile(user_file):
+            filenames.append(user_file)
+
+    # Local file
+    if os.path.isfile(fname):
+        filenames.append(os.path.abspath(fname))
+
+    return filenames
+
+
+def _parse_env_order(base_order, env):
+    """ Parse an environment variable `env` by splitting it on "," and only
+    returning elements from `base_order`.
+
+    This method splits the environment variable and checks each of its
+    individual elements against `base_order`.
+
+    The items in the environment variable may be negated via '^item' or
+    '!itema,itemb'. The variable must start with ^/! to negate all options.
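Concretely, the negation rules look like this; note the import of a private helper, shown for illustration only:

```python
# Behaviour of _parse_env_order() under the rules described above.
# Importing a private helper like this is purely illustrative.
import os
from numpy.distutils.system_info import _parse_env_order

base = ['mkl', 'openblas', 'atlas', 'blas']

os.environ['NPY_BLAS_ORDER'] = 'openblas,atlas'
assert _parse_env_order(base, 'NPY_BLAS_ORDER') == (['openblas', 'atlas'], [])

os.environ['NPY_BLAS_ORDER'] = '^mkl,unknown'
allowed, unknown = _parse_env_order(base, 'NPY_BLAS_ORDER')
assert allowed == ['openblas', 'atlas', 'blas']   # everything but mkl
assert unknown == ['unknown']                     # reported, not fatal

del os.environ['NPY_BLAS_ORDER']
```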
+ + Raises + ------ + ValueError: for mixed negated and non-negated orders or multiple negated orders + + Parameters + ---------- + base_order : list of str + the base list of orders + env : str + the environment variable to be parsed, if none is found, `base_order` is returned + + Returns + ------- + allow_order : list of str + allowed orders in lower-case + unknown_order : list of str + for values not overlapping with `base_order` + """ + order_str = os.environ.get(env, None) + + # ensure all base-orders are lower-case (for easier comparison) + base_order = [order.lower() for order in base_order] + if order_str is None: + return base_order, [] + + neg = order_str.startswith('^') or order_str.startswith('!') + # Check format + order_str_l = list(order_str) + sum_neg = order_str_l.count('^') + order_str_l.count('!') + if neg: + if sum_neg > 1: + raise ValueError(f"Environment variable '{env}' may only contain a single (prefixed) negation: {order_str}") + # remove prefix + order_str = order_str[1:] + elif sum_neg > 0: + raise ValueError(f"Environment variable '{env}' may not mix negated an non-negated items: {order_str}") + + # Split and lower case + orders = order_str.lower().split(',') + + # to inform callee about non-overlapping elements + unknown_order = [] + + # if negated, we have to remove from the order + if neg: + allow_order = base_order.copy() + + for order in orders: + if not order: + continue + + if order not in base_order: + unknown_order.append(order) + continue + + if order in allow_order: + allow_order.remove(order) + + else: + allow_order = [] + + for order in orders: + if not order: + continue + + if order not in base_order: + unknown_order.append(order) + continue + + if order not in allow_order: + allow_order.append(order) + + return allow_order, unknown_order + + +def get_info(name, notfound_action=0): + """ + notfound_action: + 0 - do nothing + 1 - display warning message + 2 - raise error + """ + cl = {'atlas': atlas_info, # use lapack_opt or blas_opt instead + 'atlas_threads': atlas_threads_info, # ditto + 'atlas_blas': atlas_blas_info, + 'atlas_blas_threads': atlas_blas_threads_info, + 'lapack_atlas': lapack_atlas_info, # use lapack_opt instead + 'lapack_atlas_threads': lapack_atlas_threads_info, # ditto + 'atlas_3_10': atlas_3_10_info, # use lapack_opt or blas_opt instead + 'atlas_3_10_threads': atlas_3_10_threads_info, # ditto + 'atlas_3_10_blas': atlas_3_10_blas_info, + 'atlas_3_10_blas_threads': atlas_3_10_blas_threads_info, + 'lapack_atlas_3_10': lapack_atlas_3_10_info, # use lapack_opt instead + 'lapack_atlas_3_10_threads': lapack_atlas_3_10_threads_info, # ditto + 'flame': flame_info, # use lapack_opt instead + 'mkl': mkl_info, + # openblas which may or may not have embedded lapack + 'openblas': openblas_info, # use blas_opt instead + # openblas with embedded lapack + 'openblas_lapack': openblas_lapack_info, # use blas_opt instead + 'openblas_clapack': openblas_clapack_info, # use blas_opt instead + 'blis': blis_info, # use blas_opt instead + 'lapack_mkl': lapack_mkl_info, # use lapack_opt instead + 'blas_mkl': blas_mkl_info, # use blas_opt instead + 'accelerate': accelerate_info, # use blas_opt instead + 'openblas64_': openblas64__info, + 'openblas64__lapack': openblas64__lapack_info, + 'openblas_ilp64': openblas_ilp64_info, + 'openblas_ilp64_lapack': openblas_ilp64_lapack_info, + 'x11': x11_info, + 'fft_opt': fft_opt_info, + 'fftw': fftw_info, + 'fftw2': fftw2_info, + 'fftw3': fftw3_info, + 'dfftw': dfftw_info, + 'sfftw': sfftw_info, + 'fftw_threads': 
fftw_threads_info, + 'dfftw_threads': dfftw_threads_info, + 'sfftw_threads': sfftw_threads_info, + 'djbfft': djbfft_info, + 'blas': blas_info, # use blas_opt instead + 'lapack': lapack_info, # use lapack_opt instead + 'lapack_src': lapack_src_info, + 'blas_src': blas_src_info, + 'numpy': numpy_info, + 'f2py': f2py_info, + 'Numeric': Numeric_info, + 'numeric': Numeric_info, + 'numarray': numarray_info, + 'numerix': numerix_info, + 'lapack_opt': lapack_opt_info, + 'lapack_ilp64_opt': lapack_ilp64_opt_info, + 'lapack_ilp64_plain_opt': lapack_ilp64_plain_opt_info, + 'lapack64__opt': lapack64__opt_info, + 'blas_opt': blas_opt_info, + 'blas_ilp64_opt': blas_ilp64_opt_info, + 'blas_ilp64_plain_opt': blas_ilp64_plain_opt_info, + 'blas64__opt': blas64__opt_info, + 'boost_python': boost_python_info, + 'agg2': agg2_info, + 'wx': wx_info, + 'gdk_pixbuf_xlib_2': gdk_pixbuf_xlib_2_info, + 'gdk-pixbuf-xlib-2.0': gdk_pixbuf_xlib_2_info, + 'gdk_pixbuf_2': gdk_pixbuf_2_info, + 'gdk-pixbuf-2.0': gdk_pixbuf_2_info, + 'gdk': gdk_info, + 'gdk_2': gdk_2_info, + 'gdk-2.0': gdk_2_info, + 'gdk_x11_2': gdk_x11_2_info, + 'gdk-x11-2.0': gdk_x11_2_info, + 'gtkp_x11_2': gtkp_x11_2_info, + 'gtk+-x11-2.0': gtkp_x11_2_info, + 'gtkp_2': gtkp_2_info, + 'gtk+-2.0': gtkp_2_info, + 'xft': xft_info, + 'freetype2': freetype2_info, + 'umfpack': umfpack_info, + 'amd': amd_info, + }.get(name.lower(), system_info) + return cl().get_info(notfound_action) + + +class NotFoundError(DistutilsError): + """Some third-party program or library is not found.""" + + +class AliasedOptionError(DistutilsError): + """ + Aliases entries in config files should not be existing. + In section '{section}' we found multiple appearances of options {options}.""" + + +class AtlasNotFoundError(NotFoundError): + """ + Atlas (http://github.com/math-atlas/math-atlas) libraries not found. + Directories to search for the libraries can be specified in the + numpy/distutils/site.cfg file (section [atlas]) or by setting + the ATLAS environment variable.""" + + +class FlameNotFoundError(NotFoundError): + """ + FLAME (http://www.cs.utexas.edu/~flame/web/) libraries not found. + Directories to search for the libraries can be specified in the + numpy/distutils/site.cfg file (section [flame]).""" + + +class LapackNotFoundError(NotFoundError): + """ + Lapack (http://www.netlib.org/lapack/) libraries not found. + Directories to search for the libraries can be specified in the + numpy/distutils/site.cfg file (section [lapack]) or by setting + the LAPACK environment variable.""" + + +class LapackSrcNotFoundError(LapackNotFoundError): + """ + Lapack (http://www.netlib.org/lapack/) sources not found. + Directories to search for the sources can be specified in the + numpy/distutils/site.cfg file (section [lapack_src]) or by setting + the LAPACK_SRC environment variable.""" + + +class LapackILP64NotFoundError(NotFoundError): + """ + 64-bit Lapack libraries not found. + Known libraries in numpy/distutils/site.cfg file are: + openblas64_, openblas_ilp64 + """ + +class BlasOptNotFoundError(NotFoundError): + """ + Optimized (vendor) Blas libraries are not found. + Falls back to netlib Blas library which has worse performance. + A better performance should be easily gained by switching + Blas library.""" + +class BlasNotFoundError(NotFoundError): + """ + Blas (http://www.netlib.org/blas/) libraries not found. 
+ Directories to search for the libraries can be specified in the + numpy/distutils/site.cfg file (section [blas]) or by setting + the BLAS environment variable.""" + +class BlasILP64NotFoundError(NotFoundError): + """ + 64-bit Blas libraries not found. + Known libraries in numpy/distutils/site.cfg file are: + openblas64_, openblas_ilp64 + """ + +class BlasSrcNotFoundError(BlasNotFoundError): + """ + Blas (http://www.netlib.org/blas/) sources not found. + Directories to search for the sources can be specified in the + numpy/distutils/site.cfg file (section [blas_src]) or by setting + the BLAS_SRC environment variable.""" + + +class FFTWNotFoundError(NotFoundError): + """ + FFTW (http://www.fftw.org/) libraries not found. + Directories to search for the libraries can be specified in the + numpy/distutils/site.cfg file (section [fftw]) or by setting + the FFTW environment variable.""" + + +class DJBFFTNotFoundError(NotFoundError): + """ + DJBFFT (https://cr.yp.to/djbfft.html) libraries not found. + Directories to search for the libraries can be specified in the + numpy/distutils/site.cfg file (section [djbfft]) or by setting + the DJBFFT environment variable.""" + + +class NumericNotFoundError(NotFoundError): + """ + Numeric (https://www.numpy.org/) module not found. + Get it from above location, install it, and retry setup.py.""" + + +class X11NotFoundError(NotFoundError): + """X11 libraries not found.""" + + +class UmfpackNotFoundError(NotFoundError): + """ + UMFPACK sparse solver (https://www.cise.ufl.edu/research/sparse/umfpack/) + not found. Directories to search for the libraries can be specified in the + numpy/distutils/site.cfg file (section [umfpack]) or by setting + the UMFPACK environment variable.""" + + +class system_info: + + """ get_info() is the only public method. Don't use others. + """ + dir_env_var = None + # XXX: search_static_first is disabled by default, may disappear in + # future unless it is proved to be useful. + search_static_first = 0 + # The base-class section name is a random word "ALL" and is not really + # intended for general use. It cannot be None nor can it be DEFAULT as + # these break the ConfigParser. 
See gh-15338 + section = 'ALL' + saved_results = {} + + notfounderror = NotFoundError + + def __init__(self, + default_lib_dirs=default_lib_dirs, + default_include_dirs=default_include_dirs, + ): + self.__class__.info = {} + self.local_prefixes = [] + defaults = {'library_dirs': os.pathsep.join(default_lib_dirs), + 'include_dirs': os.pathsep.join(default_include_dirs), + 'runtime_library_dirs': os.pathsep.join(default_runtime_dirs), + 'rpath': '', + 'src_dirs': os.pathsep.join(default_src_dirs), + 'search_static_first': str(self.search_static_first), + 'extra_compile_args': '', 'extra_link_args': ''} + self.cp = ConfigParser(defaults) + self.files = [] + self.files.extend(get_standard_file('.numpy-site.cfg')) + self.files.extend(get_standard_file('site.cfg')) + self.parse_config_files() + + if self.section is not None: + self.search_static_first = self.cp.getboolean( + self.section, 'search_static_first') + assert isinstance(self.search_static_first, int) + + def parse_config_files(self): + self.cp.read(self.files) + if not self.cp.has_section(self.section): + if self.section is not None: + self.cp.add_section(self.section) + + def calc_libraries_info(self): + libs = self.get_libraries() + dirs = self.get_lib_dirs() + # The extensions use runtime_library_dirs + r_dirs = self.get_runtime_lib_dirs() + # Intrinsic distutils use rpath, we simply append both entries + # as though they were one entry + r_dirs.extend(self.get_runtime_lib_dirs(key='rpath')) + info = {} + for lib in libs: + i = self.check_libs(dirs, [lib]) + if i is not None: + dict_append(info, **i) + else: + log.info('Library %s was not found. Ignoring' % (lib)) + + if r_dirs: + i = self.check_libs(r_dirs, [lib]) + if i is not None: + # Swap library keywords found to runtime_library_dirs + # the libraries are insisting on the user having defined + # them using the library_dirs, and not necessarily by + # runtime_library_dirs + del i['libraries'] + i['runtime_library_dirs'] = i.pop('library_dirs') + dict_append(info, **i) + else: + log.info('Runtime library %s was not found. 
Ignoring' % (lib)) + + return info + + def set_info(self, **info): + if info: + lib_info = self.calc_libraries_info() + dict_append(info, **lib_info) + # Update extra information + extra_info = self.calc_extra_info() + dict_append(info, **extra_info) + self.saved_results[self.__class__.__name__] = info + + def get_option_single(self, *options): + """ Ensure that only one of `options` are found in the section + + Parameters + ---------- + *options : list of str + a list of options to be found in the section (``self.section``) + + Returns + ------- + str : + the option that is uniquely found in the section + + Raises + ------ + AliasedOptionError : + in case more than one of the options are found + """ + found = [self.cp.has_option(self.section, opt) for opt in options] + if sum(found) == 1: + return options[found.index(True)] + elif sum(found) == 0: + # nothing is found anyways + return options[0] + + # Else we have more than 1 key found + if AliasedOptionError.__doc__ is None: + raise AliasedOptionError() + raise AliasedOptionError(AliasedOptionError.__doc__.format( + section=self.section, options='[{}]'.format(', '.join(options)))) + + + def has_info(self): + return self.__class__.__name__ in self.saved_results + + def calc_extra_info(self): + """ Updates the information in the current information with + respect to these flags: + extra_compile_args + extra_link_args + """ + info = {} + for key in ['extra_compile_args', 'extra_link_args']: + # Get values + opt = self.cp.get(self.section, key) + opt = _shell_utils.NativeParser.split(opt) + if opt: + tmp = {key: opt} + dict_append(info, **tmp) + return info + + def get_info(self, notfound_action=0): + """ Return a dictionary with items that are compatible + with numpy.distutils.setup keyword arguments. + """ + flag = 0 + if not self.has_info(): + flag = 1 + log.info(self.__class__.__name__ + ':') + if hasattr(self, 'calc_info'): + self.calc_info() + if notfound_action: + if not self.has_info(): + if notfound_action == 1: + warnings.warn(self.notfounderror.__doc__, stacklevel=2) + elif notfound_action == 2: + raise self.notfounderror(self.notfounderror.__doc__) + else: + raise ValueError(repr(notfound_action)) + + if not self.has_info(): + log.info(' NOT AVAILABLE') + self.set_info() + else: + log.info(' FOUND:') + + res = self.saved_results.get(self.__class__.__name__) + if log.get_threshold() <= log.INFO and flag: + for k, v in res.items(): + v = str(v) + if k in ['sources', 'libraries'] and len(v) > 270: + v = v[:120] + '...\n...\n...' 
+ v[-120:] + log.info(' %s = %s', k, v) + log.info('') + + return copy.deepcopy(res) + + def get_paths(self, section, key): + dirs = self.cp.get(section, key).split(os.pathsep) + env_var = self.dir_env_var + if env_var: + if is_sequence(env_var): + e0 = env_var[-1] + for e in env_var: + if e in os.environ: + e0 = e + break + if not env_var[0] == e0: + log.info('Setting %s=%s' % (env_var[0], e0)) + env_var = e0 + if env_var and env_var in os.environ: + d = os.environ[env_var] + if d == 'None': + log.info('Disabled %s: %s', + self.__class__.__name__, '(%s is None)' + % (env_var,)) + return [] + if os.path.isfile(d): + dirs = [os.path.dirname(d)] + dirs + l = getattr(self, '_lib_names', []) + if len(l) == 1: + b = os.path.basename(d) + b = os.path.splitext(b)[0] + if b[:3] == 'lib': + log.info('Replacing _lib_names[0]==%r with %r' \ + % (self._lib_names[0], b[3:])) + self._lib_names[0] = b[3:] + else: + ds = d.split(os.pathsep) + ds2 = [] + for d in ds: + if os.path.isdir(d): + ds2.append(d) + for dd in ['include', 'lib']: + d1 = os.path.join(d, dd) + if os.path.isdir(d1): + ds2.append(d1) + dirs = ds2 + dirs + default_dirs = self.cp.get(self.section, key).split(os.pathsep) + dirs.extend(default_dirs) + ret = [] + for d in dirs: + if len(d) > 0 and not os.path.isdir(d): + warnings.warn('Specified path %s is invalid.' % d, stacklevel=2) + continue + + if d not in ret: + ret.append(d) + + log.debug('( %s = %s )', key, ':'.join(ret)) + return ret + + def get_lib_dirs(self, key='library_dirs'): + return self.get_paths(self.section, key) + + def get_runtime_lib_dirs(self, key='runtime_library_dirs'): + path = self.get_paths(self.section, key) + if path == ['']: + path = [] + return path + + def get_include_dirs(self, key='include_dirs'): + return self.get_paths(self.section, key) + + def get_src_dirs(self, key='src_dirs'): + return self.get_paths(self.section, key) + + def get_libs(self, key, default): + try: + libs = self.cp.get(self.section, key) + except NoOptionError: + if not default: + return [] + if is_string(default): + return [default] + return default + return [b for b in [a.strip() for a in libs.split(',')] if b] + + def get_libraries(self, key='libraries'): + if hasattr(self, '_lib_names'): + return self.get_libs(key, default=self._lib_names) + else: + return self.get_libs(key, '') + + def library_extensions(self): + c = customized_ccompiler() + static_exts = [] + if c.compiler_type != 'msvc': + # MSVC doesn't understand binutils + static_exts.append('.a') + if sys.platform == 'win32': + static_exts.append('.lib') # .lib is used by MSVC and others + if self.search_static_first: + exts = static_exts + [so_ext] + else: + exts = [so_ext] + static_exts + if sys.platform == 'cygwin': + exts.append('.dll.a') + if sys.platform == 'darwin': + exts.append('.dylib') + return exts + + def check_libs(self, lib_dirs, libs, opt_libs=[]): + """If static or shared libraries are available then return + their info dictionary. + + Checks for all libraries as shared libraries first, then + static (or vice versa if self.search_static_first is True). + """ + exts = self.library_extensions() + info = None + for ext in exts: + info = self._check_libs(lib_dirs, libs, opt_libs, [ext]) + if info is not None: + break + if not info: + log.info(' libraries %s not found in %s', ','.join(libs), + lib_dirs) + return info + + def check_libs2(self, lib_dirs, libs, opt_libs=[]): + """If static or shared libraries are available then return + their info dictionary. + + Checks each library for shared or static. 
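Both checking methods rely on the probe order computed by `library_extensions` above; a condensed restatement for the Linux, non-MSVC case (where `so_ext` is assumed to be `.so`):

```python
# Condensed restatement of the extension probe order from
# library_extensions() above (Linux, non-MSVC compiler assumed).
def probe_order(search_static_first, so_ext='.so'):
    static_exts = ['.a']                  # binutils static archives
    if search_static_first:
        return static_exts + [so_ext]     # site.cfg: search_static_first = 1
    return [so_ext] + static_exts         # default: shared libraries first

assert probe_order(False) == ['.so', '.a']
assert probe_order(True) == ['.a', '.so']
```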
+ """ + exts = self.library_extensions() + info = self._check_libs(lib_dirs, libs, opt_libs, exts) + if not info: + log.info(' libraries %s not found in %s', ','.join(libs), + lib_dirs) + + return info + + def _find_lib(self, lib_dir, lib, exts): + assert is_string(lib_dir) + # under windows first try without 'lib' prefix + if sys.platform == 'win32': + lib_prefixes = ['', 'lib'] + else: + lib_prefixes = ['lib'] + # for each library name, see if we can find a file for it. + for ext in exts: + for prefix in lib_prefixes: + p = self.combine_paths(lib_dir, prefix + lib + ext) + if p: + break + if p: + assert len(p) == 1 + # ??? splitext on p[0] would do this for cygwin + # doesn't seem correct + if ext == '.dll.a': + lib += '.dll' + if ext == '.lib': + lib = prefix + lib + return lib + + return False + + def _find_libs(self, lib_dirs, libs, exts): + # make sure we preserve the order of libs, as it can be important + found_dirs, found_libs = [], [] + for lib in libs: + for lib_dir in lib_dirs: + found_lib = self._find_lib(lib_dir, lib, exts) + if found_lib: + found_libs.append(found_lib) + if lib_dir not in found_dirs: + found_dirs.append(lib_dir) + break + return found_dirs, found_libs + + def _check_libs(self, lib_dirs, libs, opt_libs, exts): + """Find mandatory and optional libs in expected paths. + + Missing optional libraries are silently forgotten. + """ + if not is_sequence(lib_dirs): + lib_dirs = [lib_dirs] + # First, try to find the mandatory libraries + found_dirs, found_libs = self._find_libs(lib_dirs, libs, exts) + if len(found_libs) > 0 and len(found_libs) == len(libs): + # Now, check for optional libraries + opt_found_dirs, opt_found_libs = self._find_libs(lib_dirs, opt_libs, exts) + found_libs.extend(opt_found_libs) + for lib_dir in opt_found_dirs: + if lib_dir not in found_dirs: + found_dirs.append(lib_dir) + info = {'libraries': found_libs, 'library_dirs': found_dirs} + return info + else: + return None + + def combine_paths(self, *args): + """Return a list of existing paths composed by all combinations + of items from the arguments. 
+ """ + return combine_paths(*args) + + +class fft_opt_info(system_info): + + def calc_info(self): + info = {} + fftw_info = get_info('fftw3') or get_info('fftw2') or get_info('dfftw') + djbfft_info = get_info('djbfft') + if fftw_info: + dict_append(info, **fftw_info) + if djbfft_info: + dict_append(info, **djbfft_info) + self.set_info(**info) + return + + +class fftw_info(system_info): + #variables to override + section = 'fftw' + dir_env_var = 'FFTW' + notfounderror = FFTWNotFoundError + ver_info = [{'name':'fftw3', + 'libs':['fftw3'], + 'includes':['fftw3.h'], + 'macros':[('SCIPY_FFTW3_H', None)]}, + {'name':'fftw2', + 'libs':['rfftw', 'fftw'], + 'includes':['fftw.h', 'rfftw.h'], + 'macros':[('SCIPY_FFTW_H', None)]}] + + def calc_ver_info(self, ver_param): + """Returns True on successful version detection, else False""" + lib_dirs = self.get_lib_dirs() + incl_dirs = self.get_include_dirs() + + opt = self.get_option_single(self.section + '_libs', 'libraries') + libs = self.get_libs(opt, ver_param['libs']) + info = self.check_libs(lib_dirs, libs) + if info is not None: + flag = 0 + for d in incl_dirs: + if len(self.combine_paths(d, ver_param['includes'])) \ + == len(ver_param['includes']): + dict_append(info, include_dirs=[d]) + flag = 1 + break + if flag: + dict_append(info, define_macros=ver_param['macros']) + else: + info = None + if info is not None: + self.set_info(**info) + return True + else: + log.info(' %s not found' % (ver_param['name'])) + return False + + def calc_info(self): + for i in self.ver_info: + if self.calc_ver_info(i): + break + + +class fftw2_info(fftw_info): + #variables to override + section = 'fftw' + dir_env_var = 'FFTW' + notfounderror = FFTWNotFoundError + ver_info = [{'name':'fftw2', + 'libs':['rfftw', 'fftw'], + 'includes':['fftw.h', 'rfftw.h'], + 'macros':[('SCIPY_FFTW_H', None)]} + ] + + +class fftw3_info(fftw_info): + #variables to override + section = 'fftw3' + dir_env_var = 'FFTW3' + notfounderror = FFTWNotFoundError + ver_info = [{'name':'fftw3', + 'libs':['fftw3'], + 'includes':['fftw3.h'], + 'macros':[('SCIPY_FFTW3_H', None)]}, + ] + + +class dfftw_info(fftw_info): + section = 'fftw' + dir_env_var = 'FFTW' + ver_info = [{'name':'dfftw', + 'libs':['drfftw', 'dfftw'], + 'includes':['dfftw.h', 'drfftw.h'], + 'macros':[('SCIPY_DFFTW_H', None)]}] + + +class sfftw_info(fftw_info): + section = 'fftw' + dir_env_var = 'FFTW' + ver_info = [{'name':'sfftw', + 'libs':['srfftw', 'sfftw'], + 'includes':['sfftw.h', 'srfftw.h'], + 'macros':[('SCIPY_SFFTW_H', None)]}] + + +class fftw_threads_info(fftw_info): + section = 'fftw' + dir_env_var = 'FFTW' + ver_info = [{'name':'fftw threads', + 'libs':['rfftw_threads', 'fftw_threads'], + 'includes':['fftw_threads.h', 'rfftw_threads.h'], + 'macros':[('SCIPY_FFTW_THREADS_H', None)]}] + + +class dfftw_threads_info(fftw_info): + section = 'fftw' + dir_env_var = 'FFTW' + ver_info = [{'name':'dfftw threads', + 'libs':['drfftw_threads', 'dfftw_threads'], + 'includes':['dfftw_threads.h', 'drfftw_threads.h'], + 'macros':[('SCIPY_DFFTW_THREADS_H', None)]}] + + +class sfftw_threads_info(fftw_info): + section = 'fftw' + dir_env_var = 'FFTW' + ver_info = [{'name':'sfftw threads', + 'libs':['srfftw_threads', 'sfftw_threads'], + 'includes':['sfftw_threads.h', 'srfftw_threads.h'], + 'macros':[('SCIPY_SFFTW_THREADS_H', None)]}] + + +class djbfft_info(system_info): + section = 'djbfft' + dir_env_var = 'DJBFFT' + notfounderror = DJBFFTNotFoundError + + def get_paths(self, section, key): + pre_dirs = system_info.get_paths(self, section, key) 
+ dirs = [] + for d in pre_dirs: + dirs.extend(self.combine_paths(d, ['djbfft']) + [d]) + return [d for d in dirs if os.path.isdir(d)] + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + incl_dirs = self.get_include_dirs() + info = None + for d in lib_dirs: + p = self.combine_paths(d, ['djbfft.a']) + if p: + info = {'extra_objects': p} + break + p = self.combine_paths(d, ['libdjbfft.a', 'libdjbfft' + so_ext]) + if p: + info = {'libraries': ['djbfft'], 'library_dirs': [d]} + break + if info is None: + return + for d in incl_dirs: + if len(self.combine_paths(d, ['fftc8.h', 'fftfreq.h'])) == 2: + dict_append(info, include_dirs=[d], + define_macros=[('SCIPY_DJBFFT_H', None)]) + self.set_info(**info) + return + return + + +class mkl_info(system_info): + section = 'mkl' + dir_env_var = 'MKLROOT' + _lib_mkl = ['mkl_rt'] + + def get_mkl_rootdir(self): + mklroot = os.environ.get('MKLROOT', None) + if mklroot is not None: + return mklroot + paths = os.environ.get('LD_LIBRARY_PATH', '').split(os.pathsep) + ld_so_conf = '/etc/ld.so.conf' + if os.path.isfile(ld_so_conf): + with open(ld_so_conf, 'r') as f: + for d in f: + d = d.strip() + if d: + paths.append(d) + intel_mkl_dirs = [] + for path in paths: + path_atoms = path.split(os.sep) + for m in path_atoms: + if m.startswith('mkl'): + d = os.sep.join(path_atoms[:path_atoms.index(m) + 2]) + intel_mkl_dirs.append(d) + break + for d in paths: + dirs = glob(os.path.join(d, 'mkl', '*')) + dirs += glob(os.path.join(d, 'mkl*')) + for sub_dir in dirs: + if os.path.isdir(os.path.join(sub_dir, 'lib')): + return sub_dir + return None + + def __init__(self): + mklroot = self.get_mkl_rootdir() + if mklroot is None: + system_info.__init__(self) + else: + from .cpuinfo import cpu + if cpu.is_Itanium(): + plt = '64' + elif cpu.is_Intel() and cpu.is_64bit(): + plt = 'intel64' + else: + plt = '32' + system_info.__init__( + self, + default_lib_dirs=[os.path.join(mklroot, 'lib', plt)], + default_include_dirs=[os.path.join(mklroot, 'include')]) + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + incl_dirs = self.get_include_dirs() + opt = self.get_option_single('mkl_libs', 'libraries') + mkl_libs = self.get_libs(opt, self._lib_mkl) + info = self.check_libs2(lib_dirs, mkl_libs) + if info is None: + return + dict_append(info, + define_macros=[('SCIPY_MKL_H', None), + ('HAVE_CBLAS', None)], + include_dirs=incl_dirs) + if sys.platform == 'win32': + pass # win32 has no pthread library + else: + dict_append(info, libraries=['pthread']) + self.set_info(**info) + + +class lapack_mkl_info(mkl_info): + pass + + +class blas_mkl_info(mkl_info): + pass + + +class atlas_info(system_info): + section = 'atlas' + dir_env_var = 'ATLAS' + _lib_names = ['f77blas', 'cblas'] + if sys.platform[:7] == 'freebsd': + _lib_atlas = ['atlas_r'] + _lib_lapack = ['alapack_r'] + else: + _lib_atlas = ['atlas'] + _lib_lapack = ['lapack'] + + notfounderror = AtlasNotFoundError + + def get_paths(self, section, key): + pre_dirs = system_info.get_paths(self, section, key) + dirs = [] + for d in pre_dirs: + dirs.extend(self.combine_paths(d, ['atlas*', 'ATLAS*', + 'sse', '3dnow', 'sse2']) + [d]) + return [d for d in dirs if os.path.isdir(d)] + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + info = {} + opt = self.get_option_single('atlas_libs', 'libraries') + atlas_libs = self.get_libs(opt, self._lib_names + self._lib_atlas) + lapack_libs = self.get_libs('lapack_libs', self._lib_lapack) + atlas = None + lapack = None + atlas_1 = None + for d in lib_dirs: + # FIXME: lapack_atlas is 
unused + lapack_atlas = self.check_libs2(d, ['lapack_atlas'], []) + atlas = self.check_libs2(d, atlas_libs, []) + if atlas is not None: + lib_dirs2 = [d] + self.combine_paths(d, ['atlas*', 'ATLAS*']) + lapack = self.check_libs2(lib_dirs2, lapack_libs, []) + if lapack is not None: + break + if atlas: + atlas_1 = atlas + log.info(self.__class__) + if atlas is None: + atlas = atlas_1 + if atlas is None: + return + include_dirs = self.get_include_dirs() + h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) + h = h[0] + if h: + h = os.path.dirname(h) + dict_append(info, include_dirs=[h]) + info['language'] = 'c' + if lapack is not None: + dict_append(info, **lapack) + dict_append(info, **atlas) + elif 'lapack_atlas' in atlas['libraries']: + dict_append(info, **atlas) + dict_append(info, + define_macros=[('ATLAS_WITH_LAPACK_ATLAS', None)]) + self.set_info(**info) + return + else: + dict_append(info, **atlas) + dict_append(info, define_macros=[('ATLAS_WITHOUT_LAPACK', None)]) + message = textwrap.dedent(""" + ********************************************************************* + Could not find lapack library within the ATLAS installation. + ********************************************************************* + """) + warnings.warn(message, stacklevel=2) + self.set_info(**info) + return + + # Check if lapack library is complete, only warn if it is not. + lapack_dir = lapack['library_dirs'][0] + lapack_name = lapack['libraries'][0] + lapack_lib = None + lib_prefixes = ['lib'] + if sys.platform == 'win32': + lib_prefixes.append('') + for e in self.library_extensions(): + for prefix in lib_prefixes: + fn = os.path.join(lapack_dir, prefix + lapack_name + e) + if os.path.exists(fn): + lapack_lib = fn + break + if lapack_lib: + break + if lapack_lib is not None: + sz = os.stat(lapack_lib)[6] + if sz <= 4000 * 1024: + message = textwrap.dedent(""" + ********************************************************************* + Lapack library (from ATLAS) is probably incomplete: + size of %s is %sk (expected >4000k) + + Follow the instructions in the KNOWN PROBLEMS section of the file + numpy/INSTALL.txt. 
+ ********************************************************************* + """) % (lapack_lib, sz / 1024) + warnings.warn(message, stacklevel=2) + else: + info['language'] = 'f77' + + atlas_version, atlas_extra_info = get_atlas_version(**atlas) + dict_append(info, **atlas_extra_info) + + self.set_info(**info) + + +class atlas_blas_info(atlas_info): + _lib_names = ['f77blas', 'cblas'] + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + info = {} + opt = self.get_option_single('atlas_libs', 'libraries') + atlas_libs = self.get_libs(opt, self._lib_names + self._lib_atlas) + atlas = self.check_libs2(lib_dirs, atlas_libs, []) + if atlas is None: + return + include_dirs = self.get_include_dirs() + h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) + h = h[0] + if h: + h = os.path.dirname(h) + dict_append(info, include_dirs=[h]) + info['language'] = 'c' + info['define_macros'] = [('HAVE_CBLAS', None)] + + atlas_version, atlas_extra_info = get_atlas_version(**atlas) + dict_append(atlas, **atlas_extra_info) + + dict_append(info, **atlas) + + self.set_info(**info) + return + + +class atlas_threads_info(atlas_info): + dir_env_var = ['PTATLAS', 'ATLAS'] + _lib_names = ['ptf77blas', 'ptcblas'] + + +class atlas_blas_threads_info(atlas_blas_info): + dir_env_var = ['PTATLAS', 'ATLAS'] + _lib_names = ['ptf77blas', 'ptcblas'] + + +class lapack_atlas_info(atlas_info): + _lib_names = ['lapack_atlas'] + atlas_info._lib_names + + +class lapack_atlas_threads_info(atlas_threads_info): + _lib_names = ['lapack_atlas'] + atlas_threads_info._lib_names + + +class atlas_3_10_info(atlas_info): + _lib_names = ['satlas'] + _lib_atlas = _lib_names + _lib_lapack = _lib_names + + +class atlas_3_10_blas_info(atlas_3_10_info): + _lib_names = ['satlas'] + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + info = {} + opt = self.get_option_single('atlas_lib', 'libraries') + atlas_libs = self.get_libs(opt, self._lib_names) + atlas = self.check_libs2(lib_dirs, atlas_libs, []) + if atlas is None: + return + include_dirs = self.get_include_dirs() + h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) + h = h[0] + if h: + h = os.path.dirname(h) + dict_append(info, include_dirs=[h]) + info['language'] = 'c' + info['define_macros'] = [('HAVE_CBLAS', None)] + + atlas_version, atlas_extra_info = get_atlas_version(**atlas) + dict_append(atlas, **atlas_extra_info) + + dict_append(info, **atlas) + + self.set_info(**info) + return + + +class atlas_3_10_threads_info(atlas_3_10_info): + dir_env_var = ['PTATLAS', 'ATLAS'] + _lib_names = ['tatlas'] + _lib_atlas = _lib_names + _lib_lapack = _lib_names + + +class atlas_3_10_blas_threads_info(atlas_3_10_blas_info): + dir_env_var = ['PTATLAS', 'ATLAS'] + _lib_names = ['tatlas'] + + +class lapack_atlas_3_10_info(atlas_3_10_info): + pass + + +class lapack_atlas_3_10_threads_info(atlas_3_10_threads_info): + pass + + +class lapack_info(system_info): + section = 'lapack' + dir_env_var = 'LAPACK' + _lib_names = ['lapack'] + notfounderror = LapackNotFoundError + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + + opt = self.get_option_single('lapack_libs', 'libraries') + lapack_libs = self.get_libs(opt, self._lib_names) + info = self.check_libs(lib_dirs, lapack_libs, []) + if info is None: + return + info['language'] = 'f77' + self.set_info(**info) + + +class lapack_src_info(system_info): + # LAPACK_SRC is deprecated, please do not use this! + # Build or install a BLAS library via your package manager or from + # source separately. 
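+ # The source tree is located via the [lapack_src] section of site.cfg
+ # or the LAPACK_SRC environment variable, e.g. (hypothetical path):
+ #   LAPACK_SRC=/opt/src/lapack-3.1.1/SRC python setup.py build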
+ section = 'lapack_src' + dir_env_var = 'LAPACK_SRC' + notfounderror = LapackSrcNotFoundError + + def get_paths(self, section, key): + pre_dirs = system_info.get_paths(self, section, key) + dirs = [] + for d in pre_dirs: + dirs.extend([d] + self.combine_paths(d, ['LAPACK*/SRC', 'SRC'])) + return [d for d in dirs if os.path.isdir(d)] + + def calc_info(self): + src_dirs = self.get_src_dirs() + src_dir = '' + for d in src_dirs: + if os.path.isfile(os.path.join(d, 'dgesv.f')): + src_dir = d + break + if not src_dir: + #XXX: Get sources from netlib. May be ask first. + return + # The following is extracted from LAPACK-3.0/SRC/Makefile. + # Added missing names from lapack-lite-3.1.1/SRC/Makefile + # while keeping removed names for Lapack-3.0 compatibility. + allaux = ''' + ilaenv ieeeck lsame lsamen xerbla + iparmq + ''' # *.f + laux = ''' + bdsdc bdsqr disna labad lacpy ladiv lae2 laebz laed0 laed1 + laed2 laed3 laed4 laed5 laed6 laed7 laed8 laed9 laeda laev2 + lagtf lagts lamch lamrg lanst lapy2 lapy3 larnv larrb larre + larrf lartg laruv las2 lascl lasd0 lasd1 lasd2 lasd3 lasd4 + lasd5 lasd6 lasd7 lasd8 lasd9 lasda lasdq lasdt laset lasq1 + lasq2 lasq3 lasq4 lasq5 lasq6 lasr lasrt lassq lasv2 pttrf + stebz stedc steqr sterf + + larra larrc larrd larr larrk larrj larrr laneg laisnan isnan + lazq3 lazq4 + ''' # [s|d]*.f + lasrc = ''' + gbbrd gbcon gbequ gbrfs gbsv gbsvx gbtf2 gbtrf gbtrs gebak + gebal gebd2 gebrd gecon geequ gees geesx geev geevx gegs gegv + gehd2 gehrd gelq2 gelqf gels gelsd gelss gelsx gelsy geql2 + geqlf geqp3 geqpf geqr2 geqrf gerfs gerq2 gerqf gesc2 gesdd + gesv gesvd gesvx getc2 getf2 getrf getri getrs ggbak ggbal + gges ggesx ggev ggevx ggglm gghrd gglse ggqrf ggrqf ggsvd + ggsvp gtcon gtrfs gtsv gtsvx gttrf gttrs gtts2 hgeqz hsein + hseqr labrd lacon laein lags2 lagtm lahqr lahrd laic1 lals0 + lalsa lalsd langb lange langt lanhs lansb lansp lansy lantb + lantp lantr lapll lapmt laqgb laqge laqp2 laqps laqsb laqsp + laqsy lar1v lar2v larf larfb larfg larft larfx largv larrv + lartv larz larzb larzt laswp lasyf latbs latdf latps latrd + latrs latrz latzm lauu2 lauum pbcon pbequ pbrfs pbstf pbsv + pbsvx pbtf2 pbtrf pbtrs pocon poequ porfs posv posvx potf2 + potrf potri potrs ppcon ppequ pprfs ppsv ppsvx pptrf pptri + pptrs ptcon pteqr ptrfs ptsv ptsvx pttrs ptts2 spcon sprfs + spsv spsvx sptrf sptri sptrs stegr stein sycon syrfs sysv + sysvx sytf2 sytrf sytri sytrs tbcon tbrfs tbtrs tgevc tgex2 + tgexc tgsen tgsja tgsna tgsy2 tgsyl tpcon tprfs tptri tptrs + trcon trevc trexc trrfs trsen trsna trsyl trti2 trtri trtrs + tzrqf tzrzf + + lacn2 lahr2 stemr laqr0 laqr1 laqr2 laqr3 laqr4 laqr5 + ''' # [s|c|d|z]*.f + sd_lasrc = ''' + laexc lag2 lagv2 laln2 lanv2 laqtr lasy2 opgtr opmtr org2l + org2r orgbr orghr orgl2 orglq orgql orgqr orgr2 orgrq orgtr + orm2l orm2r ormbr ormhr orml2 ormlq ormql ormqr ormr2 ormr3 + ormrq ormrz ormtr rscl sbev sbevd sbevx sbgst sbgv sbgvd sbgvx + sbtrd spev spevd spevx spgst spgv spgvd spgvx sptrd stev stevd + stevr stevx syev syevd syevr syevx sygs2 sygst sygv sygvd + sygvx sytd2 sytrd + ''' # [s|d]*.f + cz_lasrc = ''' + bdsqr hbev hbevd hbevx hbgst hbgv hbgvd hbgvx hbtrd hecon heev + heevd heevr heevx hegs2 hegst hegv hegvd hegvx herfs hesv + hesvx hetd2 hetf2 hetrd hetrf hetri hetrs hpcon hpev hpevd + hpevx hpgst hpgv hpgvd hpgvx hprfs hpsv hpsvx hptrd hptrf + hptri hptrs lacgv lacp2 lacpy lacrm lacrt ladiv laed0 laed7 + laed8 laesy laev2 lahef lanhb lanhe lanhp lanht laqhb laqhe + laqhp larcm larnv lartg lascl laset lasr lassq pttrf rot spmv + 
spr stedc steqr symv syr ung2l ung2r ungbr unghr ungl2 unglq + ungql ungqr ungr2 ungrq ungtr unm2l unm2r unmbr unmhr unml2 + unmlq unmql unmqr unmr2 unmr3 unmrq unmrz unmtr upgtr upmtr + ''' # [c|z]*.f + ####### + sclaux = laux + ' econd ' # s*.f + dzlaux = laux + ' secnd ' # d*.f + slasrc = lasrc + sd_lasrc # s*.f + dlasrc = lasrc + sd_lasrc # d*.f + clasrc = lasrc + cz_lasrc + ' srot srscl ' # c*.f + zlasrc = lasrc + cz_lasrc + ' drot drscl ' # z*.f + oclasrc = ' icmax1 scsum1 ' # *.f + ozlasrc = ' izmax1 dzsum1 ' # *.f + sources = ['s%s.f' % f for f in (sclaux + slasrc).split()] \ + + ['d%s.f' % f for f in (dzlaux + dlasrc).split()] \ + + ['c%s.f' % f for f in (clasrc).split()] \ + + ['z%s.f' % f for f in (zlasrc).split()] \ + + ['%s.f' % f for f in (allaux + oclasrc + ozlasrc).split()] + sources = [os.path.join(src_dir, f) for f in sources] + # Lapack 3.1: + src_dir2 = os.path.join(src_dir, '..', 'INSTALL') + sources += [os.path.join(src_dir2, p + 'lamch.f') for p in 'sdcz'] + # Lapack 3.2.1: + sources += [os.path.join(src_dir, p + 'larfp.f') for p in 'sdcz'] + sources += [os.path.join(src_dir, 'ila' + p + 'lr.f') for p in 'sdcz'] + sources += [os.path.join(src_dir, 'ila' + p + 'lc.f') for p in 'sdcz'] + # Should we check here actual existence of source files? + # Yes, the file listing is different between 3.0 and 3.1 + # versions. + sources = [f for f in sources if os.path.isfile(f)] + info = {'sources': sources, 'language': 'f77'} + self.set_info(**info) + +atlas_version_c_text = r''' +/* This file is generated from numpy/distutils/system_info.py */ +void ATL_buildinfo(void); +int main(void) { + ATL_buildinfo(); + return 0; +} +''' + +_cached_atlas_version = {} + + +def get_atlas_version(**config): + libraries = config.get('libraries', []) + library_dirs = config.get('library_dirs', []) + key = (tuple(libraries), tuple(library_dirs)) + if key in _cached_atlas_version: + return _cached_atlas_version[key] + c = cmd_config(Distribution()) + atlas_version = None + info = {} + try: + s, o = c.get_output(atlas_version_c_text, + libraries=libraries, library_dirs=library_dirs, + ) + if s and re.search(r'undefined reference to `_gfortran', o, re.M): + s, o = c.get_output(atlas_version_c_text, + libraries=libraries + ['gfortran'], + library_dirs=library_dirs, + ) + if not s: + warnings.warn(textwrap.dedent(""" + ***************************************************** + Linkage with ATLAS requires gfortran. Use + + python setup.py config_fc --fcompiler=gnu95 ... + + when building extension libraries that use ATLAS. + Make sure that -lgfortran is used for C++ extensions. 
+ *****************************************************
+ """), stacklevel=2)
+ dict_append(info, language='f90',
+ define_macros=[('ATLAS_REQUIRES_GFORTRAN', None)])
+ except Exception: # failed to get version from file -- maybe on Windows
+ # look at directory name
+ for o in library_dirs:
+ m = re.search(r'ATLAS_(?P<version>\d+[.]\d+[.]\d+)_', o)
+ if m:
+ atlas_version = m.group('version')
+ if atlas_version is not None:
+ break
+
+ # final choice --- look at ATLAS_VERSION environment
+ # variable
+ if atlas_version is None:
+ atlas_version = os.environ.get('ATLAS_VERSION', None)
+ if atlas_version:
+ dict_append(info, define_macros=[(
+ 'ATLAS_INFO', _c_string_literal(atlas_version))
+ ])
+ else:
+ dict_append(info, define_macros=[('NO_ATLAS_INFO', -1)])
+ return atlas_version or '?.?.?', info
+
+ if not s:
+ m = re.search(r'ATLAS version (?P<version>\d+[.]\d+[.]\d+)', o)
+ if m:
+ atlas_version = m.group('version')
+ if atlas_version is None:
+ if re.search(r'undefined symbol: ATL_buildinfo', o, re.M):
+ atlas_version = '3.2.1_pre3.3.6'
+ else:
+ log.info('Status: %d', s)
+ log.info('Output: %s', o)
+
+ elif atlas_version == '3.2.1_pre3.3.6':
+ dict_append(info, define_macros=[('NO_ATLAS_INFO', -2)])
+ else:
+ dict_append(info, define_macros=[(
+ 'ATLAS_INFO', _c_string_literal(atlas_version))
+ ])
+ result = _cached_atlas_version[key] = atlas_version, info
+ return result
+
+
+ class lapack_opt_info(system_info):
+ notfounderror = LapackNotFoundError
+
+ # List of all known LAPACK libraries, in the default order
+ lapack_order = ['mkl', 'openblas', 'flame',
+ 'accelerate', 'atlas', 'lapack']
+ order_env_var_name = 'NPY_LAPACK_ORDER'
+
+ def _calc_info_mkl(self):
+ info = get_info('lapack_mkl')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
+
+ def _calc_info_openblas(self):
+ info = get_info('openblas_lapack')
+ if info:
+ self.set_info(**info)
+ return True
+ info = get_info('openblas_clapack')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
+
+ def _calc_info_flame(self):
+ info = get_info('flame')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
+
+ def _calc_info_atlas(self):
+ info = get_info('atlas_3_10_threads')
+ if not info:
+ info = get_info('atlas_3_10')
+ if not info:
+ info = get_info('atlas_threads')
+ if not info:
+ info = get_info('atlas')
+ if info:
+ # Figure out if ATLAS has lapack...
+ # If not we need the lapack library, but not BLAS!
+ l = info.get('define_macros', [])
+ if ('ATLAS_WITH_LAPACK_ATLAS', None) in l \
+ or ('ATLAS_WITHOUT_LAPACK', None) in l:
+ # Get LAPACK (with possible warnings)
+ # If not found we don't accept anything
+ # since we can't use ATLAS with LAPACK!
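+ # In that case a standalone LAPACK (looked up below) is mandatory;
+ # when it is missing the ATLAS result is discarded entirely rather
+ # than returned incomplete.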
+ lapack_info = self._get_info_lapack() + if not lapack_info: + return False + dict_append(info, **lapack_info) + self.set_info(**info) + return True + return False + + def _calc_info_accelerate(self): + info = get_info('accelerate') + if info: + self.set_info(**info) + return True + return False + + def _get_info_blas(self): + # Default to get the optimized BLAS implementation + info = get_info('blas_opt') + if not info: + warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=3) + info_src = get_info('blas_src') + if not info_src: + warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=3) + return {} + dict_append(info, libraries=[('fblas_src', info_src)]) + return info + + def _get_info_lapack(self): + info = get_info('lapack') + if not info: + warnings.warn(LapackNotFoundError.__doc__ or '', stacklevel=3) + info_src = get_info('lapack_src') + if not info_src: + warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=3) + return {} + dict_append(info, libraries=[('flapack_src', info_src)]) + return info + + def _calc_info_lapack(self): + info = self._get_info_lapack() + if info: + info_blas = self._get_info_blas() + dict_append(info, **info_blas) + dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)]) + self.set_info(**info) + return True + return False + + def _calc_info_from_envvar(self): + info = {} + info['language'] = 'f77' + info['libraries'] = [] + info['include_dirs'] = [] + info['define_macros'] = [] + info['extra_link_args'] = os.environ['NPY_LAPACK_LIBS'].split() + self.set_info(**info) + return True + + def _calc_info(self, name): + return getattr(self, '_calc_info_{}'.format(name))() + + def calc_info(self): + lapack_order, unknown_order = _parse_env_order(self.lapack_order, self.order_env_var_name) + if len(unknown_order) > 0: + raise ValueError("lapack_opt_info user defined " + "LAPACK order has unacceptable " + "values: {}".format(unknown_order)) + + if 'NPY_LAPACK_LIBS' in os.environ: + # Bypass autodetection, set language to F77 and use env var linker + # flags directly + self._calc_info_from_envvar() + return + + for lapack in lapack_order: + if self._calc_info(lapack): + return + + if 'lapack' not in lapack_order: + # Since the user may request *not* to use any library, we still need + # to raise warnings to signal missing packages! 
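+ # For example (illustrative): NPY_LAPACK_ORDER=mkl on a machine
+ # without MKL ends up here -- nothing was detected, and plain
+ # 'lapack' was deliberately excluded from the search order.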
+ warnings.warn(LapackNotFoundError.__doc__ or '', stacklevel=2) + warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=2) + + +class _ilp64_opt_info_mixin: + symbol_suffix = None + symbol_prefix = None + + def _check_info(self, info): + macros = dict(info.get('define_macros', [])) + prefix = macros.get('BLAS_SYMBOL_PREFIX', '') + suffix = macros.get('BLAS_SYMBOL_SUFFIX', '') + + if self.symbol_prefix not in (None, prefix): + return False + + if self.symbol_suffix not in (None, suffix): + return False + + return bool(info) + + +class lapack_ilp64_opt_info(lapack_opt_info, _ilp64_opt_info_mixin): + notfounderror = LapackILP64NotFoundError + lapack_order = ['openblas64_', 'openblas_ilp64'] + order_env_var_name = 'NPY_LAPACK_ILP64_ORDER' + + def _calc_info(self, name): + info = get_info(name + '_lapack') + if self._check_info(info): + self.set_info(**info) + return True + return False + + +class lapack_ilp64_plain_opt_info(lapack_ilp64_opt_info): + # Same as lapack_ilp64_opt_info, but fix symbol names + symbol_prefix = '' + symbol_suffix = '' + + +class lapack64__opt_info(lapack_ilp64_opt_info): + symbol_prefix = '' + symbol_suffix = '64_' + + +class blas_opt_info(system_info): + notfounderror = BlasNotFoundError + # List of all known BLAS libraries, in the default order + + blas_order = ['mkl', 'blis', 'openblas', + 'accelerate', 'atlas', 'blas'] + order_env_var_name = 'NPY_BLAS_ORDER' + + def _calc_info_mkl(self): + info = get_info('blas_mkl') + if info: + self.set_info(**info) + return True + return False + + def _calc_info_blis(self): + info = get_info('blis') + if info: + self.set_info(**info) + return True + return False + + def _calc_info_openblas(self): + info = get_info('openblas') + if info: + self.set_info(**info) + return True + return False + + def _calc_info_atlas(self): + info = get_info('atlas_3_10_blas_threads') + if not info: + info = get_info('atlas_3_10_blas') + if not info: + info = get_info('atlas_blas_threads') + if not info: + info = get_info('atlas_blas') + if info: + self.set_info(**info) + return True + return False + + def _calc_info_accelerate(self): + info = get_info('accelerate') + if info: + self.set_info(**info) + return True + return False + + def _calc_info_blas(self): + # Warn about a non-optimized BLAS library + warnings.warn(BlasOptNotFoundError.__doc__ or '', stacklevel=3) + info = {} + dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)]) + + blas = get_info('blas') + if blas: + dict_append(info, **blas) + else: + # Not even BLAS was found! 
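+ # Last resort: fall back to the reference Fortran BLAS sources
+ # (blas_src below), which lets the build proceed without any
+ # installed BLAS, at a performance cost.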
+ warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=3) + + blas_src = get_info('blas_src') + if not blas_src: + warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=3) + return False + dict_append(info, libraries=[('fblas_src', blas_src)]) + + self.set_info(**info) + return True + + def _calc_info_from_envvar(self): + info = {} + info['language'] = 'f77' + info['libraries'] = [] + info['include_dirs'] = [] + info['define_macros'] = [] + info['extra_link_args'] = os.environ['NPY_BLAS_LIBS'].split() + if 'NPY_CBLAS_LIBS' in os.environ: + info['define_macros'].append(('HAVE_CBLAS', None)) + info['extra_link_args'].extend( + os.environ['NPY_CBLAS_LIBS'].split()) + self.set_info(**info) + return True + + def _calc_info(self, name): + return getattr(self, '_calc_info_{}'.format(name))() + + def calc_info(self): + blas_order, unknown_order = _parse_env_order(self.blas_order, self.order_env_var_name) + if len(unknown_order) > 0: + raise ValueError("blas_opt_info user defined BLAS order has unacceptable values: {}".format(unknown_order)) + + if 'NPY_BLAS_LIBS' in os.environ: + # Bypass autodetection, set language to F77 and use env var linker + # flags directly + self._calc_info_from_envvar() + return + + for blas in blas_order: + if self._calc_info(blas): + return + + if 'blas' not in blas_order: + # Since the user may request *not* to use any library, we still need + # to raise warnings to signal missing packages! + warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=2) + warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=2) + + +class blas_ilp64_opt_info(blas_opt_info, _ilp64_opt_info_mixin): + notfounderror = BlasILP64NotFoundError + blas_order = ['openblas64_', 'openblas_ilp64'] + order_env_var_name = 'NPY_BLAS_ILP64_ORDER' + + def _calc_info(self, name): + info = get_info(name) + if self._check_info(info): + self.set_info(**info) + return True + return False + + +class blas_ilp64_plain_opt_info(blas_ilp64_opt_info): + symbol_prefix = '' + symbol_suffix = '' + + +class blas64__opt_info(blas_ilp64_opt_info): + symbol_prefix = '' + symbol_suffix = '64_' + + +class cblas_info(system_info): + section = 'cblas' + dir_env_var = 'CBLAS' + # No default as it's used only in blas_info + _lib_names = [] + notfounderror = BlasNotFoundError + + +class blas_info(system_info): + section = 'blas' + dir_env_var = 'BLAS' + _lib_names = ['blas'] + notfounderror = BlasNotFoundError + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + opt = self.get_option_single('blas_libs', 'libraries') + blas_libs = self.get_libs(opt, self._lib_names) + info = self.check_libs(lib_dirs, blas_libs, []) + if info is None: + return + else: + info['include_dirs'] = self.get_include_dirs() + if platform.system() == 'Windows': + # The check for windows is needed because get_cblas_libs uses the + # same compiler that was used to compile Python and msvc is + # often not installed when mingw is being used. This rough + # treatment is not desirable, but windows is tricky. + info['language'] = 'f77' # XXX: is it generally true? 
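+ # A minimal site.cfg override for this section might look like
+ # (hypothetical paths):
+ #   [blas]
+ #   blas_libs = blas
+ #   library_dirs = /opt/blas/lib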
+ # If cblas is given as an option, use those
+ cblas_info_obj = cblas_info()
+ cblas_opt = cblas_info_obj.get_option_single('cblas_libs', 'libraries')
+ cblas_libs = cblas_info_obj.get_libs(cblas_opt, None)
+ if cblas_libs:
+ info['libraries'] = cblas_libs + blas_libs
+ info['define_macros'] = [('HAVE_CBLAS', None)]
+ else:
+ lib = self.get_cblas_libs(info)
+ if lib is not None:
+ info['language'] = 'c'
+ info['libraries'] = lib
+ info['define_macros'] = [('HAVE_CBLAS', None)]
+ self.set_info(**info)
+
+ def get_cblas_libs(self, info):
+ """ Check whether we can link with CBLAS interface
+
+ This method will search through several combinations of libraries
+ to check whether CBLAS is present:
+
+ 1. Libraries in ``info['libraries']``, as is
+ 2. As 1. but also explicitly adding ``'cblas'`` as a library
+ 3. As 1. but also explicitly adding ``'blas'`` as a library
+ 4. Check only library ``'cblas'``
+ 5. Check only library ``'blas'``
+
+ Parameters
+ ----------
+ info : dict
+ system information dictionary for compilation and linking
+
+ Returns
+ -------
+ libraries : list of str or None
+ a list of libraries that enables the use of CBLAS interface.
+ Returns None if not found or a compilation error occurs.
+
+ Since 1.17 returns a list.
+ """
+ # primitive cblas check by looking for the header and trying to link
+ # cblas or blas
+ c = customized_ccompiler()
+ tmpdir = tempfile.mkdtemp()
+ s = textwrap.dedent("""\
+ #include <cblas.h>
+ int main(int argc, const char *argv[])
+ {
+ double a[4] = {1,2,3,4};
+ double b[4] = {5,6,7,8};
+ return cblas_ddot(4, a, 1, b, 1) > 10;
+ }""")
+ src = os.path.join(tmpdir, 'source.c')
+ try:
+ with open(src, 'wt') as f:
+ f.write(s)
+
+ try:
+ # check we can compile (find headers)
+ obj = c.compile([src], output_dir=tmpdir,
+ include_dirs=self.get_include_dirs())
+ except (distutils.ccompiler.CompileError, distutils.ccompiler.LinkError):
+ return None
+
+ # check we can link (find library)
+ # some systems have separate cblas and blas libs.
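+ # The loop below tries the candidate sets in the order given in the
+ # docstring; the first set that links the cblas_ddot probe wins.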
+ for libs in [info['libraries'], ['cblas'] + info['libraries'], + ['blas'] + info['libraries'], ['cblas'], ['blas']]: + try: + c.link_executable(obj, os.path.join(tmpdir, "a.out"), + libraries=libs, + library_dirs=info['library_dirs'], + extra_postargs=info.get('extra_link_args', [])) + return libs + except distutils.ccompiler.LinkError: + pass + finally: + shutil.rmtree(tmpdir) + return None + + +class openblas_info(blas_info): + section = 'openblas' + dir_env_var = 'OPENBLAS' + _lib_names = ['openblas'] + _require_symbols = [] + notfounderror = BlasNotFoundError + + @property + def symbol_prefix(self): + try: + return self.cp.get(self.section, 'symbol_prefix') + except NoOptionError: + return '' + + @property + def symbol_suffix(self): + try: + return self.cp.get(self.section, 'symbol_suffix') + except NoOptionError: + return '' + + def _calc_info(self): + c = customized_ccompiler() + + lib_dirs = self.get_lib_dirs() + + # Prefer to use libraries over openblas_libs + opt = self.get_option_single('openblas_libs', 'libraries') + openblas_libs = self.get_libs(opt, self._lib_names) + + info = self.check_libs(lib_dirs, openblas_libs, []) + + if c.compiler_type == "msvc" and info is None: + from numpy.distutils.fcompiler import new_fcompiler + f = new_fcompiler(c_compiler=c) + if f and f.compiler_type == 'gnu95': + # Try gfortran-compatible library files + info = self.check_msvc_gfortran_libs(lib_dirs, openblas_libs) + # Skip lapack check, we'd need build_ext to do it + skip_symbol_check = True + elif info: + skip_symbol_check = False + info['language'] = 'c' + + if info is None: + return None + + # Add extra info for OpenBLAS + extra_info = self.calc_extra_info() + dict_append(info, **extra_info) + + if not (skip_symbol_check or self.check_symbols(info)): + return None + + info['define_macros'] = [('HAVE_CBLAS', None)] + if self.symbol_prefix: + info['define_macros'] += [('BLAS_SYMBOL_PREFIX', self.symbol_prefix)] + if self.symbol_suffix: + info['define_macros'] += [('BLAS_SYMBOL_SUFFIX', self.symbol_suffix)] + + return info + + def calc_info(self): + info = self._calc_info() + if info is not None: + self.set_info(**info) + + def check_msvc_gfortran_libs(self, library_dirs, libraries): + # First, find the full path to each library directory + library_paths = [] + for library in libraries: + for library_dir in library_dirs: + # MinGW static ext will be .a + fullpath = os.path.join(library_dir, library + '.a') + if os.path.isfile(fullpath): + library_paths.append(fullpath) + break + else: + return None + + # Generate numpy.distutils virtual static library file + basename = self.__class__.__name__ + tmpdir = os.path.join(os.getcwd(), 'build', basename) + if not os.path.isdir(tmpdir): + os.makedirs(tmpdir) + + info = {'library_dirs': [tmpdir], + 'libraries': [basename], + 'language': 'f77'} + + fake_lib_file = os.path.join(tmpdir, basename + '.fobjects') + fake_clib_file = os.path.join(tmpdir, basename + '.cobjects') + with open(fake_lib_file, 'w') as f: + f.write("\n".join(library_paths)) + with open(fake_clib_file, 'w') as f: + pass + + return info + + def check_symbols(self, info): + res = False + c = customized_ccompiler() + + tmpdir = tempfile.mkdtemp() + + prototypes = "\n".join("void %s%s%s();" % (self.symbol_prefix, + symbol_name, + self.symbol_suffix) + for symbol_name in self._require_symbols) + calls = "\n".join("%s%s%s();" % (self.symbol_prefix, + symbol_name, + self.symbol_suffix) + for symbol_name in self._require_symbols) + s = textwrap.dedent("""\ + %(prototypes)s + int main(int 
argc, const char *argv[]) + { + %(calls)s + return 0; + }""") % dict(prototypes=prototypes, calls=calls) + src = os.path.join(tmpdir, 'source.c') + out = os.path.join(tmpdir, 'a.out') + # Add the additional "extra" arguments + try: + extra_args = info['extra_link_args'] + except Exception: + extra_args = [] + try: + with open(src, 'wt') as f: + f.write(s) + obj = c.compile([src], output_dir=tmpdir) + try: + c.link_executable(obj, out, libraries=info['libraries'], + library_dirs=info['library_dirs'], + extra_postargs=extra_args) + res = True + except distutils.ccompiler.LinkError: + res = False + finally: + shutil.rmtree(tmpdir) + return res + +class openblas_lapack_info(openblas_info): + section = 'openblas' + dir_env_var = 'OPENBLAS' + _lib_names = ['openblas'] + _require_symbols = ['zungqr_'] + notfounderror = BlasNotFoundError + +class openblas_clapack_info(openblas_lapack_info): + _lib_names = ['openblas', 'lapack'] + +class openblas_ilp64_info(openblas_info): + section = 'openblas_ilp64' + dir_env_var = 'OPENBLAS_ILP64' + _lib_names = ['openblas64'] + _require_symbols = ['dgemm_', 'cblas_dgemm'] + notfounderror = BlasILP64NotFoundError + + def _calc_info(self): + info = super()._calc_info() + if info is not None: + info['define_macros'] += [('HAVE_BLAS_ILP64', None)] + return info + +class openblas_ilp64_lapack_info(openblas_ilp64_info): + _require_symbols = ['dgemm_', 'cblas_dgemm', 'zungqr_', 'LAPACKE_zungqr'] + + def _calc_info(self): + info = super()._calc_info() + if info: + info['define_macros'] += [('HAVE_LAPACKE', None)] + return info + +class openblas64__info(openblas_ilp64_info): + # ILP64 Openblas, with default symbol suffix + section = 'openblas64_' + dir_env_var = 'OPENBLAS64_' + _lib_names = ['openblas64_'] + symbol_suffix = '64_' + symbol_prefix = '' + +class openblas64__lapack_info(openblas_ilp64_lapack_info, openblas64__info): + pass + +class blis_info(blas_info): + section = 'blis' + dir_env_var = 'BLIS' + _lib_names = ['blis'] + notfounderror = BlasNotFoundError + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + opt = self.get_option_single('blis_libs', 'libraries') + blis_libs = self.get_libs(opt, self._lib_names) + info = self.check_libs2(lib_dirs, blis_libs, []) + if info is None: + return + + # Add include dirs + incl_dirs = self.get_include_dirs() + dict_append(info, + language='c', + define_macros=[('HAVE_CBLAS', None)], + include_dirs=incl_dirs) + self.set_info(**info) + + +class flame_info(system_info): + """ Usage of libflame for LAPACK operations + + This requires libflame to be compiled with lapack wrappers: + + ./configure --enable-lapack2flame ... + + Be aware that libflame 5.1.0 has some missing names in the shared library, so + if you have problems, try the static flame library. 
+ """ + section = 'flame' + _lib_names = ['flame'] + notfounderror = FlameNotFoundError + + def check_embedded_lapack(self, info): + """ libflame does not necessarily have a wrapper for fortran LAPACK, we need to check """ + c = customized_ccompiler() + + tmpdir = tempfile.mkdtemp() + s = textwrap.dedent("""\ + void zungqr_(); + int main(int argc, const char *argv[]) + { + zungqr_(); + return 0; + }""") + src = os.path.join(tmpdir, 'source.c') + out = os.path.join(tmpdir, 'a.out') + # Add the additional "extra" arguments + extra_args = info.get('extra_link_args', []) + try: + with open(src, 'wt') as f: + f.write(s) + obj = c.compile([src], output_dir=tmpdir) + try: + c.link_executable(obj, out, libraries=info['libraries'], + library_dirs=info['library_dirs'], + extra_postargs=extra_args) + return True + except distutils.ccompiler.LinkError: + return False + finally: + shutil.rmtree(tmpdir) + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + flame_libs = self.get_libs('libraries', self._lib_names) + + info = self.check_libs2(lib_dirs, flame_libs, []) + if info is None: + return + + # Add the extra flag args to info + extra_info = self.calc_extra_info() + dict_append(info, **extra_info) + + if self.check_embedded_lapack(info): + # check if the user has supplied all information required + self.set_info(**info) + else: + # Try and get the BLAS lib to see if we can get it to work + blas_info = get_info('blas_opt') + if not blas_info: + # since we already failed once, this ain't going to work either + return + + # Now we need to merge the two dictionaries + for key in blas_info: + if isinstance(blas_info[key], list): + info[key] = info.get(key, []) + blas_info[key] + elif isinstance(blas_info[key], tuple): + info[key] = info.get(key, ()) + blas_info[key] + else: + info[key] = info.get(key, '') + blas_info[key] + + # Now check again + if self.check_embedded_lapack(info): + self.set_info(**info) + + +class accelerate_info(system_info): + section = 'accelerate' + _lib_names = ['accelerate', 'veclib'] + notfounderror = BlasNotFoundError + + def calc_info(self): + # Make possible to enable/disable from config file/env var + libraries = os.environ.get('ACCELERATE') + if libraries: + libraries = [libraries] + else: + libraries = self.get_libs('libraries', self._lib_names) + libraries = [lib.strip().lower() for lib in libraries] + + if (sys.platform == 'darwin' and + not os.getenv('_PYTHON_HOST_PLATFORM', None)): + # Use the system BLAS from Accelerate or vecLib under OSX + args = [] + link_args = [] + if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \ + 'x86_64' in get_platform() or \ + 'i386' in platform.platform(): + intel = 1 + else: + intel = 0 + if (os.path.exists('/System/Library/Frameworks' + '/Accelerate.framework/') and + 'accelerate' in libraries): + if intel: + args.extend(['-msse3']) + args.extend([ + '-I/System/Library/Frameworks/vecLib.framework/Headers']) + link_args.extend(['-Wl,-framework', '-Wl,Accelerate']) + elif (os.path.exists('/System/Library/Frameworks' + '/vecLib.framework/') and + 'veclib' in libraries): + if intel: + args.extend(['-msse3']) + args.extend([ + '-I/System/Library/Frameworks/vecLib.framework/Headers']) + link_args.extend(['-Wl,-framework', '-Wl,vecLib']) + + if args: + self.set_info(extra_compile_args=args, + extra_link_args=link_args, + define_macros=[('NO_ATLAS_INFO', 3), + ('HAVE_CBLAS', None)]) + + return + +class blas_src_info(system_info): + # BLAS_SRC is deprecated, please do not use this! 
+ # Build or install a BLAS library via your package manager or from + # source separately. + section = 'blas_src' + dir_env_var = 'BLAS_SRC' + notfounderror = BlasSrcNotFoundError + + def get_paths(self, section, key): + pre_dirs = system_info.get_paths(self, section, key) + dirs = [] + for d in pre_dirs: + dirs.extend([d] + self.combine_paths(d, ['blas'])) + return [d for d in dirs if os.path.isdir(d)] + + def calc_info(self): + src_dirs = self.get_src_dirs() + src_dir = '' + for d in src_dirs: + if os.path.isfile(os.path.join(d, 'daxpy.f')): + src_dir = d + break + if not src_dir: + #XXX: Get sources from netlib. May be ask first. + return + blas1 = ''' + caxpy csscal dnrm2 dzasum saxpy srotg zdotc ccopy cswap drot + dznrm2 scasum srotm zdotu cdotc dasum drotg icamax scnrm2 + srotmg zdrot cdotu daxpy drotm idamax scopy sscal zdscal crotg + dcabs1 drotmg isamax sdot sswap zrotg cscal dcopy dscal izamax + snrm2 zaxpy zscal csrot ddot dswap sasum srot zcopy zswap + scabs1 + ''' + blas2 = ''' + cgbmv chpmv ctrsv dsymv dtrsv sspr2 strmv zhemv ztpmv cgemv + chpr dgbmv dsyr lsame ssymv strsv zher ztpsv cgerc chpr2 dgemv + dsyr2 sgbmv ssyr xerbla zher2 ztrmv cgeru ctbmv dger dtbmv + sgemv ssyr2 zgbmv zhpmv ztrsv chbmv ctbsv dsbmv dtbsv sger + stbmv zgemv zhpr chemv ctpmv dspmv dtpmv ssbmv stbsv zgerc + zhpr2 cher ctpsv dspr dtpsv sspmv stpmv zgeru ztbmv cher2 + ctrmv dspr2 dtrmv sspr stpsv zhbmv ztbsv + ''' + blas3 = ''' + cgemm csymm ctrsm dsyrk sgemm strmm zhemm zsyr2k chemm csyr2k + dgemm dtrmm ssymm strsm zher2k zsyrk cher2k csyrk dsymm dtrsm + ssyr2k zherk ztrmm cherk ctrmm dsyr2k ssyrk zgemm zsymm ztrsm + ''' + sources = [os.path.join(src_dir, f + '.f') \ + for f in (blas1 + blas2 + blas3).split()] + #XXX: should we check here actual existence of source files? 
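+ # The os.path.isfile filter below answers the question above: only
+ # routines actually shipped by this reference BLAS release are kept.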
+ sources = [f for f in sources if os.path.isfile(f)] + info = {'sources': sources, 'language': 'f77'} + self.set_info(**info) + + +class x11_info(system_info): + section = 'x11' + notfounderror = X11NotFoundError + _lib_names = ['X11'] + + def __init__(self): + system_info.__init__(self, + default_lib_dirs=default_x11_lib_dirs, + default_include_dirs=default_x11_include_dirs) + + def calc_info(self): + if sys.platform in ['win32']: + return + lib_dirs = self.get_lib_dirs() + include_dirs = self.get_include_dirs() + opt = self.get_option_single('x11_libs', 'libraries') + x11_libs = self.get_libs(opt, self._lib_names) + info = self.check_libs(lib_dirs, x11_libs, []) + if info is None: + return + inc_dir = None + for d in include_dirs: + if self.combine_paths(d, 'X11/X.h'): + inc_dir = d + break + if inc_dir is not None: + dict_append(info, include_dirs=[inc_dir]) + self.set_info(**info) + + +class _numpy_info(system_info): + section = 'Numeric' + modulename = 'Numeric' + notfounderror = NumericNotFoundError + + def __init__(self): + include_dirs = [] + try: + module = __import__(self.modulename) + prefix = [] + for name in module.__file__.split(os.sep): + if name == 'lib': + break + prefix.append(name) + + # Ask numpy for its own include path before attempting + # anything else + try: + include_dirs.append(getattr(module, 'get_include')()) + except AttributeError: + pass + + include_dirs.append(sysconfig.get_path('include')) + except ImportError: + pass + py_incl_dir = sysconfig.get_path('include') + include_dirs.append(py_incl_dir) + py_pincl_dir = sysconfig.get_path('platinclude') + if py_pincl_dir not in include_dirs: + include_dirs.append(py_pincl_dir) + for d in default_include_dirs: + d = os.path.join(d, os.path.basename(py_incl_dir)) + if d not in include_dirs: + include_dirs.append(d) + system_info.__init__(self, + default_lib_dirs=[], + default_include_dirs=include_dirs) + + def calc_info(self): + try: + module = __import__(self.modulename) + except ImportError: + return + info = {} + macros = [] + for v in ['__version__', 'version']: + vrs = getattr(module, v, None) + if vrs is None: + continue + macros = [(self.modulename.upper() + '_VERSION', + _c_string_literal(vrs)), + (self.modulename.upper(), None)] + break + dict_append(info, define_macros=macros) + include_dirs = self.get_include_dirs() + inc_dir = None + for d in include_dirs: + if self.combine_paths(d, + os.path.join(self.modulename, + 'arrayobject.h')): + inc_dir = d + break + if inc_dir is not None: + dict_append(info, include_dirs=[inc_dir]) + if info: + self.set_info(**info) + return + + +class numarray_info(_numpy_info): + section = 'numarray' + modulename = 'numarray' + + +class Numeric_info(_numpy_info): + section = 'Numeric' + modulename = 'Numeric' + + +class numpy_info(_numpy_info): + section = 'numpy' + modulename = 'numpy' + + +class numerix_info(system_info): + section = 'numerix' + + def calc_info(self): + which = None, None + if os.getenv("NUMERIX"): + which = os.getenv("NUMERIX"), "environment var" + # If all the above fail, default to numpy. 
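+ # For example (illustrative): NUMERIX=numpy forces the numpy backend;
+ # otherwise the imports below pick the first importable backend,
+ # preferring numpy.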
+ if which[0] is None: + which = "numpy", "defaulted" + try: + import numpy # noqa: F401 + which = "numpy", "defaulted" + except ImportError as e: + msg1 = str(e) + try: + import Numeric # noqa: F401 + which = "numeric", "defaulted" + except ImportError as e: + msg2 = str(e) + try: + import numarray # noqa: F401 + which = "numarray", "defaulted" + except ImportError as e: + msg3 = str(e) + log.info(msg1) + log.info(msg2) + log.info(msg3) + which = which[0].strip().lower(), which[1] + if which[0] not in ["numeric", "numarray", "numpy"]: + raise ValueError("numerix selector must be either 'Numeric' " + "or 'numarray' or 'numpy' but the value obtained" + " from the %s was '%s'." % (which[1], which[0])) + os.environ['NUMERIX'] = which[0] + self.set_info(**get_info(which[0])) + + +class f2py_info(system_info): + def calc_info(self): + try: + import numpy.f2py as f2py + except ImportError: + return + f2py_dir = os.path.join(os.path.dirname(f2py.__file__), 'src') + self.set_info(sources=[os.path.join(f2py_dir, 'fortranobject.c')], + include_dirs=[f2py_dir]) + return + + +class boost_python_info(system_info): + section = 'boost_python' + dir_env_var = 'BOOST' + + def get_paths(self, section, key): + pre_dirs = system_info.get_paths(self, section, key) + dirs = [] + for d in pre_dirs: + dirs.extend([d] + self.combine_paths(d, ['boost*'])) + return [d for d in dirs if os.path.isdir(d)] + + def calc_info(self): + src_dirs = self.get_src_dirs() + src_dir = '' + for d in src_dirs: + if os.path.isfile(os.path.join(d, 'libs', 'python', 'src', + 'module.cpp')): + src_dir = d + break + if not src_dir: + return + py_incl_dirs = [sysconfig.get_path('include')] + py_pincl_dir = sysconfig.get_path('platinclude') + if py_pincl_dir not in py_incl_dirs: + py_incl_dirs.append(py_pincl_dir) + srcs_dir = os.path.join(src_dir, 'libs', 'python', 'src') + bpl_srcs = glob(os.path.join(srcs_dir, '*.cpp')) + bpl_srcs += glob(os.path.join(srcs_dir, '*', '*.cpp')) + info = {'libraries': [('boost_python_src', + {'include_dirs': [src_dir] + py_incl_dirs, + 'sources':bpl_srcs} + )], + 'include_dirs': [src_dir], + } + if info: + self.set_info(**info) + return + + +class agg2_info(system_info): + section = 'agg2' + dir_env_var = 'AGG2' + + def get_paths(self, section, key): + pre_dirs = system_info.get_paths(self, section, key) + dirs = [] + for d in pre_dirs: + dirs.extend([d] + self.combine_paths(d, ['agg2*'])) + return [d for d in dirs if os.path.isdir(d)] + + def calc_info(self): + src_dirs = self.get_src_dirs() + src_dir = '' + for d in src_dirs: + if os.path.isfile(os.path.join(d, 'src', 'agg_affine_matrix.cpp')): + src_dir = d + break + if not src_dir: + return + if sys.platform == 'win32': + agg2_srcs = glob(os.path.join(src_dir, 'src', 'platform', + 'win32', 'agg_win32_bmp.cpp')) + else: + agg2_srcs = glob(os.path.join(src_dir, 'src', '*.cpp')) + agg2_srcs += [os.path.join(src_dir, 'src', 'platform', + 'X11', + 'agg_platform_support.cpp')] + + info = {'libraries': + [('agg2_src', + {'sources': agg2_srcs, + 'include_dirs': [os.path.join(src_dir, 'include')], + } + )], + 'include_dirs': [os.path.join(src_dir, 'include')], + } + if info: + self.set_info(**info) + return + + +class _pkg_config_info(system_info): + section = None + config_env_var = 'PKG_CONFIG' + default_config_exe = 'pkg-config' + append_config_exe = '' + version_macro_name = None + release_macro_name = None + version_flag = '--modversion' + cflags_flag = '--cflags' + + def get_config_exe(self): + if self.config_env_var in os.environ: + return 
os.environ[self.config_env_var] + return self.default_config_exe + + def get_config_output(self, config_exe, option): + cmd = config_exe + ' ' + self.append_config_exe + ' ' + option + try: + o = subprocess.check_output(cmd) + except (OSError, subprocess.CalledProcessError): + pass + else: + o = filepath_from_subprocess_output(o) + return o + + def calc_info(self): + config_exe = find_executable(self.get_config_exe()) + if not config_exe: + log.warn('File not found: %s. Cannot determine %s info.' \ + % (config_exe, self.section)) + return + info = {} + macros = [] + libraries = [] + library_dirs = [] + include_dirs = [] + extra_link_args = [] + extra_compile_args = [] + version = self.get_config_output(config_exe, self.version_flag) + if version: + macros.append((self.__class__.__name__.split('.')[-1].upper(), + _c_string_literal(version))) + if self.version_macro_name: + macros.append((self.version_macro_name + '_%s' + % (version.replace('.', '_')), None)) + if self.release_macro_name: + release = self.get_config_output(config_exe, '--release') + if release: + macros.append((self.release_macro_name + '_%s' + % (release.replace('.', '_')), None)) + opts = self.get_config_output(config_exe, '--libs') + if opts: + for opt in opts.split(): + if opt[:2] == '-l': + libraries.append(opt[2:]) + elif opt[:2] == '-L': + library_dirs.append(opt[2:]) + else: + extra_link_args.append(opt) + opts = self.get_config_output(config_exe, self.cflags_flag) + if opts: + for opt in opts.split(): + if opt[:2] == '-I': + include_dirs.append(opt[2:]) + elif opt[:2] == '-D': + if '=' in opt: + n, v = opt[2:].split('=') + macros.append((n, v)) + else: + macros.append((opt[2:], None)) + else: + extra_compile_args.append(opt) + if macros: + dict_append(info, define_macros=macros) + if libraries: + dict_append(info, libraries=libraries) + if library_dirs: + dict_append(info, library_dirs=library_dirs) + if include_dirs: + dict_append(info, include_dirs=include_dirs) + if extra_link_args: + dict_append(info, extra_link_args=extra_link_args) + if extra_compile_args: + dict_append(info, extra_compile_args=extra_compile_args) + if info: + self.set_info(**info) + return + + +class wx_info(_pkg_config_info): + section = 'wx' + config_env_var = 'WX_CONFIG' + default_config_exe = 'wx-config' + append_config_exe = '' + version_macro_name = 'WX_VERSION' + release_macro_name = 'WX_RELEASE' + version_flag = '--version' + cflags_flag = '--cxxflags' + + +class gdk_pixbuf_xlib_2_info(_pkg_config_info): + section = 'gdk_pixbuf_xlib_2' + append_config_exe = 'gdk-pixbuf-xlib-2.0' + version_macro_name = 'GDK_PIXBUF_XLIB_VERSION' + + +class gdk_pixbuf_2_info(_pkg_config_info): + section = 'gdk_pixbuf_2' + append_config_exe = 'gdk-pixbuf-2.0' + version_macro_name = 'GDK_PIXBUF_VERSION' + + +class gdk_x11_2_info(_pkg_config_info): + section = 'gdk_x11_2' + append_config_exe = 'gdk-x11-2.0' + version_macro_name = 'GDK_X11_VERSION' + + +class gdk_2_info(_pkg_config_info): + section = 'gdk_2' + append_config_exe = 'gdk-2.0' + version_macro_name = 'GDK_VERSION' + + +class gdk_info(_pkg_config_info): + section = 'gdk' + append_config_exe = 'gdk' + version_macro_name = 'GDK_VERSION' + + +class gtkp_x11_2_info(_pkg_config_info): + section = 'gtkp_x11_2' + append_config_exe = 'gtk+-x11-2.0' + version_macro_name = 'GTK_X11_VERSION' + + +class gtkp_2_info(_pkg_config_info): + section = 'gtkp_2' + append_config_exe = 'gtk+-2.0' + version_macro_name = 'GTK_VERSION' + + +class xft_info(_pkg_config_info): + section = 'xft' + append_config_exe = 'xft' + 
version_macro_name = 'XFT_VERSION' + + +class freetype2_info(_pkg_config_info): + section = 'freetype2' + append_config_exe = 'freetype2' + version_macro_name = 'FREETYPE2_VERSION' + + +class amd_info(system_info): + section = 'amd' + dir_env_var = 'AMD' + _lib_names = ['amd'] + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + + opt = self.get_option_single('amd_libs', 'libraries') + amd_libs = self.get_libs(opt, self._lib_names) + info = self.check_libs(lib_dirs, amd_libs, []) + if info is None: + return + + include_dirs = self.get_include_dirs() + + inc_dir = None + for d in include_dirs: + p = self.combine_paths(d, 'amd.h') + if p: + inc_dir = os.path.dirname(p[0]) + break + if inc_dir is not None: + dict_append(info, include_dirs=[inc_dir], + define_macros=[('SCIPY_AMD_H', None)], + swig_opts=['-I' + inc_dir]) + + self.set_info(**info) + return + + +class umfpack_info(system_info): + section = 'umfpack' + dir_env_var = 'UMFPACK' + notfounderror = UmfpackNotFoundError + _lib_names = ['umfpack'] + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + + opt = self.get_option_single('umfpack_libs', 'libraries') + umfpack_libs = self.get_libs(opt, self._lib_names) + info = self.check_libs(lib_dirs, umfpack_libs, []) + if info is None: + return + + include_dirs = self.get_include_dirs() + + inc_dir = None + for d in include_dirs: + p = self.combine_paths(d, ['', 'umfpack'], 'umfpack.h') + if p: + inc_dir = os.path.dirname(p[0]) + break + if inc_dir is not None: + dict_append(info, include_dirs=[inc_dir], + define_macros=[('SCIPY_UMFPACK_H', None)], + swig_opts=['-I' + inc_dir]) + + dict_append(info, **get_info('amd')) + + self.set_info(**info) + return + + +def combine_paths(*args, **kws): + """ Return a list of existing paths composed by all combinations of + items from arguments. 
+ """ + r = [] + for a in args: + if not a: + continue + if is_string(a): + a = [a] + r.append(a) + args = r + if not args: + return [] + if len(args) == 1: + result = reduce(lambda a, b: a + b, map(glob, args[0]), []) + elif len(args) == 2: + result = [] + for a0 in args[0]: + for a1 in args[1]: + result.extend(glob(os.path.join(a0, a1))) + else: + result = combine_paths(*(combine_paths(args[0], args[1]) + args[2:])) + log.debug('(paths: %s)', ','.join(result)) + return result + +language_map = {'c': 0, 'c++': 1, 'f77': 2, 'f90': 3} +inv_language_map = {0: 'c', 1: 'c++', 2: 'f77', 3: 'f90'} + + +def dict_append(d, **kws): + languages = [] + for k, v in kws.items(): + if k == 'language': + languages.append(v) + continue + if k in d: + if k in ['library_dirs', 'include_dirs', + 'extra_compile_args', 'extra_link_args', + 'runtime_library_dirs', 'define_macros']: + [d[k].append(vv) for vv in v if vv not in d[k]] + else: + d[k].extend(v) + else: + d[k] = v + if languages: + l = inv_language_map[max([language_map.get(l, 0) for l in languages])] + d['language'] = l + return + + +def parseCmdLine(argv=(None,)): + import optparse + parser = optparse.OptionParser("usage: %prog [-v] [info objs]") + parser.add_option('-v', '--verbose', action='store_true', dest='verbose', + default=False, + help='be verbose and print more messages') + + opts, args = parser.parse_args(args=argv[1:]) + return opts, args + + +def show_all(argv=None): + import inspect + if argv is None: + argv = sys.argv + opts, args = parseCmdLine(argv) + if opts.verbose: + log.set_threshold(log.DEBUG) + else: + log.set_threshold(log.INFO) + show_only = [] + for n in args: + if n[-5:] != '_info': + n = n + '_info' + show_only.append(n) + show_all = not show_only + _gdict_ = globals().copy() + for name, c in _gdict_.items(): + if not inspect.isclass(c): + continue + if not issubclass(c, system_info) or c is system_info: + continue + if not show_all: + if name not in show_only: + continue + del show_only[show_only.index(name)] + conf = c() + conf.verbosity = 2 + # FIXME: r not used + r = conf.get_info() + if show_only: + log.info('Info classes not defined: %s', ','.join(show_only)) + +if __name__ == "__main__": + show_all() diff --git a/MLPY/Lib/site-packages/numpy/distutils/tests/__init__.py b/MLPY/Lib/site-packages/numpy/distutils/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c464a883aa0462e16e19a53dca1f812a5d5e13c9 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_build_ext.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_build_ext.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ca20a21b276a6c34c4483fa6a754e9fb95860b9 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_build_ext.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_ccompiler_opt.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_ccompiler_opt.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..181b654778a592ad3453d4e96aac45766ff87d37 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_ccompiler_opt.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_ccompiler_opt_conf.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_ccompiler_opt_conf.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c929aa98c026be9df95d50be939b3724da1d6e13 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_ccompiler_opt_conf.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_exec_command.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_exec_command.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..926595a67e075887fa8dd422690fba94cc889b4a Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_exec_command.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_fcompiler.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_fcompiler.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c1b315470f70c11c7310bed5e12851a1e2f2f144 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_fcompiler.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_fcompiler_gnu.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_fcompiler_gnu.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf101719c6b0762c7fdb5eea75fc9e88287d587a Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_fcompiler_gnu.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_fcompiler_intel.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_fcompiler_intel.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..039a4c11e6f398d1f0401578e23658679f78b3ca Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_fcompiler_intel.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_fcompiler_nagfor.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_fcompiler_nagfor.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..560a956a612c92146e0443c8908be8ce7938391f Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_fcompiler_nagfor.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_from_template.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_from_template.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..992f3d98a5e8883ac561fff56c7cdef39671de87 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_from_template.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_mingw32ccompiler.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_mingw32ccompiler.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..cbe5ffb628d8d239853599b7e8ff6add1fab9240 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_mingw32ccompiler.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_misc_util.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_misc_util.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..20c21e255d43a77acaa8f7935b6fbdd0299b35e5 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_misc_util.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_npy_pkg_config.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_npy_pkg_config.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05a845f97aa89c76d2f14729d2bd5f4f3b56ca65 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_npy_pkg_config.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_shell_utils.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_shell_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3bc85fbe5c18506a7fedc73c3f45bdf5f106317e Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_shell_utils.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_system_info.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_system_info.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d27e2c10acc24904fa926a1e7a1b47d1776ccc0c Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/distutils/tests/__pycache__/test_system_info.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/distutils/tests/test_build_ext.py b/MLPY/Lib/site-packages/numpy/distutils/tests/test_build_ext.py new file mode 100644 index 0000000000000000000000000000000000000000..b41244ba0f0d3b3a45d1a0adbadc2bd3b67202a2 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/tests/test_build_ext.py @@ -0,0 +1,72 @@ +'''Tests for numpy.distutils.build_ext.''' + +import os +import subprocess +import sys +from textwrap import indent, dedent +import pytest + +@pytest.mark.slow +def test_multi_fortran_libs_link(tmp_path): + ''' + Ensures multiple "fake" static libraries are correctly linked. + see gh-18295 + ''' + + # We need to make sure we actually have an f77 compiler. 
+ # This is nontrivial, so we'll borrow the utilities + # from f2py tests: + from numpy.f2py.tests.util import has_f77_compiler + if not has_f77_compiler(): + pytest.skip('No F77 compiler found') + + # make some dummy sources + with open(tmp_path / '_dummy1.f', 'w') as fid: + fid.write(indent(dedent('''\ + FUNCTION dummy_one() + RETURN + END FUNCTION'''), prefix=' '*6)) + with open(tmp_path / '_dummy2.f', 'w') as fid: + fid.write(indent(dedent('''\ + FUNCTION dummy_two() + RETURN + END FUNCTION'''), prefix=' '*6)) + with open(tmp_path / '_dummy.c', 'w') as fid: + # doesn't need to load - just needs to exist + fid.write('int PyInit_dummyext;') + + # make a setup file + with open(tmp_path / 'setup.py', 'w') as fid: + srctree = os.path.join(os.path.dirname(__file__), '..', '..', '..') + fid.write(dedent(f'''\ + def configuration(parent_package="", top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration("", parent_package, top_path) + config.add_library("dummy1", sources=["_dummy1.f"]) + config.add_library("dummy2", sources=["_dummy2.f"]) + config.add_extension("dummyext", sources=["_dummy.c"], libraries=["dummy1", "dummy2"]) + return config + + + if __name__ == "__main__": + import sys + sys.path.insert(0, r"{srctree}") + from numpy.distutils.core import setup + setup(**configuration(top_path="").todict())''')) + + # build the test extension and "install" into a temporary directory + build_dir = tmp_path + subprocess.check_call([sys.executable, 'setup.py', 'build', 'install', + '--prefix', str(tmp_path / 'installdir'), + '--record', str(tmp_path / 'tmp_install_log.txt'), + ], + cwd=str(build_dir), + ) + # get the path to the so + so = None + with open(tmp_path /'tmp_install_log.txt') as fid: + for line in fid: + if 'dummyext' in line: + so = line.strip() + break + assert so is not None diff --git a/MLPY/Lib/site-packages/numpy/distutils/tests/test_ccompiler_opt.py b/MLPY/Lib/site-packages/numpy/distutils/tests/test_ccompiler_opt.py new file mode 100644 index 0000000000000000000000000000000000000000..4a331b674ddf2a705fcfde602de61a5f38157ec6 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/tests/test_ccompiler_opt.py @@ -0,0 +1,787 @@ +import re, textwrap, os +from os import sys, path +from distutils.errors import DistutilsError + +is_standalone = __name__ == '__main__' and __package__ is None +if is_standalone: + import unittest, contextlib, tempfile, shutil + sys.path.append(path.abspath(path.join(path.dirname(__file__), ".."))) + from ccompiler_opt import CCompilerOpt + + # from numpy/testing/_private/utils.py + @contextlib.contextmanager + def tempdir(*args, **kwargs): + tmpdir = tempfile.mkdtemp(*args, **kwargs) + try: + yield tmpdir + finally: + shutil.rmtree(tmpdir) + + def assert_(expr, msg=''): + if not expr: + raise AssertionError(msg) +else: + from numpy.distutils.ccompiler_opt import CCompilerOpt + from numpy.testing import assert_, tempdir + +# architectures and compilers to test +arch_compilers = dict( + x86 = ("gcc", "clang", "icc", "iccw", "msvc"), + x64 = ("gcc", "clang", "icc", "iccw", "msvc"), + ppc64 = ("gcc", "clang"), + ppc64le = ("gcc", "clang"), + armhf = ("gcc", "clang"), + aarch64 = ("gcc", "clang"), + noarch = ("gcc",) +) + +class FakeCCompilerOpt(CCompilerOpt): + fake_info = "" + def __init__(self, trap_files="", trap_flags="", *args, **kwargs): + self.fake_trap_files = trap_files + self.fake_trap_flags = trap_flags + CCompilerOpt.__init__(self, None, **kwargs) + + def __repr__(self): + return textwrap.dedent("""\ +
<<<< + march : {} + compiler : {} + ---------------- + {} + >>>> + """).format(self.cc_march, self.cc_name, self.report()) + + def dist_compile(self, sources, flags, **kwargs): + assert(isinstance(sources, list)) + assert(isinstance(flags, list)) + if self.fake_trap_files: + for src in sources: + if re.match(self.fake_trap_files, src): + self.dist_error("source is trapped by a fake interface") + if self.fake_trap_flags: + for f in flags: + if re.match(self.fake_trap_flags, f): + self.dist_error("flag is trapped by a fake interface") + # fake objects + return zip(sources, [' '.join(flags)] * len(sources)) + + def dist_info(self): + return FakeCCompilerOpt.fake_info + + @staticmethod + def dist_log(*args, stderr=False): + pass + +class _Test_CCompilerOpt: + arch = None # x86_64 + cc = None # gcc + + def setup(self): + FakeCCompilerOpt.conf_nocache = True + self._opt = None + + def nopt(self, *args, **kwargs): + FakeCCompilerOpt.fake_info = (self.arch, self.cc, "") + return FakeCCompilerOpt(*args, **kwargs) + + def opt(self): + if not self._opt: + self._opt = self.nopt() + return self._opt + + def march(self): + return self.opt().cc_march + + def cc_name(self): + return self.opt().cc_name + + def get_targets(self, targets, groups, **kwargs): + FakeCCompilerOpt.conf_target_groups = groups + opt = self.nopt( + cpu_baseline=kwargs.get("baseline", "min"), + cpu_dispatch=kwargs.get("dispatch", "max"), + trap_files=kwargs.get("trap_files", ""), + trap_flags=kwargs.get("trap_flags", "") + ) + with tempdir() as tmpdir: + file = os.path.join(tmpdir, "test_targets.c") + with open(file, 'w') as f: + f.write(targets) + gtargets = [] + gflags = {} + fake_objects = opt.try_dispatch([file]) + for source, flags in fake_objects: + gtar = path.basename(source).split('.')[1:-1] + glen = len(gtar) + if glen == 0: + gtar = "baseline" + elif glen == 1: + gtar = gtar[0].upper() + else: + # converting multi-target into parentheses str format to be equivalent + # to the configuration statements syntax. 
+ gtar = ('('+' '.join(gtar)+')').upper() + gtargets.append(gtar) + gflags[gtar] = flags + + has_baseline, targets = opt.sources_status[file] + targets = targets + ["baseline"] if has_baseline else targets + # convert tuples that represent multi-targets into parentheses str format + targets = [ + '('+' '.join(tar)+')' if isinstance(tar, tuple) else tar + for tar in targets + ] + if len(targets) != len(gtargets) or not all(t in gtargets for t in targets): + raise AssertionError( + "'sources_status' returns different targets than the compiled targets\n" + "%s != %s" % (targets, gtargets) + ) + # return targets from 'sources_status' since the order matters + return targets, gflags + + def arg_regex(self, **kwargs): + map2origin = dict( + x64 = "x86", + ppc64le = "ppc64", + aarch64 = "armhf", + clang = "gcc", + ) + march = self.march(); cc_name = self.cc_name() + map_march = map2origin.get(march, march) + map_cc = map2origin.get(cc_name, cc_name) + for key in ( + march, cc_name, map_march, map_cc, + march + '_' + cc_name, + map_march + '_' + cc_name, + march + '_' + map_cc, + map_march + '_' + map_cc, + ) : + regex = kwargs.pop(key, None) + if regex is not None: + break + if regex: + if isinstance(regex, dict): + for k, v in regex.items(): + if v[-1:] not in ')}$?\\.+*': + regex[k] = v + '$' + else: + assert(isinstance(regex, str)) + if regex[-1:] not in ')}$?\\.+*': + regex += '$' + return regex + + def expect(self, dispatch, baseline="", **kwargs): + match = self.arg_regex(**kwargs) + if match is None: + return + opt = self.nopt( + cpu_baseline=baseline, cpu_dispatch=dispatch, + trap_files=kwargs.get("trap_files", ""), + trap_flags=kwargs.get("trap_flags", "") + ) + features = ' '.join(opt.cpu_dispatch_names()) + if not match: + if len(features) != 0: + raise AssertionError( + 'expected empty features, not "%s"' % features + ) + return + if not re.match(match, features, re.IGNORECASE): + raise AssertionError( + 'dispatch features "%s" not match "%s"' % (features, match) + ) + + def expect_baseline(self, baseline, dispatch="", **kwargs): + match = self.arg_regex(**kwargs) + if match is None: + return + opt = self.nopt( + cpu_baseline=baseline, cpu_dispatch=dispatch, + trap_files=kwargs.get("trap_files", ""), + trap_flags=kwargs.get("trap_flags", "") + ) + features = ' '.join(opt.cpu_baseline_names()) + if not match: + if len(features) != 0: + raise AssertionError( + 'expected empty features, not "%s"' % features + ) + return + if not re.match(match, features, re.IGNORECASE): + raise AssertionError( + 'baseline features "%s" not match "%s"' % (features, match) + ) + + def expect_flags(self, baseline, dispatch="", **kwargs): + match = self.arg_regex(**kwargs) + if match is None: + return + opt = self.nopt( + cpu_baseline=baseline, cpu_dispatch=dispatch, + trap_files=kwargs.get("trap_files", ""), + trap_flags=kwargs.get("trap_flags", "") + ) + flags = ' '.join(opt.cpu_baseline_flags()) + if not match: + if len(flags) != 0: + raise AssertionError( + 'expected empty flags, not "%s"' % flags + ) + return + if not re.match(match, flags): + raise AssertionError( + 'flags "%s" not match "%s"' % (flags, match) + ) + + def expect_targets(self, targets, groups={}, **kwargs): + match = self.arg_regex(**kwargs) + if match is None: + return + targets, _ = self.get_targets(targets=targets, groups=groups, **kwargs) + targets = ' '.join(targets) + if not match: + if len(targets) != 0: + raise AssertionError( + 'expected empty targets, not "%s"' % targets + ) + return + if not re.match(match, targets,
re.IGNORECASE): + raise AssertionError( + 'targets "%s" not match "%s"' % (targets, match) + ) + + def expect_target_flags(self, targets, groups={}, **kwargs): + match_dict = self.arg_regex(**kwargs) + if match_dict is None: + return + assert(isinstance(match_dict, dict)) + _, tar_flags = self.get_targets(targets=targets, groups=groups) + + for match_tar, match_flags in match_dict.items(): + if match_tar not in tar_flags: + raise AssertionError( + 'expected to find target "%s"' % match_tar + ) + flags = tar_flags[match_tar] + if not match_flags: + if len(flags) != 0: + raise AssertionError( + 'expected to find empty flags in target "%s"' % match_tar + ) + if not re.match(match_flags, flags): + raise AssertionError( + '"%s" flags "%s" not match "%s"' % (match_tar, flags, match_flags) + ) + + def test_interface(self): + wrong_arch = "ppc64" if self.arch != "ppc64" else "x86" + wrong_cc = "clang" if self.cc != "clang" else "icc" + opt = self.opt() + assert_(getattr(opt, "cc_on_" + self.arch)) + assert_(not getattr(opt, "cc_on_" + wrong_arch)) + assert_(getattr(opt, "cc_is_" + self.cc)) + assert_(not getattr(opt, "cc_is_" + wrong_cc)) + + def test_args_empty(self): + for baseline, dispatch in ( + ("", "none"), + (None, ""), + ("none +none", "none - none"), + ("none -max", "min - max"), + ("+vsx2 -VSX2", "vsx avx2 avx512f -max"), + ("max -vsx - avx + avx512f neon -MAX ", + "min -min + max -max -vsx + avx2 -avx2 +NONE") + ) : + opt = self.nopt(cpu_baseline=baseline, cpu_dispatch=dispatch) + assert(len(opt.cpu_baseline_names()) == 0) + assert(len(opt.cpu_dispatch_names()) == 0) + + def test_args_validation(self): + if self.march() == "unknown": + return + # check sanity of argument validation + for baseline, dispatch in ( + ("unkown_feature - max +min", "unknown max min"), # unknown features + ("#avx2", "$vsx") # groups and policies aren't acceptable + ) : + try: + self.nopt(cpu_baseline=baseline, cpu_dispatch=dispatch) + raise AssertionError("expected an exception for invalid arguments") + except DistutilsError: + pass + + def test_skip(self): + # only take what the platform supports and skip the others + # without causing exceptions + self.expect( + "sse vsx neon", + x86="sse", ppc64="vsx", armhf="neon", unknown="" + ) + self.expect( + "sse41 avx avx2 vsx2 vsx3 neon_vfpv4 asimd", + x86 = "sse41 avx avx2", + ppc64 = "vsx2 vsx3", + armhf = "neon_vfpv4 asimd", + unknown = "" + ) + # any features in cpu_dispatch must be ignored if they're part of the baseline + self.expect( + "sse neon vsx", baseline="sse neon vsx", + x86="", ppc64="", armhf="" + ) + self.expect( + "avx2 vsx3 asimdhp", baseline="avx2 vsx3 asimdhp", + x86="", ppc64="", armhf="" + ) + + def test_implies(self): + # the baseline combines implied features, so we count + # on it instead of testing 'feature_implies()' directly + self.expect_baseline( + "fma3 avx2 asimd vsx3", + # .* between two spaces can validate features in between + x86 = "sse .* sse41 .* fma3.*avx2", + ppc64 = "vsx vsx2 vsx3", + armhf = "neon neon_fp16 neon_vfpv4 asimd" + ) + """ + special cases + """ + # in icc and msvc, FMA3 and AVX2 can't be separated + # both need to imply each other, same for avx512f & avx512cd + for f0, f1 in ( + ("fma3", "avx2"), + ("avx512f", "avx512cd"), + ): + diff = ".* sse42 .* %s .*%s$" % (f0, f1) + self.expect_baseline(f0, + x86_gcc=".* sse42 .* %s$" % f0, + x86_icc=diff, x86_iccw=diff + ) + self.expect_baseline(f1, + x86_gcc=".* avx .* %s$" % f1, + x86_icc=diff, x86_iccw=diff + ) + # in msvc, the following features can't be separated either + for f in
(("fma3", "avx2"), ("avx512f", "avx512cd", "avx512_skx")): + for ff in f: + self.expect_baseline(ff, + x86_msvc=".*%s" % ' '.join(f) + ) + + # in ppc64le VSX and VSX2 can't be separated + self.expect_baseline("vsx", ppc64le="vsx vsx2") + # in aarch64 the following features can't be separated + for f in ("neon", "neon_fp16", "neon_vfpv4", "asimd"): + self.expect_baseline(f, aarch64="neon neon_fp16 neon_vfpv4 asimd") + + def test_args_options(self): + # max & native + for o in ("max", "native"): + if o == "native" and self.cc_name() == "msvc": + continue + self.expect(o, + trap_files=".*cpu_(sse|vsx|neon).c", + x86="", ppc64="", armhf="" + ) + self.expect(o, + trap_files=".*cpu_(sse3|vsx2|neon_vfpv4).c", + x86="sse sse2", ppc64="vsx", armhf="neon neon_fp16", + aarch64="", ppc64le="" + ) + self.expect(o, + trap_files=".*cpu_(popcnt|vsx3).c", + x86="sse .* sse41", ppc64="vsx vsx2", + armhf="neon neon_fp16 .* asimd .*" + ) + self.expect(o, + x86_gcc=".* xop fma4 .* avx512f .* avx512_knl avx512_knm avx512_skx .*", + # in icc, xop and fma4 aren't supported + x86_icc=".* avx512f .* avx512_knl avx512_knm avx512_skx .*", + x86_iccw=".* avx512f .* avx512_knl avx512_knm avx512_skx .*", + # in msvc, avx512_knl avx512_knm aren't supported + x86_msvc=".* xop fma4 .* avx512f .* avx512_skx .*", + armhf=".* asimd asimdhp asimddp .*", + ppc64="vsx vsx2 vsx3.*" + ) + # min + self.expect("min", + x86="sse sse2", x64="sse sse2 sse3", + armhf="", aarch64="neon neon_fp16 .* asimd", + ppc64="", ppc64le="vsx vsx2" + ) + self.expect( + "min", trap_files=".*cpu_(sse2|vsx2).c", + x86="", ppc64le="" + ) + # an exception must be triggered if the native flag isn't supported + # when option "native" is activated through the args + try: + self.expect("native", + trap_flags=".*(-march=native|-xHost|/QxHost).*", + x86=".*", ppc64=".*", armhf=".*" + ) + if self.march() != "unknown": + raise AssertionError( + "expected an exception for %s" % self.march() + ) + except DistutilsError: + if self.march() == "unknown": + raise AssertionError("expected no exceptions") + + def test_flags(self): + self.expect_flags( + "sse sse2 vsx vsx2 neon neon_fp16", + x86_gcc="-msse -msse2", x86_icc="-msse -msse2", + x86_iccw="/arch:SSE2", x86_msvc="/arch:SSE2", + ppc64_gcc= "-mcpu=power8", + ppc64_clang="-maltivec -mvsx -mpower8-vector", + armhf_gcc="-mfpu=neon-fp16 -mfp16-format=ieee", + aarch64="" + ) + # test normalizing -march + self.expect_flags( + "asimd", + aarch64="", + armhf_gcc=r"-mfp16-format=ieee -mfpu=neon-fp-armv8 -march=armv8-a\+simd" + ) + self.expect_flags( + "asimdhp", + aarch64_gcc=r"-march=armv8.2-a\+fp16", + armhf_gcc=r"-mfp16-format=ieee -mfpu=neon-fp-armv8 -march=armv8.2-a\+fp16" + ) + self.expect_flags( + "asimddp", aarch64_gcc=r"-march=armv8.2-a\+dotprod" + ) + self.expect_flags( + # asimdfhm implies asimdhp + "asimdfhm", aarch64_gcc=r"-march=armv8.2-a\+fp16\+fp16fml" + ) + self.expect_flags( + "asimddp asimdhp asimdfhm", + aarch64_gcc=r"-march=armv8.2-a\+dotprod\+fp16\+fp16fml" + ) + + def test_targets_exceptions(self): + for targets in ( + "bla bla", "/*@targets", + "/*@targets */", + "/*@targets unknown */", + "/*@targets $unknown_policy avx2 */", + "/*@targets #unknown_group avx2 */", + "/*@targets $ */", + "/*@targets # vsx */", + "/*@targets #$ vsx */", + "/*@targets vsx avx2 ) */", + "/*@targets vsx avx2 (avx2 */", + "/*@targets vsx avx2 () */", + "/*@targets vsx avx2 ($autovec) */", # no features + "/*@targets vsx avx2 (xxx) */", + "/*@targets vsx avx2 (baseline) */", + ) : + try: + self.expect_targets( + targets, + x86="",
armhf="", ppc64="" + ) + if self.march() != "unknown": + raise AssertionError( + "expected an exception for %s" % self.march() + ) + except DistutilsError: + if self.march() == "unknown": + raise AssertionError("expected no exceptions") + + def test_targets_syntax(self): + for targets in ( + "/*@targets $keep_baseline sse vsx neon*/", + "/*@targets,$keep_baseline,sse,vsx,neon*/", + "/*@targets*$keep_baseline*sse*vsx*neon*/", + """ + /* + ** @targets + ** $keep_baseline, sse vsx,neon + */ + """, + """ + /* + ************@targets************* + ** $keep_baseline, sse vsx, neon + ********************************* + */ + """, + """ + /* + /////////////@targets///////////////// + //$keep_baseline//sse//vsx//neon + ///////////////////////////////////// + */ + """, + """ + /* + @targets + $keep_baseline + SSE VSX NEON*/ + """ + ) : + self.expect_targets(targets, + x86="sse", ppc64="vsx", armhf="neon", unknown="" + ) + + def test_targets(self): + # test skipping baseline features + self.expect_targets( + """ + /*@targets + sse sse2 sse41 avx avx2 avx512f + vsx vsx2 vsx3 + neon neon_fp16 asimdhp asimddp + */ + """, + baseline="avx vsx2 asimd", + x86="avx512f avx2", armhf="asimddp asimdhp", ppc64="vsx3" + ) + # test skipping non-dispatch features + self.expect_targets( + """ + /*@targets + sse41 avx avx2 avx512f + vsx2 vsx3 + asimd asimdhp asimddp + */ + """, + baseline="", dispatch="sse41 avx2 vsx2 asimd asimddp", + x86="avx2 sse41", armhf="asimddp asimd", ppc64="vsx2" + ) + # test skipping features that are not supported + self.expect_targets( + """ + /*@targets + sse2 sse41 avx2 avx512f + vsx2 vsx3 + neon asimdhp asimddp + */ + """, + baseline="", + trap_files=".*(avx2|avx512f|vsx3|asimddp).c", + x86="sse41 sse2", ppc64="vsx2", armhf="asimdhp neon" + ) + # test skipping features that imply each other + self.expect_targets( + """ + /*@targets + sse sse2 avx fma3 avx2 avx512f avx512cd + vsx vsx2 vsx3 + neon neon_vfpv4 neon_fp16 neon_fp16 asimd asimdhp + asimddp asimdfhm + */ + """, + baseline="", + x86_gcc="avx512cd avx512f avx2 fma3 avx sse2", + x86_msvc="avx512cd avx2 avx sse2", + x86_icc="avx512cd avx2 avx sse2", + x86_iccw="avx512cd avx2 avx sse2", + ppc64="vsx3 vsx2 vsx", + ppc64le="vsx3 vsx2", + armhf="asimdfhm asimddp asimdhp asimd neon_vfpv4 neon_fp16 neon", + aarch64="asimdfhm asimddp asimdhp asimd" + ) + + def test_targets_policies(self): + # 'keep_baseline', generate objects for baseline features + self.expect_targets( + """ + /*@targets + $keep_baseline + sse2 sse42 avx2 avx512f + vsx2 vsx3 + neon neon_vfpv4 asimd asimddp + */ + """, + baseline="sse41 avx2 vsx2 asimd vsx3", + x86="avx512f avx2 sse42 sse2", + ppc64="vsx3 vsx2", + armhf="asimddp asimd neon_vfpv4 neon", + # neon, neon_vfpv4, asimd imply each other + aarch64="asimddp asimd" + ) + # 'keep_sort', leave the sort as-is + self.expect_targets( + """ + /*@targets + $keep_baseline $keep_sort + avx512f sse42 avx2 sse2 + vsx2 vsx3 + asimd neon neon_vfpv4 asimddp + */ + """, + x86="avx512f sse42 avx2 sse2", + ppc64="vsx2 vsx3", + armhf="asimd neon neon_vfpv4 asimddp", + # neon, neon_vfpv4, asimd imply each other + aarch64="asimd asimddp" + ) + # 'autovec', skipping features that can't be + # vectorized by the compiler + self.expect_targets( + """ + /*@targets + $keep_baseline $keep_sort $autovec + avx512f avx2 sse42 sse41 sse2 + vsx3 vsx2 + asimddp asimd neon_vfpv4 neon + */ + """, + x86_gcc="avx512f avx2 sse42 sse41 sse2", + x86_icc="avx512f avx2 sse42 sse41 sse2", + x86_iccw="avx512f avx2 sse42 sse41 sse2", + x86_msvc="avx512f
avx2 sse2", + ppc64="vsx3 vsx2", + armhf="asimddp asimd neon_vfpv4 neon", + # neon, neon_vfpv4, asimd imply each other + aarch64="asimddp asimd" + ) + for policy in ("$maxopt", "$autovec"): + # 'maxopt' and 'autovec' set the max acceptable optimization flags + self.expect_target_flags( + "/*@targets baseline %s */" % policy, + gcc={"baseline":".*-O3.*"}, icc={"baseline":".*-O3.*"}, + iccw={"baseline":".*/O3.*"}, msvc={"baseline":".*/O2.*"}, + unknown={"baseline":".*"} + ) + + # 'werror', force compilers to treat warnings as errors + self.expect_target_flags( + "/*@targets baseline $werror */", + gcc={"baseline":".*-Werror.*"}, icc={"baseline":".*-Werror.*"}, + iccw={"baseline":".*/Werror.*"}, msvc={"baseline":".*/WX.*"}, + unknown={"baseline":".*"} + ) + + def test_targets_groups(self): + self.expect_targets( + """ + /*@targets $keep_baseline baseline #test_group */ + """, + groups=dict( + test_group=(""" + $keep_baseline + asimddp sse2 vsx2 avx2 vsx3 + avx512f asimdhp + """) + ), + x86="avx512f avx2 sse2 baseline", + ppc64="vsx3 vsx2 baseline", + armhf="asimddp asimdhp baseline" + ) + # test skipping duplicates and sorting + self.expect_targets( + """ + /*@targets + * sse42 avx avx512f + * #test_group_1 + * vsx2 + * #test_group_2 + * asimddp asimdfhm + */ + """, + groups=dict( + test_group_1=(""" + VSX2 vsx3 asimd avx2 SSE41 + """), + test_group_2=(""" + vsx2 vsx3 asImd aVx2 sse41 + """) + ), + x86="avx512f avx2 avx sse42 sse41", + ppc64="vsx3 vsx2", + # vsx2 part of the default baseline of ppc64le, option ("min") + ppc64le="vsx3", + armhf="asimdfhm asimddp asimd", + # asimd part of the default baseline of aarch64, option ("min") + aarch64="asimdfhm asimddp" + ) + + def test_targets_multi(self): + self.expect_targets( + """ + /*@targets + (avx512_clx avx512_cnl) (asimdhp asimddp) + */ + """, + x86=r"\(avx512_clx avx512_cnl\)", + armhf=r"\(asimdhp asimddp\)", + ) + # test skipping implied features and auto-sort + self.expect_targets( + """ + /*@targets + f16c (sse41 avx sse42) (sse3 avx2 avx512f) + vsx2 (vsx vsx3 vsx2) + (neon neon_vfpv4 asimd asimdhp asimddp) + */ + """, + x86="avx512f f16c avx", + ppc64="vsx3 vsx2", + ppc64le="vsx3", # vsx2 part of baseline + armhf=r"\(asimdhp asimddp\)", + ) + # test skipping implied features and keep sort + self.expect_targets( + """ + /*@targets $keep_sort + (sse41 avx sse42) (sse3 avx2 avx512f) + (vsx vsx3 vsx2) + (asimddp neon neon_vfpv4 asimd asimdhp) + */ + """, + x86="avx avx512f", + ppc64="vsx3", + armhf=r"\(asimdhp asimddp\)", + ) + # test compiler variety and avoid duplicates + self.expect_targets( + """ + /*@targets $keep_sort + fma3 avx2 (fma3 avx2) (avx2 fma3) avx2 fma3 + */ + """, + x86_gcc=r"fma3 avx2 \(fma3 avx2\)", + x86_icc="avx2", x86_iccw="avx2", + x86_msvc="avx2" + ) + +def new_test(arch, cc): + if is_standalone: return textwrap.dedent("""\ + class TestCCompilerOpt_{class_name}(_Test_CCompilerOpt, unittest.TestCase): + arch = '{arch}' + cc = '{cc}' + def __init__(self, methodName="runTest"): + unittest.TestCase.__init__(self, methodName) + self.setup() + """).format( + class_name=arch + '_' + cc, arch=arch, cc=cc + ) + return textwrap.dedent("""\ + class TestCCompilerOpt_{class_name}(_Test_CCompilerOpt): + arch = '{arch}' + cc = '{cc}' + """).format( + class_name=arch + '_' + cc, arch=arch, cc=cc + ) +""" +if 1 and is_standalone: + FakeCCompilerOpt.fake_info = "x86_icc" + cco = FakeCCompilerOpt(None, cpu_baseline="avx2") + print(' '.join(cco.cpu_baseline_names())) + print(cco.cpu_baseline_flags()) + unittest.main() + sys.exit()
+""" +for arch, compilers in arch_compilers.items(): + for cc in compilers: + exec(new_test(arch, cc)) + +if is_standalone: + unittest.main() diff --git a/MLPY/Lib/site-packages/numpy/distutils/tests/test_ccompiler_opt_conf.py b/MLPY/Lib/site-packages/numpy/distutils/tests/test_ccompiler_opt_conf.py new file mode 100644 index 0000000000000000000000000000000000000000..f295d6521c908342129909842e1f9a4dd26577a3 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/tests/test_ccompiler_opt_conf.py @@ -0,0 +1,176 @@ +import unittest +from os import sys, path + +is_standalone = __name__ == '__main__' and __package__ is None +if is_standalone: + sys.path.append(path.abspath(path.join(path.dirname(__file__), ".."))) + from ccompiler_opt import CCompilerOpt +else: + from numpy.distutils.ccompiler_opt import CCompilerOpt + +arch_compilers = dict( + x86 = ("gcc", "clang", "icc", "iccw", "msvc"), + x64 = ("gcc", "clang", "icc", "iccw", "msvc"), + ppc64 = ("gcc", "clang"), + ppc64le = ("gcc", "clang"), + armhf = ("gcc", "clang"), + aarch64 = ("gcc", "clang"), + narch = ("gcc",) +) + +class FakeCCompilerOpt(CCompilerOpt): + fake_info = ("arch", "compiler", "extra_args") + def __init__(self, *args, **kwargs): + CCompilerOpt.__init__(self, None, **kwargs) + def dist_compile(self, sources, flags, **kwargs): + return sources + def dist_info(self): + return FakeCCompilerOpt.fake_info + @staticmethod + def dist_log(*args, stderr=False): + pass + +class _TestConfFeatures(FakeCCompilerOpt): + """A hook to check the sanity of configured features + before it is called by the abstract class '_Feature' + """ + + def conf_features_partial(self): + conf_all = self.conf_features + for feature_name, feature in conf_all.items(): + self.test_feature( + "attribute conf_features", + conf_all, feature_name, feature + ) + + conf_partial = FakeCCompilerOpt.conf_features_partial(self) + for feature_name, feature in conf_partial.items(): + self.test_feature( + "conf_features_partial()", + conf_partial, feature_name, feature + ) + return conf_partial + + def test_feature(self, log, search_in, feature_name, feature_dict): + error_msg = ( + "during validation of '{}' within feature '{}', " + "march '{}' and compiler '{}'\n>> " + ).format(log, feature_name, self.cc_march, self.cc_name) + + if not feature_name.isupper(): + raise AssertionError(error_msg + "feature name must be in uppercase") + + for option, val in feature_dict.items(): + self.test_option_types(error_msg, option, val) + self.test_duplicates(error_msg, option, val) + + self.test_implies(error_msg, search_in, feature_name, feature_dict) + self.test_group(error_msg, search_in, feature_name, feature_dict) + self.test_extra_checks(error_msg, search_in, feature_name, feature_dict) + + def test_option_types(self, error_msg, option, val): + for tp, available in ( + ((str, list), ( + "implies", "headers", "flags", "group", "detect", "extra_checks" + )), + ((str,), ("disable",)), + ((int,), ("interest",)), + ((bool,), ("implies_detect",)), + ((bool, type(None)), ("autovec",)), + ) : + found_it = option in available + if not found_it: + continue + if not isinstance(val, tp): + error_tp = [t.__name__ for t in (*tp,)] + error_tp = ' or '.join(error_tp) + raise AssertionError(error_msg + + "expected '%s' type for option '%s' not '%s'" % ( + error_tp, option, type(val).__name__ + )) + break + + if not found_it: + raise AssertionError(error_msg + "invalid option name '%s'" % option) + + def test_duplicates(self, error_msg, option, val): + if option not in ( + "implies", "headers",
"flags", "group", "detect", "extra_checks" + ) : return + + if isinstance(val, str): + val = val.split() + + if len(val) != len(set(val)): + raise AssertionError(error_msg + "duplicated values in option '%s'" % option) + + def test_implies(self, error_msg, search_in, feature_name, feature_dict): + if feature_dict.get("disabled") is not None: + return + implies = feature_dict.get("implies", "") + if not implies: + return + if isinstance(implies, str): + implies = implies.split() + + if feature_name in implies: + raise AssertionError(error_msg + "feature implies itself") + + for impl in implies: + impl_dict = search_in.get(impl) + if impl_dict is not None: + if "disable" in impl_dict: + raise AssertionError(error_msg + "implies disabled feature '%s'" % impl) + continue + raise AssertionError(error_msg + "implies non-existent feature '%s'" % impl) + + def test_group(self, error_msg, search_in, feature_name, feature_dict): + if feature_dict.get("disabled") is not None: + return + group = feature_dict.get("group", "") + if not group: + return + if isinstance(group, str): + group = group.split() + + for f in group: + impl_dict = search_in.get(f) + if not impl_dict or "disable" in impl_dict: + continue + raise AssertionError(error_msg + + "in option 'group', '%s' already exists as a feature name" % f + ) + + def test_extra_checks(self, error_msg, search_in, feature_name, feature_dict): + if feature_dict.get("disabled") is not None: + return + extra_checks = feature_dict.get("extra_checks", "") + if not extra_checks: + return + if isinstance(extra_checks, str): + extra_checks = extra_checks.split() + + for f in extra_checks: + impl_dict = search_in.get(f) + if not impl_dict or "disable" in impl_dict: + continue + raise AssertionError(error_msg + + "in option 'extra_checks', extra test case '%s' already exists as a feature name" % f + ) + +class TestConfFeatures(unittest.TestCase): + def __init__(self, methodName="runTest"): + unittest.TestCase.__init__(self, methodName) + self.setup() + + def setup(self): + FakeCCompilerOpt.conf_nocache = True + + def test_features(self): + for arch, compilers in arch_compilers.items(): + for cc in compilers: + FakeCCompilerOpt.fake_info = (arch, cc, "") + _TestConfFeatures() + +if is_standalone: + unittest.main() diff --git a/MLPY/Lib/site-packages/numpy/distutils/tests/test_exec_command.py b/MLPY/Lib/site-packages/numpy/distutils/tests/test_exec_command.py new file mode 100644 index 0000000000000000000000000000000000000000..99c4ec33675ab6e7d52f94b85b13ab6293e32c07 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/tests/test_exec_command.py @@ -0,0 +1,214 @@ +import os +import sys +from tempfile import TemporaryFile + +from numpy.distutils import exec_command +from numpy.distutils.exec_command import get_pythonexe +from numpy.testing import tempdir, assert_, assert_warns + +# In python 3 stdout, stderr are text (unicode compliant) devices, so to +# emulate them import StringIO from the io module. +from io import StringIO + +class redirect_stdout: + """Context manager to redirect stdout for exec_command test.""" + def __init__(self, stdout=None): + self._stdout = stdout or sys.stdout + + def __enter__(self): + self.old_stdout = sys.stdout + sys.stdout = self._stdout + + def __exit__(self, exc_type, exc_value, traceback): + self._stdout.flush() + sys.stdout = self.old_stdout + # note: closing the redirected stream won't close sys.stdout, which was restored above.
+ self._stdout.close() + +class redirect_stderr: + """Context manager to redirect stderr for exec_command test.""" + def __init__(self, stderr=None): + self._stderr = stderr or sys.stderr + + def __enter__(self): + self.old_stderr = sys.stderr + sys.stderr = self._stderr + + def __exit__(self, exc_type, exc_value, traceback): + self._stderr.flush() + sys.stderr = self.old_stderr + # note: closing sys.stderr won't close it. + self._stderr.close() + +class emulate_nonposix: + """Context manager to emulate os.name != 'posix' """ + def __init__(self, osname='non-posix'): + self._new_name = osname + + def __enter__(self): + self._old_name = os.name + os.name = self._new_name + + def __exit__(self, exc_type, exc_value, traceback): + os.name = self._old_name + + +def test_exec_command_stdout(): + # Regression test for gh-2999 and gh-2915. + # There are several packages (nose, scipy.weave.inline, Sage inline + # Fortran) that replace stdout, in which case it doesn't have a fileno + # method. This is tested here, with a do-nothing command that fails if the + # presence of fileno() is assumed in exec_command. + + # The code has a special case for posix systems, so if we are on posix test + # both that the special case works and that the generic code works. + + # Test posix version: + with redirect_stdout(StringIO()): + with redirect_stderr(TemporaryFile()): + with assert_warns(DeprecationWarning): + exec_command.exec_command("cd '.'") + + if os.name == 'posix': + # Test general (non-posix) version: + with emulate_nonposix(): + with redirect_stdout(StringIO()): + with redirect_stderr(TemporaryFile()): + with assert_warns(DeprecationWarning): + exec_command.exec_command("cd '.'") + +def test_exec_command_stderr(): + # Test posix version: + with redirect_stdout(TemporaryFile(mode='w+')): + with redirect_stderr(StringIO()): + with assert_warns(DeprecationWarning): + exec_command.exec_command("cd '.'") + + if os.name == 'posix': + # Test general (non-posix) version: + with emulate_nonposix(): + with redirect_stdout(TemporaryFile()): + with redirect_stderr(StringIO()): + with assert_warns(DeprecationWarning): + exec_command.exec_command("cd '.'") + + +class TestExecCommand: + def setup(self): + self.pyexe = get_pythonexe() + + def check_nt(self, **kws): + s, o = exec_command.exec_command('cmd /C echo path=%path%') + assert_(s == 0) + assert_(o != '') + + s, o = exec_command.exec_command( + '"%s" -c "import sys;sys.stderr.write(sys.platform)"' % self.pyexe) + assert_(s == 0) + assert_(o == 'win32') + + def check_posix(self, **kws): + s, o = exec_command.exec_command("echo Hello", **kws) + assert_(s == 0) + assert_(o == 'Hello') + + s, o = exec_command.exec_command('echo $AAA', **kws) + assert_(s == 0) + assert_(o == '') + + s, o = exec_command.exec_command('echo "$AAA"', AAA='Tere', **kws) + assert_(s == 0) + assert_(o == 'Tere') + + s, o = exec_command.exec_command('echo "$AAA"', **kws) + assert_(s == 0) + assert_(o == '') + + if 'BBB' not in os.environ: + os.environ['BBB'] = 'Hi' + s, o = exec_command.exec_command('echo "$BBB"', **kws) + assert_(s == 0) + assert_(o == 'Hi') + + s, o = exec_command.exec_command('echo "$BBB"', BBB='Hey', **kws) + assert_(s == 0) + assert_(o == 'Hey') + + s, o = exec_command.exec_command('echo "$BBB"', **kws) + assert_(s == 0) + assert_(o == 'Hi') + + del os.environ['BBB'] + + s, o = exec_command.exec_command('echo "$BBB"', **kws) + assert_(s == 0) + assert_(o == '') + + + s, o = exec_command.exec_command('this_is_not_a_command', **kws) + assert_(s != 0) + assert_(o != '') 
+ + s, o = exec_command.exec_command('echo path=$PATH', **kws) + assert_(s == 0) + assert_(o != '') + + s, o = exec_command.exec_command( + '"%s" -c "import sys,os;sys.stderr.write(os.name)"' % + self.pyexe, **kws) + assert_(s == 0) + assert_(o == 'posix') + + def check_basic(self, **kws): + s, o = exec_command.exec_command( + '"%s" -c "raise \'Ignore me.\'"' % self.pyexe, **kws) + assert_(s != 0) + assert_(o != '') + + s, o = exec_command.exec_command( + '"%s" -c "import sys;sys.stderr.write(\'0\');' + 'sys.stderr.write(\'1\');sys.stderr.write(\'2\')"' % + self.pyexe, **kws) + assert_(s == 0) + assert_(o == '012') + + s, o = exec_command.exec_command( + '"%s" -c "import sys;sys.exit(15)"' % self.pyexe, **kws) + assert_(s == 15) + assert_(o == '') + + s, o = exec_command.exec_command( + '"%s" -c "print(\'Heipa\'")' % self.pyexe, **kws) + assert_(s == 0) + assert_(o == 'Heipa') + + def check_execute_in(self, **kws): + with tempdir() as tmpdir: + fn = "file" + tmpfile = os.path.join(tmpdir, fn) + with open(tmpfile, 'w') as f: + f.write('Hello') + + s, o = exec_command.exec_command( + '"%s" -c "f = open(\'%s\', \'r\'); f.close()"' % + (self.pyexe, fn), **kws) + assert_(s != 0) + assert_(o != '') + s, o = exec_command.exec_command( + '"%s" -c "f = open(\'%s\', \'r\'); print(f.read()); ' + 'f.close()"' % (self.pyexe, fn), execute_in=tmpdir, **kws) + assert_(s == 0) + assert_(o == 'Hello') + + def test_basic(self): + with redirect_stdout(StringIO()): + with redirect_stderr(StringIO()): + with assert_warns(DeprecationWarning): + if os.name == "posix": + self.check_posix(use_tee=0) + self.check_posix(use_tee=1) + elif os.name == "nt": + self.check_nt(use_tee=0) + self.check_nt(use_tee=1) + self.check_execute_in(use_tee=0) + self.check_execute_in(use_tee=1) diff --git a/MLPY/Lib/site-packages/numpy/distutils/tests/test_fcompiler.py b/MLPY/Lib/site-packages/numpy/distutils/tests/test_fcompiler.py new file mode 100644 index 0000000000000000000000000000000000000000..1d24aa62df5aae423623fad908b829790d2de491 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/tests/test_fcompiler.py @@ -0,0 +1,43 @@ +from numpy.testing import assert_ +import numpy.distutils.fcompiler + +customizable_flags = [ + ('f77', 'F77FLAGS'), + ('f90', 'F90FLAGS'), + ('free', 'FREEFLAGS'), + ('arch', 'FARCH'), + ('debug', 'FDEBUG'), + ('flags', 'FFLAGS'), + ('linker_so', 'LDFLAGS'), +] + + +def test_fcompiler_flags(monkeypatch): + monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '0') + fc = numpy.distutils.fcompiler.new_fcompiler(compiler='none') + flag_vars = fc.flag_vars.clone(lambda *args, **kwargs: None) + + for opt, envvar in customizable_flags: + new_flag = '-dummy-{}-flag'.format(opt) + prev_flags = getattr(flag_vars, opt) + + monkeypatch.setenv(envvar, new_flag) + new_flags = getattr(flag_vars, opt) + + monkeypatch.delenv(envvar) + assert_(new_flags == [new_flag]) + + monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '1') + + for opt, envvar in customizable_flags: + new_flag = '-dummy-{}-flag'.format(opt) + prev_flags = getattr(flag_vars, opt) + monkeypatch.setenv(envvar, new_flag) + new_flags = getattr(flag_vars, opt) + + monkeypatch.delenv(envvar) + if prev_flags is None: + assert_(new_flags == [new_flag]) + else: + assert_(new_flags == prev_flags + [new_flag]) + diff --git a/MLPY/Lib/site-packages/numpy/distutils/tests/test_fcompiler_gnu.py b/MLPY/Lib/site-packages/numpy/distutils/tests/test_fcompiler_gnu.py new file mode 100644 index
0000000000000000000000000000000000000000..52ceb973283f8e601e70efd77745c4f28046cb05 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/tests/test_fcompiler_gnu.py @@ -0,0 +1,55 @@ +from numpy.testing import assert_ + +import numpy.distutils.fcompiler + +g77_version_strings = [ + ('GNU Fortran 0.5.25 20010319 (prerelease)', '0.5.25'), + ('GNU Fortran (GCC 3.2) 3.2 20020814 (release)', '3.2'), + ('GNU Fortran (GCC) 3.3.3 20040110 (prerelease) (Debian)', '3.3.3'), + ('GNU Fortran (GCC) 3.3.3 (Debian 20040401)', '3.3.3'), + ('GNU Fortran (GCC 3.2.2 20030222 (Red Hat Linux 3.2.2-5)) 3.2.2' + ' 20030222 (Red Hat Linux 3.2.2-5)', '3.2.2'), +] + +gfortran_version_strings = [ + ('GNU Fortran 95 (GCC 4.0.3 20051023 (prerelease) (Debian 4.0.2-3))', + '4.0.3'), + ('GNU Fortran 95 (GCC) 4.1.0', '4.1.0'), + ('GNU Fortran 95 (GCC) 4.2.0 20060218 (experimental)', '4.2.0'), + ('GNU Fortran (GCC) 4.3.0 20070316 (experimental)', '4.3.0'), + ('GNU Fortran (rubenvb-4.8.0) 4.8.0', '4.8.0'), + ('4.8.0', '4.8.0'), + ('4.0.3-7', '4.0.3'), + ("gfortran: warning: couldn't understand kern.osversion '14.1.0\n4.9.1", + '4.9.1'), + ("gfortran: warning: couldn't understand kern.osversion '14.1.0\n" + "gfortran: warning: yet another warning\n4.9.1", + '4.9.1'), + ('GNU Fortran (crosstool-NG 8a21ab48) 7.2.0', '7.2.0') +] + +class TestG77Versions: + def test_g77_version(self): + fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu') + for vs, version in g77_version_strings: + v = fc.version_match(vs) + assert_(v == version, (vs, v)) + + def test_not_g77(self): + fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu') + for vs, _ in gfortran_version_strings: + v = fc.version_match(vs) + assert_(v is None, (vs, v)) + +class TestGFortranVersions: + def test_gfortran_version(self): + fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95') + for vs, version in gfortran_version_strings: + v = fc.version_match(vs) + assert_(v == version, (vs, v)) + + def test_not_gfortran(self): + fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95') + for vs, _ in g77_version_strings: + v = fc.version_match(vs) + assert_(v is None, (vs, v)) diff --git a/MLPY/Lib/site-packages/numpy/distutils/tests/test_fcompiler_intel.py b/MLPY/Lib/site-packages/numpy/distutils/tests/test_fcompiler_intel.py new file mode 100644 index 0000000000000000000000000000000000000000..1ef537096aa470851e3cc91e879f7d20f240bec1 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/tests/test_fcompiler_intel.py @@ -0,0 +1,30 @@ +import numpy.distutils.fcompiler +from numpy.testing import assert_ + + +intel_32bit_version_strings = [ + ("Intel(R) Fortran Intel(R) 32-bit Compiler Professional for applications" + "running on Intel(R) 32, Version 11.1", '11.1'), +] + +intel_64bit_version_strings = [ + ("Intel(R) Fortran IA-64 Compiler Professional for applications" + "running on IA-64, Version 11.0", '11.0'), + ("Intel(R) Fortran Intel(R) 64 Compiler Professional for applications" + "running on Intel(R) 64, Version 11.1", '11.1') +] + +class TestIntelFCompilerVersions: + def test_32bit_version(self): + fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intel') + for vs, version in intel_32bit_version_strings: + v = fc.version_match(vs) + assert_(v == version) + + +class TestIntelEM64TFCompilerVersions: + def test_64bit_version(self): + fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intelem') + for vs, version in intel_64bit_version_strings: + v = fc.version_match(vs) + assert_(v == version) diff --git 
a/MLPY/Lib/site-packages/numpy/distutils/tests/test_fcompiler_nagfor.py b/MLPY/Lib/site-packages/numpy/distutils/tests/test_fcompiler_nagfor.py new file mode 100644 index 0000000000000000000000000000000000000000..36a67b3dfd5db522f6a7b3c404c3403243652bfb --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/tests/test_fcompiler_nagfor.py @@ -0,0 +1,22 @@ +from numpy.testing import assert_ +import numpy.distutils.fcompiler + +nag_version_strings = [('nagfor', 'NAG Fortran Compiler Release ' + '6.2(Chiyoda) Build 6200', '6.2'), + ('nagfor', 'NAG Fortran Compiler Release ' + '6.1(Tozai) Build 6136', '6.1'), + ('nagfor', 'NAG Fortran Compiler Release ' + '6.0(Hibiya) Build 1021', '6.0'), + ('nagfor', 'NAG Fortran Compiler Release ' + '5.3.2(971)', '5.3.2'), + ('nag', 'NAGWare Fortran 95 compiler Release 5.1' + '(347,355-367,375,380-383,389,394,399,401-402,407,' + '431,435,437,446,459-460,463,472,494,496,503,508,' + '511,517,529,555,557,565)', '5.1')] + +class TestNagFCompilerVersions: + def test_version_match(self): + for comp, vs, version in nag_version_strings: + fc = numpy.distutils.fcompiler.new_fcompiler(compiler=comp) + v = fc.version_match(vs) + assert_(v == version) diff --git a/MLPY/Lib/site-packages/numpy/distutils/tests/test_from_template.py b/MLPY/Lib/site-packages/numpy/distutils/tests/test_from_template.py new file mode 100644 index 0000000000000000000000000000000000000000..4c725f0eeb4ed76a2b2b468742d56b7f93b659eb --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/tests/test_from_template.py @@ -0,0 +1,44 @@ + +from numpy.distutils.from_template import process_str +from numpy.testing import assert_equal + + +pyf_src = """ +python module foo + <_rd=real,double precision> + interface + subroutine foosub(tol) + <_rd>, intent(in,out) :: tol + end subroutine foosub + end interface +end python module foo +""" + +expected_pyf = """ +python module foo + interface + subroutine sfoosub(tol) + real, intent(in,out) :: tol + end subroutine sfoosub + subroutine dfoosub(tol) + double precision, intent(in,out) :: tol + end subroutine dfoosub + end interface +end python module foo +""" + + +def normalize_whitespace(s): + """ + Remove leading and trailing whitespace, and convert internal + stretches of whitespace to a single space. + """ + return ' '.join(s.split()) + + +def test_from_template(): + """Regression test for gh-10712.""" + pyf = process_str(pyf_src) + normalized_pyf = normalize_whitespace(pyf) + normalized_expected_pyf = normalize_whitespace(expected_pyf) + assert_equal(normalized_pyf, normalized_expected_pyf) diff --git a/MLPY/Lib/site-packages/numpy/distutils/tests/test_mingw32ccompiler.py b/MLPY/Lib/site-packages/numpy/distutils/tests/test_mingw32ccompiler.py new file mode 100644 index 0000000000000000000000000000000000000000..2a8145fc65eb71d8a78ac18de45d6d9877968c37 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/tests/test_mingw32ccompiler.py @@ -0,0 +1,42 @@ +import shutil +import subprocess +import sys +import pytest + +from numpy.distutils import mingw32ccompiler + + +@pytest.mark.skipif(sys.platform != 'win32', reason='win32 only test') +def test_build_import(): + '''Test the mingw32ccompiler.build_import_library, which builds a + `python.a` from the MSVC `python.lib` + ''' + + # make sure `nm.exe` exists and supports the current python version. 
This + # can get mixed up when the PATH has a 64-bit nm but the python is 32-bit + try: + out = subprocess.check_output(['nm.exe', '--help']) + except FileNotFoundError: + pytest.skip("'nm.exe' not on path, is mingw installed?") + supported = out[out.find(b'supported targets:'):] + if sys.maxsize < 2**32: + if b'pe-i386' not in supported: + raise ValueError("'nm.exe' found but it does not support 32-bit " + "dlls when using 32-bit python. Supported " + "formats: '%s'" % supported) + elif b'pe-x86-64' not in supported: + raise ValueError("'nm.exe' found but it does not support 64-bit " + "dlls when using 64-bit python. Supported " + "formats: '%s'" % supported) + # Hide the import library to force a build + has_import_lib, fullpath = mingw32ccompiler._check_for_import_lib() + if has_import_lib: + shutil.move(fullpath, fullpath + '.bak') + + try: + # Whew, now we can actually test the function + mingw32ccompiler.build_import_library() + + finally: + if has_import_lib: + shutil.move(fullpath + '.bak', fullpath) diff --git a/MLPY/Lib/site-packages/numpy/distutils/tests/test_misc_util.py b/MLPY/Lib/site-packages/numpy/distutils/tests/test_misc_util.py new file mode 100644 index 0000000000000000000000000000000000000000..93cc906ca5da1f106dd11549628fd423b298f0d7 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/tests/test_misc_util.py @@ -0,0 +1,82 @@ +from os.path import join, sep, dirname + +from numpy.distutils.misc_util import ( + appendpath, minrelpath, gpaths, get_shared_lib_extension, get_info + ) +from numpy.testing import ( + assert_, assert_equal + ) + +ajoin = lambda *paths: join(*((sep,)+paths)) + +class TestAppendpath: + + def test_1(self): + assert_equal(appendpath('prefix', 'name'), join('prefix', 'name')) + assert_equal(appendpath('/prefix', 'name'), ajoin('prefix', 'name')) + assert_equal(appendpath('/prefix', '/name'), ajoin('prefix', 'name')) + assert_equal(appendpath('prefix', '/name'), join('prefix', 'name')) + + def test_2(self): + assert_equal(appendpath('prefix/sub', 'name'), + join('prefix', 'sub', 'name')) + assert_equal(appendpath('prefix/sub', 'sup/name'), + join('prefix', 'sub', 'sup', 'name')) + assert_equal(appendpath('/prefix/sub', '/prefix/name'), + ajoin('prefix', 'sub', 'name')) + + def test_3(self): + assert_equal(appendpath('/prefix/sub', '/prefix/sup/name'), + ajoin('prefix', 'sub', 'sup', 'name')) + assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sup/sup2/name'), + ajoin('prefix', 'sub', 'sub2', 'sup', 'sup2', 'name')) + assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sub/sup/name'), + ajoin('prefix', 'sub', 'sub2', 'sup', 'name')) + +class TestMinrelpath: + + def test_1(self): + n = lambda path: path.replace('/', sep) + assert_equal(minrelpath(n('aa/bb')), n('aa/bb')) + assert_equal(minrelpath('..'), '..') + assert_equal(minrelpath(n('aa/..')), '') + assert_equal(minrelpath(n('aa/../bb')), 'bb') + assert_equal(minrelpath(n('aa/bb/..')), 'aa') + assert_equal(minrelpath(n('aa/bb/../..')), '') + assert_equal(minrelpath(n('aa/bb/../cc/../dd')), n('aa/dd')) + assert_equal(minrelpath(n('.././..')), n('../..')) + assert_equal(minrelpath(n('aa/bb/.././../dd')), n('dd')) + +class TestGpaths: + + def test_gpaths(self): + local_path = minrelpath(join(dirname(__file__), '..')) + ls = gpaths('command/*.py', local_path) + assert_(join(local_path, 'command', 'build_src.py') in ls, repr(ls)) + f = gpaths('system_info.py', local_path) + assert_(join(local_path, 'system_info.py') == f[0], repr(f)) + +class TestSharedExtension: + + def 
test_get_shared_lib_extension(self): + import sys + ext = get_shared_lib_extension(is_python_ext=False) + if sys.platform.startswith('linux'): + assert_equal(ext, '.so') + elif sys.platform.startswith('gnukfreebsd'): + assert_equal(ext, '.so') + elif sys.platform.startswith('darwin'): + assert_equal(ext, '.dylib') + elif sys.platform.startswith('win'): + assert_equal(ext, '.dll') + # just check for no crash + assert_(get_shared_lib_extension(is_python_ext=True)) + + +def test_installed_npymath_ini(): + # Regression test for gh-7707. If npymath.ini wasn't installed, then this + # will give an error. + info = get_info('npymath') + + assert isinstance(info, dict) + assert "define_macros" in info diff --git a/MLPY/Lib/site-packages/numpy/distutils/tests/test_npy_pkg_config.py b/MLPY/Lib/site-packages/numpy/distutils/tests/test_npy_pkg_config.py new file mode 100644 index 0000000000000000000000000000000000000000..b1a1e079e1822d6f9e297b60746a293270d622d7 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/tests/test_npy_pkg_config.py @@ -0,0 +1,84 @@ +import os + +from numpy.distutils.npy_pkg_config import read_config, parse_flags +from numpy.testing import temppath, assert_ + +simple = """\ +[meta] +Name = foo +Description = foo lib +Version = 0.1 + +[default] +cflags = -I/usr/include +libs = -L/usr/lib +""" +simple_d = {'cflags': '-I/usr/include', 'libflags': '-L/usr/lib', + 'version': '0.1', 'name': 'foo'} + +simple_variable = """\ +[meta] +Name = foo +Description = foo lib +Version = 0.1 + +[variables] +prefix = /foo/bar +libdir = ${prefix}/lib +includedir = ${prefix}/include + +[default] +cflags = -I${includedir} +libs = -L${libdir} +""" +simple_variable_d = {'cflags': '-I/foo/bar/include', 'libflags': '-L/foo/bar/lib', + 'version': '0.1', 'name': 'foo'} + +class TestLibraryInfo: + def test_simple(self): + with temppath('foo.ini') as path: + with open(path, 'w') as f: + f.write(simple) + pkg = os.path.splitext(path)[0] + out = read_config(pkg) + + assert_(out.cflags() == simple_d['cflags']) + assert_(out.libs() == simple_d['libflags']) + assert_(out.name == simple_d['name']) + assert_(out.version == simple_d['version']) + + def test_simple_variable(self): + with temppath('foo.ini') as path: + with open(path, 'w') as f: + f.write(simple_variable) + pkg = os.path.splitext(path)[0] + out = read_config(pkg) + + assert_(out.cflags() == simple_variable_d['cflags']) + assert_(out.libs() == simple_variable_d['libflags']) + assert_(out.name == simple_variable_d['name']) + assert_(out.version == simple_variable_d['version']) + out.vars['prefix'] = '/Users/david' + assert_(out.cflags() == '-I/Users/david/include') + +class TestParseFlags: + def test_simple_cflags(self): + d = parse_flags("-I/usr/include") + assert_(d['include_dirs'] == ['/usr/include']) + + d = parse_flags("-I/usr/include -DFOO") + assert_(d['include_dirs'] == ['/usr/include']) + assert_(d['macros'] == ['FOO']) + + d = parse_flags("-I /usr/include -DFOO") + assert_(d['include_dirs'] == ['/usr/include']) + assert_(d['macros'] == ['FOO']) + + def test_simple_lflags(self): + d = parse_flags("-L/usr/lib -lfoo -L/usr/lib -lbar") + assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib']) + assert_(d['libraries'] == ['foo', 'bar']) + + d = parse_flags("-L /usr/lib -lfoo -L/usr/lib -lbar") + assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib']) + assert_(d['libraries'] == ['foo', 'bar']) diff --git a/MLPY/Lib/site-packages/numpy/distutils/tests/test_shell_utils.py b/MLPY/Lib/site-packages/numpy/distutils/tests/test_shell_utils.py new 
file mode 100644 index 0000000000000000000000000000000000000000..47e298d5408ace5ac0818543c962fd4c081163e4 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/tests/test_shell_utils.py @@ -0,0 +1,76 @@ +import pytest +import subprocess +import json +import sys + +from numpy.distutils import _shell_utils + +argv_cases = [ + [r'exe'], + [r'path/exe'], + [r'path\exe'], + [r'\\server\path\exe'], + [r'path to/exe'], + [r'path to\exe'], + + [r'exe', '--flag'], + [r'path/exe', '--flag'], + [r'path\exe', '--flag'], + [r'path to/exe', '--flag'], + [r'path to\exe', '--flag'], + + # flags containing literal quotes in their name + [r'path to/exe', '--flag-"quoted"'], + [r'path to\exe', '--flag-"quoted"'], + [r'path to/exe', '"--flag-quoted"'], + [r'path to\exe', '"--flag-quoted"'], +] + + +@pytest.fixture(params=[ + _shell_utils.WindowsParser, + _shell_utils.PosixParser +]) +def Parser(request): + return request.param + + +@pytest.fixture +def runner(Parser): + if Parser != _shell_utils.NativeParser: + pytest.skip('Unable to run with non-native parser') + + if Parser == _shell_utils.WindowsParser: + return lambda cmd: subprocess.check_output(cmd) + elif Parser == _shell_utils.PosixParser: + # posix has no non-shell string parsing + return lambda cmd: subprocess.check_output(cmd, shell=True) + else: + raise NotImplementedError + + +@pytest.mark.parametrize('argv', argv_cases) +def test_join_matches_subprocess(Parser, runner, argv): + """ + Test that join produces strings understood by subprocess + """ + # invoke python to return its arguments as json + cmd = [ + sys.executable, '-c', + 'import json, sys; print(json.dumps(sys.argv[1:]))' + ] + joined = Parser.join(cmd + argv) + json_out = runner(joined).decode() + assert json.loads(json_out) == argv + + +@pytest.mark.parametrize('argv', argv_cases) +def test_roundtrip(Parser, argv): + """ + Test that split is the inverse operation of join + """ + try: + joined = Parser.join(argv) + assert argv == Parser.split(joined) + except NotImplementedError: + pytest.skip("Not implemented") diff --git a/MLPY/Lib/site-packages/numpy/distutils/tests/test_system_info.py b/MLPY/Lib/site-packages/numpy/distutils/tests/test_system_info.py new file mode 100644 index 0000000000000000000000000000000000000000..0009187d6f4518bd4203298aeab544b0b3662f66 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/tests/test_system_info.py @@ -0,0 +1,320 @@ +import os +import shutil +import pytest +from tempfile import mkstemp, mkdtemp +from subprocess import Popen, PIPE +from distutils.errors import DistutilsError + +from numpy.testing import assert_, assert_equal, assert_raises +from numpy.distutils import ccompiler, customized_ccompiler +from numpy.distutils.system_info import system_info, ConfigParser, mkl_info +from numpy.distutils.system_info import AliasedOptionError +from numpy.distutils.system_info import default_lib_dirs, default_include_dirs +from numpy.distutils import _shell_utils + + +def get_class(name, notfound_action=1): + """ + notfound_action: + 0 - do nothing + 1 - display warning message + 2 - raise error + """ + cl = {'temp1': Temp1Info, + 'temp2': Temp2Info, + 'duplicate_options': DuplicateOptionInfo, + }.get(name.lower(), _system_info) + return cl() + +simple_site = """ +[ALL] +library_dirs = {dir1:s}{pathsep:s}{dir2:s} +libraries = {lib1:s},{lib2:s} +extra_compile_args = -I/fake/directory -I"/path with/spaces" -Os +runtime_library_dirs = {dir1:s} + +[temp1] +library_dirs = {dir1:s} +libraries = {lib1:s} +runtime_library_dirs = {dir1:s} + +[temp2] 
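+# same layout as [temp1], but exercising extra_link_args and rpath + # instead of runtime_library_dirs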
+library_dirs = {dir2:s} +libraries = {lib2:s} +extra_link_args = -Wl,-rpath={lib2_escaped:s} +rpath = {dir2:s} + +[duplicate_options] +mylib_libs = {lib1:s} +libraries = {lib2:s} +""" +site_cfg = simple_site + +fakelib_c_text = """ +/* This file is generated from numpy/distutils/testing/test_system_info.py */ +#include +void foo(void) { + printf("Hello foo"); +} +void bar(void) { + printf("Hello bar"); +} +""" + +def have_compiler(): + """ Return True if there appears to be an executable compiler + """ + compiler = customized_ccompiler() + try: + cmd = compiler.compiler # Unix compilers + except AttributeError: + try: + if not compiler.initialized: + compiler.initialize() # MSVC is different + except (DistutilsError, ValueError): + return False + cmd = [compiler.cc] + try: + p = Popen(cmd, stdout=PIPE, stderr=PIPE) + p.stdout.close() + p.stderr.close() + p.wait() + except OSError: + return False + return True + + +HAVE_COMPILER = have_compiler() + + +class _system_info(system_info): + + def __init__(self, + default_lib_dirs=default_lib_dirs, + default_include_dirs=default_include_dirs, + verbosity=1, + ): + self.__class__.info = {} + self.local_prefixes = [] + defaults = {'library_dirs': '', + 'include_dirs': '', + 'runtime_library_dirs': '', + 'rpath': '', + 'src_dirs': '', + 'search_static_first': "0", + 'extra_compile_args': '', + 'extra_link_args': ''} + self.cp = ConfigParser(defaults) + # We have to parse the config files afterwards + # to have a consistent temporary filepath + + def _check_libs(self, lib_dirs, libs, opt_libs, exts): + """Override _check_libs to return with all dirs """ + info = {'libraries': libs, 'library_dirs': lib_dirs} + return info + + +class Temp1Info(_system_info): + """For testing purposes""" + section = 'temp1' + + +class Temp2Info(_system_info): + """For testing purposes""" + section = 'temp2' + +class DuplicateOptionInfo(_system_info): + """For testing purposes""" + section = 'duplicate_options' + + +class TestSystemInfoReading: + + def setup(self): + """ Create the libraries """ + # Create 2 sources and 2 libraries + self._dir1 = mkdtemp() + self._src1 = os.path.join(self._dir1, 'foo.c') + self._lib1 = os.path.join(self._dir1, 'libfoo.so') + self._dir2 = mkdtemp() + self._src2 = os.path.join(self._dir2, 'bar.c') + self._lib2 = os.path.join(self._dir2, 'libbar.so') + # Update local site.cfg + global simple_site, site_cfg + site_cfg = simple_site.format(**{ + 'dir1': self._dir1, + 'lib1': self._lib1, + 'dir2': self._dir2, + 'lib2': self._lib2, + 'pathsep': os.pathsep, + 'lib2_escaped': _shell_utils.NativeParser.join([self._lib2]) + }) + # Write site.cfg + fd, self._sitecfg = mkstemp() + os.close(fd) + with open(self._sitecfg, 'w') as fd: + fd.write(site_cfg) + # Write the sources + with open(self._src1, 'w') as fd: + fd.write(fakelib_c_text) + with open(self._src2, 'w') as fd: + fd.write(fakelib_c_text) + # We create all class-instances + + def site_and_parse(c, site_cfg): + c.files = [site_cfg] + c.parse_config_files() + return c + self.c_default = site_and_parse(get_class('default'), self._sitecfg) + self.c_temp1 = site_and_parse(get_class('temp1'), self._sitecfg) + self.c_temp2 = site_and_parse(get_class('temp2'), self._sitecfg) + self.c_dup_options = site_and_parse(get_class('duplicate_options'), + self._sitecfg) + + + def teardown(self): + # Do each removal separately + try: + shutil.rmtree(self._dir1) + except Exception: + pass + try: + shutil.rmtree(self._dir2) + except Exception: + pass + try: + os.remove(self._sitecfg) + except Exception: + pass 
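+ # A minimal sketch, assuming only the stdlib: the site.cfg written in + # setup() above is plain INI, so a section can be re-read directly with + # configparser before system_info layers its defaults and option + # aliasing on top: + # + # import configparser, os + # cp = configparser.ConfigParser() + # cp.read_string("[temp1]\nlibrary_dirs = /tmp/dir1\nlibraries = foo\n") + # assert cp["temp1"]["library_dirs"].split(os.pathsep) == ["/tmp/dir1"]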
+ + def test_all(self): + # Read in all information in the ALL block + tsi = self.c_default + assert_equal(tsi.get_lib_dirs(), [self._dir1, self._dir2]) + assert_equal(tsi.get_libraries(), [self._lib1, self._lib2]) + assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1]) + extra = tsi.calc_extra_info() + assert_equal(extra['extra_compile_args'], ['-I/fake/directory', '-I/path with/spaces', '-Os']) + + def test_temp1(self): + # Read in all information in the temp1 block + tsi = self.c_temp1 + assert_equal(tsi.get_lib_dirs(), [self._dir1]) + assert_equal(tsi.get_libraries(), [self._lib1]) + assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1]) + + def test_temp2(self): + # Read in all information in the temp2 block + tsi = self.c_temp2 + assert_equal(tsi.get_lib_dirs(), [self._dir2]) + assert_equal(tsi.get_libraries(), [self._lib2]) + # Now from rpath and not runtime_library_dirs + assert_equal(tsi.get_runtime_lib_dirs(key='rpath'), [self._dir2]) + extra = tsi.calc_extra_info() + assert_equal(extra['extra_link_args'], ['-Wl,-rpath=' + self._lib2]) + + def test_duplicate_options(self): + # Ensure that duplicates are raising an AliasedOptionError + tsi = self.c_dup_options + assert_raises(AliasedOptionError, tsi.get_option_single, "mylib_libs", "libraries") + assert_equal(tsi.get_libs("mylib_libs", [self._lib1]), [self._lib1]) + assert_equal(tsi.get_libs("libraries", [self._lib2]), [self._lib2]) + + @pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler") + def test_compile1(self): + # Compile source and link the first source + c = customized_ccompiler() + previousDir = os.getcwd() + try: + # Change directory to not screw up directories + os.chdir(self._dir1) + c.compile([os.path.basename(self._src1)], output_dir=self._dir1) + # Ensure that the object exists + assert_(os.path.isfile(self._src1.replace('.c', '.o')) or + os.path.isfile(self._src1.replace('.c', '.obj'))) + finally: + os.chdir(previousDir) + + @pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler") + @pytest.mark.skipif('msvc' in repr(ccompiler.new_compiler()), + reason="Fails with MSVC compiler ") + def test_compile2(self): + # Compile source and link the second source + tsi = self.c_temp2 + c = customized_ccompiler() + extra_link_args = tsi.calc_extra_info()['extra_link_args'] + previousDir = os.getcwd() + try: + # Change directory to not screw up directories + os.chdir(self._dir2) + c.compile([os.path.basename(self._src2)], output_dir=self._dir2, + extra_postargs=extra_link_args) + # Ensure that the object exists + assert_(os.path.isfile(self._src2.replace('.c', '.o'))) + finally: + os.chdir(previousDir) + + def test_overrides(self): + previousDir = os.getcwd() + cfg = os.path.join(self._dir1, 'site.cfg') + shutil.copy(self._sitecfg, cfg) + try: + os.chdir(self._dir1) + # Check that the '[ALL]' section does not override + # missing values from other sections + info = mkl_info() + lib_dirs = info.cp['ALL']['library_dirs'].split(os.pathsep) + assert info.get_lib_dirs() != lib_dirs + + # But if we copy the values to a '[mkl]' section the value + # is correct + with open(cfg, 'r') as fid: + mkl = fid.read().replace('[ALL]', '[mkl]', 1) + with open(cfg, 'w') as fid: + fid.write(mkl) + info = mkl_info() + assert info.get_lib_dirs() == lib_dirs + + # Also, the values will be taken from a section named '[DEFAULT]' + with open(cfg, 'r') as fid: + dflt = fid.read().replace('[mkl]', '[DEFAULT]', 1) + with open(cfg, 'w') as fid: + fid.write(dflt) + info = mkl_info() + assert info.get_lib_dirs() == lib_dirs + finally: + 
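+ # always restore the original working directory, even if the + # assertions above raised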
os.chdir(previousDir) + + +def test_distutils_parse_env_order(monkeypatch): + from numpy.distutils.system_info import _parse_env_order + env = 'NPY_TESTS_DISTUTILS_PARSE_ENV_ORDER' + + base_order = list('abcdef') + + monkeypatch.setenv(env, 'b,i,e,f') + order, unknown = _parse_env_order(base_order, env) + assert len(order) == 3 + assert order == list('bef') + assert len(unknown) == 1 + + # For when LAPACK/BLAS optimization is disabled + monkeypatch.setenv(env, '') + order, unknown = _parse_env_order(base_order, env) + assert len(order) == 0 + assert len(unknown) == 0 + + for prefix in '^!': + monkeypatch.setenv(env, f'{prefix}b,i,e') + order, unknown = _parse_env_order(base_order, env) + assert len(order) == 4 + assert order == list('acdf') + assert len(unknown) == 1 + + with pytest.raises(ValueError): + monkeypatch.setenv(env, 'b,^e,i') + _parse_env_order(base_order, env) + + with pytest.raises(ValueError): + monkeypatch.setenv(env, '!b,^e,i') + _parse_env_order(base_order, env) diff --git a/MLPY/Lib/site-packages/numpy/distutils/unixccompiler.py b/MLPY/Lib/site-packages/numpy/distutils/unixccompiler.py new file mode 100644 index 0000000000000000000000000000000000000000..dd0d3d138a08b1f89c752d66915b66d557d38903 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/distutils/unixccompiler.py @@ -0,0 +1,140 @@ +""" +unixccompiler - can handle very long argument lists for ar. + +""" +import os +import sys +import subprocess + +from distutils.errors import CompileError, DistutilsExecError, LibError +from distutils.unixccompiler import UnixCCompiler +from numpy.distutils.ccompiler import replace_method +from numpy.distutils.misc_util import _commandline_dep_string +from numpy.distutils import log + +# Note that UnixCCompiler._compile appeared in Python 2.3 +def UnixCCompiler__compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): + """Compile a single source files with a Unix-style compiler.""" + # HP ad-hoc fix, see ticket 1383 + ccomp = self.compiler_so + if ccomp[0] == 'aCC': + # remove flags that will trigger ANSI-C mode for aCC + if '-Ae' in ccomp: + ccomp.remove('-Ae') + if '-Aa' in ccomp: + ccomp.remove('-Aa') + # add flags for (almost) sane C++ handling + ccomp += ['-AA'] + self.compiler_so = ccomp + # ensure OPT environment variable is read + if 'OPT' in os.environ: + # XXX who uses this? + from sysconfig import get_config_vars + opt = " ".join(os.environ['OPT'].split()) + gcv_opt = " ".join(get_config_vars('OPT')[0].split()) + ccomp_s = " ".join(self.compiler_so) + if opt not in ccomp_s: + ccomp_s = ccomp_s.replace(gcv_opt, opt) + self.compiler_so = ccomp_s.split() + llink_s = " ".join(self.linker_so) + if opt not in llink_s: + self.linker_so = llink_s.split() + opt.split() + + display = '%s: %s' % (os.path.basename(self.compiler_so[0]), src) + + # gcc style automatic dependencies, outputs a makefile (-MF) that lists + # all headers needed by a c file as a side effect of compilation (-MMD) + if getattr(self, '_auto_depends', False): + deps = ['-MMD', '-MF', obj + '.d'] + else: + deps = [] + + try: + self.spawn(self.compiler_so + cc_args + [src, '-o', obj] + deps + + extra_postargs, display = display) + except DistutilsExecError as e: + msg = str(e) + raise CompileError(msg) from None + + # add commandline flags to dependency file + if deps: + # After running the compiler, the file created will be in EBCDIC + # but will not be tagged as such. 
This tags it so the file does not + # have multiple different encodings being written to it + if sys.platform == 'zos': + subprocess.check_output(['chtag', '-tc', 'IBM1047', obj + '.d']) + with open(obj + '.d', 'a') as f: + f.write(_commandline_dep_string(cc_args, extra_postargs, pp_opts)) + +replace_method(UnixCCompiler, '_compile', UnixCCompiler__compile) + + +def UnixCCompiler_create_static_lib(self, objects, output_libname, + output_dir=None, debug=0, target_lang=None): + """ + Build a static library in a separate sub-process. + + Parameters + ---------- + objects : list or tuple of str + List of paths to object files used to build the static library. + output_libname : str + The library name as an absolute or relative (if `output_dir` is used) + path. + output_dir : str, optional + The path to the output directory. Default is None, in which case + the ``output_dir`` attribute of the UnixCCompiler instance. + debug : bool, optional + This parameter is not used. + target_lang : str, optional + This parameter is not used. + + Returns + ------- + None + + """ + objects, output_dir = self._fix_object_args(objects, output_dir) + + output_filename = \ + self.library_filename(output_libname, output_dir=output_dir) + + if self._need_link(objects, output_filename): + try: + # previous .a may be screwed up; best to remove it first + # and recreate. + # Also, ar on OS X doesn't handle updating universal archives + os.unlink(output_filename) + except (IOError, OSError): + pass + self.mkpath(os.path.dirname(output_filename)) + tmp_objects = objects + self.objects + while tmp_objects: + objects = tmp_objects[:50] + tmp_objects = tmp_objects[50:] + display = '%s: adding %d object files to %s' % ( + os.path.basename(self.archiver[0]), + len(objects), output_filename) + self.spawn(self.archiver + [output_filename] + objects, + display = display) + + # Not many Unices required ranlib anymore -- SunOS 4.x is, I + # think the only major Unix that does. Maybe we need some + # platform intelligence here to skip ranlib if it's not + # needed -- or maybe Python's configure script took care of + # it for us, hence the check for leading colon. + if self.ranlib: + display = '%s:@ %s' % (os.path.basename(self.ranlib[0]), + output_filename) + try: + self.spawn(self.ranlib + [output_filename], + display = display) + except DistutilsExecError as e: + msg = str(e) + raise LibError(msg) from None + else: + log.debug("skipping %s (up-to-date)", output_filename) + return + +replace_method(UnixCCompiler, 'create_static_lib', + UnixCCompiler_create_static_lib) diff --git a/MLPY/Lib/site-packages/numpy/doc/__init__.py b/MLPY/Lib/site-packages/numpy/doc/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f7900f7271b6fd46249e6f6286088d4c9a713a0a --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/doc/__init__.py @@ -0,0 +1,26 @@ +import os + +ref_dir = os.path.join(os.path.dirname(__file__)) + +__all__ = sorted(f[:-3] for f in os.listdir(ref_dir) if f.endswith('.py') and + not f.startswith('__')) + +for f in __all__: + __import__(__name__ + '.' 
+ f) + +del f, ref_dir + +__doc__ = """\ +Topical documentation +===================== + +The following topics are available: +%s + +You can view them by + +>>> help(np.doc.TOPIC) #doctest: +SKIP + +""" % '\n- '.join([''] + __all__) + +__all__.extend(['__doc__']) diff --git a/MLPY/Lib/site-packages/numpy/doc/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/doc/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3d31c971cb06be9aef34892927bfc678ec742291 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/doc/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/doc/__pycache__/constants.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/doc/__pycache__/constants.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7cb877038163d8d2ef1b29339697f3e78a2b89cc Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/doc/__pycache__/constants.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/doc/__pycache__/ufuncs.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/doc/__pycache__/ufuncs.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..66f63a768ca02640afc761accd51e95c59790670 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/doc/__pycache__/ufuncs.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/doc/constants.py b/MLPY/Lib/site-packages/numpy/doc/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..b4af4a39d5714a75a3dee54e92f2ba6e0b287110 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/doc/constants.py @@ -0,0 +1,413 @@ +# -*- coding: utf-8 -*- +""" +========= +Constants +========= + +.. currentmodule:: numpy + +NumPy includes several constants: + +%(constant_list)s +""" +# +# Note: the docstring is autogenerated. +# +import re +import textwrap + +# Maintain same format as in numpy.add_newdocs +constants = [] +def add_newdoc(module, name, doc): + constants.append((name, doc)) + +add_newdoc('numpy', 'pi', + """ + ``pi = 3.1415926535897932384626433...`` + + References + ---------- + https://en.wikipedia.org/wiki/Pi + + """) + +add_newdoc('numpy', 'e', + """ + Euler's constant, base of natural logarithms, Napier's constant. + + ``e = 2.71828182845904523536028747135266249775724709369995...`` + + See Also + -------- + exp : Exponential function + log : Natural logarithm + + References + ---------- + https://en.wikipedia.org/wiki/E_%28mathematical_constant%29 + + """) + +add_newdoc('numpy', 'euler_gamma', + """ + ``γ = 0.5772156649015328606065120900824024310421...`` + + References + ---------- + https://en.wikipedia.org/wiki/Euler-Mascheroni_constant + + """) + +add_newdoc('numpy', 'inf', + """ + IEEE 754 floating point representation of (positive) infinity. + + Returns + ------- + y : float + A floating point representation of positive infinity. + + See Also + -------- + isinf : Shows which elements are positive or negative infinity + + isposinf : Shows which elements are positive infinity + + isneginf : Shows which elements are negative infinity + + isnan : Shows which elements are Not a Number + + isfinite : Shows which elements are finite (not one of Not a Number, + positive infinity and negative infinity) + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). This means that Not a Number is not equivalent to infinity. + Also that positive infinity is not equivalent to negative infinity. 
But + infinity is equivalent to positive infinity. + + `Inf`, `Infinity`, `PINF` and `infty` are aliases for `inf`. + + Examples + -------- + >>> np.inf + inf + >>> np.array([1]) / 0. + array([ Inf]) + + """) + +add_newdoc('numpy', 'nan', + """ + IEEE 754 floating point representation of Not a Number (NaN). + + Returns + ------- + y : A floating point representation of Not a Number. + + See Also + -------- + isnan : Shows which elements are Not a Number. + + isfinite : Shows which elements are finite (not one of + Not a Number, positive infinity and negative infinity) + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). This means that Not a Number is not equivalent to infinity. + + `NaN` and `NAN` are aliases of `nan`. + + Examples + -------- + >>> np.nan + nan + >>> np.log(-1) + nan + >>> np.log([-1, 1, 2]) + array([ NaN, 0. , 0.69314718]) + + """) + +add_newdoc('numpy', 'newaxis', + """ + A convenient alias for None, useful for indexing arrays. + + Examples + -------- + >>> newaxis is None + True + >>> x = np.arange(3) + >>> x + array([0, 1, 2]) + >>> x[:, newaxis] + array([[0], + [1], + [2]]) + >>> x[:, newaxis, newaxis] + array([[[0]], + [[1]], + [[2]]]) + >>> x[:, newaxis] * x + array([[0, 0, 0], + [0, 1, 2], + [0, 2, 4]]) + + Outer product, same as ``outer(x, y)``: + + >>> y = np.arange(3, 6) + >>> x[:, newaxis] * y + array([[ 0, 0, 0], + [ 3, 4, 5], + [ 6, 8, 10]]) + + ``x[newaxis, :]`` is equivalent to ``x[newaxis]`` and ``x[None]``: + + >>> x[newaxis, :].shape + (1, 3) + >>> x[newaxis].shape + (1, 3) + >>> x[None].shape + (1, 3) + >>> x[:, newaxis].shape + (3, 1) + + """) + +add_newdoc('numpy', 'NZERO', + """ + IEEE 754 floating point representation of negative zero. + + Returns + ------- + y : float + A floating point representation of negative zero. + + See Also + -------- + PZERO : Defines positive zero. + + isinf : Shows which elements are positive or negative infinity. + + isposinf : Shows which elements are positive infinity. + + isneginf : Shows which elements are negative infinity. + + isnan : Shows which elements are Not a Number. + + isfinite : Shows which elements are finite - not one of + Not a Number, positive infinity and negative infinity. + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). Negative zero is considered to be a finite number. + + Examples + -------- + >>> np.NZERO + -0.0 + >>> np.PZERO + 0.0 + + >>> np.isfinite([np.NZERO]) + array([ True]) + >>> np.isnan([np.NZERO]) + array([False]) + >>> np.isinf([np.NZERO]) + array([False]) + + """) + +add_newdoc('numpy', 'PZERO', + """ + IEEE 754 floating point representation of positive zero. + + Returns + ------- + y : float + A floating point representation of positive zero. + + See Also + -------- + NZERO : Defines negative zero. + + isinf : Shows which elements are positive or negative infinity. + + isposinf : Shows which elements are positive infinity. + + isneginf : Shows which elements are negative infinity. + + isnan : Shows which elements are Not a Number. + + isfinite : Shows which elements are finite - not one of + Not a Number, positive infinity and negative infinity. + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). Positive zero is considered to be a finite number. 
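+ Positive and negative zero compare equal (``0.0 == -0.0`` is True), + even though ``np.signbit`` can tell them apart.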
+ + Examples + -------- + >>> np.PZERO + 0.0 + >>> np.NZERO + -0.0 + + >>> np.isfinite([np.PZERO]) + array([ True]) + >>> np.isnan([np.PZERO]) + array([False]) + >>> np.isinf([np.PZERO]) + array([False]) + + """) + +add_newdoc('numpy', 'NAN', + """ + IEEE 754 floating point representation of Not a Number (NaN). + + `NaN` and `NAN` are equivalent definitions of `nan`. Please use + `nan` instead of `NAN`. + + See Also + -------- + nan + + """) + +add_newdoc('numpy', 'NaN', + """ + IEEE 754 floating point representation of Not a Number (NaN). + + `NaN` and `NAN` are equivalent definitions of `nan`. Please use + `nan` instead of `NaN`. + + See Also + -------- + nan + + """) + +add_newdoc('numpy', 'NINF', + """ + IEEE 754 floating point representation of negative infinity. + + Returns + ------- + y : float + A floating point representation of negative infinity. + + See Also + -------- + isinf : Shows which elements are positive or negative infinity + + isposinf : Shows which elements are positive infinity + + isneginf : Shows which elements are negative infinity + + isnan : Shows which elements are Not a Number + + isfinite : Shows which elements are finite (not one of Not a Number, + positive infinity and negative infinity) + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). This means that Not a Number is not equivalent to infinity. + Also that positive infinity is not equivalent to negative infinity. But + infinity is equivalent to positive infinity. + + Examples + -------- + >>> np.NINF + -inf + >>> np.log(0) + -inf + + """) + +add_newdoc('numpy', 'PINF', + """ + IEEE 754 floating point representation of (positive) infinity. + + Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for + `inf`. For more details, see `inf`. + + See Also + -------- + inf + + """) + +add_newdoc('numpy', 'infty', + """ + IEEE 754 floating point representation of (positive) infinity. + + Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for + `inf`. For more details, see `inf`. + + See Also + -------- + inf + + """) + +add_newdoc('numpy', 'Inf', + """ + IEEE 754 floating point representation of (positive) infinity. + + Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for + `inf`. For more details, see `inf`. + + See Also + -------- + inf + + """) + +add_newdoc('numpy', 'Infinity', + """ + IEEE 754 floating point representation of (positive) infinity. + + Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for + `inf`. For more details, see `inf`. + + See Also + -------- + inf + + """) + + +if __doc__: + constants_str = [] + constants.sort() + for name, doc in constants: + s = textwrap.dedent(doc).replace("\n", "\n ") + + # Replace sections by rubrics + lines = s.split("\n") + new_lines = [] + for line in lines: + m = re.match(r'^(\s+)[-=]+\s*$', line) + if m and new_lines: + prev = textwrap.dedent(new_lines.pop()) + new_lines.append('%s.. rubric:: %s' % (m.group(1), prev)) + new_lines.append('') + else: + new_lines.append(line) + s = "\n".join(new_lines) + + # Done. + constants_str.append(""".. 
data:: %s\n %s""" % (name, s)) + constants_str = "\n".join(constants_str) + + __doc__ = __doc__ % dict(constant_list=constants_str) + del constants_str, name, doc + del line, lines, new_lines, m, s, prev + +del constants, add_newdoc diff --git a/MLPY/Lib/site-packages/numpy/doc/ufuncs.py b/MLPY/Lib/site-packages/numpy/doc/ufuncs.py new file mode 100644 index 0000000000000000000000000000000000000000..0a6f00f081e02bbcb4b60eeee35a67e7c7211240 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/doc/ufuncs.py @@ -0,0 +1,137 @@ +""" +=================== +Universal Functions +=================== + +Ufuncs are, generally speaking, mathematical functions or operations that are +applied element-by-element to the contents of an array. That is, the result +in each output array element only depends on the value in the corresponding +input array (or arrays) and on no other array elements. NumPy comes with a +large suite of ufuncs, and scipy extends that suite substantially. The simplest +example is the addition operator: :: + + >>> np.array([0,2,3,4]) + np.array([1,1,-1,2]) + array([1, 3, 2, 6]) + +The ufunc module lists all the available ufuncs in numpy. Documentation on +the specific ufuncs may be found in those modules. This documentation is +intended to address the more general aspects of ufuncs common to most of +them. All of the ufuncs that make use of Python operators (e.g., +, -, etc.) +have equivalent functions defined (e.g. add() for +). + +Type coercion +============= + +What happens when a binary operator (e.g., +, -, \\*, /, etc.) deals with arrays of +two different types? What is the type of the result? Typically, the result is +the higher of the two types. For example: :: + + float32 + float64 -> float64 + int8 + int32 -> int32 + int16 + float32 -> float32 + float32 + complex64 -> complex64 + +There are some less obvious cases generally involving mixes of types +(e.g. uints, ints and floats) where a type of equal bit size cannot +represent all the information of the other type. Some examples are int32 vs float32 or uint32 vs int32. +Generally, the result is the higher type of larger size than both +(if available). So: :: + + int32 + float32 -> float64 + uint32 + int32 -> int64 + +Finally, the type coercion behavior when expressions involve Python +scalars is different than that seen for arrays. Since Python has a +limited number of types, combining a Python int with a dtype=np.int8 +array does not coerce to the higher type but instead, the type of the +array prevails. So the rule for Python scalars combined with arrays is +that the result will be that of the array equivalent of the Python scalar +if the Python scalar is of a higher 'kind' than the array (e.g., float +vs. int); otherwise the resultant type will be that of the array. +For example: :: + + Python int + int8 -> int8 + Python float + int8 -> float64 + +ufunc methods +============= + +Binary ufuncs support 4 methods. + +**.reduce(arr)** applies the binary operator to elements of the array in + sequence.
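+Applied with np.add this yields the sum of the elements; with +np.multiply, their product.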
For example: :: + + >>> np.add.reduce(np.arange(10)) # adds all elements of array + 45 + +For multidimensional arrays, the first dimension is reduced by default: :: + + >>> np.add.reduce(np.arange(10).reshape(2,5)) + array([ 5, 7, 9, 11, 13]) + +The axis keyword can be used to specify different axes to reduce: :: + + >>> np.add.reduce(np.arange(10).reshape(2,5),axis=1) + array([10, 35]) + +**.accumulate(arr)** applies the binary operator and generates an +equivalently shaped array that includes the accumulated amount for each +element of the array. A couple of examples: :: + + >>> np.add.accumulate(np.arange(10)) + array([ 0, 1, 3, 6, 10, 15, 21, 28, 36, 45]) + >>> np.multiply.accumulate(np.arange(1,9)) + array([ 1, 2, 6, 24, 120, 720, 5040, 40320]) + +The behavior for multidimensional arrays is the same as for .reduce(), +as is the use of the axis keyword. + +**.reduceat(arr,indices)** allows one to apply reduce to selected parts + of an array. It is a difficult method to understand. See the documentation + at: + +**.outer(arr1,arr2)** generates an outer operation on the two arrays arr1 and + arr2. It will work on multidimensional arrays (the shape of the result is + the concatenation of the two input shapes): :: + + >>> np.multiply.outer(np.arange(3),np.arange(4)) + array([[0, 0, 0, 0], + [0, 1, 2, 3], + [0, 2, 4, 6]]) + +Output arguments +================ + +All ufuncs accept an optional output array. The array must be of the expected +output shape. Beware that if the type of the output array is of a different +(and lower) type than the output result, the results may be silently truncated +or otherwise corrupted in the downcast to the lower type. This usage is useful +when one wants to avoid creating large temporary arrays and instead allows one +to reuse the same array memory repeatedly (at the expense of not being able to +use more convenient operator notation in expressions). Note that when the +output argument is used, the ufunc still returns a reference to the result. + + >>> x = np.arange(2) + >>> np.add(np.arange(2),np.arange(2.),x) + array([0, 2]) + >>> x + array([0, 2]) + +and & or as ufuncs +================== + +Invariably people try to use the Python 'and' and 'or' as logical operators +(and quite understandably). But these operators do not behave as normal +operators since Python treats these quite differently. They cannot be +overloaded with array equivalents. Thus using 'and' or 'or' with an array +results in an error. There are two alternatives: + + 1) use the ufunc functions logical_and() and logical_or(). + 2) use the bitwise operators & and \\|. The drawback of these is that if + the arguments to these operators are not boolean arrays, the result is + likely incorrect. On the other hand, most usages of logical_and and + logical_or are with boolean arrays. As long as one is careful, this is + a convenient way to apply these operators. + +""" diff --git a/MLPY/Lib/site-packages/numpy/dual.py b/MLPY/Lib/site-packages/numpy/dual.py new file mode 100644 index 0000000000000000000000000000000000000000..a3633be6d807331cf209284002197dbd90ee6658 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/dual.py @@ -0,0 +1,83 @@ +""" +.. deprecated:: 1.20 + +*This module is deprecated. Instead of importing functions from* +``numpy.dual``, *the functions should be imported directly from NumPy +or SciPy*. + +Aliases for functions which may be accelerated by SciPy. + +SciPy_ can be built to use accelerated or otherwise improved libraries +for FFTs, linear algebra, and special functions.
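+(Optimized BLAS/LAPACK builds such as OpenBLAS or MKL are the typical +case.)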
This module allows +developers to transparently support these accelerated functions when +SciPy is available but still support users who have only installed +NumPy. + +.. _SciPy : https://www.scipy.org + +""" +import warnings + + +warnings.warn('The module numpy.dual is deprecated. Instead of using dual, ' + 'use the functions directly from numpy or scipy.', + category=DeprecationWarning, + stacklevel=2) + +# This module should be used for functions both in numpy and scipy if +# you want to use the numpy version if available but the scipy version +# otherwise. +# Usage --- from numpy.dual import fft, inv + +__all__ = ['fft', 'ifft', 'fftn', 'ifftn', 'fft2', 'ifft2', + 'norm', 'inv', 'svd', 'solve', 'det', 'eig', 'eigvals', + 'eigh', 'eigvalsh', 'lstsq', 'pinv', 'cholesky', 'i0'] + +import numpy.linalg as linpkg +import numpy.fft as fftpkg +from numpy.lib import i0 +import sys + + +fft = fftpkg.fft +ifft = fftpkg.ifft +fftn = fftpkg.fftn +ifftn = fftpkg.ifftn +fft2 = fftpkg.fft2 +ifft2 = fftpkg.ifft2 + +norm = linpkg.norm +inv = linpkg.inv +svd = linpkg.svd +solve = linpkg.solve +det = linpkg.det +eig = linpkg.eig +eigvals = linpkg.eigvals +eigh = linpkg.eigh +eigvalsh = linpkg.eigvalsh +lstsq = linpkg.lstsq +pinv = linpkg.pinv +cholesky = linpkg.cholesky + +_restore_dict = {} + +def register_func(name, func): + if name not in __all__: + raise ValueError("{} not a dual function.".format(name)) + f = sys._getframe(0).f_globals + _restore_dict[name] = f[name] + f[name] = func + +def restore_func(name): + if name not in __all__: + raise ValueError("{} not a dual function.".format(name)) + try: + val = _restore_dict[name] + except KeyError: + return + else: + sys._getframe(0).f_globals[name] = val + +def restore_all(): + for name in _restore_dict.keys(): + restore_func(name) diff --git a/MLPY/Lib/site-packages/numpy/f2py/__init__.py b/MLPY/Lib/site-packages/numpy/f2py/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cff85721c08540e4fb00d6c3647be201508be851 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/__init__.py @@ -0,0 +1,200 @@ +#!/usr/bin/env python3 +"""Fortran to Python Interface Generator. + +""" +__all__ = ['run_main', 'compile', 'get_include', 'f2py_testing'] + +import sys +import subprocess +import os + +from . import f2py2e +from . import diagnose + +run_main = f2py2e.run_main +main = f2py2e.main + + +def compile(source, + modulename='untitled', + extra_args='', + verbose=True, + source_fn=None, + extension='.f', + full_output=False + ): + """ + Build extension module from a Fortran 77 source string with f2py. + + Parameters + ---------- + source : str or bytes + Fortran source of module / subroutine to compile + + .. versionchanged:: 1.16.0 + Accept str as well as bytes + + modulename : str, optional + The name of the compiled python module + extra_args : str or list, optional + Additional parameters passed to f2py + + .. versionchanged:: 1.16.0 + A list of args may also be provided. + + verbose : bool, optional + Print f2py output to screen + source_fn : str, optional + Name of the file where the fortran source is written. + The default is to use a temporary file with the extension + provided by the `extension` parameter + extension : {'.f', '.f90'}, optional + Filename extension if `source_fn` is not provided. + The extension tells which fortran standard is used. + The default is `.f`, which implies F77 standard. + + .. 
versionadded:: 1.11.0 + + full_output : bool, optional + If True, return a `subprocess.CompletedProcess` containing + the stdout and stderr of the compile process, instead of just + the status code. + + .. versionadded:: 1.20.0 + + + Returns + ------- + result : int or `subprocess.CompletedProcess` + 0 on success, or a `subprocess.CompletedProcess` if + ``full_output=True`` + + Examples + -------- + .. include:: compile_session.dat + :literal: + + """ + import tempfile + import shlex + + if source_fn is None: + f, fname = tempfile.mkstemp(suffix=extension) + # f is a file descriptor so need to close it + # carefully -- not with .close() directly + os.close(f) + else: + fname = source_fn + + if not isinstance(source, str): + source = str(source, 'utf-8') + try: + with open(fname, 'w') as f: + f.write(source) + + args = ['-c', '-m', modulename, f.name] + + if isinstance(extra_args, str): + is_posix = (os.name == 'posix') + extra_args = shlex.split(extra_args, posix=is_posix) + + args.extend(extra_args) + + c = [sys.executable, + '-c', + 'import numpy.f2py as f2py2e;f2py2e.main()'] + args + try: + cp = subprocess.run(c, stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + except OSError: + # preserve historic status code used by exec_command() + cp = subprocess.CompletedProcess(c, 127, stdout=b'', stderr=b'') + else: + if verbose: + print(cp.stdout.decode()) + finally: + if source_fn is None: + os.remove(fname) + + if full_output: + return cp + else: + return cp.returncode + + +def get_include(): + """ + Return the directory that contains the fortranobject.c and .h files. + + .. note:: + + This function is not needed when building an extension with + `numpy.distutils` directly from ``.f`` and/or ``.pyf`` files + in one go. + + Python extension modules built with f2py-generated code need to use + ``fortranobject.c`` as a source file, and include the ``fortranobject.h`` + header. This function can be used to obtain the directory containing + both of these files. + + Returns + ------- + include_path : str + Absolute path to the directory containing ``fortranobject.c`` and + ``fortranobject.h``. + + Notes + ----- + .. versionadded:: 1.22.0 + + Unless the build system you are using has specific support for f2py, + building a Python extension using a ``.pyf`` signature file is a two-step + process. For a module ``mymod``: + + - Step 1: run ``python -m numpy.f2py mymod.pyf --quiet``. This + generates ``_mymodmodule.c`` and (if needed) + ``_fblas-f2pywrappers.f`` files next to ``mymod.pyf``. + - Step 2: build your Python extension module. 
This requires the + following source files: + + - ``_mymodmodule.c`` + - ``_mymod-f2pywrappers.f`` (if it was generated in step 1) + - ``fortranobject.c`` + + See Also + -------- + numpy.get_include : function that returns the numpy include directory + + """ + return os.path.join(os.path.dirname(__file__), 'src') + + +if sys.version_info[:2] >= (3, 7): + # module level getattr is only supported in 3.7 onwards + # https://www.python.org/dev/peps/pep-0562/ + def __getattr__(attr): + + # Avoid importing things that aren't needed for building + # which might import the main numpy module + if attr == "f2py_testing": + import numpy.f2py.f2py_testing as f2py_testing + return f2py_testing + + elif attr == "test": + from numpy._pytesttester import PytestTester + test = PytestTester(__name__) + return test + + else: + raise AttributeError("module {!r} has no attribute " + "{!r}".format(__name__, attr)) + + def __dir__(): + return list(globals().keys() | {"f2py_testing", "test"}) + +else: + from . import f2py_testing + + from numpy._pytesttester import PytestTester + test = PytestTester(__name__) + del PytestTester diff --git a/MLPY/Lib/site-packages/numpy/f2py/__init__.pyi b/MLPY/Lib/site-packages/numpy/f2py/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..07964c993b15abce905cfa815d4bb51e519bcd16 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/__init__.pyi @@ -0,0 +1,18 @@ +from typing import Any, List + +from numpy.f2py import ( + f2py_testing as f2py_testing, +) + +__all__: List[str] + +def run_main(comline_list): ... +def compile( + source, + modulename=..., + extra_args=..., + verbose=..., + source_fn=..., + extension=..., + full_output=..., +): ... diff --git a/MLPY/Lib/site-packages/numpy/f2py/__main__.py b/MLPY/Lib/site-packages/numpy/f2py/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..8327074f7f91b8395c83828bc87d4b84b4e592b7 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/__main__.py @@ -0,0 +1,4 @@ +# See http://cens.ioc.ee/projects/f2py2e/ +from numpy.f2py.f2py2e import main + +main() diff --git a/MLPY/Lib/site-packages/numpy/f2py/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e1934eefb3a4b2008409b57ab3d35da627772b0b Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/__pycache__/__main__.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/__pycache__/__main__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..456a39629a966af06d1839056cd5675a2c259573 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/__pycache__/__main__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/__pycache__/__version__.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/__pycache__/__version__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bfd4563041c13447c639a7daaa821506c489f69e Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/__pycache__/__version__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/__pycache__/auxfuncs.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/__pycache__/auxfuncs.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..28ecde8f1ea0d9ca2d977a79c41d5a58b283504b Binary files /dev/null and 
b/MLPY/Lib/site-packages/numpy/f2py/__pycache__/auxfuncs.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/__pycache__/capi_maps.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/__pycache__/capi_maps.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1f22e1851d597687b2b3ee24f53c3a23d5eb9ce Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/__pycache__/capi_maps.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/__pycache__/cb_rules.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/__pycache__/cb_rules.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e0a19583ea91e320009cf4c07fb7f88f87403c3 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/__pycache__/cb_rules.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/__pycache__/cfuncs.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/__pycache__/cfuncs.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..645a21cbce1d00f7edada59d70da573a267b52e6 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/__pycache__/cfuncs.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/__pycache__/common_rules.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/__pycache__/common_rules.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3450e1878ec72a3486a719a747442a84d656934 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/__pycache__/common_rules.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/__pycache__/crackfortran.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/__pycache__/crackfortran.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1bb51869bae6ea9485f27023e3a88c726f472563 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/__pycache__/crackfortran.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/__pycache__/diagnose.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/__pycache__/diagnose.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c1e185c44834897c37be368c3aa430ac5ae074ad Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/__pycache__/diagnose.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/__pycache__/f2py2e.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/__pycache__/f2py2e.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4cc80d75efccfa874736c0b3490cbbc2d4f200e5 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/__pycache__/f2py2e.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/__pycache__/f2py_testing.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/__pycache__/f2py_testing.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8adf7fc5fc573b964fb50bd1469f96a5f3135da Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/__pycache__/f2py_testing.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/__pycache__/f90mod_rules.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/__pycache__/f90mod_rules.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3fe6273e55d4054bdd6a8cb6be116b11a51b558b Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/__pycache__/f90mod_rules.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/__pycache__/func2subr.cpython-39.pyc 
b/MLPY/Lib/site-packages/numpy/f2py/__pycache__/func2subr.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2fed24586da5d4bf1eaff87e3f1582a1e2d4b576 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/__pycache__/func2subr.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/__pycache__/rules.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/__pycache__/rules.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50f49373e7157f5c9cd54f3b7a881faa818af0ba Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/__pycache__/rules.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/__pycache__/setup.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/__pycache__/setup.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a3e9f549805195cdfc9fa050ad78cb814b0620f7 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/__pycache__/setup.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/__pycache__/use_rules.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/__pycache__/use_rules.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91c6e83380acd7c7b6f7943f69bea61cd2ab4888 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/__pycache__/use_rules.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/__version__.py b/MLPY/Lib/site-packages/numpy/f2py/__version__.py new file mode 100644 index 0000000000000000000000000000000000000000..8813675308034731efa254fb5d1d13136a8477c2 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/__version__.py @@ -0,0 +1 @@ +from numpy.version import version diff --git a/MLPY/Lib/site-packages/numpy/f2py/auxfuncs.py b/MLPY/Lib/site-packages/numpy/f2py/auxfuncs.py new file mode 100644 index 0000000000000000000000000000000000000000..1afca28bb55eb9ac5ff1cdd2cd5d249f9207e2ba --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/auxfuncs.py @@ -0,0 +1,857 @@ +#!/usr/bin/env python3 +""" + +Auxiliary functions for f2py2e. + +Copyright 1999,2000 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy (BSD style) LICENSE. + + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +$Date: 2005/07/24 19:01:55 $ +Pearu Peterson + +""" +import pprint +import sys +import types +from functools import reduce + +from . import __version__ +from . 
import cfuncs + +__all__ = [ + 'applyrules', 'debugcapi', 'dictappend', 'errmess', 'gentitle', + 'getargs2', 'getcallprotoargument', 'getcallstatement', + 'getfortranname', 'getpymethoddef', 'getrestdoc', 'getusercode', + 'getusercode1', 'hasbody', 'hascallstatement', 'hascommon', + 'hasexternals', 'hasinitvalue', 'hasnote', 'hasresultnote', + 'isallocatable', 'isarray', 'isarrayofstrings', 'iscomplex', + 'iscomplexarray', 'iscomplexfunction', 'iscomplexfunction_warn', + 'isdouble', 'isdummyroutine', 'isexternal', 'isfunction', + 'isfunction_wrap', 'isint1array', 'isinteger', 'isintent_aux', + 'isintent_c', 'isintent_callback', 'isintent_copy', 'isintent_dict', + 'isintent_hide', 'isintent_in', 'isintent_inout', 'isintent_inplace', + 'isintent_nothide', 'isintent_out', 'isintent_overwrite', 'islogical', + 'islogicalfunction', 'islong_complex', 'islong_double', + 'islong_doublefunction', 'islong_long', 'islong_longfunction', + 'ismodule', 'ismoduleroutine', 'isoptional', 'isprivate', 'isrequired', + 'isroutine', 'isscalar', 'issigned_long_longarray', 'isstring', + 'isstringarray', 'isstringfunction', 'issubroutine', + 'issubroutine_wrap', 'isthreadsafe', 'isunsigned', 'isunsigned_char', + 'isunsigned_chararray', 'isunsigned_long_long', + 'isunsigned_long_longarray', 'isunsigned_short', + 'isunsigned_shortarray', 'l_and', 'l_not', 'l_or', 'outmess', + 'replace', 'show', 'stripcomma', 'throw_error', +] + + +f2py_version = __version__.version + + +errmess = sys.stderr.write +show = pprint.pprint + +options = {} +debugoptions = [] +wrapfuncs = 1 + + +def outmess(t): + if options.get('verbose', 1): + sys.stdout.write(t) + + +def debugcapi(var): + return 'capi' in debugoptions + + +def _isstring(var): + return 'typespec' in var and var['typespec'] == 'character' and \ + not isexternal(var) + + +def isstring(var): + return _isstring(var) and not isarray(var) + + +def ischaracter(var): + return isstring(var) and 'charselector' not in var + + +def isstringarray(var): + return isarray(var) and _isstring(var) + + +def isarrayofstrings(var): + # leaving out '*' for now so that `character*(*) a(m)` and `character + # a(m,*)` are treated differently. Luckily `character**` is illegal. 
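+ # the parser stores dimensions as strings, so an assumed-size last + # dimension appears literally as '(*)'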
+ return isstringarray(var) and var['dimension'][-1] == '(*)' + + +def isarray(var): + return 'dimension' in var and not isexternal(var) + + +def isscalar(var): + return not (isarray(var) or isstring(var) or isexternal(var)) + + +def iscomplex(var): + return isscalar(var) and \ + var.get('typespec') in ['complex', 'double complex'] + + +def islogical(var): + return isscalar(var) and var.get('typespec') == 'logical' + + +def isinteger(var): + return isscalar(var) and var.get('typespec') == 'integer' + + +def isreal(var): + return isscalar(var) and var.get('typespec') == 'real' + + +def get_kind(var): + try: + return var['kindselector']['*'] + except KeyError: + try: + return var['kindselector']['kind'] + except KeyError: + pass + + +def islong_long(var): + if not isscalar(var): + return 0 + if var.get('typespec') not in ['integer', 'logical']: + return 0 + return get_kind(var) == '8' + + +def isunsigned_char(var): + if not isscalar(var): + return 0 + if var.get('typespec') != 'integer': + return 0 + return get_kind(var) == '-1' + + +def isunsigned_short(var): + if not isscalar(var): + return 0 + if var.get('typespec') != 'integer': + return 0 + return get_kind(var) == '-2' + + +def isunsigned(var): + if not isscalar(var): + return 0 + if var.get('typespec') != 'integer': + return 0 + return get_kind(var) == '-4' + + +def isunsigned_long_long(var): + if not isscalar(var): + return 0 + if var.get('typespec') != 'integer': + return 0 + return get_kind(var) == '-8' + + +def isdouble(var): + if not isscalar(var): + return 0 + if not var.get('typespec') == 'real': + return 0 + return get_kind(var) == '8' + + +def islong_double(var): + if not isscalar(var): + return 0 + if not var.get('typespec') == 'real': + return 0 + return get_kind(var) == '16' + + +def islong_complex(var): + if not iscomplex(var): + return 0 + return get_kind(var) == '32' + + +def iscomplexarray(var): + return isarray(var) and \ + var.get('typespec') in ['complex', 'double complex'] + + +def isint1array(var): + return isarray(var) and var.get('typespec') == 'integer' \ + and get_kind(var) == '1' + + +def isunsigned_chararray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '-1' + + +def isunsigned_shortarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '-2' + + +def isunsignedarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '-4' + + +def isunsigned_long_longarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '-8' + + +def issigned_chararray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '1' + + +def issigned_shortarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '2' + + +def issigned_array(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '4' + + +def issigned_long_longarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '8' + + +def isallocatable(var): + return 'attrspec' in var and 'allocatable' in var['attrspec'] + + +def ismutable(var): + return not ('dimension' not in var or isstring(var)) + + +def ismoduleroutine(rout): + return 'modulename' in rout + + +def ismodule(rout): + return 'block' in rout and 'module' == rout['block'] + + +def isfunction(rout): + return 'block' in rout and 
'function' == rout['block'] + + +def isfunction_wrap(rout): + if isintent_c(rout): + return 0 + return wrapfuncs and isfunction(rout) and (not isexternal(rout)) + + +def issubroutine(rout): + return 'block' in rout and 'subroutine' == rout['block'] + + +def issubroutine_wrap(rout): + if isintent_c(rout): + return 0 + return issubroutine(rout) and hasassumedshape(rout) + + +def hasassumedshape(rout): + if rout.get('hasassumedshape'): + return True + for a in rout['args']: + for d in rout['vars'].get(a, {}).get('dimension', []): + if d == ':': + rout['hasassumedshape'] = True + return True + return False + + +def requiresf90wrapper(rout): + return ismoduleroutine(rout) or hasassumedshape(rout) + + +def isroutine(rout): + return isfunction(rout) or issubroutine(rout) + + +def islogicalfunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return islogical(rout['vars'][a]) + return 0 + + +def islong_longfunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return islong_long(rout['vars'][a]) + return 0 + + +def islong_doublefunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return islong_double(rout['vars'][a]) + return 0 + + +def iscomplexfunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return iscomplex(rout['vars'][a]) + return 0 + + +def iscomplexfunction_warn(rout): + if iscomplexfunction(rout): + outmess("""\ + ************************************************************** + Warning: code with a function returning complex value + may not work correctly with your Fortran compiler. + Run the following test before using it in your applications: + $(f2py install dir)/test-site/{b/runme_scalar,e/runme} + When using GNU gcc/g77 compilers, codes should work correctly. 
+ **************************************************************\n""") + return 1 + return 0 + + +def isstringfunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return isstring(rout['vars'][a]) + return 0 + + +def hasexternals(rout): + return 'externals' in rout and rout['externals'] + + +def isthreadsafe(rout): + return 'f2pyenhancements' in rout and \ + 'threadsafe' in rout['f2pyenhancements'] + + +def hasvariables(rout): + return 'vars' in rout and rout['vars'] + + +def isoptional(var): + return ('attrspec' in var and 'optional' in var['attrspec'] and + 'required' not in var['attrspec']) and isintent_nothide(var) + + +def isexternal(var): + return 'attrspec' in var and 'external' in var['attrspec'] + + +def isrequired(var): + return not isoptional(var) and isintent_nothide(var) + + +def isintent_in(var): + if 'intent' not in var: + return 1 + if 'hide' in var['intent']: + return 0 + if 'inplace' in var['intent']: + return 0 + if 'in' in var['intent']: + return 1 + if 'out' in var['intent']: + return 0 + if 'inout' in var['intent']: + return 0 + if 'outin' in var['intent']: + return 0 + return 1 + + +def isintent_inout(var): + return ('intent' in var and ('inout' in var['intent'] or + 'outin' in var['intent']) and 'in' not in var['intent'] and + 'hide' not in var['intent'] and 'inplace' not in var['intent']) + + +def isintent_out(var): + return 'out' in var.get('intent', []) + + +def isintent_hide(var): + return ('intent' in var and ('hide' in var['intent'] or + ('out' in var['intent'] and 'in' not in var['intent'] and + (not l_or(isintent_inout, isintent_inplace)(var))))) + +def isintent_nothide(var): + return not isintent_hide(var) + + +def isintent_c(var): + return 'c' in var.get('intent', []) + + +def isintent_cache(var): + return 'cache' in var.get('intent', []) + + +def isintent_copy(var): + return 'copy' in var.get('intent', []) + + +def isintent_overwrite(var): + return 'overwrite' in var.get('intent', []) + + +def isintent_callback(var): + return 'callback' in var.get('intent', []) + + +def isintent_inplace(var): + return 'inplace' in var.get('intent', []) + + +def isintent_aux(var): + return 'aux' in var.get('intent', []) + + +def isintent_aligned4(var): + return 'aligned4' in var.get('intent', []) + + +def isintent_aligned8(var): + return 'aligned8' in var.get('intent', []) + + +def isintent_aligned16(var): + return 'aligned16' in var.get('intent', []) + +isintent_dict = {isintent_in: 'INTENT_IN', isintent_inout: 'INTENT_INOUT', + isintent_out: 'INTENT_OUT', isintent_hide: 'INTENT_HIDE', + isintent_cache: 'INTENT_CACHE', + isintent_c: 'INTENT_C', isoptional: 'OPTIONAL', + isintent_inplace: 'INTENT_INPLACE', + isintent_aligned4: 'INTENT_ALIGNED4', + isintent_aligned8: 'INTENT_ALIGNED8', + isintent_aligned16: 'INTENT_ALIGNED16', + } + + +def isprivate(var): + return 'attrspec' in var and 'private' in var['attrspec'] + + +def hasinitvalue(var): + return '=' in var + + +def hasinitvalueasstring(var): + if not hasinitvalue(var): + return 0 + return var['='][0] in ['"', "'"] + + +def hasnote(var): + return 'note' in var + + +def hasresultnote(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return hasnote(rout['vars'][a]) + return 0 + + +def hascommon(rout): + return 'common' in rout + + +def containscommon(rout): + if hascommon(rout): + return 1 + if hasbody(rout): + for b in rout['body']: + 
if containscommon(b): + return 1 + return 0 + + +def containsmodule(block): + if ismodule(block): + return 1 + if not hasbody(block): + return 0 + for b in block['body']: + if containsmodule(b): + return 1 + return 0 + + +def hasbody(rout): + return 'body' in rout + + +def hascallstatement(rout): + return getcallstatement(rout) is not None + + +def istrue(var): + return 1 + + +def isfalse(var): + return 0 + + +class F2PYError(Exception): + pass + + +class throw_error: + + def __init__(self, mess): + self.mess = mess + + def __call__(self, var): + mess = '\n\n var = %s\n Message: %s\n' % (var, self.mess) + raise F2PYError(mess) + + +def l_and(*f): + l, l2 = 'lambda v', [] + for i in range(len(f)): + l = '%s,f%d=f[%d]' % (l, i, i) + l2.append('f%d(v)' % (i)) + return eval('%s:%s' % (l, ' and '.join(l2))) + + +def l_or(*f): + l, l2 = 'lambda v', [] + for i in range(len(f)): + l = '%s,f%d=f[%d]' % (l, i, i) + l2.append('f%d(v)' % (i)) + return eval('%s:%s' % (l, ' or '.join(l2))) + + +def l_not(f): + return eval('lambda v,f=f:not f(v)') + + +def isdummyroutine(rout): + try: + return rout['f2pyenhancements']['fortranname'] == '' + except KeyError: + return 0 + + +def getfortranname(rout): + try: + name = rout['f2pyenhancements']['fortranname'] + if name == '': + raise KeyError + if not name: + errmess('Failed to use fortranname from %s\n' % + (rout['f2pyenhancements'])) + raise KeyError + except KeyError: + name = rout['name'] + return name + + +def getmultilineblock(rout, blockname, comment=1, counter=0): + try: + r = rout['f2pyenhancements'].get(blockname) + except KeyError: + return + if not r: + return + if counter > 0 and isinstance(r, str): + return + if isinstance(r, list): + if counter >= len(r): + return + r = r[counter] + if r[:3] == "'''": + if comment: + r = '\t/* start ' + blockname + \ + ' multiline (' + repr(counter) + ') */\n' + r[3:] + else: + r = r[3:] + if r[-3:] == "'''": + if comment: + r = r[:-3] + '\n\t/* end multiline (' + repr(counter) + ')*/' + else: + r = r[:-3] + else: + errmess("%s multiline block should end with `'''`: %s\n" + % (blockname, repr(r))) + return r + + +def getcallstatement(rout): + return getmultilineblock(rout, 'callstatement') + + +def getcallprotoargument(rout, cb_map={}): + r = getmultilineblock(rout, 'callprotoargument', comment=0) + if r: + return r + if hascallstatement(rout): + outmess( + 'warning: callstatement is defined without callprotoargument\n') + return + from .capi_maps import getctype + arg_types, arg_types2 = [], [] + if l_and(isstringfunction, l_not(isfunction_wrap))(rout): + arg_types.extend(['char*', 'size_t']) + for n in rout['args']: + var = rout['vars'][n] + if isintent_callback(var): + continue + if n in cb_map: + ctype = cb_map[n] + '_typedef' + else: + ctype = getctype(var) + if l_and(isintent_c, l_or(isscalar, iscomplex))(var): + pass + elif isstring(var): + pass + else: + ctype = ctype + '*' + if isstring(var) or isarrayofstrings(var): + arg_types2.append('size_t') + arg_types.append(ctype) + + proto_args = ','.join(arg_types + arg_types2) + if not proto_args: + proto_args = 'void' + return proto_args + + +def getusercode(rout): + return getmultilineblock(rout, 'usercode') + + +def getusercode1(rout): + return getmultilineblock(rout, 'usercode', counter=1) + + +def getpymethoddef(rout): + return getmultilineblock(rout, 'pymethoddef') + + +def getargs(rout): + sortargs, args = [], [] + if 'args' in rout: + args = rout['args'] + if 'sortvars' in rout: + for a in rout['sortvars']: + if a in args: + sortargs.append(a) + for a 
in args: + if a not in sortargs: + sortargs.append(a) + else: + sortargs = rout['args'] + return args, sortargs + + +def getargs2(rout): + sortargs, args = [], rout.get('args', []) + auxvars = [a for a in rout['vars'].keys() if isintent_aux(rout['vars'][a]) + and a not in args] + args = auxvars + args + if 'sortvars' in rout: + for a in rout['sortvars']: + if a in args: + sortargs.append(a) + for a in args: + if a not in sortargs: + sortargs.append(a) + else: + sortargs = auxvars + rout['args'] + return args, sortargs + + +def getrestdoc(rout): + if 'f2pymultilines' not in rout: + return None + k = None + if rout['block'] == 'python module': + k = rout['block'], rout['name'] + return rout['f2pymultilines'].get(k, None) + + +def gentitle(name): + l = (80 - len(name) - 6) // 2 + return '/*%s %s %s*/' % (l * '*', name, l * '*') + + +def flatlist(l): + if isinstance(l, list): + return reduce(lambda x, y, f=flatlist: x + f(y), l, []) + return [l] + + +def stripcomma(s): + if s and s[-1] == ',': + return s[:-1] + return s + + +def replace(str, d, defaultsep=''): + if isinstance(d, list): + return [replace(str, _m, defaultsep) for _m in d] + if isinstance(str, list): + return [replace(_m, d, defaultsep) for _m in str] + for k in 2 * list(d.keys()): + if k == 'separatorsfor': + continue + if 'separatorsfor' in d and k in d['separatorsfor']: + sep = d['separatorsfor'][k] + else: + sep = defaultsep + if isinstance(d[k], list): + str = str.replace('#%s#' % (k), sep.join(flatlist(d[k]))) + else: + str = str.replace('#%s#' % (k), d[k]) + return str + + +def dictappend(rd, ar): + if isinstance(ar, list): + for a in ar: + rd = dictappend(rd, a) + return rd + for k in ar.keys(): + if k[0] == '_': + continue + if k in rd: + if isinstance(rd[k], str): + rd[k] = [rd[k]] + if isinstance(rd[k], list): + if isinstance(ar[k], list): + rd[k] = rd[k] + ar[k] + else: + rd[k].append(ar[k]) + elif isinstance(rd[k], dict): + if isinstance(ar[k], dict): + if k == 'separatorsfor': + for k1 in ar[k].keys(): + if k1 not in rd[k]: + rd[k][k1] = ar[k][k1] + else: + rd[k] = dictappend(rd[k], ar[k]) + else: + rd[k] = ar[k] + return rd + + +def applyrules(rules, d, var={}): + ret = {} + if isinstance(rules, list): + for r in rules: + rr = applyrules(r, d, var) + ret = dictappend(ret, rr) + if '_break' in rr: + break + return ret + if '_check' in rules and (not rules['_check'](var)): + return ret + if 'need' in rules: + res = applyrules({'needs': rules['need']}, d, var) + if 'needs' in res: + cfuncs.append_needs(res['needs']) + + for k in rules.keys(): + if k == 'separatorsfor': + ret[k] = rules[k] + continue + if isinstance(rules[k], str): + ret[k] = replace(rules[k], d) + elif isinstance(rules[k], list): + ret[k] = [] + for i in rules[k]: + ar = applyrules({k: i}, d, var) + if k in ar: + ret[k].append(ar[k]) + elif k[0] == '_': + continue + elif isinstance(rules[k], dict): + ret[k] = [] + for k1 in rules[k].keys(): + if isinstance(k1, types.FunctionType) and k1(var): + if isinstance(rules[k][k1], list): + for i in rules[k][k1]: + if isinstance(i, dict): + res = applyrules({'supertext': i}, d, var) + if 'supertext' in res: + i = res['supertext'] + else: + i = '' + ret[k].append(replace(i, d)) + else: + i = rules[k][k1] + if isinstance(i, dict): + res = applyrules({'supertext': i}, d) + if 'supertext' in res: + i = res['supertext'] + else: + i = '' + ret[k].append(replace(i, d)) + else: + errmess('applyrules: ignoring rule %s.\n' % repr(rules[k])) + if isinstance(ret[k], list): + if len(ret[k]) == 1: + ret[k] = ret[k][0] + if 
ret[k] == []: + del ret[k] + return ret diff --git a/MLPY/Lib/site-packages/numpy/f2py/capi_maps.py b/MLPY/Lib/site-packages/numpy/f2py/capi_maps.py new file mode 100644 index 0000000000000000000000000000000000000000..76a33db6bf1af06c7ef38bc91db2e234e7b31dbd --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/capi_maps.py @@ -0,0 +1,838 @@ +#!/usr/bin/env python3 +""" + +Copyright 1999,2000 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +$Date: 2005/05/06 10:57:33 $ +Pearu Peterson + +""" +from . import __version__ +f2py_version = __version__.version + +import copy +import re +import os +from .crackfortran import markoutercomma +from . import cb_rules + +# The environment provided by auxfuncs.py is needed for some calls to eval. +# As the needed functions cannot be determined by static inspection of the +# code, it is safest to use import * pending a major refactoring of f2py. +from .auxfuncs import * + +__all__ = [ + 'getctype', 'getstrlength', 'getarrdims', 'getpydocsign', + 'getarrdocsign', 'getinit', 'sign2map', 'routsign2map', 'modsign2map', + 'cb_sign2map', 'cb_routsign2map', 'common_sign2map' +] + + +# Numarray and Numeric users should set this False +using_newcore = True + +depargs = [] +lcb_map = {} +lcb2_map = {} +# forced casting: mainly caused by the fact that Python or Numeric +# C/APIs do not support the corresponding C types. +c2py_map = {'double': 'float', + 'float': 'float', # forced casting + 'long_double': 'float', # forced casting + 'char': 'int', # forced casting + 'signed_char': 'int', # forced casting + 'unsigned_char': 'int', # forced casting + 'short': 'int', # forced casting + 'unsigned_short': 'int', # forced casting + 'int': 'int', # (forced casting) + 'long': 'int', + 'long_long': 'long', + 'unsigned': 'int', # forced casting + 'complex_float': 'complex', # forced casting + 'complex_double': 'complex', + 'complex_long_double': 'complex', # forced casting + 'string': 'string', + } +c2capi_map = {'double': 'NPY_DOUBLE', + 'float': 'NPY_FLOAT', + 'long_double': 'NPY_DOUBLE', # forced casting + 'char': 'NPY_STRING', + 'unsigned_char': 'NPY_UBYTE', + 'signed_char': 'NPY_BYTE', + 'short': 'NPY_SHORT', + 'unsigned_short': 'NPY_USHORT', + 'int': 'NPY_INT', + 'unsigned': 'NPY_UINT', + 'long': 'NPY_LONG', + 'long_long': 'NPY_LONG', # forced casting + 'complex_float': 'NPY_CFLOAT', + 'complex_double': 'NPY_CDOUBLE', + 'complex_long_double': 'NPY_CDOUBLE', # forced casting + 'string': 'NPY_STRING'} + +# These new maps aren't used anywhere yet, but should be by default +# unless building numeric or numarray extensions. 
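+# (Illustration, not upstream text: under these maps a Fortran 'real(8)' +# argument is handled as the C type 'double', exposed to Python as a plain +# 'float', and stored in 'NPY_DOUBLE' arrays; entries marked "forced casting" +# have no exact Python-level counterpart and are converted accordingly.)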
+if using_newcore: + c2capi_map = {'double': 'NPY_DOUBLE', + 'float': 'NPY_FLOAT', + 'long_double': 'NPY_LONGDOUBLE', + 'char': 'NPY_BYTE', + 'unsigned_char': 'NPY_UBYTE', + 'signed_char': 'NPY_BYTE', + 'short': 'NPY_SHORT', + 'unsigned_short': 'NPY_USHORT', + 'int': 'NPY_INT', + 'unsigned': 'NPY_UINT', + 'long': 'NPY_LONG', + 'unsigned_long': 'NPY_ULONG', + 'long_long': 'NPY_LONGLONG', + 'unsigned_long_long': 'NPY_ULONGLONG', + 'complex_float': 'NPY_CFLOAT', + 'complex_double': 'NPY_CDOUBLE', + 'complex_long_double': 'NPY_CDOUBLE', + 'string':'NPY_STRING' + + } +c2pycode_map = {'double': 'd', + 'float': 'f', + 'long_double': 'd', # forced casting + 'char': '1', + 'signed_char': '1', + 'unsigned_char': 'b', + 'short': 's', + 'unsigned_short': 'w', + 'int': 'i', + 'unsigned': 'u', + 'long': 'l', + 'long_long': 'L', + 'complex_float': 'F', + 'complex_double': 'D', + 'complex_long_double': 'D', # forced casting + 'string': 'c' + } +if using_newcore: + c2pycode_map = {'double': 'd', + 'float': 'f', + 'long_double': 'g', + 'char': 'b', + 'unsigned_char': 'B', + 'signed_char': 'b', + 'short': 'h', + 'unsigned_short': 'H', + 'int': 'i', + 'unsigned': 'I', + 'long': 'l', + 'unsigned_long': 'L', + 'long_long': 'q', + 'unsigned_long_long': 'Q', + 'complex_float': 'F', + 'complex_double': 'D', + 'complex_long_double': 'G', + 'string': 'S'} +c2buildvalue_map = {'double': 'd', + 'float': 'f', + 'char': 'b', + 'signed_char': 'b', + 'short': 'h', + 'int': 'i', + 'long': 'l', + 'long_long': 'L', + 'complex_float': 'N', + 'complex_double': 'N', + 'complex_long_double': 'N', + 'string': 'y'} + +if using_newcore: + # c2buildvalue_map=??? + pass + +f2cmap_all = {'real': {'': 'float', '4': 'float', '8': 'double', + '12': 'long_double', '16': 'long_double'}, + 'integer': {'': 'int', '1': 'signed_char', '2': 'short', + '4': 'int', '8': 'long_long', + '-1': 'unsigned_char', '-2': 'unsigned_short', + '-4': 'unsigned', '-8': 'unsigned_long_long'}, + 'complex': {'': 'complex_float', '8': 'complex_float', + '16': 'complex_double', '24': 'complex_long_double', + '32': 'complex_long_double'}, + 'complexkind': {'': 'complex_float', '4': 'complex_float', + '8': 'complex_double', '12': 'complex_long_double', + '16': 'complex_long_double'}, + 'logical': {'': 'int', '1': 'char', '2': 'short', '4': 'int', + '8': 'long_long'}, + 'double complex': {'': 'complex_double'}, + 'double precision': {'': 'double'}, + 'byte': {'': 'char'}, + 'character': {'': 'string'} + } + +f2cmap_default = copy.deepcopy(f2cmap_all) + + +def load_f2cmap_file(f2cmap_file): + global f2cmap_all + + f2cmap_all = copy.deepcopy(f2cmap_default) + + if f2cmap_file is None: + # Default value + f2cmap_file = '.f2py_f2cmap' + if not os.path.isfile(f2cmap_file): + return + + # User defined additions to f2cmap_all. + # f2cmap_file must contain a dictionary of dictionaries, only. For + # example, {'real':{'low':'float'}} means that Fortran 'real(low)' is + # interpreted as C 'float'. This feature is useful for F90/95 users if + # they use PARAMETERs in type specifications.
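+ # A minimal '.f2py_f2cmap' file might contain, for example (the kind + # names 'low' and 'i8' here are purely illustrative): + # {'real': {'low': 'float'}, 'integer': {'i8': 'long_long'}} + # Each C type on the right-hand side must be a key of c2py_map above.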
+ try: + outmess('Reading f2cmap from {!r} ...\n'.format(f2cmap_file)) + with open(f2cmap_file, 'r') as f: + d = eval(f.read(), {}, {}) + for k, d1 in list(d.items()): + for k1 in list(d1.keys()): + d1[k1.lower()] = d1[k1] + d[k.lower()] = d[k] + for k in list(d.keys()): + if k not in f2cmap_all: + f2cmap_all[k] = {} + for k1 in list(d[k].keys()): + if d[k][k1] in c2py_map: + if k1 in f2cmap_all[k]: + outmess( + "\tWarning: redefinition of {'%s':{'%s':'%s'->'%s'}}\n" % (k, k1, f2cmap_all[k][k1], d[k][k1])) + f2cmap_all[k][k1] = d[k][k1] + outmess('\tMapping "%s(kind=%s)" to "%s"\n' % + (k, k1, d[k][k1])) + else: + errmess("\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n" % ( + k, k1, d[k][k1], d[k][k1], list(c2py_map.keys()))) + outmess('Successfully applied user defined f2cmap changes\n') + except Exception as msg: + errmess( + 'Failed to apply user defined f2cmap changes: %s. Skipping.\n' % (msg)) + +cformat_map = {'double': '%g', + 'float': '%g', + 'long_double': '%Lg', + 'char': '%d', + 'signed_char': '%d', + 'unsigned_char': '%hhu', + 'short': '%hd', + 'unsigned_short': '%hu', + 'int': '%d', + 'unsigned': '%u', + 'long': '%ld', + 'unsigned_long': '%lu', + 'long_long': '%ld', + 'complex_float': '(%g,%g)', + 'complex_double': '(%g,%g)', + 'complex_long_double': '(%Lg,%Lg)', + 'string': '%s', + } + +# Auxiliary functions + + +def getctype(var): + """ + Determines C type + """ + ctype = 'void' + if isfunction(var): + if 'result' in var: + a = var['result'] + else: + a = var['name'] + if a in var['vars']: + return getctype(var['vars'][a]) + else: + errmess('getctype: function %s has no return value?!\n' % a) + elif issubroutine(var): + return ctype + elif 'typespec' in var and var['typespec'].lower() in f2cmap_all: + typespec = var['typespec'].lower() + f2cmap = f2cmap_all[typespec] + ctype = f2cmap[''] # default type + if 'kindselector' in var: + if '*' in var['kindselector']: + try: + ctype = f2cmap[var['kindselector']['*']] + except KeyError: + errmess('getctype: "%s %s %s" not supported.\n' % + (var['typespec'], '*', var['kindselector']['*'])) + elif 'kind' in var['kindselector']: + if typespec + 'kind' in f2cmap_all: + f2cmap = f2cmap_all[typespec + 'kind'] + try: + ctype = f2cmap[var['kindselector']['kind']] + except KeyError: + if typespec in f2cmap_all: + f2cmap = f2cmap_all[typespec] + try: + ctype = f2cmap[str(var['kindselector']['kind'])] + except KeyError: + errmess('getctype: "%s(kind=%s)" is mapped to C "%s" (to override define dict(%s = dict(%s="")) in %s/.f2py_f2cmap file).\n' + % (typespec, var['kindselector']['kind'], ctype, + typespec, var['kindselector']['kind'], os.getcwd())) + + else: + if not isexternal(var): + errmess( + 'getctype: No C-type found in "%s", assuming void.\n' % var) + return ctype + + +def getstrlength(var): + if isstringfunction(var): + if 'result' in var: + a = var['result'] + else: + a = var['name'] + if a in var['vars']: + return getstrlength(var['vars'][a]) + else: + errmess('getstrlength: function %s has no return value?!\n' % a) + if not isstring(var): + errmess( + 'getstrlength: expected a signature of a string but got: %s\n' % (repr(var))) + len = '1' + if 'charselector' in var: + a = var['charselector'] + if '*' in a: + len = a['*'] + elif 'len' in a: + len = a['len'] + if re.match(r'\(\s*(\*|:)\s*\)', len) or re.match(r'(\*|:)', len): + if isintent_hide(var): + errmess('getstrlength:intent(hide): expected a string with defined length but got: %s\n' % ( + repr(var))) + len = '-1' + return len + + +def getarrdims(a, var, verbose=0): + 
ret = {} + if isstring(var) and not isarray(var): + ret['dims'] = getstrlength(var) + ret['size'] = ret['dims'] + ret['rank'] = '1' + elif isscalar(var): + ret['size'] = '1' + ret['rank'] = '0' + ret['dims'] = '' + elif isarray(var): + dim = copy.copy(var['dimension']) + ret['size'] = '*'.join(dim) + try: + ret['size'] = repr(eval(ret['size'])) + except Exception: + pass + ret['dims'] = ','.join(dim) + ret['rank'] = repr(len(dim)) + ret['rank*[-1]'] = repr(len(dim) * [-1])[1:-1] + for i in range(len(dim)): # solve dim for dependencies + v = [] + if dim[i] in depargs: + v = [dim[i]] + else: + for va in depargs: + if re.match(r'.*?\b%s\b.*' % va, dim[i]): + v.append(va) + for va in v: + if depargs.index(va) > depargs.index(a): + dim[i] = '*' + break + ret['setdims'], i = '', -1 + for d in dim: + i = i + 1 + if d not in ['*', ':', '(*)', '(:)']: + ret['setdims'] = '%s#varname#_Dims[%d]=%s,' % ( + ret['setdims'], i, d) + if ret['setdims']: + ret['setdims'] = ret['setdims'][:-1] + ret['cbsetdims'], i = '', -1 + for d in var['dimension']: + i = i + 1 + if d not in ['*', ':', '(*)', '(:)']: + ret['cbsetdims'] = '%s#varname#_Dims[%d]=%s,' % ( + ret['cbsetdims'], i, d) + elif isintent_in(var): + outmess('getarrdims:warning: assumed shape array, using 0 instead of %r\n' + % (d)) + ret['cbsetdims'] = '%s#varname#_Dims[%d]=%s,' % ( + ret['cbsetdims'], i, 0) + elif verbose: + errmess( + 'getarrdims: If in call-back function: array argument %s must have bounded dimensions: got %s\n' % (repr(a), repr(d))) + if ret['cbsetdims']: + ret['cbsetdims'] = ret['cbsetdims'][:-1] +# if not isintent_c(var): +# var['dimension'].reverse() + return ret + + +def getpydocsign(a, var): + global lcb_map + if isfunction(var): + if 'result' in var: + af = var['result'] + else: + af = var['name'] + if af in var['vars']: + return getpydocsign(af, var['vars'][af]) + else: + errmess('getctype: function %s has no return value?!\n' % af) + return '', '' + sig, sigout = a, a + opt = '' + if isintent_in(var): + opt = 'input' + elif isintent_inout(var): + opt = 'in/output' + out_a = a + if isintent_out(var): + for k in var['intent']: + if k[:4] == 'out=': + out_a = k[4:] + break + init = '' + ctype = getctype(var) + + if hasinitvalue(var): + init, showinit = getinit(a, var) + init = ', optional\\n Default: %s' % showinit + if isscalar(var): + if isintent_inout(var): + sig = '%s : %s rank-0 array(%s,\'%s\')%s' % (a, opt, c2py_map[ctype], + c2pycode_map[ctype], init) + else: + sig = '%s : %s %s%s' % (a, opt, c2py_map[ctype], init) + sigout = '%s : %s' % (out_a, c2py_map[ctype]) + elif isstring(var): + if isintent_inout(var): + sig = '%s : %s rank-0 array(string(len=%s),\'c\')%s' % ( + a, opt, getstrlength(var), init) + else: + sig = '%s : %s string(len=%s)%s' % ( + a, opt, getstrlength(var), init) + sigout = '%s : string(len=%s)' % (out_a, getstrlength(var)) + elif isarray(var): + dim = var['dimension'] + rank = repr(len(dim)) + sig = '%s : %s rank-%s array(\'%s\') with bounds (%s)%s' % (a, opt, rank, + c2pycode_map[ + ctype], + ','.join(dim), init) + if a == out_a: + sigout = '%s : rank-%s array(\'%s\') with bounds (%s)'\ + % (a, rank, c2pycode_map[ctype], ','.join(dim)) + else: + sigout = '%s : rank-%s array(\'%s\') with bounds (%s) and %s storage'\ + % (out_a, rank, c2pycode_map[ctype], ','.join(dim), a) + elif isexternal(var): + ua = '' + if a in lcb_map and lcb_map[a] in lcb2_map and 'argname' in lcb2_map[lcb_map[a]]: + ua = lcb2_map[lcb_map[a]]['argname'] + if not ua == a: + ua = ' => %s' % ua + else: + ua = '' + sig = '%s : 
call-back function%s' % (a, ua) + sigout = sig + else: + errmess( + 'getpydocsign: Could not resolve docsignature for "%s".\\n' % a) + return sig, sigout + + +def getarrdocsign(a, var): + ctype = getctype(var) + if isstring(var) and (not isarray(var)): + sig = '%s : rank-0 array(string(len=%s),\'c\')' % (a, + getstrlength(var)) + elif isscalar(var): + sig = '%s : rank-0 array(%s,\'%s\')' % (a, c2py_map[ctype], + c2pycode_map[ctype],) + elif isarray(var): + dim = var['dimension'] + rank = repr(len(dim)) + sig = '%s : rank-%s array(\'%s\') with bounds (%s)' % (a, rank, + c2pycode_map[ + ctype], + ','.join(dim)) + return sig + + +def getinit(a, var): + if isstring(var): + init, showinit = '""', "''" + else: + init, showinit = '', '' + if hasinitvalue(var): + init = var['='] + showinit = init + if iscomplex(var) or iscomplexarray(var): + ret = {} + + try: + v = var["="] + if ',' in v: + ret['init.r'], ret['init.i'] = markoutercomma( + v[1:-1]).split('@,@') + else: + v = eval(v, {}, {}) + ret['init.r'], ret['init.i'] = str(v.real), str(v.imag) + except Exception: + raise ValueError( + 'getinit: expected complex number `(r,i)\' but got `%s\' as initial value of %r.' % (init, a)) + if isarray(var): + init = '(capi_c.r=%s,capi_c.i=%s,capi_c)' % ( + ret['init.r'], ret['init.i']) + elif isstring(var): + if not init: + init, showinit = '""', "''" + if init[0] == "'": + init = '"%s"' % (init[1:-1].replace('"', '\\"')) + if init[0] == '"': + showinit = "'%s'" % (init[1:-1]) + return init, showinit + + +def sign2map(a, var): + """ + varname,ctype,atype + init,init.r,init.i,pytype + vardebuginfo,vardebugshowvalue,varshowvalue + varrfromat + intent + """ + out_a = a + if isintent_out(var): + for k in var['intent']: + if k[:4] == 'out=': + out_a = k[4:] + break + ret = {'varname': a, 'outvarname': out_a, 'ctype': getctype(var)} + intent_flags = [] + for f, s in isintent_dict.items(): + if f(var): + intent_flags.append('F2PY_%s' % s) + if intent_flags: + # XXX: Evaluate intent_flags here. 
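+ # (The flags collected above join into a single C bitmask expression + # below; e.g. an optional intent(in) scalar would yield the string + # 'F2PY_INTENT_IN|F2PY_OPTIONAL'.)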
+ ret['intent'] = '|'.join(intent_flags) + else: + ret['intent'] = 'F2PY_INTENT_IN' + if isarray(var): + ret['varrformat'] = 'N' + elif ret['ctype'] in c2buildvalue_map: + ret['varrformat'] = c2buildvalue_map[ret['ctype']] + else: + ret['varrformat'] = 'O' + ret['init'], ret['showinit'] = getinit(a, var) + if hasinitvalue(var) and iscomplex(var) and not isarray(var): + ret['init.r'], ret['init.i'] = markoutercomma( + ret['init'][1:-1]).split('@,@') + if isexternal(var): + ret['cbnamekey'] = a + if a in lcb_map: + ret['cbname'] = lcb_map[a] + ret['maxnofargs'] = lcb2_map[lcb_map[a]]['maxnofargs'] + ret['nofoptargs'] = lcb2_map[lcb_map[a]]['nofoptargs'] + ret['cbdocstr'] = lcb2_map[lcb_map[a]]['docstr'] + ret['cblatexdocstr'] = lcb2_map[lcb_map[a]]['latexdocstr'] + else: + ret['cbname'] = a + errmess('sign2map: Confused: external %s is not in lcb_map%s.\n' % ( + a, list(lcb_map.keys()))) + if isstring(var): + ret['length'] = getstrlength(var) + if isarray(var): + ret = dictappend(ret, getarrdims(a, var)) + dim = copy.copy(var['dimension']) + if ret['ctype'] in c2capi_map: + ret['atype'] = c2capi_map[ret['ctype']] + # Debug info + if debugcapi(var): + il = [isintent_in, 'input', isintent_out, 'output', + isintent_inout, 'inoutput', isrequired, 'required', + isoptional, 'optional', isintent_hide, 'hidden', + iscomplex, 'complex scalar', + l_and(isscalar, l_not(iscomplex)), 'scalar', + isstring, 'string', isarray, 'array', + iscomplexarray, 'complex array', isstringarray, 'string array', + iscomplexfunction, 'complex function', + l_and(isfunction, l_not(iscomplexfunction)), 'function', + isexternal, 'callback', + isintent_callback, 'callback', + isintent_aux, 'auxiliary', + ] + rl = [] + for i in range(0, len(il), 2): + if il[i](var): + rl.append(il[i + 1]) + if isstring(var): + rl.append('slen(%s)=%s' % (a, ret['length'])) + if isarray(var): + ddim = ','.join( + map(lambda x, y: '%s|%s' % (x, y), var['dimension'], dim)) + rl.append('dims(%s)' % ddim) + if isexternal(var): + ret['vardebuginfo'] = 'debug-capi:%s=>%s:%s' % ( + a, ret['cbname'], ','.join(rl)) + else: + ret['vardebuginfo'] = 'debug-capi:%s %s=%s:%s' % ( + ret['ctype'], a, ret['showinit'], ','.join(rl)) + if isscalar(var): + if ret['ctype'] in cformat_map: + ret['vardebugshowvalue'] = 'debug-capi:%s=%s' % ( + a, cformat_map[ret['ctype']]) + if isstring(var): + ret['vardebugshowvalue'] = 'debug-capi:slen(%s)=%%d %s=\\"%%s\\"' % ( + a, a) + if isexternal(var): + ret['vardebugshowvalue'] = 'debug-capi:%s=%%p' % (a) + if ret['ctype'] in cformat_map: + ret['varshowvalue'] = '#name#:%s=%s' % (a, cformat_map[ret['ctype']]) + ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) + if isstring(var): + ret['varshowvalue'] = '#name#:slen(%s)=%%d %s=\\"%%s\\"' % (a, a) + ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) + if hasnote(var): + ret['note'] = var['note'] + return ret + + +def routsign2map(rout): + """ + name,NAME,begintitle,endtitle + rname,ctype,rformat + routdebugshowvalue + """ + global lcb_map + name = rout['name'] + fname = getfortranname(rout) + ret = {'name': name, + 'texname': name.replace('_', '\\_'), + 'name_lower': name.lower(), + 'NAME': name.upper(), + 'begintitle': gentitle(name), + 'endtitle': gentitle('end of %s' % name), + 'fortranname': fname, + 'FORTRANNAME': fname.upper(), + 'callstatement': getcallstatement(rout) or '', + 'usercode': getusercode(rout) or '', + 'usercode1': getusercode1(rout) or '', + } + if '_' in fname: + ret['F_FUNC'] = 'F_FUNC_US' + else: + ret['F_FUNC'] = 'F_FUNC' + if '_' 
in name: + ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC_US' + else: + ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC' + lcb_map = {} + if 'use' in rout: + for u in rout['use'].keys(): + if u in cb_rules.cb_map: + for un in cb_rules.cb_map[u]: + ln = un[0] + if 'map' in rout['use'][u]: + for k in rout['use'][u]['map'].keys(): + if rout['use'][u]['map'][k] == un[0]: + ln = k + break + lcb_map[ln] = un[1] + elif 'externals' in rout and rout['externals']: + errmess('routsign2map: Confused: function %s has externals %s but no "use" statement.\n' % ( + ret['name'], repr(rout['externals']))) + ret['callprotoargument'] = getcallprotoargument(rout, lcb_map) or '' + if isfunction(rout): + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + ret['rname'] = a + ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, rout) + ret['ctype'] = getctype(rout['vars'][a]) + if hasresultnote(rout): + ret['resultnote'] = rout['vars'][a]['note'] + rout['vars'][a]['note'] = ['See elsewhere.'] + if ret['ctype'] in c2buildvalue_map: + ret['rformat'] = c2buildvalue_map[ret['ctype']] + else: + ret['rformat'] = 'O' + errmess('routsign2map: no c2buildvalue key for type %s\n' % + (repr(ret['ctype']))) + if debugcapi(rout): + if ret['ctype'] in cformat_map: + ret['routdebugshowvalue'] = 'debug-capi:%s=%s' % ( + a, cformat_map[ret['ctype']]) + if isstringfunction(rout): + ret['routdebugshowvalue'] = 'debug-capi:slen(%s)=%%d %s=\\"%%s\\"' % ( + a, a) + if isstringfunction(rout): + ret['rlength'] = getstrlength(rout['vars'][a]) + if ret['rlength'] == '-1': + errmess('routsign2map: expected explicit specification of the length of the string returned by the fortran function %s; taking 10.\n' % ( + repr(rout['name']))) + ret['rlength'] = '10' + if hasnote(rout): + ret['note'] = rout['note'] + rout['note'] = ['See elsewhere.'] + return ret + + +def modsign2map(m): + """ + modulename + """ + if ismodule(m): + ret = {'f90modulename': m['name'], + 'F90MODULENAME': m['name'].upper(), + 'texf90modulename': m['name'].replace('_', '\\_')} + else: + ret = {'modulename': m['name'], + 'MODULENAME': m['name'].upper(), + 'texmodulename': m['name'].replace('_', '\\_')} + ret['restdoc'] = getrestdoc(m) or [] + if hasnote(m): + ret['note'] = m['note'] + ret['usercode'] = getusercode(m) or '' + ret['usercode1'] = getusercode1(m) or '' + if m['body']: + ret['interface_usercode'] = getusercode(m['body'][0]) or '' + else: + ret['interface_usercode'] = '' + ret['pymethoddef'] = getpymethoddef(m) or '' + if 'coutput' in m: + ret['coutput'] = m['coutput'] + if 'f2py_wrapper_output' in m: + ret['f2py_wrapper_output'] = m['f2py_wrapper_output'] + return ret + + +def cb_sign2map(a, var, index=None): + ret = {'varname': a} + ret['varname_i'] = ret['varname'] + ret['ctype'] = getctype(var) + if ret['ctype'] in c2capi_map: + ret['atype'] = c2capi_map[ret['ctype']] + if ret['ctype'] in cformat_map: + ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) + if isarray(var): + ret = dictappend(ret, getarrdims(a, var)) + ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) + if hasnote(var): + ret['note'] = var['note'] + var['note'] = ['See elsewhere.'] + return ret + + +def cb_routsign2map(rout, um): + """ + name,begintitle,endtitle,argname + ctype,rctype,maxnofargs,nofoptargs,returncptr + """ + ret = {'name': 'cb_%s_in_%s' % (rout['name'], um), + 'returncptr': ''} + if isintent_callback(rout): + if '_' in rout['name']: + F_FUNC = 'F_FUNC_US' + else: + F_FUNC = 'F_FUNC' + ret['callbackname'] = '%s(%s,%s)' \ + % (F_FUNC, + rout['name'].lower(), 
+ rout['name'].upper(), + ) + ret['static'] = 'extern' + else: + ret['callbackname'] = ret['name'] + ret['static'] = 'static' + ret['argname'] = rout['name'] + ret['begintitle'] = gentitle(ret['name']) + ret['endtitle'] = gentitle('end of %s' % ret['name']) + ret['ctype'] = getctype(rout) + ret['rctype'] = 'void' + if ret['ctype'] == 'string': + ret['rctype'] = 'void' + else: + ret['rctype'] = ret['ctype'] + if ret['rctype'] != 'void': + if iscomplexfunction(rout): + ret['returncptr'] = """ +#ifdef F2PY_CB_RETURNCOMPLEX +return_value= +#endif +""" + else: + ret['returncptr'] = 'return_value=' + if ret['ctype'] in cformat_map: + ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) + if isstringfunction(rout): + ret['strlength'] = getstrlength(rout) + if isfunction(rout): + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if hasnote(rout['vars'][a]): + ret['note'] = rout['vars'][a]['note'] + rout['vars'][a]['note'] = ['See elsewhere.'] + ret['rname'] = a + ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, rout) + if iscomplexfunction(rout): + ret['rctype'] = """ +#ifdef F2PY_CB_RETURNCOMPLEX +#ctype# +#else +void +#endif +""" + else: + if hasnote(rout): + ret['note'] = rout['note'] + rout['note'] = ['See elsewhere.'] + nofargs = 0 + nofoptargs = 0 + if 'args' in rout and 'vars' in rout: + for a in rout['args']: + var = rout['vars'][a] + if l_or(isintent_in, isintent_inout)(var): + nofargs = nofargs + 1 + if isoptional(var): + nofoptargs = nofoptargs + 1 + ret['maxnofargs'] = repr(nofargs) + ret['nofoptargs'] = repr(nofoptargs) + if hasnote(rout) and isfunction(rout) and 'result' in rout: + ret['routnote'] = rout['note'] + rout['note'] = ['See elsewhere.'] + return ret + + +def common_sign2map(a, var): # obsolete + ret = {'varname': a, 'ctype': getctype(var)} + if isstringarray(var): + ret['ctype'] = 'char' + if ret['ctype'] in c2capi_map: + ret['atype'] = c2capi_map[ret['ctype']] + if ret['ctype'] in cformat_map: + ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) + if isarray(var): + ret = dictappend(ret, getarrdims(a, var)) + elif isstring(var): + ret['size'] = getstrlength(var) + ret['rank'] = '1' + ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) + if hasnote(var): + ret['note'] = var['note'] + var['note'] = ['See elsewhere.'] + # for strings this returns 0-rank but actually is 1-rank + ret['arrdocstr'] = getarrdocsign(a, var) + return ret diff --git a/MLPY/Lib/site-packages/numpy/f2py/cb_rules.py b/MLPY/Lib/site-packages/numpy/f2py/cb_rules.py new file mode 100644 index 0000000000000000000000000000000000000000..ca0e2baee437f69dfd70e82e0ead68380044c5af --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/cb_rules.py @@ -0,0 +1,616 @@ +#!/usr/bin/env python3 +""" + +Build call-back mechanism for f2py2e. + +Copyright 2000 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +$Date: 2005/07/20 11:27:58 $ +Pearu Peterson + +""" +from . import __version__ +from .auxfuncs import ( + applyrules, debugcapi, dictappend, errmess, getargs, hasnote, isarray, + iscomplex, iscomplexarray, iscomplexfunction, isfunction, isintent_c, + isintent_hide, isintent_in, isintent_inout, isintent_nothide, + isintent_out, isoptional, isrequired, isscalar, isstring, + isstringfunction, issubroutine, l_and, l_not, l_or, outmess, replace, + stripcomma, throw_error +) +from . 
import cfuncs + +f2py_version = __version__.version + + +################## Rules for callback function ############## + +cb_routine_rules = { + 'cbtypedefs': 'typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);', + 'body': """ +#begintitle# +typedef struct { + PyObject *capi; + PyTupleObject *args_capi; + int nofargs; + jmp_buf jmpbuf; +} #name#_t; + +#if defined(F2PY_THREAD_LOCAL_DECL) && !defined(F2PY_USE_PYTHON_TLS) + +static F2PY_THREAD_LOCAL_DECL #name#_t *_active_#name# = NULL; + +static #name#_t *swap_active_#name#(#name#_t *ptr) { + #name#_t *prev = _active_#name#; + _active_#name# = ptr; + return prev; +} + +static #name#_t *get_active_#name#(void) { + return _active_#name#; +} + +#else + +static #name#_t *swap_active_#name#(#name#_t *ptr) { + char *key = "__f2py_cb_#name#"; + return (#name#_t *)F2PySwapThreadLocalCallbackPtr(key, ptr); +} + +static #name#_t *get_active_#name#(void) { + char *key = "__f2py_cb_#name#"; + return (#name#_t *)F2PyGetThreadLocalCallbackPtr(key); +} + +#endif + +/*typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);*/ +#static# #rctype# #callbackname# (#optargs##args##strarglens##noargs#) { + #name#_t cb_local = { NULL, NULL, 0 }; + #name#_t *cb = NULL; + PyTupleObject *capi_arglist = NULL; + PyObject *capi_return = NULL; + PyObject *capi_tmp = NULL; + PyObject *capi_arglist_list = NULL; + int capi_j,capi_i = 0; + int capi_longjmp_ok = 1; +#decl# +#ifdef F2PY_REPORT_ATEXIT +f2py_cb_start_clock(); +#endif + cb = get_active_#name#(); + if (cb == NULL) { + capi_longjmp_ok = 0; + cb = &cb_local; + } + capi_arglist = cb->args_capi; + CFUNCSMESS(\"cb:Call-back function #name# (maxnofargs=#maxnofargs#(-#nofoptargs#))\\n\"); + CFUNCSMESSPY(\"cb:#name#_capi=\",cb->capi); + if (cb->capi==NULL) { + capi_longjmp_ok = 0; + cb->capi = PyObject_GetAttrString(#modulename#_module,\"#argname#\"); + CFUNCSMESSPY(\"cb:#name#_capi=\",cb->capi); + } + if (cb->capi==NULL) { + PyErr_SetString(#modulename#_error,\"cb: Callback #argname# not defined (as an argument or module #modulename# attribute).\\n\"); + goto capi_fail; + } + if (F2PyCapsule_Check(cb->capi)) { + #name#_typedef #name#_cptr; + #name#_cptr = F2PyCapsule_AsVoidPtr(cb->capi); + #returncptr#(*#name#_cptr)(#optargs_nm##args_nm##strarglens_nm#); + #return# + } + if (capi_arglist==NULL) { + capi_longjmp_ok = 0; + capi_tmp = PyObject_GetAttrString(#modulename#_module,\"#argname#_extra_args\"); + if (capi_tmp) { + capi_arglist = (PyTupleObject *)PySequence_Tuple(capi_tmp); + if (capi_arglist==NULL) { + PyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#argname#_extra_args to tuple.\\n\"); + goto capi_fail; + } + } else { + PyErr_Clear(); + capi_arglist = (PyTupleObject *)Py_BuildValue(\"()\"); + } + } + if (capi_arglist == NULL) { + PyErr_SetString(#modulename#_error,\"Callback #argname# argument list is not set.\\n\"); + goto capi_fail; + } +#setdims# +#ifdef PYPY_VERSION +#define CAPI_ARGLIST_SETITEM(idx, value) PyList_SetItem((PyObject *)capi_arglist_list, idx, value) + capi_arglist_list = PySequence_List(capi_arglist); + if (capi_arglist_list == NULL) goto capi_fail; +#else +#define CAPI_ARGLIST_SETITEM(idx, value) PyTuple_SetItem((PyObject *)capi_arglist, idx, value) +#endif +#pyobjfrom# +#undef CAPI_ARGLIST_SETITEM +#ifdef PYPY_VERSION + CFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist_list); +#else + CFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist); +#endif + CFUNCSMESS(\"cb:Call-back calling Python function #argname#.\\n\"); +#ifdef 
F2PY_REPORT_ATEXIT +f2py_cb_start_call_clock(); +#endif +#ifdef PYPY_VERSION + capi_return = PyObject_CallObject(cb->capi,(PyObject *)capi_arglist_list); + Py_DECREF(capi_arglist_list); + capi_arglist_list = NULL; +#else + capi_return = PyObject_CallObject(cb->capi,(PyObject *)capi_arglist); +#endif +#ifdef F2PY_REPORT_ATEXIT +f2py_cb_stop_call_clock(); +#endif + CFUNCSMESSPY(\"cb:capi_return=\",capi_return); + if (capi_return == NULL) { + fprintf(stderr,\"capi_return is NULL\\n\"); + goto capi_fail; + } + if (capi_return == Py_None) { + Py_DECREF(capi_return); + capi_return = Py_BuildValue(\"()\"); + } + else if (!PyTuple_Check(capi_return)) { + capi_return = Py_BuildValue(\"(N)\",capi_return); + } + capi_j = PyTuple_Size(capi_return); + capi_i = 0; +#frompyobj# + CFUNCSMESS(\"cb:#name#:successful\\n\"); + Py_DECREF(capi_return); +#ifdef F2PY_REPORT_ATEXIT +f2py_cb_stop_clock(); +#endif + goto capi_return_pt; +capi_fail: + fprintf(stderr,\"Call-back #name# failed.\\n\"); + Py_XDECREF(capi_return); + Py_XDECREF(capi_arglist_list); + if (capi_longjmp_ok) { + longjmp(cb->jmpbuf,-1); + } +capi_return_pt: + ; +#return# +} +#endtitle# +""", + 'need': ['setjmp.h', 'CFUNCSMESS', 'F2PY_THREAD_LOCAL_DECL'], + 'maxnofargs': '#maxnofargs#', + 'nofoptargs': '#nofoptargs#', + 'docstr': """\ +\tdef #argname#(#docsignature#): return #docreturn#\\n\\ +#docstrsigns#""", + 'latexdocstr': """ +{{}\\verb@def #argname#(#latexdocsignature#): return #docreturn#@{}} +#routnote# + +#latexdocstrsigns#""", + 'docstrshort': 'def #argname#(#docsignature#): return #docreturn#' +} +cb_rout_rules = [ + { # Init + 'separatorsfor': {'decl': '\n', + 'args': ',', 'optargs': '', 'pyobjfrom': '\n', 'freemem': '\n', + 'args_td': ',', 'optargs_td': '', + 'args_nm': ',', 'optargs_nm': '', + 'frompyobj': '\n', 'setdims': '\n', + 'docstrsigns': '\\n"\n"', + 'latexdocstrsigns': '\n', + 'latexdocstrreq': '\n', 'latexdocstropt': '\n', + 'latexdocstrout': '\n', 'latexdocstrcbs': '\n', + }, + 'decl': '/*decl*/', 'pyobjfrom': '/*pyobjfrom*/', 'frompyobj': '/*frompyobj*/', + 'args': [], 'optargs': '', 'return': '', 'strarglens': '', 'freemem': '/*freemem*/', + 'args_td': [], 'optargs_td': '', 'strarglens_td': '', + 'args_nm': [], 'optargs_nm': '', 'strarglens_nm': '', + 'noargs': '', + 'setdims': '/*setdims*/', + 'docstrsigns': '', 'latexdocstrsigns': '', + 'docstrreq': '\tRequired arguments:', + 'docstropt': '\tOptional arguments:', + 'docstrout': '\tReturn objects:', + 'docstrcbs': '\tCall-back functions:', + 'docreturn': '', 'docsign': '', 'docsignopt': '', + 'latexdocstrreq': '\\noindent Required arguments:', + 'latexdocstropt': '\\noindent Optional arguments:', + 'latexdocstrout': '\\noindent Return objects:', + 'latexdocstrcbs': '\\noindent Call-back functions:', + 'routnote': {hasnote: '--- #note#', l_not(hasnote): ''}, + }, { # Function + 'decl': ' #ctype# return_value;', + 'frompyobj': [{debugcapi: ' CFUNCSMESS("cb:Getting return_value->");'}, + ' if (capi_j>capi_i)\n GETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#,"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n");', + {debugcapi: + ' fprintf(stderr,"#showvalueformat#.\\n",return_value);'} + ], + 'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'}, 'GETSCALARFROMPYTUPLE'], + 'return': ' return return_value;', + '_check': l_and(isfunction, l_not(isstringfunction), l_not(iscomplexfunction)) + }, + { # String function + 'pyobjfrom': {debugcapi: ' fprintf(stderr,"debug-capi:cb:#name#:%d:\\n",return_value_len);'}, 
+ 'args': '#ctype# return_value,int return_value_len', + 'args_nm': 'return_value,&return_value_len', + 'args_td': '#ctype# ,int', + 'frompyobj': [{debugcapi: ' CFUNCSMESS("cb:Getting return_value->\\"");'}, + """ if (capi_j>capi_i) + GETSTRFROMPYTUPLE(capi_return,capi_i++,return_value,return_value_len);""", + {debugcapi: + ' fprintf(stderr,"#showvalueformat#\\".\\n",return_value);'} + ], + 'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'}, + 'string.h', 'GETSTRFROMPYTUPLE'], + 'return': 'return;', + '_check': isstringfunction + }, + { # Complex function + 'optargs': """ +#ifndef F2PY_CB_RETURNCOMPLEX +#ctype# *return_value +#endif +""", + 'optargs_nm': """ +#ifndef F2PY_CB_RETURNCOMPLEX +return_value +#endif +""", + 'optargs_td': """ +#ifndef F2PY_CB_RETURNCOMPLEX +#ctype# * +#endif +""", + 'decl': """ +#ifdef F2PY_CB_RETURNCOMPLEX + #ctype# return_value; +#endif +""", + 'frompyobj': [{debugcapi: ' CFUNCSMESS("cb:Getting return_value->");'}, + """\ + if (capi_j>capi_i) +#ifdef F2PY_CB_RETURNCOMPLEX + GETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#,\"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n\"); +#else + GETSCALARFROMPYTUPLE(capi_return,capi_i++,return_value,#ctype#,\"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n\"); +#endif +""", + {debugcapi: """ +#ifdef F2PY_CB_RETURNCOMPLEX + fprintf(stderr,\"#showvalueformat#.\\n\",(return_value).r,(return_value).i); +#else + fprintf(stderr,\"#showvalueformat#.\\n\",(*return_value).r,(*return_value).i); +#endif + +"""} + ], + 'return': """ +#ifdef F2PY_CB_RETURNCOMPLEX + return return_value; +#else + return; +#endif +""", + 'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'}, + 'string.h', 'GETSCALARFROMPYTUPLE', '#ctype#'], + '_check': iscomplexfunction + }, + {'docstrout': '\t\t#pydocsignout#', + 'latexdocstrout': ['\\item[]{{}\\verb@#pydocsignout#@{}}', + {hasnote: '--- #note#'}], + 'docreturn': '#rname#,', + '_check': isfunction}, + {'_check': issubroutine, 'return': 'return;'} +] + +cb_arg_rules = [ + { # Doc + 'docstropt': {l_and(isoptional, isintent_nothide): '\t\t#pydocsign#'}, + 'docstrreq': {l_and(isrequired, isintent_nothide): '\t\t#pydocsign#'}, + 'docstrout': {isintent_out: '\t\t#pydocsignout#'}, + 'latexdocstropt': {l_and(isoptional, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}', + {hasnote: '--- #note#'}]}, + 'latexdocstrreq': {l_and(isrequired, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}', + {hasnote: '--- #note#'}]}, + 'latexdocstrout': {isintent_out: ['\\item[]{{}\\verb@#pydocsignout#@{}}', + {l_and(hasnote, isintent_hide): '--- #note#', + l_and(hasnote, isintent_nothide): '--- See above.'}]}, + 'docsign': {l_and(isrequired, isintent_nothide): '#varname#,'}, + 'docsignopt': {l_and(isoptional, isintent_nothide): '#varname#,'}, + 'depend': '' + }, + { + 'args': { + l_and(isscalar, isintent_c): '#ctype# #varname_i#', + l_and(isscalar, l_not(isintent_c)): '#ctype# *#varname_i#_cb_capi', + isarray: '#ctype# *#varname_i#', + isstring: '#ctype# #varname_i#' + }, + 'args_nm': { + l_and(isscalar, isintent_c): '#varname_i#', + l_and(isscalar, l_not(isintent_c)): '#varname_i#_cb_capi', + isarray: '#varname_i#', + isstring: '#varname_i#' + }, + 'args_td': { + l_and(isscalar, isintent_c): '#ctype#', + l_and(isscalar, l_not(isintent_c)): '#ctype# *', + isarray: '#ctype# *', + isstring: '#ctype#' + }, + 'need': {l_or(isscalar, isarray, isstring): '#ctype#'}, + # untested with multiple args + 
'strarglens': {isstring: ',int #varname_i#_cb_len'}, + 'strarglens_td': {isstring: ',int'}, # untested with multiple args + # untested with multiple args + 'strarglens_nm': {isstring: ',#varname_i#_cb_len'}, + }, + { # Scalars + 'decl': {l_not(isintent_c): ' #ctype# #varname_i#=(*#varname_i#_cb_capi);'}, + 'error': {l_and(isintent_c, isintent_out, + throw_error('intent(c,out) is forbidden for callback scalar arguments')): + ''}, + 'frompyobj': [{debugcapi: ' CFUNCSMESS("cb:Getting #varname#->");'}, + {isintent_out: + ' if (capi_j>capi_i)\n GETSCALARFROMPYTUPLE(capi_return,capi_i++,#varname_i#_cb_capi,#ctype#,"#ctype#_from_pyobj failed in converting argument #varname# of call-back function #name# to C #ctype#\\n");'}, + {l_and(debugcapi, l_and(l_not(iscomplex), isintent_c)): + ' fprintf(stderr,"#showvalueformat#.\\n",#varname_i#);'}, + {l_and(debugcapi, l_and(l_not(iscomplex), l_not( isintent_c))): + ' fprintf(stderr,"#showvalueformat#.\\n",*#varname_i#_cb_capi);'}, + {l_and(debugcapi, l_and(iscomplex, isintent_c)): + ' fprintf(stderr,"#showvalueformat#.\\n",(#varname_i#).r,(#varname_i#).i);'}, + {l_and(debugcapi, l_and(iscomplex, l_not( isintent_c))): + ' fprintf(stderr,"#showvalueformat#.\\n",(*#varname_i#_cb_capi).r,(*#varname_i#_cb_capi).i);'}, + ], + 'need': [{isintent_out: ['#ctype#_from_pyobj', 'GETSCALARFROMPYTUPLE']}, + {debugcapi: 'CFUNCSMESS'}], + '_check': isscalar + }, { + 'pyobjfrom': [{isintent_in: """\ + if (cb->nofargs>capi_i) + if (CAPI_ARGLIST_SETITEM(capi_i++,pyobj_from_#ctype#1(#varname_i#))) + goto capi_fail;"""}, + {isintent_inout: """\ + if (cb->nofargs>capi_i) + if (CAPI_ARGLIST_SETITEM(capi_i++,pyarr_from_p_#ctype#1(#varname_i#_cb_capi))) + goto capi_fail;"""}], + 'need': [{isintent_in: 'pyobj_from_#ctype#1'}, + {isintent_inout: 'pyarr_from_p_#ctype#1'}, + {iscomplex: '#ctype#'}], + '_check': l_and(isscalar, isintent_nothide), + '_optional': '' + }, { # String + 'frompyobj': [{debugcapi: ' CFUNCSMESS("cb:Getting #varname#->\\"");'}, + """ if (capi_j>capi_i) + GETSTRFROMPYTUPLE(capi_return,capi_i++,#varname_i#,#varname_i#_cb_len);""", + {debugcapi: + ' fprintf(stderr,"#showvalueformat#\\":%d:.\\n",#varname_i#,#varname_i#_cb_len);'}, + ], + 'need': ['#ctype#', 'GETSTRFROMPYTUPLE', + {debugcapi: 'CFUNCSMESS'}, 'string.h'], + '_check': l_and(isstring, isintent_out) + }, { + 'pyobjfrom': [{debugcapi: ' fprintf(stderr,"debug-capi:cb:#varname#=\\"#showvalueformat#\\":%d:\\n",#varname_i#,#varname_i#_cb_len);'}, + {isintent_in: """\ + if (cb->nofargs>capi_i) + if (CAPI_ARGLIST_SETITEM(capi_i++,pyobj_from_#ctype#1size(#varname_i#,#varname_i#_cb_len))) + goto capi_fail;"""}, + {isintent_inout: """\ + if (cb->nofargs>capi_i) { + int #varname_i#_cb_dims[] = {#varname_i#_cb_len}; + if (CAPI_ARGLIST_SETITEM(capi_i++,pyarr_from_p_#ctype#1(#varname_i#,#varname_i#_cb_dims))) + goto capi_fail; + }"""}], + 'need': [{isintent_in: 'pyobj_from_#ctype#1size'}, + {isintent_inout: 'pyarr_from_p_#ctype#1'}], + '_check': l_and(isstring, isintent_nothide), + '_optional': '' + }, + # Array ... + { + 'decl': ' npy_intp #varname_i#_Dims[#rank#] = {#rank*[-1]#};', + 'setdims': ' #cbsetdims#;', + '_check': isarray, + '_depend': '' + }, + { + 'pyobjfrom': [{debugcapi: ' fprintf(stderr,"debug-capi:cb:#varname#\\n");'}, + {isintent_c: """\ + if (cb->nofargs>capi_i) { + int itemsize_ = #atype# == NPY_STRING ? 1 : 0; + /*XXX: Hmm, what will destroy this array??? 
*/ + PyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,#rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,itemsize_,NPY_ARRAY_CARRAY,NULL); +""", + l_not(isintent_c): """\ + if (cb->nofargs>capi_i) { + int itemsize_ = #atype# == NPY_STRING ? 1 : 0; + /*XXX: Hmm, what will destroy this array??? */ + PyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,#rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,itemsize_,NPY_ARRAY_FARRAY,NULL); +""", + }, + """ + if (tmp_arr==NULL) + goto capi_fail; + if (CAPI_ARGLIST_SETITEM(capi_i++,(PyObject *)tmp_arr)) + goto capi_fail; +}"""], + '_check': l_and(isarray, isintent_nothide, l_or(isintent_in, isintent_inout)), + '_optional': '', + }, { + 'frompyobj': [{debugcapi: ' CFUNCSMESS("cb:Getting #varname#->");'}, + """ if (capi_j>capi_i) { + PyArrayObject *rv_cb_arr = NULL; + if ((capi_tmp = PyTuple_GetItem(capi_return,capi_i++))==NULL) goto capi_fail; + rv_cb_arr = array_from_pyobj(#atype#,#varname_i#_Dims,#rank#,F2PY_INTENT_IN""", + {isintent_c: '|F2PY_INTENT_C'}, + """,capi_tmp); + if (rv_cb_arr == NULL) { + fprintf(stderr,\"rv_cb_arr is NULL\\n\"); + goto capi_fail; + } + MEMCOPY(#varname_i#,PyArray_DATA(rv_cb_arr),PyArray_NBYTES(rv_cb_arr)); + if (capi_tmp != (PyObject *)rv_cb_arr) { + Py_DECREF(rv_cb_arr); + } + }""", + {debugcapi: ' fprintf(stderr,"<-.\\n");'}, + ], + 'need': ['MEMCOPY', {iscomplexarray: '#ctype#'}], + '_check': l_and(isarray, isintent_out) + }, { + 'docreturn': '#varname#,', + '_check': isintent_out + } +] + +################## Build call-back module ############# +cb_map = {} + + +def buildcallbacks(m): + cb_map[m['name']] = [] + for bi in m['body']: + if bi['block'] == 'interface': + for b in bi['body']: + if b: + buildcallback(b, m['name']) + else: + errmess('warning: empty body for %s\n' % (m['name'])) + + +def buildcallback(rout, um): + from . 
import capi_maps + + outmess('\tConstructing call-back function "cb_%s_in_%s"\n' % + (rout['name'], um)) + args, depargs = getargs(rout) + capi_maps.depargs = depargs + var = rout['vars'] + vrd = capi_maps.cb_routsign2map(rout, um) + rd = dictappend({}, vrd) + cb_map[um].append([rout['name'], rd['name']]) + for r in cb_rout_rules: + if ('_check' in r and r['_check'](rout)) or ('_check' not in r): + ar = applyrules(r, vrd, rout) + rd = dictappend(rd, ar) + savevrd = {} + for i, a in enumerate(args): + vrd = capi_maps.cb_sign2map(a, var[a], index=i) + savevrd[a] = vrd + for r in cb_arg_rules: + if '_depend' in r: + continue + if '_optional' in r and isoptional(var[a]): + continue + if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): + ar = applyrules(r, vrd, var[a]) + rd = dictappend(rd, ar) + if '_break' in r: + break + for a in args: + vrd = savevrd[a] + for r in cb_arg_rules: + if '_depend' in r: + continue + if ('_optional' not in r) or ('_optional' in r and isrequired(var[a])): + continue + if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): + ar = applyrules(r, vrd, var[a]) + rd = dictappend(rd, ar) + if '_break' in r: + break + for a in depargs: + vrd = savevrd[a] + for r in cb_arg_rules: + if '_depend' not in r: + continue + if '_optional' in r: + continue + if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): + ar = applyrules(r, vrd, var[a]) + rd = dictappend(rd, ar) + if '_break' in r: + break + if 'args' in rd and 'optargs' in rd: + if isinstance(rd['optargs'], list): + rd['optargs'] = rd['optargs'] + [""" +#ifndef F2PY_CB_RETURNCOMPLEX +, +#endif +"""] + rd['optargs_nm'] = rd['optargs_nm'] + [""" +#ifndef F2PY_CB_RETURNCOMPLEX +, +#endif +"""] + rd['optargs_td'] = rd['optargs_td'] + [""" +#ifndef F2PY_CB_RETURNCOMPLEX +, +#endif +"""] + if isinstance(rd['docreturn'], list): + rd['docreturn'] = stripcomma( + replace('#docreturn#', {'docreturn': rd['docreturn']})) + optargs = stripcomma(replace('#docsignopt#', + {'docsignopt': rd['docsignopt']} + )) + if optargs == '': + rd['docsignature'] = stripcomma( + replace('#docsign#', {'docsign': rd['docsign']})) + else: + rd['docsignature'] = replace('#docsign#[#docsignopt#]', + {'docsign': rd['docsign'], + 'docsignopt': optargs, + }) + rd['latexdocsignature'] = rd['docsignature'].replace('_', '\\_') + rd['latexdocsignature'] = rd['latexdocsignature'].replace(',', ', ') + rd['docstrsigns'] = [] + rd['latexdocstrsigns'] = [] + for k in ['docstrreq', 'docstropt', 'docstrout', 'docstrcbs']: + if k in rd and isinstance(rd[k], list): + rd['docstrsigns'] = rd['docstrsigns'] + rd[k] + k = 'latex' + k + if k in rd and isinstance(rd[k], list): + rd['latexdocstrsigns'] = rd['latexdocstrsigns'] + rd[k][0:1] +\ + ['\\begin{description}'] + rd[k][1:] +\ + ['\\end{description}'] + if 'args' not in rd: + rd['args'] = '' + rd['args_td'] = '' + rd['args_nm'] = '' + if not (rd.get('args') or rd.get('optargs') or rd.get('strarglens')): + rd['noargs'] = 'void' + + ar = applyrules(cb_routine_rules, rd) + cfuncs.callbacks[rd['name']] = ar['body'] + if isinstance(ar['need'], str): + ar['need'] = [ar['need']] + + if 'need' in rd: + for t in cfuncs.typedefs.keys(): + if t in rd['need']: + ar['need'].append(t) + + cfuncs.typedefs_generated[rd['name'] + '_typedef'] = ar['cbtypedefs'] + ar['need'].append(rd['name'] + '_typedef') + cfuncs.needs[rd['name']] = ar['need'] + + capi_maps.lcb2_map[rd['name']] = {'maxnofargs': ar['maxnofargs'], + 'nofoptargs': ar['nofoptargs'], + 'docstr': ar['docstr'], + 'latexdocstr': 
ar['latexdocstr'], + 'argname': rd['argname'] + } + outmess('\t %s\n' % (ar['docstrshort'])) + return +################## Build call-back function ############# diff --git a/MLPY/Lib/site-packages/numpy/f2py/cfuncs.py b/MLPY/Lib/site-packages/numpy/f2py/cfuncs.py new file mode 100644 index 0000000000000000000000000000000000000000..63f7d737cd8b0cc1269707eb9e60fb1ea08672c0 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/cfuncs.py @@ -0,0 +1,1364 @@ +#!/usr/bin/env python3 +""" + +C declarations, CPP macros, and C functions for f2py2e. +Only required declarations/macros/functions will be used. + +Copyright 1999,2000 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +$Date: 2005/05/06 11:42:34 $ +Pearu Peterson + +""" +import sys +import copy + +from . import __version__ + +f2py_version = __version__.version +errmess = sys.stderr.write + +##################### Definitions ################## + +outneeds = {'includes0': [], 'includes': [], 'typedefs': [], 'typedefs_generated': [], + 'userincludes': [], + 'cppmacros': [], 'cfuncs': [], 'callbacks': [], 'f90modhooks': [], + 'commonhooks': []} +needs = {} +includes0 = {'includes0': '/*need_includes0*/'} +includes = {'includes': '/*need_includes*/'} +userincludes = {'userincludes': '/*need_userincludes*/'} +typedefs = {'typedefs': '/*need_typedefs*/'} +typedefs_generated = {'typedefs_generated': '/*need_typedefs_generated*/'} +cppmacros = {'cppmacros': '/*need_cppmacros*/'} +cfuncs = {'cfuncs': '/*need_cfuncs*/'} +callbacks = {'callbacks': '/*need_callbacks*/'} +f90modhooks = {'f90modhooks': '/*need_f90modhooks*/', + 'initf90modhooksstatic': '/*initf90modhooksstatic*/', + 'initf90modhooksdynamic': '/*initf90modhooksdynamic*/', + } +commonhooks = {'commonhooks': '/*need_commonhooks*/', + 'initcommonhooks': '/*need_initcommonhooks*/', + } + +############ Includes ################### + +includes0['math.h'] = '#include <math.h>' +includes0['string.h'] = '#include <string.h>' +includes0['setjmp.h'] = '#include <setjmp.h>' + +includes['Python.h'] = '#include "Python.h"' +needs['arrayobject.h'] = ['Python.h'] +includes['arrayobject.h'] = '''#define PY_ARRAY_UNIQUE_SYMBOL PyArray_API +#include "arrayobject.h"''' + +includes['arrayobject.h'] = '#include "fortranobject.h"' +includes['stdarg.h'] = '#include <stdarg.h>' + +############# Type definitions ############### + +typedefs['unsigned_char'] = 'typedef unsigned char unsigned_char;' +typedefs['unsigned_short'] = 'typedef unsigned short unsigned_short;' +typedefs['unsigned_long'] = 'typedef unsigned long unsigned_long;' +typedefs['signed_char'] = 'typedef signed char signed_char;' +typedefs['long_long'] = """\ +#ifdef _WIN32 +typedef __int64 long_long; +#else +typedef long long long_long; +typedef unsigned long long unsigned_long_long; +#endif +""" +typedefs['unsigned_long_long'] = """\ +#ifdef _WIN32 +typedef unsigned __int64 unsigned_long_long; +#else +typedef unsigned long long unsigned_long_long; +#endif +""" +typedefs['long_double'] = """\ +#ifndef _LONG_DOUBLE +typedef long double long_double; +#endif +""" +typedefs[ + 'complex_long_double'] = 'typedef struct {long double r,i;} complex_long_double;' +typedefs['complex_float'] = 'typedef struct {float r,i;} complex_float;' +typedefs['complex_double'] = 'typedef struct {double r,i;} complex_double;' +typedefs['string'] = """typedef char * string;""" + + +############### CPP macros #################### +cppmacros['CFUNCSMESS'] = """\ 
+#ifdef DEBUGCFUNCS +#define CFUNCSMESS(mess) fprintf(stderr,\"debug-capi:\"mess); +#define CFUNCSMESSPY(mess,obj) CFUNCSMESS(mess) \\ + PyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\ + fprintf(stderr,\"\\n\"); +#else +#define CFUNCSMESS(mess) +#define CFUNCSMESSPY(mess,obj) +#endif +""" +cppmacros['F_FUNC'] = """\ +#if defined(PREPEND_FORTRAN) +#if defined(NO_APPEND_FORTRAN) +#if defined(UPPERCASE_FORTRAN) +#define F_FUNC(f,F) _##F +#else +#define F_FUNC(f,F) _##f +#endif +#else +#if defined(UPPERCASE_FORTRAN) +#define F_FUNC(f,F) _##F##_ +#else +#define F_FUNC(f,F) _##f##_ +#endif +#endif +#else +#if defined(NO_APPEND_FORTRAN) +#if defined(UPPERCASE_FORTRAN) +#define F_FUNC(f,F) F +#else +#define F_FUNC(f,F) f +#endif +#else +#if defined(UPPERCASE_FORTRAN) +#define F_FUNC(f,F) F##_ +#else +#define F_FUNC(f,F) f##_ +#endif +#endif +#endif +#if defined(UNDERSCORE_G77) +#define F_FUNC_US(f,F) F_FUNC(f##_,F##_) +#else +#define F_FUNC_US(f,F) F_FUNC(f,F) +#endif +""" +cppmacros['F_WRAPPEDFUNC'] = """\ +#if defined(PREPEND_FORTRAN) +#if defined(NO_APPEND_FORTRAN) +#if defined(UPPERCASE_FORTRAN) +#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F +#else +#define F_WRAPPEDFUNC(f,F) _f2pywrap##f +#endif +#else +#if defined(UPPERCASE_FORTRAN) +#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F##_ +#else +#define F_WRAPPEDFUNC(f,F) _f2pywrap##f##_ +#endif +#endif +#else +#if defined(NO_APPEND_FORTRAN) +#if defined(UPPERCASE_FORTRAN) +#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F +#else +#define F_WRAPPEDFUNC(f,F) f2pywrap##f +#endif +#else +#if defined(UPPERCASE_FORTRAN) +#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F##_ +#else +#define F_WRAPPEDFUNC(f,F) f2pywrap##f##_ +#endif +#endif +#endif +#if defined(UNDERSCORE_G77) +#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f##_,F##_) +#else +#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f,F) +#endif +""" +cppmacros['F_MODFUNC'] = """\ +#if defined(F90MOD2CCONV1) /*E.g. Compaq Fortran */ +#if defined(NO_APPEND_FORTRAN) +#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f +#else +#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f ## _ +#endif +#endif + +#if defined(F90MOD2CCONV2) /*E.g. IBM XL Fortran, not tested though */ +#if defined(NO_APPEND_FORTRAN) +#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f +#else +#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f ## _ +#endif +#endif + +#if defined(F90MOD2CCONV3) /*E.g. MIPSPro Compilers */ +#if defined(NO_APPEND_FORTRAN) +#define F_MODFUNCNAME(m,f) f ## .in. ## m +#else +#define F_MODFUNCNAME(m,f) f ## .in. ## m ## _ +#endif +#endif +/* +#if defined(UPPERCASE_FORTRAN) +#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(M,F) +#else +#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(m,f) +#endif +*/ + +#define F_MODFUNC(m,f) (*(f2pymodstruct##m##.##f)) +""" +cppmacros['SWAPUNSAFE'] = """\ +#define SWAP(a,b) (size_t)(a) = ((size_t)(a) ^ (size_t)(b));\\ + (size_t)(b) = ((size_t)(a) ^ (size_t)(b));\\ + (size_t)(a) = ((size_t)(a) ^ (size_t)(b)) +""" +cppmacros['SWAP'] = """\ +#define SWAP(a,b,t) {\\ + t *c;\\ + c = a;\\ + a = b;\\ + b = c;} +""" +# cppmacros['ISCONTIGUOUS']='#define ISCONTIGUOUS(m) (PyArray_FLAGS(m) & +# NPY_ARRAY_C_CONTIGUOUS)' +cppmacros['PRINTPYOBJERR'] = """\ +#define PRINTPYOBJERR(obj)\\ + fprintf(stderr,\"#modulename#.error is related to \");\\ + PyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\ + fprintf(stderr,\"\\n\"); +""" +cppmacros['MINMAX'] = """\ +#ifndef max +#define max(a,b) ((a > b) ? (a) : (b)) +#endif +#ifndef min +#define min(a,b) ((a < b) ? (a) : (b)) +#endif +#ifndef MAX +#define MAX(a,b) ((a > b) ? 
(a) : (b)) +#endif +#ifndef MIN +#define MIN(a,b) ((a < b) ? (a) : (b)) +#endif +""" +needs['len..'] = ['f2py_size'] +cppmacros['len..'] = """\ +#define rank(var) var ## _Rank +#define shape(var,dim) var ## _Dims[dim] +#define old_rank(var) (PyArray_NDIM((PyArrayObject *)(capi_ ## var ## _tmp))) +#define old_shape(var,dim) PyArray_DIM(((PyArrayObject *)(capi_ ## var ## _tmp)),dim) +#define fshape(var,dim) shape(var,rank(var)-dim-1) +#define len(var) shape(var,0) +#define flen(var) fshape(var,0) +#define old_size(var) PyArray_SIZE((PyArrayObject *)(capi_ ## var ## _tmp)) +/* #define index(i) capi_i ## i */ +#define slen(var) capi_ ## var ## _len +#define size(var, ...) f2py_size((PyArrayObject *)(capi_ ## var ## _tmp), ## __VA_ARGS__, -1) +""" +needs['f2py_size'] = ['stdarg.h'] +cfuncs['f2py_size'] = """\ +static int f2py_size(PyArrayObject* var, ...) +{ + npy_int sz = 0; + npy_int dim; + npy_int rank; + va_list argp; + va_start(argp, var); + dim = va_arg(argp, npy_int); + if (dim==-1) + { + sz = PyArray_SIZE(var); + } + else + { + rank = PyArray_NDIM(var); + if (dim>=1 && dim<=rank) + sz = PyArray_DIM(var, dim-1); + else + fprintf(stderr, \"f2py_size: 2nd argument value=%d fails to satisfy 1<=value<=%d. Result will be 0.\\n\", dim, rank); + } + va_end(argp); + return sz; +} +""" + +cppmacros[ + 'pyobj_from_char1'] = '#define pyobj_from_char1(v) (PyLong_FromLong(v))' +cppmacros[ + 'pyobj_from_short1'] = '#define pyobj_from_short1(v) (PyLong_FromLong(v))' +needs['pyobj_from_int1'] = ['signed_char'] +cppmacros['pyobj_from_int1'] = '#define pyobj_from_int1(v) (PyLong_FromLong(v))' +cppmacros[ + 'pyobj_from_long1'] = '#define pyobj_from_long1(v) (PyLong_FromLong(v))' +needs['pyobj_from_long_long1'] = ['long_long'] +cppmacros['pyobj_from_long_long1'] = """\ +#ifdef HAVE_LONG_LONG +#define pyobj_from_long_long1(v) (PyLong_FromLongLong(v)) +#else +#warning HAVE_LONG_LONG is not available. Redefining pyobj_from_long_long. 
+#define pyobj_from_long_long1(v) (PyLong_FromLong(v)) +#endif +""" +needs['pyobj_from_long_double1'] = ['long_double'] +cppmacros[ + 'pyobj_from_long_double1'] = '#define pyobj_from_long_double1(v) (PyFloat_FromDouble(v))' +cppmacros[ + 'pyobj_from_double1'] = '#define pyobj_from_double1(v) (PyFloat_FromDouble(v))' +cppmacros[ + 'pyobj_from_float1'] = '#define pyobj_from_float1(v) (PyFloat_FromDouble(v))' +needs['pyobj_from_complex_long_double1'] = ['complex_long_double'] +cppmacros[ + 'pyobj_from_complex_long_double1'] = '#define pyobj_from_complex_long_double1(v) (PyComplex_FromDoubles(v.r,v.i))' +needs['pyobj_from_complex_double1'] = ['complex_double'] +cppmacros[ + 'pyobj_from_complex_double1'] = '#define pyobj_from_complex_double1(v) (PyComplex_FromDoubles(v.r,v.i))' +needs['pyobj_from_complex_float1'] = ['complex_float'] +cppmacros[ + 'pyobj_from_complex_float1'] = '#define pyobj_from_complex_float1(v) (PyComplex_FromDoubles(v.r,v.i))' +needs['pyobj_from_string1'] = ['string'] +cppmacros[ + 'pyobj_from_string1'] = '#define pyobj_from_string1(v) (PyUnicode_FromString((char *)v))' +needs['pyobj_from_string1size'] = ['string'] +cppmacros[ + 'pyobj_from_string1size'] = '#define pyobj_from_string1size(v,len) (PyUnicode_FromStringAndSize((char *)v, len))' +needs['TRYPYARRAYTEMPLATE'] = ['PRINTPYOBJERR'] +cppmacros['TRYPYARRAYTEMPLATE'] = """\ +/* New SciPy */ +#define TRYPYARRAYTEMPLATECHAR case NPY_STRING: *(char *)(PyArray_DATA(arr))=*v; break; +#define TRYPYARRAYTEMPLATELONG case NPY_LONG: *(long *)(PyArray_DATA(arr))=*v; break; +#define TRYPYARRAYTEMPLATEOBJECT case NPY_OBJECT: PyArray_SETITEM(arr,PyArray_DATA(arr),pyobj_from_ ## ctype ## 1(*v)); break; + +#define TRYPYARRAYTEMPLATE(ctype,typecode) \\ + PyArrayObject *arr = NULL;\\ + if (!obj) return -2;\\ + if (!PyArray_Check(obj)) return -1;\\ + if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\ + if (PyArray_DESCR(arr)->type==typecode) {*(ctype *)(PyArray_DATA(arr))=*v; return 1;}\\ + switch (PyArray_TYPE(arr)) {\\ + case NPY_DOUBLE: *(double *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_INT: *(int *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_LONG: *(long *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_FLOAT: *(float *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_CDOUBLE: *(double *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_CFLOAT: *(float *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_BOOL: *(npy_bool *)(PyArray_DATA(arr))=(*v!=0); break;\\ + case NPY_UBYTE: *(unsigned char *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_BYTE: *(signed char *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_SHORT: *(short *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_USHORT: *(npy_ushort *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_UINT: *(npy_uint *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_ULONG: *(npy_ulong *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_LONGLONG: *(npy_longlong *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_CLONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_ ## ctype ## 1(*v)); break;\\ + default: return -2;\\ + };\\ + return 1 +""" + +needs['TRYCOMPLEXPYARRAYTEMPLATE'] = ['PRINTPYOBJERR'] +cppmacros['TRYCOMPLEXPYARRAYTEMPLATE'] = """\ +#define TRYCOMPLEXPYARRAYTEMPLATEOBJECT case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_complex_ 
## ctype ## 1((*v))); break; +#define TRYCOMPLEXPYARRAYTEMPLATE(ctype,typecode)\\ + PyArrayObject *arr = NULL;\\ + if (!obj) return -2;\\ + if (!PyArray_Check(obj)) return -1;\\ + if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYCOMPLEXPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\ + if (PyArray_DESCR(arr)->type==typecode) {\\ + *(ctype *)(PyArray_DATA(arr))=(*v).r;\\ + *(ctype *)(PyArray_DATA(arr)+sizeof(ctype))=(*v).i;\\ + return 1;\\ + }\\ + switch (PyArray_TYPE(arr)) {\\ + case NPY_CDOUBLE: *(double *)(PyArray_DATA(arr))=(*v).r;*(double *)(PyArray_DATA(arr)+sizeof(double))=(*v).i;break;\\ + case NPY_CFLOAT: *(float *)(PyArray_DATA(arr))=(*v).r;*(float *)(PyArray_DATA(arr)+sizeof(float))=(*v).i;break;\\ + case NPY_DOUBLE: *(double *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_LONG: *(long *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_FLOAT: *(float *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_INT: *(int *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_SHORT: *(short *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_UBYTE: *(unsigned char *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_BYTE: *(signed char *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_BOOL: *(npy_bool *)(PyArray_DATA(arr))=((*v).r!=0 && (*v).i!=0); break;\\ + case NPY_USHORT: *(npy_ushort *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_UINT: *(npy_uint *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_ULONG: *(npy_ulong *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_LONGLONG: *(npy_longlong *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_CLONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r;*(npy_longdouble *)(PyArray_DATA(arr)+sizeof(npy_longdouble))=(*v).i;break;\\ + case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_complex_ ## ctype ## 1((*v))); break;\\ + default: return -2;\\ + };\\ + return -1; +""" +# cppmacros['NUMFROMARROBJ']="""\ +# define NUMFROMARROBJ(typenum,ctype) \\ +# if (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\ +# else arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\ +# if (arr) {\\ +# if (PyArray_TYPE(arr)==NPY_OBJECT) {\\ +# if (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\ +# goto capi_fail;\\ +# } else {\\ +# (PyArray_DESCR(arr)->cast[typenum])(PyArray_DATA(arr),1,(char*)v,1,1);\\ +# }\\ +# if ((PyObject *)arr != obj) { Py_DECREF(arr); }\\ +# return 1;\\ +# } +# """ +# XXX: Note that CNUMFROMARROBJ is identical with NUMFROMARROBJ +# cppmacros['CNUMFROMARROBJ']="""\ +# define CNUMFROMARROBJ(typenum,ctype) \\ +# if (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\ +# else arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\ +# if (arr) {\\ +# if (PyArray_TYPE(arr)==NPY_OBJECT) {\\ +# if (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\ +# goto capi_fail;\\ +# } else {\\ +# (PyArray_DESCR(arr)->cast[typenum])((void *)(PyArray_DATA(arr)),1,(void *)(v),1,1);\\ +# }\\ +# if ((PyObject *)arr != obj) { Py_DECREF(arr); }\\ +# return 1;\\ +# } +# """ + + +needs['GETSTRFROMPYTUPLE'] = ['STRINGCOPYN', 'PRINTPYOBJERR'] +cppmacros['GETSTRFROMPYTUPLE'] = """\ +#define GETSTRFROMPYTUPLE(tuple,index,str,len) {\\ + PyObject *rv_cb_str = PyTuple_GetItem((tuple),(index));\\ + if (rv_cb_str == NULL)\\ + goto capi_fail;\\ + if (PyBytes_Check(rv_cb_str)) {\\ + str[len-1]='\\0';\\ + 
STRINGCOPYN((str),PyBytes_AS_STRING((PyBytesObject*)rv_cb_str),(len));\\ + } else {\\ + PRINTPYOBJERR(rv_cb_str);\\ + PyErr_SetString(#modulename#_error,\"string object expected\");\\ + goto capi_fail;\\ + }\\ + } +""" +cppmacros['GETSCALARFROMPYTUPLE'] = """\ +#define GETSCALARFROMPYTUPLE(tuple,index,var,ctype,mess) {\\ + if ((capi_tmp = PyTuple_GetItem((tuple),(index)))==NULL) goto capi_fail;\\ + if (!(ctype ## _from_pyobj((var),capi_tmp,mess)))\\ + goto capi_fail;\\ + } +""" + +cppmacros['FAILNULL'] = """\\ +#define FAILNULL(p) do { \\ + if ((p) == NULL) { \\ + PyErr_SetString(PyExc_MemoryError, "NULL pointer found"); \\ + goto capi_fail; \\ + } \\ +} while (0) +""" +needs['MEMCOPY'] = ['string.h', 'FAILNULL'] +cppmacros['MEMCOPY'] = """\ +#define MEMCOPY(to,from,n)\\ + do { FAILNULL(to); FAILNULL(from); (void)memcpy(to,from,n); } while (0) +""" +cppmacros['STRINGMALLOC'] = """\ +#define STRINGMALLOC(str,len)\\ + if ((str = (string)malloc(sizeof(char)*(len+1))) == NULL) {\\ + PyErr_SetString(PyExc_MemoryError, \"out of memory\");\\ + goto capi_fail;\\ + } else {\\ + (str)[len] = '\\0';\\ + } +""" +cppmacros['STRINGFREE'] = """\ +#define STRINGFREE(str) do {if (!(str == NULL)) free(str);} while (0) +""" +needs['STRINGCOPYN'] = ['string.h', 'FAILNULL'] +cppmacros['STRINGCOPYN'] = """\ +#define STRINGCOPYN(to,from,buf_size) \\ + do { \\ + int _m = (buf_size); \\ + char *_to = (to); \\ + char *_from = (from); \\ + FAILNULL(_to); FAILNULL(_from); \\ + (void)strncpy(_to, _from, sizeof(char)*_m); \\ + _to[_m-1] = '\\0'; \\ + /* Padding with spaces instead of nulls */ \\ + for (_m -= 2; _m >= 0 && _to[_m] == '\\0'; _m--) { \\ + _to[_m] = ' '; \\ + } \\ + } while (0) +""" +needs['STRINGCOPY'] = ['string.h', 'FAILNULL'] +cppmacros['STRINGCOPY'] = """\ +#define STRINGCOPY(to,from)\\ + do { FAILNULL(to); FAILNULL(from); (void)strcpy(to,from); } while (0) +""" +cppmacros['CHECKGENERIC'] = """\ +#define CHECKGENERIC(check,tcheck,name) \\ + if (!(check)) {\\ + PyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\ + /*goto capi_fail;*/\\ + } else """ +cppmacros['CHECKARRAY'] = """\ +#define CHECKARRAY(check,tcheck,name) \\ + if (!(check)) {\\ + PyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\ + /*goto capi_fail;*/\\ + } else """ +cppmacros['CHECKSTRING'] = """\ +#define CHECKSTRING(check,tcheck,name,show,var)\\ + if (!(check)) {\\ + char errstring[256];\\ + sprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, slen(var), var);\\ + PyErr_SetString(#modulename#_error, errstring);\\ + /*goto capi_fail;*/\\ + } else """ +cppmacros['CHECKSCALAR'] = """\ +#define CHECKSCALAR(check,tcheck,name,show,var)\\ + if (!(check)) {\\ + char errstring[256];\\ + sprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, var);\\ + PyErr_SetString(#modulename#_error,errstring);\\ + /*goto capi_fail;*/\\ + } else """ +# cppmacros['CHECKDIMS']="""\ +# define CHECKDIMS(dims,rank) \\ +# for (int i=0;i<(rank);i++)\\ +# if (dims[i]<0) {\\ +# fprintf(stderr,\"Unspecified array argument requires a complete dimension specification.\\n\");\\ +# goto capi_fail;\\ +# } +# """ +cppmacros[ + 'ARRSIZE'] = '#define ARRSIZE(dims,rank) (_PyArray_multiply_list(dims,rank))' +cppmacros['OLDPYNUM'] = """\ +#ifdef OLDPYNUM +#error You need to install NumPy version 0.13 or higher. 
See https://scipy.org/install.html +#endif +""" +cppmacros["F2PY_THREAD_LOCAL_DECL"] = """\ +#ifndef F2PY_THREAD_LOCAL_DECL +#if defined(_MSC_VER) \\ + || defined(_WIN32) || defined(_WIN64) \\ + || defined(__MINGW32__) || defined(__MINGW64__) +#define F2PY_THREAD_LOCAL_DECL __declspec(thread) +#elif defined(__STDC_VERSION__) \\ + && (__STDC_VERSION__ >= 201112L) \\ + && !defined(__STDC_NO_THREADS__) \\ + && (!defined(__GLIBC__) || __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ > 12)) +/* __STDC_NO_THREADS__ was first defined in a maintenance release of glibc 2.12, + see https://lists.gnu.org/archive/html/commit-hurd/2012-07/msg00180.html, + so `!defined(__STDC_NO_THREADS__)` may give false positive for the existence + of `threads.h` when using an older release of glibc 2.12 */ +#include +#define F2PY_THREAD_LOCAL_DECL thread_local +#elif defined(__GNUC__) \\ + && (__GNUC__ > 4 || (__GNUC__ == 4 && (__GNUC_MINOR__ >= 4))) +#define F2PY_THREAD_LOCAL_DECL __thread +#endif +#endif +""" +################# C functions ############### + +cfuncs['calcarrindex'] = """\ +static int calcarrindex(int *i,PyArrayObject *arr) { + int k,ii = i[0]; + for (k=1; k < PyArray_NDIM(arr); k++) + ii += (ii*(PyArray_DIM(arr,k) - 1)+i[k]); /* assuming contiguous arr */ + return ii; +}""" +cfuncs['calcarrindextr'] = """\ +static int calcarrindextr(int *i,PyArrayObject *arr) { + int k,ii = i[PyArray_NDIM(arr)-1]; + for (k=1; k < PyArray_NDIM(arr); k++) + ii += (ii*(PyArray_DIM(arr,PyArray_NDIM(arr)-k-1) - 1)+i[PyArray_NDIM(arr)-k-1]); /* assuming contiguous arr */ + return ii; +}""" +cfuncs['forcomb'] = """\ +static struct { int nd;npy_intp *d;int *i,*i_tr,tr; } forcombcache; +static int initforcomb(npy_intp *dims,int nd,int tr) { + int k; + if (dims==NULL) return 0; + if (nd<0) return 0; + forcombcache.nd = nd; + forcombcache.d = dims; + forcombcache.tr = tr; + if ((forcombcache.i = (int *)malloc(sizeof(int)*nd))==NULL) return 0; + if ((forcombcache.i_tr = (int *)malloc(sizeof(int)*nd))==NULL) return 0; + for (k=1;kreal; + (*v).i = ((npy_clongdouble *)PyArray_DATA(obj))->imag; + return 1; + } + } + if (complex_double_from_pyobj(&cd,obj,errmess)) { + (*v).r = (long_double)cd.r; + (*v).i = (long_double)cd.i; + return 1; + } + return 0; +} +""" + + +needs['complex_double_from_pyobj'] = ['complex_double'] +cfuncs['complex_double_from_pyobj'] = """\ +static int +complex_double_from_pyobj(complex_double* v, PyObject *obj, const char *errmess) { + Py_complex c; + if (PyComplex_Check(obj)) { + c = PyComplex_AsCComplex(obj); + (*v).r = c.real; + (*v).i = c.imag; + return 1; + } + if (PyArray_IsScalar(obj, ComplexFloating)) { + if (PyArray_IsScalar(obj, CFloat)) { + npy_cfloat new; + PyArray_ScalarAsCtype(obj, &new); + (*v).r = (double)new.real; + (*v).i = (double)new.imag; + } + else if (PyArray_IsScalar(obj, CLongDouble)) { + npy_clongdouble new; + PyArray_ScalarAsCtype(obj, &new); + (*v).r = (double)new.real; + (*v).i = (double)new.imag; + } + else { /* if (PyArray_IsScalar(obj, CDouble)) */ + PyArray_ScalarAsCtype(obj, v); + } + return 1; + } + if (PyArray_CheckScalar(obj)) { /* 0-dim array or still array scalar */ + PyObject *arr; + if (PyArray_Check(obj)) { + arr = PyArray_Cast((PyArrayObject *)obj, NPY_CDOUBLE); + } + else { + arr = PyArray_FromScalar(obj, PyArray_DescrFromType(NPY_CDOUBLE)); + } + if (arr == NULL) { + return 0; + } + (*v).r = ((npy_cdouble *)PyArray_DATA(arr))->real; + (*v).i = ((npy_cdouble *)PyArray_DATA(arr))->imag; + Py_DECREF(arr); + return 1; + } + /* Python does not provide 
PyNumber_Complex function :-( */ + (*v).i = 0.0; + if (PyFloat_Check(obj)) { + (*v).r = PyFloat_AsDouble(obj); + return !((*v).r == -1.0 && PyErr_Occurred()); + } + if (PyLong_Check(obj)) { + (*v).r = PyLong_AsDouble(obj); + return !((*v).r == -1.0 && PyErr_Occurred()); + } + if (PySequence_Check(obj) && !(PyBytes_Check(obj) || PyUnicode_Check(obj))) { + PyObject *tmp = PySequence_GetItem(obj,0); + if (tmp) { + if (complex_double_from_pyobj(v,tmp,errmess)) { + Py_DECREF(tmp); + return 1; + } + Py_DECREF(tmp); + } + } + { + PyObject* err = PyErr_Occurred(); + if (err==NULL) + err = PyExc_TypeError; + PyErr_SetString(err,errmess); + } + return 0; +} +""" + + +needs['complex_float_from_pyobj'] = [ + 'complex_float', 'complex_double_from_pyobj'] +cfuncs['complex_float_from_pyobj'] = """\ +static int +complex_float_from_pyobj(complex_float* v,PyObject *obj,const char *errmess) +{ + complex_double cd={0.0,0.0}; + if (complex_double_from_pyobj(&cd,obj,errmess)) { + (*v).r = (float)cd.r; + (*v).i = (float)cd.i; + return 1; + } + return 0; +} +""" + + +needs['try_pyarr_from_char'] = ['pyobj_from_char1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_char'] = 'static int try_pyarr_from_char(PyObject* obj,char* v) {\n TRYPYARRAYTEMPLATE(char,\'c\');\n}\n' +needs['try_pyarr_from_signed_char'] = ['TRYPYARRAYTEMPLATE', 'unsigned_char'] +cfuncs[ + 'try_pyarr_from_unsigned_char'] = 'static int try_pyarr_from_unsigned_char(PyObject* obj,unsigned_char* v) {\n TRYPYARRAYTEMPLATE(unsigned_char,\'b\');\n}\n' +needs['try_pyarr_from_signed_char'] = ['TRYPYARRAYTEMPLATE', 'signed_char'] +cfuncs[ + 'try_pyarr_from_signed_char'] = 'static int try_pyarr_from_signed_char(PyObject* obj,signed_char* v) {\n TRYPYARRAYTEMPLATE(signed_char,\'1\');\n}\n' +needs['try_pyarr_from_short'] = ['pyobj_from_short1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_short'] = 'static int try_pyarr_from_short(PyObject* obj,short* v) {\n TRYPYARRAYTEMPLATE(short,\'s\');\n}\n' +needs['try_pyarr_from_int'] = ['pyobj_from_int1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_int'] = 'static int try_pyarr_from_int(PyObject* obj,int* v) {\n TRYPYARRAYTEMPLATE(int,\'i\');\n}\n' +needs['try_pyarr_from_long'] = ['pyobj_from_long1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_long'] = 'static int try_pyarr_from_long(PyObject* obj,long* v) {\n TRYPYARRAYTEMPLATE(long,\'l\');\n}\n' +needs['try_pyarr_from_long_long'] = [ + 'pyobj_from_long_long1', 'TRYPYARRAYTEMPLATE', 'long_long'] +cfuncs[ + 'try_pyarr_from_long_long'] = 'static int try_pyarr_from_long_long(PyObject* obj,long_long* v) {\n TRYPYARRAYTEMPLATE(long_long,\'L\');\n}\n' +needs['try_pyarr_from_float'] = ['pyobj_from_float1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_float'] = 'static int try_pyarr_from_float(PyObject* obj,float* v) {\n TRYPYARRAYTEMPLATE(float,\'f\');\n}\n' +needs['try_pyarr_from_double'] = ['pyobj_from_double1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_double'] = 'static int try_pyarr_from_double(PyObject* obj,double* v) {\n TRYPYARRAYTEMPLATE(double,\'d\');\n}\n' +needs['try_pyarr_from_complex_float'] = [ + 'pyobj_from_complex_float1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_float'] +cfuncs[ + 'try_pyarr_from_complex_float'] = 'static int try_pyarr_from_complex_float(PyObject* obj,complex_float* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(float,\'F\');\n}\n' +needs['try_pyarr_from_complex_double'] = [ + 'pyobj_from_complex_double1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_double'] +cfuncs[ + 'try_pyarr_from_complex_double'] = 'static int 
try_pyarr_from_complex_double(PyObject* obj,complex_double* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(double,\'D\');\n}\n' + + +needs['create_cb_arglist'] = ['CFUNCSMESS', 'PRINTPYOBJERR', 'MINMAX'] +# create the list of arguments to be used when calling back to python +cfuncs['create_cb_arglist'] = """\ +static int +create_cb_arglist(PyObject* fun, PyTupleObject* xa , const int maxnofargs, + const int nofoptargs, int *nofargs, PyTupleObject **args, + const char *errmess) +{ + PyObject *tmp = NULL; + PyObject *tmp_fun = NULL; + Py_ssize_t tot, opt, ext, siz, i, di = 0; + CFUNCSMESS(\"create_cb_arglist\\n\"); + tot=opt=ext=siz=0; + /* Get the total number of arguments */ + if (PyFunction_Check(fun)) { + tmp_fun = fun; + Py_INCREF(tmp_fun); + } + else { + di = 1; + if (PyObject_HasAttrString(fun,\"im_func\")) { + tmp_fun = PyObject_GetAttrString(fun,\"im_func\"); + } + else if (PyObject_HasAttrString(fun,\"__call__\")) { + tmp = PyObject_GetAttrString(fun,\"__call__\"); + if (PyObject_HasAttrString(tmp,\"im_func\")) + tmp_fun = PyObject_GetAttrString(tmp,\"im_func\"); + else { + tmp_fun = fun; /* built-in function */ + Py_INCREF(tmp_fun); + tot = maxnofargs; + if (PyCFunction_Check(fun)) { + /* In case the function has a co_argcount (like on PyPy) */ + di = 0; + } + if (xa != NULL) + tot += PyTuple_Size((PyObject *)xa); + } + Py_XDECREF(tmp); + } + else if (PyFortran_Check(fun) || PyFortran_Check1(fun)) { + tot = maxnofargs; + if (xa != NULL) + tot += PyTuple_Size((PyObject *)xa); + tmp_fun = fun; + Py_INCREF(tmp_fun); + } + else if (F2PyCapsule_Check(fun)) { + tot = maxnofargs; + if (xa != NULL) + ext = PyTuple_Size((PyObject *)xa); + if(ext>0) { + fprintf(stderr,\"extra arguments tuple cannot be used with CObject call-back\\n\"); + goto capi_fail; + } + tmp_fun = fun; + Py_INCREF(tmp_fun); + } + } + + if (tmp_fun == NULL) { + fprintf(stderr, + \"Call-back argument must be function|instance|instance.__call__|f2py-function \" + \"but got %s.\\n\", + ((fun == NULL) ? 
\"NULL\" : Py_TYPE(fun)->tp_name)); + goto capi_fail; + } + + if (PyObject_HasAttrString(tmp_fun,\"__code__\")) { + if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"__code__\"),\"co_argcount\")) { + PyObject *tmp_argcount = PyObject_GetAttrString(tmp,\"co_argcount\"); + Py_DECREF(tmp); + if (tmp_argcount == NULL) { + goto capi_fail; + } + tot = PyLong_AsSsize_t(tmp_argcount) - di; + Py_DECREF(tmp_argcount); + } + } + /* Get the number of optional arguments */ + if (PyObject_HasAttrString(tmp_fun,\"__defaults__\")) { + if (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"__defaults__\"))) + opt = PyTuple_Size(tmp); + Py_XDECREF(tmp); + } + /* Get the number of extra arguments */ + if (xa != NULL) + ext = PyTuple_Size((PyObject *)xa); + /* Calculate the size of call-backs argument list */ + siz = MIN(maxnofargs+ext,tot); + *nofargs = MAX(0,siz-ext); + +#ifdef DEBUGCFUNCS + fprintf(stderr, + \"debug-capi:create_cb_arglist:maxnofargs(-nofoptargs),\" + \"tot,opt,ext,siz,nofargs = %d(-%d), %zd, %zd, %zd, %zd, %d\\n\", + maxnofargs, nofoptargs, tot, opt, ext, siz, *nofargs); +#endif + + if (siz < tot-opt) { + fprintf(stderr, + \"create_cb_arglist: Failed to build argument list \" + \"(siz) with enough arguments (tot-opt) required by \" + \"user-supplied function (siz,tot,opt=%zd, %zd, %zd).\\n\", + siz, tot, opt); + goto capi_fail; + } + + /* Initialize argument list */ + *args = (PyTupleObject *)PyTuple_New(siz); + for (i=0;i<*nofargs;i++) { + Py_INCREF(Py_None); + PyTuple_SET_ITEM((PyObject *)(*args),i,Py_None); + } + if (xa != NULL) + for (i=(*nofargs);i 0: + if outneeds[n][0] not in needs: + out.append(outneeds[n][0]) + del outneeds[n][0] + else: + flag = 0 + for k in outneeds[n][1:]: + if k in needs[outneeds[n][0]]: + flag = 1 + break + if flag: + outneeds[n] = outneeds[n][1:] + [outneeds[n][0]] + else: + out.append(outneeds[n][0]) + del outneeds[n][0] + if saveout and (0 not in map(lambda x, y: x == y, saveout, outneeds[n])) \ + and outneeds[n] != []: + print(n, saveout) + errmess( + 'get_needs: no progress in sorting needs, probably circular dependence, skipping.\n') + out = out + saveout + break + saveout = copy.copy(outneeds[n]) + if out == []: + out = [n] + res[n] = out + return res diff --git a/MLPY/Lib/site-packages/numpy/f2py/common_rules.py b/MLPY/Lib/site-packages/numpy/f2py/common_rules.py new file mode 100644 index 0000000000000000000000000000000000000000..64e957af59c7bf690afccc1fa49808d328c01274 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/common_rules.py @@ -0,0 +1,145 @@ +#!/usr/bin/env python3 +""" + +Build common block mechanism for f2py2e. + +Copyright 2000 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +$Date: 2005/05/06 10:57:33 $ +Pearu Peterson + +""" +from . import __version__ +f2py_version = __version__.version + +from .auxfuncs import ( + hasbody, hascommon, hasnote, isintent_hide, outmess +) +from . import capi_maps +from . 
import func2subr +from .crackfortran import rmbadname + + +def findcommonblocks(block, top=1): + ret = [] + if hascommon(block): + for key, value in block['common'].items(): + vars_ = {v: block['vars'][v] for v in value} + ret.append((key, value, vars_)) + elif hasbody(block): + for b in block['body']: + ret = ret + findcommonblocks(b, 0) + if top: + tret = [] + names = [] + for t in ret: + if t[0] not in names: + names.append(t[0]) + tret.append(t) + return tret + return ret + + +def buildhooks(m): + ret = {'commonhooks': [], 'initcommonhooks': [], + 'docs': ['"COMMON blocks:\\n"']} + fwrap = [''] + + def fadd(line, s=fwrap): + s[0] = '%s\n %s' % (s[0], line) + chooks = [''] + + def cadd(line, s=chooks): + s[0] = '%s\n%s' % (s[0], line) + ihooks = [''] + + def iadd(line, s=ihooks): + s[0] = '%s\n%s' % (s[0], line) + doc = [''] + + def dadd(line, s=doc): + s[0] = '%s\n%s' % (s[0], line) + for (name, vnames, vars) in findcommonblocks(m): + lower_name = name.lower() + hnames, inames = [], [] + for n in vnames: + if isintent_hide(vars[n]): + hnames.append(n) + else: + inames.append(n) + if hnames: + outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n\t\t Hidden: %s\n' % ( + name, ','.join(inames), ','.join(hnames))) + else: + outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n' % ( + name, ','.join(inames))) + fadd('subroutine f2pyinit%s(setupfunc)' % name) + fadd('external setupfunc') + for n in vnames: + fadd(func2subr.var2fixfortran(vars, n)) + if name == '_BLNK_': + fadd('common %s' % (','.join(vnames))) + else: + fadd('common /%s/ %s' % (name, ','.join(vnames))) + fadd('call setupfunc(%s)' % (','.join(inames))) + fadd('end\n') + cadd('static FortranDataDef f2py_%s_def[] = {' % (name)) + idims = [] + for n in inames: + ct = capi_maps.getctype(vars[n]) + at = capi_maps.c2capi_map[ct] + dm = capi_maps.getarrdims(n, vars[n]) + if dm['dims']: + idims.append('(%s)' % (dm['dims'])) + else: + idims.append('') + dms = dm['dims'].strip() + if not dms: + dms = '-1' + cadd('\t{\"%s\",%s,{{%s}},%s},' % (n, dm['rank'], dms, at)) + cadd('\t{NULL}\n};') + inames1 = rmbadname(inames) + inames1_tps = ','.join(['char *' + s for s in inames1]) + cadd('static void f2py_setup_%s(%s) {' % (name, inames1_tps)) + cadd('\tint i_f2py=0;') + for n in inames1: + cadd('\tf2py_%s_def[i_f2py++].data = %s;' % (name, n)) + cadd('}') + if '_' in lower_name: + F_FUNC = 'F_FUNC_US' + else: + F_FUNC = 'F_FUNC' + cadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void(*)(%s));' + % (F_FUNC, lower_name, name.upper(), + ','.join(['char*'] * len(inames1)))) + cadd('static void f2py_init_%s(void) {' % name) + cadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);' + % (F_FUNC, lower_name, name.upper(), name)) + cadd('}\n') + iadd('\ttmp = PyFortranObject_New(f2py_%s_def,f2py_init_%s);' % (name, name)) + iadd('\tF2PyDict_SetItemString(d, \"%s\", tmp);' % name) + iadd('\tPy_DECREF(tmp);') + tname = name.replace('_', '\\_') + dadd('\\subsection{Common block \\texttt{%s}}\n' % (tname)) + dadd('\\begin{description}') + for n in inames: + dadd('\\item[]{{}\\verb@%s@{}}' % + (capi_maps.getarrdocsign(n, vars[n]))) + if hasnote(vars[n]): + note = vars[n]['note'] + if isinstance(note, list): + note = '\n'.join(note) + dadd('--- %s' % (note)) + dadd('\\end{description}') + ret['docs'].append( + '"\t/%s/ %s\\n"' % (name, ','.join(map(lambda v, d: v + d, inames, idims)))) + ret['commonhooks'] = chooks + ret['initcommonhooks'] = ihooks + ret['latexdoc'] = doc[0] + if len(ret['docs']) <= 1: + ret['docs'] = '' + return 
ret, fwrap[0]
diff --git a/MLPY/Lib/site-packages/numpy/f2py/crackfortran.py b/MLPY/Lib/site-packages/numpy/f2py/crackfortran.py
new file mode 100644
index 0000000000000000000000000000000000000000..767457d286ab4ce19f8911dd28181405c85a361b
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/f2py/crackfortran.py
@@ -0,0 +1,3426 @@
+#!/usr/bin/env python3
+"""
+crackfortran --- read fortran (77,90) code and extract declaration information.
+
+Copyright 1999-2004 Pearu Peterson all rights reserved,
+Pearu Peterson
+Permission to use, modify, and distribute this software is given under the
+terms of the NumPy License.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+$Date: 2005/09/27 07:13:49 $
+Pearu Peterson
+
+
+Usage of crackfortran:
+======================
+Command line keys: -quiet,-verbose,-fix,-f77,-f90,-show,-h <pyffilename>
+                   -m <module name for f77 routines>,--ignore-contains
+Functions: crackfortran, crack2fortran
+The following Fortran statements/constructions are supported
+(or will be if needed):
+    block data,byte,call,character,common,complex,contains,data,
+    dimension,double complex,double precision,end,external,function,
+    implicit,integer,intent,interface,intrinsic,
+    logical,module,optional,parameter,private,public,
+    program,real,(sequence?),subroutine,type,use,virtual,
+    include,pythonmodule
+Note: 'virtual' is mapped to 'dimension'.
+Note: 'implicit integer (z) static (z)' is 'implicit static (z)' (this is a minor bug).
+Note: code after 'contains' will be ignored until its scope ends.
+Note: 'common' statement is extended: dimensions are moved to variable definitions
+Note: f2py directive: <commentchar>f2py<line> is read as <line>
+Note: pythonmodule is introduced to represent Python module
+
+Usage:
+  `postlist=crackfortran(files)`
+  `postlist` contains declaration information read from the list of files `files`.
+  `crack2fortran(postlist)` returns a fortran code to be saved to pyf-file
+
+  `postlist` has the following structure:
+ *** it is a list of dictionaries containing `blocks':
+     B = {'block','body','vars','parent_block'[,'name','prefix','args','result',
+          'implicit','externals','interfaced','common','sortvars',
+          'commonvars','note']}
+     B['block'] = 'interface' | 'function' | 'subroutine' | 'module' |
+                  'program' | 'block data' | 'type' | 'pythonmodule' |
+                  'abstract interface'
+     B['body'] --- list containing `subblocks' with the same structure as `blocks'
+     B['parent_block'] --- dictionary of a parent block:
+                           C['body'][<index>]['parent_block'] is C
+     B['vars'] --- dictionary of variable definitions
+     B['sortvars'] --- dictionary of variable definitions sorted by dependence (independent first)
+     B['name'] --- name of the block (not if B['block']=='interface')
+     B['prefix'] --- prefix string (only if B['block']=='function')
+     B['args'] --- list of argument names if B['block']== 'function' | 'subroutine'
+     B['result'] --- name of the return value (only if B['block']=='function')
+     B['implicit'] --- dictionary {'a':<variable definition>,'b':...} | None
+     B['externals'] --- list of variables being external
+     B['interfaced'] --- list of variables being external and defined
+     B['common'] --- dictionary of common blocks (list of objects)
+     B['commonvars'] --- list of variables used in common blocks (dimensions are moved to variable definitions)
+     B['from'] --- string showing the 'parents' of the current block
+     B['use'] --- dictionary of modules used in current block:
+         {<modulename>:{['only':<0|1>],['map':{<oldname>:<newname>,...}]}}
+     B['note'] --- list of LaTeX comments on the block
+     B['f2pyenhancements'] --- optional dictionary
+          {'threadsafe':'','fortranname':<name>,
+           'callstatement':<C-expr>|<multi-line block>,
+           'callprotoargument':<C-typespec-list>,
+           'usercode':<multi-line block>|<list of multi-line blocks>,
+           'pymethoddef:<multi-line block>'
+           }
+     B['entry'] --- dictionary {entryname:argslist,..}
+     B['varnames'] --- list of variable names given in the order of reading the
+                       Fortran code, useful for derived types.
+     B['saved_interface'] --- a string of scanned routine signature, defines explicit interface
+ *** Variable definition is a dictionary
+     D = B['vars'][<name>] =
+         {'typespec'[,'attrspec','kindselector','charselector','=','typename']}
+     D['typespec'] = 'byte' | 'character' | 'complex' | 'double complex' |
+                     'double precision' | 'integer' | 'logical' | 'real' | 'type'
+     D['attrspec'] --- list of attributes (e.g.
'dimension(<arrayspec>)',
+                       'external','intent(in|out|inout|hide|c|callback|cache|aligned4|aligned8|aligned16)',
+                       'optional','required', etc)
+     K = D['kindselector'] = {['*','kind']} (only if D['typespec'] =
+         'complex' | 'integer' | 'logical' | 'real' )
+     C = D['charselector'] = {['*','len','kind']}
+         (only if D['typespec']=='character')
+     D['='] --- initialization expression string
+     D['typename'] --- name of the type if D['typespec']=='type'
+     D['dimension'] --- list of dimension bounds
+     D['intent'] --- list of intent specifications
+     D['depend'] --- list of variable names on which current variable depends on
+     D['check'] --- list of C-expressions; if C-expr returns zero, exception is raised
+     D['note'] --- list of LaTeX comments on the variable
+ *** Meaning of kind/char selectors (few examples):
+     D['typespec']*K['*']
+     D['typespec'](kind=K['kind'])
+     character*C['*']
+     character(len=C['len'],kind=C['kind'])
+     (see also fortran type declaration statement formats below)
+
+Fortran 90 type declaration statement format (F77 is subset of F90)
+====================================================================
+(Main source: IBM XL Fortran 5.1 Language Reference Manual)
+type declaration = <typespec> [[<attrspec>]::] <entitydecl>
+<typespec> = byte                          |
+             character[<charselector>]     |
+             complex[<kindselector>]       |
+             double complex                |
+             double precision              |
+             integer[<kindselector>]       |
+             logical[<kindselector>]       |
+             real[<kindselector>]          |
+             type(<typename>)
+<charselector> = * <charlen>               |
+             ([len=]<len>[,[kind=]<kind>]) |
+             (kind=<kind>[,len=<len>])
+<kindselector> = * <intlen>                |
+             ([kind=]<kind>)
+<attrspec> = comma separated list of attributes.
+             Only the following attributes are used in
+             building up the interface:
+                external
+                (parameter --- affects '=' key)
+                optional
+                intent
+             Other attributes are ignored.
+<intentspec> = in | out | inout
+<arrayspec> = comma separated list of dimension bounds.
+<entitydecl> = <name> [[*<charlen>][(<arrayspec>)] | [(<arrayspec>)]*<charlen>]
+                      [/<init_expr>/ | =<init_expr>] [,<entitydecl>]
+
+In addition, the following attributes are used: check,depend,note
+
+TODO:
+    * Apply 'parameter' attribute (e.g. 'integer parameter :: i=2' 'real x(i)'
+      -> 'real x(2)')
+      The above may be solved by creating an appropriate preprocessor program, for example.
+
+"""
+import io
+import sys
+import string
+import fileinput
+import re
+import os
+import copy
+import platform
+
+from . import __version__
+
+# The environment provided by auxfuncs.py is needed for some calls to eval.
+# As the needed functions cannot be determined by static inspection of the
+# code, it is safest to use import * pending a major refactoring of f2py.
+from .auxfuncs import *
+
+
+f2py_version = __version__.version
+
+# Global flags:
+strictf77 = 1           # Ignore `!' comments unless line[0]=='!'
+sourcecodeform = 'fix'  # 'fix','free'
+quiet = 0               # Be verbose if 0 (Obsolete: not used any more)
+verbose = 1             # Be quiet if 0, extra verbose if > 1.
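+# Usage sketch for the `postlist` structure documented above (illustrative
+# comment only; 'example.f90' is a hypothetical input file):
+#
+#     postlist = crackfortran(['example.f90'])
+#     for B in postlist:                      # top-level block dictionaries
+#         print(B['block'], B.get('name', ''))
+#         for name, D in B['vars'].items():   # variable definition dictionaries
+#             print('  ', name, D.get('typespec'), D.get('attrspec', []))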
+tabchar = 4 * ' ' +pyffilename = '' +f77modulename = '' +skipemptyends = 0 # for old F77 programs without 'program' statement +ignorecontains = 1 +dolowercase = 1 +debug = [] + +# Global variables +beginpattern = '' +currentfilename = '' +expectbegin = 1 +f90modulevars = {} +filepositiontext = '' +gotnextfile = 1 +groupcache = None +groupcounter = 0 +grouplist = {groupcounter: []} +groupname = '' +include_paths = [] +neededmodule = -1 +onlyfuncs = [] +previous_context = None +skipblocksuntil = -1 +skipfuncs = [] +skipfunctions = [] +usermodules = [] + + +def reset_global_f2py_vars(): + global groupcounter, grouplist, neededmodule, expectbegin + global skipblocksuntil, usermodules, f90modulevars, gotnextfile + global filepositiontext, currentfilename, skipfunctions, skipfuncs + global onlyfuncs, include_paths, previous_context + global strictf77, sourcecodeform, quiet, verbose, tabchar, pyffilename + global f77modulename, skipemptyends, ignorecontains, dolowercase, debug + + # flags + strictf77 = 1 + sourcecodeform = 'fix' + quiet = 0 + verbose = 1 + tabchar = 4 * ' ' + pyffilename = '' + f77modulename = '' + skipemptyends = 0 + ignorecontains = 1 + dolowercase = 1 + debug = [] + # variables + groupcounter = 0 + grouplist = {groupcounter: []} + neededmodule = -1 + expectbegin = 1 + skipblocksuntil = -1 + usermodules = [] + f90modulevars = {} + gotnextfile = 1 + filepositiontext = '' + currentfilename = '' + skipfunctions = [] + skipfuncs = [] + onlyfuncs = [] + include_paths = [] + previous_context = None + + +def outmess(line, flag=1): + global filepositiontext + + if not verbose: + return + if not quiet: + if flag: + sys.stdout.write(filepositiontext) + sys.stdout.write(line) + +re._MAXCACHE = 50 +defaultimplicitrules = {} +for c in "abcdefghopqrstuvwxyz$_": + defaultimplicitrules[c] = {'typespec': 'real'} +for c in "ijklmn": + defaultimplicitrules[c] = {'typespec': 'integer'} +del c +badnames = {} +invbadnames = {} +for n in ['int', 'double', 'float', 'char', 'short', 'long', 'void', 'case', 'while', + 'return', 'signed', 'unsigned', 'if', 'for', 'typedef', 'sizeof', 'union', + 'struct', 'static', 'register', 'new', 'break', 'do', 'goto', 'switch', + 'continue', 'else', 'inline', 'extern', 'delete', 'const', 'auto', + 'len', 'rank', 'shape', 'index', 'slen', 'size', '_i', + 'max', 'min', + 'flen', 'fshape', + 'string', 'complex_double', 'float_double', 'stdin', 'stderr', 'stdout', + 'type', 'default']: + badnames[n] = n + '_bn' + invbadnames[n + '_bn'] = n + + +def rmbadname1(name): + if name in badnames: + errmess('rmbadname1: Replacing "%s" with "%s".\n' % + (name, badnames[name])) + return badnames[name] + return name + + +def rmbadname(names): + return [rmbadname1(_m) for _m in names] + + +def undo_rmbadname1(name): + if name in invbadnames: + errmess('undo_rmbadname1: Replacing "%s" with "%s".\n' + % (name, invbadnames[name])) + return invbadnames[name] + return name + + +def undo_rmbadname(names): + return [undo_rmbadname1(_m) for _m in names] + + +def getextension(name): + i = name.rfind('.') + if i == -1: + return '' + if '\\' in name[i:]: + return '' + if '/' in name[i:]: + return '' + return name[i + 1:] + +is_f_file = re.compile(r'.*\.(for|ftn|f77|f)\Z', re.I).match +_has_f_header = re.compile(r'-\*-\s*fortran\s*-\*-', re.I).search +_has_f90_header = re.compile(r'-\*-\s*f90\s*-\*-', re.I).search +_has_fix_header = re.compile(r'-\*-\s*fix\s*-\*-', re.I).search +_free_f90_start = re.compile(r'[^c*]\s*[^\s\d\t]', re.I).match + + +def is_free_format(file): + """Check if file is 
in free format Fortran."""
+    # f90 allows both fixed and free format, assuming fixed unless
+    # signs of free format are detected.
+    result = 0
+    with open(file, 'r') as f:
+        line = f.readline()
+        n = 15  # the number of non-comment lines to scan for hints
+        if _has_f_header(line):
+            n = 0
+        elif _has_f90_header(line):
+            n = 0
+            result = 1
+        while n > 0 and line:
+            if line[0] != '!' and line.strip():
+                n -= 1
+                if (line[0] != '\t' and _free_f90_start(line[:5])) or line[-2:-1] == '&':
+                    result = 1
+                    break
+            line = f.readline()
+    return result
+
+
+# Read fortran (77,90) code
+def readfortrancode(ffile, dowithline=show, istop=1):
+    """
+    Read fortran codes from files and
+     1) Get rid of comments, line continuations, and empty lines; lower cases.
+     2) Call dowithline(line) on every line.
+     3) Recursively call itself when statement \"include '<filename>'\" is met.
+    """
+    global gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77
+    global beginpattern, quiet, verbose, dolowercase, include_paths
+
+    if not istop:
+        saveglobals = gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\
+            beginpattern, quiet, verbose, dolowercase
+    if ffile == []:
+        return
+    localdolowercase = dolowercase
+    # cont: set to True when the content of the last line read
+    # indicates statement continuation
+    cont = False
+    finalline = ''
+    ll = ''
+    includeline = re.compile(
+        r'\s*include\s*(\'|")(?P<name>[^\'"]*)(\'|")', re.I)
+    cont1 = re.compile(r'(?P<line>.*)&\s*\Z')
+    cont2 = re.compile(r'(\s*&|)(?P<line>.*)')
+    mline_mark = re.compile(r".*?'''")
+    if istop:
+        dowithline('', -1)
+    ll, l1 = '', ''
+    spacedigits = [' '] + [str(_m) for _m in range(10)]
+    filepositiontext = ''
+    fin = fileinput.FileInput(ffile)
+    while True:
+        l = fin.readline()
+        if not l:
+            break
+        if fin.isfirstline():
+            filepositiontext = ''
+            currentfilename = fin.filename()
+            gotnextfile = 1
+            l1 = l
+            strictf77 = 0
+            sourcecodeform = 'fix'
+            ext = os.path.splitext(currentfilename)[1]
+            if is_f_file(currentfilename) and \
+                    not (_has_f90_header(l) or _has_fix_header(l)):
+                strictf77 = 1
+            elif is_free_format(currentfilename) and not _has_fix_header(l):
+                sourcecodeform = 'free'
+            if strictf77:
+                beginpattern = beginpattern77
+            else:
+                beginpattern = beginpattern90
+            outmess('\tReading file %s (format:%s%s)\n'
+                    % (repr(currentfilename), sourcecodeform,
+                       strictf77 and ',strict' or ''))
+
+        l = l.expandtabs().replace('\xa0', ' ')
+        # Get rid of newline characters
+        while not l == '':
+            if l[-1] not in "\n\r\f":
+                break
+            l = l[:-1]
+        if not strictf77:
+            (l, rl) = split_by_unquoted(l, '!')
+            l += ' '
+            if rl[:5].lower() == '!f2py':  # f2py directive
+                l, _ = split_by_unquoted(l + 4 * ' ' + rl[5:], '!')
+        if l.strip() == '':  # Skip empty line
+            if sourcecodeform == 'free':
+                # In free form, a statement continues in the next line
+                # that is not a comment line [3.3.2.4^1], lines with
+                # blanks are comment lines [3.3.2.3^1]. Hence, the
+                # line continuation flag must retain its state.
+                pass
+            else:
+                # In fixed form, statement continuation is determined
+                # by a non-blank character at the 6-th position. Empty
+                # line indicates a start of a new statement
+                # [3.3.3.3^1]. Hence, the line continuation flag must
+                # be reset.
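+                # Illustration of the two continuation styles handled here:
+                # fixed form marks continuation with a non-blank character
+                # in column 6 of the following line,
+                #       X = 1 +
+                #      &    2
+                # while free form ends the continued line with '&':
+                #       x = 1 + &
+                #           2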
+ cont = False + continue + if sourcecodeform == 'fix': + if l[0] in ['*', 'c', '!', 'C', '#']: + if l[1:5].lower() == 'f2py': # f2py directive + l = ' ' + l[5:] + else: # Skip comment line + cont = False + continue + elif strictf77: + if len(l) > 72: + l = l[:72] + if not (l[0] in spacedigits): + raise Exception('readfortrancode: Found non-(space,digit) char ' + 'in the first column.\n\tAre you sure that ' + 'this code is in fix form?\n\tline=%s' % repr(l)) + + if (not cont or strictf77) and (len(l) > 5 and not l[5] == ' '): + # Continuation of a previous line + ll = ll + l[6:] + finalline = '' + origfinalline = '' + else: + if not strictf77: + # F90 continuation + r = cont1.match(l) + if r: + l = r.group('line') # Continuation follows .. + if cont: + ll = ll + cont2.match(l).group('line') + finalline = '' + origfinalline = '' + else: + # clean up line beginning from possible digits. + l = ' ' + l[5:] + if localdolowercase: + finalline = ll.lower() + else: + finalline = ll + origfinalline = ll + ll = l + cont = (r is not None) + else: + # clean up line beginning from possible digits. + l = ' ' + l[5:] + if localdolowercase: + finalline = ll.lower() + else: + finalline = ll + origfinalline = ll + ll = l + + elif sourcecodeform == 'free': + if not cont and ext == '.pyf' and mline_mark.match(l): + l = l + '\n' + while True: + lc = fin.readline() + if not lc: + errmess( + 'Unexpected end of file when reading multiline\n') + break + l = l + lc + if mline_mark.match(lc): + break + l = l.rstrip() + r = cont1.match(l) + if r: + l = r.group('line') # Continuation follows .. + if cont: + ll = ll + cont2.match(l).group('line') + finalline = '' + origfinalline = '' + else: + if localdolowercase: + finalline = ll.lower() + else: + finalline = ll + origfinalline = ll + ll = l + cont = (r is not None) + else: + raise ValueError( + "Flag sourcecodeform must be either 'fix' or 'free': %s" % repr(sourcecodeform)) + filepositiontext = 'Line #%d in %s:"%s"\n\t' % ( + fin.filelineno() - 1, currentfilename, l1) + m = includeline.match(origfinalline) + if m: + fn = m.group('name') + if os.path.isfile(fn): + readfortrancode(fn, dowithline=dowithline, istop=0) + else: + include_dirs = [ + os.path.dirname(currentfilename)] + include_paths + foundfile = 0 + for inc_dir in include_dirs: + fn1 = os.path.join(inc_dir, fn) + if os.path.isfile(fn1): + foundfile = 1 + readfortrancode(fn1, dowithline=dowithline, istop=0) + break + if not foundfile: + outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n' % ( + repr(fn), os.pathsep.join(include_dirs))) + else: + dowithline(finalline) + l1 = ll + if localdolowercase: + finalline = ll.lower() + else: + finalline = ll + origfinalline = ll + filepositiontext = 'Line #%d in %s:"%s"\n\t' % ( + fin.filelineno() - 1, currentfilename, l1) + m = includeline.match(origfinalline) + if m: + fn = m.group('name') + if os.path.isfile(fn): + readfortrancode(fn, dowithline=dowithline, istop=0) + else: + include_dirs = [os.path.dirname(currentfilename)] + include_paths + foundfile = 0 + for inc_dir in include_dirs: + fn1 = os.path.join(inc_dir, fn) + if os.path.isfile(fn1): + foundfile = 1 + readfortrancode(fn1, dowithline=dowithline, istop=0) + break + if not foundfile: + outmess('readfortrancode: could not find include file %s in %s. 
Ignoring.\n' % (
+                        repr(fn), os.pathsep.join(include_dirs)))
+        else:
+            dowithline(finalline)
+    filepositiontext = ''
+    fin.close()
+    if istop:
+        dowithline('', 1)
+    else:
+        gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\
+            beginpattern, quiet, verbose, dolowercase = saveglobals
+
+# Crack line
+beforethisafter = r'\s*(?P<before>%s(?=\s*(\b(%s)\b)))' + \
+                  r'\s*(?P<this>(\b(%s)\b))' + \
+                  r'\s*(?P<after>%s)\s*\Z'
+##
+fortrantypes = r'character|logical|integer|real|complex|double\s*(precision\s*(complex|)|complex)|type(?=\s*\([\w\s,=(*)]*\))|byte'
+typespattern = re.compile(
+    beforethisafter % ('', fortrantypes, fortrantypes, '.*'), re.I), 'type'
+typespattern4implicit = re.compile(beforethisafter % (
+    '', fortrantypes + '|static|automatic|undefined', fortrantypes + '|static|automatic|undefined', '.*'), re.I)
+#
+functionpattern = re.compile(beforethisafter % (
+    r'([a-z]+[\w\s(=*+-/)]*?|)', 'function', 'function', '.*'), re.I), 'begin'
+subroutinepattern = re.compile(beforethisafter % (
+    r'[a-z\s]*?', 'subroutine', 'subroutine', '.*'), re.I), 'begin'
+# modulepattern=re.compile(beforethisafter%('[a-z\s]*?','module','module','.*'),re.I),'begin'
+#
+groupbegins77 = r'program|block\s*data'
+beginpattern77 = re.compile(
+    beforethisafter % ('', groupbegins77, groupbegins77, '.*'), re.I), 'begin'
+groupbegins90 = groupbegins77 + \
+    r'|module(?!\s*procedure)|python\s*module|(abstract|)\s*interface|' + \
+    r'type(?!\s*\()'
+beginpattern90 = re.compile(
+    beforethisafter % ('', groupbegins90, groupbegins90, '.*'), re.I), 'begin'
+groupends = (r'end|endprogram|endblockdata|endmodule|endpythonmodule|'
+             r'endinterface|endsubroutine|endfunction')
+endpattern = re.compile(
+    beforethisafter % ('', groupends, groupends, r'[\w\s]*'), re.I), 'end'
+endifs = r'(end\s*(if|do|where|select|while|forall|associate|block|critical|enum|team))|(module\s*procedure)'
+endifpattern = re.compile(
+    beforethisafter % (r'[\w]*?', endifs, endifs, r'[\w\s]*'), re.I), 'endif'
+#
+implicitpattern = re.compile(
+    beforethisafter % ('', 'implicit', 'implicit', '.*'), re.I), 'implicit'
+dimensionpattern = re.compile(beforethisafter % (
+    '', 'dimension|virtual', 'dimension|virtual', '.*'), re.I), 'dimension'
+externalpattern = re.compile(
+    beforethisafter % ('', 'external', 'external', '.*'), re.I), 'external'
+optionalpattern = re.compile(
+    beforethisafter % ('', 'optional', 'optional', '.*'), re.I), 'optional'
+requiredpattern = re.compile(
+    beforethisafter % ('', 'required', 'required', '.*'), re.I), 'required'
+publicpattern = re.compile(
+    beforethisafter % ('', 'public', 'public', '.*'), re.I), 'public'
+privatepattern = re.compile(
+    beforethisafter % ('', 'private', 'private', '.*'), re.I), 'private'
+intrinsicpattern = re.compile(
+    beforethisafter % ('', 'intrinsic', 'intrinsic', '.*'), re.I), 'intrinsic'
+intentpattern = re.compile(beforethisafter % (
+    '', 'intent|depend|note|check', 'intent|depend|note|check', r'\s*\(.*?\).*'), re.I), 'intent'
+parameterpattern = re.compile(
+    beforethisafter % ('', 'parameter', 'parameter', r'\s*\(.*'), re.I), 'parameter'
+datapattern = re.compile(
+    beforethisafter % ('', 'data', 'data', '.*'), re.I), 'data'
+callpattern = re.compile(
+    beforethisafter % ('', 'call', 'call', '.*'), re.I), 'call'
+entrypattern = re.compile(
+    beforethisafter % ('', 'entry', 'entry', '.*'), re.I), 'entry'
+callfunpattern = re.compile(
+    beforethisafter % ('', 'callfun', 'callfun', '.*'), re.I), 'callfun'
+commonpattern = re.compile(
+    beforethisafter % ('', 'common', 'common',
'.*'), re.I), 'common'
+usepattern = re.compile(
+    beforethisafter % ('', 'use', 'use', '.*'), re.I), 'use'
+containspattern = re.compile(
+    beforethisafter % ('', 'contains', 'contains', ''), re.I), 'contains'
+formatpattern = re.compile(
+    beforethisafter % ('', 'format', 'format', '.*'), re.I), 'format'
+# Non-fortran and f2py-specific statements
+f2pyenhancementspattern = re.compile(beforethisafter % ('', 'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef',
+                                                        'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef', '.*'), re.I | re.S), 'f2pyenhancements'
+multilinepattern = re.compile(
+    r"\s*(?P<before>''')(?P<this>.*?)(?P<after>''')\s*\Z", re.S), 'multiline'
+##
+
+def split_by_unquoted(line, characters):
+    """
+    Splits the line into (line[:i], line[i:]),
+    where i is the index of first occurrence of one of the characters
+    not within quotes, or len(line) if no such index exists
+    """
+    assert not (set('"\'') & set(characters)), "cannot split by unquoted quotes"
+    r = re.compile(
+        r"\A(?P<before>({single_quoted}|{double_quoted}|{not_quoted})*)"
+        r"(?P<after>{char}.*)\Z".format(
+            not_quoted="[^\"'{}]".format(re.escape(characters)),
+            char="[{}]".format(re.escape(characters)),
+            single_quoted=r"('([^'\\]|(\\.))*')",
+            double_quoted=r'("([^"\\]|(\\.))*")'))
+    m = r.match(line)
+    if m:
+        d = m.groupdict()
+        return (d["before"], d["after"])
+    return (line, "")
+
+def _simplifyargs(argsline):
+    a = []
+    for n in markoutercomma(argsline).split('@,@'):
+        for r in '(),':
+            n = n.replace(r, '_')
+        a.append(n)
+    return ','.join(a)
+
+crackline_re_1 = re.compile(r'\s*(?P<result>\b[a-z]+\w*\b)\s*=.*', re.I)
+
+
+def crackline(line, reset=0):
+    """
+    reset=-1  --- initialize
+    reset=0   --- crack the line
+    reset=1   --- final check if mismatch of blocks occurred
+
+    Cracked data is saved in grouplist[0].
+    """
+    global beginpattern, groupcounter, groupname, groupcache, grouplist
+    global filepositiontext, currentfilename, neededmodule, expectbegin
+    global skipblocksuntil, skipemptyends, previous_context, gotnextfile
+
+    _, has_semicolon = split_by_unquoted(line, ";")
+    if has_semicolon and not (f2pyenhancementspattern[0].match(line) or
+                              multilinepattern[0].match(line)):
+        # XXX: non-zero reset values need testing
+        assert reset == 0, repr(reset)
+        # split line on unquoted semicolons
+        line, semicolon_line = split_by_unquoted(line, ";")
+        while semicolon_line:
+            crackline(line, reset)
+            line, semicolon_line = split_by_unquoted(semicolon_line[1:], ";")
+        crackline(line, reset)
+        return
+    if reset < 0:
+        groupcounter = 0
+        groupname = {groupcounter: ''}
+        groupcache = {groupcounter: {}}
+        grouplist = {groupcounter: []}
+        groupcache[groupcounter]['body'] = []
+        groupcache[groupcounter]['vars'] = {}
+        groupcache[groupcounter]['block'] = ''
+        groupcache[groupcounter]['name'] = ''
+        neededmodule = -1
+        skipblocksuntil = -1
+        return
+    if reset > 0:
+        fl = 0
+        if f77modulename and neededmodule == groupcounter:
+            fl = 2
+        while groupcounter > fl:
+            outmess('crackline: groupcounter=%s groupname=%s\n' %
+                    (repr(groupcounter), repr(groupname)))
+            outmess(
+                'crackline: Mismatch of blocks encountered.
Trying to fix it by assuming "end" statement.\n')
+            grouplist[groupcounter - 1].append(groupcache[groupcounter])
+            grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
+            del grouplist[groupcounter]
+            groupcounter = groupcounter - 1
+        if f77modulename and neededmodule == groupcounter:
+            grouplist[groupcounter - 1].append(groupcache[groupcounter])
+            grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
+            del grouplist[groupcounter]
+            groupcounter = groupcounter - 1  # end interface
+            grouplist[groupcounter - 1].append(groupcache[groupcounter])
+            grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
+            del grouplist[groupcounter]
+            groupcounter = groupcounter - 1  # end module
+            neededmodule = -1
+        return
+    if line == '':
+        return
+    flag = 0
+    for pat in [dimensionpattern, externalpattern, intentpattern, optionalpattern,
+                requiredpattern,
+                parameterpattern, datapattern, publicpattern, privatepattern,
+                intrinsicpattern,
+                endifpattern, endpattern,
+                formatpattern,
+                beginpattern, functionpattern, subroutinepattern,
+                implicitpattern, typespattern, commonpattern,
+                callpattern, usepattern, containspattern,
+                entrypattern,
+                f2pyenhancementspattern,
+                multilinepattern
+                ]:
+        m = pat[0].match(line)
+        if m:
+            break
+        flag = flag + 1
+    if not m:
+        re_1 = crackline_re_1
+        if 0 <= skipblocksuntil <= groupcounter:
+            return
+        if 'externals' in groupcache[groupcounter]:
+            for name in groupcache[groupcounter]['externals']:
+                if name in invbadnames:
+                    name = invbadnames[name]
+                if 'interfaced' in groupcache[groupcounter] and name in groupcache[groupcounter]['interfaced']:
+                    continue
+                m1 = re.match(
+                    r'(?P<before>[^"]*)\b%s\b\s*@\(@(?P<args>[^@]*)@\)@.*\Z' % name, markouterparen(line), re.I)
+                if m1:
+                    m2 = re_1.match(m1.group('before'))
+                    a = _simplifyargs(m1.group('args'))
+                    if m2:
+                        line = 'callfun %s(%s) result (%s)' % (
+                            name, a, m2.group('result'))
+                    else:
+                        line = 'callfun %s(%s)' % (name, a)
+                    m = callfunpattern[0].match(line)
+                    if not m:
+                        outmess(
+                            'crackline: could not resolve function call for line=%s.\n' % repr(line))
+                        return
+                    analyzeline(m, 'callfun', line)
+                    return
+        if verbose > 1 or (verbose == 1 and currentfilename.lower().endswith('.pyf')):
+            previous_context = None
+            outmess('crackline:%d: No pattern for line\n' % (groupcounter))
+        return
+    elif pat[1] == 'end':
+        if 0 <= skipblocksuntil < groupcounter:
+            groupcounter = groupcounter - 1
+            if skipblocksuntil <= groupcounter:
+                return
+        if groupcounter <= 0:
+            raise Exception('crackline: groupcounter(=%s) is nonpositive. '
+                            'Check the blocks.'
+ % (groupcounter)) + m1 = beginpattern[0].match((line)) + if (m1) and (not m1.group('this') == groupname[groupcounter]): + raise Exception('crackline: End group %s does not match with ' + 'previous Begin group %s\n\t%s' % + (repr(m1.group('this')), repr(groupname[groupcounter]), + filepositiontext) + ) + if skipblocksuntil == groupcounter: + skipblocksuntil = -1 + grouplist[groupcounter - 1].append(groupcache[groupcounter]) + grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] + del grouplist[groupcounter] + groupcounter = groupcounter - 1 + if not skipemptyends: + expectbegin = 1 + elif pat[1] == 'begin': + if 0 <= skipblocksuntil <= groupcounter: + groupcounter = groupcounter + 1 + return + gotnextfile = 0 + analyzeline(m, pat[1], line) + expectbegin = 0 + elif pat[1] == 'endif': + pass + elif pat[1] == 'contains': + if ignorecontains: + return + if 0 <= skipblocksuntil <= groupcounter: + return + skipblocksuntil = groupcounter + else: + if 0 <= skipblocksuntil <= groupcounter: + return + analyzeline(m, pat[1], line) + + +def markouterparen(line): + l = '' + f = 0 + for c in line: + if c == '(': + f = f + 1 + if f == 1: + l = l + '@(@' + continue + elif c == ')': + f = f - 1 + if f == 0: + l = l + '@)@' + continue + l = l + c + return l + + +def markoutercomma(line, comma=','): + l = '' + f = 0 + before, after = split_by_unquoted(line, comma + '()') + l += before + while after: + if (after[0] == comma) and (f == 0): + l += '@' + comma + '@' + else: + l += after[0] + if after[0] == '(': + f += 1 + elif after[0] == ')': + f -= 1 + before, after = split_by_unquoted(after[1:], comma + '()') + l += before + assert not f, repr((f, line, l)) + return l + +def unmarkouterparen(line): + r = line.replace('@(@', '(').replace('@)@', ')') + return r + + +def appenddecl(decl, decl2, force=1): + if not decl: + decl = {} + if not decl2: + return decl + if decl is decl2: + return decl + for k in list(decl2.keys()): + if k == 'typespec': + if force or k not in decl: + decl[k] = decl2[k] + elif k == 'attrspec': + for l in decl2[k]: + decl = setattrspec(decl, l, force) + elif k == 'kindselector': + decl = setkindselector(decl, decl2[k], force) + elif k == 'charselector': + decl = setcharselector(decl, decl2[k], force) + elif k in ['=', 'typename']: + if force or k not in decl: + decl[k] = decl2[k] + elif k == 'note': + pass + elif k in ['intent', 'check', 'dimension', 'optional', 'required']: + errmess('appenddecl: "%s" not implemented.\n' % k) + else: + raise Exception('appenddecl: Unknown variable definition key:' + + str(k)) + return decl + +selectpattern = re.compile( + r'\s*(?P(@\(@.*?@\)@|\*[\d*]+|\*\s*@\(@.*?@\)@|))(?P.*)\Z', re.I) +nameargspattern = re.compile( + r'\s*(?P\b[\w$]+\b)\s*(@\(@\s*(?P[\w\s,]*)\s*@\)@|)\s*((result(\s*@\(@\s*(?P\b[\w$]+\b)\s*@\)@|))|(bind\s*@\(@\s*(?P.*)\s*@\)@))*\s*\Z', re.I) +callnameargspattern = re.compile( + r'\s*(?P\b[\w$]+\b)\s*@\(@\s*(?P.*)\s*@\)@\s*\Z', re.I) +real16pattern = re.compile( + r'([-+]?(?:\d+(?:\.\d*)?|\d*\.\d+))[dD]((?:[-+]?\d+)?)') +real8pattern = re.compile( + r'([-+]?((?:\d+(?:\.\d*)?|\d*\.\d+))[eE]((?:[-+]?\d+)?)|(\d+\.\d*))') + +_intentcallbackpattern = re.compile(r'intent\s*\(.*?\bcallback\b', re.I) + + +def _is_intent_callback(vdecl): + for a in vdecl.get('attrspec', []): + if _intentcallbackpattern.match(a): + return 1 + return 0 + + +def _resolvenameargspattern(line): + line = markouterparen(line) + m1 = nameargspattern.match(line) + if m1: + return m1.group('name'), m1.group('args'), m1.group('result'), m1.group('bind') + m1 = 
callnameargspattern.match(line) + if m1: + return m1.group('name'), m1.group('args'), None, None + return None, [], None, None + + +def analyzeline(m, case, line): + global groupcounter, groupname, groupcache, grouplist, filepositiontext + global currentfilename, f77modulename, neededinterface, neededmodule + global expectbegin, gotnextfile, previous_context + + block = m.group('this') + if case != 'multiline': + previous_context = None + if expectbegin and case not in ['begin', 'call', 'callfun', 'type'] \ + and not skipemptyends and groupcounter < 1: + newname = os.path.basename(currentfilename).split('.')[0] + outmess( + 'analyzeline: no group yet. Creating program group with name "%s".\n' % newname) + gotnextfile = 0 + groupcounter = groupcounter + 1 + groupname[groupcounter] = 'program' + groupcache[groupcounter] = {} + grouplist[groupcounter] = [] + groupcache[groupcounter]['body'] = [] + groupcache[groupcounter]['vars'] = {} + groupcache[groupcounter]['block'] = 'program' + groupcache[groupcounter]['name'] = newname + groupcache[groupcounter]['from'] = 'fromsky' + expectbegin = 0 + if case in ['begin', 'call', 'callfun']: + # Crack line => block,name,args,result + block = block.lower() + if re.match(r'block\s*data', block, re.I): + block = 'block data' + elif re.match(r'python\s*module', block, re.I): + block = 'python module' + elif re.match(r'abstract\s*interface', block, re.I): + block = 'abstract interface' + name, args, result, bind = _resolvenameargspattern(m.group('after')) + if name is None: + if block == 'block data': + name = '_BLOCK_DATA_' + else: + name = '' + if block not in ['interface', 'block data', 'abstract interface']: + outmess('analyzeline: No name/args pattern found for line.\n') + + previous_context = (block, name, groupcounter) + if args: + args = rmbadname([x.strip() + for x in markoutercomma(args).split('@,@')]) + else: + args = [] + if '' in args: + while '' in args: + args.remove('') + outmess( + 'analyzeline: argument list is malformed (missing argument).\n') + + # end of crack line => block,name,args,result + needmodule = 0 + needinterface = 0 + + if case in ['call', 'callfun']: + needinterface = 1 + if 'args' not in groupcache[groupcounter]: + return + if name not in groupcache[groupcounter]['args']: + return + for it in grouplist[groupcounter]: + if it['name'] == name: + return + if name in groupcache[groupcounter]['interfaced']: + return + block = {'call': 'subroutine', 'callfun': 'function'}[case] + if f77modulename and neededmodule == -1 and groupcounter <= 1: + neededmodule = groupcounter + 2 + needmodule = 1 + if block not in ['interface', 'abstract interface']: + needinterface = 1 + # Create new block(s) + groupcounter = groupcounter + 1 + groupcache[groupcounter] = {} + grouplist[groupcounter] = [] + if needmodule: + if verbose > 1: + outmess('analyzeline: Creating module block %s\n' % + repr(f77modulename), 0) + groupname[groupcounter] = 'module' + groupcache[groupcounter]['block'] = 'python module' + groupcache[groupcounter]['name'] = f77modulename + groupcache[groupcounter]['from'] = '' + groupcache[groupcounter]['body'] = [] + groupcache[groupcounter]['externals'] = [] + groupcache[groupcounter]['interfaced'] = [] + groupcache[groupcounter]['vars'] = {} + groupcounter = groupcounter + 1 + groupcache[groupcounter] = {} + grouplist[groupcounter] = [] + if needinterface: + if verbose > 1: + outmess('analyzeline: Creating additional interface block (groupcounter=%s).\n' % ( + groupcounter), 0) + groupname[groupcounter] = 'interface' + 
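# a synthetic interface block is opened here; 'unknown_interface' below
+        # is just a placeholder name for it
+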
groupcache[groupcounter]['block'] = 'interface' + groupcache[groupcounter]['name'] = 'unknown_interface' + groupcache[groupcounter]['from'] = '%s:%s' % ( + groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name']) + groupcache[groupcounter]['body'] = [] + groupcache[groupcounter]['externals'] = [] + groupcache[groupcounter]['interfaced'] = [] + groupcache[groupcounter]['vars'] = {} + groupcounter = groupcounter + 1 + groupcache[groupcounter] = {} + grouplist[groupcounter] = [] + groupname[groupcounter] = block + groupcache[groupcounter]['block'] = block + if not name: + name = 'unknown_' + block.replace(' ', '_') + groupcache[groupcounter]['prefix'] = m.group('before') + groupcache[groupcounter]['name'] = rmbadname1(name) + groupcache[groupcounter]['result'] = result + if groupcounter == 1: + groupcache[groupcounter]['from'] = currentfilename + else: + if f77modulename and groupcounter == 3: + groupcache[groupcounter]['from'] = '%s:%s' % ( + groupcache[groupcounter - 1]['from'], currentfilename) + else: + groupcache[groupcounter]['from'] = '%s:%s' % ( + groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name']) + for k in list(groupcache[groupcounter].keys()): + if not groupcache[groupcounter][k]: + del groupcache[groupcounter][k] + + groupcache[groupcounter]['args'] = args + groupcache[groupcounter]['body'] = [] + groupcache[groupcounter]['externals'] = [] + groupcache[groupcounter]['interfaced'] = [] + groupcache[groupcounter]['vars'] = {} + groupcache[groupcounter]['entry'] = {} + # end of creation + if block == 'type': + groupcache[groupcounter]['varnames'] = [] + + if case in ['call', 'callfun']: # set parents variables + if name not in groupcache[groupcounter - 2]['externals']: + groupcache[groupcounter - 2]['externals'].append(name) + groupcache[groupcounter]['vars'] = copy.deepcopy( + groupcache[groupcounter - 2]['vars']) + try: + del groupcache[groupcounter]['vars'][name][ + groupcache[groupcounter]['vars'][name]['attrspec'].index('external')] + except Exception: + pass + if block in ['function', 'subroutine']: # set global attributes + try: + groupcache[groupcounter]['vars'][name] = appenddecl( + groupcache[groupcounter]['vars'][name], groupcache[groupcounter - 2]['vars']['']) + except Exception: + pass + if case == 'callfun': # return type + if result and result in groupcache[groupcounter]['vars']: + if not name == result: + groupcache[groupcounter]['vars'][name] = appenddecl( + groupcache[groupcounter]['vars'][name], groupcache[groupcounter]['vars'][result]) + # if groupcounter>1: # name is interfaced + try: + groupcache[groupcounter - 2]['interfaced'].append(name) + except Exception: + pass + if block == 'function': + t = typespattern[0].match(m.group('before') + ' ' + name) + if t: + typespec, selector, attr, edecl = cracktypespec0( + t.group('this'), t.group('after')) + updatevars(typespec, selector, attr, edecl) + + if case in ['call', 'callfun']: + grouplist[groupcounter - 1].append(groupcache[groupcounter]) + grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] + del grouplist[groupcounter] + groupcounter = groupcounter - 1 # end routine + grouplist[groupcounter - 1].append(groupcache[groupcounter]) + grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] + del grouplist[groupcounter] + groupcounter = groupcounter - 1 # end interface + + elif case == 'entry': + name, args, result, bind = _resolvenameargspattern(m.group('after')) + if name is not None: + if args: + args = rmbadname([x.strip() + for x in 
markoutercomma(args).split('@,@')]) + else: + args = [] + assert result is None, repr(result) + groupcache[groupcounter]['entry'][name] = args + previous_context = ('entry', name, groupcounter) + elif case == 'type': + typespec, selector, attr, edecl = cracktypespec0( + block, m.group('after')) + last_name = updatevars(typespec, selector, attr, edecl) + if last_name is not None: + previous_context = ('variable', last_name, groupcounter) + elif case in ['dimension', 'intent', 'optional', 'required', 'external', 'public', 'private', 'intrinsic']: + edecl = groupcache[groupcounter]['vars'] + ll = m.group('after').strip() + i = ll.find('::') + if i < 0 and case == 'intent': + i = markouterparen(ll).find('@)@') - 2 + ll = ll[:i + 1] + '::' + ll[i + 1:] + i = ll.find('::') + if ll[i:] == '::' and 'args' in groupcache[groupcounter]: + outmess('All arguments will have attribute %s%s\n' % + (m.group('this'), ll[:i])) + ll = ll + ','.join(groupcache[groupcounter]['args']) + if i < 0: + i = 0 + pl = '' + else: + pl = ll[:i].strip() + ll = ll[i + 2:] + ch = markoutercomma(pl).split('@,@') + if len(ch) > 1: + pl = ch[0] + outmess('analyzeline: cannot handle multiple attributes without type specification. Ignoring %r.\n' % ( + ','.join(ch[1:]))) + last_name = None + + for e in [x.strip() for x in markoutercomma(ll).split('@,@')]: + m1 = namepattern.match(e) + if not m1: + if case in ['public', 'private']: + k = '' + else: + print(m.groupdict()) + outmess('analyzeline: no name pattern found in %s statement for %s. Skipping.\n' % ( + case, repr(e))) + continue + else: + k = rmbadname1(m1.group('name')) + if k not in edecl: + edecl[k] = {} + if case == 'dimension': + ap = case + m1.group('after') + if case == 'intent': + ap = m.group('this') + pl + if _intentcallbackpattern.match(ap): + if k not in groupcache[groupcounter]['args']: + if groupcounter > 1: + if '__user__' not in groupcache[groupcounter - 2]['name']: + outmess( + 'analyzeline: missing __user__ module (could be nothing)\n') + # fixes ticket 1693 + if k != groupcache[groupcounter]['name']: + outmess('analyzeline: appending intent(callback) %s' + ' to %s arguments\n' % (k, groupcache[groupcounter]['name'])) + groupcache[groupcounter]['args'].append(k) + else: + errmess( + 'analyzeline: intent(callback) %s is ignored' % (k)) + else: + errmess('analyzeline: intent(callback) %s is already' + ' in argument list' % (k)) + if case in ['optional', 'required', 'public', 'external', 'private', 'intrinsic']: + ap = case + if 'attrspec' in edecl[k]: + edecl[k]['attrspec'].append(ap) + else: + edecl[k]['attrspec'] = [ap] + if case == 'external': + if groupcache[groupcounter]['block'] == 'program': + outmess('analyzeline: ignoring program arguments\n') + continue + if k not in groupcache[groupcounter]['args']: + continue + if 'externals' not in groupcache[groupcounter]: + groupcache[groupcounter]['externals'] = [] + groupcache[groupcounter]['externals'].append(k) + last_name = k + groupcache[groupcounter]['vars'] = edecl + if last_name is not None: + previous_context = ('variable', last_name, groupcounter) + elif case == 'parameter': + edecl = groupcache[groupcounter]['vars'] + ll = m.group('after').strip()[1:-1] + last_name = None + for e in markoutercomma(ll).split('@,@'): + try: + k, initexpr = [x.strip() for x in e.split('=')] + except Exception: + outmess( + 'analyzeline: could not extract name,expr in parameter statement "%s" of "%s"\n' % (e, ll)) + continue + params = get_parameters(edecl) + k = rmbadname1(k) + if k not in edecl: + edecl[k] = {} + if 
'=' in edecl[k] and (not edecl[k]['='] == initexpr): + outmess('analyzeline: Overwriting the value of parameter "%s" ("%s") with "%s".\n' % ( + k, edecl[k]['='], initexpr)) + t = determineexprtype(initexpr, params) + if t: + if t.get('typespec') == 'real': + tt = list(initexpr) + for m in real16pattern.finditer(initexpr): + tt[m.start():m.end()] = list( + initexpr[m.start():m.end()].lower().replace('d', 'e')) + initexpr = ''.join(tt) + elif t.get('typespec') == 'complex': + initexpr = initexpr[1:].lower().replace('d', 'e').\ + replace(',', '+1j*(') + try: + v = eval(initexpr, {}, params) + except (SyntaxError, NameError, TypeError) as msg: + errmess('analyzeline: Failed to evaluate %r. Ignoring: %s\n' + % (initexpr, msg)) + continue + edecl[k]['='] = repr(v) + if 'attrspec' in edecl[k]: + edecl[k]['attrspec'].append('parameter') + else: + edecl[k]['attrspec'] = ['parameter'] + last_name = k + groupcache[groupcounter]['vars'] = edecl + if last_name is not None: + previous_context = ('variable', last_name, groupcounter) + elif case == 'implicit': + if m.group('after').strip().lower() == 'none': + groupcache[groupcounter]['implicit'] = None + elif m.group('after'): + if 'implicit' in groupcache[groupcounter]: + impl = groupcache[groupcounter]['implicit'] + else: + impl = {} + if impl is None: + outmess( + 'analyzeline: Overwriting earlier "implicit none" statement.\n') + impl = {} + for e in markoutercomma(m.group('after')).split('@,@'): + decl = {} + m1 = re.match( + r'\s*(?P.*?)\s*(\(\s*(?P[a-z-, ]+)\s*\)\s*|)\Z', e, re.I) + if not m1: + outmess( + 'analyzeline: could not extract info of implicit statement part "%s"\n' % (e)) + continue + m2 = typespattern4implicit.match(m1.group('this')) + if not m2: + outmess( + 'analyzeline: could not extract types pattern of implicit statement part "%s"\n' % (e)) + continue + typespec, selector, attr, edecl = cracktypespec0( + m2.group('this'), m2.group('after')) + kindselect, charselect, typename = cracktypespec( + typespec, selector) + decl['typespec'] = typespec + decl['kindselector'] = kindselect + decl['charselector'] = charselect + decl['typename'] = typename + for k in list(decl.keys()): + if not decl[k]: + del decl[k] + for r in markoutercomma(m1.group('after')).split('@,@'): + if '-' in r: + try: + begc, endc = [x.strip() for x in r.split('-')] + except Exception: + outmess( + 'analyzeline: expected "-" instead of "%s" in range list of implicit statement\n' % r) + continue + else: + begc = endc = r.strip() + if not len(begc) == len(endc) == 1: + outmess( + 'analyzeline: expected "-" instead of "%s" in range list of implicit statement (2)\n' % r) + continue + for o in range(ord(begc), ord(endc) + 1): + impl[chr(o)] = decl + groupcache[groupcounter]['implicit'] = impl + elif case == 'data': + ll = [] + dl = '' + il = '' + f = 0 + fc = 1 + inp = 0 + for c in m.group('after'): + if not inp: + if c == "'": + fc = not fc + if c == '/' and fc: + f = f + 1 + continue + if c == '(': + inp = inp + 1 + elif c == ')': + inp = inp - 1 + if f == 0: + dl = dl + c + elif f == 1: + il = il + c + elif f == 2: + dl = dl.strip() + if dl.startswith(','): + dl = dl[1:].strip() + ll.append([dl, il]) + dl = c + il = '' + f = 0 + if f == 2: + dl = dl.strip() + if dl.startswith(','): + dl = dl[1:].strip() + ll.append([dl, il]) + vars = {} + if 'vars' in groupcache[groupcounter]: + vars = groupcache[groupcounter]['vars'] + last_name = None + for l in ll: + l = [x.strip() for x in l] + if l[0][0] == ',': + l[0] = l[0][1:] + if l[0][0] == '(': + outmess( + 'analyzeline: 
implied-DO list "%s" is not supported. Skipping.\n' % l[0]) + continue + i = 0 + j = 0 + llen = len(l[1]) + for v in rmbadname([x.strip() for x in markoutercomma(l[0]).split('@,@')]): + if v[0] == '(': + outmess( + 'analyzeline: implied-DO list "%s" is not supported. Skipping.\n' % v) + # XXX: subsequent init expressions may get wrong values. + # Ignoring since data statements are irrelevant for + # wrapping. + continue + fc = 0 + while (i < llen) and (fc or not l[1][i] == ','): + if l[1][i] == "'": + fc = not fc + i = i + 1 + i = i + 1 + if v not in vars: + vars[v] = {} + if '=' in vars[v] and not vars[v]['='] == l[1][j:i - 1]: + outmess('analyzeline: changing init expression of "%s" ("%s") to "%s"\n' % ( + v, vars[v]['='], l[1][j:i - 1])) + vars[v]['='] = l[1][j:i - 1] + j = i + last_name = v + groupcache[groupcounter]['vars'] = vars + if last_name is not None: + previous_context = ('variable', last_name, groupcounter) + elif case == 'common': + line = m.group('after').strip() + if not line[0] == '/': + line = '//' + line + cl = [] + f = 0 + bn = '' + ol = '' + for c in line: + if c == '/': + f = f + 1 + continue + if f >= 3: + bn = bn.strip() + if not bn: + bn = '_BLNK_' + cl.append([bn, ol]) + f = f - 2 + bn = '' + ol = '' + if f % 2: + bn = bn + c + else: + ol = ol + c + bn = bn.strip() + if not bn: + bn = '_BLNK_' + cl.append([bn, ol]) + commonkey = {} + if 'common' in groupcache[groupcounter]: + commonkey = groupcache[groupcounter]['common'] + for c in cl: + if c[0] not in commonkey: + commonkey[c[0]] = [] + for i in [x.strip() for x in markoutercomma(c[1]).split('@,@')]: + if i: + commonkey[c[0]].append(i) + groupcache[groupcounter]['common'] = commonkey + previous_context = ('common', bn, groupcounter) + elif case == 'use': + m1 = re.match( + r'\A\s*(?P\b\w+\b)\s*((,(\s*\bonly\b\s*:|(?P))\s*(?P.*))|)\s*\Z', m.group('after'), re.I) + if m1: + mm = m1.groupdict() + if 'use' not in groupcache[groupcounter]: + groupcache[groupcounter]['use'] = {} + name = m1.group('name') + groupcache[groupcounter]['use'][name] = {} + isonly = 0 + if 'list' in mm and mm['list'] is not None: + if 'notonly' in mm and mm['notonly'] is None: + isonly = 1 + groupcache[groupcounter]['use'][name]['only'] = isonly + ll = [x.strip() for x in mm['list'].split(',')] + rl = {} + for l in ll: + if '=' in l: + m2 = re.match( + r'\A\s*(?P\b\w+\b)\s*=\s*>\s*(?P\b\w+\b)\s*\Z', l, re.I) + if m2: + rl[m2.group('local').strip()] = m2.group( + 'use').strip() + else: + outmess( + 'analyzeline: Not local=>use pattern found in %s\n' % repr(l)) + else: + rl[l] = l + groupcache[groupcounter]['use'][name]['map'] = rl + else: + pass + else: + print(m.groupdict()) + outmess('analyzeline: Could not crack the use statement.\n') + elif case in ['f2pyenhancements']: + if 'f2pyenhancements' not in groupcache[groupcounter]: + groupcache[groupcounter]['f2pyenhancements'] = {} + d = groupcache[groupcounter]['f2pyenhancements'] + if m.group('this') == 'usercode' and 'usercode' in d: + if isinstance(d['usercode'], str): + d['usercode'] = [d['usercode']] + d['usercode'].append(m.group('after')) + else: + d[m.group('this')] = m.group('after') + elif case == 'multiline': + if previous_context is None: + if verbose: + outmess('analyzeline: No context for multiline block.\n') + return + gc = groupcounter + appendmultiline(groupcache[gc], + previous_context[:2], + m.group('this')) + else: + if verbose > 1: + print(m.groupdict()) + outmess('analyzeline: No code implemented for line.\n') + + +def appendmultiline(group, context_name, ml): + if 
'f2pymultilines' not in group:
+        group['f2pymultilines'] = {}
+    d = group['f2pymultilines']
+    if context_name not in d:
+        d[context_name] = []
+    d[context_name].append(ml)
+    return
+
+
+def cracktypespec0(typespec, ll):
+    selector = None
+    attr = None
+    if re.match(r'double\s*complex', typespec, re.I):
+        typespec = 'double complex'
+    elif re.match(r'double\s*precision', typespec, re.I):
+        typespec = 'double precision'
+    else:
+        typespec = typespec.strip().lower()
+    m1 = selectpattern.match(markouterparen(ll))
+    if not m1:
+        outmess(
+            'cracktypespec0: no kind/char_selector pattern found for line.\n')
+        return
+    d = m1.groupdict()
+    for k in list(d.keys()):
+        d[k] = unmarkouterparen(d[k])
+    if typespec in ['complex', 'integer', 'logical', 'real', 'character', 'type']:
+        selector = d['this']
+        ll = d['after']
+    i = ll.find('::')
+    if i >= 0:
+        attr = ll[:i].strip()
+        ll = ll[i + 2:]
+    return typespec, selector, attr, ll
+#####
+namepattern = re.compile(r'\s*(?P<name>\b\w+\b)\s*(?P<after>.*)\s*\Z', re.I)
+kindselector = re.compile(
+    r'\s*(\(\s*(kind\s*=)?\s*(?P<kind>.*)\s*\)|\*\s*(?P<kind2>.*?))\s*\Z', re.I)
+charselector = re.compile(
+    r'\s*(\((?P<lenkind>.*)\)|\*\s*(?P<charlen>.*))\s*\Z', re.I)
+lenkindpattern = re.compile(
+    r'\s*(kind\s*=\s*(?P<kind>.*?)\s*(@,@\s*len\s*=\s*(?P<len>.*)|)|(len\s*=\s*|)(?P<len2>.*?)\s*(@,@\s*(kind\s*=\s*|)(?P<kind2>.*)|))\s*\Z', re.I)
+lenarraypattern = re.compile(
+    r'\s*(@\(@\s*(?!/)\s*(?P<array>.*?)\s*@\)@\s*\*\s*(?P<len>.*?)|(\*\s*(?P<len2>.*?)|)\s*(@\(@\s*(?!/)\s*(?P<array2>.*?)\s*@\)@|))\s*(=\s*(?P<init>.*?)|(@\(@|)/\s*(?P<init2>.*?)\s*/(@\)@|)|)\s*\Z', re.I)
+
+
+def removespaces(expr):
+    expr = expr.strip()
+    if len(expr) <= 1:
+        return expr
+    expr2 = expr[0]
+    for i in range(1, len(expr) - 1):
+        if (expr[i] == ' ' and
+                ((expr[i + 1] in "()[]{}=+-/* ") or
+                 (expr[i - 1] in "()[]{}=+-/* "))):
+            continue
+        expr2 = expr2 + expr[i]
+    expr2 = expr2 + expr[-1]
+    return expr2
+
+
+def markinnerspaces(line):
+    l = ''
+    f = 0
+    cc = '\''
+    cb = ''
+    for c in line:
+        if cb == '\\' and c in ['\\', '\'', '"']:
+            l = l + c
+            cb = c
+            continue
+        if f == 0 and c in ['\'', '"']:
+            cc = c
+        if c == cc:
+            f = f + 1
+        elif c == cc:
+            f = f - 1
+        elif c == ' ' and f == 1:
+            l = l + '@_@'
+            continue
+        l = l + c
+        cb = c
+    return l
+
+
+def updatevars(typespec, selector, attrspec, entitydecl):
+    global groupcache, groupcounter
+
+    last_name = None
+    kindselect, charselect, typename = cracktypespec(typespec, selector)
+    if attrspec:
+        attrspec = [x.strip() for x in markoutercomma(attrspec).split('@,@')]
+        l = []
+        c = re.compile(r'(?P<start>[a-zA-Z]+)')
+        for a in attrspec:
+            if not a:
+                continue
+            m = c.match(a)
+            if m:
+                s = m.group('start').lower()
+                a = s + a[len(s):]
+            l.append(a)
+        attrspec = l
+    el = [x.strip() for x in markoutercomma(entitydecl).split('@,@')]
+    el1 = []
+    for e in el:
+        for e1 in [x.strip() for x in markoutercomma(removespaces(markinnerspaces(e)), comma=' ').split('@ @')]:
+            if e1:
+                el1.append(e1.replace('@_@', ' '))
+    for e in el1:
+        m = namepattern.match(e)
+        if not m:
+            outmess(
+                'updatevars: no name pattern found for entity=%s. Skipping.\n' % (repr(e)))
+            continue
+        ename = rmbadname1(m.group('name'))
+        edecl = {}
+        if ename in groupcache[groupcounter]['vars']:
+            edecl = groupcache[groupcounter]['vars'][ename].copy()
+            not_has_typespec = 'typespec' not in edecl
+            if not_has_typespec:
+                edecl['typespec'] = typespec
+            elif typespec and (not typespec == edecl['typespec']):
+                outmess('updatevars: attempt to change the type of "%s" ("%s") to "%s". 
Ignoring.\n' % ( + ename, edecl['typespec'], typespec)) + if 'kindselector' not in edecl: + edecl['kindselector'] = copy.copy(kindselect) + elif kindselect: + for k in list(kindselect.keys()): + if k in edecl['kindselector'] and (not kindselect[k] == edecl['kindselector'][k]): + outmess('updatevars: attempt to change the kindselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % ( + k, ename, edecl['kindselector'][k], kindselect[k])) + else: + edecl['kindselector'][k] = copy.copy(kindselect[k]) + if 'charselector' not in edecl and charselect: + if not_has_typespec: + edecl['charselector'] = charselect + else: + errmess('updatevars:%s: attempt to change empty charselector to %r. Ignoring.\n' + % (ename, charselect)) + elif charselect: + for k in list(charselect.keys()): + if k in edecl['charselector'] and (not charselect[k] == edecl['charselector'][k]): + outmess('updatevars: attempt to change the charselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % ( + k, ename, edecl['charselector'][k], charselect[k])) + else: + edecl['charselector'][k] = copy.copy(charselect[k]) + if 'typename' not in edecl: + edecl['typename'] = typename + elif typename and (not edecl['typename'] == typename): + outmess('updatevars: attempt to change the typename of "%s" ("%s") to "%s". Ignoring.\n' % ( + ename, edecl['typename'], typename)) + if 'attrspec' not in edecl: + edecl['attrspec'] = copy.copy(attrspec) + elif attrspec: + for a in attrspec: + if a not in edecl['attrspec']: + edecl['attrspec'].append(a) + else: + edecl['typespec'] = copy.copy(typespec) + edecl['kindselector'] = copy.copy(kindselect) + edecl['charselector'] = copy.copy(charselect) + edecl['typename'] = typename + edecl['attrspec'] = copy.copy(attrspec) + if 'external' in (edecl.get('attrspec') or []) and e in groupcache[groupcounter]['args']: + if 'externals' not in groupcache[groupcounter]: + groupcache[groupcounter]['externals'] = [] + groupcache[groupcounter]['externals'].append(e) + if m.group('after'): + m1 = lenarraypattern.match(markouterparen(m.group('after'))) + if m1: + d1 = m1.groupdict() + for lk in ['len', 'array', 'init']: + if d1[lk + '2'] is not None: + d1[lk] = d1[lk + '2'] + del d1[lk + '2'] + for k in list(d1.keys()): + if d1[k] is not None: + d1[k] = unmarkouterparen(d1[k]) + else: + del d1[k] + if 'len' in d1 and 'array' in d1: + if d1['len'] == '': + d1['len'] = d1['array'] + del d1['array'] + else: + d1['array'] = d1['array'] + ',' + d1['len'] + del d1['len'] + errmess('updatevars: "%s %s" is mapped to "%s %s(%s)"\n' % ( + typespec, e, typespec, ename, d1['array'])) + if 'array' in d1: + dm = 'dimension(%s)' % d1['array'] + if 'attrspec' not in edecl or (not edecl['attrspec']): + edecl['attrspec'] = [dm] + else: + edecl['attrspec'].append(dm) + for dm1 in edecl['attrspec']: + if dm1[:9] == 'dimension' and dm1 != dm: + del edecl['attrspec'][-1] + errmess('updatevars:%s: attempt to change %r to %r. 
Ignoring.\n' + % (ename, dm1, dm)) + break + + if 'len' in d1: + if typespec in ['complex', 'integer', 'logical', 'real']: + if ('kindselector' not in edecl) or (not edecl['kindselector']): + edecl['kindselector'] = {} + edecl['kindselector']['*'] = d1['len'] + elif typespec == 'character': + if ('charselector' not in edecl) or (not edecl['charselector']): + edecl['charselector'] = {} + if 'len' in edecl['charselector']: + del edecl['charselector']['len'] + edecl['charselector']['*'] = d1['len'] + if 'init' in d1: + if '=' in edecl and (not edecl['='] == d1['init']): + outmess('updatevars: attempt to change the init expression of "%s" ("%s") to "%s". Ignoring.\n' % ( + ename, edecl['='], d1['init'])) + else: + edecl['='] = d1['init'] + else: + outmess('updatevars: could not crack entity declaration "%s". Ignoring.\n' % ( + ename + m.group('after'))) + for k in list(edecl.keys()): + if not edecl[k]: + del edecl[k] + groupcache[groupcounter]['vars'][ename] = edecl + if 'varnames' in groupcache[groupcounter]: + groupcache[groupcounter]['varnames'].append(ename) + last_name = ename + return last_name + + +def cracktypespec(typespec, selector): + kindselect = None + charselect = None + typename = None + if selector: + if typespec in ['complex', 'integer', 'logical', 'real']: + kindselect = kindselector.match(selector) + if not kindselect: + outmess( + 'cracktypespec: no kindselector pattern found for %s\n' % (repr(selector))) + return + kindselect = kindselect.groupdict() + kindselect['*'] = kindselect['kind2'] + del kindselect['kind2'] + for k in list(kindselect.keys()): + if not kindselect[k]: + del kindselect[k] + for k, i in list(kindselect.items()): + kindselect[k] = rmbadname1(i) + elif typespec == 'character': + charselect = charselector.match(selector) + if not charselect: + outmess( + 'cracktypespec: no charselector pattern found for %s\n' % (repr(selector))) + return + charselect = charselect.groupdict() + charselect['*'] = charselect['charlen'] + del charselect['charlen'] + if charselect['lenkind']: + lenkind = lenkindpattern.match( + markoutercomma(charselect['lenkind'])) + lenkind = lenkind.groupdict() + for lk in ['len', 'kind']: + if lenkind[lk + '2']: + lenkind[lk] = lenkind[lk + '2'] + charselect[lk] = lenkind[lk] + del lenkind[lk + '2'] + del charselect['lenkind'] + for k in list(charselect.keys()): + if not charselect[k]: + del charselect[k] + for k, i in list(charselect.items()): + charselect[k] = rmbadname1(i) + elif typespec == 'type': + typename = re.match(r'\s*\(\s*(?P\w+)\s*\)', selector, re.I) + if typename: + typename = typename.group('name') + else: + outmess('cracktypespec: no typename found in %s\n' % + (repr(typespec + selector))) + else: + outmess('cracktypespec: no selector used for %s\n' % + (repr(selector))) + return kindselect, charselect, typename +###### + + +def setattrspec(decl, attr, force=0): + if not decl: + decl = {} + if not attr: + return decl + if 'attrspec' not in decl: + decl['attrspec'] = [attr] + return decl + if force: + decl['attrspec'].append(attr) + if attr in decl['attrspec']: + return decl + if attr == 'static' and 'automatic' not in decl['attrspec']: + decl['attrspec'].append(attr) + elif attr == 'automatic' and 'static' not in decl['attrspec']: + decl['attrspec'].append(attr) + elif attr == 'public': + if 'private' not in decl['attrspec']: + decl['attrspec'].append(attr) + elif attr == 'private': + if 'public' not in decl['attrspec']: + decl['attrspec'].append(attr) + else: + decl['attrspec'].append(attr) + return decl + + +def 
setkindselector(decl, sel, force=0): + if not decl: + decl = {} + if not sel: + return decl + if 'kindselector' not in decl: + decl['kindselector'] = sel + return decl + for k in list(sel.keys()): + if force or k not in decl['kindselector']: + decl['kindselector'][k] = sel[k] + return decl + + +def setcharselector(decl, sel, force=0): + if not decl: + decl = {} + if not sel: + return decl + if 'charselector' not in decl: + decl['charselector'] = sel + return decl + for k in list(sel.keys()): + if force or k not in decl['charselector']: + decl['charselector'][k] = sel[k] + return decl + + +def getblockname(block, unknown='unknown'): + if 'name' in block: + return block['name'] + return unknown + +# post processing + + +def setmesstext(block): + global filepositiontext + + try: + filepositiontext = 'In: %s:%s\n' % (block['from'], block['name']) + except Exception: + pass + + +def get_usedict(block): + usedict = {} + if 'parent_block' in block: + usedict = get_usedict(block['parent_block']) + if 'use' in block: + usedict.update(block['use']) + return usedict + + +def get_useparameters(block, param_map=None): + global f90modulevars + + if param_map is None: + param_map = {} + usedict = get_usedict(block) + if not usedict: + return param_map + for usename, mapping in list(usedict.items()): + usename = usename.lower() + if usename not in f90modulevars: + outmess('get_useparameters: no module %s info used by %s\n' % + (usename, block.get('name'))) + continue + mvars = f90modulevars[usename] + params = get_parameters(mvars) + if not params: + continue + # XXX: apply mapping + if mapping: + errmess('get_useparameters: mapping for %s not impl.' % (mapping)) + for k, v in list(params.items()): + if k in param_map: + outmess('get_useparameters: overriding parameter %s with' + ' value from module %s' % (repr(k), repr(usename))) + param_map[k] = v + + return param_map + + +def postcrack2(block, tab='', param_map=None): + global f90modulevars + + if not f90modulevars: + return block + if isinstance(block, list): + ret = [postcrack2(g, tab=tab + '\t', param_map=param_map) + for g in block] + return ret + setmesstext(block) + outmess('%sBlock: %s\n' % (tab, block['name']), 0) + + if param_map is None: + param_map = get_useparameters(block) + + if param_map is not None and 'vars' in block: + vars = block['vars'] + for n in list(vars.keys()): + var = vars[n] + if 'kindselector' in var: + kind = var['kindselector'] + if 'kind' in kind: + val = kind['kind'] + if val in param_map: + kind['kind'] = param_map[val] + new_body = [postcrack2(b, tab=tab + '\t', param_map=param_map) + for b in block['body']] + block['body'] = new_body + + return block + + +def postcrack(block, args=None, tab=''): + """ + TODO: + function return values + determine expression types if in argument list + """ + global usermodules, onlyfunctions + + if isinstance(block, list): + gret = [] + uret = [] + for g in block: + setmesstext(g) + g = postcrack(g, tab=tab + '\t') + # sort user routines to appear first + if 'name' in g and '__user__' in g['name']: + uret.append(g) + else: + gret.append(g) + return uret + gret + setmesstext(block) + if not isinstance(block, dict) and 'block' not in block: + raise Exception('postcrack: Expected block dictionary instead of ' + + str(block)) + if 'name' in block and not block['name'] == 'unknown_interface': + outmess('%sBlock: %s\n' % (tab, block['name']), 0) + block = analyzeargs(block) + block = analyzecommon(block) + block['vars'] = analyzevars(block) + block['sortvars'] = sortvarnames(block['vars']) + 
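# args, common blocks and variables are analyzed above; analyzebody()
+    # below recurses postcrack into every child block of 'body'
+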
if 'args' in block and block['args']: + args = block['args'] + block['body'] = analyzebody(block, args, tab=tab) + + userisdefined = [] + if 'use' in block: + useblock = block['use'] + for k in list(useblock.keys()): + if '__user__' in k: + userisdefined.append(k) + else: + useblock = {} + name = '' + if 'name' in block: + name = block['name'] + # and not userisdefined: # Build a __user__ module + if 'externals' in block and block['externals']: + interfaced = [] + if 'interfaced' in block: + interfaced = block['interfaced'] + mvars = copy.copy(block['vars']) + if name: + mname = name + '__user__routines' + else: + mname = 'unknown__user__routines' + if mname in userisdefined: + i = 1 + while '%s_%i' % (mname, i) in userisdefined: + i = i + 1 + mname = '%s_%i' % (mname, i) + interface = {'block': 'interface', 'body': [], + 'vars': {}, 'name': name + '_user_interface'} + for e in block['externals']: + if e in interfaced: + edef = [] + j = -1 + for b in block['body']: + j = j + 1 + if b['block'] == 'interface': + i = -1 + for bb in b['body']: + i = i + 1 + if 'name' in bb and bb['name'] == e: + edef = copy.copy(bb) + del b['body'][i] + break + if edef: + if not b['body']: + del block['body'][j] + del interfaced[interfaced.index(e)] + break + interface['body'].append(edef) + else: + if e in mvars and not isexternal(mvars[e]): + interface['vars'][e] = mvars[e] + if interface['vars'] or interface['body']: + block['interfaced'] = interfaced + mblock = {'block': 'python module', 'body': [ + interface], 'vars': {}, 'name': mname, 'interfaced': block['externals']} + useblock[mname] = {} + usermodules.append(mblock) + if useblock: + block['use'] = useblock + return block + + +def sortvarnames(vars): + indep = [] + dep = [] + for v in list(vars.keys()): + if 'depend' in vars[v] and vars[v]['depend']: + dep.append(v) + else: + indep.append(v) + n = len(dep) + i = 0 + while dep: # XXX: How to catch dependence cycles correctly? 
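+        # rotate-and-retry: if the head of `dep` depends on a later entry,
+        # move it to the back; a full pass with no progress (i > n) means
+        # the remaining names form a cycle and are appended as-is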
+ v = dep[0] + fl = 0 + for w in dep[1:]: + if w in vars[v]['depend']: + fl = 1 + break + if fl: + dep = dep[1:] + [v] + i = i + 1 + if i > n: + errmess('sortvarnames: failed to compute dependencies because' + ' of cyclic dependencies between ' + + ', '.join(dep) + '\n') + indep = indep + dep + break + else: + indep.append(v) + dep = dep[1:] + n = len(dep) + i = 0 + return indep + + +def analyzecommon(block): + if not hascommon(block): + return block + commonvars = [] + for k in list(block['common'].keys()): + comvars = [] + for e in block['common'][k]: + m = re.match( + r'\A\s*\b(?P.*?)\b\s*(\((?P.*?)\)|)\s*\Z', e, re.I) + if m: + dims = [] + if m.group('dims'): + dims = [x.strip() + for x in markoutercomma(m.group('dims')).split('@,@')] + n = rmbadname1(m.group('name').strip()) + if n in block['vars']: + if 'attrspec' in block['vars'][n]: + block['vars'][n]['attrspec'].append( + 'dimension(%s)' % (','.join(dims))) + else: + block['vars'][n]['attrspec'] = [ + 'dimension(%s)' % (','.join(dims))] + else: + if dims: + block['vars'][n] = { + 'attrspec': ['dimension(%s)' % (','.join(dims))]} + else: + block['vars'][n] = {} + if n not in commonvars: + commonvars.append(n) + else: + n = e + errmess( + 'analyzecommon: failed to extract "[()]" from "%s" in common /%s/.\n' % (e, k)) + comvars.append(n) + block['common'][k] = comvars + if 'commonvars' not in block: + block['commonvars'] = commonvars + else: + block['commonvars'] = block['commonvars'] + commonvars + return block + + +def analyzebody(block, args, tab=''): + global usermodules, skipfuncs, onlyfuncs, f90modulevars + + setmesstext(block) + body = [] + for b in block['body']: + b['parent_block'] = block + if b['block'] in ['function', 'subroutine']: + if args is not None and b['name'] not in args: + continue + else: + as_ = b['args'] + if b['name'] in skipfuncs: + continue + if onlyfuncs and b['name'] not in onlyfuncs: + continue + b['saved_interface'] = crack2fortrangen( + b, '\n' + ' ' * 6, as_interface=True) + + else: + as_ = args + b = postcrack(b, as_, tab=tab + '\t') + if b['block'] in ['interface', 'abstract interface'] and not b['body']: + if 'f2pyenhancements' not in b: + continue + if b['block'].replace(' ', '') == 'pythonmodule': + usermodules.append(b) + else: + if b['block'] == 'module': + f90modulevars[b['name']] = b['vars'] + body.append(b) + return body + + +def buildimplicitrules(block): + setmesstext(block) + implicitrules = defaultimplicitrules + attrrules = {} + if 'implicit' in block: + if block['implicit'] is None: + implicitrules = None + if verbose > 1: + outmess( + 'buildimplicitrules: no implicit rules for routine %s.\n' % repr(block['name'])) + else: + for k in list(block['implicit'].keys()): + if block['implicit'][k].get('typespec') not in ['static', 'automatic']: + implicitrules[k] = block['implicit'][k] + else: + attrrules[k] = block['implicit'][k]['typespec'] + return implicitrules, attrrules + + +def myeval(e, g=None, l=None): + """ Like `eval` but returns only integers and floats """ + r = eval(e, g, l) + if type(r) in [int, float]: + return r + raise ValueError('r=%r' % (r)) + +getlincoef_re_1 = re.compile(r'\A\b\w+\b\Z', re.I) + + +def getlincoef(e, xset): # e = a*x+b ; x in xset + """ + Obtain ``a`` and ``b`` when ``e == "a*x+b"``, where ``x`` is a symbol in + xset. 
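+
+    The coefficients are found numerically rather than symbolically: the
+    expression is evaluated with the symbol substituted by 0 and 1 (giving
+    ``b`` and then ``a``), and re-checked at 0.5 and 1.5 so that most
+    non-linear expressions are rejected.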
+ + >>> getlincoef('2*x + 1', {'x'}) + (2, 1, 'x') + >>> getlincoef('3*x + x*2 + 2 + 1', {'x'}) + (5, 3, 'x') + >>> getlincoef('0', {'x'}) + (0, 0, None) + >>> getlincoef('0*x', {'x'}) + (0, 0, 'x') + >>> getlincoef('x*x', {'x'}) + (None, None, None) + + This can be tricked by sufficiently complex expressions + + >>> getlincoef('(x - 0.5)*(x - 1.5)*(x - 1)*x + 2*x + 3', {'x'}) + (2.0, 3.0, 'x') + """ + try: + c = int(myeval(e, {}, {})) + return 0, c, None + except Exception: + pass + if getlincoef_re_1.match(e): + return 1, 0, e + len_e = len(e) + for x in xset: + if len(x) > len_e: + continue + if re.search(r'\w\s*\([^)]*\b' + x + r'\b', e): + # skip function calls having x as an argument, e.g max(1, x) + continue + re_1 = re.compile(r'(?P.*?)\b' + x + r'\b(?P.*)', re.I) + m = re_1.match(e) + if m: + try: + m1 = re_1.match(e) + while m1: + ee = '%s(%s)%s' % ( + m1.group('before'), 0, m1.group('after')) + m1 = re_1.match(ee) + b = myeval(ee, {}, {}) + m1 = re_1.match(e) + while m1: + ee = '%s(%s)%s' % ( + m1.group('before'), 1, m1.group('after')) + m1 = re_1.match(ee) + a = myeval(ee, {}, {}) - b + m1 = re_1.match(e) + while m1: + ee = '%s(%s)%s' % ( + m1.group('before'), 0.5, m1.group('after')) + m1 = re_1.match(ee) + c = myeval(ee, {}, {}) + # computing another point to be sure that expression is linear + m1 = re_1.match(e) + while m1: + ee = '%s(%s)%s' % ( + m1.group('before'), 1.5, m1.group('after')) + m1 = re_1.match(ee) + c2 = myeval(ee, {}, {}) + if (a * 0.5 + b == c and a * 1.5 + b == c2): + return a, b, x + except Exception: + pass + break + return None, None, None + +_varname_match = re.compile(r'\A[a-z]\w*\Z').match + + +def getarrlen(dl, args, star='*'): + """ + Parameters + ---------- + dl : sequence of two str objects + dimensions of the array + args : Iterable[str] + symbols used in the expression + star : Any + unused + + Returns + ------- + expr : str + Some numeric expression as a string + arg : Optional[str] + If understood, the argument from `args` present in `expr` + expr2 : Optional[str] + If understood, an expression fragment that should be used as + ``"(%s%s".format(something, expr2)``. 
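+
+    The length is essentially ``dl[1] - dl[0] + 1``.  ``getlincoef`` is
+    used to simplify bounds that are linear in one of the symbols from
+    `args`; the returned ``expr2`` fragment encodes the inverse of that
+    mapping.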
+ + Examples + -------- + >>> getarrlen(['10*x + 20', '40*x'], {'x'}) + ('30 * x - 19', 'x', '+19)/(30)') + >>> getarrlen(['1', '10*x + 20'], {'x'}) + ('10 * x + 20', 'x', '-20)/(10)') + >>> getarrlen(['10*x + 20', '1'], {'x'}) + ('-10 * x - 18', 'x', '+18)/(-10)') + >>> getarrlen(['20', '1'], {'x'}) + ('-18', None, None) + """ + edl = [] + try: + edl.append(myeval(dl[0], {}, {})) + except Exception: + edl.append(dl[0]) + try: + edl.append(myeval(dl[1], {}, {})) + except Exception: + edl.append(dl[1]) + if isinstance(edl[0], int): + p1 = 1 - edl[0] + if p1 == 0: + d = str(dl[1]) + elif p1 < 0: + d = '%s-%s' % (dl[1], -p1) + else: + d = '%s+%s' % (dl[1], p1) + elif isinstance(edl[1], int): + p1 = 1 + edl[1] + if p1 == 0: + d = '-(%s)' % (dl[0]) + else: + d = '%s-(%s)' % (p1, dl[0]) + else: + d = '%s-(%s)+1' % (dl[1], dl[0]) + try: + return repr(myeval(d, {}, {})), None, None + except Exception: + pass + d1, d2 = getlincoef(dl[0], args), getlincoef(dl[1], args) + if None not in [d1[0], d2[0]]: + if (d1[0], d2[0]) == (0, 0): + return repr(d2[1] - d1[1] + 1), None, None + b = d2[1] - d1[1] + 1 + d1 = (d1[0], 0, d1[2]) + d2 = (d2[0], b, d2[2]) + if d1[0] == 0 and d2[2] in args: + if b < 0: + return '%s * %s - %s' % (d2[0], d2[2], -b), d2[2], '+%s)/(%s)' % (-b, d2[0]) + elif b: + return '%s * %s + %s' % (d2[0], d2[2], b), d2[2], '-%s)/(%s)' % (b, d2[0]) + else: + return '%s * %s' % (d2[0], d2[2]), d2[2], ')/(%s)' % (d2[0]) + if d2[0] == 0 and d1[2] in args: + + if b < 0: + return '%s * %s - %s' % (-d1[0], d1[2], -b), d1[2], '+%s)/(%s)' % (-b, -d1[0]) + elif b: + return '%s * %s + %s' % (-d1[0], d1[2], b), d1[2], '-%s)/(%s)' % (b, -d1[0]) + else: + return '%s * %s' % (-d1[0], d1[2]), d1[2], ')/(%s)' % (-d1[0]) + if d1[2] == d2[2] and d1[2] in args: + a = d2[0] - d1[0] + if not a: + return repr(b), None, None + if b < 0: + return '%s * %s - %s' % (a, d1[2], -b), d2[2], '+%s)/(%s)' % (-b, a) + elif b: + return '%s * %s + %s' % (a, d1[2], b), d2[2], '-%s)/(%s)' % (b, a) + else: + return '%s * %s' % (a, d1[2]), d2[2], ')/(%s)' % (a) + if d1[0] == d2[0] == 1: + c = str(d1[2]) + if c not in args: + if _varname_match(c): + outmess('\tgetarrlen:variable "%s" undefined\n' % (c)) + c = '(%s)' % c + if b == 0: + d = '%s-%s' % (d2[2], c) + elif b < 0: + d = '%s-%s-%s' % (d2[2], c, -b) + else: + d = '%s-%s+%s' % (d2[2], c, b) + elif d1[0] == 0: + c2 = str(d2[2]) + if c2 not in args: + if _varname_match(c2): + outmess('\tgetarrlen:variable "%s" undefined\n' % (c2)) + c2 = '(%s)' % c2 + if d2[0] == 1: + pass + elif d2[0] == -1: + c2 = '-%s' % c2 + else: + c2 = '%s*%s' % (d2[0], c2) + + if b == 0: + d = c2 + elif b < 0: + d = '%s-%s' % (c2, -b) + else: + d = '%s+%s' % (c2, b) + elif d2[0] == 0: + c1 = str(d1[2]) + if c1 not in args: + if _varname_match(c1): + outmess('\tgetarrlen:variable "%s" undefined\n' % (c1)) + c1 = '(%s)' % c1 + if d1[0] == 1: + c1 = '-%s' % c1 + elif d1[0] == -1: + c1 = '+%s' % c1 + elif d1[0] < 0: + c1 = '+%s*%s' % (-d1[0], c1) + else: + c1 = '-%s*%s' % (d1[0], c1) + + if b == 0: + d = c1 + elif b < 0: + d = '%s-%s' % (c1, -b) + else: + d = '%s+%s' % (c1, b) + else: + c1 = str(d1[2]) + if c1 not in args: + if _varname_match(c1): + outmess('\tgetarrlen:variable "%s" undefined\n' % (c1)) + c1 = '(%s)' % c1 + if d1[0] == 1: + c1 = '-%s' % c1 + elif d1[0] == -1: + c1 = '+%s' % c1 + elif d1[0] < 0: + c1 = '+%s*%s' % (-d1[0], c1) + else: + c1 = '-%s*%s' % (d1[0], c1) + + c2 = str(d2[2]) + if c2 not in args: + if _varname_match(c2): + outmess('\tgetarrlen:variable "%s" undefined\n' % (c2)) + c2 
= '(%s)' % c2 + if d2[0] == 1: + pass + elif d2[0] == -1: + c2 = '-%s' % c2 + else: + c2 = '%s*%s' % (d2[0], c2) + + if b == 0: + d = '%s%s' % (c2, c1) + elif b < 0: + d = '%s%s-%s' % (c2, c1, -b) + else: + d = '%s%s+%s' % (c2, c1, b) + return d, None, None + +word_pattern = re.compile(r'\b[a-z][\w$]*\b', re.I) + + +def _get_depend_dict(name, vars, deps): + if name in vars: + words = vars[name].get('depend', []) + + if '=' in vars[name] and not isstring(vars[name]): + for word in word_pattern.findall(vars[name]['=']): + if word not in words and word in vars: + words.append(word) + for word in words[:]: + for w in deps.get(word, []) \ + or _get_depend_dict(word, vars, deps): + if w not in words: + words.append(w) + else: + outmess('_get_depend_dict: no dependence info for %s\n' % (repr(name))) + words = [] + deps[name] = words + return words + + +def _calc_depend_dict(vars): + names = list(vars.keys()) + depend_dict = {} + for n in names: + _get_depend_dict(n, vars, depend_dict) + return depend_dict + + +def get_sorted_names(vars): + """ + """ + depend_dict = _calc_depend_dict(vars) + names = [] + for name in list(depend_dict.keys()): + if not depend_dict[name]: + names.append(name) + del depend_dict[name] + while depend_dict: + for name, lst in list(depend_dict.items()): + new_lst = [n for n in lst if n in depend_dict] + if not new_lst: + names.append(name) + del depend_dict[name] + else: + depend_dict[name] = new_lst + return [name for name in names if name in vars] + + +def _kind_func(string): + # XXX: return something sensible. + if string[0] in "'\"": + string = string[1:-1] + if real16pattern.match(string): + return 8 + elif real8pattern.match(string): + return 4 + return 'kind(' + string + ')' + + +def _selected_int_kind_func(r): + # XXX: This should be processor dependent + m = 10 ** r + if m <= 2 ** 8: + return 1 + if m <= 2 ** 16: + return 2 + if m <= 2 ** 32: + return 4 + if m <= 2 ** 63: + return 8 + if m <= 2 ** 128: + return 16 + return -1 + + +def _selected_real_kind_func(p, r=0, radix=0): + # XXX: This should be processor dependent + # This is only good for 0 <= p <= 20 + if p < 7: + return 4 + if p < 16: + return 8 + machine = platform.machine().lower() + if machine.startswith(('aarch64', 'power', 'ppc', 'riscv', 's390x', 'sparc')): + if p <= 20: + return 16 + else: + if p < 19: + return 10 + elif p <= 20: + return 16 + return -1 + + +def get_parameters(vars, global_params={}): + params = copy.copy(global_params) + g_params = copy.copy(global_params) + for name, func in [('kind', _kind_func), + ('selected_int_kind', _selected_int_kind_func), + ('selected_real_kind', _selected_real_kind_func), ]: + if name not in g_params: + g_params[name] = func + param_names = [] + for n in get_sorted_names(vars): + if 'attrspec' in vars[n] and 'parameter' in vars[n]['attrspec']: + param_names.append(n) + kind_re = re.compile(r'\bkind\s*\(\s*(?P.*)\s*\)', re.I) + selected_int_kind_re = re.compile( + r'\bselected_int_kind\s*\(\s*(?P.*)\s*\)', re.I) + selected_kind_re = re.compile( + r'\bselected_(int|real)_kind\s*\(\s*(?P.*)\s*\)', re.I) + for n in param_names: + if '=' in vars[n]: + v = vars[n]['='] + if islogical(vars[n]): + v = v.lower() + for repl in [ + ('.false.', 'False'), + ('.true.', 'True'), + # TODO: test .eq., .neq., etc replacements. + ]: + v = v.replace(*repl) + v = kind_re.sub(r'kind("\1")', v) + v = selected_int_kind_re.sub(r'selected_int_kind(\1)', v) + + # We need to act according to the data. 
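+            # (e.g. a hypothetical parameter value "1.0_dp" carries a
+            # "_dp" kind suffix that plain eval() cannot digest)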
+ # The easy case is if the data has a kind-specifier, + # then we may easily remove those specifiers. + # However, it may be that the user uses other specifiers...(!) + is_replaced = False + if 'kindselector' in vars[n]: + if 'kind' in vars[n]['kindselector']: + orig_v_len = len(v) + v = v.replace('_' + vars[n]['kindselector']['kind'], '') + # Again, this will be true if even a single specifier + # has been replaced, see comment above. + is_replaced = len(v) < orig_v_len + + if not is_replaced: + if not selected_kind_re.match(v): + v_ = v.split('_') + # In case there are additive parameters + if len(v_) > 1: + v = ''.join(v_[:-1]).lower().replace(v_[-1].lower(), '') + + # Currently this will not work for complex numbers. + # There is missing code for extracting a complex number, + # which may be defined in either of these: + # a) (Re, Im) + # b) cmplx(Re, Im) + # c) dcmplx(Re, Im) + # d) cmplx(Re, Im, ) + + if isdouble(vars[n]): + tt = list(v) + for m in real16pattern.finditer(v): + tt[m.start():m.end()] = list( + v[m.start():m.end()].lower().replace('d', 'e')) + v = ''.join(tt) + + elif iscomplex(vars[n]): + # FIXME complex numbers may also have exponents + if v[0] == '(' and v[-1] == ')': + # FIXME, unused l looks like potential bug + l = markoutercomma(v[1:-1]).split('@,@') + + try: + params[n] = eval(v, g_params, params) + except Exception as msg: + params[n] = v + outmess('get_parameters: got "%s" on %s\n' % (msg, repr(v))) + if isstring(vars[n]) and isinstance(params[n], int): + params[n] = chr(params[n]) + nl = n.lower() + if nl != n: + params[nl] = params[n] + else: + print(vars[n]) + outmess( + 'get_parameters:parameter %s does not have value?!\n' % (repr(n))) + return params + + +def _eval_length(length, params): + if length in ['(:)', '(*)', '*']: + return '(*)' + return _eval_scalar(length, params) + +_is_kind_number = re.compile(r'\d+_').match + + +def _eval_scalar(value, params): + if _is_kind_number(value): + value = value.split('_')[0] + try: + value = str(eval(value, {}, params)) + except (NameError, SyntaxError, TypeError): + return value + except Exception as msg: + errmess('"%s" in evaluating %r ' + '(available names: %s)\n' + % (msg, value, list(params.keys()))) + return value + + +def analyzevars(block): + global f90modulevars + + setmesstext(block) + implicitrules, attrrules = buildimplicitrules(block) + vars = copy.copy(block['vars']) + if block['block'] == 'function' and block['name'] not in vars: + vars[block['name']] = {} + if '' in block['vars']: + del vars[''] + if 'attrspec' in block['vars']['']: + gen = block['vars']['']['attrspec'] + for n in list(vars.keys()): + for k in ['public', 'private']: + if k in gen: + vars[n] = setattrspec(vars[n], k) + svars = [] + args = block['args'] + for a in args: + try: + vars[a] + svars.append(a) + except KeyError: + pass + for n in list(vars.keys()): + if n not in args: + svars.append(n) + + params = get_parameters(vars, get_useparameters(block)) + + dep_matches = {} + name_match = re.compile(r'[A-Za-z][\w$]*').match + for v in list(vars.keys()): + m = name_match(v) + if m: + n = v[m.start():m.end()] + try: + dep_matches[n] + except KeyError: + dep_matches[n] = re.compile(r'.*\b%s\b' % (v), re.I).match + for n in svars: + if n[0] in list(attrrules.keys()): + vars[n] = setattrspec(vars[n], attrrules[n[0]]) + if 'typespec' not in vars[n]: + if not('attrspec' in vars[n] and 'external' in vars[n]['attrspec']): + if implicitrules: + ln0 = n[0].lower() + for k in list(implicitrules[ln0].keys()): + if k == 'typespec' and 
implicitrules[ln0][k] == 'undefined': + continue + if k not in vars[n]: + vars[n][k] = implicitrules[ln0][k] + elif k == 'attrspec': + for l in implicitrules[ln0][k]: + vars[n] = setattrspec(vars[n], l) + elif n in block['args']: + outmess('analyzevars: typespec of variable %s is not defined in routine %s.\n' % ( + repr(n), block['name'])) + + if 'charselector' in vars[n]: + if 'len' in vars[n]['charselector']: + l = vars[n]['charselector']['len'] + try: + l = str(eval(l, {}, params)) + except Exception: + pass + vars[n]['charselector']['len'] = l + + if 'kindselector' in vars[n]: + if 'kind' in vars[n]['kindselector']: + l = vars[n]['kindselector']['kind'] + try: + l = str(eval(l, {}, params)) + except Exception: + pass + vars[n]['kindselector']['kind'] = l + + savelindims = {} + if 'attrspec' in vars[n]: + attr = vars[n]['attrspec'] + attr.reverse() + vars[n]['attrspec'] = [] + dim, intent, depend, check, note = None, None, None, None, None + for a in attr: + if a[:9] == 'dimension': + dim = (a[9:].strip())[1:-1] + elif a[:6] == 'intent': + intent = (a[6:].strip())[1:-1] + elif a[:6] == 'depend': + depend = (a[6:].strip())[1:-1] + elif a[:5] == 'check': + check = (a[5:].strip())[1:-1] + elif a[:4] == 'note': + note = (a[4:].strip())[1:-1] + else: + vars[n] = setattrspec(vars[n], a) + if intent: + if 'intent' not in vars[n]: + vars[n]['intent'] = [] + for c in [x.strip() for x in markoutercomma(intent).split('@,@')]: + # Remove spaces so that 'in out' becomes 'inout' + tmp = c.replace(' ', '') + if tmp not in vars[n]['intent']: + vars[n]['intent'].append(tmp) + intent = None + if note: + note = note.replace('\\n\\n', '\n\n') + note = note.replace('\\n ', '\n') + if 'note' not in vars[n]: + vars[n]['note'] = [note] + else: + vars[n]['note'].append(note) + note = None + if depend is not None: + if 'depend' not in vars[n]: + vars[n]['depend'] = [] + for c in rmbadname([x.strip() for x in markoutercomma(depend).split('@,@')]): + if c not in vars[n]['depend']: + vars[n]['depend'].append(c) + depend = None + if check is not None: + if 'check' not in vars[n]: + vars[n]['check'] = [] + for c in [x.strip() for x in markoutercomma(check).split('@,@')]: + if c not in vars[n]['check']: + vars[n]['check'].append(c) + check = None + if dim and 'dimension' not in vars[n]: + vars[n]['dimension'] = [] + for d in rmbadname([x.strip() for x in markoutercomma(dim).split('@,@')]): + star = '*' + if d == ':': + star = ':' + if d in params: + d = str(params[d]) + for p in list(params.keys()): + re_1 = re.compile(r'(?P.*?)\b' + p + r'\b(?P.*)', re.I) + m = re_1.match(d) + while m: + d = m.group('before') + \ + str(params[p]) + m.group('after') + m = re_1.match(d) + if d == star: + dl = [star] + else: + dl = markoutercomma(d, ':').split('@:@') + if len(dl) == 2 and '*' in dl: # e.g. 
dimension(5:*) + dl = ['*'] + d = '*' + if len(dl) == 1 and not dl[0] == star: + dl = ['1', dl[0]] + if len(dl) == 2: + d, v, di = getarrlen(dl, list(block['vars'].keys())) + if d[:4] == '1 * ': + d = d[4:] + if di and di[-4:] == '/(1)': + di = di[:-4] + if v: + savelindims[d] = v, di + vars[n]['dimension'].append(d) + if 'dimension' in vars[n]: + if isintent_c(vars[n]): + shape_macro = 'shape' + else: + shape_macro = 'shape' # 'fshape' + if isstringarray(vars[n]): + if 'charselector' in vars[n]: + d = vars[n]['charselector'] + if '*' in d: + d = d['*'] + errmess('analyzevars: character array "character*%s %s(%s)" is considered as "character %s(%s)"; "intent(c)" is forced.\n' + % (d, n, + ','.join(vars[n]['dimension']), + n, ','.join(vars[n]['dimension'] + [d]))) + vars[n]['dimension'].append(d) + del vars[n]['charselector'] + if 'intent' not in vars[n]: + vars[n]['intent'] = [] + if 'c' not in vars[n]['intent']: + vars[n]['intent'].append('c') + else: + errmess( + "analyzevars: charselector=%r unhandled." % (d)) + if 'check' not in vars[n] and 'args' in block and n in block['args']: + flag = 'depend' not in vars[n] + if flag: + vars[n]['depend'] = [] + vars[n]['check'] = [] + if 'dimension' in vars[n]: + #/----< no check + i = -1 + ni = len(vars[n]['dimension']) + for d in vars[n]['dimension']: + ddeps = [] # dependencies of 'd' + ad = '' + pd = '' + if d not in vars: + if d in savelindims: + pd, ad = '(', savelindims[d][1] + d = savelindims[d][0] + else: + for r in block['args']: + if r not in vars: + continue + if re.match(r'.*?\b' + r + r'\b', d, re.I): + ddeps.append(r) + if d in vars: + if 'attrspec' in vars[d]: + for aa in vars[d]['attrspec']: + if aa[:6] == 'depend': + ddeps += aa[6:].strip()[1:-1].split(',') + if 'depend' in vars[d]: + ddeps = ddeps + vars[d]['depend'] + i = i + 1 + if d in vars and ('depend' not in vars[d]) \ + and ('=' not in vars[d]) and (d not in vars[n]['depend']) \ + and l_or(isintent_in, isintent_inout, isintent_inplace)(vars[n]): + vars[d]['depend'] = [n] + if ni > 1: + vars[d]['='] = '%s%s(%s,%s)%s' % ( + pd, shape_macro, n, i, ad) + else: + vars[d]['='] = '%slen(%s)%s' % (pd, n, ad) + # /---< no check + if 1 and 'check' not in vars[d]: + if ni > 1: + vars[d]['check'] = ['%s%s(%s,%i)%s==%s' + % (pd, shape_macro, n, i, ad, d)] + else: + vars[d]['check'] = [ + '%slen(%s)%s>=%s' % (pd, n, ad, d)] + if 'attrspec' not in vars[d]: + vars[d]['attrspec'] = ['optional'] + if ('optional' not in vars[d]['attrspec']) and\ + ('required' not in vars[d]['attrspec']): + vars[d]['attrspec'].append('optional') + elif d not in ['*', ':']: + #/----< no check + if flag: + if d in vars: + if n not in ddeps: + vars[n]['depend'].append(d) + else: + vars[n]['depend'] = vars[n]['depend'] + ddeps + elif isstring(vars[n]): + length = '1' + if 'charselector' in vars[n]: + if '*' in vars[n]['charselector']: + length = _eval_length(vars[n]['charselector']['*'], + params) + vars[n]['charselector']['*'] = length + elif 'len' in vars[n]['charselector']: + length = _eval_length(vars[n]['charselector']['len'], + params) + del vars[n]['charselector']['len'] + vars[n]['charselector']['*'] = length + + if not vars[n]['check']: + del vars[n]['check'] + if flag and not vars[n]['depend']: + del vars[n]['depend'] + if '=' in vars[n]: + if 'attrspec' not in vars[n]: + vars[n]['attrspec'] = [] + if ('optional' not in vars[n]['attrspec']) and \ + ('required' not in vars[n]['attrspec']): + vars[n]['attrspec'].append('optional') + if 'depend' not in vars[n]: + vars[n]['depend'] = [] + for v, m in 
list(dep_matches.items()): + if m(vars[n]['=']): + vars[n]['depend'].append(v) + if not vars[n]['depend']: + del vars[n]['depend'] + if isscalar(vars[n]): + vars[n]['='] = _eval_scalar(vars[n]['='], params) + + for n in list(vars.keys()): + if n == block['name']: # n is block name + if 'note' in vars[n]: + block['note'] = vars[n]['note'] + if block['block'] == 'function': + if 'result' in block and block['result'] in vars: + vars[n] = appenddecl(vars[n], vars[block['result']]) + if 'prefix' in block: + pr = block['prefix'] + ispure = 0 + isrec = 1 + pr1 = pr.replace('pure', '') + ispure = (not pr == pr1) + pr = pr1.replace('recursive', '') + isrec = (not pr == pr1) + m = typespattern[0].match(pr) + if m: + typespec, selector, attr, edecl = cracktypespec0( + m.group('this'), m.group('after')) + kindselect, charselect, typename = cracktypespec( + typespec, selector) + vars[n]['typespec'] = typespec + if kindselect: + if 'kind' in kindselect: + try: + kindselect['kind'] = eval( + kindselect['kind'], {}, params) + except Exception: + pass + vars[n]['kindselector'] = kindselect + if charselect: + vars[n]['charselector'] = charselect + if typename: + vars[n]['typename'] = typename + if ispure: + vars[n] = setattrspec(vars[n], 'pure') + if isrec: + vars[n] = setattrspec(vars[n], 'recursive') + else: + outmess( + 'analyzevars: prefix (%s) were not used\n' % repr(block['prefix'])) + if not block['block'] in ['module', 'pythonmodule', 'python module', 'block data']: + if 'commonvars' in block: + neededvars = copy.copy(block['args'] + block['commonvars']) + else: + neededvars = copy.copy(block['args']) + for n in list(vars.keys()): + if l_or(isintent_callback, isintent_aux)(vars[n]): + neededvars.append(n) + if 'entry' in block: + neededvars.extend(list(block['entry'].keys())) + for k in list(block['entry'].keys()): + for n in block['entry'][k]: + if n not in neededvars: + neededvars.append(n) + if block['block'] == 'function': + if 'result' in block: + neededvars.append(block['result']) + else: + neededvars.append(block['name']) + if block['block'] in ['subroutine', 'function']: + name = block['name'] + if name in vars and 'intent' in vars[name]: + block['intent'] = vars[name]['intent'] + if block['block'] == 'type': + neededvars.extend(list(vars.keys())) + for n in list(vars.keys()): + if n not in neededvars: + del vars[n] + return vars + +analyzeargs_re_1 = re.compile(r'\A[a-z]+[\w$]*\Z', re.I) + + +def expr2name(a, block, args=[]): + orig_a = a + a_is_expr = not analyzeargs_re_1.match(a) + if a_is_expr: # `a` is an expression + implicitrules, attrrules = buildimplicitrules(block) + at = determineexprtype(a, block['vars'], implicitrules) + na = 'e_' + for c in a: + c = c.lower() + if c not in string.ascii_lowercase + string.digits: + c = '_' + na = na + c + if na[-1] == '_': + na = na + 'e' + else: + na = na + '_e' + a = na + while a in block['vars'] or a in block['args']: + a = a + 'r' + if a in args: + k = 1 + while a + str(k) in args: + k = k + 1 + a = a + str(k) + if a_is_expr: + block['vars'][a] = at + else: + if a not in block['vars']: + if orig_a in block['vars']: + block['vars'][a] = block['vars'][orig_a] + else: + block['vars'][a] = {} + if 'externals' in block and orig_a in block['externals'] + block['interfaced']: + block['vars'][a] = setattrspec(block['vars'][a], 'external') + return a + + +def analyzeargs(block): + setmesstext(block) + implicitrules, attrrules = buildimplicitrules(block) + if 'args' not in block: + block['args'] = [] + args = [] + for a in block['args']: + a = 
expr2name(a, block, args) + args.append(a) + block['args'] = args + if 'entry' in block: + for k, args1 in list(block['entry'].items()): + for a in args1: + if a not in block['vars']: + block['vars'][a] = {} + + for b in block['body']: + if b['name'] in args: + if 'externals' not in block: + block['externals'] = [] + if b['name'] not in block['externals']: + block['externals'].append(b['name']) + if 'result' in block and block['result'] not in block['vars']: + block['vars'][block['result']] = {} + return block + +determineexprtype_re_1 = re.compile(r'\A\(.+?,.+?\)\Z', re.I) +determineexprtype_re_2 = re.compile(r'\A[+-]?\d+(_(?P\w+)|)\Z', re.I) +determineexprtype_re_3 = re.compile( + r'\A[+-]?[\d.]+[-\d+de.]*(_(?P\w+)|)\Z', re.I) +determineexprtype_re_4 = re.compile(r'\A\(.*\)\Z', re.I) +determineexprtype_re_5 = re.compile(r'\A(?P\w+)\s*\(.*?\)\s*\Z', re.I) + + +def _ensure_exprdict(r): + if isinstance(r, int): + return {'typespec': 'integer'} + if isinstance(r, float): + return {'typespec': 'real'} + if isinstance(r, complex): + return {'typespec': 'complex'} + if isinstance(r, dict): + return r + raise AssertionError(repr(r)) + + +def determineexprtype(expr, vars, rules={}): + if expr in vars: + return _ensure_exprdict(vars[expr]) + expr = expr.strip() + if determineexprtype_re_1.match(expr): + return {'typespec': 'complex'} + m = determineexprtype_re_2.match(expr) + if m: + if 'name' in m.groupdict() and m.group('name'): + outmess( + 'determineexprtype: selected kind types not supported (%s)\n' % repr(expr)) + return {'typespec': 'integer'} + m = determineexprtype_re_3.match(expr) + if m: + if 'name' in m.groupdict() and m.group('name'): + outmess( + 'determineexprtype: selected kind types not supported (%s)\n' % repr(expr)) + return {'typespec': 'real'} + for op in ['+', '-', '*', '/']: + for e in [x.strip() for x in markoutercomma(expr, comma=op).split('@' + op + '@')]: + if e in vars: + return _ensure_exprdict(vars[e]) + t = {} + if determineexprtype_re_4.match(expr): # in parenthesis + t = determineexprtype(expr[1:-1], vars, rules) + else: + m = determineexprtype_re_5.match(expr) + if m: + rn = m.group('name') + t = determineexprtype(m.group('name'), vars, rules) + if t and 'attrspec' in t: + del t['attrspec'] + if not t: + if rn[0] in rules: + return _ensure_exprdict(rules[rn[0]]) + if expr[0] in '\'"': + return {'typespec': 'character', 'charselector': {'*': '*'}} + if not t: + outmess( + 'determineexprtype: could not determine expressions (%s) type.\n' % (repr(expr))) + return t + +###### + + +def crack2fortrangen(block, tab='\n', as_interface=False): + global skipfuncs, onlyfuncs + + setmesstext(block) + ret = '' + if isinstance(block, list): + for g in block: + if g and g['block'] in ['function', 'subroutine']: + if g['name'] in skipfuncs: + continue + if onlyfuncs and g['name'] not in onlyfuncs: + continue + ret = ret + crack2fortrangen(g, tab, as_interface=as_interface) + return ret + prefix = '' + name = '' + args = '' + blocktype = block['block'] + if blocktype == 'program': + return '' + argsl = [] + if 'name' in block: + name = block['name'] + if 'args' in block: + vars = block['vars'] + for a in block['args']: + a = expr2name(a, block, argsl) + if not isintent_callback(vars[a]): + argsl.append(a) + if block['block'] == 'function' or argsl: + args = '(%s)' % ','.join(argsl) + f2pyenhancements = '' + if 'f2pyenhancements' in block: + for k in list(block['f2pyenhancements'].keys()): + f2pyenhancements = '%s%s%s %s' % ( + f2pyenhancements, tab + tabchar, k, 
block['f2pyenhancements'][k]) + intent_lst = block.get('intent', [])[:] + if blocktype == 'function' and 'callback' in intent_lst: + intent_lst.remove('callback') + if intent_lst: + f2pyenhancements = '%s%sintent(%s) %s' %\ + (f2pyenhancements, tab + tabchar, + ','.join(intent_lst), name) + use = '' + if 'use' in block: + use = use2fortran(block['use'], tab + tabchar) + common = '' + if 'common' in block: + common = common2fortran(block['common'], tab + tabchar) + if name == 'unknown_interface': + name = '' + result = '' + if 'result' in block: + result = ' result (%s)' % block['result'] + if block['result'] not in argsl: + argsl.append(block['result']) + body = crack2fortrangen(block['body'], tab + tabchar, as_interface=as_interface) + vars = vars2fortran( + block, block['vars'], argsl, tab + tabchar, as_interface=as_interface) + mess = '' + if 'from' in block and not as_interface: + mess = '! in %s' % block['from'] + if 'entry' in block: + entry_stmts = '' + for k, i in list(block['entry'].items()): + entry_stmts = '%s%sentry %s(%s)' \ + % (entry_stmts, tab + tabchar, k, ','.join(i)) + body = body + entry_stmts + if blocktype == 'block data' and name == '_BLOCK_DATA_': + name = '' + ret = '%s%s%s %s%s%s %s%s%s%s%s%s%send %s %s' % ( + tab, prefix, blocktype, name, args, result, mess, f2pyenhancements, use, vars, common, body, tab, blocktype, name) + return ret + + +def common2fortran(common, tab=''): + ret = '' + for k in list(common.keys()): + if k == '_BLNK_': + ret = '%s%scommon %s' % (ret, tab, ','.join(common[k])) + else: + ret = '%s%scommon /%s/ %s' % (ret, tab, k, ','.join(common[k])) + return ret + + +def use2fortran(use, tab=''): + ret = '' + for m in list(use.keys()): + ret = '%s%suse %s,' % (ret, tab, m) + if use[m] == {}: + if ret and ret[-1] == ',': + ret = ret[:-1] + continue + if 'only' in use[m] and use[m]['only']: + ret = '%s only:' % (ret) + if 'map' in use[m] and use[m]['map']: + c = ' ' + for k in list(use[m]['map'].keys()): + if k == use[m]['map'][k]: + ret = '%s%s%s' % (ret, c, k) + c = ',' + else: + ret = '%s%s%s=>%s' % (ret, c, k, use[m]['map'][k]) + c = ',' + if ret and ret[-1] == ',': + ret = ret[:-1] + return ret + + +def true_intent_list(var): + lst = var['intent'] + ret = [] + for intent in lst: + try: + f = globals()['isintent_%s' % intent] + except KeyError: + pass + else: + if f(var): + ret.append(intent) + return ret + + +def vars2fortran(block, vars, args, tab='', as_interface=False): + """ + TODO: + public sub + ... 
+ """ + setmesstext(block) + ret = '' + nout = [] + for a in args: + if a in block['vars']: + nout.append(a) + if 'commonvars' in block: + for a in block['commonvars']: + if a in vars: + if a not in nout: + nout.append(a) + else: + errmess( + 'vars2fortran: Confused?!: "%s" is not defined in vars.\n' % a) + if 'varnames' in block: + nout.extend(block['varnames']) + if not as_interface: + for a in list(vars.keys()): + if a not in nout: + nout.append(a) + for a in nout: + if 'depend' in vars[a]: + for d in vars[a]['depend']: + if d in vars and 'depend' in vars[d] and a in vars[d]['depend']: + errmess( + 'vars2fortran: Warning: cross-dependence between variables "%s" and "%s"\n' % (a, d)) + if 'externals' in block and a in block['externals']: + if isintent_callback(vars[a]): + ret = '%s%sintent(callback) %s' % (ret, tab, a) + ret = '%s%sexternal %s' % (ret, tab, a) + if isoptional(vars[a]): + ret = '%s%soptional %s' % (ret, tab, a) + if a in vars and 'typespec' not in vars[a]: + continue + cont = 1 + for b in block['body']: + if a == b['name'] and b['block'] == 'function': + cont = 0 + break + if cont: + continue + if a not in vars: + show(vars) + outmess('vars2fortran: No definition for argument "%s".\n' % a) + continue + if a == block['name']: + if block['block'] != 'function' or block.get('result'): + # 1) skip declaring a variable that name matches with + # subroutine name + # 2) skip declaring function when its type is + # declared via `result` construction + continue + if 'typespec' not in vars[a]: + if 'attrspec' in vars[a] and 'external' in vars[a]['attrspec']: + if a in args: + ret = '%s%sexternal %s' % (ret, tab, a) + continue + show(vars[a]) + outmess('vars2fortran: No typespec for argument "%s".\n' % a) + continue + vardef = vars[a]['typespec'] + if vardef == 'type' and 'typename' in vars[a]: + vardef = '%s(%s)' % (vardef, vars[a]['typename']) + selector = {} + if 'kindselector' in vars[a]: + selector = vars[a]['kindselector'] + elif 'charselector' in vars[a]: + selector = vars[a]['charselector'] + if '*' in selector: + if selector['*'] in ['*', ':']: + vardef = '%s*(%s)' % (vardef, selector['*']) + else: + vardef = '%s*%s' % (vardef, selector['*']) + else: + if 'len' in selector: + vardef = '%s(len=%s' % (vardef, selector['len']) + if 'kind' in selector: + vardef = '%s,kind=%s)' % (vardef, selector['kind']) + else: + vardef = '%s)' % (vardef) + elif 'kind' in selector: + vardef = '%s(kind=%s)' % (vardef, selector['kind']) + c = ' ' + if 'attrspec' in vars[a]: + attr = [l for l in vars[a]['attrspec'] + if l not in ['external']] + if attr: + vardef = '%s, %s' % (vardef, ','.join(attr)) + c = ',' + if 'dimension' in vars[a]: + vardef = '%s%sdimension(%s)' % ( + vardef, c, ','.join(vars[a]['dimension'])) + c = ',' + if 'intent' in vars[a]: + lst = true_intent_list(vars[a]) + if lst: + vardef = '%s%sintent(%s)' % (vardef, c, ','.join(lst)) + c = ',' + if 'check' in vars[a]: + vardef = '%s%scheck(%s)' % (vardef, c, ','.join(vars[a]['check'])) + c = ',' + if 'depend' in vars[a]: + vardef = '%s%sdepend(%s)' % ( + vardef, c, ','.join(vars[a]['depend'])) + c = ',' + if '=' in vars[a]: + v = vars[a]['='] + if vars[a]['typespec'] in ['complex', 'double complex']: + try: + v = eval(v) + v = '(%s,%s)' % (v.real, v.imag) + except Exception: + pass + vardef = '%s :: %s=%s' % (vardef, a, v) + else: + vardef = '%s :: %s' % (vardef, a) + ret = '%s%s%s' % (ret, tab, vardef) + return ret +###### + + +def crackfortran(files): + global usermodules + + outmess('Reading fortran codes...\n', 0) + 
readfortrancode(files, crackline) + outmess('Post-processing...\n', 0) + usermodules = [] + postlist = postcrack(grouplist[0]) + outmess('Post-processing (stage 2)...\n', 0) + postlist = postcrack2(postlist) + return usermodules + postlist + + +def crack2fortran(block): + global f2py_version + + pyf = crack2fortrangen(block) + '\n' + header = """! -*- f90 -*- +! Note: the context of this file is case sensitive. +""" + footer = """ +! This file was auto-generated with f2py (version:%s). +! See http://cens.ioc.ee/projects/f2py2e/ +""" % (f2py_version) + return header + pyf + footer + +if __name__ == "__main__": + files = [] + funcs = [] + f = 1 + f2 = 0 + f3 = 0 + showblocklist = 0 + for l in sys.argv[1:]: + if l == '': + pass + elif l[0] == ':': + f = 0 + elif l == '-quiet': + quiet = 1 + verbose = 0 + elif l == '-verbose': + verbose = 2 + quiet = 0 + elif l == '-fix': + if strictf77: + outmess( + 'Use option -f90 before -fix if Fortran 90 code is in fix form.\n', 0) + skipemptyends = 1 + sourcecodeform = 'fix' + elif l == '-skipemptyends': + skipemptyends = 1 + elif l == '--ignore-contains': + ignorecontains = 1 + elif l == '-f77': + strictf77 = 1 + sourcecodeform = 'fix' + elif l == '-f90': + strictf77 = 0 + sourcecodeform = 'free' + skipemptyends = 1 + elif l == '-h': + f2 = 1 + elif l == '-show': + showblocklist = 1 + elif l == '-m': + f3 = 1 + elif l[0] == '-': + errmess('Unknown option %s\n' % repr(l)) + elif f2: + f2 = 0 + pyffilename = l + elif f3: + f3 = 0 + f77modulename = l + elif f: + try: + open(l).close() + files.append(l) + except IOError as detail: + errmess('IOError: %s\n' % str(detail)) + else: + funcs.append(l) + if not strictf77 and f77modulename and not skipemptyends: + outmess("""\ + Warning: You have specified module name for non Fortran 77 code + that should not need one (expect if you are scanning F90 code + for non module blocks but then you should use flag -skipemptyends + and also be sure that the files do not contain programs without program statement). 
+""", 0) + + postlist = crackfortran(files) + if pyffilename: + outmess('Writing fortran code to file %s\n' % repr(pyffilename), 0) + pyf = crack2fortran(postlist) + with open(pyffilename, 'w') as f: + f.write(pyf) + if showblocklist: + show(postlist) diff --git a/MLPY/Lib/site-packages/numpy/f2py/diagnose.py b/MLPY/Lib/site-packages/numpy/f2py/diagnose.py new file mode 100644 index 0000000000000000000000000000000000000000..47b2a704d25dc55be5241ba0ca8a6a6225166df8 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/diagnose.py @@ -0,0 +1,154 @@ +#!/usr/bin/env python3 +import os +import sys +import tempfile + + +def run_command(cmd): + print('Running %r:' % (cmd)) + os.system(cmd) + print('------') + + +def run(): + _path = os.getcwd() + os.chdir(tempfile.gettempdir()) + print('------') + print('os.name=%r' % (os.name)) + print('------') + print('sys.platform=%r' % (sys.platform)) + print('------') + print('sys.version:') + print(sys.version) + print('------') + print('sys.prefix:') + print(sys.prefix) + print('------') + print('sys.path=%r' % (':'.join(sys.path))) + print('------') + + try: + import numpy + has_newnumpy = 1 + except ImportError: + print('Failed to import new numpy:', sys.exc_info()[1]) + has_newnumpy = 0 + + try: + from numpy.f2py import f2py2e + has_f2py2e = 1 + except ImportError: + print('Failed to import f2py2e:', sys.exc_info()[1]) + has_f2py2e = 0 + + try: + import numpy.distutils + has_numpy_distutils = 2 + except ImportError: + try: + import numpy_distutils + has_numpy_distutils = 1 + except ImportError: + print('Failed to import numpy_distutils:', sys.exc_info()[1]) + has_numpy_distutils = 0 + + if has_newnumpy: + try: + print('Found new numpy version %r in %s' % + (numpy.__version__, numpy.__file__)) + except Exception as msg: + print('error:', msg) + print('------') + + if has_f2py2e: + try: + print('Found f2py2e version %r in %s' % + (f2py2e.__version__.version, f2py2e.__file__)) + except Exception as msg: + print('error:', msg) + print('------') + + if has_numpy_distutils: + try: + if has_numpy_distutils == 2: + print('Found numpy.distutils version %r in %r' % ( + numpy.distutils.__version__, + numpy.distutils.__file__)) + else: + print('Found numpy_distutils version %r in %r' % ( + numpy_distutils.numpy_distutils_version.numpy_distutils_version, + numpy_distutils.__file__)) + print('------') + except Exception as msg: + print('error:', msg) + print('------') + try: + if has_numpy_distutils == 1: + print( + 'Importing numpy_distutils.command.build_flib ...', end=' ') + import numpy_distutils.command.build_flib as build_flib + print('ok') + print('------') + try: + print( + 'Checking availability of supported Fortran compilers:') + for compiler_class in build_flib.all_compilers: + compiler_class(verbose=1).is_available() + print('------') + except Exception as msg: + print('error:', msg) + print('------') + except Exception as msg: + print( + 'error:', msg, '(ignore it, build_flib is obsolute for numpy.distutils 0.2.2 and up)') + print('------') + try: + if has_numpy_distutils == 2: + print('Importing numpy.distutils.fcompiler ...', end=' ') + import numpy.distutils.fcompiler as fcompiler + else: + print('Importing numpy_distutils.fcompiler ...', end=' ') + import numpy_distutils.fcompiler as fcompiler + print('ok') + print('------') + try: + print('Checking availability of supported Fortran compilers:') + fcompiler.show_fcompilers() + print('------') + except Exception as msg: + print('error:', msg) + print('------') + except Exception as msg: + 
print('error:', msg) + print('------') + try: + if has_numpy_distutils == 2: + print('Importing numpy.distutils.cpuinfo ...', end=' ') + from numpy.distutils.cpuinfo import cpuinfo + print('ok') + print('------') + else: + try: + print( + 'Importing numpy_distutils.command.cpuinfo ...', end=' ') + from numpy_distutils.command.cpuinfo import cpuinfo + print('ok') + print('------') + except Exception as msg: + print('error:', msg, '(ignore it)') + print('Importing numpy_distutils.cpuinfo ...', end=' ') + from numpy_distutils.cpuinfo import cpuinfo + print('ok') + print('------') + cpu = cpuinfo() + print('CPU information:', end=' ') + for name in dir(cpuinfo): + if name[0] == '_' and name[1] != '_' and getattr(cpu, name[1:])(): + print(name[1:], end=' ') + print('------') + except Exception as msg: + print('error:', msg) + print('------') + os.chdir(_path) +if __name__ == "__main__": + run() diff --git a/MLPY/Lib/site-packages/numpy/f2py/f2py2e.py b/MLPY/Lib/site-packages/numpy/f2py/f2py2e.py new file mode 100644 index 0000000000000000000000000000000000000000..e6ccde51393687e9bb8557eb6661375c7d1e1ebb --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/f2py2e.py @@ -0,0 +1,692 @@ +#!/usr/bin/env python3 +""" + +f2py2e - Fortran to Python C/API generator. 2nd Edition. + See __usage__ below. + +Copyright 1999--2011 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +$Date: 2005/05/06 08:31:19 $ +Pearu Peterson + +""" +import sys +import os +import pprint +import re + +from . import crackfortran +from . import rules +from . import cb_rules +from . import auxfuncs +from . import cfuncs +from . import f90mod_rules +from . import __version__ +from . import capi_maps + +f2py_version = __version__.version +numpy_version = __version__.version +errmess = sys.stderr.write +# outmess=sys.stdout.write +show = pprint.pprint +outmess = auxfuncs.outmess + +__usage__ =\ +f"""Usage: + +1) To construct extension module sources: + + f2py [] [[[only:]||[skip:]] \\ + ] \\ + [: ...] + +2) To compile fortran files and build extension modules: + + f2py -c [, , ] + +3) To generate signature files: + + f2py -h ...< same options as in (1) > + +Description: This program generates a Python C/API file (module.c) + that contains wrappers for given fortran functions so that they + can be called from Python. With the -c option the corresponding + extension modules are built. + +Options: + + --2d-numpy Use numpy.f2py tool with NumPy support. [DEFAULT] + --2d-numeric Use f2py2e tool with Numeric support. + --2d-numarray Use f2py2e tool with Numarray support. + --g3-numpy Use 3rd generation f2py from the separate f2py package. + [NOT AVAILABLE YET] + + -h Write signatures of the fortran routines to file + and exit. You can then edit and use it instead + of . If ==stdout then the + signatures are printed to stdout. + Names of fortran routines for which Python C/API + functions will be generated. Default is all that are found + in . + Paths to fortran/signature files that will be scanned for + in order to determine their signatures. + skip: Ignore fortran functions that follow until `:'. + only: Use only fortran functions that follow until `:'. + : Get back to mode. + + -m Name of the module; f2py generates a Python/C API + file module.c or extension module . + Default is 'untitled'. + + --[no-]lower Do [not] lower the cases in . 
By default, + --lower is assumed with -h key, and --no-lower without -h key. + + --build-dir All f2py generated files are created in . + Default is tempfile.mkdtemp(). + + --overwrite-signature Overwrite existing signature file. + + --[no-]latex-doc Create (or not) module.tex. + Default is --no-latex-doc. + --short-latex Create 'incomplete' LaTeX document (without commands + \\documentclass, \\tableofcontents, and \\begin{{document}}, + \\end{{document}}). + + --[no-]rest-doc Create (or not) module.rst. + Default is --no-rest-doc. + + --debug-capi Create C/API code that reports the state of the wrappers + during runtime. Useful for debugging. + + --[no-]wrap-functions Create Fortran subroutine wrappers to Fortran 77 + functions. --wrap-functions is default because it ensures + maximum portability/compiler independence. + + --include-paths ::... Search include files from the given + directories. + + --help-link [..] List system resources found by system_info.py. See also + --link- switch below. [..] is optional list + of resources names. E.g. try 'f2py --help-link lapack_opt'. + + --f2cmap Load Fortran-to-Python KIND specification from the given + file. Default: .f2py_f2cmap in current directory. + + --quiet Run quietly. + --verbose Run with extra verbosity. + -v Print f2py version ID and exit. + + +numpy.distutils options (only effective with -c): + + --fcompiler= Specify Fortran compiler type by vendor + --compiler= Specify C compiler type (as defined by distutils) + + --help-fcompiler List available Fortran compilers and exit + --f77exec= Specify the path to F77 compiler + --f90exec= Specify the path to F90 compiler + --f77flags= Specify F77 compiler flags + --f90flags= Specify F90 compiler flags + --opt= Specify optimization flags + --arch= Specify architecture specific optimization flags + --noopt Compile without optimization + --noarch Compile without arch-dependent optimization + --debug Compile with debugging information + +Extra options (only effective with -c): + + --link- Link extension module with as defined + by numpy.distutils/system_info.py. E.g. to link + with optimized LAPACK libraries (vecLib on MacOSX, + ATLAS elsewhere), use --link-lapack_opt. + See also --help-link switch. + + -L/path/to/lib/ -l + -D -U + -I/path/to/include/ + .o .so .a + + Using the following macros may be required with non-gcc Fortran + compilers: + -DPREPEND_FORTRAN -DNO_APPEND_FORTRAN -DUPPERCASE_FORTRAN + -DUNDERSCORE_G77 + + When using -DF2PY_REPORT_ATEXIT, a performance report of F2PY + interface is printed out at exit (platforms: Linux). + + When using -DF2PY_REPORT_ON_ARRAY_COPY=, a message is + sent to stderr whenever F2PY interface makes a copy of an + array. Integer sets the threshold for array sizes when + a message should be shown. + +Version: {f2py_version} +numpy Version: {numpy_version} +Requires: Python 3.5 or higher. +License: NumPy license (see LICENSE.txt in the NumPy source code) +Copyright 1999 - 2011 Pearu Peterson all rights reserved. +http://cens.ioc.ee/projects/f2py2e/""" + + +def scaninputline(inputline): + files, skipfuncs, onlyfuncs, debug = [], [], [], [] + f, f2, f3, f5, f6, f7, f8, f9, f10 = 1, 0, 0, 0, 0, 0, 0, 0, 0 + verbose = 1 + dolc = -1 + dolatexdoc = 0 + dorestdoc = 0 + wrapfuncs = 1 + buildpath = '.' 
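+    # How the token scanner below works: f selects what a bare argument
+    # means (1 = source file, 0 = a name after 'only:', -1 = a name after
+    # 'skip:'), while f2/f3/f6/f7/f8/f9/f10 flag that the next token is the
+    # value of -h, -m, --build-dir, --include-paths, --coutput,
+    # --f2py-wrapper-output or --f2cmap respectively.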
+ include_paths = [] + signsfile, modulename = None, None + options = {'buildpath': buildpath, + 'coutput': None, + 'f2py_wrapper_output': None} + for l in inputline: + if l == '': + pass + elif l == 'only:': + f = 0 + elif l == 'skip:': + f = -1 + elif l == ':': + f = 1 + elif l[:8] == '--debug-': + debug.append(l[8:]) + elif l == '--lower': + dolc = 1 + elif l == '--build-dir': + f6 = 1 + elif l == '--no-lower': + dolc = 0 + elif l == '--quiet': + verbose = 0 + elif l == '--verbose': + verbose += 1 + elif l == '--latex-doc': + dolatexdoc = 1 + elif l == '--no-latex-doc': + dolatexdoc = 0 + elif l == '--rest-doc': + dorestdoc = 1 + elif l == '--no-rest-doc': + dorestdoc = 0 + elif l == '--wrap-functions': + wrapfuncs = 1 + elif l == '--no-wrap-functions': + wrapfuncs = 0 + elif l == '--short-latex': + options['shortlatex'] = 1 + elif l == '--coutput': + f8 = 1 + elif l == '--f2py-wrapper-output': + f9 = 1 + elif l == '--f2cmap': + f10 = 1 + elif l == '--overwrite-signature': + options['h-overwrite'] = 1 + elif l == '-h': + f2 = 1 + elif l == '-m': + f3 = 1 + elif l[:2] == '-v': + print(f2py_version) + sys.exit() + elif l == '--show-compilers': + f5 = 1 + elif l[:8] == '-include': + cfuncs.outneeds['userincludes'].append(l[9:-1]) + cfuncs.userincludes[l[9:-1]] = '#include ' + l[8:] + elif l[:15] in '--include_paths': + outmess( + 'f2py option --include_paths is deprecated, use --include-paths instead.\n') + f7 = 1 + elif l[:15] in '--include-paths': + f7 = 1 + elif l[0] == '-': + errmess('Unknown option %s\n' % repr(l)) + sys.exit() + elif f2: + f2 = 0 + signsfile = l + elif f3: + f3 = 0 + modulename = l + elif f6: + f6 = 0 + buildpath = l + elif f7: + f7 = 0 + include_paths.extend(l.split(os.pathsep)) + elif f8: + f8 = 0 + options["coutput"] = l + elif f9: + f9 = 0 + options["f2py_wrapper_output"] = l + elif f10: + f10 = 0 + options["f2cmap_file"] = l + elif f == 1: + try: + with open(l): + pass + files.append(l) + except IOError as detail: + errmess('IOError: %s. Skipping file "%s".\n' % + (str(detail), l)) + elif f == -1: + skipfuncs.append(l) + elif f == 0: + onlyfuncs.append(l) + if not f5 and not files and not modulename: + print(__usage__) + sys.exit() + if not os.path.isdir(buildpath): + if not verbose: + outmess('Creating build directory %s' % (buildpath)) + os.mkdir(buildpath) + if signsfile: + signsfile = os.path.join(buildpath, signsfile) + if signsfile and os.path.isfile(signsfile) and 'h-overwrite' not in options: + errmess( + 'Signature file "%s" exists!!! 
Use --overwrite-signature to overwrite.\n' % (signsfile)) + sys.exit() + + options['debug'] = debug + options['verbose'] = verbose + if dolc == -1 and not signsfile: + options['do-lower'] = 0 + else: + options['do-lower'] = dolc + if modulename: + options['module'] = modulename + if signsfile: + options['signsfile'] = signsfile + if onlyfuncs: + options['onlyfuncs'] = onlyfuncs + if skipfuncs: + options['skipfuncs'] = skipfuncs + options['dolatexdoc'] = dolatexdoc + options['dorestdoc'] = dorestdoc + options['wrapfuncs'] = wrapfuncs + options['buildpath'] = buildpath + options['include_paths'] = include_paths + options.setdefault('f2cmap_file', None) + return files, options + + +def callcrackfortran(files, options): + rules.options = options + crackfortran.debug = options['debug'] + crackfortran.verbose = options['verbose'] + if 'module' in options: + crackfortran.f77modulename = options['module'] + if 'skipfuncs' in options: + crackfortran.skipfuncs = options['skipfuncs'] + if 'onlyfuncs' in options: + crackfortran.onlyfuncs = options['onlyfuncs'] + crackfortran.include_paths[:] = options['include_paths'] + crackfortran.dolowercase = options['do-lower'] + postlist = crackfortran.crackfortran(files) + if 'signsfile' in options: + outmess('Saving signatures to file "%s"\n' % (options['signsfile'])) + pyf = crackfortran.crack2fortran(postlist) + if options['signsfile'][-6:] == 'stdout': + sys.stdout.write(pyf) + else: + with open(options['signsfile'], 'w') as f: + f.write(pyf) + if options["coutput"] is None: + for mod in postlist: + mod["coutput"] = "%smodule.c" % mod["name"] + else: + for mod in postlist: + mod["coutput"] = options["coutput"] + if options["f2py_wrapper_output"] is None: + for mod in postlist: + mod["f2py_wrapper_output"] = "%s-f2pywrappers.f" % mod["name"] + else: + for mod in postlist: + mod["f2py_wrapper_output"] = options["f2py_wrapper_output"] + return postlist + + +def buildmodules(lst): + cfuncs.buildcfuncs() + outmess('Building modules...\n') + modules, mnames, isusedby = [], [], {} + for i in range(len(lst)): + if '__user__' in lst[i]['name']: + cb_rules.buildcallbacks(lst[i]) + else: + if 'use' in lst[i]: + for u in lst[i]['use'].keys(): + if u not in isusedby: + isusedby[u] = [] + isusedby[u].append(lst[i]['name']) + modules.append(lst[i]) + mnames.append(lst[i]['name']) + ret = {} + for i in range(len(mnames)): + if mnames[i] in isusedby: + outmess('\tSkipping module "%s" which is used by %s.\n' % ( + mnames[i], ','.join(['"%s"' % s for s in isusedby[mnames[i]]]))) + else: + um = [] + if 'use' in modules[i]: + for u in modules[i]['use'].keys(): + if u in isusedby and u in mnames: + um.append(modules[mnames.index(u)]) + else: + outmess( + '\tModule "%s" uses nonexisting "%s" which will be ignored.\n' % (mnames[i], u)) + ret[mnames[i]] = {} + dict_append(ret[mnames[i]], rules.buildmodule(modules[i], um)) + return ret + + +def dict_append(d_out, d_in): + for (k, v) in d_in.items(): + if k not in d_out: + d_out[k] = [] + if isinstance(v, list): + d_out[k] = d_out[k] + v + else: + d_out[k].append(v) + + +def run_main(comline_list): + """ + Equivalent to running:: + + f2py + + where ``=string.join(,' ')``, but in Python. Unless + ``-h`` is used, this function returns a dictionary containing + information on generated modules and their dependencies on source + files. For example, the command ``f2py -m scalar scalar.f`` can be + executed from Python as follows + + You cannot build extension modules with this function, that is, + using ``-c`` is not allowed. 
Use ``compile`` command instead + + Examples + -------- + .. include:: run_main_session.dat + :literal: + + """ + crackfortran.reset_global_f2py_vars() + f2pydir = os.path.dirname(os.path.abspath(cfuncs.__file__)) + fobjhsrc = os.path.join(f2pydir, 'src', 'fortranobject.h') + fobjcsrc = os.path.join(f2pydir, 'src', 'fortranobject.c') + files, options = scaninputline(comline_list) + auxfuncs.options = options + capi_maps.load_f2cmap_file(options['f2cmap_file']) + postlist = callcrackfortran(files, options) + isusedby = {} + for i in range(len(postlist)): + if 'use' in postlist[i]: + for u in postlist[i]['use'].keys(): + if u not in isusedby: + isusedby[u] = [] + isusedby[u].append(postlist[i]['name']) + for i in range(len(postlist)): + if postlist[i]['block'] == 'python module' and '__user__' in postlist[i]['name']: + if postlist[i]['name'] in isusedby: + # if not quiet: + outmess('Skipping Makefile build for module "%s" which is used by %s\n' % ( + postlist[i]['name'], ','.join(['"%s"' % s for s in isusedby[postlist[i]['name']]]))) + if 'signsfile' in options: + if options['verbose'] > 1: + outmess( + 'Stopping. Edit the signature file and then run f2py on the signature file: ') + outmess('%s %s\n' % + (os.path.basename(sys.argv[0]), options['signsfile'])) + return + for i in range(len(postlist)): + if postlist[i]['block'] != 'python module': + if 'python module' not in options: + errmess( + 'Tip: If your original code is Fortran source then you must use -m option.\n') + raise TypeError('All blocks must be python module blocks but got %s' % ( + repr(postlist[i]['block']))) + auxfuncs.debugoptions = options['debug'] + f90mod_rules.options = options + auxfuncs.wrapfuncs = options['wrapfuncs'] + + ret = buildmodules(postlist) + + for mn in ret.keys(): + dict_append(ret[mn], {'csrc': fobjcsrc, 'h': fobjhsrc}) + return ret + + +def filter_files(prefix, suffix, files, remove_prefix=None): + """ + Filter files by prefix and suffix. + """ + filtered, rest = [], [] + match = re.compile(prefix + r'.*' + suffix + r'\Z').match + if remove_prefix: + ind = len(prefix) + else: + ind = 0 + for file in [x.strip() for x in files]: + if match(file): + filtered.append(file[ind:]) + else: + rest.append(file) + return filtered, rest + + +def get_prefix(module): + p = os.path.dirname(os.path.dirname(module.__file__)) + return p + + +def run_compile(): + """ + Do it all in one call! 
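+
+    This is the ``-c`` path: the f2py, config_fc and build_ext flag groups
+    are split out of ``sys.argv`` and the remaining sources are compiled
+    into an extension module through a numpy.distutils ``setup()`` run,
+    e.g. (an illustrative invocation)::
+
+        f2py -c -m hello hello.f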
+ """ + import tempfile + + i = sys.argv.index('-c') + del sys.argv[i] + + remove_build_dir = 0 + try: + i = sys.argv.index('--build-dir') + except ValueError: + i = None + if i is not None: + build_dir = sys.argv[i + 1] + del sys.argv[i + 1] + del sys.argv[i] + else: + remove_build_dir = 1 + build_dir = tempfile.mkdtemp() + + _reg1 = re.compile(r'--link-') + sysinfo_flags = [_m for _m in sys.argv[1:] if _reg1.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in sysinfo_flags] + if sysinfo_flags: + sysinfo_flags = [f[7:] for f in sysinfo_flags] + + _reg2 = re.compile( + r'--((no-|)(wrap-functions|lower)|debug-capi|quiet)|-include') + f2py_flags = [_m for _m in sys.argv[1:] if _reg2.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in f2py_flags] + f2py_flags2 = [] + fl = 0 + for a in sys.argv[1:]: + if a in ['only:', 'skip:']: + fl = 1 + elif a == ':': + fl = 0 + if fl or a == ':': + f2py_flags2.append(a) + if f2py_flags2 and f2py_flags2[-1] != ':': + f2py_flags2.append(':') + f2py_flags.extend(f2py_flags2) + + sys.argv = [_m for _m in sys.argv if _m not in f2py_flags2] + _reg3 = re.compile( + r'--((f(90)?compiler(-exec|)|compiler)=|help-compiler)') + flib_flags = [_m for _m in sys.argv[1:] if _reg3.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in flib_flags] + _reg4 = re.compile( + r'--((f(77|90)(flags|exec)|opt|arch)=|(debug|noopt|noarch|help-fcompiler))') + fc_flags = [_m for _m in sys.argv[1:] if _reg4.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in fc_flags] + + if 1: + del_list = [] + for s in flib_flags: + v = '--fcompiler=' + if s[:len(v)] == v: + from numpy.distutils import fcompiler + fcompiler.load_all_fcompiler_classes() + allowed_keys = list(fcompiler.fcompiler_class.keys()) + nv = ov = s[len(v):].lower() + if ov not in allowed_keys: + vmap = {} # XXX + try: + nv = vmap[ov] + except KeyError: + if ov not in vmap.values(): + print('Unknown vendor: "%s"' % (s[len(v):])) + nv = ov + i = flib_flags.index(s) + flib_flags[i] = '--fcompiler=' + nv + continue + for s in del_list: + i = flib_flags.index(s) + del flib_flags[i] + assert len(flib_flags) <= 2, repr(flib_flags) + + _reg5 = re.compile(r'--(verbose)') + setup_flags = [_m for _m in sys.argv[1:] if _reg5.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in setup_flags] + + if '--quiet' in f2py_flags: + setup_flags.append('--quiet') + + modulename = 'untitled' + sources = sys.argv[1:] + + for optname in ['--include_paths', '--include-paths', '--f2cmap']: + if optname in sys.argv: + i = sys.argv.index(optname) + f2py_flags.extend(sys.argv[i:i + 2]) + del sys.argv[i + 1], sys.argv[i] + sources = sys.argv[1:] + + if '-m' in sys.argv: + i = sys.argv.index('-m') + modulename = sys.argv[i + 1] + del sys.argv[i + 1], sys.argv[i] + sources = sys.argv[1:] + else: + from numpy.distutils.command.build_src import get_f2py_modulename + pyf_files, sources = filter_files('', '[.]pyf([.]src|)', sources) + sources = pyf_files + sources + for f in pyf_files: + modulename = get_f2py_modulename(f) + if modulename: + break + + extra_objects, sources = filter_files('', '[.](o|a|so|dylib)', sources) + include_dirs, sources = filter_files('-I', '', sources, remove_prefix=1) + library_dirs, sources = filter_files('-L', '', sources, remove_prefix=1) + libraries, sources = filter_files('-l', '', sources, remove_prefix=1) + undef_macros, sources = filter_files('-U', '', sources, remove_prefix=1) + define_macros, sources = filter_files('-D', '', sources, remove_prefix=1) + for i in range(len(define_macros)): + 
name_value = define_macros[i].split('=', 1) + if len(name_value) == 1: + name_value.append(None) + if len(name_value) == 2: + define_macros[i] = tuple(name_value) + else: + print('Invalid use of -D:', name_value) + + from numpy.distutils.system_info import get_info + + num_info = {} + if num_info: + include_dirs.extend(num_info.get('include_dirs', [])) + + from numpy.distutils.core import setup, Extension + ext_args = {'name': modulename, 'sources': sources, + 'include_dirs': include_dirs, + 'library_dirs': library_dirs, + 'libraries': libraries, + 'define_macros': define_macros, + 'undef_macros': undef_macros, + 'extra_objects': extra_objects, + 'f2py_options': f2py_flags, + } + + if sysinfo_flags: + from numpy.distutils.misc_util import dict_append + for n in sysinfo_flags: + i = get_info(n) + if not i: + outmess('No %s resources found in system' + ' (try `f2py --help-link`)\n' % (repr(n))) + dict_append(ext_args, **i) + + ext = Extension(**ext_args) + sys.argv = [sys.argv[0]] + setup_flags + sys.argv.extend(['build', + '--build-temp', build_dir, + '--build-base', build_dir, + '--build-platlib', '.', + # disable CCompilerOpt + '--disable-optimization']) + if fc_flags: + sys.argv.extend(['config_fc'] + fc_flags) + if flib_flags: + sys.argv.extend(['build_ext'] + flib_flags) + + setup(ext_modules=[ext]) + + if remove_build_dir and os.path.exists(build_dir): + import shutil + outmess('Removing build directory %s\n' % (build_dir)) + shutil.rmtree(build_dir) + + +def main(): + if '--help-link' in sys.argv[1:]: + sys.argv.remove('--help-link') + from numpy.distutils.system_info import show_all + show_all() + return + + # Probably outdated options that were not working before 1.16 + if '--g3-numpy' in sys.argv[1:]: + sys.stderr.write("G3 f2py support is not implemented, yet.\\n") + sys.exit(1) + elif '--2e-numeric' in sys.argv[1:]: + sys.argv.remove('--2e-numeric') + elif '--2e-numarray' in sys.argv[1:]: + # Note that this errors becaust the -DNUMARRAY argument is + # not recognized. Just here for back compatibility and the + # error message. 
+ sys.argv.append("-DNUMARRAY") + sys.argv.remove('--2e-numarray') + elif '--2e-numpy' in sys.argv[1:]: + sys.argv.remove('--2e-numpy') + else: + pass + + if '-c' in sys.argv[1:]: + run_compile() + else: + run_main(sys.argv[1:]) diff --git a/MLPY/Lib/site-packages/numpy/f2py/f2py_testing.py b/MLPY/Lib/site-packages/numpy/f2py/f2py_testing.py new file mode 100644 index 0000000000000000000000000000000000000000..ea0f09127e32863598bb92cecbe269828a532e26 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/f2py_testing.py @@ -0,0 +1,46 @@ +import sys +import re + +from numpy.testing import jiffies, memusage + + +def cmdline(): + m = re.compile(r'\A\d+\Z') + args = [] + repeat = 1 + for a in sys.argv[1:]: + if m.match(a): + repeat = eval(a) + else: + args.append(a) + f2py_opts = ' '.join(args) + return repeat, f2py_opts + + +def run(runtest, test_functions, repeat=1): + l = [(t, repr(t.__doc__.split('\n')[1].strip())) for t in test_functions] + start_memusage = memusage() + diff_memusage = None + start_jiffies = jiffies() + i = 0 + while i < repeat: + i += 1 + for t, fname in l: + runtest(t) + if start_memusage is None: + continue + if diff_memusage is None: + diff_memusage = memusage() - start_memusage + else: + diff_memusage2 = memusage() - start_memusage + if diff_memusage2 != diff_memusage: + print('memory usage change at step %i:' % i, + diff_memusage2 - diff_memusage, + fname) + diff_memusage = diff_memusage2 + current_memusage = memusage() + print('run', repeat * len(test_functions), 'tests', + 'in %.2f seconds' % ((jiffies() - start_jiffies) / 100.0)) + if start_memusage: + print('initial virtual memory size:', start_memusage, 'bytes') + print('current virtual memory size:', current_memusage, 'bytes') diff --git a/MLPY/Lib/site-packages/numpy/f2py/f90mod_rules.py b/MLPY/Lib/site-packages/numpy/f2py/f90mod_rules.py new file mode 100644 index 0000000000000000000000000000000000000000..7764b2859d3cccb646c847a51b9d2ee7617c42a0 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/f90mod_rules.py @@ -0,0 +1,270 @@ +#!/usr/bin/env python3 +""" + +Build F90 module support for f2py2e. + +Copyright 2000 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +$Date: 2005/02/03 19:30:23 $ +Pearu Peterson + +""" +__version__ = "$Revision: 1.27 $"[10:-1] + +f2py_version = 'See `f2py -v`' + +import numpy as np + +from . import capi_maps +from . import func2subr +from .crackfortran import undo_rmbadname, undo_rmbadname1 + +# The environment provided by auxfuncs.py is needed for some calls to eval. +# As the needed functions cannot be determined by static inspection of the +# code, it is safest to use import * pending a major refactoring of f2py. +from .auxfuncs import * + +options = {} + + +def findf90modules(m): + if ismodule(m): + return [m] + if not hasbody(m): + return [] + ret = [] + for b in m['body']: + if ismodule(b): + ret.append(b) + else: + ret = ret + findf90modules(b) + return ret + +fgetdims1 = """\ + external f2pysetdata + logical ns + integer r,i + integer(%d) s(*) + ns = .FALSE. + if (allocated(d)) then + do i=1,r + if ((size(d,i).ne.s(i)).and.(s(i).ge.0)) then + ns = .TRUE. 
+ end if + end do + if (ns) then + deallocate(d) + end if + end if + if ((.not.allocated(d)).and.(s(1).ge.1)) then""" % np.intp().itemsize + +fgetdims2 = """\ + end if + if (allocated(d)) then + do i=1,r + s(i) = size(d,i) + end do + end if + flag = 1 + call f2pysetdata(d,allocated(d))""" + +fgetdims2_sa = """\ + end if + if (allocated(d)) then + do i=1,r + s(i) = size(d,i) + end do + !s(r) must be equal to len(d(1)) + end if + flag = 2 + call f2pysetdata(d,allocated(d))""" + + +def buildhooks(pymod): + from . import rules + ret = {'f90modhooks': [], 'initf90modhooks': [], 'body': [], + 'need': ['F_FUNC', 'arrayobject.h'], + 'separatorsfor': {'includes0': '\n', 'includes': '\n'}, + 'docs': ['"Fortran 90/95 modules:\\n"'], + 'latexdoc': []} + fhooks = [''] + + def fadd(line, s=fhooks): + s[0] = '%s\n %s' % (s[0], line) + doc = [''] + + def dadd(line, s=doc): + s[0] = '%s\n%s' % (s[0], line) + for m in findf90modules(pymod): + sargs, fargs, efargs, modobjs, notvars, onlyvars = [], [], [], [], [ + m['name']], [] + sargsp = [] + ifargs = [] + mfargs = [] + if hasbody(m): + for b in m['body']: + notvars.append(b['name']) + for n in m['vars'].keys(): + var = m['vars'][n] + if (n not in notvars) and (not l_or(isintent_hide, isprivate)(var)): + onlyvars.append(n) + mfargs.append(n) + outmess('\t\tConstructing F90 module support for "%s"...\n' % + (m['name'])) + if onlyvars: + outmess('\t\t Variables: %s\n' % (' '.join(onlyvars))) + chooks = [''] + + def cadd(line, s=chooks): + s[0] = '%s\n%s' % (s[0], line) + ihooks = [''] + + def iadd(line, s=ihooks): + s[0] = '%s\n%s' % (s[0], line) + + vrd = capi_maps.modsign2map(m) + cadd('static FortranDataDef f2py_%s_def[] = {' % (m['name'])) + dadd('\\subsection{Fortran 90/95 module \\texttt{%s}}\n' % (m['name'])) + if hasnote(m): + note = m['note'] + if isinstance(note, list): + note = '\n'.join(note) + dadd(note) + if onlyvars: + dadd('\\begin{description}') + for n in onlyvars: + var = m['vars'][n] + modobjs.append(n) + ct = capi_maps.getctype(var) + at = capi_maps.c2capi_map[ct] + dm = capi_maps.getarrdims(n, var) + dms = dm['dims'].replace('*', '-1').strip() + dms = dms.replace(':', '-1').strip() + if not dms: + dms = '-1' + use_fgetdims2 = fgetdims2 + if isstringarray(var): + if 'charselector' in var and 'len' in var['charselector']: + cadd('\t{"%s",%s,{{%s,%s}},%s},' + % (undo_rmbadname1(n), dm['rank'], dms, var['charselector']['len'], at)) + use_fgetdims2 = fgetdims2_sa + else: + cadd('\t{"%s",%s,{{%s}},%s},' % + (undo_rmbadname1(n), dm['rank'], dms, at)) + else: + cadd('\t{"%s",%s,{{%s}},%s},' % + (undo_rmbadname1(n), dm['rank'], dms, at)) + dadd('\\item[]{{}\\verb@%s@{}}' % + (capi_maps.getarrdocsign(n, var))) + if hasnote(var): + note = var['note'] + if isinstance(note, list): + note = '\n'.join(note) + dadd('--- %s' % (note)) + if isallocatable(var): + fargs.append('f2py_%s_getdims_%s' % (m['name'], n)) + efargs.append(fargs[-1]) + sargs.append( + 'void (*%s)(int*,int*,void(*)(char*,int*),int*)' % (n)) + sargsp.append('void (*)(int*,int*,void(*)(char*,int*),int*)') + iadd('\tf2py_%s_def[i_f2py++].func = %s;' % (m['name'], n)) + fadd('subroutine %s(r,s,f2pysetdata,flag)' % (fargs[-1])) + fadd('use %s, only: d => %s\n' % + (m['name'], undo_rmbadname1(n))) + fadd('integer flag\n') + fhooks[0] = fhooks[0] + fgetdims1 + dms = range(1, int(dm['rank']) + 1) + fadd(' allocate(d(%s))\n' % + (','.join(['s(%s)' % i for i in dms]))) + fhooks[0] = fhooks[0] + use_fgetdims2 + fadd('end subroutine %s' % (fargs[-1])) + else: + fargs.append(n) + 
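+                    # Non-allocatable module variables are exported by
+                    # address: each one is passed to the C setup hook as a
+                    # bare char* and stored in the FortranDataDef .data
+                    # slot below.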
sargs.append('char *%s' % (n)) + sargsp.append('char*') + iadd('\tf2py_%s_def[i_f2py++].data = %s;' % (m['name'], n)) + if onlyvars: + dadd('\\end{description}') + if hasbody(m): + for b in m['body']: + if not isroutine(b): + outmess("f90mod_rules.buildhooks:" + f" skipping {b['block']} {b['name']}\n") + continue + modobjs.append('%s()' % (b['name'])) + b['modulename'] = m['name'] + api, wrap = rules.buildapi(b) + if isfunction(b): + fhooks[0] = fhooks[0] + wrap + fargs.append('f2pywrap_%s_%s' % (m['name'], b['name'])) + ifargs.append(func2subr.createfuncwrapper(b, signature=1)) + else: + if wrap: + fhooks[0] = fhooks[0] + wrap + fargs.append('f2pywrap_%s_%s' % (m['name'], b['name'])) + ifargs.append( + func2subr.createsubrwrapper(b, signature=1)) + else: + fargs.append(b['name']) + mfargs.append(fargs[-1]) + api['externroutines'] = [] + ar = applyrules(api, vrd) + ar['docs'] = [] + ar['docshort'] = [] + ret = dictappend(ret, ar) + cadd('\t{"%s",-1,{{-1}},0,NULL,(void *)f2py_rout_#modulename#_%s_%s,doc_f2py_rout_#modulename#_%s_%s},' % + (b['name'], m['name'], b['name'], m['name'], b['name'])) + sargs.append('char *%s' % (b['name'])) + sargsp.append('char *') + iadd('\tf2py_%s_def[i_f2py++].data = %s;' % + (m['name'], b['name'])) + cadd('\t{NULL}\n};\n') + iadd('}') + ihooks[0] = 'static void f2py_setup_%s(%s) {\n\tint i_f2py=0;%s' % ( + m['name'], ','.join(sargs), ihooks[0]) + if '_' in m['name']: + F_FUNC = 'F_FUNC_US' + else: + F_FUNC = 'F_FUNC' + iadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void (*)(%s));' + % (F_FUNC, m['name'], m['name'].upper(), ','.join(sargsp))) + iadd('static void f2py_init_%s(void) {' % (m['name'])) + iadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);' + % (F_FUNC, m['name'], m['name'].upper(), m['name'])) + iadd('}\n') + ret['f90modhooks'] = ret['f90modhooks'] + chooks + ihooks + ret['initf90modhooks'] = ['\tPyDict_SetItemString(d, "%s", PyFortranObject_New(f2py_%s_def,f2py_init_%s));' % ( + m['name'], m['name'], m['name'])] + ret['initf90modhooks'] + fadd('') + fadd('subroutine f2pyinit%s(f2pysetupfunc)' % (m['name'])) + if mfargs: + for a in undo_rmbadname(mfargs): + fadd('use %s, only : %s' % (m['name'], a)) + if ifargs: + fadd(' '.join(['interface'] + ifargs)) + fadd('end interface') + fadd('external f2pysetupfunc') + if efargs: + for a in undo_rmbadname(efargs): + fadd('external %s' % (a)) + fadd('call f2pysetupfunc(%s)' % (','.join(undo_rmbadname(fargs)))) + fadd('end subroutine f2pyinit%s\n' % (m['name'])) + + dadd('\n'.join(ret['latexdoc']).replace( + r'\subsection{', r'\subsubsection{')) + + ret['latexdoc'] = [] + ret['docs'].append('"\t%s --- %s"' % (m['name'], + ','.join(undo_rmbadname(modobjs)))) + + ret['routine_defs'] = '' + ret['doc'] = [] + ret['docshort'] = [] + ret['latexdoc'] = doc[0] + if len(ret['docs']) <= 1: + ret['docs'] = '' + return ret, fhooks[0] diff --git a/MLPY/Lib/site-packages/numpy/f2py/func2subr.py b/MLPY/Lib/site-packages/numpy/f2py/func2subr.py new file mode 100644 index 0000000000000000000000000000000000000000..e341bbc85827c552d911041df59710844cfee59b --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/func2subr.py @@ -0,0 +1,300 @@ +#!/usr/bin/env python3 +""" + +Rules for building C/API module with f2py2e. + +Copyright 1999,2000 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
+$Date: 2004/11/26 11:13:06 $ +Pearu Peterson + +""" +__version__ = "$Revision: 1.16 $"[10:-1] + +f2py_version = 'See `f2py -v`' + +import copy + +from .auxfuncs import ( + getfortranname, isexternal, isfunction, isfunction_wrap, isintent_in, + isintent_out, islogicalfunction, ismoduleroutine, isscalar, + issubroutine, issubroutine_wrap, outmess, show +) + + +def var2fixfortran(vars, a, fa=None, f90mode=None): + if fa is None: + fa = a + if a not in vars: + show(vars) + outmess('var2fixfortran: No definition for argument "%s".\n' % a) + return '' + if 'typespec' not in vars[a]: + show(vars[a]) + outmess('var2fixfortran: No typespec for argument "%s".\n' % a) + return '' + vardef = vars[a]['typespec'] + if vardef == 'type' and 'typename' in vars[a]: + vardef = '%s(%s)' % (vardef, vars[a]['typename']) + selector = {} + lk = '' + if 'kindselector' in vars[a]: + selector = vars[a]['kindselector'] + lk = 'kind' + elif 'charselector' in vars[a]: + selector = vars[a]['charselector'] + lk = 'len' + if '*' in selector: + if f90mode: + if selector['*'] in ['*', ':', '(*)']: + vardef = '%s(len=*)' % (vardef) + else: + vardef = '%s(%s=%s)' % (vardef, lk, selector['*']) + else: + if selector['*'] in ['*', ':']: + vardef = '%s*(%s)' % (vardef, selector['*']) + else: + vardef = '%s*%s' % (vardef, selector['*']) + else: + if 'len' in selector: + vardef = '%s(len=%s' % (vardef, selector['len']) + if 'kind' in selector: + vardef = '%s,kind=%s)' % (vardef, selector['kind']) + else: + vardef = '%s)' % (vardef) + elif 'kind' in selector: + vardef = '%s(kind=%s)' % (vardef, selector['kind']) + + vardef = '%s %s' % (vardef, fa) + if 'dimension' in vars[a]: + vardef = '%s(%s)' % (vardef, ','.join(vars[a]['dimension'])) + return vardef + + +def createfuncwrapper(rout, signature=0): + assert isfunction(rout) + + extra_args = [] + vars = rout['vars'] + for a in rout['args']: + v = rout['vars'][a] + for i, d in enumerate(v.get('dimension', [])): + if d == ':': + dn = 'f2py_%s_d%s' % (a, i) + dv = dict(typespec='integer', intent=['hide']) + dv['='] = 'shape(%s, %s)' % (a, i) + extra_args.append(dn) + vars[dn] = dv + v['dimension'][i] = dn + rout['args'].extend(extra_args) + need_interface = bool(extra_args) + + ret = [''] + + def add(line, ret=ret): + ret[0] = '%s\n %s' % (ret[0], line) + name = rout['name'] + fortranname = getfortranname(rout) + f90mode = ismoduleroutine(rout) + newname = '%sf2pywrap' % (name) + + if newname not in vars: + vars[newname] = vars[name] + args = [newname] + rout['args'][1:] + else: + args = [newname] + rout['args'] + + l = var2fixfortran(vars, name, newname, f90mode) + if l[:13] == 'character*(*)': + if f90mode: + l = 'character(len=10)' + l[13:] + else: + l = 'character*10' + l[13:] + charselect = vars[name]['charselector'] + if charselect.get('*', '') == '(*)': + charselect['*'] = '10' + sargs = ', '.join(args) + if f90mode: + add('subroutine f2pywrap_%s_%s (%s)' % + (rout['modulename'], name, sargs)) + if not signature: + add('use %s, only : %s' % (rout['modulename'], fortranname)) + else: + add('subroutine f2pywrap%s (%s)' % (name, sargs)) + if not need_interface: + add('external %s' % (fortranname)) + l = l + ', ' + fortranname + if need_interface: + for line in rout['saved_interface'].split('\n'): + if line.lstrip().startswith('use ') and '__user__' not in line: + add(line) + + args = args[1:] + dumped_args = [] + for a in args: + if isexternal(vars[a]): + add('external %s' % (a)) + dumped_args.append(a) + for a in args: + if a in dumped_args: + continue + if isscalar(vars[a]): + 
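+            # Scalars are declared before the remaining (array) arguments,
+            # presumably so that any extent parameters are in scope before
+            # the arrays dimensioned by them.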
add(var2fixfortran(vars, a, f90mode=f90mode)) + dumped_args.append(a) + for a in args: + if a in dumped_args: + continue + if isintent_in(vars[a]): + add(var2fixfortran(vars, a, f90mode=f90mode)) + dumped_args.append(a) + for a in args: + if a in dumped_args: + continue + add(var2fixfortran(vars, a, f90mode=f90mode)) + + add(l) + + if need_interface: + if f90mode: + # f90 module already defines needed interface + pass + else: + add('interface') + add(rout['saved_interface'].lstrip()) + add('end interface') + + sargs = ', '.join([a for a in args if a not in extra_args]) + + if not signature: + if islogicalfunction(rout): + add('%s = .not.(.not.%s(%s))' % (newname, fortranname, sargs)) + else: + add('%s = %s(%s)' % (newname, fortranname, sargs)) + if f90mode: + add('end subroutine f2pywrap_%s_%s' % (rout['modulename'], name)) + else: + add('end') + return ret[0] + + +def createsubrwrapper(rout, signature=0): + assert issubroutine(rout) + + extra_args = [] + vars = rout['vars'] + for a in rout['args']: + v = rout['vars'][a] + for i, d in enumerate(v.get('dimension', [])): + if d == ':': + dn = 'f2py_%s_d%s' % (a, i) + dv = dict(typespec='integer', intent=['hide']) + dv['='] = 'shape(%s, %s)' % (a, i) + extra_args.append(dn) + vars[dn] = dv + v['dimension'][i] = dn + rout['args'].extend(extra_args) + need_interface = bool(extra_args) + + ret = [''] + + def add(line, ret=ret): + ret[0] = '%s\n %s' % (ret[0], line) + name = rout['name'] + fortranname = getfortranname(rout) + f90mode = ismoduleroutine(rout) + + args = rout['args'] + + sargs = ', '.join(args) + if f90mode: + add('subroutine f2pywrap_%s_%s (%s)' % + (rout['modulename'], name, sargs)) + if not signature: + add('use %s, only : %s' % (rout['modulename'], fortranname)) + else: + add('subroutine f2pywrap%s (%s)' % (name, sargs)) + if not need_interface: + add('external %s' % (fortranname)) + + if need_interface: + for line in rout['saved_interface'].split('\n'): + if line.lstrip().startswith('use ') and '__user__' not in line: + add(line) + + dumped_args = [] + for a in args: + if isexternal(vars[a]): + add('external %s' % (a)) + dumped_args.append(a) + for a in args: + if a in dumped_args: + continue + if isscalar(vars[a]): + add(var2fixfortran(vars, a, f90mode=f90mode)) + dumped_args.append(a) + for a in args: + if a in dumped_args: + continue + add(var2fixfortran(vars, a, f90mode=f90mode)) + + if need_interface: + if f90mode: + # f90 module already defines needed interface + pass + else: + add('interface') + for line in rout['saved_interface'].split('\n'): + if line.lstrip().startswith('use ') and '__user__' in line: + continue + add(line) + add('end interface') + + sargs = ', '.join([a for a in args if a not in extra_args]) + + if not signature: + add('call %s(%s)' % (fortranname, sargs)) + if f90mode: + add('end subroutine f2pywrap_%s_%s' % (rout['modulename'], name)) + else: + add('end') + return ret[0] + + +def assubr(rout): + if isfunction_wrap(rout): + fortranname = getfortranname(rout) + name = rout['name'] + outmess('\t\tCreating wrapper for Fortran function "%s"("%s")...\n' % ( + name, fortranname)) + rout = copy.copy(rout) + fname = name + rname = fname + if 'result' in rout: + rname = rout['result'] + rout['vars'][fname] = rout['vars'][rname] + fvar = rout['vars'][fname] + if not isintent_out(fvar): + if 'intent' not in fvar: + fvar['intent'] = [] + fvar['intent'].append('out') + flag = 1 + for i in fvar['intent']: + if i.startswith('out='): + flag = 0 + break + if flag: + fvar['intent'].append('out=%s' % (rname)) + 
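+        # Prepend the result as the first dummy argument; an illustrative
+        # transformation (sketch):
+        #     real function f(x)        ! original
+        # becomes, via createfuncwrapper() below, roughly
+        #     subroutine f2pywrapf (ff2pywrap, x)
+        # so the value is returned portably through an argument.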
rout['args'][:] = [fname] + rout['args'] + return rout, createfuncwrapper(rout) + if issubroutine_wrap(rout): + fortranname = getfortranname(rout) + name = rout['name'] + outmess('\t\tCreating wrapper for Fortran subroutine "%s"("%s")...\n' % ( + name, fortranname)) + rout = copy.copy(rout) + return rout, createsubrwrapper(rout) + return rout, '' diff --git a/MLPY/Lib/site-packages/numpy/f2py/rules.py b/MLPY/Lib/site-packages/numpy/f2py/rules.py new file mode 100644 index 0000000000000000000000000000000000000000..865b8c7c6ebcd4e6d5e9c0b7a1ae91ecfe32de2d --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/rules.py @@ -0,0 +1,1460 @@ +#!/usr/bin/env python3 +""" + +Rules for building C/API module with f2py2e. + +Here is a skeleton of a new wrapper function (13Dec2001): + +wrapper_function(args) + declarations + get_python_arguments, say, `a' and `b' + + get_a_from_python + if (successful) { + + get_b_from_python + if (successful) { + + callfortran + if (successful) { + + put_a_to_python + if (successful) { + + put_b_to_python + if (successful) { + + buildvalue = ... + + } + + } + + } + + } + cleanup_b + + } + cleanup_a + + return buildvalue + +Copyright 1999,2000 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +$Date: 2005/08/30 08:58:42 $ +Pearu Peterson + +""" +import os +import time +import copy + +# __version__.version is now the same as the NumPy version +from . import __version__ +f2py_version = __version__.version +numpy_version = __version__.version + +from .auxfuncs import ( + applyrules, debugcapi, dictappend, errmess, gentitle, getargs2, + hascallstatement, hasexternals, hasinitvalue, hasnote, hasresultnote, + isarray, isarrayofstrings, iscomplex, iscomplexarray, + iscomplexfunction, iscomplexfunction_warn, isdummyroutine, isexternal, + isfunction, isfunction_wrap, isint1array, isintent_aux, isintent_c, + isintent_callback, isintent_copy, isintent_hide, isintent_inout, + isintent_nothide, isintent_out, isintent_overwrite, islogical, + islong_complex, islong_double, islong_doublefunction, islong_long, + islong_longfunction, ismoduleroutine, isoptional, isrequired, isscalar, + issigned_long_longarray, isstring, isstringarray, isstringfunction, + issubroutine, issubroutine_wrap, isthreadsafe, isunsigned, + isunsigned_char, isunsigned_chararray, isunsigned_long_long, + isunsigned_long_longarray, isunsigned_short, isunsigned_shortarray, + l_and, l_not, l_or, outmess, replace, stripcomma, requiresf90wrapper +) + +from . import capi_maps +from . import cfuncs +from . import common_rules +from . import use_rules +from . import f90mod_rules +from . 
import func2subr + +options = {} +sepdict = {} +#for k in ['need_cfuncs']: sepdict[k]=',' +for k in ['decl', + 'frompyobj', + 'cleanupfrompyobj', + 'topyarr', 'method', + 'pyobjfrom', 'closepyobjfrom', + 'freemem', + 'userincludes', + 'includes0', 'includes', 'typedefs', 'typedefs_generated', + 'cppmacros', 'cfuncs', 'callbacks', + 'latexdoc', + 'restdoc', + 'routine_defs', 'externroutines', + 'initf2pywraphooks', + 'commonhooks', 'initcommonhooks', + 'f90modhooks', 'initf90modhooks']: + sepdict[k] = '\n' + +#################### Rules for C/API module ################# + +generationtime = int(os.environ.get('SOURCE_DATE_EPOCH', time.time())) +module_rules = { + 'modulebody': """\ +/* File: #modulename#module.c + * This file is auto-generated with f2py (version:#f2py_version#). + * f2py is a Fortran to Python Interface Generator (FPIG), Second Edition, + * written by Pearu Peterson . + * Generation date: """ + time.asctime(time.gmtime(generationtime)) + """ + * Do not edit this file directly unless you know what you are doing!!! + */ + +#ifdef __cplusplus +extern \"C\" { +#endif + +""" + gentitle("See f2py2e/cfuncs.py: includes") + """ +#includes# +#includes0# + +""" + gentitle("See f2py2e/rules.py: mod_rules['modulebody']") + """ +static PyObject *#modulename#_error; +static PyObject *#modulename#_module; + +""" + gentitle("See f2py2e/cfuncs.py: typedefs") + """ +#typedefs# + +""" + gentitle("See f2py2e/cfuncs.py: typedefs_generated") + """ +#typedefs_generated# + +""" + gentitle("See f2py2e/cfuncs.py: cppmacros") + """ +#cppmacros# + +""" + gentitle("See f2py2e/cfuncs.py: cfuncs") + """ +#cfuncs# + +""" + gentitle("See f2py2e/cfuncs.py: userincludes") + """ +#userincludes# + +""" + gentitle("See f2py2e/capi_rules.py: usercode") + """ +#usercode# + +/* See f2py2e/rules.py */ +#externroutines# + +""" + gentitle("See f2py2e/capi_rules.py: usercode1") + """ +#usercode1# + +""" + gentitle("See f2py2e/cb_rules.py: buildcallback") + """ +#callbacks# + +""" + gentitle("See f2py2e/rules.py: buildapi") + """ +#body# + +""" + gentitle("See f2py2e/f90mod_rules.py: buildhooks") + """ +#f90modhooks# + +""" + gentitle("See f2py2e/rules.py: module_rules['modulebody']") + """ + +""" + gentitle("See f2py2e/common_rules.py: buildhooks") + """ +#commonhooks# + +""" + gentitle("See f2py2e/rules.py") + """ + +static FortranDataDef f2py_routine_defs[] = { +#routine_defs# +\t{NULL} +}; + +static PyMethodDef f2py_module_methods[] = { +#pymethoddef# +\t{NULL,NULL} +}; + +static struct PyModuleDef moduledef = { +\tPyModuleDef_HEAD_INIT, +\t"#modulename#", +\tNULL, +\t-1, +\tf2py_module_methods, +\tNULL, +\tNULL, +\tNULL, +\tNULL +}; + +PyMODINIT_FUNC PyInit_#modulename#(void) { +\tint i; +\tPyObject *m,*d, *s, *tmp; +\tm = #modulename#_module = PyModule_Create(&moduledef); +\tPy_SET_TYPE(&PyFortran_Type, &PyType_Type); +\timport_array(); +\tif (PyErr_Occurred()) +\t\t{PyErr_SetString(PyExc_ImportError, \"can't initialize module #modulename# (failed to import numpy)\"); return m;} +\td = PyModule_GetDict(m); +\ts = PyUnicode_FromString(\"#f2py_version#\"); +\tPyDict_SetItemString(d, \"__version__\", s); +\tPy_DECREF(s); +\ts = PyUnicode_FromString( +\t\t\"This module '#modulename#' is auto-generated with f2py (version:#f2py_version#).\\nFunctions:\\n\"\n#docs#\".\"); +\tPyDict_SetItemString(d, \"__doc__\", s); +\tPy_DECREF(s); +\ts = PyUnicode_FromString(\"""" + numpy_version + """\"); +\tPyDict_SetItemString(d, \"__f2py_numpy_version__\", s); +\tPy_DECREF(s); +\t#modulename#_error = PyErr_NewException 
(\"#modulename#.error\", NULL, NULL); +\t/* +\t * Store the error object inside the dict, so that it could get deallocated. +\t * (in practice, this is a module, so it likely will not and cannot.) +\t */ +\tPyDict_SetItemString(d, \"_#modulename#_error\", #modulename#_error); +\tPy_DECREF(#modulename#_error); +\tfor(i=0;f2py_routine_defs[i].name!=NULL;i++) { +\t\ttmp = PyFortranObject_NewAsAttr(&f2py_routine_defs[i]); +\t\tPyDict_SetItemString(d, f2py_routine_defs[i].name, tmp); +\t\tPy_DECREF(tmp); +\t} +#initf2pywraphooks# +#initf90modhooks# +#initcommonhooks# +#interface_usercode# + +#ifdef F2PY_REPORT_ATEXIT +\tif (! PyErr_Occurred()) +\t\ton_exit(f2py_report_on_exit,(void*)\"#modulename#\"); +#endif +\treturn m; +} +#ifdef __cplusplus +} +#endif +""", + 'separatorsfor': {'latexdoc': '\n\n', + 'restdoc': '\n\n'}, + 'latexdoc': ['\\section{Module \\texttt{#texmodulename#}}\n', + '#modnote#\n', + '#latexdoc#'], + 'restdoc': ['Module #modulename#\n' + '=' * 80, + '\n#restdoc#'] +} + +defmod_rules = [ + {'body': '/*eof body*/', + 'method': '/*eof method*/', + 'externroutines': '/*eof externroutines*/', + 'routine_defs': '/*eof routine_defs*/', + 'initf90modhooks': '/*eof initf90modhooks*/', + 'initf2pywraphooks': '/*eof initf2pywraphooks*/', + 'initcommonhooks': '/*eof initcommonhooks*/', + 'latexdoc': '', + 'restdoc': '', + 'modnote': {hasnote: '#note#', l_not(hasnote): ''}, + } +] + +routine_rules = { + 'separatorsfor': sepdict, + 'body': """ +#begintitle# +static char doc_#apiname#[] = \"\\\n#docreturn##name#(#docsignatureshort#)\\n\\nWrapper for ``#name#``.\\\n\\n#docstrsigns#\"; +/* #declfortranroutine# */ +static PyObject *#apiname#(const PyObject *capi_self, + PyObject *capi_args, + PyObject *capi_keywds, + #functype# (*f2py_func)(#callprotoargument#)) { + PyObject * volatile capi_buildvalue = NULL; + volatile int f2py_success = 1; +#decl# + static char *capi_kwlist[] = {#kwlist##kwlistopt##kwlistxa#NULL}; +#usercode# +#routdebugenter# +#ifdef F2PY_REPORT_ATEXIT +f2py_start_clock(); +#endif + if (!PyArg_ParseTupleAndKeywords(capi_args,capi_keywds,\\ + \"#argformat#|#keyformat##xaformat#:#pyname#\",\\ + capi_kwlist#args_capi##keys_capi##keys_xa#))\n return NULL; +#frompyobj# +/*end of frompyobj*/ +#ifdef F2PY_REPORT_ATEXIT +f2py_start_call_clock(); +#endif +#callfortranroutine# +if (PyErr_Occurred()) + f2py_success = 0; +#ifdef F2PY_REPORT_ATEXIT +f2py_stop_call_clock(); +#endif +/*end of callfortranroutine*/ + if (f2py_success) { +#pyobjfrom# +/*end of pyobjfrom*/ + CFUNCSMESS(\"Building return value.\\n\"); + capi_buildvalue = Py_BuildValue(\"#returnformat#\"#return#); +/*closepyobjfrom*/ +#closepyobjfrom# + } /*if (f2py_success) after callfortranroutine*/ +/*cleanupfrompyobj*/ +#cleanupfrompyobj# + if (capi_buildvalue == NULL) { +#routdebugfailure# + } else { +#routdebugleave# + } + CFUNCSMESS(\"Freeing memory.\\n\"); +#freemem# +#ifdef F2PY_REPORT_ATEXIT +f2py_stop_clock(); +#endif + return capi_buildvalue; +} +#endtitle# +""", + 'routine_defs': '#routine_def#', + 'initf2pywraphooks': '#initf2pywraphook#', + 'externroutines': '#declfortranroutine#', + 'doc': '#docreturn##name#(#docsignature#)', + 'docshort': '#docreturn##name#(#docsignatureshort#)', + 'docs': '"\t#docreturn##name#(#docsignature#)\\n"\n', + 'need': ['arrayobject.h', 'CFUNCSMESS', 'MINMAX'], + 'cppmacros': {debugcapi: '#define DEBUGCFUNCS'}, + 'latexdoc': ['\\subsection{Wrapper function \\texttt{#texname#}}\n', + """ +\\noindent{{}\\verb@#docreturn##name#@{}}\\texttt{(#latexdocsignatureshort#)} +#routnote# + 
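+% (comment in the generated LaTeX: the per-argument and return-value
+% description blocks assembled by buildapi are appended here)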
+#latexdocstrsigns# +"""], + 'restdoc': ['Wrapped function ``#name#``\n' + '-' * 80, + + ] +} + +################## Rules for C/API function ############## + +rout_rules = [ + { # Init + 'separatorsfor': {'callfortranroutine': '\n', 'routdebugenter': '\n', 'decl': '\n', + 'routdebugleave': '\n', 'routdebugfailure': '\n', + 'setjmpbuf': ' || ', + 'docstrreq': '\n', 'docstropt': '\n', 'docstrout': '\n', + 'docstrcbs': '\n', 'docstrsigns': '\\n"\n"', + 'latexdocstrsigns': '\n', + 'latexdocstrreq': '\n', 'latexdocstropt': '\n', + 'latexdocstrout': '\n', 'latexdocstrcbs': '\n', + }, + 'kwlist': '', 'kwlistopt': '', 'callfortran': '', 'callfortranappend': '', + 'docsign': '', 'docsignopt': '', 'decl': '/*decl*/', + 'freemem': '/*freemem*/', + 'docsignshort': '', 'docsignoptshort': '', + 'docstrsigns': '', 'latexdocstrsigns': '', + 'docstrreq': '\\nParameters\\n----------', + 'docstropt': '\\nOther Parameters\\n----------------', + 'docstrout': '\\nReturns\\n-------', + 'docstrcbs': '\\nNotes\\n-----\\nCall-back functions::\\n', + 'latexdocstrreq': '\\noindent Required arguments:', + 'latexdocstropt': '\\noindent Optional arguments:', + 'latexdocstrout': '\\noindent Return objects:', + 'latexdocstrcbs': '\\noindent Call-back functions:', + 'args_capi': '', 'keys_capi': '', 'functype': '', + 'frompyobj': '/*frompyobj*/', + # this list will be reversed + 'cleanupfrompyobj': ['/*end of cleanupfrompyobj*/'], + 'pyobjfrom': '/*pyobjfrom*/', + # this list will be reversed + 'closepyobjfrom': ['/*end of closepyobjfrom*/'], + 'topyarr': '/*topyarr*/', 'routdebugleave': '/*routdebugleave*/', + 'routdebugenter': '/*routdebugenter*/', + 'routdebugfailure': '/*routdebugfailure*/', + 'callfortranroutine': '/*callfortranroutine*/', + 'argformat': '', 'keyformat': '', 'need_cfuncs': '', + 'docreturn': '', 'return': '', 'returnformat': '', 'rformat': '', + 'kwlistxa': '', 'keys_xa': '', 'xaformat': '', 'docsignxa': '', 'docsignxashort': '', + 'initf2pywraphook': '', + 'routnote': {hasnote: '--- #note#', l_not(hasnote): ''}, + }, { + 'apiname': 'f2py_rout_#modulename#_#name#', + 'pyname': '#modulename#.#name#', + 'decl': '', + '_check': l_not(ismoduleroutine) + }, { + 'apiname': 'f2py_rout_#modulename#_#f90modulename#_#name#', + 'pyname': '#modulename#.#f90modulename#.#name#', + 'decl': '', + '_check': ismoduleroutine + }, { # Subroutine + 'functype': 'void', + 'declfortranroutine': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', + l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): 'extern void #fortranname#(#callprotoargument#);', + ismoduleroutine: '', + isdummyroutine: '' + }, + 'routine_def': {l_not(l_or(ismoduleroutine, isintent_c, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', + l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},', + l_and(l_not(ismoduleroutine), isdummyroutine): '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', + }, + 'need': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'F_FUNC'}, + 'callfortranroutine': [ + {debugcapi: [ + """\tfprintf(stderr,\"debug-capi:Fortran subroutine `#fortranname#(#callfortran#)\'\\n\");"""]}, + {hasexternals: """\ +\t\tif (#setjmpbuf#) { +\t\t\tf2py_success = 0; +\t\t} else {"""}, + {isthreadsafe: 
'\t\t\tPy_BEGIN_ALLOW_THREADS'}, + {hascallstatement: '''\t\t\t\t#callstatement#; +\t\t\t\t/*(*f2py_func)(#callfortran#);*/'''}, + {l_not(l_or(hascallstatement, isdummyroutine)) + : '\t\t\t\t(*f2py_func)(#callfortran#);'}, + {isthreadsafe: '\t\t\tPy_END_ALLOW_THREADS'}, + {hasexternals: """\t\t}"""} + ], + '_check': l_and(issubroutine, l_not(issubroutine_wrap)), + }, { # Wrapped function + 'functype': 'void', + 'declfortranroutine': {l_not(l_or(ismoduleroutine, isdummyroutine)): 'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);', + isdummyroutine: '', + }, + + 'routine_def': {l_not(l_or(ismoduleroutine, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},', + isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', + }, + 'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)): ''' + { + extern #ctype# #F_FUNC#(#name_lower#,#NAME#)(void); + PyObject* o = PyDict_GetItemString(d,"#name#"); + tmp = F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL); + PyObject_SetAttrString(o,"_cpointer", tmp); + Py_DECREF(tmp); + s = PyUnicode_FromString("#name#"); + PyObject_SetAttrString(o,"__name__", s); + Py_DECREF(s); + } + '''}, + 'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']}, + 'callfortranroutine': [ + {debugcapi: [ + """\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]}, + {hasexternals: """\ +\tif (#setjmpbuf#) { +\t\tf2py_success = 0; +\t} else {"""}, + {isthreadsafe: '\tPy_BEGIN_ALLOW_THREADS'}, + {l_not(l_or(hascallstatement, isdummyroutine)) + : '\t(*f2py_func)(#callfortran#);'}, + {hascallstatement: + '\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'}, + {isthreadsafe: '\tPy_END_ALLOW_THREADS'}, + {hasexternals: '\t}'} + ], + '_check': isfunction_wrap, + }, { # Wrapped subroutine + 'functype': 'void', + 'declfortranroutine': {l_not(l_or(ismoduleroutine, isdummyroutine)): 'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);', + isdummyroutine: '', + }, + + 'routine_def': {l_not(l_or(ismoduleroutine, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},', + isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', + }, + 'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)): ''' + { + extern void #F_FUNC#(#name_lower#,#NAME#)(void); + PyObject* o = PyDict_GetItemString(d,"#name#"); + tmp = F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL); + PyObject_SetAttrString(o,"_cpointer", tmp); + Py_DECREF(tmp); + s = PyUnicode_FromString("#name#"); + PyObject_SetAttrString(o,"__name__", s); + Py_DECREF(s); + } + '''}, + 'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']}, + 'callfortranroutine': [ + {debugcapi: [ + """\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]}, + {hasexternals: """\ +\tif (#setjmpbuf#) { +\t\tf2py_success = 0; +\t} else {"""}, + {isthreadsafe: '\tPy_BEGIN_ALLOW_THREADS'}, + {l_not(l_or(hascallstatement, isdummyroutine)) + : '\t(*f2py_func)(#callfortran#);'}, + {hascallstatement: + '\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'}, + {isthreadsafe: '\tPy_END_ALLOW_THREADS'}, + {hasexternals: '\t}'} + ], + '_check': issubroutine_wrap, + }, { # Function + 'functype': '#ctype#', + 
'docreturn': {l_not(isintent_hide): '#rname#,'}, + 'docstrout': '#pydocsignout#', + 'latexdocstrout': ['\\item[]{{}\\verb@#pydocsignout#@{}}', + {hasresultnote: '--- #resultnote#'}], + 'callfortranroutine': [{l_and(debugcapi, isstringfunction): """\ +#ifdef USESCOMPAQFORTRAN +\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callcompaqfortran#)\\n\"); +#else +\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\"); +#endif +"""}, + {l_and(debugcapi, l_not(isstringfunction)): """\ +\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\"); +"""} + ], + '_check': l_and(isfunction, l_not(isfunction_wrap)) + }, { # Scalar function + 'declfortranroutine': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'extern #ctype# #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', + l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): 'extern #ctype# #fortranname#(#callprotoargument#);', + isdummyroutine: '' + }, + 'routine_def': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', + l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},', + isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', + }, + 'decl': [{iscomplexfunction_warn: '\t#ctype# #name#_return_value={0,0};', + l_not(iscomplexfunction): '\t#ctype# #name#_return_value=0;'}, + {iscomplexfunction: + '\tPyObject *#name#_return_value_capi = Py_None;'} + ], + 'callfortranroutine': [ + {hasexternals: """\ +\tif (#setjmpbuf#) { +\t\tf2py_success = 0; +\t} else {"""}, + {isthreadsafe: '\tPy_BEGIN_ALLOW_THREADS'}, + {hascallstatement: '''\t#callstatement#; +/*\t#name#_return_value = (*f2py_func)(#callfortran#);*/ +'''}, + {l_not(l_or(hascallstatement, isdummyroutine)) + : '\t#name#_return_value = (*f2py_func)(#callfortran#);'}, + {isthreadsafe: '\tPy_END_ALLOW_THREADS'}, + {hasexternals: '\t}'}, + {l_and(debugcapi, iscomplexfunction) + : '\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value.r,#name#_return_value.i);'}, + {l_and(debugcapi, l_not(iscomplexfunction)): '\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value);'}], + 'pyobjfrom': {iscomplexfunction: '\t#name#_return_value_capi = pyobj_from_#ctype#1(#name#_return_value);'}, + 'need': [{l_not(isdummyroutine): 'F_FUNC'}, + {iscomplexfunction: 'pyobj_from_#ctype#1'}, + {islong_longfunction: 'long_long'}, + {islong_doublefunction: 'long_double'}], + 'returnformat': {l_not(isintent_hide): '#rformat#'}, + 'return': {iscomplexfunction: ',#name#_return_value_capi', + l_not(l_or(iscomplexfunction, isintent_hide)): ',#name#_return_value'}, + '_check': l_and(isfunction, l_not(isstringfunction), l_not(isfunction_wrap)) + }, { # String function # in use for --no-wrap + 'declfortranroutine': 'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', + 'routine_def': {l_not(l_or(ismoduleroutine, isintent_c)): + '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', + l_and(l_not(ismoduleroutine), isintent_c): + '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},' + }, + 'decl': ['\t#ctype# #name#_return_value = NULL;', + '\tint #name#_return_value_len = 0;'], + 
'callfortran':'#name#_return_value,#name#_return_value_len,', + 'callfortranroutine':['\t#name#_return_value_len = #rlength#;', + '\tif ((#name#_return_value = (string)malloc(sizeof(char)*(#name#_return_value_len+1))) == NULL) {', + '\t\tPyErr_SetString(PyExc_MemoryError, \"out of memory\");', + '\t\tf2py_success = 0;', + '\t} else {', + "\t\t(#name#_return_value)[#name#_return_value_len] = '\\0';", + '\t}', + '\tif (f2py_success) {', + {hasexternals: """\ +\t\tif (#setjmpbuf#) { +\t\t\tf2py_success = 0; +\t\t} else {"""}, + {isthreadsafe: '\t\tPy_BEGIN_ALLOW_THREADS'}, + """\ +#ifdef USESCOMPAQFORTRAN +\t\t(*f2py_func)(#callcompaqfortran#); +#else +\t\t(*f2py_func)(#callfortran#); +#endif +""", + {isthreadsafe: '\t\tPy_END_ALLOW_THREADS'}, + {hasexternals: '\t\t}'}, + {debugcapi: + '\t\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value_len,#name#_return_value);'}, + '\t} /* if (f2py_success) after (string)malloc */', + ], + 'returnformat': '#rformat#', + 'return': ',#name#_return_value', + 'freemem': '\tSTRINGFREE(#name#_return_value);', + 'need': ['F_FUNC', '#ctype#', 'STRINGFREE'], + '_check':l_and(isstringfunction, l_not(isfunction_wrap)) # ???obsolete + }, + { # Debugging + 'routdebugenter': '\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#(#docsignature#)\\n");', + 'routdebugleave': '\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: successful.\\n");', + 'routdebugfailure': '\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: failure.\\n");', + '_check': debugcapi + } +] + +################ Rules for arguments ################## + +typedef_need_dict = {islong_long: 'long_long', + islong_double: 'long_double', + islong_complex: 'complex_long_double', + isunsigned_char: 'unsigned_char', + isunsigned_short: 'unsigned_short', + isunsigned: 'unsigned', + isunsigned_long_long: 'unsigned_long_long', + isunsigned_chararray: 'unsigned_char', + isunsigned_shortarray: 'unsigned_short', + isunsigned_long_longarray: 'unsigned_long_long', + issigned_long_longarray: 'long_long', + } + +aux_rules = [ + { + 'separatorsfor': sepdict + }, + { # Common + 'frompyobj': ['\t/* Processing auxiliary variable #varname# */', + {debugcapi: '\tfprintf(stderr,"#vardebuginfo#\\n");'}, ], + 'cleanupfrompyobj': '\t/* End of cleaning variable #varname# */', + 'need': typedef_need_dict, + }, + # Scalars (not complex) + { # Common + 'decl': '\t#ctype# #varname# = 0;', + 'need': {hasinitvalue: 'math.h'}, + 'frompyobj': {hasinitvalue: '\t#varname# = #init#;'}, + '_check': l_and(isscalar, l_not(iscomplex)), + }, + { + 'return': ',#varname#', + 'docstrout': '#pydocsignout#', + 'docreturn': '#outvarname#,', + 'returnformat': '#varrformat#', + '_check': l_and(isscalar, l_not(iscomplex), isintent_out), + }, + # Complex scalars + { # Common + 'decl': '\t#ctype# #varname#;', + 'frompyobj': {hasinitvalue: '\t#varname#.r = #init.r#, #varname#.i = #init.i#;'}, + '_check': iscomplex + }, + # String + { # Common + 'decl': ['\t#ctype# #varname# = NULL;', + '\tint slen(#varname#);', + ], + 'need':['len..'], + '_check':isstring + }, + # Array + { # Common + 'decl': ['\t#ctype# *#varname# = NULL;', + '\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};', + '\tconst int #varname#_Rank = #rank#;', + ], + 'need':['len..', {hasinitvalue: 'forcomb'}, {hasinitvalue: 'CFUNCSMESS'}], + '_check': isarray + }, + # Scalararray + { # Common + '_check': l_and(isarray, l_not(iscomplexarray)) + }, { # Not hidden + '_check': l_and(isarray, l_not(iscomplexarray), isintent_nothide) 
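+        # placeholder rule: the concrete conversion code for non-complex
+        # arrays is supplied by the generic array entries above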
+ }, + # Integer*1 array + {'need': '#ctype#', + '_check': isint1array, + '_depend': '' + }, + # Integer*-1 array + {'need': '#ctype#', + '_check': isunsigned_chararray, + '_depend': '' + }, + # Integer*-2 array + {'need': '#ctype#', + '_check': isunsigned_shortarray, + '_depend': '' + }, + # Integer*-8 array + {'need': '#ctype#', + '_check': isunsigned_long_longarray, + '_depend': '' + }, + # Complexarray + {'need': '#ctype#', + '_check': iscomplexarray, + '_depend': '' + }, + # Stringarray + { + 'callfortranappend': {isarrayofstrings: 'flen(#varname#),'}, + 'need': 'string', + '_check': isstringarray + } +] + +arg_rules = [ + { + 'separatorsfor': sepdict + }, + { # Common + 'frompyobj': ['\t/* Processing variable #varname# */', + {debugcapi: '\tfprintf(stderr,"#vardebuginfo#\\n");'}, ], + 'cleanupfrompyobj': '\t/* End of cleaning variable #varname# */', + '_depend': '', + 'need': typedef_need_dict, + }, + # Doc signatures + { + 'docstropt': {l_and(isoptional, isintent_nothide): '#pydocsign#'}, + 'docstrreq': {l_and(isrequired, isintent_nothide): '#pydocsign#'}, + 'docstrout': {isintent_out: '#pydocsignout#'}, + 'latexdocstropt': {l_and(isoptional, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}', + {hasnote: '--- #note#'}]}, + 'latexdocstrreq': {l_and(isrequired, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}', + {hasnote: '--- #note#'}]}, + 'latexdocstrout': {isintent_out: ['\\item[]{{}\\verb@#pydocsignout#@{}}', + {l_and(hasnote, isintent_hide): '--- #note#', + l_and(hasnote, isintent_nothide): '--- See above.'}]}, + 'depend': '' + }, + # Required/Optional arguments + { + 'kwlist': '"#varname#",', + 'docsign': '#varname#,', + '_check': l_and(isintent_nothide, l_not(isoptional)) + }, + { + 'kwlistopt': '"#varname#",', + 'docsignopt': '#varname#=#showinit#,', + 'docsignoptshort': '#varname#,', + '_check': l_and(isintent_nothide, isoptional) + }, + # Docstring/BuildValue + { + 'docreturn': '#outvarname#,', + 'returnformat': '#varrformat#', + '_check': isintent_out + }, + # Externals (call-back functions) + { # Common + 'docsignxa': {isintent_nothide: '#varname#_extra_args=(),'}, + 'docsignxashort': {isintent_nothide: '#varname#_extra_args,'}, + 'docstropt': {isintent_nothide: '#varname#_extra_args : input tuple, optional\\n Default: ()'}, + 'docstrcbs': '#cbdocstr#', + 'latexdocstrcbs': '\\item[] #cblatexdocstr#', + 'latexdocstropt': {isintent_nothide: '\\item[]{{}\\verb@#varname#_extra_args := () input tuple@{}} --- Extra arguments for call-back function {{}\\verb@#varname#@{}}.'}, + 'decl': [' #cbname#_t #varname#_cb = { Py_None, NULL, 0 };', + ' #cbname#_t *#varname#_cb_ptr = &#varname#_cb;', + ' PyTupleObject *#varname#_xa_capi = NULL;', + {l_not(isintent_callback): + ' #cbname#_typedef #varname#_cptr;'} + ], + 'kwlistxa': {isintent_nothide: '"#varname#_extra_args",'}, + 'argformat': {isrequired: 'O'}, + 'keyformat': {isoptional: 'O'}, + 'xaformat': {isintent_nothide: 'O!'}, + 'args_capi': {isrequired: ',&#varname#_cb.capi'}, + 'keys_capi': {isoptional: ',&#varname#_cb.capi'}, + 'keys_xa': ',&PyTuple_Type,&#varname#_xa_capi', + 'setjmpbuf': '(setjmp(#varname#_cb.jmpbuf))', + 'callfortran': {l_not(isintent_callback): '#varname#_cptr,'}, + 'need': ['#cbname#', 'setjmp.h'], + '_check':isexternal + }, + { + 'frompyobj': [{l_not(isintent_callback): """\ +if(F2PyCapsule_Check(#varname#_cb.capi)) { + #varname#_cptr = F2PyCapsule_AsVoidPtr(#varname#_cb.capi); +} else { + #varname#_cptr = #cbname#; +} +"""}, {isintent_callback: """\ +if (#varname#_cb.capi==Py_None) { + 
#varname#_cb.capi = PyObject_GetAttrString(#modulename#_module,\"#varname#\"); + if (#varname#_cb.capi) { + if (#varname#_xa_capi==NULL) { + if (PyObject_HasAttrString(#modulename#_module,\"#varname#_extra_args\")) { + PyObject* capi_tmp = PyObject_GetAttrString(#modulename#_module,\"#varname#_extra_args\"); + if (capi_tmp) { + #varname#_xa_capi = (PyTupleObject *)PySequence_Tuple(capi_tmp); + Py_DECREF(capi_tmp); + } + else { + #varname#_xa_capi = (PyTupleObject *)Py_BuildValue(\"()\"); + } + if (#varname#_xa_capi==NULL) { + PyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#varname#_extra_args to tuple.\\n\"); + return NULL; + } + } + } + } + if (#varname#_cb.capi==NULL) { + PyErr_SetString(#modulename#_error,\"Callback #varname# not defined (as an argument or module #modulename# attribute).\\n\"); + return NULL; + } +} +"""}, + """\ + if (create_cb_arglist(#varname#_cb.capi,#varname#_xa_capi,#maxnofargs#,#nofoptargs#,&#varname#_cb.nofargs,&#varname#_cb.args_capi,\"failed in processing argument list for call-back #varname#.\")) { +""", + {debugcapi: ["""\ + fprintf(stderr,\"debug-capi:Assuming %d arguments; at most #maxnofargs#(-#nofoptargs#) is expected.\\n\",#varname#_cb.nofargs); + CFUNCSMESSPY(\"for #varname#=\",#varname#_cb.capi);""", + {l_not(isintent_callback): """ fprintf(stderr,\"#vardebugshowvalue# (call-back in C).\\n\",#cbname#);"""}]}, + """\ + CFUNCSMESS(\"Saving callback variables for `#varname#`.\\n\"); + #varname#_cb_ptr = swap_active_#cbname#(#varname#_cb_ptr);""", + ], + 'cleanupfrompyobj': + """\ + CFUNCSMESS(\"Restoring callback variables for `#varname#`.\\n\"); + #varname#_cb_ptr = swap_active_#cbname#(#varname#_cb_ptr); + Py_DECREF(#varname#_cb.args_capi); + }""", + 'need': ['SWAP', 'create_cb_arglist'], + '_check':isexternal, + '_depend':'' + }, + # Scalars (not complex) + { # Common + 'decl': '\t#ctype# #varname# = 0;', + 'pyobjfrom': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'}, + 'callfortran': {isintent_c: '#varname#,', l_not(isintent_c): '&#varname#,'}, + 'return': {isintent_out: ',#varname#'}, + '_check': l_and(isscalar, l_not(iscomplex)) + }, { + 'need': {hasinitvalue: 'math.h'}, + '_check': l_and(isscalar, l_not(iscomplex)), + }, { # Not hidden + 'decl': '\tPyObject *#varname#_capi = Py_None;', + 'argformat': {isrequired: 'O'}, + 'keyformat': {isoptional: 'O'}, + 'args_capi': {isrequired: ',&#varname#_capi'}, + 'keys_capi': {isoptional: ',&#varname#_capi'}, + 'pyobjfrom': {isintent_inout: """\ +\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#); +\tif (f2py_success) {"""}, + 'closepyobjfrom': {isintent_inout: "\t} /*if (f2py_success) of #varname# pyobjfrom*/"}, + 'need': {isintent_inout: 'try_pyarr_from_#ctype#'}, + '_check': l_and(isscalar, l_not(iscomplex), isintent_nothide) + }, { + 'frompyobj': [ + # hasinitvalue... + # if pyobj is None: + # varname = init + # else + # from_pyobj(varname) + # + # isoptional and noinitvalue... + # if pyobj is not None: + # from_pyobj(varname) + # else: + # varname is uninitialized + # + # ... 
+ # from_pyobj(varname) + # + {hasinitvalue: '\tif (#varname#_capi == Py_None) #varname# = #init#; else', + '_depend': ''}, + {l_and(isoptional, l_not(hasinitvalue)): '\tif (#varname#_capi != Py_None)', + '_depend': ''}, + {l_not(islogical): '''\ +\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#"); +\tif (f2py_success) {'''}, + {islogical: '''\ +\t\t#varname# = (#ctype#)PyObject_IsTrue(#varname#_capi); +\t\tf2py_success = 1; +\tif (f2py_success) {'''}, + ], + 'cleanupfrompyobj': '\t} /*if (f2py_success) of #varname#*/', + 'need': {l_not(islogical): '#ctype#_from_pyobj'}, + '_check': l_and(isscalar, l_not(iscomplex), isintent_nothide), + '_depend': '' + }, { # Hidden + 'frompyobj': {hasinitvalue: '\t#varname# = #init#;'}, + 'need': typedef_need_dict, + '_check': l_and(isscalar, l_not(iscomplex), isintent_hide), + '_depend': '' + }, { # Common + 'frompyobj': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'}, + '_check': l_and(isscalar, l_not(iscomplex)), + '_depend': '' + }, + # Complex scalars + { # Common + 'decl': '\t#ctype# #varname#;', + 'callfortran': {isintent_c: '#varname#,', l_not(isintent_c): '&#varname#,'}, + 'pyobjfrom': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'}, + 'return': {isintent_out: ',#varname#_capi'}, + '_check': iscomplex + }, { # Not hidden + 'decl': '\tPyObject *#varname#_capi = Py_None;', + 'argformat': {isrequired: 'O'}, + 'keyformat': {isoptional: 'O'}, + 'args_capi': {isrequired: ',&#varname#_capi'}, + 'keys_capi': {isoptional: ',&#varname#_capi'}, + 'need': {isintent_inout: 'try_pyarr_from_#ctype#'}, + 'pyobjfrom': {isintent_inout: """\ +\t\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#); +\t\tif (f2py_success) {"""}, + 'closepyobjfrom': {isintent_inout: "\t\t} /*if (f2py_success) of #varname# pyobjfrom*/"}, + '_check': l_and(iscomplex, isintent_nothide) + }, { + 'frompyobj': [{hasinitvalue: '\tif (#varname#_capi==Py_None) {#varname#.r = #init.r#, #varname#.i = #init.i#;} else'}, + {l_and(isoptional, l_not(hasinitvalue)) + : '\tif (#varname#_capi != Py_None)'}, + '\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");' + '\n\tif (f2py_success) {'], + 'cleanupfrompyobj': '\t} /*if (f2py_success) of #varname# frompyobj*/', + 'need': ['#ctype#_from_pyobj'], + '_check': l_and(iscomplex, isintent_nothide), + '_depend': '' + }, { # Hidden + 'decl': {isintent_out: '\tPyObject *#varname#_capi = Py_None;'}, + '_check': l_and(iscomplex, isintent_hide) + }, { + 'frompyobj': {hasinitvalue: '\t#varname#.r = #init.r#, #varname#.i = #init.i#;'}, + '_check': l_and(iscomplex, isintent_hide), + '_depend': '' + }, { # Common + 'pyobjfrom': {isintent_out: '\t#varname#_capi = pyobj_from_#ctype#1(#varname#);'}, + 'need': ['pyobj_from_#ctype#1'], + '_check': iscomplex + }, { + 'frompyobj': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'}, + '_check': iscomplex, + '_depend': '' + }, + # String + { # Common + 'decl': ['\t#ctype# #varname# = NULL;', + '\tint slen(#varname#);', + '\tPyObject *#varname#_capi = Py_None;'], + 'callfortran':'#varname#,', + 'callfortranappend':'slen(#varname#),', + 'pyobjfrom':{debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'}, + 'return': {isintent_out: ',#varname#'}, + 'need': ['len..'], # 'STRINGFREE'], + '_check':isstring + }, { # Common + 'frompyobj': """\ 
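+\t/* fix the expected string length, then allocate the C buffer and fill it from the Python object */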
+\tslen(#varname#) = #length#; +\tf2py_success = #ctype#_from_pyobj(&#varname#,&slen(#varname#),#init#,#varname#_capi,\"#ctype#_from_pyobj failed in converting #nth# `#varname#\' of #pyname# to C #ctype#\"); +\tif (f2py_success) {""", + 'cleanupfrompyobj': """\ +\t\tSTRINGFREE(#varname#); +\t} /*if (f2py_success) of #varname#*/""", + 'need': ['#ctype#_from_pyobj', 'len..', 'STRINGFREE'], + '_check':isstring, + '_depend':'' + }, { # Not hidden + 'argformat': {isrequired: 'O'}, + 'keyformat': {isoptional: 'O'}, + 'args_capi': {isrequired: ',&#varname#_capi'}, + 'keys_capi': {isoptional: ',&#varname#_capi'}, + 'pyobjfrom': {isintent_inout: '''\ +\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,#varname#); +\tif (f2py_success) {'''}, + 'closepyobjfrom': {isintent_inout: '\t} /*if (f2py_success) of #varname# pyobjfrom*/'}, + 'need': {isintent_inout: 'try_pyarr_from_#ctype#'}, + '_check': l_and(isstring, isintent_nothide) + }, { # Hidden + '_check': l_and(isstring, isintent_hide) + }, { + 'frompyobj': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'}, + '_check': isstring, + '_depend': '' + }, + # Array + { # Common + 'decl': ['\t#ctype# *#varname# = NULL;', + '\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};', + '\tconst int #varname#_Rank = #rank#;', + '\tPyArrayObject *capi_#varname#_tmp = NULL;', + '\tint capi_#varname#_intent = 0;', + ], + 'callfortran':'#varname#,', + 'return':{isintent_out: ',capi_#varname#_tmp'}, + 'need': 'len..', + '_check': isarray + }, { # intent(overwrite) array + 'decl': '\tint capi_overwrite_#varname# = 1;', + 'kwlistxa': '"overwrite_#varname#",', + 'xaformat': 'i', + 'keys_xa': ',&capi_overwrite_#varname#', + 'docsignxa': 'overwrite_#varname#=1,', + 'docsignxashort': 'overwrite_#varname#,', + 'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 1', + '_check': l_and(isarray, isintent_overwrite), + }, { + 'frompyobj': '\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);', + '_check': l_and(isarray, isintent_overwrite), + '_depend': '', + }, + { # intent(copy) array + 'decl': '\tint capi_overwrite_#varname# = 0;', + 'kwlistxa': '"overwrite_#varname#",', + 'xaformat': 'i', + 'keys_xa': ',&capi_overwrite_#varname#', + 'docsignxa': 'overwrite_#varname#=0,', + 'docsignxashort': 'overwrite_#varname#,', + 'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 0', + '_check': l_and(isarray, isintent_copy), + }, { + 'frompyobj': '\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);', + '_check': l_and(isarray, isintent_copy), + '_depend': '', + }, { + 'need': [{hasinitvalue: 'forcomb'}, {hasinitvalue: 'CFUNCSMESS'}], + '_check': isarray, + '_depend': '' + }, { # Not hidden + 'decl': '\tPyObject *#varname#_capi = Py_None;', + 'argformat': {isrequired: 'O'}, + 'keyformat': {isoptional: 'O'}, + 'args_capi': {isrequired: ',&#varname#_capi'}, + 'keys_capi': {isoptional: ',&#varname#_capi'}, + '_check': l_and(isarray, isintent_nothide) + }, { + 'frompyobj': ['\t#setdims#;', + '\tcapi_#varname#_intent |= #intent#;', + {isintent_hide: + '\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,Py_None);'}, + {isintent_nothide: + '\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,#varname#_capi);'}, + """\ +\tif (capi_#varname#_tmp == NULL) { +\t\tPyObject *exc, *val, *tb; +\t\tPyErr_Fetch(&exc, &val, &tb); +\t\tPyErr_SetString(exc ? 
exc : #modulename#_error,\"failed in converting #nth# `#varname#\' of #pyname# to C/Fortran array\" ); +\t\tnpy_PyErr_ChainExceptionsCause(exc, val, tb); +\t} else { +\t\t#varname# = (#ctype# *)(PyArray_DATA(capi_#varname#_tmp)); +""", + {hasinitvalue: [ + {isintent_nothide: + '\tif (#varname#_capi == Py_None) {'}, + {isintent_hide: '\t{'}, + {iscomplexarray: '\t\t#ctype# capi_c;'}, + """\ +\t\tint *_i,capi_i=0; +\t\tCFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\"); +\t\tif (initforcomb(PyArray_DIMS(capi_#varname#_tmp),PyArray_NDIM(capi_#varname#_tmp),1)) { +\t\t\twhile ((_i = nextforcomb())) +\t\t\t\t#varname#[capi_i++] = #init#; /* fortran way */ +\t\t} else { +\t\t\tPyObject *exc, *val, *tb; +\t\t\tPyErr_Fetch(&exc, &val, &tb); +\t\t\tPyErr_SetString(exc ? exc : #modulename#_error,\"Initialization of #nth# #varname# failed (initforcomb).\"); +\t\t\tnpy_PyErr_ChainExceptionsCause(exc, val, tb); +\t\t\tf2py_success = 0; +\t\t} +\t} +\tif (f2py_success) {"""]}, + ], + 'cleanupfrompyobj': [ # note that this list will be reversed + '\t} /*if (capi_#varname#_tmp == NULL) ... else of #varname#*/', + {l_not(l_or(isintent_out, isintent_hide)): """\ +\tif((PyObject *)capi_#varname#_tmp!=#varname#_capi) { +\t\tPy_XDECREF(capi_#varname#_tmp); }"""}, + {l_and(isintent_hide, l_not(isintent_out)) + : """\t\tPy_XDECREF(capi_#varname#_tmp);"""}, + {hasinitvalue: '\t} /*if (f2py_success) of #varname# init*/'}, + ], + '_check': isarray, + '_depend': '' + }, + # Scalararray + { # Common + '_check': l_and(isarray, l_not(iscomplexarray)) + }, { # Not hidden + '_check': l_and(isarray, l_not(iscomplexarray), isintent_nothide) + }, + # Integer*1 array + {'need': '#ctype#', + '_check': isint1array, + '_depend': '' + }, + # Integer*-1 array + {'need': '#ctype#', + '_check': isunsigned_chararray, + '_depend': '' + }, + # Integer*-2 array + {'need': '#ctype#', + '_check': isunsigned_shortarray, + '_depend': '' + }, + # Integer*-8 array + {'need': '#ctype#', + '_check': isunsigned_long_longarray, + '_depend': '' + }, + # Complexarray + {'need': '#ctype#', + '_check': iscomplexarray, + '_depend': '' + }, + # Stringarray + { + 'callfortranappend': {isarrayofstrings: 'flen(#varname#),'}, + 'need': 'string', + '_check': isstringarray + } +] + +################# Rules for checking ############### + +check_rules = [ + { + 'frompyobj': {debugcapi: '\tfprintf(stderr,\"debug-capi:Checking `#check#\'\\n\");'}, + 'need': 'len..' + }, { + 'frompyobj': '\tCHECKSCALAR(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {', + 'cleanupfrompyobj': '\t} /*CHECKSCALAR(#check#)*/', + 'need': 'CHECKSCALAR', + '_check': l_and(isscalar, l_not(iscomplex)), + '_break': '' + }, { + 'frompyobj': '\tCHECKSTRING(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {', + 'cleanupfrompyobj': '\t} /*CHECKSTRING(#check#)*/', + 'need': 'CHECKSTRING', + '_check': isstring, + '_break': '' + }, { + 'need': 'CHECKARRAY', + 'frompyobj': '\tCHECKARRAY(#check#,\"#check#\",\"#nth# #varname#\") {', + 'cleanupfrompyobj': '\t} /*CHECKARRAY(#check#)*/', + '_check': isarray, + '_break': '' + }, { + 'need': 'CHECKGENERIC', + 'frompyobj': '\tCHECKGENERIC(#check#,\"#check#\",\"#nth# #varname#\") {', + 'cleanupfrompyobj': '\t} /*CHECKGENERIC(#check#)*/', + } +] + +########## Applying the rules. 
No need to modify what follows ############# + +#################### Build C/API module ####################### + + +def buildmodule(m, um): + """ + Return + """ + outmess('\tBuilding module "%s"...\n' % (m['name'])) + ret = {} + mod_rules = defmod_rules[:] + vrd = capi_maps.modsign2map(m) + rd = dictappend({'f2py_version': f2py_version}, vrd) + funcwrappers = [] + funcwrappers2 = [] # F90 codes + for n in m['interfaced']: + nb = None + for bi in m['body']: + if bi['block'] not in ['interface', 'abstract interface']: + errmess('buildmodule: Expected interface block. Skipping.\n') + continue + for b in bi['body']: + if b['name'] == n: + nb = b + break + + if not nb: + errmess( + 'buildmodule: Could not found the body of interfaced routine "%s". Skipping.\n' % (n)) + continue + nb_list = [nb] + if 'entry' in nb: + for k, a in nb['entry'].items(): + nb1 = copy.deepcopy(nb) + del nb1['entry'] + nb1['name'] = k + nb1['args'] = a + nb_list.append(nb1) + for nb in nb_list: + # requiresf90wrapper must be called before buildapi as it + # rewrites assumed shape arrays as automatic arrays. + isf90 = requiresf90wrapper(nb) + api, wrap = buildapi(nb) + if wrap: + if isf90: + funcwrappers2.append(wrap) + else: + funcwrappers.append(wrap) + ar = applyrules(api, vrd) + rd = dictappend(rd, ar) + + # Construct COMMON block support + cr, wrap = common_rules.buildhooks(m) + if wrap: + funcwrappers.append(wrap) + ar = applyrules(cr, vrd) + rd = dictappend(rd, ar) + + # Construct F90 module support + mr, wrap = f90mod_rules.buildhooks(m) + if wrap: + funcwrappers2.append(wrap) + ar = applyrules(mr, vrd) + rd = dictappend(rd, ar) + + for u in um: + ar = use_rules.buildusevars(u, m['use'][u['name']]) + rd = dictappend(rd, ar) + + needs = cfuncs.get_needs() + code = {} + for n in needs.keys(): + code[n] = [] + for k in needs[n]: + c = '' + if k in cfuncs.includes0: + c = cfuncs.includes0[k] + elif k in cfuncs.includes: + c = cfuncs.includes[k] + elif k in cfuncs.userincludes: + c = cfuncs.userincludes[k] + elif k in cfuncs.typedefs: + c = cfuncs.typedefs[k] + elif k in cfuncs.typedefs_generated: + c = cfuncs.typedefs_generated[k] + elif k in cfuncs.cppmacros: + c = cfuncs.cppmacros[k] + elif k in cfuncs.cfuncs: + c = cfuncs.cfuncs[k] + elif k in cfuncs.callbacks: + c = cfuncs.callbacks[k] + elif k in cfuncs.f90modhooks: + c = cfuncs.f90modhooks[k] + elif k in cfuncs.commonhooks: + c = cfuncs.commonhooks[k] + else: + errmess('buildmodule: unknown need %s.\n' % (repr(k))) + continue + code[n].append(c) + mod_rules.append(code) + for r in mod_rules: + if ('_check' in r and r['_check'](m)) or ('_check' not in r): + ar = applyrules(r, vrd, m) + rd = dictappend(rd, ar) + ar = applyrules(module_rules, rd) + + fn = os.path.join(options['buildpath'], vrd['coutput']) + ret['csrc'] = fn + with open(fn, 'w') as f: + f.write(ar['modulebody'].replace('\t', 2 * ' ')) + outmess('\tWrote C/API module "%s" to file "%s"\n' % (m['name'], fn)) + + if options['dorestdoc']: + fn = os.path.join( + options['buildpath'], vrd['modulename'] + 'module.rest') + with open(fn, 'w') as f: + f.write('.. 
-*- rest -*-\n') + f.write('\n'.join(ar['restdoc'])) + outmess('\tReST Documentation is saved to file "%s/%smodule.rest"\n' % + (options['buildpath'], vrd['modulename'])) + if options['dolatexdoc']: + fn = os.path.join( + options['buildpath'], vrd['modulename'] + 'module.tex') + ret['ltx'] = fn + with open(fn, 'w') as f: + f.write( + '%% This file is auto-generated with f2py (version:%s)\n' % (f2py_version)) + if 'shortlatex' not in options: + f.write( + '\\documentclass{article}\n\\usepackage{a4wide}\n\\begin{document}\n\\tableofcontents\n\n') + f.write('\n'.join(ar['latexdoc'])) + if 'shortlatex' not in options: + f.write('\\end{document}') + outmess('\tDocumentation is saved to file "%s/%smodule.tex"\n' % + (options['buildpath'], vrd['modulename'])) + if funcwrappers: + wn = os.path.join(options['buildpath'], vrd['f2py_wrapper_output']) + ret['fsrc'] = wn + with open(wn, 'w') as f: + f.write('C -*- fortran -*-\n') + f.write( + 'C This file is autogenerated with f2py (version:%s)\n' % (f2py_version)) + f.write( + 'C It contains Fortran 77 wrappers to fortran functions.\n') + lines = [] + for l in ('\n\n'.join(funcwrappers) + '\n').split('\n'): + if 0 <= l.find('!') < 66: + # don't split comment lines + lines.append(l + '\n') + elif l and l[0] == ' ': + while len(l) >= 66: + lines.append(l[:66] + '\n &') + l = l[66:] + lines.append(l + '\n') + else: + lines.append(l + '\n') + lines = ''.join(lines).replace('\n &\n', '\n') + f.write(lines) + outmess('\tFortran 77 wrappers are saved to "%s"\n' % (wn)) + if funcwrappers2: + wn = os.path.join( + options['buildpath'], '%s-f2pywrappers2.f90' % (vrd['modulename'])) + ret['fsrc'] = wn + with open(wn, 'w') as f: + f.write('! -*- f90 -*-\n') + f.write( + '! This file is autogenerated with f2py (version:%s)\n' % (f2py_version)) + f.write( + '! 
It contains Fortran 90 wrappers to fortran functions.\n') + lines = [] + for l in ('\n\n'.join(funcwrappers2) + '\n').split('\n'): + if 0 <= l.find('!') < 72: + # don't split comment lines + lines.append(l + '\n') + elif len(l) > 72 and l[0] == ' ': + lines.append(l[:72] + '&\n &') + l = l[72:] + while len(l) > 66: + lines.append(l[:66] + '&\n &') + l = l[66:] + lines.append(l + '\n') + else: + lines.append(l + '\n') + lines = ''.join(lines).replace('\n &\n', '\n') + f.write(lines) + outmess('\tFortran 90 wrappers are saved to "%s"\n' % (wn)) + return ret + +################## Build C/API function ############# + +stnd = {1: 'st', 2: 'nd', 3: 'rd', 4: 'th', 5: 'th', + 6: 'th', 7: 'th', 8: 'th', 9: 'th', 0: 'th'} + + +def buildapi(rout): + rout, wrap = func2subr.assubr(rout) + args, depargs = getargs2(rout) + capi_maps.depargs = depargs + var = rout['vars'] + + if ismoduleroutine(rout): + outmess('\t\t\tConstructing wrapper function "%s.%s"...\n' % + (rout['modulename'], rout['name'])) + else: + outmess('\t\tConstructing wrapper function "%s"...\n' % (rout['name'])) + # Routine + vrd = capi_maps.routsign2map(rout) + rd = dictappend({}, vrd) + for r in rout_rules: + if ('_check' in r and r['_check'](rout)) or ('_check' not in r): + ar = applyrules(r, vrd, rout) + rd = dictappend(rd, ar) + + # Args + nth, nthk = 0, 0 + savevrd = {} + for a in args: + vrd = capi_maps.sign2map(a, var[a]) + if isintent_aux(var[a]): + _rules = aux_rules + else: + _rules = arg_rules + if not isintent_hide(var[a]): + if not isoptional(var[a]): + nth = nth + 1 + vrd['nth'] = repr(nth) + stnd[nth % 10] + ' argument' + else: + nthk = nthk + 1 + vrd['nth'] = repr(nthk) + stnd[nthk % 10] + ' keyword' + else: + vrd['nth'] = 'hidden' + savevrd[a] = vrd + for r in _rules: + if '_depend' in r: + continue + if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): + ar = applyrules(r, vrd, var[a]) + rd = dictappend(rd, ar) + if '_break' in r: + break + for a in depargs: + if isintent_aux(var[a]): + _rules = aux_rules + else: + _rules = arg_rules + vrd = savevrd[a] + for r in _rules: + if '_depend' not in r: + continue + if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): + ar = applyrules(r, vrd, var[a]) + rd = dictappend(rd, ar) + if '_break' in r: + break + if 'check' in var[a]: + for c in var[a]['check']: + vrd['check'] = c + ar = applyrules(check_rules, vrd, var[a]) + rd = dictappend(rd, ar) + if isinstance(rd['cleanupfrompyobj'], list): + rd['cleanupfrompyobj'].reverse() + if isinstance(rd['closepyobjfrom'], list): + rd['closepyobjfrom'].reverse() + rd['docsignature'] = stripcomma(replace('#docsign##docsignopt##docsignxa#', + {'docsign': rd['docsign'], + 'docsignopt': rd['docsignopt'], + 'docsignxa': rd['docsignxa']})) + optargs = stripcomma(replace('#docsignopt##docsignxa#', + {'docsignxa': rd['docsignxashort'], + 'docsignopt': rd['docsignoptshort']} + )) + if optargs == '': + rd['docsignatureshort'] = stripcomma( + replace('#docsign#', {'docsign': rd['docsign']})) + else: + rd['docsignatureshort'] = replace('#docsign#[#docsignopt#]', + {'docsign': rd['docsign'], + 'docsignopt': optargs, + }) + rd['latexdocsignatureshort'] = rd['docsignatureshort'].replace('_', '\\_') + rd['latexdocsignatureshort'] = rd[ + 'latexdocsignatureshort'].replace(',', ', ') + cfs = stripcomma(replace('#callfortran##callfortranappend#', { + 'callfortran': rd['callfortran'], 'callfortranappend': rd['callfortranappend']})) + if len(rd['callfortranappend']) > 1: + rd['callcompaqfortran'] = stripcomma(replace('#callfortran# 
0,#callfortranappend#', { + 'callfortran': rd['callfortran'], 'callfortranappend': rd['callfortranappend']})) + else: + rd['callcompaqfortran'] = cfs + rd['callfortran'] = cfs + if isinstance(rd['docreturn'], list): + rd['docreturn'] = stripcomma( + replace('#docreturn#', {'docreturn': rd['docreturn']})) + ' = ' + rd['docstrsigns'] = [] + rd['latexdocstrsigns'] = [] + for k in ['docstrreq', 'docstropt', 'docstrout', 'docstrcbs']: + if k in rd and isinstance(rd[k], list): + rd['docstrsigns'] = rd['docstrsigns'] + rd[k] + k = 'latex' + k + if k in rd and isinstance(rd[k], list): + rd['latexdocstrsigns'] = rd['latexdocstrsigns'] + rd[k][0:1] +\ + ['\\begin{description}'] + rd[k][1:] +\ + ['\\end{description}'] + + ar = applyrules(routine_rules, rd) + if ismoduleroutine(rout): + outmess('\t\t\t %s\n' % (ar['docshort'])) + else: + outmess('\t\t %s\n' % (ar['docshort'])) + return ar, wrap + + +#################### EOF rules.py ####################### diff --git a/MLPY/Lib/site-packages/numpy/f2py/setup.py b/MLPY/Lib/site-packages/numpy/f2py/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..ab87d20657a0b52d49e46e3ab80492a2fbd693c8 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/setup.py @@ -0,0 +1,73 @@ +#!/usr/bin/env python3 +""" +setup.py for installing F2PY + +Usage: + pip install . + +Copyright 2001-2005 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +$Revision: 1.32 $ +$Date: 2005/01/30 17:22:14 $ +Pearu Peterson + +""" +from numpy.distutils.core import setup +from numpy.distutils.misc_util import Configuration + + +from __version__ import version + + +def configuration(parent_package='', top_path=None): + config = Configuration('f2py', parent_package, top_path) + config.add_subpackage('tests') + config.add_data_dir('tests/src') + config.add_data_files( + 'src/fortranobject.c', + 'src/fortranobject.h') + config.add_data_files('*.pyi') + return config + + +if __name__ == "__main__": + + config = configuration(top_path='') + config = config.todict() + + config['download_url'] = "http://cens.ioc.ee/projects/f2py2e/2.x"\ + "/F2PY-2-latest.tar.gz" + config['classifiers'] = [ + 'Development Status :: 5 - Production/Stable', + 'Intended Audience :: Developers', + 'Intended Audience :: Science/Research', + 'License :: OSI Approved :: NumPy License', + 'Natural Language :: English', + 'Operating System :: OS Independent', + 'Programming Language :: C', + 'Programming Language :: Fortran', + 'Programming Language :: Python', + 'Topic :: Scientific/Engineering', + 'Topic :: Software Development :: Code Generators', + ] + setup(version=version, + description="F2PY - Fortran to Python Interface Generator", + author="Pearu Peterson", + author_email="pearu@cens.ioc.ee", + maintainer="Pearu Peterson", + maintainer_email="pearu@cens.ioc.ee", + license="BSD", + platforms="Unix, Windows (mingw|cygwin), Mac OSX", + long_description="""\ +The Fortran to Python Interface Generator, or F2PY for short, is a +command line tool (f2py) for generating Python C/API modules for +wrapping Fortran 77/90/95 subroutines, accessing common blocks from +Python, and calling Python functions from Fortran (call-backs). 
+Interfacing subroutines/data from Fortran 90/95 modules is supported.""",
+          url="http://cens.ioc.ee/projects/f2py2e/",
+          keywords=['Fortran', 'f2py'],
+          **config)
diff --git a/MLPY/Lib/site-packages/numpy/f2py/src/fortranobject.c b/MLPY/Lib/site-packages/numpy/f2py/src/fortranobject.c
new file mode 100644
index 0000000000000000000000000000000000000000..8ec29a591cf2278b61c65553daba56ce3029f506
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/f2py/src/fortranobject.c
@@ -0,0 +1,1107 @@
+#define FORTRANOBJECT_C
+#include "fortranobject.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdlib.h>
+#include <string.h>
+
+/*
+  This file implements: FortranObject, array_from_pyobj, copy_ND_array
+
+  Author: Pearu Peterson <pearu@cens.ioc.ee>
+  $Revision: 1.52 $
+  $Date: 2005/07/11 07:44:20 $
+*/
+
+int
+F2PyDict_SetItemString(PyObject *dict, char *name, PyObject *obj)
+{
+    if (obj==NULL) {
+        fprintf(stderr, "Error loading %s\n", name);
+        if (PyErr_Occurred()) {
+            PyErr_Print();
+            PyErr_Clear();
+        }
+        return -1;
+    }
+    return PyDict_SetItemString(dict, name, obj);
+}
+
+/*
+ * Python-only fallback for thread-local callback pointers
+ */
+void *F2PySwapThreadLocalCallbackPtr(char *key, void *ptr)
+{
+    PyObject *local_dict, *value;
+    void *prev;
+
+    local_dict = PyThreadState_GetDict();
+    if (local_dict == NULL) {
+        Py_FatalError("F2PySwapThreadLocalCallbackPtr: PyThreadState_GetDict failed");
+    }
+
+    value = PyDict_GetItemString(local_dict, key);
+    if (value != NULL) {
+        prev = PyLong_AsVoidPtr(value);
+        if (PyErr_Occurred()) {
+            Py_FatalError("F2PySwapThreadLocalCallbackPtr: PyLong_AsVoidPtr failed");
+        }
+    }
+    else {
+        prev = NULL;
+    }
+
+    value = PyLong_FromVoidPtr((void *)ptr);
+    if (value == NULL) {
+        Py_FatalError("F2PySwapThreadLocalCallbackPtr: PyLong_FromVoidPtr failed");
+    }
+
+    if (PyDict_SetItemString(local_dict, key, value) != 0) {
+        Py_FatalError("F2PySwapThreadLocalCallbackPtr: PyDict_SetItemString failed");
+    }
+
+    Py_DECREF(value);
+
+    return prev;
+}
+
+void *F2PyGetThreadLocalCallbackPtr(char *key)
+{
+    PyObject *local_dict, *value;
+    void *prev;
+
+    local_dict = PyThreadState_GetDict();
+    if (local_dict == NULL) {
+        Py_FatalError("F2PyGetThreadLocalCallbackPtr: PyThreadState_GetDict failed");
+    }
+
+    value = PyDict_GetItemString(local_dict, key);
+    if (value != NULL) {
+        prev = PyLong_AsVoidPtr(value);
+        if (PyErr_Occurred()) {
+            Py_FatalError("F2PyGetThreadLocalCallbackPtr: PyLong_AsVoidPtr failed");
+        }
+    }
+    else {
+        prev = NULL;
+    }
+
+    return prev;
+}
+
+/************************* FortranObject *******************************/
+
+typedef PyObject *(*fortranfunc)(PyObject *,PyObject *,PyObject *,void *);
+
+PyObject *
+PyFortranObject_New(FortranDataDef* defs, f2py_void_func init) {
+    int i;
+    PyFortranObject *fp = NULL;
+    PyObject *v = NULL;
+    if (init!=NULL) {  /* Initialize F90 module objects */
+        (*(init))();
+    }
+    fp = PyObject_New(PyFortranObject, &PyFortran_Type);
+    if (fp == NULL) {
+        return NULL;
+    }
+    if ((fp->dict = PyDict_New()) == NULL) {
+        Py_DECREF(fp);
+        return NULL;
+    }
+    fp->len = 0;
+    while (defs[fp->len].name != NULL) {
+        fp->len++;
+    }
+    if (fp->len == 0) {
+        goto fail;
+    }
+    fp->defs = defs;
+    for (i=0;i<fp->len;i++) {
+        if (fp->defs[i].rank == -1) {  /* Is Fortran routine */
+            v = PyFortranObject_NewAsAttr(&(fp->defs[i]));
+            if (v==NULL) {
+                goto fail;
+            }
+            PyDict_SetItemString(fp->dict,fp->defs[i].name,v);
+            Py_XDECREF(v);
+        } else
+        if ((fp->defs[i].data)!=NULL) { /* Is Fortran variable or array (not allocatable) */
+            if (fp->defs[i].type == NPY_STRING) {
+                int n = fp->defs[i].rank-1;
+                v
= PyArray_New(&PyArray_Type, n, fp->defs[i].dims.d, + NPY_STRING, NULL, fp->defs[i].data, fp->defs[i].dims.d[n], + NPY_ARRAY_FARRAY, NULL); + } + else { + v = PyArray_New(&PyArray_Type, fp->defs[i].rank, fp->defs[i].dims.d, + fp->defs[i].type, NULL, fp->defs[i].data, 0, NPY_ARRAY_FARRAY, + NULL); + } + if (v==NULL) { + goto fail; + } + PyDict_SetItemString(fp->dict,fp->defs[i].name,v); + Py_XDECREF(v); + } + } + return (PyObject *)fp; + fail: + Py_XDECREF(fp); + return NULL; +} + +PyObject * +PyFortranObject_NewAsAttr(FortranDataDef* defs) { /* used for calling F90 module routines */ + PyFortranObject *fp = NULL; + fp = PyObject_New(PyFortranObject, &PyFortran_Type); + if (fp == NULL) return NULL; + if ((fp->dict = PyDict_New())==NULL) { + PyObject_Del(fp); + return NULL; + } + fp->len = 1; + fp->defs = defs; + return (PyObject *)fp; +} + +/* Fortran methods */ + +static void +fortran_dealloc(PyFortranObject *fp) { + Py_XDECREF(fp->dict); + PyObject_Del(fp); +} + + +/* Returns number of bytes consumed from buf, or -1 on error. */ +static Py_ssize_t +format_def(char *buf, Py_ssize_t size, FortranDataDef def) +{ + char *p = buf; + int i, n; + + n = PyOS_snprintf(p, size, "array(%" NPY_INTP_FMT, def.dims.d[0]); + if (n < 0 || n >= size) { + return -1; + } + p += n; + size -= n; + + for (i = 1; i < def.rank; i++) { + n = PyOS_snprintf(p, size, ",%" NPY_INTP_FMT, def.dims.d[i]); + if (n < 0 || n >= size) { + return -1; + } + p += n; + size -= n; + } + + if (size <= 0) { + return -1; + } + + *p++ = ')'; + size--; + + if (def.data == NULL) { + static const char notalloc[] = ", not allocated"; + if ((size_t) size < sizeof(notalloc)) { + return -1; + } + memcpy(p, notalloc, sizeof(notalloc)); + p += sizeof(notalloc); + size -= sizeof(notalloc); + } + + return p - buf; +} + +static PyObject * +fortran_doc(FortranDataDef def) +{ + char *buf, *p; + PyObject *s = NULL; + Py_ssize_t n, origsize, size = 100; + + if (def.doc != NULL) { + size += strlen(def.doc); + } + origsize = size; + buf = p = (char *)PyMem_Malloc(size); + if (buf == NULL) { + return PyErr_NoMemory(); + } + + if (def.rank == -1) { + if (def.doc) { + n = strlen(def.doc); + if (n > size) { + goto fail; + } + memcpy(p, def.doc, n); + p += n; + size -= n; + } + else { + n = PyOS_snprintf(p, size, "%s - no docs available", def.name); + if (n < 0 || n >= size) { + goto fail; + } + p += n; + size -= n; + } + } + else { + PyArray_Descr *d = PyArray_DescrFromType(def.type); + n = PyOS_snprintf(p, size, "%s : '%c'-", def.name, d->type); + Py_DECREF(d); + if (n < 0 || n >= size) { + goto fail; + } + p += n; + size -= n; + + if (def.data == NULL) { + n = format_def(p, size, def); + if (n < 0) { + goto fail; + } + p += n; + size -= n; + } + else if (def.rank > 0) { + n = format_def(p, size, def); + if (n < 0) { + goto fail; + } + p += n; + size -= n; + } + else { + n = strlen("scalar"); + if (size < n) { + goto fail; + } + memcpy(p, "scalar", n); + p += n; + size -= n; + } + + } + if (size <= 1) { + goto fail; + } + *p++ = '\n'; + size--; + + /* p now points one beyond the last character of the string in buf */ + s = PyUnicode_FromStringAndSize(buf, p - buf); + + PyMem_Free(buf); + return s; + + fail: + fprintf(stderr, "fortranobject.c: fortran_doc: len(p)=%zd>%zd=size:" + " too long docstring required, increase size\n", + p - buf, origsize); + PyMem_Free(buf); + return NULL; +} + +static FortranDataDef *save_def; /* save pointer of an allocatable array */ +static void set_data(char *d,npy_intp *f) { /* callback from Fortran */ + if (*f) /* In 
fortran f=allocated(d) */
+        save_def->data = d;
+    else
+        save_def->data = NULL;
+    /* printf("set_data: d=%p,f=%d\n",d,*f); */
+}
+
+static PyObject *
+fortran_getattr(PyFortranObject *fp, char *name) {
+    int i,j,k,flag;
+    if (fp->dict != NULL) {
+        PyObject *v = _PyDict_GetItemStringWithError(fp->dict, name);
+        if (v == NULL && PyErr_Occurred()) {
+            return NULL;
+        }
+        else if (v != NULL) {
+            Py_INCREF(v);
+            return v;
+        }
+    }
+    for (i=0,j=1;i<fp->len && (j=strcmp(name,fp->defs[i].name));i++);
+    if (j==0)
+        if (fp->defs[i].rank!=-1) { /* F90 allocatable array */
+            if (fp->defs[i].func==NULL) return NULL;
+            for(k=0;k<fp->defs[i].rank;++k)
+                fp->defs[i].dims.d[k]=-1;
+            save_def = &fp->defs[i];
+            (*(fp->defs[i].func))(&fp->defs[i].rank,fp->defs[i].dims.d,set_data,&flag);
+            if (flag==2)
+                k = fp->defs[i].rank + 1;
+            else
+                k = fp->defs[i].rank;
+            if (fp->defs[i].data !=NULL) { /* array is allocated */
+                PyObject *v = PyArray_New(&PyArray_Type, k, fp->defs[i].dims.d,
+                                          fp->defs[i].type, NULL, fp->defs[i].data, 0, NPY_ARRAY_FARRAY,
+                                          NULL);
+                if (v==NULL) return NULL;
+                /* Py_INCREF(v); */
+                return v;
+            } else { /* array is not allocated */
+                Py_RETURN_NONE;
+            }
+        }
+    if (strcmp(name,"__dict__")==0) {
+        Py_INCREF(fp->dict);
+        return fp->dict;
+    }
+    if (strcmp(name,"__doc__")==0) {
+        PyObject *s = PyUnicode_FromString(""), *s2, *s3;
+        for (i=0;i<fp->len;i++) {
+            s2 = fortran_doc(fp->defs[i]);
+            s3 = PyUnicode_Concat(s, s2);
+            Py_DECREF(s2);
+            Py_DECREF(s);
+            s = s3;
+        }
+        if (PyDict_SetItemString(fp->dict, name, s))
+            return NULL;
+        return s;
+    }
+    if ((strcmp(name,"_cpointer")==0) && (fp->len==1)) {
+        PyObject *cobj = F2PyCapsule_FromVoidPtr((void *)(fp->defs[0].data),NULL);
+        if (PyDict_SetItemString(fp->dict, name, cobj))
+            return NULL;
+        return cobj;
+    }
+    PyObject *str, *ret;
+    str = PyUnicode_FromString(name);
+    ret = PyObject_GenericGetAttr((PyObject *)fp, str);
+    Py_DECREF(str);
+    return ret;
+}
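+/*
+ * Editorial note, not part of upstream fortranobject.c: a minimal sketch of
+ * the f2py-generated "init" function that fortran_getattr/fortran_setattr
+ * invoke through fp->defs[i].func for an F90 allocatable array.  All names
+ * here are hypothetical and the block is compiled out.
+ */
+#if 0
+static void example_allocatable_init(int *rank, npy_intp *dims,
+                                     f2py_set_data_func set_data_cb, int *flag) {
+    static double payload[6];      /* stands in for the Fortran-side array */
+    npy_intp is_allocated = 1;     /* nonzero: allocated(array) was .true. */
+    (void)rank;                    /* rank is fixed when the wrapper is generated */
+    dims[0] = 2;                   /* report the current shape ...         */
+    dims[1] = 3;
+    *flag = 1;
+    /* ... and publish the data pointer through the set_data callback,
+       which stores it into save_def->data (see set_data above).          */
+    set_data_cb((char *)payload, &is_allocated);
+}
+#endif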
+
+static int
+fortran_setattr(PyFortranObject *fp, char *name, PyObject *v) {
+    int i,j,flag;
+    PyArrayObject *arr = NULL;
+    for (i=0,j=1;i<fp->len && (j=strcmp(name,fp->defs[i].name));i++);
+    if (j==0) {
+        if (fp->defs[i].rank==-1) {
+            PyErr_SetString(PyExc_AttributeError,"over-writing fortran routine");
+            return -1;
+        }
+        if (fp->defs[i].func!=NULL) { /* is allocatable array */
+            npy_intp dims[F2PY_MAX_DIMS];
+            int k;
+            save_def = &fp->defs[i];
+            if (v!=Py_None) {     /* set new value (reallocate if needed --
+                                     see f2py generated code for more
+                                     details ) */
+                for(k=0;k<fp->defs[i].rank;k++) dims[k]=-1;
+                if ((arr = array_from_pyobj(fp->defs[i].type,dims,fp->defs[i].rank,F2PY_INTENT_IN,v))==NULL)
+                    return -1;
+                (*(fp->defs[i].func))(&fp->defs[i].rank,PyArray_DIMS(arr),set_data,&flag);
+            } else {              /* deallocate */
+                for(k=0;k<fp->defs[i].rank;k++) dims[k]=0;
+                (*(fp->defs[i].func))(&fp->defs[i].rank,dims,set_data,&flag);
+                for(k=0;k<fp->defs[i].rank;k++) dims[k]=-1;
+            }
+            memcpy(fp->defs[i].dims.d,dims,fp->defs[i].rank*sizeof(npy_intp));
+        } else {                  /* not allocatable array */
+            if ((arr = array_from_pyobj(fp->defs[i].type,fp->defs[i].dims.d,fp->defs[i].rank,F2PY_INTENT_IN,v))==NULL)
+                return -1;
+        }
+        if (fp->defs[i].data!=NULL) { /* copy Python object to Fortran array */
+            npy_intp s = PyArray_MultiplyList(fp->defs[i].dims.d,PyArray_NDIM(arr));
+            if (s==-1)
+                s = PyArray_MultiplyList(PyArray_DIMS(arr),PyArray_NDIM(arr));
+            if (s<0 ||
+                (memcpy(fp->defs[i].data,PyArray_DATA(arr),s*PyArray_ITEMSIZE(arr)))==NULL) {
+                if ((PyObject*)arr!=v) {
+                    Py_DECREF(arr);
+                }
+                return -1;
+            }
+            if ((PyObject*)arr!=v) {
+                Py_DECREF(arr);
+            }
+        } else return (fp->defs[i].func==NULL?-1:0);
+        return 0; /* successful */
+    }
+    if (fp->dict == NULL) {
+        fp->dict = PyDict_New();
+        if (fp->dict == NULL)
+            return -1;
+    }
+    if (v == NULL) {
+        int rv = PyDict_DelItemString(fp->dict, name);
+        if (rv < 0)
+            PyErr_SetString(PyExc_AttributeError,"delete non-existing fortran attribute");
+        return rv;
+    }
+    else
+        return PyDict_SetItemString(fp->dict, name, v);
+}
+
+static PyObject*
+fortran_call(PyFortranObject *fp, PyObject *arg, PyObject *kw) {
+    int i = 0;
+    /*  printf("fortran call
+        name=%s,func=%p,data=%p,%p\n",fp->defs[i].name,
+        fp->defs[i].func,fp->defs[i].data,&fp->defs[i].data); */
+    if (fp->defs[i].rank==-1) {/* is Fortran routine */
+        if (fp->defs[i].func==NULL) {
+            PyErr_Format(PyExc_RuntimeError, "no function to call");
+            return NULL;
+        }
+        else if (fp->defs[i].data==NULL)
+            /* dummy routine */
+            return (*((fortranfunc)(fp->defs[i].func)))((PyObject *)fp,arg,kw,NULL);
+        else
+            return (*((fortranfunc)(fp->defs[i].func)))((PyObject *)fp,arg,kw,
+                                                        (void *)fp->defs[i].data);
+    }
+    PyErr_Format(PyExc_TypeError, "this fortran object is not callable");
+    return NULL;
+}
+
+static PyObject *
+fortran_repr(PyFortranObject *fp)
+{
+    PyObject *name = NULL, *repr = NULL;
+    name = PyObject_GetAttrString((PyObject *)fp, "__name__");
+    PyErr_Clear();
+    if (name != NULL && PyUnicode_Check(name)) {
+        repr = PyUnicode_FromFormat("<fortran %U>", name);
+    }
+    else {
+        repr = PyUnicode_FromString("<fortran object>");
+    }
+    Py_XDECREF(name);
+    return repr;
+}
+
+
+PyTypeObject PyFortran_Type = {
+    PyVarObject_HEAD_INIT(NULL, 0)
+    .tp_name = "fortran",
+    .tp_basicsize = sizeof(PyFortranObject),
+    .tp_dealloc = (destructor)fortran_dealloc,
+    .tp_getattr = (getattrfunc)fortran_getattr,
+    .tp_setattr = (setattrfunc)fortran_setattr,
+    .tp_repr = (reprfunc)fortran_repr,
+    .tp_call = (ternaryfunc)fortran_call,
+};
+
+/************************* f2py_report_atexit *******************************/
+
+#ifdef F2PY_REPORT_ATEXIT
+static int passed_time = 0;
+static int passed_counter = 0;
+static int passed_call_time = 0;
+static struct timeb start_time;
+static struct timeb stop_time;
+static struct timeb start_call_time;
+static struct timeb stop_call_time;
+static int cb_passed_time = 0;
+static int cb_passed_counter = 0;
+static int cb_passed_call_time = 0;
+static struct timeb cb_start_time;
+static struct timeb cb_stop_time;
+static struct timeb cb_start_call_time;
+static struct timeb cb_stop_call_time;
+
+extern void f2py_start_clock(void) { ftime(&start_time); }
+extern
+void f2py_start_call_clock(void) {
+    f2py_stop_clock();
+    ftime(&start_call_time);
+}
+extern
+void f2py_stop_clock(void) {
+    ftime(&stop_time);
+    passed_time += 1000*(stop_time.time - start_time.time);
+    passed_time += stop_time.millitm - start_time.millitm;
+}
+extern
+void f2py_stop_call_clock(void) {
+    ftime(&stop_call_time);
+    passed_call_time += 1000*(stop_call_time.time - start_call_time.time);
+    passed_call_time += stop_call_time.millitm - start_call_time.millitm;
+    passed_counter += 1;
+    f2py_start_clock();
+}
+
+extern void f2py_cb_start_clock(void) { ftime(&cb_start_time); }
+extern
+void f2py_cb_start_call_clock(void) {
+    f2py_cb_stop_clock();
+    ftime(&cb_start_call_time);
+}
+extern
+void f2py_cb_stop_clock(void) {
+    ftime(&cb_stop_time);
+    cb_passed_time += 1000*(cb_stop_time.time - cb_start_time.time);
+    cb_passed_time += cb_stop_time.millitm - cb_start_time.millitm;
+}
+extern
+void f2py_cb_stop_call_clock(void) {
+    ftime(&cb_stop_call_time);
+    cb_passed_call_time += 1000*(cb_stop_call_time.time -
cb_start_call_time.time); + cb_passed_call_time += cb_stop_call_time.millitm - cb_start_call_time.millitm; + cb_passed_counter += 1; + f2py_cb_start_clock(); +} + +static int f2py_report_on_exit_been_here = 0; +extern +void f2py_report_on_exit(int exit_flag,void *name) { + if (f2py_report_on_exit_been_here) { + fprintf(stderr," %s\n",(char*)name); + return; + } + f2py_report_on_exit_been_here = 1; + fprintf(stderr," /-----------------------\\\n"); + fprintf(stderr," < F2PY performance report >\n"); + fprintf(stderr," \\-----------------------/\n"); + fprintf(stderr,"Overall time spent in ...\n"); + fprintf(stderr,"(a) wrapped (Fortran/C) functions : %8d msec\n", + passed_call_time); + fprintf(stderr,"(b) f2py interface, %6d calls : %8d msec\n", + passed_counter,passed_time); + fprintf(stderr,"(c) call-back (Python) functions : %8d msec\n", + cb_passed_call_time); + fprintf(stderr,"(d) f2py call-back interface, %6d calls : %8d msec\n", + cb_passed_counter,cb_passed_time); + + fprintf(stderr,"(e) wrapped (Fortran/C) functions (actual) : %8d msec\n\n", + passed_call_time-cb_passed_call_time-cb_passed_time); + fprintf(stderr,"Use -DF2PY_REPORT_ATEXIT_DISABLE to disable this message.\n"); + fprintf(stderr,"Exit status: %d\n",exit_flag); + fprintf(stderr,"Modules : %s\n",(char*)name); +} +#endif + +/********************** report on array copy ****************************/ + +#ifdef F2PY_REPORT_ON_ARRAY_COPY +static void f2py_report_on_array_copy(PyArrayObject* arr) { + const npy_intp arr_size = PyArray_Size((PyObject *)arr); + if (arr_size>F2PY_REPORT_ON_ARRAY_COPY) { + fprintf(stderr,"copied an array: size=%ld, elsize=%"NPY_INTP_FMT"\n", + arr_size, (npy_intp)PyArray_ITEMSIZE(arr)); + } +} +static void f2py_report_on_array_copy_fromany(void) { + fprintf(stderr,"created an array from object\n"); +} + +#define F2PY_REPORT_ON_ARRAY_COPY_FROMARR f2py_report_on_array_copy((PyArrayObject *)arr) +#define F2PY_REPORT_ON_ARRAY_COPY_FROMANY f2py_report_on_array_copy_fromany() +#else +#define F2PY_REPORT_ON_ARRAY_COPY_FROMARR +#define F2PY_REPORT_ON_ARRAY_COPY_FROMANY +#endif + + +/************************* array_from_obj *******************************/ + +/* + * File: array_from_pyobj.c + * + * Description: + * ------------ + * Provides array_from_pyobj function that returns a contiguous array + * object with the given dimensions and required storage order, either + * in row-major (C) or column-major (Fortran) order. The function + * array_from_pyobj is very flexible about its Python object argument + * that can be any number, list, tuple, or array. + * + * array_from_pyobj is used in f2py generated Python extension + * modules. 
+ *
+ * Author: Pearu Peterson
+ * Created: 13-16 January 2002
+ * $Id: fortranobject.c,v 1.52 2005/07/11 07:44:20 pearu Exp $
+ */
+
+static int check_and_fix_dimensions(const PyArrayObject* arr,
+                                    const int rank,
+                                    npy_intp *dims);
+
+static int
+count_negative_dimensions(const int rank,
+                          const npy_intp *dims) {
+    int i=0,r=0;
+    while (i<rank) {
+        if (dims[i] < 0) ++r;
+        ++i;
+    }
+    return r;
+}
+
+#ifdef DEBUG_COPY_ND_ARRAY
+void dump_dims(int rank, npy_intp const* dims) {
+    int i;
+    printf("[");
+    for(i=0;i<rank;++i) {
+        printf("%3" NPY_INTP_FMT, dims[i]);
+    }
+    printf("]\n");
+}
+void dump_attrs(const PyArrayObject* obj) {
+    const PyArrayObject_fields *arr = (const PyArrayObject_fields*) obj;
+    int rank = PyArray_NDIM(obj);
+    npy_intp size = PyArray_Size((PyObject *)obj);
+    printf("\trank = %d, flags = %d, size = %" NPY_INTP_FMT "\n",
+           rank, arr->flags, size);
+    printf("\tstrides = ");
+    dump_dims(rank, arr->strides);
+    printf("\tdimensions = ");
+    dump_dims(rank, arr->dimensions);
+}
+#endif
+
+#define SWAPTYPE(a,b,t) {t c; c = (a); (a) = (b); (b) = c; }
+
+static int swap_arrays(PyArrayObject* obj1, PyArrayObject* obj2) {
+    PyArrayObject_fields *arr1 = (PyArrayObject_fields*) obj1,
+                         *arr2 = (PyArrayObject_fields*) obj2;
+    SWAPTYPE(arr1->data,arr2->data,char*);
+    SWAPTYPE(arr1->nd,arr2->nd,int);
+    SWAPTYPE(arr1->dimensions,arr2->dimensions,npy_intp*);
+    SWAPTYPE(arr1->strides,arr2->strides,npy_intp*);
+    SWAPTYPE(arr1->base,arr2->base,PyObject*);
+    SWAPTYPE(arr1->descr,arr2->descr,PyArray_Descr*);
+    SWAPTYPE(arr1->flags,arr2->flags,int);
+    /* SWAPTYPE(arr1->weakreflist,arr2->weakreflist,PyObject*); */
+    return 0;
+}
+
+#define ARRAY_ISCOMPATIBLE(arr,type_num)                          \
+    (  (PyArray_ISINTEGER(arr) && PyTypeNum_ISINTEGER(type_num))  \
+     ||(PyArray_ISFLOAT(arr)   && PyTypeNum_ISFLOAT(type_num))    \
+     ||(PyArray_ISCOMPLEX(arr) && PyTypeNum_ISCOMPLEX(type_num))  \
+     ||(PyArray_ISBOOL(arr)    && PyTypeNum_ISBOOL(type_num))     \
+     )
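+
+/*
+ * Editorial note, not part of upstream fortranobject.c: ARRAY_ISCOMPATIBLE
+ * only permits casts inside one type family (integer->integer, float->float,
+ * complex->complex, bool->bool).  A compiled-out sketch of what it accepts:
+ */
+#if 0
+static void compatibility_example(void) {
+    npy_intp n = 3;
+    PyArrayObject *a = (PyArrayObject *)PyArray_SimpleNew(1, &n, NPY_INT);
+    int same_family  = ARRAY_ISCOMPATIBLE(a, NPY_LONGLONG); /* 1: int->int  */
+    int cross_family = ARRAY_ISCOMPATIBLE(a, NPY_CDOUBLE);  /* 0: int->cplx */
+    (void)same_family; (void)cross_family;
+    Py_DECREF(a);
+}
+#endif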
+
+extern
+PyArrayObject* array_from_pyobj(const int type_num,
+                                npy_intp *dims,
+                                const int rank,
+                                const int intent,
+                                PyObject *obj) {
+    /*
+     * Note about reference counting
+     * -----------------------------
+     * If the caller returns the array to Python, it must be done with
+     * Py_BuildValue("N",arr).
+     * Otherwise, if obj!=arr then the caller must call Py_DECREF(arr).
+     *
+     * Note on intent(cache,out,..)
+     * ---------------------
+     * Don't expect correct data when returning intent(cache) array.
+     *
+     */
+    char mess[200];
+    PyArrayObject *arr = NULL;
+    PyArray_Descr *descr;
+    char typechar;
+    int elsize;
+
+    if ((intent & F2PY_INTENT_HIDE)
+        || ((intent & F2PY_INTENT_CACHE) && (obj==Py_None))
+        || ((intent & F2PY_OPTIONAL) && (obj==Py_None))
+        ) {
+        /* intent(cache), optional, intent(hide) */
+        if (count_negative_dimensions(rank,dims) > 0) {
+            int i;
+            strcpy(mess, "failed to create intent(cache|hide)|optional array"
+                   "-- must have defined dimensions but got (");
+            for(i=0;i<rank;++i)
+                sprintf(mess+strlen(mess),"%" NPY_INTP_FMT ",",dims[i]);
+            strcat(mess, ")");
+            PyErr_SetString(PyExc_TypeError,mess);
+            return NULL;
+        }
+        arr = (PyArrayObject *)
+            PyArray_New(&PyArray_Type, rank, dims, type_num,
+                        NULL,NULL,1,
+                        !(intent&F2PY_INTENT_C),
+                        NULL);
+        if (arr==NULL) return NULL;
+        if (!(intent & F2PY_INTENT_CACHE))
+            PyArray_FILLWBYTE(arr, 0);
+        return arr;
+    }
+
+    descr = PyArray_DescrFromType(type_num);
+    /* compatibility with NPY_CHAR */
+    if (type_num == NPY_STRING) {
+        PyArray_DESCR_REPLACE(descr);
+        if (descr == NULL) {
+            return NULL;
+        }
+        descr->elsize = 1;
+        descr->type = NPY_CHARLTR;
+    }
+    elsize = descr->elsize;
+    typechar = descr->type;
+    Py_DECREF(descr);
+    if (PyArray_Check(obj)) {
+        arr = (PyArrayObject *)obj;
+
+        if (intent & F2PY_INTENT_CACHE) {
+            /* intent(cache) */
+            if (PyArray_ISONESEGMENT(arr)
+                && PyArray_ITEMSIZE(arr)>=elsize) {
+                if (check_and_fix_dimensions(arr, rank, dims)) {
+                    return NULL;
+                }
+                if (intent & F2PY_INTENT_OUT)
+                    Py_INCREF(arr);
+                return arr;
+            }
+            strcpy(mess, "failed to initialize intent(cache) array");
+            if (!PyArray_ISONESEGMENT(arr))
+                strcat(mess, " -- input must be in one segment");
+            if (PyArray_ITEMSIZE(arr)<elsize)
+                sprintf(mess+strlen(mess),
+                        " -- expected at least elsize=%d but got %" NPY_INTP_FMT,
+                        elsize, (npy_intp)PyArray_ITEMSIZE(arr));
+            PyErr_SetString(PyExc_ValueError,mess);
+            return NULL;
+        }
+
+        /* here we have always intent(in) or intent(inout) or intent(inplace) */
+
+        if (check_and_fix_dimensions(arr, rank, dims)) {
+            return NULL;
+        }
+
+        if ((! (intent & F2PY_INTENT_COPY)) &&
+            PyArray_ITEMSIZE(arr)==elsize &&
+            ARRAY_ISCOMPATIBLE(arr,type_num) &&
+            F2PY_CHECK_ALIGNMENT(arr, intent)) {
+            if ((intent & F2PY_INTENT_C)?PyArray_ISCARRAY_RO(arr):PyArray_ISFARRAY_RO(arr)) {
+                if ((intent & F2PY_INTENT_IN)
+                    || PyArray_ISBEHAVED(arr)) {
+                    if (intent & F2PY_INTENT_OUT)
+                        Py_INCREF(arr);
+                    /* Returning input array */
+                    return arr;
+                }
+            }
+        }
+
+        if (intent & F2PY_INTENT_INOUT) {
+            strcpy(mess, "failed to initialize intent(inout) array");
+            if ((intent & F2PY_INTENT_C) && !PyArray_ISCARRAY(arr))
+                strcat(mess, " -- input not contiguous");
+            if (!(intent & F2PY_INTENT_C) && !PyArray_ISFARRAY(arr))
+                strcat(mess, " -- input not fortran contiguous");
+            if (PyArray_ITEMSIZE(arr)!=elsize)
+                sprintf(mess+strlen(mess),
+                        " -- expected elsize=%d but got %" NPY_INTP_FMT,
+                        elsize, (npy_intp)PyArray_ITEMSIZE(arr));
+            if (!(ARRAY_ISCOMPATIBLE(arr,type_num)))
+                sprintf(mess+strlen(mess)," -- input '%c' not compatible to '%c'",
+                        PyArray_DESCR(arr)->type,typechar);
+            if (!(F2PY_CHECK_ALIGNMENT(arr, intent)))
+                sprintf(mess+strlen(mess)," -- input not %d-aligned", F2PY_GET_ALIGNMENT(intent));
+            PyErr_SetString(PyExc_ValueError,mess);
+            return NULL;
+        }
+
+        /* here we have always intent(in) or intent(inplace) */
+
+        {
+            PyArrayObject * retarr;
+            retarr = (PyArrayObject *) \
+                PyArray_New(&PyArray_Type, PyArray_NDIM(arr), PyArray_DIMS(arr), type_num,
+                            NULL,NULL,1,
+                            !(intent&F2PY_INTENT_C),
+                            NULL);
+            if (retarr==NULL)
+                return NULL;
+            F2PY_REPORT_ON_ARRAY_COPY_FROMARR;
+            if (PyArray_CopyInto(retarr, arr)) {
+                Py_DECREF(retarr);
+                return NULL;
+            }
+            if (intent & F2PY_INTENT_INPLACE) {
+                if (swap_arrays(arr,retarr))
+                    return NULL; /* XXX: set exception */
+                Py_XDECREF(retarr);
+                if (intent & F2PY_INTENT_OUT)
+                    Py_INCREF(arr);
+            } else {
+                arr = retarr;
+            }
+        }
+        return arr;
+    }
+
+    if ((intent & F2PY_INTENT_INOUT) ||
+        (intent & F2PY_INTENT_INPLACE) ||
+        (intent & F2PY_INTENT_CACHE)) {
+        PyErr_Format(PyExc_TypeError,
+                     "failed to initialize intent(inout|inplace|cache) "
+                     "array, input '%s' object is not an array",
+                     Py_TYPE(obj)->tp_name);
+        return NULL;
+    }
+
+    {
+        PyArray_Descr * descr = PyArray_DescrFromType(type_num);
+        /* compatibility with NPY_CHAR */
+        if (type_num == NPY_STRING) {
+            PyArray_DESCR_REPLACE(descr);
+            if (descr == NULL) {
+                return NULL;
+            }
+            descr->elsize = 1;
+            descr->type = NPY_CHARLTR;
+        }
+        F2PY_REPORT_ON_ARRAY_COPY_FROMANY;
+        arr = (PyArrayObject *) \
+            PyArray_FromAny(obj, descr, 0,0,
+                            ((intent & F2PY_INTENT_C)?NPY_ARRAY_CARRAY:NPY_ARRAY_FARRAY) \
+                            | NPY_ARRAY_FORCECAST, NULL);
+        if (arr==NULL)
+            return NULL;
+        if (check_and_fix_dimensions(arr, rank, dims)) {
+            return NULL;
+        }
+        return arr;
+    }
+
+}
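+
+/*
+ * Editorial note, not part of upstream fortranobject.c: a minimal sketch of
+ * how f2py-generated wrapper code typically drives array_from_pyobj for an
+ * intent(in) double-precision argument.  `pyobj` is hypothetical; the block
+ * is compiled out.
+ */
+#if 0
+static int intent_in_example(PyObject *pyobj) {
+    npy_intp dims[1] = {-1};   /* -1: let check_and_fix_dimensions fill it in */
+    PyArrayObject *arr = array_from_pyobj(NPY_DOUBLE, dims, 1,
+                                          F2PY_INTENT_IN, pyobj);
+    if (arr == NULL) return -1;              /* exception already set */
+    /* ... pass PyArray_DATA(arr) to the wrapped Fortran routine ... */
+    if ((PyObject *)arr != pyobj) {
+        Py_DECREF(arr);    /* per the reference-counting note above */
+    }
+    return 0;
+}
+#endif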
+
+/*****************************************/
+/* Helper functions for array_from_pyobj */
+/*****************************************/
+
+static
+int check_and_fix_dimensions(const PyArrayObject* arr, const int rank, npy_intp *dims)
+{
+    /*
+     * This function fills in blanks (that are -1's) in dims list using
+     * the dimensions from arr. It also checks that non-blank dims will
+     * match with the corresponding values in arr dimensions.
+     *
+     * Returns 0 if the function is successful.
+     *
+     * If an error condition is detected, an exception is set and 1 is returned.
+     */
+    const npy_intp arr_size = (PyArray_NDIM(arr))?PyArray_Size((PyObject *)arr):1;
+#ifdef DEBUG_COPY_ND_ARRAY
+    dump_attrs(arr);
+    printf("check_and_fix_dimensions:init: dims=");
+    dump_dims(rank,dims);
+#endif
+    if (rank > PyArray_NDIM(arr)) { /* [1,2] -> [[1],[2]]; 1 -> [[1]] */
+        npy_intp new_size = 1;
+        int free_axe = -1;
+        int i;
+        npy_intp d;
+        /* Fill dims where -1 or 0; check dimensions; calc new_size; */
+        for(i=0;i<PyArray_NDIM(arr);++i) {
+            d = PyArray_DIM(arr,i);
+            if (dims[i] >= 0) {
+                if (d>1 && dims[i]!=d) {
+                    PyErr_Format(PyExc_ValueError,
+                                 "%d-th dimension must be fixed to %"
+                                 NPY_INTP_FMT " but got %" NPY_INTP_FMT "\n",
+                                 i, dims[i], d);
+                    return 1;
+                }
+                if (!dims[i]) dims[i] = 1;
+            } else {
+                dims[i] = d ? d : 1;
+            }
+            new_size *= dims[i];
+        }
+        for(i=PyArray_NDIM(arr);i<rank;++i)
+            if (dims[i]>1) {
+                PyErr_Format(PyExc_ValueError,
+                             "%d-th dimension must be %" NPY_INTP_FMT
+                             " but got 0 (not defined).\n",
+                             i, dims[i]);
+                return 1;
+            } else if (free_axe<0)
+                free_axe = i;
+            else
+                dims[i] = 1;
+        if (free_axe>=0) {
+            dims[free_axe] = arr_size/new_size;
+            new_size *= dims[free_axe];
+        }
+        if (new_size != arr_size) {
+            PyErr_Format(PyExc_ValueError,
+                         "unexpected array size: new_size=%" NPY_INTP_FMT
+                         ", got array with arr_size=%" NPY_INTP_FMT
+                         " (maybe too many free indices)\n",
+                         new_size, arr_size);
+            return 1;
+        }
+    } else if (rank==PyArray_NDIM(arr)) {
+        npy_intp new_size = 1;
+        int i;
+        npy_intp d;
+        for (i=0; i<rank; ++i) {
+            d = PyArray_DIM(arr,i);
+            if (dims[i]>=0) {
+                if (d > 1 && d!=dims[i]) {
+                    PyErr_Format(PyExc_ValueError,
+                                 "%d-th dimension must be fixed to %"
+                                 NPY_INTP_FMT " but got %" NPY_INTP_FMT "\n",
+                                 i, dims[i], d);
+                    return 1;
+                }
+                if (!dims[i]) dims[i] = 1;
+            } else dims[i] = d;
+            new_size *= dims[i];
+        }
+        if (new_size != arr_size) {
+            PyErr_Format(PyExc_ValueError,
+                         "unexpected array size: new_size=%" NPY_INTP_FMT
+                         ", got array with arr_size=%" NPY_INTP_FMT "\n",
+                         new_size, arr_size);
+            return 1;
+        }
+    } else { /* [[1,2]] -> [[1],[2]] */
+        int i,j;
+        npy_intp d;
+        int effrank;
+        npy_intp size;
+        for (i=0,effrank=0;i<PyArray_NDIM(arr);++i)
+            if (PyArray_DIM(arr,i)>1) ++effrank;
+        if (dims[rank-1]>=0)
+            if (effrank>rank) {
+                PyErr_Format(PyExc_ValueError,
+                             "too many axes: %d (effrank=%d), "
+                             "expected rank=%d\n",
+                             PyArray_NDIM(arr), effrank, rank);
+                return 1;
+            }
+
+        for (i=0,j=0;i<rank;++i) {
+            while (j<PyArray_NDIM(arr) && PyArray_DIM(arr,j)<2) ++j;
+            if (j>=PyArray_NDIM(arr)) d = 1;
+            else d = PyArray_DIM(arr,j++);
+            if (dims[i]>=0) {
+                if (d>1 && d!=dims[i]) {
+                    PyErr_Format(PyExc_ValueError,
+                                 "%d-th dimension must be fixed to %"
+                                 NPY_INTP_FMT " but got %" NPY_INTP_FMT
+                                 " (real index=%d)\n",
+                                 i, dims[i], d, j-1);
+                    return 1;
+                }
+                if (!dims[i]) dims[i] = 1;
+            } else
+                dims[i] = d;
+        }
+
+        for (i=rank;i<PyArray_NDIM(arr);++i) { /* [[1,2],[3,4]] -> [1,2,3,4] */
+            while (j<PyArray_NDIM(arr) && PyArray_DIM(arr,j)<2) ++j;
+            if (j>=PyArray_NDIM(arr)) d = 1;
+            else d = PyArray_DIM(arr,j++);
+            dims[rank-1] *= d;
+        }
+        for (i=0,size=1;i<rank;++i) size *= dims[i];
+#ifdef F2PY_REPORT_ATEXIT
+#include <sys/timeb.h>
+  extern void f2py_start_clock(void);
+  extern void f2py_stop_clock(void);
+  extern void f2py_start_call_clock(void);
+  extern void f2py_stop_call_clock(void);
+  extern void f2py_cb_start_clock(void);
+  extern void f2py_cb_stop_clock(void);
+  extern void f2py_cb_start_call_clock(void);
+  extern void f2py_cb_stop_call_clock(void);
+  extern void f2py_report_on_exit(int,void*);
+#endif
+
+#ifdef DMALLOC
+#include "dmalloc.h"
+#endif
+
+/* Fortran object interface */
+
+/*
+123456789-123456789-123456789-123456789-123456789-123456789-123456789-12
+
+PyFortranObject represents various Fortran objects:
+Fortran (module) routines, COMMON blocks, module data.
+ +Author: Pearu Peterson +*/ + +#define F2PY_MAX_DIMS 40 + +typedef void (*f2py_set_data_func)(char*,npy_intp*); +typedef void (*f2py_void_func)(void); +typedef void (*f2py_init_func)(int*,npy_intp*,f2py_set_data_func,int*); + + /*typedef void* (*f2py_c_func)(void*,...);*/ + +typedef void *(*f2pycfunc)(void); + +typedef struct { + char *name; /* attribute (array||routine) name */ + int rank; /* array rank, 0 for scalar, max is F2PY_MAX_DIMS, + || rank=-1 for Fortran routine */ + struct {npy_intp d[F2PY_MAX_DIMS];} dims; /* dimensions of the array, || not used */ + int type; /* PyArray_ || not used */ + char *data; /* pointer to array || Fortran routine */ + f2py_init_func func; /* initialization function for + allocatable arrays: + func(&rank,dims,set_ptr_func,name,len(name)) + || C/API wrapper for Fortran routine */ + char *doc; /* documentation string; only recommended + for routines. */ +} FortranDataDef; + +typedef struct { + PyObject_HEAD + int len; /* Number of attributes */ + FortranDataDef *defs; /* An array of FortranDataDef's */ + PyObject *dict; /* Fortran object attribute dictionary */ +} PyFortranObject; + +#define PyFortran_Check(op) (Py_TYPE(op) == &PyFortran_Type) +#define PyFortran_Check1(op) (0==strcmp(Py_TYPE(op)->tp_name,"fortran")) + + extern PyTypeObject PyFortran_Type; + extern int F2PyDict_SetItemString(PyObject* dict, char *name, PyObject *obj); + extern PyObject * PyFortranObject_New(FortranDataDef* defs, f2py_void_func init); + extern PyObject * PyFortranObject_NewAsAttr(FortranDataDef* defs); + +PyObject * F2PyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *)); +void * F2PyCapsule_AsVoidPtr(PyObject *obj); +int F2PyCapsule_Check(PyObject *ptr); + +extern void *F2PySwapThreadLocalCallbackPtr(char *key, void *ptr); +extern void *F2PyGetThreadLocalCallbackPtr(char *key); + +#define ISCONTIGUOUS(m) (PyArray_FLAGS(m) & NPY_ARRAY_C_CONTIGUOUS) +#define F2PY_INTENT_IN 1 +#define F2PY_INTENT_INOUT 2 +#define F2PY_INTENT_OUT 4 +#define F2PY_INTENT_HIDE 8 +#define F2PY_INTENT_CACHE 16 +#define F2PY_INTENT_COPY 32 +#define F2PY_INTENT_C 64 +#define F2PY_OPTIONAL 128 +#define F2PY_INTENT_INPLACE 256 +#define F2PY_INTENT_ALIGNED4 512 +#define F2PY_INTENT_ALIGNED8 1024 +#define F2PY_INTENT_ALIGNED16 2048 + +#define ARRAY_ISALIGNED(ARR, SIZE) ((size_t)(PyArray_DATA(ARR)) % (SIZE) == 0) +#define F2PY_ALIGN4(intent) (intent & F2PY_INTENT_ALIGNED4) +#define F2PY_ALIGN8(intent) (intent & F2PY_INTENT_ALIGNED8) +#define F2PY_ALIGN16(intent) (intent & F2PY_INTENT_ALIGNED16) + +#define F2PY_GET_ALIGNMENT(intent) \ + (F2PY_ALIGN4(intent) ? 4 : \ + (F2PY_ALIGN8(intent) ? 8 : \ + (F2PY_ALIGN16(intent) ? 
16 : 1) )) +#define F2PY_CHECK_ALIGNMENT(arr, intent) ARRAY_ISALIGNED(arr, F2PY_GET_ALIGNMENT(intent)) + + extern PyArrayObject* array_from_pyobj(const int type_num, + npy_intp *dims, + const int rank, + const int intent, + PyObject *obj); + extern int copy_ND_array(const PyArrayObject *in, PyArrayObject *out); + +#ifdef DEBUG_COPY_ND_ARRAY + extern void dump_attrs(const PyArrayObject* arr); +#endif + + +#ifdef __cplusplus +} +#endif +#endif /* !Py_FORTRANOBJECT_H */ diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/__init__.py b/MLPY/Lib/site-packages/numpy/f2py/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b3fafb035556f853e920ee89004f7e82b1203e52 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_abstract_interface.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_abstract_interface.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf8bdfdd7d72dd66313279f016771bed16e55beb Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_abstract_interface.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_array_from_pyobj.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_array_from_pyobj.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac09228319d40b2c31424b0b9c0aebc545705ec9 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_array_from_pyobj.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_assumed_shape.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_assumed_shape.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9af4dc6633c8fc088d44a7848fee97ecaf798498 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_assumed_shape.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_block_docstring.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_block_docstring.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..474e564aa1e83532f0e2d52d635db91cf1d12200 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_block_docstring.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_callback.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_callback.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb611371216796c1b2981b968d733d5696dcb92c Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_callback.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_common.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_common.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cdd2822a67e24eaba3f2053b3eb0b9b607e5fae5 Binary files /dev/null and 
b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_common.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_compile_function.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_compile_function.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5cd4357bee6c67f8e88fd2713a1deb216b943f43 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_compile_function.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_crackfortran.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_crackfortran.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cdd56f71bec02a8749109a4a8854f830ac4d424d Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_crackfortran.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_kind.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_kind.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..435a118ae3fd543c24fa260ef217e215aae149b7 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_kind.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_mixed.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_mixed.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..59154a5cf5318392f3a365d89d6a61efc9897a6d Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_mixed.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_module_doc.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_module_doc.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..be1babc1f4915864d462431ceec421ef85eb172d Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_module_doc.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_parameter.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_parameter.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..79406fcc586b807c2d51407dd67f2896c70c5cc2 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_parameter.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_quoted_character.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_quoted_character.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16aa64f86d25f80251ce05eb4d21f8ee1da97aa1 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_quoted_character.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_regression.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_regression.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3fa45a4a4f78dc77d22f7791ad555daa82af83e Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_regression.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_return_character.cpython-39.pyc 
b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_return_character.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e4dc31e6ad11b1434d585c21226a3cdcce2b417 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_return_character.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_return_complex.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_return_complex.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1487cb471a4125f9298fc40cb2e8b43c544e0c92 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_return_complex.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_return_integer.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_return_integer.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d945eac86f43e24b16b6b7a4eddb2b6f67e1b222 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_return_integer.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_return_logical.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_return_logical.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..831ea1cc855af0af6be281a24a093bea2710b1b8 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_return_logical.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_return_real.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_return_real.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cdd903663eb32229e982e88c81673880a9a2416d Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_return_real.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_semicolon_split.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_semicolon_split.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0720b6f4747c854b901a9fadc80baf6ba3fb6ff4 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_semicolon_split.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_size.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_size.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f2ff3a3094264b57ab55ae6d435553b9ad543348 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_size.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_string.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_string.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de4f974895b80fbb73bb6735fc6511d1f000d4fa Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/test_string.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/util.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/util.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..abd99b51af671907a38c6cec62cde1dfd92d8a68 Binary files /dev/null and 
b/MLPY/Lib/site-packages/numpy/f2py/tests/__pycache__/util.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c b/MLPY/Lib/site-packages/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c new file mode 100644 index 0000000000000000000000000000000000000000..5432407b9d7d61e9532c93d697dd967b4558852a --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c @@ -0,0 +1,228 @@ +/* + * This file was auto-generated with f2py (version:2_1330) and hand edited by + * Pearu for testing purposes. Do not edit this file unless you know what you + * are doing!!! + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/*********************** See f2py2e/cfuncs.py: includes ***********************/ +#include "Python.h" +#include "fortranobject.h" +#include + +static PyObject *wrap_error; +static PyObject *wrap_module; + +/************************************ call ************************************/ +static char doc_f2py_rout_wrap_call[] = "\ +Function signature:\n\ + arr = call(type_num,dims,intent,obj)\n\ +Required arguments:\n" +" type_num : input int\n" +" dims : input int-sequence\n" +" intent : input int\n" +" obj : input python object\n" +"Return objects:\n" +" arr : array"; +static PyObject *f2py_rout_wrap_call(PyObject *capi_self, + PyObject *capi_args) { + PyObject * volatile capi_buildvalue = NULL; + int type_num = 0; + npy_intp *dims = NULL; + PyObject *dims_capi = Py_None; + int rank = 0; + int intent = 0; + PyArrayObject *capi_arr_tmp = NULL; + PyObject *arr_capi = Py_None; + int i; + + if (!PyArg_ParseTuple(capi_args,"iOiO|:wrap.call",\ + &type_num,&dims_capi,&intent,&arr_capi)) + return NULL; + rank = PySequence_Length(dims_capi); + dims = malloc(rank*sizeof(npy_intp)); + for (i=0;ikind, + PyArray_DESCR(arr)->type, + PyArray_TYPE(arr), + PyArray_ITEMSIZE(arr), + PyArray_DESCR(arr)->alignment, + PyArray_FLAGS(arr), + PyArray_ITEMSIZE(arr)); +} + +static PyMethodDef f2py_module_methods[] = { + + {"call",f2py_rout_wrap_call,METH_VARARGS,doc_f2py_rout_wrap_call}, + {"array_attrs",f2py_rout_wrap_attrs,METH_VARARGS,doc_f2py_rout_wrap_attrs}, + {NULL,NULL} +}; + +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + "test_array_from_pyobj_ext", + NULL, + -1, + f2py_module_methods, + NULL, + NULL, + NULL, + NULL +}; + +PyMODINIT_FUNC PyInit_test_array_from_pyobj_ext(void) { + PyObject *m,*d, *s; + m = wrap_module = PyModule_Create(&moduledef); + Py_SET_TYPE(&PyFortran_Type, &PyType_Type); + import_array(); + if (PyErr_Occurred()) + Py_FatalError("can't initialize module wrap (failed to import numpy)"); + d = PyModule_GetDict(m); + s = PyUnicode_FromString("This module 'wrap' is auto-generated with f2py (version:2_1330).\nFunctions:\n" + " arr = call(type_num,dims,intent,obj)\n" + "."); + PyDict_SetItemString(d, "__doc__", s); + wrap_error = PyErr_NewException ("wrap.error", NULL, NULL); + Py_DECREF(s); + +#define ADDCONST(NAME, CONST) \ + s = PyLong_FromLong(CONST); \ + PyDict_SetItemString(d, NAME, s); \ + Py_DECREF(s) + + ADDCONST("F2PY_INTENT_IN", F2PY_INTENT_IN); + ADDCONST("F2PY_INTENT_INOUT", F2PY_INTENT_INOUT); + ADDCONST("F2PY_INTENT_OUT", F2PY_INTENT_OUT); + ADDCONST("F2PY_INTENT_HIDE", F2PY_INTENT_HIDE); + ADDCONST("F2PY_INTENT_CACHE", F2PY_INTENT_CACHE); + ADDCONST("F2PY_INTENT_COPY", F2PY_INTENT_COPY); + ADDCONST("F2PY_INTENT_C", F2PY_INTENT_C); + ADDCONST("F2PY_OPTIONAL", F2PY_OPTIONAL); + ADDCONST("F2PY_INTENT_INPLACE", F2PY_INTENT_INPLACE); + ADDCONST("NPY_BOOL", NPY_BOOL); + 
ADDCONST("NPY_BYTE", NPY_BYTE); + ADDCONST("NPY_UBYTE", NPY_UBYTE); + ADDCONST("NPY_SHORT", NPY_SHORT); + ADDCONST("NPY_USHORT", NPY_USHORT); + ADDCONST("NPY_INT", NPY_INT); + ADDCONST("NPY_UINT", NPY_UINT); + ADDCONST("NPY_INTP", NPY_INTP); + ADDCONST("NPY_UINTP", NPY_UINTP); + ADDCONST("NPY_LONG", NPY_LONG); + ADDCONST("NPY_ULONG", NPY_ULONG); + ADDCONST("NPY_LONGLONG", NPY_LONGLONG); + ADDCONST("NPY_ULONGLONG", NPY_ULONGLONG); + ADDCONST("NPY_FLOAT", NPY_FLOAT); + ADDCONST("NPY_DOUBLE", NPY_DOUBLE); + ADDCONST("NPY_LONGDOUBLE", NPY_LONGDOUBLE); + ADDCONST("NPY_CFLOAT", NPY_CFLOAT); + ADDCONST("NPY_CDOUBLE", NPY_CDOUBLE); + ADDCONST("NPY_CLONGDOUBLE", NPY_CLONGDOUBLE); + ADDCONST("NPY_OBJECT", NPY_OBJECT); + ADDCONST("NPY_STRING", NPY_STRING); + ADDCONST("NPY_UNICODE", NPY_UNICODE); + ADDCONST("NPY_VOID", NPY_VOID); + ADDCONST("NPY_NTYPES", NPY_NTYPES); + ADDCONST("NPY_NOTYPE", NPY_NOTYPE); + ADDCONST("NPY_USERDEF", NPY_USERDEF); + + ADDCONST("CONTIGUOUS", NPY_ARRAY_C_CONTIGUOUS); + ADDCONST("FORTRAN", NPY_ARRAY_F_CONTIGUOUS); + ADDCONST("OWNDATA", NPY_ARRAY_OWNDATA); + ADDCONST("FORCECAST", NPY_ARRAY_FORCECAST); + ADDCONST("ENSURECOPY", NPY_ARRAY_ENSURECOPY); + ADDCONST("ENSUREARRAY", NPY_ARRAY_ENSUREARRAY); + ADDCONST("ALIGNED", NPY_ARRAY_ALIGNED); + ADDCONST("WRITEABLE", NPY_ARRAY_WRITEABLE); + ADDCONST("UPDATEIFCOPY", NPY_ARRAY_UPDATEIFCOPY); + ADDCONST("WRITEBACKIFCOPY", NPY_ARRAY_WRITEBACKIFCOPY); + + ADDCONST("BEHAVED", NPY_ARRAY_BEHAVED); + ADDCONST("BEHAVED_NS", NPY_ARRAY_BEHAVED_NS); + ADDCONST("CARRAY", NPY_ARRAY_CARRAY); + ADDCONST("FARRAY", NPY_ARRAY_FARRAY); + ADDCONST("CARRAY_RO", NPY_ARRAY_CARRAY_RO); + ADDCONST("FARRAY_RO", NPY_ARRAY_FARRAY_RO); + ADDCONST("DEFAULT", NPY_ARRAY_DEFAULT); + ADDCONST("UPDATE_ALL", NPY_ARRAY_UPDATE_ALL); + +#undef ADDCONST( + + if (PyErr_Occurred()) + Py_FatalError("can't initialize module wrap"); + +#ifdef F2PY_REPORT_ATEXIT + on_exit(f2py_report_on_exit,(void*)"array_from_pyobj.wrap.call"); +#endif + + return m; +} +#ifdef __cplusplus +} +#endif diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap b/MLPY/Lib/site-packages/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap new file mode 100644 index 0000000000000000000000000000000000000000..273c177824c9ca8fea68791e4ba44c5058a79f6d --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap @@ -0,0 +1 @@ +dict(real=dict(rk="double")) diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/src/assumed_shape/foo_free.f90 b/MLPY/Lib/site-packages/numpy/f2py/tests/src/assumed_shape/foo_free.f90 new file mode 100644 index 0000000000000000000000000000000000000000..bb7822023363bab9bfcf4d5b29eec5f231e523b9 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/src/assumed_shape/foo_free.f90 @@ -0,0 +1,34 @@ + +subroutine sum(x, res) + implicit none + real, intent(in) :: x(:) + real, intent(out) :: res + + integer :: i + + !print *, "sum: size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + +end subroutine sum + +function fsum(x) result (res) + implicit none + real, intent(in) :: x(:) + real :: res + + integer :: i + + !print *, "fsum: size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + +end function fsum diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/src/assumed_shape/foo_mod.f90 b/MLPY/Lib/site-packages/numpy/f2py/tests/src/assumed_shape/foo_mod.f90 new file mode 100644 index 0000000000000000000000000000000000000000..d6da9f4b8bed19b3c84538ae0bdf232e66498fb7 --- 
/dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/src/assumed_shape/foo_mod.f90 @@ -0,0 +1,41 @@ + +module mod + +contains + +subroutine sum(x, res) + implicit none + real, intent(in) :: x(:) + real, intent(out) :: res + + integer :: i + + !print *, "sum: size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + +end subroutine sum + +function fsum(x) result (res) + implicit none + real, intent(in) :: x(:) + real :: res + + integer :: i + + !print *, "fsum: size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + +end function fsum + + +end module mod diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/src/assumed_shape/foo_use.f90 b/MLPY/Lib/site-packages/numpy/f2py/tests/src/assumed_shape/foo_use.f90 new file mode 100644 index 0000000000000000000000000000000000000000..992147c7bb23ed65bf1a43b431e863abafc4cbd6 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/src/assumed_shape/foo_use.f90 @@ -0,0 +1,19 @@ +subroutine sum_with_use(x, res) + use precision + + implicit none + + real(kind=rk), intent(in) :: x(:) + real(kind=rk), intent(out) :: res + + integer :: i + + !print *, "size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + + end subroutine diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/src/assumed_shape/precision.f90 b/MLPY/Lib/site-packages/numpy/f2py/tests/src/assumed_shape/precision.f90 new file mode 100644 index 0000000000000000000000000000000000000000..8072a240ab4e1cccef43b060e13738eb45a5563d --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/src/assumed_shape/precision.f90 @@ -0,0 +1,4 @@ +module precision + integer, parameter :: rk = selected_real_kind(8) + integer, parameter :: ik = selected_real_kind(4) +end module diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/src/common/block.f b/MLPY/Lib/site-packages/numpy/f2py/tests/src/common/block.f new file mode 100644 index 0000000000000000000000000000000000000000..32a26667d520a782f4be75d3c578857e92c46211 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/src/common/block.f @@ -0,0 +1,11 @@ + SUBROUTINE INITCB + DOUBLE PRECISION LONG + CHARACTER STRING + INTEGER OK + + COMMON /BLOCK/ LONG, STRING, OK + LONG = 1.0 + STRING = '2' + OK = 3 + RETURN + END diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/src/kind/foo.f90 b/MLPY/Lib/site-packages/numpy/f2py/tests/src/kind/foo.f90 new file mode 100644 index 0000000000000000000000000000000000000000..57b8b378a32f45c9b6f3db12c12ec03e94cb90ee --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/src/kind/foo.f90 @@ -0,0 +1,20 @@ + + +subroutine selectedrealkind(p, r, res) + implicit none + + integer, intent(in) :: p, r + !f2py integer :: r=0 + integer, intent(out) :: res + res = selected_real_kind(p, r) + +end subroutine + +subroutine selectedintkind(p, res) + implicit none + + integer, intent(in) :: p + integer, intent(out) :: res + res = selected_int_kind(p) + +end subroutine diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/src/mixed/foo.f b/MLPY/Lib/site-packages/numpy/f2py/tests/src/mixed/foo.f new file mode 100644 index 0000000000000000000000000000000000000000..a77d1e09e4b348daf854cd508bf7f05b5cc8b5be --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/src/mixed/foo.f @@ -0,0 +1,5 @@ + subroutine bar11(a) +cf2py intent(out) a + integer a + a = 11 + end diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/src/mixed/foo_fixed.f90 b/MLPY/Lib/site-packages/numpy/f2py/tests/src/mixed/foo_fixed.f90 new file mode 100644 index 
0000000000000000000000000000000000000000..334133eb5808b45268747eb007ac983f0ab01efa --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/src/mixed/foo_fixed.f90 @@ -0,0 +1,8 @@ + module foo_fixed + contains + subroutine bar12(a) +!f2py intent(out) a + integer a + a = 12 + end subroutine bar12 + end module foo_fixed diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/src/mixed/foo_free.f90 b/MLPY/Lib/site-packages/numpy/f2py/tests/src/mixed/foo_free.f90 new file mode 100644 index 0000000000000000000000000000000000000000..5bfc3d262127be96bb7c442b9d35e9498278eb24 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/src/mixed/foo_free.f90 @@ -0,0 +1,8 @@ +module foo_free +contains + subroutine bar13(a) + !f2py intent(out) a + integer a + a = 13 + end subroutine bar13 +end module foo_free diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/src/module_data/mod.mod b/MLPY/Lib/site-packages/numpy/f2py/tests/src/module_data/mod.mod new file mode 100644 index 0000000000000000000000000000000000000000..8670a97e911c48ff2cae2cf83dd14f42f8a7004a Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/f2py/tests/src/module_data/mod.mod differ diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/src/module_data/module_data_docstring.f90 b/MLPY/Lib/site-packages/numpy/f2py/tests/src/module_data/module_data_docstring.f90 new file mode 100644 index 0000000000000000000000000000000000000000..3a6d2199124d22be8b11fc0cb96e4257700e4b37 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/src/module_data/module_data_docstring.f90 @@ -0,0 +1,12 @@ +module mod + integer :: i + integer :: x(4) + real, dimension(2,3) :: a + real, allocatable, dimension(:,:) :: b +contains + subroutine foo + integer :: k + k = 1 + a(1,2) = a(1,2)+3 + end subroutine foo +end module mod diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/src/parameter/constant_both.f90 b/MLPY/Lib/site-packages/numpy/f2py/tests/src/parameter/constant_both.f90 new file mode 100644 index 0000000000000000000000000000000000000000..b16af3e8bb5c533c6ef5a051537e471565ca4337 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/src/parameter/constant_both.f90 @@ -0,0 +1,57 @@ +! Check that parameters are correct intercepted. +! Constants with comma separations are commonly +! used, for instance Pi = 3._dp +subroutine foo(x) + implicit none + integer, parameter :: sp = selected_real_kind(6) + integer, parameter :: dp = selected_real_kind(15) + integer, parameter :: ii = selected_int_kind(9) + integer, parameter :: il = selected_int_kind(18) + real(dp), intent(inout) :: x + dimension x(3) + real(sp), parameter :: three_s = 3._sp + real(dp), parameter :: three_d = 3._dp + integer(ii), parameter :: three_i = 3_ii + integer(il), parameter :: three_l = 3_il + x(1) = x(1) + x(2) * three_s * three_i + x(3) * three_d * three_l + x(2) = x(2) * three_s + x(3) = x(3) * three_l + return +end subroutine + + +subroutine foo_no(x) + implicit none + integer, parameter :: sp = selected_real_kind(6) + integer, parameter :: dp = selected_real_kind(15) + integer, parameter :: ii = selected_int_kind(9) + integer, parameter :: il = selected_int_kind(18) + real(dp), intent(inout) :: x + dimension x(3) + real(sp), parameter :: three_s = 3. + real(dp), parameter :: three_d = 3. 
+ integer(ii), parameter :: three_i = 3 + integer(il), parameter :: three_l = 3 + x(1) = x(1) + x(2) * three_s * three_i + x(3) * three_d * three_l + x(2) = x(2) * three_s + x(3) = x(3) * three_l + return +end subroutine + +subroutine foo_sum(x) + implicit none + integer, parameter :: sp = selected_real_kind(6) + integer, parameter :: dp = selected_real_kind(15) + integer, parameter :: ii = selected_int_kind(9) + integer, parameter :: il = selected_int_kind(18) + real(dp), intent(inout) :: x + dimension x(3) + real(sp), parameter :: three_s = 2._sp + 1._sp + real(dp), parameter :: three_d = 1._dp + 2._dp + integer(ii), parameter :: three_i = 2_ii + 1_ii + integer(il), parameter :: three_l = 1_il + 2_il + x(1) = x(1) + x(2) * three_s * three_i + x(3) * three_d * three_l + x(2) = x(2) * three_s + x(3) = x(3) * three_l + return +end subroutine diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/src/parameter/constant_compound.f90 b/MLPY/Lib/site-packages/numpy/f2py/tests/src/parameter/constant_compound.f90 new file mode 100644 index 0000000000000000000000000000000000000000..8dbe74de4c1fafb66ca5ed08fbeebc4b36c4926b --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/src/parameter/constant_compound.f90 @@ -0,0 +1,15 @@ +! Check that parameters are correct intercepted. +! Constants with comma separations are commonly +! used, for instance Pi = 3._dp +subroutine foo_compound_int(x) + implicit none + integer, parameter :: ii = selected_int_kind(9) + integer(ii), intent(inout) :: x + dimension x(3) + integer(ii), parameter :: three = 3_ii + integer(ii), parameter :: two = 2_ii + integer(ii), parameter :: six = three * 1_ii * two + + x(1) = x(1) + x(2) + x(3) * six + return +end subroutine diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/src/parameter/constant_integer.f90 b/MLPY/Lib/site-packages/numpy/f2py/tests/src/parameter/constant_integer.f90 new file mode 100644 index 0000000000000000000000000000000000000000..34756a390028e801d78945bb94d74f220a8b43d6 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/src/parameter/constant_integer.f90 @@ -0,0 +1,22 @@ +! Check that parameters are correct intercepted. +! Constants with comma separations are commonly +! used, for instance Pi = 3._dp +subroutine foo_int(x) + implicit none + integer, parameter :: ii = selected_int_kind(9) + integer(ii), intent(inout) :: x + dimension x(3) + integer(ii), parameter :: three = 3_ii + x(1) = x(1) + x(2) + x(3) * three + return +end subroutine + +subroutine foo_long(x) + implicit none + integer, parameter :: ii = selected_int_kind(18) + integer(ii), intent(inout) :: x + dimension x(3) + integer(ii), parameter :: three = 3_ii + x(1) = x(1) + x(2) + x(3) * three + return +end subroutine diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/src/parameter/constant_non_compound.f90 b/MLPY/Lib/site-packages/numpy/f2py/tests/src/parameter/constant_non_compound.f90 new file mode 100644 index 0000000000000000000000000000000000000000..bcaa03bd4f7233eec8a21b0fb9a41a949ecc1938 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/src/parameter/constant_non_compound.f90 @@ -0,0 +1,23 @@ +! Check that parameters are correct intercepted. +! Specifically that types of constants without +! compound kind specs are correctly inferred +! adapted Gibbs iteration code from pymc +! 
for this test case +subroutine foo_non_compound_int(x) + implicit none + integer, parameter :: ii = selected_int_kind(9) + + integer(ii) maxiterates + parameter (maxiterates=2) + + integer(ii) maxseries + parameter (maxseries=2) + + integer(ii) wasize + parameter (wasize=maxiterates*maxseries) + integer(ii), intent(inout) :: x + dimension x(wasize) + + x(1) = x(1) + x(2) + x(3) + x(4) * wasize + return +end subroutine diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/src/parameter/constant_real.f90 b/MLPY/Lib/site-packages/numpy/f2py/tests/src/parameter/constant_real.f90 new file mode 100644 index 0000000000000000000000000000000000000000..c4d25bbbd7a2953f2a9d30f905f868645d5bdb84 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/src/parameter/constant_real.f90 @@ -0,0 +1,23 @@ +! Check that parameters are correct intercepted. +! Constants with comma separations are commonly +! used, for instance Pi = 3._dp +subroutine foo_single(x) + implicit none + integer, parameter :: rp = selected_real_kind(6) + real(rp), intent(inout) :: x + dimension x(3) + real(rp), parameter :: three = 3._rp + x(1) = x(1) + x(2) + x(3) * three + return +end subroutine + +subroutine foo_double(x) + implicit none + integer, parameter :: rp = selected_real_kind(15) + real(rp), intent(inout) :: x + dimension x(3) + real(rp), parameter :: three = 3._rp + x(1) = x(1) + x(2) + x(3) * three + return +end subroutine + diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/src/regression/inout.f90 b/MLPY/Lib/site-packages/numpy/f2py/tests/src/regression/inout.f90 new file mode 100644 index 0000000000000000000000000000000000000000..430258a3cfc01c73fd2993435681639d9df684f7 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/src/regression/inout.f90 @@ -0,0 +1,9 @@ +! Check that intent(in out) translates as intent(inout). +! The separation seems to be a common usage. 
+ subroutine foo(x) + implicit none + real(4), intent(in out) :: x + dimension x(3) + x(1) = x(1) + x(2) + x(3) + return + end diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/src/size/foo.f90 b/MLPY/Lib/site-packages/numpy/f2py/tests/src/size/foo.f90 new file mode 100644 index 0000000000000000000000000000000000000000..2ad165877748ed6084daa804d9d57ee011c8f55a --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/src/size/foo.f90 @@ -0,0 +1,44 @@ + +subroutine foo(a, n, m, b) + implicit none + + real, intent(in) :: a(n, m) + integer, intent(in) :: n, m + real, intent(out) :: b(size(a, 1)) + + integer :: i + + do i = 1, size(b) + b(i) = sum(a(i,:)) + enddo +end subroutine + +subroutine trans(x,y) + implicit none + real, intent(in), dimension(:,:) :: x + real, intent(out), dimension( size(x,2), size(x,1) ) :: y + integer :: N, M, i, j + N = size(x,1) + M = size(x,2) + DO i=1,N + do j=1,M + y(j,i) = x(i,j) + END DO + END DO +end subroutine trans + +subroutine flatten(x,y) + implicit none + real, intent(in), dimension(:,:) :: x + real, intent(out), dimension( size(x) ) :: y + integer :: N, M, i, j, k + N = size(x,1) + M = size(x,2) + k = 1 + DO i=1,N + do j=1,M + y(k) = x(i,j) + k = k + 1 + END DO + END DO +end subroutine flatten diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/src/string/char.f90 b/MLPY/Lib/site-packages/numpy/f2py/tests/src/string/char.f90 new file mode 100644 index 0000000000000000000000000000000000000000..242bbef28f21b3fa2a4b340364df6b22a0d647c6 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/src/string/char.f90 @@ -0,0 +1,29 @@ +MODULE char_test + +CONTAINS + +SUBROUTINE change_strings(strings, n_strs, out_strings) + IMPLICIT NONE + + ! Inputs + INTEGER, INTENT(IN) :: n_strs + CHARACTER, INTENT(IN), DIMENSION(2,n_strs) :: strings + CHARACTER, INTENT(OUT), DIMENSION(2,n_strs) :: out_strings + +!f2py INTEGER, INTENT(IN) :: n_strs +!f2py CHARACTER, INTENT(IN), DIMENSION(2,n_strs) :: strings +!f2py CHARACTER, INTENT(OUT), DIMENSION(2,n_strs) :: strings + + ! Misc. + INTEGER*4 :: j + + + DO j=1, n_strs + out_strings(1,j) = strings(1,j) + out_strings(2,j) = 'A' + END DO + +END SUBROUTINE change_strings + +END MODULE char_test + diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/test_abstract_interface.py b/MLPY/Lib/site-packages/numpy/f2py/tests/test_abstract_interface.py new file mode 100644 index 0000000000000000000000000000000000000000..9245dd232930dc5173e6a7abccc9fea8280b53a4 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/test_abstract_interface.py @@ -0,0 +1,66 @@ +import textwrap +from . 
import util +from numpy.f2py import crackfortran + + +class TestAbstractInterface(util.F2PyTest): + suffix = '.f90' + + skip = ['add1', 'add2'] + + code = textwrap.dedent(""" + module ops_module + + abstract interface + subroutine op(x, y, z) + integer, intent(in) :: x, y + integer, intent(out) :: z + end subroutine + end interface + + contains + + subroutine foo(x, y, r1, r2) + integer, intent(in) :: x, y + integer, intent(out) :: r1, r2 + procedure (op) add1, add2 + procedure (op), pointer::p + p=>add1 + call p(x, y, r1) + p=>add2 + call p(x, y, r2) + end subroutine + end module + + subroutine add1(x, y, z) + integer, intent(in) :: x, y + integer, intent(out) :: z + z = x + y + end subroutine + + subroutine add2(x, y, z) + integer, intent(in) :: x, y + integer, intent(out) :: z + z = x + 2 * y + end subroutine + """) + + def test_abstract_interface(self): + assert self.module.ops_module.foo(3, 5) == (8, 13) + + def test_parse_abstract_interface(self, tmp_path): + # Test gh18403 + f_path = tmp_path / "gh18403_mod.f90" + with f_path.open('w') as ff: + ff.write(textwrap.dedent("""\ + module test + abstract interface + subroutine foo() + end subroutine + end interface + end module test + """)) + mod = crackfortran.crackfortran([str(f_path)]) + assert len(mod) == 1 + assert len(mod[0]['body']) == 1 + assert mod[0]['body'][0]['block'] == 'abstract interface' diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/test_array_from_pyobj.py b/MLPY/Lib/site-packages/numpy/f2py/tests/test_array_from_pyobj.py new file mode 100644 index 0000000000000000000000000000000000000000..536f2d006206d8349e224eb258169af10687c858 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/test_array_from_pyobj.py @@ -0,0 +1,596 @@ +import os +import sys +import copy +import platform +import pytest + +import numpy as np + +from numpy.testing import assert_, assert_equal +from numpy.core.multiarray import typeinfo +from . 
import util + +wrap = None + + +def setup_module(): + """ + Build the required testing extension module + + """ + global wrap + + # Check compiler availability first + if not util.has_c_compiler(): + pytest.skip("No C compiler available") + + if wrap is None: + config_code = """ + config.add_extension('test_array_from_pyobj_ext', + sources=['wrapmodule.c', 'fortranobject.c'], + define_macros=[]) + """ + d = os.path.dirname(__file__) + src = [os.path.join(d, 'src', 'array_from_pyobj', 'wrapmodule.c'), + os.path.join(d, '..', 'src', 'fortranobject.c'), + os.path.join(d, '..', 'src', 'fortranobject.h')] + wrap = util.build_module_distutils(src, config_code, + 'test_array_from_pyobj_ext') + + +def flags_info(arr): + flags = wrap.array_attrs(arr)[6] + return flags2names(flags) + + +def flags2names(flags): + info = [] + for flagname in ['CONTIGUOUS', 'FORTRAN', 'OWNDATA', 'ENSURECOPY', + 'ENSUREARRAY', 'ALIGNED', 'NOTSWAPPED', 'WRITEABLE', + 'WRITEBACKIFCOPY', 'UPDATEIFCOPY', 'BEHAVED', 'BEHAVED_RO', + 'CARRAY', 'FARRAY' + ]: + if abs(flags) & getattr(wrap, flagname, 0): + info.append(flagname) + return info + + +class Intent: + + def __init__(self, intent_list=[]): + self.intent_list = intent_list[:] + flags = 0 + for i in intent_list: + if i == 'optional': + flags |= wrap.F2PY_OPTIONAL + else: + flags |= getattr(wrap, 'F2PY_INTENT_' + i.upper()) + self.flags = flags + + def __getattr__(self, name): + name = name.lower() + if name == 'in_': + name = 'in' + return self.__class__(self.intent_list + [name]) + + def __str__(self): + return 'intent(%s)' % (','.join(self.intent_list)) + + def __repr__(self): + return 'Intent(%r)' % (self.intent_list) + + def is_intent(self, *names): + for name in names: + if name not in self.intent_list: + return False + return True + + def is_intent_exact(self, *names): + return len(self.intent_list) == len(names) and self.is_intent(*names) + +intent = Intent() + +_type_names = ['BOOL', 'BYTE', 'UBYTE', 'SHORT', 'USHORT', 'INT', 'UINT', + 'LONG', 'ULONG', 'LONGLONG', 'ULONGLONG', + 'FLOAT', 'DOUBLE', 'CFLOAT'] + +_cast_dict = {'BOOL': ['BOOL']} +_cast_dict['BYTE'] = _cast_dict['BOOL'] + ['BYTE'] +_cast_dict['UBYTE'] = _cast_dict['BOOL'] + ['UBYTE'] +_cast_dict['BYTE'] = ['BYTE'] +_cast_dict['UBYTE'] = ['UBYTE'] +_cast_dict['SHORT'] = _cast_dict['BYTE'] + ['UBYTE', 'SHORT'] +_cast_dict['USHORT'] = _cast_dict['UBYTE'] + ['BYTE', 'USHORT'] +_cast_dict['INT'] = _cast_dict['SHORT'] + ['USHORT', 'INT'] +_cast_dict['UINT'] = _cast_dict['USHORT'] + ['SHORT', 'UINT'] + +_cast_dict['LONG'] = _cast_dict['INT'] + ['LONG'] +_cast_dict['ULONG'] = _cast_dict['UINT'] + ['ULONG'] + +_cast_dict['LONGLONG'] = _cast_dict['LONG'] + ['LONGLONG'] +_cast_dict['ULONGLONG'] = _cast_dict['ULONG'] + ['ULONGLONG'] + +_cast_dict['FLOAT'] = _cast_dict['SHORT'] + ['USHORT', 'FLOAT'] +_cast_dict['DOUBLE'] = _cast_dict['INT'] + ['UINT', 'FLOAT', 'DOUBLE'] + +_cast_dict['CFLOAT'] = _cast_dict['FLOAT'] + ['CFLOAT'] + +# 32 bit system malloc typically does not provide the alignment required by +# 16 byte long double types this means the inout intent cannot be satisfied +# and several tests fail as the alignment flag can be randomly true or fals +# when numpy gains an aligned allocator the tests could be enabled again +# +# Furthermore, on macOS ARM64, LONGDOUBLE is an alias for DOUBLE. 
+if ((np.intp().dtype.itemsize != 4 or np.clongdouble().dtype.alignment <= 8) and + sys.platform != 'win32' and + (platform.system(), platform.processor()) != ('Darwin', 'arm')): + _type_names.extend(['LONGDOUBLE', 'CDOUBLE', 'CLONGDOUBLE']) + _cast_dict['LONGDOUBLE'] = _cast_dict['LONG'] + \ + ['ULONG', 'FLOAT', 'DOUBLE', 'LONGDOUBLE'] + _cast_dict['CLONGDOUBLE'] = _cast_dict['LONGDOUBLE'] + \ + ['CFLOAT', 'CDOUBLE', 'CLONGDOUBLE'] + _cast_dict['CDOUBLE'] = _cast_dict['DOUBLE'] + ['CFLOAT', 'CDOUBLE'] + + +class Type: + _type_cache = {} + + def __new__(cls, name): + if isinstance(name, np.dtype): + dtype0 = name + name = None + for n, i in typeinfo.items(): + if not isinstance(i, type) and dtype0.type is i.type: + name = n + break + obj = cls._type_cache.get(name.upper(), None) + if obj is not None: + return obj + obj = object.__new__(cls) + obj._init(name) + cls._type_cache[name.upper()] = obj + return obj + + def _init(self, name): + self.NAME = name.upper() + info = typeinfo[self.NAME] + self.type_num = getattr(wrap, 'NPY_' + self.NAME) + assert_equal(self.type_num, info.num) + self.dtype = np.dtype(info.type) + self.type = info.type + self.elsize = info.bits / 8 + self.dtypechar = info.char + + def cast_types(self): + return [self.__class__(_m) for _m in _cast_dict[self.NAME]] + + def all_types(self): + return [self.__class__(_m) for _m in _type_names] + + def smaller_types(self): + bits = typeinfo[self.NAME].alignment + types = [] + for name in _type_names: + if typeinfo[name].alignment < bits: + types.append(Type(name)) + return types + + def equal_types(self): + bits = typeinfo[self.NAME].alignment + types = [] + for name in _type_names: + if name == self.NAME: + continue + if typeinfo[name].alignment == bits: + types.append(Type(name)) + return types + + def larger_types(self): + bits = typeinfo[self.NAME].alignment + types = [] + for name in _type_names: + if typeinfo[name].alignment > bits: + types.append(Type(name)) + return types + + +class Array: + + def __init__(self, typ, dims, intent, obj): + self.type = typ + self.dims = dims + self.intent = intent + self.obj_copy = copy.deepcopy(obj) + self.obj = obj + + # arr.dtypechar may be different from typ.dtypechar + self.arr = wrap.call(typ.type_num, dims, intent.flags, obj) + + assert_(isinstance(self.arr, np.ndarray), repr(type(self.arr))) + + self.arr_attr = wrap.array_attrs(self.arr) + + if len(dims) > 1: + if self.intent.is_intent('c'): + assert_(intent.flags & wrap.F2PY_INTENT_C) + assert_(not self.arr.flags['FORTRAN'], + repr((self.arr.flags, getattr(obj, 'flags', None)))) + assert_(self.arr.flags['CONTIGUOUS']) + assert_(not self.arr_attr[6] & wrap.FORTRAN) + else: + assert_(not intent.flags & wrap.F2PY_INTENT_C) + assert_(self.arr.flags['FORTRAN']) + assert_(not self.arr.flags['CONTIGUOUS']) + assert_(self.arr_attr[6] & wrap.FORTRAN) + + if obj is None: + self.pyarr = None + self.pyarr_attr = None + return + + if intent.is_intent('cache'): + assert_(isinstance(obj, np.ndarray), repr(type(obj))) + self.pyarr = np.array(obj).reshape(*dims).copy() + else: + self.pyarr = np.array( + np.array(obj, dtype=typ.dtypechar).reshape(*dims), + order=self.intent.is_intent('c') and 'C' or 'F') + assert_(self.pyarr.dtype == typ, + repr((self.pyarr.dtype, typ))) + self.pyarr.setflags(write=self.arr.flags['WRITEABLE']) + assert_(self.pyarr.flags['OWNDATA'], (obj, intent)) + self.pyarr_attr = wrap.array_attrs(self.pyarr) + + if len(dims) > 1: + if self.intent.is_intent('c'): + assert_(not self.pyarr.flags['FORTRAN']) + 
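# (Descriptive comment added for clarity, not in the upstream file:) + # for intent(c) the Python-side reference array must be C-contiguous, + # mirroring the checks applied to self.arr above. + 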
assert_(self.pyarr.flags['CONTIGUOUS']) + assert_(not self.pyarr_attr[6] & wrap.FORTRAN) + else: + assert_(self.pyarr.flags['FORTRAN']) + assert_(not self.pyarr.flags['CONTIGUOUS']) + assert_(self.pyarr_attr[6] & wrap.FORTRAN) + + assert_(self.arr_attr[1] == self.pyarr_attr[1]) # nd + assert_(self.arr_attr[2] == self.pyarr_attr[2]) # dimensions + if self.arr_attr[1] <= 1: + assert_(self.arr_attr[3] == self.pyarr_attr[3], + repr((self.arr_attr[3], self.pyarr_attr[3], + self.arr.tobytes(), self.pyarr.tobytes()))) # strides + assert_(self.arr_attr[5][-2:] == self.pyarr_attr[5][-2:], + repr((self.arr_attr[5], self.pyarr_attr[5]))) # descr + assert_(self.arr_attr[6] == self.pyarr_attr[6], + repr((self.arr_attr[6], self.pyarr_attr[6], + flags2names(0 * self.arr_attr[6] - self.pyarr_attr[6]), + flags2names(self.arr_attr[6]), intent))) # flags + + if intent.is_intent('cache'): + assert_(self.arr_attr[5][3] >= self.type.elsize, + repr((self.arr_attr[5][3], self.type.elsize))) + else: + assert_(self.arr_attr[5][3] == self.type.elsize, + repr((self.arr_attr[5][3], self.type.elsize))) + assert_(self.arr_equal(self.pyarr, self.arr)) + + if isinstance(self.obj, np.ndarray): + if typ.elsize == Type(obj.dtype).elsize: + if not intent.is_intent('copy') and self.arr_attr[1] <= 1: + assert_(self.has_shared_memory()) + + def arr_equal(self, arr1, arr2): + if arr1.shape != arr2.shape: + return False + return (arr1 == arr2).all() + + def __str__(self): + return str(self.arr) + + def has_shared_memory(self): + """Check that created array shares data with input array. + """ + if self.obj is self.arr: + return True + if not isinstance(self.obj, np.ndarray): + return False + obj_attr = wrap.array_attrs(self.obj) + return obj_attr[0] == self.arr_attr[0] + + +class TestIntent: + + def test_in_out(self): + assert_equal(str(intent.in_.out), 'intent(in,out)') + assert_(intent.in_.c.is_intent('c')) + assert_(not intent.in_.c.is_intent_exact('c')) + assert_(intent.in_.c.is_intent_exact('c', 'in')) + assert_(intent.in_.c.is_intent_exact('in', 'c')) + assert_(not intent.in_.is_intent('c')) + + +class TestSharedMemory: + num2seq = [1, 2] + num23seq = [[1, 2, 3], [4, 5, 6]] + + @pytest.fixture(autouse=True, scope='class', params=_type_names) + def setup_type(self, request): + request.cls.type = Type(request.param) + request.cls.array = lambda self, dims, intent, obj: \ + Array(Type(request.param), dims, intent, obj) + + def test_in_from_2seq(self): + a = self.array([2], intent.in_, self.num2seq) + assert_(not a.has_shared_memory()) + + def test_in_from_2casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num2seq, dtype=t.dtype) + a = self.array([len(self.num2seq)], intent.in_, obj) + if t.elsize == self.type.elsize: + assert_( + a.has_shared_memory(), repr((self.type.dtype, t.dtype))) + else: + assert_(not a.has_shared_memory(), repr(t.dtype)) + + @pytest.mark.parametrize('write', ['w', 'ro']) + @pytest.mark.parametrize('order', ['C', 'F']) + @pytest.mark.parametrize('inp', ['2seq', '23seq']) + def test_in_nocopy(self, write, order, inp): + """Test if intent(in) array can be passed without copies + """ + seq = getattr(self, 'num' + inp) + obj = np.array(seq, dtype=self.type.dtype, order=order) + obj.setflags(write=(write == 'w')) + a = self.array(obj.shape, ((order=='C' and intent.in_.c) or intent.in_), obj) + assert a.has_shared_memory() + + def test_inout_2seq(self): + obj = np.array(self.num2seq, dtype=self.type.dtype) + a = self.array([len(self.num2seq)], intent.inout, obj) + 
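# (Descriptive comment added for clarity, not in the upstream file:) + # intent(inout) must operate on the caller's buffer in place, so the + # wrapped array is expected to share memory with obj. + 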
assert_(a.has_shared_memory()) + + try: + a = self.array([2], intent.in_.inout, self.num2seq) + except TypeError as msg: + if not str(msg).startswith('failed to initialize intent' + '(inout|inplace|cache) array'): + raise + else: + raise SystemError('intent(inout) should have failed on sequence') + + def test_f_inout_23seq(self): + obj = np.array(self.num23seq, dtype=self.type.dtype, order='F') + shape = (len(self.num23seq), len(self.num23seq[0])) + a = self.array(shape, intent.in_.inout, obj) + assert_(a.has_shared_memory()) + + obj = np.array(self.num23seq, dtype=self.type.dtype, order='C') + shape = (len(self.num23seq), len(self.num23seq[0])) + try: + a = self.array(shape, intent.in_.inout, obj) + except ValueError as msg: + if not str(msg).startswith('failed to initialize intent' + '(inout) array'): + raise + else: + raise SystemError( + 'intent(inout) should have failed on improper array') + + def test_c_inout_23seq(self): + obj = np.array(self.num23seq, dtype=self.type.dtype) + shape = (len(self.num23seq), len(self.num23seq[0])) + a = self.array(shape, intent.in_.c.inout, obj) + assert_(a.has_shared_memory()) + + def test_in_copy_from_2casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num2seq, dtype=t.dtype) + a = self.array([len(self.num2seq)], intent.in_.copy, obj) + assert_(not a.has_shared_memory(), repr(t.dtype)) + + def test_c_in_from_23seq(self): + a = self.array([len(self.num23seq), len(self.num23seq[0])], + intent.in_, self.num23seq) + assert_(not a.has_shared_memory()) + + def test_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num23seq, dtype=t.dtype) + a = self.array([len(self.num23seq), len(self.num23seq[0])], + intent.in_, obj) + assert_(not a.has_shared_memory(), repr(t.dtype)) + + def test_f_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num23seq, dtype=t.dtype, order='F') + a = self.array([len(self.num23seq), len(self.num23seq[0])], + intent.in_, obj) + if t.elsize == self.type.elsize: + assert_(a.has_shared_memory(), repr(t.dtype)) + else: + assert_(not a.has_shared_memory(), repr(t.dtype)) + + def test_c_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num23seq, dtype=t.dtype) + a = self.array([len(self.num23seq), len(self.num23seq[0])], + intent.in_.c, obj) + if t.elsize == self.type.elsize: + assert_(a.has_shared_memory(), repr(t.dtype)) + else: + assert_(not a.has_shared_memory(), repr(t.dtype)) + + def test_f_copy_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num23seq, dtype=t.dtype, order='F') + a = self.array([len(self.num23seq), len(self.num23seq[0])], + intent.in_.copy, obj) + assert_(not a.has_shared_memory(), repr(t.dtype)) + + def test_c_copy_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num23seq, dtype=t.dtype) + a = self.array([len(self.num23seq), len(self.num23seq[0])], + intent.in_.c.copy, obj) + assert_(not a.has_shared_memory(), repr(t.dtype)) + + def test_in_cache_from_2casttype(self): + for t in self.type.all_types(): + if t.elsize != self.type.elsize: + continue + obj = np.array(self.num2seq, dtype=t.dtype) + shape = (len(self.num2seq),) + a = self.array(shape, intent.in_.c.cache, obj) + assert_(a.has_shared_memory(), repr(t.dtype)) + + a = self.array(shape, intent.in_.cache, obj) + assert_(a.has_shared_memory(), repr(t.dtype)) + + obj = np.array(self.num2seq, dtype=t.dtype, order='F') + a = self.array(shape, intent.in_.c.cache, obj) + 
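# (Descriptive comment added for clarity, not in the upstream file:) + # intent(cache) only requires a large enough, properly aligned buffer; + # element order is irrelevant, so even this Fortran-ordered input is + # expected to be used in place. + 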
assert_(a.has_shared_memory(), repr(t.dtype)) + + a = self.array(shape, intent.in_.cache, obj) + assert_(a.has_shared_memory(), repr(t.dtype)) + + try: + a = self.array(shape, intent.in_.cache, obj[::-1]) + except ValueError as msg: + if not str(msg).startswith('failed to initialize' + ' intent(cache) array'): + raise + else: + raise SystemError( + 'intent(cache) should have failed on multisegmented array') + + def test_in_cache_from_2casttype_failure(self): + for t in self.type.all_types(): + if t.elsize >= self.type.elsize: + continue + obj = np.array(self.num2seq, dtype=t.dtype) + shape = (len(self.num2seq),) + try: + self.array(shape, intent.in_.cache, obj) # Expected to fail + except ValueError as msg: + if not str(msg).startswith('failed to initialize' + ' intent(cache) array'): + raise + else: + raise SystemError( + 'intent(cache) should have failed on smaller array') + + def test_cache_hidden(self): + shape = (2,) + a = self.array(shape, intent.cache.hide, None) + assert_(a.arr.shape == shape) + + shape = (2, 3) + a = self.array(shape, intent.cache.hide, None) + assert_(a.arr.shape == shape) + + shape = (-1, 3) + try: + a = self.array(shape, intent.cache.hide, None) + except ValueError as msg: + if not str(msg).startswith('failed to create intent' + '(cache|hide)|optional array'): + raise + else: + raise SystemError( + 'intent(cache) should have failed on undefined dimensions') + + def test_hidden(self): + shape = (2,) + a = self.array(shape, intent.hide, None) + assert_(a.arr.shape == shape) + assert_(a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))) + + shape = (2, 3) + a = self.array(shape, intent.hide, None) + assert_(a.arr.shape == shape) + assert_(a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))) + assert_(a.arr.flags['FORTRAN'] and not a.arr.flags['CONTIGUOUS']) + + shape = (2, 3) + a = self.array(shape, intent.c.hide, None) + assert_(a.arr.shape == shape) + assert_(a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))) + assert_(not a.arr.flags['FORTRAN'] and a.arr.flags['CONTIGUOUS']) + + shape = (-1, 3) + try: + a = self.array(shape, intent.hide, None) + except ValueError as msg: + if not str(msg).startswith('failed to create intent' + '(cache|hide)|optional array'): + raise + else: + raise SystemError('intent(hide) should have failed' + ' on undefined dimensions') + + def test_optional_none(self): + shape = (2,) + a = self.array(shape, intent.optional, None) + assert_(a.arr.shape == shape) + assert_(a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))) + + shape = (2, 3) + a = self.array(shape, intent.optional, None) + assert_(a.arr.shape == shape) + assert_(a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))) + assert_(a.arr.flags['FORTRAN'] and not a.arr.flags['CONTIGUOUS']) + + shape = (2, 3) + a = self.array(shape, intent.c.optional, None) + assert_(a.arr.shape == shape) + assert_(a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))) + assert_(not a.arr.flags['FORTRAN'] and a.arr.flags['CONTIGUOUS']) + + def test_optional_from_2seq(self): + obj = self.num2seq + shape = (len(obj),) + a = self.array(shape, intent.optional, obj) + assert_(a.arr.shape == shape) + assert_(not a.has_shared_memory()) + + def test_optional_from_23seq(self): + obj = self.num23seq + shape = (len(obj), len(obj[0])) + a = self.array(shape, intent.optional, obj) + assert_(a.arr.shape == shape) + assert_(not a.has_shared_memory()) + + a = self.array(shape, intent.optional.c, obj) + assert_(a.arr.shape == shape) + assert_(not a.has_shared_memory()) + + def 
test_inplace(self): + obj = np.array(self.num23seq, dtype=self.type.dtype) + assert_(not obj.flags['FORTRAN'] and obj.flags['CONTIGUOUS']) + shape = obj.shape + a = self.array(shape, intent.inplace, obj) + assert_(obj[1][2] == a.arr[1][2], repr((obj, a.arr))) + a.arr[1][2] = 54 + assert_(obj[1][2] == a.arr[1][2] == + np.array(54, dtype=self.type.dtype), repr((obj, a.arr))) + assert_(a.arr is obj) + assert_(obj.flags['FORTRAN']) # obj attributes are changed inplace! + assert_(not obj.flags['CONTIGUOUS']) + + def test_inplace_from_casttype(self): + for t in self.type.cast_types(): + if t is self.type: + continue + obj = np.array(self.num23seq, dtype=t.dtype) + assert_(obj.dtype.type == t.type) + assert_(obj.dtype.type is not self.type.type) + assert_(not obj.flags['FORTRAN'] and obj.flags['CONTIGUOUS']) + shape = obj.shape + a = self.array(shape, intent.inplace, obj) + assert_(obj[1][2] == a.arr[1][2], repr((obj, a.arr))) + a.arr[1][2] = 54 + assert_(obj[1][2] == a.arr[1][2] == + np.array(54, dtype=self.type.dtype), repr((obj, a.arr))) + assert_(a.arr is obj) + assert_(obj.flags['FORTRAN']) # obj attributes changed inplace! + assert_(not obj.flags['CONTIGUOUS']) + assert_(obj.dtype.type is self.type.type) # obj changed inplace! diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/test_assumed_shape.py b/MLPY/Lib/site-packages/numpy/f2py/tests/test_assumed_shape.py new file mode 100644 index 0000000000000000000000000000000000000000..582c3399136f8953291b55d9321afbb8bd4b2512 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/test_assumed_shape.py @@ -0,0 +1,53 @@ +import os +import pytest +import tempfile + +from numpy.testing import assert_ +from . import util + + +def _path(*a): + return os.path.join(*((os.path.dirname(__file__),) + a)) + + +class TestAssumedShapeSumExample(util.F2PyTest): + sources = [_path('src', 'assumed_shape', 'foo_free.f90'), + _path('src', 'assumed_shape', 'foo_use.f90'), + _path('src', 'assumed_shape', 'precision.f90'), + _path('src', 'assumed_shape', 'foo_mod.f90'), + _path('src', 'assumed_shape', '.f2py_f2cmap'), + ] + + @pytest.mark.slow + def test_all(self): + r = self.module.fsum([1, 2]) + assert_(r == 3, repr(r)) + r = self.module.sum([1, 2]) + assert_(r == 3, repr(r)) + r = self.module.sum_with_use([1, 2]) + assert_(r == 3, repr(r)) + + r = self.module.mod.sum([1, 2]) + assert_(r == 3, repr(r)) + r = self.module.mod.fsum([1, 2]) + assert_(r == 3, repr(r)) + + +class TestF2cmapOption(TestAssumedShapeSumExample): + def setup(self): + # Use a custom file name for .f2py_f2cmap + self.sources = list(self.sources) + f2cmap_src = self.sources.pop(-1) + + self.f2cmap_file = tempfile.NamedTemporaryFile(delete=False) + with open(f2cmap_src, 'rb') as f: + self.f2cmap_file.write(f.read()) + self.f2cmap_file.close() + + self.sources.append(self.f2cmap_file.name) + self.options = ["--f2cmap", self.f2cmap_file.name] + + super().setup() + + def teardown(self): + os.unlink(self.f2cmap_file.name) diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/test_block_docstring.py b/MLPY/Lib/site-packages/numpy/f2py/tests/test_block_docstring.py new file mode 100644 index 0000000000000000000000000000000000000000..75b206982cbd925d5a210f81785033187daec89f --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/test_block_docstring.py @@ -0,0 +1,23 @@ +import sys +import pytest +from . 
import util + +from numpy.testing import assert_equal, IS_PYPY + +class TestBlockDocString(util.F2PyTest): + code = """ + SUBROUTINE FOO() + INTEGER BAR(2, 3) + + COMMON /BLOCK/ BAR + RETURN + END + """ + + @pytest.mark.skipif(sys.platform=='win32', + reason='Fails with MinGW64 Gfortran (Issue #9673)') + @pytest.mark.xfail(IS_PYPY, + reason="PyPy cannot modify tp_doc after PyType_Ready") + def test_block_docstring(self): + expected = "bar : 'i'-array(2,3)\n" + assert_equal(self.module.block.__doc__, expected) diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/test_callback.py b/MLPY/Lib/site-packages/numpy/f2py/tests/test_callback.py new file mode 100644 index 0000000000000000000000000000000000000000..c9a3f658df350606df633bc977ed6f5cf1e23f9a --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/test_callback.py @@ -0,0 +1,326 @@ +import math +import textwrap +import sys +import pytest +import threading +import traceback +import time +import random + +import numpy as np +from numpy.testing import assert_, assert_equal, IS_PYPY +from . import util + + +class TestF77Callback(util.F2PyTest): + code = """ + subroutine t(fun,a) + integer a +cf2py intent(out) a + external fun + call fun(a) + end + + subroutine func(a) +cf2py intent(in,out) a + integer a + a = a + 11 + end + + subroutine func0(a) +cf2py intent(out) a + integer a + a = 11 + end + + subroutine t2(a) +cf2py intent(callback) fun + integer a +cf2py intent(out) a + external fun + call fun(a) + end + + subroutine string_callback(callback, a) + external callback + double precision callback + double precision a + character*1 r +cf2py intent(out) a + r = 'r' + a = callback(r) + end + + subroutine string_callback_array(callback, cu, lencu, a) + external callback + integer callback + integer lencu + character*8 cu(lencu) + integer a +cf2py intent(out) a + + a = callback(cu, lencu) + end + + subroutine hidden_callback(a, r) + external global_f +cf2py intent(callback, hide) global_f + integer a, r, global_f +cf2py intent(out) r + r = global_f(a) + end + + subroutine hidden_callback2(a, r) + external global_f + integer a, r, global_f +cf2py intent(out) r + r = global_f(a) + end + """ + + @pytest.mark.parametrize('name', 't,t2'.split(',')) + def test_all(self, name): + self.check_function(name) + + @pytest.mark.xfail(IS_PYPY, + reason="PyPy cannot modify tp_doc after PyType_Ready") + def test_docstring(self): + expected = textwrap.dedent("""\ + a = t(fun,[fun_extra_args]) + + Wrapper for ``t``. 
+ + Parameters + ---------- + fun : call-back function + + Other Parameters + ---------------- + fun_extra_args : input tuple, optional + Default: () + + Returns + ------- + a : int + + Notes + ----- + Call-back functions:: + + def fun(): return a + Return objects: + a : int + """) + assert_equal(self.module.t.__doc__, expected) + + def check_function(self, name): + t = getattr(self.module, name) + r = t(lambda: 4) + assert_(r == 4, repr(r)) + r = t(lambda a: 5, fun_extra_args=(6,)) + assert_(r == 5, repr(r)) + r = t(lambda a: a, fun_extra_args=(6,)) + assert_(r == 6, repr(r)) + r = t(lambda a: 5 + a, fun_extra_args=(7,)) + assert_(r == 12, repr(r)) + r = t(lambda a: math.degrees(a), fun_extra_args=(math.pi,)) + assert_(r == 180, repr(r)) + r = t(math.degrees, fun_extra_args=(math.pi,)) + assert_(r == 180, repr(r)) + + r = t(self.module.func, fun_extra_args=(6,)) + assert_(r == 17, repr(r)) + r = t(self.module.func0) + assert_(r == 11, repr(r)) + r = t(self.module.func0._cpointer) + assert_(r == 11, repr(r)) + + class A: + + def __call__(self): + return 7 + + def mth(self): + return 9 + a = A() + r = t(a) + assert_(r == 7, repr(r)) + r = t(a.mth) + assert_(r == 9, repr(r)) + + @pytest.mark.skipif(sys.platform=='win32', + reason='Fails with MinGW64 Gfortran (Issue #9673)') + def test_string_callback(self): + + def callback(code): + if code == 'r': + return 0 + else: + return 1 + + f = getattr(self.module, 'string_callback') + r = f(callback) + assert_(r == 0, repr(r)) + + @pytest.mark.skipif(sys.platform=='win32', + reason='Fails with MinGW64 Gfortran (Issue #9673)') + def test_string_callback_array(self): + # See gh-10027 + cu = np.zeros((1, 8), 'S1') + + def callback(cu, lencu): + if cu.shape != (lencu, 8): + return 1 + if cu.dtype != 'S1': + return 2 + if not np.all(cu == b''): + return 3 + return 0 + + f = getattr(self.module, 'string_callback_array') + res = f(callback, cu, len(cu)) + assert_(res == 0, repr(res)) + + def test_threadsafety(self): + # Segfaults if the callback handling is not threadsafe + + errors = [] + + def cb(): + # Sleep here to make it more likely for another thread + # to call their callback at the same time. 
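+            # (Note added for clarity, not in the upstream file: the sleep +            # widens the race window, so overlapping callback invocations +            # from the runner threads become very likely, and the reentrant +            # call below then exercises nested callback handling.)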
+ time.sleep(1e-3) + + # Check reentrancy + r = self.module.t(lambda: 123) + assert_(r == 123) + + return 42 + + def runner(name): + try: + for j in range(50): + r = self.module.t(cb) + assert_(r == 42) + self.check_function(name) + except Exception: + errors.append(traceback.format_exc()) + + threads = [threading.Thread(target=runner, args=(arg,)) + for arg in ("t", "t2") for n in range(20)] + + for t in threads: + t.start() + + for t in threads: + t.join() + + errors = "\n\n".join(errors) + if errors: + raise AssertionError(errors) + + def test_hidden_callback(self): + try: + self.module.hidden_callback(2) + except Exception as msg: + assert_(str(msg).startswith('Callback global_f not defined')) + + try: + self.module.hidden_callback2(2) + except Exception as msg: + assert_(str(msg).startswith('cb: Callback global_f not defined')) + + self.module.global_f = lambda x: x + 1 + r = self.module.hidden_callback(2) + assert_(r == 3) + + self.module.global_f = lambda x: x + 2 + r = self.module.hidden_callback(2) + assert_(r == 4) + + del self.module.global_f + try: + self.module.hidden_callback(2) + except Exception as msg: + assert_(str(msg).startswith('Callback global_f not defined')) + + self.module.global_f = lambda x=0: x + 3 + r = self.module.hidden_callback(2) + assert_(r == 5) + + # reproducer of gh18341 + r = self.module.hidden_callback2(2) + assert_(r == 3) + + +class TestF77CallbackPythonTLS(TestF77Callback): + """ + Callback tests using Python thread-local storage instead of + compiler-provided + """ + options = ["-DF2PY_USE_PYTHON_TLS"] + + +class TestF90Callback(util.F2PyTest): + + suffix = '.f90' + + code = textwrap.dedent( + """ + function gh17797(f, y) result(r) + external f + integer(8) :: r, f + integer(8), dimension(:) :: y + r = f(0) + r = r + sum(y) + end function gh17797 + """) + + def test_gh17797(self): + + def incr(x): + return x + 123 + + y = np.array([1, 2, 3], dtype=np.int64) + r = self.module.gh17797(incr, y) + assert r == 123 + 1 + 2 + 3 + + +class TestGH18335(util.F2PyTest): + """The reproduction of the reported issue requires specific input that + extensions may break the issue conditions, so the reproducer is + implemented as a separate test class. Do not extend this test with + other tests! + """ + + suffix = '.f90' + + code = textwrap.dedent( + """ + ! When gh18335_workaround is defined as an extension, + ! the issue cannot be reproduced. + !subroutine gh18335_workaround(f, y) + ! implicit none + ! external f + ! integer(kind=1) :: y(1) + ! call f(y) + !end subroutine gh18335_workaround + + function gh18335(f) result (r) + implicit none + external f + integer(kind=1) :: y(1), r + y(1) = 123 + call f(y) + r = y(1) + end function gh18335 + """) + + def test_gh18335(self): + + def foo(x): + x[0] += 1 + + y = np.array([1, 2, 3], dtype=np.int8) + r = self.module.gh18335(foo) + assert r == 123 + 1 diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/test_common.py b/MLPY/Lib/site-packages/numpy/f2py/tests/test_common.py new file mode 100644 index 0000000000000000000000000000000000000000..5cf0eab6da17dd767f6dde7fe8aab2f610e6a90e --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/test_common.py @@ -0,0 +1,25 @@ +import os +import sys +import pytest + +import numpy as np +from . 
import util + +from numpy.testing import assert_array_equal + +def _path(*a): + return os.path.join(*((os.path.dirname(__file__),) + a)) + +class TestCommonBlock(util.F2PyTest): + sources = [_path('src', 'common', 'block.f')] + + @pytest.mark.skipif(sys.platform=='win32', + reason='Fails with MinGW64 Gfortran (Issue #9673)') + def test_common_block(self): + self.module.initcb() + assert_array_equal(self.module.block.long_bn, + np.array(1.0, dtype=np.float64)) + assert_array_equal(self.module.block.string_bn, + np.array('2', dtype='|S1')) + assert_array_equal(self.module.block.ok, + np.array(3, dtype=np.int32)) diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/test_compile_function.py b/MLPY/Lib/site-packages/numpy/f2py/tests/test_compile_function.py new file mode 100644 index 0000000000000000000000000000000000000000..a5b034676ebb345b706e994d705c51443d075da8 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/test_compile_function.py @@ -0,0 +1,125 @@ +"""See https://github.com/numpy/numpy/pull/11937. + +""" +import sys +import os +import uuid +from importlib import import_module +import pytest + +import numpy.f2py + +from numpy.testing import assert_equal +from . import util + + +def setup_module(): + if not util.has_c_compiler(): + pytest.skip("Needs C compiler") + if not util.has_f77_compiler(): + pytest.skip('Needs FORTRAN 77 compiler') + + +# extra_args can be a list (since gh-11937) or string. +# also test absence of extra_args +@pytest.mark.parametrize( + "extra_args", [['--noopt', '--debug'], '--noopt --debug', ''] + ) +@pytest.mark.leaks_references(reason="Imported module seems never deleted.") +def test_f2py_init_compile(extra_args): + # flush through the f2py __init__ compile() function code path as a + # crude test for input handling following migration from + # exec_command() to subprocess.check_output() in gh-11937 + + # the Fortran 77 syntax requires 6 spaces before any commands, but + # more space may be added. + fsource = """ + integer function foo() + foo = 10 + 5 + return + end + """ + # use various helper functions in util.py to enable robust build / + # compile and reimport cycle in test suite + moddir = util.get_module_dir() + modname = util.get_temp_module_name() + + cwd = os.getcwd() + target = os.path.join(moddir, str(uuid.uuid4()) + '.f') + # try running compile() with and without a source_fn provided so + # that the code path where a temporary file for writing Fortran + # source is created is also explored + for source_fn in [target, None]: + # mimic the path changing behavior used by build_module() in + # util.py, but don't actually use build_module() because it has + # its own invocation of subprocess that circumvents the + # f2py.compile code block under test + try: + os.chdir(moddir) + ret_val = numpy.f2py.compile( + fsource, + modulename=modname, + extra_args=extra_args, + source_fn=source_fn + ) + finally: + os.chdir(cwd) + + # check for compile success return value + assert_equal(ret_val, 0) + + # we are not currently able to import the Python-Fortran + # interface module on Windows / Appveyor, even though we do get + # successful compilation on that platform with Python 3.x + if sys.platform != 'win32': + # check for sensible result of Fortran function; that means + # we can import the module name in Python and retrieve the + # result of the sum operation + return_check = import_module(modname) + calc_result = return_check.foo() + assert_equal(calc_result, 15) + # Removal from sys.modules is not strictly necessary. 
Even with + # removal, the module (dict) stays alive. + del sys.modules[modname] + + +def test_f2py_init_compile_failure(): + # verify an appropriate integer status value returned by + # f2py.compile() when invalid Fortran is provided + ret_val = numpy.f2py.compile(b"invalid") + assert_equal(ret_val, 1) + + +def test_f2py_init_compile_bad_cmd(): + # verify that usage of invalid command in f2py.compile() returns + # status value of 127 for historic consistency with exec_command() + # error handling + + # patch the sys Python exe path temporarily to induce an OSError + # downstream NOTE: how bad of an idea is this patching? + try: + temp = sys.executable + sys.executable = 'does not exist' + + # the OSError should take precedence over invalid Fortran + ret_val = numpy.f2py.compile(b"invalid") + assert_equal(ret_val, 127) + finally: + sys.executable = temp + + +@pytest.mark.parametrize('fsource', + ['program test_f2py\nend program test_f2py', + b'program test_f2py\nend program test_f2py',]) +def test_compile_from_strings(tmpdir, fsource): + # Make sure we can compile str and bytes gh-12796 + cwd = os.getcwd() + try: + os.chdir(str(tmpdir)) + ret_val = numpy.f2py.compile( + fsource, + modulename='test_compile_from_strings', + extension='.f90') + assert_equal(ret_val, 0) + finally: + os.chdir(cwd) diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/test_crackfortran.py b/MLPY/Lib/site-packages/numpy/f2py/tests/test_crackfortran.py new file mode 100644 index 0000000000000000000000000000000000000000..2016b0931545f77c2e03139dbf6d2be2faf48fa2 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/test_crackfortran.py @@ -0,0 +1,141 @@ +import numpy as np +from numpy.testing import assert_array_equal +from . import util +from numpy.f2py import crackfortran +import tempfile +import textwrap + + +class TestNoSpace(util.F2PyTest): + # issue gh-15035: add handling for endsubroutine, endfunction with no space + # between "end" and the block name + code = """ + subroutine subb(k) + real(8), intent(inout) :: k(:) + k=k+1 + endsubroutine + + subroutine subc(w,k) + real(8), intent(in) :: w(:) + real(8), intent(out) :: k(size(w)) + k=w+1 + endsubroutine + + function t0(value) + character value + character t0 + t0 = value + endfunction + """ + + def test_module(self): + k = np.array([1, 2, 3], dtype=np.float64) + w = np.array([1, 2, 3], dtype=np.float64) + self.module.subb(k) + assert_array_equal(k, w + 1) + self.module.subc([w, k]) + assert_array_equal(k, w + 1) + assert self.module.t0(23) == b'2' + +class TestPublicPrivate(): + def test_defaultPrivate(self, tmp_path): + f_path = tmp_path / "mod.f90" + with f_path.open('w') as ff: + ff.write(textwrap.dedent("""\ + module foo + private + integer :: a + public :: setA + integer :: b + contains + subroutine setA(v) + integer, intent(in) :: v + a = v + end subroutine setA + end module foo + """)) + mod = crackfortran.crackfortran([str(f_path)]) + assert len(mod) == 1 + mod = mod[0] + assert 'private' in mod['vars']['a']['attrspec'] + assert 'public' not in mod['vars']['a']['attrspec'] + assert 'private' in mod['vars']['b']['attrspec'] + assert 'public' not in mod['vars']['b']['attrspec'] + assert 'private' not in mod['vars']['seta']['attrspec'] + assert 'public' in mod['vars']['seta']['attrspec'] + + def test_defaultPublic(self, tmp_path): + f_path = tmp_path / "mod.f90" + with f_path.open('w') as ff: + ff.write(textwrap.dedent("""\ + module foo + public + integer, private :: a + public :: setA + contains + subroutine setA(v) + integer, intent(in) :: v + a 
= v + end subroutine setA + end module foo + """)) + mod = crackfortran.crackfortran([str(f_path)]) + assert len(mod) == 1 + mod = mod[0] + assert 'private' in mod['vars']['a']['attrspec'] + assert 'public' not in mod['vars']['a']['attrspec'] + assert 'private' not in mod['vars']['seta']['attrspec'] + assert 'public' in mod['vars']['seta']['attrspec'] + +class TestExternal(util.F2PyTest): + # issue gh-17859: add external attribute support + code = """ + integer(8) function external_as_statement(fcn) + implicit none + external fcn + integer(8) :: fcn + external_as_statement = fcn(0) + end + + integer(8) function external_as_attribute(fcn) + implicit none + integer(8), external :: fcn + external_as_attribute = fcn(0) + end + """ + + def test_external_as_statement(self): + def incr(x): + return x + 123 + r = self.module.external_as_statement(incr) + assert r == 123 + + def test_external_as_attribute(self): + def incr(x): + return x + 123 + r = self.module.external_as_attribute(incr) + assert r == 123 + +class TestCrackFortran(util.F2PyTest): + + suffix = '.f90' + + code = textwrap.dedent(""" + subroutine gh2848( & + ! first 2 parameters + par1, par2,& + ! last 2 parameters + par3, par4) + + integer, intent(in) :: par1, par2 + integer, intent(out) :: par3, par4 + + par3 = par1 + par4 = par2 + + end subroutine gh2848 + """) + + def test_gh2848(self): + r = self.module.gh2848(1, 2) + assert r == (1, 2) diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/test_kind.py b/MLPY/Lib/site-packages/numpy/f2py/tests/test_kind.py new file mode 100644 index 0000000000000000000000000000000000000000..59e396517fc92aabc44192dacc09d4fb1d58681a --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/test_kind.py @@ -0,0 +1,32 @@ +import os +import pytest + +from numpy.testing import assert_ +from numpy.f2py.crackfortran import ( + _selected_int_kind_func as selected_int_kind, + _selected_real_kind_func as selected_real_kind + ) +from . import util + + +def _path(*a): + return os.path.join(*((os.path.dirname(__file__),) + a)) + + +class TestKind(util.F2PyTest): + sources = [_path('src', 'kind', 'foo.f90')] + + @pytest.mark.slow + def test_all(self): + selectedrealkind = self.module.selectedrealkind + selectedintkind = self.module.selectedintkind + + for i in range(40): + assert_(selectedintkind(i) in [selected_int_kind(i), -1], + 'selectedintkind(%s): expected %r but got %r' % + (i, selected_int_kind(i), selectedintkind(i))) + + for i in range(20): + assert_(selectedrealkind(i) in [selected_real_kind(i), -1], + 'selectedrealkind(%s): expected %r but got %r' % + (i, selected_real_kind(i), selectedrealkind(i))) diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/test_mixed.py b/MLPY/Lib/site-packages/numpy/f2py/tests/test_mixed.py new file mode 100644 index 0000000000000000000000000000000000000000..f2afdde76fafee4430f530426cfb41d602b587ad --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/test_mixed.py @@ -0,0 +1,35 @@ +import os +import textwrap +import pytest + +from numpy.testing import assert_, assert_equal, IS_PYPY +from . 
import util + + +def _path(*a): + return os.path.join(*((os.path.dirname(__file__),) + a)) + + +class TestMixed(util.F2PyTest): + sources = [_path('src', 'mixed', 'foo.f'), + _path('src', 'mixed', 'foo_fixed.f90'), + _path('src', 'mixed', 'foo_free.f90')] + + def test_all(self): + assert_(self.module.bar11() == 11) + assert_(self.module.foo_fixed.bar12() == 12) + assert_(self.module.foo_free.bar13() == 13) + + @pytest.mark.xfail(IS_PYPY, + reason="PyPy cannot modify tp_doc after PyType_Ready") + def test_docstring(self): + expected = textwrap.dedent("""\ + a = bar11() + + Wrapper for ``bar11``. + + Returns + ------- + a : int + """) + assert_equal(self.module.bar11.__doc__, expected) diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/test_module_doc.py b/MLPY/Lib/site-packages/numpy/f2py/tests/test_module_doc.py new file mode 100644 index 0000000000000000000000000000000000000000..1580277640732cce08b7e6056a4877a1f1611863 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/test_module_doc.py @@ -0,0 +1,30 @@ +import os +import sys +import pytest +import textwrap + +from . import util +from numpy.testing import assert_equal, IS_PYPY + + +def _path(*a): + return os.path.join(*((os.path.dirname(__file__),) + a)) + + +class TestModuleDocString(util.F2PyTest): + sources = [_path('src', 'module_data', 'module_data_docstring.f90')] + + @pytest.mark.skipif(sys.platform=='win32', + reason='Fails with MinGW64 Gfortran (Issue #9673)') + @pytest.mark.xfail(IS_PYPY, + reason="PyPy cannot modify tp_doc after PyType_Ready") + def test_module_docstring(self): + assert_equal(self.module.mod.__doc__, + textwrap.dedent('''\ + i : 'i'-scalar + x : 'i'-array(4) + a : 'f'-array(2,3) + b : 'f'-array(-1,-1), not allocated\x00 + foo()\n + Wrapper for ``foo``.\n\n''') + ) diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/test_parameter.py b/MLPY/Lib/site-packages/numpy/f2py/tests/test_parameter.py new file mode 100644 index 0000000000000000000000000000000000000000..4337df270ff946705ca699ec0a1a51f8d5b3cc09 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/test_parameter.py @@ -0,0 +1,116 @@ +import os +import pytest + +import numpy as np +from numpy.testing import assert_raises, assert_equal + +from . 
import util + + +def _path(*a): + return os.path.join(*((os.path.dirname(__file__),) + a)) + + +class TestParameters(util.F2PyTest): + # Check that Fortran parameter constants are wrapped correctly + sources = [_path('src', 'parameter', 'constant_real.f90'), + _path('src', 'parameter', 'constant_integer.f90'), + _path('src', 'parameter', 'constant_both.f90'), + _path('src', 'parameter', 'constant_compound.f90'), + _path('src', 'parameter', 'constant_non_compound.f90'), + ] + + @pytest.mark.slow + def test_constant_real_single(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float32)[::2] + assert_raises(ValueError, self.module.foo_single, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float32) + self.module.foo_single(x) + assert_equal(x, [0 + 1 + 2*3, 1, 2]) + + @pytest.mark.slow + def test_constant_real_double(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float64)[::2] + assert_raises(ValueError, self.module.foo_double, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float64) + self.module.foo_double(x) + assert_equal(x, [0 + 1 + 2*3, 1, 2]) + + @pytest.mark.slow + def test_constant_compound_int(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.int32)[::2] + assert_raises(ValueError, self.module.foo_compound_int, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.int32) + self.module.foo_compound_int(x) + assert_equal(x, [0 + 1 + 2*6, 1, 2]) + + @pytest.mark.slow + def test_constant_non_compound_int(self): + # check values + x = np.arange(4, dtype=np.int32) + self.module.foo_non_compound_int(x) + assert_equal(x, [0 + 1 + 2 + 3*4, 1, 2, 3]) + + @pytest.mark.slow + def test_constant_integer_int(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.int32)[::2] + assert_raises(ValueError, self.module.foo_int, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.int32) + self.module.foo_int(x) + assert_equal(x, [0 + 1 + 2*3, 1, 2]) + + @pytest.mark.slow + def test_constant_integer_long(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.int64)[::2] + assert_raises(ValueError, self.module.foo_long, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.int64) + self.module.foo_long(x) + assert_equal(x, [0 + 1 + 2*3, 1, 2]) + + @pytest.mark.slow + def test_constant_both(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float64)[::2] + assert_raises(ValueError, self.module.foo, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float64) + self.module.foo(x) + assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3]) + + @pytest.mark.slow + def test_constant_no(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float64)[::2] + assert_raises(ValueError, self.module.foo_no, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float64) + self.module.foo_no(x) + assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3]) + + @pytest.mark.slow + def test_constant_sum(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float64)[::2] + assert_raises(ValueError, self.module.foo_sum, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float64) + self.module.foo_sum(x) + assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3]) diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/test_quoted_character.py b/MLPY/Lib/site-packages/numpy/f2py/tests/test_quoted_character.py new file mode 100644 index 
0000000000000000000000000000000000000000..b0055f0a478913ca99fbcfb5044207ce1a456290 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/test_quoted_character.py @@ -0,0 +1,32 @@ +"""See https://github.com/numpy/numpy/pull/10676. + +""" +import sys +import pytest + +from numpy.testing import assert_equal +from . import util + + +class TestQuotedCharacter(util.F2PyTest): + code = """ + SUBROUTINE FOO(OUT1, OUT2, OUT3, OUT4, OUT5, OUT6) + CHARACTER SINGLE, DOUBLE, SEMICOL, EXCLA, OPENPAR, CLOSEPAR + PARAMETER (SINGLE="'", DOUBLE='"', SEMICOL=';', EXCLA="!", + 1 OPENPAR="(", CLOSEPAR=")") + CHARACTER OUT1, OUT2, OUT3, OUT4, OUT5, OUT6 +Cf2py intent(out) OUT1, OUT2, OUT3, OUT4, OUT5, OUT6 + OUT1 = SINGLE + OUT2 = DOUBLE + OUT3 = SEMICOL + OUT4 = EXCLA + OUT5 = OPENPAR + OUT6 = CLOSEPAR + RETURN + END + """ + + @pytest.mark.skipif(sys.platform=='win32', + reason='Fails with MinGW64 Gfortran (Issue #9673)') + def test_quoted_character(self): + assert_equal(self.module.foo(), (b"'", b'"', b';', b'!', b'(', b')')) diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/test_regression.py b/MLPY/Lib/site-packages/numpy/f2py/tests/test_regression.py new file mode 100644 index 0000000000000000000000000000000000000000..36f09a9c550b0c5569ed73f6c4ad7e7259b158e1 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/test_regression.py @@ -0,0 +1,55 @@ +import os +import pytest + +import numpy as np +from numpy.testing import assert_, assert_raises, assert_equal, assert_string_equal + +from . import util + + +def _path(*a): + return os.path.join(*((os.path.dirname(__file__),) + a)) + + +class TestIntentInOut(util.F2PyTest): + # Check that intent(in out) translates as intent(inout) + sources = [_path('src', 'regression', 'inout.f90')] + + @pytest.mark.slow + def test_inout(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float32)[::2] + assert_raises(ValueError, self.module.foo, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float32) + self.module.foo(x) + assert_equal(x, [3, 1, 2]) + + +class TestNumpyVersionAttribute(util.F2PyTest): + # Check that the attribute __f2py_numpy_version__ is present + # in the compiled module and that it has the value np.__version__. + sources = [_path('src', 'regression', 'inout.f90')] + + @pytest.mark.slow + def test_numpy_version_attribute(self): + + # Check that self.module has an attribute named "__f2py_numpy_version__" + assert_(hasattr(self.module, "__f2py_numpy_version__"), + msg="Fortran module does not have __f2py_numpy_version__") + + # Check that the attribute __f2py_numpy_version__ is a string + assert_(isinstance(self.module.__f2py_numpy_version__, str), + msg="__f2py_numpy_version__ is not a string") + + # Check that __f2py_numpy_version__ has the value numpy.__version__ + assert_string_equal(np.__version__, self.module.__f2py_numpy_version__) + + +def test_include_path(): + incdir = np.f2py.get_include() + fnames_in_dir = os.listdir(incdir) + for fname in ('fortranobject.c', 'fortranobject.h'): + assert fname in fnames_in_dir + diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/test_return_character.py b/MLPY/Lib/site-packages/numpy/f2py/tests/test_return_character.py new file mode 100644 index 0000000000000000000000000000000000000000..e6a0eb32e2c9e7aa5b81ee2c8d576b475d9089a6 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/test_return_character.py @@ -0,0 +1,145 @@ +import pytest + +from numpy import array +from numpy.testing import assert_ +from . 
import util +import platform +IS_S390X = platform.machine() == 's390x' + + +class TestReturnCharacter(util.F2PyTest): + + def check_function(self, t, tname): + if tname in ['t0', 't1', 's0', 's1']: + assert_(t(23) == b'2') + r = t('ab') + assert_(r == b'a', repr(r)) + r = t(array('ab')) + assert_(r == b'a', repr(r)) + r = t(array(77, 'u1')) + assert_(r == b'M', repr(r)) + #assert_(_raises(ValueError, t, array([77,87]))) + #assert_(_raises(ValueError, t, array(77))) + elif tname in ['ts', 'ss']: + assert_(t(23) == b'23 ', repr(t(23))) + assert_(t('123456789abcdef') == b'123456789a') + elif tname in ['t5', 's5']: + assert_(t(23) == b'23 ', repr(t(23))) + assert_(t('ab') == b'ab ', repr(t('ab'))) + assert_(t('123456789abcdef') == b'12345') + else: + raise NotImplementedError + + +class TestF77ReturnCharacter(TestReturnCharacter): + code = """ + function t0(value) + character value + character t0 + t0 = value + end + function t1(value) + character*1 value + character*1 t1 + t1 = value + end + function t5(value) + character*5 value + character*5 t5 + t5 = value + end + function ts(value) + character*(*) value + character*(*) ts + ts = value + end + + subroutine s0(t0,value) + character value + character t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s1(t1,value) + character*1 value + character*1 t1 +cf2py intent(out) t1 + t1 = value + end + subroutine s5(t5,value) + character*5 value + character*5 t5 +cf2py intent(out) t5 + t5 = value + end + subroutine ss(ts,value) + character*(*) value + character*10 ts +cf2py intent(out) ts + ts = value + end + """ + + @pytest.mark.xfail(IS_S390X, reason="callback returns ' '") + @pytest.mark.parametrize('name', 't0,t1,t5,s0,s1,s5,ss'.split(',')) + def test_all(self, name): + self.check_function(getattr(self.module, name), name) + + +class TestF90ReturnCharacter(TestReturnCharacter): + suffix = ".f90" + code = """ +module f90_return_char + contains + function t0(value) + character :: value + character :: t0 + t0 = value + end function t0 + function t1(value) + character(len=1) :: value + character(len=1) :: t1 + t1 = value + end function t1 + function t5(value) + character(len=5) :: value + character(len=5) :: t5 + t5 = value + end function t5 + function ts(value) + character(len=*) :: value + character(len=10) :: ts + ts = value + end function ts + + subroutine s0(t0,value) + character :: value + character :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s1(t1,value) + character(len=1) :: value + character(len=1) :: t1 +!f2py intent(out) t1 + t1 = value + end subroutine s1 + subroutine s5(t5,value) + character(len=5) :: value + character(len=5) :: t5 +!f2py intent(out) t5 + t5 = value + end subroutine s5 + subroutine ss(ts,value) + character(len=*) :: value + character(len=10) :: ts +!f2py intent(out) ts + ts = value + end subroutine ss +end module f90_return_char + """ + + @pytest.mark.xfail(IS_S390X, reason="callback returns ' '") + @pytest.mark.parametrize('name', 't0,t1,t5,ts,s0,s1,s5,ss'.split(',')) + def test_all(self, name): + self.check_function(getattr(self.module.f90_return_char, name), name) diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/test_return_complex.py b/MLPY/Lib/site-packages/numpy/f2py/tests/test_return_complex.py new file mode 100644 index 0000000000000000000000000000000000000000..7177dc137f4e421b59112e9d03ca25fdeabfa17a --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/test_return_complex.py @@ -0,0 +1,163 @@ +import pytest + +from numpy import array +from numpy.testing import assert_, 
assert_raises +from . import util + + +class TestReturnComplex(util.F2PyTest): + + def check_function(self, t, tname): + if tname in ['t0', 't8', 's0', 's8']: + err = 1e-5 + else: + err = 0.0 + assert_(abs(t(234j) - 234.0j) <= err) + assert_(abs(t(234.6) - 234.6) <= err) + assert_(abs(t(234) - 234.0) <= err) + assert_(abs(t(234.6 + 3j) - (234.6 + 3j)) <= err) + #assert_( abs(t('234')-234.)<=err) + #assert_( abs(t('234.6')-234.6)<=err) + assert_(abs(t(-234) + 234.) <= err) + assert_(abs(t([234]) - 234.) <= err) + assert_(abs(t((234,)) - 234.) <= err) + assert_(abs(t(array(234)) - 234.) <= err) + assert_(abs(t(array(23 + 4j, 'F')) - (23 + 4j)) <= err) + assert_(abs(t(array([234])) - 234.) <= err) + assert_(abs(t(array([[234]])) - 234.) <= err) + assert_(abs(t(array([234], 'b')) + 22.) <= err) + assert_(abs(t(array([234], 'h')) - 234.) <= err) + assert_(abs(t(array([234], 'i')) - 234.) <= err) + assert_(abs(t(array([234], 'l')) - 234.) <= err) + assert_(abs(t(array([234], 'q')) - 234.) <= err) + assert_(abs(t(array([234], 'f')) - 234.) <= err) + assert_(abs(t(array([234], 'd')) - 234.) <= err) + assert_(abs(t(array([234 + 3j], 'F')) - (234 + 3j)) <= err) + assert_(abs(t(array([234], 'D')) - 234.) <= err) + + #assert_raises(TypeError, t, array([234], 'a1')) + assert_raises(TypeError, t, 'abc') + + assert_raises(IndexError, t, []) + assert_raises(IndexError, t, ()) + + assert_raises(TypeError, t, t) + assert_raises(TypeError, t, {}) + + try: + r = t(10 ** 400) + assert_(repr(r) in ['(inf+0j)', '(Infinity+0j)'], repr(r)) + except OverflowError: + pass + + +class TestF77ReturnComplex(TestReturnComplex): + code = """ + function t0(value) + complex value + complex t0 + t0 = value + end + function t8(value) + complex*8 value + complex*8 t8 + t8 = value + end + function t16(value) + complex*16 value + complex*16 t16 + t16 = value + end + function td(value) + double complex value + double complex td + td = value + end + + subroutine s0(t0,value) + complex value + complex t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s8(t8,value) + complex*8 value + complex*8 t8 +cf2py intent(out) t8 + t8 = value + end + subroutine s16(t16,value) + complex*16 value + complex*16 t16 +cf2py intent(out) t16 + t16 = value + end + subroutine sd(td,value) + double complex value + double complex td +cf2py intent(out) td + td = value + end + """ + + @pytest.mark.parametrize('name', 't0,t8,t16,td,s0,s8,s16,sd'.split(',')) + def test_all(self, name): + self.check_function(getattr(self.module, name), name) + + +class TestF90ReturnComplex(TestReturnComplex): + suffix = ".f90" + code = """ +module f90_return_complex + contains + function t0(value) + complex :: value + complex :: t0 + t0 = value + end function t0 + function t8(value) + complex(kind=4) :: value + complex(kind=4) :: t8 + t8 = value + end function t8 + function t16(value) + complex(kind=8) :: value + complex(kind=8) :: t16 + t16 = value + end function t16 + function td(value) + double complex :: value + double complex :: td + td = value + end function td + + subroutine s0(t0,value) + complex :: value + complex :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s8(t8,value) + complex(kind=4) :: value + complex(kind=4) :: t8 +!f2py intent(out) t8 + t8 = value + end subroutine s8 + subroutine s16(t16,value) + complex(kind=8) :: value + complex(kind=8) :: t16 +!f2py intent(out) t16 + t16 = value + end subroutine s16 + subroutine sd(td,value) + double complex :: value + double complex :: td +!f2py intent(out) td + td = value + end 
subroutine sd +end module f90_return_complex + """ + + @pytest.mark.parametrize('name', 't0,t8,t16,td,s0,s8,s16,sd'.split(',')) + def test_all(self, name): + self.check_function(getattr(self.module.f90_return_complex, name), name) diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/test_return_integer.py b/MLPY/Lib/site-packages/numpy/f2py/tests/test_return_integer.py new file mode 100644 index 0000000000000000000000000000000000000000..e3a9b954c23496d583932133dff075a2e4b9a4e0 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/test_return_integer.py @@ -0,0 +1,175 @@ +import pytest + +from numpy import array +from numpy.testing import assert_, assert_raises +from . import util + + +class TestReturnInteger(util.F2PyTest): + + def check_function(self, t, tname): + assert_(t(123) == 123, repr(t(123))) + assert_(t(123.6) == 123) + assert_(t('123') == 123) + assert_(t(-123) == -123) + assert_(t([123]) == 123) + assert_(t((123,)) == 123) + assert_(t(array(123)) == 123) + assert_(t(array([123])) == 123) + assert_(t(array([[123]])) == 123) + assert_(t(array([123], 'b')) == 123) + assert_(t(array([123], 'h')) == 123) + assert_(t(array([123], 'i')) == 123) + assert_(t(array([123], 'l')) == 123) + assert_(t(array([123], 'B')) == 123) + assert_(t(array([123], 'f')) == 123) + assert_(t(array([123], 'd')) == 123) + + #assert_raises(ValueError, t, array([123],'S3')) + assert_raises(ValueError, t, 'abc') + + assert_raises(IndexError, t, []) + assert_raises(IndexError, t, ()) + + assert_raises(Exception, t, t) + assert_raises(Exception, t, {}) + + if tname in ['t8', 's8']: + assert_raises(OverflowError, t, 100000000000000000000000) + assert_raises(OverflowError, t, 10000000011111111111111.23) + + +class TestF77ReturnInteger(TestReturnInteger): + code = """ + function t0(value) + integer value + integer t0 + t0 = value + end + function t1(value) + integer*1 value + integer*1 t1 + t1 = value + end + function t2(value) + integer*2 value + integer*2 t2 + t2 = value + end + function t4(value) + integer*4 value + integer*4 t4 + t4 = value + end + function t8(value) + integer*8 value + integer*8 t8 + t8 = value + end + + subroutine s0(t0,value) + integer value + integer t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s1(t1,value) + integer*1 value + integer*1 t1 +cf2py intent(out) t1 + t1 = value + end + subroutine s2(t2,value) + integer*2 value + integer*2 t2 +cf2py intent(out) t2 + t2 = value + end + subroutine s4(t4,value) + integer*4 value + integer*4 t4 +cf2py intent(out) t4 + t4 = value + end + subroutine s8(t8,value) + integer*8 value + integer*8 t8 +cf2py intent(out) t8 + t8 = value + end + """ + + @pytest.mark.parametrize('name', + 't0,t1,t2,t4,t8,s0,s1,s2,s4,s8'.split(',')) + def test_all(self, name): + self.check_function(getattr(self.module, name), name) + + +class TestF90ReturnInteger(TestReturnInteger): + suffix = ".f90" + code = """ +module f90_return_integer + contains + function t0(value) + integer :: value + integer :: t0 + t0 = value + end function t0 + function t1(value) + integer(kind=1) :: value + integer(kind=1) :: t1 + t1 = value + end function t1 + function t2(value) + integer(kind=2) :: value + integer(kind=2) :: t2 + t2 = value + end function t2 + function t4(value) + integer(kind=4) :: value + integer(kind=4) :: t4 + t4 = value + end function t4 + function t8(value) + integer(kind=8) :: value + integer(kind=8) :: t8 + t8 = value + end function t8 + + subroutine s0(t0,value) + integer :: value + integer :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + 
subroutine s1(t1,value) + integer(kind=1) :: value + integer(kind=1) :: t1 +!f2py intent(out) t1 + t1 = value + end subroutine s1 + subroutine s2(t2,value) + integer(kind=2) :: value + integer(kind=2) :: t2 +!f2py intent(out) t2 + t2 = value + end subroutine s2 + subroutine s4(t4,value) + integer(kind=4) :: value + integer(kind=4) :: t4 +!f2py intent(out) t4 + t4 = value + end subroutine s4 + subroutine s8(t8,value) + integer(kind=8) :: value + integer(kind=8) :: t8 +!f2py intent(out) t8 + t8 = value + end subroutine s8 +end module f90_return_integer + """ + + @pytest.mark.parametrize('name', + 't0,t1,t2,t4,t8,s0,s1,s2,s4,s8'.split(',')) + def test_all(self, name): + self.check_function(getattr(self.module.f90_return_integer, name), name) diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/test_return_logical.py b/MLPY/Lib/site-packages/numpy/f2py/tests/test_return_logical.py new file mode 100644 index 0000000000000000000000000000000000000000..41f1b0d2a6e537228bf7445ffbbd229e55a7156c --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/test_return_logical.py @@ -0,0 +1,185 @@ +import pytest + +from numpy import array +from numpy.testing import assert_, assert_raises +from . import util + + +class TestReturnLogical(util.F2PyTest): + + def check_function(self, t): + assert_(t(True) == 1, repr(t(True))) + assert_(t(False) == 0, repr(t(False))) + assert_(t(0) == 0) + assert_(t(None) == 0) + assert_(t(0.0) == 0) + assert_(t(0j) == 0) + assert_(t(1j) == 1) + assert_(t(234) == 1) + assert_(t(234.6) == 1) + assert_(t(234.6 + 3j) == 1) + assert_(t('234') == 1) + assert_(t('aaa') == 1) + assert_(t('') == 0) + assert_(t([]) == 0) + assert_(t(()) == 0) + assert_(t({}) == 0) + assert_(t(t) == 1) + assert_(t(-234) == 1) + assert_(t(10 ** 100) == 1) + assert_(t([234]) == 1) + assert_(t((234,)) == 1) + assert_(t(array(234)) == 1) + assert_(t(array([234])) == 1) + assert_(t(array([[234]])) == 1) + assert_(t(array([234], 'b')) == 1) + assert_(t(array([234], 'h')) == 1) + assert_(t(array([234], 'i')) == 1) + assert_(t(array([234], 'l')) == 1) + assert_(t(array([234], 'f')) == 1) + assert_(t(array([234], 'd')) == 1) + assert_(t(array([234 + 3j], 'F')) == 1) + assert_(t(array([234], 'D')) == 1) + assert_(t(array(0)) == 0) + assert_(t(array([0])) == 0) + assert_(t(array([[0]])) == 0) + assert_(t(array([0j])) == 0) + assert_(t(array([1])) == 1) + assert_raises(ValueError, t, array([0, 0])) + + +class TestF77ReturnLogical(TestReturnLogical): + code = """ + function t0(value) + logical value + logical t0 + t0 = value + end + function t1(value) + logical*1 value + logical*1 t1 + t1 = value + end + function t2(value) + logical*2 value + logical*2 t2 + t2 = value + end + function t4(value) + logical*4 value + logical*4 t4 + t4 = value + end +c function t8(value) +c logical*8 value +c logical*8 t8 +c t8 = value +c end + + subroutine s0(t0,value) + logical value + logical t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s1(t1,value) + logical*1 value + logical*1 t1 +cf2py intent(out) t1 + t1 = value + end + subroutine s2(t2,value) + logical*2 value + logical*2 t2 +cf2py intent(out) t2 + t2 = value + end + subroutine s4(t4,value) + logical*4 value + logical*4 t4 +cf2py intent(out) t4 + t4 = value + end +c subroutine s8(t8,value) +c logical*8 value +c logical*8 t8 +cf2py intent(out) t8 +c t8 = value +c end + """ + + @pytest.mark.slow + @pytest.mark.parametrize('name', 't0,t1,t2,t4,s0,s1,s2,s4'.split(',')) + def test_all(self, name): + self.check_function(getattr(self.module, name)) + + +class 
TestF90ReturnLogical(TestReturnLogical): + suffix = ".f90" + code = """ +module f90_return_logical + contains + function t0(value) + logical :: value + logical :: t0 + t0 = value + end function t0 + function t1(value) + logical(kind=1) :: value + logical(kind=1) :: t1 + t1 = value + end function t1 + function t2(value) + logical(kind=2) :: value + logical(kind=2) :: t2 + t2 = value + end function t2 + function t4(value) + logical(kind=4) :: value + logical(kind=4) :: t4 + t4 = value + end function t4 + function t8(value) + logical(kind=8) :: value + logical(kind=8) :: t8 + t8 = value + end function t8 + + subroutine s0(t0,value) + logical :: value + logical :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s1(t1,value) + logical(kind=1) :: value + logical(kind=1) :: t1 +!f2py intent(out) t1 + t1 = value + end subroutine s1 + subroutine s2(t2,value) + logical(kind=2) :: value + logical(kind=2) :: t2 +!f2py intent(out) t2 + t2 = value + end subroutine s2 + subroutine s4(t4,value) + logical(kind=4) :: value + logical(kind=4) :: t4 +!f2py intent(out) t4 + t4 = value + end subroutine s4 + subroutine s8(t8,value) + logical(kind=8) :: value + logical(kind=8) :: t8 +!f2py intent(out) t8 + t8 = value + end subroutine s8 +end module f90_return_logical + """ + + @pytest.mark.slow + @pytest.mark.parametrize('name', + 't0,t1,t2,t4,t8,s0,s1,s2,s4,s8'.split(',')) + def test_all(self, name): + self.check_function(getattr(self.module.f90_return_logical, name)) diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/test_return_real.py b/MLPY/Lib/site-packages/numpy/f2py/tests/test_return_real.py new file mode 100644 index 0000000000000000000000000000000000000000..70c449fea0ca41cce9536e0b4179bf3c1fcde0c5 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/test_return_real.py @@ -0,0 +1,203 @@ +import platform +import pytest + +from numpy import array +from numpy.testing import assert_, assert_raises +from . import util + + +class TestReturnReal(util.F2PyTest): + + def check_function(self, t, tname): + if tname in ['t0', 't4', 's0', 's4']: + err = 1e-5 + else: + err = 0.0 + assert_(abs(t(234) - 234.0) <= err) + assert_(abs(t(234.6) - 234.6) <= err) + assert_(abs(t('234') - 234) <= err) + assert_(abs(t('234.6') - 234.6) <= err) + assert_(abs(t(-234) + 234) <= err) + assert_(abs(t([234]) - 234) <= err) + assert_(abs(t((234,)) - 234.) <= err) + assert_(abs(t(array(234)) - 234.) <= err) + assert_(abs(t(array([234])) - 234.) <= err) + assert_(abs(t(array([[234]])) - 234.) <= err) + assert_(abs(t(array([234], 'b')) + 22) <= err) + assert_(abs(t(array([234], 'h')) - 234.) <= err) + assert_(abs(t(array([234], 'i')) - 234.) <= err) + assert_(abs(t(array([234], 'l')) - 234.) <= err) + assert_(abs(t(array([234], 'B')) - 234.) <= err) + assert_(abs(t(array([234], 'f')) - 234.) <= err) + assert_(abs(t(array([234], 'd')) - 234.) 
<= err) + if tname in ['t0', 't4', 's0', 's4']: + assert_(t(1e200) == t(1e300)) # inf + + #assert_raises(ValueError, t, array([234], 'S1')) + assert_raises(ValueError, t, 'abc') + + assert_raises(IndexError, t, []) + assert_raises(IndexError, t, ()) + + assert_raises(Exception, t, t) + assert_raises(Exception, t, {}) + + try: + r = t(10 ** 400) + assert_(repr(r) in ['inf', 'Infinity'], repr(r)) + except OverflowError: + pass + + + +@pytest.mark.skipif( + platform.system() == 'Darwin', + reason="Prone to error when run with numpy/f2py/tests on mac os, " + "but not when run in isolation") +class TestCReturnReal(TestReturnReal): + suffix = ".pyf" + module_name = "c_ext_return_real" + code = """ +python module c_ext_return_real +usercode \'\'\' +float t4(float value) { return value; } +void s4(float *t4, float value) { *t4 = value; } +double t8(double value) { return value; } +void s8(double *t8, double value) { *t8 = value; } +\'\'\' +interface + function t4(value) + real*4 intent(c) :: t4,value + end + function t8(value) + real*8 intent(c) :: t8,value + end + subroutine s4(t4,value) + intent(c) s4 + real*4 intent(out) :: t4 + real*4 intent(c) :: value + end + subroutine s8(t8,value) + intent(c) s8 + real*8 intent(out) :: t8 + real*8 intent(c) :: value + end +end interface +end python module c_ext_return_real + """ + + @pytest.mark.parametrize('name', 't4,t8,s4,s8'.split(',')) + def test_all(self, name): + self.check_function(getattr(self.module, name), name) + + +class TestF77ReturnReal(TestReturnReal): + code = """ + function t0(value) + real value + real t0 + t0 = value + end + function t4(value) + real*4 value + real*4 t4 + t4 = value + end + function t8(value) + real*8 value + real*8 t8 + t8 = value + end + function td(value) + double precision value + double precision td + td = value + end + + subroutine s0(t0,value) + real value + real t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s4(t4,value) + real*4 value + real*4 t4 +cf2py intent(out) t4 + t4 = value + end + subroutine s8(t8,value) + real*8 value + real*8 t8 +cf2py intent(out) t8 + t8 = value + end + subroutine sd(td,value) + double precision value + double precision td +cf2py intent(out) td + td = value + end + """ + + @pytest.mark.parametrize('name', 't0,t4,t8,td,s0,s4,s8,sd'.split(',')) + def test_all(self, name): + self.check_function(getattr(self.module, name), name) + + +class TestF90ReturnReal(TestReturnReal): + suffix = ".f90" + code = """ +module f90_return_real + contains + function t0(value) + real :: value + real :: t0 + t0 = value + end function t0 + function t4(value) + real(kind=4) :: value + real(kind=4) :: t4 + t4 = value + end function t4 + function t8(value) + real(kind=8) :: value + real(kind=8) :: t8 + t8 = value + end function t8 + function td(value) + double precision :: value + double precision :: td + td = value + end function td + + subroutine s0(t0,value) + real :: value + real :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s4(t4,value) + real(kind=4) :: value + real(kind=4) :: t4 +!f2py intent(out) t4 + t4 = value + end subroutine s4 + subroutine s8(t8,value) + real(kind=8) :: value + real(kind=8) :: t8 +!f2py intent(out) t8 + t8 = value + end subroutine s8 + subroutine sd(td,value) + double precision :: value + double precision :: td +!f2py intent(out) td + td = value + end subroutine sd +end module f90_return_real + """ + + @pytest.mark.parametrize('name', 't0,t4,t8,td,s0,s4,s8,sd'.split(',')) + def test_all(self, name): + 
self.check_function(getattr(self.module.f90_return_real, name), name) diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/test_semicolon_split.py b/MLPY/Lib/site-packages/numpy/f2py/tests/test_semicolon_split.py new file mode 100644 index 0000000000000000000000000000000000000000..f5a93c39dadfd87516cb15505fde18c37835b618 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/test_semicolon_split.py @@ -0,0 +1,63 @@ +import platform +import pytest + +from . import util +from numpy.testing import assert_equal + +@pytest.mark.skipif( + platform.system() == 'Darwin', + reason="Prone to error when run with numpy/f2py/tests on mac os, " + "but not when run in isolation") +class TestMultiline(util.F2PyTest): + suffix = ".pyf" + module_name = "multiline" + code = """ +python module {module} + usercode ''' +void foo(int* x) {{ + char dummy = ';'; + *x = 42; +}} +''' + interface + subroutine foo(x) + intent(c) foo + integer intent(out) :: x + end subroutine foo + end interface +end python module {module} + """.format(module=module_name) + + def test_multiline(self): + assert_equal(self.module.foo(), 42) + + +@pytest.mark.skipif( + platform.system() == 'Darwin', + reason="Prone to error when run with numpy/f2py/tests on mac os, " + "but not when run in isolation") +class TestCallstatement(util.F2PyTest): + suffix = ".pyf" + module_name = "callstatement" + code = """ +python module {module} + usercode ''' +void foo(int* x) {{ +}} +''' + interface + subroutine foo(x) + intent(c) foo + integer intent(out) :: x + callprotoargument int* + callstatement {{ & + ; & + x = 42; & + }} + end subroutine foo + end interface +end python module {module} + """.format(module=module_name) + + def test_callstatement(self): + assert_equal(self.module.foo(), 42) diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/test_size.py b/MLPY/Lib/site-packages/numpy/f2py/tests/test_size.py new file mode 100644 index 0000000000000000000000000000000000000000..2774038abe58305fbaf21555de41413643fd4366 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/test_size.py @@ -0,0 +1,49 @@ +import os +import pytest + +from numpy.testing import assert_equal +from . 
import util + + +def _path(*a): + return os.path.join(*((os.path.dirname(__file__),) + a)) + + +class TestSizeSumExample(util.F2PyTest): + sources = [_path('src', 'size', 'foo.f90')] + + @pytest.mark.slow + def test_all(self): + r = self.module.foo([[]]) + assert_equal(r, [0], repr(r)) + + r = self.module.foo([[1, 2]]) + assert_equal(r, [3], repr(r)) + + r = self.module.foo([[1, 2], [3, 4]]) + assert_equal(r, [3, 7], repr(r)) + + r = self.module.foo([[1, 2], [3, 4], [5, 6]]) + assert_equal(r, [3, 7, 11], repr(r)) + + @pytest.mark.slow + def test_transpose(self): + r = self.module.trans([[]]) + assert_equal(r.T, [[]], repr(r)) + + r = self.module.trans([[1, 2]]) + assert_equal(r, [[1], [2]], repr(r)) + + r = self.module.trans([[1, 2, 3], [4, 5, 6]]) + assert_equal(r, [[1, 4], [2, 5], [3, 6]], repr(r)) + + @pytest.mark.slow + def test_flatten(self): + r = self.module.flatten([[]]) + assert_equal(r, [], repr(r)) + + r = self.module.flatten([[1, 2]]) + assert_equal(r, [1, 2], repr(r)) + + r = self.module.flatten([[1, 2, 3], [4, 5, 6]]) + assert_equal(r, [1, 2, 3, 4, 5, 6], repr(r)) diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/test_string.py b/MLPY/Lib/site-packages/numpy/f2py/tests/test_string.py new file mode 100644 index 0000000000000000000000000000000000000000..f89d4f7ed7aab3bf7a1398f7a4bcbd73dd35cd96 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/test_string.py @@ -0,0 +1,22 @@ +import os +import pytest + +from numpy.testing import assert_array_equal +import numpy as np +from . import util + + +def _path(*a): + return os.path.join(*((os.path.dirname(__file__),) + a)) + +class TestString(util.F2PyTest): + sources = [_path('src', 'string', 'char.f90')] + + @pytest.mark.slow + def test_char(self): + strings = np.array(['ab', 'cd', 'ef'], dtype='c').T + inp, out = self.module.char_test.change_strings(strings, strings.shape[1]) + assert_array_equal(inp, strings) + expected = strings.copy() + expected[1, :] = 'AAA' + assert_array_equal(out, expected) diff --git a/MLPY/Lib/site-packages/numpy/f2py/tests/util.py b/MLPY/Lib/site-packages/numpy/f2py/tests/util.py new file mode 100644 index 0000000000000000000000000000000000000000..599eea4925149c3a5235069bc9c9a3c43d1117a8 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/tests/util.py @@ -0,0 +1,359 @@ +""" +Utility functions for + +- building and importing modules on test time, using a temporary location +- detecting if compilers are present + +""" +import os +import sys +import subprocess +import tempfile +import shutil +import atexit +import textwrap +import re +import pytest + +from numpy.compat import asbytes, asstr +from numpy.testing import temppath +from importlib import import_module + +# +# Maintaining a temporary module directory +# + +_module_dir = None +_module_num = 5403 + + +def _cleanup(): + global _module_dir + if _module_dir is not None: + try: + sys.path.remove(_module_dir) + except ValueError: + pass + try: + shutil.rmtree(_module_dir) + except (IOError, OSError): + pass + _module_dir = None + + +def get_module_dir(): + global _module_dir + if _module_dir is None: + _module_dir = tempfile.mkdtemp() + atexit.register(_cleanup) + if _module_dir not in sys.path: + sys.path.insert(0, _module_dir) + return _module_dir + + +def get_temp_module_name(): + # Assume single-threaded, and the module dir usable only by this thread + global _module_num + d = get_module_dir() + name = "_test_ext_module_%d" % _module_num + _module_num += 1 + if name in sys.modules: + # this should not be possible, but check anyway + raise 
RuntimeError("Temporary module name already in use.") + return name + + +def _memoize(func): + memo = {} + + def wrapper(*a, **kw): + key = repr((a, kw)) + if key not in memo: + try: + memo[key] = func(*a, **kw) + except Exception as e: + memo[key] = e + raise + ret = memo[key] + if isinstance(ret, Exception): + raise ret + return ret + wrapper.__name__ = func.__name__ + return wrapper + +# +# Building modules +# + + +@_memoize +def build_module(source_files, options=[], skip=[], only=[], module_name=None): + """ + Compile and import a f2py module, built from the given files. + + """ + + code = ("import sys; sys.path = %s; import numpy.f2py as f2py2e; " + "f2py2e.main()" % repr(sys.path)) + + d = get_module_dir() + + # Copy files + dst_sources = [] + f2py_sources = [] + for fn in source_files: + if not os.path.isfile(fn): + raise RuntimeError("%s is not a file" % fn) + dst = os.path.join(d, os.path.basename(fn)) + shutil.copyfile(fn, dst) + dst_sources.append(dst) + + base, ext = os.path.splitext(dst) + if ext in ('.f90', '.f', '.c', '.pyf'): + f2py_sources.append(dst) + + # Prepare options + if module_name is None: + module_name = get_temp_module_name() + f2py_opts = ['-c', '-m', module_name] + options + f2py_sources + if skip: + f2py_opts += ['skip:'] + skip + if only: + f2py_opts += ['only:'] + only + + # Build + cwd = os.getcwd() + try: + os.chdir(d) + cmd = [sys.executable, '-c', code] + f2py_opts + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + out, err = p.communicate() + if p.returncode != 0: + raise RuntimeError("Running f2py failed: %s\n%s" + % (cmd[4:], asstr(out))) + finally: + os.chdir(cwd) + + # Partial cleanup + for fn in dst_sources: + os.unlink(fn) + + # Import + return import_module(module_name) + + +@_memoize +def build_code(source_code, options=[], skip=[], only=[], suffix=None, + module_name=None): + """ + Compile and import Fortran code using f2py. + + """ + if suffix is None: + suffix = '.f' + with temppath(suffix=suffix) as path: + with open(path, 'w') as f: + f.write(source_code) + return build_module([path], options=options, skip=skip, only=only, + module_name=module_name) + +# +# Check if compilers are available at all... +# + +_compiler_status = None + + +def _get_compiler_status(): + global _compiler_status + if _compiler_status is not None: + return _compiler_status + + _compiler_status = (False, False, False) + + # XXX: this is really ugly. But I don't know how to invoke Distutils + # in a safer way... 
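+    # The generated script builds an empty distutils Configuration, probes
+    # for working C / Fortran 77 / Fortran 90 compilers via try_compile(),
+    # have_f77c() and have_f90c(), and prints a single "COMPILERS:%d,%d,%d"
+    # line that is parsed out of the captured output below.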
+ code = textwrap.dedent("""\ + import os + import sys + sys.path = %(syspath)s + + def configuration(parent_name='',top_path=None): + global config + from numpy.distutils.misc_util import Configuration + config = Configuration('', parent_name, top_path) + return config + + from numpy.distutils.core import setup + setup(configuration=configuration) + + config_cmd = config.get_config_cmd() + have_c = config_cmd.try_compile('void foo() {}') + print('COMPILERS:%%d,%%d,%%d' %% (have_c, + config.have_f77c(), + config.have_f90c())) + sys.exit(99) + """) + code = code % dict(syspath=repr(sys.path)) + + tmpdir = tempfile.mkdtemp() + try: + script = os.path.join(tmpdir, 'setup.py') + + with open(script, 'w') as f: + f.write(code) + + cmd = [sys.executable, 'setup.py', 'config'] + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + cwd=tmpdir) + out, err = p.communicate() + finally: + shutil.rmtree(tmpdir) + + m = re.search(br'COMPILERS:(\d+),(\d+),(\d+)', out) + if m: + _compiler_status = (bool(int(m.group(1))), bool(int(m.group(2))), + bool(int(m.group(3)))) + # Finished + return _compiler_status + + +def has_c_compiler(): + return _get_compiler_status()[0] + + +def has_f77_compiler(): + return _get_compiler_status()[1] + + +def has_f90_compiler(): + return _get_compiler_status()[2] + +# +# Building with distutils +# + + +@_memoize +def build_module_distutils(source_files, config_code, module_name, **kw): + """ + Build a module via distutils and import it. + + """ + from numpy.distutils.misc_util import Configuration + from numpy.distutils.core import setup + + d = get_module_dir() + + # Copy files + dst_sources = [] + for fn in source_files: + if not os.path.isfile(fn): + raise RuntimeError("%s is not a file" % fn) + dst = os.path.join(d, os.path.basename(fn)) + shutil.copyfile(fn, dst) + dst_sources.append(dst) + + # Build script + config_code = textwrap.dedent(config_code).replace("\n", "\n ") + + code = textwrap.dedent("""\ + import os + import sys + sys.path = %(syspath)s + + def configuration(parent_name='',top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('', parent_name, top_path) + %(config_code)s + return config + + if __name__ == "__main__": + from numpy.distutils.core import setup + setup(configuration=configuration) + """) % dict(config_code=config_code, syspath=repr(sys.path)) + + script = os.path.join(d, get_temp_module_name() + '.py') + dst_sources.append(script) + with open(script, 'wb') as f: + f.write(asbytes(code)) + + # Build + cwd = os.getcwd() + try: + os.chdir(d) + cmd = [sys.executable, script, 'build_ext', '-i'] + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + out, err = p.communicate() + if p.returncode != 0: + raise RuntimeError("Running distutils build failed: %s\n%s" + % (cmd[4:], asstr(out))) + finally: + os.chdir(cwd) + + # Partial cleanup + for fn in dst_sources: + os.unlink(fn) + + # Import + __import__(module_name) + return sys.modules[module_name] + +# +# Unittest convenience +# + + +class F2PyTest: + code = None + sources = None + options = [] + skip = [] + only = [] + suffix = '.f' + module = None + module_name = None + + def setup(self): + if sys.platform == 'win32': + pytest.skip('Fails with MinGW64 Gfortran (Issue #9673)') + + if self.module is not None: + return + + # Check compiler availability first + if not has_c_compiler(): + pytest.skip("No C compiler available") + + codes = [] + if self.sources: + codes.extend(self.sources) + if self.code is not 
None: + codes.append(self.suffix) + + needs_f77 = False + needs_f90 = False + for fn in codes: + if fn.endswith('.f'): + needs_f77 = True + elif fn.endswith('.f90'): + needs_f90 = True + if needs_f77 and not has_f77_compiler(): + pytest.skip("No Fortran 77 compiler available") + if needs_f90 and not has_f90_compiler(): + pytest.skip("No Fortran 90 compiler available") + + # Build the module + if self.code is not None: + self.module = build_code(self.code, options=self.options, + skip=self.skip, only=self.only, + suffix=self.suffix, + module_name=self.module_name) + + if self.sources is not None: + self.module = build_module(self.sources, options=self.options, + skip=self.skip, only=self.only, + module_name=self.module_name) diff --git a/MLPY/Lib/site-packages/numpy/f2py/use_rules.py b/MLPY/Lib/site-packages/numpy/f2py/use_rules.py new file mode 100644 index 0000000000000000000000000000000000000000..672d4811dd95b777198617d165e97525b88f7c4a --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/f2py/use_rules.py @@ -0,0 +1,113 @@ +#!/usr/bin/env python3 +""" + +Build 'use others module data' mechanism for f2py2e. + +Unfinished. + +Copyright 2000 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +$Date: 2000/09/10 12:35:43 $ +Pearu Peterson + +""" +__version__ = "$Revision: 1.3 $"[10:-1] + +f2py_version = 'See `f2py -v`' + + +from .auxfuncs import ( + applyrules, dictappend, gentitle, hasnote, outmess +) + + +usemodule_rules = { + 'body': """ +#begintitle# +static char doc_#apiname#[] = \"\\\nVariable wrapper signature:\\n\\ +\t #name# = get_#name#()\\n\\ +Arguments:\\n\\ +#docstr#\"; +extern F_MODFUNC(#usemodulename#,#USEMODULENAME#,#realname#,#REALNAME#); +static PyObject *#apiname#(PyObject *capi_self, PyObject *capi_args) { +/*#decl#*/ +\tif (!PyArg_ParseTuple(capi_args, \"\")) goto capi_fail; +printf(\"c: %d\\n\",F_MODFUNC(#usemodulename#,#USEMODULENAME#,#realname#,#REALNAME#)); +\treturn Py_BuildValue(\"\"); +capi_fail: +\treturn NULL; +} +""", + 'method': '\t{\"get_#name#\",#apiname#,METH_VARARGS|METH_KEYWORDS,doc_#apiname#},', + 'need': ['F_MODFUNC'] +} + +################ + + +def buildusevars(m, r): + ret = {} + outmess( + '\t\tBuilding use variable hooks for module "%s" (feature only for F90/F95)...\n' % (m['name'])) + varsmap = {} + revmap = {} + if 'map' in r: + for k in r['map'].keys(): + if r['map'][k] in revmap: + outmess('\t\t\tVariable "%s<=%s" is already mapped by "%s". Skipping.\n' % ( + r['map'][k], k, revmap[r['map'][k]])) + else: + revmap[r['map'][k]] = k + if 'only' in r and r['only']: + for v in r['map'].keys(): + if r['map'][v] in m['vars']: + + if revmap[r['map'][v]] == v: + varsmap[v] = r['map'][v] + else: + outmess('\t\t\tIgnoring map "%s=>%s". See above.\n' % + (v, r['map'][v])) + else: + outmess( + '\t\t\tNo definition for variable "%s=>%s". 
Skipping.\n' % (v, r['map'][v])) + else: + for v in m['vars'].keys(): + if v in revmap: + varsmap[v] = revmap[v] + else: + varsmap[v] = v + for v in varsmap.keys(): + ret = dictappend(ret, buildusevar(v, varsmap[v], m['vars'], m['name'])) + return ret + + +def buildusevar(name, realname, vars, usemodulename): + outmess('\t\t\tConstructing wrapper function for variable "%s=>%s"...\n' % ( + name, realname)) + ret = {} + vrd = {'name': name, + 'realname': realname, + 'REALNAME': realname.upper(), + 'usemodulename': usemodulename, + 'USEMODULENAME': usemodulename.upper(), + 'texname': name.replace('_', '\\_'), + 'begintitle': gentitle('%s=>%s' % (name, realname)), + 'endtitle': gentitle('end of %s=>%s' % (name, realname)), + 'apiname': '#modulename#_use_%s_from_%s' % (realname, usemodulename) + } + nummap = {0: 'Ro', 1: 'Ri', 2: 'Rii', 3: 'Riii', 4: 'Riv', + 5: 'Rv', 6: 'Rvi', 7: 'Rvii', 8: 'Rviii', 9: 'Rix'} + vrd['texnamename'] = name + for i in nummap.keys(): + vrd['texnamename'] = vrd['texnamename'].replace(repr(i), nummap[i]) + if hasnote(vars[realname]): + vrd['note'] = vars[realname]['note'] + rd = dictappend({}, vrd) + + print(name, realname, vars[realname]) + ret = applyrules(usemodule_rules, rd) + return ret diff --git a/MLPY/Lib/site-packages/numpy/fft/__init__.py b/MLPY/Lib/site-packages/numpy/fft/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9fcfbd46e0a0d2ea4ed5bd562b2464f14243e962 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/fft/__init__.py @@ -0,0 +1,212 @@ +""" +Discrete Fourier Transform (:mod:`numpy.fft`) +============================================= + +.. currentmodule:: numpy.fft + +The SciPy module `scipy.fft` is a more comprehensive superset +of ``numpy.fft``, which includes only a basic set of routines. + +Standard FFTs +------------- + +.. autosummary:: + :toctree: generated/ + + fft Discrete Fourier transform. + ifft Inverse discrete Fourier transform. + fft2 Discrete Fourier transform in two dimensions. + ifft2 Inverse discrete Fourier transform in two dimensions. + fftn Discrete Fourier transform in N-dimensions. + ifftn Inverse discrete Fourier transform in N dimensions. + +Real FFTs +--------- + +.. autosummary:: + :toctree: generated/ + + rfft Real discrete Fourier transform. + irfft Inverse real discrete Fourier transform. + rfft2 Real discrete Fourier transform in two dimensions. + irfft2 Inverse real discrete Fourier transform in two dimensions. + rfftn Real discrete Fourier transform in N dimensions. + irfftn Inverse real discrete Fourier transform in N dimensions. + +Hermitian FFTs +-------------- + +.. autosummary:: + :toctree: generated/ + + hfft Hermitian discrete Fourier transform. + ihfft Inverse Hermitian discrete Fourier transform. + +Helper routines +--------------- + +.. autosummary:: + :toctree: generated/ + + fftfreq Discrete Fourier Transform sample frequencies. + rfftfreq DFT sample frequencies (for usage with rfft, irfft). + fftshift Shift zero-frequency component to center of spectrum. + ifftshift Inverse of fftshift. + + +Background information +---------------------- + +Fourier analysis is fundamentally a method for expressing a function as a +sum of periodic components, and for recovering the function from those +components. When both the function and its Fourier transform are +replaced with discretized counterparts, it is called the discrete Fourier +transform (DFT). 
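+
+As a quick illustration (a doctest-style sketch; it assumes nothing beyond
+NumPy imported as ``np``), the DFT of a short sequence can be computed by
+direct summation of complex exponentials and compared against `fft`:
+
+>>> n = 8
+>>> a = np.random.rand(n)
+>>> m = np.arange(n)
+>>> naive = np.array([(a * np.exp(-2j * np.pi * m * k / n)).sum()
+...                   for k in range(n)])  # O(n**2) direct summation
+>>> np.allclose(naive, np.fft.fft(a))      # the FFT computes the same sums
+True
+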
The DFT has become a mainstay of numerical computing in +part because of a very fast algorithm for computing it, called the Fast +Fourier Transform (FFT), which was known to Gauss (1805) and was brought +to light in its current form by Cooley and Tukey [CT]_. Press et al. [NR]_ +provide an accessible introduction to Fourier analysis and its +applications. + +Because the discrete Fourier transform separates its input into +components that contribute at discrete frequencies, it has a great number +of applications in digital signal processing, e.g., for filtering, and in +this context the discretized input to the transform is customarily +referred to as a *signal*, which exists in the *time domain*. The output +is called a *spectrum* or *transform* and exists in the *frequency +domain*. + +Implementation details +---------------------- + +There are many ways to define the DFT, varying in the sign of the +exponent, normalization, etc. In this implementation, the DFT is defined +as + +.. math:: + A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}\\right\\} + \\qquad k = 0,\\ldots,n-1. + +The DFT is in general defined for complex inputs and outputs, and a +single-frequency component at linear frequency :math:`f` is +represented by a complex exponential +:math:`a_m = \\exp\\{2\\pi i\\,f m\\Delta t\\}`, where :math:`\\Delta t` +is the sampling interval. + +The values in the result follow so-called "standard" order: If ``A = +fft(a, n)``, then ``A[0]`` contains the zero-frequency term (the sum of +the signal), which is always purely real for real inputs. Then ``A[1:n/2]`` +contains the positive-frequency terms, and ``A[n/2+1:]`` contains the +negative-frequency terms, in order of decreasingly negative frequency. +For an even number of input points, ``A[n/2]`` represents both positive and +negative Nyquist frequency, and is also purely real for real input. For +an odd number of input points, ``A[(n-1)/2]`` contains the largest positive +frequency, while ``A[(n+1)/2]`` contains the largest negative frequency. +The routine ``np.fft.fftfreq(n)`` returns an array giving the frequencies +of corresponding elements in the output. The routine +``np.fft.fftshift(A)`` shifts transforms and their frequencies to put the +zero-frequency components in the middle, and ``np.fft.ifftshift(A)`` undoes +that shift. + +When the input `a` is a time-domain signal and ``A = fft(a)``, ``np.abs(A)`` +is its amplitude spectrum and ``np.abs(A)**2`` is its power spectrum. +The phase spectrum is obtained by ``np.angle(A)``. + +The inverse DFT is defined as + +.. math:: + a_m = \\frac{1}{n}\\sum_{k=0}^{n-1}A_k\\exp\\left\\{2\\pi i{mk\\over n}\\right\\} + \\qquad m = 0,\\ldots,n-1. + +It differs from the forward transform by the sign of the exponential +argument and the default normalization by :math:`1/n`. + +Type Promotion +-------------- + +`numpy.fft` promotes ``float32`` and ``complex64`` arrays to ``float64`` and +``complex128`` arrays respectively. For an FFT implementation that does not +promote input arrays, see `scipy.fftpack`. + +Normalization +------------- + +The argument ``norm`` indicates which direction of the pair of direct/inverse +transforms is scaled and with what normalization factor. +The default normalization (``"backward"``) has the direct (forward) transforms +unscaled and the inverse (backward) transforms scaled by :math:`1/n`. 
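+
+For instance, with this default mode (a minimal doctest-style sketch,
+assuming ``np`` is NumPy):
+
+>>> a = np.ones(4)
+>>> np.fft.fft(a)[0]               # unscaled forward transform: the plain sum
+(4+0j)
+>>> np.fft.ifft(np.fft.fft(a))[0]  # the 1/n on the inverse restores the input
+(1+0j)
+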
It is
+possible to obtain unitary transforms by setting the keyword argument ``norm``
+to ``"ortho"`` so that both direct and inverse transforms are scaled by
+:math:`1/\\sqrt{n}`. Finally, setting the keyword argument ``norm`` to
+``"forward"`` has the direct transforms scaled by :math:`1/n` and the inverse
+transforms unscaled (i.e. exactly opposite to the default ``"backward"``).
+`None` is an alias of the default option ``"backward"`` for backward
+compatibility.
+
+Real and Hermitian transforms
+-----------------------------
+
+When the input is purely real, its transform is Hermitian, i.e., the
+component at frequency :math:`f_k` is the complex conjugate of the
+component at frequency :math:`-f_k`, which means that for real
+inputs there is no information in the negative frequency components that
+is not already available from the positive frequency components.
+The family of `rfft` functions is
+designed to operate on real inputs, and exploits this symmetry by
+computing only the positive frequency components, up to and including the
+Nyquist frequency. Thus, ``n`` input points produce ``n/2+1`` complex
+output points. The inverses of this family assume the same symmetry of
+their input, and for an output of ``n`` points use ``n/2+1`` input points.
+
+Correspondingly, when the spectrum is purely real, the signal is
+Hermitian. The `hfft` family of functions exploits this symmetry by
+using ``n/2+1`` complex points in the input (time) domain for ``n`` real
+points in the frequency domain.
+
+In higher dimensions, FFTs are used, e.g., for image analysis and
+filtering. The computational efficiency of the FFT means that it can
+also be a faster way to compute large convolutions, using the property
+that a convolution in the time domain is equivalent to a point-by-point
+multiplication in the frequency domain.
+
+Higher dimensions
+-----------------
+
+In two dimensions, the DFT is defined as
+
+.. math::
+   A_{kl} = \\sum_{m=0}^{M-1} \\sum_{n=0}^{N-1}
+   a_{mn}\\exp\\left\\{-2\\pi i \\left({mk\\over M}+{nl\\over N}\\right)\\right\\}
+   \\qquad k = 0, \\ldots, M-1;\\quad l = 0, \\ldots, N-1,
+
+which extends in the obvious way to higher dimensions, and the inverses
+in higher dimensions also extend in the same way.
+
+References
+----------
+
+.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
+        machine calculation of complex Fourier series," *Math. Comput.*
+        19: 297-301.
+
+.. [NR] Press, W., Teukolsky, S., Vetterling, W.T., and Flannery, B.P.,
+        2007, *Numerical Recipes: The Art of Scientific Computing*, ch.
+        12-13. Cambridge Univ. Press, Cambridge, UK.
+
+Examples
+--------
+
+For examples, see the various functions.
+
+"""
+
+from . import _pocketfft, helper
+from ._pocketfft import *
+from .helper import *
+
+__all__ = _pocketfft.__all__.copy()
+__all__ += helper.__all__
+
+from numpy._pytesttester import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/MLPY/Lib/site-packages/numpy/fft/__init__.pyi b/MLPY/Lib/site-packages/numpy/fft/__init__.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..31b252a954f157f92f0329ba1ce825d8840e4f26
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/fft/__init__.pyi
@@ -0,0 +1,22 @@
+from typing import Any, List
+
+__all__: List[str]
+
+def fft(a, n=..., axis=..., norm=...): ...
+def ifft(a, n=..., axis=..., norm=...): ...
+def rfft(a, n=..., axis=..., norm=...): ...
+def irfft(a, n=..., axis=..., norm=...): ...
+def hfft(a, n=..., axis=..., norm=...): ...
+def ihfft(a, n=..., axis=..., norm=...): ...
+def fftn(a, s=..., axes=..., norm=...): ...
+def ifftn(a, s=..., axes=..., norm=...): ...
+def rfftn(a, s=..., axes=..., norm=...): ...
+def irfftn(a, s=..., axes=..., norm=...): ...
+def fft2(a, s=..., axes=..., norm=...): ...
+def ifft2(a, s=..., axes=..., norm=...): ...
+def rfft2(a, s=..., axes=..., norm=...): ...
+def irfft2(a, s=..., axes=..., norm=...): ...
+def fftshift(x, axes=...): ...
+def ifftshift(x, axes=...): ...
+def fftfreq(n, d=...): ...
+def rfftfreq(n, d=...): ...
diff --git a/MLPY/Lib/site-packages/numpy/fft/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/fft/__pycache__/__init__.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..93150975589e629423abea1531d45e0aedc26ca5
Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/fft/__pycache__/__init__.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/numpy/fft/__pycache__/_pocketfft.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/fft/__pycache__/_pocketfft.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7652b535999add8840d1f88ec2543cd74ef555e1
Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/fft/__pycache__/_pocketfft.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/numpy/fft/__pycache__/helper.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/fft/__pycache__/helper.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3eb9c7abaee708c37ca5fb541b7e379d45e3c923
Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/fft/__pycache__/helper.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/numpy/fft/__pycache__/setup.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/fft/__pycache__/setup.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6e88682439a094f7c89135e6d65f71726fa20a08
Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/fft/__pycache__/setup.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/numpy/fft/_pocketfft.py b/MLPY/Lib/site-packages/numpy/fft/_pocketfft.py
new file mode 100644
index 0000000000000000000000000000000000000000..f79e860b13fcfabab48ed2ad4d0252edb9128afe
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/fft/_pocketfft.py
@@ -0,0 +1,1424 @@
+"""
+Discrete Fourier Transforms
+
+Routines in this module:
+
+fft(a, n=None, axis=-1, norm="backward")
+ifft(a, n=None, axis=-1, norm="backward")
+rfft(a, n=None, axis=-1, norm="backward")
+irfft(a, n=None, axis=-1, norm="backward")
+hfft(a, n=None, axis=-1, norm="backward")
+ihfft(a, n=None, axis=-1, norm="backward")
+fftn(a, s=None, axes=None, norm="backward")
+ifftn(a, s=None, axes=None, norm="backward")
+rfftn(a, s=None, axes=None, norm="backward")
+irfftn(a, s=None, axes=None, norm="backward")
+fft2(a, s=None, axes=(-2,-1), norm="backward")
+ifft2(a, s=None, axes=(-2, -1), norm="backward")
+rfft2(a, s=None, axes=(-2,-1), norm="backward")
+irfft2(a, s=None, axes=(-2, -1), norm="backward")
+
+i = inverse transform
+r = transform of purely real data
+h = Hermitian transform
+n = n-dimensional transform
+2 = 2-dimensional transform
+(Note: 2D routines are just nD routines with different default
+behavior.)
+
+"""
+__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
+           'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn']
+
+import functools
+
+from numpy.core import asarray, zeros, swapaxes, conjugate, take, sqrt
+from . import _pocketfft_internal as pfi
+from numpy.core.multiarray import normalize_axis_index
+from numpy.core import overrides
+
+
+array_function_dispatch = functools.partial(
+    overrides.array_function_dispatch, module='numpy.fft')
+
+
+# `inv_norm` is a float by which the result of the transform needs to be
+# divided. This replaces the original, more intuitive `fct` parameter to avoid
+# divisions by zero (or alternatively additional checks) in the case of
+# zero-length axes during its computation.
+def _raw_fft(a, n, axis, is_real, is_forward, inv_norm):
+    axis = normalize_axis_index(axis, a.ndim)
+    if n is None:
+        n = a.shape[axis]
+
+    fct = 1/inv_norm
+
+    if a.shape[axis] != n:
+        s = list(a.shape)
+        index = [slice(None)]*len(s)
+        if s[axis] > n:
+            index[axis] = slice(0, n)
+            a = a[tuple(index)]
+        else:
+            index[axis] = slice(0, s[axis])
+            s[axis] = n
+            z = zeros(s, a.dtype.char)
+            z[tuple(index)] = a
+            a = z
+
+    if axis == a.ndim-1:
+        r = pfi.execute(a, is_real, is_forward, fct)
+    else:
+        a = swapaxes(a, axis, -1)
+        r = pfi.execute(a, is_real, is_forward, fct)
+        r = swapaxes(r, axis, -1)
+    return r
+
+
+def _get_forward_norm(n, norm):
+    if n < 1:
+        raise ValueError(f"Invalid number of FFT data points ({n}) specified.")
+
+    if norm is None or norm == "backward":
+        return 1
+    elif norm == "ortho":
+        return sqrt(n)
+    elif norm == "forward":
+        return n
+    raise ValueError(f'Invalid norm value {norm}; should be "backward", '
+                     '"ortho" or "forward".')
+
+
+def _get_backward_norm(n, norm):
+    if n < 1:
+        raise ValueError(f"Invalid number of FFT data points ({n}) specified.")
+
+    if norm is None or norm == "backward":
+        return n
+    elif norm == "ortho":
+        return sqrt(n)
+    elif norm == "forward":
+        return 1
+    raise ValueError(f'Invalid norm value {norm}; should be "backward", '
+                     '"ortho" or "forward".')
+
+
+_SWAP_DIRECTION_MAP = {"backward": "forward", None: "forward",
+                       "ortho": "ortho", "forward": "backward"}
+
+
+def _swap_direction(norm):
+    try:
+        return _SWAP_DIRECTION_MAP[norm]
+    except KeyError:
+        raise ValueError(f'Invalid norm value {norm}; should be "backward", '
+                         '"ortho" or "forward".') from None
+
+
+def _fft_dispatcher(a, n=None, axis=None, norm=None):
+    return (a,)
+
+
+@array_function_dispatch(_fft_dispatcher)
+def fft(a, n=None, axis=-1, norm=None):
+    """
+    Compute the one-dimensional discrete Fourier Transform.
+
+    This function computes the one-dimensional *n*-point discrete Fourier
+    Transform (DFT) with the efficient Fast Fourier Transform (FFT)
+    algorithm [CT].
+
+    Parameters
+    ----------
+    a : array_like
+        Input array, can be complex.
+    n : int, optional
+        Length of the transformed axis of the output.
+        If `n` is smaller than the length of the input, the input is cropped.
+        If it is larger, the input is padded with zeros. If `n` is not given,
+        the length of the input along the axis specified by `axis` is used.
+    axis : int, optional
+        Axis over which to compute the FFT. If not given, the last axis is
+        used.
+    norm : {"backward", "ortho", "forward"}, optional
+        .. versionadded:: 1.10.0
+
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+            The "backward", "forward" values were added.
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axis
+        indicated by `axis`, or the last one if `axis` is not specified.
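+
+        For example, the transformed length follows `n` rather than the
+        input length (an illustrative doctest-style sketch):
+
+        >>> np.fft.fft([1, 2, 3, 4], n=8).shape   # zero-padded
+        (8,)
+        >>> np.fft.fft([1, 2, 3, 4], n=2).shape   # cropped
+        (2,)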
+
+    Raises
+    ------
+    IndexError
+        If `axis` is not a valid axis of `a`.
+
+    See Also
+    --------
+    numpy.fft : For definition of the DFT and conventions used.
+    ifft : The inverse of `fft`.
+    fft2 : The two-dimensional FFT.
+    fftn : The *n*-dimensional FFT.
+    rfftn : The *n*-dimensional FFT of real input.
+    fftfreq : Frequency bins for given FFT parameters.
+
+    Notes
+    -----
+    FFT (Fast Fourier Transform) refers to a way the discrete Fourier
+    Transform (DFT) can be calculated efficiently, by using symmetries in the
+    calculated terms. The symmetry is highest when `n` is a power of 2, and
+    the transform is therefore most efficient for these sizes.
+
+    The DFT is defined, with the conventions used in this implementation, in
+    the documentation for the `numpy.fft` module.
+
+    References
+    ----------
+    .. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
+            machine calculation of complex Fourier series," *Math. Comput.*
+            19: 297-301.
+
+    Examples
+    --------
+    >>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
+    array([-2.33486982e-16+1.14423775e-17j,  8.00000000e+00-1.25557246e-15j,
+            2.33486982e-16+2.33486982e-16j,  0.00000000e+00+1.22464680e-16j,
+           -1.14423775e-17+2.33486982e-16j,  0.00000000e+00+5.20784380e-16j,
+            1.14423775e-17+1.14423775e-17j,  0.00000000e+00+1.22464680e-16j])
+
+    In this example, real input has an FFT which is Hermitian, i.e., symmetric
+    in the real part and anti-symmetric in the imaginary part, as described in
+    the `numpy.fft` documentation:
+
+    >>> import matplotlib.pyplot as plt
+    >>> t = np.arange(256)
+    >>> sp = np.fft.fft(np.sin(t))
+    >>> freq = np.fft.fftfreq(t.shape[-1])
+    >>> plt.plot(freq, sp.real, freq, sp.imag)
+    [<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.show()
+
+    """
+    a = asarray(a)
+    if n is None:
+        n = a.shape[axis]
+    inv_norm = _get_forward_norm(n, norm)
+    output = _raw_fft(a, n, axis, False, True, inv_norm)
+    return output
+
+
+@array_function_dispatch(_fft_dispatcher)
+def ifft(a, n=None, axis=-1, norm=None):
+    """
+    Compute the one-dimensional inverse discrete Fourier Transform.
+
+    This function computes the inverse of the one-dimensional *n*-point
+    discrete Fourier transform computed by `fft`.  In other words,
+    ``ifft(fft(a)) == a`` to within numerical accuracy.
+    For a general description of the algorithm and definitions,
+    see `numpy.fft`.
+
+    The input should be ordered in the same way as is returned by `fft`,
+    i.e.,
+
+    * ``a[0]`` should contain the zero frequency term,
+    * ``a[1:n//2]`` should contain the positive-frequency terms,
+    * ``a[n//2 + 1:]`` should contain the negative-frequency terms, in
+      increasing order starting from the most negative frequency.
+
+    For an even number of input points, ``A[n//2]`` represents the sum of
+    the values at the positive and negative Nyquist frequencies, as the two
+    are aliased together. See `numpy.fft` for details.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array, can be complex.
+    n : int, optional
+        Length of the transformed axis of the output.
+        If `n` is smaller than the length of the input, the input is cropped.
+        If it is larger, the input is padded with zeros. If `n` is not given,
+        the length of the input along the axis specified by `axis` is used.
+        See notes about padding issues.
+    axis : int, optional
+        Axis over which to compute the inverse DFT. If not given, the last
+        axis is used.
+    norm : {"backward", "ortho", "forward"}, optional
+        .. versionadded:: 1.10.0
+
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+            The "backward", "forward" values were added.
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axis
+        indicated by `axis`, or the last one if `axis` is not specified.
+
+    Raises
+    ------
+    IndexError
+        If `axis` is not a valid axis of `a`.
+
+    See Also
+    --------
+    numpy.fft : An introduction, with definitions and general explanations.
+    fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse.
+    ifft2 : The two-dimensional inverse FFT.
+    ifftn : The n-dimensional inverse FFT.
+
+    Notes
+    -----
+    If the input parameter `n` is larger than the size of the input, the input
+    is padded by appending zeros at the end. Even though this is the common
+    approach, it might lead to surprising results. If a different padding is
+    desired, it must be performed before calling `ifft`.
+
+    Examples
+    --------
+    >>> np.fft.ifft([0, 4, 0, 0])
+    array([ 1.+0.j,  0.+1.j, -1.+0.j,  0.-1.j]) # may vary
+
+    Create and plot a band-limited signal with random phases:
+
+    >>> import matplotlib.pyplot as plt
+    >>> t = np.arange(400)
+    >>> n = np.zeros((400,), dtype=complex)
+    >>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
+    >>> s = np.fft.ifft(n)
+    >>> plt.plot(t, s.real, label='real')
+    [<matplotlib.lines.Line2D object at ...>]
+    >>> plt.plot(t, s.imag, '--', label='imaginary')
+    [<matplotlib.lines.Line2D object at ...>]
+    >>> plt.legend()
+    <matplotlib.legend.Legend object at ...>
+    >>> plt.show()
+
+    """
+    a = asarray(a)
+    if n is None:
+        n = a.shape[axis]
+    inv_norm = _get_backward_norm(n, norm)
+    output = _raw_fft(a, n, axis, False, False, inv_norm)
+    return output
+
+
+@array_function_dispatch(_fft_dispatcher)
+def rfft(a, n=None, axis=-1, norm=None):
+    """
+    Compute the one-dimensional discrete Fourier Transform for real input.
+
+    This function computes the one-dimensional *n*-point discrete Fourier
+    Transform (DFT) of a real-valued array by means of an efficient algorithm
+    called the Fast Fourier Transform (FFT).
+
+    Parameters
+    ----------
+    a : array_like
+        Input array
+    n : int, optional
+        Number of points along transformation axis in the input to use.
+        If `n` is smaller than the length of the input, the input is cropped.
+        If it is larger, the input is padded with zeros. If `n` is not given,
+        the length of the input along the axis specified by `axis` is used.
+    axis : int, optional
+        Axis over which to compute the FFT. If not given, the last axis is
+        used.
+    norm : {"backward", "ortho", "forward"}, optional
+        .. versionadded:: 1.10.0
+
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+            The "backward", "forward" values were added.
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axis
+        indicated by `axis`, or the last one if `axis` is not specified.
+        If `n` is even, the length of the transformed axis is ``(n/2)+1``.
+        If `n` is odd, the length is ``(n+1)/2``.
+
+    Raises
+    ------
+    IndexError
+        If `axis` is not a valid axis of `a`.
+
+    See Also
+    --------
+    numpy.fft : For definition of the DFT and conventions used.
+    irfft : The inverse of `rfft`.
+    fft : The one-dimensional FFT of general (complex) input.
+    fftn : The *n*-dimensional FFT.
+    rfftn : The *n*-dimensional FFT of real input.
+ + Notes + ----- + When the DFT is computed for purely real input, the output is + Hermitian-symmetric, i.e. the negative frequency terms are just the complex + conjugates of the corresponding positive-frequency terms, and the + negative-frequency terms are therefore redundant. This function does not + compute the negative frequency terms, and the length of the transformed + axis of the output is therefore ``n//2 + 1``. + + When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains + the zero-frequency term 0*fs, which is real due to Hermitian symmetry. + + If `n` is even, ``A[-1]`` contains the term representing both positive + and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely + real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains + the largest positive frequency (fs/2*(n-1)/n), and is complex in the + general case. + + If the input `a` contains an imaginary part, it is silently discarded. + + Examples + -------- + >>> np.fft.fft([0, 1, 0, 0]) + array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j]) # may vary + >>> np.fft.rfft([0, 1, 0, 0]) + array([ 1.+0.j, 0.-1.j, -1.+0.j]) # may vary + + Notice how the final element of the `fft` output is the complex conjugate + of the second element, for real input. For `rfft`, this symmetry is + exploited to compute only the non-negative frequency terms. + + """ + a = asarray(a) + if n is None: + n = a.shape[axis] + inv_norm = _get_forward_norm(n, norm) + output = _raw_fft(a, n, axis, True, True, inv_norm) + return output + + +@array_function_dispatch(_fft_dispatcher) +def irfft(a, n=None, axis=-1, norm=None): + """ + Computes the inverse of `rfft`. + + This function computes the inverse of the one-dimensional *n*-point + discrete Fourier Transform of real input computed by `rfft`. + In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical + accuracy. (See Notes below for why ``len(a)`` is necessary here.) + + The input is expected to be in the form returned by `rfft`, i.e. the + real zero-frequency term followed by the complex positive frequency terms + in order of increasing frequency. Since the discrete Fourier Transform of + real input is Hermitian-symmetric, the negative frequency terms are taken + to be the complex conjugates of the corresponding positive frequency terms. + + Parameters + ---------- + a : array_like + The input array. + n : int, optional + Length of the transformed axis of the output. + For `n` output points, ``n//2+1`` input points are necessary. If the + input is longer than this, it is cropped. If it is shorter than this, + it is padded with zeros. If `n` is not given, it is taken to be + ``2*(m-1)`` where ``m`` is the length of the input along the axis + specified by `axis`. + axis : int, optional + Axis over which to compute the inverse FFT. If not given, the last + axis is used. + norm : {"backward", "ortho", "forward"}, optional + .. versionadded:: 1.10.0 + + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + Returns + ------- + out : ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + The length of the transformed axis is `n`, or, if `n` is not given, + ``2*(m-1)`` where ``m`` is the length of the transformed axis of the + input. 
To get an odd number of output points, `n` must be specified.
+
+    Raises
+    ------
+    IndexError
+        If `axis` is not a valid axis of `a`.
+
+    See Also
+    --------
+    numpy.fft : For definition of the DFT and conventions used.
+    rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
+    fft : The one-dimensional FFT.
+    irfft2 : The inverse of the two-dimensional FFT of real input.
+    irfftn : The inverse of the *n*-dimensional FFT of real input.
+
+    Notes
+    -----
+    Returns the real valued `n`-point inverse discrete Fourier transform
+    of `a`, where `a` contains the non-negative frequency terms of a
+    Hermitian-symmetric sequence. `n` is the length of the result, not the
+    input.
+
+    If you specify an `n` such that `a` must be zero-padded or truncated, the
+    extra/removed values will be added/removed at high frequencies. One can
+    thus resample a series to `m` points via Fourier interpolation by:
+    ``a_resamp = irfft(rfft(a), m)``.
+
+    The correct interpretation of the Hermitian input depends on the length of
+    the original data, as given by `n`. This is because each input shape could
+    correspond to either an odd or even length signal. By default, `irfft`
+    assumes an even output length which puts the last entry at the Nyquist
+    frequency, aliasing it with its symmetric counterpart. By Hermitian
+    symmetry, the value is thus treated as purely real. To avoid losing
+    information, the correct length of the real input **must** be given.
+
+    Examples
+    --------
+    >>> np.fft.ifft([1, -1j, -1, 1j])
+    array([0.+0.j,  1.+0.j,  0.+0.j,  0.+0.j]) # may vary
+    >>> np.fft.irfft([1, -1j, -1])
+    array([0.,  1.,  0.,  0.])
+
+    Notice how the last term in the input to the ordinary `ifft` is the
+    complex conjugate of the second term, and the output has zero imaginary
+    part everywhere. When calling `irfft`, the negative frequencies are not
+    specified, and the output array is purely real.
+
+    """
+    a = asarray(a)
+    if n is None:
+        n = (a.shape[axis] - 1) * 2
+    inv_norm = _get_backward_norm(n, norm)
+    output = _raw_fft(a, n, axis, True, False, inv_norm)
+    return output
+
+
+@array_function_dispatch(_fft_dispatcher)
+def hfft(a, n=None, axis=-1, norm=None):
+    """
+    Compute the FFT of a signal that has Hermitian symmetry, i.e., a real
+    spectrum.
+
+    Parameters
+    ----------
+    a : array_like
+        The input array.
+    n : int, optional
+        Length of the transformed axis of the output. For `n` output
+        points, ``n//2 + 1`` input points are necessary. If the input is
+        longer than this, it is cropped. If it is shorter than this, it is
+        padded with zeros. If `n` is not given, it is taken to be ``2*(m-1)``
+        where ``m`` is the length of the input along the axis specified by
+        `axis`.
+    axis : int, optional
+        Axis over which to compute the FFT. If not given, the last
+        axis is used.
+    norm : {"backward", "ortho", "forward"}, optional
+        .. versionadded:: 1.10.0
+
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+            The "backward", "forward" values were added.
+
+    Returns
+    -------
+    out : ndarray
+        The truncated or zero-padded input, transformed along the axis
+        indicated by `axis`, or the last one if `axis` is not specified.
+        The length of the transformed axis is `n`, or, if `n` is not given,
+        ``2*m - 2`` where ``m`` is the length of the transformed axis of
+        the input.
To get an odd number of output points, `n` must be
+        specified, for instance as ``2*m - 1`` in the typical case.
+
+    Raises
+    ------
+    IndexError
+        If `axis` is not a valid axis of `a`.
+
+    See also
+    --------
+    rfft : Compute the one-dimensional FFT for real input.
+    ihfft : The inverse of `hfft`.
+
+    Notes
+    -----
+    `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
+    opposite case: here the signal has Hermitian symmetry in the time
+    domain and is real in the frequency domain. So here it's `hfft` for
+    which you must supply the length of the result if it is to be odd:
+
+    * even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error,
+    * odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error.
+
+    The correct interpretation of the Hermitian input depends on the length of
+    the original data, as given by `n`. This is because each input shape could
+    correspond to either an odd or even length signal. By default, `hfft`
+    assumes an even output length which puts the last entry at the Nyquist
+    frequency, aliasing it with its symmetric counterpart. By Hermitian
+    symmetry, the value is thus treated as purely real. To avoid losing
+    information, the shape of the full signal **must** be given.
+
+    Examples
+    --------
+    >>> signal = np.array([1, 2, 3, 4, 3, 2])
+    >>> np.fft.fft(signal)
+    array([15.+0.j,  -4.+0.j,   0.+0.j,  -1.-0.j,   0.+0.j,  -4.+0.j]) # may vary
+    >>> np.fft.hfft(signal[:4]) # Input first half of signal
+    array([15.,  -4.,   0.,  -1.,   0.,  -4.])
+    >>> np.fft.hfft(signal, 6)  # Input entire signal and truncate
+    array([15.,  -4.,   0.,  -1.,   0.,  -4.])
+
+
+    >>> signal = np.array([[1, 1.j], [-1.j, 2]])
+    >>> np.conj(signal.T) - signal   # check Hermitian symmetry
+    array([[ 0.-0.j,  -0.+0.j], # may vary
+           [ 0.+0.j,  0.-0.j]])
+    >>> freq_spectrum = np.fft.hfft(signal)
+    >>> freq_spectrum
+    array([[ 1.,  1.],
+           [ 2., -2.]])
+
+    """
+    a = asarray(a)
+    if n is None:
+        n = (a.shape[axis] - 1) * 2
+    new_norm = _swap_direction(norm)
+    output = irfft(conjugate(a), n, axis, norm=new_norm)
+    return output
+
+
+@array_function_dispatch(_fft_dispatcher)
+def ihfft(a, n=None, axis=-1, norm=None):
+    """
+    Compute the inverse FFT of a signal that has Hermitian symmetry.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    n : int, optional
+        Length of the inverse FFT, the number of points along
+        transformation axis in the input to use.  If `n` is smaller than
+        the length of the input, the input is cropped.  If it is larger,
+        the input is padded with zeros. If `n` is not given, the length of
+        the input along the axis specified by `axis` is used.
+    axis : int, optional
+        Axis over which to compute the inverse FFT. If not given, the last
+        axis is used.
+    norm : {"backward", "ortho", "forward"}, optional
+        .. versionadded:: 1.10.0
+
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+            The "backward", "forward" values were added.
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axis
+        indicated by `axis`, or the last one if `axis` is not specified.
+        The length of the transformed axis is ``n//2 + 1``.
+
+    See also
+    --------
+    hfft, irfft
+
+    Notes
+    -----
+    `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
+    opposite case: here the signal has Hermitian symmetry in the time
+    domain and is real in the frequency domain.
So here it's `hfft` for
+    which you must supply the length of the result if it is to be odd:
+
+    * even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error,
+    * odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error.
+
+    Examples
+    --------
+    >>> spectrum = np.array([ 15, -4, 0, -1, 0, -4])
+    >>> np.fft.ifft(spectrum)
+    array([1.+0.j,  2.+0.j,  3.+0.j,  4.+0.j,  3.+0.j,  2.+0.j]) # may vary
+    >>> np.fft.ihfft(spectrum)
+    array([ 1.-0.j,  2.-0.j,  3.-0.j,  4.-0.j]) # may vary
+
+    """
+    a = asarray(a)
+    if n is None:
+        n = a.shape[axis]
+    new_norm = _swap_direction(norm)
+    output = conjugate(rfft(a, n, axis, norm=new_norm))
+    return output
+
+
+def _cook_nd_args(a, s=None, axes=None, invreal=0):
+    if s is None:
+        shapeless = 1
+        if axes is None:
+            s = list(a.shape)
+        else:
+            s = take(a.shape, axes)
+    else:
+        shapeless = 0
+    s = list(s)
+    if axes is None:
+        axes = list(range(-len(s), 0))
+    if len(s) != len(axes):
+        raise ValueError("Shape and axes have different lengths.")
+    if invreal and shapeless:
+        s[-1] = (a.shape[axes[-1]] - 1) * 2
+    return s, axes
+
+
+def _raw_fftnd(a, s=None, axes=None, function=fft, norm=None):
+    a = asarray(a)
+    s, axes = _cook_nd_args(a, s, axes)
+    itl = list(range(len(axes)))
+    itl.reverse()
+    for ii in itl:
+        a = function(a, n=s[ii], axis=axes[ii], norm=norm)
+    return a
+
+
+def _fftn_dispatcher(a, s=None, axes=None, norm=None):
+    return (a,)
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def fftn(a, s=None, axes=None, norm=None):
+    """
+    Compute the N-dimensional discrete Fourier Transform.
+
+    This function computes the *N*-dimensional discrete Fourier Transform over
+    any number of axes in an *M*-dimensional array by means of the Fast Fourier
+    Transform (FFT).
+
+    Parameters
+    ----------
+    a : array_like
+        Input array, can be complex.
+    s : sequence of ints, optional
+        Shape (length of each transformed axis) of the output
+        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
+        This corresponds to ``n`` for ``fft(x, n)``.
+        Along any axis, if the given shape is smaller than that of the input,
+        the input is cropped. If it is larger, the input is padded with zeros.
+        If `s` is not given, the shape of the input along the axes specified
+        by `axes` is used.
+    axes : sequence of ints, optional
+        Axes over which to compute the FFT. If not given, the last ``len(s)``
+        axes are used, or all axes if `s` is also not specified.
+        Repeated indices in `axes` mean that the transform over that axis is
+        performed multiple times.
+    norm : {"backward", "ortho", "forward"}, optional
+        .. versionadded:: 1.10.0
+
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+            The "backward", "forward" values were added.
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axes
+        indicated by `axes`, or by a combination of `s` and `a`,
+        as explained in the parameters section above.
+
+    Raises
+    ------
+    ValueError
+        If `s` and `axes` have different lengths.
+    IndexError
+        If an element of `axes` is larger than the number of axes of `a`.
+
+    See Also
+    --------
+    numpy.fft : Overall view of discrete Fourier transforms, with definitions
+        and conventions used.
+    ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
+    fft : The one-dimensional FFT, with definitions and conventions used.
+    rfftn : The *n*-dimensional FFT of real input.
+    fft2 : The two-dimensional FFT.
+    fftshift : Shifts zero-frequency terms to the center of the array.
+
+    Notes
+    -----
+    The output, analogously to `fft`, contains the term for zero frequency in
+    the low-order corner of all axes, the positive frequency terms in the
+    first half of all axes, the term for the Nyquist frequency in the middle
+    of all axes and the negative frequency terms in the second half of all
+    axes, in order of decreasingly negative frequency.
+
+    See `numpy.fft` for details, definitions and conventions used.
+
+    Examples
+    --------
+    >>> a = np.mgrid[:3, :3, :3][0]
+    >>> np.fft.fftn(a, axes=(1, 2))
+    array([[[ 0.+0.j,   0.+0.j,   0.+0.j], # may vary
+            [ 0.+0.j,   0.+0.j,   0.+0.j],
+            [ 0.+0.j,   0.+0.j,   0.+0.j]],
+           [[ 9.+0.j,   0.+0.j,   0.+0.j],
+            [ 0.+0.j,   0.+0.j,   0.+0.j],
+            [ 0.+0.j,   0.+0.j,   0.+0.j]],
+           [[18.+0.j,   0.+0.j,   0.+0.j],
+            [ 0.+0.j,   0.+0.j,   0.+0.j],
+            [ 0.+0.j,   0.+0.j,   0.+0.j]]])
+    >>> np.fft.fftn(a, (2, 2), axes=(0, 1))
+    array([[[ 2.+0.j,  2.+0.j,  2.+0.j], # may vary
+            [ 0.+0.j,  0.+0.j,  0.+0.j]],
+           [[-2.+0.j, -2.+0.j, -2.+0.j],
+            [ 0.+0.j,  0.+0.j,  0.+0.j]]])
+
+    >>> import matplotlib.pyplot as plt
+    >>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
+    ...                      2 * np.pi * np.arange(200) / 34)
+    >>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
+    >>> FS = np.fft.fftn(S)
+    >>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))
+    <matplotlib.image.AxesImage object at 0x...>
+    >>> plt.show()
+
+    """
+    return _raw_fftnd(a, s, axes, fft, norm)
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def ifftn(a, s=None, axes=None, norm=None):
+    """
+    Compute the N-dimensional inverse discrete Fourier Transform.
+
+    This function computes the inverse of the N-dimensional discrete
+    Fourier Transform over any number of axes in an M-dimensional array by
+    means of the Fast Fourier Transform (FFT).  In other words,
+    ``ifftn(fftn(a)) == a`` to within numerical accuracy.
+    For a description of the definitions and conventions used, see `numpy.fft`.
+
+    The input, analogously to `ifft`, should be ordered in the same way as is
+    returned by `fftn`, i.e. it should have the term for zero frequency
+    in all axes in the low-order corner, the positive frequency terms in the
+    first half of all axes, the term for the Nyquist frequency in the middle
+    of all axes and the negative frequency terms in the second half of all
+    axes, in order of decreasingly negative frequency.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array, can be complex.
+    s : sequence of ints, optional
+        Shape (length of each transformed axis) of the output
+        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
+        This corresponds to ``n`` for ``ifft(x, n)``.
+        Along any axis, if the given shape is smaller than that of the input,
+        the input is cropped.  If it is larger, the input is padded with
+        zeros.  If `s` is not given, the shape of the input along the axes
+        specified by `axes` is used.  See the Notes section for the issue
+        with `ifft` zero padding.
+    axes : sequence of ints, optional
+        Axes over which to compute the IFFT.  If not given, the last
+        ``len(s)`` axes are used, or all axes if `s` is also not specified.
+        Repeated indices in `axes` mean that the inverse transform over that
+        axis is performed multiple times.
+    norm : {"backward", "ortho", "forward"}, optional
+        .. versionadded:: 1.10.0
+
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+        The "backward", "forward" values were added.
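+
+        For instance, for an all-ones ``2 x 2`` array (total size ``N = 4``)
+        the three modes scale the inverse transform by ``1/N``,
+        ``1/sqrt(N)`` and ``1`` respectively::
+
+            >>> x = np.ones((2, 2))
+            >>> np.fft.ifftn(x)[0, 0]                  # "backward"
+            (1+0j)
+            >>> np.fft.ifftn(x, norm="ortho")[0, 0]
+            (2+0j)
+            >>> np.fft.ifftn(x, norm="forward")[0, 0]
+            (4+0j)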
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axes
+        indicated by `axes`, or by a combination of `s` and `a`,
+        as explained in the parameters section above.
+
+    Raises
+    ------
+    ValueError
+        If `s` and `axes` have different length.
+    IndexError
+        If an element of `axes` is larger than the number of axes of `a`.
+
+    See Also
+    --------
+    numpy.fft : Overall view of discrete Fourier transforms, with definitions
+        and conventions used.
+    fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
+    ifft : The one-dimensional inverse FFT.
+    ifft2 : The two-dimensional inverse FFT.
+    ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
+        of array.
+
+    Notes
+    -----
+    See `numpy.fft` for definitions and conventions used.
+
+    Zero-padding, analogously with `ifft`, is performed by appending zeros to
+    the input along the specified dimension.  Although this is the common
+    approach, it might lead to surprising results.  If another form of zero
+    padding is desired, it must be performed before `ifftn` is called.
+
+    Examples
+    --------
+    >>> a = np.eye(4)
+    >>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
+    array([[1.+0.j,  0.+0.j,  0.+0.j,  0.+0.j], # may vary
+           [0.+0.j,  1.+0.j,  0.+0.j,  0.+0.j],
+           [0.+0.j,  0.+0.j,  1.+0.j,  0.+0.j],
+           [0.+0.j,  0.+0.j,  0.+0.j,  1.+0.j]])
+
+    Create and plot an image with band-limited frequency content:
+
+    >>> import matplotlib.pyplot as plt
+    >>> n = np.zeros((200,200), dtype=complex)
+    >>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
+    >>> im = np.fft.ifftn(n).real
+    >>> plt.imshow(im)
+    <matplotlib.image.AxesImage object at 0x...>
+    >>> plt.show()
+
+    """
+    return _raw_fftnd(a, s, axes, ifft, norm)
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def fft2(a, s=None, axes=(-2, -1), norm=None):
+    """
+    Compute the 2-dimensional discrete Fourier Transform.
+
+    This function computes the *n*-dimensional discrete Fourier Transform
+    over any axes in an *M*-dimensional array by means of the
+    Fast Fourier Transform (FFT).  By default, the transform is computed over
+    the last two axes of the input array, i.e., a 2-dimensional FFT.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array, can be complex.
+    s : sequence of ints, optional
+        Shape (length of each transformed axis) of the output
+        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
+        This corresponds to ``n`` for ``fft(x, n)``.
+        Along each axis, if the given shape is smaller than that of the input,
+        the input is cropped.  If it is larger, the input is padded with
+        zeros.  If `s` is not given, the shape of the input along the axes
+        specified by `axes` is used.
+    axes : sequence of ints, optional
+        Axes over which to compute the FFT.  If not given, the last two
+        axes are used.  A repeated index in `axes` means the transform over
+        that axis is performed multiple times.  A one-element sequence means
+        that a one-dimensional FFT is performed.
+    norm : {"backward", "ortho", "forward"}, optional
+        .. versionadded:: 1.10.0
+
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+        The "backward", "forward" values were added.
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axes
+        indicated by `axes`, or the last two axes if `axes` is not given.
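+
+        For example, `s` crops or pads each transformed axis::
+
+            >>> np.fft.fft2(np.ones((4, 4)), s=(2, 8)).shape
+            (2, 8)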
+
+    Raises
+    ------
+    ValueError
+        If `s` and `axes` have different length, or `axes` not given and
+        ``len(s) != 2``.
+    IndexError
+        If an element of `axes` is larger than the number of axes of `a`.
+
+    See Also
+    --------
+    numpy.fft : Overall view of discrete Fourier transforms, with definitions
+        and conventions used.
+    ifft2 : The inverse two-dimensional FFT.
+    fft : The one-dimensional FFT.
+    fftn : The *n*-dimensional FFT.
+    fftshift : Shifts zero-frequency terms to the center of the array.
+        For two-dimensional input, swaps first and third quadrants, and second
+        and fourth quadrants.
+
+    Notes
+    -----
+    `fft2` is just `fftn` with a different default for `axes`.
+
+    The output, analogously to `fft`, contains the term for zero frequency in
+    the low-order corner of the transformed axes, the positive frequency terms
+    in the first half of these axes, the term for the Nyquist frequency in the
+    middle of the axes and the negative frequency terms in the second half of
+    the axes, in order of decreasingly negative frequency.
+
+    See `fftn` for details and a plotting example, and `numpy.fft` for
+    definitions and conventions used.
+
+    Examples
+    --------
+    >>> a = np.mgrid[:5, :5][0]
+    >>> np.fft.fft2(a)
+    array([[ 50.  +0.j        ,   0.  +0.j        ,   0.  +0.j        , # may vary
+              0.  +0.j        ,   0.  +0.j        ],
+           [-12.5+17.20477401j,   0.  +0.j        ,   0.  +0.j        ,
+              0.  +0.j        ,   0.  +0.j        ],
+           [-12.5 +4.0614962j ,   0.  +0.j        ,   0.  +0.j        ,
+              0.  +0.j        ,   0.  +0.j        ],
+           [-12.5 -4.0614962j ,   0.  +0.j        ,   0.  +0.j        ,
+              0.  +0.j        ,   0.  +0.j        ],
+           [-12.5-17.20477401j,   0.  +0.j        ,   0.  +0.j        ,
+              0.  +0.j        ,   0.  +0.j        ]])
+
+    """
+    return _raw_fftnd(a, s, axes, fft, norm)
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def ifft2(a, s=None, axes=(-2, -1), norm=None):
+    """
+    Compute the 2-dimensional inverse discrete Fourier Transform.
+
+    This function computes the inverse of the 2-dimensional discrete Fourier
+    Transform over any number of axes in an M-dimensional array by means of
+    the Fast Fourier Transform (FFT).  In other words, ``ifft2(fft2(a)) == a``
+    to within numerical accuracy.  By default, the inverse transform is
+    computed over the last two axes of the input array.
+
+    The input, analogously to `ifft`, should be ordered in the same way as is
+    returned by `fft2`, i.e. it should have the term for zero frequency
+    in the low-order corner of the two axes, the positive frequency terms in
+    the first half of these axes, the term for the Nyquist frequency in the
+    middle of the axes and the negative frequency terms in the second half of
+    both axes, in order of decreasingly negative frequency.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array, can be complex.
+    s : sequence of ints, optional
+        Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
+        ``s[1]`` to axis 1, etc.).  This corresponds to `n` for
+        ``ifft(x, n)``.
+        Along each axis, if the given shape is smaller than that of the input,
+        the input is cropped.  If it is larger, the input is padded with
+        zeros.  If `s` is not given, the shape of the input along the axes
+        specified by `axes` is used.  See the Notes section for the issue
+        with `ifft` zero padding.
+    axes : sequence of ints, optional
+        Axes over which to compute the FFT.  If not given, the last two
+        axes are used.  A repeated index in `axes` means the transform over
+        that axis is performed multiple times.  A one-element sequence means
+        that a one-dimensional FFT is performed.
+    norm : {"backward", "ortho", "forward"}, optional
+        .. versionadded:: 1.10.0
+
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+        The "backward", "forward" values were added.
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axes
+        indicated by `axes`, or the last two axes if `axes` is not given.
+
+    Raises
+    ------
+    ValueError
+        If `s` and `axes` have different length, or `axes` not given and
+        ``len(s) != 2``.
+    IndexError
+        If an element of `axes` is larger than the number of axes of `a`.
+
+    See Also
+    --------
+    numpy.fft : Overall view of discrete Fourier transforms, with definitions
+        and conventions used.
+    fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
+    ifftn : The inverse of the *n*-dimensional FFT.
+    fft : The one-dimensional FFT.
+    ifft : The one-dimensional inverse FFT.
+
+    Notes
+    -----
+    `ifft2` is just `ifftn` with a different default for `axes`.
+
+    See `ifftn` for details and a plotting example, and `numpy.fft` for
+    definition and conventions used.
+
+    Zero-padding, analogously with `ifft`, is performed by appending zeros to
+    the input along the specified dimension.  Although this is the common
+    approach, it might lead to surprising results.  If another form of zero
+    padding is desired, it must be performed before `ifft2` is called.
+
+    Examples
+    --------
+    >>> a = 4 * np.eye(4)
+    >>> np.fft.ifft2(a)
+    array([[1.+0.j,  0.+0.j,  0.+0.j,  0.+0.j], # may vary
+           [0.+0.j,  0.+0.j,  0.+0.j,  1.+0.j],
+           [0.+0.j,  0.+0.j,  1.+0.j,  0.+0.j],
+           [0.+0.j,  1.+0.j,  0.+0.j,  0.+0.j]])
+
+    """
+    return _raw_fftnd(a, s, axes, ifft, norm)
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def rfftn(a, s=None, axes=None, norm=None):
+    """
+    Compute the N-dimensional discrete Fourier Transform for real input.
+
+    This function computes the N-dimensional discrete Fourier Transform over
+    any number of axes in an M-dimensional real array by means of the Fast
+    Fourier Transform (FFT).  By default, all axes are transformed, with the
+    real transform performed over the last axis, while the remaining
+    transforms are complex.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array, taken to be real.
+    s : sequence of ints, optional
+        Shape (length along each transformed axis) to use from the input.
+        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
+        The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
+        for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
+        Along any axis, if the given shape is smaller than that of the input,
+        the input is cropped.  If it is larger, the input is padded with
+        zeros.  If `s` is not given, the shape of the input along the axes
+        specified by `axes` is used.
+    axes : sequence of ints, optional
+        Axes over which to compute the FFT.  If not given, the last ``len(s)``
+        axes are used, or all axes if `s` is also not specified.
+    norm : {"backward", "ortho", "forward"}, optional
+        .. versionadded:: 1.10.0
+
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+        The "backward", "forward" values were added.
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axes
+        indicated by `axes`, or by a combination of `s` and `a`,
+        as explained in the parameters section above.
+        The length of the last axis transformed will be ``s[-1]//2+1``,
+        while the remaining transformed axes will have lengths according to
+        `s`, or unchanged from the input.
+
+    Raises
+    ------
+    ValueError
+        If `s` and `axes` have different length.
+    IndexError
+        If an element of `axes` is larger than the number of axes of `a`.
+
+    See Also
+    --------
+    irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional
+        FFT of real input.
+    fft : The one-dimensional FFT, with definitions and conventions used.
+    rfft : The one-dimensional FFT of real input.
+    fftn : The n-dimensional FFT.
+    rfft2 : The two-dimensional FFT of real input.
+
+    Notes
+    -----
+    The transform for real input is performed over the last transformation
+    axis, as by `rfft`, then the transform over the remaining axes is
+    performed as by `fftn`.  The order of the output is as for `rfft` for the
+    final transformation axis, and as for `fftn` for the remaining
+    transformation axes.
+
+    See `fft` for details, definitions and conventions used.
+
+    Examples
+    --------
+    >>> a = np.ones((2, 2, 2))
+    >>> np.fft.rfftn(a)
+    array([[[8.+0.j,  0.+0.j], # may vary
+            [0.+0.j,  0.+0.j]],
+           [[0.+0.j,  0.+0.j],
+            [0.+0.j,  0.+0.j]]])
+
+    >>> np.fft.rfftn(a, axes=(2, 0))
+    array([[[4.+0.j,  0.+0.j], # may vary
+            [4.+0.j,  0.+0.j]],
+           [[0.+0.j,  0.+0.j],
+            [0.+0.j,  0.+0.j]]])
+
+    """
+    a = asarray(a)
+    s, axes = _cook_nd_args(a, s, axes)
+    a = rfft(a, s[-1], axes[-1], norm)
+    for ii in range(len(axes)-1):
+        a = fft(a, s[ii], axes[ii], norm)
+    return a
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def rfft2(a, s=None, axes=(-2, -1), norm=None):
+    """
+    Compute the 2-dimensional FFT of a real array.
+
+    Parameters
+    ----------
+    a : array
+        Input array, taken to be real.
+    s : sequence of ints, optional
+        Shape of the FFT.
+    axes : sequence of ints, optional
+        Axes over which to compute the FFT.
+    norm : {"backward", "ortho", "forward"}, optional
+        .. versionadded:: 1.10.0
+
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+        The "backward", "forward" values were added.
+
+    Returns
+    -------
+    out : ndarray
+        The result of the real 2-D FFT.
+
+    See Also
+    --------
+    rfftn : Compute the N-dimensional discrete Fourier Transform for real
+        input.
+
+    Notes
+    -----
+    This is really just `rfftn` with different default behavior.
+    For more details see `rfftn`.
+
+    Examples
+    --------
+    >>> a = np.mgrid[:5, :5][0]
+    >>> np.fft.rfft2(a)
+    array([[ 50.  +0.j        ,   0.  +0.j        ,   0.  +0.j        ],
+           [-12.5+17.20477401j,   0.  +0.j        ,   0.  +0.j        ],
+           [-12.5 +4.0614962j ,   0.  +0.j        ,   0.  +0.j        ],
+           [-12.5 -4.0614962j ,   0.  +0.j        ,   0.  +0.j        ],
+           [-12.5-17.20477401j,   0.  +0.j        ,   0.  +0.j        ]])
+    """
+    return rfftn(a, s, axes, norm)
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def irfftn(a, s=None, axes=None, norm=None):
+    """
+    Computes the inverse of `rfftn`.
+
+    This function computes the inverse of the N-dimensional discrete
+    Fourier Transform for real input over any number of axes in an
+    M-dimensional array by means of the Fast Fourier Transform (FFT).  In
+    other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical
+    accuracy.  (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`,
+    and for the same reason.)
+
+    The input should be ordered in the same way as is returned by `rfftn`,
+    i.e. as for `irfft` for the final transformation axis, and as for `ifftn`
+    along all the other axes.
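+
+    A minimal round trip, passing the original shape so that the even/odd
+    ambiguity described in the Notes below does not arise::
+
+        >>> x = np.ones((3, 5))
+        >>> np.allclose(np.fft.irfftn(np.fft.rfftn(x), s=x.shape), x)
+        True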
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    s : sequence of ints, optional
+        Shape (length of each transformed axis) of the output
+        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).  `s` is also
+        the number of input points used along each axis, except for the last
+        axis, where ``s[-1]//2+1`` points of the input are used.
+        Along any axis, if the shape indicated by `s` is smaller than that of
+        the input, the input is cropped.  If it is larger, the input is
+        padded with zeros.  If `s` is not given, the shape of the input along
+        the axes specified by `axes` is used, except for the last axis, which
+        is taken to be ``2*(m-1)`` where ``m`` is the length of the input
+        along that axis.
+    axes : sequence of ints, optional
+        Axes over which to compute the inverse FFT.  If not given, the last
+        ``len(s)`` axes are used, or all axes if `s` is also not specified.
+        Repeated indices in `axes` mean that the inverse transform over that
+        axis is performed multiple times.
+    norm : {"backward", "ortho", "forward"}, optional
+        .. versionadded:: 1.10.0
+
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+        The "backward", "forward" values were added.
+
+    Returns
+    -------
+    out : ndarray
+        The truncated or zero-padded input, transformed along the axes
+        indicated by `axes`, or by a combination of `s` and `a`,
+        as explained in the parameters section above.
+        The length of each transformed axis is as given by the corresponding
+        element of `s`, or the length of the input in every axis except for
+        the last one if `s` is not given.  In the final transformed axis the
+        length of the output when `s` is not given is ``2*(m-1)`` where ``m``
+        is the length of the final transformed axis of the input.  To get an
+        odd number of output points in the final axis, `s` must be specified.
+
+    Raises
+    ------
+    ValueError
+        If `s` and `axes` have different length.
+    IndexError
+        If an element of `axes` is larger than the number of axes of `a`.
+
+    See Also
+    --------
+    rfftn : The forward n-dimensional FFT of real input,
+        of which `irfftn` is the inverse.
+    fft : The one-dimensional FFT, with definitions and conventions used.
+    irfft : The inverse of the one-dimensional FFT of real input.
+    irfft2 : The inverse of the two-dimensional FFT of real input.
+
+    Notes
+    -----
+    See `fft` for definitions and conventions used.
+
+    See `rfft` for definitions and conventions used for real input.
+
+    The correct interpretation of the Hermitian input depends on the shape of
+    the original data, as given by `s`.  This is because each input shape
+    could correspond to either an odd or even length signal.  By default,
+    `irfftn` assumes an even output length which puts the last entry at the
+    Nyquist frequency, aliasing with its symmetric counterpart.  When
+    performing the final complex to real transform, the last value is thus
+    treated as purely real.  To avoid losing information, the correct shape
+    of the real input **must** be given.
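+
+    For instance, even- and odd-length signals can produce spectra of the
+    same shape, so the inverse transform cannot infer the original length
+    on its own::
+
+        >>> np.fft.rfft(np.ones(4)).shape
+        (3,)
+        >>> np.fft.rfft(np.ones(5)).shape
+        (3,)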
+ + Examples + -------- + >>> a = np.zeros((3, 2, 2)) + >>> a[0, 0, 0] = 3 * 2 * 2 + >>> np.fft.irfftn(a) + array([[[1., 1.], + [1., 1.]], + [[1., 1.], + [1., 1.]], + [[1., 1.], + [1., 1.]]]) + + """ + a = asarray(a) + s, axes = _cook_nd_args(a, s, axes, invreal=1) + for ii in range(len(axes)-1): + a = ifft(a, s[ii], axes[ii], norm) + a = irfft(a, s[-1], axes[-1], norm) + return a + + +@array_function_dispatch(_fftn_dispatcher) +def irfft2(a, s=None, axes=(-2, -1), norm=None): + """ + Computes the inverse of `rfft2`. + + Parameters + ---------- + a : array_like + The input array + s : sequence of ints, optional + Shape of the real output to the inverse FFT. + axes : sequence of ints, optional + The axes over which to compute the inverse fft. + Default is the last two axes. + norm : {"backward", "ortho", "forward"}, optional + .. versionadded:: 1.10.0 + + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + Returns + ------- + out : ndarray + The result of the inverse real 2-D FFT. + + See Also + -------- + rfft2 : The forward two-dimensional FFT of real input, + of which `irfft2` is the inverse. + rfft : The one-dimensional FFT for real input. + irfft : The inverse of the one-dimensional FFT of real input. + irfftn : Compute the inverse of the N-dimensional FFT of real input. + + Notes + ----- + This is really `irfftn` with different defaults. + For more details see `irfftn`. + + Examples + -------- + >>> a = np.mgrid[:5, :5][0] + >>> A = np.fft.rfft2(a) + >>> np.fft.irfft2(A, s=a.shape) + array([[0., 0., 0., 0., 0.], + [1., 1., 1., 1., 1.], + [2., 2., 2., 2., 2.], + [3., 3., 3., 3., 3.], + [4., 4., 4., 4., 4.]]) + """ + return irfftn(a, s, axes, norm) diff --git a/MLPY/Lib/site-packages/numpy/fft/_pocketfft_internal.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/numpy/fft/_pocketfft_internal.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..c12bfa503f6718844222a2044ac0324d922bf5f1 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/fft/_pocketfft_internal.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/numpy/fft/helper.py b/MLPY/Lib/site-packages/numpy/fft/helper.py new file mode 100644 index 0000000000000000000000000000000000000000..876d13537430350caa5d6308145883476f99c615 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/fft/helper.py @@ -0,0 +1,221 @@ +""" +Discrete Fourier Transforms - helper.py + +""" +from numpy.core import integer, empty, arange, asarray, roll +from numpy.core.overrides import array_function_dispatch, set_module + +# Created by Pearu Peterson, September 2002 + +__all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq'] + +integer_types = (int, integer) + + +def _fftshift_dispatcher(x, axes=None): + return (x,) + + +@array_function_dispatch(_fftshift_dispatcher, module='numpy.fft') +def fftshift(x, axes=None): + """ + Shift the zero-frequency component to the center of the spectrum. + + This function swaps half-spaces for all axes listed (defaults to all). + Note that ``y[0]`` is the Nyquist component only if ``len(x)`` is even. + + Parameters + ---------- + x : array_like + Input array. + axes : int or shape tuple, optional + Axes over which to shift. Default is None, which shifts all axes. + + Returns + ------- + y : ndarray + The shifted array. 
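+        Equivalently, the result is ``np.roll(x, n // 2)`` along each
+        shifted axis, which mirrors the implementation below::
+
+            >>> np.fft.fftshift(np.arange(5))
+            array([3, 4, 0, 1, 2])
+            >>> np.roll(np.arange(5), 5 // 2)
+            array([3, 4, 0, 1, 2])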
+ + See Also + -------- + ifftshift : The inverse of `fftshift`. + + Examples + -------- + >>> freqs = np.fft.fftfreq(10, 0.1) + >>> freqs + array([ 0., 1., 2., ..., -3., -2., -1.]) + >>> np.fft.fftshift(freqs) + array([-5., -4., -3., -2., -1., 0., 1., 2., 3., 4.]) + + Shift the zero-frequency component only along the second axis: + + >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3) + >>> freqs + array([[ 0., 1., 2.], + [ 3., 4., -4.], + [-3., -2., -1.]]) + >>> np.fft.fftshift(freqs, axes=(1,)) + array([[ 2., 0., 1.], + [-4., 3., 4.], + [-1., -3., -2.]]) + + """ + x = asarray(x) + if axes is None: + axes = tuple(range(x.ndim)) + shift = [dim // 2 for dim in x.shape] + elif isinstance(axes, integer_types): + shift = x.shape[axes] // 2 + else: + shift = [x.shape[ax] // 2 for ax in axes] + + return roll(x, shift, axes) + + +@array_function_dispatch(_fftshift_dispatcher, module='numpy.fft') +def ifftshift(x, axes=None): + """ + The inverse of `fftshift`. Although identical for even-length `x`, the + functions differ by one sample for odd-length `x`. + + Parameters + ---------- + x : array_like + Input array. + axes : int or shape tuple, optional + Axes over which to calculate. Defaults to None, which shifts all axes. + + Returns + ------- + y : ndarray + The shifted array. + + See Also + -------- + fftshift : Shift zero-frequency component to the center of the spectrum. + + Examples + -------- + >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3) + >>> freqs + array([[ 0., 1., 2.], + [ 3., 4., -4.], + [-3., -2., -1.]]) + >>> np.fft.ifftshift(np.fft.fftshift(freqs)) + array([[ 0., 1., 2.], + [ 3., 4., -4.], + [-3., -2., -1.]]) + + """ + x = asarray(x) + if axes is None: + axes = tuple(range(x.ndim)) + shift = [-(dim // 2) for dim in x.shape] + elif isinstance(axes, integer_types): + shift = -(x.shape[axes] // 2) + else: + shift = [-(x.shape[ax] // 2) for ax in axes] + + return roll(x, shift, axes) + + +@set_module('numpy.fft') +def fftfreq(n, d=1.0): + """ + Return the Discrete Fourier Transform sample frequencies. + + The returned float array `f` contains the frequency bin centers in cycles + per unit of the sample spacing (with zero at the start). For instance, if + the sample spacing is in seconds, then the frequency unit is cycles/second. + + Given a window length `n` and a sample spacing `d`:: + + f = [0, 1, ..., n/2-1, -n/2, ..., -1] / (d*n) if n is even + f = [0, 1, ..., (n-1)/2, -(n-1)/2, ..., -1] / (d*n) if n is odd + + Parameters + ---------- + n : int + Window length. + d : scalar, optional + Sample spacing (inverse of the sampling rate). Defaults to 1. + + Returns + ------- + f : ndarray + Array of length `n` containing the sample frequencies. + + Examples + -------- + >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float) + >>> fourier = np.fft.fft(signal) + >>> n = signal.size + >>> timestep = 0.1 + >>> freq = np.fft.fftfreq(n, d=timestep) + >>> freq + array([ 0. , 1.25, 2.5 , ..., -3.75, -2.5 , -1.25]) + + """ + if not isinstance(n, integer_types): + raise ValueError("n should be an integer") + val = 1.0 / (n * d) + results = empty(n, int) + N = (n-1)//2 + 1 + p1 = arange(0, N, dtype=int) + results[:N] = p1 + p2 = arange(-(n//2), 0, dtype=int) + results[N:] = p2 + return results * val + + +@set_module('numpy.fft') +def rfftfreq(n, d=1.0): + """ + Return the Discrete Fourier Transform sample frequencies + (for usage with rfft, irfft). + + The returned float array `f` contains the frequency bin centers in cycles + per unit of the sample spacing (with zero at the start). 
For instance, if + the sample spacing is in seconds, then the frequency unit is cycles/second. + + Given a window length `n` and a sample spacing `d`:: + + f = [0, 1, ..., n/2-1, n/2] / (d*n) if n is even + f = [0, 1, ..., (n-1)/2-1, (n-1)/2] / (d*n) if n is odd + + Unlike `fftfreq` (but like `scipy.fftpack.rfftfreq`) + the Nyquist frequency component is considered to be positive. + + Parameters + ---------- + n : int + Window length. + d : scalar, optional + Sample spacing (inverse of the sampling rate). Defaults to 1. + + Returns + ------- + f : ndarray + Array of length ``n//2 + 1`` containing the sample frequencies. + + Examples + -------- + >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5, -3, 4], dtype=float) + >>> fourier = np.fft.rfft(signal) + >>> n = signal.size + >>> sample_rate = 100 + >>> freq = np.fft.fftfreq(n, d=1./sample_rate) + >>> freq + array([ 0., 10., 20., ..., -30., -20., -10.]) + >>> freq = np.fft.rfftfreq(n, d=1./sample_rate) + >>> freq + array([ 0., 10., 20., 30., 40., 50.]) + + """ + if not isinstance(n, integer_types): + raise ValueError("n should be an integer") + val = 1.0/(n*d) + N = n//2 + 1 + results = arange(0, N, dtype=int) + return results * val diff --git a/MLPY/Lib/site-packages/numpy/fft/setup.py b/MLPY/Lib/site-packages/numpy/fft/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..198eadbe73c9aab5cec12ea087f043f82cca9d54 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/fft/setup.py @@ -0,0 +1,22 @@ +import sys + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('fft', parent_package, top_path) + + config.add_subpackage('tests') + + # AIX needs to be told to use large file support - at all times + defs = [('_LARGE_FILES', None)] if sys.platform[:3] == "aix" else [] + # Configure pocketfft_internal + config.add_extension('_pocketfft_internal', + sources=['_pocketfft.c'], + define_macros=defs, + ) + + config.add_data_files('*.pyi') + return config + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(configuration=configuration) diff --git a/MLPY/Lib/site-packages/numpy/fft/tests/__init__.py b/MLPY/Lib/site-packages/numpy/fft/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/numpy/fft/tests/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/fft/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..220ec90afc5b928ccdd0bae7fda084ec54818f22 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/fft/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/fft/tests/__pycache__/test_helper.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/fft/tests/__pycache__/test_helper.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f78bb285af208a3a4d5cef5367e9144f7d75b265 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/fft/tests/__pycache__/test_helper.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/fft/tests/__pycache__/test_pocketfft.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/fft/tests/__pycache__/test_pocketfft.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a76d33ddf6e3279d4b26e35bb9e2a372db157cf7 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/fft/tests/__pycache__/test_pocketfft.cpython-39.pyc differ diff --git 
a/MLPY/Lib/site-packages/numpy/fft/tests/test_helper.py b/MLPY/Lib/site-packages/numpy/fft/tests/test_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..e538e025159c71ccad6f54f5aeb70fbf593450dd --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/fft/tests/test_helper.py @@ -0,0 +1,167 @@ +"""Test functions for fftpack.helper module + +Copied from fftpack.helper by Pearu Peterson, October 2005 + +""" +import numpy as np +from numpy.testing import assert_array_almost_equal +from numpy import fft, pi + + +class TestFFTShift: + + def test_definition(self): + x = [0, 1, 2, 3, 4, -4, -3, -2, -1] + y = [-4, -3, -2, -1, 0, 1, 2, 3, 4] + assert_array_almost_equal(fft.fftshift(x), y) + assert_array_almost_equal(fft.ifftshift(y), x) + x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1] + y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4] + assert_array_almost_equal(fft.fftshift(x), y) + assert_array_almost_equal(fft.ifftshift(y), x) + + def test_inverse(self): + for n in [1, 4, 9, 100, 211]: + x = np.random.random((n,)) + assert_array_almost_equal(fft.ifftshift(fft.fftshift(x)), x) + + def test_axes_keyword(self): + freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]] + shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]] + assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shifted) + assert_array_almost_equal(fft.fftshift(freqs, axes=0), + fft.fftshift(freqs, axes=(0,))) + assert_array_almost_equal(fft.ifftshift(shifted, axes=(0, 1)), freqs) + assert_array_almost_equal(fft.ifftshift(shifted, axes=0), + fft.ifftshift(shifted, axes=(0,))) + + assert_array_almost_equal(fft.fftshift(freqs), shifted) + assert_array_almost_equal(fft.ifftshift(shifted), freqs) + + def test_uneven_dims(self): + """ Test 2D input, which has uneven dimension sizes """ + freqs = [ + [0, 1], + [2, 3], + [4, 5] + ] + + # shift in dimension 0 + shift_dim0 = [ + [4, 5], + [0, 1], + [2, 3] + ] + assert_array_almost_equal(fft.fftshift(freqs, axes=0), shift_dim0) + assert_array_almost_equal(fft.ifftshift(shift_dim0, axes=0), freqs) + assert_array_almost_equal(fft.fftshift(freqs, axes=(0,)), shift_dim0) + assert_array_almost_equal(fft.ifftshift(shift_dim0, axes=[0]), freqs) + + # shift in dimension 1 + shift_dim1 = [ + [1, 0], + [3, 2], + [5, 4] + ] + assert_array_almost_equal(fft.fftshift(freqs, axes=1), shift_dim1) + assert_array_almost_equal(fft.ifftshift(shift_dim1, axes=1), freqs) + + # shift in both dimensions + shift_dim_both = [ + [5, 4], + [1, 0], + [3, 2] + ] + assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shift_dim_both) + assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=(0, 1)), freqs) + assert_array_almost_equal(fft.fftshift(freqs, axes=[0, 1]), shift_dim_both) + assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=[0, 1]), freqs) + + # axes=None (default) shift in all dimensions + assert_array_almost_equal(fft.fftshift(freqs, axes=None), shift_dim_both) + assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=None), freqs) + assert_array_almost_equal(fft.fftshift(freqs), shift_dim_both) + assert_array_almost_equal(fft.ifftshift(shift_dim_both), freqs) + + def test_equal_to_original(self): + """ Test that the new (>=v1.15) implementation (see #10073) is equal to the original (<=v1.14) """ + from numpy.core import asarray, concatenate, arange, take + + def original_fftshift(x, axes=None): + """ How fftshift was implemented in v1.14""" + tmp = asarray(x) + ndim = tmp.ndim + if axes is None: + axes = list(range(ndim)) + elif isinstance(axes, int): + axes = (axes,) + y = tmp + for k in 
axes: + n = tmp.shape[k] + p2 = (n + 1) // 2 + mylist = concatenate((arange(p2, n), arange(p2))) + y = take(y, mylist, k) + return y + + def original_ifftshift(x, axes=None): + """ How ifftshift was implemented in v1.14 """ + tmp = asarray(x) + ndim = tmp.ndim + if axes is None: + axes = list(range(ndim)) + elif isinstance(axes, int): + axes = (axes,) + y = tmp + for k in axes: + n = tmp.shape[k] + p2 = n - (n + 1) // 2 + mylist = concatenate((arange(p2, n), arange(p2))) + y = take(y, mylist, k) + return y + + # create possible 2d array combinations and try all possible keywords + # compare output to original functions + for i in range(16): + for j in range(16): + for axes_keyword in [0, 1, None, (0,), (0, 1)]: + inp = np.random.rand(i, j) + + assert_array_almost_equal(fft.fftshift(inp, axes_keyword), + original_fftshift(inp, axes_keyword)) + + assert_array_almost_equal(fft.ifftshift(inp, axes_keyword), + original_ifftshift(inp, axes_keyword)) + + +class TestFFTFreq: + + def test_definition(self): + x = [0, 1, 2, 3, 4, -4, -3, -2, -1] + assert_array_almost_equal(9*fft.fftfreq(9), x) + assert_array_almost_equal(9*pi*fft.fftfreq(9, pi), x) + x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1] + assert_array_almost_equal(10*fft.fftfreq(10), x) + assert_array_almost_equal(10*pi*fft.fftfreq(10, pi), x) + + +class TestRFFTFreq: + + def test_definition(self): + x = [0, 1, 2, 3, 4] + assert_array_almost_equal(9*fft.rfftfreq(9), x) + assert_array_almost_equal(9*pi*fft.rfftfreq(9, pi), x) + x = [0, 1, 2, 3, 4, 5] + assert_array_almost_equal(10*fft.rfftfreq(10), x) + assert_array_almost_equal(10*pi*fft.rfftfreq(10, pi), x) + + +class TestIRFFTN: + + def test_not_last_axis_success(self): + ar, ai = np.random.random((2, 16, 8, 32)) + a = ar + 1j*ai + + axes = (-2,) + + # Should not raise error + fft.irfftn(a, axes=axes) diff --git a/MLPY/Lib/site-packages/numpy/fft/tests/test_pocketfft.py b/MLPY/Lib/site-packages/numpy/fft/tests/test_pocketfft.py new file mode 100644 index 0000000000000000000000000000000000000000..aa2c33dfd7c7dbb5a17ef9da6b8ea81f4ab8a994 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/fft/tests/test_pocketfft.py @@ -0,0 +1,307 @@ +import numpy as np +import pytest +from numpy.random import random +from numpy.testing import ( + assert_array_equal, assert_raises, assert_allclose + ) +import threading +import queue + + +def fft1(x): + L = len(x) + phase = -2j*np.pi*(np.arange(L)/float(L)) + phase = np.arange(L).reshape(-1, 1) * phase + return np.sum(x*np.exp(phase), axis=1) + + +class TestFFTShift: + + def test_fft_n(self): + assert_raises(ValueError, np.fft.fft, [1, 2, 3], 0) + + +class TestFFT1D: + + def test_identity(self): + maxlen = 512 + x = random(maxlen) + 1j*random(maxlen) + xr = random(maxlen) + for i in range(1, maxlen): + assert_allclose(np.fft.ifft(np.fft.fft(x[0:i])), x[0:i], + atol=1e-12) + assert_allclose(np.fft.irfft(np.fft.rfft(xr[0:i]), i), + xr[0:i], atol=1e-12) + + def test_fft(self): + x = random(30) + 1j*random(30) + assert_allclose(fft1(x), np.fft.fft(x), atol=1e-6) + assert_allclose(fft1(x), np.fft.fft(x, norm="backward"), atol=1e-6) + assert_allclose(fft1(x) / np.sqrt(30), + np.fft.fft(x, norm="ortho"), atol=1e-6) + assert_allclose(fft1(x) / 30., + np.fft.fft(x, norm="forward"), atol=1e-6) + + @pytest.mark.parametrize('norm', (None, 'backward', 'ortho', 'forward')) + def test_ifft(self, norm): + x = random(30) + 1j*random(30) + assert_allclose( + x, np.fft.ifft(np.fft.fft(x, norm=norm), norm=norm), + atol=1e-6) + # Ensure we get the correct error message + with 
pytest.raises(ValueError, + match='Invalid number of FFT data points'): + np.fft.ifft([], norm=norm) + + def test_fft2(self): + x = random((30, 20)) + 1j*random((30, 20)) + assert_allclose(np.fft.fft(np.fft.fft(x, axis=1), axis=0), + np.fft.fft2(x), atol=1e-6) + assert_allclose(np.fft.fft2(x), + np.fft.fft2(x, norm="backward"), atol=1e-6) + assert_allclose(np.fft.fft2(x) / np.sqrt(30 * 20), + np.fft.fft2(x, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.fft2(x) / (30. * 20.), + np.fft.fft2(x, norm="forward"), atol=1e-6) + + def test_ifft2(self): + x = random((30, 20)) + 1j*random((30, 20)) + assert_allclose(np.fft.ifft(np.fft.ifft(x, axis=1), axis=0), + np.fft.ifft2(x), atol=1e-6) + assert_allclose(np.fft.ifft2(x), + np.fft.ifft2(x, norm="backward"), atol=1e-6) + assert_allclose(np.fft.ifft2(x) * np.sqrt(30 * 20), + np.fft.ifft2(x, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.ifft2(x) * (30. * 20.), + np.fft.ifft2(x, norm="forward"), atol=1e-6) + + def test_fftn(self): + x = random((30, 20, 10)) + 1j*random((30, 20, 10)) + assert_allclose( + np.fft.fft(np.fft.fft(np.fft.fft(x, axis=2), axis=1), axis=0), + np.fft.fftn(x), atol=1e-6) + assert_allclose(np.fft.fftn(x), + np.fft.fftn(x, norm="backward"), atol=1e-6) + assert_allclose(np.fft.fftn(x) / np.sqrt(30 * 20 * 10), + np.fft.fftn(x, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.fftn(x) / (30. * 20. * 10.), + np.fft.fftn(x, norm="forward"), atol=1e-6) + + def test_ifftn(self): + x = random((30, 20, 10)) + 1j*random((30, 20, 10)) + assert_allclose( + np.fft.ifft(np.fft.ifft(np.fft.ifft(x, axis=2), axis=1), axis=0), + np.fft.ifftn(x), atol=1e-6) + assert_allclose(np.fft.ifftn(x), + np.fft.ifftn(x, norm="backward"), atol=1e-6) + assert_allclose(np.fft.ifftn(x) * np.sqrt(30 * 20 * 10), + np.fft.ifftn(x, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.ifftn(x) * (30. * 20. * 10.), + np.fft.ifftn(x, norm="forward"), atol=1e-6) + + def test_rfft(self): + x = random(30) + for n in [x.size, 2*x.size]: + for norm in [None, 'backward', 'ortho', 'forward']: + assert_allclose( + np.fft.fft(x, n=n, norm=norm)[:(n//2 + 1)], + np.fft.rfft(x, n=n, norm=norm), atol=1e-6) + assert_allclose( + np.fft.rfft(x, n=n), + np.fft.rfft(x, n=n, norm="backward"), atol=1e-6) + assert_allclose( + np.fft.rfft(x, n=n) / np.sqrt(n), + np.fft.rfft(x, n=n, norm="ortho"), atol=1e-6) + assert_allclose( + np.fft.rfft(x, n=n) / n, + np.fft.rfft(x, n=n, norm="forward"), atol=1e-6) + + def test_irfft(self): + x = random(30) + assert_allclose(x, np.fft.irfft(np.fft.rfft(x)), atol=1e-6) + assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="backward"), + norm="backward"), atol=1e-6) + assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="ortho"), + norm="ortho"), atol=1e-6) + assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="forward"), + norm="forward"), atol=1e-6) + + def test_rfft2(self): + x = random((30, 20)) + assert_allclose(np.fft.fft2(x)[:, :11], np.fft.rfft2(x), atol=1e-6) + assert_allclose(np.fft.rfft2(x), + np.fft.rfft2(x, norm="backward"), atol=1e-6) + assert_allclose(np.fft.rfft2(x) / np.sqrt(30 * 20), + np.fft.rfft2(x, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.rfft2(x) / (30. 
* 20.), + np.fft.rfft2(x, norm="forward"), atol=1e-6) + + def test_irfft2(self): + x = random((30, 20)) + assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x)), atol=1e-6) + assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="backward"), + norm="backward"), atol=1e-6) + assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="ortho"), + norm="ortho"), atol=1e-6) + assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="forward"), + norm="forward"), atol=1e-6) + + def test_rfftn(self): + x = random((30, 20, 10)) + assert_allclose(np.fft.fftn(x)[:, :, :6], np.fft.rfftn(x), atol=1e-6) + assert_allclose(np.fft.rfftn(x), + np.fft.rfftn(x, norm="backward"), atol=1e-6) + assert_allclose(np.fft.rfftn(x) / np.sqrt(30 * 20 * 10), + np.fft.rfftn(x, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.rfftn(x) / (30. * 20. * 10.), + np.fft.rfftn(x, norm="forward"), atol=1e-6) + + def test_irfftn(self): + x = random((30, 20, 10)) + assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x)), atol=1e-6) + assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="backward"), + norm="backward"), atol=1e-6) + assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="ortho"), + norm="ortho"), atol=1e-6) + assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="forward"), + norm="forward"), atol=1e-6) + + def test_hfft(self): + x = random(14) + 1j*random(14) + x_herm = np.concatenate((random(1), x, random(1))) + x = np.concatenate((x_herm, x[::-1].conj())) + assert_allclose(np.fft.fft(x), np.fft.hfft(x_herm), atol=1e-6) + assert_allclose(np.fft.hfft(x_herm), + np.fft.hfft(x_herm, norm="backward"), atol=1e-6) + assert_allclose(np.fft.hfft(x_herm) / np.sqrt(30), + np.fft.hfft(x_herm, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.hfft(x_herm) / 30., + np.fft.hfft(x_herm, norm="forward"), atol=1e-6) + + def test_ihfft(self): + x = random(14) + 1j*random(14) + x_herm = np.concatenate((random(1), x, random(1))) + x = np.concatenate((x_herm, x[::-1].conj())) + assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm)), atol=1e-6) + assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm, + norm="backward"), norm="backward"), atol=1e-6) + assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm, + norm="ortho"), norm="ortho"), atol=1e-6) + assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm, + norm="forward"), norm="forward"), atol=1e-6) + + @pytest.mark.parametrize("op", [np.fft.fftn, np.fft.ifftn, + np.fft.rfftn, np.fft.irfftn]) + def test_axes(self, op): + x = random((30, 20, 10)) + axes = [(0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0)] + for a in axes: + op_tr = op(np.transpose(x, a)) + tr_op = np.transpose(op(x, axes=a), a) + assert_allclose(op_tr, tr_op, atol=1e-6) + + def test_all_1d_norm_preserving(self): + # verify that round-trip transforms are norm-preserving + x = random(30) + x_norm = np.linalg.norm(x) + n = x.size * 2 + func_pairs = [(np.fft.fft, np.fft.ifft), + (np.fft.rfft, np.fft.irfft), + # hfft: order so the first function takes x.size samples + # (necessary for comparison to x_norm above) + (np.fft.ihfft, np.fft.hfft), + ] + for forw, back in func_pairs: + for n in [x.size, 2*x.size]: + for norm in [None, 'backward', 'ortho', 'forward']: + tmp = forw(x, n=n, norm=norm) + tmp = back(tmp, n=n, norm=norm) + assert_allclose(x_norm, + np.linalg.norm(tmp), atol=1e-6) + + @pytest.mark.parametrize("dtype", [np.half, np.single, np.double, + np.longdouble]) + def test_dtypes(self, dtype): + # make sure that all input precisions are accepted and internally + # converted to 64bit + x = 
random(30).astype(dtype)
+        assert_allclose(np.fft.ifft(np.fft.fft(x)), x, atol=1e-6)
+        assert_allclose(np.fft.irfft(np.fft.rfft(x)), x, atol=1e-6)
+
+
+@pytest.mark.parametrize(
+        "dtype",
+        [np.float32, np.float64, np.complex64, np.complex128])
+@pytest.mark.parametrize("order", ["F", 'non-contiguous'])
+@pytest.mark.parametrize(
+        "fft",
+        [np.fft.fft, np.fft.fft2, np.fft.fftn,
+         np.fft.ifft, np.fft.ifft2, np.fft.ifftn])
+def test_fft_with_order(dtype, order, fft):
+    # Check that FFT/IFFT produces identical results for C, Fortran and
+    # non contiguous arrays
+    rng = np.random.RandomState(42)
+    X = rng.rand(8, 7, 13).astype(dtype, copy=False)
+    # See discussion in pull/14178
+    _tol = 8.0 * np.sqrt(np.log2(X.size)) * np.finfo(X.dtype).eps
+    if order == 'F':
+        Y = np.asfortranarray(X)
+    else:
+        # Make a non contiguous array
+        Y = X[::-1]
+        X = np.ascontiguousarray(X[::-1])
+
+    if fft.__name__.endswith('fft'):
+        for axis in range(3):
+            X_res = fft(X, axis=axis)
+            Y_res = fft(Y, axis=axis)
+            assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol)
+    elif fft.__name__.endswith(('fft2', 'fftn')):
+        axes = [(0, 1), (1, 2), (0, 2)]
+        if fft.__name__.endswith('fftn'):
+            axes.extend([(0,), (1,), (2,), None])
+        for ax in axes:
+            X_res = fft(X, axes=ax)
+            Y_res = fft(Y, axes=ax)
+            assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol)
+    else:
+        raise ValueError()
+
+
+class TestFFTThreadSafe:
+    threads = 16
+    input_shape = (800, 200)
+
+    def _test_mtsame(self, func, *args):
+        def worker(args, q):
+            q.put(func(*args))
+
+        q = queue.Queue()
+        expected = func(*args)
+
+        # Spin off a bunch of threads to call the same function simultaneously
+        t = [threading.Thread(target=worker, args=(args, q))
+             for i in range(self.threads)]
+        [x.start() for x in t]
+
+        [x.join() for x in t]
+        # Make sure all threads returned the correct value
+        for i in range(self.threads):
+            assert_array_equal(q.get(timeout=5), expected,
+                               'Function returned wrong value in multithreaded context')
+
+    def test_fft(self):
+        a = np.ones(self.input_shape) * 1+0j
+        self._test_mtsame(np.fft.fft, a)
+
+    def test_ifft(self):
+        a = np.ones(self.input_shape) * 1+0j
+        self._test_mtsame(np.fft.ifft, a)
+
+    def test_rfft(self):
+        a = np.ones(self.input_shape)
+        self._test_mtsame(np.fft.rfft, a)
+
+    def test_irfft(self):
+        a = np.ones(self.input_shape) * 1+0j
+        self._test_mtsame(np.fft.irfft, a)
diff --git a/MLPY/Lib/site-packages/numpy/lib/__init__.py b/MLPY/Lib/site-packages/numpy/lib/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..00addac10bbf150e1b863472ce08691e5ef33040
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/lib/__init__.py
@@ -0,0 +1,61 @@
+"""
+**Note:** almost all functions in the ``numpy.lib`` namespace
+are also present in the main ``numpy`` namespace.  Please use the
+functions as ``np.<funcname>`` where possible.
+
+``numpy.lib`` is mostly a space for implementing functions that don't
+belong in core or in another NumPy submodule with a clear purpose
+(e.g. ``random``, ``fft``, ``linalg``, ``ma``).
+
+It mostly contains basic functions that are used by several submodules
+and are useful to have in the main namespace.
+
+"""
+import math
+
+from numpy.version import version as __version__
+
+# Public submodules
+# Note: recfunctions and (maybe) format are public too, but not imported
+from . import mixins
+from .
import scimath as emath + +# Private submodules +from .type_check import * +from .index_tricks import * +from .function_base import * +from .nanfunctions import * +from .shape_base import * +from .stride_tricks import * +from .twodim_base import * +from .ufunclike import * +from .histograms import * + +from .polynomial import * +from .utils import * +from .arraysetops import * +from .npyio import * +from .arrayterator import Arrayterator +from .arraypad import * +from ._version import * +from numpy.core._multiarray_umath import tracemalloc_domain + +__all__ = ['emath', 'math', 'tracemalloc_domain', 'Arrayterator'] +__all__ += type_check.__all__ +__all__ += index_tricks.__all__ +__all__ += function_base.__all__ +__all__ += shape_base.__all__ +__all__ += stride_tricks.__all__ +__all__ += twodim_base.__all__ +__all__ += ufunclike.__all__ +__all__ += arraypad.__all__ +__all__ += polynomial.__all__ +__all__ += utils.__all__ +__all__ += arraysetops.__all__ +__all__ += npyio.__all__ +__all__ += nanfunctions.__all__ +__all__ += histograms.__all__ + +from numpy._pytesttester import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/MLPY/Lib/site-packages/numpy/lib/__init__.pyi b/MLPY/Lib/site-packages/numpy/lib/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..c5387e9787503169deb84244e3497b34fc721da0 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/__init__.pyi @@ -0,0 +1,233 @@ +import math as math +from typing import Any, List + +from numpy import ( + ndenumerate as ndenumerate, + ndindex as ndindex, +) + +from numpy.version import version + +from numpy.lib import ( + format as format, + mixins as mixins, + scimath as scimath, + stride_tricks as stride_stricks, +) + +from numpy.lib._version import ( + NumpyVersion as NumpyVersion, +) + +from numpy.lib.arraypad import ( + pad as pad, +) + +from numpy.lib.arraysetops import ( + ediff1d as ediff1d, + intersect1d as intersect1d, + setxor1d as setxor1d, + union1d as union1d, + setdiff1d as setdiff1d, + unique as unique, + in1d as in1d, + isin as isin, +) + +from numpy.lib.arrayterator import ( + Arrayterator as Arrayterator, +) + +from numpy.lib.function_base import ( + select as select, + piecewise as piecewise, + trim_zeros as trim_zeros, + copy as copy, + iterable as iterable, + percentile as percentile, + diff as diff, + gradient as gradient, + angle as angle, + unwrap as unwrap, + sort_complex as sort_complex, + disp as disp, + flip as flip, + rot90 as rot90, + extract as extract, + place as place, + vectorize as vectorize, + asarray_chkfinite as asarray_chkfinite, + average as average, + bincount as bincount, + digitize as digitize, + cov as cov, + corrcoef as corrcoef, + msort as msort, + median as median, + sinc as sinc, + hamming as hamming, + hanning as hanning, + bartlett as bartlett, + blackman as blackman, + kaiser as kaiser, + trapz as trapz, + i0 as i0, + add_newdoc as add_newdoc, + add_docstring as add_docstring, + meshgrid as meshgrid, + delete as delete, + insert as insert, + append as append, + interp as interp, + add_newdoc_ufunc as add_newdoc_ufunc, + quantile as quantile, +) + +from numpy.lib.index_tricks import ( + ravel_multi_index as ravel_multi_index, + unravel_index as unravel_index, + mgrid as mgrid, + ogrid as ogrid, + r_ as r_, + c_ as c_, + s_ as s_, + index_exp as index_exp, + ix_ as ix_, + fill_diagonal as fill_diagonal, + diag_indices as diag_indices, + diag_indices_from as diag_indices_from, +) + +from numpy.lib.nanfunctions import ( + nansum as nansum, + 
nanmax as nanmax, + nanmin as nanmin, + nanargmax as nanargmax, + nanargmin as nanargmin, + nanmean as nanmean, + nanmedian as nanmedian, + nanpercentile as nanpercentile, + nanvar as nanvar, + nanstd as nanstd, + nanprod as nanprod, + nancumsum as nancumsum, + nancumprod as nancumprod, + nanquantile as nanquantile, +) + +from numpy.lib.npyio import ( + savetxt as savetxt, + loadtxt as loadtxt, + genfromtxt as genfromtxt, + recfromtxt as recfromtxt, + recfromcsv as recfromcsv, + load as load, + loads as loads, + save as save, + savez as savez, + savez_compressed as savez_compressed, + packbits as packbits, + unpackbits as unpackbits, + fromregex as fromregex, + DataSource as DataSource, +) + +from numpy.lib.polynomial import ( + poly as poly, + roots as roots, + polyint as polyint, + polyder as polyder, + polyadd as polyadd, + polysub as polysub, + polymul as polymul, + polydiv as polydiv, + polyval as polyval, + polyfit as polyfit, + RankWarning as RankWarning, + poly1d as poly1d, +) + +from numpy.lib.shape_base import ( + column_stack as column_stack, + row_stack as row_stack, + dstack as dstack, + array_split as array_split, + split as split, + hsplit as hsplit, + vsplit as vsplit, + dsplit as dsplit, + apply_over_axes as apply_over_axes, + expand_dims as expand_dims, + apply_along_axis as apply_along_axis, + kron as kron, + tile as tile, + get_array_wrap as get_array_wrap, + take_along_axis as take_along_axis, + put_along_axis as put_along_axis, +) + +from numpy.lib.stride_tricks import ( + broadcast_to as broadcast_to, + broadcast_arrays as broadcast_arrays, + broadcast_shapes as broadcast_shapes, +) + +from numpy.lib.twodim_base import ( + diag as diag, + diagflat as diagflat, + eye as eye, + fliplr as fliplr, + flipud as flipud, + tri as tri, + triu as triu, + tril as tril, + vander as vander, + histogram2d as histogram2d, + mask_indices as mask_indices, + tril_indices as tril_indices, + tril_indices_from as tril_indices_from, + triu_indices as triu_indices, + triu_indices_from as triu_indices_from, +) + +from numpy.lib.type_check import ( + mintypecode as mintypecode, + asfarray as asfarray, + real as real, + imag as imag, + iscomplex as iscomplex, + isreal as isreal, + iscomplexobj as iscomplexobj, + isrealobj as isrealobj, + nan_to_num as nan_to_num, + real_if_close as real_if_close, + typename as typename, + common_type as common_type, +) + +from numpy.lib.ufunclike import ( + fix as fix, + isposinf as isposinf, + isneginf as isneginf, +) + +from numpy.lib.utils import ( + issubclass_ as issubclass_, + issubsctype as issubsctype, + issubdtype as issubdtype, + deprecate as deprecate, + deprecate_with_doc as deprecate_with_doc, + get_include as get_include, + info as info, + source as source, + who as who, + lookfor as lookfor, + byte_bounds as byte_bounds, + safe_eval as safe_eval, +) + +__all__: List[str] + +__version__ = version +emath = scimath +tracemalloc_domain: int diff --git a/MLPY/Lib/site-packages/numpy/lib/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aed7bc5d17f5f5ababf0146b1499d1676cae0e06 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/__pycache__/_datasource.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/__pycache__/_datasource.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..6e42d46053e97175d258c8c5a7c8f9b803ec9ab8 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/__pycache__/_datasource.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/__pycache__/_iotools.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/__pycache__/_iotools.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b96739bd227f490161f000d34bb862cd8552217b Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/__pycache__/_iotools.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/__pycache__/_version.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/__pycache__/_version.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..74b36b14f30ed590f38fbc8e977b9bdbd61a51a2 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/__pycache__/_version.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/__pycache__/arraypad.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/__pycache__/arraypad.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8eb6604d89c2581db7679851179484819b40905d Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/__pycache__/arraypad.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/__pycache__/arraysetops.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/__pycache__/arraysetops.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..383ddfedf69534eb280478ddb5b66e3562008e7e Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/__pycache__/arraysetops.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/__pycache__/arrayterator.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/__pycache__/arrayterator.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..75c3a242a31b2213ca6b05802cd5da98314fbcc0 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/__pycache__/arrayterator.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/__pycache__/format.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/__pycache__/format.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..22907f6586067227ed556a14e9de6d8dc78a0a9a Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/__pycache__/format.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/__pycache__/function_base.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/__pycache__/function_base.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca30b4355f0c45397575672e9267282257f11787 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/__pycache__/function_base.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/__pycache__/histograms.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/__pycache__/histograms.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61dfea7b6f3a4e31de023e2b764c80f42d2657e9 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/__pycache__/histograms.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/__pycache__/index_tricks.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/__pycache__/index_tricks.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..abc12200e543ac938b536f79c8624c8fa5a5fa55 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/__pycache__/index_tricks.cpython-39.pyc differ 
diff --git a/MLPY/Lib/site-packages/numpy/lib/__pycache__/mixins.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/__pycache__/mixins.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce2608225f47efde6bab10256a3dc68bf150774a Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/__pycache__/mixins.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/__pycache__/nanfunctions.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/__pycache__/nanfunctions.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a5a0fba670fcfcee06b500bce6f86da60de1b90 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/__pycache__/nanfunctions.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/__pycache__/npyio.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/__pycache__/npyio.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b19308908f6a574c855cfec7b56e15baba3b6be Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/__pycache__/npyio.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/__pycache__/polynomial.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/__pycache__/polynomial.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..60d1874014e49c3238728d4cce2448f46cd54cd5 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/__pycache__/polynomial.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/__pycache__/recfunctions.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/__pycache__/recfunctions.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f0226be7f30a750d0a378676269c8e541355da7 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/__pycache__/recfunctions.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/__pycache__/scimath.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/__pycache__/scimath.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f10d4115c3ceab8c01960978c19108c4eb40d7c Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/__pycache__/scimath.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/__pycache__/setup.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/__pycache__/setup.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..59e135b3d476dc532774442429e577d2a8852d5d Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/__pycache__/setup.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/__pycache__/shape_base.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/__pycache__/shape_base.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19e19396fcee8c17f0490fd1eb99200726195af9 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/__pycache__/shape_base.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/__pycache__/stride_tricks.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/__pycache__/stride_tricks.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..81435f2b36ded4fcfb2ed2af08d98057b54a95f7 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/__pycache__/stride_tricks.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/__pycache__/twodim_base.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/__pycache__/twodim_base.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..617e284155881f18000e013b5d9ae428ad4dbd73 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/__pycache__/twodim_base.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/__pycache__/type_check.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/__pycache__/type_check.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7f1beaa9e269a79f78051f206133cb9534f5675b Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/__pycache__/type_check.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/__pycache__/ufunclike.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/__pycache__/ufunclike.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..11afd4d1d77b7dbec62a7c9b6f2ec024d1426d97 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/__pycache__/ufunclike.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/__pycache__/user_array.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/__pycache__/user_array.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91bce3e52eaa4777bdb69cddaff61c9fc13afec6 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/__pycache__/user_array.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/__pycache__/utils.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/__pycache__/utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..08418300a9d6b9bf2f2106670eae695cc3c209f8 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/__pycache__/utils.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/_datasource.py b/MLPY/Lib/site-packages/numpy/lib/_datasource.py new file mode 100644 index 0000000000000000000000000000000000000000..dc7585552cf5dcf62f7d49d9f700550c777b7a79 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/_datasource.py @@ -0,0 +1,704 @@ +"""A file interface for handling local and remote data files. + +The goal of datasource is to abstract some of the file system operations +when dealing with data files so the researcher doesn't have to know all the +low-level details. Through datasource, a researcher can obtain and use a +file with one function call, regardless of location of the file. + +DataSource is meant to augment standard python libraries, not replace them. +It should work seamlessly with standard file IO operations and the os +module. + +DataSource files can originate locally or remotely: + +- local files : '/home/guido/src/local/data.txt' +- URLs (http, ftp, ...) : 'http://www.scipy.org/not/real/data.txt' + +DataSource files can also be compressed or uncompressed. Currently only +gzip, bz2 and xz are supported. + +Example:: + + >>> # Create a DataSource, use os.curdir (default) for local storage. + >>> from numpy import DataSource + >>> ds = DataSource() + >>> + >>> # Open a remote file. + >>> # DataSource downloads the file, stores it locally in: + >>> # './www.google.com/index.html' + >>> # opens the file and returns a file object. + >>> fp = ds.open('http://www.google.com/') # doctest: +SKIP + >>> + >>> # Use the file as you normally would + >>> fp.read() # doctest: +SKIP + >>> fp.close() # doctest: +SKIP + +""" +import os +import io + +from numpy.core.overrides import set_module + + +_open = open + + +def _check_mode(mode, encoding, newline): + """Check mode and that encoding and newline are compatible. + + Parameters + ---------- + mode : str + File open mode. 
+ encoding : str + File encoding. + newline : str + Newline for text files. + + """ + if "t" in mode: + if "b" in mode: + raise ValueError("Invalid mode: %r" % (mode,)) + else: + if encoding is not None: + raise ValueError("Argument 'encoding' not supported in binary mode") + if newline is not None: + raise ValueError("Argument 'newline' not supported in binary mode") + + +# Using a class instead of a module-level dictionary +# to reduce the initial 'import numpy' overhead by +# deferring the import of lzma, bz2 and gzip until needed + +# TODO: .zip support, .tar support? +class _FileOpeners: + """ + Container for different methods to open (un-)compressed files. + + `_FileOpeners` contains a dictionary that holds one method for each + supported file format. Attribute lookup is implemented in such a way + that an instance of `_FileOpeners` itself can be indexed with the keys + of that dictionary. Currently uncompressed files as well as files + compressed with ``gzip``, ``bz2`` or ``xz`` compression are supported. + + Notes + ----- + `_file_openers`, an instance of `_FileOpeners`, is made available for + use in the `_datasource` module. + + Examples + -------- + >>> import gzip + >>> np.lib._datasource._file_openers.keys() + [None, '.bz2', '.gz', '.xz', '.lzma'] + >>> np.lib._datasource._file_openers['.gz'] is gzip.open + True + + """ + + def __init__(self): + self._loaded = False + self._file_openers = {None: io.open} + + def _load(self): + if self._loaded: + return + + try: + import bz2 + self._file_openers[".bz2"] = bz2.open + except ImportError: + pass + + try: + import gzip + self._file_openers[".gz"] = gzip.open + except ImportError: + pass + + try: + import lzma + self._file_openers[".xz"] = lzma.open + self._file_openers[".lzma"] = lzma.open + except (ImportError, AttributeError): + # There are incompatible backports of lzma that do not have the + # lzma.open attribute, so catch that as well as ImportError. + pass + + self._loaded = True + + def keys(self): + """ + Return the keys of currently supported file openers. + + Parameters + ---------- + None + + Returns + ------- + keys : list + The keys are None for uncompressed files and the file extension + strings (i.e. ``'.gz'``, ``'.xz'``) for supported compression + methods. + + """ + self._load() + return list(self._file_openers.keys()) + + def __getitem__(self, key): + self._load() + return self._file_openers[key] + +_file_openers = _FileOpeners() + +def open(path, mode='r', destpath=os.curdir, encoding=None, newline=None): + """ + Open `path` with `mode` and return the file object. + + If ``path`` is an URL, it will be downloaded, stored in the + `DataSource` `destpath` directory and opened from there. + + Parameters + ---------- + path : str + Local file path or URL to open. + mode : str, optional + Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to + append. Available modes depend on the type of object specified by + path. Default is 'r'. + destpath : str, optional + Path to the directory where the source file gets downloaded to for + use. If `destpath` is None, a temporary directory will be created. + The default path is the current directory. + encoding : {None, str}, optional + Open text file with given encoding. The default encoding will be + what `io.open` uses. + newline : {None, str}, optional + Newline to use when reading text file. + + Returns + ------- + out : file object + The opened file. 
+ + Notes + ----- + This is a convenience function that instantiates a `DataSource` and + returns the file object from ``DataSource.open(path)``. + + """ + + ds = DataSource(destpath) + return ds.open(path, mode, encoding=encoding, newline=newline) + + +@set_module('numpy') +class DataSource: + """ + DataSource(destpath='.') + + A generic data source file (file, http, ftp, ...). + + DataSources can be local files or remote files/URLs. The files may + also be compressed or uncompressed. DataSource hides some of the + low-level details of downloading the file, allowing you to simply pass + in a valid file path (or URL) and obtain a file object. + + Parameters + ---------- + destpath : str or None, optional + Path to the directory where the source file gets downloaded to for + use. If `destpath` is None, a temporary directory will be created. + The default path is the current directory. + + Notes + ----- + URLs require a scheme string (``http://``) to be used, without it they + will fail:: + + >>> repos = np.DataSource() + >>> repos.exists('www.google.com/index.html') + False + >>> repos.exists('http://www.google.com/index.html') + True + + Temporary directories are deleted when the DataSource is deleted. + + Examples + -------- + :: + + >>> ds = np.DataSource('/home/guido') + >>> urlname = 'http://www.google.com/' + >>> gfile = ds.open('http://www.google.com/') + >>> ds.abspath(urlname) + '/home/guido/www.google.com/index.html' + + >>> ds = np.DataSource(None) # use with temporary file + >>> ds.open('/home/guido/foobar.txt') + + >>> ds.abspath('/home/guido/foobar.txt') + '/tmp/.../home/guido/foobar.txt' + + """ + + def __init__(self, destpath=os.curdir): + """Create a DataSource with a local path at destpath.""" + if destpath: + self._destpath = os.path.abspath(destpath) + self._istmpdest = False + else: + import tempfile # deferring import to improve startup time + self._destpath = tempfile.mkdtemp() + self._istmpdest = True + + def __del__(self): + # Remove temp directories + if hasattr(self, '_istmpdest') and self._istmpdest: + import shutil + + shutil.rmtree(self._destpath) + + def _iszip(self, filename): + """Test if the filename is a zip file by looking at the file extension. + + """ + fname, ext = os.path.splitext(filename) + return ext in _file_openers.keys() + + def _iswritemode(self, mode): + """Test if the given mode will open a file for writing.""" + + # Currently only used to test the bz2 files. + _writemodes = ("w", "+") + for c in mode: + if c in _writemodes: + return True + return False + + def _splitzipext(self, filename): + """Split zip extension from filename and return filename. + + *Returns*: + base, zip_ext : {tuple} + + """ + + if self._iszip(filename): + return os.path.splitext(filename) + else: + return filename, None + + def _possible_names(self, filename): + """Return a tuple containing compressed filename variations.""" + names = [filename] + if not self._iszip(filename): + for zipext in _file_openers.keys(): + if zipext: + names.append(filename+zipext) + return names + + def _isurl(self, path): + """Test if path is a net location. Tests the scheme and netloc.""" + + # We do this here to reduce the 'import numpy' initial import time. + from urllib.parse import urlparse + + # BUG : URLs require a scheme string ('http://') to be used. + # www.google.com will fail. + # Should we prepend the scheme for those that don't have it and + # test that also? Similar to the way we append .gz and test for + # for compressed versions of files. 
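# Illustration (not part of the vendored file): the check just below
# accepts a path only when `urlparse` reports both a scheme and a
# netloc, which is exactly the BUG noted above -- a bare
# 'www.google.com' parses as a relative path, not a URL. A minimal
# standard-library sketch of that behavior:
#
#     from urllib.parse import urlparse
#
#     for candidate in ("www.google.com/index.html",
#                       "http://www.google.com/index.html"):
#         parts = urlparse(candidate)
#         print(candidate, "->", bool(parts.scheme and parts.netloc))
#     # www.google.com/index.html -> False
#     # http://www.google.com/index.html -> True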
+ + scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path) + return bool(scheme and netloc) + + def _cache(self, path): + """Cache the file specified by path. + + Creates a copy of the file in the datasource cache. + + """ + # We import these here because importing them is slow and + # a significant fraction of numpy's total import time. + import shutil + from urllib.request import urlopen + from urllib.error import URLError + + upath = self.abspath(path) + + # ensure directory exists + if not os.path.exists(os.path.dirname(upath)): + os.makedirs(os.path.dirname(upath)) + + # TODO: Doesn't handle compressed files! + if self._isurl(path): + with urlopen(path) as openedurl: + with _open(upath, 'wb') as f: + shutil.copyfileobj(openedurl, f) + else: + shutil.copyfile(path, upath) + return upath + + def _findfile(self, path): + """Searches for ``path`` and returns full path if found. + + If path is an URL, _findfile will cache a local copy and return the + path to the cached file. If path is a local file, _findfile will + return a path to that local file. + + The search will include possible compressed versions of the file + and return the first occurrence found. + + """ + + # Build list of possible local file paths + if not self._isurl(path): + # Valid local paths + filelist = self._possible_names(path) + # Paths in self._destpath + filelist += self._possible_names(self.abspath(path)) + else: + # Cached URLs in self._destpath + filelist = self._possible_names(self.abspath(path)) + # Remote URLs + filelist = filelist + self._possible_names(path) + + for name in filelist: + if self.exists(name): + if self._isurl(name): + name = self._cache(name) + return name + return None + + def abspath(self, path): + """ + Return absolute path of file in the DataSource directory. + + If `path` is an URL, then `abspath` will return either the location + the file exists locally or the location it would exist when opened + using the `open` method. + + Parameters + ---------- + path : str + Can be a local file or a remote URL. + + Returns + ------- + out : str + Complete path, including the `DataSource` destination directory. + + Notes + ----- + The functionality is based on `os.path.abspath`. + + """ + # We do this here to reduce the 'import numpy' initial import time. + from urllib.parse import urlparse + + # TODO: This should be more robust. Handles case where path includes + # the destpath, but not other sub-paths. Failing case: + # path = /home/guido/datafile.txt + # destpath = /home/alex/ + # upath = self.abspath(path) + # upath == '/home/alex/home/guido/datafile.txt' + + # handle case where path includes self._destpath + splitpath = path.split(self._destpath, 2) + if len(splitpath) > 1: + path = splitpath[1] + scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path) + netloc = self._sanitize_relative_path(netloc) + upath = self._sanitize_relative_path(upath) + return os.path.join(self._destpath, netloc, upath) + + def _sanitize_relative_path(self, path): + """Return a sanitised relative path for which + os.path.abspath(os.path.join(base, path)).startswith(base) + """ + last = None + path = os.path.normpath(path) + while path != last: + last = path + # Note: os.path.join treats '/' as os.sep on Windows + path = path.lstrip(os.sep).lstrip('/') + path = path.lstrip(os.pardir).lstrip('..') + drive, path = os.path.splitdrive(path) # for Windows + return path + + def exists(self, path): + """ + Test if path exists. + + Test if `path` exists as (and in this order): + + - a local file. 
+        - a remote URL that has been downloaded and stored locally in the
+          `DataSource` directory.
+        - a remote URL that has not been downloaded, but is valid and
+          accessible.
+
+        Parameters
+        ----------
+        path : str
+            Can be a local file or a remote URL.
+
+        Returns
+        -------
+        out : bool
+            True if `path` exists.
+
+        Notes
+        -----
+        When `path` is an URL, `exists` will return True if it's either
+        stored locally in the `DataSource` directory, or is a valid remote
+        URL. `DataSource` does not discriminate between the two, the file
+        is accessible if it exists in either location.
+
+        """
+
+        # First test for local path
+        if os.path.exists(path):
+            return True
+
+        # We import this here because importing urllib is slow and
+        # a significant fraction of numpy's total import time.
+        from urllib.request import urlopen
+        from urllib.error import URLError
+
+        # Test cached url
+        upath = self.abspath(path)
+        if os.path.exists(upath):
+            return True
+
+        # Test remote url
+        if self._isurl(path):
+            try:
+                netfile = urlopen(path)
+                netfile.close()
+                del netfile
+                return True
+            except URLError:
+                return False
+        return False
+
+    def open(self, path, mode='r', encoding=None, newline=None):
+        """
+        Open and return file-like object.
+
+        If `path` is an URL, it will be downloaded, stored in the
+        `DataSource` directory and opened from there.
+
+        Parameters
+        ----------
+        path : str
+            Local file path or URL to open.
+        mode : {'r', 'w', 'a'}, optional
+            Mode to open `path`. Mode 'r' for reading, 'w' for writing,
+            'a' to append. Available modes depend on the type of object
+            specified by `path`. Default is 'r'.
+        encoding : {None, str}, optional
+            Open text file with given encoding. The default encoding will be
+            what `io.open` uses.
+        newline : {None, str}, optional
+            Newline to use when reading text file.
+
+        Returns
+        -------
+        out : file object
+            File object.
+
+        """
+
+        # TODO: There is no support for opening a file for writing which
+        # doesn't exist yet (creating a file). Should there be?
+
+        # TODO: Add a ``subdir`` parameter for specifying the subdirectory
+        # used to store URLs in self._destpath.
+
+        if self._isurl(path) and self._iswritemode(mode):
+            raise ValueError("URLs are not writeable")
+
+        # NOTE: _findfile will fail on a new file opened for writing.
+        found = self._findfile(path)
+        if found:
+            _fname, ext = self._splitzipext(found)
+            if ext == '.bz2':
+                # bz2 rejects '+' modes, so strip it. Note that
+                # `_splitzipext` returns the extension with its leading dot,
+                # and that `str.replace` returns a new string, so the result
+                # must be assigned back to `mode`.
+                mode = mode.replace("+", "")
+            return _file_openers[ext](found, mode=mode,
+                                      encoding=encoding, newline=newline)
+        else:
+            raise IOError("%s not found." % path)
+
+
+class Repository(DataSource):
+    """
+    Repository(baseurl, destpath='.')
+
+    A data repository where multiple DataSources share a base
+    URL/directory.
+
+    `Repository` extends `DataSource` by prepending a base URL (or
+    directory) to all the files it handles. Use `Repository` when you will
+    be working with multiple files from one base URL. Initialize
+    `Repository` with the base URL, then refer to each file by its filename
+    only.
+
+    Parameters
+    ----------
+    baseurl : str
+        Path to the local directory or remote location that contains the
+        data files.
+    destpath : str or None, optional
+        Path to the directory where the source file gets downloaded to for
+        use. If `destpath` is None, a temporary directory will be created.
+        The default path is the current directory.
+ + Examples + -------- + To analyze all files in the repository, do something like this + (note: this is not self-contained code):: + + >>> repos = np.lib._datasource.Repository('/home/user/data/dir/') + >>> for filename in filelist: + ... fp = repos.open(filename) + ... fp.analyze() + ... fp.close() + + Similarly you could use a URL for a repository:: + + >>> repos = np.lib._datasource.Repository('http://www.xyz.edu/data') + + """ + + def __init__(self, baseurl, destpath=os.curdir): + """Create a Repository with a shared url or directory of baseurl.""" + DataSource.__init__(self, destpath=destpath) + self._baseurl = baseurl + + def __del__(self): + DataSource.__del__(self) + + def _fullpath(self, path): + """Return complete path for path. Prepends baseurl if necessary.""" + splitpath = path.split(self._baseurl, 2) + if len(splitpath) == 1: + result = os.path.join(self._baseurl, path) + else: + result = path # path contains baseurl already + return result + + def _findfile(self, path): + """Extend DataSource method to prepend baseurl to ``path``.""" + return DataSource._findfile(self, self._fullpath(path)) + + def abspath(self, path): + """ + Return absolute path of file in the Repository directory. + + If `path` is an URL, then `abspath` will return either the location + the file exists locally or the location it would exist when opened + using the `open` method. + + Parameters + ---------- + path : str + Can be a local file or a remote URL. This may, but does not + have to, include the `baseurl` with which the `Repository` was + initialized. + + Returns + ------- + out : str + Complete path, including the `DataSource` destination directory. + + """ + return DataSource.abspath(self, self._fullpath(path)) + + def exists(self, path): + """ + Test if path exists prepending Repository base URL to path. + + Test if `path` exists as (and in this order): + + - a local file. + - a remote URL that has been downloaded and stored locally in the + `DataSource` directory. + - a remote URL that has not been downloaded, but is valid and + accessible. + + Parameters + ---------- + path : str + Can be a local file or a remote URL. This may, but does not + have to, include the `baseurl` with which the `Repository` was + initialized. + + Returns + ------- + out : bool + True if `path` exists. + + Notes + ----- + When `path` is an URL, `exists` will return True if it's either + stored locally in the `DataSource` directory, or is a valid remote + URL. `DataSource` does not discriminate between the two, the file + is accessible if it exists in either location. + + """ + return DataSource.exists(self, self._fullpath(path)) + + def open(self, path, mode='r', encoding=None, newline=None): + """ + Open and return file-like object prepending Repository base URL. + + If `path` is an URL, it will be downloaded, stored in the + DataSource directory and opened from there. + + Parameters + ---------- + path : str + Local file path or URL to open. This may, but does not have to, + include the `baseurl` with which the `Repository` was + initialized. + mode : {'r', 'w', 'a'}, optional + Mode to open `path`. Mode 'r' for reading, 'w' for writing, + 'a' to append. Available modes depend on the type of object + specified by `path`. Default is 'r'. + encoding : {None, str}, optional + Open text file with given encoding. The default encoding will be + what `io.open` uses. + newline : {None, str}, optional + Newline to use when reading text file. + + Returns + ------- + out : file object + File object. 
+ + """ + return DataSource.open(self, self._fullpath(path), mode, + encoding=encoding, newline=newline) + + def listdir(self): + """ + List files in the source Repository. + + Returns + ------- + files : list of str + List of file names (not containing a directory part). + + Notes + ----- + Does not currently work for remote repositories. + + """ + if self._isurl(self._baseurl): + raise NotImplementedError( + "Directory listing of URLs, not supported yet.") + else: + return os.listdir(self._baseurl) diff --git a/MLPY/Lib/site-packages/numpy/lib/_iotools.py b/MLPY/Lib/site-packages/numpy/lib/_iotools.py new file mode 100644 index 0000000000000000000000000000000000000000..769a54ffd6d04a375dd4ab54b76826b7777986ae --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/_iotools.py @@ -0,0 +1,898 @@ +"""A collection of functions designed to help I/O with ascii files. + +""" +__docformat__ = "restructuredtext en" + +import numpy as np +import numpy.core.numeric as nx +from numpy.compat import asbytes, asunicode + + +def _decode_line(line, encoding=None): + """Decode bytes from binary input streams. + + Defaults to decoding from 'latin1'. That differs from the behavior of + np.compat.asunicode that decodes from 'ascii'. + + Parameters + ---------- + line : str or bytes + Line to be decoded. + encoding : str + Encoding used to decode `line`. + + Returns + ------- + decoded_line : unicode + Unicode in Python 2, a str (unicode) in Python 3. + + """ + if type(line) is bytes: + if encoding is None: + encoding = "latin1" + line = line.decode(encoding) + + return line + + +def _is_string_like(obj): + """ + Check whether obj behaves like a string. + """ + try: + obj + '' + except (TypeError, ValueError): + return False + return True + + +def _is_bytes_like(obj): + """ + Check whether obj behaves like a bytes object. + """ + try: + obj + b'' + except (TypeError, ValueError): + return False + return True + + +def has_nested_fields(ndtype): + """ + Returns whether one or several fields of a dtype are nested. + + Parameters + ---------- + ndtype : dtype + Data-type of a structured array. + + Raises + ------ + AttributeError + If `ndtype` does not have a `names` attribute. + + Examples + -------- + >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float)]) + >>> np.lib._iotools.has_nested_fields(dt) + False + + """ + for name in ndtype.names or (): + if ndtype[name].names is not None: + return True + return False + + +def flatten_dtype(ndtype, flatten_base=False): + """ + Unpack a structured data-type by collapsing nested fields and/or fields + with a shape. + + Note that the field names are lost. + + Parameters + ---------- + ndtype : dtype + The datatype to collapse + flatten_base : bool, optional + If True, transform a field with a shape into several fields. Default is + False. + + Examples + -------- + >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), + ... 
('block', int, (2, 3))]) + >>> np.lib._iotools.flatten_dtype(dt) + [dtype('S4'), dtype('float64'), dtype('float64'), dtype('int64')] + >>> np.lib._iotools.flatten_dtype(dt, flatten_base=True) + [dtype('S4'), + dtype('float64'), + dtype('float64'), + dtype('int64'), + dtype('int64'), + dtype('int64'), + dtype('int64'), + dtype('int64'), + dtype('int64')] + + """ + names = ndtype.names + if names is None: + if flatten_base: + return [ndtype.base] * int(np.prod(ndtype.shape)) + return [ndtype.base] + else: + types = [] + for field in names: + info = ndtype.fields[field] + flat_dt = flatten_dtype(info[0], flatten_base) + types.extend(flat_dt) + return types + + +class LineSplitter: + """ + Object to split a string at a given delimiter or at given places. + + Parameters + ---------- + delimiter : str, int, or sequence of ints, optional + If a string, character used to delimit consecutive fields. + If an integer or a sequence of integers, width(s) of each field. + comments : str, optional + Character used to mark the beginning of a comment. Default is '#'. + autostrip : bool, optional + Whether to strip each individual field. Default is True. + + """ + + def autostrip(self, method): + """ + Wrapper to strip each member of the output of `method`. + + Parameters + ---------- + method : function + Function that takes a single argument and returns a sequence of + strings. + + Returns + ------- + wrapped : function + The result of wrapping `method`. `wrapped` takes a single input + argument and returns a list of strings that are stripped of + white-space. + + """ + return lambda input: [_.strip() for _ in method(input)] + + def __init__(self, delimiter=None, comments='#', autostrip=True, + encoding=None): + delimiter = _decode_line(delimiter) + comments = _decode_line(comments) + + self.comments = comments + + # Delimiter is a character + if (delimiter is None) or isinstance(delimiter, str): + delimiter = delimiter or None + _handyman = self._delimited_splitter + # Delimiter is a list of field widths + elif hasattr(delimiter, '__iter__'): + _handyman = self._variablewidth_splitter + idx = np.cumsum([0] + list(delimiter)) + delimiter = [slice(i, j) for (i, j) in zip(idx[:-1], idx[1:])] + # Delimiter is a single integer + elif int(delimiter): + (_handyman, delimiter) = ( + self._fixedwidth_splitter, int(delimiter)) + else: + (_handyman, delimiter) = (self._delimited_splitter, None) + self.delimiter = delimiter + if autostrip: + self._handyman = self.autostrip(_handyman) + else: + self._handyman = _handyman + self.encoding = encoding + + def _delimited_splitter(self, line): + """Chop off comments, strip, and split at delimiter. """ + if self.comments is not None: + line = line.split(self.comments)[0] + line = line.strip(" \r\n") + if not line: + return [] + return line.split(self.delimiter) + + def _fixedwidth_splitter(self, line): + if self.comments is not None: + line = line.split(self.comments)[0] + line = line.strip("\r\n") + if not line: + return [] + fixed = self.delimiter + slices = [slice(i, i + fixed) for i in range(0, len(line), fixed)] + return [line[s] for s in slices] + + def _variablewidth_splitter(self, line): + if self.comments is not None: + line = line.split(self.comments)[0] + if not line: + return [] + slices = self.delimiter + return [line[s] for s in slices] + + def __call__(self, line): + return self._handyman(_decode_line(line, self.encoding)) + + +class NameValidator: + """ + Object to validate a list of strings to use as field names. 
+ + The strings are stripped of any non alphanumeric character, and spaces + are replaced by '_'. During instantiation, the user can define a list + of names to exclude, as well as a list of invalid characters. Names in + the exclusion list are appended a '_' character. + + Once an instance has been created, it can be called with a list of + names, and a list of valid names will be created. The `__call__` + method accepts an optional keyword "default" that sets the default name + in case of ambiguity. By default this is 'f', so that names will + default to `f0`, `f1`, etc. + + Parameters + ---------- + excludelist : sequence, optional + A list of names to exclude. This list is appended to the default + list ['return', 'file', 'print']. Excluded names are appended an + underscore: for example, `file` becomes `file_` if supplied. + deletechars : str, optional + A string combining invalid characters that must be deleted from the + names. + case_sensitive : {True, False, 'upper', 'lower'}, optional + * If True, field names are case-sensitive. + * If False or 'upper', field names are converted to upper case. + * If 'lower', field names are converted to lower case. + + The default value is True. + replace_space : '_', optional + Character(s) used in replacement of white spaces. + + Notes + ----- + Calling an instance of `NameValidator` is the same as calling its + method `validate`. + + Examples + -------- + >>> validator = np.lib._iotools.NameValidator() + >>> validator(['file', 'field2', 'with space', 'CaSe']) + ('file_', 'field2', 'with_space', 'CaSe') + + >>> validator = np.lib._iotools.NameValidator(excludelist=['excl'], + ... deletechars='q', + ... case_sensitive=False) + >>> validator(['excl', 'field2', 'no_q', 'with space', 'CaSe']) + ('EXCL', 'FIELD2', 'NO_Q', 'WITH_SPACE', 'CASE') + + """ + + defaultexcludelist = ['return', 'file', 'print'] + defaultdeletechars = set(r"""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""") + + def __init__(self, excludelist=None, deletechars=None, + case_sensitive=None, replace_space='_'): + # Process the exclusion list .. + if excludelist is None: + excludelist = [] + excludelist.extend(self.defaultexcludelist) + self.excludelist = excludelist + # Process the list of characters to delete + if deletechars is None: + delete = self.defaultdeletechars + else: + delete = set(deletechars) + delete.add('"') + self.deletechars = delete + # Process the case option ..... + if (case_sensitive is None) or (case_sensitive is True): + self.case_converter = lambda x: x + elif (case_sensitive is False) or case_sensitive.startswith('u'): + self.case_converter = lambda x: x.upper() + elif case_sensitive.startswith('l'): + self.case_converter = lambda x: x.lower() + else: + msg = 'unrecognized case_sensitive value %s.' % case_sensitive + raise ValueError(msg) + + self.replace_space = replace_space + + def validate(self, names, defaultfmt="f%i", nbfields=None): + """ + Validate a list of strings as field names for a structured array. + + Parameters + ---------- + names : sequence of str + Strings to be validated. + defaultfmt : str, optional + Default format string, used if validating a given string + reduces its length to zero. + nbfields : integer, optional + Final number of validated names, used to expand or shrink the + initial list of names. + + Returns + ------- + validatednames : list of str + The list of validated field names. + + Notes + ----- + A `NameValidator` instance can be called directly, which is the + same as calling `validate`. For examples, see `NameValidator`. 
+ + """ + # Initial checks .............. + if (names is None): + if (nbfields is None): + return None + names = [] + if isinstance(names, str): + names = [names, ] + if nbfields is not None: + nbnames = len(names) + if (nbnames < nbfields): + names = list(names) + [''] * (nbfields - nbnames) + elif (nbnames > nbfields): + names = names[:nbfields] + # Set some shortcuts ........... + deletechars = self.deletechars + excludelist = self.excludelist + case_converter = self.case_converter + replace_space = self.replace_space + # Initializes some variables ... + validatednames = [] + seen = dict() + nbempty = 0 + + for item in names: + item = case_converter(item).strip() + if replace_space: + item = item.replace(' ', replace_space) + item = ''.join([c for c in item if c not in deletechars]) + if item == '': + item = defaultfmt % nbempty + while item in names: + nbempty += 1 + item = defaultfmt % nbempty + nbempty += 1 + elif item in excludelist: + item += '_' + cnt = seen.get(item, 0) + if cnt > 0: + validatednames.append(item + '_%d' % cnt) + else: + validatednames.append(item) + seen[item] = cnt + 1 + return tuple(validatednames) + + def __call__(self, names, defaultfmt="f%i", nbfields=None): + return self.validate(names, defaultfmt=defaultfmt, nbfields=nbfields) + + +def str2bool(value): + """ + Tries to transform a string supposed to represent a boolean to a boolean. + + Parameters + ---------- + value : str + The string that is transformed to a boolean. + + Returns + ------- + boolval : bool + The boolean representation of `value`. + + Raises + ------ + ValueError + If the string is not 'True' or 'False' (case independent) + + Examples + -------- + >>> np.lib._iotools.str2bool('TRUE') + True + >>> np.lib._iotools.str2bool('false') + False + + """ + value = value.upper() + if value == 'TRUE': + return True + elif value == 'FALSE': + return False + else: + raise ValueError("Invalid boolean") + + +class ConverterError(Exception): + """ + Exception raised when an error occurs in a converter for string values. + + """ + pass + + +class ConverterLockError(ConverterError): + """ + Exception raised when an attempt is made to upgrade a locked converter. + + """ + pass + + +class ConversionWarning(UserWarning): + """ + Warning issued when a string converter has a problem. + + Notes + ----- + In `genfromtxt` a `ConversionWarning` is issued if raising exceptions + is explicitly suppressed with the "invalid_raise" keyword. + + """ + pass + + +class StringConverter: + """ + Factory class for function transforming a string into another object + (int, float). + + After initialization, an instance can be called to transform a string + into another object. If the string is recognized as representing a + missing value, a default value is returned. + + Attributes + ---------- + func : function + Function used for the conversion. + default : any + Default value to return when the input corresponds to a missing + value. + type : type + Type of the output. + _status : int + Integer representing the order of the conversion. + _mapper : sequence of tuples + Sequence of tuples (dtype, function, default value) to evaluate in + order. + _locked : bool + Holds `locked` parameter. + + Parameters + ---------- + dtype_or_func : {None, dtype, function}, optional + If a `dtype`, specifies the input data type, used to define a basic + function and a default value for missing data. For example, when + `dtype` is float, the `func` attribute is set to `float` and the + default value to `np.nan`. 
If a function, this function is used to + convert a string to another object. In this case, it is recommended + to give an associated default value as input. + default : any, optional + Value to return by default, that is, when the string to be + converted is flagged as missing. If not given, `StringConverter` + tries to supply a reasonable default value. + missing_values : {None, sequence of str}, optional + ``None`` or sequence of strings indicating a missing value. If ``None`` + then missing values are indicated by empty entries. The default is + ``None``. + locked : bool, optional + Whether the StringConverter should be locked to prevent automatic + upgrade or not. Default is False. + + """ + _mapper = [(nx.bool_, str2bool, False), + (nx.int_, int, -1),] + + # On 32-bit systems, we need to make sure that we explicitly include + # nx.int64 since ns.int_ is nx.int32. + if nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize: + _mapper.append((nx.int64, int, -1)) + + _mapper.extend([(nx.float64, float, nx.nan), + (nx.complex128, complex, nx.nan + 0j), + (nx.longdouble, nx.longdouble, nx.nan), + # If a non-default dtype is passed, fall back to generic + # ones (should only be used for the converter) + (nx.integer, int, -1), + (nx.floating, float, nx.nan), + (nx.complexfloating, complex, nx.nan + 0j), + # Last, try with the string types (must be last, because + # `_mapper[-1]` is used as default in some cases) + (nx.unicode_, asunicode, '???'), + (nx.string_, asbytes, '???'), + ]) + + @classmethod + def _getdtype(cls, val): + """Returns the dtype of the input variable.""" + return np.array(val).dtype + + @classmethod + def _getsubdtype(cls, val): + """Returns the type of the dtype of the input variable.""" + return np.array(val).dtype.type + + @classmethod + def _dtypeortype(cls, dtype): + """Returns dtype for datetime64 and type of dtype otherwise.""" + + # This is a bit annoying. We want to return the "general" type in most + # cases (ie. "string" rather than "S10"), but we want to return the + # specific type for datetime64 (ie. "datetime64[us]" rather than + # "datetime64"). + if dtype.type == np.datetime64: + return dtype + return dtype.type + + @classmethod + def upgrade_mapper(cls, func, default=None): + """ + Upgrade the mapper of a StringConverter by adding a new function and + its corresponding default. + + The input function (or sequence of functions) and its associated + default value (if any) is inserted in penultimate position of the + mapper. The corresponding type is estimated from the dtype of the + default value. 
+ + Parameters + ---------- + func : var + Function, or sequence of functions + + Examples + -------- + >>> import dateutil.parser + >>> import datetime + >>> dateparser = dateutil.parser.parse + >>> defaultdate = datetime.date(2000, 1, 1) + >>> StringConverter.upgrade_mapper(dateparser, default=defaultdate) + """ + # Func is a single functions + if hasattr(func, '__call__'): + cls._mapper.insert(-1, (cls._getsubdtype(default), func, default)) + return + elif hasattr(func, '__iter__'): + if isinstance(func[0], (tuple, list)): + for _ in func: + cls._mapper.insert(-1, _) + return + if default is None: + default = [None] * len(func) + else: + default = list(default) + default.append([None] * (len(func) - len(default))) + for fct, dft in zip(func, default): + cls._mapper.insert(-1, (cls._getsubdtype(dft), fct, dft)) + + @classmethod + def _find_map_entry(cls, dtype): + # if a converter for the specific dtype is available use that + for i, (deftype, func, default_def) in enumerate(cls._mapper): + if dtype.type == deftype: + return i, (deftype, func, default_def) + + # otherwise find an inexact match + for i, (deftype, func, default_def) in enumerate(cls._mapper): + if np.issubdtype(dtype.type, deftype): + return i, (deftype, func, default_def) + + raise LookupError + + def __init__(self, dtype_or_func=None, default=None, missing_values=None, + locked=False): + # Defines a lock for upgrade + self._locked = bool(locked) + # No input dtype: minimal initialization + if dtype_or_func is None: + self.func = str2bool + self._status = 0 + self.default = default or False + dtype = np.dtype('bool') + else: + # Is the input a np.dtype ? + try: + self.func = None + dtype = np.dtype(dtype_or_func) + except TypeError: + # dtype_or_func must be a function, then + if not hasattr(dtype_or_func, '__call__'): + errmsg = ("The input argument `dtype` is neither a" + " function nor a dtype (got '%s' instead)") + raise TypeError(errmsg % type(dtype_or_func)) + # Set the function + self.func = dtype_or_func + # If we don't have a default, try to guess it or set it to + # None + if default is None: + try: + default = self.func('0') + except ValueError: + default = None + dtype = self._getdtype(default) + + # find the best match in our mapper + try: + self._status, (_, func, default_def) = self._find_map_entry(dtype) + except LookupError: + # no match + self.default = default + _, func, _ = self._mapper[-1] + self._status = 0 + else: + # use the found default only if we did not already have one + if default is None: + self.default = default_def + else: + self.default = default + + # If the input was a dtype, set the function to the last we saw + if self.func is None: + self.func = func + + # If the status is 1 (int), change the function to + # something more robust. + if self.func == self._mapper[1][1]: + if issubclass(dtype.type, np.uint64): + self.func = np.uint64 + elif issubclass(dtype.type, np.int64): + self.func = np.int64 + else: + self.func = lambda x: int(float(x)) + # Store the list of strings corresponding to missing values. 
+ if missing_values is None: + self.missing_values = {''} + else: + if isinstance(missing_values, str): + missing_values = missing_values.split(",") + self.missing_values = set(list(missing_values) + ['']) + + self._callingfunction = self._strict_call + self.type = self._dtypeortype(dtype) + self._checked = False + self._initial_default = default + + def _loose_call(self, value): + try: + return self.func(value) + except ValueError: + return self.default + + def _strict_call(self, value): + try: + + # We check if we can convert the value using the current function + new_value = self.func(value) + + # In addition to having to check whether func can convert the + # value, we also have to make sure that we don't get overflow + # errors for integers. + if self.func is int: + try: + np.array(value, dtype=self.type) + except OverflowError: + raise ValueError + + # We're still here so we can now return the new value + return new_value + + except ValueError: + if value.strip() in self.missing_values: + if not self._status: + self._checked = False + return self.default + raise ValueError("Cannot convert string '%s'" % value) + + def __call__(self, value): + return self._callingfunction(value) + + def _do_upgrade(self): + # Raise an exception if we locked the converter... + if self._locked: + errmsg = "Converter is locked and cannot be upgraded" + raise ConverterLockError(errmsg) + _statusmax = len(self._mapper) + # Complains if we try to upgrade by the maximum + _status = self._status + if _status == _statusmax: + errmsg = "Could not find a valid conversion function" + raise ConverterError(errmsg) + elif _status < _statusmax - 1: + _status += 1 + self.type, self.func, default = self._mapper[_status] + self._status = _status + if self._initial_default is not None: + self.default = self._initial_default + else: + self.default = default + + def upgrade(self, value): + """ + Find the best converter for a given string, and return the result. + + The supplied string `value` is converted by testing different + converters in order. First the `func` method of the + `StringConverter` instance is tried, if this fails other available + converters are tried. The order in which these other converters + are tried is determined by the `_status` attribute of the instance. + + Parameters + ---------- + value : str + The string to convert. + + Returns + ------- + out : any + The result of converting `value` with the appropriate converter. + + """ + self._checked = True + try: + return self._strict_call(value) + except ValueError: + self._do_upgrade() + return self.upgrade(value) + + def iterupgrade(self, value): + self._checked = True + if not hasattr(value, '__iter__'): + value = (value,) + _strict_call = self._strict_call + try: + for _m in value: + _strict_call(_m) + except ValueError: + self._do_upgrade() + self.iterupgrade(value) + + def update(self, func, default=None, testing_value=None, + missing_values='', locked=False): + """ + Set StringConverter attributes directly. + + Parameters + ---------- + func : function + Conversion function. + default : any, optional + Value to return by default, that is, when the string to be + converted is flagged as missing. If not given, + `StringConverter` tries to supply a reasonable default value. + testing_value : str, optional + A string representing a standard input value of the converter. + This string is used to help defining a reasonable default + value. + missing_values : {sequence of str, None}, optional + Sequence of strings indicating a missing value. 
If ``None``, then
+            the existing `missing_values` are cleared. The default is `''`.
+        locked : bool, optional
+            Whether the StringConverter should be locked to prevent
+            automatic upgrade or not. Default is False.
+
+        Notes
+        -----
+        `update` takes the same parameters as the constructor of
+        `StringConverter`, except that `func` does not accept a `dtype`
+        whereas `dtype_or_func` in the constructor does.
+
+        """
+        self.func = func
+        self._locked = locked
+
+        # Don't reset the default to None if we can avoid it
+        if default is not None:
+            self.default = default
+            self.type = self._dtypeortype(self._getdtype(default))
+        else:
+            try:
+                tester = func(testing_value or '1')
+            except (TypeError, ValueError):
+                tester = None
+            self.type = self._dtypeortype(self._getdtype(tester))
+
+        # Add the missing values to the existing set or clear it.
+        if missing_values is None:
+            # Clear all missing values even though the ctor initializes it to
+            # set(['']) when the argument is None.
+            self.missing_values = set()
+        else:
+            if not np.iterable(missing_values):
+                missing_values = [missing_values]
+            if not all(isinstance(v, str) for v in missing_values):
+                raise TypeError("missing_values must be strings or unicode")
+            self.missing_values.update(missing_values)
+
+
+def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs):
+    """
+    Convenience function to create a `np.dtype` object.
+
+    The function processes the input `dtype` and matches it with the given
+    names.
+
+    Parameters
+    ----------
+    ndtype : var
+        Definition of the dtype. Can be any string or dictionary recognized
+        by the `np.dtype` function, or a sequence of types.
+    names : str or sequence, optional
+        Sequence of strings to use as field names for a structured dtype.
+        For convenience, `names` can be a string of a comma-separated list
+        of names.
+    defaultfmt : str, optional
+        Format string used to define missing names, such as ``"f%i"``
+        (default) or ``"fields_%02i"``.
+    validationargs : optional
+        A series of optional arguments used to initialize a
+        `NameValidator`.
+
+    Examples
+    --------
+    >>> np.lib._iotools.easy_dtype(float)
+    dtype('float64')
+    >>> np.lib._iotools.easy_dtype("i4, f8")
+    dtype([('f0', '<i4'), ('f1', '<f8')])
+    >>> np.lib._iotools.easy_dtype("i4, f8", defaultfmt="field_%03i")
+    dtype([('field_000', '<i4'), ('field_001', '<f8')])
+
+    >>> np.lib._iotools.easy_dtype((int, float, float), names="a,b,c")
+    dtype([('a', '<i8'), ('b', '<f8'), ('c', '<f8')])
+    >>> np.lib._iotools.easy_dtype(float, names="a,b,c")
+    dtype([('a', '<f8'), ('b', '<f8'), ('c', '<f8')])
+
+    """
+    try:
+        ndtype = np.dtype(ndtype)
+    except TypeError:
+        validate = NameValidator(**validationargs)
+        nbfields = len(ndtype)
+        if names is None:
+            names = [''] * len(ndtype)
+        elif isinstance(names, str):
+            names = names.split(",")
+        names = validate(names, nbfields=nbfields, defaultfmt=defaultfmt)
+        ndtype = np.dtype(dict(formats=ndtype, names=names))
+    else:
+        # Explicit names
+        if names is not None:
+            validate = NameValidator(**validationargs)
+            if isinstance(names, str):
+                names = names.split(",")
+            # Simple dtype: repeat to match the nb of names
+            if ndtype.names is None:
+                formats = tuple([ndtype.type] * len(names))
+                names = validate(names, defaultfmt=defaultfmt)
+                ndtype = np.dtype(list(zip(names, formats)))
+            # Structured dtype: just validate the names as needed
+            else:
+                ndtype.names = validate(names, nbfields=len(ndtype.names),
+                                        defaultfmt=defaultfmt)
+        # No implicit names
+        elif ndtype.names is not None:
+            validate = NameValidator(**validationargs)
+            # Default initial names : should we change the format ?
+            numbered_names = tuple("f%i" % i for i in range(len(ndtype.names)))
+            if ((ndtype.names == numbered_names) and (defaultfmt != "f%i")):
+                ndtype.names = validate([''] * len(ndtype.names),
+                                        defaultfmt=defaultfmt)
+            # Explicit initial names : just validate
+            else:
+                ndtype.names = validate(ndtype.names, defaultfmt=defaultfmt)
+    return ndtype
diff --git a/MLPY/Lib/site-packages/numpy/lib/_version.py b/MLPY/Lib/site-packages/numpy/lib/_version.py
new file mode 100644
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/lib/_version.py
+"""Utility to compare (NumPy) version strings.
+
+The NumpyVersion class allows properly comparing numpy version strings.
+The LooseVersion and StrictVersion classes that distutils provides don't
+work; they don't recognize anything like alpha/beta/rc/dev versions.
+
+"""
+import re
+
+
+__all__ = ['NumpyVersion']
+
+
+class NumpyVersion():
+    """Parse and compare numpy version strings.
+
+    NumPy has the following versioning scheme (numbers given are examples; they
+    can be > 9 in principle):
+
+    - Released version: '1.8.0', '1.8.1', etc.
+    - Alpha: '1.8.0a1', '1.8.0a2', etc.
+    - Beta: '1.8.0b1', '1.8.0b2', etc.
+    - Release candidates: '1.8.0rc1', '1.8.0rc2', etc.
+    - Development versions: '1.8.0.dev-f1234afa' (git commit hash appended)
+    - Development versions after a1: '1.8.0a1.dev-f1234afa',
+                                     '1.8.0b2.dev-f1234afa',
+                                     '1.8.1rc1.dev-f1234afa', etc.
+    - Development versions (no git hash available): '1.8.0.dev-Unknown'
+
+    Comparing needs to be done against a valid version string or other
+    `NumpyVersion` instance. Note that all development versions of the same
+    (pre-)release compare equal.
+
+    .. versionadded:: 1.9.0
+
+    Parameters
+    ----------
+    vstring : str
+        NumPy version string (``np.__version__``).
+
+    Examples
+    --------
+    >>> from numpy.lib import NumpyVersion
+    >>> if NumpyVersion(np.__version__) < '1.7.0':
+    ...     print('skip')
+    >>> # skip
+
+    >>> NumpyVersion('1.7')  # raises ValueError, add ".0"
+    Traceback (most recent call last):
+        ...
+ ValueError: Not a valid numpy version string + + """ + + def __init__(self, vstring): + self.vstring = vstring + ver_main = re.match(r'\d+\.\d+\.\d+', vstring) + if not ver_main: + raise ValueError("Not a valid numpy version string") + + self.version = ver_main.group() + self.major, self.minor, self.bugfix = [int(x) for x in + self.version.split('.')] + if len(vstring) == ver_main.end(): + self.pre_release = 'final' + else: + alpha = re.match(r'a\d', vstring[ver_main.end():]) + beta = re.match(r'b\d', vstring[ver_main.end():]) + rc = re.match(r'rc\d', vstring[ver_main.end():]) + pre_rel = [m for m in [alpha, beta, rc] if m is not None] + if pre_rel: + self.pre_release = pre_rel[0].group() + else: + self.pre_release = '' + + self.is_devversion = bool(re.search(r'.dev', vstring)) + + def _compare_version(self, other): + """Compare major.minor.bugfix""" + if self.major == other.major: + if self.minor == other.minor: + if self.bugfix == other.bugfix: + vercmp = 0 + elif self.bugfix > other.bugfix: + vercmp = 1 + else: + vercmp = -1 + elif self.minor > other.minor: + vercmp = 1 + else: + vercmp = -1 + elif self.major > other.major: + vercmp = 1 + else: + vercmp = -1 + + return vercmp + + def _compare_pre_release(self, other): + """Compare alpha/beta/rc/final.""" + if self.pre_release == other.pre_release: + vercmp = 0 + elif self.pre_release == 'final': + vercmp = 1 + elif other.pre_release == 'final': + vercmp = -1 + elif self.pre_release > other.pre_release: + vercmp = 1 + else: + vercmp = -1 + + return vercmp + + def _compare(self, other): + if not isinstance(other, (str, NumpyVersion)): + raise ValueError("Invalid object to compare with NumpyVersion.") + + if isinstance(other, str): + other = NumpyVersion(other) + + vercmp = self._compare_version(other) + if vercmp == 0: + # Same x.y.z version, check for alpha/beta/rc + vercmp = self._compare_pre_release(other) + if vercmp == 0: + # Same version and same pre-release, check if dev version + if self.is_devversion is other.is_devversion: + vercmp = 0 + elif self.is_devversion: + vercmp = -1 + else: + vercmp = 1 + + return vercmp + + def __lt__(self, other): + return self._compare(other) < 0 + + def __le__(self, other): + return self._compare(other) <= 0 + + def __eq__(self, other): + return self._compare(other) == 0 + + def __ne__(self, other): + return self._compare(other) != 0 + + def __gt__(self, other): + return self._compare(other) > 0 + + def __ge__(self, other): + return self._compare(other) >= 0 + + def __repr__(self): + return "NumpyVersion(%s)" % self.vstring diff --git a/MLPY/Lib/site-packages/numpy/lib/_version.pyi b/MLPY/Lib/site-packages/numpy/lib/_version.pyi new file mode 100644 index 0000000000000000000000000000000000000000..3754c6010f4de847853e7c6dc9911e3b4f396c9b --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/_version.pyi @@ -0,0 +1,19 @@ +from typing import Union, List + +__all__: List[str] + +class NumpyVersion: + vstring: str + version: str + major: int + minor: int + bugfix: int + pre_release: str + is_devversion: bool + def __init__(self, vstring: str) -> None: ... + def __lt__(self, other: Union[str, NumpyVersion]) -> bool: ... + def __le__(self, other: Union[str, NumpyVersion]) -> bool: ... + def __eq__(self, other: Union[str, NumpyVersion]) -> bool: ... # type: ignore[override] + def __ne__(self, other: Union[str, NumpyVersion]) -> bool: ... # type: ignore[override] + def __gt__(self, other: Union[str, NumpyVersion]) -> bool: ... + def __ge__(self, other: Union[str, NumpyVersion]) -> bool: ... 
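Taken together, `_version.py` and the stub above give `NumpyVersion` a total ordering against plain version strings, with dev builds and pre-releases sorting below the final release of the same x.y.z. A minimal usage sketch (plain Python, not part of this diff; the `default_rng` branch is just an example of an API that only exists on NumPy >= 1.17):

    import numpy as np
    from numpy.lib import NumpyVersion

    # Dev builds and pre-releases of the same x.y.z sort below the
    # final release: 1.8.0.dev-f1234af < 1.8.0rc1 < 1.8.0.
    assert NumpyVersion('1.8.0.dev-f1234af') < '1.8.0rc1'
    assert NumpyVersion('1.8.0rc1') < '1.8.0'

    # Typical use: gate optional code paths on the installed version.
    if NumpyVersion(np.__version__) >= '1.17.0':
        rng = np.random.default_rng(0)   # Generator API, NumPy >= 1.17
    else:
        rng = np.random.RandomState(0)   # legacy API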
diff --git a/MLPY/Lib/site-packages/numpy/lib/arraypad.py b/MLPY/Lib/site-packages/numpy/lib/arraypad.py
new file mode 100644
index 0000000000000000000000000000000000000000..c30f1a348239f0dacdd1f4da8426737996669c2b
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/lib/arraypad.py
@@ -0,0 +1,876 @@
+"""
+The arraypad module contains a group of functions to pad values onto the edges
+of an n-dimensional array.
+
+"""
+import numpy as np
+from numpy.core.overrides import array_function_dispatch
+from numpy.lib.index_tricks import ndindex
+
+
+__all__ = ['pad']
+
+
+###############################################################################
+# Private utility functions.
+
+
+def _round_if_needed(arr, dtype):
+    """
+    Round `arr` in place if the destination dtype is an integer type.
+
+    Parameters
+    ----------
+    arr : ndarray
+        Input array.
+    dtype : dtype
+        The dtype of the destination array.
+    """
+    if np.issubdtype(dtype, np.integer):
+        arr.round(out=arr)
+
+
+def _slice_at_axis(sl, axis):
+    """
+    Construct tuple of slices to slice an array in the given dimension.
+
+    Parameters
+    ----------
+    sl : slice
+        The slice for the given dimension.
+    axis : int
+        The axis to which `sl` is applied. All other dimensions are left
+        "unsliced".
+
+    Returns
+    -------
+    sl : tuple of slices
+        A tuple of slices that applies `sl` along `axis`; the trailing
+        ``...`` covers any remaining dimensions.
+
+    Examples
+    --------
+    >>> _slice_at_axis(slice(None, 3, -1), 1)
+    (slice(None, None, None), slice(None, 3, -1), Ellipsis)
+    """
+    return (slice(None),) * axis + (sl,) + (...,)
+
+
+def _view_roi(array, original_area_slice, axis):
+    """
+    Get a view of the current region of interest during iterative padding.
+
+    When padding multiple dimensions iteratively, corner values are
+    unnecessarily overwritten multiple times. This function reduces the
+    working area for the first dimensions so that corners are excluded.
+
+    Parameters
+    ----------
+    array : ndarray
+        The array with the region of interest.
+    original_area_slice : tuple of slices
+        Denotes the area with original values of the unpadded array.
+    axis : int
+        The currently padded dimension assuming that `axis` is padded before
+        `axis` + 1.
+
+    Returns
+    -------
+    roi : ndarray
+        The region of interest of the original `array`.
+    """
+    axis += 1
+    sl = (slice(None),) * axis + original_area_slice[axis:]
+    return array[sl]
+
+
+def _pad_simple(array, pad_width, fill_value=None):
+    """
+    Pad array on all sides with either a single value or undefined values.
+
+    Parameters
+    ----------
+    array : ndarray
+        Array to grow.
+    pad_width : sequence of tuple[int, int]
+        Pad width on both sides for each dimension in `array`.
+    fill_value : scalar, optional
+        If provided the padded area is filled with this value, otherwise
+        the pad area is left undefined.
+
+    Returns
+    -------
+    padded : ndarray
+        The padded array with the same dtype as `array`. Its order will
+        default to C-style if `array` is not F-contiguous.
+    original_area_slice : tuple
+        A tuple of slices pointing to the area of the original array.
+ """ + # Allocate grown array + new_shape = tuple( + left + size + right + for size, (left, right) in zip(array.shape, pad_width) + ) + order = 'F' if array.flags.fnc else 'C' # Fortran and not also C-order + padded = np.empty(new_shape, dtype=array.dtype, order=order) + + if fill_value is not None: + padded.fill(fill_value) + + # Copy old array into correct space + original_area_slice = tuple( + slice(left, left + size) + for size, (left, right) in zip(array.shape, pad_width) + ) + padded[original_area_slice] = array + + return padded, original_area_slice + + +def _set_pad_area(padded, axis, width_pair, value_pair): + """ + Set empty-padded area in given dimension. + + Parameters + ---------- + padded : ndarray + Array with the pad area which is modified inplace. + axis : int + Dimension with the pad area to set. + width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. + value_pair : tuple of scalars or ndarrays + Values inserted into the pad area on each side. It must match or be + broadcastable to the shape of `arr`. + """ + left_slice = _slice_at_axis(slice(None, width_pair[0]), axis) + padded[left_slice] = value_pair[0] + + right_slice = _slice_at_axis( + slice(padded.shape[axis] - width_pair[1], None), axis) + padded[right_slice] = value_pair[1] + + +def _get_edges(padded, axis, width_pair): + """ + Retrieve edge values from empty-padded array in given dimension. + + Parameters + ---------- + padded : ndarray + Empty-padded array. + axis : int + Dimension in which the edges are considered. + width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. + + Returns + ------- + left_edge, right_edge : ndarray + Edge values of the valid area in `padded` in the given dimension. Its + shape will always match `padded` except for the dimension given by + `axis` which will have a length of 1. + """ + left_index = width_pair[0] + left_slice = _slice_at_axis(slice(left_index, left_index + 1), axis) + left_edge = padded[left_slice] + + right_index = padded.shape[axis] - width_pair[1] + right_slice = _slice_at_axis(slice(right_index - 1, right_index), axis) + right_edge = padded[right_slice] + + return left_edge, right_edge + + +def _get_linear_ramps(padded, axis, width_pair, end_value_pair): + """ + Construct linear ramps for empty-padded array in given dimension. + + Parameters + ---------- + padded : ndarray + Empty-padded array. + axis : int + Dimension in which the ramps are constructed. + width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. + end_value_pair : (scalar, scalar) + End values for the linear ramps which form the edge of the fully padded + array. These values are included in the linear ramps. + + Returns + ------- + left_ramp, right_ramp : ndarray + Linear ramps to set on both sides of `padded`. + """ + edge_pair = _get_edges(padded, axis, width_pair) + + left_ramp, right_ramp = ( + np.linspace( + start=end_value, + stop=edge.squeeze(axis), # Dimension is replaced by linspace + num=width, + endpoint=False, + dtype=padded.dtype, + axis=axis + ) + for end_value, edge, width in zip( + end_value_pair, edge_pair, width_pair + ) + ) + + # Reverse linear space in appropriate dimension + right_ramp = right_ramp[_slice_at_axis(slice(None, None, -1), axis)] + + return left_ramp, right_ramp + + +def _get_stats(padded, axis, width_pair, length_pair, stat_func): + """ + Calculate statistic for the empty-padded array in given dimension. 
+ + Parameters + ---------- + padded : ndarray + Empty-padded array. + axis : int + Dimension in which the statistic is calculated. + width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. + length_pair : 2-element sequence of None or int + Gives the number of values in valid area from each side that is + taken into account when calculating the statistic. If None the entire + valid area in `padded` is considered. + stat_func : function + Function to compute statistic. The expected signature is + ``stat_func(x: ndarray, axis: int, keepdims: bool) -> ndarray``. + + Returns + ------- + left_stat, right_stat : ndarray + Calculated statistic for both sides of `padded`. + """ + # Calculate indices of the edges of the area with original values + left_index = width_pair[0] + right_index = padded.shape[axis] - width_pair[1] + # as well as its length + max_length = right_index - left_index + + # Limit stat_lengths to max_length + left_length, right_length = length_pair + if left_length is None or max_length < left_length: + left_length = max_length + if right_length is None or max_length < right_length: + right_length = max_length + + if (left_length == 0 or right_length == 0) \ + and stat_func in {np.amax, np.amin}: + # amax and amin can't operate on an empty array, + # raise a more descriptive warning here instead of the default one + raise ValueError("stat_length of 0 yields no value for padding") + + # Calculate statistic for the left side + left_slice = _slice_at_axis( + slice(left_index, left_index + left_length), axis) + left_chunk = padded[left_slice] + left_stat = stat_func(left_chunk, axis=axis, keepdims=True) + _round_if_needed(left_stat, padded.dtype) + + if left_length == right_length == max_length: + # return early as right_stat must be identical to left_stat + return left_stat, left_stat + + # Calculate statistic for the right side + right_slice = _slice_at_axis( + slice(right_index - right_length, right_index), axis) + right_chunk = padded[right_slice] + right_stat = stat_func(right_chunk, axis=axis, keepdims=True) + _round_if_needed(right_stat, padded.dtype) + + return left_stat, right_stat + + +def _set_reflect_both(padded, axis, width_pair, method, include_edge=False): + """ + Pad `axis` of `arr` with reflection. + + Parameters + ---------- + padded : ndarray + Input array of arbitrary shape. + axis : int + Axis along which to pad `arr`. + width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. + method : str + Controls method of reflection; options are 'even' or 'odd'. + include_edge : bool + If true, edge value is included in reflection, otherwise the edge + value forms the symmetric axis to the reflection. + + Returns + ------- + pad_amt : tuple of ints, length 2 + New index positions of padding to do along the `axis`. If these are + both 0, padding is done in this dimension. 
+ """ + left_pad, right_pad = width_pair + old_length = padded.shape[axis] - right_pad - left_pad + + if include_edge: + # Edge is included, we need to offset the pad amount by 1 + edge_offset = 1 + else: + edge_offset = 0 # Edge is not included, no need to offset pad amount + old_length -= 1 # but must be omitted from the chunk + + if left_pad > 0: + # Pad with reflected values on left side: + # First limit chunk size which can't be larger than pad area + chunk_length = min(old_length, left_pad) + # Slice right to left, stop on or next to edge, start relative to stop + stop = left_pad - edge_offset + start = stop + chunk_length + left_slice = _slice_at_axis(slice(start, stop, -1), axis) + left_chunk = padded[left_slice] + + if method == "odd": + # Negate chunk and align with edge + edge_slice = _slice_at_axis(slice(left_pad, left_pad + 1), axis) + left_chunk = 2 * padded[edge_slice] - left_chunk + + # Insert chunk into padded area + start = left_pad - chunk_length + stop = left_pad + pad_area = _slice_at_axis(slice(start, stop), axis) + padded[pad_area] = left_chunk + # Adjust pointer to left edge for next iteration + left_pad -= chunk_length + + if right_pad > 0: + # Pad with reflected values on right side: + # First limit chunk size which can't be larger than pad area + chunk_length = min(old_length, right_pad) + # Slice right to left, start on or next to edge, stop relative to start + start = -right_pad + edge_offset - 2 + stop = start - chunk_length + right_slice = _slice_at_axis(slice(start, stop, -1), axis) + right_chunk = padded[right_slice] + + if method == "odd": + # Negate chunk and align with edge + edge_slice = _slice_at_axis( + slice(-right_pad - 1, -right_pad), axis) + right_chunk = 2 * padded[edge_slice] - right_chunk + + # Insert chunk into padded area + start = padded.shape[axis] - right_pad + stop = start + chunk_length + pad_area = _slice_at_axis(slice(start, stop), axis) + padded[pad_area] = right_chunk + # Adjust pointer to right edge for next iteration + right_pad -= chunk_length + + return left_pad, right_pad + + +def _set_wrap_both(padded, axis, width_pair): + """ + Pad `axis` of `arr` with wrapped values. + + Parameters + ---------- + padded : ndarray + Input array of arbitrary shape. + axis : int + Axis along which to pad `arr`. + width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. + + Returns + ------- + pad_amt : tuple of ints, length 2 + New index positions of padding to do along the `axis`. If these are + both 0, padding is done in this dimension. + """ + left_pad, right_pad = width_pair + period = padded.shape[axis] - right_pad - left_pad + + # If the current dimension of `arr` doesn't contain enough valid values + # (not part of the undefined pad area) we need to pad multiple times. + # Each time the pad area shrinks on both sides which is communicated with + # these variables. + new_left_pad = 0 + new_right_pad = 0 + + if left_pad > 0: + # Pad with wrapped values on left side + # First slice chunk from right side of the non-pad area. 
+ # Use min(period, left_pad) to ensure that chunk is not larger than + # pad area + right_slice = _slice_at_axis( + slice(-right_pad - min(period, left_pad), + -right_pad if right_pad != 0 else None), + axis + ) + right_chunk = padded[right_slice] + + if left_pad > period: + # Chunk is smaller than pad area + pad_area = _slice_at_axis(slice(left_pad - period, left_pad), axis) + new_left_pad = left_pad - period + else: + # Chunk matches pad area + pad_area = _slice_at_axis(slice(None, left_pad), axis) + padded[pad_area] = right_chunk + + if right_pad > 0: + # Pad with wrapped values on right side + # First slice chunk from left side of the non-pad area. + # Use min(period, right_pad) to ensure that chunk is not larger than + # pad area + left_slice = _slice_at_axis( + slice(left_pad, left_pad + min(period, right_pad),), axis) + left_chunk = padded[left_slice] + + if right_pad > period: + # Chunk is smaller than pad area + pad_area = _slice_at_axis( + slice(-right_pad, -right_pad + period), axis) + new_right_pad = right_pad - period + else: + # Chunk matches pad area + pad_area = _slice_at_axis(slice(-right_pad, None), axis) + padded[pad_area] = left_chunk + + return new_left_pad, new_right_pad + + +def _as_pairs(x, ndim, as_index=False): + """ + Broadcast `x` to an array with the shape (`ndim`, 2). + + A helper function for `pad` that prepares and validates arguments like + `pad_width` for iteration in pairs. + + Parameters + ---------- + x : {None, scalar, array-like} + The object to broadcast to the shape (`ndim`, 2). + ndim : int + Number of pairs the broadcasted `x` will have. + as_index : bool, optional + If `x` is not None, try to round each element of `x` to an integer + (dtype `np.intp`) and ensure every element is positive. + + Returns + ------- + pairs : nested iterables, shape (`ndim`, 2) + The broadcasted version of `x`. + + Raises + ------ + ValueError + If `as_index` is True and `x` contains negative elements. + Or if `x` is not broadcastable to the shape (`ndim`, 2). + """ + if x is None: + # Pass through None as a special case, otherwise np.round(x) fails + # with an AttributeError + return ((None, None),) * ndim + + x = np.array(x) + if as_index: + x = np.round(x).astype(np.intp, copy=False) + + if x.ndim < 3: + # Optimization: Possibly use faster paths for cases where `x` has + # only 1 or 2 elements. `np.broadcast_to` could handle these as well + # but is currently slower + + if x.size == 1: + # x was supplied as a single value + x = x.ravel() # Ensure x[0] works for x.ndim == 0, 1, 2 + if as_index and x < 0: + raise ValueError("index can't contain negative values") + return ((x[0], x[0]),) * ndim + + if x.size == 2 and x.shape != (2, 1): + # x was supplied with a single value for each side + # but except case when each dimension has a single value + # which should be broadcasted to a pair, + # e.g. 
[[1], [2]] -> [[1, 1], [2, 2]] not [[1, 2], [1, 2]] + x = x.ravel() # Ensure x[0], x[1] works + if as_index and (x[0] < 0 or x[1] < 0): + raise ValueError("index can't contain negative values") + return ((x[0], x[1]),) * ndim + + if as_index and x.min() < 0: + raise ValueError("index can't contain negative values") + + # Converting the array with `tolist` seems to improve performance + # when iterating and indexing the result (see usage in `pad`) + return np.broadcast_to(x, (ndim, 2)).tolist() + + +def _pad_dispatcher(array, pad_width, mode=None, **kwargs): + return (array,) + + +############################################################################### +# Public functions + + +@array_function_dispatch(_pad_dispatcher, module='numpy') +def pad(array, pad_width, mode='constant', **kwargs): + """ + Pad an array. + + Parameters + ---------- + array : array_like of rank N + The array to pad. + pad_width : {sequence, array_like, int} + Number of values padded to the edges of each axis. + ((before_1, after_1), ... (before_N, after_N)) unique pad widths + for each axis. + ((before, after),) yields same before and after pad for each axis. + (pad,) or int is a shortcut for before = after = pad width for all + axes. + mode : str or function, optional + One of the following string values or a user supplied function. + + 'constant' (default) + Pads with a constant value. + 'edge' + Pads with the edge values of array. + 'linear_ramp' + Pads with the linear ramp between end_value and the + array edge value. + 'maximum' + Pads with the maximum value of all or part of the + vector along each axis. + 'mean' + Pads with the mean value of all or part of the + vector along each axis. + 'median' + Pads with the median value of all or part of the + vector along each axis. + 'minimum' + Pads with the minimum value of all or part of the + vector along each axis. + 'reflect' + Pads with the reflection of the vector mirrored on + the first and last values of the vector along each + axis. + 'symmetric' + Pads with the reflection of the vector mirrored + along the edge of the array. + 'wrap' + Pads with the wrap of the vector along the axis. + The first values are used to pad the end and the + end values are used to pad the beginning. + 'empty' + Pads with undefined values. + + .. versionadded:: 1.17 + + + Padding function, see Notes. + stat_length : sequence or int, optional + Used in 'maximum', 'mean', 'median', and 'minimum'. Number of + values at edge of each axis used to calculate the statistic value. + + ((before_1, after_1), ... (before_N, after_N)) unique statistic + lengths for each axis. + + ((before, after),) yields same before and after statistic lengths + for each axis. + + (stat_length,) or int is a shortcut for before = after = statistic + length for all axes. + + Default is ``None``, to use the entire axis. + constant_values : sequence or scalar, optional + Used in 'constant'. The values to set the padded values for each + axis. + + ``((before_1, after_1), ... (before_N, after_N))`` unique pad constants + for each axis. + + ``((before, after),)`` yields same before and after constants for each + axis. + + ``(constant,)`` or ``constant`` is a shortcut for ``before = after = constant`` for + all axes. + + Default is 0. + end_values : sequence or scalar, optional + Used in 'linear_ramp'. The values used for the ending value of the + linear_ramp and that will form the edge of the padded array. + + ``((before_1, after_1), ... (before_N, after_N))`` unique end values + for each axis. 
+ + ``((before, after),)`` yields same before and after end values for each + axis. + + ``(constant,)`` or ``constant`` is a shortcut for ``before = after = constant`` for + all axes. + + Default is 0. + reflect_type : {'even', 'odd'}, optional + Used in 'reflect', and 'symmetric'. The 'even' style is the + default with an unaltered reflection around the edge value. For + the 'odd' style, the extended part of the array is created by + subtracting the reflected values from two times the edge value. + + Returns + ------- + pad : ndarray + Padded array of rank equal to `array` with shape increased + according to `pad_width`. + + Notes + ----- + .. versionadded:: 1.7.0 + + For an array with rank greater than 1, some of the padding of later + axes is calculated from padding of previous axes. This is easiest to + think about with a rank 2 array where the corners of the padded array + are calculated by using padded values from the first axis. + + The padding function, if used, should modify a rank 1 array in-place. It + has the following signature:: + + padding_func(vector, iaxis_pad_width, iaxis, kwargs) + + where + + vector : ndarray + A rank 1 array already padded with zeros. Padded values are + vector[:iaxis_pad_width[0]] and vector[-iaxis_pad_width[1]:]. + iaxis_pad_width : tuple + A 2-tuple of ints, iaxis_pad_width[0] represents the number of + values padded at the beginning of vector where + iaxis_pad_width[1] represents the number of values padded at + the end of vector. + iaxis : int + The axis currently being calculated. + kwargs : dict + Any keyword arguments the function requires. + + Examples + -------- + >>> a = [1, 2, 3, 4, 5] + >>> np.pad(a, (2, 3), 'constant', constant_values=(4, 6)) + array([4, 4, 1, ..., 6, 6, 6]) + + >>> np.pad(a, (2, 3), 'edge') + array([1, 1, 1, ..., 5, 5, 5]) + + >>> np.pad(a, (2, 3), 'linear_ramp', end_values=(5, -4)) + array([ 5, 3, 1, 2, 3, 4, 5, 2, -1, -4]) + + >>> np.pad(a, (2,), 'maximum') + array([5, 5, 1, 2, 3, 4, 5, 5, 5]) + + >>> np.pad(a, (2,), 'mean') + array([3, 3, 1, 2, 3, 4, 5, 3, 3]) + + >>> np.pad(a, (2,), 'median') + array([3, 3, 1, 2, 3, 4, 5, 3, 3]) + + >>> a = [[1, 2], [3, 4]] + >>> np.pad(a, ((3, 2), (2, 3)), 'minimum') + array([[1, 1, 1, 2, 1, 1, 1], + [1, 1, 1, 2, 1, 1, 1], + [1, 1, 1, 2, 1, 1, 1], + [1, 1, 1, 2, 1, 1, 1], + [3, 3, 3, 4, 3, 3, 3], + [1, 1, 1, 2, 1, 1, 1], + [1, 1, 1, 2, 1, 1, 1]]) + + >>> a = [1, 2, 3, 4, 5] + >>> np.pad(a, (2, 3), 'reflect') + array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2]) + + >>> np.pad(a, (2, 3), 'reflect', reflect_type='odd') + array([-1, 0, 1, 2, 3, 4, 5, 6, 7, 8]) + + >>> np.pad(a, (2, 3), 'symmetric') + array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3]) + + >>> np.pad(a, (2, 3), 'symmetric', reflect_type='odd') + array([0, 1, 1, 2, 3, 4, 5, 5, 6, 7]) + + >>> np.pad(a, (2, 3), 'wrap') + array([4, 5, 1, 2, 3, 4, 5, 1, 2, 3]) + + >>> def pad_with(vector, pad_width, iaxis, kwargs): + ... pad_value = kwargs.get('padder', 10) + ... vector[:pad_width[0]] = pad_value + ... 
vector[-pad_width[1]:] = pad_value + >>> a = np.arange(6) + >>> a = a.reshape((2, 3)) + >>> np.pad(a, 2, pad_with) + array([[10, 10, 10, 10, 10, 10, 10], + [10, 10, 10, 10, 10, 10, 10], + [10, 10, 0, 1, 2, 10, 10], + [10, 10, 3, 4, 5, 10, 10], + [10, 10, 10, 10, 10, 10, 10], + [10, 10, 10, 10, 10, 10, 10]]) + >>> np.pad(a, 2, pad_with, padder=100) + array([[100, 100, 100, 100, 100, 100, 100], + [100, 100, 100, 100, 100, 100, 100], + [100, 100, 0, 1, 2, 100, 100], + [100, 100, 3, 4, 5, 100, 100], + [100, 100, 100, 100, 100, 100, 100], + [100, 100, 100, 100, 100, 100, 100]]) + """ + array = np.asarray(array) + pad_width = np.asarray(pad_width) + + if not pad_width.dtype.kind == 'i': + raise TypeError('`pad_width` must be of integral type.') + + # Broadcast to shape (array.ndim, 2) + pad_width = _as_pairs(pad_width, array.ndim, as_index=True) + + if callable(mode): + # Old behavior: Use user-supplied function with np.apply_along_axis + function = mode + # Create a new zero padded array + padded, _ = _pad_simple(array, pad_width, fill_value=0) + # And apply along each axis + + for axis in range(padded.ndim): + # Iterate using ndindex as in apply_along_axis, but assuming that + # function operates inplace on the padded array. + + # view with the iteration axis at the end + view = np.moveaxis(padded, axis, -1) + + # compute indices for the iteration axes, and append a trailing + # ellipsis to prevent 0d arrays decaying to scalars (gh-8642) + inds = ndindex(view.shape[:-1]) + inds = (ind + (Ellipsis,) for ind in inds) + for ind in inds: + function(view[ind], pad_width[axis], axis, kwargs) + + return padded + + # Make sure that no unsupported keywords were passed for the current mode + allowed_kwargs = { + 'empty': [], 'edge': [], 'wrap': [], + 'constant': ['constant_values'], + 'linear_ramp': ['end_values'], + 'maximum': ['stat_length'], + 'mean': ['stat_length'], + 'median': ['stat_length'], + 'minimum': ['stat_length'], + 'reflect': ['reflect_type'], + 'symmetric': ['reflect_type'], + } + try: + unsupported_kwargs = set(kwargs) - set(allowed_kwargs[mode]) + except KeyError: + raise ValueError("mode '{}' is not supported".format(mode)) from None + if unsupported_kwargs: + raise ValueError("unsupported keyword arguments for mode '{}': {}" + .format(mode, unsupported_kwargs)) + + stat_functions = {"maximum": np.amax, "minimum": np.amin, + "mean": np.mean, "median": np.median} + + # Create array with final shape and original values + # (padded area is undefined) + padded, original_area_slice = _pad_simple(array, pad_width) + # And prepare iteration over all dimensions + # (zipping may be more readable than using enumerate) + axes = range(padded.ndim) + + if mode == "constant": + values = kwargs.get("constant_values", 0) + values = _as_pairs(values, padded.ndim) + for axis, width_pair, value_pair in zip(axes, pad_width, values): + roi = _view_roi(padded, original_area_slice, axis) + _set_pad_area(roi, axis, width_pair, value_pair) + + elif mode == "empty": + pass # Do nothing as _pad_simple already returned the correct result + + elif array.size == 0: + # Only modes "constant" and "empty" can extend empty axes, all other + # modes depend on `array` not being empty + # -> ensure every empty axis is only "padded with 0" + for axis, width_pair in zip(axes, pad_width): + if array.shape[axis] == 0 and any(width_pair): + raise ValueError( + "can't extend empty axis {} using modes other than " + "'constant' or 'empty'".format(axis) + ) + # passed, don't need to do anything more as _pad_simple already + # 
returned the correct result + + elif mode == "edge": + for axis, width_pair in zip(axes, pad_width): + roi = _view_roi(padded, original_area_slice, axis) + edge_pair = _get_edges(roi, axis, width_pair) + _set_pad_area(roi, axis, width_pair, edge_pair) + + elif mode == "linear_ramp": + end_values = kwargs.get("end_values", 0) + end_values = _as_pairs(end_values, padded.ndim) + for axis, width_pair, value_pair in zip(axes, pad_width, end_values): + roi = _view_roi(padded, original_area_slice, axis) + ramp_pair = _get_linear_ramps(roi, axis, width_pair, value_pair) + _set_pad_area(roi, axis, width_pair, ramp_pair) + + elif mode in stat_functions: + func = stat_functions[mode] + length = kwargs.get("stat_length", None) + length = _as_pairs(length, padded.ndim, as_index=True) + for axis, width_pair, length_pair in zip(axes, pad_width, length): + roi = _view_roi(padded, original_area_slice, axis) + stat_pair = _get_stats(roi, axis, width_pair, length_pair, func) + _set_pad_area(roi, axis, width_pair, stat_pair) + + elif mode in {"reflect", "symmetric"}: + method = kwargs.get("reflect_type", "even") + include_edge = True if mode == "symmetric" else False + for axis, (left_index, right_index) in zip(axes, pad_width): + if array.shape[axis] == 1 and (left_index > 0 or right_index > 0): + # Extending singleton dimension for 'reflect' is legacy + # behavior; it really should raise an error. + edge_pair = _get_edges(padded, axis, (left_index, right_index)) + _set_pad_area( + padded, axis, (left_index, right_index), edge_pair) + continue + + roi = _view_roi(padded, original_area_slice, axis) + while left_index > 0 or right_index > 0: + # Iteratively pad until dimension is filled with reflected + # values. This is necessary if the pad area is larger than + # the length of the original values in the current dimension. + left_index, right_index = _set_reflect_both( + roi, axis, (left_index, right_index), + method, include_edge + ) + + elif mode == "wrap": + for axis, (left_index, right_index) in zip(axes, pad_width): + roi = _view_roi(padded, original_area_slice, axis) + while left_index > 0 or right_index > 0: + # Iteratively pad until dimension is filled with wrapped + # values. This is necessary if the pad area is larger than + # the length of the original values in the current dimension. + left_index, right_index = _set_wrap_both( + roi, axis, (left_index, right_index)) + + return padded diff --git a/MLPY/Lib/site-packages/numpy/lib/arraypad.pyi b/MLPY/Lib/site-packages/numpy/lib/arraypad.pyi new file mode 100644 index 0000000000000000000000000000000000000000..a33da474658928ea415520b6bc490ae5f6d531e4 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/arraypad.pyi @@ -0,0 +1,5 @@ +from typing import List + +__all__: List[str] + +def pad(array, pad_width, mode=..., **kwargs): ... diff --git a/MLPY/Lib/site-packages/numpy/lib/arraysetops.py b/MLPY/Lib/site-packages/numpy/lib/arraysetops.py new file mode 100644 index 0000000000000000000000000000000000000000..6be34e938877a09eccffb629b217426cc0c90496 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/arraysetops.py @@ -0,0 +1,826 @@ +""" +Set operations for arrays based on sorting. + +Notes +----- + +For floating point arrays, inaccurate results may appear due to usual round-off +and floating point comparison issues. + +Speed could be gained in some operations by an implementation of +`numpy.sort`, that can provide directly the permutation vectors, thus avoiding +calls to `numpy.argsort`. 
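Stepping back to the arraypad module that closed just above: the reflect, symmetric, and wrap branches of np.pad fill iteratively, so the requested pad width may exceed the input length. A small sketch of that case (the expected output follows from the 'reflect' semantics in the docstring):

    import numpy as np

    a = np.array([1, 2, 3])
    # Pad area (7) is wider than the array (3); _set_reflect_both runs in
    # a loop until the requested width is filled.
    print(np.pad(a, (7, 0), 'reflect'))
    # -> [2 3 2 1 2 3 2 1 2 3]

    # 'empty' only allocates: values in the pad area are left undefined.
    b = np.pad(a, 2, 'empty')
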
+ +Original author: Robert Cimrman + +""" +import functools + +import numpy as np +from numpy.core import overrides + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +__all__ = [ + 'ediff1d', 'intersect1d', 'setxor1d', 'union1d', 'setdiff1d', 'unique', + 'in1d', 'isin' + ] + + +def _ediff1d_dispatcher(ary, to_end=None, to_begin=None): + return (ary, to_end, to_begin) + + +@array_function_dispatch(_ediff1d_dispatcher) +def ediff1d(ary, to_end=None, to_begin=None): + """ + The differences between consecutive elements of an array. + + Parameters + ---------- + ary : array_like + If necessary, will be flattened before the differences are taken. + to_end : array_like, optional + Number(s) to append at the end of the returned differences. + to_begin : array_like, optional + Number(s) to prepend at the beginning of the returned differences. + + Returns + ------- + ediff1d : ndarray + The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``. + + See Also + -------- + diff, gradient + + Notes + ----- + When applied to masked arrays, this function drops the mask information + if the `to_begin` and/or `to_end` parameters are used. + + Examples + -------- + >>> x = np.array([1, 2, 4, 7, 0]) + >>> np.ediff1d(x) + array([ 1, 2, 3, -7]) + + >>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99])) + array([-99, 1, 2, ..., -7, 88, 99]) + + The returned array is always 1D. + + >>> y = [[1, 2, 4], [1, 6, 24]] + >>> np.ediff1d(y) + array([ 1, 2, -3, 5, 18]) + + """ + # force a 1d array + ary = np.asanyarray(ary).ravel() + + # enforce that the dtype of `ary` is used for the output + dtype_req = ary.dtype + + # fast track default case + if to_begin is None and to_end is None: + return ary[1:] - ary[:-1] + + if to_begin is None: + l_begin = 0 + else: + to_begin = np.asanyarray(to_begin) + if not np.can_cast(to_begin, dtype_req, casting="same_kind"): + raise TypeError("dtype of `to_begin` must be compatible " + "with input `ary` under the `same_kind` rule.") + + to_begin = to_begin.ravel() + l_begin = len(to_begin) + + if to_end is None: + l_end = 0 + else: + to_end = np.asanyarray(to_end) + if not np.can_cast(to_end, dtype_req, casting="same_kind"): + raise TypeError("dtype of `to_end` must be compatible " + "with input `ary` under the `same_kind` rule.") + + to_end = to_end.ravel() + l_end = len(to_end) + + # do the calculation in place and copy to_begin and to_end + l_diff = max(len(ary) - 1, 0) + result = np.empty(l_diff + l_begin + l_end, dtype=ary.dtype) + result = ary.__array_wrap__(result) + if l_begin > 0: + result[:l_begin] = to_begin + if l_end > 0: + result[l_begin + l_diff:] = to_end + np.subtract(ary[1:], ary[:-1], result[l_begin:l_begin + l_diff]) + return result + + +def _unpack_tuple(x): + """ Unpacks one-element tuples for use as return values """ + if len(x) == 1: + return x[0] + else: + return x + + +def _unique_dispatcher(ar, return_index=None, return_inverse=None, + return_counts=None, axis=None): + return (ar,) + + +@array_function_dispatch(_unique_dispatcher) +def unique(ar, return_index=False, return_inverse=False, + return_counts=False, axis=None): + """ + Find the unique elements of an array. + + Returns the sorted unique elements of an array. 
There are three optional + outputs in addition to the unique elements: + + * the indices of the input array that give the unique values + * the indices of the unique array that reconstruct the input array + * the number of times each unique value comes up in the input array + + Parameters + ---------- + ar : array_like + Input array. Unless `axis` is specified, this will be flattened if it + is not already 1-D. + return_index : bool, optional + If True, also return the indices of `ar` (along the specified axis, + if provided, or in the flattened array) that result in the unique array. + return_inverse : bool, optional + If True, also return the indices of the unique array (for the specified + axis, if provided) that can be used to reconstruct `ar`. + return_counts : bool, optional + If True, also return the number of times each unique item appears + in `ar`. + + .. versionadded:: 1.9.0 + + axis : int or None, optional + The axis to operate on. If None, `ar` will be flattened. If an integer, + the subarrays indexed by the given axis will be flattened and treated + as the elements of a 1-D array with the dimension of the given axis, + see the notes for more details. Object arrays or structured arrays + that contain objects are not supported if the `axis` kwarg is used. The + default is None. + + .. versionadded:: 1.13.0 + + Returns + ------- + unique : ndarray + The sorted unique values. + unique_indices : ndarray, optional + The indices of the first occurrences of the unique values in the + original array. Only provided if `return_index` is True. + unique_inverse : ndarray, optional + The indices to reconstruct the original array from the + unique array. Only provided if `return_inverse` is True. + unique_counts : ndarray, optional + The number of times each of the unique values comes up in the + original array. Only provided if `return_counts` is True. + + .. versionadded:: 1.9.0 + + See Also + -------- + numpy.lib.arraysetops : Module with a number of other functions for + performing set operations on arrays. + repeat : Repeat elements of an array. + + Notes + ----- + When an axis is specified the subarrays indexed by the axis are sorted. + This is done by making the specified axis the first dimension of the array + (move the axis to the first dimension to keep the order of the other axes) + and then flattening the subarrays in C order. The flattened subarrays are + then viewed as a structured type with each element given a label, with the + effect that we end up with a 1-D array of structured types that can be + treated in the same way as any other 1-D array. The result is that the + flattened subarrays are sorted in lexicographic order starting with the + first element. + + .. versionchanged: NumPy 1.21 + If nan values are in the input array, a single nan is put + to the end of the sorted unique values. + + Also for complex arrays all NaN values are considered equivalent + (no matter whether the NaN is in the real or imaginary part). + As the representant for the returned array the smallest one in the + lexicographical order is chosen - see np.sort for how the lexicographical + order is defined for complex arrays. 
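Illustrating the NaN-collapsing behaviour described in the note above (a sketch, assuming NumPy >= 1.21 where _unique1d gained the isnan branch):

    import numpy as np

    print(np.unique([1.0, np.nan, np.nan, 2.0, np.nan]))
    # -> [ 1.  2. nan]   (a single nan, placed at the end)

    # Before 1.21 each nan compared unequal to itself, so every nan was
    # kept, e.g. [ 1.  2. nan nan nan].
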
+
+    Examples
+    --------
+    >>> np.unique([1, 1, 2, 2, 3, 3])
+    array([1, 2, 3])
+    >>> a = np.array([[1, 1], [2, 3]])
+    >>> np.unique(a)
+    array([1, 2, 3])
+
+    Return the unique rows of a 2D array
+
+    >>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]])
+    >>> np.unique(a, axis=0)
+    array([[1, 0, 0], [2, 3, 4]])
+
+    Return the indices of the original array that give the unique values:
+
+    >>> a = np.array(['a', 'b', 'b', 'c', 'a'])
+    >>> u, indices = np.unique(a, return_index=True)
+    >>> u
+    array(['a', 'b', 'c'], dtype='<U1')
+    >>> indices
+    array([0, 1, 3])
+    >>> a[indices]
+    array(['a', 'b', 'c'], dtype='<U1')
+
+    Reconstruct the input array from the unique values and inverse:
+
+    >>> a = np.array([1, 2, 6, 4, 2, 3, 2])
+    >>> u, indices = np.unique(a, return_inverse=True)
+    >>> u
+    array([1, 2, 3, 4, 6])
+    >>> indices
+    array([0, 1, 4, 3, 1, 2, 1])
+    >>> u[indices]
+    array([1, 2, 6, 4, 2, 3, 2])
+
+    Reconstruct the input values from the unique values and counts:
+
+    >>> a = np.array([1, 2, 6, 4, 2, 3, 2])
+    >>> values, counts = np.unique(a, return_counts=True)
+    >>> values
+    array([1, 2, 3, 4, 6])
+    >>> counts
+    array([1, 3, 1, 1, 1])
+    >>> np.repeat(values, counts)
+    array([1, 2, 2, 2, 3, 4, 6])    # original order not preserved
+
+    """
+    ar = np.asanyarray(ar)
+    if axis is None:
+        ret = _unique1d(ar, return_index, return_inverse, return_counts)
+        return _unpack_tuple(ret)
+
+    # axis was specified and not None
+    try:
+        ar = np.moveaxis(ar, axis, 0)
+    except np.AxisError:
+        # this removes the "axis1" or "axis2" prefix from the error message
+        raise np.AxisError(axis, ar.ndim) from None
+
+    # Must reshape to a contiguous 2D array for this to work...
+    orig_shape, orig_dtype = ar.shape, ar.dtype
+    ar = ar.reshape(orig_shape[0], np.prod(orig_shape[1:], dtype=np.intp))
+    ar = np.ascontiguousarray(ar)
+    dtype = [('f{i}'.format(i=i), ar.dtype) for i in range(ar.shape[1])]
+
+    # At this point, `ar` has shape `(n, m)`, and `dtype` is a structured
+    # data type with `m` fields where each field has the data type of `ar`.
+    # In the following, we create the array `consolidated`, which has
+    # shape `(n,)` with data type `dtype`.
+    try:
+        if ar.shape[1] > 0:
+            consolidated = ar.view(dtype)
+        else:
+            # If ar.shape[1] == 0, then dtype will be `np.dtype([])`, which is
+            # a data type with itemsize 0, and the call `ar.view(dtype)` will
+            # fail. Instead, we'll use `np.empty` to explicitly create the
+            # array with shape `(len(ar),)`. Because `dtype` in this case has
+            # itemsize 0, the total size of the result is still 0 bytes.
+            consolidated = np.empty(len(ar), dtype=dtype)
+    except TypeError as e:
+        # There's no good way to do this for object arrays, etc...
+        msg = 'The axis argument to unique is not supported for dtype {dt}'
+        raise TypeError(msg.format(dt=ar.dtype)) from e
+
+    def reshape_uniq(uniq):
+        n = len(uniq)
+        uniq = uniq.view(orig_dtype)
+        uniq = uniq.reshape(n, *orig_shape[1:])
+        uniq = np.moveaxis(uniq, 0, axis)
+        return uniq
+
+    output = _unique1d(consolidated, return_index,
+                       return_inverse, return_counts)
+    output = (reshape_uniq(output[0]),) + output[1:]
+    return _unpack_tuple(output)
+
+
+def _unique1d(ar, return_index=False, return_inverse=False,
+              return_counts=False):
+    """
+    Find the unique elements of an array, ignoring shape.
+ """ + ar = np.asanyarray(ar).flatten() + + optional_indices = return_index or return_inverse + + if optional_indices: + perm = ar.argsort(kind='mergesort' if return_index else 'quicksort') + aux = ar[perm] + else: + ar.sort() + aux = ar + mask = np.empty(aux.shape, dtype=np.bool_) + mask[:1] = True + if aux.shape[0] > 0 and aux.dtype.kind in "cfmM" and np.isnan(aux[-1]): + if aux.dtype.kind == "c": # for complex all NaNs are considered equivalent + aux_firstnan = np.searchsorted(np.isnan(aux), True, side='left') + else: + aux_firstnan = np.searchsorted(aux, aux[-1], side='left') + if aux_firstnan > 0: + mask[1:aux_firstnan] = ( + aux[1:aux_firstnan] != aux[:aux_firstnan - 1]) + mask[aux_firstnan] = True + mask[aux_firstnan + 1:] = False + else: + mask[1:] = aux[1:] != aux[:-1] + + ret = (aux[mask],) + if return_index: + ret += (perm[mask],) + if return_inverse: + imask = np.cumsum(mask) - 1 + inv_idx = np.empty(mask.shape, dtype=np.intp) + inv_idx[perm] = imask + ret += (inv_idx,) + if return_counts: + idx = np.concatenate(np.nonzero(mask) + ([mask.size],)) + ret += (np.diff(idx),) + return ret + + +def _intersect1d_dispatcher( + ar1, ar2, assume_unique=None, return_indices=None): + return (ar1, ar2) + + +@array_function_dispatch(_intersect1d_dispatcher) +def intersect1d(ar1, ar2, assume_unique=False, return_indices=False): + """ + Find the intersection of two arrays. + + Return the sorted, unique values that are in both of the input arrays. + + Parameters + ---------- + ar1, ar2 : array_like + Input arrays. Will be flattened if not already 1D. + assume_unique : bool + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. If True but ``ar1`` or ``ar2`` are not + unique, incorrect results and out-of-bounds indices could result. + Default is False. + return_indices : bool + If True, the indices which correspond to the intersection of the two + arrays are returned. The first instance of a value is used if there are + multiple. Default is False. + + .. versionadded:: 1.15.0 + + Returns + ------- + intersect1d : ndarray + Sorted 1D array of common and unique elements. + comm1 : ndarray + The indices of the first occurrences of the common values in `ar1`. + Only provided if `return_indices` is True. + comm2 : ndarray + The indices of the first occurrences of the common values in `ar2`. + Only provided if `return_indices` is True. + + + See Also + -------- + numpy.lib.arraysetops : Module with a number of other functions for + performing set operations on arrays. 
+ + Examples + -------- + >>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1]) + array([1, 3]) + + To intersect more than two arrays, use functools.reduce: + + >>> from functools import reduce + >>> reduce(np.intersect1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2])) + array([3]) + + To return the indices of the values common to the input arrays + along with the intersected values: + + >>> x = np.array([1, 1, 2, 3, 4]) + >>> y = np.array([2, 1, 4, 6]) + >>> xy, x_ind, y_ind = np.intersect1d(x, y, return_indices=True) + >>> x_ind, y_ind + (array([0, 2, 4]), array([1, 0, 2])) + >>> xy, x[x_ind], y[y_ind] + (array([1, 2, 4]), array([1, 2, 4]), array([1, 2, 4])) + + """ + ar1 = np.asanyarray(ar1) + ar2 = np.asanyarray(ar2) + + if not assume_unique: + if return_indices: + ar1, ind1 = unique(ar1, return_index=True) + ar2, ind2 = unique(ar2, return_index=True) + else: + ar1 = unique(ar1) + ar2 = unique(ar2) + else: + ar1 = ar1.ravel() + ar2 = ar2.ravel() + + aux = np.concatenate((ar1, ar2)) + if return_indices: + aux_sort_indices = np.argsort(aux, kind='mergesort') + aux = aux[aux_sort_indices] + else: + aux.sort() + + mask = aux[1:] == aux[:-1] + int1d = aux[:-1][mask] + + if return_indices: + ar1_indices = aux_sort_indices[:-1][mask] + ar2_indices = aux_sort_indices[1:][mask] - ar1.size + if not assume_unique: + ar1_indices = ind1[ar1_indices] + ar2_indices = ind2[ar2_indices] + + return int1d, ar1_indices, ar2_indices + else: + return int1d + + +def _setxor1d_dispatcher(ar1, ar2, assume_unique=None): + return (ar1, ar2) + + +@array_function_dispatch(_setxor1d_dispatcher) +def setxor1d(ar1, ar2, assume_unique=False): + """ + Find the set exclusive-or of two arrays. + + Return the sorted, unique values that are in only one (not both) of the + input arrays. + + Parameters + ---------- + ar1, ar2 : array_like + Input arrays. + assume_unique : bool + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. Default is False. + + Returns + ------- + setxor1d : ndarray + Sorted 1D array of unique values that are in only one of the input + arrays. + + Examples + -------- + >>> a = np.array([1, 2, 3, 2, 4]) + >>> b = np.array([2, 3, 5, 7, 5]) + >>> np.setxor1d(a,b) + array([1, 4, 5, 7]) + + """ + if not assume_unique: + ar1 = unique(ar1) + ar2 = unique(ar2) + + aux = np.concatenate((ar1, ar2)) + if aux.size == 0: + return aux + + aux.sort() + flag = np.concatenate(([True], aux[1:] != aux[:-1], [True])) + return aux[flag[1:] & flag[:-1]] + + +def _in1d_dispatcher(ar1, ar2, assume_unique=None, invert=None): + return (ar1, ar2) + + +@array_function_dispatch(_in1d_dispatcher) +def in1d(ar1, ar2, assume_unique=False, invert=False): + """ + Test whether each element of a 1-D array is also present in a second array. + + Returns a boolean array the same length as `ar1` that is True + where an element of `ar1` is in `ar2` and False otherwise. + + We recommend using :func:`isin` instead of `in1d` for new code. + + Parameters + ---------- + ar1 : (M,) array_like + Input array. + ar2 : array_like + The values against which to test each value of `ar1`. + assume_unique : bool, optional + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. Default is False. + invert : bool, optional + If True, the values in the returned array are inverted (that is, + False where an element of `ar1` is in `ar2` and True otherwise). + Default is False. ``np.in1d(a, b, invert=True)`` is equivalent + to (but is faster than) ``np.invert(in1d(a, b))``. + + .. 
versionadded:: 1.8.0 + + Returns + ------- + in1d : (M,) ndarray, bool + The values `ar1[in1d]` are in `ar2`. + + See Also + -------- + isin : Version of this function that preserves the + shape of ar1. + numpy.lib.arraysetops : Module with a number of other functions for + performing set operations on arrays. + + Notes + ----- + `in1d` can be considered as an element-wise function version of the + python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly + equivalent to ``np.array([item in b for item in a])``. + However, this idea fails if `ar2` is a set, or similar (non-sequence) + container: As ``ar2`` is converted to an array, in those cases + ``asarray(ar2)`` is an object array rather than the expected array of + contained values. + + .. versionadded:: 1.4.0 + + Examples + -------- + >>> test = np.array([0, 1, 2, 5, 0]) + >>> states = [0, 2] + >>> mask = np.in1d(test, states) + >>> mask + array([ True, False, True, False, True]) + >>> test[mask] + array([0, 2, 0]) + >>> mask = np.in1d(test, states, invert=True) + >>> mask + array([False, True, False, True, False]) + >>> test[mask] + array([1, 5]) + """ + # Ravel both arrays, behavior for the first array could be different + ar1 = np.asarray(ar1).ravel() + ar2 = np.asarray(ar2).ravel() + + # Ensure that iteration through object arrays yields size-1 arrays + if ar2.dtype == object: + ar2 = ar2.reshape(-1, 1) + + # Check if one of the arrays may contain arbitrary objects + contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject + + # This code is run when + # a) the first condition is true, making the code significantly faster + # b) the second condition is true (i.e. `ar1` or `ar2` may contain + # arbitrary objects), since then sorting is not guaranteed to work + if len(ar2) < 10 * len(ar1) ** 0.145 or contains_object: + if invert: + mask = np.ones(len(ar1), dtype=bool) + for a in ar2: + mask &= (ar1 != a) + else: + mask = np.zeros(len(ar1), dtype=bool) + for a in ar2: + mask |= (ar1 == a) + return mask + + # Otherwise use sorting + if not assume_unique: + ar1, rev_idx = np.unique(ar1, return_inverse=True) + ar2 = np.unique(ar2) + + ar = np.concatenate((ar1, ar2)) + # We need this to be a stable sort, so always use 'mergesort' + # here. The values from the first array should always come before + # the values from the second array. + order = ar.argsort(kind='mergesort') + sar = ar[order] + if invert: + bool_ar = (sar[1:] != sar[:-1]) + else: + bool_ar = (sar[1:] == sar[:-1]) + flag = np.concatenate((bool_ar, [invert])) + ret = np.empty(ar.shape, dtype=bool) + ret[order] = flag + + if assume_unique: + return ret[:len(ar1)] + else: + return ret[rev_idx] + + +def _isin_dispatcher(element, test_elements, assume_unique=None, invert=None): + return (element, test_elements) + + +@array_function_dispatch(_isin_dispatcher) +def isin(element, test_elements, assume_unique=False, invert=False): + """ + Calculates `element in test_elements`, broadcasting over `element` only. + Returns a boolean array of the same shape as `element` that is True + where an element of `element` is in `test_elements` and False otherwise. + + Parameters + ---------- + element : array_like + Input array. + test_elements : array_like + The values against which to test each value of `element`. + This argument is flattened if it is an array or array_like. + See notes for behavior with non-array-like parameters. + assume_unique : bool, optional + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. Default is False. 
+ invert : bool, optional + If True, the values in the returned array are inverted, as if + calculating `element not in test_elements`. Default is False. + ``np.isin(a, b, invert=True)`` is equivalent to (but faster + than) ``np.invert(np.isin(a, b))``. + + Returns + ------- + isin : ndarray, bool + Has the same shape as `element`. The values `element[isin]` + are in `test_elements`. + + See Also + -------- + in1d : Flattened version of this function. + numpy.lib.arraysetops : Module with a number of other functions for + performing set operations on arrays. + + Notes + ----- + + `isin` is an element-wise function version of the python keyword `in`. + ``isin(a, b)`` is roughly equivalent to + ``np.array([item in b for item in a])`` if `a` and `b` are 1-D sequences. + + `element` and `test_elements` are converted to arrays if they are not + already. If `test_elements` is a set (or other non-sequence collection) + it will be converted to an object array with one element, rather than an + array of the values contained in `test_elements`. This is a consequence + of the `array` constructor's way of handling non-sequence collections. + Converting the set to a list usually gives the desired behavior. + + .. versionadded:: 1.13.0 + + Examples + -------- + >>> element = 2*np.arange(4).reshape((2, 2)) + >>> element + array([[0, 2], + [4, 6]]) + >>> test_elements = [1, 2, 4, 8] + >>> mask = np.isin(element, test_elements) + >>> mask + array([[False, True], + [ True, False]]) + >>> element[mask] + array([2, 4]) + + The indices of the matched values can be obtained with `nonzero`: + + >>> np.nonzero(mask) + (array([0, 1]), array([1, 0])) + + The test can also be inverted: + + >>> mask = np.isin(element, test_elements, invert=True) + >>> mask + array([[ True, False], + [False, True]]) + >>> element[mask] + array([0, 6]) + + Because of how `array` handles sets, the following does not + work as expected: + + >>> test_set = {1, 2, 4, 8} + >>> np.isin(element, test_set) + array([[False, False], + [False, False]]) + + Casting the set to a list gives the expected result: + + >>> np.isin(element, list(test_set)) + array([[False, True], + [ True, False]]) + """ + element = np.asarray(element) + return in1d(element, test_elements, assume_unique=assume_unique, + invert=invert).reshape(element.shape) + + +def _union1d_dispatcher(ar1, ar2): + return (ar1, ar2) + + +@array_function_dispatch(_union1d_dispatcher) +def union1d(ar1, ar2): + """ + Find the union of two arrays. + + Return the unique, sorted array of values that are in either of the two + input arrays. + + Parameters + ---------- + ar1, ar2 : array_like + Input arrays. They are flattened if they are not already 1D. + + Returns + ------- + union1d : ndarray + Unique, sorted union of the input arrays. + + See Also + -------- + numpy.lib.arraysetops : Module with a number of other functions for + performing set operations on arrays. + + Examples + -------- + >>> np.union1d([-1, 0, 1], [-2, 0, 2]) + array([-2, -1, 0, 1, 2]) + + To find the union of more than two arrays, use functools.reduce: + + >>> from functools import reduce + >>> reduce(np.union1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2])) + array([1, 2, 3, 4, 6]) + """ + return unique(np.concatenate((ar1, ar2), axis=None)) + + +def _setdiff1d_dispatcher(ar1, ar2, assume_unique=None): + return (ar1, ar2) + + +@array_function_dispatch(_setdiff1d_dispatcher) +def setdiff1d(ar1, ar2, assume_unique=False): + """ + Find the set difference of two arrays. 
+ + Return the unique values in `ar1` that are not in `ar2`. + + Parameters + ---------- + ar1 : array_like + Input array. + ar2 : array_like + Input comparison array. + assume_unique : bool + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. Default is False. + + Returns + ------- + setdiff1d : ndarray + 1D array of values in `ar1` that are not in `ar2`. The result + is sorted when `assume_unique=False`, but otherwise only sorted + if the input is sorted. + + See Also + -------- + numpy.lib.arraysetops : Module with a number of other functions for + performing set operations on arrays. + + Examples + -------- + >>> a = np.array([1, 2, 3, 2, 4, 1]) + >>> b = np.array([3, 4, 5, 6]) + >>> np.setdiff1d(a, b) + array([1, 2]) + + """ + if assume_unique: + ar1 = np.asarray(ar1).ravel() + else: + ar1 = unique(ar1) + ar2 = unique(ar2) + return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)] diff --git a/MLPY/Lib/site-packages/numpy/lib/arraysetops.pyi b/MLPY/Lib/site-packages/numpy/lib/arraysetops.pyi new file mode 100644 index 0000000000000000000000000000000000000000..08f1b3e04bb2182439b58bf809275aab6931bd9a --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/arraysetops.pyi @@ -0,0 +1,12 @@ +from typing import List + +__all__: List[str] + +def ediff1d(ary, to_end=..., to_begin=...): ... +def unique(ar, return_index=..., return_inverse=..., return_counts=..., axis=...): ... +def intersect1d(ar1, ar2, assume_unique=..., return_indices=...): ... +def setxor1d(ar1, ar2, assume_unique=...): ... +def in1d(ar1, ar2, assume_unique=..., invert=...): ... +def isin(element, test_elements, assume_unique=..., invert=...): ... +def union1d(ar1, ar2): ... +def setdiff1d(ar1, ar2, assume_unique=...): ... diff --git a/MLPY/Lib/site-packages/numpy/lib/arrayterator.py b/MLPY/Lib/site-packages/numpy/lib/arrayterator.py new file mode 100644 index 0000000000000000000000000000000000000000..8b434525fc06a27b3e16a798a6de77ac4b2c8309 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/arrayterator.py @@ -0,0 +1,219 @@ +""" +A buffered iterator for big arrays. + +This module solves the problem of iterating over a big file-based array +without having to read it into memory. The `Arrayterator` class wraps +an array object, and when iterated it will return sub-arrays with at most +a user-specified number of elements. + +""" +from operator import mul +from functools import reduce + +__all__ = ['Arrayterator'] + + +class Arrayterator: + """ + Buffered iterator for big arrays. + + `Arrayterator` creates a buffered iterator for reading big arrays in small + contiguous blocks. The class is useful for objects stored in the + file system. It allows iteration over the object *without* reading + everything in memory; instead, small blocks are read and iterated over. + + `Arrayterator` can be used with any object that supports multidimensional + slices. This includes NumPy arrays, but also variables from + Scientific.IO.NetCDF or pynetcdf for example. + + Parameters + ---------- + var : array_like + The object to iterate over. + buf_size : int, optional + The buffer size. If `buf_size` is supplied, the maximum amount of + data that will be read into memory is `buf_size` elements. + Default is None, which will read as many element as possible + into memory. + + Attributes + ---------- + var + buf_size + start + stop + step + shape + flat + + See Also + -------- + ndenumerate : Multidimensional array iterator. + flatiter : Flat array iterator. 
+    memmap : Create a memory-map to an array stored in a binary file on disk.
+
+    Notes
+    -----
+    The algorithm works by first finding a "running dimension", along which
+    the blocks will be extracted. Given an array of dimensions
+    ``(d1, d2, ..., dn)``, e.g. if `buf_size` is smaller than ``d1``, the
+    first dimension will be used. If, on the other hand,
+    ``d1 < buf_size < d1*d2`` the second dimension will be used, and so on.
+    Blocks are extracted along this dimension, and when the last block is
+    returned the process continues from the next dimension, until all
+    elements have been read.
+
+    Examples
+    --------
+    >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
+    >>> a_itor = np.lib.Arrayterator(a, 2)
+    >>> a_itor.shape
+    (3, 4, 5, 6)
+
+    Now we can iterate over ``a_itor``, and it will return arrays of size
+    two. Since `buf_size` was smaller than any dimension, the first
+    dimension will be iterated over first:
+
+    >>> for subarr in a_itor:
+    ...     if not subarr.all():
+    ...         print(subarr, subarr.shape) # doctest: +SKIP
+    >>> # [[[[0 1]]]] (1, 1, 1, 2)
+
+    """
+
+    def __init__(self, var, buf_size=None):
+        self.var = var
+        self.buf_size = buf_size
+
+        self.start = [0 for dim in var.shape]
+        self.stop = [dim for dim in var.shape]
+        self.step = [1 for dim in var.shape]
+
+    def __getattr__(self, attr):
+        return getattr(self.var, attr)
+
+    def __getitem__(self, index):
+        """
+        Return a new arrayterator.
+
+        """
+        # Fix index, handling ellipsis and incomplete slices.
+        if not isinstance(index, tuple):
+            index = (index,)
+        fixed = []
+        length, dims = len(index), self.ndim
+        for slice_ in index:
+            if slice_ is Ellipsis:
+                fixed.extend([slice(None)] * (dims-length+1))
+                length = len(fixed)
+            elif isinstance(slice_, int):
+                fixed.append(slice(slice_, slice_+1, 1))
+            else:
+                fixed.append(slice_)
+        index = tuple(fixed)
+        if len(index) < dims:
+            index += (slice(None),) * (dims-len(index))
+
+        # Return a new arrayterator object.
+        out = self.__class__(self.var, self.buf_size)
+        for i, (start, stop, step, slice_) in enumerate(
+                zip(self.start, self.stop, self.step, index)):
+            out.start[i] = start + (slice_.start or 0)
+            out.step[i] = step * (slice_.step or 1)
+            out.stop[i] = start + (slice_.stop or stop-start)
+            out.stop[i] = min(stop, out.stop[i])
+        return out
+
+    def __array__(self):
+        """
+        Return corresponding data.
+
+        """
+        slice_ = tuple(slice(*t) for t in zip(
+            self.start, self.stop, self.step))
+        return self.var[slice_]
+
+    @property
+    def flat(self):
+        """
+        A 1-D flat iterator for Arrayterator objects.
+
+        This iterator returns elements of the array to be iterated over in
+        `Arrayterator` one by one. It is similar to `flatiter`.
+
+        See Also
+        --------
+        Arrayterator
+        flatiter
+
+        Examples
+        --------
+        >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
+        >>> a_itor = np.lib.Arrayterator(a, 2)
+
+        >>> for subarr in a_itor.flat:
+        ...     if not subarr:
+        ...         print(subarr, type(subarr))
+        ...
+        0 <class 'numpy.int64'>
+
+        """
+        for block in self:
+            yield from block.flat
+
+    @property
+    def shape(self):
+        """
+        The shape of the array to be iterated over.
+
+        For an example, see `Arrayterator`.
+ + """ + return tuple(((stop-start-1)//step+1) for start, stop, step in + zip(self.start, self.stop, self.step)) + + def __iter__(self): + # Skip arrays with degenerate dimensions + if [dim for dim in self.shape if dim <= 0]: + return + + start = self.start[:] + stop = self.stop[:] + step = self.step[:] + ndims = self.var.ndim + + while True: + count = self.buf_size or reduce(mul, self.shape) + + # iterate over each dimension, looking for the + # running dimension (ie, the dimension along which + # the blocks will be built from) + rundim = 0 + for i in range(ndims-1, -1, -1): + # if count is zero we ran out of elements to read + # along higher dimensions, so we read only a single position + if count == 0: + stop[i] = start[i]+1 + elif count <= self.shape[i]: + # limit along this dimension + stop[i] = start[i] + count*step[i] + rundim = i + else: + # read everything along this dimension + stop[i] = self.stop[i] + stop[i] = min(self.stop[i], stop[i]) + count = count//self.shape[i] + + # yield a block + slice_ = tuple(slice(*t) for t in zip(start, stop, step)) + yield self.var[slice_] + + # Update start position, taking care of overflow to + # other dimensions + start[rundim] = stop[rundim] # start where we stopped + for i in range(ndims-1, 0, -1): + if start[i] >= self.stop[i]: + start[i] = self.start[i] + start[i-1] += self.step[i-1] + if start[0] >= self.stop[0]: + return diff --git a/MLPY/Lib/site-packages/numpy/lib/arrayterator.pyi b/MLPY/Lib/site-packages/numpy/lib/arrayterator.pyi new file mode 100644 index 0000000000000000000000000000000000000000..19bad0fbbe7064aa58a479152c16b08860d256a5 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/arrayterator.pyi @@ -0,0 +1,53 @@ +import sys +from typing import ( + List, + Any, + TypeVar, + Generator, + List, + Union, + Tuple, + overload, +) + +from numpy import ndarray, dtype, generic +from numpy.typing import DTypeLike + +# TODO: Set a shape bound once we've got proper shape support +_Shape = TypeVar("_Shape", bound=Any) +_DType = TypeVar("_DType", bound=dtype[Any]) +_ScalarType = TypeVar("_ScalarType", bound=generic) + +_Index = Union[ + Union[ellipsis, int, slice], + Tuple[Union[ellipsis, int, slice], ...], +] + +__all__: List[str] + +# NOTE: In reality `Arrayterator` does not actually inherit from `ndarray`, +# but its ``__getattr__` method does wrap around the former and thus has +# access to all its methods + +class Arrayterator(ndarray[_Shape, _DType]): + var: ndarray[_Shape, _DType] # type: ignore[assignment] + buf_size: None | int + start: List[int] + stop: List[int] + step: List[int] + + @property # type: ignore[misc] + def shape(self) -> Tuple[int, ...]: ... + @property + def flat( # type: ignore[override] + self: ndarray[Any, dtype[_ScalarType]] + ) -> Generator[_ScalarType, None, None]: ... + def __init__( + self, var: ndarray[_Shape, _DType], buf_size: None | int = ... + ) -> None: ... + @overload + def __array__(self, dtype: None = ...) -> ndarray[Any, _DType]: ... + @overload + def __array__(self, dtype: DTypeLike) -> ndarray[Any, dtype[Any]]: ... + def __getitem__(self, index: _Index) -> Arrayterator[Any, _DType]: ... + def __iter__(self) -> Generator[ndarray[Any, _DType], None, None]: ... 
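A short usage sketch tying Arrayterator to the file-backed case its docstring describes; 'data.bin' and the shape are hypothetical:

    import numpy as np
    from numpy.lib import Arrayterator

    # A disk-backed array we don't want to load wholesale.
    big = np.memmap('data.bin', dtype=np.float64, mode='r',
                    shape=(1000, 1000))

    # Read at most ~100_000 elements into memory per block.
    for block in Arrayterator(big, buf_size=100_000):
        partial = block.sum()  # reduce each contiguous block separately
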
diff --git a/MLPY/Lib/site-packages/numpy/lib/format.py b/MLPY/Lib/site-packages/numpy/lib/format.py
new file mode 100644
index 0000000000000000000000000000000000000000..274a8354816651b035189d41bbf1366291f006b6
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/lib/format.py
@@ -0,0 +1,918 @@
+"""
+Binary serialization
+
+NPY format
+==========
+
+A simple format for saving numpy arrays to disk with the full
+information about them.
+
+The ``.npy`` format is the standard binary file format in NumPy for
+persisting a *single* arbitrary NumPy array on disk. The format stores all
+of the shape and dtype information necessary to reconstruct the array
+correctly even on another machine with a different architecture.
+The format is designed to be as simple as possible while achieving
+its limited goals.
+
+The ``.npz`` format is the standard format for persisting *multiple* NumPy
+arrays on disk. A ``.npz`` file is a zip file containing multiple ``.npy``
+files, one for each array.
+
+Capabilities
+------------
+
+- Can represent all NumPy arrays including nested record arrays and
+  object arrays.
+
+- Represents the data in its native binary form.
+
+- Supports Fortran-contiguous arrays directly.
+
+- Stores all of the necessary information to reconstruct the array
+  including shape and dtype on a machine of a different
+  architecture. Both little-endian and big-endian arrays are
+  supported, and a file with little-endian numbers will yield
+  a little-endian array on any machine reading the file. The
+  types are described in terms of their actual sizes. For example,
+  if a machine with a 64-bit C "long int" writes out an array with
+  "long ints", a reading machine with 32-bit C "long ints" will yield
+  an array with 64-bit integers.
+
+- Is straightforward to reverse engineer. Datasets often live longer than
+  the programs that created them. A competent developer should be
+  able to create a solution in their preferred programming language to
+  read most ``.npy`` files that they have been given without much
+  documentation.
+
+- Allows memory-mapping of the data. See `open_memmap`.
+
+- Can be read from a filelike stream object instead of an actual file.
+
+- Stores object arrays, i.e. arrays containing elements that are arbitrary
+  Python objects. Files with object arrays cannot be memory-mapped, but
+  they can still be read from and written to disk.
+
+Limitations
+-----------
+
+- Arbitrary subclasses of numpy.ndarray are not completely preserved.
+  Subclasses will be accepted for writing, but only the array data will
+  be written out. A regular numpy.ndarray object will be created
+  upon reading the file.
+
+.. warning::
+
+  Due to limitations in the interpretation of structured dtypes, dtypes
+  with fields with empty names will have the names replaced by 'f0', 'f1',
+  etc. Such arrays will not round-trip through the format entirely
+  accurately. The data is intact; only the field names will differ. We are
+  working on a fix for this. This fix will not require a change in the
+  file format. The arrays with such structures can still be saved and
+  restored, and the correct dtype may be restored by using the
+  ``loadedarray.view(correct_dtype)`` method.
+
+File extensions
+---------------
+
+We recommend using the ``.npy`` and ``.npz`` extensions for files saved
+in this format. This is by no means a requirement; applications may wish
+to use these file formats but use an extension specific to the
+application. In the absence of an obvious alternative, however,
+we suggest using ``.npy`` and ``.npz``.
+
+Version numbering
+-----------------
+
+The version numbering of these formats is independent of NumPy version
+numbering. If the format is upgraded, the code in `numpy.io` will still
+be able to read and write Version 1.0 files.
+
+Format Version 1.0
+------------------
+
+The first 6 bytes are a magic string: exactly ``\\x93NUMPY``.
+
+The next 1 byte is an unsigned byte: the major version number of the file
+format, e.g. ``\\x01``.
+
+The next 1 byte is an unsigned byte: the minor version number of the file
+format, e.g. ``\\x00``. Note: the version of the file format is not tied
+to the version of the numpy package.
+
+The next 2 bytes form a little-endian unsigned short int: the length of
+the header data HEADER_LEN.
+
+The next HEADER_LEN bytes form the header data describing the array's
+format. It is an ASCII string which contains a Python literal expression
+of a dictionary. It is terminated by a newline (``\\n``) and padded with
+spaces (``\\x20``) to make the total of
+``len(magic string) + 2 + len(length) + HEADER_LEN`` be evenly divisible
+by 64 for alignment purposes.
+
+The dictionary contains three keys:
+
+    "descr" : dtype.descr
+        An object that can be passed as an argument to the `numpy.dtype`
+        constructor to create the array's dtype.
+    "fortran_order" : bool
+        Whether the array data is Fortran-contiguous or not. Since
+        Fortran-contiguous arrays are a common form of non-C-contiguity,
+        we allow them to be written directly to disk for efficiency.
+    "shape" : tuple of int
+        The shape of the array.
+
+For repeatability and readability, the dictionary keys are sorted in
+alphabetic order. This is for convenience only. A writer SHOULD implement
+this if possible. A reader MUST NOT depend on this.
+
+Following the header comes the array data. If the dtype contains Python
+objects (i.e. ``dtype.hasobject is True``), then the data is a Python
+pickle of the array. Otherwise the data is the contiguous (either C-
+or Fortran-, depending on ``fortran_order``) bytes of the array.
+Consumers can figure out the number of bytes by multiplying the number
+of elements given by the shape (noting that ``shape=()`` means there is
+1 element) by ``dtype.itemsize``.
+
+Format Version 2.0
+------------------
+
+The version 1.0 format only allowed the array header to have a total size of
+65535 bytes. This can be exceeded by structured arrays with a large number of
+columns. The version 2.0 format extends the header size to 4 GiB.
+`numpy.save` will automatically save in 2.0 format if the data requires it,
+else it will always use the more compatible 1.0 format.
+
+The description of the fourth element of the header therefore has become:
+"The next 4 bytes form a little-endian unsigned int: the length of the header
+data HEADER_LEN."
+
+Format Version 3.0
+------------------
+
+This version replaces the ASCII string (which in practice was latin1) with
+a utf8-encoded string, so supports structured types with any unicode field
+names.
+
+Notes
+-----
+The ``.npy`` format, including motivation for creating it and a comparison of
+alternatives, is described in the :doc:`"npy-format" NEP
+<neps:nep-0001-npy-format>`, however details have evolved with time and this
+document is more current.
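+
+As a short worked example of the version 1.0 layout above (a sketch only;
+it assumes ``np.save`` chooses version 1.0 for this simple dtype, which it
+does whenever the header fits in 65535 bytes):
+
+>>> import ast, io, struct
+>>> buf = io.BytesIO()
+>>> np.save(buf, np.arange(3, dtype='<i4'))
+>>> _ = buf.seek(0)
+>>> buf.read(6)                        # the magic string
+b'\\x93NUMPY'
+>>> buf.read(2)                        # major and minor version bytes
+b'\\x01\\x00'
+>>> header_len = struct.unpack('<H', buf.read(2))[0]
+>>> (6 + 2 + 2 + header_len) % 64      # everything before the data is aligned
+0
+>>> sorted(ast.literal_eval(buf.read(header_len).decode('latin1')))
+['descr', 'fortran_order', 'shape']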
+ +""" +import numpy +import io +import warnings +from numpy.lib.utils import safe_eval +from numpy.compat import ( + isfileobj, os_fspath, pickle + ) + + +__all__ = [] + + +EXPECTED_KEYS = {'descr', 'fortran_order', 'shape'} +MAGIC_PREFIX = b'\x93NUMPY' +MAGIC_LEN = len(MAGIC_PREFIX) + 2 +ARRAY_ALIGN = 64 # plausible values are powers of 2 between 16 and 4096 +BUFFER_SIZE = 2**18 # size of buffer for reading npz files in bytes + +# difference between version 1.0 and 2.0 is a 4 byte (I) header length +# instead of 2 bytes (H) allowing storage of large structured arrays +_header_size_info = { + (1, 0): (' 255: + raise ValueError("major version must be 0 <= major < 256") + if minor < 0 or minor > 255: + raise ValueError("minor version must be 0 <= minor < 256") + return MAGIC_PREFIX + bytes([major, minor]) + +def read_magic(fp): + """ Read the magic string to get the version of the file format. + + Parameters + ---------- + fp : filelike object + + Returns + ------- + major : int + minor : int + """ + magic_str = _read_bytes(fp, MAGIC_LEN, "magic string") + if magic_str[:-2] != MAGIC_PREFIX: + msg = "the magic string is not correct; expected %r, got %r" + raise ValueError(msg % (MAGIC_PREFIX, magic_str[:-2])) + major, minor = magic_str[-2:] + return major, minor + +def _has_metadata(dt): + if dt.metadata is not None: + return True + elif dt.names is not None: + return any(_has_metadata(dt[k]) for k in dt.names) + elif dt.subdtype is not None: + return _has_metadata(dt.base) + else: + return False + +def dtype_to_descr(dtype): + """ + Get a serializable descriptor from the dtype. + + The .descr attribute of a dtype object cannot be round-tripped through + the dtype() constructor. Simple types, like dtype('float32'), have + a descr which looks like a record array with one field with '' as + a name. The dtype() constructor interprets this as a request to give + a default name. Instead, we construct descriptor that can be passed to + dtype(). + + Parameters + ---------- + dtype : dtype + The dtype of the array that will be written to disk. + + Returns + ------- + descr : object + An object that can be passed to `numpy.dtype()` in order to + replicate the input dtype. + + """ + if _has_metadata(dtype): + warnings.warn("metadata on a dtype may be saved or ignored, but will " + "raise if saved when read. Use another form of storage.", + UserWarning, stacklevel=2) + if dtype.names is not None: + # This is a record array. The .descr is fine. XXX: parts of the + # record array with an empty name, like padding bytes, still get + # fiddled with. This needs to be fixed in the C implementation of + # dtype(). + return dtype.descr + else: + return dtype.str + +def descr_to_dtype(descr): + """ + Returns a dtype based off the given description. + + This is essentially the reverse of `dtype_to_descr()`. It will remove + the valueless padding fields created by, i.e. simple fields like + dtype('float32'), and then convert the description to its corresponding + dtype. + + Parameters + ---------- + descr : object + The object retreived by dtype.descr. Can be passed to + `numpy.dtype()` in order to replicate the input dtype. + + Returns + ------- + dtype : dtype + The dtype constructed by the description. 
+ + """ + if isinstance(descr, str): + # No padding removal needed + return numpy.dtype(descr) + elif isinstance(descr, tuple): + # subtype, will always have a shape descr[1] + dt = descr_to_dtype(descr[0]) + return numpy.dtype((dt, descr[1])) + + titles = [] + names = [] + formats = [] + offsets = [] + offset = 0 + for field in descr: + if len(field) == 2: + name, descr_str = field + dt = descr_to_dtype(descr_str) + else: + name, descr_str, shape = field + dt = numpy.dtype((descr_to_dtype(descr_str), shape)) + + # Ignore padding bytes, which will be void bytes with '' as name + # Once support for blank names is removed, only "if name == ''" needed) + is_pad = (name == '' and dt.type is numpy.void and dt.names is None) + if not is_pad: + title, name = name if isinstance(name, tuple) else (None, name) + titles.append(title) + names.append(name) + formats.append(dt) + offsets.append(offset) + offset += dt.itemsize + + return numpy.dtype({'names': names, 'formats': formats, 'titles': titles, + 'offsets': offsets, 'itemsize': offset}) + +def header_data_from_array_1_0(array): + """ Get the dictionary of header metadata from a numpy.ndarray. + + Parameters + ---------- + array : numpy.ndarray + + Returns + ------- + d : dict + This has the appropriate entries for writing its string representation + to the header of the file. + """ + d = {'shape': array.shape} + if array.flags.c_contiguous: + d['fortran_order'] = False + elif array.flags.f_contiguous: + d['fortran_order'] = True + else: + # Totally non-contiguous data. We will have to make it C-contiguous + # before writing. Note that we need to test for C_CONTIGUOUS first + # because a 1-D array is both C_CONTIGUOUS and F_CONTIGUOUS. + d['fortran_order'] = False + + d['descr'] = dtype_to_descr(array.dtype) + return d + + +def _wrap_header(header, version): + """ + Takes a stringified header, and attaches the prefix and padding to it + """ + import struct + assert version is not None + fmt, encoding = _header_size_info[version] + if not isinstance(header, bytes): # always true on python 3 + header = header.encode(encoding) + hlen = len(header) + 1 + padlen = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize(fmt) + hlen) % ARRAY_ALIGN) + try: + header_prefix = magic(*version) + struct.pack(fmt, hlen + padlen) + except struct.error: + msg = "Header length {} too big for version={}".format(hlen, version) + raise ValueError(msg) from None + + # Pad the header with spaces and a final newline such that the magic + # string, the header-length short and the header are aligned on a + # ARRAY_ALIGN byte boundary. This supports memory mapping of dtypes + # aligned up to ARRAY_ALIGN on systems like Linux where mmap() + # offset must be page-aligned (i.e. the beginning of the file). + return header_prefix + header + b' '*padlen + b'\n' + + +def _wrap_header_guess_version(header): + """ + Like `_wrap_header`, but chooses an appropriate version given the contents + """ + try: + return _wrap_header(header, (1, 0)) + except ValueError: + pass + + try: + ret = _wrap_header(header, (2, 0)) + except UnicodeEncodeError: + pass + else: + warnings.warn("Stored array in format 2.0. It can only be" + "read by NumPy >= 1.9", UserWarning, stacklevel=2) + return ret + + header = _wrap_header(header, (3, 0)) + warnings.warn("Stored array in format 3.0. 
It can only be " + "read by NumPy >= 1.17", UserWarning, stacklevel=2) + return header + + +def _write_array_header(fp, d, version=None): + """ Write the header for an array and returns the version used + + Parameters + ---------- + fp : filelike object + d : dict + This has the appropriate entries for writing its string representation + to the header of the file. + version: tuple or None + None means use oldest that works + explicit version will raise a ValueError if the format does not + allow saving this data. Default: None + """ + header = ["{"] + for key, value in sorted(d.items()): + # Need to use repr here, since we eval these when reading + header.append("'%s': %s, " % (key, repr(value))) + header.append("}") + header = "".join(header) + if version is None: + header = _wrap_header_guess_version(header) + else: + header = _wrap_header(header, version) + fp.write(header) + +def write_array_header_1_0(fp, d): + """ Write the header for an array using the 1.0 format. + + Parameters + ---------- + fp : filelike object + d : dict + This has the appropriate entries for writing its string + representation to the header of the file. + """ + _write_array_header(fp, d, (1, 0)) + + +def write_array_header_2_0(fp, d): + """ Write the header for an array using the 2.0 format. + The 2.0 format allows storing very large structured arrays. + + .. versionadded:: 1.9.0 + + Parameters + ---------- + fp : filelike object + d : dict + This has the appropriate entries for writing its string + representation to the header of the file. + """ + _write_array_header(fp, d, (2, 0)) + +def read_array_header_1_0(fp): + """ + Read an array header from a filelike object using the 1.0 file format + version. + + This will leave the file object located just after the header. + + Parameters + ---------- + fp : filelike object + A file object or something with a `.read()` method like a file. + + Returns + ------- + shape : tuple of int + The shape of the array. + fortran_order : bool + The array data will be written out directly if it is either + C-contiguous or Fortran-contiguous. Otherwise, it will be made + contiguous before writing it out. + dtype : dtype + The dtype of the file's data. + + Raises + ------ + ValueError + If the data is invalid. + + """ + return _read_array_header(fp, version=(1, 0)) + +def read_array_header_2_0(fp): + """ + Read an array header from a filelike object using the 2.0 file format + version. + + This will leave the file object located just after the header. + + .. versionadded:: 1.9.0 + + Parameters + ---------- + fp : filelike object + A file object or something with a `.read()` method like a file. + + Returns + ------- + shape : tuple of int + The shape of the array. + fortran_order : bool + The array data will be written out directly if it is either + C-contiguous or Fortran-contiguous. Otherwise, it will be made + contiguous before writing it out. + dtype : dtype + The dtype of the file's data. + + Raises + ------ + ValueError + If the data is invalid. + + """ + return _read_array_header(fp, version=(2, 0)) + + +def _filter_header(s): + """Clean up 'L' in npz header ints. + + Cleans up the 'L' in strings representing integers. Needed to allow npz + headers produced in Python2 to be read in Python3. + + Parameters + ---------- + s : string + Npy file header. + + Returns + ------- + header : str + Cleaned up header. 
+ + """ + import tokenize + from io import StringIO + + tokens = [] + last_token_was_number = False + for token in tokenize.generate_tokens(StringIO(s).readline): + token_type = token[0] + token_string = token[1] + if (last_token_was_number and + token_type == tokenize.NAME and + token_string == "L"): + continue + else: + tokens.append(token) + last_token_was_number = (token_type == tokenize.NUMBER) + return tokenize.untokenize(tokens) + + +def _read_array_header(fp, version): + """ + see read_array_header_1_0 + """ + # Read an unsigned, little-endian short int which has the length of the + # header. + import struct + hinfo = _header_size_info.get(version) + if hinfo is None: + raise ValueError("Invalid version {!r}".format(version)) + hlength_type, encoding = hinfo + + hlength_str = _read_bytes(fp, struct.calcsize(hlength_type), "array header length") + header_length = struct.unpack(hlength_type, hlength_str)[0] + header = _read_bytes(fp, header_length, "array header") + header = header.decode(encoding) + + # The header is a pretty-printed string representation of a literal + # Python dictionary with trailing newlines padded to a ARRAY_ALIGN byte + # boundary. The keys are strings. + # "shape" : tuple of int + # "fortran_order" : bool + # "descr" : dtype.descr + # Versions (2, 0) and (1, 0) could have been created by a Python 2 + # implementation before header filtering was implemented. + if version <= (2, 0): + header = _filter_header(header) + try: + d = safe_eval(header) + except SyntaxError as e: + msg = "Cannot parse header: {!r}" + raise ValueError(msg.format(header)) from e + if not isinstance(d, dict): + msg = "Header is not a dictionary: {!r}" + raise ValueError(msg.format(d)) + + if EXPECTED_KEYS != d.keys(): + keys = sorted(d.keys()) + msg = "Header does not contain the correct keys: {!r}" + raise ValueError(msg.format(d.keys())) + + # Sanity-check the values. + if (not isinstance(d['shape'], tuple) or + not all(isinstance(x, int) for x in d['shape'])): + msg = "shape is not valid: {!r}" + raise ValueError(msg.format(d['shape'])) + if not isinstance(d['fortran_order'], bool): + msg = "fortran_order is not a valid bool: {!r}" + raise ValueError(msg.format(d['fortran_order'])) + try: + dtype = descr_to_dtype(d['descr']) + except TypeError as e: + msg = "descr is not a valid dtype descriptor: {!r}" + raise ValueError(msg.format(d['descr'])) from e + + return d['shape'], d['fortran_order'], dtype + +def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None): + """ + Write an array to an NPY file, including a header. + + If the array is neither C-contiguous nor Fortran-contiguous AND the + file_like object is not a real file object, this function will have to + copy data in memory. + + Parameters + ---------- + fp : file_like object + An open, writable file object, or similar object with a + ``.write()`` method. + array : ndarray + The array to write to disk. + version : (int, int) or None, optional + The version number of the format. None means use the oldest + supported version that is able to store the data. Default: None + allow_pickle : bool, optional + Whether to allow writing pickled data. Default: True + pickle_kwargs : dict, optional + Additional keyword arguments to pass to pickle.dump, excluding + 'protocol'. These are only useful when pickling objects in object + arrays on Python 3 to Python 2 compatible format. + + Raises + ------ + ValueError + If the array cannot be persisted. 
+        allow_pickle=False and array being an object array.
+    Various other errors
+        If the array contains Python objects as part of its dtype, the
+        process of pickling them may raise various errors if the objects
+        are not picklable.
+
+    """
+    _check_version(version)
+    _write_array_header(fp, header_data_from_array_1_0(array), version)
+
+    if array.itemsize == 0:
+        buffersize = 0
+    else:
+        # Set buffer size to 16 MiB to hide the Python loop overhead.
+        buffersize = max(16 * 1024 ** 2 // array.itemsize, 1)
+
+    if array.dtype.hasobject:
+        # We contain Python objects so we cannot write out the data
+        # directly. Instead, we will pickle it out
+        if not allow_pickle:
+            raise ValueError("Object arrays cannot be saved when "
+                             "allow_pickle=False")
+        if pickle_kwargs is None:
+            pickle_kwargs = {}
+        pickle.dump(array, fp, protocol=3, **pickle_kwargs)
+    elif array.flags.f_contiguous and not array.flags.c_contiguous:
+        if isfileobj(fp):
+            array.T.tofile(fp)
+        else:
+            for chunk in numpy.nditer(
+                    array, flags=['external_loop', 'buffered', 'zerosize_ok'],
+                    buffersize=buffersize, order='F'):
+                fp.write(chunk.tobytes('C'))
+    else:
+        if isfileobj(fp):
+            array.tofile(fp)
+        else:
+            for chunk in numpy.nditer(
+                    array, flags=['external_loop', 'buffered', 'zerosize_ok'],
+                    buffersize=buffersize, order='C'):
+                fp.write(chunk.tobytes('C'))
+
+
+def read_array(fp, allow_pickle=False, pickle_kwargs=None):
+    """
+    Read an array from an NPY file.
+
+    Parameters
+    ----------
+    fp : file_like object
+        If this is not a real file object, then this may take extra memory
+        and time.
+    allow_pickle : bool, optional
+        Whether to allow reading pickled data. Default: False
+
+        .. versionchanged:: 1.16.3
+            Made default False in response to CVE-2019-6446.
+
+    pickle_kwargs : dict
+        Additional keyword arguments to pass to pickle.load. These are only
+        useful when loading object arrays saved on Python 2 when using
+        Python 3.
+
+    Returns
+    -------
+    array : ndarray
+        The array from the data on disk.
+
+    Raises
+    ------
+    ValueError
+        If the data is invalid, or allow_pickle=False and the file contains
+        an object array.
+
+    """
+    version = read_magic(fp)
+    _check_version(version)
+    shape, fortran_order, dtype = _read_array_header(fp, version)
+    if len(shape) == 0:
+        count = 1
+    else:
+        count = numpy.multiply.reduce(shape, dtype=numpy.int64)
+
+    # Now read the actual data.
+    if dtype.hasobject:
+        # The array contained Python objects. We need to unpickle the data.
+        if not allow_pickle:
+            raise ValueError("Object arrays cannot be loaded when "
+                             "allow_pickle=False")
+        if pickle_kwargs is None:
+            pickle_kwargs = {}
+        try:
+            array = pickle.load(fp, **pickle_kwargs)
+        except UnicodeError as err:
+            # Friendlier error message
+            raise UnicodeError("Unpickling a python object failed: %r\n"
+                               "You may need to pass the encoding= option "
+                               "to numpy.load" % (err,)) from err
+    else:
+        if isfileobj(fp):
+            # We can use the fast fromfile() function.
+            array = numpy.fromfile(fp, dtype=dtype, count=count)
+        else:
+            # This is not a real file. We have to read it the
+            # memory-intensive way.
+            # crc32 module fails on reads greater than 2 ** 32 bytes,
+            # breaking large reads from gzip streams. Chunk reads to
+            # BUFFER_SIZE bytes to avoid issue and reduce memory overhead
+            # of the read. In non-chunked case count < max_read_count, so
+            # only one read is performed.
+ + # Use np.ndarray instead of np.empty since the latter does + # not correctly instantiate zero-width string dtypes; see + # https://github.com/numpy/numpy/pull/6430 + array = numpy.ndarray(count, dtype=dtype) + + if dtype.itemsize > 0: + # If dtype.itemsize == 0 then there's nothing more to read + max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, dtype.itemsize) + + for i in range(0, count, max_read_count): + read_count = min(max_read_count, count - i) + read_size = int(read_count * dtype.itemsize) + data = _read_bytes(fp, read_size, "array data") + array[i:i+read_count] = numpy.frombuffer(data, dtype=dtype, + count=read_count) + + if fortran_order: + array.shape = shape[::-1] + array = array.transpose() + else: + array.shape = shape + + return array + + +def open_memmap(filename, mode='r+', dtype=None, shape=None, + fortran_order=False, version=None): + """ + Open a .npy file as a memory-mapped array. + + This may be used to read an existing file or create a new one. + + Parameters + ---------- + filename : str or path-like + The name of the file on disk. This may *not* be a file-like + object. + mode : str, optional + The mode in which to open the file; the default is 'r+'. In + addition to the standard file modes, 'c' is also accepted to mean + "copy on write." See `memmap` for the available mode strings. + dtype : data-type, optional + The data type of the array if we are creating a new file in "write" + mode, if not, `dtype` is ignored. The default value is None, which + results in a data-type of `float64`. + shape : tuple of int + The shape of the array if we are creating a new file in "write" + mode, in which case this parameter is required. Otherwise, this + parameter is ignored and is thus optional. + fortran_order : bool, optional + Whether the array should be Fortran-contiguous (True) or + C-contiguous (False, the default) if we are creating a new file in + "write" mode. + version : tuple of int (major, minor) or None + If the mode is a "write" mode, then this is the version of the file + format used to create the file. None means use the oldest + supported version that is able to store the data. Default: None + + Returns + ------- + marray : memmap + The memory-mapped array. + + Raises + ------ + ValueError + If the data or the mode is invalid. + IOError + If the file is not found or cannot be opened correctly. + + See Also + -------- + numpy.memmap + + """ + if isfileobj(filename): + raise ValueError("Filename must be a string or a path-like object." + " Memmap cannot use existing file handles.") + + if 'w' in mode: + # We are creating the file, not reading it. + # Check if we ought to create the file. + _check_version(version) + # Ensure that the given dtype is an authentic dtype object rather + # than just something that can be interpreted as a dtype object. + dtype = numpy.dtype(dtype) + if dtype.hasobject: + msg = "Array can't be memory-mapped: Python objects in dtype." + raise ValueError(msg) + d = dict( + descr=dtype_to_descr(dtype), + fortran_order=fortran_order, + shape=shape, + ) + # If we got here, then it should be safe to create the file. + with open(os_fspath(filename), mode+'b') as fp: + _write_array_header(fp, d, version) + offset = fp.tell() + else: + # Read the header of the file first. + with open(os_fspath(filename), 'rb') as fp: + version = read_magic(fp) + _check_version(version) + + shape, fortran_order, dtype = _read_array_header(fp, version) + if dtype.hasobject: + msg = "Array can't be memory-mapped: Python objects in dtype." 
+                raise ValueError(msg)
+            offset = fp.tell()
+
+    if fortran_order:
+        order = 'F'
+    else:
+        order = 'C'
+
+    # We need to change a write-only mode to a read-write mode since we've
+    # already written data to the file.
+    if mode == 'w+':
+        mode = 'r+'
+
+    marray = numpy.memmap(filename, dtype=dtype, shape=shape, order=order,
+                          mode=mode, offset=offset)
+
+    return marray
+
+
+def _read_bytes(fp, size, error_template="ran out of data"):
+    """
+    Read from file-like object until size bytes are read.
+    Raises ValueError if EOF is encountered before size bytes have been read.
+    Non-blocking objects only supported if they derive from io objects.
+
+    Required as e.g. ZipExtFile in python 2.6 can return less data than
+    requested.
+    """
+    data = bytes()
+    while True:
+        # io files (default in python3) return None or raise on
+        # would-block, python2 file will truncate, probably nothing can be
+        # done about that. note that regular files can't be non-blocking
+        try:
+            r = fp.read(size - len(data))
+            data += r
+            if len(r) == 0 or len(data) == size:
+                break
+        except io.BlockingIOError:
+            pass
+    if len(data) != size:
+        msg = "EOF: reading %s, expected %d bytes got %d"
+        raise ValueError(msg % (error_template, size, len(data)))
+    else:
+        return data
diff --git a/MLPY/Lib/site-packages/numpy/lib/format.pyi b/MLPY/Lib/site-packages/numpy/lib/format.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..9e55235cebbd40dc803355526ac98c9b06e48bd7
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/lib/format.pyi
@@ -0,0 +1,28 @@
+import sys
+from typing import Any, List, Set
+
+if sys.version_info >= (3, 8):
+    from typing import Literal, Final
+else:
+    from typing_extensions import Literal, Final
+
+__all__: List[str]
+
+EXPECTED_KEYS: Final[Set[str]]
+MAGIC_PREFIX: Final[bytes]
+MAGIC_LEN: Literal[8]
+ARRAY_ALIGN: Literal[64]
+BUFFER_SIZE: Literal[262144]  # 2**18
+
+def magic(major, minor): ...
+def read_magic(fp): ...
+def dtype_to_descr(dtype): ...
+def descr_to_dtype(descr): ...
+def header_data_from_array_1_0(array): ...
+def write_array_header_1_0(fp, d): ...
+def write_array_header_2_0(fp, d): ...
+def read_array_header_1_0(fp): ...
+def read_array_header_2_0(fp): ...
+def write_array(fp, array, version=..., allow_pickle=..., pickle_kwargs=...): ...
+def read_array(fp, allow_pickle=..., pickle_kwargs=...): ...
+def open_memmap(filename, mode=..., dtype=..., shape=..., fortran_order=..., version=...): ...
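The pairing of these helpers is easiest to see end to end. A minimal round-trip sketch (an in-memory `io.BytesIO` stands in for a real file, which exercises the non-file-object code paths of `write_array` and `read_array`; the `npy_format` alias exists only for this example):

    import io

    import numpy as np
    from numpy.lib import format as npy_format

    a = np.linspace(0.0, 1.0, 7).reshape(7, 1)

    buf = io.BytesIO()
    npy_format.write_array(buf, a)      # magic string + header + raw data
    buf.seek(0)

    # The header can be inspected without touching the array data.
    assert npy_format.read_magic(buf) == (1, 0)
    shape, fortran_order, dtype = npy_format.read_array_header_1_0(buf)
    assert shape == (7, 1) and not fortran_order and dtype == a.dtype

    # read_array re-reads the magic string and header itself, so rewind.
    buf.seek(0)
    assert np.array_equal(npy_format.read_array(buf), a)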
diff --git a/MLPY/Lib/site-packages/numpy/lib/function_base.py b/MLPY/Lib/site-packages/numpy/lib/function_base.py new file mode 100644 index 0000000000000000000000000000000000000000..01736e67c0fedf55ca911b4cd17a3486ca4d9028 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/function_base.py @@ -0,0 +1,4932 @@ +import collections.abc +import functools +import re +import sys +import warnings + +import numpy as np +import numpy.core.numeric as _nx +from numpy.core import transpose +from numpy.core.numeric import ( + ones, zeros_like, arange, concatenate, array, asarray, asanyarray, empty, + ndarray, around, floor, ceil, take, dot, where, intp, + integer, isscalar, absolute + ) +from numpy.core.umath import ( + pi, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin, + mod, exp, not_equal, subtract + ) +from numpy.core.fromnumeric import ( + ravel, nonzero, partition, mean, any, sum + ) +from numpy.core.numerictypes import typecodes +from numpy.core.overrides import set_module +from numpy.core import overrides +from numpy.core.function_base import add_newdoc +from numpy.lib.twodim_base import diag +from numpy.core.multiarray import ( + _insert, add_docstring, bincount, normalize_axis_index, _monotonicity, + interp as compiled_interp, interp_complex as compiled_interp_complex + ) +from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc + +import builtins + +# needed in this module for compatibility +from numpy.lib.histograms import histogram, histogramdd + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +__all__ = [ + 'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile', + 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp', 'flip', + 'rot90', 'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average', + 'bincount', 'digitize', 'cov', 'corrcoef', + 'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett', + 'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring', + 'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc', + 'quantile' + ] + + +def _rot90_dispatcher(m, k=None, axes=None): + return (m,) + + +@array_function_dispatch(_rot90_dispatcher) +def rot90(m, k=1, axes=(0, 1)): + """ + Rotate an array by 90 degrees in the plane specified by axes. + + Rotation direction is from the first towards the second axis. + + Parameters + ---------- + m : array_like + Array of two or more dimensions. + k : integer + Number of times the array is rotated by 90 degrees. + axes: (2,) array_like + The array is rotated in the plane defined by the axes. + Axes must be different. + + .. versionadded:: 1.12.0 + + Returns + ------- + y : ndarray + A rotated view of `m`. + + See Also + -------- + flip : Reverse the order of elements in an array along the given axis. + fliplr : Flip an array horizontally. + flipud : Flip an array vertically. 
+ + Notes + ----- + rot90(m, k=1, axes=(1,0)) is the reverse of rot90(m, k=1, axes=(0,1)) + rot90(m, k=1, axes=(1,0)) is equivalent to rot90(m, k=-1, axes=(0,1)) + + Examples + -------- + >>> m = np.array([[1,2],[3,4]], int) + >>> m + array([[1, 2], + [3, 4]]) + >>> np.rot90(m) + array([[2, 4], + [1, 3]]) + >>> np.rot90(m, 2) + array([[4, 3], + [2, 1]]) + >>> m = np.arange(8).reshape((2,2,2)) + >>> np.rot90(m, 1, (1,2)) + array([[[1, 3], + [0, 2]], + [[5, 7], + [4, 6]]]) + + """ + axes = tuple(axes) + if len(axes) != 2: + raise ValueError("len(axes) must be 2.") + + m = asanyarray(m) + + if axes[0] == axes[1] or absolute(axes[0] - axes[1]) == m.ndim: + raise ValueError("Axes must be different.") + + if (axes[0] >= m.ndim or axes[0] < -m.ndim + or axes[1] >= m.ndim or axes[1] < -m.ndim): + raise ValueError("Axes={} out of range for array of ndim={}." + .format(axes, m.ndim)) + + k %= 4 + + if k == 0: + return m[:] + if k == 2: + return flip(flip(m, axes[0]), axes[1]) + + axes_list = arange(0, m.ndim) + (axes_list[axes[0]], axes_list[axes[1]]) = (axes_list[axes[1]], + axes_list[axes[0]]) + + if k == 1: + return transpose(flip(m, axes[1]), axes_list) + else: + # k == 3 + return flip(transpose(m, axes_list), axes[1]) + + +def _flip_dispatcher(m, axis=None): + return (m,) + + +@array_function_dispatch(_flip_dispatcher) +def flip(m, axis=None): + """ + Reverse the order of elements in an array along the given axis. + + The shape of the array is preserved, but the elements are reordered. + + .. versionadded:: 1.12.0 + + Parameters + ---------- + m : array_like + Input array. + axis : None or int or tuple of ints, optional + Axis or axes along which to flip over. The default, + axis=None, will flip over all of the axes of the input array. + If axis is negative it counts from the last to the first axis. + + If axis is a tuple of ints, flipping is performed on all of the axes + specified in the tuple. + + .. versionchanged:: 1.15.0 + None and tuples of axes are supported + + Returns + ------- + out : array_like + A view of `m` with the entries of axis reversed. Since a view is + returned, this operation is done in constant time. + + See Also + -------- + flipud : Flip an array vertically (axis=0). + fliplr : Flip an array horizontally (axis=1). + + Notes + ----- + flip(m, 0) is equivalent to flipud(m). + + flip(m, 1) is equivalent to fliplr(m). + + flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at position n. + + flip(m) corresponds to ``m[::-1,::-1,...,::-1]`` with ``::-1`` at all + positions. + + flip(m, (0, 1)) corresponds to ``m[::-1,::-1,...]`` with ``::-1`` at + position 0 and position 1. + + Examples + -------- + >>> A = np.arange(8).reshape((2,2,2)) + >>> A + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + >>> np.flip(A, 0) + array([[[4, 5], + [6, 7]], + [[0, 1], + [2, 3]]]) + >>> np.flip(A, 1) + array([[[2, 3], + [0, 1]], + [[6, 7], + [4, 5]]]) + >>> np.flip(A) + array([[[7, 6], + [5, 4]], + [[3, 2], + [1, 0]]]) + >>> np.flip(A, (0, 2)) + array([[[5, 4], + [7, 6]], + [[1, 0], + [3, 2]]]) + >>> A = np.random.randn(3,4,5) + >>> np.all(np.flip(A,2) == A[:,:,::-1,...]) + True + """ + if not hasattr(m, 'ndim'): + m = asarray(m) + if axis is None: + indexer = (np.s_[::-1],) * m.ndim + else: + axis = _nx.normalize_axis_tuple(axis, m.ndim) + indexer = [np.s_[:]] * m.ndim + for ax in axis: + indexer[ax] = np.s_[::-1] + indexer = tuple(indexer) + return m[indexer] + + +@set_module('numpy') +def iterable(y): + """ + Check whether or not an object can be iterated over. 
+
+    Parameters
+    ----------
+    y : object
+       Input object.
+
+    Returns
+    -------
+    b : bool
+      Return ``True`` if the object has an iterator method or is a
+      sequence and ``False`` otherwise.
+
+
+    Examples
+    --------
+    >>> np.iterable([1, 2, 3])
+    True
+    >>> np.iterable(2)
+    False
+
+    """
+    try:
+        iter(y)
+    except TypeError:
+        return False
+    return True
+
+
+def _average_dispatcher(a, axis=None, weights=None, returned=None):
+    return (a, weights)
+
+
+@array_function_dispatch(_average_dispatcher)
+def average(a, axis=None, weights=None, returned=False):
+    """
+    Compute the weighted average along the specified axis.
+
+    Parameters
+    ----------
+    a : array_like
+        Array containing data to be averaged. If `a` is not an array, a
+        conversion is attempted.
+    axis : None or int or tuple of ints, optional
+        Axis or axes along which to average `a`. The default,
+        axis=None, will average over all of the elements of the input array.
+        If axis is negative it counts from the last to the first axis.
+
+        .. versionadded:: 1.7.0
+
+        If axis is a tuple of ints, averaging is performed on all of the axes
+        specified in the tuple instead of a single axis or all the axes as
+        before.
+    weights : array_like, optional
+        An array of weights associated with the values in `a`. Each value in
+        `a` contributes to the average according to its associated weight.
+        The weights array can either be 1-D (in which case its length must be
+        the size of `a` along the given axis) or of the same shape as `a`.
+        If `weights=None`, then all data in `a` are assumed to have a
+        weight equal to one. The 1-D calculation is::
+
+            avg = sum(a * weights) / sum(weights)
+
+        The only constraint on `weights` is that `sum(weights)` must not be 0.
+    returned : bool, optional
+        Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
+        is returned, otherwise only the average is returned.
+        If `weights=None`, `sum_of_weights` is equivalent to the number of
+        elements over which the average is taken.
+
+    Returns
+    -------
+    retval, [sum_of_weights] : array_type or double
+        Return the average along the specified axis. When `returned` is `True`,
+        return a tuple with the average as the first element and the sum
+        of the weights as the second element. `sum_of_weights` is of the
+        same type as `retval`. The result dtype follows a general pattern.
+        If `weights` is None, the result dtype will be that of `a`, or ``float64``
+        if `a` is integral. Otherwise, if `weights` is not None and `a` is non-
+        integral, the result type will be the type of lowest precision capable of
+        representing values of both `a` and `weights`. If `a` happens to be
+        integral, the previous rules still apply but the result dtype will
+        at least be ``float64``.
+
+    Raises
+    ------
+    ZeroDivisionError
+        When all weights along axis are zero. See `numpy.ma.average` for a
+        version robust to this type of error.
+    TypeError
+        When the length of 1D `weights` is not the same as the shape of `a`
+        along axis.
+
+    See Also
+    --------
+    mean
+
+    ma.average : average for masked arrays -- useful if your data contains
+                 "missing" values
+    numpy.result_type : Returns the type that results from applying the
+                        numpy type promotion rules to the arguments.
+
+    Examples
+    --------
+    >>> data = np.arange(1, 5)
+    >>> data
+    array([1, 2, 3, 4])
+    >>> np.average(data)
+    2.5
+    >>> np.average(np.arange(1, 11), weights=np.arange(10, 0, -1))
+    4.0
+
+    >>> data = np.arange(6).reshape((3,2))
+    >>> data
+    array([[0, 1],
+           [2, 3],
+           [4, 5]])
+    >>> np.average(data, axis=1, weights=[1./4, 3./4])
+    array([0.75, 2.75, 4.75])
+    >>> np.average(data, weights=[1./4, 3./4])
+    Traceback (most recent call last):
+        ...
+    TypeError: Axis must be specified when shapes of a and weights differ.
+
+    >>> a = np.ones(5, dtype=np.float128)
+    >>> w = np.ones(5, dtype=np.complex64)
+    >>> avg = np.average(a, weights=w)
+    >>> print(avg.dtype)
+    complex256
+    """
+    a = np.asanyarray(a)
+
+    if weights is None:
+        avg = a.mean(axis)
+        scl = avg.dtype.type(a.size/avg.size)
+    else:
+        wgt = np.asanyarray(weights)
+
+        if issubclass(a.dtype.type, (np.integer, np.bool_)):
+            result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8')
+        else:
+            result_dtype = np.result_type(a.dtype, wgt.dtype)
+
+        # Sanity checks
+        if a.shape != wgt.shape:
+            if axis is None:
+                raise TypeError(
+                    "Axis must be specified when shapes of a and weights "
+                    "differ.")
+            if wgt.ndim != 1:
+                raise TypeError(
+                    "1D weights expected when shapes of a and weights differ.")
+            if wgt.shape[0] != a.shape[axis]:
+                raise ValueError(
+                    "Length of weights not compatible with specified axis.")
+
+            # setup wgt to broadcast along axis
+            wgt = np.broadcast_to(wgt, (a.ndim-1)*(1,) + wgt.shape)
+            wgt = wgt.swapaxes(-1, axis)
+
+        scl = wgt.sum(axis=axis, dtype=result_dtype)
+        if np.any(scl == 0.0):
+            raise ZeroDivisionError(
+                "Weights sum to zero, can't be normalized")
+
+        avg = np.multiply(a, wgt, dtype=result_dtype).sum(axis)/scl
+
+    if returned:
+        if scl.shape != avg.shape:
+            scl = np.broadcast_to(scl, avg.shape).copy()
+        return avg, scl
+    else:
+        return avg
+
+
+@set_module('numpy')
+def asarray_chkfinite(a, dtype=None, order=None):
+    """Convert the input to an array, checking for NaNs or Infs.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data, in any form that can be converted to an array. This
+        includes lists, lists of tuples, tuples, tuples of tuples, tuples
+        of lists and ndarrays. Success requires no NaNs or Infs.
+    dtype : data-type, optional
+        By default, the data-type is inferred from the input data.
+    order : {'C', 'F', 'A', 'K'}, optional
+        Memory layout. 'A' and 'K' depend on the order of input array a.
+        'C' row-major (C-style),
+        'F' column-major (Fortran-style) memory representation.
+        'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise
+        'K' (keep) preserve input order
+        Defaults to 'C'.
+
+    Returns
+    -------
+    out : ndarray
+        Array interpretation of `a`. No copy is performed if the input
+        is already an ndarray. If `a` is a subclass of ndarray, a base
+        class ndarray is returned.
+
+    Raises
+    ------
+    ValueError
+        Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity).
+
+    See Also
+    --------
+    asarray : Create an array.
+    asanyarray : Similar function which passes through subclasses.
+    ascontiguousarray : Convert input to a contiguous array.
+    asfarray : Convert input to a floating point ndarray.
+    asfortranarray : Convert input to an ndarray with column-major
+                     memory order.
+    fromiter : Create an array from an iterator.
+    fromfunction : Construct an array by executing a function on grid
+                   positions.
+
+    Examples
+    --------
+    Convert a list into an array. If all elements are finite
+    ``asarray_chkfinite`` is identical to ``asarray``.
+
+    >>> a = [1, 2]
+    >>> np.asarray_chkfinite(a, dtype=float)
+    array([1., 2.])
+
+    Raises ValueError if array_like contains NaNs or Infs.
+
+    >>> a = [1, 2, np.inf]
+    >>> try:
+    ...     np.asarray_chkfinite(a)
+    ... except ValueError:
+    ...     print('ValueError')
+    ...
+    ValueError
+
+    """
+    a = asarray(a, dtype=dtype, order=order)
+    if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all():
+        raise ValueError(
+            "array must not contain infs or NaNs")
+    return a
+
+
+def _piecewise_dispatcher(x, condlist, funclist, *args, **kw):
+    yield x
+    # support the undocumented behavior of allowing scalars
+    if np.iterable(condlist):
+        yield from condlist
+
+
+@array_function_dispatch(_piecewise_dispatcher)
+def piecewise(x, condlist, funclist, *args, **kw):
+    """
+    Evaluate a piecewise-defined function.
+
+    Given a set of conditions and corresponding functions, evaluate each
+    function on the input data wherever its condition is true.
+
+    Parameters
+    ----------
+    x : ndarray or scalar
+        The input domain.
+    condlist : list of bool arrays or bool scalars
+        Each boolean array corresponds to a function in `funclist`. Wherever
+        `condlist[i]` is True, `funclist[i](x)` is used as the output value.
+
+        Each boolean array in `condlist` selects a piece of `x`,
+        and should therefore be of the same shape as `x`.
+
+        The length of `condlist` must correspond to that of `funclist`.
+        If one extra function is given, i.e. if
+        ``len(funclist) == len(condlist) + 1``, then that extra function
+        is the default value, used wherever all conditions are false.
+    funclist : list of callables, f(x,*args,**kw), or scalars
+        Each function is evaluated over `x` wherever its corresponding
+        condition is True. It should take a 1d array as input and give a 1d
+        array or a scalar value as output. If, instead of a callable,
+        a scalar is provided then a constant function (``lambda x: scalar``) is
+        assumed.
+    args : tuple, optional
+        Any further arguments given to `piecewise` are passed to the functions
+        upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then
+        each function is called as ``f(x, 1, 'a')``.
+    kw : dict, optional
+        Keyword arguments used in calling `piecewise` are passed to the
+        functions upon execution, i.e., if called
+        ``piecewise(..., ..., alpha=1)``, then each function is called as
+        ``f(x, alpha=1)``.
+
+    Returns
+    -------
+    out : ndarray
+        The output is the same shape and type as x and is found by
+        calling the functions in `funclist` on the appropriate portions of `x`,
+        as defined by the boolean arrays in `condlist`. Portions not covered
+        by any condition have a default value of 0.
+
+
+    See Also
+    --------
+    choose, select, where
+
+    Notes
+    -----
+    This is similar to choose or select, except that functions are
+    evaluated on elements of `x` that satisfy the corresponding condition from
+    `condlist`.
+
+    The result is::
+
+            |--
+            |funclist[0](x[condlist[0]])
+      out = |funclist[1](x[condlist[1]])
+            |...
+            |funclist[n2](x[condlist[n2]])
+            |--
+
+    Examples
+    --------
+    Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``.
+
+    >>> x = np.linspace(-2.5, 2.5, 6)
+    >>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
+    array([-1., -1., -1.,  1.,  1.,  1.])
+
+    Define the absolute value, which is ``-x`` for ``x < 0`` and ``x`` for
+    ``x >= 0``.
+
+    >>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])
+    array([2.5,  1.5,  0.5,  0.5,  1.5,  2.5])
+
+    Apply the same function to a scalar value.
+ + >>> y = -2 + >>> np.piecewise(y, [y < 0, y >= 0], [lambda x: -x, lambda x: x]) + array(2) + + """ + x = asanyarray(x) + n2 = len(funclist) + + # undocumented: single condition is promoted to a list of one condition + if isscalar(condlist) or ( + not isinstance(condlist[0], (list, ndarray)) and x.ndim != 0): + condlist = [condlist] + + condlist = asarray(condlist, dtype=bool) + n = len(condlist) + + if n == n2 - 1: # compute the "otherwise" condition. + condelse = ~np.any(condlist, axis=0, keepdims=True) + condlist = np.concatenate([condlist, condelse], axis=0) + n += 1 + elif n != n2: + raise ValueError( + "with {} condition(s), either {} or {} functions are expected" + .format(n, n, n+1) + ) + + y = zeros_like(x) + for cond, func in zip(condlist, funclist): + if not isinstance(func, collections.abc.Callable): + y[cond] = func + else: + vals = x[cond] + if vals.size > 0: + y[cond] = func(vals, *args, **kw) + + return y + + +def _select_dispatcher(condlist, choicelist, default=None): + yield from condlist + yield from choicelist + + +@array_function_dispatch(_select_dispatcher) +def select(condlist, choicelist, default=0): + """ + Return an array drawn from elements in choicelist, depending on conditions. + + Parameters + ---------- + condlist : list of bool ndarrays + The list of conditions which determine from which array in `choicelist` + the output elements are taken. When multiple conditions are satisfied, + the first one encountered in `condlist` is used. + choicelist : list of ndarrays + The list of arrays from which the output elements are taken. It has + to be of the same length as `condlist`. + default : scalar, optional + The element inserted in `output` when all conditions evaluate to False. + + Returns + ------- + output : ndarray + The output at position m is the m-th element of the array in + `choicelist` where the m-th element of the corresponding array in + `condlist` is True. + + See Also + -------- + where : Return elements from one of two arrays depending on condition. + take, choose, compress, diag, diagonal + + Examples + -------- + >>> x = np.arange(10) + >>> condlist = [x<3, x>5] + >>> choicelist = [x, x**2] + >>> np.select(condlist, choicelist) + array([ 0, 1, 2, ..., 49, 64, 81]) + + """ + # Check the size of condlist and choicelist are the same, or abort. + if len(condlist) != len(choicelist): + raise ValueError( + 'list of cases must be same length as list of conditions') + + # Now that the dtype is known, handle the deprecated select([], []) case + if len(condlist) == 0: + raise ValueError("select with an empty condition list is not possible") + + choicelist = [np.asarray(choice) for choice in choicelist] + + try: + intermediate_dtype = np.result_type(*choicelist) + except TypeError as e: + msg = f'Choicelist elements do not have a common dtype: {e}' + raise TypeError(msg) from None + default_array = np.asarray(default) + choicelist.append(default_array) + + # need to get the result type before broadcasting for correct scalar + # behaviour + try: + dtype = np.result_type(intermediate_dtype, default_array) + except TypeError as e: + msg = f'Choicelists and default value do not have a common dtype: {e}' + raise TypeError(msg) from None + + # Convert conditions to arrays and broadcast conditions and choices + # as the shape is needed for the result. Doing it separately optimizes + # for example when all choices are scalars. 
+ condlist = np.broadcast_arrays(*condlist) + choicelist = np.broadcast_arrays(*choicelist) + + # If cond array is not an ndarray in boolean format or scalar bool, abort. + for i, cond in enumerate(condlist): + if cond.dtype.type is not np.bool_: + raise TypeError( + 'invalid entry {} in condlist: should be boolean ndarray'.format(i)) + + if choicelist[0].ndim == 0: + # This may be common, so avoid the call. + result_shape = condlist[0].shape + else: + result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape + + result = np.full(result_shape, choicelist[-1], dtype) + + # Use np.copyto to burn each choicelist array onto result, using the + # corresponding condlist as a boolean mask. This is done in reverse + # order since the first choice should take precedence. + choicelist = choicelist[-2::-1] + condlist = condlist[::-1] + for choice, cond in zip(choicelist, condlist): + np.copyto(result, choice, where=cond) + + return result + + +def _copy_dispatcher(a, order=None, subok=None): + return (a,) + + +@array_function_dispatch(_copy_dispatcher) +def copy(a, order='K', subok=False): + """ + Return an array copy of the given object. + + Parameters + ---------- + a : array_like + Input data. + order : {'C', 'F', 'A', 'K'}, optional + Controls the memory layout of the copy. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, + 'C' otherwise. 'K' means match the layout of `a` as closely + as possible. (Note that this function and :meth:`ndarray.copy` are very + similar, but have different default values for their order= + arguments.) + subok : bool, optional + If True, then sub-classes will be passed-through, otherwise the + returned array will be forced to be a base-class array (defaults to False). + + .. versionadded:: 1.19.0 + + Returns + ------- + arr : ndarray + Array interpretation of `a`. + + See Also + -------- + ndarray.copy : Preferred method for creating an array copy + + Notes + ----- + This is equivalent to: + + >>> np.array(a, copy=True) #doctest: +SKIP + + Examples + -------- + Create an array x, with a reference y and a copy z: + + >>> x = np.array([1, 2, 3]) + >>> y = x + >>> z = np.copy(x) + + Note that, when we modify x, y changes, but not z: + + >>> x[0] = 10 + >>> x[0] == y[0] + True + >>> x[0] == z[0] + False + + Note that np.copy is a shallow copy and will not copy object + elements within arrays. This is mainly important for arrays + containing Python objects. The new array will contain the + same object which may lead to surprises if that object can + be modified (is mutable): + + >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object) + >>> b = np.copy(a) + >>> b[2][0] = 10 + >>> a + array([1, 'm', list([10, 3, 4])], dtype=object) + + To ensure all elements within an ``object`` array are copied, + use `copy.deepcopy`: + + >>> import copy + >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object) + >>> c = copy.deepcopy(a) + >>> c[2][0] = 10 + >>> c + array([1, 'm', list([10, 3, 4])], dtype=object) + >>> a + array([1, 'm', list([2, 3, 4])], dtype=object) + + """ + return array(a, order=order, subok=subok, copy=True) + +# Basic operations + + +def _gradient_dispatcher(f, *varargs, axis=None, edge_order=None): + yield f + yield from varargs + + +@array_function_dispatch(_gradient_dispatcher) +def gradient(f, *varargs, axis=None, edge_order=1): + """ + Return the gradient of an N-dimensional array. 
+
+    The gradient is computed using second order accurate central differences
+    in the interior points and either first or second order accurate one-sided
+    (forward or backward) differences at the boundaries.
+    The returned gradient hence has the same shape as the input array.
+
+    Parameters
+    ----------
+    f : array_like
+        An N-dimensional array containing samples of a scalar function.
+    varargs : list of scalar or array, optional
+        Spacing between f values. Default unitary spacing for all dimensions.
+        Spacing can be specified using:
+
+        1. single scalar to specify a sample distance for all dimensions.
+        2. N scalars to specify a constant sample distance for each dimension.
+           i.e. `dx`, `dy`, `dz`, ...
+        3. N arrays to specify the coordinates of the values along each
+           dimension of F. The length of the array must match the size of
+           the corresponding dimension
+        4. Any combination of N scalars/arrays with the meaning of 2. and 3.
+
+        If `axis` is given, the number of varargs must equal the number of axes.
+        Default: 1.
+
+    edge_order : {1, 2}, optional
+        Gradient is calculated using N-th order accurate differences
+        at the boundaries. Default: 1.
+
+        .. versionadded:: 1.9.1
+
+    axis : None or int or tuple of ints, optional
+        Gradient is calculated only along the given axis or axes
+        The default (axis = None) is to calculate the gradient for all the axes
+        of the input array. axis may be negative, in which case it counts from
+        the last to the first axis.
+
+        .. versionadded:: 1.11.0
+
+    Returns
+    -------
+    gradient : ndarray or list of ndarray
+        A list of ndarrays (or a single ndarray if there is only one dimension)
+        corresponding to the derivatives of f with respect to each dimension.
+        Each derivative has the same shape as f.
+
+    Examples
+    --------
+    >>> f = np.array([1, 2, 4, 7, 11, 16], dtype=float)
+    >>> np.gradient(f)
+    array([1. , 1.5, 2.5, 3.5, 4.5, 5. ])
+    >>> np.gradient(f, 2)
+    array([0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
+
+    Spacing can also be specified with an array that represents the coordinates
+    of the values F along the dimensions.
+    For instance a uniform spacing:
+
+    >>> x = np.arange(f.size)
+    >>> np.gradient(f, x)
+    array([1. , 1.5, 2.5, 3.5, 4.5, 5. ])
+
+    Or a non-uniform one:
+
+    >>> x = np.array([0., 1., 1.5, 3.5, 4., 6.], dtype=float)
+    >>> np.gradient(f, x)
+    array([1. , 3. , 3.5, 6.7, 6.9, 2.5])
+
+    For two dimensional arrays, the return will be two arrays ordered by
+    axis. In this example the first array stands for the gradient in
+    the row direction and the second one in the column direction:
+
+    >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float))
+    [array([[ 2.,  2., -1.],
+           [ 2.,  2., -1.]]), array([[1. , 2.5, 4. ],
+           [1. , 1. , 1. ]])]
+
+    In this example the spacing is also specified:
+    uniform for axis=0 and non-uniform for axis=1
+
+    >>> dx = 2.
+    >>> y = [1., 1.5, 3.5]
+    >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), dx, y)
+    [array([[ 1. ,  1. , -0.5],
+           [ 1. ,  1. , -0.5]]), array([[2. , 2. , 2. ],
+           [2. , 1.7, 0.5]])]
+
+    It is possible to specify how boundaries are treated using `edge_order`
+
+    >>> x = np.array([0, 1, 2, 3, 4])
+    >>> f = x**2
+    >>> np.gradient(f, edge_order=1)
+    array([1., 2., 4., 6., 7.])
+    >>> np.gradient(f, edge_order=2)
+    array([0., 2., 4., 6., 8.])
+
+    The `axis` keyword can be used to specify a subset of axes of which the
+    gradient is calculated
+
+    >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), axis=0)
+    array([[ 2.,  2., -1.],
+           [ 2.,  2., -1.]])
+
+    Notes
+    -----
+    Assuming that :math:`f\\in C^{3}` (i.e., :math:`f` has at least 3 continuous
+    derivatives) and let :math:`h_{*}` be a non-homogeneous stepsize, we
+    minimize the "consistency error" :math:`\\eta_{i}` between the true gradient
+    and its estimate from a linear combination of the neighboring grid-points:
+
+    .. math::
+
+        \\eta_{i} = f_{i}^{\\left(1\\right)} -
+                    \\left[ \\alpha f\\left(x_{i}\\right) +
+                            \\beta f\\left(x_{i} + h_{d}\\right) +
+                            \\gamma f\\left(x_{i}-h_{s}\\right)
+                    \\right]
+
+    By substituting :math:`f(x_{i} + h_{d})` and :math:`f(x_{i} - h_{s})`
+    with their Taylor series expansion, this translates into solving
+    the following linear system:
+
+    .. math::
+
+        \\left\\{
+            \\begin{array}{r}
+                \\alpha+\\beta+\\gamma=0 \\\\
+                \\beta h_{d}-\\gamma h_{s}=1 \\\\
+                \\beta h_{d}^{2}+\\gamma h_{s}^{2}=0
+            \\end{array}
+        \\right.
+
+    The resulting approximation of :math:`f_{i}^{(1)}` is the following:
+
+    .. math::
+
+        \\hat f_{i}^{(1)} =
+            \\frac{
+                h_{s}^{2}f\\left(x_{i} + h_{d}\\right)
+                + \\left(h_{d}^{2} - h_{s}^{2}\\right)f\\left(x_{i}\\right)
+                - h_{d}^{2}f\\left(x_{i}-h_{s}\\right)}
+                { h_{s}h_{d}\\left(h_{d} + h_{s}\\right)}
+            + \\mathcal{O}\\left(\\frac{h_{d}h_{s}^{2}
+                                + h_{s}h_{d}^{2}}{h_{d}
+                                + h_{s}}\\right)
+
+    It is worth noting that if :math:`h_{s}=h_{d}`
+    (i.e., data are evenly spaced)
+    we find the standard second order approximation:
+
+    .. math::
+
+        \\hat f_{i}^{(1)}=
+            \\frac{f\\left(x_{i+1}\\right) - f\\left(x_{i-1}\\right)}{2h}
+            + \\mathcal{O}\\left(h^{2}\\right)
+
+    With a similar procedure the forward/backward approximations used for
+    boundaries can be derived.
+
+    References
+    ----------
+    .. [1]  Quarteroni A., Sacco R., Saleri F. (2007) Numerical Mathematics
+            (Texts in Applied Mathematics). New York: Springer.
+    .. [2]  Durran D. R. (1999) Numerical Methods for Wave Equations
+            in Geophysical Fluid Dynamics. New York: Springer.
+    .. [3]  Fornberg B. (1988) Generation of Finite Difference Formulas on
+            Arbitrarily Spaced Grids,
+            Mathematics of Computation 51, no. 184 : 699-706.
+            `PDF <http://www.ams.org/journals/mcom/1988-51-184/
+            S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_.
+    """
+    f = np.asanyarray(f)
+    N = f.ndim  # number of dimensions
+
+    if axis is None:
+        axes = tuple(range(N))
+    else:
+        axes = _nx.normalize_axis_tuple(axis, N)
+
+    len_axes = len(axes)
+    n = len(varargs)
+    if n == 0:
+        # no spacing argument - use 1 in all axes
+        dx = [1.0] * len_axes
+    elif n == 1 and np.ndim(varargs[0]) == 0:
+        # single scalar for all axes
+        dx = varargs * len_axes
+    elif n == len_axes:
+        # scalar or 1d array for each axis
+        dx = list(varargs)
+        for i, distances in enumerate(dx):
+            distances = np.asanyarray(distances)
+            if distances.ndim == 0:
+                continue
+            elif distances.ndim != 1:
+                raise ValueError("distances must be either scalars or 1d")
+            if len(distances) != f.shape[axes[i]]:
+                raise ValueError("when 1d, distances must match "
+                                 "the length of the corresponding dimension")
+            if np.issubdtype(distances.dtype, np.integer):
+                # Convert numpy integer types to float64 to avoid modular
+                # arithmetic in np.diff(distances).
+ distances = distances.astype(np.float64) + diffx = np.diff(distances) + # if distances are constant reduce to the scalar case + # since it brings a consistent speedup + if (diffx == diffx[0]).all(): + diffx = diffx[0] + dx[i] = diffx + else: + raise TypeError("invalid number of arguments") + + if edge_order > 2: + raise ValueError("'edge_order' greater than 2 not supported") + + # use central differences on interior and one-sided differences on the + # endpoints. This preserves second order-accuracy over the full domain. + + outvals = [] + + # create slice objects --- initially all are [:, :, ..., :] + slice1 = [slice(None)]*N + slice2 = [slice(None)]*N + slice3 = [slice(None)]*N + slice4 = [slice(None)]*N + + otype = f.dtype + if otype.type is np.datetime64: + # the timedelta dtype with the same unit information + otype = np.dtype(otype.name.replace('datetime', 'timedelta')) + # view as timedelta to allow addition + f = f.view(otype) + elif otype.type is np.timedelta64: + pass + elif np.issubdtype(otype, np.inexact): + pass + else: + # All other types convert to floating point. + # First check if f is a numpy integer type; if so, convert f to float64 + # to avoid modular arithmetic when computing the changes in f. + if np.issubdtype(otype, np.integer): + f = f.astype(np.float64) + otype = np.float64 + + for axis, ax_dx in zip(axes, dx): + if f.shape[axis] < edge_order + 1: + raise ValueError( + "Shape of array too small to calculate a numerical gradient, " + "at least (edge_order + 1) elements are required.") + # result allocation + out = np.empty_like(f, dtype=otype) + + # spacing for the current axis + uniform_spacing = np.ndim(ax_dx) == 0 + + # Numerical differentiation: 2nd order interior + slice1[axis] = slice(1, -1) + slice2[axis] = slice(None, -2) + slice3[axis] = slice(1, -1) + slice4[axis] = slice(2, None) + + if uniform_spacing: + out[tuple(slice1)] = (f[tuple(slice4)] - f[tuple(slice2)]) / (2. * ax_dx) + else: + dx1 = ax_dx[0:-1] + dx2 = ax_dx[1:] + a = -(dx2)/(dx1 * (dx1 + dx2)) + b = (dx2 - dx1) / (dx1 * dx2) + c = dx1 / (dx2 * (dx1 + dx2)) + # fix the shape for broadcasting + shape = np.ones(N, dtype=int) + shape[axis] = -1 + a.shape = b.shape = c.shape = shape + # 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:] + out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)] + + # Numerical differentiation: 1st order edges + if edge_order == 1: + slice1[axis] = 0 + slice2[axis] = 1 + slice3[axis] = 0 + dx_0 = ax_dx if uniform_spacing else ax_dx[0] + # 1D equivalent -- out[0] = (f[1] - f[0]) / (x[1] - x[0]) + out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_0 + + slice1[axis] = -1 + slice2[axis] = -1 + slice3[axis] = -2 + dx_n = ax_dx if uniform_spacing else ax_dx[-1] + # 1D equivalent -- out[-1] = (f[-1] - f[-2]) / (x[-1] - x[-2]) + out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_n + + # Numerical differentiation: 2nd order edges + else: + slice1[axis] = 0 + slice2[axis] = 0 + slice3[axis] = 1 + slice4[axis] = 2 + if uniform_spacing: + a = -1.5 / ax_dx + b = 2. / ax_dx + c = -0.5 / ax_dx + else: + dx1 = ax_dx[0] + dx2 = ax_dx[1] + a = -(2. 
* dx1 + dx2)/(dx1 * (dx1 + dx2)) + b = (dx1 + dx2) / (dx1 * dx2) + c = - dx1 / (dx2 * (dx1 + dx2)) + # 1D equivalent -- out[0] = a * f[0] + b * f[1] + c * f[2] + out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)] + + slice1[axis] = -1 + slice2[axis] = -3 + slice3[axis] = -2 + slice4[axis] = -1 + if uniform_spacing: + a = 0.5 / ax_dx + b = -2. / ax_dx + c = 1.5 / ax_dx + else: + dx1 = ax_dx[-2] + dx2 = ax_dx[-1] + a = (dx2) / (dx1 * (dx1 + dx2)) + b = - (dx2 + dx1) / (dx1 * dx2) + c = (2. * dx2 + dx1) / (dx2 * (dx1 + dx2)) + # 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1] + out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)] + + outvals.append(out) + + # reset the slice object in this dimension to ":" + slice1[axis] = slice(None) + slice2[axis] = slice(None) + slice3[axis] = slice(None) + slice4[axis] = slice(None) + + if len_axes == 1: + return outvals[0] + else: + return outvals + + +def _diff_dispatcher(a, n=None, axis=None, prepend=None, append=None): + return (a, prepend, append) + + +@array_function_dispatch(_diff_dispatcher) +def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue): + """ + Calculate the n-th discrete difference along the given axis. + + The first difference is given by ``out[i] = a[i+1] - a[i]`` along + the given axis, higher differences are calculated by using `diff` + recursively. + + Parameters + ---------- + a : array_like + Input array + n : int, optional + The number of times values are differenced. If zero, the input + is returned as-is. + axis : int, optional + The axis along which the difference is taken, default is the + last axis. + prepend, append : array_like, optional + Values to prepend or append to `a` along axis prior to + performing the difference. Scalar values are expanded to + arrays with length 1 in the direction of axis and the shape + of the input array in along all other axes. Otherwise the + dimension and shape must match `a` except along axis. + + .. versionadded:: 1.16.0 + + Returns + ------- + diff : ndarray + The n-th differences. The shape of the output is the same as `a` + except along `axis` where the dimension is smaller by `n`. The + type of the output is the same as the type of the difference + between any two elements of `a`. This is the same as the type of + `a` in most cases. A notable exception is `datetime64`, which + results in a `timedelta64` output array. + + See Also + -------- + gradient, ediff1d, cumsum + + Notes + ----- + Type is preserved for boolean arrays, so the result will contain + `False` when consecutive elements are the same and `True` when they + differ. + + For unsigned integer arrays, the results will also be unsigned. This + should not be surprising, as the result is consistent with + calculating the difference directly: + + >>> u8_arr = np.array([1, 0], dtype=np.uint8) + >>> np.diff(u8_arr) + array([255], dtype=uint8) + >>> u8_arr[1,...] - u8_arr[0,...] 
+ 255 + + If this is not desirable, then the array should be cast to a larger + integer type first: + + >>> i16_arr = u8_arr.astype(np.int16) + >>> np.diff(i16_arr) + array([-1], dtype=int16) + + Examples + -------- + >>> x = np.array([1, 2, 4, 7, 0]) + >>> np.diff(x) + array([ 1, 2, 3, -7]) + >>> np.diff(x, n=2) + array([ 1, 1, -10]) + + >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]]) + >>> np.diff(x) + array([[2, 3, 4], + [5, 1, 2]]) + >>> np.diff(x, axis=0) + array([[-1, 2, 0, -2]]) + + >>> x = np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64) + >>> np.diff(x) + array([1, 1], dtype='timedelta64[D]') + + """ + if n == 0: + return a + if n < 0: + raise ValueError( + "order must be non-negative but got " + repr(n)) + + a = asanyarray(a) + nd = a.ndim + if nd == 0: + raise ValueError("diff requires input that is at least one dimensional") + axis = normalize_axis_index(axis, nd) + + combined = [] + if prepend is not np._NoValue: + prepend = np.asanyarray(prepend) + if prepend.ndim == 0: + shape = list(a.shape) + shape[axis] = 1 + prepend = np.broadcast_to(prepend, tuple(shape)) + combined.append(prepend) + + combined.append(a) + + if append is not np._NoValue: + append = np.asanyarray(append) + if append.ndim == 0: + shape = list(a.shape) + shape[axis] = 1 + append = np.broadcast_to(append, tuple(shape)) + combined.append(append) + + if len(combined) > 1: + a = np.concatenate(combined, axis) + + slice1 = [slice(None)] * nd + slice2 = [slice(None)] * nd + slice1[axis] = slice(1, None) + slice2[axis] = slice(None, -1) + slice1 = tuple(slice1) + slice2 = tuple(slice2) + + op = not_equal if a.dtype == np.bool_ else subtract + for _ in range(n): + a = op(a[slice1], a[slice2]) + + return a + + +def _interp_dispatcher(x, xp, fp, left=None, right=None, period=None): + return (x, xp, fp) + + +@array_function_dispatch(_interp_dispatcher) +def interp(x, xp, fp, left=None, right=None, period=None): + """ + One-dimensional linear interpolation for monotonically increasing sample points. + + Returns the one-dimensional piecewise linear interpolant to a function + with given discrete data points (`xp`, `fp`), evaluated at `x`. + + Parameters + ---------- + x : array_like + The x-coordinates at which to evaluate the interpolated values. + + xp : 1-D sequence of floats + The x-coordinates of the data points, must be increasing if argument + `period` is not specified. Otherwise, `xp` is internally sorted after + normalizing the periodic boundaries with ``xp = xp % period``. + + fp : 1-D sequence of float or complex + The y-coordinates of the data points, same length as `xp`. + + left : optional float or complex corresponding to fp + Value to return for `x < xp[0]`, default is `fp[0]`. + + right : optional float or complex corresponding to fp + Value to return for `x > xp[-1]`, default is `fp[-1]`. + + period : None or float, optional + A period for the x-coordinates. This parameter allows the proper + interpolation of angular x-coordinates. Parameters `left` and `right` + are ignored if `period` is specified. + + .. versionadded:: 1.10.0 + + Returns + ------- + y : float or complex (corresponding to fp) or ndarray + The interpolated values, same shape as `x`. + + Raises + ------ + ValueError + If `xp` and `fp` have different length + If `xp` or `fp` are not 1-D sequences + If `period == 0` + + See Also + -------- + scipy.interpolate + + Warnings + -------- + The x-coordinate sequence is expected to be increasing, but this is not + explicitly enforced. 
However, if the sequence `xp` is non-increasing, + interpolation results are meaningless. + + Note that, since NaN is unsortable, `xp` also cannot contain NaNs. + + A simple check for `xp` being strictly increasing is:: + + np.all(np.diff(xp) > 0) + + Examples + -------- + >>> xp = [1, 2, 3] + >>> fp = [3, 2, 0] + >>> np.interp(2.5, xp, fp) + 1.0 + >>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp) + array([3. , 3. , 2.5 , 0.56, 0. ]) + >>> UNDEF = -99.0 + >>> np.interp(3.14, xp, fp, right=UNDEF) + -99.0 + + Plot an interpolant to the sine function: + + >>> x = np.linspace(0, 2*np.pi, 10) + >>> y = np.sin(x) + >>> xvals = np.linspace(0, 2*np.pi, 50) + >>> yinterp = np.interp(xvals, x, y) + >>> import matplotlib.pyplot as plt + >>> plt.plot(x, y, 'o') + [] + >>> plt.plot(xvals, yinterp, '-x') + [] + >>> plt.show() + + Interpolation with periodic x-coordinates: + + >>> x = [-180, -170, -185, 185, -10, -5, 0, 365] + >>> xp = [190, -190, 350, -350] + >>> fp = [5, 10, 3, 4] + >>> np.interp(x, xp, fp, period=360) + array([7.5 , 5. , 8.75, 6.25, 3. , 3.25, 3.5 , 3.75]) + + Complex interpolation: + + >>> x = [1.5, 4.0] + >>> xp = [2,3,5] + >>> fp = [1.0j, 0, 2+3j] + >>> np.interp(x, xp, fp) + array([0.+1.j , 1.+1.5j]) + + """ + + fp = np.asarray(fp) + + if np.iscomplexobj(fp): + interp_func = compiled_interp_complex + input_dtype = np.complex128 + else: + interp_func = compiled_interp + input_dtype = np.float64 + + if period is not None: + if period == 0: + raise ValueError("period must be a non-zero value") + period = abs(period) + left = None + right = None + + x = np.asarray(x, dtype=np.float64) + xp = np.asarray(xp, dtype=np.float64) + fp = np.asarray(fp, dtype=input_dtype) + + if xp.ndim != 1 or fp.ndim != 1: + raise ValueError("Data points must be 1-D sequences") + if xp.shape[0] != fp.shape[0]: + raise ValueError("fp and xp are not of the same length") + # normalizing periodic boundaries + x = x % period + xp = xp % period + asort_xp = np.argsort(xp) + xp = xp[asort_xp] + fp = fp[asort_xp] + xp = np.concatenate((xp[-1:]-period, xp, xp[0:1]+period)) + fp = np.concatenate((fp[-1:], fp, fp[0:1])) + + return interp_func(x, xp, fp, left, right) + + +def _angle_dispatcher(z, deg=None): + return (z,) + + +@array_function_dispatch(_angle_dispatcher) +def angle(z, deg=False): + """ + Return the angle of the complex argument. + + Parameters + ---------- + z : array_like + A complex number or sequence of complex numbers. + deg : bool, optional + Return angle in degrees if True, radians if False (default). + + Returns + ------- + angle : ndarray or scalar + The counterclockwise angle from the positive real axis on the complex + plane in the range ``(-pi, pi]``, with dtype as numpy.float64. + + .. versionchanged:: 1.16.0 + This function works on subclasses of ndarray like `ma.array`. + + See Also + -------- + arctan2 + absolute + + Notes + ----- + Although the angle of the complex number 0 is undefined, ``numpy.angle(0)`` + returns the value 0. + + Examples + -------- + >>> np.angle([1.0, 1.0j, 1+1j]) # in radians + array([ 0. 
, 1.57079633, 0.78539816]) # may vary + >>> np.angle(1+1j, deg=True) # in degrees + 45.0 + + """ + z = asanyarray(z) + if issubclass(z.dtype.type, _nx.complexfloating): + zimag = z.imag + zreal = z.real + else: + zimag = 0 + zreal = z + + a = arctan2(zimag, zreal) + if deg: + a *= 180/pi + return a + + +def _unwrap_dispatcher(p, discont=None, axis=None, *, period=None): + return (p,) + + +@array_function_dispatch(_unwrap_dispatcher) +def unwrap(p, discont=None, axis=-1, *, period=2*pi): + r""" + Unwrap by taking the complement of large deltas with respect to the period. + + This unwraps a signal `p` by changing elements which have an absolute + difference from their predecessor of more than ``max(discont, period/2)`` + to their `period`-complementary values. + + For the default case where `period` is :math:`2\pi` and is `discont` is + :math:`\pi`, this unwraps a radian phase `p` such that adjacent differences + are never greater than :math:`\pi` by adding :math:`2k\pi` for some + integer :math:`k`. + + Parameters + ---------- + p : array_like + Input array. + discont : float, optional + Maximum discontinuity between values, default is ``period/2``. + Values below ``period/2`` are treated as if they were ``period/2``. + To have an effect different from the default, `discont` should be + larger than ``period/2``. + axis : int, optional + Axis along which unwrap will operate, default is the last axis. + period: float, optional + Size of the range over which the input wraps. By default, it is + ``2 pi``. + + .. versionadded:: 1.21.0 + + Returns + ------- + out : ndarray + Output array. + + See Also + -------- + rad2deg, deg2rad + + Notes + ----- + If the discontinuity in `p` is smaller than ``period/2``, + but larger than `discont`, no unwrapping is done because taking + the complement would only make the discontinuity larger. + + Examples + -------- + >>> phase = np.linspace(0, np.pi, num=5) + >>> phase[3:] += np.pi + >>> phase + array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531]) # may vary + >>> np.unwrap(phase) + array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ]) # may vary + >>> np.unwrap([0, 1, 2, -1, 0], period=4) + array([0, 1, 2, 3, 4]) + >>> np.unwrap([ 1, 2, 3, 4, 5, 6, 1, 2, 3], period=6) + array([1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> np.unwrap([2, 3, 4, 5, 2, 3, 4, 5], period=4) + array([2, 3, 4, 5, 6, 7, 8, 9]) + >>> phase_deg = np.mod(np.linspace(0 ,720, 19), 360) - 180 + >>> np.unwrap(phase_deg, period=360) + array([-180., -140., -100., -60., -20., 20., 60., 100., 140., + 180., 220., 260., 300., 340., 380., 420., 460., 500., + 540.]) + """ + p = asarray(p) + nd = p.ndim + dd = diff(p, axis=axis) + if discont is None: + discont = period/2 + slice1 = [slice(None, None)]*nd # full slices + slice1[axis] = slice(1, None) + slice1 = tuple(slice1) + dtype = np.result_type(dd, period) + if _nx.issubdtype(dtype, _nx.integer): + interval_high, rem = divmod(period, 2) + boundary_ambiguous = rem == 0 + else: + interval_high = period / 2 + boundary_ambiguous = True + interval_low = -interval_high + ddmod = mod(dd - interval_low, period) + interval_low + if boundary_ambiguous: + # for `mask = (abs(dd) == period/2)`, the above line made + # `ddmod[mask] == -period/2`. correct these such that + # `ddmod[mask] == sign(dd[mask])*period/2`. 
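+        # E.g. with the default period=2*pi, a delta of exactly +pi is
+        # mapped to -pi by the modulo above; flipping it back to +pi makes
+        # ph_correct zero there, so an exact half-period jump is left
+        # unchanged instead of being "unwrapped" by a full period.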
+ _nx.copyto(ddmod, interval_high, + where=(ddmod == interval_low) & (dd > 0)) + ph_correct = ddmod - dd + _nx.copyto(ph_correct, 0, where=abs(dd) < discont) + up = array(p, copy=True, dtype=dtype) + up[slice1] = p[slice1] + ph_correct.cumsum(axis) + return up + + +def _sort_complex(a): + return (a,) + + +@array_function_dispatch(_sort_complex) +def sort_complex(a): + """ + Sort a complex array using the real part first, then the imaginary part. + + Parameters + ---------- + a : array_like + Input array + + Returns + ------- + out : complex ndarray + Always returns a sorted complex array. + + Examples + -------- + >>> np.sort_complex([5, 3, 6, 2, 1]) + array([1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j]) + + >>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j]) + array([1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j]) + + """ + b = array(a, copy=True) + b.sort() + if not issubclass(b.dtype.type, _nx.complexfloating): + if b.dtype.char in 'bhBH': + return b.astype('F') + elif b.dtype.char == 'g': + return b.astype('G') + else: + return b.astype('D') + else: + return b + + +def _trim_zeros(filt, trim=None): + return (filt,) + + +@array_function_dispatch(_trim_zeros) +def trim_zeros(filt, trim='fb'): + """ + Trim the leading and/or trailing zeros from a 1-D array or sequence. + + Parameters + ---------- + filt : 1-D array or sequence + Input array. + trim : str, optional + A string with 'f' representing trim from front and 'b' to trim from + back. Default is 'fb', trim zeros from both front and back of the + array. + + Returns + ------- + trimmed : 1-D array or sequence + The result of trimming the input. The input data type is preserved. + + Examples + -------- + >>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0)) + >>> np.trim_zeros(a) + array([1, 2, 3, 0, 2, 1]) + + >>> np.trim_zeros(a, 'b') + array([0, 0, 0, ..., 0, 2, 1]) + + The input data type is preserved, list/tuple in means list/tuple out. + + >>> np.trim_zeros([0, 1, 2, 0]) + [1, 2] + + """ + + first = 0 + trim = trim.upper() + if 'F' in trim: + for i in filt: + if i != 0.: + break + else: + first = first + 1 + last = len(filt) + if 'B' in trim: + for i in filt[::-1]: + if i != 0.: + break + else: + last = last - 1 + return filt[first:last] + + +def _extract_dispatcher(condition, arr): + return (condition, arr) + + +@array_function_dispatch(_extract_dispatcher) +def extract(condition, arr): + """ + Return the elements of an array that satisfy some condition. + + This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If + `condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``. + + Note that `place` does the exact opposite of `extract`. + + Parameters + ---------- + condition : array_like + An array whose nonzero or True entries indicate the elements of `arr` + to extract. + arr : array_like + Input array of the same size as `condition`. + + Returns + ------- + extract : ndarray + Rank 1 array of values from `arr` where `condition` is True. 
+ + See Also + -------- + take, put, copyto, compress, place + + Examples + -------- + >>> arr = np.arange(12).reshape((3, 4)) + >>> arr + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> condition = np.mod(arr, 3)==0 + >>> condition + array([[ True, False, False, True], + [False, False, True, False], + [False, True, False, False]]) + >>> np.extract(condition, arr) + array([0, 3, 6, 9]) + + + If `condition` is boolean: + + >>> arr[condition] + array([0, 3, 6, 9]) + + """ + return _nx.take(ravel(arr), nonzero(ravel(condition))[0]) + + +def _place_dispatcher(arr, mask, vals): + return (arr, mask, vals) + + +@array_function_dispatch(_place_dispatcher) +def place(arr, mask, vals): + """ + Change elements of an array based on conditional and input values. + + Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that + `place` uses the first N elements of `vals`, where N is the number of + True values in `mask`, while `copyto` uses the elements where `mask` + is True. + + Note that `extract` does the exact opposite of `place`. + + Parameters + ---------- + arr : ndarray + Array to put data into. + mask : array_like + Boolean mask array. Must have the same size as `a`. + vals : 1-D sequence + Values to put into `a`. Only the first N elements are used, where + N is the number of True values in `mask`. If `vals` is smaller + than N, it will be repeated, and if elements of `a` are to be masked, + this sequence must be non-empty. + + See Also + -------- + copyto, put, take, extract + + Examples + -------- + >>> arr = np.arange(6).reshape(2, 3) + >>> np.place(arr, arr>2, [44, 55]) + >>> arr + array([[ 0, 1, 2], + [44, 55, 44]]) + + """ + if not isinstance(arr, np.ndarray): + raise TypeError("argument 1 must be numpy.ndarray, " + "not {name}".format(name=type(arr).__name__)) + + return _insert(arr, mask, vals) + + +def disp(mesg, device=None, linefeed=True): + """ + Display a message on a device. + + Parameters + ---------- + mesg : str + Message to display. + device : object + Device to write message. If None, defaults to ``sys.stdout`` which is + very similar to ``print``. `device` needs to have ``write()`` and + ``flush()`` methods. + linefeed : bool, optional + Option whether to print a line feed or not. Defaults to True. + + Raises + ------ + AttributeError + If `device` does not have a ``write()`` or ``flush()`` method. + + Examples + -------- + Besides ``sys.stdout``, a file-like object can also be used as it has + both required methods: + + >>> from io import StringIO + >>> buf = StringIO() + >>> np.disp(u'"Display" in a file', device=buf) + >>> buf.getvalue() + '"Display" in a file\\n' + + """ + if device is None: + device = sys.stdout + if linefeed: + device.write('%s\n' % mesg) + else: + device.write('%s' % mesg) + device.flush() + return + + +# See https://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html +_DIMENSION_NAME = r'\w+' +_CORE_DIMENSION_LIST = '(?:{0:}(?:,{0:})*)?'.format(_DIMENSION_NAME) +_ARGUMENT = r'\({}\)'.format(_CORE_DIMENSION_LIST) +_ARGUMENT_LIST = '{0:}(?:,{0:})*'.format(_ARGUMENT) +_SIGNATURE = '^{0:}->{0:}$'.format(_ARGUMENT_LIST) + + +def _parse_gufunc_signature(signature): + """ + Parse string signatures for a generalized universal function. + + Arguments + --------- + signature : string + Generalized universal function signature, e.g., ``(m,n),(n,p)->(m,p)`` + for ``np.matmul``. + + Returns + ------- + Tuple of input and output core dimensions parsed from the signature, each + of the form List[Tuple[str, ...]]. 
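+
+    Examples
+    --------
+    For instance, the ``np.matmul`` signature quoted above parses into
+    input and output core dimensions as follows (the output is determined
+    by the regular expressions defined above):
+
+    >>> _parse_gufunc_signature('(m,n),(n,p)->(m,p)')
+    ([('m', 'n'), ('n', 'p')], [('m', 'p')])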
+ """ + if not re.match(_SIGNATURE, signature): + raise ValueError( + 'not a valid gufunc signature: {}'.format(signature)) + return tuple([tuple(re.findall(_DIMENSION_NAME, arg)) + for arg in re.findall(_ARGUMENT, arg_list)] + for arg_list in signature.split('->')) + + +def _update_dim_sizes(dim_sizes, arg, core_dims): + """ + Incrementally check and update core dimension sizes for a single argument. + + Arguments + --------- + dim_sizes : Dict[str, int] + Sizes of existing core dimensions. Will be updated in-place. + arg : ndarray + Argument to examine. + core_dims : Tuple[str, ...] + Core dimensions for this argument. + """ + if not core_dims: + return + + num_core_dims = len(core_dims) + if arg.ndim < num_core_dims: + raise ValueError( + '%d-dimensional argument does not have enough ' + 'dimensions for all core dimensions %r' + % (arg.ndim, core_dims)) + + core_shape = arg.shape[-num_core_dims:] + for dim, size in zip(core_dims, core_shape): + if dim in dim_sizes: + if size != dim_sizes[dim]: + raise ValueError( + 'inconsistent size for core dimension %r: %r vs %r' + % (dim, size, dim_sizes[dim])) + else: + dim_sizes[dim] = size + + +def _parse_input_dimensions(args, input_core_dims): + """ + Parse broadcast and core dimensions for vectorize with a signature. + + Arguments + --------- + args : Tuple[ndarray, ...] + Tuple of input arguments to examine. + input_core_dims : List[Tuple[str, ...]] + List of core dimensions corresponding to each input. + + Returns + ------- + broadcast_shape : Tuple[int, ...] + Common shape to broadcast all non-core dimensions to. + dim_sizes : Dict[str, int] + Common sizes for named core dimensions. + """ + broadcast_args = [] + dim_sizes = {} + for arg, core_dims in zip(args, input_core_dims): + _update_dim_sizes(dim_sizes, arg, core_dims) + ndim = arg.ndim - len(core_dims) + dummy_array = np.lib.stride_tricks.as_strided(0, arg.shape[:ndim]) + broadcast_args.append(dummy_array) + broadcast_shape = np.lib.stride_tricks._broadcast_shape(*broadcast_args) + return broadcast_shape, dim_sizes + + +def _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims): + """Helper for calculating broadcast shapes with core dimensions.""" + return [broadcast_shape + tuple(dim_sizes[dim] for dim in core_dims) + for core_dims in list_of_core_dims] + + +def _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes): + """Helper for creating output arrays in vectorize.""" + shapes = _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims) + arrays = tuple(np.empty(shape, dtype=dtype) + for shape, dtype in zip(shapes, dtypes)) + return arrays + + +@set_module('numpy') +class vectorize: + """ + vectorize(pyfunc, otypes=None, doc=None, excluded=None, cache=False, + signature=None) + + Generalized function class. + + Define a vectorized function which takes a nested sequence of objects or + numpy arrays as inputs and returns a single numpy array or a tuple of numpy + arrays. The vectorized function evaluates `pyfunc` over successive tuples + of the input arrays like the python map function, except it uses the + broadcasting rules of numpy. + + The data type of the output of `vectorized` is determined by calling + the function with the first element of the input. This can be avoided + by specifying the `otypes` argument. + + Parameters + ---------- + pyfunc : callable + A python function or method. + otypes : str or list of dtypes, optional + The output data type. 
It must be specified as either a string of + typecode characters or a list of data type specifiers. There should + be one data type specifier for each output. + doc : str, optional + The docstring for the function. If None, the docstring will be the + ``pyfunc.__doc__``. + excluded : set, optional + Set of strings or integers representing the positional or keyword + arguments for which the function will not be vectorized. These will be + passed directly to `pyfunc` unmodified. + + .. versionadded:: 1.7.0 + + cache : bool, optional + If `True`, then cache the first function call that determines the number + of outputs if `otypes` is not provided. + + .. versionadded:: 1.7.0 + + signature : string, optional + Generalized universal function signature, e.g., ``(m,n),(n)->(m)`` for + vectorized matrix-vector multiplication. If provided, ``pyfunc`` will + be called with (and expected to return) arrays with shapes given by the + size of corresponding core dimensions. By default, ``pyfunc`` is + assumed to take scalars as input and output. + + .. versionadded:: 1.12.0 + + Returns + ------- + vectorized : callable + Vectorized function. + + See Also + -------- + frompyfunc : Takes an arbitrary Python function and returns a ufunc + + Notes + ----- + The `vectorize` function is provided primarily for convenience, not for + performance. The implementation is essentially a for loop. + + If `otypes` is not specified, then a call to the function with the + first argument will be used to determine the number of outputs. The + results of this call will be cached if `cache` is `True` to prevent + calling the function twice. However, to implement the cache, the + original function must be wrapped which will slow down subsequent + calls, so only do this if your function is expensive. + + The new keyword argument interface and `excluded` argument support + further degrades performance. + + References + ---------- + .. [1] :doc:`/reference/c-api/generalized-ufuncs` + + Examples + -------- + >>> def myfunc(a, b): + ... "Return a-b if a>b, otherwise return a+b" + ... if a > b: + ... return a - b + ... else: + ... return a + b + + >>> vfunc = np.vectorize(myfunc) + >>> vfunc([1, 2, 3, 4], 2) + array([3, 4, 1, 2]) + + The docstring is taken from the input function to `vectorize` unless it + is specified: + + >>> vfunc.__doc__ + 'Return a-b if a>b, otherwise return a+b' + >>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`') + >>> vfunc.__doc__ + 'Vectorized `myfunc`' + + The output type is determined by evaluating the first element of the input, + unless it is specified: + + >>> out = vfunc([1, 2, 3, 4], 2) + >>> type(out[0]) + + >>> vfunc = np.vectorize(myfunc, otypes=[float]) + >>> out = vfunc([1, 2, 3, 4], 2) + >>> type(out[0]) + + + The `excluded` argument can be used to prevent vectorizing over certain + arguments. This can be useful for array-like arguments of a fixed length + such as the coefficients for a polynomial as in `polyval`: + + >>> def mypolyval(p, x): + ... _p = list(p) + ... res = _p.pop(0) + ... while _p: + ... res = res*x + _p.pop(0) + ... return res + >>> vpolyval = np.vectorize(mypolyval, excluded=['p']) + >>> vpolyval(p=[1, 2, 3], x=[0, 1]) + array([3, 6]) + + Positional arguments may also be excluded by specifying their position: + + >>> vpolyval.excluded.add(0) + >>> vpolyval([1, 2, 3], x=[0, 1]) + array([3, 6]) + + The `signature` argument allows for vectorizing functions that act on + non-scalar arrays of fixed length. 
For example, you can use it for a + vectorized calculation of Pearson correlation coefficient and its p-value: + + >>> import scipy.stats + >>> pearsonr = np.vectorize(scipy.stats.pearsonr, + ... signature='(n),(n)->(),()') + >>> pearsonr([[0, 1, 2, 3]], [[1, 2, 3, 4], [4, 3, 2, 1]]) + (array([ 1., -1.]), array([ 0., 0.])) + + Or for a vectorized convolution: + + >>> convolve = np.vectorize(np.convolve, signature='(n),(m)->(k)') + >>> convolve(np.eye(4), [1, 2, 1]) + array([[1., 2., 1., 0., 0., 0.], + [0., 1., 2., 1., 0., 0.], + [0., 0., 1., 2., 1., 0.], + [0., 0., 0., 1., 2., 1.]]) + + """ + def __init__(self, pyfunc, otypes=None, doc=None, excluded=None, + cache=False, signature=None): + self.pyfunc = pyfunc + self.cache = cache + self.signature = signature + self._ufunc = {} # Caching to improve default performance + + if doc is None: + self.__doc__ = pyfunc.__doc__ + else: + self.__doc__ = doc + + if isinstance(otypes, str): + for char in otypes: + if char not in typecodes['All']: + raise ValueError("Invalid otype specified: %s" % (char,)) + elif iterable(otypes): + otypes = ''.join([_nx.dtype(x).char for x in otypes]) + elif otypes is not None: + raise ValueError("Invalid otype specification") + self.otypes = otypes + + # Excluded variable support + if excluded is None: + excluded = set() + self.excluded = set(excluded) + + if signature is not None: + self._in_and_out_core_dims = _parse_gufunc_signature(signature) + else: + self._in_and_out_core_dims = None + + def __call__(self, *args, **kwargs): + """ + Return arrays with the results of `pyfunc` broadcast (vectorized) over + `args` and `kwargs` not in `excluded`. + """ + excluded = self.excluded + if not kwargs and not excluded: + func = self.pyfunc + vargs = args + else: + # The wrapper accepts only positional arguments: we use `names` and + # `inds` to mutate `the_args` and `kwargs` to pass to the original + # function. + nargs = len(args) + + names = [_n for _n in kwargs if _n not in excluded] + inds = [_i for _i in range(nargs) if _i not in excluded] + the_args = list(args) + + def func(*vargs): + for _n, _i in enumerate(inds): + the_args[_i] = vargs[_n] + kwargs.update(zip(names, vargs[len(inds):])) + return self.pyfunc(*the_args, **kwargs) + + vargs = [args[_i] for _i in inds] + vargs.extend([kwargs[_n] for _n in names]) + + return self._vectorize_call(func=func, args=vargs) + + def _get_ufunc_and_otypes(self, func, args): + """Return (ufunc, otypes).""" + # frompyfunc will fail if args is empty + if not args: + raise ValueError('args can not be empty') + + if self.otypes is not None: + otypes = self.otypes + + # self._ufunc is a dictionary whose keys are the number of + # arguments (i.e. len(args)) and whose values are ufuncs created + # by frompyfunc. len(args) can be different for different calls if + # self.pyfunc has parameters with default values. We only use the + # cache when func is self.pyfunc, which occurs when the call uses + # only positional arguments and no arguments are excluded. + + nin = len(args) + nout = len(self.otypes) + if func is not self.pyfunc or nin not in self._ufunc: + ufunc = frompyfunc(func, nin, nout) + else: + ufunc = None # We'll get it from self._ufunc + if func is self.pyfunc: + ufunc = self._ufunc.setdefault(nin, ufunc) + else: + # Get number of outputs and output types by calling the function on + # the first entries of args. We also cache the result to prevent + # the subsequent call when the ufunc is evaluated. 
+ # Assumes that ufunc first evaluates the 0th elements in the input + # arrays (the input values are not checked to ensure this) + args = [asarray(arg) for arg in args] + if builtins.any(arg.size == 0 for arg in args): + raise ValueError('cannot call `vectorize` on size 0 inputs ' + 'unless `otypes` is set') + + inputs = [arg.flat[0] for arg in args] + outputs = func(*inputs) + + # Performance note: profiling indicates that -- for simple + # functions at least -- this wrapping can almost double the + # execution time. + # Hence we make it optional. + if self.cache: + _cache = [outputs] + + def _func(*vargs): + if _cache: + return _cache.pop() + else: + return func(*vargs) + else: + _func = func + + if isinstance(outputs, tuple): + nout = len(outputs) + else: + nout = 1 + outputs = (outputs,) + + otypes = ''.join([asarray(outputs[_k]).dtype.char + for _k in range(nout)]) + + # Performance note: profiling indicates that creating the ufunc is + # not a significant cost compared with wrapping so it seems not + # worth trying to cache this. + ufunc = frompyfunc(_func, len(args), nout) + + return ufunc, otypes + + def _vectorize_call(self, func, args): + """Vectorized call to `func` over positional `args`.""" + if self.signature is not None: + res = self._vectorize_call_with_signature(func, args) + elif not args: + res = func() + else: + ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args) + + # Convert args to object arrays first + inputs = [asanyarray(a, dtype=object) for a in args] + + outputs = ufunc(*inputs) + + if ufunc.nout == 1: + res = asanyarray(outputs, dtype=otypes[0]) + else: + res = tuple([asanyarray(x, dtype=t) + for x, t in zip(outputs, otypes)]) + return res + + def _vectorize_call_with_signature(self, func, args): + """Vectorized call over positional arguments with a signature.""" + input_core_dims, output_core_dims = self._in_and_out_core_dims + + if len(args) != len(input_core_dims): + raise TypeError('wrong number of positional arguments: ' + 'expected %r, got %r' + % (len(input_core_dims), len(args))) + args = tuple(asanyarray(arg) for arg in args) + + broadcast_shape, dim_sizes = _parse_input_dimensions( + args, input_core_dims) + input_shapes = _calculate_shapes(broadcast_shape, dim_sizes, + input_core_dims) + args = [np.broadcast_to(arg, shape, subok=True) + for arg, shape in zip(args, input_shapes)] + + outputs = None + otypes = self.otypes + nout = len(output_core_dims) + + for index in np.ndindex(*broadcast_shape): + results = func(*(arg[index] for arg in args)) + + n_results = len(results) if isinstance(results, tuple) else 1 + + if nout != n_results: + raise ValueError( + 'wrong number of outputs from pyfunc: expected %r, got %r' + % (nout, n_results)) + + if nout == 1: + results = (results,) + + if outputs is None: + for result, core_dims in zip(results, output_core_dims): + _update_dim_sizes(dim_sizes, result, core_dims) + + if otypes is None: + otypes = [asarray(result).dtype for result in results] + + outputs = _create_arrays(broadcast_shape, dim_sizes, + output_core_dims, otypes) + + for output, result in zip(outputs, results): + output[index] = result + + if outputs is None: + # did not call the function even once + if otypes is None: + raise ValueError('cannot call `vectorize` on size 0 inputs ' + 'unless `otypes` is set') + if builtins.any(dim not in dim_sizes + for dims in output_core_dims + for dim in dims): + raise ValueError('cannot call `vectorize` with a signature ' + 'including new output dimensions on size 0 ' + 'inputs') + outputs = 
_create_arrays(broadcast_shape, dim_sizes, + output_core_dims, otypes) + + return outputs[0] if nout == 1 else outputs + + +def _cov_dispatcher(m, y=None, rowvar=None, bias=None, ddof=None, + fweights=None, aweights=None, *, dtype=None): + return (m, y, fweights, aweights) + + +@array_function_dispatch(_cov_dispatcher) +def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, + aweights=None, *, dtype=None): + """ + Estimate a covariance matrix, given data and weights. + + Covariance indicates the level to which two variables vary together. + If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`, + then the covariance matrix element :math:`C_{ij}` is the covariance of + :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance + of :math:`x_i`. + + See the notes for an outline of the algorithm. + + Parameters + ---------- + m : array_like + A 1-D or 2-D array containing multiple variables and observations. + Each row of `m` represents a variable, and each column a single + observation of all those variables. Also see `rowvar` below. + y : array_like, optional + An additional set of variables and observations. `y` has the same form + as that of `m`. + rowvar : bool, optional + If `rowvar` is True (default), then each row represents a + variable, with observations in the columns. Otherwise, the relationship + is transposed: each column represents a variable, while the rows + contain observations. + bias : bool, optional + Default normalization (False) is by ``(N - 1)``, where ``N`` is the + number of observations given (unbiased estimate). If `bias` is True, + then normalization is by ``N``. These values can be overridden by using + the keyword ``ddof`` in numpy versions >= 1.5. + ddof : int, optional + If not ``None`` the default value implied by `bias` is overridden. + Note that ``ddof=1`` will return the unbiased estimate, even if both + `fweights` and `aweights` are specified, and ``ddof=0`` will return + the simple average. See the notes for the details. The default value + is ``None``. + + .. versionadded:: 1.5 + fweights : array_like, int, optional + 1-D array of integer frequency weights; the number of times each + observation vector should be repeated. + + .. versionadded:: 1.10 + aweights : array_like, optional + 1-D array of observation vector weights. These relative weights are + typically large for observations considered "important" and smaller for + observations considered less "important". If ``ddof=0`` the array of + weights can be used to assign probabilities to observation vectors. + + .. versionadded:: 1.10 + dtype : data-type, optional + Data-type of the result. By default, the return data-type will have + at least `numpy.float64` precision. + + .. versionadded:: 1.20 + + Returns + ------- + out : ndarray + The covariance matrix of the variables. + + See Also + -------- + corrcoef : Normalized covariance matrix + + Notes + ----- + Assume that the observations are in the columns of the observation + array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The + steps to compute the weighted covariance are as follows:: + + >>> m = np.arange(10, dtype=np.float64) + >>> f = np.arange(10) * 2 + >>> a = np.arange(10) ** 2. 
+ >>> ddof = 1 + >>> w = f * a + >>> v1 = np.sum(w) + >>> v2 = np.sum(w * a) + >>> m -= np.sum(m * w, axis=None, keepdims=True) / v1 + >>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2) + + Note that when ``a == 1``, the normalization factor + ``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)`` + as it should. + + Examples + -------- + Consider two variables, :math:`x_0` and :math:`x_1`, which + correlate perfectly, but in opposite directions: + + >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T + >>> x + array([[0, 1, 2], + [2, 1, 0]]) + + Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance + matrix shows this clearly: + + >>> np.cov(x) + array([[ 1., -1.], + [-1., 1.]]) + + Note that element :math:`C_{0,1}`, which shows the correlation between + :math:`x_0` and :math:`x_1`, is negative. + + Further, note how `x` and `y` are combined: + + >>> x = [-2.1, -1, 4.3] + >>> y = [3, 1.1, 0.12] + >>> X = np.stack((x, y), axis=0) + >>> np.cov(X) + array([[11.71 , -4.286 ], # may vary + [-4.286 , 2.144133]]) + >>> np.cov(x, y) + array([[11.71 , -4.286 ], # may vary + [-4.286 , 2.144133]]) + >>> np.cov(x) + array(11.71) + + """ + # Check inputs + if ddof is not None and ddof != int(ddof): + raise ValueError( + "ddof must be integer") + + # Handles complex arrays too + m = np.asarray(m) + if m.ndim > 2: + raise ValueError("m has more than 2 dimensions") + + if y is not None: + y = np.asarray(y) + if y.ndim > 2: + raise ValueError("y has more than 2 dimensions") + + if dtype is None: + if y is None: + dtype = np.result_type(m, np.float64) + else: + dtype = np.result_type(m, y, np.float64) + + X = array(m, ndmin=2, dtype=dtype) + if not rowvar and X.shape[0] != 1: + X = X.T + if X.shape[0] == 0: + return np.array([]).reshape(0, 0) + if y is not None: + y = array(y, copy=False, ndmin=2, dtype=dtype) + if not rowvar and y.shape[0] != 1: + y = y.T + X = np.concatenate((X, y), axis=0) + + if ddof is None: + if bias == 0: + ddof = 1 + else: + ddof = 0 + + # Get the product of frequencies and weights + w = None + if fweights is not None: + fweights = np.asarray(fweights, dtype=float) + if not np.all(fweights == np.around(fweights)): + raise TypeError( + "fweights must be integer") + if fweights.ndim > 1: + raise RuntimeError( + "cannot handle multidimensional fweights") + if fweights.shape[0] != X.shape[1]: + raise RuntimeError( + "incompatible numbers of samples and fweights") + if any(fweights < 0): + raise ValueError( + "fweights cannot be negative") + w = fweights + if aweights is not None: + aweights = np.asarray(aweights, dtype=float) + if aweights.ndim > 1: + raise RuntimeError( + "cannot handle multidimensional aweights") + if aweights.shape[0] != X.shape[1]: + raise RuntimeError( + "incompatible numbers of samples and aweights") + if any(aweights < 0): + raise ValueError( + "aweights cannot be negative") + if w is None: + w = aweights + else: + w *= aweights + + avg, w_sum = average(X, axis=1, weights=w, returned=True) + w_sum = w_sum[0] + + # Determine the normalization + if w is None: + fact = X.shape[1] - ddof + elif ddof == 0: + fact = w_sum + elif aweights is None: + fact = w_sum - ddof + else: + fact = w_sum - ddof*sum(w*aweights)/w_sum + + if fact <= 0: + warnings.warn("Degrees of freedom <= 0 for slice", + RuntimeWarning, stacklevel=3) + fact = 0.0 + + X -= avg[:, None] + if w is None: + X_T = X.T + else: + X_T = (X*w).T + c = dot(X, X_T.conj()) + c *= np.true_divide(1, fact) + return c.squeeze() + + +def _corrcoef_dispatcher(x, y=None, rowvar=None, 
bias=None, ddof=None, *, + dtype=None): + return (x, y) + + +@array_function_dispatch(_corrcoef_dispatcher) +def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue, *, + dtype=None): + """ + Return Pearson product-moment correlation coefficients. + + Please refer to the documentation for `cov` for more detail. The + relationship between the correlation coefficient matrix, `R`, and the + covariance matrix, `C`, is + + .. math:: R_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } } + + The values of `R` are between -1 and 1, inclusive. + + Parameters + ---------- + x : array_like + A 1-D or 2-D array containing multiple variables and observations. + Each row of `x` represents a variable, and each column a single + observation of all those variables. Also see `rowvar` below. + y : array_like, optional + An additional set of variables and observations. `y` has the same + shape as `x`. + rowvar : bool, optional + If `rowvar` is True (default), then each row represents a + variable, with observations in the columns. Otherwise, the relationship + is transposed: each column represents a variable, while the rows + contain observations. + bias : _NoValue, optional + Has no effect, do not use. + + .. deprecated:: 1.10.0 + ddof : _NoValue, optional + Has no effect, do not use. + + .. deprecated:: 1.10.0 + dtype : data-type, optional + Data-type of the result. By default, the return data-type will have + at least `numpy.float64` precision. + + .. versionadded:: 1.20 + + Returns + ------- + R : ndarray + The correlation coefficient matrix of the variables. + + See Also + -------- + cov : Covariance matrix + + Notes + ----- + Due to floating point rounding the resulting array may not be Hermitian, + the diagonal elements may not be 1, and the elements may not satisfy the + inequality abs(a) <= 1. The real and imaginary parts are clipped to the + interval [-1, 1] in an attempt to improve on that situation but is not + much help in the complex case. + + This function accepts but discards arguments `bias` and `ddof`. This is + for backwards compatibility with previous versions of this function. These + arguments had no effect on the return values of the function and can be + safely ignored in this and previous versions of numpy. + + Examples + -------- + In this example we generate two random arrays, ``xarr`` and ``yarr``, and + compute the row-wise and column-wise Pearson correlation coefficients, + ``R``. Since ``rowvar`` is true by default, we first find the row-wise + Pearson correlation coefficients between the variables of ``xarr``. + + >>> import numpy as np + >>> rng = np.random.default_rng(seed=42) + >>> xarr = rng.random((3, 3)) + >>> xarr + array([[0.77395605, 0.43887844, 0.85859792], + [0.69736803, 0.09417735, 0.97562235], + [0.7611397 , 0.78606431, 0.12811363]]) + >>> R1 = np.corrcoef(xarr) + >>> R1 + array([[ 1. , 0.99256089, -0.68080986], + [ 0.99256089, 1. , -0.76492172], + [-0.68080986, -0.76492172, 1. ]]) + + If we add another set of variables and observations ``yarr``, we can + compute the row-wise Pearson correlation coefficients between the + variables in ``xarr`` and ``yarr``. + + >>> yarr = rng.random((3, 3)) + >>> yarr + array([[0.45038594, 0.37079802, 0.92676499], + [0.64386512, 0.82276161, 0.4434142 ], + [0.22723872, 0.55458479, 0.06381726]]) + >>> R2 = np.corrcoef(xarr, yarr) + >>> R2 + array([[ 1. , 0.99256089, -0.68080986, 0.75008178, -0.934284 , + -0.99004057], + [ 0.99256089, 1. 
, -0.76492172, 0.82502011, -0.97074098, + -0.99981569], + [-0.68080986, -0.76492172, 1. , -0.99507202, 0.89721355, + 0.77714685], + [ 0.75008178, 0.82502011, -0.99507202, 1. , -0.93657855, + -0.83571711], + [-0.934284 , -0.97074098, 0.89721355, -0.93657855, 1. , + 0.97517215], + [-0.99004057, -0.99981569, 0.77714685, -0.83571711, 0.97517215, + 1. ]]) + + Finally if we use the option ``rowvar=False``, the columns are now + being treated as the variables and we will find the column-wise Pearson + correlation coefficients between variables in ``xarr`` and ``yarr``. + + >>> R3 = np.corrcoef(xarr, yarr, rowvar=False) + >>> R3 + array([[ 1. , 0.77598074, -0.47458546, -0.75078643, -0.9665554 , + 0.22423734], + [ 0.77598074, 1. , -0.92346708, -0.99923895, -0.58826587, + -0.44069024], + [-0.47458546, -0.92346708, 1. , 0.93773029, 0.23297648, + 0.75137473], + [-0.75078643, -0.99923895, 0.93773029, 1. , 0.55627469, + 0.47536961], + [-0.9665554 , -0.58826587, 0.23297648, 0.55627469, 1. , + -0.46666491], + [ 0.22423734, -0.44069024, 0.75137473, 0.47536961, -0.46666491, + 1. ]]) + + """ + if bias is not np._NoValue or ddof is not np._NoValue: + # 2015-03-15, 1.10 + warnings.warn('bias and ddof have no effect and are deprecated', + DeprecationWarning, stacklevel=3) + c = cov(x, y, rowvar, dtype=dtype) + try: + d = diag(c) + except ValueError: + # scalar covariance + # nan if incorrect value (nan, inf, 0), 1 otherwise + return c / c + stddev = sqrt(d.real) + c /= stddev[:, None] + c /= stddev[None, :] + + # Clip real and imaginary parts to [-1, 1]. This does not guarantee + # abs(a[i,j]) <= 1 for complex arrays, but is the best we can do without + # excessive work. + np.clip(c.real, -1, 1, out=c.real) + if np.iscomplexobj(c): + np.clip(c.imag, -1, 1, out=c.imag) + + return c + + +@set_module('numpy') +def blackman(M): + """ + Return the Blackman window. + + The Blackman window is a taper formed by using the first three + terms of a summation of cosines. It was designed to have close to the + minimal leakage possible. It is close to optimal, only slightly worse + than a Kaiser window. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + + Returns + ------- + out : ndarray + The window, with the maximum value normalized to one (the value one + appears only if the number of samples is odd). + + See Also + -------- + bartlett, hamming, hanning, kaiser + + Notes + ----- + The Blackman window is defined as + + .. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M) + + Most references to the Blackman window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. It is known as a + "near optimal" tapering function, almost as good (by some measures) + as the kaiser window. + + References + ---------- + Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, + Dover Publications, New York. + + Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing. + Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471. 
+
+    Examples
+    --------
+    >>> import matplotlib.pyplot as plt
+    >>> np.blackman(12)
+    array([-1.38777878e-17,   3.26064346e-02,   1.59903635e-01, # may vary
+            4.14397981e-01,   7.36045180e-01,   9.67046769e-01,
+            9.67046769e-01,   7.36045180e-01,   4.14397981e-01,
+            1.59903635e-01,   3.26064346e-02,  -1.38777878e-17])
+
+    Plot the window and the frequency response:
+
+    >>> from numpy.fft import fft, fftshift
+    >>> window = np.blackman(51)
+    >>> plt.plot(window)
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.title("Blackman window")
+    Text(0.5, 1.0, 'Blackman window')
+    >>> plt.ylabel("Amplitude")
+    Text(0, 0.5, 'Amplitude')
+    >>> plt.xlabel("Sample")
+    Text(0.5, 0, 'Sample')
+    >>> plt.show()
+
+    >>> plt.figure()
+    <Figure size 640x480 with 0 Axes>
+ >>> A = fft(window, 2048) / 25.5 + >>> mag = np.abs(fftshift(A)) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> with np.errstate(divide='ignore', invalid='ignore'): + ... response = 20 * np.log10(mag) + ... + >>> response = np.clip(response, -100, 100) + >>> plt.plot(freq, response) + [] + >>> plt.title("Frequency response of Blackman window") + Text(0.5, 1.0, 'Frequency response of Blackman window') + >>> plt.ylabel("Magnitude [dB]") + Text(0, 0.5, 'Magnitude [dB]') + >>> plt.xlabel("Normalized frequency [cycles per sample]") + Text(0.5, 0, 'Normalized frequency [cycles per sample]') + >>> _ = plt.axis('tight') + >>> plt.show() + + """ + if M < 1: + return array([]) + if M == 1: + return ones(1, float) + n = arange(1-M, M, 2) + return 0.42 + 0.5*cos(pi*n/(M-1)) + 0.08*cos(2.0*pi*n/(M-1)) + + +@set_module('numpy') +def bartlett(M): + """ + Return the Bartlett window. + + The Bartlett window is very similar to a triangular window, except + that the end points are at zero. It is often used in signal + processing for tapering a signal, without generating too much + ripple in the frequency domain. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an + empty array is returned. + + Returns + ------- + out : array + The triangular window, with the maximum value normalized to one + (the value one appears only if the number of samples is odd), with + the first and last samples equal to zero. + + See Also + -------- + blackman, hamming, hanning, kaiser + + Notes + ----- + The Bartlett window is defined as + + .. math:: w(n) = \\frac{2}{M-1} \\left( + \\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right| + \\right) + + Most references to the Bartlett window come from the signal + processing literature, where it is used as one of many windowing + functions for smoothing values. Note that convolution with this + window produces linear interpolation. It is also known as an + apodization (which means"removing the foot", i.e. smoothing + discontinuities at the beginning and end of the sampled signal) or + tapering function. The fourier transform of the Bartlett is the product + of two sinc functions. + Note the excellent discussion in Kanasewich. + + References + ---------- + .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra", + Biometrika 37, 1-16, 1950. + .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", + The University of Alberta Press, 1975, pp. 109-110. + .. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal + Processing", Prentice-Hall, 1999, pp. 468-471. + .. [4] Wikipedia, "Window function", + https://en.wikipedia.org/wiki/Window_function + .. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, + "Numerical Recipes", Cambridge University Press, 1986, page 429. + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> np.bartlett(12) + array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273, # may vary + 0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636, + 0.18181818, 0. ]) + + Plot the window and its frequency response (requires SciPy and matplotlib): + + >>> from numpy.fft import fft, fftshift + >>> window = np.bartlett(51) + >>> plt.plot(window) + [] + >>> plt.title("Bartlett window") + Text(0.5, 1.0, 'Bartlett window') + >>> plt.ylabel("Amplitude") + Text(0, 0.5, 'Amplitude') + >>> plt.xlabel("Sample") + Text(0.5, 0, 'Sample') + >>> plt.show() + + >>> plt.figure() +
+    <Figure size 640x480 with 0 Axes>
+ >>> A = fft(window, 2048) / 25.5 + >>> mag = np.abs(fftshift(A)) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> with np.errstate(divide='ignore', invalid='ignore'): + ... response = 20 * np.log10(mag) + ... + >>> response = np.clip(response, -100, 100) + >>> plt.plot(freq, response) + [] + >>> plt.title("Frequency response of Bartlett window") + Text(0.5, 1.0, 'Frequency response of Bartlett window') + >>> plt.ylabel("Magnitude [dB]") + Text(0, 0.5, 'Magnitude [dB]') + >>> plt.xlabel("Normalized frequency [cycles per sample]") + Text(0.5, 0, 'Normalized frequency [cycles per sample]') + >>> _ = plt.axis('tight') + >>> plt.show() + + """ + if M < 1: + return array([]) + if M == 1: + return ones(1, float) + n = arange(1-M, M, 2) + return where(less_equal(n, 0), 1 + n/(M-1), 1 - n/(M-1)) + + +@set_module('numpy') +def hanning(M): + """ + Return the Hanning window. + + The Hanning window is a taper formed by using a weighted cosine. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an + empty array is returned. + + Returns + ------- + out : ndarray, shape(M,) + The window, with the maximum value normalized to one (the value + one appears only if `M` is odd). + + See Also + -------- + bartlett, blackman, hamming, kaiser + + Notes + ----- + The Hanning window is defined as + + .. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right) + \\qquad 0 \\leq n \\leq M-1 + + The Hanning was named for Julius von Hann, an Austrian meteorologist. + It is also known as the Cosine Bell. Some authors prefer that it be + called a Hann window, to help avoid confusion with the very similar + Hamming window. + + Most references to the Hanning window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. + + References + ---------- + .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power + spectra, Dover Publications, New York. + .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", + The University of Alberta Press, 1975, pp. 106-108. + .. [3] Wikipedia, "Window function", + https://en.wikipedia.org/wiki/Window_function + .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, + "Numerical Recipes", Cambridge University Press, 1986, page 425. + + Examples + -------- + >>> np.hanning(12) + array([0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037, + 0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249, + 0.07937323, 0. ]) + + Plot the window and its frequency response: + + >>> import matplotlib.pyplot as plt + >>> from numpy.fft import fft, fftshift + >>> window = np.hanning(51) + >>> plt.plot(window) + [] + >>> plt.title("Hann window") + Text(0.5, 1.0, 'Hann window') + >>> plt.ylabel("Amplitude") + Text(0, 0.5, 'Amplitude') + >>> plt.xlabel("Sample") + Text(0.5, 0, 'Sample') + >>> plt.show() + + >>> plt.figure() +
+    <Figure size 640x480 with 0 Axes>
+ >>> A = fft(window, 2048) / 25.5 + >>> mag = np.abs(fftshift(A)) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> with np.errstate(divide='ignore', invalid='ignore'): + ... response = 20 * np.log10(mag) + ... + >>> response = np.clip(response, -100, 100) + >>> plt.plot(freq, response) + [] + >>> plt.title("Frequency response of the Hann window") + Text(0.5, 1.0, 'Frequency response of the Hann window') + >>> plt.ylabel("Magnitude [dB]") + Text(0, 0.5, 'Magnitude [dB]') + >>> plt.xlabel("Normalized frequency [cycles per sample]") + Text(0.5, 0, 'Normalized frequency [cycles per sample]') + >>> plt.axis('tight') + ... + >>> plt.show() + + """ + if M < 1: + return array([]) + if M == 1: + return ones(1, float) + n = arange(1-M, M, 2) + return 0.5 + 0.5*cos(pi*n/(M-1)) + + +@set_module('numpy') +def hamming(M): + """ + Return the Hamming window. + + The Hamming window is a taper formed by using a weighted cosine. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an + empty array is returned. + + Returns + ------- + out : ndarray + The window, with the maximum value normalized to one (the value + one appears only if the number of samples is odd). + + See Also + -------- + bartlett, blackman, hanning, kaiser + + Notes + ----- + The Hamming window is defined as + + .. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right) + \\qquad 0 \\leq n \\leq M-1 + + The Hamming was named for R. W. Hamming, an associate of J. W. Tukey + and is described in Blackman and Tukey. It was recommended for + smoothing the truncated autocovariance function in the time domain. + Most references to the Hamming window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. + + References + ---------- + .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power + spectra, Dover Publications, New York. + .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The + University of Alberta Press, 1975, pp. 109-110. + .. [3] Wikipedia, "Window function", + https://en.wikipedia.org/wiki/Window_function + .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, + "Numerical Recipes", Cambridge University Press, 1986, page 425. + + Examples + -------- + >>> np.hamming(12) + array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594, # may vary + 0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909, + 0.15302337, 0.08 ]) + + Plot the window and the frequency response: + + >>> import matplotlib.pyplot as plt + >>> from numpy.fft import fft, fftshift + >>> window = np.hamming(51) + >>> plt.plot(window) + [] + >>> plt.title("Hamming window") + Text(0.5, 1.0, 'Hamming window') + >>> plt.ylabel("Amplitude") + Text(0, 0.5, 'Amplitude') + >>> plt.xlabel("Sample") + Text(0.5, 0, 'Sample') + >>> plt.show() + + >>> plt.figure() +
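+ The Hamming window is an affine rescaling of the Hann window; the
+ relation below follows directly from the two defining formulas (a
+ quick numerical sketch):
+
+ >>> np.allclose(np.hamming(12), 0.08 + 0.92 * np.hanning(12))
+ True
+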
+ >>> A = fft(window, 2048) / 25.5 + >>> mag = np.abs(fftshift(A)) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * np.log10(mag) + >>> response = np.clip(response, -100, 100) + >>> plt.plot(freq, response) + [] + >>> plt.title("Frequency response of Hamming window") + Text(0.5, 1.0, 'Frequency response of Hamming window') + >>> plt.ylabel("Magnitude [dB]") + Text(0, 0.5, 'Magnitude [dB]') + >>> plt.xlabel("Normalized frequency [cycles per sample]") + Text(0.5, 0, 'Normalized frequency [cycles per sample]') + >>> plt.axis('tight') + ... + >>> plt.show() + + """ + if M < 1: + return array([]) + if M == 1: + return ones(1, float) + n = arange(1-M, M, 2) + return 0.54 + 0.46*cos(pi*n/(M-1)) + + +## Code from cephes for i0 + +_i0A = [ + -4.41534164647933937950E-18, + 3.33079451882223809783E-17, + -2.43127984654795469359E-16, + 1.71539128555513303061E-15, + -1.16853328779934516808E-14, + 7.67618549860493561688E-14, + -4.85644678311192946090E-13, + 2.95505266312963983461E-12, + -1.72682629144155570723E-11, + 9.67580903537323691224E-11, + -5.18979560163526290666E-10, + 2.65982372468238665035E-9, + -1.30002500998624804212E-8, + 6.04699502254191894932E-8, + -2.67079385394061173391E-7, + 1.11738753912010371815E-6, + -4.41673835845875056359E-6, + 1.64484480707288970893E-5, + -5.75419501008210370398E-5, + 1.88502885095841655729E-4, + -5.76375574538582365885E-4, + 1.63947561694133579842E-3, + -4.32430999505057594430E-3, + 1.05464603945949983183E-2, + -2.37374148058994688156E-2, + 4.93052842396707084878E-2, + -9.49010970480476444210E-2, + 1.71620901522208775349E-1, + -3.04682672343198398683E-1, + 6.76795274409476084995E-1 + ] + +_i0B = [ + -7.23318048787475395456E-18, + -4.83050448594418207126E-18, + 4.46562142029675999901E-17, + 3.46122286769746109310E-17, + -2.82762398051658348494E-16, + -3.42548561967721913462E-16, + 1.77256013305652638360E-15, + 3.81168066935262242075E-15, + -9.55484669882830764870E-15, + -4.15056934728722208663E-14, + 1.54008621752140982691E-14, + 3.85277838274214270114E-13, + 7.18012445138366623367E-13, + -1.79417853150680611778E-12, + -1.32158118404477131188E-11, + -3.14991652796324136454E-11, + 1.18891471078464383424E-11, + 4.94060238822496958910E-10, + 3.39623202570838634515E-9, + 2.26666899049817806459E-8, + 2.04891858946906374183E-7, + 2.89137052083475648297E-6, + 6.88975834691682398426E-5, + 3.36911647825569408990E-3, + 8.04490411014108831608E-1 + ] + + +def _chbevl(x, vals): + b0 = vals[0] + b1 = 0.0 + + for i in range(1, len(vals)): + b2 = b1 + b1 = b0 + b0 = x*b1 - b2 + vals[i] + + return 0.5*(b0 - b2) + + +def _i0_1(x): + return exp(x) * _chbevl(x/2.0-2, _i0A) + + +def _i0_2(x): + return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x) + + +def _i0_dispatcher(x): + return (x,) + + +@array_function_dispatch(_i0_dispatcher) +def i0(x): + """ + Modified Bessel function of the first kind, order 0. + + Usually denoted :math:`I_0`. + + Parameters + ---------- + x : array_like of float + Argument of the Bessel function. + + Returns + ------- + out : ndarray, shape = x.shape, dtype = float + The modified Bessel function evaluated at each of the elements of `x`. + + See Also + -------- + scipy.special.i0, scipy.special.iv, scipy.special.ive + + Notes + ----- + The scipy implementation is recommended over this function: it is a + proper ufunc written in C, and more than an order of magnitude faster. 
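+
+ The value can be cross-checked against the defining power series
+ :math:`I_0(x) = \\sum_{k=0}^{\\infty} (x/2)^{2k} / (k!)^2` (a quick
+ sketch, truncating the series by hand at 30 terms):
+
+ >>> from math import factorial
+ >>> series = sum((3.0 / 2)**(2 * k) / factorial(k)**2 for k in range(30))
+ >>> np.allclose(np.i0(3.0), series)
+ True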
+ + We use the algorithm published by Clenshaw [1]_ and referenced by + Abramowitz and Stegun [2]_, for which the function domain is + partitioned into the two intervals [0,8] and (8,inf), and Chebyshev + polynomial expansions are employed in each interval. Relative error on + the domain [0,30] using IEEE arithmetic is documented [3]_ as having a + peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000). + + References + ---------- + .. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in + *National Physical Laboratory Mathematical Tables*, vol. 5, London: + Her Majesty's Stationery Office, 1962. + .. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical + Functions*, 10th printing, New York: Dover, 1964, pp. 379. + http://www.math.sfu.ca/~cbm/aands/page_379.htm + .. [3] https://metacpan.org/pod/distribution/Math-Cephes/lib/Math/Cephes.pod#i0:-Modified-Bessel-function-of-order-zero + + Examples + -------- + >>> np.i0(0.) + array(1.0) + >>> np.i0([0, 1, 2, 3]) + array([1. , 1.26606588, 2.2795853 , 4.88079259]) + + """ + x = np.asanyarray(x) + if x.dtype.kind == 'c': + raise TypeError("i0 not supported for complex values") + if x.dtype.kind != 'f': + x = x.astype(float) + x = np.abs(x) + return piecewise(x, [x <= 8.0], [_i0_1, _i0_2]) + +## End of cephes code for i0 + + +@set_module('numpy') +def kaiser(M, beta): + """ + Return the Kaiser window. + + The Kaiser window is a taper formed by using a Bessel function. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an + empty array is returned. + beta : float + Shape parameter for window. + + Returns + ------- + out : array + The window, with the maximum value normalized to one (the value + one appears only if the number of samples is odd). + + See Also + -------- + bartlett, blackman, hamming, hanning + + Notes + ----- + The Kaiser window is defined as + + .. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}} + \\right)/I_0(\\beta) + + with + + .. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2}, + + where :math:`I_0` is the modified zeroth-order Bessel function. + + The Kaiser was named for Jim Kaiser, who discovered a simple + approximation to the DPSS window based on Bessel functions. The Kaiser + window is a very good approximation to the Digital Prolate Spheroidal + Sequence, or Slepian window, which is the transform which maximizes the + energy in the main lobe of the window relative to total energy. + + The Kaiser can approximate many other windows by varying the beta + parameter. + + ==== ======================= + beta Window shape + ==== ======================= + 0 Rectangular + 5 Similar to a Hamming + 6 Similar to a Hanning + 8.6 Similar to a Blackman + ==== ======================= + + A beta value of 14 is probably a good starting point. Note that as beta + gets large, the window narrows, and so the number of samples needs to be + large enough to sample the increasingly narrow spike, otherwise NaNs will + get returned. + + Most references to the Kaiser window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. + + References + ---------- + .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by + digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285. 
+ John Wiley and Sons, New York, (1966).
+ .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
+ University of Alberta Press, 1975, pp. 177-178.
+ .. [3] Wikipedia, "Window function",
+ https://en.wikipedia.org/wiki/Window_function
+
+ Examples
+ --------
+ >>> import matplotlib.pyplot as plt
+ >>> np.kaiser(12, 14)
+ array([7.72686684e-06, 3.46009194e-03, 4.65200189e-02, # may vary
+ 2.29737120e-01, 5.99885316e-01, 9.45674898e-01,
+ 9.45674898e-01, 5.99885316e-01, 2.29737120e-01,
+ 4.65200189e-02, 3.46009194e-03, 7.72686684e-06])
+
+ Plot the window and the frequency response:
+
+ >>> from numpy.fft import fft, fftshift
+ >>> window = np.kaiser(51, 14)
+ >>> plt.plot(window)
+ [<matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.title("Kaiser window")
+ Text(0.5, 1.0, 'Kaiser window')
+ >>> plt.ylabel("Amplitude")
+ Text(0, 0.5, 'Amplitude')
+ >>> plt.xlabel("Sample")
+ Text(0.5, 0, 'Sample')
+ >>> plt.show()
+
+ >>> plt.figure()
+ <Figure size 640x480 with 0 Axes>
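+
+ The first row of the beta table above can be spot-checked directly:
+ ``beta = 0`` gives :math:`I_0(0)/I_0(0) = 1` at every sample, i.e. a
+ rectangular window (a quick sketch):
+
+ >>> np.allclose(np.kaiser(12, 0), np.ones(12))
+ True
+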
+ >>> A = fft(window, 2048) / 25.5
+ >>> mag = np.abs(fftshift(A))
+ >>> freq = np.linspace(-0.5, 0.5, len(A))
+ >>> response = 20 * np.log10(mag)
+ >>> response = np.clip(response, -100, 100)
+ >>> plt.plot(freq, response)
+ [<matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.title("Frequency response of Kaiser window")
+ Text(0.5, 1.0, 'Frequency response of Kaiser window')
+ >>> plt.ylabel("Magnitude [dB]")
+ Text(0, 0.5, 'Magnitude [dB]')
+ >>> plt.xlabel("Normalized frequency [cycles per sample]")
+ Text(0.5, 0, 'Normalized frequency [cycles per sample]')
+ >>> plt.axis('tight')
+ (-0.5, 0.5, -100.0, ...) # may vary
+ >>> plt.show()
+
+ """
+ if M == 1:
+ return np.array([1.])
+ n = arange(0, M)
+ alpha = (M-1)/2.0
+ return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
+
+
+def _sinc_dispatcher(x):
+ return (x,)
+
+
+@array_function_dispatch(_sinc_dispatcher)
+def sinc(x):
+ r"""
+ Return the normalized sinc function.
+
+ The sinc function is :math:`\sin(\pi x)/(\pi x)`.
+
+ .. note::
+
+ Note the normalization factor of ``pi`` used in the definition.
+ This is the most commonly used definition in signal processing.
+ Use ``sinc(x / np.pi)`` to obtain the unnormalized sinc function
+ :math:`\sin(x)/(x)` that is more common in mathematics.
+
+ Parameters
+ ----------
+ x : ndarray
+ Array (possibly multi-dimensional) of values for which to
+ calculate ``sinc(x)``.
+
+ Returns
+ -------
+ out : ndarray
+ ``sinc(x)``, which has the same shape as the input.
+
+ Notes
+ -----
+ ``sinc(0)`` is the limit value 1.
+
+ The name sinc is short for "sine cardinal" or "sinus cardinalis".
+
+ The sinc function is used in various signal processing applications,
+ including in anti-aliasing, in the construction of a Lanczos resampling
+ filter, and in interpolation.
+
+ For bandlimited interpolation of discrete-time signals, the ideal
+ interpolation kernel is proportional to the sinc function.
+
+ References
+ ----------
+ .. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
+ Resource. http://mathworld.wolfram.com/SincFunction.html
+ .. [2] Wikipedia, "Sinc function",
+ https://en.wikipedia.org/wiki/Sinc_function
+
+ Examples
+ --------
+ >>> import matplotlib.pyplot as plt
+ >>> x = np.linspace(-4, 4, 41)
+ >>> np.sinc(x)
+ array([-3.89804309e-17, -4.92362781e-02, -8.40918587e-02, # may vary
+ -8.90384387e-02, -5.84680802e-02, 3.89804309e-17,
+ 6.68206631e-02, 1.16434881e-01, 1.26137788e-01,
+ 8.50444803e-02, -3.89804309e-17, -1.03943254e-01,
+ -1.89206682e-01, -2.16236208e-01, -1.55914881e-01,
+ 3.89804309e-17, 2.33872321e-01, 5.04551152e-01,
+ 7.56826729e-01, 9.35489284e-01, 1.00000000e+00,
+ 9.35489284e-01, 7.56826729e-01, 5.04551152e-01,
+ 2.33872321e-01, 3.89804309e-17, -1.55914881e-01,
+ -2.16236208e-01, -1.89206682e-01, -1.03943254e-01,
+ -3.89804309e-17, 8.50444803e-02, 1.26137788e-01,
+ 1.16434881e-01, 6.68206631e-02, 3.89804309e-17,
+ -5.84680802e-02, -8.90384387e-02, -8.40918587e-02,
+ -4.92362781e-02, -3.89804309e-17])
+
+ >>> plt.plot(x, np.sinc(x))
+ [<matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.title("Sinc Function")
+ Text(0.5, 1.0, 'Sinc Function')
+ >>> plt.ylabel("Amplitude")
+ Text(0, 0.5, 'Amplitude')
+ >>> plt.xlabel("X")
+ Text(0.5, 0, 'X')
+ >>> plt.show()
+
+ """
+ x = np.asanyarray(x)
+ y = pi * where(x == 0, 1.0e-20, x)
+ return sin(y)/y
+
+
+def _msort_dispatcher(a):
+ return (a,)
+
+
+@array_function_dispatch(_msort_dispatcher)
+def msort(a):
+ """
+ Return a copy of an array sorted along the first axis.
+
+ Parameters
+ ----------
+ a : array_like
+ Array to be sorted.
+ + Returns + ------- + sorted_array : ndarray + Array of the same type and shape as `a`. + + See Also + -------- + sort + + Notes + ----- + ``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``. + + """ + b = array(a, subok=True, copy=True) + b.sort(0) + return b + + +def _ureduce(a, func, **kwargs): + """ + Internal Function. + Call `func` with `a` as first argument swapping the axes to use extended + axis on functions that don't support it natively. + + Returns result and a.shape with axis dims set to 1. + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + func : callable + Reduction function capable of receiving a single axis argument. + It is called with `a` as first argument followed by `kwargs`. + kwargs : keyword arguments + additional keyword arguments to pass to `func`. + + Returns + ------- + result : tuple + Result of func(a, **kwargs) and a.shape with axis dims set to 1 + which can be used to reshape the result to the same shape a ufunc with + keepdims=True would produce. + + """ + a = np.asanyarray(a) + axis = kwargs.get('axis', None) + if axis is not None: + keepdim = list(a.shape) + nd = a.ndim + axis = _nx.normalize_axis_tuple(axis, nd) + + for ax in axis: + keepdim[ax] = 1 + + if len(axis) == 1: + kwargs['axis'] = axis[0] + else: + keep = set(range(nd)) - set(axis) + nkeep = len(keep) + # swap axis that should not be reduced to front + for i, s in enumerate(sorted(keep)): + a = a.swapaxes(i, s) + # merge reduced axis + a = a.reshape(a.shape[:nkeep] + (-1,)) + kwargs['axis'] = -1 + keepdim = tuple(keepdim) + else: + keepdim = (1,) * a.ndim + + r = func(a, **kwargs) + return r, keepdim + + +def _median_dispatcher( + a, axis=None, out=None, overwrite_input=None, keepdims=None): + return (a, out) + + +@array_function_dispatch(_median_dispatcher) +def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): + """ + Compute the median along the specified axis. + + Returns the median of the array elements. + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + axis : {int, sequence of int, None}, optional + Axis or axes along which the medians are computed. The default + is to compute the median along a flattened version of the array. + A sequence of axes is supported since version 1.9.0. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output, + but the type (of the output) will be cast if necessary. + overwrite_input : bool, optional + If True, then allow use of memory of input array `a` for + calculations. The input array will be modified by the call to + `median`. This will save memory when you do not need to preserve + the contents of the input array. Treat the input as undefined, + but it will probably be fully or partially sorted. Default is + False. If `overwrite_input` is ``True`` and `a` is not already an + `ndarray`, an error will be raised. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `arr`. + + .. versionadded:: 1.9.0 + + Returns + ------- + median : ndarray + A new array holding the result. If the input contains integers + or floats smaller than ``float64``, then the output data-type is + ``np.float64``. Otherwise, the data-type of the output is the + same as that of the input. 
If `out` is specified, that array is
+ returned instead.
+
+ See Also
+ --------
+ mean, percentile
+
+ Notes
+ -----
+ Given a vector ``V`` of length ``N``, the median of ``V`` is the
+ middle value of a sorted copy of ``V``, ``V_sorted`` - i.e.,
+ ``V_sorted[(N-1)/2]``, when ``N`` is odd, and the average of the
+ two middle values of ``V_sorted`` when ``N`` is even.
+
+ Examples
+ --------
+ >>> a = np.array([[10, 7, 4], [3, 2, 1]])
+ >>> a
+ array([[10, 7, 4],
+ [ 3, 2, 1]])
+ >>> np.median(a)
+ 3.5
+ >>> np.median(a, axis=0)
+ array([6.5, 4.5, 2.5])
+ >>> np.median(a, axis=1)
+ array([7., 2.])
+ >>> m = np.median(a, axis=0)
+ >>> out = np.zeros_like(m)
+ >>> np.median(a, axis=0, out=m)
+ array([6.5, 4.5, 2.5])
+ >>> m
+ array([6.5, 4.5, 2.5])
+ >>> b = a.copy()
+ >>> np.median(b, axis=1, overwrite_input=True)
+ array([7., 2.])
+ >>> assert not np.all(a==b)
+ >>> b = a.copy()
+ >>> np.median(b, axis=None, overwrite_input=True)
+ 3.5
+ >>> assert not np.all(a==b)
+
+ """
+ r, k = _ureduce(a, func=_median, axis=axis, out=out,
+ overwrite_input=overwrite_input)
+ if keepdims:
+ return r.reshape(k)
+ else:
+ return r
+
+
+def _median(a, axis=None, out=None, overwrite_input=False):
+ # can't reasonably be implemented in terms of percentile as we have to
+ # call mean to not break astropy
+ a = np.asanyarray(a)
+
+ # Set the partition indexes
+ if axis is None:
+ sz = a.size
+ else:
+ sz = a.shape[axis]
+ if sz % 2 == 0:
+ szh = sz // 2
+ kth = [szh - 1, szh]
+ else:
+ kth = [(sz - 1) // 2]
+ # Check if the array contains any nan's
+ if np.issubdtype(a.dtype, np.inexact):
+ kth.append(-1)
+
+ if overwrite_input:
+ if axis is None:
+ part = a.ravel()
+ part.partition(kth)
+ else:
+ a.partition(kth, axis=axis)
+ part = a
+ else:
+ part = partition(a, kth, axis=axis)
+
+ if part.shape == ():
+ # make 0-D arrays work
+ return part.item()
+ if axis is None:
+ axis = 0
+
+ indexer = [slice(None)] * part.ndim
+ index = part.shape[axis] // 2
+ if part.shape[axis] % 2 == 1:
+ # index with slice to allow mean (below) to work
+ indexer[axis] = slice(index, index+1)
+ else:
+ indexer[axis] = slice(index-1, index+1)
+ indexer = tuple(indexer)
+
+ # Check if the array contains any nan's
+ if np.issubdtype(a.dtype, np.inexact) and sz > 0:
+ # warn and return nans like mean would
+ rout = mean(part[indexer], axis=axis, out=out)
+ return np.lib.utils._median_nancheck(part, rout, axis, out)
+ else:
+ # if there are no nans
+ # Use mean in odd and even case to coerce data type
+ # and check, use out array.
+ return mean(part[indexer], axis=axis, out=out)
+
+
+def _percentile_dispatcher(a, q, axis=None, out=None, overwrite_input=None,
+ interpolation=None, keepdims=None):
+ return (a, q, out)
+
+
+@array_function_dispatch(_percentile_dispatcher)
+def percentile(a, q, axis=None, out=None,
+ overwrite_input=False, interpolation='linear', keepdims=False):
+ """
+ Compute the q-th percentile of the data along the specified axis.
+
+ Returns the q-th percentile(s) of the array elements.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array or object that can be converted to an array.
+ q : array_like of float
+ Percentile or sequence of percentiles to compute, which must be between
+ 0 and 100 inclusive.
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the percentiles are computed. The
+ default is to compute the percentile(s) along a flattened
+ version of the array.
+
+ ..
versionchanged:: 1.9.0 + A tuple of axes is supported + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output, + but the type (of the output) will be cast if necessary. + overwrite_input : bool, optional + If True, then allow the input array `a` to be modified by intermediate + calculations, to save memory. In this case, the contents of the input + `a` after this function completes is undefined. + + interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} + This optional parameter specifies the interpolation method to + use when the desired percentile lies between two data points + ``i < j``: + + * 'linear': ``i + (j - i) * fraction``, where ``fraction`` + is the fractional part of the index surrounded by ``i`` + and ``j``. + * 'lower': ``i``. + * 'higher': ``j``. + * 'nearest': ``i`` or ``j``, whichever is nearest. + * 'midpoint': ``(i + j) / 2``. + + .. versionadded:: 1.9.0 + keepdims : bool, optional + If this is set to True, the axes which are reduced are left in + the result as dimensions with size one. With this option, the + result will broadcast correctly against the original array `a`. + + .. versionadded:: 1.9.0 + + Returns + ------- + percentile : scalar or ndarray + If `q` is a single percentile and `axis=None`, then the result + is a scalar. If multiple percentiles are given, first axis of + the result corresponds to the percentiles. The other axes are + the axes that remain after the reduction of `a`. If the input + contains integers or floats smaller than ``float64``, the output + data-type is ``float64``. Otherwise, the output data-type is the + same as that of the input. If `out` is specified, that array is + returned instead. + + See Also + -------- + mean + median : equivalent to ``percentile(..., 50)`` + nanpercentile + quantile : equivalent to percentile, except with q in the range [0, 1]. + + Notes + ----- + Given a vector ``V`` of length ``N``, the q-th percentile of + ``V`` is the value ``q/100`` of the way from the minimum to the + maximum in a sorted copy of ``V``. The values and distances of + the two nearest neighbors as well as the `interpolation` parameter + will determine the percentile if the normalized ranking does not + match the location of ``q`` exactly. This function is the same as + the median if ``q=50``, the same as the minimum if ``q=0`` and the + same as the maximum if ``q=100``. + + Examples + -------- + >>> a = np.array([[10, 7, 4], [3, 2, 1]]) + >>> a + array([[10, 7, 4], + [ 3, 2, 1]]) + >>> np.percentile(a, 50) + 3.5 + >>> np.percentile(a, 50, axis=0) + array([6.5, 4.5, 2.5]) + >>> np.percentile(a, 50, axis=1) + array([7., 2.]) + >>> np.percentile(a, 50, axis=1, keepdims=True) + array([[7.], + [2.]]) + + >>> m = np.percentile(a, 50, axis=0) + >>> out = np.zeros_like(m) + >>> np.percentile(a, 50, axis=0, out=out) + array([6.5, 4.5, 2.5]) + >>> m + array([6.5, 4.5, 2.5]) + + >>> b = a.copy() + >>> np.percentile(b, 50, axis=1, overwrite_input=True) + array([7., 2.]) + >>> assert not np.all(a == b) + + The different types of interpolation can be visualized graphically: + + .. 
plot:: + + import matplotlib.pyplot as plt + + a = np.arange(4) + p = np.linspace(0, 100, 6001) + ax = plt.gca() + lines = [ + ('linear', None), + ('higher', '--'), + ('lower', '--'), + ('nearest', '-.'), + ('midpoint', '-.'), + ] + for interpolation, style in lines: + ax.plot( + p, np.percentile(a, p, interpolation=interpolation), + label=interpolation, linestyle=style) + ax.set( + title='Interpolation methods for list: ' + str(a), + xlabel='Percentile', + ylabel='List item returned', + yticks=a) + ax.legend() + plt.show() + + """ + q = np.true_divide(q, 100) + q = asanyarray(q) # undo any decay that the ufunc performed (see gh-13105) + if not _quantile_is_valid(q): + raise ValueError("Percentiles must be in the range [0, 100]") + return _quantile_unchecked( + a, q, axis, out, overwrite_input, interpolation, keepdims) + + +def _quantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, + interpolation=None, keepdims=None): + return (a, q, out) + + +@array_function_dispatch(_quantile_dispatcher) +def quantile(a, q, axis=None, out=None, + overwrite_input=False, interpolation='linear', keepdims=False): + """ + Compute the q-th quantile of the data along the specified axis. + + .. versionadded:: 1.15.0 + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + q : array_like of float + Quantile or sequence of quantiles to compute, which must be between + 0 and 1 inclusive. + axis : {int, tuple of int, None}, optional + Axis or axes along which the quantiles are computed. The + default is to compute the quantile(s) along a flattened + version of the array. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output, + but the type (of the output) will be cast if necessary. + overwrite_input : bool, optional + If True, then allow the input array `a` to be modified by intermediate + calculations, to save memory. In this case, the contents of the input + `a` after this function completes is undefined. + interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} + This optional parameter specifies the interpolation method to + use when the desired quantile lies between two data points + ``i < j``: + + * linear: ``i + (j - i) * fraction``, where ``fraction`` + is the fractional part of the index surrounded by ``i`` + and ``j``. + * lower: ``i``. + * higher: ``j``. + * nearest: ``i`` or ``j``, whichever is nearest. + * midpoint: ``(i + j) / 2``. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left in + the result as dimensions with size one. With this option, the + result will broadcast correctly against the original array `a`. + + Returns + ------- + quantile : scalar or ndarray + If `q` is a single quantile and `axis=None`, then the result + is a scalar. If multiple quantiles are given, first axis of + the result corresponds to the quantiles. The other axes are + the axes that remain after the reduction of `a`. If the input + contains integers or floats smaller than ``float64``, the output + data-type is ``float64``. Otherwise, the output data-type is the + same as that of the input. If `out` is specified, that array is + returned instead. + + See Also + -------- + mean + percentile : equivalent to quantile, but with q in the range [0, 100]. 
+ median : equivalent to ``quantile(..., 0.5)`` + nanquantile + + Notes + ----- + Given a vector ``V`` of length ``N``, the q-th quantile of + ``V`` is the value ``q`` of the way from the minimum to the + maximum in a sorted copy of ``V``. The values and distances of + the two nearest neighbors as well as the `interpolation` parameter + will determine the quantile if the normalized ranking does not + match the location of ``q`` exactly. This function is the same as + the median if ``q=0.5``, the same as the minimum if ``q=0.0`` and the + same as the maximum if ``q=1.0``. + + Examples + -------- + >>> a = np.array([[10, 7, 4], [3, 2, 1]]) + >>> a + array([[10, 7, 4], + [ 3, 2, 1]]) + >>> np.quantile(a, 0.5) + 3.5 + >>> np.quantile(a, 0.5, axis=0) + array([6.5, 4.5, 2.5]) + >>> np.quantile(a, 0.5, axis=1) + array([7., 2.]) + >>> np.quantile(a, 0.5, axis=1, keepdims=True) + array([[7.], + [2.]]) + >>> m = np.quantile(a, 0.5, axis=0) + >>> out = np.zeros_like(m) + >>> np.quantile(a, 0.5, axis=0, out=out) + array([6.5, 4.5, 2.5]) + >>> m + array([6.5, 4.5, 2.5]) + >>> b = a.copy() + >>> np.quantile(b, 0.5, axis=1, overwrite_input=True) + array([7., 2.]) + >>> assert not np.all(a == b) + """ + q = np.asanyarray(q) + if not _quantile_is_valid(q): + raise ValueError("Quantiles must be in the range [0, 1]") + return _quantile_unchecked( + a, q, axis, out, overwrite_input, interpolation, keepdims) + + +def _quantile_unchecked(a, q, axis=None, out=None, overwrite_input=False, + interpolation='linear', keepdims=False): + """Assumes that q is in [0, 1], and is an ndarray""" + r, k = _ureduce(a, func=_quantile_ureduce_func, q=q, axis=axis, out=out, + overwrite_input=overwrite_input, + interpolation=interpolation) + if keepdims: + return r.reshape(q.shape + k) + else: + return r + + +def _quantile_is_valid(q): + # avoid expensive reductions, relevant for arrays with < O(1000) elements + if q.ndim == 1 and q.size < 10: + for i in range(q.size): + if not (0.0 <= q[i] <= 1.0): + return False + else: + if not (np.all(0 <= q) and np.all(q <= 1)): + return False + return True + + +def _lerp(a, b, t, out=None): + """ Linearly interpolate from a to b by a factor of t """ + diff_b_a = subtract(b, a) + # asanyarray is a stop-gap until gh-13105 + lerp_interpolation = asanyarray(add(a, diff_b_a*t, out=out)) + subtract(b, diff_b_a * (1 - t), out=lerp_interpolation, where=t>=0.5) + if lerp_interpolation.ndim == 0 and out is None: + lerp_interpolation = lerp_interpolation[()] # unpack 0d arrays + return lerp_interpolation + + +def _quantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False, + interpolation='linear', keepdims=False): + a = asarray(a) + + # ufuncs cause 0d array results to decay to scalars (see gh-13105), which + # makes them problematic for __setitem__ and attribute access. As a + # workaround, we call this on the result of every ufunc on a possibly-0d + # array. + not_scalar = np.asanyarray + + # prepare a for partitioning + if overwrite_input: + if axis is None: + ap = a.ravel() + else: + ap = a + else: + if axis is None: + ap = a.flatten() + else: + ap = a.copy() + + if axis is None: + axis = 0 + + if q.ndim > 2: + # The code below works fine for nd, but it might not have useful + # semantics. For now, keep the supported dimensions the same as it was + # before. 
+ raise ValueError("q must be a scalar or 1d") + + Nx = ap.shape[axis] + indices = not_scalar(q * (Nx - 1)) + # round fractional indices according to interpolation method + if interpolation == 'lower': + indices = floor(indices).astype(intp) + elif interpolation == 'higher': + indices = ceil(indices).astype(intp) + elif interpolation == 'midpoint': + indices = 0.5 * (floor(indices) + ceil(indices)) + elif interpolation == 'nearest': + indices = around(indices).astype(intp) + elif interpolation == 'linear': + pass # keep index as fraction and interpolate + else: + raise ValueError( + "interpolation can only be 'linear', 'lower' 'higher', " + "'midpoint', or 'nearest'") + + # The dimensions of `q` are prepended to the output shape, so we need the + # axis being sampled from `ap` to be first. + ap = np.moveaxis(ap, axis, 0) + del axis + + if np.issubdtype(indices.dtype, np.integer): + # take the points along axis + + if np.issubdtype(a.dtype, np.inexact): + # may contain nan, which would sort to the end + ap.partition(concatenate((indices.ravel(), [-1])), axis=0) + n = np.isnan(ap[-1]) + else: + # cannot contain nan + ap.partition(indices.ravel(), axis=0) + n = np.array(False, dtype=bool) + + r = take(ap, indices, axis=0, out=out) + + else: + # weight the points above and below the indices + + indices_below = not_scalar(floor(indices)).astype(intp) + indices_above = not_scalar(indices_below + 1) + indices_above[indices_above > Nx - 1] = Nx - 1 + + if np.issubdtype(a.dtype, np.inexact): + # may contain nan, which would sort to the end + ap.partition(concatenate(( + indices_below.ravel(), indices_above.ravel(), [-1] + )), axis=0) + n = np.isnan(ap[-1]) + else: + # cannot contain nan + ap.partition(concatenate(( + indices_below.ravel(), indices_above.ravel() + )), axis=0) + n = np.array(False, dtype=bool) + + weights_shape = indices.shape + (1,) * (ap.ndim - 1) + weights_above = not_scalar(indices - indices_below).reshape(weights_shape) + + x_below = take(ap, indices_below, axis=0) + x_above = take(ap, indices_above, axis=0) + + r = _lerp(x_below, x_above, weights_above, out=out) + + # if any slice contained a nan, then all results on that slice are also nan + if np.any(n): + if r.ndim == 0 and out is None: + # can't write to a scalar + r = a.dtype.type(np.nan) + else: + r[..., n] = a.dtype.type(np.nan) + + return r + + +def _trapz_dispatcher(y, x=None, dx=None, axis=None): + return (y, x) + + +@array_function_dispatch(_trapz_dispatcher) +def trapz(y, x=None, dx=1.0, axis=-1): + r""" + Integrate along the given axis using the composite trapezoidal rule. + + If `x` is provided, the integration happens in sequence along its + elements - they are not sorted. + + Integrate `y` (`x`) along each 1d slice on the given axis, compute + :math:`\int y(x) dx`. + When `x` is specified, this integrates along the parametric curve, + computing :math:`\int_t y(t) dt = + \int_t y(t) \left.\frac{dx}{dt}\right|_{x=x(t)} dt`. + + Parameters + ---------- + y : array_like + Input array to integrate. + x : array_like, optional + The sample points corresponding to the `y` values. If `x` is None, + the sample points are assumed to be evenly spaced `dx` apart. The + default is None. + dx : scalar, optional + The spacing between sample points when `x` is None. The default is 1. + axis : int, optional + The axis along which to integrate. + + Returns + ------- + trapz : float or ndarray + Definite integral of 'y' = n-dimensional array as approximated along + a single axis by the trapezoidal rule. 
If 'y' is a 1-dimensional array, + then the result is a float. If 'n' is greater than 1, then the result + is an 'n-1' dimensional array. + + See Also + -------- + sum, cumsum + + Notes + ----- + Image [2]_ illustrates trapezoidal rule -- y-axis locations of points + will be taken from `y` array, by default x-axis distances between + points will be 1.0, alternatively they can be provided with `x` array + or with `dx` scalar. Return value will be equal to combined area under + the red lines. + + + References + ---------- + .. [1] Wikipedia page: https://en.wikipedia.org/wiki/Trapezoidal_rule + + .. [2] Illustration image: + https://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png + + Examples + -------- + >>> np.trapz([1,2,3]) + 4.0 + >>> np.trapz([1,2,3], x=[4,6,8]) + 8.0 + >>> np.trapz([1,2,3], dx=2) + 8.0 + + Using a decreasing `x` corresponds to integrating in reverse: + + >>> np.trapz([1,2,3], x=[8,6,4]) + -8.0 + + More generally `x` is used to integrate along a parametric curve. + This finds the area of a circle, noting we repeat the sample which closes + the curve: + + >>> theta = np.linspace(0, 2 * np.pi, num=1000, endpoint=True) + >>> np.trapz(np.cos(theta), x=np.sin(theta)) + 3.141571941375841 + + >>> a = np.arange(6).reshape(2, 3) + >>> a + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.trapz(a, axis=0) + array([1.5, 2.5, 3.5]) + >>> np.trapz(a, axis=1) + array([2., 8.]) + """ + y = asanyarray(y) + if x is None: + d = dx + else: + x = asanyarray(x) + if x.ndim == 1: + d = diff(x) + # reshape to correct shape + shape = [1]*y.ndim + shape[axis] = d.shape[0] + d = d.reshape(shape) + else: + d = diff(x, axis=axis) + nd = y.ndim + slice1 = [slice(None)]*nd + slice2 = [slice(None)]*nd + slice1[axis] = slice(1, None) + slice2[axis] = slice(None, -1) + try: + ret = (d * (y[tuple(slice1)] + y[tuple(slice2)]) / 2.0).sum(axis) + except ValueError: + # Operations didn't work, cast to ndarray + d = np.asarray(d) + y = np.asarray(y) + ret = add.reduce(d * (y[tuple(slice1)]+y[tuple(slice2)])/2.0, axis) + return ret + + +def _meshgrid_dispatcher(*xi, copy=None, sparse=None, indexing=None): + return xi + + +# Based on scitools meshgrid +@array_function_dispatch(_meshgrid_dispatcher) +def meshgrid(*xi, copy=True, sparse=False, indexing='xy'): + """ + Return coordinate matrices from coordinate vectors. + + Make N-D coordinate arrays for vectorized evaluations of + N-D scalar/vector fields over N-D grids, given + one-dimensional coordinate arrays x1, x2,..., xn. + + .. versionchanged:: 1.9 + 1-D and 0-D cases are allowed. + + Parameters + ---------- + x1, x2,..., xn : array_like + 1-D arrays representing the coordinates of a grid. + indexing : {'xy', 'ij'}, optional + Cartesian ('xy', default) or matrix ('ij') indexing of output. + See Notes for more details. + + .. versionadded:: 1.7.0 + sparse : bool, optional + If True a sparse grid is returned in order to conserve memory. + Default is False. + + .. versionadded:: 1.7.0 + copy : bool, optional + If False, a view into the original arrays are returned in order to + conserve memory. Default is True. Please note that + ``sparse=False, copy=False`` will likely return non-contiguous + arrays. Furthermore, more than one element of a broadcast array + may refer to a single memory location. If you need to write to the + arrays, make copies first. + + .. 
versionadded:: 1.7.0 + + Returns + ------- + X1, X2,..., XN : ndarray + For vectors `x1`, `x2`,..., 'xn' with lengths ``Ni=len(xi)`` , + return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij' + or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy' + with the elements of `xi` repeated to fill the matrix along + the first dimension for `x1`, the second for `x2` and so on. + + Notes + ----- + This function supports both indexing conventions through the indexing + keyword argument. Giving the string 'ij' returns a meshgrid with + matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing. + In the 2-D case with inputs of length M and N, the outputs are of shape + (N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case + with inputs of length M, N and P, outputs are of shape (N, M, P) for + 'xy' indexing and (M, N, P) for 'ij' indexing. The difference is + illustrated by the following code snippet:: + + xv, yv = np.meshgrid(x, y, sparse=False, indexing='ij') + for i in range(nx): + for j in range(ny): + # treat xv[i,j], yv[i,j] + + xv, yv = np.meshgrid(x, y, sparse=False, indexing='xy') + for i in range(nx): + for j in range(ny): + # treat xv[j,i], yv[j,i] + + In the 1-D and 0-D case, the indexing and sparse keywords have no effect. + + See Also + -------- + mgrid : Construct a multi-dimensional "meshgrid" using indexing notation. + ogrid : Construct an open multi-dimensional "meshgrid" using indexing + notation. + + Examples + -------- + >>> nx, ny = (3, 2) + >>> x = np.linspace(0, 1, nx) + >>> y = np.linspace(0, 1, ny) + >>> xv, yv = np.meshgrid(x, y) + >>> xv + array([[0. , 0.5, 1. ], + [0. , 0.5, 1. ]]) + >>> yv + array([[0., 0., 0.], + [1., 1., 1.]]) + >>> xv, yv = np.meshgrid(x, y, sparse=True) # make sparse output arrays + >>> xv + array([[0. , 0.5, 1. ]]) + >>> yv + array([[0.], + [1.]]) + + `meshgrid` is very useful to evaluate functions on a grid. + + >>> import matplotlib.pyplot as plt + >>> x = np.arange(-5, 5, 0.1) + >>> y = np.arange(-5, 5, 0.1) + >>> xx, yy = np.meshgrid(x, y, sparse=True) + >>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2) + >>> h = plt.contourf(x, y, z) + >>> plt.axis('scaled') + >>> plt.show() + + """ + ndim = len(xi) + + if indexing not in ['xy', 'ij']: + raise ValueError( + "Valid values for `indexing` are 'xy' and 'ij'.") + + s0 = (1,) * ndim + output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1:]) + for i, x in enumerate(xi)] + + if indexing == 'xy' and ndim > 1: + # switch first and second axis + output[0].shape = (1, -1) + s0[2:] + output[1].shape = (-1, 1) + s0[2:] + + if not sparse: + # Return the full N-D matrix (not only the 1-D vector) + output = np.broadcast_arrays(*output, subok=True) + + if copy: + output = [x.copy() for x in output] + + return output + + +def _delete_dispatcher(arr, obj, axis=None): + return (arr, obj) + + +@array_function_dispatch(_delete_dispatcher) +def delete(arr, obj, axis=None): + """ + Return a new array with sub-arrays along an axis deleted. For a one + dimensional array, this returns those entries not returned by + `arr[obj]`. + + Parameters + ---------- + arr : array_like + Input array. + obj : slice, int or array of ints + Indicate indices of sub-arrays to remove along the specified axis. + + .. versionchanged:: 1.19.0 + Boolean indices are now treated as a mask of elements to remove, + rather than being cast to the integers 0 and 1. + + axis : int, optional + The axis along which to delete the subarray defined by `obj`. 
+ If `axis` is None, `obj` is applied to the flattened array. + + Returns + ------- + out : ndarray + A copy of `arr` with the elements specified by `obj` removed. Note + that `delete` does not occur in-place. If `axis` is None, `out` is + a flattened array. + + See Also + -------- + insert : Insert elements into an array. + append : Append elements at the end of an array. + + Notes + ----- + Often it is preferable to use a boolean mask. For example: + + >>> arr = np.arange(12) + 1 + >>> mask = np.ones(len(arr), dtype=bool) + >>> mask[[0,2,4]] = False + >>> result = arr[mask,...] + + Is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further + use of `mask`. + + Examples + -------- + >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) + >>> arr + array([[ 1, 2, 3, 4], + [ 5, 6, 7, 8], + [ 9, 10, 11, 12]]) + >>> np.delete(arr, 1, 0) + array([[ 1, 2, 3, 4], + [ 9, 10, 11, 12]]) + + >>> np.delete(arr, np.s_[::2], 1) + array([[ 2, 4], + [ 6, 8], + [10, 12]]) + >>> np.delete(arr, [1,3,5], None) + array([ 1, 3, 5, 7, 8, 9, 10, 11, 12]) + + """ + wrap = None + if type(arr) is not ndarray: + try: + wrap = arr.__array_wrap__ + except AttributeError: + pass + + arr = asarray(arr) + ndim = arr.ndim + arrorder = 'F' if arr.flags.fnc else 'C' + if axis is None: + if ndim != 1: + arr = arr.ravel() + # needed for np.matrix, which is still not 1d after being ravelled + ndim = arr.ndim + axis = ndim - 1 + else: + axis = normalize_axis_index(axis, ndim) + + slobj = [slice(None)]*ndim + N = arr.shape[axis] + newshape = list(arr.shape) + + if isinstance(obj, slice): + start, stop, step = obj.indices(N) + xr = range(start, stop, step) + numtodel = len(xr) + + if numtodel <= 0: + if wrap: + return wrap(arr.copy(order=arrorder)) + else: + return arr.copy(order=arrorder) + + # Invert if step is negative: + if step < 0: + step = -step + start = xr[-1] + stop = xr[0] + 1 + + newshape[axis] -= numtodel + new = empty(newshape, arr.dtype, arrorder) + # copy initial chunk + if start == 0: + pass + else: + slobj[axis] = slice(None, start) + new[tuple(slobj)] = arr[tuple(slobj)] + # copy end chunk + if stop == N: + pass + else: + slobj[axis] = slice(stop-numtodel, None) + slobj2 = [slice(None)]*ndim + slobj2[axis] = slice(stop, None) + new[tuple(slobj)] = arr[tuple(slobj2)] + # copy middle pieces + if step == 1: + pass + else: # use array indexing. 
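+ # Build a boolean mask over arr[start:stop] that is False at the
+ # deleted (every `step`-th) positions, then copy the surviving
+ # entries into the middle section of `new`.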
+ keep = ones(stop-start, dtype=bool) + keep[:stop-start:step] = False + slobj[axis] = slice(start, stop-numtodel) + slobj2 = [slice(None)]*ndim + slobj2[axis] = slice(start, stop) + arr = arr[tuple(slobj2)] + slobj2[axis] = keep + new[tuple(slobj)] = arr[tuple(slobj2)] + if wrap: + return wrap(new) + else: + return new + + if isinstance(obj, (int, integer)) and not isinstance(obj, bool): + # optimization for a single value + if (obj < -N or obj >= N): + raise IndexError( + "index %i is out of bounds for axis %i with " + "size %i" % (obj, axis, N)) + if (obj < 0): + obj += N + newshape[axis] -= 1 + new = empty(newshape, arr.dtype, arrorder) + slobj[axis] = slice(None, obj) + new[tuple(slobj)] = arr[tuple(slobj)] + slobj[axis] = slice(obj, None) + slobj2 = [slice(None)]*ndim + slobj2[axis] = slice(obj+1, None) + new[tuple(slobj)] = arr[tuple(slobj2)] + else: + _obj = obj + obj = np.asarray(obj) + if obj.size == 0 and not isinstance(_obj, np.ndarray): + obj = obj.astype(intp) + + if obj.dtype == bool: + if obj.shape != (N,): + raise ValueError('boolean array argument obj to delete ' + 'must be one dimensional and match the axis ' + 'length of {}'.format(N)) + + # optimization, the other branch is slower + keep = ~obj + else: + keep = ones(N, dtype=bool) + keep[obj,] = False + + slobj[axis] = keep + new = arr[tuple(slobj)] + + if wrap: + return wrap(new) + else: + return new + + +def _insert_dispatcher(arr, obj, values, axis=None): + return (arr, obj, values) + + +@array_function_dispatch(_insert_dispatcher) +def insert(arr, obj, values, axis=None): + """ + Insert values along the given axis before the given indices. + + Parameters + ---------- + arr : array_like + Input array. + obj : int, slice or sequence of ints + Object that defines the index or indices before which `values` is + inserted. + + .. versionadded:: 1.8.0 + + Support for multiple insertions when `obj` is a single scalar or a + sequence with one element (similar to calling insert multiple + times). + values : array_like + Values to insert into `arr`. If the type of `values` is different + from that of `arr`, `values` is converted to the type of `arr`. + `values` should be shaped so that ``arr[...,obj,...] = values`` + is legal. + axis : int, optional + Axis along which to insert `values`. If `axis` is None then `arr` + is flattened first. + + Returns + ------- + out : ndarray + A copy of `arr` with `values` inserted. Note that `insert` + does not occur in-place: a new array is returned. If + `axis` is None, `out` is a flattened array. + + See Also + -------- + append : Append elements at the end of an array. + concatenate : Join a sequence of arrays along an existing axis. + delete : Delete elements from an array. + + Notes + ----- + Note that for higher dimensional inserts `obj=0` behaves very different + from `obj=[0]` just like `arr[:,0,:] = values` is different from + `arr[:,[0],:] = values`. + + Examples + -------- + >>> a = np.array([[1, 1], [2, 2], [3, 3]]) + >>> a + array([[1, 1], + [2, 2], + [3, 3]]) + >>> np.insert(a, 1, 5) + array([1, 5, 1, ..., 2, 3, 3]) + >>> np.insert(a, 1, 5, axis=1) + array([[1, 5, 1], + [2, 5, 2], + [3, 5, 3]]) + + Difference between sequence and scalars: + + >>> np.insert(a, [1], [[1],[2],[3]], axis=1) + array([[1, 1, 1], + [2, 2, 2], + [3, 3, 3]]) + >>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1), + ... 
np.insert(a, [1], [[1],[2],[3]], axis=1))
+ True
+
+ >>> b = a.flatten()
+ >>> b
+ array([1, 1, 2, 2, 3, 3])
+ >>> np.insert(b, [2, 2], [5, 6])
+ array([1, 1, 5, ..., 2, 3, 3])
+
+ >>> np.insert(b, slice(2, 4), [5, 6])
+ array([1, 1, 5, ..., 2, 3, 3])
+
+ >>> np.insert(b, [2, 2], [7.13, False]) # type casting
+ array([1, 1, 7, ..., 2, 3, 3])
+
+ >>> x = np.arange(8).reshape(2, 4)
+ >>> idx = (1, 3)
+ >>> np.insert(x, idx, 999, axis=1)
+ array([[ 0, 999, 1, 2, 999, 3],
+ [ 4, 999, 5, 6, 999, 7]])
+
+ """
+ wrap = None
+ if type(arr) is not ndarray:
+ try:
+ wrap = arr.__array_wrap__
+ except AttributeError:
+ pass
+
+ arr = asarray(arr)
+ ndim = arr.ndim
+ arrorder = 'F' if arr.flags.fnc else 'C'
+ if axis is None:
+ if ndim != 1:
+ arr = arr.ravel()
+ # needed for np.matrix, which is still not 1d after being ravelled
+ ndim = arr.ndim
+ axis = ndim - 1
+ else:
+ axis = normalize_axis_index(axis, ndim)
+ slobj = [slice(None)]*ndim
+ N = arr.shape[axis]
+ newshape = list(arr.shape)
+
+ if isinstance(obj, slice):
+ # turn it into a range object
+ indices = arange(*obj.indices(N), dtype=intp)
+ else:
+ # need to copy obj, because indices will be changed in-place
+ indices = np.array(obj)
+ if indices.dtype == bool:
+ # See also delete
+ # 2012-10-11, NumPy 1.8
+ warnings.warn(
+ "in the future insert will treat boolean arrays and "
+ "array-likes as a boolean index instead of casting it to "
+ "integer", FutureWarning, stacklevel=3)
+ indices = indices.astype(intp)
+ # Code after warning period:
+ #if obj.ndim != 1:
+ # raise ValueError('boolean array argument obj to insert '
+ # 'must be one dimensional')
+ #indices = np.flatnonzero(obj)
+ elif indices.ndim > 1:
+ raise ValueError(
+ "index array argument obj to insert must be one dimensional "
+ "or scalar")
+ if indices.size == 1:
+ index = indices.item()
+ if index < -N or index > N:
+ raise IndexError(
+ "index %i is out of bounds for axis %i with "
+ "size %i" % (obj, axis, N))
+ if (index < 0):
+ index += N
+
+ # There are some object array corner cases here, but we cannot avoid
+ # that:
+ values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype)
+ if indices.ndim == 0:
+ # broadcasting is very different here, since a[:,0,:] = ... behaves
+ # very differently from a[:,[0],:] = ...! This changes values so
+ # that it works like the second case.
(here a[:,0:1,:]) + values = np.moveaxis(values, 0, axis) + numnew = values.shape[axis] + newshape[axis] += numnew + new = empty(newshape, arr.dtype, arrorder) + slobj[axis] = slice(None, index) + new[tuple(slobj)] = arr[tuple(slobj)] + slobj[axis] = slice(index, index+numnew) + new[tuple(slobj)] = values + slobj[axis] = slice(index+numnew, None) + slobj2 = [slice(None)] * ndim + slobj2[axis] = slice(index, None) + new[tuple(slobj)] = arr[tuple(slobj2)] + if wrap: + return wrap(new) + return new + elif indices.size == 0 and not isinstance(obj, np.ndarray): + # Can safely cast the empty list to intp + indices = indices.astype(intp) + + indices[indices < 0] += N + + numnew = len(indices) + order = indices.argsort(kind='mergesort') # stable sort + indices[order] += np.arange(numnew) + + newshape[axis] += numnew + old_mask = ones(newshape[axis], dtype=bool) + old_mask[indices] = False + + new = empty(newshape, arr.dtype, arrorder) + slobj2 = [slice(None)]*ndim + slobj[axis] = indices + slobj2[axis] = old_mask + new[tuple(slobj)] = values + new[tuple(slobj2)] = arr + + if wrap: + return wrap(new) + return new + + +def _append_dispatcher(arr, values, axis=None): + return (arr, values) + + +@array_function_dispatch(_append_dispatcher) +def append(arr, values, axis=None): + """ + Append values to the end of an array. + + Parameters + ---------- + arr : array_like + Values are appended to a copy of this array. + values : array_like + These values are appended to a copy of `arr`. It must be of the + correct shape (the same shape as `arr`, excluding `axis`). If + `axis` is not specified, `values` can be any shape and will be + flattened before use. + axis : int, optional + The axis along which `values` are appended. If `axis` is not + given, both `arr` and `values` are flattened before use. + + Returns + ------- + append : ndarray + A copy of `arr` with `values` appended to `axis`. Note that + `append` does not occur in-place: a new array is allocated and + filled. If `axis` is None, `out` is a flattened array. + + See Also + -------- + insert : Insert elements into an array. + delete : Delete elements from an array. + + Examples + -------- + >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]]) + array([1, 2, 3, ..., 7, 8, 9]) + + When `axis` is specified, `values` must have the correct shape. + + >>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0) + array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + >>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0) + Traceback (most recent call last): + ... + ValueError: all the input arrays must have same number of dimensions, but + the array at index 0 has 2 dimension(s) and the array at index 1 has 1 + dimension(s) + + """ + arr = asanyarray(arr) + if axis is None: + if arr.ndim != 1: + arr = arr.ravel() + values = ravel(values) + axis = arr.ndim-1 + return concatenate((arr, values), axis=axis) + + +def _digitize_dispatcher(x, bins, right=None): + return (x, bins) + + +@array_function_dispatch(_digitize_dispatcher) +def digitize(x, bins, right=False): + """ + Return the indices of the bins to which each value in input array belongs. 
+
+ ========= ============= ============================
+ `right` order of bins returned index `i` satisfies
+ ========= ============= ============================
+ ``False`` increasing ``bins[i-1] <= x < bins[i]``
+ ``True`` increasing ``bins[i-1] < x <= bins[i]``
+ ``False`` decreasing ``bins[i-1] > x >= bins[i]``
+ ``True`` decreasing ``bins[i-1] >= x > bins[i]``
+ ========= ============= ============================
+
+ If values in `x` are beyond the bounds of `bins`, 0 or ``len(bins)`` is
+ returned as appropriate.
+
+ Parameters
+ ----------
+ x : array_like
+ Input array to be binned. Prior to NumPy 1.10.0, this array had to
+ be 1-dimensional, but can now have any shape.
+ bins : array_like
+ Array of bins. It has to be 1-dimensional and monotonic.
+ right : bool, optional
+ Indicating whether the intervals include the right or the left bin
+ edge. Default behavior is (right==False) indicating that the interval
+ does not include the right edge. Each interval is then closed on the
+ left and open on the right, i.e., bins[i-1] <= x < bins[i] is the
+ default behavior for monotonically increasing bins.
+
+ Returns
+ -------
+ indices : ndarray of ints
+ Output array of indices, of same shape as `x`.
+
+ Raises
+ ------
+ ValueError
+ If `bins` is not monotonic.
+ TypeError
+ If the type of the input is complex.
+
+ See Also
+ --------
+ bincount, histogram, unique, searchsorted
+
+ Notes
+ -----
+ If values in `x` are such that they fall outside the bin range,
+ attempting to index `bins` with the indices that `digitize` returns
+ will result in an IndexError.
+
+ .. versionadded:: 1.10.0
+
+ `np.digitize` is implemented in terms of `np.searchsorted`. This means
+ that a binary search is used to bin the values, which scales much better
+ for larger number of bins than the previous linear search. It also removes
+ the requirement for the input array to be 1-dimensional.
+
+ For monotonically _increasing_ `bins`, the following are equivalent::
+
+ np.digitize(x, bins, right=True)
+ np.searchsorted(bins, x, side='left')
+
+ Note that as the order of the arguments is reversed, the side must be too.
+ The `searchsorted` call is marginally faster, as it does not do any
+ monotonicity checks. Perhaps more importantly, it supports all dtypes.
+
+ Examples
+ --------
+ >>> x = np.array([0.2, 6.4, 3.0, 1.6])
+ >>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
+ >>> inds = np.digitize(x, bins)
+ >>> inds
+ array([1, 4, 3, 2])
+ >>> for n in range(x.size):
+ ... print(bins[inds[n]-1], "<=", x[n], "<", bins[inds[n]])
+ ...
+ 0.0 <= 0.2 < 1.0 + 4.0 <= 6.4 < 10.0 + 2.5 <= 3.0 < 4.0 + 1.0 <= 1.6 < 2.5 + + >>> x = np.array([1.2, 10.0, 12.4, 15.5, 20.]) + >>> bins = np.array([0, 5, 10, 15, 20]) + >>> np.digitize(x,bins,right=True) + array([1, 2, 3, 4, 4]) + >>> np.digitize(x,bins,right=False) + array([1, 3, 3, 4, 5]) + """ + x = _nx.asarray(x) + bins = _nx.asarray(bins) + + # here for compatibility, searchsorted below is happy to take this + if np.issubdtype(x.dtype, _nx.complexfloating): + raise TypeError("x may not be complex") + + mono = _monotonicity(bins) + if mono == 0: + raise ValueError("bins must be monotonically increasing or decreasing") + + # this is backwards because the arguments below are swapped + side = 'left' if right else 'right' + if mono == -1: + # reverse the bins, and invert the results + return len(bins) - _nx.searchsorted(bins[::-1], x, side=side) + else: + return _nx.searchsorted(bins, x, side=side) diff --git a/MLPY/Lib/site-packages/numpy/lib/function_base.pyi b/MLPY/Lib/site-packages/numpy/lib/function_base.pyi new file mode 100644 index 0000000000000000000000000000000000000000..e259f6522636cba026f477d5c32618e4d1774567 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/function_base.pyi @@ -0,0 +1,57 @@ +from typing import List + +from numpy import ( + vectorize as vectorize, +) + +from numpy.core.function_base import ( + add_newdoc as add_newdoc, +) + +from numpy.core.multiarray import ( + add_docstring as add_docstring, + bincount as bincount, +) +from numpy.core.umath import _add_newdoc_ufunc + +__all__: List[str] + +add_newdoc_ufunc = _add_newdoc_ufunc + +def rot90(m, k=..., axes = ...): ... +def flip(m, axis=...): ... +def iterable(y): ... +def average(a, axis=..., weights=..., returned=...): ... +def asarray_chkfinite(a, dtype=..., order=...): ... +def piecewise(x, condlist, funclist, *args, **kw): ... +def select(condlist, choicelist, default=...): ... +def copy(a, order=..., subok=...): ... +def gradient(f, *varargs, axis=..., edge_order=...): ... +def diff(a, n=..., axis=..., prepend = ..., append = ...): ... +def interp(x, xp, fp, left=..., right=..., period=...): ... +def angle(z, deg=...): ... +def unwrap(p, discont = ..., axis=..., *, period=...): ... +def sort_complex(a): ... +def trim_zeros(filt, trim=...): ... +def extract(condition, arr): ... +def place(arr, mask, vals): ... +def disp(mesg, device=..., linefeed=...): ... +def cov(m, y=..., rowvar=..., bias=..., ddof=..., fweights=..., aweights=..., *, dtype=...): ... +def corrcoef(x, y=..., rowvar=..., bias = ..., ddof = ..., *, dtype=...): ... +def blackman(M): ... +def bartlett(M): ... +def hanning(M): ... +def hamming(M): ... +def i0(x): ... +def kaiser(M, beta): ... +def sinc(x): ... +def msort(a): ... +def median(a, axis=..., out=..., overwrite_input=..., keepdims=...): ... +def percentile(a, q, axis=..., out=..., overwrite_input=..., interpolation=..., keepdims=...): ... +def quantile(a, q, axis=..., out=..., overwrite_input=..., interpolation=..., keepdims=...): ... +def trapz(y, x=..., dx=..., axis=...): ... +def meshgrid(*xi, copy=..., sparse=..., indexing=...): ... +def delete(arr, obj, axis=...): ... +def insert(arr, obj, values, axis=...): ... +def append(arr, values, axis=...): ... +def digitize(x, bins, right=...): ... 
diff --git a/MLPY/Lib/site-packages/numpy/lib/histograms.py b/MLPY/Lib/site-packages/numpy/lib/histograms.py new file mode 100644 index 0000000000000000000000000000000000000000..ea25185a79b671f7670e9a7e806b3965915cd997 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/histograms.py @@ -0,0 +1,1129 @@ +""" +Histogram-related functions +""" +import contextlib +import functools +import operator +import warnings + +import numpy as np +from numpy.core import overrides + +__all__ = ['histogram', 'histogramdd', 'histogram_bin_edges'] + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + +# range is a keyword argument to many functions, so save the builtin so they can +# use it. +_range = range + + +def _ptp(x): + """Peak-to-peak value of x. + + This implementation avoids the problem of signed integer arrays having a + peak-to-peak value that cannot be represented with the array's data type. + This function returns an unsigned value for signed integer arrays. + """ + return _unsigned_subtract(x.max(), x.min()) + + +def _hist_bin_sqrt(x, range): + """ + Square root histogram bin estimator. + + Bin width is inversely proportional to the data size. Used by many + programs for its simplicity. + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + """ + del range # unused + return _ptp(x) / np.sqrt(x.size) + + +def _hist_bin_sturges(x, range): + """ + Sturges histogram bin estimator. + + A very simplistic estimator based on the assumption of normality of + the data. This estimator has poor performance for non-normal data, + which becomes especially obvious for large data sets. The estimate + depends only on size of the data. + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + """ + del range # unused + return _ptp(x) / (np.log2(x.size) + 1.0) + + +def _hist_bin_rice(x, range): + """ + Rice histogram bin estimator. + + Another simple estimator with no normality assumption. It has better + performance for large data than Sturges, but tends to overestimate + the number of bins. The number of bins is proportional to the cube + root of data size (asymptotically optimal). The estimate depends + only on size of the data. + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + """ + del range # unused + return _ptp(x) / (2.0 * x.size ** (1.0 / 3)) + + +def _hist_bin_scott(x, range): + """ + Scott histogram bin estimator. + + The binwidth is proportional to the standard deviation of the data + and inversely proportional to the cube root of data size + (asymptotically optimal). + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + """ + del range # unused + return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x) + + +def _hist_bin_stone(x, range): + """ + Histogram bin estimator based on minimizing the estimated integrated squared error (ISE). 
+ + The number of bins is chosen by minimizing the estimated ISE against the unknown true distribution. + The ISE is estimated using cross-validation and can be regarded as a generalization of Scott's rule. + https://en.wikipedia.org/wiki/Histogram#Scott.27s_normal_reference_rule + + This paper by Stone appears to be the origination of this rule. + http://digitalassets.lib.berkeley.edu/sdtr/ucb/text/34.pdf + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + range : (float, float) + The lower and upper range of the bins. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + """ + + n = x.size + ptp_x = _ptp(x) + if n <= 1 or ptp_x == 0: + return 0 + + def jhat(nbins): + hh = ptp_x / nbins + p_k = np.histogram(x, bins=nbins, range=range)[0] / n + return (2 - (n + 1) * p_k.dot(p_k)) / hh + + nbins_upper_bound = max(100, int(np.sqrt(n))) + nbins = min(_range(1, nbins_upper_bound + 1), key=jhat) + if nbins == nbins_upper_bound: + warnings.warn("The number of bins estimated may be suboptimal.", + RuntimeWarning, stacklevel=3) + return ptp_x / nbins + + +def _hist_bin_doane(x, range): + """ + Doane's histogram bin estimator. + + Improved version of Sturges' formula which works better for + non-normal data. See + stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + """ + del range # unused + if x.size > 2: + sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3))) + sigma = np.std(x) + if sigma > 0.0: + # These three operations add up to + # g1 = np.mean(((x - np.mean(x)) / sigma)**3) + # but use only one temp array instead of three + temp = x - np.mean(x) + np.true_divide(temp, sigma, temp) + np.power(temp, 3, temp) + g1 = np.mean(temp) + return _ptp(x) / (1.0 + np.log2(x.size) + + np.log2(1.0 + np.absolute(g1) / sg1)) + return 0.0 + + +def _hist_bin_fd(x, range): + """ + The Freedman-Diaconis histogram bin estimator. + + The Freedman-Diaconis rule uses interquartile range (IQR) to + estimate binwidth. It is considered a variation of the Scott rule + with more robustness as the IQR is less affected by outliers than + the standard deviation. However, the IQR depends on fewer points + than the standard deviation, so it is less accurate, especially for + long tailed distributions. + + If the IQR is 0, this function returns 0 for the bin width. + Binwidth is inversely proportional to the cube root of data size + (asymptotically optimal). + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + """ + del range # unused + iqr = np.subtract(*np.percentile(x, [75, 25])) + return 2.0 * iqr * x.size ** (-1.0 / 3.0) + + +def _hist_bin_auto(x, range): + """ + Histogram bin estimator that uses the minimum width of the + Freedman-Diaconis and Sturges estimators if the FD bin width is non-zero. + If the bin width from the FD estimator is 0, the Sturges estimator is used. + + The FD estimator is usually the most robust method, but its width + estimate tends to be too large for small `x` and bad for data with limited + variance. 
The Sturges estimator is quite good for small (<1000) datasets + and is the default in the R language. This method gives good off-the-shelf + behaviour. + + .. versionchanged:: 1.15.0 + If there is limited variance the IQR can be 0, which results in the + FD bin width being 0 too. This is not a valid bin width, so + ``np.histogram_bin_edges`` chooses 1 bin instead, which may not be optimal. + If the IQR is 0, it's unlikely any variance-based estimators will be of + use, so we revert to the Sturges estimator, which only uses the size of the + dataset in its calculation. + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + + See Also + -------- + _hist_bin_fd, _hist_bin_sturges + """ + fd_bw = _hist_bin_fd(x, range) + sturges_bw = _hist_bin_sturges(x, range) + del range # unused + if fd_bw: + return min(fd_bw, sturges_bw) + else: + # limited variance, so we return a len dependent bw estimator + return sturges_bw + +# Private dict initialized at module load time +_hist_bin_selectors = {'stone': _hist_bin_stone, + 'auto': _hist_bin_auto, + 'doane': _hist_bin_doane, + 'fd': _hist_bin_fd, + 'rice': _hist_bin_rice, + 'scott': _hist_bin_scott, + 'sqrt': _hist_bin_sqrt, + 'sturges': _hist_bin_sturges} + + +def _ravel_and_check_weights(a, weights): + """ Check a and weights have matching shapes, and ravel both """ + a = np.asarray(a) + + # Ensure that the array is a "subtractable" dtype + if a.dtype == np.bool_: + warnings.warn("Converting input from {} to {} for compatibility." + .format(a.dtype, np.uint8), + RuntimeWarning, stacklevel=3) + a = a.astype(np.uint8) + + if weights is not None: + weights = np.asarray(weights) + if weights.shape != a.shape: + raise ValueError( + 'weights should have the same shape as a.') + weights = weights.ravel() + a = a.ravel() + return a, weights + + +def _get_outer_edges(a, range): + """ + Determine the outer bin edges to use, from either the data or the range + argument + """ + if range is not None: + first_edge, last_edge = range + if first_edge > last_edge: + raise ValueError( + 'max must be larger than min in range parameter.') + if not (np.isfinite(first_edge) and np.isfinite(last_edge)): + raise ValueError( + "supplied range of [{}, {}] is not finite".format(first_edge, last_edge)) + elif a.size == 0: + # handle empty arrays. Can't determine range, so use 0-1. 
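+        # (e.g. np.histogram([]) therefore produces bin edges spanning [0, 1])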
+ first_edge, last_edge = 0, 1 + else: + first_edge, last_edge = a.min(), a.max() + if not (np.isfinite(first_edge) and np.isfinite(last_edge)): + raise ValueError( + "autodetected range of [{}, {}] is not finite".format(first_edge, last_edge)) + + # expand empty range to avoid divide by zero + if first_edge == last_edge: + first_edge = first_edge - 0.5 + last_edge = last_edge + 0.5 + + return first_edge, last_edge + + +def _unsigned_subtract(a, b): + """ + Subtract two values where a >= b, and produce an unsigned result + + This is needed when finding the difference between the upper and lower + bound of an int16 histogram + """ + # coerce to a single type + signed_to_unsigned = { + np.byte: np.ubyte, + np.short: np.ushort, + np.intc: np.uintc, + np.int_: np.uint, + np.longlong: np.ulonglong + } + dt = np.result_type(a, b) + try: + dt = signed_to_unsigned[dt.type] + except KeyError: + return np.subtract(a, b, dtype=dt) + else: + # we know the inputs are integers, and we are deliberately casting + # signed to unsigned + return np.subtract(a, b, casting='unsafe', dtype=dt) + + +def _get_bin_edges(a, bins, range, weights): + """ + Computes the bins used internally by `histogram`. + + Parameters + ========== + a : ndarray + Ravelled data array + bins, range + Forwarded arguments from `histogram`. + weights : ndarray, optional + Ravelled weights array, or None + + Returns + ======= + bin_edges : ndarray + Array of bin edges + uniform_bins : (Number, Number, int): + The upper bound, lowerbound, and number of bins, used in the optimized + implementation of `histogram` that works on uniform bins. + """ + # parse the overloaded bins argument + n_equal_bins = None + bin_edges = None + + if isinstance(bins, str): + bin_name = bins + # if `bins` is a string for an automatic method, + # this will replace it with the number of bins calculated + if bin_name not in _hist_bin_selectors: + raise ValueError( + "{!r} is not a valid estimator for `bins`".format(bin_name)) + if weights is not None: + raise TypeError("Automated estimation of the number of " + "bins is not supported for weighted data") + + first_edge, last_edge = _get_outer_edges(a, range) + + # truncate the range if needed + if range is not None: + keep = (a >= first_edge) + keep &= (a <= last_edge) + if not np.logical_and.reduce(keep): + a = a[keep] + + if a.size == 0: + n_equal_bins = 1 + else: + # Do not call selectors on empty arrays + width = _hist_bin_selectors[bin_name](a, (first_edge, last_edge)) + if width: + n_equal_bins = int(np.ceil(_unsigned_subtract(last_edge, first_edge) / width)) + else: + # Width can be zero for some estimators, e.g. FD when + # the IQR of the data is zero. + n_equal_bins = 1 + + elif np.ndim(bins) == 0: + try: + n_equal_bins = operator.index(bins) + except TypeError as e: + raise TypeError( + '`bins` must be an integer, a string, or an array') from e + if n_equal_bins < 1: + raise ValueError('`bins` must be positive, when an integer') + + first_edge, last_edge = _get_outer_edges(a, range) + + elif np.ndim(bins) == 1: + bin_edges = np.asarray(bins) + if np.any(bin_edges[:-1] > bin_edges[1:]): + raise ValueError( + '`bins` must increase monotonically, when an array') + + else: + raise ValueError('`bins` must be 1d, when an array') + + if n_equal_bins is not None: + # gh-10322 means that type resolution rules are dependent on array + # shapes. To avoid this causing problems, we pick a type now and stick + # with it throughout. 
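+        # (integer edge types are promoted to float just below, so the
+        # linspace edges can represent fractional bin boundaries)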
+        bin_type = np.result_type(first_edge, last_edge, a)
+        if np.issubdtype(bin_type, np.integer):
+            bin_type = np.result_type(bin_type, float)
+
+        # bin edges must be computed
+        bin_edges = np.linspace(
+            first_edge, last_edge, n_equal_bins + 1,
+            endpoint=True, dtype=bin_type)
+        return bin_edges, (first_edge, last_edge, n_equal_bins)
+    else:
+        return bin_edges, None
+
+
+def _search_sorted_inclusive(a, v):
+    """
+    Like `searchsorted`, but where the last item in `v` is placed on the right.
+
+    In the context of a histogram, this makes the last bin edge inclusive.
+    """
+    return np.concatenate((
+        a.searchsorted(v[:-1], 'left'),
+        a.searchsorted(v[-1:], 'right')
+    ))
+
+
+def _histogram_bin_edges_dispatcher(a, bins=None, range=None, weights=None):
+    return (a, bins, weights)
+
+
+@array_function_dispatch(_histogram_bin_edges_dispatcher)
+def histogram_bin_edges(a, bins=10, range=None, weights=None):
+    r"""
+    Function to calculate only the edges of the bins used by the `histogram`
+    function.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data. The histogram is computed over the flattened array.
+    bins : int or sequence of scalars or str, optional
+        If `bins` is an int, it defines the number of equal-width
+        bins in the given range (10, by default). If `bins` is a
+        sequence, it defines the bin edges, including the rightmost
+        edge, allowing for non-uniform bin widths.
+
+        If `bins` is a string from the list below, `histogram_bin_edges` will
+        use the method chosen to calculate the optimal bin width and
+        consequently the number of bins (see `Notes` for more detail on
+        the estimators) from the data that falls within the requested
+        range. While the bin width will be optimal for the actual data
+        in the range, the number of bins will be computed to fill the
+        entire range, including the empty portions. For visualisation,
+        using the 'auto' option is suggested. Weighted data is not
+        supported for automated bin size selection.
+
+        'auto'
+            Maximum of the 'sturges' and 'fd' estimators. Provides good
+            all around performance.
+
+        'fd' (Freedman Diaconis Estimator)
+            Robust (resilient to outliers) estimator that takes into
+            account data variability and data size.
+
+        'doane'
+            An improved version of Sturges' estimator that works better
+            with non-normal datasets.
+
+        'scott'
+            Less robust estimator that takes into account data
+            variability and data size.
+
+        'stone'
+            Estimator based on leave-one-out cross-validation estimate of
+            the integrated squared error. Can be regarded as a
+            generalization of Scott's rule.
+
+        'rice'
+            Estimator does not take variability into account, only data
+            size. Commonly overestimates the number of bins required.
+
+        'sturges'
+            R's default method, only accounts for data size. Only
+            optimal for gaussian data and underestimates the number of
+            bins for large non-gaussian datasets.
+
+        'sqrt'
+            Square root (of data size) estimator, used by Excel and
+            other programs for its speed and simplicity.
+
+    range : (float, float), optional
+        The lower and upper range of the bins. If not provided, range
+        is simply ``(a.min(), a.max())``. Values outside the range are
+        ignored. The first element of the range must be less than or
+        equal to the second. `range` affects the automatic bin
+        computation as well. While bin width is computed to be optimal
+        based on the actual data within `range`, the bin count will fill
+        the entire range including portions containing no data.
+
+    weights : array_like, optional
+        An array of weights, of the same shape as `a`.
Each value in + `a` only contributes its associated weight towards the bin count + (instead of 1). This is currently not used by any of the bin estimators, + but may be in the future. + + Returns + ------- + bin_edges : array of dtype float + The edges to pass into `histogram` + + See Also + -------- + histogram + + Notes + ----- + The methods to estimate the optimal number of bins are well founded + in literature, and are inspired by the choices R provides for + histogram visualisation. Note that having the number of bins + proportional to :math:`n^{1/3}` is asymptotically optimal, which is + why it appears in most estimators. These are simply plug-in methods + that give good starting points for number of bins. In the equations + below, :math:`h` is the binwidth and :math:`n_h` is the number of + bins. All estimators that compute bin counts are recast to bin width + using the `ptp` of the data. The final bin count is obtained from + ``np.round(np.ceil(range / h))``. The final bin width is often less + than what is returned by the estimators below. + + 'auto' (maximum of the 'sturges' and 'fd' estimators) + A compromise to get a good value. For small datasets the Sturges + value will usually be chosen, while larger datasets will usually + default to FD. Avoids the overly conservative behaviour of FD + and Sturges for small and large datasets respectively. + Switchover point is usually :math:`a.size \approx 1000`. + + 'fd' (Freedman Diaconis Estimator) + .. math:: h = 2 \frac{IQR}{n^{1/3}} + + The binwidth is proportional to the interquartile range (IQR) + and inversely proportional to cube root of a.size. Can be too + conservative for small datasets, but is quite good for large + datasets. The IQR is very robust to outliers. + + 'scott' + .. math:: h = \sigma \sqrt[3]{\frac{24 * \sqrt{\pi}}{n}} + + The binwidth is proportional to the standard deviation of the + data and inversely proportional to cube root of ``x.size``. Can + be too conservative for small datasets, but is quite good for + large datasets. The standard deviation is not very robust to + outliers. Values are very similar to the Freedman-Diaconis + estimator in the absence of outliers. + + 'rice' + .. math:: n_h = 2n^{1/3} + + The number of bins is only proportional to cube root of + ``a.size``. It tends to overestimate the number of bins and it + does not take into account data variability. + + 'sturges' + .. math:: n_h = \log _{2}n+1 + + The number of bins is the base 2 log of ``a.size``. This + estimator assumes normality of data and is too conservative for + larger, non-normal datasets. This is the default method in R's + ``hist`` method. + + 'doane' + .. math:: n_h = 1 + \log_{2}(n) + + \log_{2}(1 + \frac{|g_1|}{\sigma_{g_1}}) + + g_1 = mean[(\frac{x - \mu}{\sigma})^3] + + \sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}} + + An improved version of Sturges' formula that produces better + estimates for non-normal datasets. This estimator attempts to + account for the skew of the data. + + 'sqrt' + .. math:: n_h = \sqrt n + + The simplest and fastest estimator. Only takes into account the + data size. + + Examples + -------- + >>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5]) + >>> np.histogram_bin_edges(arr, bins='auto', range=(0, 1)) + array([0. , 0.25, 0.5 , 0.75, 1. ]) + >>> np.histogram_bin_edges(arr, bins=2) + array([0. , 2.5, 5. 
]) + + For consistency with histogram, an array of pre-computed bins is + passed through unmodified: + + >>> np.histogram_bin_edges(arr, [1, 2]) + array([1, 2]) + + This function allows one set of bins to be computed, and reused across + multiple histograms: + + >>> shared_bins = np.histogram_bin_edges(arr, bins='auto') + >>> shared_bins + array([0., 1., 2., 3., 4., 5.]) + + >>> group_id = np.array([0, 1, 1, 0, 1, 1, 0, 1, 1]) + >>> hist_0, _ = np.histogram(arr[group_id == 0], bins=shared_bins) + >>> hist_1, _ = np.histogram(arr[group_id == 1], bins=shared_bins) + + >>> hist_0; hist_1 + array([1, 1, 0, 1, 0]) + array([2, 0, 1, 1, 2]) + + Which gives more easily comparable results than using separate bins for + each histogram: + + >>> hist_0, bins_0 = np.histogram(arr[group_id == 0], bins='auto') + >>> hist_1, bins_1 = np.histogram(arr[group_id == 1], bins='auto') + >>> hist_0; hist_1 + array([1, 1, 1]) + array([2, 1, 1, 2]) + >>> bins_0; bins_1 + array([0., 1., 2., 3.]) + array([0. , 1.25, 2.5 , 3.75, 5. ]) + + """ + a, weights = _ravel_and_check_weights(a, weights) + bin_edges, _ = _get_bin_edges(a, bins, range, weights) + return bin_edges + + +def _histogram_dispatcher( + a, bins=None, range=None, normed=None, weights=None, density=None): + return (a, bins, weights) + + +@array_function_dispatch(_histogram_dispatcher) +def histogram(a, bins=10, range=None, normed=None, weights=None, + density=None): + r""" + Compute the histogram of a dataset. + + Parameters + ---------- + a : array_like + Input data. The histogram is computed over the flattened array. + bins : int or sequence of scalars or str, optional + If `bins` is an int, it defines the number of equal-width + bins in the given range (10, by default). If `bins` is a + sequence, it defines a monotonically increasing array of bin edges, + including the rightmost edge, allowing for non-uniform bin widths. + + .. versionadded:: 1.11.0 + + If `bins` is a string, it defines the method used to calculate the + optimal bin width, as defined by `histogram_bin_edges`. + + range : (float, float), optional + The lower and upper range of the bins. If not provided, range + is simply ``(a.min(), a.max())``. Values outside the range are + ignored. The first element of the range must be less than or + equal to the second. `range` affects the automatic bin + computation as well. While bin width is computed to be optimal + based on the actual data within `range`, the bin count will fill + the entire range including portions containing no data. + normed : bool, optional + + .. deprecated:: 1.6.0 + + This is equivalent to the `density` argument, but produces incorrect + results for unequal bin widths. It should not be used. + + .. versionchanged:: 1.15.0 + DeprecationWarnings are actually emitted. + + weights : array_like, optional + An array of weights, of the same shape as `a`. Each value in + `a` only contributes its associated weight towards the bin count + (instead of 1). If `density` is True, the weights are + normalized, so that the integral of the density over the range + remains 1. + density : bool, optional + If ``False``, the result will contain the number of samples in + each bin. If ``True``, the result is the value of the + probability *density* function at the bin, normalized such that + the *integral* over the range is 1. Note that the sum of the + histogram values will not be equal to 1 unless bins of unity + width are chosen; it is not a probability *mass* function. + + Overrides the ``normed`` keyword if given. 
+ + Returns + ------- + hist : array + The values of the histogram. See `density` and `weights` for a + description of the possible semantics. + bin_edges : array of dtype float + Return the bin edges ``(length(hist)+1)``. + + + See Also + -------- + histogramdd, bincount, searchsorted, digitize, histogram_bin_edges + + Notes + ----- + All but the last (righthand-most) bin is half-open. In other words, + if `bins` is:: + + [1, 2, 3, 4] + + then the first bin is ``[1, 2)`` (including 1, but excluding 2) and + the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which + *includes* 4. + + + Examples + -------- + >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3]) + (array([0, 2, 1]), array([0, 1, 2, 3])) + >>> np.histogram(np.arange(4), bins=np.arange(5), density=True) + (array([0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4])) + >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3]) + (array([1, 4, 1]), array([0, 1, 2, 3])) + + >>> a = np.arange(5) + >>> hist, bin_edges = np.histogram(a, density=True) + >>> hist + array([0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5]) + >>> hist.sum() + 2.4999999999999996 + >>> np.sum(hist * np.diff(bin_edges)) + 1.0 + + .. versionadded:: 1.11.0 + + Automated Bin Selection Methods example, using 2 peak random data + with 2000 points: + + >>> import matplotlib.pyplot as plt + >>> rng = np.random.RandomState(10) # deterministic random data + >>> a = np.hstack((rng.normal(size=1000), + ... rng.normal(loc=5, scale=2, size=1000))) + >>> _ = plt.hist(a, bins='auto') # arguments are passed to np.histogram + >>> plt.title("Histogram with 'auto' bins") + Text(0.5, 1.0, "Histogram with 'auto' bins") + >>> plt.show() + + """ + a, weights = _ravel_and_check_weights(a, weights) + + bin_edges, uniform_bins = _get_bin_edges(a, bins, range, weights) + + # Histogram is an integer or a float array depending on the weights. + if weights is None: + ntype = np.dtype(np.intp) + else: + ntype = weights.dtype + + # We set a block size, as this allows us to iterate over chunks when + # computing histograms, to minimize memory usage. + BLOCK = 65536 + + # The fast path uses bincount, but that only works for certain types + # of weight + simple_weights = ( + weights is None or + np.can_cast(weights.dtype, np.double) or + np.can_cast(weights.dtype, complex) + ) + + if uniform_bins is not None and simple_weights: + # Fast algorithm for equal bins + # We now convert values of a to bin indices, under the assumption of + # equal bin widths (which is valid here). + first_edge, last_edge, n_equal_bins = uniform_bins + + # Initialize empty histogram + n = np.zeros(n_equal_bins, ntype) + + # Pre-compute histogram scaling factor + norm = n_equal_bins / _unsigned_subtract(last_edge, first_edge) + + # We iterate over blocks here for two reasons: the first is that for + # large arrays, it is actually faster (for example for a 10^8 array it + # is 2x as fast) and it results in a memory footprint 3x lower in the + # limit of large arrays. + for i in _range(0, len(a), BLOCK): + tmp_a = a[i:i+BLOCK] + if weights is None: + tmp_w = None + else: + tmp_w = weights[i:i + BLOCK] + + # Only include values in the right range + keep = (tmp_a >= first_edge) + keep &= (tmp_a <= last_edge) + if not np.logical_and.reduce(keep): + tmp_a = tmp_a[keep] + if tmp_w is not None: + tmp_w = tmp_w[keep] + + # This cast ensures no type promotions occur below, which gh-10322 + # make unpredictable. Getting it wrong leads to precision errors + # like gh-8123. 
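+            # Casting tmp_a to the dtype of bin_edges also keeps the edge
+            # comparisons further down in a single, consistent precision.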
+ tmp_a = tmp_a.astype(bin_edges.dtype, copy=False) + + # Compute the bin indices, and for values that lie exactly on + # last_edge we need to subtract one + f_indices = _unsigned_subtract(tmp_a, first_edge) * norm + indices = f_indices.astype(np.intp) + indices[indices == n_equal_bins] -= 1 + + # The index computation is not guaranteed to give exactly + # consistent results within ~1 ULP of the bin edges. + decrement = tmp_a < bin_edges[indices] + indices[decrement] -= 1 + # The last bin includes the right edge. The other bins do not. + increment = ((tmp_a >= bin_edges[indices + 1]) + & (indices != n_equal_bins - 1)) + indices[increment] += 1 + + # We now compute the histogram using bincount + if ntype.kind == 'c': + n.real += np.bincount(indices, weights=tmp_w.real, + minlength=n_equal_bins) + n.imag += np.bincount(indices, weights=tmp_w.imag, + minlength=n_equal_bins) + else: + n += np.bincount(indices, weights=tmp_w, + minlength=n_equal_bins).astype(ntype) + else: + # Compute via cumulative histogram + cum_n = np.zeros(bin_edges.shape, ntype) + if weights is None: + for i in _range(0, len(a), BLOCK): + sa = np.sort(a[i:i+BLOCK]) + cum_n += _search_sorted_inclusive(sa, bin_edges) + else: + zero = np.zeros(1, dtype=ntype) + for i in _range(0, len(a), BLOCK): + tmp_a = a[i:i+BLOCK] + tmp_w = weights[i:i+BLOCK] + sorting_index = np.argsort(tmp_a) + sa = tmp_a[sorting_index] + sw = tmp_w[sorting_index] + cw = np.concatenate((zero, sw.cumsum())) + bin_index = _search_sorted_inclusive(sa, bin_edges) + cum_n += cw[bin_index] + + n = np.diff(cum_n) + + # density overrides the normed keyword + if density is not None: + if normed is not None: + # 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6) + warnings.warn( + "The normed argument is ignored when density is provided. " + "In future passing both will result in an error.", + DeprecationWarning, stacklevel=3) + normed = None + + if density: + db = np.array(np.diff(bin_edges), float) + return n/db/n.sum(), bin_edges + elif normed: + # 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6) + warnings.warn( + "Passing `normed=True` on non-uniform bins has always been " + "broken, and computes neither the probability density " + "function nor the probability mass function. " + "The result is only correct if the bins are uniform, when " + "density=True will produce the same result anyway. " + "The argument will be removed in a future version of " + "numpy.", + np.VisibleDeprecationWarning, stacklevel=3) + + # this normalization is incorrect, but + db = np.array(np.diff(bin_edges), float) + return n/(n*db).sum(), bin_edges + else: + if normed is not None: + # 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6) + warnings.warn( + "Passing normed=False is deprecated, and has no effect. " + "Consider passing the density argument instead.", + DeprecationWarning, stacklevel=3) + return n, bin_edges + + +def _histogramdd_dispatcher(sample, bins=None, range=None, normed=None, + weights=None, density=None): + if hasattr(sample, 'shape'): # same condition as used in histogramdd + yield sample + else: + yield from sample + with contextlib.suppress(TypeError): + yield from bins + yield weights + + +@array_function_dispatch(_histogramdd_dispatcher) +def histogramdd(sample, bins=10, range=None, normed=None, weights=None, + density=None): + """ + Compute the multidimensional histogram of some data. + + Parameters + ---------- + sample : (N, D) array, or (D, N) array_like + The data to be histogrammed. 
+ + Note the unusual interpretation of sample when an array_like: + + * When an array, each row is a coordinate in a D-dimensional space - + such as ``histogramdd(np.array([p1, p2, p3]))``. + * When an array_like, each element is the list of values for single + coordinate - such as ``histogramdd((X, Y, Z))``. + + The first form should be preferred. + + bins : sequence or int, optional + The bin specification: + + * A sequence of arrays describing the monotonically increasing bin + edges along each dimension. + * The number of bins for each dimension (nx, ny, ... =bins) + * The number of bins for all dimensions (nx=ny=...=bins). + + range : sequence, optional + A sequence of length D, each an optional (lower, upper) tuple giving + the outer bin edges to be used if the edges are not given explicitly in + `bins`. + An entry of None in the sequence results in the minimum and maximum + values being used for the corresponding dimension. + The default, None, is equivalent to passing a tuple of D None values. + density : bool, optional + If False, the default, returns the number of samples in each bin. + If True, returns the probability *density* function at the bin, + ``bin_count / sample_count / bin_volume``. + normed : bool, optional + An alias for the density argument that behaves identically. To avoid + confusion with the broken normed argument to `histogram`, `density` + should be preferred. + weights : (N,) array_like, optional + An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`. + Weights are normalized to 1 if normed is True. If normed is False, + the values of the returned histogram are equal to the sum of the + weights belonging to the samples falling into each bin. + + Returns + ------- + H : ndarray + The multidimensional histogram of sample x. See normed and weights + for the different possible semantics. + edges : list + A list of D arrays describing the bin edges for each dimension. + + See Also + -------- + histogram: 1-D histogram + histogram2d: 2-D histogram + + Examples + -------- + >>> r = np.random.randn(100,3) + >>> H, edges = np.histogramdd(r, bins = (5, 8, 4)) + >>> H.shape, edges[0].size, edges[1].size, edges[2].size + ((5, 8, 4), 6, 9, 5) + + """ + + try: + # Sample is an ND-array. + N, D = sample.shape + except (AttributeError, ValueError): + # Sample is a sequence of 1D arrays. 
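+        # (e.g. histogramdd((X, Y)) lands here: np.atleast_2d gives shape
+        # (2, N) and the transpose produces the (N, D) layout used below)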
+ sample = np.atleast_2d(sample).T + N, D = sample.shape + + nbin = np.empty(D, int) + edges = D*[None] + dedges = D*[None] + if weights is not None: + weights = np.asarray(weights) + + try: + M = len(bins) + if M != D: + raise ValueError( + 'The dimension of bins must be equal to the dimension of the ' + ' sample x.') + except TypeError: + # bins is an integer + bins = D*[bins] + + # normalize the range argument + if range is None: + range = (None,) * D + elif len(range) != D: + raise ValueError('range argument must have one entry per dimension') + + # Create edge arrays + for i in _range(D): + if np.ndim(bins[i]) == 0: + if bins[i] < 1: + raise ValueError( + '`bins[{}]` must be positive, when an integer'.format(i)) + smin, smax = _get_outer_edges(sample[:,i], range[i]) + try: + n = operator.index(bins[i]) + + except TypeError as e: + raise TypeError( + "`bins[{}]` must be an integer, when a scalar".format(i) + ) from e + + edges[i] = np.linspace(smin, smax, n + 1) + elif np.ndim(bins[i]) == 1: + edges[i] = np.asarray(bins[i]) + if np.any(edges[i][:-1] > edges[i][1:]): + raise ValueError( + '`bins[{}]` must be monotonically increasing, when an array' + .format(i)) + else: + raise ValueError( + '`bins[{}]` must be a scalar or 1d array'.format(i)) + + nbin[i] = len(edges[i]) + 1 # includes an outlier on each end + dedges[i] = np.diff(edges[i]) + + # Compute the bin number each sample falls into. + Ncount = tuple( + # avoid np.digitize to work around gh-11022 + np.searchsorted(edges[i], sample[:, i], side='right') + for i in _range(D) + ) + + # Using digitize, values that fall on an edge are put in the right bin. + # For the rightmost bin, we want values equal to the right edge to be + # counted in the last bin, and not as an outlier. + for i in _range(D): + # Find which points are on the rightmost edge. + on_edge = (sample[:, i] == edges[i][-1]) + # Shift these points one bin to the left. + Ncount[i][on_edge] -= 1 + + # Compute the sample indices in the flattened histogram matrix. + # This raises an error if the array is too large. + xy = np.ravel_multi_index(Ncount, nbin) + + # Compute the number of repetitions in xy and assign it to the + # flattened histmat. + hist = np.bincount(xy, weights, minlength=nbin.prod()) + + # Shape into a proper matrix + hist = hist.reshape(nbin) + + # This preserves the (bad) behavior observed in gh-7845, for now. + hist = hist.astype(float, casting='safe') + + # Remove outliers (indices 0 and -1 for each dimension). 
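+    # (each axis gained one out-of-range bin on both ends above, since
+    # nbin[i] = len(edges[i]) + 1, so slicing 1:-1 keeps only in-range counts)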
+ core = D*(slice(1, -1),) + hist = hist[core] + + # handle the aliasing normed argument + if normed is None: + if density is None: + density = False + elif density is None: + # an explicit normed argument was passed, alias it to the new name + density = normed + else: + raise TypeError("Cannot specify both 'normed' and 'density'") + + if density: + # calculate the probability density function + s = hist.sum() + for i in _range(D): + shape = np.ones(D, int) + shape[i] = nbin[i] - 2 + hist = hist / dedges[i].reshape(shape) + hist /= s + + if (hist.shape != nbin - 2).any(): + raise RuntimeError( + "Internal Shape Error") + return hist, edges diff --git a/MLPY/Lib/site-packages/numpy/lib/histograms.pyi b/MLPY/Lib/site-packages/numpy/lib/histograms.pyi new file mode 100644 index 0000000000000000000000000000000000000000..1b6c856e948e7bb2c71ddff546a835d7cdb99eca --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/histograms.pyi @@ -0,0 +1,7 @@ +from typing import List + +__all__: List[str] + +def histogram_bin_edges(a, bins=..., range=..., weights=...): ... +def histogram(a, bins=..., range=..., normed=..., weights=..., density=...): ... +def histogramdd(sample, bins=..., range=..., normed=..., weights=..., density=...): ... diff --git a/MLPY/Lib/site-packages/numpy/lib/index_tricks.py b/MLPY/Lib/site-packages/numpy/lib/index_tricks.py new file mode 100644 index 0000000000000000000000000000000000000000..b2645225014b27c50eb394d87e6875c5b4467bff --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/index_tricks.py @@ -0,0 +1,1013 @@ +import functools +import sys +import math +import warnings + +import numpy.core.numeric as _nx +from numpy.core.numeric import ( + asarray, ScalarType, array, alltrue, cumprod, arange, ndim +) +from numpy.core.numerictypes import find_common_type, issubdtype + +import numpy.matrixlib as matrixlib +from .function_base import diff +from numpy.core.multiarray import ravel_multi_index, unravel_index +from numpy.core.overrides import set_module +from numpy.core import overrides, linspace +from numpy.lib.stride_tricks import as_strided + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +__all__ = [ + 'ravel_multi_index', 'unravel_index', 'mgrid', 'ogrid', 'r_', 'c_', + 's_', 'index_exp', 'ix_', 'ndenumerate', 'ndindex', 'fill_diagonal', + 'diag_indices', 'diag_indices_from' +] + + +def _ix__dispatcher(*args): + return args + + +@array_function_dispatch(_ix__dispatcher) +def ix_(*args): + """ + Construct an open mesh from multiple sequences. + + This function takes N 1-D sequences and returns N outputs with N + dimensions each, such that the shape is 1 in all but one dimension + and the dimension with the non-unit shape value cycles through all + N dimensions. + + Using `ix_` one can quickly construct index arrays that will index + the cross product. ``a[np.ix_([1,3],[2,5])]`` returns the array + ``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``. + + Parameters + ---------- + args : 1-D sequences + Each sequence should be of integer or boolean type. + Boolean sequences will be interpreted as boolean masks for the + corresponding dimension (equivalent to passing in + ``np.nonzero(boolean_sequence)``). + + Returns + ------- + out : tuple of ndarrays + N arrays with N dimensions each, with N the number of input + sequences. Together these arrays form an open mesh. 
+ + See Also + -------- + ogrid, mgrid, meshgrid + + Examples + -------- + >>> a = np.arange(10).reshape(2, 5) + >>> a + array([[0, 1, 2, 3, 4], + [5, 6, 7, 8, 9]]) + >>> ixgrid = np.ix_([0, 1], [2, 4]) + >>> ixgrid + (array([[0], + [1]]), array([[2, 4]])) + >>> ixgrid[0].shape, ixgrid[1].shape + ((2, 1), (1, 2)) + >>> a[ixgrid] + array([[2, 4], + [7, 9]]) + + >>> ixgrid = np.ix_([True, True], [2, 4]) + >>> a[ixgrid] + array([[2, 4], + [7, 9]]) + >>> ixgrid = np.ix_([True, True], [False, False, True, False, True]) + >>> a[ixgrid] + array([[2, 4], + [7, 9]]) + + """ + out = [] + nd = len(args) + for k, new in enumerate(args): + if not isinstance(new, _nx.ndarray): + new = asarray(new) + if new.size == 0: + # Explicitly type empty arrays to avoid float default + new = new.astype(_nx.intp) + if new.ndim != 1: + raise ValueError("Cross index must be 1 dimensional") + if issubdtype(new.dtype, _nx.bool_): + new, = new.nonzero() + new = new.reshape((1,)*k + (new.size,) + (1,)*(nd-k-1)) + out.append(new) + return tuple(out) + + +class nd_grid: + """ + Construct a multi-dimensional "meshgrid". + + ``grid = nd_grid()`` creates an instance which will return a mesh-grid + when indexed. The dimension and number of the output arrays are equal + to the number of indexing dimensions. If the step length is not a + complex number, then the stop is not inclusive. + + However, if the step length is a **complex number** (e.g. 5j), then the + integer part of its magnitude is interpreted as specifying the + number of points to create between the start and stop values, where + the stop value **is inclusive**. + + If instantiated with an argument of ``sparse=True``, the mesh-grid is + open (or not fleshed out) so that only one-dimension of each returned + argument is greater than 1. + + Parameters + ---------- + sparse : bool, optional + Whether the grid is sparse or not. Default is False. + + Notes + ----- + Two instances of `nd_grid` are made available in the NumPy namespace, + `mgrid` and `ogrid`, approximately defined as:: + + mgrid = nd_grid(sparse=False) + ogrid = nd_grid(sparse=True) + + Users should use these pre-defined instances instead of using `nd_grid` + directly. 
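+
+    Examples
+    --------
+    A minimal sketch (per the Notes above, ``ogrid`` is approximately
+    ``nd_grid(sparse=True)``):
+
+    >>> sparse_grid = np.lib.index_tricks.nd_grid(sparse=True)
+    >>> sparse_grid[0:3, 0:2]
+    [array([[0],
+           [1],
+           [2]]), array([[0, 1]])]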
+ """ + + def __init__(self, sparse=False): + self.sparse = sparse + + def __getitem__(self, key): + try: + size = [] + typ = int + for k in range(len(key)): + step = key[k].step + start = key[k].start + if start is None: + start = 0 + if step is None: + step = 1 + if isinstance(step, (_nx.complexfloating, complex)): + size.append(int(abs(step))) + typ = float + else: + size.append( + int(math.ceil((key[k].stop - start)/(step*1.0)))) + if (isinstance(step, (_nx.floating, float)) or + isinstance(start, (_nx.floating, float)) or + isinstance(key[k].stop, (_nx.floating, float))): + typ = float + if self.sparse: + nn = [_nx.arange(_x, dtype=_t) + for _x, _t in zip(size, (typ,)*len(size))] + else: + nn = _nx.indices(size, typ) + for k in range(len(size)): + step = key[k].step + start = key[k].start + if start is None: + start = 0 + if step is None: + step = 1 + if isinstance(step, (_nx.complexfloating, complex)): + step = int(abs(step)) + if step != 1: + step = (key[k].stop - start)/float(step-1) + nn[k] = (nn[k]*step+start) + if self.sparse: + slobj = [_nx.newaxis]*len(size) + for k in range(len(size)): + slobj[k] = slice(None, None) + nn[k] = nn[k][tuple(slobj)] + slobj[k] = _nx.newaxis + return nn + except (IndexError, TypeError): + step = key.step + stop = key.stop + start = key.start + if start is None: + start = 0 + if isinstance(step, (_nx.complexfloating, complex)): + step = abs(step) + length = int(step) + if step != 1: + step = (key.stop-start)/float(step-1) + stop = key.stop + step + return _nx.arange(0, length, 1, float)*step + start + else: + return _nx.arange(start, stop, step) + + +class MGridClass(nd_grid): + """ + `nd_grid` instance which returns a dense multi-dimensional "meshgrid". + + An instance of `numpy.lib.index_tricks.nd_grid` which returns an dense + (or fleshed out) mesh-grid when indexed, so that each returned argument + has the same shape. The dimensions and number of the output arrays are + equal to the number of indexing dimensions. If the step length is not a + complex number, then the stop is not inclusive. + + However, if the step length is a **complex number** (e.g. 5j), then + the integer part of its magnitude is interpreted as specifying the + number of points to create between the start and stop values, where + the stop value **is inclusive**. + + Returns + ------- + mesh-grid `ndarrays` all of the same dimensions + + See Also + -------- + numpy.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects + ogrid : like mgrid but returns open (not fleshed out) mesh grids + r_ : array concatenator + + Examples + -------- + >>> np.mgrid[0:5,0:5] + array([[[0, 0, 0, 0, 0], + [1, 1, 1, 1, 1], + [2, 2, 2, 2, 2], + [3, 3, 3, 3, 3], + [4, 4, 4, 4, 4]], + [[0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4]]]) + >>> np.mgrid[-1:1:5j] + array([-1. , -0.5, 0. , 0.5, 1. ]) + + """ + + def __init__(self): + super().__init__(sparse=False) + + +mgrid = MGridClass() + + +class OGridClass(nd_grid): + """ + `nd_grid` instance which returns an open multi-dimensional "meshgrid". + + An instance of `numpy.lib.index_tricks.nd_grid` which returns an open + (i.e. not fleshed out) mesh-grid when indexed, so that only one dimension + of each returned array is greater than 1. The dimension and number of the + output arrays are equal to the number of indexing dimensions. If the step + length is not a complex number, then the stop is not inclusive. + + However, if the step length is a **complex number** (e.g. 
5j), then + the integer part of its magnitude is interpreted as specifying the + number of points to create between the start and stop values, where + the stop value **is inclusive**. + + Returns + ------- + mesh-grid + `ndarrays` with only one dimension not equal to 1 + + See Also + -------- + np.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects + mgrid : like `ogrid` but returns dense (or fleshed out) mesh grids + r_ : array concatenator + + Examples + -------- + >>> from numpy import ogrid + >>> ogrid[-1:1:5j] + array([-1. , -0.5, 0. , 0.5, 1. ]) + >>> ogrid[0:5,0:5] + [array([[0], + [1], + [2], + [3], + [4]]), array([[0, 1, 2, 3, 4]])] + + """ + + def __init__(self): + super().__init__(sparse=True) + + +ogrid = OGridClass() + + +class AxisConcatenator: + """ + Translates slice objects to concatenation along an axis. + + For detailed documentation on usage, see `r_`. + """ + # allow ma.mr_ to override this + concatenate = staticmethod(_nx.concatenate) + makemat = staticmethod(matrixlib.matrix) + + def __init__(self, axis=0, matrix=False, ndmin=1, trans1d=-1): + self.axis = axis + self.matrix = matrix + self.trans1d = trans1d + self.ndmin = ndmin + + def __getitem__(self, key): + # handle matrix builder syntax + if isinstance(key, str): + frame = sys._getframe().f_back + mymat = matrixlib.bmat(key, frame.f_globals, frame.f_locals) + return mymat + + if not isinstance(key, tuple): + key = (key,) + + # copy attributes, since they can be overridden in the first argument + trans1d = self.trans1d + ndmin = self.ndmin + matrix = self.matrix + axis = self.axis + + objs = [] + scalars = [] + arraytypes = [] + scalartypes = [] + + for k, item in enumerate(key): + scalar = False + if isinstance(item, slice): + step = item.step + start = item.start + stop = item.stop + if start is None: + start = 0 + if step is None: + step = 1 + if isinstance(step, (_nx.complexfloating, complex)): + size = int(abs(step)) + newobj = linspace(start, stop, num=size) + else: + newobj = _nx.arange(start, stop, step) + if ndmin > 1: + newobj = array(newobj, copy=False, ndmin=ndmin) + if trans1d != -1: + newobj = newobj.swapaxes(-1, trans1d) + elif isinstance(item, str): + if k != 0: + raise ValueError("special directives must be the " + "first entry.") + if item in ('r', 'c'): + matrix = True + col = (item == 'c') + continue + if ',' in item: + vec = item.split(',') + try: + axis, ndmin = [int(x) for x in vec[:2]] + if len(vec) == 3: + trans1d = int(vec[2]) + continue + except Exception as e: + raise ValueError( + "unknown special directive {!r}".format(item) + ) from e + try: + axis = int(item) + continue + except (ValueError, TypeError) as e: + raise ValueError("unknown special directive") from e + elif type(item) in ScalarType: + newobj = array(item, ndmin=ndmin) + scalars.append(len(objs)) + scalar = True + scalartypes.append(newobj.dtype) + else: + item_ndim = ndim(item) + newobj = array(item, copy=False, subok=True, ndmin=ndmin) + if trans1d != -1 and item_ndim < ndmin: + k2 = ndmin - item_ndim + k1 = trans1d + if k1 < 0: + k1 += k2 + 1 + defaxes = list(range(ndmin)) + axes = defaxes[:k1] + defaxes[k2:] + defaxes[k1:k2] + newobj = newobj.transpose(axes) + objs.append(newobj) + if not scalar and isinstance(newobj, _nx.ndarray): + arraytypes.append(newobj.dtype) + + # Ensure that scalars won't up-cast unless warranted + final_dtype = find_common_type(arraytypes, scalartypes) + if final_dtype is not None: + for k in scalars: + objs[k] = objs[k].astype(final_dtype) + + res = self.concatenate(tuple(objs), 
axis=axis) + + if matrix: + oldndim = res.ndim + res = self.makemat(res) + if oldndim == 1 and col: + res = res.T + return res + + def __len__(self): + return 0 + +# separate classes are used here instead of just making r_ = concatentor(0), +# etc. because otherwise we couldn't get the doc string to come out right +# in help(r_) + + +class RClass(AxisConcatenator): + """ + Translates slice objects to concatenation along the first axis. + + This is a simple way to build up arrays quickly. There are two use cases. + + 1. If the index expression contains comma separated arrays, then stack + them along their first axis. + 2. If the index expression contains slice notation or scalars then create + a 1-D array with a range indicated by the slice notation. + + If slice notation is used, the syntax ``start:stop:step`` is equivalent + to ``np.arange(start, stop, step)`` inside of the brackets. However, if + ``step`` is an imaginary number (i.e. 100j) then its integer portion is + interpreted as a number-of-points desired and the start and stop are + inclusive. In other words ``start:stop:stepj`` is interpreted as + ``np.linspace(start, stop, step, endpoint=1)`` inside of the brackets. + After expansion of slice notation, all comma separated sequences are + concatenated together. + + Optional character strings placed as the first element of the index + expression can be used to change the output. The strings 'r' or 'c' result + in matrix output. If the result is 1-D and 'r' is specified a 1 x N (row) + matrix is produced. If the result is 1-D and 'c' is specified, then a N x 1 + (column) matrix is produced. If the result is 2-D then both provide the + same matrix result. + + A string integer specifies which axis to stack multiple comma separated + arrays along. A string of two comma-separated integers allows indication + of the minimum number of dimensions to force each entry into as the + second integer (the axis to concatenate along is still the first integer). + + A string with three comma-separated integers allows specification of the + axis to concatenate along, the minimum number of dimensions to force the + entries to, and which axis should contain the start of the arrays which + are less than the specified number of dimensions. In other words the third + integer allows you to specify where the 1's should be placed in the shape + of the arrays that have their shapes upgraded. By default, they are placed + in the front of the shape tuple. The third argument allows you to specify + where the start of the array should be instead. Thus, a third argument of + '0' would place the 1's at the end of the array shape. Negative integers + specify where in the new shape tuple the last dimension of upgraded arrays + should be placed, so the default is '-1'. + + Parameters + ---------- + Not a function, so takes no parameters + + + Returns + ------- + A concatenated ndarray or matrix. + + See Also + -------- + concatenate : Join a sequence of arrays along an existing axis. + c_ : Translates slice objects to concatenation along the second axis. + + Examples + -------- + >>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])] + array([1, 2, 3, ..., 4, 5, 6]) + >>> np.r_[-1:1:6j, [0]*3, 5, 6] + array([-1. , -0.6, -0.2, 0.2, 0.6, 1. , 0. , 0. , 0. , 5. , 6. ]) + + String integers specify the axis to concatenate along or the minimum + number of dimensions to force entries into. 
+ + >>> a = np.array([[0, 1, 2], [3, 4, 5]]) + >>> np.r_['-1', a, a] # concatenate along last axis + array([[0, 1, 2, 0, 1, 2], + [3, 4, 5, 3, 4, 5]]) + >>> np.r_['0,2', [1,2,3], [4,5,6]] # concatenate along first axis, dim>=2 + array([[1, 2, 3], + [4, 5, 6]]) + + >>> np.r_['0,2,0', [1,2,3], [4,5,6]] + array([[1], + [2], + [3], + [4], + [5], + [6]]) + >>> np.r_['1,2,0', [1,2,3], [4,5,6]] + array([[1, 4], + [2, 5], + [3, 6]]) + + Using 'r' or 'c' as a first string argument creates a matrix. + + >>> np.r_['r',[1,2,3], [4,5,6]] + matrix([[1, 2, 3, 4, 5, 6]]) + + """ + + def __init__(self): + AxisConcatenator.__init__(self, 0) + + +r_ = RClass() + + +class CClass(AxisConcatenator): + """ + Translates slice objects to concatenation along the second axis. + + This is short-hand for ``np.r_['-1,2,0', index expression]``, which is + useful because of its common occurrence. In particular, arrays will be + stacked along their last axis after being upgraded to at least 2-D with + 1's post-pended to the shape (column vectors made out of 1-D arrays). + + See Also + -------- + column_stack : Stack 1-D arrays as columns into a 2-D array. + r_ : For more detailed documentation. + + Examples + -------- + >>> np.c_[np.array([1,2,3]), np.array([4,5,6])] + array([[1, 4], + [2, 5], + [3, 6]]) + >>> np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])] + array([[1, 2, 3, ..., 4, 5, 6]]) + + """ + + def __init__(self): + AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0) + + +c_ = CClass() + + +@set_module('numpy') +class ndenumerate: + """ + Multidimensional index iterator. + + Return an iterator yielding pairs of array coordinates and values. + + Parameters + ---------- + arr : ndarray + Input array. + + See Also + -------- + ndindex, flatiter + + Examples + -------- + >>> a = np.array([[1, 2], [3, 4]]) + >>> for index, x in np.ndenumerate(a): + ... print(index, x) + (0, 0) 1 + (0, 1) 2 + (1, 0) 3 + (1, 1) 4 + + """ + + def __init__(self, arr): + self.iter = asarray(arr).flat + + def __next__(self): + """ + Standard iterator method, returns the index tuple and array value. + + Returns + ------- + coords : tuple of ints + The indices of the current iteration. + val : scalar + The array element of the current iteration. + + """ + return self.iter.coords, next(self.iter) + + def __iter__(self): + return self + + +@set_module('numpy') +class ndindex: + """ + An N-dimensional iterator object to index arrays. + + Given the shape of an array, an `ndindex` instance iterates over + the N-dimensional index of the array. At each iteration a tuple + of indices is returned, the last dimension is iterated over first. + + Parameters + ---------- + shape : ints, or a single tuple of ints + The size of each dimension of the array can be passed as + individual parameters or as the elements of a tuple. + + See Also + -------- + ndenumerate, flatiter + + Examples + -------- + # dimensions as individual arguments + >>> for index in np.ndindex(3, 2, 1): + ... print(index) + (0, 0, 0) + (0, 1, 0) + (1, 0, 0) + (1, 1, 0) + (2, 0, 0) + (2, 1, 0) + + # same dimensions - but in a tuple (3, 2, 1) + >>> for index in np.ndindex((3, 2, 1)): + ... 
print(index) + (0, 0, 0) + (0, 1, 0) + (1, 0, 0) + (1, 1, 0) + (2, 0, 0) + (2, 1, 0) + + """ + + def __init__(self, *shape): + if len(shape) == 1 and isinstance(shape[0], tuple): + shape = shape[0] + x = as_strided(_nx.zeros(1), shape=shape, + strides=_nx.zeros_like(shape)) + self._it = _nx.nditer(x, flags=['multi_index', 'zerosize_ok'], + order='C') + + def __iter__(self): + return self + + def ndincr(self): + """ + Increment the multi-dimensional index by one. + + This method is for backward compatibility only: do not use. + + .. deprecated:: 1.20.0 + This method has been advised against since numpy 1.8.0, but only + started emitting DeprecationWarning as of this version. + """ + # NumPy 1.20.0, 2020-09-08 + warnings.warn( + "`ndindex.ndincr()` is deprecated, use `next(ndindex)` instead", + DeprecationWarning, stacklevel=2) + next(self) + + def __next__(self): + """ + Standard iterator method, updates the index and returns the index + tuple. + + Returns + ------- + val : tuple of ints + Returns a tuple containing the indices of the current + iteration. + + """ + next(self._it) + return self._it.multi_index + + +# You can do all this with slice() plus a few special objects, +# but there's a lot to remember. This version is simpler because +# it uses the standard array indexing syntax. +# +# Written by Konrad Hinsen +# last revision: 1999-7-23 +# +# Cosmetic changes by T. Oliphant 2001 +# +# + +class IndexExpression: + """ + A nicer way to build up index tuples for arrays. + + .. note:: + Use one of the two predefined instances `index_exp` or `s_` + rather than directly using `IndexExpression`. + + For any index combination, including slicing and axis insertion, + ``a[indices]`` is the same as ``a[np.index_exp[indices]]`` for any + array `a`. However, ``np.index_exp[indices]`` can be used anywhere + in Python code and returns a tuple of slice objects that can be + used in the construction of complex index expressions. + + Parameters + ---------- + maketuple : bool + If True, always returns a tuple. + + See Also + -------- + index_exp : Predefined instance that always returns a tuple: + `index_exp = IndexExpression(maketuple=True)`. + s_ : Predefined instance without tuple conversion: + `s_ = IndexExpression(maketuple=False)`. + + Notes + ----- + You can do all this with `slice()` plus a few special objects, + but there's a lot to remember and this version is simpler because + it uses the standard array indexing syntax. + + Examples + -------- + >>> np.s_[2::2] + slice(2, None, 2) + >>> np.index_exp[2::2] + (slice(2, None, 2),) + + >>> np.array([0, 1, 2, 3, 4])[np.s_[2::2]] + array([2, 4]) + + """ + + def __init__(self, maketuple): + self.maketuple = maketuple + + def __getitem__(self, item): + if self.maketuple and not isinstance(item, tuple): + return (item,) + else: + return item + + +index_exp = IndexExpression(maketuple=True) +s_ = IndexExpression(maketuple=False) + +# End contribution from Konrad. + + +# The following functions complement those in twodim_base, but are +# applicable to N-dimensions. + + +def _fill_diagonal_dispatcher(a, val, wrap=None): + return (a,) + + +@array_function_dispatch(_fill_diagonal_dispatcher) +def fill_diagonal(a, val, wrap=False): + """Fill the main diagonal of the given array of any dimensionality. + + For an array `a` with ``a.ndim >= 2``, the diagonal is the list of + locations with indices ``a[i, ..., i]`` all identical. This function + modifies the input array in-place, it does not return a value. 
+
+    Parameters
+    ----------
+    a : array, at least 2-D.
+        Array whose diagonal is to be filled; it gets modified in-place.
+
+    val : scalar or array_like
+        Value(s) to write on the diagonal. If `val` is scalar, the value is
+        written along the diagonal. If array-like, the flattened `val` is
+        written along the diagonal, repeating if necessary to fill all
+        diagonal entries.
+
+    wrap : bool
+        For tall matrices in NumPy versions up to 1.6.2, the diagonal
+        "wrapped" after N columns. Set this option to True to get that
+        behavior. This affects only tall matrices.
+
+    See also
+    --------
+    diag_indices, diag_indices_from
+
+    Notes
+    -----
+    .. versionadded:: 1.4.0
+
+    This functionality can be obtained via `diag_indices`, but internally
+    this version uses a much faster implementation that never constructs the
+    indices and uses simple slicing.
+
+    Examples
+    --------
+    >>> a = np.zeros((3, 3), int)
+    >>> np.fill_diagonal(a, 5)
+    >>> a
+    array([[5, 0, 0],
+           [0, 5, 0],
+           [0, 0, 5]])
+
+    The same function can operate on a 4-D array:
+
+    >>> a = np.zeros((3, 3, 3, 3), int)
+    >>> np.fill_diagonal(a, 4)
+
+    We only show a few blocks for clarity:
+
+    >>> a[0, 0]
+    array([[4, 0, 0],
+           [0, 0, 0],
+           [0, 0, 0]])
+    >>> a[1, 1]
+    array([[0, 0, 0],
+           [0, 4, 0],
+           [0, 0, 0]])
+    >>> a[2, 2]
+    array([[0, 0, 0],
+           [0, 0, 0],
+           [0, 0, 4]])
+
+    The wrap option affects only tall matrices:
+
+    >>> # tall matrices no wrap
+    >>> a = np.zeros((5, 3), int)
+    >>> np.fill_diagonal(a, 4)
+    >>> a
+    array([[4, 0, 0],
+           [0, 4, 0],
+           [0, 0, 4],
+           [0, 0, 0],
+           [0, 0, 0]])
+
+    >>> # tall matrices wrap
+    >>> a = np.zeros((5, 3), int)
+    >>> np.fill_diagonal(a, 4, wrap=True)
+    >>> a
+    array([[4, 0, 0],
+           [0, 4, 0],
+           [0, 0, 4],
+           [0, 0, 0],
+           [4, 0, 0]])
+
+    >>> # wide matrices
+    >>> a = np.zeros((3, 5), int)
+    >>> np.fill_diagonal(a, 4, wrap=True)
+    >>> a
+    array([[4, 0, 0, 0, 0],
+           [0, 4, 0, 0, 0],
+           [0, 0, 4, 0, 0]])
+
+    The anti-diagonal can be filled by reversing the order of elements
+    using either `numpy.flipud` or `numpy.fliplr`.
+
+    >>> a = np.zeros((3, 3), int)
+    >>> np.fill_diagonal(np.fliplr(a), [1,2,3])  # Horizontal flip
+    >>> a
+    array([[0, 0, 1],
+           [0, 2, 0],
+           [3, 0, 0]])
+    >>> np.fill_diagonal(np.flipud(a), [1,2,3])  # Vertical flip
+    >>> a
+    array([[0, 0, 3],
+           [0, 2, 0],
+           [1, 0, 0]])
+
+    Note that the order in which the diagonal is filled varies depending
+    on the flip function.
+    """
+    if a.ndim < 2:
+        raise ValueError("array must be at least 2-d")
+    end = None
+    if a.ndim == 2:
+        # Explicit, fast formula for the common case.  For 2-d arrays, we
+        # accept rectangular ones.
+        step = a.shape[1] + 1
+        # This is needed so that tall matrices don't have the diagonal wrap.
+        if not wrap:
+            end = a.shape[1] * a.shape[1]
+    else:
+        # For more than d=2, the strided formula is only valid for arrays with
+        # all dimensions equal, so we check first.
+        if not alltrue(diff(a.shape) == 0):
+            raise ValueError("All dimensions of input must be of equal length")
+        step = 1 + (cumprod(a.shape[:-1])).sum()
+
+    # Write the value out into the diagonal.
+    a.flat[:end:step] = val
+
+
+@set_module('numpy')
+def diag_indices(n, ndim=2):
+    """
+    Return the indices to access the main diagonal of an array.
+
+    This returns a tuple of indices that can be used to access the main
+    diagonal of an array `a` with ``a.ndim >= 2`` dimensions and shape
+    (n, n, ..., n). For ``a.ndim = 2`` this is the usual diagonal, for
+    ``a.ndim > 2`` this is the set of indices to access ``a[i, i, ..., i]``
+    for ``i = [0..n-1]``.
+ + Parameters + ---------- + n : int + The size, along each dimension, of the arrays for which the returned + indices can be used. + + ndim : int, optional + The number of dimensions. + + See Also + -------- + diag_indices_from + + Notes + ----- + .. versionadded:: 1.4.0 + + Examples + -------- + Create a set of indices to access the diagonal of a (4, 4) array: + + >>> di = np.diag_indices(4) + >>> di + (array([0, 1, 2, 3]), array([0, 1, 2, 3])) + >>> a = np.arange(16).reshape(4, 4) + >>> a + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15]]) + >>> a[di] = 100 + >>> a + array([[100, 1, 2, 3], + [ 4, 100, 6, 7], + [ 8, 9, 100, 11], + [ 12, 13, 14, 100]]) + + Now, we create indices to manipulate a 3-D array: + + >>> d3 = np.diag_indices(2, 3) + >>> d3 + (array([0, 1]), array([0, 1]), array([0, 1])) + + And use it to set the diagonal of an array of zeros to 1: + + >>> a = np.zeros((2, 2, 2), dtype=int) + >>> a[d3] = 1 + >>> a + array([[[1, 0], + [0, 0]], + [[0, 0], + [0, 1]]]) + + """ + idx = arange(n) + return (idx,) * ndim + + +def _diag_indices_from(arr): + return (arr,) + + +@array_function_dispatch(_diag_indices_from) +def diag_indices_from(arr): + """ + Return the indices to access the main diagonal of an n-dimensional array. + + See `diag_indices` for full details. + + Parameters + ---------- + arr : array, at least 2-D + + See Also + -------- + diag_indices + + Notes + ----- + .. versionadded:: 1.4.0 + + """ + + if not arr.ndim >= 2: + raise ValueError("input array must be at least 2-d") + # For more than d=2, the strided formula is only valid for arrays with + # all dimensions equal, so we check first. + if not alltrue(diff(arr.shape) == 0): + raise ValueError("All dimensions of input must be of equal length") + + return diag_indices(arr.shape[0], arr.ndim) diff --git a/MLPY/Lib/site-packages/numpy/lib/index_tricks.pyi b/MLPY/Lib/site-packages/numpy/lib/index_tricks.pyi new file mode 100644 index 0000000000000000000000000000000000000000..d7562b329ec740b04de700c530104c53e6abee26 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/index_tricks.pyi @@ -0,0 +1,194 @@ +import sys +from typing import ( + Any, + Tuple, + TypeVar, + Generic, + overload, + List, + Union, + Sequence, +) + +from numpy import ( + # Circumvent a naming conflict with `AxisConcatenator.matrix` + matrix as _Matrix, + ndenumerate as ndenumerate, + ndindex as ndindex, + ndarray, + dtype, + integer, + str_, + bytes_, + bool_, + int_, + float_, + complex_, + intp, + _OrderCF, + _ModeKind, +) +from numpy.typing import ( + # Arrays + ArrayLike, + _NestedSequence, + _RecursiveSequence, + NDArray, + _ArrayLikeInt, + + # DTypes + DTypeLike, + _SupportsDType, + + # Shapes + _ShapeLike, +) + +if sys.version_info >= (3, 8): + from typing import Literal, SupportsIndex +else: + from typing_extensions import Literal, SupportsIndex + +_T = TypeVar("_T") +_DType = TypeVar("_DType", bound=dtype[Any]) +_BoolType = TypeVar("_BoolType", Literal[True], Literal[False]) +_TupType = TypeVar("_TupType", bound=Tuple[Any, ...]) +_ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any]) + +__all__: List[str] + +@overload +def unravel_index( # type: ignore[misc] + indices: Union[int, integer[Any]], + shape: _ShapeLike, + order: _OrderCF = ... +) -> Tuple[intp, ...]: ... +@overload +def unravel_index( + indices: _ArrayLikeInt, + shape: _ShapeLike, + order: _OrderCF = ... +) -> Tuple[NDArray[intp], ...]: ... 
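+# A round-trip sketch for `unravel_index` above and `ravel_multi_index`
+# below (a minimal illustration; the shape and offset values here are
+# assumptions chosen for the example, not part of the stub file):
+#
+#   >>> import numpy as np
+#   >>> np.unravel_index(22, (7, 6))          # flat offset -> index tuple
+#   (3, 4)
+#   >>> np.ravel_multi_index((3, 4), (7, 6))  # index tuple -> flat offset
+#   22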
+ +@overload +def ravel_multi_index( # type: ignore[misc] + multi_index: Sequence[Union[int, integer[Any]]], + dims: _ShapeLike, + mode: Union[_ModeKind, Tuple[_ModeKind, ...]] = ..., + order: _OrderCF = ... +) -> intp: ... +@overload +def ravel_multi_index( + multi_index: Sequence[_ArrayLikeInt], + dims: _ShapeLike, + mode: Union[_ModeKind, Tuple[_ModeKind, ...]] = ..., + order: _OrderCF = ... +) -> NDArray[intp]: ... + +@overload +def ix_(*args: _NestedSequence[_SupportsDType[_DType]]) -> Tuple[ndarray[Any, _DType], ...]: ... +@overload +def ix_(*args: _NestedSequence[str]) -> Tuple[NDArray[str_], ...]: ... +@overload +def ix_(*args: _NestedSequence[bytes]) -> Tuple[NDArray[bytes_], ...]: ... +@overload +def ix_(*args: _NestedSequence[bool]) -> Tuple[NDArray[bool_], ...]: ... +@overload +def ix_(*args: _NestedSequence[int]) -> Tuple[NDArray[int_], ...]: ... +@overload +def ix_(*args: _NestedSequence[float]) -> Tuple[NDArray[float_], ...]: ... +@overload +def ix_(*args: _NestedSequence[complex]) -> Tuple[NDArray[complex_], ...]: ... +@overload +def ix_(*args: _RecursiveSequence) -> Tuple[NDArray[Any], ...]: ... + +class nd_grid(Generic[_BoolType]): + sparse: _BoolType + def __init__(self, sparse: _BoolType = ...) -> None: ... + @overload + def __getitem__( + self: nd_grid[Literal[False]], + key: Union[slice, Sequence[slice]], + ) -> NDArray[Any]: ... + @overload + def __getitem__( + self: nd_grid[Literal[True]], + key: Union[slice, Sequence[slice]], + ) -> List[NDArray[Any]]: ... + +class MGridClass(nd_grid[Literal[False]]): + def __init__(self) -> None: ... + +mgrid: MGridClass + +class OGridClass(nd_grid[Literal[True]]): + def __init__(self) -> None: ... + +ogrid: OGridClass + +class AxisConcatenator: + axis: int + matrix: bool + ndmin: int + trans1d: int + def __init__( + self, + axis: int = ..., + matrix: bool = ..., + ndmin: int = ..., + trans1d: int = ..., + ) -> None: ... + @staticmethod + @overload + def concatenate( # type: ignore[misc] + *a: ArrayLike, axis: SupportsIndex = ..., out: None = ... + ) -> NDArray[Any]: ... + @staticmethod + @overload + def concatenate( + *a: ArrayLike, axis: SupportsIndex = ..., out: _ArrayType = ... + ) -> _ArrayType: ... + @staticmethod + def makemat( + data: ArrayLike, dtype: DTypeLike = ..., copy: bool = ... + ) -> _Matrix: ... + + # TODO: Sort out this `__getitem__` method + def __getitem__(self, key: Any) -> Any: ... + +class RClass(AxisConcatenator): + axis: Literal[0] + matrix: Literal[False] + ndmin: Literal[1] + trans1d: Literal[-1] + def __init__(self) -> None: ... + +r_: RClass + +class CClass(AxisConcatenator): + axis: Literal[-1] + matrix: Literal[False] + ndmin: Literal[2] + trans1d: Literal[0] + def __init__(self) -> None: ... + +c_: CClass + +class IndexExpression(Generic[_BoolType]): + maketuple: _BoolType + def __init__(self, maketuple: _BoolType) -> None: ... + @overload + def __getitem__(self, item: _TupType) -> _TupType: ... # type: ignore[misc] + @overload + def __getitem__(self: IndexExpression[Literal[True]], item: _T) -> Tuple[_T]: ... + @overload + def __getitem__(self: IndexExpression[Literal[False]], item: _T) -> _T: ... + +index_exp: IndexExpression[Literal[True]] +s_: IndexExpression[Literal[False]] + +def fill_diagonal(a: ndarray[Any, Any], val: Any, wrap: bool = ...) -> None: ... +def diag_indices(n: int, ndim: int = ...) -> Tuple[NDArray[int_], ...]: ... +def diag_indices_from(arr: ArrayLike) -> Tuple[NDArray[int_], ...]: ... 
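+# Minimal usage sketch for the three diagonal helpers stubbed above; the
+# values mirror the docstrings in index_tricks.py, and nothing beyond the
+# public API is assumed:
+#
+#   >>> import numpy as np
+#   >>> a = np.zeros((3, 3), int)
+#   >>> np.fill_diagonal(a, 5)        # in-place; returns None
+#   >>> a[np.diag_indices_from(a)]
+#   array([5, 5, 5])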
+
+# NOTE: see `numpy/__init__.pyi` for `ndenumerate` and `ndindex`
diff --git a/MLPY/Lib/site-packages/numpy/lib/mixins.py b/MLPY/Lib/site-packages/numpy/lib/mixins.py
new file mode 100644
index 0000000000000000000000000000000000000000..bc38c67d1c12001bb78a4566d73cba6a4b7f5cb4
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/lib/mixins.py
@@ -0,0 +1,176 @@
+"""Mixin classes for custom array types that don't inherit from ndarray."""
+from numpy.core import umath as um
+
+
+__all__ = ['NDArrayOperatorsMixin']
+
+
+def _disables_array_ufunc(obj):
+    """True when __array_ufunc__ is set to None."""
+    try:
+        return obj.__array_ufunc__ is None
+    except AttributeError:
+        return False
+
+
+def _binary_method(ufunc, name):
+    """Implement a forward binary method with a ufunc, e.g., __add__."""
+    def func(self, other):
+        if _disables_array_ufunc(other):
+            return NotImplemented
+        return ufunc(self, other)
+    func.__name__ = '__{}__'.format(name)
+    return func
+
+
+def _reflected_binary_method(ufunc, name):
+    """Implement a reflected binary method with a ufunc, e.g., __radd__."""
+    def func(self, other):
+        if _disables_array_ufunc(other):
+            return NotImplemented
+        return ufunc(other, self)
+    func.__name__ = '__r{}__'.format(name)
+    return func
+
+
+def _inplace_binary_method(ufunc, name):
+    """Implement an in-place binary method with a ufunc, e.g., __iadd__."""
+    def func(self, other):
+        return ufunc(self, other, out=(self,))
+    func.__name__ = '__i{}__'.format(name)
+    return func
+
+
+def _numeric_methods(ufunc, name):
+    """Implement forward, reflected and inplace binary methods with a ufunc."""
+    return (_binary_method(ufunc, name),
+            _reflected_binary_method(ufunc, name),
+            _inplace_binary_method(ufunc, name))
+
+
+def _unary_method(ufunc, name):
+    """Implement a unary special method with a ufunc."""
+    def func(self):
+        return ufunc(self)
+    func.__name__ = '__{}__'.format(name)
+    return func
+
+
+class NDArrayOperatorsMixin:
+    """Mixin defining all operator special methods using __array_ufunc__.
+
+    This class implements the special methods for almost all of Python's
+    builtin operators defined in the `operator` module, including comparisons
+    (``==``, ``>``, etc.) and arithmetic (``+``, ``*``, ``-``, etc.), by
+    deferring to the ``__array_ufunc__`` method, which subclasses must
+    implement.
+
+    It is useful for writing classes that do not inherit from `numpy.ndarray`,
+    but that should support arithmetic and numpy universal functions like
+    arrays as described in `A Mechanism for Overriding Ufuncs
+    <https://numpy.org/neps/nep-0013-ufunc-overrides.html>`_.
+
+    As a trivial example, consider this implementation of an ``ArrayLike``
+    class that simply wraps a NumPy array and ensures that the result of any
+    arithmetic operation is also an ``ArrayLike`` object::
+
+        class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin):
+            def __init__(self, value):
+                self.value = np.asarray(value)
+
+            # One might also consider adding the built-in list type to this
+            # list, to support operations like np.add(array_like, list)
+            _HANDLED_TYPES = (np.ndarray, numbers.Number)
+
+            def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+                out = kwargs.get('out', ())
+                for x in inputs + out:
+                    # Only support operations with instances of _HANDLED_TYPES.
+                    # Use ArrayLike instead of type(self) for isinstance to
+                    # allow subclasses that don't override __array_ufunc__ to
+                    # handle ArrayLike objects.
+                    if not isinstance(x, self._HANDLED_TYPES + (ArrayLike,)):
+                        return NotImplemented
+
+                # Defer to the implementation of the ufunc on unwrapped values.
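+                # (Passing the wrapped ArrayLike instances straight back to
+                # the ufunc would re-enter this __array_ufunc__ and recurse
+                # indefinitely, so the example unwraps to plain ndarrays and
+                # numbers first.)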
+ inputs = tuple(x.value if isinstance(x, ArrayLike) else x + for x in inputs) + if out: + kwargs['out'] = tuple( + x.value if isinstance(x, ArrayLike) else x + for x in out) + result = getattr(ufunc, method)(*inputs, **kwargs) + + if type(result) is tuple: + # multiple return values + return tuple(type(self)(x) for x in result) + elif method == 'at': + # no return value + return None + else: + # one return value + return type(self)(result) + + def __repr__(self): + return '%s(%r)' % (type(self).__name__, self.value) + + In interactions between ``ArrayLike`` objects and numbers or numpy arrays, + the result is always another ``ArrayLike``: + + >>> x = ArrayLike([1, 2, 3]) + >>> x - 1 + ArrayLike(array([0, 1, 2])) + >>> 1 - x + ArrayLike(array([ 0, -1, -2])) + >>> np.arange(3) - x + ArrayLike(array([-1, -1, -1])) + >>> x - np.arange(3) + ArrayLike(array([1, 1, 1])) + + Note that unlike ``numpy.ndarray``, ``ArrayLike`` does not allow operations + with arbitrary, unrecognized types. This ensures that interactions with + ArrayLike preserve a well-defined casting hierarchy. + + .. versionadded:: 1.13 + """ + # Like np.ndarray, this mixin class implements "Option 1" from the ufunc + # overrides NEP. + + # comparisons don't have reflected and in-place versions + __lt__ = _binary_method(um.less, 'lt') + __le__ = _binary_method(um.less_equal, 'le') + __eq__ = _binary_method(um.equal, 'eq') + __ne__ = _binary_method(um.not_equal, 'ne') + __gt__ = _binary_method(um.greater, 'gt') + __ge__ = _binary_method(um.greater_equal, 'ge') + + # numeric methods + __add__, __radd__, __iadd__ = _numeric_methods(um.add, 'add') + __sub__, __rsub__, __isub__ = _numeric_methods(um.subtract, 'sub') + __mul__, __rmul__, __imul__ = _numeric_methods(um.multiply, 'mul') + __matmul__, __rmatmul__, __imatmul__ = _numeric_methods( + um.matmul, 'matmul') + # Python 3 does not use __div__, __rdiv__, or __idiv__ + __truediv__, __rtruediv__, __itruediv__ = _numeric_methods( + um.true_divide, 'truediv') + __floordiv__, __rfloordiv__, __ifloordiv__ = _numeric_methods( + um.floor_divide, 'floordiv') + __mod__, __rmod__, __imod__ = _numeric_methods(um.remainder, 'mod') + __divmod__ = _binary_method(um.divmod, 'divmod') + __rdivmod__ = _reflected_binary_method(um.divmod, 'divmod') + # __idivmod__ does not exist + # TODO: handle the optional third argument for __pow__? 
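+    # (Regarding the TODO above: the built-in ``pow(a, b, mod)`` calls
+    # ``__pow__(self, other, mod)`` with a third argument, while the
+    # two-argument ``func`` built by ``_numeric_methods`` cannot accept
+    # it, so ternary ``pow`` on a mixin subclass raises ``TypeError``.)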
+ __pow__, __rpow__, __ipow__ = _numeric_methods(um.power, 'pow') + __lshift__, __rlshift__, __ilshift__ = _numeric_methods( + um.left_shift, 'lshift') + __rshift__, __rrshift__, __irshift__ = _numeric_methods( + um.right_shift, 'rshift') + __and__, __rand__, __iand__ = _numeric_methods(um.bitwise_and, 'and') + __xor__, __rxor__, __ixor__ = _numeric_methods(um.bitwise_xor, 'xor') + __or__, __ror__, __ior__ = _numeric_methods(um.bitwise_or, 'or') + + # unary methods + __neg__ = _unary_method(um.negative, 'neg') + __pos__ = _unary_method(um.positive, 'pos') + __abs__ = _unary_method(um.absolute, 'abs') + __invert__ = _unary_method(um.invert, 'invert') diff --git a/MLPY/Lib/site-packages/numpy/lib/mixins.pyi b/MLPY/Lib/site-packages/numpy/lib/mixins.pyi new file mode 100644 index 0000000000000000000000000000000000000000..895860200c4a489a4803bf6e320b7ddc479b542c --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/mixins.pyi @@ -0,0 +1,62 @@ +from typing import List +from abc import ABCMeta, abstractmethod + +__all__: List[str] + +# NOTE: `NDArrayOperatorsMixin` is not formally an abstract baseclass, +# even though it's reliant on subclasses implementing `__array_ufunc__` + +class NDArrayOperatorsMixin(metaclass=ABCMeta): + @abstractmethod + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): ... + def __lt__(self, other): ... + def __le__(self, other): ... + def __eq__(self, other): ... + def __ne__(self, other): ... + def __gt__(self, other): ... + def __ge__(self, other): ... + def __add__(self, other): ... + def __radd__(self, other): ... + def __iadd__(self, other): ... + def __sub__(self, other): ... + def __rsub__(self, other): ... + def __isub__(self, other): ... + def __mul__(self, other): ... + def __rmul__(self, other): ... + def __imul__(self, other): ... + def __matmul__(self, other): ... + def __rmatmul__(self, other): ... + def __imatmul__(self, other): ... + def __truediv__(self, other): ... + def __rtruediv__(self, other): ... + def __itruediv__(self, other): ... + def __floordiv__(self, other): ... + def __rfloordiv__(self, other): ... + def __ifloordiv__(self, other): ... + def __mod__(self, other): ... + def __rmod__(self, other): ... + def __imod__(self, other): ... + def __divmod__(self, other): ... + def __rdivmod__(self, other): ... + def __pow__(self, other): ... + def __rpow__(self, other): ... + def __ipow__(self, other): ... + def __lshift__(self, other): ... + def __rlshift__(self, other): ... + def __ilshift__(self, other): ... + def __rshift__(self, other): ... + def __rrshift__(self, other): ... + def __irshift__(self, other): ... + def __and__(self, other): ... + def __rand__(self, other): ... + def __iand__(self, other): ... + def __xor__(self, other): ... + def __rxor__(self, other): ... + def __ixor__(self, other): ... + def __or__(self, other): ... + def __ror__(self, other): ... + def __ior__(self, other): ... + def __neg__(self): ... + def __pos__(self): ... + def __abs__(self): ... + def __invert__(self): ... diff --git a/MLPY/Lib/site-packages/numpy/lib/nanfunctions.py b/MLPY/Lib/site-packages/numpy/lib/nanfunctions.py new file mode 100644 index 0000000000000000000000000000000000000000..47e9d620f806537b1951e60c41152ccfd1316a13 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/nanfunctions.py @@ -0,0 +1,1676 @@ +""" +Functions that ignore NaN. 
+ +Functions +--------- + +- `nanmin` -- minimum non-NaN value +- `nanmax` -- maximum non-NaN value +- `nanargmin` -- index of minimum non-NaN value +- `nanargmax` -- index of maximum non-NaN value +- `nansum` -- sum of non-NaN values +- `nanprod` -- product of non-NaN values +- `nancumsum` -- cumulative sum of non-NaN values +- `nancumprod` -- cumulative product of non-NaN values +- `nanmean` -- mean of non-NaN values +- `nanvar` -- variance of non-NaN values +- `nanstd` -- standard deviation of non-NaN values +- `nanmedian` -- median of non-NaN values +- `nanquantile` -- qth quantile of non-NaN values +- `nanpercentile` -- qth percentile of non-NaN values + +""" +import functools +import warnings +import numpy as np +from numpy.lib import function_base +from numpy.core import overrides + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +__all__ = [ + 'nansum', 'nanmax', 'nanmin', 'nanargmax', 'nanargmin', 'nanmean', + 'nanmedian', 'nanpercentile', 'nanvar', 'nanstd', 'nanprod', + 'nancumsum', 'nancumprod', 'nanquantile' + ] + + +def _nan_mask(a, out=None): + """ + Parameters + ---------- + a : array-like + Input array with at least 1 dimension. + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``; if provided, it must have the same shape as the + expected output and will prevent the allocation of a new array. + + Returns + ------- + y : bool ndarray or True + A bool array where ``np.nan`` positions are marked with ``False`` + and other positions are marked with ``True``. If the type of ``a`` + is such that it can't possibly contain ``np.nan``, returns ``True``. + """ + # we assume that a is an array for this private function + + if a.dtype.kind not in 'fc': + return True + + y = np.isnan(a, out=out) + y = np.invert(y, out=y) + return y + +def _replace_nan(a, val): + """ + If `a` is of inexact type, make a copy of `a`, replace NaNs with + the `val` value, and return the copy together with a boolean mask + marking the locations where NaNs were present. If `a` is not of + inexact type, do nothing and return `a` together with a mask of None. + + Note that scalars will end up as array scalars, which is important + for using the result as the value of the out argument in some + operations. + + Parameters + ---------- + a : array-like + Input array. + val : float + NaN values are set to val before doing the operation. + + Returns + ------- + y : ndarray + If `a` is of inexact type, return a copy of `a` with the NaNs + replaced by the fill value, otherwise return `a`. + mask: {bool, None} + If `a` is of inexact type, return a boolean mask marking locations of + NaNs, otherwise return None. + + """ + a = np.asanyarray(a) + + if a.dtype == np.object_: + # object arrays do not support `isnan` (gh-9009), so make a guess + mask = np.not_equal(a, a, dtype=bool) + elif issubclass(a.dtype.type, np.inexact): + mask = np.isnan(a) + else: + mask = None + + if mask is not None: + a = np.array(a, subok=True, copy=True) + np.copyto(a, val, where=mask) + + return a, mask + + +def _copyto(a, val, mask): + """ + Replace values in `a` with NaN where `mask` is True. This differs from + copyto in that it will deal with the case where `a` is a numpy scalar. + + Parameters + ---------- + a : ndarray or numpy scalar + Array or numpy scalar some of whose values are to be replaced + by val. + val : numpy scalar + Value used a replacement. + mask : ndarray, scalar + Boolean array. 
Where True the corresponding element of `a` is
+        replaced by `val`.  Broadcasts.
+
+    Returns
+    -------
+    res : ndarray, scalar
+        Array with elements replaced or scalar `val`.
+
+    """
+    if isinstance(a, np.ndarray):
+        np.copyto(a, val, where=mask, casting='unsafe')
+    else:
+        a = a.dtype.type(val)
+    return a
+
+
+def _remove_nan_1d(arr1d, overwrite_input=False):
+    """
+    Equivalent to ``arr1d[~np.isnan(arr1d)]``, but in a different order
+
+    Presumably faster as it incurs fewer copies
+
+    Parameters
+    ----------
+    arr1d : ndarray
+        Array to remove nans from
+    overwrite_input : bool
+        True if `arr1d` can be modified in place
+
+    Returns
+    -------
+    res : ndarray
+        Array with nan elements removed
+    overwrite_input : bool
+        True if `res` can be modified in place, given the constraint on the
+        input
+    """
+
+    c = np.isnan(arr1d)
+    s = np.nonzero(c)[0]
+    if s.size == arr1d.size:
+        warnings.warn("All-NaN slice encountered", RuntimeWarning,
+                      stacklevel=5)
+        return arr1d[:0], True
+    elif s.size == 0:
+        return arr1d, overwrite_input
+    else:
+        if not overwrite_input:
+            arr1d = arr1d.copy()
+        # select non-nans at end of array
+        enonan = arr1d[-s.size:][~c[-s.size:]]
+        # fill nans in beginning of array with non-nans of end
+        arr1d[s[:enonan.size]] = enonan
+
+        return arr1d[:-s.size], True
+
+
+def _divide_by_count(a, b, out=None):
+    """
+    Compute a/b ignoring invalid results. If `a` is an array the division
+    is done in place. If `a` is a scalar, then its type is preserved in the
+    output. If `out` is None, then `a` is used instead so that the
+    division is in place. Note that this is only called with `a` an inexact
+    type.
+
+    Parameters
+    ----------
+    a : {ndarray, numpy scalar}
+        Numerator. Expected to be of inexact type but not checked.
+    b : {ndarray, numpy scalar}
+        Denominator.
+    out : ndarray, optional
+        Alternate output array in which to place the result.  The default
+        is ``None``; if provided, it must have the same shape as the
+        expected output, but the type will be cast if necessary.
+
+    Returns
+    -------
+    ret : {ndarray, numpy scalar}
+        The return value is a/b. If `a` was an ndarray the division is done
+        in place. If `a` is a numpy scalar, the division preserves its type.
+
+    """
+    with np.errstate(invalid='ignore', divide='ignore'):
+        if isinstance(a, np.ndarray):
+            if out is None:
+                return np.divide(a, b, out=a, casting='unsafe')
+            else:
+                return np.divide(a, b, out=out, casting='unsafe')
+        else:
+            if out is None:
+                return a.dtype.type(a / b)
+            else:
+                # This is questionable, but currently a numpy scalar can
+                # be output to a zero dimensional array.
+                return np.divide(a, b, out=out, casting='unsafe')
+
+
+def _nanmin_dispatcher(a, axis=None, out=None, keepdims=None):
+    return (a, out)
+
+
+@array_function_dispatch(_nanmin_dispatcher)
+def nanmin(a, axis=None, out=None, keepdims=np._NoValue):
+    """
+    Return the minimum of an array or minimum along an axis, ignoring any
+    NaNs.  When all-NaN slices are encountered a ``RuntimeWarning`` is
+    raised and NaN is returned for that slice.
+
+    Parameters
+    ----------
+    a : array_like
+        Array containing numbers whose minimum is desired. If `a` is not an
+        array, a conversion is attempted.
+    axis : {int, tuple of int, None}, optional
+        Axis or axes along which the minimum is computed. The default is to
+        compute the minimum of the flattened array.
+    out : ndarray, optional
+        Alternate output array in which to place the result.  The default
+        is ``None``; if provided, it must have the same shape as the
+        expected output, but the type will be cast if necessary.
See + :ref:`ufuncs-output-type` for more details. + + .. versionadded:: 1.8.0 + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + If the value is anything but the default, then + `keepdims` will be passed through to the `min` method + of sub-classes of `ndarray`. If the sub-classes methods + does not implement `keepdims` any exceptions will be raised. + + .. versionadded:: 1.8.0 + + Returns + ------- + nanmin : ndarray + An array with the same shape as `a`, with the specified axis + removed. If `a` is a 0-d array, or if axis is None, an ndarray + scalar is returned. The same dtype as `a` is returned. + + See Also + -------- + nanmax : + The maximum value of an array along a given axis, ignoring any NaNs. + amin : + The minimum value of an array along a given axis, propagating any NaNs. + fmin : + Element-wise minimum of two arrays, ignoring any NaNs. + minimum : + Element-wise minimum of two arrays, propagating any NaNs. + isnan : + Shows which elements are Not a Number (NaN). + isfinite: + Shows which elements are neither NaN nor infinity. + + amax, fmax, maximum + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). This means that Not a Number is not equivalent to infinity. + Positive infinity is treated as a very large number and negative + infinity is treated as a very small (i.e. negative) number. + + If the input has a integer type the function is equivalent to np.min. + + Examples + -------- + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> np.nanmin(a) + 1.0 + >>> np.nanmin(a, axis=0) + array([1., 2.]) + >>> np.nanmin(a, axis=1) + array([1., 3.]) + + When positive infinity and negative infinity are present: + + >>> np.nanmin([1, 2, np.nan, np.inf]) + 1.0 + >>> np.nanmin([1, 2, np.nan, np.NINF]) + -inf + + """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + if type(a) is np.ndarray and a.dtype != np.object_: + # Fast, but not safe for subclasses of ndarray, or object arrays, + # which do not implement isnan (gh-9009), or fmin correctly (gh-8975) + res = np.fmin.reduce(a, axis=axis, out=out, **kwargs) + if np.isnan(res).any(): + warnings.warn("All-NaN slice encountered", RuntimeWarning, + stacklevel=3) + else: + # Slow, but safe for subclasses of ndarray + a, mask = _replace_nan(a, +np.inf) + res = np.amin(a, axis=axis, out=out, **kwargs) + if mask is None: + return res + + # Check for all-NaN axis + mask = np.all(mask, axis=axis, **kwargs) + if np.any(mask): + res = _copyto(res, np.nan, mask) + warnings.warn("All-NaN axis encountered", RuntimeWarning, + stacklevel=3) + return res + + +def _nanmax_dispatcher(a, axis=None, out=None, keepdims=None): + return (a, out) + + +@array_function_dispatch(_nanmax_dispatcher) +def nanmax(a, axis=None, out=None, keepdims=np._NoValue): + """ + Return the maximum of an array or maximum along an axis, ignoring any + NaNs. When all-NaN slices are encountered a ``RuntimeWarning`` is + raised and NaN is returned for that slice. + + Parameters + ---------- + a : array_like + Array containing numbers whose maximum is desired. If `a` is not an + array, a conversion is attempted. + axis : {int, tuple of int, None}, optional + Axis or axes along which the maximum is computed. The default is to compute + the maximum of the flattened array. 
+ out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``; if provided, it must have the same shape as the + expected output, but the type will be cast if necessary. See + :ref:`ufuncs-output-type` for more details. + + .. versionadded:: 1.8.0 + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + If the value is anything but the default, then + `keepdims` will be passed through to the `max` method + of sub-classes of `ndarray`. If the sub-classes methods + does not implement `keepdims` any exceptions will be raised. + + .. versionadded:: 1.8.0 + + Returns + ------- + nanmax : ndarray + An array with the same shape as `a`, with the specified axis removed. + If `a` is a 0-d array, or if axis is None, an ndarray scalar is + returned. The same dtype as `a` is returned. + + See Also + -------- + nanmin : + The minimum value of an array along a given axis, ignoring any NaNs. + amax : + The maximum value of an array along a given axis, propagating any NaNs. + fmax : + Element-wise maximum of two arrays, ignoring any NaNs. + maximum : + Element-wise maximum of two arrays, propagating any NaNs. + isnan : + Shows which elements are Not a Number (NaN). + isfinite: + Shows which elements are neither NaN nor infinity. + + amin, fmin, minimum + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). This means that Not a Number is not equivalent to infinity. + Positive infinity is treated as a very large number and negative + infinity is treated as a very small (i.e. negative) number. + + If the input has a integer type the function is equivalent to np.max. + + Examples + -------- + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> np.nanmax(a) + 3.0 + >>> np.nanmax(a, axis=0) + array([3., 2.]) + >>> np.nanmax(a, axis=1) + array([2., 3.]) + + When positive infinity and negative infinity are present: + + >>> np.nanmax([1, 2, np.nan, np.NINF]) + 2.0 + >>> np.nanmax([1, 2, np.nan, np.inf]) + inf + + """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + if type(a) is np.ndarray and a.dtype != np.object_: + # Fast, but not safe for subclasses of ndarray, or object arrays, + # which do not implement isnan (gh-9009), or fmax correctly (gh-8975) + res = np.fmax.reduce(a, axis=axis, out=out, **kwargs) + if np.isnan(res).any(): + warnings.warn("All-NaN slice encountered", RuntimeWarning, + stacklevel=3) + else: + # Slow, but safe for subclasses of ndarray + a, mask = _replace_nan(a, -np.inf) + res = np.amax(a, axis=axis, out=out, **kwargs) + if mask is None: + return res + + # Check for all-NaN axis + mask = np.all(mask, axis=axis, **kwargs) + if np.any(mask): + res = _copyto(res, np.nan, mask) + warnings.warn("All-NaN axis encountered", RuntimeWarning, + stacklevel=3) + return res + + +def _nanargmin_dispatcher(a, axis=None): + return (a,) + + +@array_function_dispatch(_nanargmin_dispatcher) +def nanargmin(a, axis=None): + """ + Return the indices of the minimum values in the specified axis ignoring + NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the results + cannot be trusted if a slice contains only NaNs and Infs. + + Parameters + ---------- + a : array_like + Input data. + axis : int, optional + Axis along which to operate. By default flattened input is used. 
+ + Returns + ------- + index_array : ndarray + An array of indices or a single index value. + + See Also + -------- + argmin, nanargmax + + Examples + -------- + >>> a = np.array([[np.nan, 4], [2, 3]]) + >>> np.argmin(a) + 0 + >>> np.nanargmin(a) + 2 + >>> np.nanargmin(a, axis=0) + array([1, 1]) + >>> np.nanargmin(a, axis=1) + array([1, 0]) + + """ + a, mask = _replace_nan(a, np.inf) + res = np.argmin(a, axis=axis) + if mask is not None: + mask = np.all(mask, axis=axis) + if np.any(mask): + raise ValueError("All-NaN slice encountered") + return res + + +def _nanargmax_dispatcher(a, axis=None): + return (a,) + + +@array_function_dispatch(_nanargmax_dispatcher) +def nanargmax(a, axis=None): + """ + Return the indices of the maximum values in the specified axis ignoring + NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the + results cannot be trusted if a slice contains only NaNs and -Infs. + + + Parameters + ---------- + a : array_like + Input data. + axis : int, optional + Axis along which to operate. By default flattened input is used. + + Returns + ------- + index_array : ndarray + An array of indices or a single index value. + + See Also + -------- + argmax, nanargmin + + Examples + -------- + >>> a = np.array([[np.nan, 4], [2, 3]]) + >>> np.argmax(a) + 0 + >>> np.nanargmax(a) + 1 + >>> np.nanargmax(a, axis=0) + array([1, 0]) + >>> np.nanargmax(a, axis=1) + array([1, 1]) + + """ + a, mask = _replace_nan(a, -np.inf) + res = np.argmax(a, axis=axis) + if mask is not None: + mask = np.all(mask, axis=axis) + if np.any(mask): + raise ValueError("All-NaN slice encountered") + return res + + +def _nansum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None): + return (a, out) + + +@array_function_dispatch(_nansum_dispatcher) +def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): + """ + Return the sum of array elements over a given axis treating Not a + Numbers (NaNs) as zero. + + In NumPy versions <= 1.9.0 Nan is returned for slices that are all-NaN or + empty. In later versions zero is returned. + + Parameters + ---------- + a : array_like + Array containing numbers whose sum is desired. If `a` is not an + array, a conversion is attempted. + axis : {int, tuple of int, None}, optional + Axis or axes along which the sum is computed. The default is to compute the + sum of the flattened array. + dtype : data-type, optional + The type of the returned array and of the accumulator in which the + elements are summed. By default, the dtype of `a` is used. An + exception is when `a` has an integer type with less precision than + the platform (u)intp. In that case, the default will be either + (u)int32 or (u)int64 depending on whether the platform is 32 or 64 + bits. For inexact inputs, dtype must be inexact. + + .. versionadded:: 1.8.0 + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``. If provided, it must have the same shape as the + expected output, but the type will be cast if necessary. See + :ref:`ufuncs-output-type` for more details. The casting of NaN to integer + can yield unexpected results. + + .. versionadded:: 1.8.0 + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + + If the value is anything but the default, then + `keepdims` will be passed through to the `mean` or `sum` methods + of sub-classes of `ndarray`. 
If the sub-class's method
+        does not implement `keepdims`, any exceptions will be raised.
+
+        .. versionadded:: 1.8.0
+
+    Returns
+    -------
+    nansum : ndarray
+        A new array holding the result is returned unless `out` is
+        specified, in which case it is returned. The result has the same
+        size as `a`, and the same shape as `a` if `axis` is not None
+        or `a` is a 1-d array.
+
+    See Also
+    --------
+    numpy.sum : Sum across array propagating NaNs.
+    isnan : Show which elements are NaN.
+    isfinite : Show which elements are not NaN or +/-inf.
+
+    Notes
+    -----
+    If both positive and negative infinity are present, the sum will be Not
+    A Number (NaN).
+
+    Examples
+    --------
+    >>> np.nansum(1)
+    1
+    >>> np.nansum([1])
+    1
+    >>> np.nansum([1, np.nan])
+    1.0
+    >>> a = np.array([[1, 1], [1, np.nan]])
+    >>> np.nansum(a)
+    3.0
+    >>> np.nansum(a, axis=0)
+    array([2., 1.])
+    >>> np.nansum([1, np.nan, np.inf])
+    inf
+    >>> np.nansum([1, np.nan, np.NINF])
+    -inf
+    >>> from numpy.testing import suppress_warnings
+    >>> with suppress_warnings() as sup:
+    ...     sup.filter(RuntimeWarning)
+    ...     np.nansum([1, np.nan, np.inf, -np.inf]) # both +/- infinity present
+    nan
+
+    """
+    a, mask = _replace_nan(a, 0)
+    return np.sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
+
+
+def _nanprod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None):
+    return (a, out)
+
+
+@array_function_dispatch(_nanprod_dispatcher)
+def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
+    """
+    Return the product of array elements over a given axis treating Not a
+    Numbers (NaNs) as ones.
+
+    One is returned for slices that are all-NaN or empty.
+
+    .. versionadded:: 1.10.0
+
+    Parameters
+    ----------
+    a : array_like
+        Array containing numbers whose product is desired. If `a` is not an
+        array, a conversion is attempted.
+    axis : {int, tuple of int, None}, optional
+        Axis or axes along which the product is computed. The default is to
+        compute the product of the flattened array.
+    dtype : data-type, optional
+        The type of the returned array and of the accumulator in which the
+        elements are multiplied.  By default, the dtype of `a` is used.  An
+        exception is when `a` has an integer type with less precision than
+        the platform (u)intp. In that case, the default will be either
+        (u)int32 or (u)int64 depending on whether the platform is 32 or 64
+        bits. For inexact inputs, dtype must be inexact.
+    out : ndarray, optional
+        Alternate output array in which to place the result.  The default
+        is ``None``. If provided, it must have the same shape as the
+        expected output, but the type will be cast if necessary. See
+        :ref:`ufuncs-output-type` for more details. The casting of NaN to
+        integer can yield unexpected results.
+    keepdims : bool, optional
+        If True, the axes which are reduced are left in the result as
+        dimensions with size one. With this option, the result will
+        broadcast correctly against the original `a`.
+
+    Returns
+    -------
+    nanprod : ndarray
+        A new array holding the result is returned unless `out` is
+        specified, in which case it is returned.
+
+    See Also
+    --------
+    numpy.prod : Product across array propagating NaNs.
+    isnan : Show which elements are NaN.
+ + Examples + -------- + >>> np.nanprod(1) + 1 + >>> np.nanprod([1]) + 1 + >>> np.nanprod([1, np.nan]) + 1.0 + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> np.nanprod(a) + 6.0 + >>> np.nanprod(a, axis=0) + array([3., 2.]) + + """ + a, mask = _replace_nan(a, 1) + return np.prod(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims) + + +def _nancumsum_dispatcher(a, axis=None, dtype=None, out=None): + return (a, out) + + +@array_function_dispatch(_nancumsum_dispatcher) +def nancumsum(a, axis=None, dtype=None, out=None): + """ + Return the cumulative sum of array elements over a given axis treating Not a + Numbers (NaNs) as zero. The cumulative sum does not change when NaNs are + encountered and leading NaNs are replaced by zeros. + + Zeros are returned for slices that are all-NaN or empty. + + .. versionadded:: 1.12.0 + + Parameters + ---------- + a : array_like + Input array. + axis : int, optional + Axis along which the cumulative sum is computed. The default + (None) is to compute the cumsum over the flattened array. + dtype : dtype, optional + Type of the returned array and of the accumulator in which the + elements are summed. If `dtype` is not specified, it defaults + to the dtype of `a`, unless `a` has an integer dtype with a + precision less than that of the default platform integer. In + that case, the default platform integer is used. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type will be cast if necessary. See :ref:`ufuncs-output-type` for + more details. + + Returns + ------- + nancumsum : ndarray. + A new array holding the result is returned unless `out` is + specified, in which it is returned. The result has the same + size as `a`, and the same shape as `a` if `axis` is not None + or `a` is a 1-d array. + + See Also + -------- + numpy.cumsum : Cumulative sum across array propagating NaNs. + isnan : Show which elements are NaN. + + Examples + -------- + >>> np.nancumsum(1) + array([1]) + >>> np.nancumsum([1]) + array([1]) + >>> np.nancumsum([1, np.nan]) + array([1., 1.]) + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> np.nancumsum(a) + array([1., 3., 6., 6.]) + >>> np.nancumsum(a, axis=0) + array([[1., 2.], + [4., 2.]]) + >>> np.nancumsum(a, axis=1) + array([[1., 3.], + [3., 3.]]) + + """ + a, mask = _replace_nan(a, 0) + return np.cumsum(a, axis=axis, dtype=dtype, out=out) + + +def _nancumprod_dispatcher(a, axis=None, dtype=None, out=None): + return (a, out) + + +@array_function_dispatch(_nancumprod_dispatcher) +def nancumprod(a, axis=None, dtype=None, out=None): + """ + Return the cumulative product of array elements over a given axis treating Not a + Numbers (NaNs) as one. The cumulative product does not change when NaNs are + encountered and leading NaNs are replaced by ones. + + Ones are returned for slices that are all-NaN or empty. + + .. versionadded:: 1.12.0 + + Parameters + ---------- + a : array_like + Input array. + axis : int, optional + Axis along which the cumulative product is computed. By default + the input is flattened. + dtype : dtype, optional + Type of the returned array, as well as of the accumulator in which + the elements are multiplied. If *dtype* is not specified, it + defaults to the dtype of `a`, unless `a` has an integer dtype with + a precision less than that of the default platform integer. In + that case, the default platform integer is used instead. 
+ out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type of the resulting values will be cast if necessary. + + Returns + ------- + nancumprod : ndarray + A new array holding the result is returned unless `out` is + specified, in which case it is returned. + + See Also + -------- + numpy.cumprod : Cumulative product across array propagating NaNs. + isnan : Show which elements are NaN. + + Examples + -------- + >>> np.nancumprod(1) + array([1]) + >>> np.nancumprod([1]) + array([1]) + >>> np.nancumprod([1, np.nan]) + array([1., 1.]) + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> np.nancumprod(a) + array([1., 2., 6., 6.]) + >>> np.nancumprod(a, axis=0) + array([[1., 2.], + [3., 2.]]) + >>> np.nancumprod(a, axis=1) + array([[1., 2.], + [3., 3.]]) + + """ + a, mask = _replace_nan(a, 1) + return np.cumprod(a, axis=axis, dtype=dtype, out=out) + + +def _nanmean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None): + return (a, out) + + +@array_function_dispatch(_nanmean_dispatcher) +def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): + """ + Compute the arithmetic mean along the specified axis, ignoring NaNs. + + Returns the average of the array elements. The average is taken over + the flattened array by default, otherwise over the specified axis. + `float64` intermediate and return values are used for integer inputs. + + For all-NaN slices, NaN is returned and a `RuntimeWarning` is raised. + + .. versionadded:: 1.8.0 + + Parameters + ---------- + a : array_like + Array containing numbers whose mean is desired. If `a` is not an + array, a conversion is attempted. + axis : {int, tuple of int, None}, optional + Axis or axes along which the means are computed. The default is to compute + the mean of the flattened array. + dtype : data-type, optional + Type to use in computing the mean. For integer inputs, the default + is `float64`; for inexact inputs, it is the same as the input + dtype. + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``; if provided, it must have the same shape as the + expected output, but the type will be cast if necessary. See + :ref:`ufuncs-output-type` for more details. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + If the value is anything but the default, then + `keepdims` will be passed through to the `mean` or `sum` methods + of sub-classes of `ndarray`. If the sub-classes methods + does not implement `keepdims` any exceptions will be raised. + + Returns + ------- + m : ndarray, see dtype parameter above + If `out=None`, returns a new array containing the mean values, + otherwise a reference to the output array is returned. Nan is + returned for slices that contain only NaNs. + + See Also + -------- + average : Weighted average + mean : Arithmetic mean taken while not ignoring NaNs + var, nanvar + + Notes + ----- + The arithmetic mean is the sum of the non-NaN elements along the axis + divided by the number of non-NaN elements. + + Note that for floating-point input, the mean is computed using the same + precision the input has. Depending on the input data, this can cause + the results to be inaccurate, especially for `float32`. 
Specifying a
+    higher-precision accumulator using the `dtype` keyword can alleviate
+    this issue.
+
+    Examples
+    --------
+    >>> a = np.array([[1, np.nan], [3, 4]])
+    >>> np.nanmean(a)
+    2.6666666666666665
+    >>> np.nanmean(a, axis=0)
+    array([2., 4.])
+    >>> np.nanmean(a, axis=1)
+    array([1., 3.5]) # may vary
+
+    """
+    arr, mask = _replace_nan(a, 0)
+    if mask is None:
+        return np.mean(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
+
+    if dtype is not None:
+        dtype = np.dtype(dtype)
+    if dtype is not None and not issubclass(dtype.type, np.inexact):
+        raise TypeError("If a is inexact, then dtype must be inexact")
+    if out is not None and not issubclass(out.dtype.type, np.inexact):
+        raise TypeError("If a is inexact, then out must be inexact")
+
+    cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=keepdims)
+    tot = np.sum(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
+    avg = _divide_by_count(tot, cnt, out=out)
+
+    isbad = (cnt == 0)
+    if isbad.any():
+        warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=3)
+        # NaN is the only possible bad value, so no further
+        # action is needed to handle bad results.
+    return avg
+
+
+def _nanmedian1d(arr1d, overwrite_input=False):
+    """
+    Private function for rank 1 arrays. Compute the median ignoring NaNs.
+    See nanmedian for parameter usage.
+    """
+    arr1d_parsed, overwrite_input = _remove_nan_1d(
+        arr1d, overwrite_input=overwrite_input,
+    )
+
+    if arr1d_parsed.size == 0:
+        # Ensure that a nan-esque scalar of the appropriate type (and unit)
+        # is returned for `timedelta64` and `complexfloating`
+        return arr1d[-1]
+
+    return np.median(arr1d_parsed, overwrite_input=overwrite_input)
+
+
+def _nanmedian(a, axis=None, out=None, overwrite_input=False):
+    """
+    Private function that doesn't support extended axis or keepdims.
+    These methods are extended to this function using _ureduce.
+    See nanmedian for parameter usage.
+
+    """
+    if axis is None or a.ndim == 1:
+        part = a.ravel()
+        if out is None:
+            return _nanmedian1d(part, overwrite_input)
+        else:
+            out[...] = _nanmedian1d(part, overwrite_input)
+            return out
+    else:
+        # for small medians use sort + indexing which is still faster than
+        # apply_along_axis
+        # benchmarked with shuffled (50, 50, x) containing a few NaN
+        if a.shape[axis] < 600:
+            return _nanmedian_small(a, axis, out, overwrite_input)
+        result = np.apply_along_axis(_nanmedian1d, axis, a, overwrite_input)
+        if out is not None:
+            out[...] = result
+        return result
+
+
+def _nanmedian_small(a, axis=None, out=None, overwrite_input=False):
+    """
+    sort + indexing median, faster for small medians along multiple
+    dimensions due to the high overhead of apply_along_axis
+
+    see nanmedian for parameter usage
+    """
+    a = np.ma.masked_array(a, np.isnan(a))
+    m = np.ma.median(a, axis=axis, overwrite_input=overwrite_input)
+    for i in range(np.count_nonzero(m.mask.ravel())):
+        warnings.warn("All-NaN slice encountered", RuntimeWarning,
+                      stacklevel=4)
+
+    fill_value = np.timedelta64("NaT") if m.dtype.kind == "m" else np.nan
+    if out is not None:
+        out[...] = m.filled(fill_value)
+        return out
+    return m.filled(fill_value)
+
+
+def _nanmedian_dispatcher(
+        a, axis=None, out=None, overwrite_input=None, keepdims=None):
+    return (a, out)
+
+
+@array_function_dispatch(_nanmedian_dispatcher)
+def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValue):
+    """
+    Compute the median along the specified axis, while ignoring NaNs.
+
+    Returns the median of the array elements.
+
+    ..
versionadded:: 1.9.0 + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + axis : {int, sequence of int, None}, optional + Axis or axes along which the medians are computed. The default + is to compute the median along a flattened version of the array. + A sequence of axes is supported since version 1.9.0. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output, + but the type (of the output) will be cast if necessary. + overwrite_input : bool, optional + If True, then allow use of memory of input array `a` for + calculations. The input array will be modified by the call to + `median`. This will save memory when you do not need to preserve + the contents of the input array. Treat the input as undefined, + but it will probably be fully or partially sorted. Default is + False. If `overwrite_input` is ``True`` and `a` is not already an + `ndarray`, an error will be raised. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + If this is anything but the default value it will be passed + through (in the special case of an empty array) to the + `mean` function of the underlying array. If the array is + a sub-class and `mean` does not have the kwarg `keepdims` this + will raise a RuntimeError. + + Returns + ------- + median : ndarray + A new array holding the result. If the input contains integers + or floats smaller than ``float64``, then the output data-type is + ``np.float64``. Otherwise, the data-type of the output is the + same as that of the input. If `out` is specified, that array is + returned instead. + + See Also + -------- + mean, median, percentile + + Notes + ----- + Given a vector ``V`` of length ``N``, the median of ``V`` is the + middle value of a sorted copy of ``V``, ``V_sorted`` - i.e., + ``V_sorted[(N-1)/2]``, when ``N`` is odd and the average of the two + middle values of ``V_sorted`` when ``N`` is even. + + Examples + -------- + >>> a = np.array([[10.0, 7, 4], [3, 2, 1]]) + >>> a[0, 1] = np.nan + >>> a + array([[10., nan, 4.], + [ 3., 2., 1.]]) + >>> np.median(a) + nan + >>> np.nanmedian(a) + 3.0 + >>> np.nanmedian(a, axis=0) + array([6.5, 2. , 2.5]) + >>> np.median(a, axis=1) + array([nan, 2.]) + >>> b = a.copy() + >>> np.nanmedian(b, axis=1, overwrite_input=True) + array([7., 2.]) + >>> assert not np.all(a==b) + >>> b = a.copy() + >>> np.nanmedian(b, axis=None, overwrite_input=True) + 3.0 + >>> assert not np.all(a==b) + + """ + a = np.asanyarray(a) + # apply_along_axis in _nanmedian doesn't handle empty arrays well, + # so deal them upfront + if a.size == 0: + return np.nanmean(a, axis, out=out, keepdims=keepdims) + + r, k = function_base._ureduce(a, func=_nanmedian, axis=axis, out=out, + overwrite_input=overwrite_input) + if keepdims and keepdims is not np._NoValue: + return r.reshape(k) + else: + return r + + +def _nanpercentile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, + interpolation=None, keepdims=None): + return (a, q, out) + + +@array_function_dispatch(_nanpercentile_dispatcher) +def nanpercentile(a, q, axis=None, out=None, overwrite_input=False, + interpolation='linear', keepdims=np._NoValue): + """ + Compute the qth percentile of the data along the specified axis, + while ignoring nan values. 
+ + Returns the qth percentile(s) of the array elements. + + .. versionadded:: 1.9.0 + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array, containing + nan values to be ignored. + q : array_like of float + Percentile or sequence of percentiles to compute, which must be between + 0 and 100 inclusive. + axis : {int, tuple of int, None}, optional + Axis or axes along which the percentiles are computed. The + default is to compute the percentile(s) along a flattened + version of the array. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output, + but the type (of the output) will be cast if necessary. + overwrite_input : bool, optional + If True, then allow the input array `a` to be modified by intermediate + calculations, to save memory. In this case, the contents of the input + `a` after this function completes is undefined. + interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} + This optional parameter specifies the interpolation method to + use when the desired percentile lies between two data points + ``i < j``: + + * 'linear': ``i + (j - i) * fraction``, where ``fraction`` + is the fractional part of the index surrounded by ``i`` + and ``j``. + * 'lower': ``i``. + * 'higher': ``j``. + * 'nearest': ``i`` or ``j``, whichever is nearest. + * 'midpoint': ``(i + j) / 2``. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left in + the result as dimensions with size one. With this option, the + result will broadcast correctly against the original array `a`. + + If this is anything but the default value it will be passed + through (in the special case of an empty array) to the + `mean` function of the underlying array. If the array is + a sub-class and `mean` does not have the kwarg `keepdims` this + will raise a RuntimeError. + + Returns + ------- + percentile : scalar or ndarray + If `q` is a single percentile and `axis=None`, then the result + is a scalar. If multiple percentiles are given, first axis of + the result corresponds to the percentiles. The other axes are + the axes that remain after the reduction of `a`. If the input + contains integers or floats smaller than ``float64``, the output + data-type is ``float64``. Otherwise, the output data-type is the + same as that of the input. If `out` is specified, that array is + returned instead. + + See Also + -------- + nanmean + nanmedian : equivalent to ``nanpercentile(..., 50)`` + percentile, median, mean + nanquantile : equivalent to nanpercentile, but with q in the range [0, 1]. + + Notes + ----- + Given a vector ``V`` of length ``N``, the ``q``-th percentile of + ``V`` is the value ``q/100`` of the way from the minimum to the + maximum in a sorted copy of ``V``. The values and distances of + the two nearest neighbors as well as the `interpolation` parameter + will determine the percentile if the normalized ranking does not + match the location of ``q`` exactly. This function is the same as + the median if ``q=50``, the same as the minimum if ``q=0`` and the + same as the maximum if ``q=100``. + + Examples + -------- + >>> a = np.array([[10., 7., 4.], [3., 2., 1.]]) + >>> a[0][1] = np.nan + >>> a + array([[10., nan, 4.], + [ 3., 2., 1.]]) + >>> np.percentile(a, 50) + nan + >>> np.nanpercentile(a, 50) + 3.0 + >>> np.nanpercentile(a, 50, axis=0) + array([6.5, 2. 
, 2.5]) + >>> np.nanpercentile(a, 50, axis=1, keepdims=True) + array([[7.], + [2.]]) + >>> m = np.nanpercentile(a, 50, axis=0) + >>> out = np.zeros_like(m) + >>> np.nanpercentile(a, 50, axis=0, out=out) + array([6.5, 2. , 2.5]) + >>> m + array([6.5, 2. , 2.5]) + + >>> b = a.copy() + >>> np.nanpercentile(b, 50, axis=1, overwrite_input=True) + array([7., 2.]) + >>> assert not np.all(a==b) + + """ + a = np.asanyarray(a) + q = np.true_divide(q, 100.0) # handles the asarray for us too + if not function_base._quantile_is_valid(q): + raise ValueError("Percentiles must be in the range [0, 100]") + return _nanquantile_unchecked( + a, q, axis, out, overwrite_input, interpolation, keepdims) + + +def _nanquantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, + interpolation=None, keepdims=None): + return (a, q, out) + + +@array_function_dispatch(_nanquantile_dispatcher) +def nanquantile(a, q, axis=None, out=None, overwrite_input=False, + interpolation='linear', keepdims=np._NoValue): + """ + Compute the qth quantile of the data along the specified axis, + while ignoring nan values. + Returns the qth quantile(s) of the array elements. + + .. versionadded:: 1.15.0 + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array, containing + nan values to be ignored + q : array_like of float + Quantile or sequence of quantiles to compute, which must be between + 0 and 1 inclusive. + axis : {int, tuple of int, None}, optional + Axis or axes along which the quantiles are computed. The + default is to compute the quantile(s) along a flattened + version of the array. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output, + but the type (of the output) will be cast if necessary. + overwrite_input : bool, optional + If True, then allow the input array `a` to be modified by intermediate + calculations, to save memory. In this case, the contents of the input + `a` after this function completes is undefined. + interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} + This optional parameter specifies the interpolation method to + use when the desired quantile lies between two data points + ``i < j``: + + * linear: ``i + (j - i) * fraction``, where ``fraction`` + is the fractional part of the index surrounded by ``i`` + and ``j``. + * lower: ``i``. + * higher: ``j``. + * nearest: ``i`` or ``j``, whichever is nearest. + * midpoint: ``(i + j) / 2``. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left in + the result as dimensions with size one. With this option, the + result will broadcast correctly against the original array `a`. + + If this is anything but the default value it will be passed + through (in the special case of an empty array) to the + `mean` function of the underlying array. If the array is + a sub-class and `mean` does not have the kwarg `keepdims` this + will raise a RuntimeError. + + Returns + ------- + quantile : scalar or ndarray + If `q` is a single percentile and `axis=None`, then the result + is a scalar. If multiple quantiles are given, first axis of + the result corresponds to the quantiles. The other axes are + the axes that remain after the reduction of `a`. If the input + contains integers or floats smaller than ``float64``, the output + data-type is ``float64``. Otherwise, the output data-type is the + same as that of the input. 
If `out` is specified, that array is + returned instead. + + See Also + -------- + quantile + nanmean, nanmedian + nanmedian : equivalent to ``nanquantile(..., 0.5)`` + nanpercentile : same as nanquantile, but with q in the range [0, 100]. + + Examples + -------- + >>> a = np.array([[10., 7., 4.], [3., 2., 1.]]) + >>> a[0][1] = np.nan + >>> a + array([[10., nan, 4.], + [ 3., 2., 1.]]) + >>> np.quantile(a, 0.5) + nan + >>> np.nanquantile(a, 0.5) + 3.0 + >>> np.nanquantile(a, 0.5, axis=0) + array([6.5, 2. , 2.5]) + >>> np.nanquantile(a, 0.5, axis=1, keepdims=True) + array([[7.], + [2.]]) + >>> m = np.nanquantile(a, 0.5, axis=0) + >>> out = np.zeros_like(m) + >>> np.nanquantile(a, 0.5, axis=0, out=out) + array([6.5, 2. , 2.5]) + >>> m + array([6.5, 2. , 2.5]) + >>> b = a.copy() + >>> np.nanquantile(b, 0.5, axis=1, overwrite_input=True) + array([7., 2.]) + >>> assert not np.all(a==b) + """ + a = np.asanyarray(a) + q = np.asanyarray(q) + if not function_base._quantile_is_valid(q): + raise ValueError("Quantiles must be in the range [0, 1]") + return _nanquantile_unchecked( + a, q, axis, out, overwrite_input, interpolation, keepdims) + + +def _nanquantile_unchecked(a, q, axis=None, out=None, overwrite_input=False, + interpolation='linear', keepdims=np._NoValue): + """Assumes that q is in [0, 1], and is an ndarray""" + # apply_along_axis in _nanpercentile doesn't handle empty arrays well, + # so deal them upfront + if a.size == 0: + return np.nanmean(a, axis, out=out, keepdims=keepdims) + + r, k = function_base._ureduce( + a, func=_nanquantile_ureduce_func, q=q, axis=axis, out=out, + overwrite_input=overwrite_input, interpolation=interpolation + ) + if keepdims and keepdims is not np._NoValue: + return r.reshape(q.shape + k) + else: + return r + + +def _nanquantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False, + interpolation='linear'): + """ + Private function that doesn't support extended axis or keepdims. + These methods are extended to this function using _ureduce + See nanpercentile for parameter usage + """ + if axis is None or a.ndim == 1: + part = a.ravel() + result = _nanquantile_1d(part, q, overwrite_input, interpolation) + else: + result = np.apply_along_axis(_nanquantile_1d, axis, a, q, + overwrite_input, interpolation) + # apply_along_axis fills in collapsed axis with results. + # Move that axis to the beginning to match percentile's + # convention. + if q.ndim != 0: + result = np.moveaxis(result, axis, 0) + + if out is not None: + out[...] = result + return result + + +def _nanquantile_1d(arr1d, q, overwrite_input=False, interpolation='linear'): + """ + Private function for rank 1 arrays. Compute quantile ignoring NaNs. + See nanpercentile for parameter usage + """ + arr1d, overwrite_input = _remove_nan_1d(arr1d, + overwrite_input=overwrite_input) + if arr1d.size == 0: + return np.full(q.shape, np.nan)[()] # convert to scalar + + return function_base._quantile_unchecked( + arr1d, q, overwrite_input=overwrite_input, interpolation=interpolation) + + +def _nanvar_dispatcher( + a, axis=None, dtype=None, out=None, ddof=None, keepdims=None): + return (a, out) + + +@array_function_dispatch(_nanvar_dispatcher) +def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): + """ + Compute the variance along the specified axis, while ignoring NaNs. + + Returns the variance of the array elements, a measure of the spread of + a distribution. The variance is computed for the flattened array by + default, otherwise over the specified axis. 
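# Illustrative aside, not NumPy source: for a 1-D array, _nanquantile_1d above
# behaves like dropping NaNs and taking an ordinary quantile of what remains.
import numpy as np

def nanquantile_1d_sketch(arr1d, q):
    kept = arr1d[~np.isnan(arr1d)]               # cf. _remove_nan_1d
    if kept.size == 0:                           # all-NaN slice -> NaN
        return np.full(np.shape(q), np.nan)[()]
    return np.quantile(kept, q)

x = np.array([1.0, np.nan, 3.0, 2.0])
assert nanquantile_1d_sketch(x, 0.5) == np.nanquantile(x, 0.5) == 2.0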
+ + For all-NaN slices or slices with zero degrees of freedom, NaN is + returned and a `RuntimeWarning` is raised. + + .. versionadded:: 1.8.0 + + Parameters + ---------- + a : array_like + Array containing numbers whose variance is desired. If `a` is not an + array, a conversion is attempted. + axis : {int, tuple of int, None}, optional + Axis or axes along which the variance is computed. The default is to compute + the variance of the flattened array. + dtype : data-type, optional + Type to use in computing the variance. For arrays of integer type + the default is `float64`; for arrays of float types it is the same as + the array type. + out : ndarray, optional + Alternate output array in which to place the result. It must have + the same shape as the expected output, but the type is cast if + necessary. + ddof : int, optional + "Delta Degrees of Freedom": the divisor used in the calculation is + ``N - ddof``, where ``N`` represents the number of non-NaN + elements. By default `ddof` is zero. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + + Returns + ------- + variance : ndarray, see dtype parameter above + If `out` is None, return a new array containing the variance, + otherwise return a reference to the output array. If ddof is >= the + number of non-NaN elements in a slice or the slice contains only + NaNs, then the result for that slice is NaN. + + See Also + -------- + std : Standard deviation + mean : Average + var : Variance while not ignoring NaNs + nanstd, nanmean + :ref:`ufuncs-output-type` + + Notes + ----- + The variance is the average of the squared deviations from the mean, + i.e., ``var = mean(abs(x - x.mean())**2)``. + + The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``. + If, however, `ddof` is specified, the divisor ``N - ddof`` is used + instead. In standard statistical practice, ``ddof=1`` provides an + unbiased estimator of the variance of a hypothetical infinite + population. ``ddof=0`` provides a maximum likelihood estimate of the + variance for normally distributed variables. + + Note that for complex numbers, the absolute value is taken before + squaring, so that the result is always real and nonnegative. + + For floating-point input, the variance is computed using the same + precision the input has. Depending on the input data, this can cause + the results to be inaccurate, especially for `float32` (see example + below). Specifying a higher-accuracy accumulator using the ``dtype`` + keyword can alleviate this issue. 
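# Illustrative aside, not NumPy source: the precision note above in practice,
# accumulating a large float32 input in float64 via the dtype keyword; the
# exact numbers printed are indicative only.
import numpy as np

rng = np.random.default_rng(0)
a = rng.normal(loc=1000.0, scale=1.0, size=1_000_000).astype(np.float32)
a[::10] = np.nan                          # every tenth element is missing
v32 = np.nanvar(a)                        # float32 accumulation may drift
v64 = np.nanvar(a, dtype=np.float64)      # higher-accuracy accumulator, ~1.0
print(v32, v64)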
+ + For this function to work on sub-classes of ndarray, they must define + `sum` with the kwarg `keepdims` + + Examples + -------- + >>> a = np.array([[1, np.nan], [3, 4]]) + >>> np.nanvar(a) + 1.5555555555555554 + >>> np.nanvar(a, axis=0) + array([1., 0.]) + >>> np.nanvar(a, axis=1) + array([0., 0.25]) # may vary + + """ + arr, mask = _replace_nan(a, 0) + if mask is None: + return np.var(arr, axis=axis, dtype=dtype, out=out, ddof=ddof, + keepdims=keepdims) + + if dtype is not None: + dtype = np.dtype(dtype) + if dtype is not None and not issubclass(dtype.type, np.inexact): + raise TypeError("If a is inexact, then dtype must be inexact") + if out is not None and not issubclass(out.dtype.type, np.inexact): + raise TypeError("If a is inexact, then out must be inexact") + + # Compute mean + if type(arr) is np.matrix: + _keepdims = np._NoValue + else: + _keepdims = True + # we need to special case matrix for reverse compatibility + # in order for this to work, these sums need to be called with + # keepdims=True, however matrix now raises an error in this case, but + # the reason that it drops the keepdims kwarg is to force keepdims=True + # so this used to work by serendipity. + cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=_keepdims) + avg = np.sum(arr, axis=axis, dtype=dtype, keepdims=_keepdims) + avg = _divide_by_count(avg, cnt) + + # Compute squared deviation from mean. + np.subtract(arr, avg, out=arr, casting='unsafe') + arr = _copyto(arr, 0, mask) + if issubclass(arr.dtype.type, np.complexfloating): + sqr = np.multiply(arr, arr.conj(), out=arr).real + else: + sqr = np.multiply(arr, arr, out=arr) + + # Compute variance. + var = np.sum(sqr, axis=axis, dtype=dtype, out=out, keepdims=keepdims) + if var.ndim < cnt.ndim: + # Subclasses of ndarray may ignore keepdims, so check here. + cnt = cnt.squeeze(axis) + dof = cnt - ddof + var = _divide_by_count(var, dof) + + isbad = (dof <= 0) + if np.any(isbad): + warnings.warn("Degrees of freedom <= 0 for slice.", RuntimeWarning, + stacklevel=3) + # NaN, inf, or negative numbers are all possible bad + # values, so explicitly replace them with NaN. + var = _copyto(var, np.nan, isbad) + return var + + +def _nanstd_dispatcher( + a, axis=None, dtype=None, out=None, ddof=None, keepdims=None): + return (a, out) + + +@array_function_dispatch(_nanstd_dispatcher) +def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): + """ + Compute the standard deviation along the specified axis, while + ignoring NaNs. + + Returns the standard deviation, a measure of the spread of a + distribution, of the non-NaN array elements. The standard deviation is + computed for the flattened array by default, otherwise over the + specified axis. + + For all-NaN slices or slices with zero degrees of freedom, NaN is + returned and a `RuntimeWarning` is raised. + + .. versionadded:: 1.8.0 + + Parameters + ---------- + a : array_like + Calculate the standard deviation of the non-NaN values. + axis : {int, tuple of int, None}, optional + Axis or axes along which the standard deviation is computed. The default is + to compute the standard deviation of the flattened array. + dtype : dtype, optional + Type to use in computing the standard deviation. For arrays of + integer type the default is float64, for arrays of float types it + is the same as the array type. + out : ndarray, optional + Alternative output array in which to place the result. 
It must have + the same shape as the expected output but the type (of the + calculated values) will be cast if necessary. + ddof : int, optional + Means Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of non-NaN + elements. By default `ddof` is zero. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + If this value is anything but the default it is passed through + as-is to the relevant functions of the sub-classes. If these + functions do not have a `keepdims` kwarg, a RuntimeError will + be raised. + + Returns + ------- + standard_deviation : ndarray, see dtype parameter above. + If `out` is None, return a new array containing the standard + deviation, otherwise return a reference to the output array. If + ddof is >= the number of non-NaN elements in a slice or the slice + contains only NaNs, then the result for that slice is NaN. + + See Also + -------- + var, mean, std + nanvar, nanmean + :ref:`ufuncs-output-type` + + Notes + ----- + The standard deviation is the square root of the average of the squared + deviations from the mean: ``std = sqrt(mean(abs(x - x.mean())**2))``. + + The average squared deviation is normally calculated as + ``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is + specified, the divisor ``N - ddof`` is used instead. In standard + statistical practice, ``ddof=1`` provides an unbiased estimator of the + variance of the infinite population. ``ddof=0`` provides a maximum + likelihood estimate of the variance for normally distributed variables. + The standard deviation computed in this function is the square root of + the estimated variance, so even with ``ddof=1``, it will not be an + unbiased estimate of the standard deviation per se. + + Note that, for complex numbers, `std` takes the absolute value before + squaring, so that the result is always real and nonnegative. + + For floating-point input, the *std* is computed using the same + precision the input has. Depending on the input data, this can cause + the results to be inaccurate, especially for float32 (see example + below). Specifying a higher-accuracy accumulator using the `dtype` + keyword can alleviate this issue. + + Examples + -------- + >>> a = np.array([[1, np.nan], [3, 4]]) + >>> np.nanstd(a) + 1.247219128924647 + >>> np.nanstd(a, axis=0) + array([1., 0.]) + >>> np.nanstd(a, axis=1) + array([0., 0.5]) # may vary + + """ + var = nanvar(a, axis=axis, dtype=dtype, out=out, ddof=ddof, + keepdims=keepdims) + if isinstance(var, np.ndarray): + std = np.sqrt(var, out=var) + else: + std = var.dtype.type(np.sqrt(var)) + return std diff --git a/MLPY/Lib/site-packages/numpy/lib/nanfunctions.pyi b/MLPY/Lib/site-packages/numpy/lib/nanfunctions.pyi new file mode 100644 index 0000000000000000000000000000000000000000..4269e877941553fa9b22ca6732d0dc447853cdd3 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/nanfunctions.pyi @@ -0,0 +1,54 @@ +from typing import List + +__all__: List[str] + +def nanmin(a, axis=..., out=..., keepdims=...): ... +def nanmax(a, axis=..., out=..., keepdims=...): ... +def nanargmin(a, axis=...): ... +def nanargmax(a, axis=...): ... +def nansum(a, axis=..., dtype=..., out=..., keepdims=...): ... +def nanprod(a, axis=..., dtype=..., out=..., keepdims=...): ... +def nancumsum(a, axis=..., dtype=..., out=...): ... 
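# Illustrative aside, not NumPy source: nanstd above is the square root of
# nanvar for any ddof, so the two stay consistent on NaN-containing input.
import numpy as np

a = np.array([[1.0, np.nan], [3.0, 4.0]])
for ddof in (0, 1):
    assert np.isclose(np.nanstd(a, ddof=ddof),
                      np.sqrt(np.nanvar(a, ddof=ddof)))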
+def nancumprod(a, axis=..., dtype=..., out=...): ... +def nanmean(a, axis=..., dtype=..., out=..., keepdims=...): ... +def nanmedian( + a, + axis=..., + out=..., + overwrite_input=..., + keepdims=..., +): ... +def nanpercentile( + a, + q, + axis=..., + out=..., + overwrite_input=..., + interpolation=..., + keepdims=..., +): ... +def nanquantile( + a, + q, + axis=..., + out=..., + overwrite_input=..., + interpolation=..., + keepdims=..., +): ... +def nanvar( + a, + axis=..., + dtype=..., + out=..., + ddof=..., + keepdims=..., +): ... +def nanstd( + a, + axis=..., + dtype=..., + out=..., + ddof=..., + keepdims=..., +): ... diff --git a/MLPY/Lib/site-packages/numpy/lib/npyio.py b/MLPY/Lib/site-packages/numpy/lib/npyio.py new file mode 100644 index 0000000000000000000000000000000000000000..90a4c191c58e95eb758f02cf4115699dccba8b57 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/npyio.py @@ -0,0 +1,2415 @@ +import sys +import os +import re +import functools +import itertools +import warnings +import weakref +import contextlib +from operator import itemgetter, index as opindex +from collections.abc import Mapping + +import numpy as np +from . import format +from ._datasource import DataSource +from numpy.core import overrides +from numpy.core.multiarray import packbits, unpackbits +from numpy.core.overrides import set_array_function_like_doc, set_module +from numpy.core._internal import recursive +from ._iotools import ( + LineSplitter, NameValidator, StringConverter, ConverterError, + ConverterLockError, ConversionWarning, _is_string_like, + has_nested_fields, flatten_dtype, easy_dtype, _decode_line + ) + +from numpy.compat import ( + asbytes, asstr, asunicode, os_fspath, os_PathLike, + pickle + ) + + +@set_module('numpy') +def loads(*args, **kwargs): + # NumPy 1.15.0, 2017-12-10 + warnings.warn( + "np.loads is deprecated, use pickle.loads instead", + DeprecationWarning, stacklevel=2) + return pickle.loads(*args, **kwargs) + + +__all__ = [ + 'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt', + 'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez', + 'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource' + ] + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +class BagObj: + """ + BagObj(obj) + + Convert attribute look-ups to getitems on the object passed in. + + Parameters + ---------- + obj : class instance + Object on which attribute look-up is performed. + + Examples + -------- + >>> from numpy.lib.npyio import BagObj as BO + >>> class BagDemo: + ... def __getitem__(self, key): # An instance of BagObj(BagDemo) + ... # will call this method when any + ... # attribute look-up is required + ... result = "Doesn't matter what you want, " + ... return result + "you're gonna get this" + ... + >>> demo_obj = BagDemo() + >>> bagobj = BO(demo_obj) + >>> bagobj.hello_there + "Doesn't matter what you want, you're gonna get this" + >>> bagobj.I_can_be_anything + "Doesn't matter what you want, you're gonna get this" + + """ + + def __init__(self, obj): + # Use weakref to make NpzFile objects collectable by refcount + self._obj = weakref.proxy(obj) + + def __getattribute__(self, key): + try: + return object.__getattribute__(self, '_obj')[key] + except KeyError: + raise AttributeError(key) from None + + def __dir__(self): + """ + Enables dir(bagobj) to list the files in an NpzFile. + + This also enables tab-completion in an interpreter or IPython. 
+ """ + return list(object.__getattribute__(self, '_obj').keys()) + + +def zipfile_factory(file, *args, **kwargs): + """ + Create a ZipFile. + + Allows for Zip64, and the `file` argument can accept file, str, or + pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile + constructor. + """ + if not hasattr(file, 'read'): + file = os_fspath(file) + import zipfile + kwargs['allowZip64'] = True + return zipfile.ZipFile(file, *args, **kwargs) + + +class NpzFile(Mapping): + """ + NpzFile(fid) + + A dictionary-like object with lazy-loading of files in the zipped + archive provided on construction. + + `NpzFile` is used to load files in the NumPy ``.npz`` data archive + format. It assumes that files in the archive have a ``.npy`` extension, + other files are ignored. + + The arrays and file strings are lazily loaded on either + getitem access using ``obj['key']`` or attribute lookup using + ``obj.f.key``. A list of all files (without ``.npy`` extensions) can + be obtained with ``obj.files`` and the ZipFile object itself using + ``obj.zip``. + + Attributes + ---------- + files : list of str + List of all files in the archive with a ``.npy`` extension. + zip : ZipFile instance + The ZipFile object initialized with the zipped archive. + f : BagObj instance + An object on which attribute can be performed as an alternative + to getitem access on the `NpzFile` instance itself. + allow_pickle : bool, optional + Allow loading pickled data. Default: False + + .. versionchanged:: 1.16.3 + Made default False in response to CVE-2019-6446. + + pickle_kwargs : dict, optional + Additional keyword arguments to pass on to pickle.load. + These are only useful when loading object arrays saved on + Python 2 when using Python 3. + + Parameters + ---------- + fid : file or str + The zipped archive to open. This is either a file-like object + or a string containing the path to the archive. + own_fid : bool, optional + Whether NpzFile should close the file handle. + Requires that `fid` is a file-like object. + + Examples + -------- + >>> from tempfile import TemporaryFile + >>> outfile = TemporaryFile() + >>> x = np.arange(10) + >>> y = np.sin(x) + >>> np.savez(outfile, x=x, y=y) + >>> _ = outfile.seek(0) + + >>> npz = np.load(outfile) + >>> isinstance(npz, np.lib.io.NpzFile) + True + >>> sorted(npz.files) + ['x', 'y'] + >>> npz['x'] # getitem access + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> npz.f.x # attribute lookup + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + + """ + # Make __exit__ safe if zipfile_factory raises an exception + zip = None + fid = None + + def __init__(self, fid, own_fid=False, allow_pickle=False, + pickle_kwargs=None): + # Import is postponed to here since zipfile depends on gzip, an + # optional component of the so-called standard library. + _zip = zipfile_factory(fid) + self._files = _zip.namelist() + self.files = [] + self.allow_pickle = allow_pickle + self.pickle_kwargs = pickle_kwargs + for x in self._files: + if x.endswith('.npy'): + self.files.append(x[:-4]) + else: + self.files.append(x) + self.zip = _zip + self.f = BagObj(self) + if own_fid: + self.fid = fid + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + + def close(self): + """ + Close the file. 
+ + """ + if self.zip is not None: + self.zip.close() + self.zip = None + if self.fid is not None: + self.fid.close() + self.fid = None + self.f = None # break reference cycle + + def __del__(self): + self.close() + + # Implement the Mapping ABC + def __iter__(self): + return iter(self.files) + + def __len__(self): + return len(self.files) + + def __getitem__(self, key): + # FIXME: This seems like it will copy strings around + # more than is strictly necessary. The zipfile + # will read the string and then + # the format.read_array will copy the string + # to another place in memory. + # It would be better if the zipfile could read + # (or at least uncompress) the data + # directly into the array memory. + member = False + if key in self._files: + member = True + elif key in self.files: + member = True + key += '.npy' + if member: + bytes = self.zip.open(key) + magic = bytes.read(len(format.MAGIC_PREFIX)) + bytes.close() + if magic == format.MAGIC_PREFIX: + bytes = self.zip.open(key) + return format.read_array(bytes, + allow_pickle=self.allow_pickle, + pickle_kwargs=self.pickle_kwargs) + else: + return self.zip.read(key) + else: + raise KeyError("%s is not a file in the archive" % key) + + + # deprecate the python 2 dict apis that we supported by accident in + # python 3. We forgot to implement itervalues() at all in earlier + # versions of numpy, so no need to deprecated it here. + + def iteritems(self): + # Numpy 1.15, 2018-02-20 + warnings.warn( + "NpzFile.iteritems is deprecated in python 3, to match the " + "removal of dict.itertems. Use .items() instead.", + DeprecationWarning, stacklevel=2) + return self.items() + + def iterkeys(self): + # Numpy 1.15, 2018-02-20 + warnings.warn( + "NpzFile.iterkeys is deprecated in python 3, to match the " + "removal of dict.iterkeys. Use .keys() instead.", + DeprecationWarning, stacklevel=2) + return self.keys() + + +@set_module('numpy') +def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True, + encoding='ASCII'): + """ + Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files. + + .. warning:: Loading files that contain object arrays uses the ``pickle`` + module, which is not secure against erroneous or maliciously + constructed data. Consider passing ``allow_pickle=False`` to + load data that is known not to contain object arrays for the + safer handling of untrusted sources. + + Parameters + ---------- + file : file-like object, string, or pathlib.Path + The file to read. File-like objects must support the + ``seek()`` and ``read()`` methods. Pickled files require that the + file-like object support the ``readline()`` method as well. + mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional + If not None, then memory-map the file, using the given mode (see + `numpy.memmap` for a detailed description of the modes). A + memory-mapped array is kept on disk. However, it can be accessed + and sliced like any ndarray. Memory mapping is especially useful + for accessing small fragments of large files without reading the + entire file into memory. + allow_pickle : bool, optional + Allow loading pickled object arrays stored in npy files. Reasons for + disallowing pickles include security, as loading pickled data can + execute arbitrary code. If pickles are disallowed, loading object + arrays will fail. Default: False + + .. versionchanged:: 1.16.3 + Made default False in response to CVE-2019-6446. 
+ + fix_imports : bool, optional + Only useful when loading Python 2 generated pickled files on Python 3, + which includes npy/npz files containing object arrays. If `fix_imports` + is True, pickle will try to map the old Python 2 names to the new names + used in Python 3. + encoding : str, optional + What encoding to use when reading Python 2 strings. Only useful when + loading Python 2 generated pickled files in Python 3, which includes + npy/npz files containing object arrays. Values other than 'latin1', + 'ASCII', and 'bytes' are not allowed, as they can corrupt numerical + data. Default: 'ASCII' + + Returns + ------- + result : array, tuple, dict, etc. + Data stored in the file. For ``.npz`` files, the returned instance + of NpzFile class must be closed to avoid leaking file descriptors. + + Raises + ------ + IOError + If the input file does not exist or cannot be read. + ValueError + The file contains an object array, but allow_pickle=False given. + + See Also + -------- + save, savez, savez_compressed, loadtxt + memmap : Create a memory-map to an array stored in a file on disk. + lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file. + + Notes + ----- + - If the file contains pickle data, then whatever object is stored + in the pickle is returned. + - If the file is a ``.npy`` file, then a single array is returned. + - If the file is a ``.npz`` file, then a dictionary-like object is + returned, containing ``{filename: array}`` key-value pairs, one for + each file in the archive. + - If the file is a ``.npz`` file, the returned value supports the + context manager protocol in a similar fashion to the open function:: + + with load('foo.npz') as data: + a = data['a'] + + The underlying file descriptor is closed when exiting the 'with' + block. + + Examples + -------- + Store data to disk, and load it again: + + >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]])) + >>> np.load('/tmp/123.npy') + array([[1, 2, 3], + [4, 5, 6]]) + + Store compressed data to disk, and load it again: + + >>> a=np.array([[1, 2, 3], [4, 5, 6]]) + >>> b=np.array([1, 2]) + >>> np.savez('/tmp/123.npz', a=a, b=b) + >>> data = np.load('/tmp/123.npz') + >>> data['a'] + array([[1, 2, 3], + [4, 5, 6]]) + >>> data['b'] + array([1, 2]) + >>> data.close() + + Mem-map the stored array, and then access the second row + directly from disk: + + >>> X = np.load('/tmp/123.npy', mmap_mode='r') + >>> X[1, :] + memmap([4, 5, 6]) + + """ + if encoding not in ('ASCII', 'latin1', 'bytes'): + # The 'encoding' value for pickle also affects what encoding + # the serialized binary data of NumPy arrays is loaded + # in. Pickle does not pass on the encoding information to + # NumPy. The unpickling code in numpy.core.multiarray is + # written to assume that unicode data appearing where binary + # should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'. + # + # Other encoding values can corrupt binary data, and we + # purposefully disallow them. For the same reason, the errors= + # argument is not exposed, as values other than 'strict' + # result can similarly silently corrupt numerical data. + raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'") + + pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports) + + with contextlib.ExitStack() as stack: + if hasattr(file, 'read'): + fid = file + own_fid = False + else: + fid = stack.enter_context(open(os_fspath(file), "rb")) + own_fid = True + + # Code to distinguish from NumPy binary files and pickles. 
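# Illustrative aside (kept entirely in comments because this point sits inside
# load()): the dispatch below sniffs a few leading magic bytes, e.g.:
#
#     import io
#     import numpy as np
#     buf = io.BytesIO()
#     np.save(buf, np.arange(3))
#     buf.seek(0)
#     assert buf.read(6) == b'\x93NUMPY'   # format.MAGIC_PREFIX, i.e. .npy
#     # a .npz archive would begin with the zip signature b'PK\x03\x04'
#     # instead, and anything else falls through to the pickle branch.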
+ _ZIP_PREFIX = b'PK\x03\x04' + _ZIP_SUFFIX = b'PK\x05\x06' # empty zip files start with this + N = len(format.MAGIC_PREFIX) + magic = fid.read(N) + # If the file size is less than N, we need to make sure not + # to seek past the beginning of the file + fid.seek(-min(N, len(magic)), 1) # back-up + if magic.startswith(_ZIP_PREFIX) or magic.startswith(_ZIP_SUFFIX): + # zip-file (assume .npz) + # Potentially transfer file ownership to NpzFile + stack.pop_all() + ret = NpzFile(fid, own_fid=own_fid, allow_pickle=allow_pickle, + pickle_kwargs=pickle_kwargs) + return ret + elif magic == format.MAGIC_PREFIX: + # .npy file + if mmap_mode: + return format.open_memmap(file, mode=mmap_mode) + else: + return format.read_array(fid, allow_pickle=allow_pickle, + pickle_kwargs=pickle_kwargs) + else: + # Try a pickle + if not allow_pickle: + raise ValueError("Cannot load file containing pickled data " + "when allow_pickle=False") + try: + return pickle.load(fid, **pickle_kwargs) + except Exception as e: + raise IOError( + "Failed to interpret file %s as a pickle" % repr(file)) from e + + +def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None): + return (arr,) + + +@array_function_dispatch(_save_dispatcher) +def save(file, arr, allow_pickle=True, fix_imports=True): + """ + Save an array to a binary file in NumPy ``.npy`` format. + + Parameters + ---------- + file : file, str, or pathlib.Path + File or filename to which the data is saved. If file is a file-object, + then the filename is unchanged. If file is a string or Path, a ``.npy`` + extension will be appended to the filename if it does not already + have one. + arr : array_like + Array data to be saved. + allow_pickle : bool, optional + Allow saving object arrays using Python pickles. Reasons for disallowing + pickles include security (loading pickled data can execute arbitrary + code) and portability (pickled objects may not be loadable on different + Python installations, for example if the stored objects require libraries + that are not available, and not all pickled data is compatible between + Python 2 and Python 3). + Default: True + fix_imports : bool, optional + Only useful in forcing objects in object arrays on Python 3 to be + pickled in a Python 2 compatible way. If `fix_imports` is True, pickle + will try to map the new Python 3 names to the old module names used in + Python 2, so that the pickle data stream is readable with Python 2. + + See Also + -------- + savez : Save several arrays into a ``.npz`` archive + savetxt, load + + Notes + ----- + For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`. + + Any data saved to the file is appended to the end of the file. + + Examples + -------- + >>> from tempfile import TemporaryFile + >>> outfile = TemporaryFile() + + >>> x = np.arange(10) + >>> np.save(outfile, x) + + >>> _ = outfile.seek(0) # Only needed here to simulate closing & reopening file + >>> np.load(outfile) + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + + + >>> with open('test.npy', 'wb') as f: + ... np.save(f, np.array([1, 2])) + ... np.save(f, np.array([1, 3])) + >>> with open('test.npy', 'rb') as f: + ... a = np.load(f) + ... 
b = np.load(f) + >>> print(a, b) + # [1 2] [1 3] + """ + if hasattr(file, 'write'): + file_ctx = contextlib.nullcontext(file) + else: + file = os_fspath(file) + if not file.endswith('.npy'): + file = file + '.npy' + file_ctx = open(file, "wb") + + with file_ctx as fid: + arr = np.asanyarray(arr) + format.write_array(fid, arr, allow_pickle=allow_pickle, + pickle_kwargs=dict(fix_imports=fix_imports)) + + +def _savez_dispatcher(file, *args, **kwds): + yield from args + yield from kwds.values() + + +@array_function_dispatch(_savez_dispatcher) +def savez(file, *args, **kwds): + """Save several arrays into a single file in uncompressed ``.npz`` format. + + Provide arrays as keyword arguments to store them under the + corresponding name in the output file: ``savez(fn, x=x, y=y)``. + + If arrays are specified as positional arguments, i.e., ``savez(fn, + x, y)``, their names will be `arr_0`, `arr_1`, etc. + + Parameters + ---------- + file : str or file + Either the filename (string) or an open file (file-like object) + where the data will be saved. If file is a string or a Path, the + ``.npz`` extension will be appended to the filename if it is not + already there. + args : Arguments, optional + Arrays to save to the file. Please use keyword arguments (see + `kwds` below) to assign names to arrays. Arrays specified as + args will be named "arr_0", "arr_1", and so on. + kwds : Keyword arguments, optional + Arrays to save to the file. Each array will be saved to the + output file with its corresponding keyword name. + + Returns + ------- + None + + See Also + -------- + save : Save a single array to a binary file in NumPy format. + savetxt : Save an array to a file as plain text. + savez_compressed : Save several arrays into a compressed ``.npz`` archive + + Notes + ----- + The ``.npz`` file format is a zipped archive of files named after the + variables they contain. The archive is not compressed and each file + in the archive contains one variable in ``.npy`` format. For a + description of the ``.npy`` format, see :py:mod:`numpy.lib.format`. + + When opening the saved ``.npz`` file with `load` a `NpzFile` object is + returned. This is a dictionary-like object which can be queried for + its list of arrays (with the ``.files`` attribute), and for the arrays + themselves. + + When saving dictionaries, the dictionary keys become filenames + inside the ZIP archive. Therefore, keys should be valid filenames. + E.g., avoid keys that begin with ``/`` or contain ``.``. + + Examples + -------- + >>> from tempfile import TemporaryFile + >>> outfile = TemporaryFile() + >>> x = np.arange(10) + >>> y = np.sin(x) + + Using `savez` with \\*args, the arrays are saved with default names. + + >>> np.savez(outfile, x, y) + >>> _ = outfile.seek(0) # Only needed here to simulate closing & reopening file + >>> npzfile = np.load(outfile) + >>> npzfile.files + ['arr_0', 'arr_1'] + >>> npzfile['arr_0'] + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + + Using `savez` with \\**kwds, the arrays are saved with the keyword names. 
+ + >>> outfile = TemporaryFile() + >>> np.savez(outfile, x=x, y=y) + >>> _ = outfile.seek(0) + >>> npzfile = np.load(outfile) + >>> sorted(npzfile.files) + ['x', 'y'] + >>> npzfile['x'] + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + + """ + _savez(file, args, kwds, False) + + +def _savez_compressed_dispatcher(file, *args, **kwds): + yield from args + yield from kwds.values() + + +@array_function_dispatch(_savez_compressed_dispatcher) +def savez_compressed(file, *args, **kwds): + """ + Save several arrays into a single file in compressed ``.npz`` format. + + Provide arrays as keyword arguments to store them under the + corresponding name in the output file: ``savez(fn, x=x, y=y)``. + + If arrays are specified as positional arguments, i.e., ``savez(fn, + x, y)``, their names will be `arr_0`, `arr_1`, etc. + + Parameters + ---------- + file : str or file + Either the filename (string) or an open file (file-like object) + where the data will be saved. If file is a string or a Path, the + ``.npz`` extension will be appended to the filename if it is not + already there. + args : Arguments, optional + Arrays to save to the file. Please use keyword arguments (see + `kwds` below) to assign names to arrays. Arrays specified as + args will be named "arr_0", "arr_1", and so on. + kwds : Keyword arguments, optional + Arrays to save to the file. Each array will be saved to the + output file with its corresponding keyword name. + + Returns + ------- + None + + See Also + -------- + numpy.save : Save a single array to a binary file in NumPy format. + numpy.savetxt : Save an array to a file as plain text. + numpy.savez : Save several arrays into an uncompressed ``.npz`` file format + numpy.load : Load the files created by savez_compressed. + + Notes + ----- + The ``.npz`` file format is a zipped archive of files named after the + variables they contain. The archive is compressed with + ``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable + in ``.npy`` format. For a description of the ``.npy`` format, see + :py:mod:`numpy.lib.format`. + + + When opening the saved ``.npz`` file with `load` a `NpzFile` object is + returned. This is a dictionary-like object which can be queried for + its list of arrays (with the ``.files`` attribute), and for the arrays + themselves. + + Examples + -------- + >>> test_array = np.random.rand(3, 2) + >>> test_vector = np.random.rand(4) + >>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector) + >>> loaded = np.load('/tmp/123.npz') + >>> print(np.array_equal(test_array, loaded['a'])) + True + >>> print(np.array_equal(test_vector, loaded['b'])) + True + + """ + _savez(file, args, kwds, True) + + +def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None): + # Import is postponed to here since zipfile depends on gzip, an optional + # component of the so-called standard library. 
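# Illustrative aside (as comments, since this point sits inside _savez): the
# compress flag below only switches the zip compression method, e.g.:
#
#     import io
#     import numpy as np
#     a = np.zeros(100_000)
#     raw, packed = io.BytesIO(), io.BytesIO()
#     np.savez(raw, a=a)                   # ZIP_STORED
#     np.savez_compressed(packed, a=a)     # ZIP_DEFLATED
#     assert packed.getbuffer().nbytes < raw.getbuffer().nbytes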
+ import zipfile + + if not hasattr(file, 'write'): + file = os_fspath(file) + if not file.endswith('.npz'): + file = file + '.npz' + + namedict = kwds + for i, val in enumerate(args): + key = 'arr_%d' % i + if key in namedict.keys(): + raise ValueError( + "Cannot use un-named variables and keyword %s" % key) + namedict[key] = val + + if compress: + compression = zipfile.ZIP_DEFLATED + else: + compression = zipfile.ZIP_STORED + + zipf = zipfile_factory(file, mode="w", compression=compression) + + for key, val in namedict.items(): + fname = key + '.npy' + val = np.asanyarray(val) + # always force zip64, gh-10776 + with zipf.open(fname, 'w', force_zip64=True) as fid: + format.write_array(fid, val, + allow_pickle=allow_pickle, + pickle_kwargs=pickle_kwargs) + + zipf.close() + + +def _getconv(dtype): + """ Find the correct dtype converter. Adapted from matplotlib """ + + def floatconv(x): + x.lower() + if '0x' in x: + return float.fromhex(x) + return float(x) + + typ = dtype.type + if issubclass(typ, np.bool_): + return lambda x: bool(int(x)) + if issubclass(typ, np.uint64): + return np.uint64 + if issubclass(typ, np.int64): + return np.int64 + if issubclass(typ, np.integer): + return lambda x: int(float(x)) + elif issubclass(typ, np.longdouble): + return np.longdouble + elif issubclass(typ, np.floating): + return floatconv + elif issubclass(typ, complex): + return lambda x: complex(asstr(x).replace('+-', '-')) + elif issubclass(typ, np.bytes_): + return asbytes + elif issubclass(typ, np.unicode_): + return asunicode + else: + return asstr + + +# amount of lines loadtxt reads in one chunk, can be overridden for testing +_loadtxt_chunksize = 50000 + + +def _loadtxt_dispatcher(fname, dtype=None, comments=None, delimiter=None, + converters=None, skiprows=None, usecols=None, unpack=None, + ndmin=None, encoding=None, max_rows=None, *, like=None): + return (like,) + + +@set_array_function_like_doc +@set_module('numpy') +def loadtxt(fname, dtype=float, comments='#', delimiter=None, + converters=None, skiprows=0, usecols=None, unpack=False, + ndmin=0, encoding='bytes', max_rows=None, *, like=None): + r""" + Load data from a text file. + + Each row in the text file must have the same number of values. + + Parameters + ---------- + fname : file, str, or pathlib.Path + File, filename, or generator to read. If the filename extension is + ``.gz`` or ``.bz2``, the file is first decompressed. Note that + generators should return byte strings. + dtype : data-type, optional + Data-type of the resulting array; default: float. If this is a + structured data-type, the resulting array will be 1-dimensional, and + each row will be interpreted as an element of the array. In this + case, the number of columns used must match the number of fields in + the data-type. + comments : str or sequence of str, optional + The characters or list of characters used to indicate the start of a + comment. None implies no comments. For backwards compatibility, byte + strings will be decoded as 'latin1'. The default is '#'. + delimiter : str, optional + The string used to separate values. For backwards compatibility, byte + strings will be decoded as 'latin1'. The default is whitespace. + converters : dict, optional + A dictionary mapping column number to a function that will parse the + column string into the desired value. E.g., if column 0 is a date + string: ``converters = {0: datestr2num}``. 
Converters can also be + used to provide a default value for missing data (but see also + `genfromtxt`): ``converters = {3: lambda s: float(s.strip() or 0)}``. + Default: None. + skiprows : int, optional + Skip the first `skiprows` lines, including comments; default: 0. + usecols : int or sequence, optional + Which columns to read, with 0 being the first. For example, + ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns. + The default, None, results in all columns being read. + + .. versionchanged:: 1.11.0 + When a single column has to be read it is possible to use + an integer instead of a tuple. E.g. ``usecols = 3`` reads the + fourth column the same way as ``usecols = (3,)`` would. + unpack : bool, optional + If True, the returned array is transposed, so that arguments may be + unpacked using ``x, y, z = loadtxt(...)``. When used with a + structured data-type, arrays are returned for each field. + Default is False. + ndmin : int, optional + The returned array will have at least `ndmin` dimensions. + Otherwise mono-dimensional axes will be squeezed. + Legal values: 0 (default), 1 or 2. + + .. versionadded:: 1.6.0 + encoding : str, optional + Encoding used to decode the inputfile. Does not apply to input streams. + The special value 'bytes' enables backward compatibility workarounds + that ensure you receive byte arrays as results if possible and pass + 'latin1' encoded strings to converters. Override this value to receive + unicode arrays and pass strings as input to converters. If set to None + the system default is used. The default value is 'bytes'. + + .. versionadded:: 1.14.0 + max_rows : int, optional + Read `max_rows` lines of content after `skiprows` lines. The default + is to read all the lines. + + .. versionadded:: 1.16.0 + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + Data read from the text file. + + See Also + -------- + load, fromstring, fromregex + genfromtxt : Load data with missing values handled as specified. + scipy.io.loadmat : reads MATLAB data files + + Notes + ----- + This function aims to be a fast reader for simply formatted files. The + `genfromtxt` function provides more sophisticated handling of, e.g., + lines with missing values. + + .. versionadded:: 1.10.0 + + The strings produced by the Python float.hex method can be used as + input for floats. + + Examples + -------- + >>> from io import StringIO # StringIO behaves like a file object + >>> c = StringIO("0 1\n2 3") + >>> np.loadtxt(c) + array([[0., 1.], + [2., 3.]]) + + >>> d = StringIO("M 21 72\nF 35 58") + >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'), + ... 'formats': ('S1', 'i4', 'f4')}) + array([(b'M', 21, 72.), (b'F', 35, 58.)], + dtype=[('gender', 'S1'), ('age', '<i4'), ('weight', '<f4')]) + + >>> c = StringIO("1,0,2\n3,0,4") + >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True) + >>> x + array([1., 3.]) + >>> y + array([2., 4.]) + + This example shows how `converters` can be used to convert a field + with a trailing minus sign into a negative number. + + >>> s = StringIO('10.01 31.25-\n19.22 64.31\n17.57- 63.94') + >>> def conv(fld): + ... return -float(fld[:-1]) if fld.endswith(b'-') else float(fld) + ...
+ >>> np.loadtxt(s, converters={0: conv, 1: conv}) + array([[ 10.01, -31.25], + [ 19.22, 64.31], + [-17.57, 63.94]]) + """ + + if like is not None: + return _loadtxt_with_like( + fname, dtype=dtype, comments=comments, delimiter=delimiter, + converters=converters, skiprows=skiprows, usecols=usecols, + unpack=unpack, ndmin=ndmin, encoding=encoding, + max_rows=max_rows, like=like + ) + + # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + # Nested functions used by loadtxt. + # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + # not to be confused with the flatten_dtype we import... + @recursive + def flatten_dtype_internal(self, dt): + """Unpack a structured data-type, and produce re-packing info.""" + if dt.names is None: + # If the dtype is flattened, return. + # If the dtype has a shape, the dtype occurs + # in the list more than once. + shape = dt.shape + if len(shape) == 0: + return ([dt.base], None) + else: + packing = [(shape[-1], list)] + if len(shape) > 1: + for dim in dt.shape[-2::-1]: + packing = [(dim*packing[0][0], packing*dim)] + return ([dt.base] * int(np.prod(dt.shape)), packing) + else: + types = [] + packing = [] + for field in dt.names: + tp, bytes = dt.fields[field] + flat_dt, flat_packing = self(tp) + types.extend(flat_dt) + # Avoid extra nesting for subarrays + if tp.ndim > 0: + packing.extend(flat_packing) + else: + packing.append((len(flat_dt), flat_packing)) + return (types, packing) + + @recursive + def pack_items(self, items, packing): + """Pack items into nested lists based on re-packing info.""" + if packing is None: + return items[0] + elif packing is tuple: + return tuple(items) + elif packing is list: + return list(items) + else: + start = 0 + ret = [] + for length, subpacking in packing: + ret.append(self(items[start:start+length], subpacking)) + start += length + return tuple(ret) + + def split_line(line): + """Chop off comments, strip, and split at delimiter. """ + line = _decode_line(line, encoding=encoding) + + if comments is not None: + line = regex_comments.split(line, maxsplit=1)[0] + line = line.strip('\r\n') + return line.split(delimiter) if line else [] + + def read_data(chunk_size): + """Parse each line, including the first. + + The file read, `fh`, is a global defined above. + + Parameters + ---------- + chunk_size : int + At most `chunk_size` lines are read at a time, with iteration + until all lines are read. + + """ + X = [] + line_iter = itertools.chain([first_line], fh) + line_iter = itertools.islice(line_iter, max_rows) + for i, line in enumerate(line_iter): + vals = split_line(line) + if len(vals) == 0: + continue + if usecols: + vals = [vals[j] for j in usecols] + if len(vals) != N: + line_num = i + skiprows + 1 + raise ValueError("Wrong number of columns at line %d" + % line_num) + + # Convert each value according to its column and store + items = [conv(val) for (conv, val) in zip(converters, vals)] + + # Then pack it according to the dtype's nesting + items = pack_items(items, packing) + X.append(items) + if len(X) > chunk_size: + yield X + X = [] + if X: + yield X + + # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + # Main body of loadtxt. 
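# Illustrative aside (as comments, since this point sits inside loadtxt): the
# batching pattern used by read_data above, in isolation:
#
#     def chunks(iterable, chunk_size):
#         batch = []
#         for item in iterable:
#             batch.append(item)
#             if len(batch) >= chunk_size:
#                 yield batch
#                 batch = []
#         if batch:
#             yield batch          # trailing partial batch
#
#     assert list(chunks(range(5), 2)) == [[0, 1], [2, 3], [4]]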
+ # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + # Check correctness of the values of `ndmin` + if ndmin not in [0, 1, 2]: + raise ValueError('Illegal value of ndmin keyword: %s' % ndmin) + + # Type conversions for Py3 convenience + if comments is not None: + if isinstance(comments, (str, bytes)): + comments = [comments] + comments = [_decode_line(x) for x in comments] + # Compile regex for comments beforehand + comments = (re.escape(comment) for comment in comments) + regex_comments = re.compile('|'.join(comments)) + + if delimiter is not None: + delimiter = _decode_line(delimiter) + + user_converters = converters + + byte_converters = False + if encoding == 'bytes': + encoding = None + byte_converters = True + + if usecols is not None: + # Allow usecols to be a single int or a sequence of ints + try: + usecols_as_list = list(usecols) + except TypeError: + usecols_as_list = [usecols] + for col_idx in usecols_as_list: + try: + opindex(col_idx) + except TypeError as e: + e.args = ( + "usecols must be an int or a sequence of ints but " + "it contains at least one element of type %s" % + type(col_idx), + ) + raise + # Fall back to existing code + usecols = usecols_as_list + + # Make sure we're dealing with a proper dtype + dtype = np.dtype(dtype) + defconv = _getconv(dtype) + + dtype_types, packing = flatten_dtype_internal(dtype) + + fown = False + try: + if isinstance(fname, os_PathLike): + fname = os_fspath(fname) + if _is_string_like(fname): + fh = np.lib._datasource.open(fname, 'rt', encoding=encoding) + fencoding = getattr(fh, 'encoding', 'latin1') + fh = iter(fh) + fown = True + else: + fh = iter(fname) + fencoding = getattr(fname, 'encoding', 'latin1') + except TypeError as e: + raise ValueError( + 'fname must be a string, file handle, or generator' + ) from e + + # input may be a python2 io stream + if encoding is not None: + fencoding = encoding + # we must assume local encoding + # TODO emit portability warning? + elif fencoding is None: + import locale + fencoding = locale.getpreferredencoding() + + try: + # Skip the first `skiprows` lines + for i in range(skiprows): + next(fh) + + # Read until we find a line with some values, and use + # it to estimate the number of columns, N. + first_vals = None + try: + while not first_vals: + first_line = next(fh) + first_vals = split_line(first_line) + except StopIteration: + # End of lines reached + first_line = '' + first_vals = [] + warnings.warn('loadtxt: Empty input file: "%s"' % fname, + stacklevel=2) + N = len(usecols or first_vals) + + # Now that we know N, create the default converters list, and + # set packing, if necessary. 
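# Illustrative aside (as comments, since this point sits inside loadtxt): the
# usecols normalization above lets a bare integer stand in for a one-element
# sequence, e.g.:
#
#     import io
#     import numpy as np
#     col = np.loadtxt(io.StringIO("1 2 3\n4 5 6"), usecols=1)
#     assert np.array_equal(col, [2.0, 5.0])   # same as usecols=(1,)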
+ if len(dtype_types) > 1: + # We're dealing with a structured array, each field of + # the dtype matches a column + converters = [_getconv(dt) for dt in dtype_types] + else: + # All fields have the same dtype + converters = [defconv for i in range(N)] + if N > 1: + packing = [(N, tuple)] + + # By preference, use the converters specified by the user + for i, conv in (user_converters or {}).items(): + if usecols: + try: + i = usecols.index(i) + except ValueError: + # Unused converter specified + continue + if byte_converters: + # converters may use decode to workaround numpy's old + # behaviour, so encode the string again before passing to + # the user converter + def tobytes_first(x, conv): + if type(x) is bytes: + return conv(x) + return conv(x.encode("latin1")) + converters[i] = functools.partial(tobytes_first, conv=conv) + else: + converters[i] = conv + + converters = [conv if conv is not bytes else + lambda x: x.encode(fencoding) for conv in converters] + + # read data in chunks and fill it into an array via resize + # over-allocating and shrinking the array later may be faster but is + # probably not relevant compared to the cost of actually reading and + # converting the data + X = None + for x in read_data(_loadtxt_chunksize): + if X is None: + X = np.array(x, dtype) + else: + nshape = list(X.shape) + pos = nshape[0] + nshape[0] += len(x) + X.resize(nshape, refcheck=False) + X[pos:, ...] = x + finally: + if fown: + fh.close() + + if X is None: + X = np.array([], dtype) + + # Multicolumn data are returned with shape (1, N, M), i.e. + # (1, 1, M) for a single row - remove the singleton dimension there + if X.ndim == 3 and X.shape[:2] == (1, 1): + X.shape = (1, -1) + + # Verify that the array has at least dimensions `ndmin`. + # Tweak the size and shape of the arrays - remove extraneous dimensions + if X.ndim > ndmin: + X = np.squeeze(X) + # and ensure we have the minimum number of dimensions asked for + # - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0 + if X.ndim < ndmin: + if ndmin == 1: + X = np.atleast_1d(X) + elif ndmin == 2: + X = np.atleast_2d(X).T + + if unpack: + if len(dtype_types) > 1: + # For structured arrays, return an array for each field. + return [X[field] for field in dtype.names] + else: + return X.T + else: + return X + + +_loadtxt_with_like = array_function_dispatch( + _loadtxt_dispatcher +)(loadtxt) + + +def _savetxt_dispatcher(fname, X, fmt=None, delimiter=None, newline=None, + header=None, footer=None, comments=None, + encoding=None): + return (X,) + + +@array_function_dispatch(_savetxt_dispatcher) +def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='', + footer='', comments='# ', encoding=None): + """ + Save an array to a text file. + + Parameters + ---------- + fname : filename or file handle + If the filename ends in ``.gz``, the file is automatically saved in + compressed gzip format. `loadtxt` understands gzipped files + transparently. + X : 1D or 2D array_like + Data to be saved to a text file. + fmt : str or sequence of strs, optional + A single format (%10.5f), a sequence of formats, or a + multi-format string, e.g. 'Iteration %d -- %10.5f', in which + case `delimiter` is ignored. For complex `X`, the legal options + for `fmt` are: + + * a single specifier, `fmt='%.4e'`, resulting in numbers formatted + like `' (%s+%sj)' % (fmt, fmt)` + * a full string specifying every real and imaginary part, e.g. 
+ `' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns + * a list of specifiers, one per column - in this case, the real + and imaginary part must have separate specifiers, + e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns + delimiter : str, optional + String or character separating columns. + newline : str, optional + String or character separating lines. + + .. versionadded:: 1.5.0 + header : str, optional + String that will be written at the beginning of the file. + + .. versionadded:: 1.7.0 + footer : str, optional + String that will be written at the end of the file. + + .. versionadded:: 1.7.0 + comments : str, optional + String that will be prepended to the ``header`` and ``footer`` strings, + to mark them as comments. Default: '# ', as expected by e.g. + ``numpy.loadtxt``. + + .. versionadded:: 1.7.0 + encoding : {None, str}, optional + Encoding used to encode the outputfile. Does not apply to output + streams. If the encoding is something other than 'bytes' or 'latin1' + you will not be able to load the file in NumPy versions < 1.14. Default + is 'latin1'. + + .. versionadded:: 1.14.0 + + + See Also + -------- + save : Save an array to a binary file in NumPy ``.npy`` format + savez : Save several arrays into an uncompressed ``.npz`` archive + savez_compressed : Save several arrays into a compressed ``.npz`` archive + + Notes + ----- + Further explanation of the `fmt` parameter + (``%[flag]width[.precision]specifier``): + + flags: + ``-`` : left justify + + ``+`` : Forces to precede result with + or -. + + ``0`` : Left pad the number with zeros instead of space (see width). + + width: + Minimum number of characters to be printed. The value is not truncated + if it has more characters. + + precision: + - For integer specifiers (eg. ``d,i,o,x``), the minimum number of + digits. + - For ``e, E`` and ``f`` specifiers, the number of digits to print + after the decimal point. + - For ``g`` and ``G``, the maximum number of significant digits. + - For ``s``, the maximum number of characters. + + specifiers: + ``c`` : character + + ``d`` or ``i`` : signed decimal integer + + ``e`` or ``E`` : scientific notation with ``e`` or ``E``. + + ``f`` : decimal floating point + + ``g,G`` : use the shorter of ``e,E`` or ``f`` + + ``o`` : signed octal + + ``s`` : string of characters + + ``u`` : unsigned decimal integer + + ``x,X`` : unsigned hexadecimal integer + + This explanation of ``fmt`` is not complete, for an exhaustive + specification see [1]_. + + References + ---------- + .. [1] `Format Specification Mini-Language + `_, + Python Documentation. + + Examples + -------- + >>> x = y = z = np.arange(0.0,5.0,1.0) + >>> np.savetxt('test.out', x, delimiter=',') # X is an array + >>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays + >>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation + + """ + + # Py3 conversions first + if isinstance(fmt, bytes): + fmt = asstr(fmt) + delimiter = asstr(delimiter) + + class WriteWrap: + """Convert to bytes on bytestream inputs. 
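# Illustrative aside, not NumPy source: the fmt forms documented above. A
# per-column sequence of specifiers is joined with the delimiter:
import io
import numpy as np

buf = io.StringIO()
np.savetxt(buf, np.array([[1.5, 2.0], [3.25, 4.0]]),
           fmt=['%.2f', '%d'], delimiter=',')
assert buf.getvalue() == '1.50,2\n3.25,4\n'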
+ + """ + def __init__(self, fh, encoding): + self.fh = fh + self.encoding = encoding + self.do_write = self.first_write + + def close(self): + self.fh.close() + + def write(self, v): + self.do_write(v) + + def write_bytes(self, v): + if isinstance(v, bytes): + self.fh.write(v) + else: + self.fh.write(v.encode(self.encoding)) + + def write_normal(self, v): + self.fh.write(asunicode(v)) + + def first_write(self, v): + try: + self.write_normal(v) + self.write = self.write_normal + except TypeError: + # input is probably a bytestream + self.write_bytes(v) + self.write = self.write_bytes + + own_fh = False + if isinstance(fname, os_PathLike): + fname = os_fspath(fname) + if _is_string_like(fname): + # datasource doesn't support creating a new file ... + open(fname, 'wt').close() + fh = np.lib._datasource.open(fname, 'wt', encoding=encoding) + own_fh = True + elif hasattr(fname, 'write'): + # wrap to handle byte output streams + fh = WriteWrap(fname, encoding or 'latin1') + else: + raise ValueError('fname must be a string or file handle') + + try: + X = np.asarray(X) + + # Handle 1-dimensional arrays + if X.ndim == 0 or X.ndim > 2: + raise ValueError( + "Expected 1D or 2D array, got %dD array instead" % X.ndim) + elif X.ndim == 1: + # Common case -- 1d array of numbers + if X.dtype.names is None: + X = np.atleast_2d(X).T + ncol = 1 + + # Complex dtype -- each field indicates a separate column + else: + ncol = len(X.dtype.names) + else: + ncol = X.shape[1] + + iscomplex_X = np.iscomplexobj(X) + # `fmt` can be a string with multiple insertion points or a + # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '$10d') + if type(fmt) in (list, tuple): + if len(fmt) != ncol: + raise AttributeError('fmt has wrong shape. %s' % str(fmt)) + format = asstr(delimiter).join(map(asstr, fmt)) + elif isinstance(fmt, str): + n_fmt_chars = fmt.count('%') + error = ValueError('fmt has wrong number of %% formats: %s' % fmt) + if n_fmt_chars == 1: + if iscomplex_X: + fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol + else: + fmt = [fmt, ] * ncol + format = delimiter.join(fmt) + elif iscomplex_X and n_fmt_chars != (2 * ncol): + raise error + elif ((not iscomplex_X) and n_fmt_chars != ncol): + raise error + else: + format = fmt + else: + raise ValueError('invalid fmt: %r' % (fmt,)) + + if len(header) > 0: + header = header.replace('\n', '\n' + comments) + fh.write(comments + header + newline) + if iscomplex_X: + for row in X: + row2 = [] + for number in row: + row2.append(number.real) + row2.append(number.imag) + s = format % tuple(row2) + newline + fh.write(s.replace('+-', '-')) + else: + for row in X: + try: + v = format % tuple(row) + newline + except TypeError as e: + raise TypeError("Mismatch between array dtype ('%s') and " + "format specifier ('%s')" + % (str(X.dtype), format)) from e + fh.write(v) + + if len(footer) > 0: + footer = footer.replace('\n', '\n' + comments) + fh.write(comments + footer + newline) + finally: + if own_fh: + fh.close() + + +@set_module('numpy') +def fromregex(file, regexp, dtype, encoding=None): + """ + Construct an array from a text file, using regular expression parsing. + + The returned array is always a structured array, and is constructed from + all matches of the regular expression in the file. Groups in the regular + expression are converted to fields of the structured array. + + Parameters + ---------- + file : str or file + Filename or file object to read. + regexp : str or regexp + Regular expression used to parse the file. 
+        Groups in the regular expression correspond to fields in the dtype.
+    dtype : dtype or list of dtypes
+        Dtype for the structured array.
+    encoding : str, optional
+        Encoding used to decode the input file. Does not apply to input streams.
+
+        .. versionadded:: 1.14.0
+
+    Returns
+    -------
+    output : ndarray
+        The output array, containing the part of the content of `file` that
+        was matched by `regexp`. `output` is always a structured array.
+
+    Raises
+    ------
+    TypeError
+        When `dtype` is not a valid dtype for a structured array.
+
+    See Also
+    --------
+    fromstring, loadtxt
+
+    Notes
+    -----
+    Dtypes for structured arrays can be specified in several forms, but all
+    forms specify at least the data type and field name. For details see
+    `basics.rec`.
+
+    Examples
+    --------
+    >>> f = open('test.dat', 'w')
+    >>> _ = f.write("1312 foo\\n1534 bar\\n444 qux")
+    >>> f.close()
+
+    >>> regexp = r"(\\d+)\\s+(...)"  # match [digits, whitespace, anything]
+    >>> output = np.fromregex('test.dat', regexp,
+    ...                       [('num', np.int64), ('key', 'S3')])
+    >>> output
+    array([(1312, b'foo'), (1534, b'bar'), ( 444, b'qux')],
+          dtype=[('num', '<i8'), ('key', 'S3')])
+    >>> output['num']
+    array([1312, 1534,  444])
+
+    """
+    own_fh = False
+    if not hasattr(file, "read"):
+        file = np.lib._datasource.open(file, 'rt', encoding=encoding)
+        own_fh = True
+
+    try:
+        if not isinstance(dtype, np.dtype):
+            dtype = np.dtype(dtype)
+
+        content = file.read()
+        if isinstance(content, bytes) and isinstance(regexp, np.compat.unicode):
+            regexp = asbytes(regexp)
+        elif isinstance(content, np.compat.unicode) and isinstance(regexp, bytes):
+            regexp = asstr(regexp)
+
+        if not hasattr(regexp, 'match'):
+            regexp = re.compile(regexp)
+        seq = regexp.findall(content)
+        if seq and not isinstance(seq[0], tuple):
+            # Only one group is in the regexp.
+            # Create the new array as a single data-type and then
+            # re-interpret as a single-field structured array.
+            newdtype = np.dtype(dtype[dtype.names[0]])
+            output = np.array(seq, dtype=newdtype)
+            output.dtype = dtype
+        else:
+            output = np.array(seq, dtype=dtype)
+
+        return output
+    finally:
+        if own_fh:
+            file.close()
+
+
+#####--------------------------------------------------------------------------
+#---- --- ASCII functions ---
+#####--------------------------------------------------------------------------
+
+
+def _genfromtxt_dispatcher(fname, dtype=None, comments=None, delimiter=None,
+                           skip_header=None, skip_footer=None, converters=None,
+                           missing_values=None, filling_values=None, usecols=None,
+                           names=None, excludelist=None, deletechars=None,
+                           replace_space=None, autostrip=None, case_sensitive=None,
+                           defaultfmt=None, unpack=None, usemask=None, loose=None,
+                           invalid_raise=None, max_rows=None, encoding=None, *,
+                           like=None):
+    return (like,)
+
+
+@set_array_function_like_doc
+@set_module('numpy')
+def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
+               skip_header=0, skip_footer=0, converters=None,
+               missing_values=None, filling_values=None, usecols=None,
+               names=None, excludelist=None,
+               deletechars=''.join(sorted(NameValidator.defaultdeletechars)),
+               replace_space='_', autostrip=False, case_sensitive=True,
+               defaultfmt="f%i", unpack=None, usemask=False, loose=True,
+               invalid_raise=True, max_rows=None, encoding='bytes', *,
+               like=None):
+    """
+    Load data from a text file, with missing values handled as specified.
+
+    Each line past the first `skip_header` lines is split at the `delimiter`
+    character, and characters following the `comments` character are discarded.
+
+    Parameters
+    ----------
+    fname : file, str, pathlib.Path, list of str, generator
+        File, filename, list, or generator to read. If the filename
+        extension is `.gz` or `.bz2`, the file is first decompressed. Note
+        that generators must return byte strings. The strings
+        in a list or produced by a generator are treated as lines.
+    dtype : dtype, optional
+        Data type of the resulting array.
+        If None, the dtypes will be determined by the contents of each
+        column, individually.
+    comments : str, optional
+        The character used to indicate the start of a comment.
+        All the characters occurring on a line after a comment are discarded.
+    delimiter : str, int, or sequence, optional
+        The string used to separate values. By default, any consecutive
+        whitespaces act as delimiter. An integer or sequence of integers
+        can also be provided as width(s) of each field.
+    skiprows : int, optional
+        `skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
+    skip_header : int, optional
+        The number of lines to skip at the beginning of the file.
+    skip_footer : int, optional
+        The number of lines to skip at the end of the file.
+    converters : variable, optional
+        The set of functions that convert the data of a column to a value.
+        The converters can also be used to provide a default value
+        for missing data: ``converters = {3: lambda s: float(s or 0)}``.
+    missing : variable, optional
+        `missing` was removed in numpy 1.10. Please use `missing_values`
+        instead.
+    missing_values : variable, optional
+        The set of strings corresponding to missing data.
+    filling_values : variable, optional
+        The set of values to be used as default when the data are missing.
+    usecols : sequence, optional
+        Which columns to read, with 0 being the first. For example,
+        ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
+    names : {None, True, str, sequence}, optional
+        If `names` is True, the field names are read from the first line after
+        the first `skip_header` lines. This line can optionally be preceded
+        by a comment delimiter. If `names` is a sequence or a single-string of
+        comma-separated names, the names will be used to define the field names
+        in a structured dtype. If `names` is None, the names of the dtype
+        fields will be used, if any.
+    excludelist : sequence, optional
+        A list of names to exclude. This list is appended to the default list
+        ['return','file','print']. Excluded names are appended with an
+        underscore: for example, `file` would become `file_`.
+    deletechars : str, optional
+        A string combining invalid characters that must be deleted from the
+        names.
+    defaultfmt : str, optional
+        A format used to define default field names, such as "f%i" or "f_%02i".
+    autostrip : bool, optional
+        Whether to automatically strip white spaces from the variables.
+    replace_space : char, optional
+        Character(s) used in replacement of white spaces in the variable
+        names. By default, use a '_'.
+    case_sensitive : {True, False, 'upper', 'lower'}, optional
+        If True, field names are case sensitive.
+        If False or 'upper', field names are converted to upper case.
+        If 'lower', field names are converted to lower case.
+    unpack : bool, optional
+        If True, the returned array is transposed, so that arguments may be
+        unpacked using ``x, y, z = genfromtxt(...)``. When used with a
+        structured data-type, arrays are returned for each field.
+        Default is False.
+    usemask : bool, optional
+        If True, return a masked array.
+        If False, return a regular array.
+    loose : bool, optional
+        If True, do not raise errors for invalid values.
+    invalid_raise : bool, optional
+        If True, an exception is raised if an inconsistency is detected in the
+        number of columns.
+        If False, a warning is emitted and the offending lines are skipped.
+    max_rows : int, optional
+        The maximum number of rows to read. Must not be used with skip_footer
+        at the same time. If given, the value must be at least 1. Default is
+        to read the entire file.
+
+        .. versionadded:: 1.10.0
+    encoding : str, optional
+        Encoding used to decode the input file. Does not apply when `fname` is
+        a file object. The special value 'bytes' enables backward compatibility
+        workarounds that ensure that you receive byte arrays when possible
+        and passes latin1 encoded strings to converters. Override this value to
+        receive unicode arrays and pass strings as input to converters. If set
+        to None the system default is used. The default value is 'bytes'.
+
+        .. versionadded:: 1.14.0
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    out : ndarray
+        Data read from the text file. If `usemask` is True, this is a
+        masked array.
+
+    See Also
+    --------
+    numpy.loadtxt : equivalent function when no data is missing.
+
+    Notes
+    -----
+    * When spaces are used as delimiters, or when no delimiter has been given
+      as input, there should not be any missing data between two fields.
+    * When the variables are named (either by a flexible dtype or with `names`),
+      there must not be any header in the file (else a ValueError
+      exception is raised).
+    * Individual values are not stripped of spaces by default.
+      When using a custom converter, make sure the function does remove spaces.
+
+    References
+    ----------
+    .. [1] NumPy User Guide, section `I/O with NumPy
+           <https://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
+
+    Examples
+    --------
+    >>> from io import StringIO
+    >>> import numpy as np
+
+    Comma delimited file with mixed dtype
+
+    >>> s = StringIO(u"1,1.3,abcde")
+    >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
+    ... ('mystring','S5')], delimiter=",")
+    >>> data
+    array((1, 1.3, b'abcde'),
+          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
+
+    Using dtype = None
+
+    >>> _ = s.seek(0) # needed for StringIO example only
+    >>> data = np.genfromtxt(s, dtype=None,
+    ... names = ['myint','myfloat','mystring'], delimiter=",")
+    >>> data
+    array((1, 1.3, b'abcde'),
+          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
+
+    Specifying dtype and names
+
+    >>> _ = s.seek(0)
+    >>> data = np.genfromtxt(s, dtype="i8,f8,S5",
+    ... names=['myint','myfloat','mystring'], delimiter=",")
+    >>> data
+    array((1, 1.3, b'abcde'),
+          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
+
+    An example with fixed-width columns
+
+    >>> s = StringIO(u"11.3abcde")
+    >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
+    ...     delimiter=[1,3,5])
+    >>> data
+    array((1, 1.3, b'abcde'),
+          dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', 'S5')])
+
+    An example to show comments
+
+    >>> f = StringIO('''
+    ... text,# of chars
+    ... hello world,11
+    ... numpy,5''')
+    >>> np.genfromtxt(f, dtype='S12,S12', delimiter=',')
+    array([(b'text', b''), (b'hello world', b'11'), (b'numpy', b'5')],
+      dtype=[('f0', 'S12'), ('f1', 'S12')])
+
+    """
+
+    if like is not None:
+        return _genfromtxt_with_like(
+            fname, dtype=dtype, comments=comments, delimiter=delimiter,
+            skip_header=skip_header, skip_footer=skip_footer,
+            converters=converters, missing_values=missing_values,
+            filling_values=filling_values, usecols=usecols, names=names,
+            excludelist=excludelist, deletechars=deletechars,
+            replace_space=replace_space, autostrip=autostrip,
+            case_sensitive=case_sensitive, defaultfmt=defaultfmt,
+            unpack=unpack, usemask=usemask, loose=loose,
+            invalid_raise=invalid_raise, max_rows=max_rows, encoding=encoding,
+            like=like
+        )
+
+    if max_rows is not None:
+        if skip_footer:
+            raise ValueError(
+                    "The keywords 'skip_footer' and 'max_rows' can not be "
+                    "specified at the same time.")
+        if max_rows < 1:
+            raise ValueError("'max_rows' must be at least 1.")
+
+    if usemask:
+        from numpy.ma import MaskedArray, make_mask_descr
+    # Check the input dictionary of converters
+    user_converters = converters or {}
+    if not isinstance(user_converters, dict):
+        raise TypeError(
+            "The input argument 'converter' should be a valid dictionary "
+            "(got '%s' instead)" % type(user_converters))
+
+    if encoding == 'bytes':
+        encoding = None
+        byte_converters = True
+    else:
+        byte_converters = False
+
+    # Initialize the filehandle, the LineSplitter and the NameValidator
+    try:
+        if isinstance(fname, os_PathLike):
+            fname = os_fspath(fname)
+        if isinstance(fname, str):
+            fid = np.lib._datasource.open(fname, 'rt', encoding=encoding)
+            fid_ctx = contextlib.closing(fid)
+        else:
+            fid = fname
+            fid_ctx = contextlib.nullcontext(fid)
+        fhd = iter(fid)
+    except TypeError as e:
+        raise TypeError(
+            "fname must be a string, filehandle, list of strings, "
+            "or generator. Got %s instead." % type(fname)) from e
+
+    with fid_ctx:
+        split_line = LineSplitter(delimiter=delimiter, comments=comments,
+                                  autostrip=autostrip, encoding=encoding)
+        validate_names = NameValidator(excludelist=excludelist,
+                                       deletechars=deletechars,
+                                       case_sensitive=case_sensitive,
+                                       replace_space=replace_space)
+
+        # Skip the first `skip_header` rows
+        try:
+            for i in range(skip_header):
+                next(fhd)
+
+            # Keep on until we find the first valid values
+            first_values = None
+
+            while not first_values:
+                first_line = _decode_line(next(fhd), encoding)
+                if (names is True) and (comments is not None):
+                    if comments in first_line:
+                        first_line = (
+                            ''.join(first_line.split(comments)[1:]))
+                first_values = split_line(first_line)
+        except StopIteration:
+            # return an empty array if the datafile is empty
+            first_line = ''
+            first_values = []
+            warnings.warn('genfromtxt: Empty input file: "%s"' % fname, stacklevel=2)
+
+        # Should we take the first values as names ?
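+        # If so, `first_values` already holds the tokens of that header line;
+        # when the first token is just the comment marker, it is dropped below.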
+ if names is True: + fval = first_values[0].strip() + if comments is not None: + if fval in comments: + del first_values[0] + + # Check the columns to use: make sure `usecols` is a list + if usecols is not None: + try: + usecols = [_.strip() for _ in usecols.split(",")] + except AttributeError: + try: + usecols = list(usecols) + except TypeError: + usecols = [usecols, ] + nbcols = len(usecols or first_values) + + # Check the names and overwrite the dtype.names if needed + if names is True: + names = validate_names([str(_.strip()) for _ in first_values]) + first_line = '' + elif _is_string_like(names): + names = validate_names([_.strip() for _ in names.split(',')]) + elif names: + names = validate_names(names) + # Get the dtype + if dtype is not None: + dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names, + excludelist=excludelist, + deletechars=deletechars, + case_sensitive=case_sensitive, + replace_space=replace_space) + # Make sure the names is a list (for 2.5) + if names is not None: + names = list(names) + + if usecols: + for (i, current) in enumerate(usecols): + # if usecols is a list of names, convert to a list of indices + if _is_string_like(current): + usecols[i] = names.index(current) + elif current < 0: + usecols[i] = current + len(first_values) + # If the dtype is not None, make sure we update it + if (dtype is not None) and (len(dtype) > nbcols): + descr = dtype.descr + dtype = np.dtype([descr[_] for _ in usecols]) + names = list(dtype.names) + # If `names` is not None, update the names + elif (names is not None) and (len(names) > nbcols): + names = [names[_] for _ in usecols] + elif (names is not None) and (dtype is not None): + names = list(dtype.names) + + # Process the missing values ............................... + # Rename missing_values for convenience + user_missing_values = missing_values or () + if isinstance(user_missing_values, bytes): + user_missing_values = user_missing_values.decode('latin1') + + # Define the list of missing_values (one column: one list) + missing_values = [list(['']) for _ in range(nbcols)] + + # We have a dictionary: process it field by field + if isinstance(user_missing_values, dict): + # Loop on the items + for (key, val) in user_missing_values.items(): + # Is the key a string ? + if _is_string_like(key): + try: + # Transform it into an integer + key = names.index(key) + except ValueError: + # We couldn't find it: the name must have been dropped + continue + # Redefine the key as needed if it's a column number + if usecols: + try: + key = usecols.index(key) + except ValueError: + pass + # Transform the value as a list of string + if isinstance(val, (list, tuple)): + val = [str(_) for _ in val] + else: + val = [str(val), ] + # Add the value(s) to the current list of missing + if key is None: + # None acts as default + for miss in missing_values: + miss.extend(val) + else: + missing_values[key].extend(val) + # We have a sequence : each item matches a column + elif isinstance(user_missing_values, (list, tuple)): + for (value, entry) in zip(user_missing_values, missing_values): + value = str(value) + if value not in entry: + entry.append(value) + # We have a string : apply it to all entries + elif isinstance(user_missing_values, str): + user_value = user_missing_values.split(",") + for entry in missing_values: + entry.extend(user_value) + # We have something else: apply it to all entries + else: + for entry in missing_values: + entry.extend([str(user_missing_values)]) + + # Process the filling_values ............................... 
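+        # Like `missing_values` above, `filling_values` may be given as a
+        # dict keyed by column name or index, as a sequence aligned with the
+        # columns, or as a single scalar applied to every column; normalize
+        # it here to one default value per column.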
+ # Rename the input for convenience + user_filling_values = filling_values + if user_filling_values is None: + user_filling_values = [] + # Define the default + filling_values = [None] * nbcols + # We have a dictionary : update each entry individually + if isinstance(user_filling_values, dict): + for (key, val) in user_filling_values.items(): + if _is_string_like(key): + try: + # Transform it into an integer + key = names.index(key) + except ValueError: + # We couldn't find it: the name must have been dropped, + continue + # Redefine the key if it's a column number and usecols is defined + if usecols: + try: + key = usecols.index(key) + except ValueError: + pass + # Add the value to the list + filling_values[key] = val + # We have a sequence : update on a one-to-one basis + elif isinstance(user_filling_values, (list, tuple)): + n = len(user_filling_values) + if (n <= nbcols): + filling_values[:n] = user_filling_values + else: + filling_values = user_filling_values[:nbcols] + # We have something else : use it for all entries + else: + filling_values = [user_filling_values] * nbcols + + # Initialize the converters ................................ + if dtype is None: + # Note: we can't use a [...]*nbcols, as we would have 3 times the same + # ... converter, instead of 3 different converters. + converters = [StringConverter(None, missing_values=miss, default=fill) + for (miss, fill) in zip(missing_values, filling_values)] + else: + dtype_flat = flatten_dtype(dtype, flatten_base=True) + # Initialize the converters + if len(dtype_flat) > 1: + # Flexible type : get a converter from each dtype + zipit = zip(dtype_flat, missing_values, filling_values) + converters = [StringConverter(dt, locked=True, + missing_values=miss, default=fill) + for (dt, miss, fill) in zipit] + else: + # Set to a default converter (but w/ different missing values) + zipit = zip(missing_values, filling_values) + converters = [StringConverter(dtype, locked=True, + missing_values=miss, default=fill) + for (miss, fill) in zipit] + # Update the converters to use the user-defined ones + uc_update = [] + for (j, conv) in user_converters.items(): + # If the converter is specified by column names, use the index instead + if _is_string_like(j): + try: + j = names.index(j) + i = j + except ValueError: + continue + elif usecols: + try: + i = usecols.index(j) + except ValueError: + # Unused converter specified + continue + else: + i = j + # Find the value to test - first_line is not filtered by usecols: + if len(first_line): + testing_value = first_values[j] + else: + testing_value = None + if conv is bytes: + user_conv = asbytes + elif byte_converters: + # converters may use decode to workaround numpy's old behaviour, + # so encode the string again before passing to the user converter + def tobytes_first(x, conv): + if type(x) is bytes: + return conv(x) + return conv(x.encode("latin1")) + user_conv = functools.partial(tobytes_first, conv=conv) + else: + user_conv = conv + converters[i].update(user_conv, locked=True, + testing_value=testing_value, + default=filling_values[i], + missing_values=missing_values[i],) + uc_update.append((i, user_conv)) + # Make sure we have the corrected keys in user_converters... + user_converters.update(uc_update) + + # Fixme: possible error as following variable never used. + # miss_chars = [_.missing_values for _ in converters] + + # Initialize the output lists ... + # ... rows + rows = [] + append_to_rows = rows.append + # ... masks + if usemask: + masks = [] + append_to_masks = masks.append + # ... 
invalid + invalid = [] + append_to_invalid = invalid.append + + # Parse each line + for (i, line) in enumerate(itertools.chain([first_line, ], fhd)): + values = split_line(line) + nbvalues = len(values) + # Skip an empty line + if nbvalues == 0: + continue + if usecols: + # Select only the columns we need + try: + values = [values[_] for _ in usecols] + except IndexError: + append_to_invalid((i + skip_header + 1, nbvalues)) + continue + elif nbvalues != nbcols: + append_to_invalid((i + skip_header + 1, nbvalues)) + continue + # Store the values + append_to_rows(tuple(values)) + if usemask: + append_to_masks(tuple([v.strip() in m + for (v, m) in zip(values, + missing_values)])) + if len(rows) == max_rows: + break + + # Upgrade the converters (if needed) + if dtype is None: + for (i, converter) in enumerate(converters): + current_column = [itemgetter(i)(_m) for _m in rows] + try: + converter.iterupgrade(current_column) + except ConverterLockError: + errmsg = "Converter #%i is locked and cannot be upgraded: " % i + current_column = map(itemgetter(i), rows) + for (j, value) in enumerate(current_column): + try: + converter.upgrade(value) + except (ConverterError, ValueError): + errmsg += "(occurred line #%i for value '%s')" + errmsg %= (j + 1 + skip_header, value) + raise ConverterError(errmsg) + + # Check that we don't have invalid values + nbinvalid = len(invalid) + if nbinvalid > 0: + nbrows = len(rows) + nbinvalid - skip_footer + # Construct the error message + template = " Line #%%i (got %%i columns instead of %i)" % nbcols + if skip_footer > 0: + nbinvalid_skipped = len([_ for _ in invalid + if _[0] > nbrows + skip_header]) + invalid = invalid[:nbinvalid - nbinvalid_skipped] + skip_footer -= nbinvalid_skipped +# +# nbrows -= skip_footer +# errmsg = [template % (i, nb) +# for (i, nb) in invalid if i < nbrows] +# else: + errmsg = [template % (i, nb) + for (i, nb) in invalid] + if len(errmsg): + errmsg.insert(0, "Some errors were detected !") + errmsg = "\n".join(errmsg) + # Raise an exception ? + if invalid_raise: + raise ValueError(errmsg) + # Issue a warning ? + else: + warnings.warn(errmsg, ConversionWarning, stacklevel=2) + + # Strip the last skip_footer data + if skip_footer > 0: + rows = rows[:-skip_footer] + if usemask: + masks = masks[:-skip_footer] + + # Convert each value according to the converter: + # We want to modify the list in place to avoid creating a new one... + if loose: + rows = list( + zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)] + for (i, conv) in enumerate(converters)])) + else: + rows = list( + zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)] + for (i, conv) in enumerate(converters)])) + + # Reset the dtype + data = rows + if dtype is None: + # Get the dtypes from the types of the converters + column_types = [conv.type for conv in converters] + # Find the columns with strings... + strcolidx = [i for (i, v) in enumerate(column_types) + if v == np.unicode_] + + if byte_converters and strcolidx: + # convert strings back to bytes for backward compatibility + warnings.warn( + "Reading unicode strings without specifying the encoding " + "argument is deprecated. 
Set the encoding, use None for the " + "system default.", + np.VisibleDeprecationWarning, stacklevel=2) + def encode_unicode_cols(row_tup): + row = list(row_tup) + for i in strcolidx: + row[i] = row[i].encode('latin1') + return tuple(row) + + try: + data = [encode_unicode_cols(r) for r in data] + except UnicodeEncodeError: + pass + else: + for i in strcolidx: + column_types[i] = np.bytes_ + + # Update string types to be the right length + sized_column_types = column_types[:] + for i, col_type in enumerate(column_types): + if np.issubdtype(col_type, np.character): + n_chars = max(len(row[i]) for row in data) + sized_column_types[i] = (col_type, n_chars) + + if names is None: + # If the dtype is uniform (before sizing strings) + base = { + c_type + for c, c_type in zip(converters, column_types) + if c._checked} + if len(base) == 1: + uniform_type, = base + (ddtype, mdtype) = (uniform_type, bool) + else: + ddtype = [(defaultfmt % i, dt) + for (i, dt) in enumerate(sized_column_types)] + if usemask: + mdtype = [(defaultfmt % i, bool) + for (i, dt) in enumerate(sized_column_types)] + else: + ddtype = list(zip(names, sized_column_types)) + mdtype = list(zip(names, [bool] * len(sized_column_types))) + output = np.array(data, dtype=ddtype) + if usemask: + outputmask = np.array(masks, dtype=mdtype) + else: + # Overwrite the initial dtype names if needed + if names and dtype.names is not None: + dtype.names = names + # Case 1. We have a structured type + if len(dtype_flat) > 1: + # Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])] + # First, create the array using a flattened dtype: + # [('a', int), ('b1', int), ('b2', float)] + # Then, view the array using the specified dtype. + if 'O' in (_.char for _ in dtype_flat): + if has_nested_fields(dtype): + raise NotImplementedError( + "Nested fields involving objects are not supported...") + else: + output = np.array(data, dtype=dtype) + else: + rows = np.array(data, dtype=[('', _) for _ in dtype_flat]) + output = rows.view(dtype) + # Now, process the rowmasks the same way + if usemask: + rowmasks = np.array( + masks, dtype=np.dtype([('', bool) for t in dtype_flat])) + # Construct the new dtype + mdtype = make_mask_descr(dtype) + outputmask = rowmasks.view(mdtype) + # Case #2. We have a basic dtype + else: + # We used some user-defined converters + if user_converters: + ishomogeneous = True + descr = [] + for i, ttype in enumerate([conv.type for conv in converters]): + # Keep the dtype of the current converter + if i in user_converters: + ishomogeneous &= (ttype == dtype.type) + if np.issubdtype(ttype, np.character): + ttype = (ttype, max(len(row[i]) for row in data)) + descr.append(('', ttype)) + else: + descr.append(('', dtype)) + # So we changed the dtype ? + if not ishomogeneous: + # We have more than one field + if len(descr) > 1: + dtype = np.dtype(descr) + # We have only one field: drop the name if not needed. 
+ else: + dtype = np.dtype(ttype) + # + output = np.array(data, dtype) + if usemask: + if dtype.names is not None: + mdtype = [(_, bool) for _ in dtype.names] + else: + mdtype = bool + outputmask = np.array(masks, dtype=mdtype) + # Try to take care of the missing data we missed + names = output.dtype.names + if usemask and names: + for (name, conv) in zip(names, converters): + missing_values = [conv(_) for _ in conv.missing_values + if _ != ''] + for mval in missing_values: + outputmask[name] |= (output[name] == mval) + # Construct the final array + if usemask: + output = output.view(MaskedArray) + output._mask = outputmask + output = np.squeeze(output) + if unpack: + if names is None: + return output.T + elif len(names) == 1: + # squeeze single-name dtypes too + return output[names[0]] + else: + # For structured arrays with multiple fields, + # return an array for each field. + return [output[field] for field in names] + return output + + +_genfromtxt_with_like = array_function_dispatch( + _genfromtxt_dispatcher +)(genfromtxt) + + +def ndfromtxt(fname, **kwargs): + """ + Load ASCII data stored in a file and return it as a single array. + + .. deprecated:: 1.17 + ndfromtxt` is a deprecated alias of `genfromtxt` which + overwrites the ``usemask`` argument with `False` even when + explicitly called as ``ndfromtxt(..., usemask=True)``. + Use `genfromtxt` instead. + + Parameters + ---------- + fname, kwargs : For a description of input parameters, see `genfromtxt`. + + See Also + -------- + numpy.genfromtxt : generic function. + + """ + kwargs['usemask'] = False + # Numpy 1.17 + warnings.warn( + "np.ndfromtxt is a deprecated alias of np.genfromtxt, " + "prefer the latter.", + DeprecationWarning, stacklevel=2) + return genfromtxt(fname, **kwargs) + + +def mafromtxt(fname, **kwargs): + """ + Load ASCII data stored in a text file and return a masked array. + + .. deprecated:: 1.17 + np.mafromtxt is a deprecated alias of `genfromtxt` which + overwrites the ``usemask`` argument with `True` even when + explicitly called as ``mafromtxt(..., usemask=False)``. + Use `genfromtxt` instead. + + Parameters + ---------- + fname, kwargs : For a description of input parameters, see `genfromtxt`. + + See Also + -------- + numpy.genfromtxt : generic function to load ASCII data. + + """ + kwargs['usemask'] = True + # Numpy 1.17 + warnings.warn( + "np.mafromtxt is a deprecated alias of np.genfromtxt, " + "prefer the latter.", + DeprecationWarning, stacklevel=2) + return genfromtxt(fname, **kwargs) + + +def recfromtxt(fname, **kwargs): + """ + Load ASCII data from a file and return it in a record array. + + If ``usemask=False`` a standard `recarray` is returned, + if ``usemask=True`` a MaskedRecords array is returned. + + Parameters + ---------- + fname, kwargs : For a description of input parameters, see `genfromtxt`. + + See Also + -------- + numpy.genfromtxt : generic function + + Notes + ----- + By default, `dtype` is None, which means that the data-type of the output + array will be determined from the data. + + """ + kwargs.setdefault("dtype", None) + usemask = kwargs.get('usemask', False) + output = genfromtxt(fname, **kwargs) + if usemask: + from numpy.ma.mrecords import MaskedRecords + output = output.view(MaskedRecords) + else: + output = output.view(np.recarray) + return output + + +def recfromcsv(fname, **kwargs): + """ + Load ASCII data stored in a comma-separated file. 
+ + The returned array is a record array (if ``usemask=False``, see + `recarray`) or a masked record array (if ``usemask=True``, + see `ma.mrecords.MaskedRecords`). + + Parameters + ---------- + fname, kwargs : For a description of input parameters, see `genfromtxt`. + + See Also + -------- + numpy.genfromtxt : generic function to load ASCII data. + + Notes + ----- + By default, `dtype` is None, which means that the data-type of the output + array will be determined from the data. + + """ + # Set default kwargs for genfromtxt as relevant to csv import. + kwargs.setdefault("case_sensitive", "lower") + kwargs.setdefault("names", True) + kwargs.setdefault("delimiter", ",") + kwargs.setdefault("dtype", None) + output = genfromtxt(fname, **kwargs) + + usemask = kwargs.get("usemask", False) + if usemask: + from numpy.ma.mrecords import MaskedRecords + output = output.view(MaskedRecords) + else: + output = output.view(np.recarray) + return output diff --git a/MLPY/Lib/site-packages/numpy/lib/npyio.pyi b/MLPY/Lib/site-packages/numpy/lib/npyio.pyi new file mode 100644 index 0000000000000000000000000000000000000000..c7c6026c3517dc816159bfd3869462222a107e34 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/npyio.pyi @@ -0,0 +1,104 @@ +from typing import Mapping, List, Any + +from numpy import ( + DataSource as DataSource, +) + +from numpy.core.multiarray import ( + packbits as packbits, + unpackbits as unpackbits, +) + +__all__: List[str] + +def loads(*args, **kwargs): ... + +class BagObj: + def __init__(self, obj): ... + def __getattribute__(self, key): ... + def __dir__(self): ... + +def zipfile_factory(file, *args, **kwargs): ... + +class NpzFile(Mapping[Any, Any]): + zip: Any + fid: Any + files: Any + allow_pickle: Any + pickle_kwargs: Any + f: Any + def __init__(self, fid, own_fid=..., allow_pickle=..., pickle_kwargs=...): ... + def __enter__(self): ... + def __exit__(self, exc_type, exc_value, traceback): ... + def close(self): ... + def __del__(self): ... + def __iter__(self): ... + def __len__(self): ... + def __getitem__(self, key): ... + def iteritems(self): ... + def iterkeys(self): ... + +def load(file, mmap_mode=..., allow_pickle=..., fix_imports=..., encoding=...): ... +def save(file, arr, allow_pickle=..., fix_imports=...): ... +def savez(file, *args, **kwds): ... +def savez_compressed(file, *args, **kwds): ... +def loadtxt( + fname, + dtype=..., + comments=..., + delimiter=..., + converters=..., + skiprows=..., + usecols=..., + unpack=..., + ndmin=..., + encoding=..., + max_rows=..., + *, + like=..., +): ... +def savetxt( + fname, + X, + fmt=..., + delimiter=..., + newline=..., + header=..., + footer=..., + comments=..., + encoding=..., +): ... +def fromregex(file, regexp, dtype, encoding=...): ... +def genfromtxt( + fname, + dtype=..., + comments=..., + delimiter=..., + skip_header=..., + skip_footer=..., + converters=..., + missing_values=..., + filling_values=..., + usecols=..., + names=..., + excludelist=..., + deletechars=..., + replace_space=..., + autostrip=..., + case_sensitive=..., + defaultfmt=..., + unpack=..., + usemask=..., + loose=..., + invalid_raise=..., + max_rows=..., + encoding=..., + *, + like=..., +): ... +def recfromtxt(fname, **kwargs): ... +def recfromcsv(fname, **kwargs): ... + +# NOTE: Deprecated +# def ndfromtxt(fname, **kwargs): ... +# def mafromtxt(fname, **kwargs): ... 
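For orientation, a minimal round-trip through the text-I/O entry points stubbed above (``savetxt``, ``genfromtxt``, ``recfromcsv``) might look like the following sketch; the file name ``demo.csv`` and the column names ``a``/``b`` are illustrative placeholders, not anything the module defines:

    import numpy as np

    # Write a small table; `header` is emitted behind the comment
    # marker, so the file starts with the line '# a,b'.
    data = np.array([[1.0, 2.0], [3.0, 4.0]])
    np.savetxt("demo.csv", data, fmt="%.2f", delimiter=",", header="a,b")

    # names=True reads the field names back from that (commented)
    # header line, yielding a structured array with fields 'a' and 'b'.
    table = np.genfromtxt("demo.csv", delimiter=",", names=True)
    print(table["a"])          # [1. 3.]

    # recfromcsv wraps genfromtxt with csv-friendly defaults
    # (delimiter=',', names=True, dtype=None, lower-cased names)
    # and returns a recarray, so fields are attribute-accessible.
    rec = np.recfromcsv("demo.csv")
    print(rec.a)               # [1. 3.]
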
diff --git a/MLPY/Lib/site-packages/numpy/lib/polynomial.py b/MLPY/Lib/site-packages/numpy/lib/polynomial.py new file mode 100644 index 0000000000000000000000000000000000000000..606fab2d3d9592b30e13e74238f387d03308ef09 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/polynomial.py @@ -0,0 +1,1444 @@ +""" +Functions to operate on polynomials. + +""" +__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd', + 'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d', + 'polyfit', 'RankWarning'] + +import functools +import re +import warnings +import numpy.core.numeric as NX + +from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array, + ones) +from numpy.core import overrides +from numpy.core.overrides import set_module +from numpy.lib.twodim_base import diag, vander +from numpy.lib.function_base import trim_zeros +from numpy.lib.type_check import iscomplex, real, imag, mintypecode +from numpy.linalg import eigvals, lstsq, inv + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +@set_module('numpy') +class RankWarning(UserWarning): + """ + Issued by `polyfit` when the Vandermonde matrix is rank deficient. + + For more information, a way to suppress the warning, and an example of + `RankWarning` being issued, see `polyfit`. + + """ + pass + + +def _poly_dispatcher(seq_of_zeros): + return seq_of_zeros + + +@array_function_dispatch(_poly_dispatcher) +def poly(seq_of_zeros): + """ + Find the coefficients of a polynomial with the given sequence of roots. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + Returns the coefficients of the polynomial whose leading coefficient + is one for the given sequence of zeros (multiple roots must be included + in the sequence as many times as their multiplicity; see Examples). + A square matrix (or array, which will be treated as a matrix) can also + be given, in which case the coefficients of the characteristic polynomial + of the matrix are returned. + + Parameters + ---------- + seq_of_zeros : array_like, shape (N,) or (N, N) + A sequence of polynomial roots, or a square array or matrix object. + + Returns + ------- + c : ndarray + 1D array of polynomial coefficients from highest to lowest degree: + + ``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]`` + where c[0] always equals 1. + + Raises + ------ + ValueError + If input is the wrong shape (the input must be a 1-D or square + 2-D array). + + See Also + -------- + polyval : Compute polynomial values. + roots : Return the roots of a polynomial. + polyfit : Least squares polynomial fit. + poly1d : A one-dimensional polynomial class. + + Notes + ----- + Specifying the roots of a polynomial still leaves one degree of + freedom, typically represented by an undetermined leading + coefficient. [1]_ In the case of this function, that coefficient - + the first one in the returned array - is always taken as one. (If + for some reason you have one other point, the only automatic way + presently to leverage that information is to use ``polyfit``.) + + The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n` + matrix **A** is given by + + :math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`, + + where **I** is the `n`-by-`n` identity matrix. [2]_ + + References + ---------- + .. [1] M. Sullivan and M. 
Sullivan, III, "Algebra and Trignometry, + Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996. + + .. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition," + Academic Press, pg. 182, 1980. + + Examples + -------- + Given a sequence of a polynomial's zeros: + + >>> np.poly((0, 0, 0)) # Multiple root example + array([1., 0., 0., 0.]) + + The line above represents z**3 + 0*z**2 + 0*z + 0. + + >>> np.poly((-1./2, 0, 1./2)) + array([ 1. , 0. , -0.25, 0. ]) + + The line above represents z**3 - z/4 + + >>> np.poly((np.random.random(1)[0], 0, np.random.random(1)[0])) + array([ 1. , -0.77086955, 0.08618131, 0. ]) # random + + Given a square array object: + + >>> P = np.array([[0, 1./3], [-1./2, 0]]) + >>> np.poly(P) + array([1. , 0. , 0.16666667]) + + Note how in all cases the leading coefficient is always 1. + + """ + seq_of_zeros = atleast_1d(seq_of_zeros) + sh = seq_of_zeros.shape + + if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0: + seq_of_zeros = eigvals(seq_of_zeros) + elif len(sh) == 1: + dt = seq_of_zeros.dtype + # Let object arrays slip through, e.g. for arbitrary precision + if dt != object: + seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char)) + else: + raise ValueError("input must be 1d or non-empty square 2d array.") + + if len(seq_of_zeros) == 0: + return 1.0 + dt = seq_of_zeros.dtype + a = ones((1,), dtype=dt) + for k in range(len(seq_of_zeros)): + a = NX.convolve(a, array([1, -seq_of_zeros[k]], dtype=dt), + mode='full') + + if issubclass(a.dtype.type, NX.complexfloating): + # if complex roots are all complex conjugates, the roots are real. + roots = NX.asarray(seq_of_zeros, complex) + if NX.all(NX.sort(roots) == NX.sort(roots.conjugate())): + a = a.real.copy() + + return a + + +def _roots_dispatcher(p): + return p + + +@array_function_dispatch(_roots_dispatcher) +def roots(p): + """ + Return the roots of a polynomial with coefficients given in p. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + The values in the rank-1 array `p` are coefficients of a polynomial. + If the length of `p` is n+1 then the polynomial is described by:: + + p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n] + + Parameters + ---------- + p : array_like + Rank-1 array of polynomial coefficients. + + Returns + ------- + out : ndarray + An array containing the roots of the polynomial. + + Raises + ------ + ValueError + When `p` cannot be converted to a rank-1 array. + + See also + -------- + poly : Find the coefficients of a polynomial with a given sequence + of roots. + polyval : Compute polynomial values. + polyfit : Least squares polynomial fit. + poly1d : A one-dimensional polynomial class. + + Notes + ----- + The algorithm relies on computing the eigenvalues of the + companion matrix [1]_. + + References + ---------- + .. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK: + Cambridge University Press, 1999, pp. 146-7. 
+ + Examples + -------- + >>> coeff = [3.2, 2, 1] + >>> np.roots(coeff) + array([-0.3125+0.46351241j, -0.3125-0.46351241j]) + + """ + # If input is scalar, this makes it an array + p = atleast_1d(p) + if p.ndim != 1: + raise ValueError("Input must be a rank-1 array.") + + # find non-zero array entries + non_zero = NX.nonzero(NX.ravel(p))[0] + + # Return an empty array if polynomial is all zeros + if len(non_zero) == 0: + return NX.array([]) + + # find the number of trailing zeros -- this is the number of roots at 0. + trailing_zeros = len(p) - non_zero[-1] - 1 + + # strip leading and trailing zeros + p = p[int(non_zero[0]):int(non_zero[-1])+1] + + # casting: if incoming array isn't floating point, make it floating point. + if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)): + p = p.astype(float) + + N = len(p) + if N > 1: + # build companion matrix and find its eigenvalues (the roots) + A = diag(NX.ones((N-2,), p.dtype), -1) + A[0,:] = -p[1:] / p[0] + roots = eigvals(A) + else: + roots = NX.array([]) + + # tack any zeros onto the back of the array + roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype))) + return roots + + +def _polyint_dispatcher(p, m=None, k=None): + return (p,) + + +@array_function_dispatch(_polyint_dispatcher) +def polyint(p, m=1, k=None): + """ + Return an antiderivative (indefinite integral) of a polynomial. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + The returned order `m` antiderivative `P` of polynomial `p` satisfies + :math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1` + integration constants `k`. The constants determine the low-order + polynomial part + + .. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1} + + of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`. + + Parameters + ---------- + p : array_like or poly1d + Polynomial to integrate. + A sequence is interpreted as polynomial coefficients, see `poly1d`. + m : int, optional + Order of the antiderivative. (Default: 1) + k : list of `m` scalars or scalar, optional + Integration constants. They are given in the order of integration: + those corresponding to highest-order terms come first. + + If ``None`` (default), all constants are assumed to be zero. + If `m = 1`, a single scalar can be given instead of a list. + + See Also + -------- + polyder : derivative of a polynomial + poly1d.integ : equivalent method + + Examples + -------- + The defining property of the antiderivative: + + >>> p = np.poly1d([1,1,1]) + >>> P = np.polyint(p) + >>> P + poly1d([ 0.33333333, 0.5 , 1. , 0. ]) # may vary + >>> np.polyder(P) == p + True + + The integration constants default to zero, but can be specified: + + >>> P = np.polyint(p, 3) + >>> P(0) + 0.0 + >>> np.polyder(P)(0) + 0.0 + >>> np.polyder(P, 2)(0) + 0.0 + >>> P = np.polyint(p, 3, k=[6,5,3]) + >>> P + poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ]) # may vary + + Note that 3 = 6 / 2!, and that the constants are given in the order of + integrations. 
Constant of the highest-order polynomial term comes first: + + >>> np.polyder(P, 2)(0) + 6.0 + >>> np.polyder(P, 1)(0) + 5.0 + >>> P(0) + 3.0 + + """ + m = int(m) + if m < 0: + raise ValueError("Order of integral must be positive (see polyder)") + if k is None: + k = NX.zeros(m, float) + k = atleast_1d(k) + if len(k) == 1 and m > 1: + k = k[0]*NX.ones(m, float) + if len(k) < m: + raise ValueError( + "k must be a scalar or a rank-1 array of length 1 or >m.") + + truepoly = isinstance(p, poly1d) + p = NX.asarray(p) + if m == 0: + if truepoly: + return poly1d(p) + return p + else: + # Note: this must work also with object and integer arrays + y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]])) + val = polyint(y, m - 1, k=k[1:]) + if truepoly: + return poly1d(val) + return val + + +def _polyder_dispatcher(p, m=None): + return (p,) + + +@array_function_dispatch(_polyder_dispatcher) +def polyder(p, m=1): + """ + Return the derivative of the specified order of a polynomial. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + Parameters + ---------- + p : poly1d or sequence + Polynomial to differentiate. + A sequence is interpreted as polynomial coefficients, see `poly1d`. + m : int, optional + Order of differentiation (default: 1) + + Returns + ------- + der : poly1d + A new polynomial representing the derivative. + + See Also + -------- + polyint : Anti-derivative of a polynomial. + poly1d : Class for one-dimensional polynomials. + + Examples + -------- + The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is: + + >>> p = np.poly1d([1,1,1,1]) + >>> p2 = np.polyder(p) + >>> p2 + poly1d([3, 2, 1]) + + which evaluates to: + + >>> p2(2.) + 17.0 + + We can verify this, approximating the derivative with + ``(f(x + h) - f(x))/h``: + + >>> (p(2. + 0.001) - p(2.)) / 0.001 + 17.007000999997857 + + The fourth-order derivative of a 3rd-order polynomial is zero: + + >>> np.polyder(p, 2) + poly1d([6, 2]) + >>> np.polyder(p, 3) + poly1d([6]) + >>> np.polyder(p, 4) + poly1d([0]) + + """ + m = int(m) + if m < 0: + raise ValueError("Order of derivative must be positive (see polyint)") + + truepoly = isinstance(p, poly1d) + p = NX.asarray(p) + n = len(p) - 1 + y = p[:-1] * NX.arange(n, 0, -1) + if m == 0: + val = p + else: + val = polyder(y, m - 1) + if truepoly: + val = poly1d(val) + return val + + +def _polyfit_dispatcher(x, y, deg, rcond=None, full=None, w=None, cov=None): + return (x, y, w) + + +@array_function_dispatch(_polyfit_dispatcher) +def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): + """ + Least squares polynomial fit. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg` + to points `(x, y)`. Returns a vector of coefficients `p` that minimises + the squared error in the order `deg`, `deg-1`, ... `0`. + + The `Polynomial.fit ` class + method is recommended for new code as it is more stable numerically. See + the documentation of the method for more information. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. 
+ y : array_like, shape (M,) or (M, K) + y-coordinates of the sample points. Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int + Degree of the fitting polynomial + rcond : float, optional + Relative condition number of the fit. Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is len(x)*eps, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (M,), optional + Weights to apply to the y-coordinates of the sample points. For + gaussian uncertainties, use 1/sigma (not 1/sigma**2). + cov : bool or str, optional + If given and not `False`, return not just the estimate but also its + covariance matrix. By default, the covariance are scaled by + chi2/dof, where dof = M - (deg + 1), i.e., the weights are presumed + to be unreliable except in a relative sense and everything is scaled + such that the reduced chi2 is unity. This scaling is omitted if + ``cov='unscaled'``, as is relevant for the case that the weights are + 1/sigma**2, with sigma known to be a reliable estimate of the + uncertainty. + + Returns + ------- + p : ndarray, shape (deg + 1,) or (deg + 1, K) + Polynomial coefficients, highest power first. If `y` was 2-D, the + coefficients for `k`-th data set are in ``p[:,k]``. + + residuals, rank, singular_values, rcond + Present only if `full` = True. Residuals is sum of squared residuals + of the least-squares fit, the effective rank of the scaled Vandermonde + coefficient matrix, its singular values, and the specified value of + `rcond`. For more details, see `linalg.lstsq`. + + V : ndarray, shape (M,M) or (M,M,K) + Present only if `full` = False and `cov`=True. The covariance + matrix of the polynomial coefficient estimates. The diagonal of + this matrix are the variance estimates for each coefficient. If y + is a 2-D array, then the covariance matrix for the `k`-th data set + are in ``V[:,:,k]`` + + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if `full` = False. + + The warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', np.RankWarning) + + See Also + -------- + polyval : Compute polynomial values. + linalg.lstsq : Computes a least-squares fit. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution minimizes the squared error + + .. math :: + E = \\sum_{j=0}^k |p(x_j) - y_j|^2 + + in the equations:: + + x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0] + x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1] + ... + x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k] + + The coefficient matrix of the coefficients `p` is a Vandermonde matrix. + + `polyfit` issues a `RankWarning` when the least-squares fit is badly + conditioned. This implies that the best fit is not well-defined due + to numerical error. The results may be improved by lowering the polynomial + degree or by replacing `x` by `x` - `x`.mean(). 
The `rcond` parameter + can also be set to a value smaller than its default, but the resulting + fit may be spurious: including contributions from the small singular + values can add numerical noise to the result. + + Note that fitting polynomial coefficients is inherently badly conditioned + when the degree of the polynomial is large or the interval of sample points + is badly centered. The quality of the fit should always be checked in these + cases. When polynomial fits are not satisfactory, splines may be a good + alternative. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + https://en.wikipedia.org/wiki/Curve_fitting + .. [2] Wikipedia, "Polynomial interpolation", + https://en.wikipedia.org/wiki/Polynomial_interpolation + + Examples + -------- + >>> import warnings + >>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0]) + >>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0]) + >>> z = np.polyfit(x, y, 3) + >>> z + array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254]) # may vary + + It is convenient to use `poly1d` objects for dealing with polynomials: + + >>> p = np.poly1d(z) + >>> p(0.5) + 0.6143849206349179 # may vary + >>> p(3.5) + -0.34732142857143039 # may vary + >>> p(10) + 22.579365079365115 # may vary + + High-order polynomials may oscillate wildly: + + >>> with warnings.catch_warnings(): + ... warnings.simplefilter('ignore', np.RankWarning) + ... p30 = np.poly1d(np.polyfit(x, y, 30)) + ... + >>> p30(4) + -0.80000000000000204 # may vary + >>> p30(5) + -0.99999999999999445 # may vary + >>> p30(4.5) + -0.10547061179440398 # may vary + + Illustration: + + >>> import matplotlib.pyplot as plt + >>> xp = np.linspace(-2, 6, 100) + >>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--') + >>> plt.ylim(-2,2) + (-2, 2) + >>> plt.show() + + """ + order = int(deg) + 1 + x = NX.asarray(x) + 0.0 + y = NX.asarray(y) + 0.0 + + # check arguments. 
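+    # deg must be non-negative, x a non-empty 1-D vector, and y a 1-D or
+    # 2-D array whose first dimension matches x.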
+    if deg < 0:
+        raise ValueError("expected deg >= 0")
+    if x.ndim != 1:
+        raise TypeError("expected 1D vector for x")
+    if x.size == 0:
+        raise TypeError("expected non-empty vector for x")
+    if y.ndim < 1 or y.ndim > 2:
+        raise TypeError("expected 1D or 2D array for y")
+    if x.shape[0] != y.shape[0]:
+        raise TypeError("expected x and y to have same length")
+
+    # set rcond
+    if rcond is None:
+        rcond = len(x)*finfo(x.dtype).eps
+
+    # set up least squares equation for powers of x
+    lhs = vander(x, order)
+    rhs = y
+
+    # apply weighting
+    if w is not None:
+        w = NX.asarray(w) + 0.0
+        if w.ndim != 1:
+            raise TypeError("expected a 1-d array for weights")
+        if w.shape[0] != y.shape[0]:
+            raise TypeError("expected w and y to have the same length")
+        lhs *= w[:, NX.newaxis]
+        if rhs.ndim == 2:
+            rhs *= w[:, NX.newaxis]
+        else:
+            rhs *= w
+
+    # scale lhs to improve condition number and solve
+    scale = NX.sqrt((lhs*lhs).sum(axis=0))
+    lhs /= scale
+    c, resids, rank, s = lstsq(lhs, rhs, rcond)
+    c = (c.T/scale).T  # broadcast scale coefficients
+
+    # warn on rank reduction, which indicates an ill conditioned matrix
+    if rank != order and not full:
+        msg = "Polyfit may be poorly conditioned"
+        warnings.warn(msg, RankWarning, stacklevel=4)
+
+    if full:
+        return c, resids, rank, s, rcond
+    elif cov:
+        Vbase = inv(dot(lhs.T, lhs))
+        Vbase /= NX.outer(scale, scale)
+        if cov == "unscaled":
+            fac = 1
+        else:
+            if len(x) <= order:
+                raise ValueError("the number of data points must exceed order "
+                                 "to scale the covariance matrix")
+            # note, this used to be: fac = resids / (len(x) - order - 2.0)
+            # it was decided that the "- 2" (originally justified by "Bayesian
+            # uncertainty analysis") is not what the user expects
+            # (see gh-11196 and gh-11197)
+            fac = resids / (len(x) - order)
+        if y.ndim == 1:
+            return c, Vbase * fac
+        else:
+            return c, Vbase[:,:, NX.newaxis] * fac
+    else:
+        return c
+
+
+def _polyval_dispatcher(p, x):
+    return (p, x)
+
+
+@array_function_dispatch(_polyval_dispatcher)
+def polyval(p, x):
+    """
+    Evaluate a polynomial at specific values.
+
+    .. note::
+       This forms part of the old polynomial API. Since version 1.4, the
+       new polynomial API defined in `numpy.polynomial` is preferred.
+       A summary of the differences can be found in the
+       :doc:`transition guide </reference/routines.polynomials>`.
+
+    If `p` is of length N, this function returns the value:
+
+    ``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
+
+    If `x` is a sequence, then ``p(x)`` is returned for each element of ``x``.
+    If `x` is another polynomial then the composite polynomial ``p(x(t))``
+    is returned.
+
+    Parameters
+    ----------
+    p : array_like or poly1d object
+       1D array of polynomial coefficients (including coefficients equal
+       to zero) from highest degree to the constant term, or an
+       instance of poly1d.
+    x : array_like or poly1d object
+       A number, an array of numbers, or an instance of poly1d, at
+       which to evaluate `p`.
+
+    Returns
+    -------
+    values : ndarray or poly1d
+       If `x` is a poly1d instance, the result is the composition of the two
+       polynomials, i.e., `x` is "substituted" in `p` and the simplified
+       result is returned. In addition, the type of `x` - array_like or
+       poly1d - governs the type of the output: `x` array_like => `values`
+       array_like, `x` a poly1d object => `values` is also.
+
+    See Also
+    --------
+    poly1d: A polynomial class.
+
+    Notes
+    -----
+    Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
+    for polynomials of high degree the values may be inaccurate due to
+    rounding errors. Use carefully.
+ + If `x` is a subtype of `ndarray` the return value will be of the same type. + + References + ---------- + .. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng. + trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand + Reinhold Co., 1985, pg. 720. + + Examples + -------- + >>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1 + 76 + >>> np.polyval([3,0,1], np.poly1d(5)) + poly1d([76]) + >>> np.polyval(np.poly1d([3,0,1]), 5) + 76 + >>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5)) + poly1d([76]) + + """ + p = NX.asarray(p) + if isinstance(x, poly1d): + y = 0 + else: + x = NX.asanyarray(x) + y = NX.zeros_like(x) + for i in range(len(p)): + y = y * x + p[i] + return y + + +def _binary_op_dispatcher(a1, a2): + return (a1, a2) + + +@array_function_dispatch(_binary_op_dispatcher) +def polyadd(a1, a2): + """ + Find the sum of two polynomials. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + Returns the polynomial resulting from the sum of two input polynomials. + Each input must be either a poly1d object or a 1D sequence of polynomial + coefficients, from highest to lowest degree. + + Parameters + ---------- + a1, a2 : array_like or poly1d object + Input polynomials. + + Returns + ------- + out : ndarray or poly1d object + The sum of the inputs. If either input is a poly1d object, then the + output is also a poly1d object. Otherwise, it is a 1D array of + polynomial coefficients from highest to lowest degree. + + See Also + -------- + poly1d : A one-dimensional polynomial class. + poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval + + Examples + -------- + >>> np.polyadd([1, 2], [9, 5, 4]) + array([9, 6, 6]) + + Using poly1d objects: + + >>> p1 = np.poly1d([1, 2]) + >>> p2 = np.poly1d([9, 5, 4]) + >>> print(p1) + 1 x + 2 + >>> print(p2) + 2 + 9 x + 5 x + 4 + >>> print(np.polyadd(p1, p2)) + 2 + 9 x + 6 x + 6 + + """ + truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d)) + a1 = atleast_1d(a1) + a2 = atleast_1d(a2) + diff = len(a2) - len(a1) + if diff == 0: + val = a1 + a2 + elif diff > 0: + zr = NX.zeros(diff, a1.dtype) + val = NX.concatenate((zr, a1)) + a2 + else: + zr = NX.zeros(abs(diff), a2.dtype) + val = a1 + NX.concatenate((zr, a2)) + if truepoly: + val = poly1d(val) + return val + + +@array_function_dispatch(_binary_op_dispatcher) +def polysub(a1, a2): + """ + Difference (subtraction) of two polynomials. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + Given two polynomials `a1` and `a2`, returns ``a1 - a2``. + `a1` and `a2` can be either array_like sequences of the polynomials' + coefficients (including coefficients equal to zero), or `poly1d` objects. + + Parameters + ---------- + a1, a2 : array_like or poly1d + Minuend and subtrahend polynomials, respectively. + + Returns + ------- + out : ndarray or poly1d + Array or `poly1d` object of the difference polynomial's coefficients. + + See Also + -------- + polyval, polydiv, polymul, polyadd + + Examples + -------- + .. 
math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x - 4) = (-x^2 + 2)
+
+    >>> np.polysub([2, 10, -2], [3, 10, -4])
+    array([-1,  0,  2])
+
+    """
+    truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
+    a1 = atleast_1d(a1)
+    a2 = atleast_1d(a2)
+    diff = len(a2) - len(a1)
+    if diff == 0:
+        val = a1 - a2
+    elif diff > 0:
+        zr = NX.zeros(diff, a1.dtype)
+        val = NX.concatenate((zr, a1)) - a2
+    else:
+        zr = NX.zeros(abs(diff), a2.dtype)
+        val = a1 - NX.concatenate((zr, a2))
+    if truepoly:
+        val = poly1d(val)
+    return val
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def polymul(a1, a2):
+    """
+    Find the product of two polynomials.
+
+    .. note::
+       This forms part of the old polynomial API. Since version 1.4, the
+       new polynomial API defined in `numpy.polynomial` is preferred.
+       A summary of the differences can be found in the
+       :doc:`transition guide </reference/routines.polynomials>`.
+
+    Finds the polynomial resulting from the multiplication of the two input
+    polynomials. Each input must be either a poly1d object or a 1D sequence
+    of polynomial coefficients, from highest to lowest degree.
+
+    Parameters
+    ----------
+    a1, a2 : array_like or poly1d object
+        Input polynomials.
+
+    Returns
+    -------
+    out : ndarray or poly1d object
+        The polynomial resulting from the multiplication of the inputs. If
+        either input is a poly1d object, then the output is also a poly1d
+        object. Otherwise, it is a 1D array of polynomial coefficients from
+        highest to lowest degree.
+
+    See Also
+    --------
+    poly1d : A one-dimensional polynomial class.
+    poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
+    convolve : Array convolution. Same output as polymul, but has parameter
+               for overlap mode.
+
+    Examples
+    --------
+    >>> np.polymul([1, 2, 3], [9, 5, 1])
+    array([ 9, 23, 38, 17,  3])
+
+    Using poly1d objects:
+
+    >>> p1 = np.poly1d([1, 2, 3])
+    >>> p2 = np.poly1d([9, 5, 1])
+    >>> print(p1)
+       2
+    1 x + 2 x + 3
+    >>> print(p2)
+       2
+    9 x + 5 x + 1
+    >>> print(np.polymul(p1, p2))
+       4      3      2
+    9 x + 23 x + 38 x + 17 x + 3
+
+    """
+    truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
+    a1, a2 = poly1d(a1), poly1d(a2)
+    val = NX.convolve(a1, a2)
+    if truepoly:
+        val = poly1d(val)
+    return val
+
+
+def _polydiv_dispatcher(u, v):
+    return (u, v)
+
+
+@array_function_dispatch(_polydiv_dispatcher)
+def polydiv(u, v):
+    """
+    Returns the quotient and remainder of polynomial division.
+
+    .. note::
+       This forms part of the old polynomial API. Since version 1.4, the
+       new polynomial API defined in `numpy.polynomial` is preferred.
+       A summary of the differences can be found in the
+       :doc:`transition guide </reference/routines.polynomials>`.
+
+    The input arrays are the coefficients (including any coefficients
+    equal to zero) of the "numerator" (dividend) and "denominator"
+    (divisor) polynomials, respectively.
+
+    Parameters
+    ----------
+    u : array_like or poly1d
+        Dividend polynomial's coefficients.
+
+    v : array_like or poly1d
+        Divisor polynomial's coefficients.
+
+    Returns
+    -------
+    q : ndarray
+        Coefficients, including those equal to zero, of the quotient.
+    r : ndarray
+        Coefficients, including those equal to zero, of the remainder.
+
+    See Also
+    --------
+    poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub
+    polyval
+
+    Notes
+    -----
+    Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
+    not equal `v.ndim`. In other words, all four possible combinations -
+    ``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
+    ``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
+
+    Examples
+    --------
+    .. 
math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25 + + >>> x = np.array([3.0, 5.0, 2.0]) + >>> y = np.array([2.0, 1.0]) + >>> np.polydiv(x, y) + (array([1.5 , 1.75]), array([0.25])) + + """ + truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d)) + u = atleast_1d(u) + 0.0 + v = atleast_1d(v) + 0.0 + # w has the common type + w = u[0] + v[0] + m = len(u) - 1 + n = len(v) - 1 + scale = 1. / v[0] + q = NX.zeros((max(m - n + 1, 1),), w.dtype) + r = u.astype(w.dtype) + for k in range(0, m-n+1): + d = scale * r[k] + q[k] = d + r[k:k+n+1] -= d*v + while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1): + r = r[1:] + if truepoly: + return poly1d(q), poly1d(r) + return q, r + +_poly_mat = re.compile(r"\*\*([0-9]*)") +def _raise_power(astr, wrap=70): + n = 0 + line1 = '' + line2 = '' + output = ' ' + while True: + mat = _poly_mat.search(astr, n) + if mat is None: + break + span = mat.span() + power = mat.groups()[0] + partstr = astr[n:span[0]] + n = span[1] + toadd2 = partstr + ' '*(len(power)-1) + toadd1 = ' '*(len(partstr)-1) + power + if ((len(line2) + len(toadd2) > wrap) or + (len(line1) + len(toadd1) > wrap)): + output += line1 + "\n" + line2 + "\n " + line1 = toadd1 + line2 = toadd2 + else: + line2 += partstr + ' '*(len(power)-1) + line1 += ' '*(len(partstr)-1) + power + output += line1 + "\n" + line2 + return output + astr[n:] + + +@set_module('numpy') +class poly1d: + """ + A one-dimensional polynomial class. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + A convenience class, used to encapsulate "natural" operations on + polynomials so that said operations may take on their customary + form in code (see Examples). + + Parameters + ---------- + c_or_r : array_like + The polynomial's coefficients, in decreasing powers, or if + the value of the second parameter is True, the polynomial's + roots (values where the polynomial evaluates to 0). For example, + ``poly1d([1, 2, 3])`` returns an object that represents + :math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns + one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`. + r : bool, optional + If True, `c_or_r` specifies the polynomial's roots; the default + is False. + variable : str, optional + Changes the variable used when printing `p` from `x` to `variable` + (see Examples). 
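+
+    Note that leading zeros in `c_or_r` are stripped, so ``poly1d([0, 1, 2])``
+    and ``poly1d([1, 2])`` describe the same (first-order) polynomial.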
+
+    Examples
+    --------
+    Construct the polynomial :math:`x^2 + 2x + 3`:
+
+    >>> p = np.poly1d([1, 2, 3])
+    >>> print(np.poly1d(p))
+       2
+    1 x + 2 x + 3
+
+    Evaluate the polynomial at :math:`x = 0.5`:
+
+    >>> p(0.5)
+    4.25
+
+    Find the roots:
+
+    >>> p.r
+    array([-1.+1.41421356j, -1.-1.41421356j])
+    >>> p(p.r)
+    array([ -4.44089210e-16+0.j,  -4.44089210e-16+0.j])  # may vary
+
+    These numbers in the previous line represent (0, 0) to machine precision.
+
+    Show the coefficients:
+
+    >>> p.c
+    array([1, 2, 3])
+
+    Display the order (the leading zero-coefficients are removed):
+
+    >>> p.order
+    2
+
+    Show the coefficient of the k-th power in the polynomial
+    (which is equivalent to ``p.c[-(k+1)]``):
+
+    >>> p[1]
+    2
+
+    Polynomials can be added, subtracted, multiplied, and divided
+    (returns quotient and remainder):
+
+    >>> p * p
+    poly1d([ 1,  4, 10, 12,  9])
+
+    >>> (p**3 + 4) / p
+    (poly1d([ 1.,  4., 10., 12.,  9.]), poly1d([4.]))
+
+    ``asarray(p)`` gives the coefficient array, so polynomials can be
+    used in all functions that accept arrays:
+
+    >>> p**2  # square of polynomial
+    poly1d([ 1,  4, 10, 12,  9])
+
+    >>> np.square(p)  # square of individual coefficients
+    array([1, 4, 9])
+
+    The variable used in the string representation of `p` can be modified,
+    using the `variable` parameter:
+
+    >>> p = np.poly1d([1, 2, 3], variable='z')
+    >>> print(p)
+       2
+    1 z + 2 z + 3
+
+    Construct a polynomial from its roots:
+
+    >>> np.poly1d([1, 2], True)
+    poly1d([ 1., -3.,  2.])
+
+    This is the same polynomial as obtained by:
+
+    >>> np.poly1d([1, -1]) * np.poly1d([1, -2])
+    poly1d([ 1, -3,  2])
+
+    """
+    __hash__ = None
+
+    @property
+    def coeffs(self):
+        """ The polynomial coefficients """
+        return self._coeffs
+
+    @coeffs.setter
+    def coeffs(self, value):
+        # allowing this makes p.coeffs *= 2 legal
+        if value is not self._coeffs:
+            raise AttributeError("Cannot set attribute")
+
+    @property
+    def variable(self):
+        """ The name of the polynomial variable """
+        return self._variable
+
+    # calculated attributes
+    @property
+    def order(self):
+        """ The order or degree of the polynomial """
+        return len(self._coeffs) - 1
+
+    @property
+    def roots(self):
+        """ The roots of the polynomial, where self(x) == 0 """
+        return roots(self._coeffs)
+
+    # our internal _coeffs property needs to be backed by __dict__['coeffs']
+    # for scipy to work correctly.
+ @property + def _coeffs(self): + return self.__dict__['coeffs'] + @_coeffs.setter + def _coeffs(self, coeffs): + self.__dict__['coeffs'] = coeffs + + # alias attributes + r = roots + c = coef = coefficients = coeffs + o = order + + def __init__(self, c_or_r, r=False, variable=None): + if isinstance(c_or_r, poly1d): + self._variable = c_or_r._variable + self._coeffs = c_or_r._coeffs + + if set(c_or_r.__dict__) - set(self.__dict__): + msg = ("In the future extra properties will not be copied " + "across when constructing one poly1d from another") + warnings.warn(msg, FutureWarning, stacklevel=2) + self.__dict__.update(c_or_r.__dict__) + + if variable is not None: + self._variable = variable + return + if r: + c_or_r = poly(c_or_r) + c_or_r = atleast_1d(c_or_r) + if c_or_r.ndim > 1: + raise ValueError("Polynomial must be 1d only.") + c_or_r = trim_zeros(c_or_r, trim='f') + if len(c_or_r) == 0: + c_or_r = NX.array([0], dtype=c_or_r.dtype) + self._coeffs = c_or_r + if variable is None: + variable = 'x' + self._variable = variable + + def __array__(self, t=None): + if t: + return NX.asarray(self.coeffs, t) + else: + return NX.asarray(self.coeffs) + + def __repr__(self): + vals = repr(self.coeffs) + vals = vals[6:-1] + return "poly1d(%s)" % vals + + def __len__(self): + return self.order + + def __str__(self): + thestr = "0" + var = self.variable + + # Remove leading zeros + coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)] + N = len(coeffs)-1 + + def fmt_float(q): + s = '%.4g' % q + if s.endswith('.0000'): + s = s[:-5] + return s + + for k in range(len(coeffs)): + if not iscomplex(coeffs[k]): + coefstr = fmt_float(real(coeffs[k])) + elif real(coeffs[k]) == 0: + coefstr = '%sj' % fmt_float(imag(coeffs[k])) + else: + coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])), + fmt_float(imag(coeffs[k]))) + + power = (N-k) + if power == 0: + if coefstr != '0': + newstr = '%s' % (coefstr,) + else: + if k == 0: + newstr = '0' + else: + newstr = '' + elif power == 1: + if coefstr == '0': + newstr = '' + elif coefstr == 'b': + newstr = var + else: + newstr = '%s %s' % (coefstr, var) + else: + if coefstr == '0': + newstr = '' + elif coefstr == 'b': + newstr = '%s**%d' % (var, power,) + else: + newstr = '%s %s**%d' % (coefstr, var, power) + + if k > 0: + if newstr != '': + if newstr.startswith('-'): + thestr = "%s - %s" % (thestr, newstr[1:]) + else: + thestr = "%s + %s" % (thestr, newstr) + else: + thestr = newstr + return _raise_power(thestr) + + def __call__(self, val): + return polyval(self.coeffs, val) + + def __neg__(self): + return poly1d(-self.coeffs) + + def __pos__(self): + return self + + def __mul__(self, other): + if isscalar(other): + return poly1d(self.coeffs * other) + else: + other = poly1d(other) + return poly1d(polymul(self.coeffs, other.coeffs)) + + def __rmul__(self, other): + if isscalar(other): + return poly1d(other * self.coeffs) + else: + other = poly1d(other) + return poly1d(polymul(self.coeffs, other.coeffs)) + + def __add__(self, other): + other = poly1d(other) + return poly1d(polyadd(self.coeffs, other.coeffs)) + + def __radd__(self, other): + other = poly1d(other) + return poly1d(polyadd(self.coeffs, other.coeffs)) + + def __pow__(self, val): + if not isscalar(val) or int(val) != val or val < 0: + raise ValueError("Power to non-negative integers only.") + res = [1] + for _ in range(val): + res = polymul(self.coeffs, res) + return poly1d(res) + + def __sub__(self, other): + other = poly1d(other) + return poly1d(polysub(self.coeffs, other.coeffs)) + + def 
__rsub__(self, other): + other = poly1d(other) + return poly1d(polysub(other.coeffs, self.coeffs)) + + def __div__(self, other): + if isscalar(other): + return poly1d(self.coeffs/other) + else: + other = poly1d(other) + return polydiv(self, other) + + __truediv__ = __div__ + + def __rdiv__(self, other): + if isscalar(other): + return poly1d(other/self.coeffs) + else: + other = poly1d(other) + return polydiv(other, self) + + __rtruediv__ = __rdiv__ + + def __eq__(self, other): + if not isinstance(other, poly1d): + return NotImplemented + if self.coeffs.shape != other.coeffs.shape: + return False + return (self.coeffs == other.coeffs).all() + + def __ne__(self, other): + if not isinstance(other, poly1d): + return NotImplemented + return not self.__eq__(other) + + + def __getitem__(self, val): + ind = self.order - val + if val > self.order: + return 0 + if val < 0: + return 0 + return self.coeffs[ind] + + def __setitem__(self, key, val): + ind = self.order - key + if key < 0: + raise ValueError("Does not support negative powers.") + if key > self.order: + zr = NX.zeros(key-self.order, self.coeffs.dtype) + self._coeffs = NX.concatenate((zr, self.coeffs)) + ind = 0 + self._coeffs[ind] = val + return + + def __iter__(self): + return iter(self.coeffs) + + def integ(self, m=1, k=0): + """ + Return an antiderivative (indefinite integral) of this polynomial. + + Refer to `polyint` for full documentation. + + See Also + -------- + polyint : equivalent function + + """ + return poly1d(polyint(self.coeffs, m=m, k=k)) + + def deriv(self, m=1): + """ + Return a derivative of this polynomial. + + Refer to `polyder` for full documentation. + + See Also + -------- + polyder : equivalent function + + """ + return poly1d(polyder(self.coeffs, m=m)) + +# Stuff to do on module import + +warnings.simplefilter('always', RankWarning) diff --git a/MLPY/Lib/site-packages/numpy/lib/polynomial.pyi b/MLPY/Lib/site-packages/numpy/lib/polynomial.pyi new file mode 100644 index 0000000000000000000000000000000000000000..1cac694527c7539385984706501338eed84ba279 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/polynomial.pyi @@ -0,0 +1,19 @@ +from typing import List + +from numpy import ( + RankWarning as RankWarning, + poly1d as poly1d, +) + +__all__: List[str] + +def poly(seq_of_zeros): ... +def roots(p): ... +def polyint(p, m=..., k=...): ... +def polyder(p, m=...): ... +def polyfit(x, y, deg, rcond=..., full=..., w=..., cov=...): ... +def polyval(p, x): ... +def polyadd(a1, a2): ... +def polysub(a1, a2): ... +def polymul(a1, a2): ... +def polydiv(u, v): ... diff --git a/MLPY/Lib/site-packages/numpy/lib/recfunctions.py b/MLPY/Lib/site-packages/numpy/lib/recfunctions.py new file mode 100644 index 0000000000000000000000000000000000000000..b1263d1ab44529fc71e51083b6c9328939c10367 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/recfunctions.py @@ -0,0 +1,1594 @@ +""" +Collection of utilities to manipulate structured arrays. + +Most of these functions were initially implemented by John Hunter for +matplotlib. They have been rewritten and extended for convenience. 
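+
+A minimal usage sketch (assuming ``numpy`` is imported as ``np``)::
+
+    >>> from numpy.lib import recfunctions as rfn
+    >>> a = np.array([(1, 2.0)], dtype=[('x', int), ('y', float)])
+    >>> rfn.get_names(a.dtype)
+    ('x', 'y')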
+ +""" +import itertools +import numpy as np +import numpy.ma as ma +from numpy import ndarray, recarray +from numpy.ma import MaskedArray +from numpy.ma.mrecords import MaskedRecords +from numpy.core.overrides import array_function_dispatch +from numpy.lib._iotools import _is_string_like +from numpy.testing import suppress_warnings + +_check_fill_value = np.ma.core._check_fill_value + + +__all__ = [ + 'append_fields', 'apply_along_fields', 'assign_fields_by_name', + 'drop_fields', 'find_duplicates', 'flatten_descr', + 'get_fieldstructure', 'get_names', 'get_names_flat', + 'join_by', 'merge_arrays', 'rec_append_fields', + 'rec_drop_fields', 'rec_join', 'recursive_fill_fields', + 'rename_fields', 'repack_fields', 'require_fields', + 'stack_arrays', 'structured_to_unstructured', 'unstructured_to_structured', + ] + + +def _recursive_fill_fields_dispatcher(input, output): + return (input, output) + + +@array_function_dispatch(_recursive_fill_fields_dispatcher) +def recursive_fill_fields(input, output): + """ + Fills fields from output with fields from input, + with support for nested structures. + + Parameters + ---------- + input : ndarray + Input array. + output : ndarray + Output array. + + Notes + ----- + * `output` should be at least the same size as `input` + + Examples + -------- + >>> from numpy.lib import recfunctions as rfn + >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', np.int64), ('B', np.float64)]) + >>> b = np.zeros((3,), dtype=a.dtype) + >>> rfn.recursive_fill_fields(a, b) + array([(1, 10.), (2, 20.), (0, 0.)], dtype=[('A', '>> dt = np.dtype([(('a', 'A'), np.int64), ('b', np.double, 3)]) + >>> dt.descr + [(('a', 'A'), '>> _get_fieldspec(dt) + [(('a', 'A'), dtype('int64')), ('b', dtype(('>> from numpy.lib import recfunctions as rfn + >>> rfn.get_names(np.empty((1,), dtype=int)) + Traceback (most recent call last): + ... + AttributeError: 'numpy.ndarray' object has no attribute 'names' + + >>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)])) + Traceback (most recent call last): + ... + AttributeError: 'numpy.ndarray' object has no attribute 'names' + >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])]) + >>> rfn.get_names(adtype) + ('a', ('b', ('ba', 'bb'))) + """ + listnames = [] + names = adtype.names + for name in names: + current = adtype[name] + if current.names is not None: + listnames.append((name, tuple(get_names(current)))) + else: + listnames.append(name) + return tuple(listnames) + + +def get_names_flat(adtype): + """ + Returns the field names of the input datatype as a tuple. Nested structure + are flattened beforehand. + + Parameters + ---------- + adtype : dtype + Input datatype + + Examples + -------- + >>> from numpy.lib import recfunctions as rfn + >>> rfn.get_names_flat(np.empty((1,), dtype=int)) is None + Traceback (most recent call last): + ... + AttributeError: 'numpy.ndarray' object has no attribute 'names' + >>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)])) + Traceback (most recent call last): + ... + AttributeError: 'numpy.ndarray' object has no attribute 'names' + >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])]) + >>> rfn.get_names_flat(adtype) + ('a', 'b', 'ba', 'bb') + """ + listnames = [] + names = adtype.names + for name in names: + listnames.append(name) + current = adtype[name] + if current.names is not None: + listnames.extend(get_names_flat(current)) + return tuple(listnames) + + +def flatten_descr(ndtype): + """ + Flatten a structured data-type description. 
+
+    Examples
+    --------
+    >>> from numpy.lib import recfunctions as rfn
+    >>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
+    >>> rfn.flatten_descr(ndtype)
+    (('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
+
+    """
+    names = ndtype.names
+    if names is None:
+        return (('', ndtype),)
+    else:
+        descr = []
+        for field in names:
+            (typ, _) = ndtype.fields[field]
+            if typ.names is not None:
+                descr.extend(flatten_descr(typ))
+            else:
+                descr.append((field, typ))
+        return tuple(descr)
+
+
+def _zip_dtype(seqarrays, flatten=False):
+    newdtype = []
+    if flatten:
+        for a in seqarrays:
+            newdtype.extend(flatten_descr(a.dtype))
+    else:
+        for a in seqarrays:
+            current = a.dtype
+            if current.names is not None and len(current.names) == 1:
+                # special case - dtypes of 1 field are flattened
+                newdtype.extend(_get_fieldspec(current))
+            else:
+                newdtype.append(('', current))
+    return np.dtype(newdtype)
+
+
+def _zip_descr(seqarrays, flatten=False):
+    """
+    Combine the dtype description of a series of arrays.
+
+    Parameters
+    ----------
+    seqarrays : sequence of arrays
+        Sequence of arrays
+    flatten : {boolean}, optional
+        Whether to collapse nested descriptions.
+    """
+    return _zip_dtype(seqarrays, flatten=flatten).descr
+
+
+def get_fieldstructure(adtype, lastname=None, parents=None,):
+    """
+    Returns a dictionary with fields indexing lists of their parent fields.
+
+    This function is used to simplify access to fields nested in other fields.
+
+    Parameters
+    ----------
+    adtype : np.dtype
+        Input datatype
+    lastname : optional
+        Last processed field name (used internally during recursion).
+    parents : dictionary
+        Dictionary of parent fields (used internally during recursion).
+
+    Examples
+    --------
+    >>> from numpy.lib import recfunctions as rfn
+    >>> ndtype = np.dtype([('A', int),
+    ...                    ('B', [('BA', int),
+    ...                           ('BB', [('BBA', int), ('BBB', int)])])])
+    >>> rfn.get_fieldstructure(ndtype)
+    ... # XXX: possible regression, order of BBA and BBB is swapped
+    {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
+
+    """
+    if parents is None:
+        parents = {}
+    names = adtype.names
+    for name in names:
+        current = adtype[name]
+        if current.names is not None:
+            if lastname:
+                parents[name] = [lastname, ]
+            else:
+                parents[name] = []
+            parents.update(get_fieldstructure(current, name, parents))
+        else:
+            lastparent = [_ for _ in (parents.get(lastname, []) or [])]
+            if lastparent:
+                lastparent.append(lastname)
+            elif lastname:
+                lastparent = [lastname, ]
+            parents[name] = lastparent or []
+    return parents
+
+
+def _izip_fields_flat(iterable):
+    """
+    Returns an iterator of concatenated fields from a sequence of arrays,
+    collapsing any nested structure.
+
+    """
+    for element in iterable:
+        if isinstance(element, np.void):
+            yield from _izip_fields_flat(tuple(element))
+        else:
+            yield element
+
+
+def _izip_fields(iterable):
+    """
+    Returns an iterator of concatenated fields from a sequence of arrays.
+
+    """
+    for element in iterable:
+        if (hasattr(element, '__iter__') and
+                not isinstance(element, str)):
+            yield from _izip_fields(element)
+        elif isinstance(element, np.void) and len(tuple(element)) == 1:
+            # this statement is the same as the previous expression
+            yield from _izip_fields(element)
+        else:
+            yield element
+
+
+def _izip_records(seqarrays, fill_value=None, flatten=True):
+    """
+    Returns an iterator of concatenated items from a sequence of arrays.
+
+    Parameters
+    ----------
+    seqarrays : sequence of arrays
+        Sequence of arrays.
+ fill_value : {None, integer} + Value used to pad shorter iterables. + flatten : {True, False}, + Whether to + """ + + # Should we flatten the items, or just use a nested approach + if flatten: + zipfunc = _izip_fields_flat + else: + zipfunc = _izip_fields + + for tup in itertools.zip_longest(*seqarrays, fillvalue=fill_value): + yield tuple(zipfunc(tup)) + + +def _fix_output(output, usemask=True, asrecarray=False): + """ + Private function: return a recarray, a ndarray, a MaskedArray + or a MaskedRecords depending on the input parameters + """ + if not isinstance(output, MaskedArray): + usemask = False + if usemask: + if asrecarray: + output = output.view(MaskedRecords) + else: + output = ma.filled(output) + if asrecarray: + output = output.view(recarray) + return output + + +def _fix_defaults(output, defaults=None): + """ + Update the fill_value and masked data of `output` + from the default given in a dictionary defaults. + """ + names = output.dtype.names + (data, mask, fill_value) = (output.data, output.mask, output.fill_value) + for (k, v) in (defaults or {}).items(): + if k in names: + fill_value[k] = v + data[k][mask[k]] = v + return output + + +def _merge_arrays_dispatcher(seqarrays, fill_value=None, flatten=None, + usemask=None, asrecarray=None): + return seqarrays + + +@array_function_dispatch(_merge_arrays_dispatcher) +def merge_arrays(seqarrays, fill_value=-1, flatten=False, + usemask=False, asrecarray=False): + """ + Merge arrays field by field. + + Parameters + ---------- + seqarrays : sequence of ndarrays + Sequence of arrays + fill_value : {float}, optional + Filling value used to pad missing data on the shorter arrays. + flatten : {False, True}, optional + Whether to collapse nested fields. + usemask : {False, True}, optional + Whether to return a masked array or not. + asrecarray : {False, True}, optional + Whether to return a recarray (MaskedRecords) or not. + + Examples + -------- + >>> from numpy.lib import recfunctions as rfn + >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.]))) + array([( 1, 10.), ( 2, 20.), (-1, 30.)], + dtype=[('f0', '>> rfn.merge_arrays((np.array([1, 2], dtype=np.int64), + ... np.array([10., 20., 30.])), usemask=False) + array([(1, 10.0), (2, 20.0), (-1, 30.0)], + dtype=[('f0', '>> rfn.merge_arrays((np.array([1, 2]).view([('a', np.int64)]), + ... np.array([10., 20., 30.])), + ... usemask=False, asrecarray=True) + rec.array([( 1, 10.), ( 2, 20.), (-1, 30.)], + dtype=[('a', '>> from numpy.lib import recfunctions as rfn + >>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + ... dtype=[('a', np.int64), ('b', [('ba', np.double), ('bb', np.int64)])]) + >>> rfn.drop_fields(a, 'a') + array([((2., 3),), ((5., 6),)], + dtype=[('b', [('ba', '>> rfn.drop_fields(a, 'ba') + array([(1, (3,)), (4, (6,))], dtype=[('a', '>> rfn.drop_fields(a, ['ba', 'bb']) + array([(1,), (4,)], dtype=[('a', '>> from numpy.lib import recfunctions as rfn + >>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))], + ... 
dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])]) + >>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'}) + array([(1, (2., [ 3., 30.])), (4, (5., [ 6., 60.]))], + dtype=[('A', ' 1: + data = merge_arrays(data, flatten=True, usemask=usemask, + fill_value=fill_value) + else: + data = data.pop() + # + output = ma.masked_all( + max(len(base), len(data)), + dtype=_get_fieldspec(base.dtype) + _get_fieldspec(data.dtype)) + output = recursive_fill_fields(base, output) + output = recursive_fill_fields(data, output) + # + return _fix_output(output, usemask=usemask, asrecarray=asrecarray) + + +def _rec_append_fields_dispatcher(base, names, data, dtypes=None): + yield base + yield from data + + +@array_function_dispatch(_rec_append_fields_dispatcher) +def rec_append_fields(base, names, data, dtypes=None): + """ + Add new fields to an existing array. + + The names of the fields are given with the `names` arguments, + the corresponding values with the `data` arguments. + If a single field is appended, `names`, `data` and `dtypes` do not have + to be lists but just values. + + Parameters + ---------- + base : array + Input array to extend. + names : string, sequence + String or sequence of strings corresponding to the names + of the new fields. + data : array or sequence of arrays + Array or sequence of arrays storing the fields to add to the base. + dtypes : sequence of datatypes, optional + Datatype or sequence of datatypes. + If None, the datatypes are estimated from the `data`. + + See Also + -------- + append_fields + + Returns + ------- + appended_array : np.recarray + """ + return append_fields(base, names, data=data, dtypes=dtypes, + asrecarray=True, usemask=False) + + +def _repack_fields_dispatcher(a, align=None, recurse=None): + return (a,) + + +@array_function_dispatch(_repack_fields_dispatcher) +def repack_fields(a, align=False, recurse=False): + """ + Re-pack the fields of a structured array or dtype in memory. + + The memory layout of structured datatypes allows fields at arbitrary + byte offsets. This means the fields can be separated by padding bytes, + their offsets can be non-monotonically increasing, and they can overlap. + + This method removes any overlaps and reorders the fields in memory so they + have increasing byte offsets, and adds or removes padding bytes depending + on the `align` option, which behaves like the `align` option to `np.dtype`. + + If `align=False`, this method produces a "packed" memory layout in which + each field starts at the byte the previous field ended, and any padding + bytes are removed. + + If `align=True`, this methods produces an "aligned" memory layout in which + each field's offset is a multiple of its alignment, and the total itemsize + is a multiple of the largest alignment, by adding padding bytes as needed. + + Parameters + ---------- + a : ndarray or dtype + array or dtype for which to repack the fields. + align : boolean + If true, use an "aligned" memory layout, otherwise use a "packed" layout. + recurse : boolean + If True, also repack nested structures. + + Returns + ------- + repacked : ndarray or dtype + Copy of `a` with fields repacked, or `a` itself if no repacking was + needed. + + Examples + -------- + + >>> from numpy.lib import recfunctions as rfn + >>> def print_offsets(d): + ... print("offsets:", [d.fields[name][1] for name in d.names]) + ... print("itemsize:", d.itemsize) + ... 
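+
+    A smaller sketch of the same effect (offsets assume a platform where
+    ``i4`` aligns to 4 bytes):
+
+    >>> aligned = np.dtype('u1,i4', align=True)
+    >>> print_offsets(aligned)
+    offsets: [0, 4]
+    itemsize: 8
+    >>> print_offsets(rfn.repack_fields(aligned))
+    offsets: [0, 1]
+    itemsize: 5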
+ >>> dt = np.dtype('u1, >> dt + dtype({'names':['f0','f1','f2'], 'formats':['u1','>> print_offsets(dt) + offsets: [0, 8, 16] + itemsize: 24 + >>> packed_dt = rfn.repack_fields(dt) + >>> packed_dt + dtype([('f0', 'u1'), ('f1', '>> print_offsets(packed_dt) + offsets: [0, 1, 9] + itemsize: 17 + + """ + if not isinstance(a, np.dtype): + dt = repack_fields(a.dtype, align=align, recurse=recurse) + return a.astype(dt, copy=False) + + if a.names is None: + return a + + fieldinfo = [] + for name in a.names: + tup = a.fields[name] + if recurse: + fmt = repack_fields(tup[0], align=align, recurse=True) + else: + fmt = tup[0] + + if len(tup) == 3: + name = (tup[2], name) + + fieldinfo.append((name, fmt)) + + dt = np.dtype(fieldinfo, align=align) + return np.dtype((a.type, dt)) + +def _get_fields_and_offsets(dt, offset=0): + """ + Returns a flat list of (dtype, count, offset) tuples of all the + scalar fields in the dtype "dt", including nested fields, in left + to right order. + """ + + # counts up elements in subarrays, including nested subarrays, and returns + # base dtype and count + def count_elem(dt): + count = 1 + while dt.shape != (): + for size in dt.shape: + count *= size + dt = dt.base + return dt, count + + fields = [] + for name in dt.names: + field = dt.fields[name] + f_dt, f_offset = field[0], field[1] + f_dt, n = count_elem(f_dt) + + if f_dt.names is None: + fields.append((np.dtype((f_dt, (n,))), n, f_offset + offset)) + else: + subfields = _get_fields_and_offsets(f_dt, f_offset + offset) + size = f_dt.itemsize + + for i in range(n): + if i == 0: + # optimization: avoid list comprehension if no subarray + fields.extend(subfields) + else: + fields.extend([(d, c, o + i*size) for d, c, o in subfields]) + return fields + + +def _structured_to_unstructured_dispatcher(arr, dtype=None, copy=None, + casting=None): + return (arr,) + +@array_function_dispatch(_structured_to_unstructured_dispatcher) +def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'): + """ + Converts an n-D structured array into an (n+1)-D unstructured array. + + The new array will have a new last dimension equal in size to the + number of field-elements of the input array. If not supplied, the output + datatype is determined from the numpy type promotion rules applied to all + the field datatypes. + + Nested fields, as well as each element of any subarray fields, all count + as a single field-elements. + + Parameters + ---------- + arr : ndarray + Structured array or dtype to convert. Cannot contain object datatype. + dtype : dtype, optional + The dtype of the output unstructured array. + copy : bool, optional + See copy argument to `ndarray.astype`. If true, always return a copy. + If false, and `dtype` requirements are satisfied, a view is returned. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + See casting argument of `ndarray.astype`. Controls what kind of data + casting may occur. + + Returns + ------- + unstructured : ndarray + Unstructured array with one more dimension. + + Examples + -------- + + >>> from numpy.lib import recfunctions as rfn + >>> a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)]) + >>> a + array([(0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.]), + (0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.])], + dtype=[('a', '>> rfn.structured_to_unstructured(a) + array([[0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0.]]) + + >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)], + ... 
dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')]) + >>> np.mean(rfn.structured_to_unstructured(b[['x', 'z']]), axis=-1) + array([ 3. , 5.5, 9. , 11. ]) + + """ + if arr.dtype.names is None: + raise ValueError('arr must be a structured array') + + fields = _get_fields_and_offsets(arr.dtype) + n_fields = len(fields) + if n_fields == 0 and dtype is None: + raise ValueError("arr has no fields. Unable to guess dtype") + elif n_fields == 0: + # too many bugs elsewhere for this to work now + raise NotImplementedError("arr with no fields is not supported") + + dts, counts, offsets = zip(*fields) + names = ['f{}'.format(n) for n in range(n_fields)] + + if dtype is None: + out_dtype = np.result_type(*[dt.base for dt in dts]) + else: + out_dtype = dtype + + # Use a series of views and casts to convert to an unstructured array: + + # first view using flattened fields (doesn't work for object arrays) + # Note: dts may include a shape for subarrays + flattened_fields = np.dtype({'names': names, + 'formats': dts, + 'offsets': offsets, + 'itemsize': arr.dtype.itemsize}) + with suppress_warnings() as sup: # until 1.16 (gh-12447) + sup.filter(FutureWarning, "Numpy has detected") + arr = arr.view(flattened_fields) + + # next cast to a packed format with all fields converted to new dtype + packed_fields = np.dtype({'names': names, + 'formats': [(out_dtype, dt.shape) for dt in dts]}) + arr = arr.astype(packed_fields, copy=copy, casting=casting) + + # finally is it safe to view the packed fields as the unstructured type + return arr.view((out_dtype, (sum(counts),))) + + +def _unstructured_to_structured_dispatcher(arr, dtype=None, names=None, + align=None, copy=None, casting=None): + return (arr,) + +@array_function_dispatch(_unstructured_to_structured_dispatcher) +def unstructured_to_structured(arr, dtype=None, names=None, align=False, + copy=False, casting='unsafe'): + """ + Converts an n-D unstructured array into an (n-1)-D structured array. + + The last dimension of the input array is converted into a structure, with + number of field-elements equal to the size of the last dimension of the + input array. By default all output fields have the input array's dtype, but + an output structured dtype with an equal number of fields-elements can be + supplied instead. + + Nested fields, as well as each element of any subarray fields, all count + towards the number of field-elements. + + Parameters + ---------- + arr : ndarray + Unstructured array or dtype to convert. + dtype : dtype, optional + The structured dtype of the output array + names : list of strings, optional + If dtype is not supplied, this specifies the field names for the output + dtype, in order. The field dtypes will be the same as the input array. + align : boolean, optional + Whether to create an aligned memory layout. + copy : bool, optional + See copy argument to `ndarray.astype`. If true, always return a copy. + If false, and `dtype` requirements are satisfied, a view is returned. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + See casting argument of `ndarray.astype`. Controls what kind of data + casting may occur. + + Returns + ------- + structured : ndarray + Structured array with fewer dimensions. 
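+
+    Note that the last axis of `arr` must supply exactly as many values as
+    the output dtype consumes; in the example below, ``dt`` has five
+    field-elements (1 + 2 + 2), matching the input's last dimension of 5.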
+ + Examples + -------- + + >>> from numpy.lib import recfunctions as rfn + >>> dt = np.dtype([('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)]) + >>> a = np.arange(20).reshape((4,5)) + >>> a + array([[ 0, 1, 2, 3, 4], + [ 5, 6, 7, 8, 9], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19]]) + >>> rfn.unstructured_to_structured(a, dt) + array([( 0, ( 1., 2), [ 3., 4.]), ( 5, ( 6., 7), [ 8., 9.]), + (10, (11., 12), [13., 14.]), (15, (16., 17), [18., 19.])], + dtype=[('a', '>> from numpy.lib import recfunctions as rfn + >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)], + ... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')]) + >>> rfn.apply_along_fields(np.mean, b) + array([ 2.66666667, 5.33333333, 8.66666667, 11. ]) + >>> rfn.apply_along_fields(np.mean, b[['x', 'z']]) + array([ 3. , 5.5, 9. , 11. ]) + + """ + if arr.dtype.names is None: + raise ValueError('arr must be a structured array') + + uarr = structured_to_unstructured(arr) + return func(uarr, axis=-1) + # works and avoids axis requirement, but very, very slow: + #return np.apply_along_axis(func, -1, uarr) + +def _assign_fields_by_name_dispatcher(dst, src, zero_unassigned=None): + return dst, src + +@array_function_dispatch(_assign_fields_by_name_dispatcher) +def assign_fields_by_name(dst, src, zero_unassigned=True): + """ + Assigns values from one structured array to another by field name. + + Normally in numpy >= 1.14, assignment of one structured array to another + copies fields "by position", meaning that the first field from the src is + copied to the first field of the dst, and so on, regardless of field name. + + This function instead copies "by field name", such that fields in the dst + are assigned from the identically named field in the src. This applies + recursively for nested structures. This is how structure assignment worked + in numpy >= 1.6 to <= 1.13. + + Parameters + ---------- + dst : ndarray + src : ndarray + The source and destination arrays during assignment. + zero_unassigned : bool, optional + If True, fields in the dst for which there was no matching + field in the src are filled with the value 0 (zero). This + was the behavior of numpy <= 1.13. If False, those fields + are not modified. + """ + + if dst.dtype.names is None: + dst[...] = src + return + + for name in dst.dtype.names: + if name not in src.dtype.names: + if zero_unassigned: + dst[name] = 0 + else: + assign_fields_by_name(dst[name], src[name], + zero_unassigned) + +def _require_fields_dispatcher(array, required_dtype): + return (array,) + +@array_function_dispatch(_require_fields_dispatcher) +def require_fields(array, required_dtype): + """ + Casts a structured array to a new dtype using assignment by field-name. + + This function assigns from the old to the new array by name, so the + value of a field in the output array is the value of the field with the + same name in the source array. This has the effect of creating a new + ndarray containing only the fields "required" by the required_dtype. + + If a field name in the required_dtype does not exist in the + input array, that field is created and set to 0 in the output array. 
+ + Parameters + ---------- + a : ndarray + array to cast + required_dtype : dtype + datatype for output array + + Returns + ------- + out : ndarray + array with the new dtype, with field values copied from the fields in + the input array with the same name + + Examples + -------- + + >>> from numpy.lib import recfunctions as rfn + >>> a = np.ones(4, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')]) + >>> rfn.require_fields(a, [('b', 'f4'), ('c', 'u1')]) + array([(1., 1), (1., 1), (1., 1), (1., 1)], + dtype=[('b', '>> rfn.require_fields(a, [('b', 'f4'), ('newf', 'u1')]) + array([(1., 0), (1., 0), (1., 0), (1., 0)], + dtype=[('b', '>> from numpy.lib import recfunctions as rfn + >>> x = np.array([1, 2,]) + >>> rfn.stack_arrays(x) is x + True + >>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)]) + >>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + ... dtype=[('A', '|S3'), ('B', np.double), ('C', np.double)]) + >>> test = rfn.stack_arrays((z,zz)) + >>> test + masked_array(data=[(b'A', 1.0, --), (b'B', 2.0, --), (b'a', 10.0, 100.0), + (b'b', 20.0, 200.0), (b'c', 30.0, 300.0)], + mask=[(False, False, True), (False, False, True), + (False, False, False), (False, False, False), + (False, False, False)], + fill_value=(b'N/A', 1.e+20, 1.e+20), + dtype=[('A', 'S3'), ('B', ' '%s'" % + (cdtype, fdtype)) + # Only one field: use concatenate + if len(newdescr) == 1: + output = ma.concatenate(seqarrays) + else: + # + output = ma.masked_all((np.sum(nrecords),), newdescr) + offset = np.cumsum(np.r_[0, nrecords]) + seen = [] + for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]): + names = a.dtype.names + if names is None: + output['f%i' % len(seen)][i:j] = a + else: + for name in n: + output[name][i:j] = a[name] + if name not in seen: + seen.append(name) + # + return _fix_output(_fix_defaults(output, defaults), + usemask=usemask, asrecarray=asrecarray) + + +def _find_duplicates_dispatcher( + a, key=None, ignoremask=None, return_index=None): + return (a,) + + +@array_function_dispatch(_find_duplicates_dispatcher) +def find_duplicates(a, key=None, ignoremask=True, return_index=False): + """ + Find the duplicates in a structured array along a given key + + Parameters + ---------- + a : array-like + Input array + key : {string, None}, optional + Name of the fields along which to check the duplicates. + If None, the search is performed by records + ignoremask : {True, False}, optional + Whether masked data should be discarded or considered as duplicates. + return_index : {False, True}, optional + Whether to return the indices of the duplicated values. + + Examples + -------- + >>> from numpy.lib import recfunctions as rfn + >>> ndtype = [('a', int)] + >>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3], + ... 
mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype) + >>> rfn.find_duplicates(a, ignoremask=True, return_index=True) + (masked_array(data=[(1,), (1,), (2,), (2,)], + mask=[(False,), (False,), (False,), (False,)], + fill_value=(999999,), + dtype=[('a', '= nb1)] - nb1 + (r1cmn, r2cmn) = (len(idx_1), len(idx_2)) + if jointype == 'inner': + (r1spc, r2spc) = (0, 0) + elif jointype == 'outer': + idx_out = idx_sort[~flag_in] + idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)])) + idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1)) + (r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn) + elif jointype == 'leftouter': + idx_out = idx_sort[~flag_in] + idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)])) + (r1spc, r2spc) = (len(idx_1) - r1cmn, 0) + # Select the entries from each input + (s1, s2) = (r1[idx_1], r2[idx_2]) + # + # Build the new description of the output array ....... + # Start with the key fields + ndtype = _get_fieldspec(r1k.dtype) + + # Add the fields from r1 + for fname, fdtype in _get_fieldspec(r1.dtype): + if fname not in key: + ndtype.append((fname, fdtype)) + + # Add the fields from r2 + for fname, fdtype in _get_fieldspec(r2.dtype): + # Have we seen the current name already ? + # we need to rebuild this list every time + names = list(name for name, dtype in ndtype) + try: + nameidx = names.index(fname) + except ValueError: + #... we haven't: just add the description to the current list + ndtype.append((fname, fdtype)) + else: + # collision + _, cdtype = ndtype[nameidx] + if fname in key: + # The current field is part of the key: take the largest dtype + ndtype[nameidx] = (fname, max(fdtype, cdtype)) + else: + # The current field is not part of the key: add the suffixes, + # and place the new field adjacent to the old one + ndtype[nameidx:nameidx + 1] = [ + (fname + r1postfix, cdtype), + (fname + r2postfix, fdtype) + ] + # Rebuild a dtype from the new fields + ndtype = np.dtype(ndtype) + # Find the largest nb of common fields : + # r1cmn and r2cmn should be equal, but... + cmn = max(r1cmn, r2cmn) + # Construct an empty array + output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype) + names = output.dtype.names + for f in r1names: + selected = s1[f] + if f not in names or (f in r2names and not r2postfix and f not in key): + f += r1postfix + current = output[f] + current[:r1cmn] = selected[:r1cmn] + if jointype in ('outer', 'leftouter'): + current[cmn:cmn + r1spc] = selected[r1cmn:] + for f in r2names: + selected = s2[f] + if f not in names or (f in r1names and not r1postfix and f not in key): + f += r2postfix + current = output[f] + current[:r2cmn] = selected[:r2cmn] + if (jointype == 'outer') and r2spc: + current[-r2spc:] = selected[r2cmn:] + # Sort and finalize the output + output.sort(order=key) + kwargs = dict(usemask=usemask, asrecarray=asrecarray) + return _fix_output(_fix_defaults(output, defaults), **kwargs) + + +def _rec_join_dispatcher( + key, r1, r2, jointype=None, r1postfix=None, r2postfix=None, + defaults=None): + return (r1, r2) + + +@array_function_dispatch(_rec_join_dispatcher) +def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2', + defaults=None): + """ + Join arrays `r1` and `r2` on keys. + Alternative to join_by, that always returns a np.recarray. 
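+
+    For example (a small sketch with illustrative field names), joining
+    ``r1 = np.array([(1, 10.), (2, 20.)], dtype=[('key', int), ('a', float)])``
+    with ``r2 = np.array([(2, 200.), (3, 300.)], dtype=[('key', int), ('b', float)])``
+    via ``rec_join('key', r1, r2)`` keeps, for the default inner join, only
+    the record with ``key == 2``, carrying fields ``key``, ``a`` and ``b``.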
+ + See Also + -------- + join_by : equivalent function + """ + kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix, + defaults=defaults, usemask=False, asrecarray=True) + return join_by(key, r1, r2, **kwargs) diff --git a/MLPY/Lib/site-packages/numpy/lib/scimath.py b/MLPY/Lib/site-packages/numpy/lib/scimath.py new file mode 100644 index 0000000000000000000000000000000000000000..1c4e94f0fb4b4665afe9af456b295689c2d1d31c --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/scimath.py @@ -0,0 +1,617 @@ +""" +Wrapper functions to more user-friendly calling of certain math functions +whose output data-type is different than the input data-type in certain +domains of the input. + +For example, for functions like `log` with branch cuts, the versions in this +module provide the mathematically valid answers in the complex plane:: + + >>> import math + >>> from numpy.lib import scimath + >>> scimath.log(-math.exp(1)) == (1+1j*math.pi) + True + +Similarly, `sqrt`, other base logarithms, `power` and trig functions are +correctly handled. See their respective docstrings for specific examples. + +Functions +--------- + +.. autosummary:: + :toctree: generated/ + + sqrt + log + log2 + logn + log10 + power + arccos + arcsin + arctanh + +""" +import numpy.core.numeric as nx +import numpy.core.numerictypes as nt +from numpy.core.numeric import asarray, any +from numpy.core.overrides import array_function_dispatch +from numpy.lib.type_check import isreal + + +__all__ = [ + 'sqrt', 'log', 'log2', 'logn', 'log10', 'power', 'arccos', 'arcsin', + 'arctanh' + ] + + +_ln2 = nx.log(2.0) + + +def _tocomplex(arr): + """Convert its input `arr` to a complex array. + + The input is returned as a complex array of the smallest type that will fit + the original data: types like single, byte, short, etc. become csingle, + while others become cdouble. + + A copy of the input is always made. + + Parameters + ---------- + arr : array + + Returns + ------- + array + An array with the same input data as the input but in complex form. + + Examples + -------- + + First, consider an input of type short: + + >>> a = np.array([1,2,3],np.short) + + >>> ac = np.lib.scimath._tocomplex(a); ac + array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) + + >>> ac.dtype + dtype('complex64') + + If the input is of type double, the output is correspondingly of the + complex double type as well: + + >>> b = np.array([1,2,3],np.double) + + >>> bc = np.lib.scimath._tocomplex(b); bc + array([1.+0.j, 2.+0.j, 3.+0.j]) + + >>> bc.dtype + dtype('complex128') + + Note that even if the input was complex to begin with, a copy is still + made, since the astype() method always copies: + + >>> c = np.array([1,2,3],np.csingle) + + >>> cc = np.lib.scimath._tocomplex(c); cc + array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) + + >>> c *= 2; c + array([2.+0.j, 4.+0.j, 6.+0.j], dtype=complex64) + + >>> cc + array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) + """ + if issubclass(arr.dtype.type, (nt.single, nt.byte, nt.short, nt.ubyte, + nt.ushort, nt.csingle)): + return arr.astype(nt.csingle) + else: + return arr.astype(nt.cdouble) + + +def _fix_real_lt_zero(x): + """Convert `x` to complex if it has real, negative components. + + Otherwise, output is just the array version of the input (via asarray). 
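+
+    Any element that is real-valued and strictly negative triggers promotion
+    of the whole array to a complex type; otherwise the input passes through
+    unchanged (apart from the `asarray` conversion).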
+ + Parameters + ---------- + x : array_like + + Returns + ------- + array + + Examples + -------- + >>> np.lib.scimath._fix_real_lt_zero([1,2]) + array([1, 2]) + + >>> np.lib.scimath._fix_real_lt_zero([-1,2]) + array([-1.+0.j, 2.+0.j]) + + """ + x = asarray(x) + if any(isreal(x) & (x < 0)): + x = _tocomplex(x) + return x + + +def _fix_int_lt_zero(x): + """Convert `x` to double if it has real, negative components. + + Otherwise, output is just the array version of the input (via asarray). + + Parameters + ---------- + x : array_like + + Returns + ------- + array + + Examples + -------- + >>> np.lib.scimath._fix_int_lt_zero([1,2]) + array([1, 2]) + + >>> np.lib.scimath._fix_int_lt_zero([-1,2]) + array([-1., 2.]) + """ + x = asarray(x) + if any(isreal(x) & (x < 0)): + x = x * 1.0 + return x + + +def _fix_real_abs_gt_1(x): + """Convert `x` to complex if it has real components x_i with abs(x_i)>1. + + Otherwise, output is just the array version of the input (via asarray). + + Parameters + ---------- + x : array_like + + Returns + ------- + array + + Examples + -------- + >>> np.lib.scimath._fix_real_abs_gt_1([0,1]) + array([0, 1]) + + >>> np.lib.scimath._fix_real_abs_gt_1([0,2]) + array([0.+0.j, 2.+0.j]) + """ + x = asarray(x) + if any(isreal(x) & (abs(x) > 1)): + x = _tocomplex(x) + return x + + +def _unary_dispatcher(x): + return (x,) + + +@array_function_dispatch(_unary_dispatcher) +def sqrt(x): + """ + Compute the square root of x. + + For negative input elements, a complex value is returned + (unlike `numpy.sqrt` which returns NaN). + + Parameters + ---------- + x : array_like + The input value(s). + + Returns + ------- + out : ndarray or scalar + The square root of `x`. If `x` was a scalar, so is `out`, + otherwise an array is returned. + + See Also + -------- + numpy.sqrt + + Examples + -------- + For real, non-negative inputs this works just like `numpy.sqrt`: + + >>> np.lib.scimath.sqrt(1) + 1.0 + >>> np.lib.scimath.sqrt([1, 4]) + array([1., 2.]) + + But it automatically handles negative inputs: + + >>> np.lib.scimath.sqrt(-1) + 1j + >>> np.lib.scimath.sqrt([-1,4]) + array([0.+1.j, 2.+0.j]) + + """ + x = _fix_real_lt_zero(x) + return nx.sqrt(x) + + +@array_function_dispatch(_unary_dispatcher) +def log(x): + """ + Compute the natural logarithm of `x`. + + Return the "principal value" (for a description of this, see `numpy.log`) + of :math:`log_e(x)`. For real `x > 0`, this is a real number (``log(0)`` + returns ``-inf`` and ``log(np.inf)`` returns ``inf``). Otherwise, the + complex principle value is returned. + + Parameters + ---------- + x : array_like + The value(s) whose log is (are) required. + + Returns + ------- + out : ndarray or scalar + The log of the `x` value(s). If `x` was a scalar, so is `out`, + otherwise an array is returned. + + See Also + -------- + numpy.log + + Notes + ----- + For a log() that returns ``NAN`` when real `x < 0`, use `numpy.log` + (note, however, that otherwise `numpy.log` and this `log` are identical, + i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, and, + notably, the complex principle value if ``x.imag != 0``). + + Examples + -------- + >>> np.emath.log(np.exp(1)) + 1.0 + + Negative arguments are handled "correctly" (recall that + ``exp(log(x)) == x`` does *not* hold for real ``x < 0``): + + >>> np.emath.log(-np.exp(1)) == (1 + np.pi * 1j) + True + + """ + x = _fix_real_lt_zero(x) + return nx.log(x) + + +@array_function_dispatch(_unary_dispatcher) +def log10(x): + """ + Compute the logarithm base 10 of `x`. 
+ + Return the "principal value" (for a description of this, see + `numpy.log10`) of :math:`log_{10}(x)`. For real `x > 0`, this + is a real number (``log10(0)`` returns ``-inf`` and ``log10(np.inf)`` + returns ``inf``). Otherwise, the complex principle value is returned. + + Parameters + ---------- + x : array_like or scalar + The value(s) whose log base 10 is (are) required. + + Returns + ------- + out : ndarray or scalar + The log base 10 of the `x` value(s). If `x` was a scalar, so is `out`, + otherwise an array object is returned. + + See Also + -------- + numpy.log10 + + Notes + ----- + For a log10() that returns ``NAN`` when real `x < 0`, use `numpy.log10` + (note, however, that otherwise `numpy.log10` and this `log10` are + identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, + and, notably, the complex principle value if ``x.imag != 0``). + + Examples + -------- + + (We set the printing precision so the example can be auto-tested) + + >>> np.set_printoptions(precision=4) + + >>> np.emath.log10(10**1) + 1.0 + + >>> np.emath.log10([-10**1, -10**2, 10**2]) + array([1.+1.3644j, 2.+1.3644j, 2.+0.j ]) + + """ + x = _fix_real_lt_zero(x) + return nx.log10(x) + + +def _logn_dispatcher(n, x): + return (n, x,) + + +@array_function_dispatch(_logn_dispatcher) +def logn(n, x): + """ + Take log base n of x. + + If `x` contains negative inputs, the answer is computed and returned in the + complex domain. + + Parameters + ---------- + n : array_like + The integer base(s) in which the log is taken. + x : array_like + The value(s) whose log base `n` is (are) required. + + Returns + ------- + out : ndarray or scalar + The log base `n` of the `x` value(s). If `x` was a scalar, so is + `out`, otherwise an array is returned. + + Examples + -------- + >>> np.set_printoptions(precision=4) + + >>> np.lib.scimath.logn(2, [4, 8]) + array([2., 3.]) + >>> np.lib.scimath.logn(2, [-4, -8, 8]) + array([2.+4.5324j, 3.+4.5324j, 3.+0.j ]) + + """ + x = _fix_real_lt_zero(x) + n = _fix_real_lt_zero(n) + return nx.log(x)/nx.log(n) + + +@array_function_dispatch(_unary_dispatcher) +def log2(x): + """ + Compute the logarithm base 2 of `x`. + + Return the "principal value" (for a description of this, see + `numpy.log2`) of :math:`log_2(x)`. For real `x > 0`, this is + a real number (``log2(0)`` returns ``-inf`` and ``log2(np.inf)`` returns + ``inf``). Otherwise, the complex principle value is returned. + + Parameters + ---------- + x : array_like + The value(s) whose log base 2 is (are) required. + + Returns + ------- + out : ndarray or scalar + The log base 2 of the `x` value(s). If `x` was a scalar, so is `out`, + otherwise an array is returned. + + See Also + -------- + numpy.log2 + + Notes + ----- + For a log2() that returns ``NAN`` when real `x < 0`, use `numpy.log2` + (note, however, that otherwise `numpy.log2` and this `log2` are + identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, + and, notably, the complex principle value if ``x.imag != 0``). + + Examples + -------- + We set the printing precision so the example can be auto-tested: + + >>> np.set_printoptions(precision=4) + + >>> np.emath.log2(8) + 3.0 + >>> np.emath.log2([-4, -8, 8]) + array([2.+4.5324j, 3.+4.5324j, 3.+0.j ]) + + """ + x = _fix_real_lt_zero(x) + return nx.log2(x) + + +def _power_dispatcher(x, p): + return (x, p) + + +@array_function_dispatch(_power_dispatcher) +def power(x, p): + """ + Return x to the power p, (x**p). + + If `x` contains negative values, the output is converted to the + complex domain. 
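+
+    For instance, ``power(-4, 0.5)`` returns (up to rounding) ``2j``,
+    where ``numpy.power`` would return ``nan``.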
+ + Parameters + ---------- + x : array_like + The input value(s). + p : array_like of ints + The power(s) to which `x` is raised. If `x` contains multiple values, + `p` has to either be a scalar, or contain the same number of values + as `x`. In the latter case, the result is + ``x[0]**p[0], x[1]**p[1], ...``. + + Returns + ------- + out : ndarray or scalar + The result of ``x**p``. If `x` and `p` are scalars, so is `out`, + otherwise an array is returned. + + See Also + -------- + numpy.power + + Examples + -------- + >>> np.set_printoptions(precision=4) + + >>> np.lib.scimath.power([2, 4], 2) + array([ 4, 16]) + >>> np.lib.scimath.power([2, 4], -2) + array([0.25 , 0.0625]) + >>> np.lib.scimath.power([-2, 4], 2) + array([ 4.-0.j, 16.+0.j]) + + """ + x = _fix_real_lt_zero(x) + p = _fix_int_lt_zero(p) + return nx.power(x, p) + + +@array_function_dispatch(_unary_dispatcher) +def arccos(x): + """ + Compute the inverse cosine of x. + + Return the "principal value" (for a description of this, see + `numpy.arccos`) of the inverse cosine of `x`. For real `x` such that + `abs(x) <= 1`, this is a real number in the closed interval + :math:`[0, \\pi]`. Otherwise, the complex principal value is returned. + + Parameters + ---------- + x : array_like or scalar + The value(s) whose arccos is (are) required. + + Returns + ------- + out : ndarray or scalar + The inverse cosine(s) of the `x` value(s). If `x` was a scalar, so + is `out`, otherwise an array object is returned. + + See Also + -------- + numpy.arccos + + Notes + ----- + For an arccos() that returns ``NAN`` when real `x` is not in the + interval ``[-1,1]``, use `numpy.arccos`. + + Examples + -------- + >>> np.set_printoptions(precision=4) + + >>> np.emath.arccos(1) # a scalar is returned + 0.0 + + >>> np.emath.arccos([1,2]) + array([0.-0.j , 0.-1.317j]) + + """ + x = _fix_real_abs_gt_1(x) + return nx.arccos(x) + + +@array_function_dispatch(_unary_dispatcher) +def arcsin(x): + """ + Compute the inverse sine of x. + + Return the "principal value" (for a description of this, see + `numpy.arcsin`) of the inverse sine of `x`. For real `x` such that + `abs(x) <= 1`, this is a real number in the closed interval + :math:`[-\\pi/2, \\pi/2]`. Otherwise, the complex principal value is + returned. + + Parameters + ---------- + x : array_like or scalar + The value(s) whose arcsin is (are) required. + + Returns + ------- + out : ndarray or scalar + The inverse sine(s) of the `x` value(s). If `x` was a scalar, so + is `out`, otherwise an array object is returned. + + See Also + -------- + numpy.arcsin + + Notes + ----- + For an arcsin() that returns ``NAN`` when real `x` is not in the + interval ``[-1,1]``, use `numpy.arcsin`. + + Examples + -------- + >>> np.set_printoptions(precision=4) + + >>> np.emath.arcsin(0) + 0.0 + + >>> np.emath.arcsin([0,1]) + array([0. , 1.5708]) + + """ + x = _fix_real_abs_gt_1(x) + return nx.arcsin(x) + + +@array_function_dispatch(_unary_dispatcher) +def arctanh(x): + """ + Compute the inverse hyperbolic tangent of `x`. + + Return the "principal value" (for a description of this, see + `numpy.arctanh`) of ``arctanh(x)``. For real `x` such that + ``abs(x) < 1``, this is a real number. If `abs(x) > 1`, or if `x` is + complex, the result is complex. Finally, `x = 1` returns ``inf`` and + ``x = -1`` returns ``-inf``. + + Parameters + ---------- + x : array_like + The value(s) whose arctanh is (are) required. + + Returns + ------- + out : ndarray or scalar + The inverse hyperbolic tangent(s) of the `x` value(s).
If `x` was + a scalar so is `out`, otherwise an array is returned. + + + See Also + -------- + numpy.arctanh + + Notes + ----- + For an arctanh() that returns ``NAN`` when real `x` is not in the + interval ``(-1,1)``, use `numpy.arctanh` (this latter, however, does + return +/-inf for ``x = +/-1``). + + Examples + -------- + >>> np.set_printoptions(precision=4) + + >>> from numpy.testing import suppress_warnings + >>> with suppress_warnings() as sup: + ... sup.filter(RuntimeWarning) + ... np.emath.arctanh(np.eye(2)) + array([[inf, 0.], + [ 0., inf]]) + >>> np.emath.arctanh([1j]) + array([0.+0.7854j]) + + """ + x = _fix_real_abs_gt_1(x) + return nx.arctanh(x) diff --git a/MLPY/Lib/site-packages/numpy/lib/scimath.pyi b/MLPY/Lib/site-packages/numpy/lib/scimath.pyi new file mode 100644 index 0000000000000000000000000000000000000000..4edaadf4ef1c63f8f89cb548ae8bc5eef92ea081 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/scimath.pyi @@ -0,0 +1,13 @@ +from typing import List + +__all__: List[str] + +def sqrt(x): ... +def log(x): ... +def log10(x): ... +def logn(n, x): ... +def log2(x): ... +def power(x, p): ... +def arccos(x): ... +def arcsin(x): ... +def arctanh(x): ... diff --git a/MLPY/Lib/site-packages/numpy/lib/setup.py b/MLPY/Lib/site-packages/numpy/lib/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..91b31b113d38c1238814079840bbbaf822532a68 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/setup.py @@ -0,0 +1,12 @@ +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + + config = Configuration('lib', parent_package, top_path) + config.add_subpackage('tests') + config.add_data_dir('tests/data') + config.add_data_files('*.pyi') + return config + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(configuration=configuration) diff --git a/MLPY/Lib/site-packages/numpy/lib/shape_base.py b/MLPY/Lib/site-packages/numpy/lib/shape_base.py new file mode 100644 index 0000000000000000000000000000000000000000..71cb77f9209272d0d478a8cb9a1d65fe166f3784 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/shape_base.py @@ -0,0 +1,1260 @@ +import functools + +import numpy.core.numeric as _nx +from numpy.core.numeric import ( + asarray, zeros, outer, concatenate, array, asanyarray + ) +from numpy.core.fromnumeric import reshape, transpose +from numpy.core.multiarray import normalize_axis_index +from numpy.core import overrides +from numpy.core import vstack, atleast_3d +from numpy.core.numeric import normalize_axis_tuple +from numpy.core.shape_base import _arrays_for_stack_dispatcher +from numpy.lib.index_tricks import ndindex +from numpy.matrixlib.defmatrix import matrix # this raises all the right alarm bells + + +__all__ = [ + 'column_stack', 'row_stack', 'dstack', 'array_split', 'split', + 'hsplit', 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims', + 'apply_along_axis', 'kron', 'tile', 'get_array_wrap', 'take_along_axis', + 'put_along_axis' + ] + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +def _make_along_axis_idx(arr_shape, indices, axis): + # compute dimensions to iterate over + if not _nx.issubdtype(indices.dtype, _nx.integer): + raise IndexError('`indices` must be an integer array') + if len(arr_shape) != indices.ndim: + raise ValueError( + "`indices` and `arr` must have the same number of dimensions") + shape_ones = (1,) * indices.ndim + dest_dims = list(range(axis)) + [None] + list(range(axis+1, indices.ndim)) + + # 
build a fancy index, consisting of orthogonal aranges, with the + # requested index inserted at the right location + fancy_index = [] + for dim, n in zip(dest_dims, arr_shape): + if dim is None: + fancy_index.append(indices) + else: + ind_shape = shape_ones[:dim] + (-1,) + shape_ones[dim+1:] + fancy_index.append(_nx.arange(n).reshape(ind_shape)) + + return tuple(fancy_index) + + +def _take_along_axis_dispatcher(arr, indices, axis): + return (arr, indices) + + +@array_function_dispatch(_take_along_axis_dispatcher) +def take_along_axis(arr, indices, axis): + """ + Take values from the input array by matching 1d index and data slices. + + This iterates over matching 1d slices oriented along the specified axis in + the index and data arrays, and uses the former to look up values in the + latter. These slices can be different lengths. + + Functions returning an index along an axis, like `argsort` and + `argpartition`, produce suitable indices for this function. + + .. versionadded:: 1.15.0 + + Parameters + ---------- + arr : ndarray (Ni..., M, Nk...) + Source array + indices : ndarray (Ni..., J, Nk...) + Indices to take along each 1d slice of `arr`. This must match the + dimension of arr, but dimensions Ni and Nj only need to broadcast + against `arr`. + axis : int + The axis to take 1d slices along. If axis is None, the input array is + treated as if it had first been flattened to 1d, for consistency with + `sort` and `argsort`. + + Returns + ------- + out: ndarray (Ni..., J, Nk...) + The indexed result. + + Notes + ----- + This is equivalent to (but faster than) the following use of `ndindex` and + `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices:: + + Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:] + J = indices.shape[axis] # Need not equal M + out = np.empty(Ni + (J,) + Nk) + + for ii in ndindex(Ni): + for kk in ndindex(Nk): + a_1d = a [ii + s_[:,] + kk] + indices_1d = indices[ii + s_[:,] + kk] + out_1d = out [ii + s_[:,] + kk] + for j in range(J): + out_1d[j] = a_1d[indices_1d[j]] + + Equivalently, eliminating the inner loop, the last two lines would be:: + + out_1d[:] = a_1d[indices_1d] + + See Also + -------- + take : Take along an axis, using the same indices for every 1d slice + put_along_axis : + Put values into the destination array by matching 1d index and data slices + + Examples + -------- + + For this sample array + + >>> a = np.array([[10, 30, 20], [60, 40, 50]]) + + We can sort either by using sort directly, or argsort and this function + + >>> np.sort(a, axis=1) + array([[10, 20, 30], + [40, 50, 60]]) + >>> ai = np.argsort(a, axis=1); ai + array([[0, 2, 1], + [1, 2, 0]]) + >>> np.take_along_axis(a, ai, axis=1) + array([[10, 20, 30], + [40, 50, 60]]) + + The same works for max and min, if you expand the dimensions: + + >>> np.expand_dims(np.max(a, axis=1), axis=1) + array([[30], + [60]]) + >>> ai = np.expand_dims(np.argmax(a, axis=1), axis=1) + >>> ai + array([[1], + [0]]) + >>> np.take_along_axis(a, ai, axis=1) + array([[30], + [60]]) + + If we want to get the max and min at the same time, we can stack the + indices first + + >>> ai_min = np.expand_dims(np.argmin(a, axis=1), axis=1) + >>> ai_max = np.expand_dims(np.argmax(a, axis=1), axis=1) + >>> ai = np.concatenate([ai_min, ai_max], axis=1) + >>> ai + array([[0, 1], + [1, 0]]) + >>> np.take_along_axis(a, ai, axis=1) + array([[10, 30], + [40, 60]]) + """ + # normalize inputs + if axis is None: + arr = arr.flat + arr_shape = (len(arr),) # flatiter has no .shape + axis = 0 + else: + axis = 
normalize_axis_index(axis, arr.ndim) + arr_shape = arr.shape + + # use the fancy index + return arr[_make_along_axis_idx(arr_shape, indices, axis)] + + +def _put_along_axis_dispatcher(arr, indices, values, axis): + return (arr, indices, values) + + +@array_function_dispatch(_put_along_axis_dispatcher) +def put_along_axis(arr, indices, values, axis): + """ + Put values into the destination array by matching 1d index and data slices. + + This iterates over matching 1d slices oriented along the specified axis in + the index and data arrays, and uses the former to place values into the + latter. These slices can be different lengths. + + Functions returning an index along an axis, like `argsort` and + `argpartition`, produce suitable indices for this function. + + .. versionadded:: 1.15.0 + + Parameters + ---------- + arr : ndarray (Ni..., M, Nk...) + Destination array. + indices : ndarray (Ni..., J, Nk...) + Indices to change along each 1d slice of `arr`. This must match the + dimension of arr, but dimensions in Ni and Nj may be 1 to broadcast + against `arr`. + values : array_like (Ni..., J, Nk...) + values to insert at those indices. Its shape and dimension are + broadcast to match that of `indices`. + axis : int + The axis to take 1d slices along. If axis is None, the destination + array is treated as if a flattened 1d view had been created of it. + + Notes + ----- + This is equivalent to (but faster than) the following use of `ndindex` and + `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices:: + + Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:] + J = indices.shape[axis] # Need not equal M + + for ii in ndindex(Ni): + for kk in ndindex(Nk): + a_1d = a [ii + s_[:,] + kk] + indices_1d = indices[ii + s_[:,] + kk] + values_1d = values [ii + s_[:,] + kk] + for j in range(J): + a_1d[indices_1d[j]] = values_1d[j] + + Equivalently, eliminating the inner loop, the last two lines would be:: + + a_1d[indices_1d] = values_1d + + See Also + -------- + take_along_axis : + Take values from the input array by matching 1d index and data slices + + Examples + -------- + + For this sample array + + >>> a = np.array([[10, 30, 20], [60, 40, 50]]) + + We can replace the maximum values with: + + >>> ai = np.expand_dims(np.argmax(a, axis=1), axis=1) + >>> ai + array([[1], + [0]]) + >>> np.put_along_axis(a, ai, 99, axis=1) + >>> a + array([[10, 99, 20], + [99, 40, 50]]) + + """ + # normalize inputs + if axis is None: + arr = arr.flat + axis = 0 + arr_shape = (len(arr),) # flatiter has no .shape + else: + axis = normalize_axis_index(axis, arr.ndim) + arr_shape = arr.shape + + # use the fancy index + arr[_make_along_axis_idx(arr_shape, indices, axis)] = values + + +def _apply_along_axis_dispatcher(func1d, axis, arr, *args, **kwargs): + return (arr,) + + +@array_function_dispatch(_apply_along_axis_dispatcher) +def apply_along_axis(func1d, axis, arr, *args, **kwargs): + """ + Apply a function to 1-D slices along the given axis. + + Execute `func1d(a, *args, **kwargs)` where `func1d` operates on 1-D arrays + and `a` is a 1-D slice of `arr` along `axis`. 
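+ + For a 2-D `arr` this means that `func1d` is applied to each column when + ``axis=0`` and to each row when ``axis=1``, as the Examples below + illustrate.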
+ + This is equivalent to (but faster than) the following use of `ndindex` and + `s_`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of indices:: + + Ni, Nk = a.shape[:axis], a.shape[axis+1:] + for ii in ndindex(Ni): + for kk in ndindex(Nk): + f = func1d(arr[ii + s_[:,] + kk]) + Nj = f.shape + for jj in ndindex(Nj): + out[ii + jj + kk] = f[jj] + + Equivalently, eliminating the inner loop, this can be expressed as:: + + Ni, Nk = a.shape[:axis], a.shape[axis+1:] + for ii in ndindex(Ni): + for kk in ndindex(Nk): + out[ii + s_[...,] + kk] = func1d(arr[ii + s_[:,] + kk]) + + Parameters + ---------- + func1d : function (M,) -> (Nj...) + This function should accept 1-D arrays. It is applied to 1-D + slices of `arr` along the specified axis. + axis : integer + Axis along which `arr` is sliced. + arr : ndarray (Ni..., M, Nk...) + Input array. + args : any + Additional arguments to `func1d`. + kwargs : any + Additional named arguments to `func1d`. + + .. versionadded:: 1.9.0 + + + Returns + ------- + out : ndarray (Ni..., Nj..., Nk...) + The output array. The shape of `out` is identical to the shape of + `arr`, except along the `axis` dimension. This axis is removed, and + replaced with new dimensions equal to the shape of the return value + of `func1d`. So if `func1d` returns a scalar `out` will have one + fewer dimensions than `arr`. + + See Also + -------- + apply_over_axes : Apply a function repeatedly over multiple axes. + + Examples + -------- + >>> def my_func(a): + ... \"\"\"Average first and last element of a 1-D array\"\"\" + ... return (a[0] + a[-1]) * 0.5 + >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]]) + >>> np.apply_along_axis(my_func, 0, b) + array([4., 5., 6.]) + >>> np.apply_along_axis(my_func, 1, b) + array([2., 5., 8.]) + + For a function that returns a 1D array, the number of dimensions in + `outarr` is the same as `arr`. + + >>> b = np.array([[8,1,7], [4,3,9], [5,2,6]]) + >>> np.apply_along_axis(sorted, 1, b) + array([[1, 7, 8], + [3, 4, 9], + [2, 5, 6]]) + + For a function that returns a higher dimensional array, those dimensions + are inserted in place of the `axis` dimension. + + >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]]) + >>> np.apply_along_axis(np.diag, -1, b) + array([[[1, 0, 0], + [0, 2, 0], + [0, 0, 3]], + [[4, 0, 0], + [0, 5, 0], + [0, 0, 6]], + [[7, 0, 0], + [0, 8, 0], + [0, 0, 9]]]) + """ + # handle negative axes + arr = asanyarray(arr) + nd = arr.ndim + axis = normalize_axis_index(axis, nd) + + # arr, with the iteration axis at the end + in_dims = list(range(nd)) + inarr_view = transpose(arr, in_dims[:axis] + in_dims[axis+1:] + [axis]) + + # compute indices for the iteration axes, and append a trailing ellipsis to + # prevent 0d arrays decaying to scalars, which fixes gh-8642 + inds = ndindex(inarr_view.shape[:-1]) + inds = (ind + (Ellipsis,) for ind in inds) + + # invoke the function on the first item + try: + ind0 = next(inds) + except StopIteration as e: + raise ValueError( + 'Cannot apply_along_axis when any iteration dimensions are 0' + ) from None + res = asanyarray(func1d(inarr_view[ind0], *args, **kwargs)) + + # build a buffer for storing evaluations of func1d. + # remove the requested axis, and add the new ones on the end. + # laid out so that each write is contiguous. 
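+ # (the iteration axes come first and the axes of `res` come last, so + # each buff[ind] assignment below fills one contiguous block of + # res.size elements)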
+ # for a tuple index inds, buff[inds] = func1d(inarr_view[inds]) + buff = zeros(inarr_view.shape[:-1] + res.shape, res.dtype) + + # permutation of axes such that out = buff.transpose(buff_permute) + buff_dims = list(range(buff.ndim)) + buff_permute = ( + buff_dims[0 : axis] + + buff_dims[buff.ndim-res.ndim : buff.ndim] + + buff_dims[axis : buff.ndim-res.ndim] + ) + + # matrices have a nasty __array_prepare__ and __array_wrap__ + if not isinstance(res, matrix): + buff = res.__array_prepare__(buff) + + # save the first result, then compute and save all remaining results + buff[ind0] = res + for ind in inds: + buff[ind] = asanyarray(func1d(inarr_view[ind], *args, **kwargs)) + + if not isinstance(res, matrix): + # wrap the array, to preserve subclasses + buff = res.__array_wrap__(buff) + + # finally, rotate the inserted axes back to where they belong + return transpose(buff, buff_permute) + + else: + # matrices have to be transposed first, because they collapse dimensions! + out_arr = transpose(buff, buff_permute) + return res.__array_wrap__(out_arr) + + +def _apply_over_axes_dispatcher(func, a, axes): + return (a,) + + +@array_function_dispatch(_apply_over_axes_dispatcher) +def apply_over_axes(func, a, axes): + """ + Apply a function repeatedly over multiple axes. + + `func` is called as `res = func(a, axis)`, where `axis` is the first + element of `axes`. The result `res` of the function call must have + either the same dimensions as `a` or one less dimension. If `res` + has one less dimension than `a`, a dimension is inserted before + `axis`. The call to `func` is then repeated for each axis in `axes`, + with `res` as the first argument. + + Parameters + ---------- + func : function + This function must take two arguments, `func(a, axis)`. + a : array_like + Input array. + axes : array_like + Axes over which `func` is applied; the elements must be integers. + + Returns + ------- + apply_over_axis : ndarray + The output array. The number of dimensions is the same as `a`, + but the shape can be different. This depends on whether `func` + changes the shape of its output with respect to its input. + + See Also + -------- + apply_along_axis : + Apply a function to 1-D slices of an array along the given axis. + + Notes + ----- + This function is equivalent to tuple axis arguments to reorderable ufuncs + with keepdims=True. Tuple axis arguments to ufuncs have been available since + version 1.7.0. + + Examples + -------- + >>> a = np.arange(24).reshape(2,3,4) + >>> a + array([[[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]], + [[12, 13, 14, 15], + [16, 17, 18, 19], + [20, 21, 22, 23]]]) + + Sum over axes 0 and 2. The result has same number of dimensions + as the original array: + + >>> np.apply_over_axes(np.sum, a, [0,2]) + array([[[ 60], + [ 92], + [124]]]) + + Tuple axis arguments to ufuncs are equivalent: + + >>> np.sum(a, axis=(0,2), keepdims=True) + array([[[ 60], + [ 92], + [124]]]) + + """ + val = asarray(a) + N = a.ndim + if array(axes).ndim == 0: + axes = (axes,) + for axis in axes: + if axis < 0: + axis = N + axis + args = (val, axis) + res = func(*args) + if res.ndim == val.ndim: + val = res + else: + res = expand_dims(res, axis) + if res.ndim == val.ndim: + val = res + else: + raise ValueError("function is not returning " + "an array of the correct shape") + return val + + +def _expand_dims_dispatcher(a, axis): + return (a,) + + +@array_function_dispatch(_expand_dims_dispatcher) +def expand_dims(a, axis): + """ + Expand the shape of an array. 
+ + Insert a new axis that will appear at the `axis` position in the expanded + array shape. + + Parameters + ---------- + a : array_like + Input array. + axis : int or tuple of ints + Position in the expanded axes where the new axis (or axes) is placed. + + .. deprecated:: 1.13.0 + Passing an axis where ``axis > a.ndim`` will be treated as + ``axis == a.ndim``, and passing ``axis < -a.ndim - 1`` will + be treated as ``axis == 0``. This behavior is deprecated. + + .. versionchanged:: 1.18.0 + A tuple of axes is now supported. Out of range axes as + described above are now forbidden and raise an `AxisError`. + + Returns + ------- + result : ndarray + View of `a` with the number of dimensions increased. + + See Also + -------- + squeeze : The inverse operation, removing singleton dimensions + reshape : Insert, remove, and combine dimensions, and resize existing ones + doc.indexing, atleast_1d, atleast_2d, atleast_3d + + Examples + -------- + >>> x = np.array([1, 2]) + >>> x.shape + (2,) + + The following is equivalent to ``x[np.newaxis, :]`` or ``x[np.newaxis]``: + + >>> y = np.expand_dims(x, axis=0) + >>> y + array([[1, 2]]) + >>> y.shape + (1, 2) + + The following is equivalent to ``x[:, np.newaxis]``: + + >>> y = np.expand_dims(x, axis=1) + >>> y + array([[1], + [2]]) + >>> y.shape + (2, 1) + + ``axis`` may also be a tuple: + + >>> y = np.expand_dims(x, axis=(0, 1)) + >>> y + array([[[1, 2]]]) + + >>> y = np.expand_dims(x, axis=(2, 0)) + >>> y + array([[[1], + [2]]]) + + Note that some examples may use ``None`` instead of ``np.newaxis``. These + are the same objects: + + >>> np.newaxis is None + True + + """ + if isinstance(a, matrix): + a = asarray(a) + else: + a = asanyarray(a) + + if type(axis) not in (tuple, list): + axis = (axis,) + + out_ndim = len(axis) + a.ndim + axis = normalize_axis_tuple(axis, out_ndim) + + shape_it = iter(a.shape) + shape = [1 if ax in axis else next(shape_it) for ax in range(out_ndim)] + + return a.reshape(shape) + + +row_stack = vstack + + +def _column_stack_dispatcher(tup): + return _arrays_for_stack_dispatcher(tup) + + +@array_function_dispatch(_column_stack_dispatcher) +def column_stack(tup): + """ + Stack 1-D arrays as columns into a 2-D array. + + Take a sequence of 1-D arrays and stack them as columns + to make a single 2-D array. 2-D arrays are stacked as-is, + just like with `hstack`. 1-D arrays are turned into 2-D columns + first. + + Parameters + ---------- + tup : sequence of 1-D or 2-D arrays. + Arrays to stack. All of them must have the same first dimension. + + Returns + ------- + stacked : 2-D array + The array formed by stacking the given arrays. + + See Also + -------- + stack, hstack, vstack, concatenate + + Examples + -------- + >>> a = np.array((1,2,3)) + >>> b = np.array((2,3,4)) + >>> np.column_stack((a,b)) + array([[1, 2], + [2, 3], + [3, 4]]) + + """ + if not overrides.ARRAY_FUNCTION_ENABLED: + # raise warning if necessary + _arrays_for_stack_dispatcher(tup, stacklevel=2) + + arrays = [] + for v in tup: + arr = asanyarray(v) + if arr.ndim < 2: + arr = array(arr, copy=False, subok=True, ndmin=2).T + arrays.append(arr) + return _nx.concatenate(arrays, 1) + + +def _dstack_dispatcher(tup): + return _arrays_for_stack_dispatcher(tup) + + +@array_function_dispatch(_dstack_dispatcher) +def dstack(tup): + """ + Stack arrays in sequence depth wise (along third axis). 
+ + This is equivalent to concatenation along the third axis after 2-D arrays + of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays of shape + `(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays divided by + `dsplit`. + + This function makes most sense for arrays with up to 3 dimensions. For + instance, for pixel-data with a height (first axis), width (second axis), + and r/g/b channels (third axis). The functions `concatenate`, `stack` and + `block` provide more general stacking and concatenation operations. + + Parameters + ---------- + tup : sequence of arrays + The arrays must have the same shape along all but the third axis. + 1-D or 2-D arrays must have the same shape. + + Returns + ------- + stacked : ndarray + The array formed by stacking the given arrays, will be at least 3-D. + + See Also + -------- + concatenate : Join a sequence of arrays along an existing axis. + stack : Join a sequence of arrays along a new axis. + block : Assemble an nd-array from nested lists of blocks. + vstack : Stack arrays in sequence vertically (row wise). + hstack : Stack arrays in sequence horizontally (column wise). + column_stack : Stack 1-D arrays as columns into a 2-D array. + dsplit : Split array along third axis. + + Examples + -------- + >>> a = np.array((1,2,3)) + >>> b = np.array((2,3,4)) + >>> np.dstack((a,b)) + array([[[1, 2], + [2, 3], + [3, 4]]]) + + >>> a = np.array([[1],[2],[3]]) + >>> b = np.array([[2],[3],[4]]) + >>> np.dstack((a,b)) + array([[[1, 2]], + [[2, 3]], + [[3, 4]]]) + + """ + if not overrides.ARRAY_FUNCTION_ENABLED: + # raise warning if necessary + _arrays_for_stack_dispatcher(tup, stacklevel=2) + + arrs = atleast_3d(*tup) + if not isinstance(arrs, list): + arrs = [arrs] + return _nx.concatenate(arrs, 2) + + +def _replace_zero_by_x_arrays(sub_arys): + for i in range(len(sub_arys)): + if _nx.ndim(sub_arys[i]) == 0: + sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype) + elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]), 0)): + sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype) + return sub_arys + + +def _array_split_dispatcher(ary, indices_or_sections, axis=None): + return (ary, indices_or_sections) + + +@array_function_dispatch(_array_split_dispatcher) +def array_split(ary, indices_or_sections, axis=0): + """ + Split an array into multiple sub-arrays. + + Please refer to the ``split`` documentation. The only difference + between these functions is that ``array_split`` allows + `indices_or_sections` to be an integer that does *not* equally + divide the axis. For an array of length l that should be split + into n sections, it returns l % n sub-arrays of size l//n + 1 + and the rest of size l//n. + + See Also + -------- + split : Split array into multiple sub-arrays of equal size. + + Examples + -------- + >>> x = np.arange(8.0) + >>> np.array_split(x, 3) + [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])] + + >>> x = np.arange(9) + >>> np.array_split(x, 4) + [array([0, 1, 2]), array([3, 4]), array([5, 6]), array([7, 8])] + + """ + try: + Ntotal = ary.shape[axis] + except AttributeError: + Ntotal = len(ary) + try: + # handle array case. + Nsections = len(indices_or_sections) + 1 + div_points = [0] + list(indices_or_sections) + [Ntotal] + except TypeError: + # indices_or_sections is a scalar, not an array. 
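+ # e.g. for Ntotal = 9 and Nsections = 4, divmod gives (2, 1), so + # section_sizes is [0, 3, 2, 2, 2] and div_points becomes [0, 3, 5, 7, 9], + # matching the np.array_split(np.arange(9), 4) example in the docstring.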
+ Nsections = int(indices_or_sections) + if Nsections <= 0: + raise ValueError('number sections must be larger than 0.') from None + Neach_section, extras = divmod(Ntotal, Nsections) + section_sizes = ([0] + + extras * [Neach_section+1] + + (Nsections-extras) * [Neach_section]) + div_points = _nx.array(section_sizes, dtype=_nx.intp).cumsum() + + sub_arys = [] + sary = _nx.swapaxes(ary, axis, 0) + for i in range(Nsections): + st = div_points[i] + end = div_points[i + 1] + sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0)) + + return sub_arys + + +def _split_dispatcher(ary, indices_or_sections, axis=None): + return (ary, indices_or_sections) + + +@array_function_dispatch(_split_dispatcher) +def split(ary, indices_or_sections, axis=0): + """ + Split an array into multiple sub-arrays as views into `ary`. + + Parameters + ---------- + ary : ndarray + Array to be divided into sub-arrays. + indices_or_sections : int or 1-D array + If `indices_or_sections` is an integer, N, the array will be divided + into N equal arrays along `axis`. If such a split is not possible, + an error is raised. + + If `indices_or_sections` is a 1-D array of sorted integers, the entries + indicate where along `axis` the array is split. For example, + ``[2, 3]`` would, for ``axis=0``, result in + + - ary[:2] + - ary[2:3] + - ary[3:] + + If an index exceeds the dimension of the array along `axis`, + an empty sub-array is returned correspondingly. + axis : int, optional + The axis along which to split, default is 0. + + Returns + ------- + sub-arrays : list of ndarrays + A list of sub-arrays as views into `ary`. + + Raises + ------ + ValueError + If `indices_or_sections` is given as an integer, but + a split does not result in equal division. + + See Also + -------- + array_split : Split an array into multiple sub-arrays of equal or + near-equal size. Does not raise an exception if + an equal division cannot be made. + hsplit : Split array into multiple sub-arrays horizontally (column-wise). + vsplit : Split array into multiple sub-arrays vertically (row wise). + dsplit : Split array into multiple sub-arrays along the 3rd axis (depth). + concatenate : Join a sequence of arrays along an existing axis. + stack : Join a sequence of arrays along a new axis. + hstack : Stack arrays in sequence horizontally (column wise). + vstack : Stack arrays in sequence vertically (row wise). + dstack : Stack arrays in sequence depth wise (along third dimension). + + Examples + -------- + >>> x = np.arange(9.0) + >>> np.split(x, 3) + [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])] + + >>> x = np.arange(8.0) + >>> np.split(x, [3, 5, 6, 10]) + [array([0., 1., 2.]), + array([3., 4.]), + array([5.]), + array([6., 7.]), + array([], dtype=float64)] + + """ + try: + len(indices_or_sections) + except TypeError: + sections = indices_or_sections + N = ary.shape[axis] + if N % sections: + raise ValueError( + 'array split does not result in an equal division') from None + return array_split(ary, indices_or_sections, axis) + + +def _hvdsplit_dispatcher(ary, indices_or_sections): + return (ary, indices_or_sections) + + +@array_function_dispatch(_hvdsplit_dispatcher) +def hsplit(ary, indices_or_sections): + """ + Split an array into multiple sub-arrays horizontally (column-wise). + + Please refer to the `split` documentation. `hsplit` is equivalent + to `split` with ``axis=1``, the array is always split along the second + axis regardless of the array dimension. 
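+ + For example, splitting the ``(4, 4)`` array used in the Examples below + with ``np.hsplit(x, 2)`` yields two sub-arrays of shape ``(4, 2)``.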
+ + See Also + -------- + split : Split an array into multiple sub-arrays of equal size. + + Examples + -------- + >>> x = np.arange(16.0).reshape(4, 4) + >>> x + array([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [12., 13., 14., 15.]]) + >>> np.hsplit(x, 2) + [array([[ 0., 1.], + [ 4., 5.], + [ 8., 9.], + [12., 13.]]), + array([[ 2., 3.], + [ 6., 7.], + [10., 11.], + [14., 15.]])] + >>> np.hsplit(x, np.array([3, 6])) + [array([[ 0., 1., 2.], + [ 4., 5., 6.], + [ 8., 9., 10.], + [12., 13., 14.]]), + array([[ 3.], + [ 7.], + [11.], + [15.]]), + array([], shape=(4, 0), dtype=float64)] + + With a higher dimensional array the split is still along the second axis. + + >>> x = np.arange(8.0).reshape(2, 2, 2) + >>> x + array([[[0., 1.], + [2., 3.]], + [[4., 5.], + [6., 7.]]]) + >>> np.hsplit(x, 2) + [array([[[0., 1.]], + [[4., 5.]]]), + array([[[2., 3.]], + [[6., 7.]]])] + + """ + if _nx.ndim(ary) == 0: + raise ValueError('hsplit only works on arrays of 1 or more dimensions') + if ary.ndim > 1: + return split(ary, indices_or_sections, 1) + else: + return split(ary, indices_or_sections, 0) + + +@array_function_dispatch(_hvdsplit_dispatcher) +def vsplit(ary, indices_or_sections): + """ + Split an array into multiple sub-arrays vertically (row-wise). + + Please refer to the ``split`` documentation. ``vsplit`` is equivalent + to ``split`` with `axis=0` (default), the array is always split along the + first axis regardless of the array dimension. + + See Also + -------- + split : Split an array into multiple sub-arrays of equal size. + + Examples + -------- + >>> x = np.arange(16.0).reshape(4, 4) + >>> x + array([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [12., 13., 14., 15.]]) + >>> np.vsplit(x, 2) + [array([[0., 1., 2., 3.], + [4., 5., 6., 7.]]), array([[ 8., 9., 10., 11.], + [12., 13., 14., 15.]])] + >>> np.vsplit(x, np.array([3, 6])) + [array([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]]), array([[12., 13., 14., 15.]]), array([], shape=(0, 4), dtype=float64)] + + With a higher dimensional array the split is still along the first axis. + + >>> x = np.arange(8.0).reshape(2, 2, 2) + >>> x + array([[[0., 1.], + [2., 3.]], + [[4., 5.], + [6., 7.]]]) + >>> np.vsplit(x, 2) + [array([[[0., 1.], + [2., 3.]]]), array([[[4., 5.], + [6., 7.]]])] + + """ + if _nx.ndim(ary) < 2: + raise ValueError('vsplit only works on arrays of 2 or more dimensions') + return split(ary, indices_or_sections, 0) + + +@array_function_dispatch(_hvdsplit_dispatcher) +def dsplit(ary, indices_or_sections): + """ + Split array into multiple sub-arrays along the 3rd axis (depth). + + Please refer to the `split` documentation. `dsplit` is equivalent + to `split` with ``axis=2``, the array is always split along the third + axis provided the array dimension is greater than or equal to 3. + + See Also + -------- + split : Split an array into multiple sub-arrays of equal size. 
+ + Examples + -------- + >>> x = np.arange(16.0).reshape(2, 2, 4) + >>> x + array([[[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.]], + [[ 8., 9., 10., 11.], + [12., 13., 14., 15.]]]) + >>> np.dsplit(x, 2) + [array([[[ 0., 1.], + [ 4., 5.]], + [[ 8., 9.], + [12., 13.]]]), array([[[ 2., 3.], + [ 6., 7.]], + [[10., 11.], + [14., 15.]]])] + >>> np.dsplit(x, np.array([3, 6])) + [array([[[ 0., 1., 2.], + [ 4., 5., 6.]], + [[ 8., 9., 10.], + [12., 13., 14.]]]), + array([[[ 3.], + [ 7.]], + [[11.], + [15.]]]), + array([], shape=(2, 2, 0), dtype=float64)] + """ + if _nx.ndim(ary) < 3: + raise ValueError('dsplit only works on arrays of 3 or more dimensions') + return split(ary, indices_or_sections, 2) + +def get_array_prepare(*args): + """Find the wrapper for the array with the highest priority. + + In case of ties, leftmost wins. If no wrapper is found, return None. + """ + wrappers = sorted((getattr(x, '__array_priority__', 0), -i, + x.__array_prepare__) for i, x in enumerate(args) + if hasattr(x, '__array_prepare__')) + if wrappers: + return wrappers[-1][-1] + return None + +def get_array_wrap(*args): + """Find the wrapper for the array with the highest priority. + + In case of ties, leftmost wins. If no wrapper is found, return None. + """ + wrappers = sorted((getattr(x, '__array_priority__', 0), -i, + x.__array_wrap__) for i, x in enumerate(args) + if hasattr(x, '__array_wrap__')) + if wrappers: + return wrappers[-1][-1] + return None + + +def _kron_dispatcher(a, b): + return (a, b) + + +@array_function_dispatch(_kron_dispatcher) +def kron(a, b): + """ + Kronecker product of two arrays. + + Computes the Kronecker product, a composite array made of blocks of the + second array scaled by the first. + + Parameters + ---------- + a, b : array_like + + Returns + ------- + out : ndarray + + See Also + -------- + outer : The outer product + + Notes + ----- + The function assumes that the number of dimensions of `a` and `b` + are the same, if necessary prepending the smallest with ones. + If ``a.shape = (r0,r1,..,rN)`` and ``b.shape = (s0,s1,...,sN)``, + the Kronecker product has shape ``(r0*s0, r1*s1, ..., rN*sN)``. + The elements are products of elements from `a` and `b`, organized + explicitly by:: + + kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN] + + where:: + + kt = it * st + jt, t = 0,...,N + + In the common 2-D case (N=1), the block structure can be visualized:: + + [[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ], + [ ... ... ], + [ a[-1,0]*b, a[-1,1]*b, ... 
, a[-1,-1]*b ]] + + + Examples + -------- + >>> np.kron([1,10,100], [5,6,7]) + array([ 5, 6, 7, ..., 500, 600, 700]) + >>> np.kron([5,6,7], [1,10,100]) + array([ 5, 50, 500, ..., 7, 70, 700]) + + >>> np.kron(np.eye(2), np.ones((2,2))) + array([[1., 1., 0., 0.], + [1., 1., 0., 0.], + [0., 0., 1., 1.], + [0., 0., 1., 1.]]) + + >>> a = np.arange(100).reshape((2,5,2,5)) + >>> b = np.arange(24).reshape((2,3,4)) + >>> c = np.kron(a,b) + >>> c.shape + (2, 10, 6, 20) + >>> I = (1,3,0,2) + >>> J = (0,2,1) + >>> J1 = (0,) + J # extend to ndim=4 + >>> S1 = (1,) + b.shape + >>> K = tuple(np.array(I) * np.array(S1) + np.array(J1)) + >>> c[K] == a[I]*b[J] + True + + """ + b = asanyarray(b) + a = array(a, copy=False, subok=True, ndmin=b.ndim) + ndb, nda = b.ndim, a.ndim + if (nda == 0 or ndb == 0): + return _nx.multiply(a, b) + as_ = a.shape + bs = b.shape + if not a.flags.contiguous: + a = reshape(a, as_) + if not b.flags.contiguous: + b = reshape(b, bs) + nd = ndb + if (ndb != nda): + if (ndb > nda): + as_ = (1,)*(ndb-nda) + as_ + else: + bs = (1,)*(nda-ndb) + bs + nd = nda + result = outer(a, b).reshape(as_+bs) + axis = nd-1 + for _ in range(nd): + result = concatenate(result, axis=axis) + wrapper = get_array_prepare(a, b) + if wrapper is not None: + result = wrapper(result) + wrapper = get_array_wrap(a, b) + if wrapper is not None: + result = wrapper(result) + return result + + +def _tile_dispatcher(A, reps): + return (A, reps) + + +@array_function_dispatch(_tile_dispatcher) +def tile(A, reps): + """ + Construct an array by repeating A the number of times given by reps. + + If `reps` has length ``d``, the result will have dimension of + ``max(d, A.ndim)``. + + If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new + axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication, + or shape (1, 1, 3) for 3-D replication. If this is not the desired + behavior, promote `A` to d-dimensions manually before calling this + function. + + If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it. + Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as + (1, 1, 2, 2). + + Note : Although tile may be used for broadcasting, it is strongly + recommended to use numpy's broadcasting operations and functions. + + Parameters + ---------- + A : array_like + The input array. + reps : array_like + The number of repetitions of `A` along each axis. + + Returns + ------- + c : ndarray + The tiled output array. + + See Also + -------- + repeat : Repeat elements of an array. + broadcast_to : Broadcast an array to a new shape + + Examples + -------- + >>> a = np.array([0, 1, 2]) + >>> np.tile(a, 2) + array([0, 1, 2, 0, 1, 2]) + >>> np.tile(a, (2, 2)) + array([[0, 1, 2, 0, 1, 2], + [0, 1, 2, 0, 1, 2]]) + >>> np.tile(a, (2, 1, 2)) + array([[[0, 1, 2, 0, 1, 2]], + [[0, 1, 2, 0, 1, 2]]]) + + >>> b = np.array([[1, 2], [3, 4]]) + >>> np.tile(b, 2) + array([[1, 2, 1, 2], + [3, 4, 3, 4]]) + >>> np.tile(b, (2, 1)) + array([[1, 2], + [3, 4], + [1, 2], + [3, 4]]) + + >>> c = np.array([1,2,3,4]) + >>> np.tile(c,(4,1)) + array([[1, 2, 3, 4], + [1, 2, 3, 4], + [1, 2, 3, 4], + [1, 2, 3, 4]]) + """ + try: + tup = tuple(reps) + except TypeError: + tup = (reps,) + d = len(tup) + if all(x == 1 for x in tup) and isinstance(A, _nx.ndarray): + # Fixes the problem that the function does not make a copy if A is a + # numpy array and the repetitions are 1 in all dimensions + return _nx.array(A, copy=True, subok=True, ndmin=d) + else: + # Note that no copy of zero-sized arrays is made. 
However since they + # have no data there is no risk of an inadvertent overwrite. + c = _nx.array(A, copy=False, subok=True, ndmin=d) + if (d < c.ndim): + tup = (1,)*(c.ndim-d) + tup + shape_out = tuple(s*t for s, t in zip(c.shape, tup)) + n = c.size + if n > 0: + for dim_in, nrep in zip(c.shape, tup): + if nrep != 1: + c = c.reshape(-1, n).repeat(nrep, 0) + n //= dim_in + return c.reshape(shape_out) diff --git a/MLPY/Lib/site-packages/numpy/lib/shape_base.pyi b/MLPY/Lib/site-packages/numpy/lib/shape_base.pyi new file mode 100644 index 0000000000000000000000000000000000000000..d3747e7b292491ab0239e3e4c1c27c98f1f3067d --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/shape_base.pyi @@ -0,0 +1,24 @@ +from typing import List + +from numpy.core.shape_base import vstack + +__all__: List[str] + +row_stack = vstack + +def take_along_axis(arr, indices, axis): ... +def put_along_axis(arr, indices, values, axis): ... +def apply_along_axis(func1d, axis, arr, *args, **kwargs): ... +def apply_over_axes(func, a, axes): ... +def expand_dims(a, axis): ... +def column_stack(tup): ... +def dstack(tup): ... +def array_split(ary, indices_or_sections, axis=...): ... +def split(ary, indices_or_sections, axis=...): ... +def hsplit(ary, indices_or_sections): ... +def vsplit(ary, indices_or_sections): ... +def dsplit(ary, indices_or_sections): ... +def get_array_prepare(*args): ... +def get_array_wrap(*args): ... +def kron(a, b): ... +def tile(A, reps): ... diff --git a/MLPY/Lib/site-packages/numpy/lib/stride_tricks.py b/MLPY/Lib/site-packages/numpy/lib/stride_tricks.py new file mode 100644 index 0000000000000000000000000000000000000000..e5e7632f618159176263ad320ef7725dd2cadf9d --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/stride_tricks.py @@ -0,0 +1,545 @@ +""" +Utilities that manipulate strides to achieve desirable effects. + +An explanation of strides can be found in the "ndarray.rst" file in the +NumPy reference guide. + +""" +import numpy as np +from numpy.core.numeric import normalize_axis_tuple +from numpy.core.overrides import array_function_dispatch, set_module + +__all__ = ['broadcast_to', 'broadcast_arrays', 'broadcast_shapes'] + + +class DummyArray: + """Dummy object that just exists to hang __array_interface__ dictionaries + and possibly keep alive a reference to a base array. + """ + + def __init__(self, interface, base=None): + self.__array_interface__ = interface + self.base = base + + +def _maybe_view_as_subclass(original_array, new_array): + if type(original_array) is not type(new_array): + # if input was an ndarray subclass and subclasses were OK, + # then view the result as that subclass. + new_array = new_array.view(type=type(original_array)) + # Since we have done something akin to a view from original_array, we + # should let the subclass finalize (if it has it implemented, i.e., is + # not None). + if new_array.__array_finalize__: + new_array.__array_finalize__(original_array) + return new_array + + +def as_strided(x, shape=None, strides=None, subok=False, writeable=True): + """ + Create a view into the array with the given shape and strides. + + .. warning:: This function has to be used with extreme care, see notes. + + Parameters + ---------- + x : ndarray + Array to create a new view of. + shape : sequence of int, optional + The shape of the new array. Defaults to ``x.shape``. + strides : sequence of int, optional + The strides of the new array. Defaults to ``x.strides``. + subok : bool, optional + .. versionadded:: 1.10 + + If True, subclasses are preserved.
+ writeable : bool, optional + .. versionadded:: 1.12 + + If set to False, the returned array will always be readonly. + Otherwise it will be writable if the original array was. It + is advisable to set this to False if possible (see Notes). + + Returns + ------- + view : ndarray + + See also + -------- + broadcast_to : broadcast an array to a given shape. + reshape : reshape an array. + lib.stride_tricks.sliding_window_view : + userfriendly and safe function for the creation of sliding window views. + + Notes + ----- + ``as_strided`` creates a view into the array given the exact strides + and shape. This means it manipulates the internal data structure of + ndarray and, if done incorrectly, the array elements can point to + invalid memory and can corrupt results or crash your program. + It is advisable to always use the original ``x.strides`` when + calculating new strides to avoid reliance on a contiguous memory + layout. + + Furthermore, arrays created with this function often contain self + overlapping memory, so that two elements are identical. + Vectorized write operations on such arrays will typically be + unpredictable. They may even give different results for small, large, + or transposed arrays. + Since writing to these arrays has to be tested and done with great + care, you may want to use ``writeable=False`` to avoid accidental write + operations. + + For these reasons it is advisable to avoid ``as_strided`` when + possible. + """ + # first convert input to array, possibly keeping subclass + x = np.array(x, copy=False, subok=subok) + interface = dict(x.__array_interface__) + if shape is not None: + interface['shape'] = tuple(shape) + if strides is not None: + interface['strides'] = tuple(strides) + + array = np.asarray(DummyArray(interface, base=x)) + # The route via `__interface__` does not preserve structured + # dtypes. Since dtype should remain unchanged, we set it explicitly. + array.dtype = x.dtype + + view = _maybe_view_as_subclass(x, array) + + if view.flags.writeable and not writeable: + view.flags.writeable = False + + return view + + +def _sliding_window_view_dispatcher(x, window_shape, axis=None, *, + subok=None, writeable=None): + return (x,) + + +@array_function_dispatch(_sliding_window_view_dispatcher) +def sliding_window_view(x, window_shape, axis=None, *, + subok=False, writeable=False): + """ + Create a sliding window view into the array with the given window shape. + + Also known as rolling or moving window, the window slides across all + dimensions of the array and extracts subsets of the array at all window + positions. + + .. versionadded:: 1.20.0 + + Parameters + ---------- + x : array_like + Array to create the sliding window view from. + window_shape : int or tuple of int + Size of window over each axis that takes part in the sliding window. + If `axis` is not present, must have same length as the number of input + array dimensions. Single integers `i` are treated as if they were the + tuple `(i,)`. + axis : int or tuple of int, optional + Axis or axes along which the sliding window is applied. + By default, the sliding window is applied to all axes and + `window_shape[i]` will refer to axis `i` of `x`. + If `axis` is given as a `tuple of int`, `window_shape[i]` will refer to + the axis `axis[i]` of `x`. + Single integers `i` are treated as if they were the tuple `(i,)`. + subok : bool, optional + If True, sub-classes will be passed-through, otherwise the returned + array will be forced to be a base-class array (default). 
+ writeable : bool, optional + When true, allow writing to the returned view. The default is false, + as this should be used with caution: the returned view contains the + same memory location multiple times, so writing to one location will + cause others to change. + + Returns + ------- + view : ndarray + Sliding window view of the array. The sliding window dimensions are + inserted at the end, and the original dimensions are trimmed as + required by the size of the sliding window. + That is, ``view.shape = x_shape_trimmed + window_shape``, where + ``x_shape_trimmed`` is ``x.shape`` with every entry reduced by one less + than the corresponding window size. + + See Also + -------- + lib.stride_tricks.as_strided: A lower-level and less safe routine for + creating arbitrary views from custom shape and strides. + broadcast_to: broadcast an array to a given shape. + + Notes + ----- + For many applications using a sliding window view can be convenient, but + potentially very slow. Often specialized solutions exist, for example: + + - `scipy.signal.fftconvolve` + + - filtering functions in `scipy.ndimage` + + - moving window functions provided by + `bottleneck `_. + + As a rough estimate, a sliding window approach with an input size of `N` + and a window size of `W` will scale as `O(N*W)` where frequently a special + algorithm can achieve `O(N)`. That means that the sliding window variant + for a window size of 100 can be a 100 times slower than a more specialized + version. + + Nevertheless, for small window sizes, when no custom algorithm exists, or + as a prototyping and developing tool, this function can be a good solution. + + Examples + -------- + >>> x = np.arange(6) + >>> x.shape + (6,) + >>> v = sliding_window_view(x, 3) + >>> v.shape + (4, 3) + >>> v + array([[0, 1, 2], + [1, 2, 3], + [2, 3, 4], + [3, 4, 5]]) + + This also works in more dimensions, e.g. + + >>> i, j = np.ogrid[:3, :4] + >>> x = 10*i + j + >>> x.shape + (3, 4) + >>> x + array([[ 0, 1, 2, 3], + [10, 11, 12, 13], + [20, 21, 22, 23]]) + >>> shape = (2,2) + >>> v = sliding_window_view(x, shape) + >>> v.shape + (2, 3, 2, 2) + >>> v + array([[[[ 0, 1], + [10, 11]], + [[ 1, 2], + [11, 12]], + [[ 2, 3], + [12, 13]]], + [[[10, 11], + [20, 21]], + [[11, 12], + [21, 22]], + [[12, 13], + [22, 23]]]]) + + The axis can be specified explicitly: + + >>> v = sliding_window_view(x, 3, 0) + >>> v.shape + (1, 4, 3) + >>> v + array([[[ 0, 10, 20], + [ 1, 11, 21], + [ 2, 12, 22], + [ 3, 13, 23]]]) + + The same axis can be used several times. In that case, every use reduces + the corresponding original dimension: + + >>> v = sliding_window_view(x, (2, 3), (1, 1)) + >>> v.shape + (3, 1, 2, 3) + >>> v + array([[[[ 0, 1, 2], + [ 1, 2, 3]]], + [[[10, 11, 12], + [11, 12, 13]]], + [[[20, 21, 22], + [21, 22, 23]]]]) + + Combining with stepped slicing (`::step`), this can be used to take sliding + views which skip elements: + + >>> x = np.arange(7) + >>> sliding_window_view(x, 5)[:, ::2] + array([[0, 2, 4], + [1, 3, 5], + [2, 4, 6]]) + + or views which move by multiple elements + + >>> x = np.arange(7) + >>> sliding_window_view(x, 3)[::2, :] + array([[0, 1, 2], + [2, 3, 4], + [4, 5, 6]]) + + A common application of `sliding_window_view` is the calculation of running + statistics. 
The simplest example is the + `moving average `_: + + >>> x = np.arange(6) + >>> x.shape + (6,) + >>> v = sliding_window_view(x, 3) + >>> v.shape + (4, 3) + >>> v + array([[0, 1, 2], + [1, 2, 3], + [2, 3, 4], + [3, 4, 5]]) + >>> moving_average = v.mean(axis=-1) + >>> moving_average + array([1., 2., 3., 4.]) + + Note that a sliding window approach is often **not** optimal (see Notes). + """ + window_shape = (tuple(window_shape) + if np.iterable(window_shape) + else (window_shape,)) + # first convert input to array, possibly keeping subclass + x = np.array(x, copy=False, subok=subok) + + window_shape_array = np.array(window_shape) + if np.any(window_shape_array < 0): + raise ValueError('`window_shape` cannot contain negative values') + + if axis is None: + axis = tuple(range(x.ndim)) + if len(window_shape) != len(axis): + raise ValueError(f'Since axis is `None`, must provide ' + f'window_shape for all dimensions of `x`; ' + f'got {len(window_shape)} window_shape elements ' + f'and `x.ndim` is {x.ndim}.') + else: + axis = normalize_axis_tuple(axis, x.ndim, allow_duplicate=True) + if len(window_shape) != len(axis): + raise ValueError(f'Must provide matching length window_shape and ' + f'axis; got {len(window_shape)} window_shape ' + f'elements and {len(axis)} axes elements.') + + out_strides = x.strides + tuple(x.strides[ax] for ax in axis) + + # note: same axis can be windowed repeatedly + x_shape_trimmed = list(x.shape) + for ax, dim in zip(axis, window_shape): + if x_shape_trimmed[ax] < dim: + raise ValueError( + 'window shape cannot be larger than input array shape') + x_shape_trimmed[ax] -= dim - 1 + out_shape = tuple(x_shape_trimmed) + window_shape + return as_strided(x, strides=out_strides, shape=out_shape, + subok=subok, writeable=writeable) + + +def _broadcast_to(array, shape, subok, readonly): + shape = tuple(shape) if np.iterable(shape) else (shape,) + array = np.array(array, copy=False, subok=subok) + if not shape and array.shape: + raise ValueError('cannot broadcast a non-scalar to a scalar array') + if any(size < 0 for size in shape): + raise ValueError('all elements of broadcast shape must be non-' + 'negative') + extras = [] + it = np.nditer( + (array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'] + extras, + op_flags=['readonly'], itershape=shape, order='C') + with it: + # never really has writebackifcopy semantics + broadcast = it.itviews[0] + result = _maybe_view_as_subclass(array, broadcast) + # In a future version this will go away + if not readonly and array.flags._writeable_no_warn: + result.flags.writeable = True + result.flags._warn_on_write = True + return result + + +def _broadcast_to_dispatcher(array, shape, subok=None): + return (array,) + + +@array_function_dispatch(_broadcast_to_dispatcher, module='numpy') +def broadcast_to(array, shape, subok=False): + """Broadcast an array to a new shape. + + Parameters + ---------- + array : array_like + The array to broadcast. + shape : tuple + The shape of the desired array. + subok : bool, optional + If True, then sub-classes will be passed-through, otherwise + the returned array will be forced to be a base-class array (default). + + Returns + ------- + broadcast : array + A readonly view on the original array with the given shape. It is + typically not contiguous. Furthermore, more than one element of a + broadcasted array may refer to a single memory location. + + Raises + ------ + ValueError + If the array is not compatible with the new shape according to NumPy's + broadcasting rules. 
+ + See Also + -------- + broadcast + broadcast_arrays + broadcast_shapes + + Notes + ----- + .. versionadded:: 1.10.0 + + Examples + -------- + >>> x = np.array([1, 2, 3]) + >>> np.broadcast_to(x, (3, 3)) + array([[1, 2, 3], + [1, 2, 3], + [1, 2, 3]]) + """ + return _broadcast_to(array, shape, subok=subok, readonly=True) + + +def _broadcast_shape(*args): + """Returns the shape of the arrays that would result from broadcasting the + supplied arrays against each other. + """ + # use the old-iterator because np.nditer does not handle size 0 arrays + # consistently + b = np.broadcast(*args[:32]) + # unfortunately, it cannot handle 32 or more arguments directly + for pos in range(32, len(args), 31): + # ironically, np.broadcast does not properly handle np.broadcast + # objects (it treats them as scalars) + # use broadcasting to avoid allocating the full array + b = broadcast_to(0, b.shape) + b = np.broadcast(b, *args[pos:(pos + 31)]) + return b.shape + + +@set_module('numpy') +def broadcast_shapes(*args): + """ + Broadcast the input shapes into a single shape. + + :ref:`Learn more about broadcasting here `. + + .. versionadded:: 1.20.0 + + Parameters + ---------- + `*args` : tuples of ints, or ints + The shapes to be broadcast against each other. + + Returns + ------- + tuple + Broadcasted shape. + + Raises + ------ + ValueError + If the shapes are not compatible and cannot be broadcast according + to NumPy's broadcasting rules. + + See Also + -------- + broadcast + broadcast_arrays + broadcast_to + + Examples + -------- + >>> np.broadcast_shapes((1, 2), (3, 1), (3, 2)) + (3, 2) + + >>> np.broadcast_shapes((6, 7), (5, 6, 1), (7,), (5, 1, 7)) + (5, 6, 7) + """ + arrays = [np.empty(x, dtype=[]) for x in args] + return _broadcast_shape(*arrays) + + +def _broadcast_arrays_dispatcher(*args, subok=None): + return args + + +@array_function_dispatch(_broadcast_arrays_dispatcher, module='numpy') +def broadcast_arrays(*args, subok=False): + """ + Broadcast any number of arrays against each other. + + Parameters + ---------- + `*args` : array_likes + The arrays to broadcast. + + subok : bool, optional + If True, then sub-classes will be passed-through, otherwise + the returned arrays will be forced to be a base-class array (default). + + Returns + ------- + broadcasted : list of arrays + These arrays are views on the original arrays. They are typically + not contiguous. Furthermore, more than one element of a + broadcasted array may refer to a single memory location. If you need + to write to the arrays, make copies first. While you can set the + ``writable`` flag True, writing to a single output value may end up + changing more than one location in the output array. + + .. deprecated:: 1.17 + The output is currently marked so that if written to, a deprecation + warning will be emitted. A future version will set the + ``writable`` flag False so writing to it will raise an error. + + See Also + -------- + broadcast + broadcast_to + broadcast_shapes + + Examples + -------- + >>> x = np.array([[1,2,3]]) + >>> y = np.array([[4],[5]]) + >>> np.broadcast_arrays(x, y) + [array([[1, 2, 3], + [1, 2, 3]]), array([[4, 4, 4], + [5, 5, 5]])] + + Here is a useful idiom for getting contiguous copies instead of + non-contiguous views. + + >>> [np.array(a) for a in np.broadcast_arrays(x, y)] + [array([[1, 2, 3], + [1, 2, 3]]), array([[4, 4, 4], + [5, 5, 5]])] + + """ + # nditer is not used here to avoid the limit of 32 arrays. 
+ # Otherwise, something like the following one-liner would suffice: + # return np.nditer(args, flags=['multi_index', 'zerosize_ok'], + # order='C').itviews + + args = [np.array(_m, copy=False, subok=subok) for _m in args] + + shape = _broadcast_shape(*args) + + if all(array.shape == shape for array in args): + # Common case where nothing needs to be broadcasted. + return args + + return [_broadcast_to(array, shape, subok=subok, readonly=False) + for array in args] diff --git a/MLPY/Lib/site-packages/numpy/lib/stride_tricks.pyi b/MLPY/Lib/site-packages/numpy/lib/stride_tricks.pyi new file mode 100644 index 0000000000000000000000000000000000000000..a4dc96e6a20f4a50f2761372a29cb2185071a602 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/stride_tricks.pyi @@ -0,0 +1,16 @@ +from typing import Any, List + +from numpy.typing import _ShapeLike, _Shape + +__all__: List[str] + +class DummyArray: + __array_interface__: Any + base: Any + def __init__(self, interface, base=...): ... + +def as_strided(x, shape=..., strides=..., subok=..., writeable=...): ... +def sliding_window_view(x, window_shape, axis=..., *, subok=..., writeable=...): ... +def broadcast_to(array, shape, subok=...): ... +def broadcast_shapes(*args: _ShapeLike) -> _Shape: ... +def broadcast_arrays(*args, subok=...): ... diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/__init__.py b/MLPY/Lib/site-packages/numpy/lib/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4047493583439ce72a609d93dda347d1ce0c32b9 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test__datasource.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test__datasource.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d38798ea6429206e3e481798a29d9b36369012f2 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test__datasource.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test__iotools.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test__iotools.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8322343fb93bc0546656f6e4de6dcb470c4dc7ad Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test__iotools.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test__version.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test__version.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a8a04597194623969f07651862f620754abc53fe Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test__version.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_arraypad.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_arraypad.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b9d7fb34b62838523f97b9c19b6412ff88b339a Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_arraypad.cpython-39.pyc differ diff --git 
a/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_arraysetops.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_arraysetops.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..add50472a1705621688163371178b2caa74c6155 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_arraysetops.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_arrayterator.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_arrayterator.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7559ff250c24db82a0ac531c7591a2672cee253b Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_arrayterator.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_financial_expired.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_financial_expired.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ba935bc5b3b115118405cc1106fc4834861549e Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_financial_expired.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_format.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_format.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..25710b04964f373516b2c1e7d0c0c31bf78e0c7a Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_format.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_function_base.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_function_base.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1941c97d4ed34d3c977725a5a0d32ae1816c35c Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_function_base.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_histograms.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_histograms.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4100bf926f37e6addb2456100d44cf22e2595bbb Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_histograms.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_index_tricks.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_index_tricks.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90a58b99de991d3ec65aec2eb92d673028abec14 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_index_tricks.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_io.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_io.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f8ac65e3bf1d167ecc2d8b23a3a1c9b3115009b4 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_io.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_mixins.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_mixins.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..103c771e910a3ecf74f4078a8e4a6ccb78143604 Binary files 
/dev/null and b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_mixins.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_nanfunctions.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_nanfunctions.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ef74f136dc9cb53f80d07f864164f8ab0e09cc2 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_nanfunctions.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_packbits.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_packbits.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..565153285a40a338798f9b1b3fba7138ed5829be Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_packbits.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_polynomial.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_polynomial.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a4a364be95601edb68f92f3d7cc5a2c2c40e429 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_polynomial.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_recfunctions.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_recfunctions.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c34dd949c764a363ac7b4f9ca3ca1aae60cbfec Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_recfunctions.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_regression.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_regression.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4fab8995272728f440383a3c7650fcb5e13d6a7e Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_regression.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_shape_base.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_shape_base.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7202c7aa4dfbc5c08b040d1c9c6cf75a7466c101 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_shape_base.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_stride_tricks.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_stride_tricks.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d33368a19a990593ac7d99805742f01befa299c1 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_stride_tricks.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_twodim_base.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_twodim_base.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9cfd5a38565fce31d84f2cb6dad9ecd95fdbd144 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_twodim_base.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_type_check.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_type_check.cpython-39.pyc 
new file mode 100644 index 0000000000000000000000000000000000000000..3afec2e7efe6a9c5d991cf1c4bc850797582efbe Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_type_check.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_ufunclike.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_ufunclike.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7af80c053ad758735515cb83fd15e70dd118efc7 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_ufunclike.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_utils.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a7e5cc51ef011161244cee338e29fc98b3fd298d Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/lib/tests/__pycache__/test_utils.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/data/py2-objarr.npy b/MLPY/Lib/site-packages/numpy/lib/tests/data/py2-objarr.npy new file mode 100644 index 0000000000000000000000000000000000000000..0d41ae5223b7235d16081318703875ef495bbfc7 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/tests/data/py2-objarr.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:178732502fbf4c1f504852c0a3673b738e2b0ba35460882b7c6c7d3ea58f48a9 +size 258 diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/data/py2-objarr.npz b/MLPY/Lib/site-packages/numpy/lib/tests/data/py2-objarr.npz new file mode 100644 index 0000000000000000000000000000000000000000..85da7be0c3d77ff8d71ab7671e0d23188b7b36c4 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/tests/data/py2-objarr.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c68d771c14f415b159daabd9cf42d61836f74ae40049269787baca7d57098f1e +size 366 diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/data/py3-objarr.npy b/MLPY/Lib/site-packages/numpy/lib/tests/data/py3-objarr.npy new file mode 100644 index 0000000000000000000000000000000000000000..4b833c1ce2ab92b9e02706a430f9ea58a1e086c9 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/tests/data/py3-objarr.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a534d587c7b3a7e97000addf920bdd294f00ae9e4d30ace7543f8ce84c7fb80d +size 341 diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/data/py3-objarr.npz b/MLPY/Lib/site-packages/numpy/lib/tests/data/py3-objarr.npz new file mode 100644 index 0000000000000000000000000000000000000000..df1ed78f455a17de597a2a1a125c13e595717298 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/tests/data/py3-objarr.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a90474812e7b7bdb5ad7a77fbc242369a28cef880f765c023e4a79e4ffaaaddc +size 449 diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/data/python3.npy b/MLPY/Lib/site-packages/numpy/lib/tests/data/python3.npy new file mode 100644 index 0000000000000000000000000000000000000000..1de3124ee4eaf42cfc3625cdfa88767de07df9c4 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/tests/data/python3.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f469dde101a2c65e283d2ed487028f8180ebcb9457cf60c9d9b952314668fed +size 96 diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/data/win64python2.npy b/MLPY/Lib/site-packages/numpy/lib/tests/data/win64python2.npy new file mode 100644 index 
0000000000000000000000000000000000000000..46edfc4d6869ed05b8438bc5bfa5d77d9ec3e9ba --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/tests/data/win64python2.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a039c807558149ad5fa7ad12436c69d49c5e194cf617b92785f8cb60ec63297 +size 96 diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/test__datasource.py b/MLPY/Lib/site-packages/numpy/lib/tests/test__datasource.py new file mode 100644 index 0000000000000000000000000000000000000000..1e5819862dfa7209673cf793ca2460d9c16d238f --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/tests/test__datasource.py @@ -0,0 +1,350 @@ +import os +import pytest +from tempfile import mkdtemp, mkstemp, NamedTemporaryFile +from shutil import rmtree + +import numpy.lib._datasource as datasource +from numpy.testing import assert_, assert_equal, assert_raises + +import urllib.request as urllib_request +from urllib.parse import urlparse +from urllib.error import URLError + + +def urlopen_stub(url, data=None): + '''Stub to replace urlopen for testing.''' + if url == valid_httpurl(): + tmpfile = NamedTemporaryFile(prefix='urltmp_') + return tmpfile + else: + raise URLError('Name or service not known') + +# setup and teardown +old_urlopen = None + + +def setup_module(): + global old_urlopen + + old_urlopen = urllib_request.urlopen + urllib_request.urlopen = urlopen_stub + + +def teardown_module(): + urllib_request.urlopen = old_urlopen + +# A valid website for more robust testing +http_path = 'http://www.google.com/' +http_file = 'index.html' + +http_fakepath = 'http://fake.abc.web/site/' +http_fakefile = 'fake.txt' + +malicious_files = ['/etc/shadow', '../../shadow', + '..\\system.dat', 'c:\\windows\\system.dat'] + +magic_line = b'three is the magic number' + + +# Utility functions used by many tests +def valid_textfile(filedir): + # Generate and return a valid temporary file. + fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir, text=True) + os.close(fd) + return path + + +def invalid_textfile(filedir): + # Generate and return an invalid filename. + fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir) + os.close(fd) + os.remove(path) + return path + + +def valid_httpurl(): + return http_path+http_file + + +def invalid_httpurl(): + return http_fakepath+http_fakefile + + +def valid_baseurl(): + return http_path + + +def invalid_baseurl(): + return http_fakepath + + +def valid_httpfile(): + return http_file + + +def invalid_httpfile(): + return http_fakefile + + +class TestDataSourceOpen: + def setup(self): + self.tmpdir = mkdtemp() + self.ds = datasource.DataSource(self.tmpdir) + + def teardown(self): + rmtree(self.tmpdir) + del self.ds + + def test_ValidHTTP(self): + fh = self.ds.open(valid_httpurl()) + assert_(fh) + fh.close() + + def test_InvalidHTTP(self): + url = invalid_httpurl() + assert_raises(IOError, self.ds.open, url) + try: + self.ds.open(url) + except IOError as e: + # Regression test for bug fixed in r4342. + assert_(e.errno is None) + + def test_InvalidHTTPCacheURLError(self): + assert_raises(URLError, self.ds._cache, invalid_httpurl()) + + def test_ValidFile(self): + local_file = valid_textfile(self.tmpdir) + fh = self.ds.open(local_file) + assert_(fh) + fh.close() + + def test_InvalidFile(self): + invalid_file = invalid_textfile(self.tmpdir) + assert_raises(IOError, self.ds.open, invalid_file) + + def test_ValidGzipFile(self): + try: + import gzip + except ImportError: + # We don't have the gzip capabilities to test. 
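For context on what these tests exercise: DataSource hides local paths, URLs, and compressed files behind a single open(), caching remote files under its destination directory. A rough usage sketch (the destpath and URL here are hypothetical, and the tests above stub out urlopen so nothing real is ever fetched):

import numpy.lib._datasource as datasource

ds = datasource.DataSource('/tmp/np_cache')   # hypothetical cache directory
# exists() checks the local cache first, then tries the remote name;
# open() on a URL downloads into /tmp/np_cache/<netloc>/<path> and,
# for .gz/.bz2 names, transparently decompresses on read:
# fh = ds.open('https://example.com/data.txt.gz')   # hypothetical URL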
+ pytest.skip() + # Test datasource's internal file_opener for Gzip files. + filepath = os.path.join(self.tmpdir, 'foobar.txt.gz') + fp = gzip.open(filepath, 'w') + fp.write(magic_line) + fp.close() + fp = self.ds.open(filepath) + result = fp.readline() + fp.close() + assert_equal(magic_line, result) + + def test_ValidBz2File(self): + try: + import bz2 + except ImportError: + # We don't have the bz2 capabilities to test. + pytest.skip() + # Test datasource's internal file_opener for BZip2 files. + filepath = os.path.join(self.tmpdir, 'foobar.txt.bz2') + fp = bz2.BZ2File(filepath, 'w') + fp.write(magic_line) + fp.close() + fp = self.ds.open(filepath) + result = fp.readline() + fp.close() + assert_equal(magic_line, result) + + +class TestDataSourceExists: + def setup(self): + self.tmpdir = mkdtemp() + self.ds = datasource.DataSource(self.tmpdir) + + def teardown(self): + rmtree(self.tmpdir) + del self.ds + + def test_ValidHTTP(self): + assert_(self.ds.exists(valid_httpurl())) + + def test_InvalidHTTP(self): + assert_equal(self.ds.exists(invalid_httpurl()), False) + + def test_ValidFile(self): + # Test valid file in destpath + tmpfile = valid_textfile(self.tmpdir) + assert_(self.ds.exists(tmpfile)) + # Test valid local file not in destpath + localdir = mkdtemp() + tmpfile = valid_textfile(localdir) + assert_(self.ds.exists(tmpfile)) + rmtree(localdir) + + def test_InvalidFile(self): + tmpfile = invalid_textfile(self.tmpdir) + assert_equal(self.ds.exists(tmpfile), False) + + +class TestDataSourceAbspath: + def setup(self): + self.tmpdir = os.path.abspath(mkdtemp()) + self.ds = datasource.DataSource(self.tmpdir) + + def teardown(self): + rmtree(self.tmpdir) + del self.ds + + def test_ValidHTTP(self): + scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl()) + local_path = os.path.join(self.tmpdir, netloc, + upath.strip(os.sep).strip('/')) + assert_equal(local_path, self.ds.abspath(valid_httpurl())) + + def test_ValidFile(self): + tmpfile = valid_textfile(self.tmpdir) + tmpfilename = os.path.split(tmpfile)[-1] + # Test with filename only + assert_equal(tmpfile, self.ds.abspath(tmpfilename)) + # Test filename with complete path + assert_equal(tmpfile, self.ds.abspath(tmpfile)) + + def test_InvalidHTTP(self): + scheme, netloc, upath, pms, qry, frg = urlparse(invalid_httpurl()) + invalidhttp = os.path.join(self.tmpdir, netloc, + upath.strip(os.sep).strip('/')) + assert_(invalidhttp != self.ds.abspath(valid_httpurl())) + + def test_InvalidFile(self): + invalidfile = valid_textfile(self.tmpdir) + tmpfile = valid_textfile(self.tmpdir) + tmpfilename = os.path.split(tmpfile)[-1] + # Test with filename only + assert_(invalidfile != self.ds.abspath(tmpfilename)) + # Test filename with complete path + assert_(invalidfile != self.ds.abspath(tmpfile)) + + def test_sandboxing(self): + tmpfile = valid_textfile(self.tmpdir) + tmpfilename = os.path.split(tmpfile)[-1] + + tmp_path = lambda x: os.path.abspath(self.ds.abspath(x)) + + assert_(tmp_path(valid_httpurl()).startswith(self.tmpdir)) + assert_(tmp_path(invalid_httpurl()).startswith(self.tmpdir)) + assert_(tmp_path(tmpfile).startswith(self.tmpdir)) + assert_(tmp_path(tmpfilename).startswith(self.tmpdir)) + for fn in malicious_files: + assert_(tmp_path(http_path+fn).startswith(self.tmpdir)) + assert_(tmp_path(fn).startswith(self.tmpdir)) + + def test_windows_os_sep(self): + orig_os_sep = os.sep + try: + os.sep = '\\' + self.test_ValidHTTP() + self.test_ValidFile() + self.test_InvalidHTTP() + self.test_InvalidFile() + self.test_sandboxing() + finally: 
+ os.sep = orig_os_sep + + +class TestRepositoryAbspath: + def setup(self): + self.tmpdir = os.path.abspath(mkdtemp()) + self.repos = datasource.Repository(valid_baseurl(), self.tmpdir) + + def teardown(self): + rmtree(self.tmpdir) + del self.repos + + def test_ValidHTTP(self): + scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl()) + local_path = os.path.join(self.repos._destpath, netloc, + upath.strip(os.sep).strip('/')) + filepath = self.repos.abspath(valid_httpfile()) + assert_equal(local_path, filepath) + + def test_sandboxing(self): + tmp_path = lambda x: os.path.abspath(self.repos.abspath(x)) + assert_(tmp_path(valid_httpfile()).startswith(self.tmpdir)) + for fn in malicious_files: + assert_(tmp_path(http_path+fn).startswith(self.tmpdir)) + assert_(tmp_path(fn).startswith(self.tmpdir)) + + def test_windows_os_sep(self): + orig_os_sep = os.sep + try: + os.sep = '\\' + self.test_ValidHTTP() + self.test_sandboxing() + finally: + os.sep = orig_os_sep + + +class TestRepositoryExists: + def setup(self): + self.tmpdir = mkdtemp() + self.repos = datasource.Repository(valid_baseurl(), self.tmpdir) + + def teardown(self): + rmtree(self.tmpdir) + del self.repos + + def test_ValidFile(self): + # Create local temp file + tmpfile = valid_textfile(self.tmpdir) + assert_(self.repos.exists(tmpfile)) + + def test_InvalidFile(self): + tmpfile = invalid_textfile(self.tmpdir) + assert_equal(self.repos.exists(tmpfile), False) + + def test_RemoveHTTPFile(self): + assert_(self.repos.exists(valid_httpurl())) + + def test_CachedHTTPFile(self): + localfile = valid_httpurl() + # Create a locally cached temp file with an URL based + # directory structure. This is similar to what Repository.open + # would do. + scheme, netloc, upath, pms, qry, frg = urlparse(localfile) + local_path = os.path.join(self.repos._destpath, netloc) + os.mkdir(local_path, 0o0700) + tmpfile = valid_textfile(local_path) + assert_(self.repos.exists(tmpfile)) + + +class TestOpenFunc: + def setup(self): + self.tmpdir = mkdtemp() + + def teardown(self): + rmtree(self.tmpdir) + + def test_DataSourceOpen(self): + local_file = valid_textfile(self.tmpdir) + # Test case where destpath is passed in + fp = datasource.open(local_file, destpath=self.tmpdir) + assert_(fp) + fp.close() + # Test case where default destpath is used + fp = datasource.open(local_file) + assert_(fp) + fp.close() + +def test_del_attr_handling(): + # DataSource __del__ can be called + # even if __init__ fails when the + # Exception object is caught by the + # caller as happens in refguide_check + # is_deprecated() function + + ds = datasource.DataSource() + # simulate failed __init__ by removing key attribute + # produced within __init__ and expected by __del__ + del ds._istmpdest + # should not raise an AttributeError if __del__ + # gracefully handles failed __init__: + ds.__del__() diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/test__iotools.py b/MLPY/Lib/site-packages/numpy/lib/tests/test__iotools.py new file mode 100644 index 0000000000000000000000000000000000000000..d9e78dd8ccc8b259f389c1517464c80e9e95365f --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/tests/test__iotools.py @@ -0,0 +1,353 @@ +import time +from datetime import date + +import numpy as np +from numpy.testing import ( + assert_, assert_equal, assert_allclose, assert_raises, + ) +from numpy.lib._iotools import ( + LineSplitter, NameValidator, StringConverter, + has_nested_fields, easy_dtype, flatten_dtype + ) + + +class TestLineSplitter: + "Tests the LineSplitter class." 
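LineSplitter is the tokenizer genfromtxt builds on: the constructor argument selects whitespace, single-character, or fixed-width splitting, and comments are stripped before tokenizing. The behaviors tested below can be reproduced directly (a small illustration):

from numpy.lib._iotools import LineSplitter

print(LineSplitter()(" 1 2 3 4  5 # test"))   # ['1', '2', '3', '4', '5']
print(LineSplitter(',')("1,2,3,4,,5"))        # keeps empty fields: [..., '', '5']
print(LineSplitter(3)(" 1  2  3"))            # width-3 fields: ['1', '2', '3']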
+ + def test_no_delimiter(self): + "Test LineSplitter w/o delimiter" + strg = " 1 2 3 4 5 # test" + test = LineSplitter()(strg) + assert_equal(test, ['1', '2', '3', '4', '5']) + test = LineSplitter('')(strg) + assert_equal(test, ['1', '2', '3', '4', '5']) + + def test_space_delimiter(self): + "Test space delimiter" + strg = " 1 2 3 4 5 # test" + test = LineSplitter(' ')(strg) + assert_equal(test, ['1', '2', '3', '4', '', '5']) + test = LineSplitter(' ')(strg) + assert_equal(test, ['1 2 3 4', '5']) + + def test_tab_delimiter(self): + "Test tab delimiter" + strg = " 1\t 2\t 3\t 4\t 5 6" + test = LineSplitter('\t')(strg) + assert_equal(test, ['1', '2', '3', '4', '5 6']) + strg = " 1 2\t 3 4\t 5 6" + test = LineSplitter('\t')(strg) + assert_equal(test, ['1 2', '3 4', '5 6']) + + def test_other_delimiter(self): + "Test LineSplitter on delimiter" + strg = "1,2,3,4,,5" + test = LineSplitter(',')(strg) + assert_equal(test, ['1', '2', '3', '4', '', '5']) + # + strg = " 1,2,3,4,,5 # test" + test = LineSplitter(',')(strg) + assert_equal(test, ['1', '2', '3', '4', '', '5']) + + # gh-11028 bytes comment/delimiters should get encoded + strg = b" 1,2,3,4,,5 % test" + test = LineSplitter(delimiter=b',', comments=b'%')(strg) + assert_equal(test, ['1', '2', '3', '4', '', '5']) + + def test_constant_fixed_width(self): + "Test LineSplitter w/ fixed-width fields" + strg = " 1 2 3 4 5 # test" + test = LineSplitter(3)(strg) + assert_equal(test, ['1', '2', '3', '4', '', '5', '']) + # + strg = " 1 3 4 5 6# test" + test = LineSplitter(20)(strg) + assert_equal(test, ['1 3 4 5 6']) + # + strg = " 1 3 4 5 6# test" + test = LineSplitter(30)(strg) + assert_equal(test, ['1 3 4 5 6']) + + def test_variable_fixed_width(self): + strg = " 1 3 4 5 6# test" + test = LineSplitter((3, 6, 6, 3))(strg) + assert_equal(test, ['1', '3', '4 5', '6']) + # + strg = " 1 3 4 5 6# test" + test = LineSplitter((6, 6, 9))(strg) + assert_equal(test, ['1', '3 4', '5 6']) + +# ----------------------------------------------------------------------------- + + +class TestNameValidator: + + def test_case_sensitivity(self): + "Test case sensitivity" + names = ['A', 'a', 'b', 'c'] + test = NameValidator().validate(names) + assert_equal(test, ['A', 'a', 'b', 'c']) + test = NameValidator(case_sensitive=False).validate(names) + assert_equal(test, ['A', 'A_1', 'B', 'C']) + test = NameValidator(case_sensitive='upper').validate(names) + assert_equal(test, ['A', 'A_1', 'B', 'C']) + test = NameValidator(case_sensitive='lower').validate(names) + assert_equal(test, ['a', 'a_1', 'b', 'c']) + + # check exceptions + assert_raises(ValueError, NameValidator, case_sensitive='foobar') + + def test_excludelist(self): + "Test excludelist" + names = ['dates', 'data', 'Other Data', 'mask'] + validator = NameValidator(excludelist=['dates', 'data', 'mask']) + test = validator.validate(names) + assert_equal(test, ['dates_', 'data_', 'Other_Data', 'mask_']) + + def test_missing_names(self): + "Test validate missing names" + namelist = ('a', 'b', 'c') + validator = NameValidator() + assert_equal(validator(namelist), ['a', 'b', 'c']) + namelist = ('', 'b', 'c') + assert_equal(validator(namelist), ['f0', 'b', 'c']) + namelist = ('a', 'b', '') + assert_equal(validator(namelist), ['a', 'b', 'f0']) + namelist = ('', 'f0', '') + assert_equal(validator(namelist), ['f1', 'f0', 'f2']) + + def test_validate_nb_names(self): + "Test validate nb names" + namelist = ('a', 'b', 'c') + validator = NameValidator() + assert_equal(validator(namelist, nbfields=1), ('a',)) + 
assert_equal(validator(namelist, nbfields=5, defaultfmt="g%i"), + ['a', 'b', 'c', 'g0', 'g1']) + + def test_validate_wo_names(self): + "Test validate no names" + namelist = None + validator = NameValidator() + assert_(validator(namelist) is None) + assert_equal(validator(namelist, nbfields=3), ['f0', 'f1', 'f2']) + +# ----------------------------------------------------------------------------- + + +def _bytes_to_date(s): + return date(*time.strptime(s, "%Y-%m-%d")[:3]) + + +class TestStringConverter: + "Test StringConverter" + + def test_creation(self): + "Test creation of a StringConverter" + converter = StringConverter(int, -99999) + assert_equal(converter._status, 1) + assert_equal(converter.default, -99999) + + def test_upgrade(self): + "Tests the upgrade method." + + converter = StringConverter() + assert_equal(converter._status, 0) + + # test int + assert_equal(converter.upgrade('0'), 0) + assert_equal(converter._status, 1) + + # On systems where long defaults to 32-bit, the statuses will be + # offset by one, so we check for this here. + import numpy.core.numeric as nx + status_offset = int(nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize) + + # test int > 2**32 + assert_equal(converter.upgrade('17179869184'), 17179869184) + assert_equal(converter._status, 1 + status_offset) + + # test float + assert_allclose(converter.upgrade('0.'), 0.0) + assert_equal(converter._status, 2 + status_offset) + + # test complex + assert_equal(converter.upgrade('0j'), complex('0j')) + assert_equal(converter._status, 3 + status_offset) + + # test str + # note that the longdouble type has been skipped, so the + # _status increases by 2. Everything should succeed with + # unicode conversion (8). + for s in ['a', b'a']: + res = converter.upgrade(s) + assert_(type(res) is str) + assert_equal(res, 'a') + assert_equal(converter._status, 8 + status_offset) + + def test_missing(self): + "Tests the use of missing values." 
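A note on the upgrade/_status bookkeeping these tests rely on: StringConverter holds a mapper of (dtype, conversion, default) entries, and upgrade() walks down it until a conversion accepts the token, so a converter only ever widens from bool toward str. Roughly:

from numpy.lib._iotools import StringConverter

conv = StringConverter()
print(conv.upgrade('17'))     # 17     -> settles on an integer conversion
print(conv.upgrade('3.5'))    # 3.5    -> int fails, widens to float
print(conv.upgrade('nope'))   # 'nope' -> numeric types fail, ends at str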
+ converter = StringConverter(missing_values=('missing', + 'missed')) + converter.upgrade('0') + assert_equal(converter('0'), 0) + assert_equal(converter(''), converter.default) + assert_equal(converter('missing'), converter.default) + assert_equal(converter('missed'), converter.default) + try: + converter('miss') + except ValueError: + pass + + def test_upgrademapper(self): + "Tests updatemapper" + dateparser = _bytes_to_date + _original_mapper = StringConverter._mapper[:] + try: + StringConverter.upgrade_mapper(dateparser, date(2000, 1, 1)) + convert = StringConverter(dateparser, date(2000, 1, 1)) + test = convert('2001-01-01') + assert_equal(test, date(2001, 1, 1)) + test = convert('2009-01-01') + assert_equal(test, date(2009, 1, 1)) + test = convert('') + assert_equal(test, date(2000, 1, 1)) + finally: + StringConverter._mapper = _original_mapper + + def test_string_to_object(self): + "Make sure that string-to-object functions are properly recognized" + old_mapper = StringConverter._mapper[:] # copy of list + conv = StringConverter(_bytes_to_date) + assert_equal(conv._mapper, old_mapper) + assert_(hasattr(conv, 'default')) + + def test_keep_default(self): + "Make sure we don't lose an explicit default" + converter = StringConverter(None, missing_values='', + default=-999) + converter.upgrade('3.14159265') + assert_equal(converter.default, -999) + assert_equal(converter.type, np.dtype(float)) + # + converter = StringConverter( + None, missing_values='', default=0) + converter.upgrade('3.14159265') + assert_equal(converter.default, 0) + assert_equal(converter.type, np.dtype(float)) + + def test_keep_default_zero(self): + "Check that we don't lose a default of 0" + converter = StringConverter(int, default=0, + missing_values="N/A") + assert_equal(converter.default, 0) + + def test_keep_missing_values(self): + "Check that we're not losing missing values" + converter = StringConverter(int, default=0, + missing_values="N/A") + assert_equal( + converter.missing_values, {'', 'N/A'}) + + def test_int64_dtype(self): + "Check that int64 integer types can be specified" + converter = StringConverter(np.int64, default=0) + val = "-9223372036854775807" + assert_(converter(val) == -9223372036854775807) + val = "9223372036854775807" + assert_(converter(val) == 9223372036854775807) + + def test_uint64_dtype(self): + "Check that uint64 integer types can be specified" + converter = StringConverter(np.uint64, default=0) + val = "9223372043271415339" + assert_(converter(val) == 9223372043271415339) + + +class TestMiscFunctions: + + def test_has_nested_dtype(self): + "Test has_nested_dtype" + ndtype = np.dtype(float) + assert_equal(has_nested_fields(ndtype), False) + ndtype = np.dtype([('A', '|S3'), ('B', float)]) + assert_equal(has_nested_fields(ndtype), False) + ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])]) + assert_equal(has_nested_fields(ndtype), True) + + def test_easy_dtype(self): + "Test ndtype on dtypes" + # Simple case + ndtype = float + assert_equal(easy_dtype(ndtype), np.dtype(float)) + # As string w/o names + ndtype = "i4, f8" + assert_equal(easy_dtype(ndtype), + np.dtype([('f0', "i4"), ('f1', "f8")])) + # As string w/o names but different default format + assert_equal(easy_dtype(ndtype, defaultfmt="field_%03i"), + np.dtype([('field_000', "i4"), ('field_001', "f8")])) + # As string w/ names + ndtype = "i4, f8" + assert_equal(easy_dtype(ndtype, names="a, b"), + np.dtype([('a', "i4"), ('b', "f8")])) + # As string w/ names (too many) + ndtype = "i4, f8" + 
assert_equal(easy_dtype(ndtype, names="a, b, c"), + np.dtype([('a', "i4"), ('b', "f8")])) + # As string w/ names (not enough) + ndtype = "i4, f8" + assert_equal(easy_dtype(ndtype, names=", b"), + np.dtype([('f0', "i4"), ('b', "f8")])) + # ... (with different default format) + assert_equal(easy_dtype(ndtype, names="a", defaultfmt="f%02i"), + np.dtype([('a', "i4"), ('f00', "f8")])) + # As list of tuples w/o names + ndtype = [('A', int), ('B', float)] + assert_equal(easy_dtype(ndtype), np.dtype([('A', int), ('B', float)])) + # As list of tuples w/ names + assert_equal(easy_dtype(ndtype, names="a,b"), + np.dtype([('a', int), ('b', float)])) + # As list of tuples w/ not enough names + assert_equal(easy_dtype(ndtype, names="a"), + np.dtype([('a', int), ('f0', float)])) + # As list of tuples w/ too many names + assert_equal(easy_dtype(ndtype, names="a,b,c"), + np.dtype([('a', int), ('b', float)])) + # As list of types w/o names + ndtype = (int, float, float) + assert_equal(easy_dtype(ndtype), + np.dtype([('f0', int), ('f1', float), ('f2', float)])) + # As list of types w names + ndtype = (int, float, float) + assert_equal(easy_dtype(ndtype, names="a, b, c"), + np.dtype([('a', int), ('b', float), ('c', float)])) + # As simple dtype w/ names + ndtype = np.dtype(float) + assert_equal(easy_dtype(ndtype, names="a, b, c"), + np.dtype([(_, float) for _ in ('a', 'b', 'c')])) + # As simple dtype w/o names (but multiple fields) + ndtype = np.dtype(float) + assert_equal( + easy_dtype(ndtype, names=['', '', ''], defaultfmt="f%02i"), + np.dtype([(_, float) for _ in ('f00', 'f01', 'f02')])) + + def test_flatten_dtype(self): + "Testing flatten_dtype" + # Standard dtype + dt = np.dtype([("a", "f8"), ("b", "f8")]) + dt_flat = flatten_dtype(dt) + assert_equal(dt_flat, [float, float]) + # Recursive dtype + dt = np.dtype([("a", [("aa", '|S1'), ("ab", '|S2')]), ("b", int)]) + dt_flat = flatten_dtype(dt) + assert_equal(dt_flat, [np.dtype('|S1'), np.dtype('|S2'), int]) + # dtype with shaped fields + dt = np.dtype([("a", (float, 2)), ("b", (int, 3))]) + dt_flat = flatten_dtype(dt) + assert_equal(dt_flat, [float, int]) + dt_flat = flatten_dtype(dt, True) + assert_equal(dt_flat, [float] * 2 + [int] * 3) + # dtype w/ titles + dt = np.dtype([(("a", "A"), "f8"), (("b", "B"), "f8")]) + dt_flat = flatten_dtype(dt) + assert_equal(dt_flat, [float, float]) diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/test__version.py b/MLPY/Lib/site-packages/numpy/lib/tests/test__version.py new file mode 100644 index 0000000000000000000000000000000000000000..679f764639799d06cd716985e26f23f07c03a670 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/tests/test__version.py @@ -0,0 +1,64 @@ +"""Tests for the NumpyVersion class. + +""" +from numpy.testing import assert_, assert_raises +from numpy.lib import NumpyVersion + + +def test_main_versions(): + assert_(NumpyVersion('1.8.0') == '1.8.0') + for ver in ['1.9.0', '2.0.0', '1.8.1', '10.0.1']: + assert_(NumpyVersion('1.8.0') < ver) + + for ver in ['1.7.0', '1.7.1', '0.9.9']: + assert_(NumpyVersion('1.8.0') > ver) + + +def test_version_1_point_10(): + # regression test for gh-2998. 
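The gh-2998 bug is the classic lexicographic trap these tests pin down; NumpyVersion compares version components numerically and also orders pre-releases and dev builds sensibly. For instance:

from numpy.lib import NumpyVersion

print('1.10.0' < '1.9.0')                   # True  -- plain strings mis-sort
print(NumpyVersion('1.10.0') < '1.9.0')     # False -- numeric comparison
print(NumpyVersion('1.8.0rc1') < '1.8.0')   # True  -- rc precedes the release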
+ assert_(NumpyVersion('1.9.0') < '1.10.0') + assert_(NumpyVersion('1.11.0') < '1.11.1') + assert_(NumpyVersion('1.11.0') == '1.11.0') + assert_(NumpyVersion('1.99.11') < '1.99.12') + + +def test_alpha_beta_rc(): + assert_(NumpyVersion('1.8.0rc1') == '1.8.0rc1') + for ver in ['1.8.0', '1.8.0rc2']: + assert_(NumpyVersion('1.8.0rc1') < ver) + + for ver in ['1.8.0a2', '1.8.0b3', '1.7.2rc4']: + assert_(NumpyVersion('1.8.0rc1') > ver) + + assert_(NumpyVersion('1.8.0b1') > '1.8.0a2') + + +def test_dev_version(): + assert_(NumpyVersion('1.9.0.dev-Unknown') < '1.9.0') + for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev-ffffffff']: + assert_(NumpyVersion('1.9.0.dev-f16acvda') < ver) + + assert_(NumpyVersion('1.9.0.dev-f16acvda') == '1.9.0.dev-11111111') + + +def test_dev_a_b_rc_mixed(): + assert_(NumpyVersion('1.9.0a2.dev-f16acvda') == '1.9.0a2.dev-11111111') + assert_(NumpyVersion('1.9.0a2.dev-6acvda54') < '1.9.0a2') + + +def test_dev0_version(): + assert_(NumpyVersion('1.9.0.dev0+Unknown') < '1.9.0') + for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev0+ffffffff']: + assert_(NumpyVersion('1.9.0.dev0+f16acvda') < ver) + + assert_(NumpyVersion('1.9.0.dev0+f16acvda') == '1.9.0.dev0+11111111') + + +def test_dev0_a_b_rc_mixed(): + assert_(NumpyVersion('1.9.0a2.dev0+f16acvda') == '1.9.0a2.dev0+11111111') + assert_(NumpyVersion('1.9.0a2.dev0+6acvda54') < '1.9.0a2') + + +def test_raises(): + for ver in ['1.9', '1,9.0', '1.7.x']: + assert_raises(ValueError, NumpyVersion, ver) diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/test_arraypad.py b/MLPY/Lib/site-packages/numpy/lib/tests/test_arraypad.py new file mode 100644 index 0000000000000000000000000000000000000000..23001446cfc524754e086e66e43c2bd91da1f1c5 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/tests/test_arraypad.py @@ -0,0 +1,1364 @@ +"""Tests for the array padding functions. 
+ +""" +import pytest + +import numpy as np +from numpy.testing import assert_array_equal, assert_allclose, assert_equal +from numpy.lib.arraypad import _as_pairs + + +_numeric_dtypes = ( + np.sctypes["uint"] + + np.sctypes["int"] + + np.sctypes["float"] + + np.sctypes["complex"] +) +_all_modes = { + 'constant': {'constant_values': 0}, + 'edge': {}, + 'linear_ramp': {'end_values': 0}, + 'maximum': {'stat_length': None}, + 'mean': {'stat_length': None}, + 'median': {'stat_length': None}, + 'minimum': {'stat_length': None}, + 'reflect': {'reflect_type': 'even'}, + 'symmetric': {'reflect_type': 'even'}, + 'wrap': {}, + 'empty': {} +} + + +class TestAsPairs: + def test_single_value(self): + """Test casting for a single value.""" + expected = np.array([[3, 3]] * 10) + for x in (3, [3], [[3]]): + result = _as_pairs(x, 10) + assert_equal(result, expected) + # Test with dtype=object + obj = object() + assert_equal( + _as_pairs(obj, 10), + np.array([[obj, obj]] * 10) + ) + + def test_two_values(self): + """Test proper casting for two different values.""" + # Broadcasting in the first dimension with numbers + expected = np.array([[3, 4]] * 10) + for x in ([3, 4], [[3, 4]]): + result = _as_pairs(x, 10) + assert_equal(result, expected) + # and with dtype=object + obj = object() + assert_equal( + _as_pairs(["a", obj], 10), + np.array([["a", obj]] * 10) + ) + + # Broadcasting in the second / last dimension with numbers + assert_equal( + _as_pairs([[3], [4]], 2), + np.array([[3, 3], [4, 4]]) + ) + # and with dtype=object + assert_equal( + _as_pairs([["a"], [obj]], 2), + np.array([["a", "a"], [obj, obj]]) + ) + + def test_with_none(self): + expected = ((None, None), (None, None), (None, None)) + assert_equal( + _as_pairs(None, 3, as_index=False), + expected + ) + assert_equal( + _as_pairs(None, 3, as_index=True), + expected + ) + + def test_pass_through(self): + """Test if `x` already matching desired output are passed through.""" + expected = np.arange(12).reshape((6, 2)) + assert_equal( + _as_pairs(expected, 6), + expected + ) + + def test_as_index(self): + """Test results if `as_index=True`.""" + assert_equal( + _as_pairs([2.6, 3.3], 10, as_index=True), + np.array([[3, 3]] * 10, dtype=np.intp) + ) + assert_equal( + _as_pairs([2.6, 4.49], 10, as_index=True), + np.array([[3, 4]] * 10, dtype=np.intp) + ) + for x in (-3, [-3], [[-3]], [-3, 4], [3, -4], [[-3, 4]], [[4, -3]], + [[1, 2]] * 9 + [[1, -2]]): + with pytest.raises(ValueError, match="negative values"): + _as_pairs(x, 10, as_index=True) + + def test_exceptions(self): + """Ensure faulty usage is discovered.""" + with pytest.raises(ValueError, match="more dimensions than allowed"): + _as_pairs([[[3]]], 10) + with pytest.raises(ValueError, match="could not be broadcast"): + _as_pairs([[1, 2], [3, 4]], 3) + with pytest.raises(ValueError, match="could not be broadcast"): + _as_pairs(np.ones((2, 3)), 3) + + +class TestConditionalShortcuts: + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_zero_padding_shortcuts(self, mode): + test = np.arange(120).reshape(4, 5, 6) + pad_amt = [(0, 0) for _ in test.shape] + assert_array_equal(test, np.pad(test, pad_amt, mode=mode)) + + @pytest.mark.parametrize("mode", ['maximum', 'mean', 'median', 'minimum',]) + def test_shallow_statistic_range(self, mode): + test = np.arange(120).reshape(4, 5, 6) + pad_amt = [(1, 1) for _ in test.shape] + assert_array_equal(np.pad(test, pad_amt, mode='edge'), + np.pad(test, pad_amt, mode=mode, stat_length=1)) + + @pytest.mark.parametrize("mode", ['maximum', 'mean', 
'median', 'minimum',]) + def test_clip_statistic_range(self, mode): + test = np.arange(30).reshape(5, 6) + pad_amt = [(3, 3) for _ in test.shape] + assert_array_equal(np.pad(test, pad_amt, mode=mode), + np.pad(test, pad_amt, mode=mode, stat_length=30)) + + +class TestStatistic: + def test_check_mean_stat_length(self): + a = np.arange(100).astype('f') + a = np.pad(a, ((25, 20), ), 'mean', stat_length=((2, 3), )) + b = np.array( + [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, + 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, + 0.5, 0.5, 0.5, 0.5, 0.5, + + 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., + 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., + 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., + 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., + 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., + 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., + 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., + 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., + + 98., 98., 98., 98., 98., 98., 98., 98., 98., 98., + 98., 98., 98., 98., 98., 98., 98., 98., 98., 98. + ]) + assert_array_equal(a, b) + + def test_check_maximum_1(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'maximum') + b = np.array( + [99, 99, 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, 99, 99] + ) + assert_array_equal(a, b) + + def test_check_maximum_2(self): + a = np.arange(100) + 1 + a = np.pad(a, (25, 20), 'maximum') + b = np.array( + [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 100, + + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, + + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100] + ) + assert_array_equal(a, b) + + def test_check_maximum_stat_length(self): + a = np.arange(100) + 1 + a = np.pad(a, (25, 20), 'maximum', stat_length=10) + b = np.array( + [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, + + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, + + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100] + ) + assert_array_equal(a, b) + 
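As the surrounding cases show, stat_length controls how many edge values feed the padding statistic (the default None means the whole axis). A compact demonstration:

import numpy as np

a = np.arange(10)
print(np.pad(a, 2, 'maximum'))                 # left pad uses max of all: 9
print(np.pad(a, 2, 'maximum', stat_length=3))  # left pad uses max(0, 1, 2): 2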
+ def test_check_minimum_1(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'minimum') + b = np.array( + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + ) + assert_array_equal(a, b) + + def test_check_minimum_2(self): + a = np.arange(100) + 2 + a = np.pad(a, (25, 20), 'minimum') + b = np.array( + [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, + + 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, + 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, + 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, + + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2] + ) + assert_array_equal(a, b) + + def test_check_minimum_stat_length(self): + a = np.arange(100) + 1 + a = np.pad(a, (25, 20), 'minimum', stat_length=10) + b = np.array( + [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, + + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, + + 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, + 91, 91, 91, 91, 91, 91, 91, 91, 91, 91] + ) + assert_array_equal(a, b) + + def test_check_median(self): + a = np.arange(100).astype('f') + a = np.pad(a, (25, 20), 'median') + b = np.array( + [49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, + + 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., + 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., + 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., + 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., + 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., + 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., + 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., + 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., + + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5] + ) + assert_array_equal(a, b) + + def test_check_median_01(self): + a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]]) + a = np.pad(a, 1, 'median') + b = np.array( + [[4, 4, 5, 4, 4], + + [3, 3, 1, 4, 3], + [5, 4, 5, 9, 5], + [8, 9, 8, 2, 8], + + [4, 4, 5, 4, 4]] + ) + assert_array_equal(a, b) + + def test_check_median_02(self): + a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]]) + a = np.pad(a.T, 1, 'median').T + b = np.array( + [[5, 4, 5, 4, 5], + + [3, 3, 1, 4, 3], + [5, 4, 
5, 9, 5], + [8, 9, 8, 2, 8], + + [5, 4, 5, 4, 5]] + ) + assert_array_equal(a, b) + + def test_check_median_stat_length(self): + a = np.arange(100).astype('f') + a[1] = 2. + a[97] = 96. + a = np.pad(a, (25, 20), 'median', stat_length=(3, 5)) + b = np.array( + [ 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., + 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., + 2., 2., 2., 2., 2., + + 0., 2., 2., 3., 4., 5., 6., 7., 8., 9., + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., + 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., + 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., + 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., + 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., + 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., + 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., + 90., 91., 92., 93., 94., 95., 96., 96., 98., 99., + + 96., 96., 96., 96., 96., 96., 96., 96., 96., 96., + 96., 96., 96., 96., 96., 96., 96., 96., 96., 96.] + ) + assert_array_equal(a, b) + + def test_check_mean_shape_one(self): + a = [[4, 5, 6]] + a = np.pad(a, (5, 7), 'mean', stat_length=2) + b = np.array( + [[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6]] + ) + assert_array_equal(a, b) + + def test_check_mean_2(self): + a = np.arange(100).astype('f') + a = np.pad(a, (25, 20), 'mean') + b = np.array( + [49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, + + 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., + 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., + 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., + 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., + 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., + 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., + 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., + 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., + + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5] + ) + assert_array_equal(a, b) + + @pytest.mark.parametrize("mode", [ + "mean", + "median", + "minimum", + "maximum" + ]) + def test_same_prepend_append(self, mode): + """ Test that appended and prepended values are equal """ + # This test is constructed to trigger floating point rounding errors in + # a way that caused gh-11216 for mode=='mean' + a = np.array([-1, 2, -1]) + np.array([0, 1e-12, 0], dtype=np.float64) + a = np.pad(a, (1, 1), mode) + assert_equal(a[0], a[-1]) + + @pytest.mark.parametrize("mode", ["mean", "median", "minimum", "maximum"]) + @pytest.mark.parametrize( + "stat_length", [-2, (-2,), (3, -1), ((5, 2), (-2, 3)), ((-4,), (2,))] + ) + def test_check_negative_stat_length(self, mode, stat_length): + arr = np.arange(30).reshape((6, 5)) + match = "index can't contain negative values" + with 
pytest.raises(ValueError, match=match): + np.pad(arr, 2, mode, stat_length=stat_length) + + def test_simple_stat_length(self): + a = np.arange(30) + a = np.reshape(a, (6, 5)) + a = np.pad(a, ((2, 3), (3, 2)), mode='mean', stat_length=(3,)) + b = np.array( + [[6, 6, 6, 5, 6, 7, 8, 9, 8, 8], + [6, 6, 6, 5, 6, 7, 8, 9, 8, 8], + + [1, 1, 1, 0, 1, 2, 3, 4, 3, 3], + [6, 6, 6, 5, 6, 7, 8, 9, 8, 8], + [11, 11, 11, 10, 11, 12, 13, 14, 13, 13], + [16, 16, 16, 15, 16, 17, 18, 19, 18, 18], + [21, 21, 21, 20, 21, 22, 23, 24, 23, 23], + [26, 26, 26, 25, 26, 27, 28, 29, 28, 28], + + [21, 21, 21, 20, 21, 22, 23, 24, 23, 23], + [21, 21, 21, 20, 21, 22, 23, 24, 23, 23], + [21, 21, 21, 20, 21, 22, 23, 24, 23, 23]] + ) + assert_array_equal(a, b) + + @pytest.mark.filterwarnings("ignore:Mean of empty slice:RuntimeWarning") + @pytest.mark.filterwarnings( + "ignore:invalid value encountered in (true_divide|double_scalars):" + "RuntimeWarning" + ) + @pytest.mark.parametrize("mode", ["mean", "median"]) + def test_zero_stat_length_valid(self, mode): + arr = np.pad([1., 2.], (1, 2), mode, stat_length=0) + expected = np.array([np.nan, 1., 2., np.nan, np.nan]) + assert_equal(arr, expected) + + @pytest.mark.parametrize("mode", ["minimum", "maximum"]) + def test_zero_stat_length_invalid(self, mode): + match = "stat_length of 0 yields no value for padding" + with pytest.raises(ValueError, match=match): + np.pad([1., 2.], 0, mode, stat_length=0) + with pytest.raises(ValueError, match=match): + np.pad([1., 2.], 0, mode, stat_length=(1, 0)) + with pytest.raises(ValueError, match=match): + np.pad([1., 2.], 1, mode, stat_length=0) + with pytest.raises(ValueError, match=match): + np.pad([1., 2.], 1, mode, stat_length=(1, 0)) + + +class TestConstant: + def test_check_constant(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'constant', constant_values=(10, 20)) + b = np.array( + [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, + 20, 20, 20, 20, 20, 20, 20, 20, 20, 20] + ) + assert_array_equal(a, b) + + def test_check_constant_zeros(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'constant') + b = np.array( + [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + ) + assert_array_equal(a, b) + + def test_check_constant_float(self): + # If input array is int, but constant_values are float, the dtype of + # the array to be padded is kept + arr = np.arange(30).reshape(5, 6) + test = np.pad(arr, (1, 2), mode='constant', + constant_values=1.1) + expected = np.array( + [[ 1, 1, 1, 1, 1, 1, 1, 1, 1], + + [ 1, 0, 1, 2, 3, 4, 5, 
1, 1], + [ 1, 6, 7, 8, 9, 10, 11, 1, 1], + [ 1, 12, 13, 14, 15, 16, 17, 1, 1], + [ 1, 18, 19, 20, 21, 22, 23, 1, 1], + [ 1, 24, 25, 26, 27, 28, 29, 1, 1], + + [ 1, 1, 1, 1, 1, 1, 1, 1, 1], + [ 1, 1, 1, 1, 1, 1, 1, 1, 1]] + ) + assert_allclose(test, expected) + + def test_check_constant_float2(self): + # If input array is float, and constant_values are float, the dtype of + # the array to be padded is kept - here retaining the float constants + arr = np.arange(30).reshape(5, 6) + arr_float = arr.astype(np.float64) + test = np.pad(arr_float, ((1, 2), (1, 2)), mode='constant', + constant_values=1.1) + expected = np.array( + [[ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1], + + [ 1.1, 0. , 1. , 2. , 3. , 4. , 5. , 1.1, 1.1], + [ 1.1, 6. , 7. , 8. , 9. , 10. , 11. , 1.1, 1.1], + [ 1.1, 12. , 13. , 14. , 15. , 16. , 17. , 1.1, 1.1], + [ 1.1, 18. , 19. , 20. , 21. , 22. , 23. , 1.1, 1.1], + [ 1.1, 24. , 25. , 26. , 27. , 28. , 29. , 1.1, 1.1], + + [ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1], + [ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1]] + ) + assert_allclose(test, expected) + + def test_check_constant_float3(self): + a = np.arange(100, dtype=float) + a = np.pad(a, (25, 20), 'constant', constant_values=(-1.1, -1.2)) + b = np.array( + [-1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, + -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, + -1.1, -1.1, -1.1, -1.1, -1.1, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, + -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2] + ) + assert_allclose(a, b) + + def test_check_constant_odd_pad_amount(self): + arr = np.arange(30).reshape(5, 6) + test = np.pad(arr, ((1,), (2,)), mode='constant', + constant_values=3) + expected = np.array( + [[ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3], + + [ 3, 3, 0, 1, 2, 3, 4, 5, 3, 3], + [ 3, 3, 6, 7, 8, 9, 10, 11, 3, 3], + [ 3, 3, 12, 13, 14, 15, 16, 17, 3, 3], + [ 3, 3, 18, 19, 20, 21, 22, 23, 3, 3], + [ 3, 3, 24, 25, 26, 27, 28, 29, 3, 3], + + [ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]] + ) + assert_allclose(test, expected) + + def test_check_constant_pad_2d(self): + arr = np.arange(4).reshape(2, 2) + test = np.lib.pad(arr, ((1, 2), (1, 3)), mode='constant', + constant_values=((1, 2), (3, 4))) + expected = np.array( + [[3, 1, 1, 4, 4, 4], + [3, 0, 1, 4, 4, 4], + [3, 2, 3, 4, 4, 4], + [3, 2, 2, 4, 4, 4], + [3, 2, 2, 4, 4, 4]] + ) + assert_allclose(test, expected) + + def test_check_large_integers(self): + uint64_max = 2 ** 64 - 1 + arr = np.full(5, uint64_max, dtype=np.uint64) + test = np.pad(arr, 1, mode="constant", constant_values=arr.min()) + expected = np.full(7, uint64_max, dtype=np.uint64) + assert_array_equal(test, expected) + + int64_max = 2 ** 63 - 1 + arr = np.full(5, int64_max, dtype=np.int64) + test = np.pad(arr, 1, mode="constant", constant_values=arr.min()) + expected = np.full(7, int64_max, dtype=np.int64) + assert_array_equal(test, expected) + + def test_check_object_array(self): + arr = np.empty(1, dtype=object) + obj_a = object() + arr[0] = obj_a + obj_b = object() + obj_c = object() + arr = np.pad(arr, pad_width=1, mode='constant', + constant_values=(obj_b, obj_c)) + + 
expected = np.empty((3,), dtype=object) + expected[0] = obj_b + expected[1] = obj_a + expected[2] = obj_c + + assert_array_equal(arr, expected) + + def test_pad_empty_dimension(self): + arr = np.zeros((3, 0, 2)) + result = np.pad(arr, [(0,), (2,), (1,)], mode="constant") + assert result.shape == (3, 4, 4) + + +class TestLinearRamp: + def test_check_simple(self): + a = np.arange(100).astype('f') + a = np.pad(a, (25, 20), 'linear_ramp', end_values=(4, 5)) + b = np.array( + [4.00, 3.84, 3.68, 3.52, 3.36, 3.20, 3.04, 2.88, 2.72, 2.56, + 2.40, 2.24, 2.08, 1.92, 1.76, 1.60, 1.44, 1.28, 1.12, 0.96, + 0.80, 0.64, 0.48, 0.32, 0.16, + + 0.00, 1.00, 2.00, 3.00, 4.00, 5.00, 6.00, 7.00, 8.00, 9.00, + 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, + 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, + 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, + 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, 49.0, + 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, + 60.0, 61.0, 62.0, 63.0, 64.0, 65.0, 66.0, 67.0, 68.0, 69.0, + 70.0, 71.0, 72.0, 73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0, + 80.0, 81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 89.0, + 90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0, 97.0, 98.0, 99.0, + + 94.3, 89.6, 84.9, 80.2, 75.5, 70.8, 66.1, 61.4, 56.7, 52.0, + 47.3, 42.6, 37.9, 33.2, 28.5, 23.8, 19.1, 14.4, 9.7, 5.] + ) + assert_allclose(a, b, rtol=1e-5, atol=1e-5) + + def test_check_2d(self): + arr = np.arange(20).reshape(4, 5).astype(np.float64) + test = np.pad(arr, (2, 2), mode='linear_ramp', end_values=(0, 0)) + expected = np.array( + [[0., 0., 0., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 0.5, 1., 1.5, 2., 1., 0.], + [0., 0., 0., 1., 2., 3., 4., 2., 0.], + [0., 2.5, 5., 6., 7., 8., 9., 4.5, 0.], + [0., 5., 10., 11., 12., 13., 14., 7., 0.], + [0., 7.5, 15., 16., 17., 18., 19., 9.5, 0.], + [0., 3.75, 7.5, 8., 8.5, 9., 9.5, 4.75, 0.], + [0., 0., 0., 0., 0., 0., 0., 0., 0.]]) + assert_allclose(test, expected) + + @pytest.mark.xfail(exceptions=(AssertionError,)) + def test_object_array(self): + from fractions import Fraction + arr = np.array([Fraction(1, 2), Fraction(-1, 2)]) + actual = np.pad(arr, (2, 3), mode='linear_ramp', end_values=0) + + # deliberately chosen to have a non-power-of-2 denominator such that + # rounding to floats causes a failure. + expected = np.array([ + Fraction( 0, 12), + Fraction( 3, 12), + Fraction( 6, 12), + Fraction(-6, 12), + Fraction(-4, 12), + Fraction(-2, 12), + Fraction(-0, 12), + ]) + assert_equal(actual, expected) + + def test_end_values(self): + """Ensure that end values are exact.""" + a = np.pad(np.ones(10).reshape(2, 5), (223, 123), mode="linear_ramp") + assert_equal(a[:, 0], 0.) + assert_equal(a[:, -1], 0.) + assert_equal(a[0, :], 0.) + assert_equal(a[-1, :], 0.) + + @pytest.mark.parametrize("dtype", _numeric_dtypes) + def test_negative_difference(self, dtype): + """ + Check correct behavior of unsigned dtypes if there is a negative + difference between the edge to pad and `end_values`. Check both cases + to be independent of implementation. Test behavior for all other dtypes + in case dtype casting interferes with complex dtypes. See gh-14191. 
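+
+        For example (illustrating the first case below),
+        np.pad(np.array([3], dtype=np.uint8), 3, mode="linear_ramp",
+        end_values=0) must ramp down to 0 and back up without wrapping
+        around at the unsigned boundary.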
+ """ + x = np.array([3], dtype=dtype) + result = np.pad(x, 3, mode="linear_ramp", end_values=0) + expected = np.array([0, 1, 2, 3, 2, 1, 0], dtype=dtype) + assert_equal(result, expected) + + x = np.array([0], dtype=dtype) + result = np.pad(x, 3, mode="linear_ramp", end_values=3) + expected = np.array([3, 2, 1, 0, 1, 2, 3], dtype=dtype) + assert_equal(result, expected) + + +class TestReflect: + def test_check_simple(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'reflect') + b = np.array( + [25, 24, 23, 22, 21, 20, 19, 18, 17, 16, + 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, + 5, 4, 3, 2, 1, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 98, 97, 96, 95, 94, 93, 92, 91, 90, 89, + 88, 87, 86, 85, 84, 83, 82, 81, 80, 79] + ) + assert_array_equal(a, b) + + def test_check_odd_method(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'reflect', reflect_type='odd') + b = np.array( + [-25, -24, -23, -22, -21, -20, -19, -18, -17, -16, + -15, -14, -13, -12, -11, -10, -9, -8, -7, -6, + -5, -4, -3, -2, -1, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, + 110, 111, 112, 113, 114, 115, 116, 117, 118, 119] + ) + assert_array_equal(a, b) + + def test_check_large_pad(self): + a = [[4, 5, 6], [6, 7, 8]] + a = np.pad(a, (5, 7), 'reflect') + b = np.array( + [[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]] + ) + assert_array_equal(a, b) + + def test_check_shape(self): + a = [[4, 5, 6]] + a = np.pad(a, (5, 7), 'reflect') + b = np.array( + [[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]] + ) + assert_array_equal(a, b) + + def 
test_check_01(self): + a = np.pad([1, 2, 3], 2, 'reflect') + b = np.array([3, 2, 1, 2, 3, 2, 1]) + assert_array_equal(a, b) + + def test_check_02(self): + a = np.pad([1, 2, 3], 3, 'reflect') + b = np.array([2, 3, 2, 1, 2, 3, 2, 1, 2]) + assert_array_equal(a, b) + + def test_check_03(self): + a = np.pad([1, 2, 3], 4, 'reflect') + b = np.array([1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3]) + assert_array_equal(a, b) + + +class TestEmptyArray: + """Check how padding behaves on arrays with an empty dimension.""" + + @pytest.mark.parametrize( + # Keep parametrization ordered, otherwise pytest-xdist might believe + # that different tests were collected during parallelization + "mode", sorted(_all_modes.keys() - {"constant", "empty"}) + ) + def test_pad_empty_dimension(self, mode): + match = ("can't extend empty axis 0 using modes other than 'constant' " + "or 'empty'") + with pytest.raises(ValueError, match=match): + np.pad([], 4, mode=mode) + with pytest.raises(ValueError, match=match): + np.pad(np.ndarray(0), 4, mode=mode) + with pytest.raises(ValueError, match=match): + np.pad(np.zeros((0, 3)), ((1,), (0,)), mode=mode) + + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_pad_non_empty_dimension(self, mode): + result = np.pad(np.ones((2, 0, 2)), ((3,), (0,), (1,)), mode=mode) + assert result.shape == (8, 0, 4) + + +class TestSymmetric: + def test_check_simple(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'symmetric') + b = np.array( + [24, 23, 22, 21, 20, 19, 18, 17, 16, 15, + 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, + 4, 3, 2, 1, 0, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 99, 98, 97, 96, 95, 94, 93, 92, 91, 90, + 89, 88, 87, 86, 85, 84, 83, 82, 81, 80] + ) + assert_array_equal(a, b) + + def test_check_odd_method(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'symmetric', reflect_type='odd') + b = np.array( + [-24, -23, -22, -21, -20, -19, -18, -17, -16, -15, + -14, -13, -12, -11, -10, -9, -8, -7, -6, -5, + -4, -3, -2, -1, 0, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, + 109, 110, 111, 112, 113, 114, 115, 116, 117, 118] + ) + assert_array_equal(a, b) + + def test_check_large_pad(self): + a = [[4, 5, 6], [6, 7, 8]] + a = np.pad(a, (5, 7), 'symmetric') + b = np.array( + [[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + 
[7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]] + ) + + assert_array_equal(a, b) + + def test_check_large_pad_odd(self): + a = [[4, 5, 6], [6, 7, 8]] + a = np.pad(a, (5, 7), 'symmetric', reflect_type='odd') + b = np.array( + [[-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6], + [-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6], + [-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8], + [-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8], + [ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10], + + [ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10], + [ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12], + + [ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12], + [ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14], + [ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14], + [ 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16], + [ 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16], + [ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18], + [ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18]] + ) + assert_array_equal(a, b) + + def test_check_shape(self): + a = [[4, 5, 6]] + a = np.pad(a, (5, 7), 'symmetric') + b = np.array( + [[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]] + ) + assert_array_equal(a, b) + + def test_check_01(self): + a = np.pad([1, 2, 3], 2, 'symmetric') + b = np.array([2, 1, 1, 2, 3, 3, 2]) + assert_array_equal(a, b) + + def test_check_02(self): + a = np.pad([1, 2, 3], 3, 'symmetric') + b = np.array([3, 2, 1, 1, 2, 3, 3, 2, 1]) + assert_array_equal(a, b) + + def test_check_03(self): + a = np.pad([1, 2, 3], 6, 'symmetric') + b = np.array([1, 2, 3, 3, 2, 1, 1, 2, 3, 3, 2, 1, 1, 2, 3]) + assert_array_equal(a, b) + + +class TestWrap: + def test_check_simple(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'wrap') + b = np.array( + [75, 76, 77, 78, 79, 80, 81, 82, 83, 84, + 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, + 95, 96, 97, 98, 99, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] + ) + assert_array_equal(a, b) + + def test_check_large_pad(self): + a = np.arange(12) + a = np.reshape(a, (3, 4)) + a = np.pad(a, (10, 12), 'wrap') + b = np.array( + [[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 
8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11]] + ) + assert_array_equal(a, b) + + def test_check_01(self): + a = np.pad([1, 2, 3], 3, 'wrap') + b = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3]) + assert_array_equal(a, b) + + def test_check_02(self): + a = np.pad([1, 2, 3], 4, 'wrap') + b = np.array([3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1]) + assert_array_equal(a, b) + + def test_pad_with_zero(self): + a = np.ones((3, 5)) + b = np.pad(a, (0, 5), mode="wrap") + assert_array_equal(a, b[:-5, :-5]) + + def test_repeated_wrapping(self): + """ + Check wrapping on each side individually if the wrapped area is longer + than the original array. + """ + a = np.arange(5) + b = np.pad(a, (12, 0), mode="wrap") + assert_array_equal(np.r_[a, a, a, a][3:], b) + + a = np.arange(5) + b = np.pad(a, (0, 12), mode="wrap") + assert_array_equal(np.r_[a, a, a, a][:-3], b) + + +class TestEdge: + def test_check_simple(self): + a = np.arange(12) + a = np.reshape(a, (4, 3)) + a = np.pad(a, ((2, 3), (3, 2)), 'edge') + b = np.array( + [[0, 0, 0, 0, 1, 2, 2, 2], + [0, 0, 0, 0, 1, 2, 2, 2], + + [0, 0, 0, 0, 1, 2, 2, 2], + [3, 3, 3, 3, 4, 5, 5, 5], + [6, 6, 6, 6, 7, 8, 8, 8], + [9, 9, 9, 9, 10, 11, 11, 11], + + [9, 9, 9, 9, 10, 11, 11, 11], + [9, 9, 9, 9, 10, 11, 11, 11], + [9, 9, 9, 9, 10, 11, 11, 11]] + ) + assert_array_equal(a, b) + + def test_check_width_shape_1_2(self): + # Check a pad_width of the form ((1, 2),). + # Regression test for issue gh-7808. 
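+        # A lone (before, after) pair is broadcast to every axis, so for an
+        # n-dimensional input, np.pad(a, ((1, 2),), 'edge') should behave
+        # like np.pad(a, ((1, 2),) * a.ndim, 'edge'); the assertions below
+        # check this directly for a 1-D input and via that equivalence for
+        # 2-D and 3-D inputs.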
+ a = np.array([1, 2, 3]) + padded = np.pad(a, ((1, 2),), 'edge') + expected = np.array([1, 1, 2, 3, 3, 3]) + assert_array_equal(padded, expected) + + a = np.array([[1, 2, 3], [4, 5, 6]]) + padded = np.pad(a, ((1, 2),), 'edge') + expected = np.pad(a, ((1, 2), (1, 2)), 'edge') + assert_array_equal(padded, expected) + + a = np.arange(24).reshape(2, 3, 4) + padded = np.pad(a, ((1, 2),), 'edge') + expected = np.pad(a, ((1, 2), (1, 2), (1, 2)), 'edge') + assert_array_equal(padded, expected) + + +class TestEmpty: + def test_simple(self): + arr = np.arange(24).reshape(4, 6) + result = np.pad(arr, [(2, 3), (3, 1)], mode="empty") + assert result.shape == (9, 10) + assert_equal(arr, result[2:-3, 3:-1]) + + def test_pad_empty_dimension(self): + arr = np.zeros((3, 0, 2)) + result = np.pad(arr, [(0,), (2,), (1,)], mode="empty") + assert result.shape == (3, 4, 4) + + +def test_legacy_vector_functionality(): + def _padwithtens(vector, pad_width, iaxis, kwargs): + vector[:pad_width[0]] = 10 + vector[-pad_width[1]:] = 10 + + a = np.arange(6).reshape(2, 3) + a = np.pad(a, 2, _padwithtens) + b = np.array( + [[10, 10, 10, 10, 10, 10, 10], + [10, 10, 10, 10, 10, 10, 10], + + [10, 10, 0, 1, 2, 10, 10], + [10, 10, 3, 4, 5, 10, 10], + + [10, 10, 10, 10, 10, 10, 10], + [10, 10, 10, 10, 10, 10, 10]] + ) + assert_array_equal(a, b) + + +def test_unicode_mode(): + a = np.pad([1], 2, mode=u'constant') + b = np.array([0, 0, 1, 0, 0]) + assert_array_equal(a, b) + + +@pytest.mark.parametrize("mode", ["edge", "symmetric", "reflect", "wrap"]) +def test_object_input(mode): + # Regression test for issue gh-11395. + a = np.full((4, 3), fill_value=None) + pad_amt = ((2, 3), (3, 2)) + b = np.full((9, 8), fill_value=None) + assert_array_equal(np.pad(a, pad_amt, mode=mode), b) + + +class TestPadWidth: + @pytest.mark.parametrize("pad_width", [ + (4, 5, 6, 7), + ((1,), (2,), (3,)), + ((1, 2), (3, 4), (5, 6)), + ((3, 4, 5), (0, 1, 2)), + ]) + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_misshaped_pad_width(self, pad_width, mode): + arr = np.arange(30).reshape((6, 5)) + match = "operands could not be broadcast together" + with pytest.raises(ValueError, match=match): + np.pad(arr, pad_width, mode) + + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_misshaped_pad_width_2(self, mode): + arr = np.arange(30).reshape((6, 5)) + match = ("input operand has more dimensions than allowed by the axis " + "remapping") + with pytest.raises(ValueError, match=match): + np.pad(arr, (((3,), (4,), (5,)), ((0,), (1,), (2,))), mode) + + @pytest.mark.parametrize( + "pad_width", [-2, (-2,), (3, -1), ((5, 2), (-2, 3)), ((-4,), (2,))]) + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_negative_pad_width(self, pad_width, mode): + arr = np.arange(30).reshape((6, 5)) + match = "index can't contain negative values" + with pytest.raises(ValueError, match=match): + np.pad(arr, pad_width, mode) + + @pytest.mark.parametrize("pad_width, dtype", [ + ("3", None), + ("word", None), + (None, None), + (object(), None), + (3.4, None), + (((2, 3, 4), (3, 2)), object), + (complex(1, -1), None), + (((-2.1, 3), (3, 2)), None), + ]) + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_bad_type(self, pad_width, dtype, mode): + arr = np.arange(30).reshape((6, 5)) + match = "`pad_width` must be of integral type." 
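+        # Each parametrized pad_width is rejected because it cannot be
+        # interpreted losslessly as an integer; e.g. np.pad(arr, 3.4, mode)
+        # should raise TypeError rather than silently truncate to 3.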
+        if dtype is not None:
+            # avoid DeprecationWarning when not specifying dtype
+            with pytest.raises(TypeError, match=match):
+                np.pad(arr, np.array(pad_width, dtype=dtype), mode)
+        else:
+            with pytest.raises(TypeError, match=match):
+                np.pad(arr, pad_width, mode)
+            with pytest.raises(TypeError, match=match):
+                np.pad(arr, np.array(pad_width), mode)
+
+    def test_pad_width_as_ndarray(self):
+        a = np.arange(12)
+        a = np.reshape(a, (4, 3))
+        a = np.pad(a, np.array(((2, 3), (3, 2))), 'edge')
+        b = np.array(
+            [[0, 0, 0, 0, 1, 2, 2, 2],
+             [0, 0, 0, 0, 1, 2, 2, 2],
+
+             [0, 0, 0, 0, 1, 2, 2, 2],
+             [3, 3, 3, 3, 4, 5, 5, 5],
+             [6, 6, 6, 6, 7, 8, 8, 8],
+             [9, 9, 9, 9, 10, 11, 11, 11],
+
+             [9, 9, 9, 9, 10, 11, 11, 11],
+             [9, 9, 9, 9, 10, 11, 11, 11],
+             [9, 9, 9, 9, 10, 11, 11, 11]]
+            )
+        assert_array_equal(a, b)
+
+    @pytest.mark.parametrize("pad_width", [0, (0, 0), ((0, 0), (0, 0))])
+    @pytest.mark.parametrize("mode", _all_modes.keys())
+    def test_zero_pad_width(self, pad_width, mode):
+        arr = np.arange(30).reshape(6, 5)
+        assert_array_equal(arr, np.pad(arr, pad_width, mode=mode))
+
+
+@pytest.mark.parametrize("mode", _all_modes.keys())
+def test_kwargs(mode):
+    """Test behavior of pad's kwargs for the given mode."""
+    allowed = _all_modes[mode]
+    not_allowed = {}
+    for kwargs in _all_modes.values():
+        if kwargs != allowed:
+            not_allowed.update(kwargs)
+    # Test if allowed keyword arguments pass
+    np.pad([1, 2, 3], 1, mode, **allowed)
+    # Test if prohibited keyword arguments of other modes raise an error
+    for key, value in not_allowed.items():
+        match = "unsupported keyword arguments for mode '{}'".format(mode)
+        with pytest.raises(ValueError, match=match):
+            np.pad([1, 2, 3], 1, mode, **{key: value})
+
+
+def test_constant_zero_default():
+    arr = np.array([1, 1])
+    assert_array_equal(np.pad(arr, 2), [0, 0, 1, 1, 0, 0])
+
+
+@pytest.mark.parametrize("mode", [1, "const", object(), None, True, False])
+def test_unsupported_mode(mode):
+    match = "mode '{}' is not supported".format(mode)
+    with pytest.raises(ValueError, match=match):
+        np.pad([1, 2, 3], 4, mode=mode)
+
+
+@pytest.mark.parametrize("mode", _all_modes.keys())
+def test_non_contiguous_array(mode):
+    arr = np.arange(24).reshape(4, 6)[::2, ::2]
+    result = np.pad(arr, (2, 3), mode)
+    assert result.shape == (7, 8)
+    assert_equal(result[2:-3, 2:-3], arr)
+
+
+@pytest.mark.parametrize("mode", _all_modes.keys())
+def test_memory_layout_persistence(mode):
+    """Test if C and F order is preserved for all pad modes."""
+    x = np.ones((5, 10), order='C')
+    assert np.pad(x, 5, mode).flags["C_CONTIGUOUS"]
+    x = np.ones((5, 10), order='F')
+    assert np.pad(x, 5, mode).flags["F_CONTIGUOUS"]
+
+
+@pytest.mark.parametrize("dtype", _numeric_dtypes)
+@pytest.mark.parametrize("mode", _all_modes.keys())
+def test_dtype_persistence(dtype, mode):
+    arr = np.zeros((3, 2, 1), dtype=dtype)
+    result = np.pad(arr, 1, mode=mode)
+    assert result.dtype == dtype
diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/test_arraysetops.py b/MLPY/Lib/site-packages/numpy/lib/tests/test_arraysetops.py
new file mode 100644
index 0000000000000000000000000000000000000000..6107baab694ef8538b45e7476d2c255060c8a6dc
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/lib/tests/test_arraysetops.py
@@ -0,0 +1,767 @@
+"""Test functions for 1D array set operations.
+ +""" +import numpy as np + +from numpy.testing import (assert_array_equal, assert_equal, + assert_raises, assert_raises_regex) +from numpy.lib.arraysetops import ( + ediff1d, intersect1d, setxor1d, union1d, setdiff1d, unique, in1d, isin + ) +import pytest + + +class TestSetOps: + + def test_intersect1d(self): + # unique inputs + a = np.array([5, 7, 1, 2]) + b = np.array([2, 4, 3, 1, 5]) + + ec = np.array([1, 2, 5]) + c = intersect1d(a, b, assume_unique=True) + assert_array_equal(c, ec) + + # non-unique inputs + a = np.array([5, 5, 7, 1, 2]) + b = np.array([2, 1, 4, 3, 3, 1, 5]) + + ed = np.array([1, 2, 5]) + c = intersect1d(a, b) + assert_array_equal(c, ed) + assert_array_equal([], intersect1d([], [])) + + def test_intersect1d_array_like(self): + # See gh-11772 + class Test: + def __array__(self): + return np.arange(3) + + a = Test() + res = intersect1d(a, a) + assert_array_equal(res, a) + res = intersect1d([1, 2, 3], [1, 2, 3]) + assert_array_equal(res, [1, 2, 3]) + + def test_intersect1d_indices(self): + # unique inputs + a = np.array([1, 2, 3, 4]) + b = np.array([2, 1, 4, 6]) + c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True) + ee = np.array([1, 2, 4]) + assert_array_equal(c, ee) + assert_array_equal(a[i1], ee) + assert_array_equal(b[i2], ee) + + # non-unique inputs + a = np.array([1, 2, 2, 3, 4, 3, 2]) + b = np.array([1, 8, 4, 2, 2, 3, 2, 3]) + c, i1, i2 = intersect1d(a, b, return_indices=True) + ef = np.array([1, 2, 3, 4]) + assert_array_equal(c, ef) + assert_array_equal(a[i1], ef) + assert_array_equal(b[i2], ef) + + # non1d, unique inputs + a = np.array([[2, 4, 5, 6], [7, 8, 1, 15]]) + b = np.array([[3, 2, 7, 6], [10, 12, 8, 9]]) + c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True) + ui1 = np.unravel_index(i1, a.shape) + ui2 = np.unravel_index(i2, b.shape) + ea = np.array([2, 6, 7, 8]) + assert_array_equal(ea, a[ui1]) + assert_array_equal(ea, b[ui2]) + + # non1d, not assumed to be uniqueinputs + a = np.array([[2, 4, 5, 6, 6], [4, 7, 8, 7, 2]]) + b = np.array([[3, 2, 7, 7], [10, 12, 8, 7]]) + c, i1, i2 = intersect1d(a, b, return_indices=True) + ui1 = np.unravel_index(i1, a.shape) + ui2 = np.unravel_index(i2, b.shape) + ea = np.array([2, 7, 8]) + assert_array_equal(ea, a[ui1]) + assert_array_equal(ea, b[ui2]) + + def test_setxor1d(self): + a = np.array([5, 7, 1, 2]) + b = np.array([2, 4, 3, 1, 5]) + + ec = np.array([3, 4, 7]) + c = setxor1d(a, b) + assert_array_equal(c, ec) + + a = np.array([1, 2, 3]) + b = np.array([6, 5, 4]) + + ec = np.array([1, 2, 3, 4, 5, 6]) + c = setxor1d(a, b) + assert_array_equal(c, ec) + + a = np.array([1, 8, 2, 3]) + b = np.array([6, 5, 4, 8]) + + ec = np.array([1, 2, 3, 4, 5, 6]) + c = setxor1d(a, b) + assert_array_equal(c, ec) + + assert_array_equal([], setxor1d([], [])) + + def test_ediff1d(self): + zero_elem = np.array([]) + one_elem = np.array([1]) + two_elem = np.array([1, 2]) + + assert_array_equal([], ediff1d(zero_elem)) + assert_array_equal([0], ediff1d(zero_elem, to_begin=0)) + assert_array_equal([0], ediff1d(zero_elem, to_end=0)) + assert_array_equal([-1, 0], ediff1d(zero_elem, to_begin=-1, to_end=0)) + assert_array_equal([], ediff1d(one_elem)) + assert_array_equal([1], ediff1d(two_elem)) + assert_array_equal([7, 1, 9], ediff1d(two_elem, to_begin=7, to_end=9)) + assert_array_equal([5, 6, 1, 7, 8], + ediff1d(two_elem, to_begin=[5, 6], to_end=[7, 8])) + assert_array_equal([1, 9], ediff1d(two_elem, to_end=9)) + assert_array_equal([1, 7, 8], ediff1d(two_elem, to_end=[7, 8])) + assert_array_equal([7, 1], 
ediff1d(two_elem, to_begin=7)) + assert_array_equal([5, 6, 1], ediff1d(two_elem, to_begin=[5, 6])) + + @pytest.mark.parametrize("ary, prepend, append, expected", [ + # should fail because trying to cast + # np.nan standard floating point value + # into an integer array: + (np.array([1, 2, 3], dtype=np.int64), + None, + np.nan, + 'to_end'), + # should fail because attempting + # to downcast to int type: + (np.array([1, 2, 3], dtype=np.int64), + np.array([5, 7, 2], dtype=np.float32), + None, + 'to_begin'), + # should fail because attempting to cast + # two special floating point values + # to integers (on both sides of ary), + # `to_begin` is in the error message as the impl checks this first: + (np.array([1., 3., 9.], dtype=np.int8), + np.nan, + np.nan, + 'to_begin'), + ]) + def test_ediff1d_forbidden_type_casts(self, ary, prepend, append, expected): + # verify resolution of gh-11490 + + # specifically, raise an appropriate + # Exception when attempting to append or + # prepend with an incompatible type + msg = 'dtype of `{}` must be compatible'.format(expected) + with assert_raises_regex(TypeError, msg): + ediff1d(ary=ary, + to_end=append, + to_begin=prepend) + + @pytest.mark.parametrize( + "ary,prepend,append,expected", + [ + (np.array([1, 2, 3], dtype=np.int16), + 2**16, # will be cast to int16 under same kind rule. + 2**16 + 4, + np.array([0, 1, 1, 4], dtype=np.int16)), + (np.array([1, 2, 3], dtype=np.float32), + np.array([5], dtype=np.float64), + None, + np.array([5, 1, 1], dtype=np.float32)), + (np.array([1, 2, 3], dtype=np.int32), + 0, + 0, + np.array([0, 1, 1, 0], dtype=np.int32)), + (np.array([1, 2, 3], dtype=np.int64), + 3, + -9, + np.array([3, 1, 1, -9], dtype=np.int64)), + ] + ) + def test_ediff1d_scalar_handling(self, + ary, + prepend, + append, + expected): + # maintain backwards-compatibility + # of scalar prepend / append behavior + # in ediff1d following fix for gh-11490 + actual = np.ediff1d(ary=ary, + to_end=append, + to_begin=prepend) + assert_equal(actual, expected) + assert actual.dtype == expected.dtype + + def test_isin(self): + # the tests for in1d cover most of isin's behavior + # if in1d is removed, would need to change those tests to test + # isin instead. + def _isin_slow(a, b): + b = np.asarray(b).flatten().tolist() + return a in b + isin_slow = np.vectorize(_isin_slow, otypes=[bool], excluded={1}) + + def assert_isin_equal(a, b): + x = isin(a, b) + y = isin_slow(a, b) + assert_array_equal(x, y) + + # multidimensional arrays in both arguments + a = np.arange(24).reshape([2, 3, 4]) + b = np.array([[10, 20, 30], [0, 1, 3], [11, 22, 33]]) + assert_isin_equal(a, b) + + # array-likes as both arguments + c = [(9, 8), (7, 6)] + d = (9, 7) + assert_isin_equal(c, d) + + # zero-d array: + f = np.array(3) + assert_isin_equal(f, b) + assert_isin_equal(a, f) + assert_isin_equal(f, f) + + # scalar: + assert_isin_equal(5, b) + assert_isin_equal(a, 6) + assert_isin_equal(5, 6) + + # empty array-like: + x = [] + assert_isin_equal(x, b) + assert_isin_equal(a, x) + assert_isin_equal(x, x) + + def test_in1d(self): + # we use two different sizes for the b array here to test the + # two different paths in in1d(). 
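+        # For reference: in this NumPy version, in1d() picks between a
+        # boolean-mask loop over the elements of `b` and a
+        # concatenate-and-sort implementation based on the relative input
+        # sizes (roughly, the loop is used while
+        # len(b) < 10 * len(a) ** 0.145), so multiplying `b` by 10 is meant
+        # to push it across that cutoff and exercise both code paths.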
+        for mult in (1, 10):
+            # One check without np.array to make sure lists are handled
+            # correctly
+            a = [5, 7, 1, 2]
+            b = [2, 4, 3, 1, 5] * mult
+            ec = np.array([True, False, True, True])
+            c = in1d(a, b, assume_unique=True)
+            assert_array_equal(c, ec)
+
+            a[0] = 8
+            ec = np.array([False, False, True, True])
+            c = in1d(a, b, assume_unique=True)
+            assert_array_equal(c, ec)
+
+            a[0], a[3] = 4, 8
+            ec = np.array([True, False, True, False])
+            c = in1d(a, b, assume_unique=True)
+            assert_array_equal(c, ec)
+
+            a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5])
+            b = [2, 3, 4] * mult
+            ec = [False, True, False, True, True, True, True, True, True,
+                  False, True, False, False, False]
+            c = in1d(a, b)
+            assert_array_equal(c, ec)
+
+            b = b + [5, 5, 4] * mult
+            ec = [True, True, True, True, True, True, True, True, True, True,
+                  True, False, True, True]
+            c = in1d(a, b)
+            assert_array_equal(c, ec)
+
+            a = np.array([5, 7, 1, 2])
+            b = np.array([2, 4, 3, 1, 5] * mult)
+            ec = np.array([True, False, True, True])
+            c = in1d(a, b)
+            assert_array_equal(c, ec)
+
+            a = np.array([5, 7, 1, 1, 2])
+            b = np.array([2, 4, 3, 3, 1, 5] * mult)
+            ec = np.array([True, False, True, True, True])
+            c = in1d(a, b)
+            assert_array_equal(c, ec)
+
+            a = np.array([5, 5])
+            b = np.array([2, 2] * mult)
+            ec = np.array([False, False])
+            c = in1d(a, b)
+            assert_array_equal(c, ec)
+
+            a = np.array([5])
+            b = np.array([2])
+            ec = np.array([False])
+            c = in1d(a, b)
+            assert_array_equal(c, ec)
+
+            assert_array_equal(in1d([], []), [])
+
+    def test_in1d_char_array(self):
+        a = np.array(['a', 'b', 'c', 'd', 'e', 'c', 'e', 'b'])
+        b = np.array(['a', 'c'])
+
+        ec = np.array([True, False, True, False, False, True, False, False])
+        c = in1d(a, b)
+
+        assert_array_equal(c, ec)
+
+    def test_in1d_invert(self):
+        "Test in1d's invert parameter"
+        # We use two different sizes for the b array here to test the
+        # two different paths in in1d().
+        for mult in (1, 10):
+            a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5])
+            b = [2, 3, 4] * mult
+            assert_array_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True))
+
+    def test_in1d_ravel(self):
+        # Test that in1d ravels its input arrays. This is not documented
+        # behavior, however; the test is to ensure consistency.
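+        # Concretely: both operands are flattened first, so each result
+        # below is a flat boolean array of a.size == 6 elements (in C
+        # order), regardless of the 2-D shapes of `a`, `b` and `long_b`.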
+ a = np.arange(6).reshape(2, 3) + b = np.arange(3, 9).reshape(3, 2) + long_b = np.arange(3, 63).reshape(30, 2) + ec = np.array([False, False, False, True, True, True]) + + assert_array_equal(in1d(a, b, assume_unique=True), ec) + assert_array_equal(in1d(a, b, assume_unique=False), ec) + assert_array_equal(in1d(a, long_b, assume_unique=True), ec) + assert_array_equal(in1d(a, long_b, assume_unique=False), ec) + + def test_in1d_first_array_is_object(self): + ar1 = [None] + ar2 = np.array([1]*10) + expected = np.array([False]) + result = np.in1d(ar1, ar2) + assert_array_equal(result, expected) + + def test_in1d_second_array_is_object(self): + ar1 = 1 + ar2 = np.array([None]*10) + expected = np.array([False]) + result = np.in1d(ar1, ar2) + assert_array_equal(result, expected) + + def test_in1d_both_arrays_are_object(self): + ar1 = [None] + ar2 = np.array([None]*10) + expected = np.array([True]) + result = np.in1d(ar1, ar2) + assert_array_equal(result, expected) + + def test_in1d_both_arrays_have_structured_dtype(self): + # Test arrays of a structured data type containing an integer field + # and a field of dtype `object` allowing for arbitrary Python objects + dt = np.dtype([('field1', int), ('field2', object)]) + ar1 = np.array([(1, None)], dtype=dt) + ar2 = np.array([(1, None)]*10, dtype=dt) + expected = np.array([True]) + result = np.in1d(ar1, ar2) + assert_array_equal(result, expected) + + def test_in1d_with_arrays_containing_tuples(self): + ar1 = np.array([(1,), 2], dtype=object) + ar2 = np.array([(1,), 2], dtype=object) + expected = np.array([True, True]) + result = np.in1d(ar1, ar2) + assert_array_equal(result, expected) + result = np.in1d(ar1, ar2, invert=True) + assert_array_equal(result, np.invert(expected)) + + # An integer is added at the end of the array to make sure + # that the array builder will create the array with tuples + # and after it's created the integer is removed. + # There's a bug in the array constructor that doesn't handle + # tuples properly and adding the integer fixes that. 
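+        # Illustrating the workaround described above: without the trailing
+        # scalar, np.array([(1,), (2, 1)], dtype=object) can try to build a
+        # nested array rather than a 1-D array holding two tuple objects;
+        # the ragged final element forces a true 1-D object array, and the
+        # [:-1] slice then drops it again.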
+ ar1 = np.array([(1,), (2, 1), 1], dtype=object) + ar1 = ar1[:-1] + ar2 = np.array([(1,), (2, 1), 1], dtype=object) + ar2 = ar2[:-1] + expected = np.array([True, True]) + result = np.in1d(ar1, ar2) + assert_array_equal(result, expected) + result = np.in1d(ar1, ar2, invert=True) + assert_array_equal(result, np.invert(expected)) + + ar1 = np.array([(1,), (2, 3), 1], dtype=object) + ar1 = ar1[:-1] + ar2 = np.array([(1,), 2], dtype=object) + expected = np.array([True, False]) + result = np.in1d(ar1, ar2) + assert_array_equal(result, expected) + result = np.in1d(ar1, ar2, invert=True) + assert_array_equal(result, np.invert(expected)) + + def test_union1d(self): + a = np.array([5, 4, 7, 1, 2]) + b = np.array([2, 4, 3, 3, 2, 1, 5]) + + ec = np.array([1, 2, 3, 4, 5, 7]) + c = union1d(a, b) + assert_array_equal(c, ec) + + # Tests gh-10340, arguments to union1d should be + # flattened if they are not already 1D + x = np.array([[0, 1, 2], [3, 4, 5]]) + y = np.array([0, 1, 2, 3, 4]) + ez = np.array([0, 1, 2, 3, 4, 5]) + z = union1d(x, y) + assert_array_equal(z, ez) + + assert_array_equal([], union1d([], [])) + + def test_setdiff1d(self): + a = np.array([6, 5, 4, 7, 1, 2, 7, 4]) + b = np.array([2, 4, 3, 3, 2, 1, 5]) + + ec = np.array([6, 7]) + c = setdiff1d(a, b) + assert_array_equal(c, ec) + + a = np.arange(21) + b = np.arange(19) + ec = np.array([19, 20]) + c = setdiff1d(a, b) + assert_array_equal(c, ec) + + assert_array_equal([], setdiff1d([], [])) + a = np.array((), np.uint32) + assert_equal(setdiff1d(a, []).dtype, np.uint32) + + def test_setdiff1d_unique(self): + a = np.array([3, 2, 1]) + b = np.array([7, 5, 2]) + expected = np.array([3, 1]) + actual = setdiff1d(a, b, assume_unique=True) + assert_equal(actual, expected) + + def test_setdiff1d_char_array(self): + a = np.array(['a', 'b', 'c']) + b = np.array(['a', 'b', 's']) + assert_array_equal(setdiff1d(a, b), np.array(['c'])) + + def test_manyways(self): + a = np.array([5, 7, 1, 2, 8]) + b = np.array([9, 8, 2, 4, 3, 1, 5]) + + c1 = setxor1d(a, b) + aux1 = intersect1d(a, b) + aux2 = union1d(a, b) + c2 = setdiff1d(aux2, aux1) + assert_array_equal(c1, c2) + + +class TestUnique: + + def test_unique_1d(self): + + def check_all(a, b, i1, i2, c, dt): + base_msg = 'check {0} failed for type {1}' + + msg = base_msg.format('values', dt) + v = unique(a) + assert_array_equal(v, b, msg) + + msg = base_msg.format('return_index', dt) + v, j = unique(a, True, False, False) + assert_array_equal(v, b, msg) + assert_array_equal(j, i1, msg) + + msg = base_msg.format('return_inverse', dt) + v, j = unique(a, False, True, False) + assert_array_equal(v, b, msg) + assert_array_equal(j, i2, msg) + + msg = base_msg.format('return_counts', dt) + v, j = unique(a, False, False, True) + assert_array_equal(v, b, msg) + assert_array_equal(j, c, msg) + + msg = base_msg.format('return_index and return_inverse', dt) + v, j1, j2 = unique(a, True, True, False) + assert_array_equal(v, b, msg) + assert_array_equal(j1, i1, msg) + assert_array_equal(j2, i2, msg) + + msg = base_msg.format('return_index and return_counts', dt) + v, j1, j2 = unique(a, True, False, True) + assert_array_equal(v, b, msg) + assert_array_equal(j1, i1, msg) + assert_array_equal(j2, c, msg) + + msg = base_msg.format('return_inverse and return_counts', dt) + v, j1, j2 = unique(a, False, True, True) + assert_array_equal(v, b, msg) + assert_array_equal(j1, i2, msg) + assert_array_equal(j2, c, msg) + + msg = base_msg.format(('return_index, return_inverse ' + 'and return_counts'), dt) + v, j1, j2, j3 = unique(a, True, 
True, True) + assert_array_equal(v, b, msg) + assert_array_equal(j1, i1, msg) + assert_array_equal(j2, i2, msg) + assert_array_equal(j3, c, msg) + + a = [5, 7, 1, 2, 1, 5, 7]*10 + b = [1, 2, 5, 7] + i1 = [2, 3, 0, 1] + i2 = [2, 3, 0, 1, 0, 2, 3]*10 + c = np.multiply([2, 1, 2, 2], 10) + + # test for numeric arrays + types = [] + types.extend(np.typecodes['AllInteger']) + types.extend(np.typecodes['AllFloat']) + types.append('datetime64[D]') + types.append('timedelta64[D]') + for dt in types: + aa = np.array(a, dt) + bb = np.array(b, dt) + check_all(aa, bb, i1, i2, c, dt) + + # test for object arrays + dt = 'O' + aa = np.empty(len(a), dt) + aa[:] = a + bb = np.empty(len(b), dt) + bb[:] = b + check_all(aa, bb, i1, i2, c, dt) + + # test for structured arrays + dt = [('', 'i'), ('', 'i')] + aa = np.array(list(zip(a, a)), dt) + bb = np.array(list(zip(b, b)), dt) + check_all(aa, bb, i1, i2, c, dt) + + # test for ticket #2799 + aa = [1. + 0.j, 1 - 1.j, 1] + assert_array_equal(np.unique(aa), [1. - 1.j, 1. + 0.j]) + + # test for ticket #4785 + a = [(1, 2), (1, 2), (2, 3)] + unq = [1, 2, 3] + inv = [0, 1, 0, 1, 1, 2] + a1 = unique(a) + assert_array_equal(a1, unq) + a2, a2_inv = unique(a, return_inverse=True) + assert_array_equal(a2, unq) + assert_array_equal(a2_inv, inv) + + # test for chararrays with return_inverse (gh-5099) + a = np.chararray(5) + a[...] = '' + a2, a2_inv = np.unique(a, return_inverse=True) + assert_array_equal(a2_inv, np.zeros(5)) + + # test for ticket #9137 + a = [] + a1_idx = np.unique(a, return_index=True)[1] + a2_inv = np.unique(a, return_inverse=True)[1] + a3_idx, a3_inv = np.unique(a, return_index=True, + return_inverse=True)[1:] + assert_equal(a1_idx.dtype, np.intp) + assert_equal(a2_inv.dtype, np.intp) + assert_equal(a3_idx.dtype, np.intp) + assert_equal(a3_inv.dtype, np.intp) + + # test for ticket 2111 - float + a = [2.0, np.nan, 1.0, np.nan] + ua = [1.0, 2.0, np.nan] + ua_idx = [2, 0, 1] + ua_inv = [1, 2, 0, 2] + ua_cnt = [1, 1, 2] + assert_equal(np.unique(a), ua) + assert_equal(np.unique(a, return_index=True), (ua, ua_idx)) + assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv)) + assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) + + # test for ticket 2111 - complex + a = [2.0-1j, np.nan, 1.0+1j, complex(0.0, np.nan), complex(1.0, np.nan)] + ua = [1.0+1j, 2.0-1j, complex(0.0, np.nan)] + ua_idx = [2, 0, 3] + ua_inv = [1, 2, 0, 2, 2] + ua_cnt = [1, 1, 3] + assert_equal(np.unique(a), ua) + assert_equal(np.unique(a, return_index=True), (ua, ua_idx)) + assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv)) + assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) + + # test for ticket 2111 - datetime64 + nat = np.datetime64('nat') + a = [np.datetime64('2020-12-26'), nat, np.datetime64('2020-12-24'), nat] + ua = [np.datetime64('2020-12-24'), np.datetime64('2020-12-26'), nat] + ua_idx = [2, 0, 1] + ua_inv = [1, 2, 0, 2] + ua_cnt = [1, 1, 2] + assert_equal(np.unique(a), ua) + assert_equal(np.unique(a, return_index=True), (ua, ua_idx)) + assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv)) + assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) + + # test for ticket 2111 - timedelta + nat = np.timedelta64('nat') + a = [np.timedelta64(1, 'D'), nat, np.timedelta64(1, 'h'), nat] + ua = [np.timedelta64(1, 'h'), np.timedelta64(1, 'D'), nat] + ua_idx = [2, 0, 1] + ua_inv = [1, 2, 0, 2] + ua_cnt = [1, 1, 2] + assert_equal(np.unique(a), ua) + assert_equal(np.unique(a, return_index=True), (ua, ua_idx)) + assert_equal(np.unique(a, 
return_inverse=True), (ua, ua_inv)) + assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) + + # test for gh-19300 + all_nans = [np.nan] * 4 + ua = [np.nan] + ua_idx = [0] + ua_inv = [0, 0, 0, 0] + ua_cnt = [4] + assert_equal(np.unique(all_nans), ua) + assert_equal(np.unique(all_nans, return_index=True), (ua, ua_idx)) + assert_equal(np.unique(all_nans, return_inverse=True), (ua, ua_inv)) + assert_equal(np.unique(all_nans, return_counts=True), (ua, ua_cnt)) + + def test_unique_axis_errors(self): + assert_raises(TypeError, self._run_axis_tests, object) + assert_raises(TypeError, self._run_axis_tests, + [('a', int), ('b', object)]) + + assert_raises(np.AxisError, unique, np.arange(10), axis=2) + assert_raises(np.AxisError, unique, np.arange(10), axis=-2) + + def test_unique_axis_list(self): + msg = "Unique failed on list of lists" + inp = [[0, 1, 0], [0, 1, 0]] + inp_arr = np.asarray(inp) + assert_array_equal(unique(inp, axis=0), unique(inp_arr, axis=0), msg) + assert_array_equal(unique(inp, axis=1), unique(inp_arr, axis=1), msg) + + def test_unique_axis(self): + types = [] + types.extend(np.typecodes['AllInteger']) + types.extend(np.typecodes['AllFloat']) + types.append('datetime64[D]') + types.append('timedelta64[D]') + types.append([('a', int), ('b', int)]) + types.append([('a', int), ('b', float)]) + + for dtype in types: + self._run_axis_tests(dtype) + + msg = 'Non-bitwise-equal booleans test failed' + data = np.arange(10, dtype=np.uint8).reshape(-1, 2).view(bool) + result = np.array([[False, True], [True, True]], dtype=bool) + assert_array_equal(unique(data, axis=0), result, msg) + + msg = 'Negative zero equality test failed' + data = np.array([[-0.0, 0.0], [0.0, -0.0], [-0.0, 0.0], [0.0, -0.0]]) + result = np.array([[-0.0, 0.0]]) + assert_array_equal(unique(data, axis=0), result, msg) + + @pytest.mark.parametrize("axis", [0, -1]) + def test_unique_1d_with_axis(self, axis): + x = np.array([4, 3, 2, 3, 2, 1, 2, 2]) + uniq = unique(x, axis=axis) + assert_array_equal(uniq, [1, 2, 3, 4]) + + def test_unique_axis_zeros(self): + # issue 15559 + single_zero = np.empty(shape=(2, 0), dtype=np.int8) + uniq, idx, inv, cnt = unique(single_zero, axis=0, return_index=True, + return_inverse=True, return_counts=True) + + # there's 1 element of shape (0,) along axis 0 + assert_equal(uniq.dtype, single_zero.dtype) + assert_array_equal(uniq, np.empty(shape=(1, 0))) + assert_array_equal(idx, np.array([0])) + assert_array_equal(inv, np.array([0, 0])) + assert_array_equal(cnt, np.array([2])) + + # there's 0 elements of shape (2,) along axis 1 + uniq, idx, inv, cnt = unique(single_zero, axis=1, return_index=True, + return_inverse=True, return_counts=True) + + assert_equal(uniq.dtype, single_zero.dtype) + assert_array_equal(uniq, np.empty(shape=(2, 0))) + assert_array_equal(idx, np.array([])) + assert_array_equal(inv, np.array([])) + assert_array_equal(cnt, np.array([])) + + # test a "complicated" shape + shape = (0, 2, 0, 3, 0, 4, 0) + multiple_zeros = np.empty(shape=shape) + for axis in range(len(shape)): + expected_shape = list(shape) + if shape[axis] == 0: + expected_shape[axis] = 0 + else: + expected_shape[axis] = 1 + + assert_array_equal(unique(multiple_zeros, axis=axis), + np.empty(shape=expected_shape)) + + def test_unique_masked(self): + # issue 8664 + x = np.array([64, 0, 1, 2, 3, 63, 63, 0, 0, 0, 1, 2, 0, 63, 0], + dtype='uint8') + y = np.ma.masked_equal(x, 0) + + v = np.unique(y) + v2, i, c = np.unique(y, return_index=True, return_counts=True) + + msg = 'Unique returned different results 
when asked for index' + assert_array_equal(v.data, v2.data, msg) + assert_array_equal(v.mask, v2.mask, msg) + + def test_unique_sort_order_with_axis(self): + # These tests fail if sorting along axis is done by treating subarrays + # as unsigned byte strings. See gh-10495. + fmt = "sort order incorrect for integer type '%s'" + for dt in 'bhilq': + a = np.array([[-1], [0]], dt) + b = np.unique(a, axis=0) + assert_array_equal(a, b, fmt % dt) + + def _run_axis_tests(self, dtype): + data = np.array([[0, 1, 0, 0], + [1, 0, 0, 0], + [0, 1, 0, 0], + [1, 0, 0, 0]]).astype(dtype) + + msg = 'Unique with 1d array and axis=0 failed' + result = np.array([0, 1]) + assert_array_equal(unique(data), result.astype(dtype), msg) + + msg = 'Unique with 2d array and axis=0 failed' + result = np.array([[0, 1, 0, 0], [1, 0, 0, 0]]) + assert_array_equal(unique(data, axis=0), result.astype(dtype), msg) + + msg = 'Unique with 2d array and axis=1 failed' + result = np.array([[0, 0, 1], [0, 1, 0], [0, 0, 1], [0, 1, 0]]) + assert_array_equal(unique(data, axis=1), result.astype(dtype), msg) + + msg = 'Unique with 3d array and axis=2 failed' + data3d = np.array([[[1, 1], + [1, 0]], + [[0, 1], + [0, 0]]]).astype(dtype) + result = np.take(data3d, [1, 0], axis=2) + assert_array_equal(unique(data3d, axis=2), result, msg) + + uniq, idx, inv, cnt = unique(data, axis=0, return_index=True, + return_inverse=True, return_counts=True) + msg = "Unique's return_index=True failed with axis=0" + assert_array_equal(data[idx], uniq, msg) + msg = "Unique's return_inverse=True failed with axis=0" + assert_array_equal(uniq[inv], data) + msg = "Unique's return_counts=True failed with axis=0" + assert_array_equal(cnt, np.array([2, 2]), msg) + + uniq, idx, inv, cnt = unique(data, axis=1, return_index=True, + return_inverse=True, return_counts=True) + msg = "Unique's return_index=True failed with axis=1" + assert_array_equal(data[:, idx], uniq) + msg = "Unique's return_inverse=True failed with axis=1" + assert_array_equal(uniq[:, inv], data) + msg = "Unique's return_counts=True failed with axis=1" + assert_array_equal(cnt, np.array([2, 1, 1]), msg) diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/test_arrayterator.py b/MLPY/Lib/site-packages/numpy/lib/tests/test_arrayterator.py new file mode 100644 index 0000000000000000000000000000000000000000..e8ba83ae827d20faa70981d9f034e9ed6087d576 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/tests/test_arrayterator.py @@ -0,0 +1,46 @@ +from operator import mul +from functools import reduce + +import numpy as np +from numpy.random import randint +from numpy.lib import Arrayterator +from numpy.testing import assert_ + + +def test(): + np.random.seed(np.arange(10)) + + # Create a random array + ndims = randint(5)+1 + shape = tuple(randint(10)+1 for dim in range(ndims)) + els = reduce(mul, shape) + a = np.arange(els) + a.shape = shape + + buf_size = randint(2*els) + b = Arrayterator(a, buf_size) + + # Check that each block has at most ``buf_size`` elements + for block in b: + assert_(len(block.flat) <= (buf_size or els)) + + # Check that all elements are iterated correctly + assert_(list(b.flat) == list(a.flat)) + + # Slice arrayterator + start = [randint(dim) for dim in shape] + stop = [randint(dim)+1 for dim in shape] + step = [randint(dim)+1 for dim in shape] + slice_ = tuple(slice(*t) for t in zip(start, stop, step)) + c = b[slice_] + d = a[slice_] + + # Check that each block has at most ``buf_size`` elements + for block in c: + assert_(len(block.flat) <= (buf_size or els)) + + # Check that the 
arrayterator is sliced correctly + assert_(np.all(c.__array__() == d)) + + # Check that all elements are iterated correctly + assert_(list(c.flat) == list(d.flat)) diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/test_financial_expired.py b/MLPY/Lib/site-packages/numpy/lib/tests/test_financial_expired.py new file mode 100644 index 0000000000000000000000000000000000000000..277bc102fc3a29d3bb5be4305c5390135e60d3bf --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/tests/test_financial_expired.py @@ -0,0 +1,13 @@ +import sys +import pytest +import numpy as np + + +@pytest.mark.skipif(sys.version_info[:2] < (3, 7), + reason="requires python 3.7 or higher") +def test_financial_expired(): + match = 'NEP 32' + with pytest.warns(DeprecationWarning, match=match): + func = np.fv + with pytest.raises(RuntimeError, match=match): + func(1, 2, 3) diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/test_format.py b/MLPY/Lib/site-packages/numpy/lib/tests/test_format.py new file mode 100644 index 0000000000000000000000000000000000000000..7511b581231324c82107ebf8371e65ebec6e58e7 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/tests/test_format.py @@ -0,0 +1,962 @@ +# doctest +r''' Test the .npy file format. + +Set up: + + >>> import sys + >>> from io import BytesIO + >>> from numpy.lib import format + >>> + >>> scalars = [ + ... np.uint8, + ... np.int8, + ... np.uint16, + ... np.int16, + ... np.uint32, + ... np.int32, + ... np.uint64, + ... np.int64, + ... np.float32, + ... np.float64, + ... np.complex64, + ... np.complex128, + ... object, + ... ] + >>> + >>> basic_arrays = [] + >>> + >>> for scalar in scalars: + ... for endian in '<>': + ... dtype = np.dtype(scalar).newbyteorder(endian) + ... basic = np.arange(15).astype(dtype) + ... basic_arrays.extend([ + ... np.array([], dtype=dtype), + ... np.array(10, dtype=dtype), + ... basic, + ... basic.reshape((3,5)), + ... basic.reshape((3,5)).T, + ... basic.reshape((3,5))[::-1,::2], + ... ]) + ... + >>> + >>> Pdescr = [ + ... ('x', 'i4', (2,)), + ... ('y', 'f8', (2, 2)), + ... ('z', 'u1')] + >>> + >>> + >>> PbufferT = [ + ... ([3,2], [[6.,4.],[6.,4.]], 8), + ... ([4,3], [[7.,5.],[7.,5.]], 9), + ... ] + >>> + >>> + >>> Ndescr = [ + ... ('x', 'i4', (2,)), + ... ('Info', [ + ... ('value', 'c16'), + ... ('y2', 'f8'), + ... ('Info2', [ + ... ('name', 'S2'), + ... ('value', 'c16', (2,)), + ... ('y3', 'f8', (2,)), + ... ('z3', 'u4', (2,))]), + ... ('name', 'S2'), + ... ('z2', 'b1')]), + ... ('color', 'S2'), + ... ('info', [ + ... ('Name', 'U8'), + ... ('Value', 'c16')]), + ... ('y', 'f8', (2, 2)), + ... ('z', 'u1')] + >>> + >>> + >>> NbufferT = [ + ... ([3,2], (6j, 6., ('nn', [6j,4j], [6.,4.], [1,2]), 'NN', True), 'cc', ('NN', 6j), [[6.,4.],[6.,4.]], 8), + ... ([4,3], (7j, 7., ('oo', [7j,5j], [7.,5.], [2,1]), 'OO', False), 'dd', ('OO', 7j), [[7.,5.],[7.,5.]], 9), + ... ] + >>> + >>> + >>> record_arrays = [ + ... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')), + ... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')), + ... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')), + ... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')), + ... ] + +Test the magic string writing. + + >>> format.magic(1, 0) + '\x93NUMPY\x01\x00' + >>> format.magic(0, 0) + '\x93NUMPY\x00\x00' + >>> format.magic(255, 255) + '\x93NUMPY\xff\xff' + >>> format.magic(2, 5) + '\x93NUMPY\x02\x05' + +Test the magic string reading. 
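+The magic string is the prefix b'\x93NUMPY' followed by one byte each for
+the major and minor format version, so read_magic simply parses back what
+magic wrote: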
+ + >>> format.read_magic(BytesIO(format.magic(1, 0))) + (1, 0) + >>> format.read_magic(BytesIO(format.magic(0, 0))) + (0, 0) + >>> format.read_magic(BytesIO(format.magic(255, 255))) + (255, 255) + >>> format.read_magic(BytesIO(format.magic(2, 5))) + (2, 5) + +Test the header writing. + + >>> for arr in basic_arrays + record_arrays: + ... f = BytesIO() + ... format.write_array_header_1_0(f, arr) # XXX: arr is not a dict, items gets called on it + ... print(repr(f.getvalue())) + ... + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'u2', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>u2', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'i2', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>i2', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'u4', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>u4', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'i4', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>i4', 'fortran_order': False, 
'shape': (15,)} \n" + "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>i4', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'u8', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>u8', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'i8', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>i8', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'f4', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>f4', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'f8', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>f8', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'c8', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>c8', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'c16', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>c16', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n" + "v\x00{'descr': [('x', 'i4', (2,)), ('y', '>f8', 
(2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n" + "\x16\x02{'descr': [('x', '>i4', (2,)),\n ('Info',\n [('value', '>c16'),\n ('y2', '>f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '>c16', (2,)),\n ('y3', '>f8', (2,)),\n ('z3', '>u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '>U8'), ('Value', '>c16')]),\n ('y', '>f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n" +''' +import sys +import os +import shutil +import tempfile +import warnings +import pytest +from io import BytesIO + +import numpy as np +from numpy.testing import ( + assert_, assert_array_equal, assert_raises, assert_raises_regex, + assert_warns, + ) +from numpy.lib import format + + +# Generate some basic arrays to test with. +scalars = [ + np.uint8, + np.int8, + np.uint16, + np.int16, + np.uint32, + np.int32, + np.uint64, + np.int64, + np.float32, + np.float64, + np.complex64, + np.complex128, + object, +] +basic_arrays = [] +for scalar in scalars: + for endian in '<>': + dtype = np.dtype(scalar).newbyteorder(endian) + basic = np.arange(1500).astype(dtype) + basic_arrays.extend([ + # Empty + np.array([], dtype=dtype), + # Rank-0 + np.array(10, dtype=dtype), + # 1-D + basic, + # 2-D C-contiguous + basic.reshape((30, 50)), + # 2-D F-contiguous + basic.reshape((30, 50)).T, + # 2-D non-contiguous + basic.reshape((30, 50))[::-1, ::2], + ]) + +# More complicated record arrays. +# This is the structure of the table used for plain objects: +# +# +-+-+-+ +# |x|y|z| +# +-+-+-+ + +# Structure of a plain array description: +Pdescr = [ + ('x', 'i4', (2,)), + ('y', 'f8', (2, 2)), + ('z', 'u1')] + +# A plain list of tuples with values for testing: +PbufferT = [ + # x y z + ([3, 2], [[6., 4.], [6., 4.]], 8), + ([4, 3], [[7., 5.], [7., 5.]], 9), + ] + + +# This is the structure of the table used for nested objects (DON'T PANIC!): +# +# +-+---------------------------------+-----+----------+-+-+ +# |x|Info |color|info |y|z| +# | +-----+--+----------------+----+--+ +----+-----+ | | +# | |value|y2|Info2 |name|z2| |Name|Value| | | +# | | | +----+-----+--+--+ | | | | | | | +# | | | |name|value|y3|z3| | | | | | | | +# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+ +# + +# The corresponding nested array description: +Ndescr = [ + ('x', 'i4', (2,)), + ('Info', [ + ('value', 'c16'), + ('y2', 'f8'), + ('Info2', [ + ('name', 'S2'), + ('value', 'c16', (2,)), + ('y3', 'f8', (2,)), + ('z3', 'u4', (2,))]), + ('name', 'S2'), + ('z2', 'b1')]), + ('color', 'S2'), + ('info', [ + ('Name', 'U8'), + ('Value', 'c16')]), + ('y', 'f8', (2, 2)), + ('z', 'u1')] + +NbufferT = [ + # x Info color info y z + # value y2 Info2 name z2 Name Value + # name value y3 z3 + ([3, 2], (6j, 6., ('nn', [6j, 4j], [6., 4.], [1, 2]), 'NN', True), + 'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8), + ([4, 3], (7j, 7., ('oo', [7j, 5j], [7., 5.], [2, 1]), 'OO', False), + 'dd', ('OO', 7j), [[7., 5.], [7., 5.]], 9), + ] + +record_arrays = [ + np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')), + np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')), + np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')), + np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')), + np.zeros(1, dtype=[('c', (' 1, a) + assert_array_equal(b, [3, 2, 2, 3, 3]) + + def test_place(self): + # Make sure that non-np.ndarray objects + # raise an error instead of doing nothing + assert_raises(TypeError, place, [1, 2, 3], [True, False], [0, 1]) + + a = np.array([1, 4, 3, 2, 5, 8, 7]) + 
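+ # np.place fills the True positions of mask with values taken from vals in + # order, repeating vals when it is shorter than the number of True entries: + # >>> a = np.array([1, 2, 3, 4]); np.place(a, [1, 0, 1, 0], [9]); a + # array([9, 2, 9, 4])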
place(a, [0, 1, 0, 1, 0, 1, 0], [2, 4, 6]) + assert_array_equal(a, [1, 2, 3, 4, 5, 6, 7]) + + place(a, np.zeros(7), []) + assert_array_equal(a, np.arange(1, 8)) + + place(a, [1, 0, 1, 0, 1, 0, 1], [8, 9]) + assert_array_equal(a, [8, 2, 9, 4, 8, 6, 9]) + assert_raises_regex(ValueError, "Cannot insert from an empty array", + lambda: place(a, [0, 0, 0, 0, 0, 1, 0], [])) + + # See Issue #6974 + a = np.array(['12', '34']) + place(a, [0, 1], '9') + assert_array_equal(a, ['12', '9']) + + def test_both(self): + a = rand(10) + mask = a > 0.5 + ac = a.copy() + c = extract(mask, a) + place(a, mask, 0) + place(a, mask, c) + assert_array_equal(a, ac) + + +# _foo1 and _foo2 are used in some tests in TestVectorize. + +def _foo1(x, y=1.0): + return y*math.floor(x) + + +def _foo2(x, y=1.0, z=0.0): + return y*math.floor(x) + z + + +class TestVectorize: + + def test_simple(self): + def addsubtract(a, b): + if a > b: + return a - b + else: + return a + b + + f = vectorize(addsubtract) + r = f([0, 3, 6, 9], [1, 3, 5, 7]) + assert_array_equal(r, [1, 6, 1, 2]) + + def test_scalar(self): + def addsubtract(a, b): + if a > b: + return a - b + else: + return a + b + + f = vectorize(addsubtract) + r = f([0, 3, 6, 9], 5) + assert_array_equal(r, [5, 8, 1, 4]) + + def test_large(self): + x = np.linspace(-3, 2, 10000) + f = vectorize(lambda x: x) + y = f(x) + assert_array_equal(y, x) + + def test_ufunc(self): + f = vectorize(math.cos) + args = np.array([0, 0.5 * np.pi, np.pi, 1.5 * np.pi, 2 * np.pi]) + r1 = f(args) + r2 = np.cos(args) + assert_array_almost_equal(r1, r2) + + def test_keywords(self): + + def foo(a, b=1): + return a + b + + f = vectorize(foo) + args = np.array([1, 2, 3]) + r1 = f(args) + r2 = np.array([2, 3, 4]) + assert_array_equal(r1, r2) + r1 = f(args, 2) + r2 = np.array([3, 4, 5]) + assert_array_equal(r1, r2) + + def test_keywords_with_otypes_order1(self): + # gh-1620: The second call of f would crash with + # `ValueError: invalid number of arguments`. + f = vectorize(_foo1, otypes=[float]) + # We're testing the caching of ufuncs by vectorize, so the order + # of these function calls is an important part of the test. + r1 = f(np.arange(3.0), 1.0) + r2 = f(np.arange(3.0)) + assert_array_equal(r1, r2) + + def test_keywords_with_otypes_order2(self): + # gh-1620: The second call of f would crash with + # `ValueError: non-broadcastable output operand with shape () + # doesn't match the broadcast shape (3,)`. + f = vectorize(_foo1, otypes=[float]) + # We're testing the caching of ufuncs by vectorize, so the order + # of these function calls is an important part of the test. + r1 = f(np.arange(3.0)) + r2 = f(np.arange(3.0), 1.0) + assert_array_equal(r1, r2) + + def test_keywords_with_otypes_order3(self): + # gh-1620: The third call of f would crash with + # `ValueError: invalid number of arguments`. + f = vectorize(_foo1, otypes=[float]) + # We're testing the caching of ufuncs by vectorize, so the order + # of these function calls is an important part of the test. + r1 = f(np.arange(3.0)) + r2 = f(np.arange(3.0), y=1.0) + r3 = f(np.arange(3.0)) + assert_array_equal(r1, r2) + assert_array_equal(r1, r3) + + def test_keywords_with_otypes_several_kwd_args1(self): + # gh-1620 Make sure different uses of keyword arguments + # don't break the vectorized function. + f = vectorize(_foo2, otypes=[float]) + # We're testing the caching of ufuncs by vectorize, so the order + # of these function calls is an important part of the test. 
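+ # The three calls below use three different keyword combinations; gh-1620 + # reported crashes when a later call reused the ufunc that vectorize had + # cached for a different combination.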
+ r1 = f(10.4, z=100) + r2 = f(10.4, y=-1) + r3 = f(10.4) + assert_equal(r1, _foo2(10.4, z=100)) + assert_equal(r2, _foo2(10.4, y=-1)) + assert_equal(r3, _foo2(10.4)) + + def test_keywords_with_otypes_several_kwd_args2(self): + # gh-1620 Make sure different uses of keyword arguments + # don't break the vectorized function. + f = vectorize(_foo2, otypes=[float]) + # We're testing the caching of ufuncs by vectorize, so the order + # of these function calls is an important part of the test. + r1 = f(z=100, x=10.4, y=-1) + r2 = f(1, 2, 3) + assert_equal(r1, _foo2(z=100, x=10.4, y=-1)) + assert_equal(r2, _foo2(1, 2, 3)) + + def test_keywords_no_func_code(self): + # This needs to test a function that has keywords but + # no func_code attribute, since otherwise vectorize will + # inspect the func_code. + import random + try: + vectorize(random.randrange) # Should succeed + except Exception: + raise AssertionError() + + def test_keywords2_ticket_2100(self): + # Test kwarg support: enhancement ticket 2100 + + def foo(a, b=1): + return a + b + + f = vectorize(foo) + args = np.array([1, 2, 3]) + r1 = f(a=args) + r2 = np.array([2, 3, 4]) + assert_array_equal(r1, r2) + r1 = f(b=1, a=args) + assert_array_equal(r1, r2) + r1 = f(args, b=2) + r2 = np.array([3, 4, 5]) + assert_array_equal(r1, r2) + + def test_keywords3_ticket_2100(self): + # Test excluded with mixed positional and kwargs: ticket 2100 + def mypolyval(x, p): + _p = list(p) + res = _p.pop(0) + while _p: + res = res * x + _p.pop(0) + return res + + vpolyval = np.vectorize(mypolyval, excluded=['p', 1]) + ans = [3, 6] + assert_array_equal(ans, vpolyval(x=[0, 1], p=[1, 2, 3])) + assert_array_equal(ans, vpolyval([0, 1], p=[1, 2, 3])) + assert_array_equal(ans, vpolyval([0, 1], [1, 2, 3])) + + def test_keywords4_ticket_2100(self): + # Test vectorizing function with no positional args. + @vectorize + def f(**kw): + res = 1.0 + for _k in kw: + res *= kw[_k] + return res + + assert_array_equal(f(a=[1, 2], b=[3, 4]), [3, 8]) + + def test_keywords5_ticket_2100(self): + # Test vectorizing function with no kwargs args. + @vectorize + def f(*v): + return np.prod(v) + + assert_array_equal(f([1, 2], [3, 4]), [3, 8]) + + def test_coverage1_ticket_2100(self): + def foo(): + return 1 + + f = vectorize(foo) + assert_array_equal(f(), 1) + + def test_assigning_docstring(self): + def foo(x): + """Original documentation""" + return x + + f = vectorize(foo) + assert_equal(f.__doc__, foo.__doc__) + + doc = "Provided documentation" + f = vectorize(foo, doc=doc) + assert_equal(f.__doc__, doc) + + def test_UnboundMethod_ticket_1156(self): + # Regression test for issue 1156 + class Foo: + b = 2 + + def bar(self, a): + return a ** self.b + + assert_array_equal(vectorize(Foo().bar)(np.arange(9)), + np.arange(9) ** 2) + assert_array_equal(vectorize(Foo.bar)(Foo(), np.arange(9)), + np.arange(9) ** 2) + + def test_execution_order_ticket_1487(self): + # Regression test for dependence on execution order: issue 1487 + f1 = vectorize(lambda x: x) + res1a = f1(np.arange(3)) + res1b = f1(np.arange(0.1, 3)) + f2 = vectorize(lambda x: x) + res2b = f2(np.arange(0.1, 3)) + res2a = f2(np.arange(3)) + assert_equal(res1a, res2a) + assert_equal(res1b, res2b) + + def test_string_ticket_1892(self): + # Test vectorization over strings: issue 1892. + f = np.vectorize(lambda x: x) + s = '0123456789' * 10 + assert_equal(s, f(s)) + + def test_cache(self): + # Ensure that vectorized func called exactly once per argument. 
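+ # With cache=True, vectorize memoizes the first call it makes to determine + # the output dtype, so f runs exactly len(x) times instead of len(x) + 1.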
+ _calls = [0] + + @vectorize + def f(x): + _calls[0] += 1 + return x ** 2 + + f.cache = True + x = np.arange(5) + assert_array_equal(f(x), x * x) + assert_equal(_calls[0], len(x)) + + def test_otypes(self): + f = np.vectorize(lambda x: x) + f.otypes = 'i' + x = np.arange(5) + assert_array_equal(f(x), x) + + def test_parse_gufunc_signature(self): + assert_equal(nfb._parse_gufunc_signature('(x)->()'), ([('x',)], [()])) + assert_equal(nfb._parse_gufunc_signature('(x,y)->()'), + ([('x', 'y')], [()])) + assert_equal(nfb._parse_gufunc_signature('(x),(y)->()'), + ([('x',), ('y',)], [()])) + assert_equal(nfb._parse_gufunc_signature('(x)->(y)'), + ([('x',)], [('y',)])) + assert_equal(nfb._parse_gufunc_signature('(x)->(y),()'), + ([('x',)], [('y',), ()])) + assert_equal(nfb._parse_gufunc_signature('(),(a,b,c),(d)->(d,e)'), + ([(), ('a', 'b', 'c'), ('d',)], [('d', 'e')])) + with assert_raises(ValueError): + nfb._parse_gufunc_signature('(x)(y)->()') + with assert_raises(ValueError): + nfb._parse_gufunc_signature('(x),(y)->') + with assert_raises(ValueError): + nfb._parse_gufunc_signature('((x))->(x)') + + def test_signature_simple(self): + def addsubtract(a, b): + if a > b: + return a - b + else: + return a + b + + f = vectorize(addsubtract, signature='(),()->()') + r = f([0, 3, 6, 9], [1, 3, 5, 7]) + assert_array_equal(r, [1, 6, 1, 2]) + + def test_signature_mean_last(self): + def mean(a): + return a.mean() + + f = vectorize(mean, signature='(n)->()') + r = f([[1, 3], [2, 4]]) + assert_array_equal(r, [2, 3]) + + def test_signature_center(self): + def center(a): + return a - a.mean() + + f = vectorize(center, signature='(n)->(n)') + r = f([[1, 3], [2, 4]]) + assert_array_equal(r, [[-1, 1], [-1, 1]]) + + def test_signature_two_outputs(self): + f = vectorize(lambda x: (x, x), signature='()->(),()') + r = f([1, 2, 3]) + assert_(isinstance(r, tuple) and len(r) == 2) + assert_array_equal(r[0], [1, 2, 3]) + assert_array_equal(r[1], [1, 2, 3]) + + def test_signature_outer(self): + f = vectorize(np.outer, signature='(a),(b)->(a,b)') + r = f([1, 2], [1, 2, 3]) + assert_array_equal(r, [[1, 2, 3], [2, 4, 6]]) + + r = f([[[1, 2]]], [1, 2, 3]) + assert_array_equal(r, [[[[1, 2, 3], [2, 4, 6]]]]) + + r = f([[1, 0], [2, 0]], [1, 2, 3]) + assert_array_equal(r, [[[1, 2, 3], [0, 0, 0]], + [[2, 4, 6], [0, 0, 0]]]) + + r = f([1, 2], [[1, 2, 3], [0, 0, 0]]) + assert_array_equal(r, [[[1, 2, 3], [2, 4, 6]], + [[0, 0, 0], [0, 0, 0]]]) + + def test_signature_computed_size(self): + f = vectorize(lambda x: x[:-1], signature='(n)->(m)') + r = f([1, 2, 3]) + assert_array_equal(r, [1, 2]) + + r = f([[1, 2, 3], [2, 3, 4]]) + assert_array_equal(r, [[1, 2], [2, 3]]) + + def test_signature_excluded(self): + + def foo(a, b=1): + return a + b + + f = vectorize(foo, signature='()->()', excluded={'b'}) + assert_array_equal(f([1, 2, 3]), [2, 3, 4]) + assert_array_equal(f([1, 2, 3], b=0), [1, 2, 3]) + + def test_signature_otypes(self): + f = vectorize(lambda x: x, signature='(n)->(n)', otypes=['float64']) + r = f([1, 2, 3]) + assert_equal(r.dtype, np.dtype('float64')) + assert_array_equal(r, [1, 2, 3]) + + def test_signature_invalid_inputs(self): + f = vectorize(operator.add, signature='(n),(n)->(n)') + with assert_raises_regex(TypeError, 'wrong number of positional'): + f([1, 2]) + with assert_raises_regex( + ValueError, 'does not have enough dimensions'): + f(1, 2) + with assert_raises_regex( + ValueError, 'inconsistent size for core dimension'): + f([1, 2], [1, 2, 3]) + + f = vectorize(operator.add, signature='()->()') + with 
assert_raises_regex(TypeError, 'wrong number of positional'): + f(1, 2) + + def test_signature_invalid_outputs(self): + + f = vectorize(lambda x: x[:-1], signature='(n)->(n)') + with assert_raises_regex( + ValueError, 'inconsistent size for core dimension'): + f([1, 2, 3]) + + f = vectorize(lambda x: x, signature='()->(),()') + with assert_raises_regex(ValueError, 'wrong number of outputs'): + f(1) + + f = vectorize(lambda x: (x, x), signature='()->()') + with assert_raises_regex(ValueError, 'wrong number of outputs'): + f([1, 2]) + + def test_size_zero_output(self): + # see issue 5868 + f = np.vectorize(lambda x: x) + x = np.zeros([0, 5], dtype=int) + with assert_raises_regex(ValueError, 'otypes'): + f(x) + + f.otypes = 'i' + assert_array_equal(f(x), x) + + f = np.vectorize(lambda x: x, signature='()->()') + with assert_raises_regex(ValueError, 'otypes'): + f(x) + + f = np.vectorize(lambda x: x, signature='()->()', otypes='i') + assert_array_equal(f(x), x) + + f = np.vectorize(lambda x: x, signature='(n)->(n)', otypes='i') + assert_array_equal(f(x), x) + + f = np.vectorize(lambda x: x, signature='(n)->(n)') + assert_array_equal(f(x.T), x.T) + + f = np.vectorize(lambda x: [x], signature='()->(n)', otypes='i') + with assert_raises_regex(ValueError, 'new output dimensions'): + f(x) + + +class TestLeaks: + class A: + iters = 20 + + def bound(self, *args): + return 0 + + @staticmethod + def unbound(*args): + return 0 + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + @pytest.mark.parametrize('name, incr', [ + ('bound', A.iters), + ('unbound', 0), + ]) + def test_frompyfunc_leaks(self, name, incr): + # exposed in gh-11867 as np.vectorized, but the problem stems from + # frompyfunc. + # class.attribute = np.frompyfunc(<method>) creates a + # reference cycle if <method> is a bound class method.
It requires a + # gc collection cycle to break the cycle (on CPython 3) + import gc + A_func = getattr(self.A, name) + gc.disable() + try: + refcount = sys.getrefcount(A_func) + for i in range(self.A.iters): + a = self.A() + a.f = np.frompyfunc(getattr(a, name), 1, 1) + out = a.f(np.arange(10)) + a = None + # A.func is part of a reference cycle if incr is non-zero + assert_equal(sys.getrefcount(A_func), refcount + incr) + for i in range(5): + gc.collect() + assert_equal(sys.getrefcount(A_func), refcount) + finally: + gc.enable() + +class TestDigitize: + + def test_forward(self): + x = np.arange(-6, 5) + bins = np.arange(-5, 5) + assert_array_equal(digitize(x, bins), np.arange(11)) + + def test_reverse(self): + x = np.arange(5, -6, -1) + bins = np.arange(5, -5, -1) + assert_array_equal(digitize(x, bins), np.arange(11)) + + def test_random(self): + x = rand(10) + bin = np.linspace(x.min(), x.max(), 10) + assert_(np.all(digitize(x, bin) != 0)) + + def test_right_basic(self): + x = [1, 5, 4, 10, 8, 11, 0] + bins = [1, 5, 10] + default_answer = [1, 2, 1, 3, 2, 3, 0] + assert_array_equal(digitize(x, bins), default_answer) + right_answer = [0, 1, 1, 2, 2, 3, 0] + assert_array_equal(digitize(x, bins, True), right_answer) + + def test_right_open(self): + x = np.arange(-6, 5) + bins = np.arange(-6, 4) + assert_array_equal(digitize(x, bins, True), np.arange(11)) + + def test_right_open_reverse(self): + x = np.arange(5, -6, -1) + bins = np.arange(4, -6, -1) + assert_array_equal(digitize(x, bins, True), np.arange(11)) + + def test_right_open_random(self): + x = rand(10) + bins = np.linspace(x.min(), x.max(), 10) + assert_(np.all(digitize(x, bins, True) != 10)) + + def test_monotonic(self): + x = [-1, 0, 1, 2] + bins = [0, 0, 1] + assert_array_equal(digitize(x, bins, False), [0, 2, 3, 3]) + assert_array_equal(digitize(x, bins, True), [0, 0, 2, 3]) + bins = [1, 1, 0] + assert_array_equal(digitize(x, bins, False), [3, 2, 0, 0]) + assert_array_equal(digitize(x, bins, True), [3, 3, 2, 0]) + bins = [1, 1, 1, 1] + assert_array_equal(digitize(x, bins, False), [0, 0, 4, 4]) + assert_array_equal(digitize(x, bins, True), [0, 0, 0, 4]) + bins = [0, 0, 1, 0] + assert_raises(ValueError, digitize, x, bins) + bins = [1, 1, 0, 1] + assert_raises(ValueError, digitize, x, bins) + + def test_casting_error(self): + x = [1, 2, 3 + 1.j] + bins = [1, 2, 3] + assert_raises(TypeError, digitize, x, bins) + x, bins = bins, x + assert_raises(TypeError, digitize, x, bins) + + def test_return_type(self): + # Functions returning indices should always return base ndarrays + class A(np.ndarray): + pass + a = np.arange(5).view(A) + b = np.arange(1, 3).view(A) + assert_(not isinstance(digitize(b, a, False), A)) + assert_(not isinstance(digitize(b, a, True), A)) + + def test_large_integers_increasing(self): + # gh-11022 + x = 2**54 # loses precision in a float + assert_equal(np.digitize(x, [x - 1, x + 1]), 1) + + @pytest.mark.xfail( + reason="gh-11022: np.core.multiarray._monotonicity loses precision") + def test_large_integers_decreasing(self): + # gh-11022 + x = 2**54 # loses precision in a float + assert_equal(np.digitize(x, [x + 1, x - 1]), 1) + + +class TestUnwrap: + + def test_simple(self): + # check that unwrap removes jumps greater than 2*pi + assert_array_equal(unwrap([1, 1 + 2 * np.pi]), [1, 1]) + # check that unwrap maintains continuity + assert_(np.all(diff(unwrap(rand(10) * 100)) < np.pi)) + + def test_period(self): + # check that unwrap removes jumps greater than 255 + assert_array_equal(unwrap([1, 1 + 256], period=255), [1,
2]) + # check that unwrap maintains continuity + assert_(np.all(diff(unwrap(rand(10) * 1000, period=255)) < 255)) + # check simple case + simple_seq = np.array([0, 75, 150, 225, 300]) + wrap_seq = np.mod(simple_seq, 255) + assert_array_equal(unwrap(wrap_seq, period=255), simple_seq) + # check custom discont value + uneven_seq = np.array([0, 75, 150, 225, 300, 430]) + wrap_uneven = np.mod(uneven_seq, 250) + no_discont = unwrap(wrap_uneven, period=250) + assert_array_equal(no_discont, [0, 75, 150, 225, 300, 180]) + sm_discont = unwrap(wrap_uneven, period=250, discont=140) + assert_array_equal(sm_discont, [0, 75, 150, 225, 300, 430]) + assert sm_discont.dtype == wrap_uneven.dtype + + +class TestFilterwindows: + + def test_hanning(self): + # check symmetry + w = hanning(10) + assert_equal(w, flipud(w)) + # check known value + assert_almost_equal(np.sum(w, axis=0), 4.500, 4) + + def test_hamming(self): + # check symmetry + w = hamming(10) + assert_equal(w, flipud(w)) + # check known value + assert_almost_equal(np.sum(w, axis=0), 4.9400, 4) + + def test_bartlett(self): + # check symmetry + w = bartlett(10) + assert_equal(w, flipud(w)) + # check known value + assert_almost_equal(np.sum(w, axis=0), 4.4444, 4) + + def test_blackman(self): + # check symmetry + w = blackman(10) + assert_equal(w, flipud(w)) + # check known value + assert_almost_equal(np.sum(w, axis=0), 3.7800, 4) + + +class TestTrapz: + + def test_simple(self): + x = np.arange(-10, 10, .1) + r = trapz(np.exp(-.5 * x ** 2) / np.sqrt(2 * np.pi), dx=0.1) + # check integral of normal equals 1 + assert_almost_equal(r, 1, 7) + + def test_ndim(self): + x = np.linspace(0, 1, 3) + y = np.linspace(0, 2, 8) + z = np.linspace(0, 3, 13) + + wx = np.ones_like(x) * (x[1] - x[0]) + wx[0] /= 2 + wx[-1] /= 2 + wy = np.ones_like(y) * (y[1] - y[0]) + wy[0] /= 2 + wy[-1] /= 2 + wz = np.ones_like(z) * (z[1] - z[0]) + wz[0] /= 2 + wz[-1] /= 2 + + q = x[:, None, None] + y[None,:, None] + z[None, None,:] + + qx = (q * wx[:, None, None]).sum(axis=0) + qy = (q * wy[None, :, None]).sum(axis=1) + qz = (q * wz[None, None, :]).sum(axis=2) + + # n-d `x` + r = trapz(q, x=x[:, None, None], axis=0) + assert_almost_equal(r, qx) + r = trapz(q, x=y[None,:, None], axis=1) + assert_almost_equal(r, qy) + r = trapz(q, x=z[None, None,:], axis=2) + assert_almost_equal(r, qz) + + # 1-d `x` + r = trapz(q, x=x, axis=0) + assert_almost_equal(r, qx) + r = trapz(q, x=y, axis=1) + assert_almost_equal(r, qy) + r = trapz(q, x=z, axis=2) + assert_almost_equal(r, qz) + + def test_masked(self): + # Testing that masked arrays behave as if the function is 0 where + # masked + x = np.arange(5) + y = x * x + mask = x == 2 + ym = np.ma.array(y, mask=mask) + r = 13.0 # sum(0.5 * (0 + 1) * 1.0 + 0.5 * (9 + 16)) + assert_almost_equal(trapz(ym, x), r) + + xm = np.ma.array(x, mask=mask) + assert_almost_equal(trapz(ym, xm), r) + + xm = np.ma.array(x, mask=mask) + assert_almost_equal(trapz(y, xm), r) + + +class TestSinc: + + def test_simple(self): + assert_(sinc(0) == 1) + w = sinc(np.linspace(-1, 1, 100)) + # check symmetry + assert_array_almost_equal(w, flipud(w), 7) + + def test_array_like(self): + x = [0, 0.5] + y1 = sinc(np.array(x)) + y2 = sinc(list(x)) + y3 = sinc(tuple(x)) + assert_array_equal(y1, y2) + assert_array_equal(y1, y3) + + +class TestUnique: + + def test_simple(self): + x = np.array([4, 3, 2, 1, 1, 2, 3, 4, 0]) + assert_(np.all(unique(x) == [0, 1, 2, 3, 4])) + assert_(unique(np.array([1, 1, 1, 1, 1])) == np.array([1])) + x = ['widget', 'ham', 'foo', 'bar', 'foo', 'ham'] + 
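+ # unique() returns the distinct values in sorted order, so the strings come + # back lexicographically.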
assert_(np.all(unique(x) == ['bar', 'foo', 'ham', 'widget'])) + x = np.array([5 + 6j, 1 + 1j, 1 + 10j, 10, 5 + 6j]) + assert_(np.all(unique(x) == [1 + 1j, 1 + 10j, 5 + 6j, 10])) + + +class TestCheckFinite: + + def test_simple(self): + a = [1, 2, 3] + b = [1, 2, np.inf] + c = [1, 2, np.nan] + np.lib.asarray_chkfinite(a) + assert_raises(ValueError, np.lib.asarray_chkfinite, b) + assert_raises(ValueError, np.lib.asarray_chkfinite, c) + + def test_dtype_order(self): + # Regression test for missing dtype and order arguments + a = [1, 2, 3] + a = np.lib.asarray_chkfinite(a, order='F', dtype=np.float64) + assert_(a.dtype == np.float64) + + +class TestCorrCoef: + A = np.array( + [[0.15391142, 0.18045767, 0.14197213], + [0.70461506, 0.96474128, 0.27906989], + [0.9297531, 0.32296769, 0.19267156]]) + B = np.array( + [[0.10377691, 0.5417086, 0.49807457], + [0.82872117, 0.77801674, 0.39226705], + [0.9314666, 0.66800209, 0.03538394]]) + res1 = np.array( + [[1., 0.9379533, -0.04931983], + [0.9379533, 1., 0.30007991], + [-0.04931983, 0.30007991, 1.]]) + res2 = np.array( + [[1., 0.9379533, -0.04931983, 0.30151751, 0.66318558, 0.51532523], + [0.9379533, 1., 0.30007991, -0.04781421, 0.88157256, 0.78052386], + [-0.04931983, 0.30007991, 1., -0.96717111, 0.71483595, 0.83053601], + [0.30151751, -0.04781421, -0.96717111, 1., -0.51366032, -0.66173113], + [0.66318558, 0.88157256, 0.71483595, -0.51366032, 1., 0.98317823], + [0.51532523, 0.78052386, 0.83053601, -0.66173113, 0.98317823, 1.]]) + + def test_non_array(self): + assert_almost_equal(np.corrcoef([0, 1, 0], [1, 0, 1]), + [[1., -1.], [-1., 1.]]) + + def test_simple(self): + tgt1 = corrcoef(self.A) + assert_almost_equal(tgt1, self.res1) + assert_(np.all(np.abs(tgt1) <= 1.0)) + + tgt2 = corrcoef(self.A, self.B) + assert_almost_equal(tgt2, self.res2) + assert_(np.all(np.abs(tgt2) <= 1.0)) + + def test_ddof(self): + # ddof raises DeprecationWarning + with suppress_warnings() as sup: + warnings.simplefilter("always") + assert_warns(DeprecationWarning, corrcoef, self.A, ddof=-1) + sup.filter(DeprecationWarning) + # ddof has no or negligible effect on the function + assert_almost_equal(corrcoef(self.A, ddof=-1), self.res1) + assert_almost_equal(corrcoef(self.A, self.B, ddof=-1), self.res2) + assert_almost_equal(corrcoef(self.A, ddof=3), self.res1) + assert_almost_equal(corrcoef(self.A, self.B, ddof=3), self.res2) + + def test_bias(self): + # bias raises DeprecationWarning + with suppress_warnings() as sup: + warnings.simplefilter("always") + assert_warns(DeprecationWarning, corrcoef, self.A, self.B, 1, 0) + assert_warns(DeprecationWarning, corrcoef, self.A, bias=0) + sup.filter(DeprecationWarning) + # bias has no or negligible effect on the function + assert_almost_equal(corrcoef(self.A, bias=1), self.res1) + + def test_complex(self): + x = np.array([[1, 2, 3], [1j, 2j, 3j]]) + res = corrcoef(x) + tgt = np.array([[1., -1.j], [1.j, 1.]]) + assert_allclose(res, tgt) + assert_(np.all(np.abs(res) <= 1.0)) + + def test_xy(self): + x = np.array([[1, 2, 3]]) + y = np.array([[1j, 2j, 3j]]) + assert_allclose(np.corrcoef(x, y), np.array([[1., -1.j], [1.j, 1.]])) + + def test_empty(self): + with warnings.catch_warnings(record=True): + warnings.simplefilter('always', RuntimeWarning) + assert_array_equal(corrcoef(np.array([])), np.nan) + assert_array_equal(corrcoef(np.array([]).reshape(0, 2)), + np.array([]).reshape(0, 0)) + assert_array_equal(corrcoef(np.array([]).reshape(2, 0)), + np.array([[np.nan, np.nan], [np.nan, np.nan]])) + + def test_extreme(self): + x = [[1e-100, 
1e100], [1e100, 1e-100]] + with np.errstate(all='raise'): + c = corrcoef(x) + assert_array_almost_equal(c, np.array([[1., -1.], [-1., 1.]])) + assert_(np.all(np.abs(c) <= 1.0)) + + @pytest.mark.parametrize("test_type", [np.half, np.single, np.double, np.longdouble]) + def test_corrcoef_dtype(self, test_type): + cast_A = self.A.astype(test_type) + res = corrcoef(cast_A, dtype=test_type) + assert test_type == res.dtype + + +class TestCov: + x1 = np.array([[0, 2], [1, 1], [2, 0]]).T + res1 = np.array([[1., -1.], [-1., 1.]]) + x2 = np.array([0.0, 1.0, 2.0], ndmin=2) + frequencies = np.array([1, 4, 1]) + x2_repeats = np.array([[0.0], [1.0], [1.0], [1.0], [1.0], [2.0]]).T + res2 = np.array([[0.4, -0.4], [-0.4, 0.4]]) + unit_frequencies = np.ones(3, dtype=np.int_) + weights = np.array([1.0, 4.0, 1.0]) + res3 = np.array([[2. / 3., -2. / 3.], [-2. / 3., 2. / 3.]]) + unit_weights = np.ones(3) + x3 = np.array([0.3942, 0.5969, 0.7730, 0.9918, 0.7964]) + + def test_basic(self): + assert_allclose(cov(self.x1), self.res1) + + def test_complex(self): + x = np.array([[1, 2, 3], [1j, 2j, 3j]]) + res = np.array([[1., -1.j], [1.j, 1.]]) + assert_allclose(cov(x), res) + assert_allclose(cov(x, aweights=np.ones(3)), res) + + def test_xy(self): + x = np.array([[1, 2, 3]]) + y = np.array([[1j, 2j, 3j]]) + assert_allclose(cov(x, y), np.array([[1., -1.j], [1.j, 1.]])) + + def test_empty(self): + with warnings.catch_warnings(record=True): + warnings.simplefilter('always', RuntimeWarning) + assert_array_equal(cov(np.array([])), np.nan) + assert_array_equal(cov(np.array([]).reshape(0, 2)), + np.array([]).reshape(0, 0)) + assert_array_equal(cov(np.array([]).reshape(2, 0)), + np.array([[np.nan, np.nan], [np.nan, np.nan]])) + + def test_wrong_ddof(self): + with warnings.catch_warnings(record=True): + warnings.simplefilter('always', RuntimeWarning) + assert_array_equal(cov(self.x1, ddof=5), + np.array([[np.inf, -np.inf], + [-np.inf, np.inf]])) + + def test_1D_rowvar(self): + assert_allclose(cov(self.x3), cov(self.x3, rowvar=False)) + y = np.array([0.0780, 0.3107, 0.2111, 0.0334, 0.8501]) + assert_allclose(cov(self.x3, y), cov(self.x3, y, rowvar=False)) + + def test_1D_variance(self): + assert_allclose(cov(self.x3, ddof=1), np.var(self.x3, ddof=1)) + + def test_fweights(self): + assert_allclose(cov(self.x2, fweights=self.frequencies), + cov(self.x2_repeats)) + assert_allclose(cov(self.x1, fweights=self.frequencies), + self.res2) + assert_allclose(cov(self.x1, fweights=self.unit_frequencies), + self.res1) + nonint = self.frequencies + 0.5 + assert_raises(TypeError, cov, self.x1, fweights=nonint) + f = np.ones((2, 3), dtype=np.int_) + assert_raises(RuntimeError, cov, self.x1, fweights=f) + f = np.ones(2, dtype=np.int_) + assert_raises(RuntimeError, cov, self.x1, fweights=f) + f = -1 * np.ones(3, dtype=np.int_) + assert_raises(ValueError, cov, self.x1, fweights=f) + + def test_aweights(self): + assert_allclose(cov(self.x1, aweights=self.weights), self.res3) + assert_allclose(cov(self.x1, aweights=3.0 * self.weights), + cov(self.x1, aweights=self.weights)) + assert_allclose(cov(self.x1, aweights=self.unit_weights), self.res1) + w = np.ones((2, 3)) + assert_raises(RuntimeError, cov, self.x1, aweights=w) + w = np.ones(2) + assert_raises(RuntimeError, cov, self.x1, aweights=w) + w = -1.0 * np.ones(3) + assert_raises(ValueError, cov, self.x1, aweights=w) + + def test_unit_fweights_and_aweights(self): + assert_allclose(cov(self.x2, fweights=self.frequencies, + aweights=self.unit_weights), + cov(self.x2_repeats)) + 
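+ # Unit aweights should be a no-op: the result must match the covariance + # computed from the frequency weights alone.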
assert_allclose(cov(self.x1, fweights=self.frequencies, + aweights=self.unit_weights), + self.res2) + assert_allclose(cov(self.x1, fweights=self.unit_frequencies, + aweights=self.unit_weights), + self.res1) + assert_allclose(cov(self.x1, fweights=self.unit_frequencies, + aweights=self.weights), + self.res3) + assert_allclose(cov(self.x1, fweights=self.unit_frequencies, + aweights=3.0 * self.weights), + cov(self.x1, aweights=self.weights)) + assert_allclose(cov(self.x1, fweights=self.unit_frequencies, + aweights=self.unit_weights), + self.res1) + + @pytest.mark.parametrize("test_type", [np.half, np.single, np.double, np.longdouble]) + def test_cov_dtype(self, test_type): + cast_x1 = self.x1.astype(test_type) + res = cov(cast_x1, dtype=test_type) + assert test_type == res.dtype + + +class Test_I0: + + def test_simple(self): + assert_almost_equal( + i0(0.5), + np.array(1.0634833707413234)) + + # need at least one test above 8, as the implementation is piecewise + A = np.array([0.49842636, 0.6969809, 0.22011976, 0.0155549, 10.0]) + expected = np.array([1.06307822, 1.12518299, 1.01214991, 1.00006049, 2815.71662847]) + assert_almost_equal(i0(A), expected) + assert_almost_equal(i0(-A), expected) + + B = np.array([[0.827002, 0.99959078], + [0.89694769, 0.39298162], + [0.37954418, 0.05206293], + [0.36465447, 0.72446427], + [0.48164949, 0.50324519]]) + assert_almost_equal( + i0(B), + np.array([[1.17843223, 1.26583466], + [1.21147086, 1.03898290], + [1.03633899, 1.00067775], + [1.03352052, 1.13557954], + [1.05884290, 1.06432317]])) + # Regression test for gh-11205 + i0_0 = np.i0([0.]) + assert_equal(i0_0.shape, (1,)) + assert_array_equal(np.i0([0.]), np.array([1.])) + + def test_non_array(self): + a = np.arange(4) + + class array_like: + __array_interface__ = a.__array_interface__ + + def __array_wrap__(self, arr): + return self + + # E.g. 
pandas series survive ufunc calls through array-wrap: + assert isinstance(np.abs(array_like()), array_like) + exp = np.i0(a) + res = np.i0(array_like()) + + assert_array_equal(exp, res) + + def test_complex(self): + a = np.array([0, 1 + 2j]) + with pytest.raises(TypeError, match="i0 not supported for complex values"): + res = i0(a) + +class TestKaiser: + + def test_simple(self): + assert_(np.isfinite(kaiser(1, 1.0))) + assert_almost_equal(kaiser(0, 1.0), + np.array([])) + assert_almost_equal(kaiser(2, 1.0), + np.array([0.78984831, 0.78984831])) + assert_almost_equal(kaiser(5, 1.0), + np.array([0.78984831, 0.94503323, 1., + 0.94503323, 0.78984831])) + assert_almost_equal(kaiser(5, 1.56789), + np.array([0.58285404, 0.88409679, 1., + 0.88409679, 0.58285404])) + + def test_int_beta(self): + kaiser(3, 4) + + +class TestMsort: + + def test_simple(self): + A = np.array([[0.44567325, 0.79115165, 0.54900530], + [0.36844147, 0.37325583, 0.96098397], + [0.64864341, 0.52929049, 0.39172155]]) + assert_almost_equal( + msort(A), + np.array([[0.36844147, 0.37325583, 0.39172155], + [0.44567325, 0.52929049, 0.54900530], + [0.64864341, 0.79115165, 0.96098397]])) + + +class TestMeshgrid: + + def test_simple(self): + [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7]) + assert_array_equal(X, np.array([[1, 2, 3], + [1, 2, 3], + [1, 2, 3], + [1, 2, 3]])) + assert_array_equal(Y, np.array([[4, 4, 4], + [5, 5, 5], + [6, 6, 6], + [7, 7, 7]])) + + def test_single_input(self): + [X] = meshgrid([1, 2, 3, 4]) + assert_array_equal(X, np.array([1, 2, 3, 4])) + + def test_no_input(self): + args = [] + assert_array_equal([], meshgrid(*args)) + assert_array_equal([], meshgrid(*args, copy=False)) + + def test_indexing(self): + x = [1, 2, 3] + y = [4, 5, 6, 7] + [X, Y] = meshgrid(x, y, indexing='ij') + assert_array_equal(X, np.array([[1, 1, 1, 1], + [2, 2, 2, 2], + [3, 3, 3, 3]])) + assert_array_equal(Y, np.array([[4, 5, 6, 7], + [4, 5, 6, 7], + [4, 5, 6, 7]])) + + # Test expected shapes: + z = [8, 9] + assert_(meshgrid(x, y)[0].shape == (4, 3)) + assert_(meshgrid(x, y, indexing='ij')[0].shape == (3, 4)) + assert_(meshgrid(x, y, z)[0].shape == (4, 3, 2)) + assert_(meshgrid(x, y, z, indexing='ij')[0].shape == (3, 4, 2)) + + assert_raises(ValueError, meshgrid, x, y, indexing='notvalid') + + def test_sparse(self): + [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7], sparse=True) + assert_array_equal(X, np.array([[1, 2, 3]])) + assert_array_equal(Y, np.array([[4], [5], [6], [7]])) + + def test_invalid_arguments(self): + # Test that meshgrid complains about invalid arguments + # Regression test for issue #4755: + # https://github.com/numpy/numpy/issues/4755 + assert_raises(TypeError, meshgrid, + [1, 2, 3], [4, 5, 6, 7], indices='ij') + + def test_return_type(self): + # Test for appropriate dtype in returned arrays. 
+ # Regression test for issue #5297 + # https://github.com/numpy/numpy/issues/5297 + x = np.arange(0, 10, dtype=np.float32) + y = np.arange(10, 20, dtype=np.float64) + + X, Y = np.meshgrid(x,y) + + assert_(X.dtype == x.dtype) + assert_(Y.dtype == y.dtype) + + # copy + X, Y = np.meshgrid(x,y, copy=True) + + assert_(X.dtype == x.dtype) + assert_(Y.dtype == y.dtype) + + # sparse + X, Y = np.meshgrid(x,y, sparse=True) + + assert_(X.dtype == x.dtype) + assert_(Y.dtype == y.dtype) + + def test_writeback(self): + # Issue 8561 + X = np.array([1.1, 2.2]) + Y = np.array([3.3, 4.4]) + x, y = np.meshgrid(X, Y, sparse=False, copy=True) + + x[0, :] = 0 + assert_equal(x[0, :], 0) + assert_equal(x[1, :], X) + + def test_nd_shape(self): + a, b, c, d, e = np.meshgrid(*([0] * i for i in range(1, 6))) + expected_shape = (2, 1, 3, 4, 5) + assert_equal(a.shape, expected_shape) + assert_equal(b.shape, expected_shape) + assert_equal(c.shape, expected_shape) + assert_equal(d.shape, expected_shape) + assert_equal(e.shape, expected_shape) + + def test_nd_values(self): + a, b, c = np.meshgrid([0], [1, 2], [3, 4, 5]) + assert_equal(a, [[[0, 0, 0]], [[0, 0, 0]]]) + assert_equal(b, [[[1, 1, 1]], [[2, 2, 2]]]) + assert_equal(c, [[[3, 4, 5]], [[3, 4, 5]]]) + + def test_nd_indexing(self): + a, b, c = np.meshgrid([0], [1, 2], [3, 4, 5], indexing='ij') + assert_equal(a, [[[0, 0, 0], [0, 0, 0]]]) + assert_equal(b, [[[1, 1, 1], [2, 2, 2]]]) + assert_equal(c, [[[3, 4, 5], [3, 4, 5]]]) + + +class TestPiecewise: + + def test_simple(self): + # Condition is single bool list + x = piecewise([0, 0], [True, False], [1]) + assert_array_equal(x, [1, 0]) + + # List of conditions: single bool list + x = piecewise([0, 0], [[True, False]], [1]) + assert_array_equal(x, [1, 0]) + + # Conditions is single bool array + x = piecewise([0, 0], np.array([True, False]), [1]) + assert_array_equal(x, [1, 0]) + + # Condition is single int array + x = piecewise([0, 0], np.array([1, 0]), [1]) + assert_array_equal(x, [1, 0]) + + # List of conditions: int array + x = piecewise([0, 0], [np.array([1, 0])], [1]) + assert_array_equal(x, [1, 0]) + + x = piecewise([0, 0], [[False, True]], [lambda x:-1]) + assert_array_equal(x, [0, -1]) + + assert_raises_regex(ValueError, '1 or 2 functions are expected', + piecewise, [0, 0], [[False, True]], []) + assert_raises_regex(ValueError, '1 or 2 functions are expected', + piecewise, [0, 0], [[False, True]], [1, 2, 3]) + + def test_two_conditions(self): + x = piecewise([1, 2], [[True, False], [False, True]], [3, 4]) + assert_array_equal(x, [3, 4]) + + def test_scalar_domains_three_conditions(self): + x = piecewise(3, [True, False, False], [4, 2, 0]) + assert_equal(x, 4) + + def test_default(self): + # No value specified for x[1], should be 0 + x = piecewise([1, 2], [True, False], [2]) + assert_array_equal(x, [2, 0]) + + # Should set x[1] to 3 + x = piecewise([1, 2], [True, False], [2, 3]) + assert_array_equal(x, [2, 3]) + + def test_0d(self): + x = np.array(3) + y = piecewise(x, x > 3, [4, 0]) + assert_(y.ndim == 0) + assert_(y == 0) + + x = 5 + y = piecewise(x, [True, False], [1, 0]) + assert_(y.ndim == 0) + assert_(y == 1) + + # With 3 ranges (It was failing, before) + y = piecewise(x, [False, False, True], [1, 2, 3]) + assert_array_equal(y, 3) + + def test_0d_comparison(self): + x = 3 + y = piecewise(x, [x <= 3, x > 3], [4, 0]) # Should succeed. 
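+ # For x = 3 the first condition (x <= 3) is true, so piecewise selects the + # first function value, 4.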
+ assert_equal(y, 4) + + # With 3 ranges (It was failing, before) + x = 4 + y = piecewise(x, [x <= 3, (x > 3) * (x <= 5), x > 5], [1, 2, 3]) + assert_array_equal(y, 2) + + assert_raises_regex(ValueError, '2 or 3 functions are expected', + piecewise, x, [x <= 3, x > 3], [1]) + assert_raises_regex(ValueError, '2 or 3 functions are expected', + piecewise, x, [x <= 3, x > 3], [1, 1, 1, 1]) + + def test_0d_0d_condition(self): + x = np.array(3) + c = np.array(x > 3) + y = piecewise(x, [c], [1, 2]) + assert_equal(y, 2) + + def test_multidimensional_extrafunc(self): + x = np.array([[-2.5, -1.5, -0.5], + [0.5, 1.5, 2.5]]) + y = piecewise(x, [x < 0, x >= 2], [-1, 1, 3]) + assert_array_equal(y, np.array([[-1., -1., -1.], + [3., 3., 1.]])) + + def test_subclasses(self): + class subclass(np.ndarray): + pass + x = np.arange(5.).view(subclass) + r = piecewise(x, [x<2., x>=4], [-1., 1., 0.]) + assert_equal(type(r), subclass) + assert_equal(r, [-1., -1., 0., 0., 1.]) + + +class TestBincount: + + def test_simple(self): + y = np.bincount(np.arange(4)) + assert_array_equal(y, np.ones(4)) + + def test_simple2(self): + y = np.bincount(np.array([1, 5, 2, 4, 1])) + assert_array_equal(y, np.array([0, 2, 1, 0, 1, 1])) + + def test_simple_weight(self): + x = np.arange(4) + w = np.array([0.2, 0.3, 0.5, 0.1]) + y = np.bincount(x, w) + assert_array_equal(y, w) + + def test_simple_weight2(self): + x = np.array([1, 2, 4, 5, 2]) + w = np.array([0.2, 0.3, 0.5, 0.1, 0.2]) + y = np.bincount(x, w) + assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1])) + + def test_with_minlength(self): + x = np.array([0, 1, 0, 1, 1]) + y = np.bincount(x, minlength=3) + assert_array_equal(y, np.array([2, 3, 0])) + x = [] + y = np.bincount(x, minlength=0) + assert_array_equal(y, np.array([])) + + def test_with_minlength_smaller_than_maxvalue(self): + x = np.array([0, 1, 1, 2, 2, 3, 3]) + y = np.bincount(x, minlength=2) + assert_array_equal(y, np.array([1, 2, 2, 2])) + y = np.bincount(x, minlength=0) + assert_array_equal(y, np.array([1, 2, 2, 2])) + + def test_with_minlength_and_weights(self): + x = np.array([1, 2, 4, 5, 2]) + w = np.array([0.2, 0.3, 0.5, 0.1, 0.2]) + y = np.bincount(x, w, 8) + assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1, 0, 0])) + + def test_empty(self): + x = np.array([], dtype=int) + y = np.bincount(x) + assert_array_equal(x, y) + + def test_empty_with_minlength(self): + x = np.array([], dtype=int) + y = np.bincount(x, minlength=5) + assert_array_equal(y, np.zeros(5, dtype=int)) + + def test_with_incorrect_minlength(self): + x = np.array([], dtype=int) + assert_raises_regex(TypeError, + "'str' object cannot be interpreted", + lambda: np.bincount(x, minlength="foobar")) + assert_raises_regex(ValueError, + "must not be negative", + lambda: np.bincount(x, minlength=-1)) + + x = np.arange(5) + assert_raises_regex(TypeError, + "'str' object cannot be interpreted", + lambda: np.bincount(x, minlength="foobar")) + assert_raises_regex(ValueError, + "must not be negative", + lambda: np.bincount(x, minlength=-1)) + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_dtype_reference_leaks(self): + # gh-6805 + intp_refcount = sys.getrefcount(np.dtype(np.intp)) + double_refcount = sys.getrefcount(np.dtype(np.double)) + + for j in range(10): + np.bincount([1, 2, 3]) + assert_equal(sys.getrefcount(np.dtype(np.intp)), intp_refcount) + assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount) + + for j in range(10): + np.bincount([1, 2, 3], [4, 5, 6]) + 
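+ # The refcounts of the cached np.intp and np.double dtype singletons must + # return to their baselines; any growth means bincount leaked a reference + # (gh-6805).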
assert_equal(sys.getrefcount(np.dtype(np.intp)), intp_refcount) + assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount) + + @pytest.mark.parametrize("vals", [[[2, 2]], 2]) + def test_error_not_1d(self, vals): + # Test that values has to be 1-D (both as array and nested list) + vals_arr = np.asarray(vals) + with assert_raises(ValueError): + np.bincount(vals_arr) + with assert_raises(ValueError): + np.bincount(vals) + + +class TestInterp: + + def test_exceptions(self): + assert_raises(ValueError, interp, 0, [], []) + assert_raises(ValueError, interp, 0, [0], [1, 2]) + assert_raises(ValueError, interp, 0, [0, 1], [1, 2], period=0) + assert_raises(ValueError, interp, 0, [], [], period=360) + assert_raises(ValueError, interp, 0, [0], [1, 2], period=360) + + def test_basic(self): + x = np.linspace(0, 1, 5) + y = np.linspace(0, 1, 5) + x0 = np.linspace(0, 1, 50) + assert_almost_equal(np.interp(x0, x, y), x0) + + def test_right_left_behavior(self): + # Needs range of sizes to test different code paths. + # size ==1 is special cased, 1 < size < 5 is linear search, and + # size >= 5 goes through local search and possibly binary search. + for size in range(1, 10): + xp = np.arange(size, dtype=np.double) + yp = np.ones(size, dtype=np.double) + incpts = np.array([-1, 0, size - 1, size], dtype=np.double) + decpts = incpts[::-1] + + incres = interp(incpts, xp, yp) + decres = interp(decpts, xp, yp) + inctgt = np.array([1, 1, 1, 1], dtype=float) + dectgt = inctgt[::-1] + assert_equal(incres, inctgt) + assert_equal(decres, dectgt) + + incres = interp(incpts, xp, yp, left=0) + decres = interp(decpts, xp, yp, left=0) + inctgt = np.array([0, 1, 1, 1], dtype=float) + dectgt = inctgt[::-1] + assert_equal(incres, inctgt) + assert_equal(decres, dectgt) + + incres = interp(incpts, xp, yp, right=2) + decres = interp(decpts, xp, yp, right=2) + inctgt = np.array([1, 1, 1, 2], dtype=float) + dectgt = inctgt[::-1] + assert_equal(incres, inctgt) + assert_equal(decres, dectgt) + + incres = interp(incpts, xp, yp, left=0, right=2) + decres = interp(decpts, xp, yp, left=0, right=2) + inctgt = np.array([0, 1, 1, 2], dtype=float) + dectgt = inctgt[::-1] + assert_equal(incres, inctgt) + assert_equal(decres, dectgt) + + def test_scalar_interpolation_point(self): + x = np.linspace(0, 1, 5) + y = np.linspace(0, 1, 5) + x0 = 0 + assert_almost_equal(np.interp(x0, x, y), x0) + x0 = .3 + assert_almost_equal(np.interp(x0, x, y), x0) + x0 = np.float32(.3) + assert_almost_equal(np.interp(x0, x, y), x0) + x0 = np.float64(.3) + assert_almost_equal(np.interp(x0, x, y), x0) + x0 = np.nan + assert_almost_equal(np.interp(x0, x, y), x0) + + def test_non_finite_behavior_exact_x(self): + x = [1, 2, 2.5, 3, 4] + xp = [1, 2, 3, 4] + fp = [1, 2, np.inf, 4] + assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.inf, np.inf, 4]) + fp = [1, 2, np.nan, 4] + assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.nan, np.nan, 4]) + + @pytest.fixture(params=[ + lambda x: np.float_(x), + lambda x: _make_complex(x, 0), + lambda x: _make_complex(0, x), + lambda x: _make_complex(x, np.multiply(x, -2)) + ], ids=[ + 'real', + 'complex-real', + 'complex-imag', + 'complex-both' + ]) + def sc(self, request): + """ scale function used by the below tests """ + return request.param + + def test_non_finite_any_nan(self, sc): + """ test that nans are propagated """ + assert_equal(np.interp(0.5, [np.nan, 1], sc([ 0, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, np.nan], sc([ 0, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([np.nan, 10])), 
sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([ 0, np.nan])), sc(np.nan)) + + def test_non_finite_inf(self, sc): + """ Test that interp between opposite infs gives nan """ + assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([ 0, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([-np.inf, +np.inf])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([+np.inf, -np.inf])), sc(np.nan)) + + # unless the y values are equal + assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([ 10, 10])), sc(10)) + + def test_non_finite_half_inf_xf(self, sc): + """ Test that interp where both axes have a bound at inf gives nan """ + assert_equal(np.interp(0.5, [-np.inf, 1], sc([-np.inf, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [-np.inf, 1], sc([+np.inf, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [-np.inf, 1], sc([ 0, -np.inf])), sc(np.nan)) + assert_equal(np.interp(0.5, [-np.inf, 1], sc([ 0, +np.inf])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, +np.inf], sc([-np.inf, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, +np.inf], sc([+np.inf, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, +np.inf], sc([ 0, -np.inf])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, +np.inf], sc([ 0, +np.inf])), sc(np.nan)) + + def test_non_finite_half_inf_x(self, sc): + """ Test interp where the x axis has a bound at inf """ + assert_equal(np.interp(0.5, [-np.inf, -np.inf], sc([0, 10])), sc(10)) + assert_equal(np.interp(0.5, [-np.inf, 1 ], sc([0, 10])), sc(10)) + assert_equal(np.interp(0.5, [ 0, +np.inf], sc([0, 10])), sc(0)) + assert_equal(np.interp(0.5, [+np.inf, +np.inf], sc([0, 10])), sc(0)) + + def test_non_finite_half_inf_f(self, sc): + """ Test interp where the f axis has a bound at inf """ + assert_equal(np.interp(0.5, [0, 1], sc([ 0, -np.inf])), sc(-np.inf)) + assert_equal(np.interp(0.5, [0, 1], sc([ 0, +np.inf])), sc(+np.inf)) + assert_equal(np.interp(0.5, [0, 1], sc([-np.inf, 10])), sc(-np.inf)) + assert_equal(np.interp(0.5, [0, 1], sc([+np.inf, 10])), sc(+np.inf)) + assert_equal(np.interp(0.5, [0, 1], sc([-np.inf, -np.inf])), sc(-np.inf)) + assert_equal(np.interp(0.5, [0, 1], sc([+np.inf, +np.inf])), sc(+np.inf)) + + def test_complex_interp(self): + # test complex interpolation + x = np.linspace(0, 1, 5) + y = np.linspace(0, 1, 5) + (1 + np.linspace(0, 1, 5))*1.0j + x0 = 0.3 + y0 = x0 + (1+x0)*1.0j + assert_almost_equal(np.interp(x0, x, y), y0) + # test complex left and right + x0 = -1 + left = 2 + 3.0j + assert_almost_equal(np.interp(x0, x, y, left=left), left) + x0 = 2.0 + right = 2 + 3.0j + assert_almost_equal(np.interp(x0, x, y, right=right), right) + # test complex non finite + x = [1, 2, 2.5, 3, 4] + xp = [1, 2, 3, 4] + fp = [1, 2+1j, np.inf, 4] + y = [1, 2+1j, np.inf+0.5j, np.inf, 4] + assert_almost_equal(np.interp(x, xp, fp), y) + # test complex periodic + x = [-180, -170, -185, 185, -10, -5, 0, 365] + xp = [190, -190, 350, -350] + fp = [5+1.0j, 10+2j, 3+3j, 4+4j] + y = [7.5+1.5j, 5.+1.0j, 8.75+1.75j, 6.25+1.25j, 3.+3j, 3.25+3.25j, + 3.5+3.5j, 3.75+3.75j] + assert_almost_equal(np.interp(x, xp, fp, period=360), y) + + def test_zero_dimensional_interpolation_point(self): + x = np.linspace(0, 1, 5) + y = np.linspace(0, 1, 5) + x0 = np.array(.3) + assert_almost_equal(np.interp(x0, x, y), x0) + + xp = np.array([0, 2, 4]) + fp = np.array([1, -1, 1]) + + actual = np.interp(np.array(1), xp, fp) + assert_equal(actual, 0) + assert_(isinstance(actual, np.float64)) + + actual = np.interp(np.array(4.5), xp, fp, period=4) + assert_equal(actual, 0.5) + 
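+ # A 0-d array input should come back as a numpy scalar (np.float64), not as + # a 0-d array.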
assert_(isinstance(actual, np.float64)) + + def test_if_len_x_is_small(self): + xp = np.arange(0, 10, 0.0001) + fp = np.sin(xp) + assert_almost_equal(np.interp(np.pi, xp, fp), 0.0) + + def test_period(self): + x = [-180, -170, -185, 185, -10, -5, 0, 365] + xp = [190, -190, 350, -350] + fp = [5, 10, 3, 4] + y = [7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75] + assert_almost_equal(np.interp(x, xp, fp, period=360), y) + x = np.array(x, order='F').reshape(2, -1) + y = np.array(y, order='C').reshape(2, -1) + assert_almost_equal(np.interp(x, xp, fp, period=360), y) + + +def compare_results(res, desired): + for i in range(len(desired)): + assert_array_equal(res[i], desired[i]) + + +class TestPercentile: + + def test_basic(self): + x = np.arange(8) * 0.5 + assert_equal(np.percentile(x, 0), 0.) + assert_equal(np.percentile(x, 100), 3.5) + assert_equal(np.percentile(x, 50), 1.75) + x[1] = np.nan + assert_equal(np.percentile(x, 0), np.nan) + assert_equal(np.percentile(x, 0, interpolation='nearest'), np.nan) + + def test_fraction(self): + x = [Fraction(i, 2) for i in range(8)] + + p = np.percentile(x, Fraction(0)) + assert_equal(p, Fraction(0)) + assert_equal(type(p), Fraction) + + p = np.percentile(x, Fraction(100)) + assert_equal(p, Fraction(7, 2)) + assert_equal(type(p), Fraction) + + p = np.percentile(x, Fraction(50)) + assert_equal(p, Fraction(7, 4)) + assert_equal(type(p), Fraction) + + p = np.percentile(x, [Fraction(50)]) + assert_equal(p, np.array([Fraction(7, 4)])) + assert_equal(type(p), np.ndarray) + + def test_api(self): + d = np.ones(5) + np.percentile(d, 5, None, None, False) + np.percentile(d, 5, None, None, False, 'linear') + o = np.ones((1,)) + np.percentile(d, 5, None, o, False, 'linear') + + def test_2D(self): + x = np.array([[1, 1, 1], + [1, 1, 1], + [4, 4, 3], + [1, 1, 1], + [1, 1, 1]]) + assert_array_equal(np.percentile(x, 50, axis=0), [1, 1, 1]) + + def test_linear(self): + + # Test defaults + assert_equal(np.percentile(range(10), 50), 4.5) + + # explicitly specify interpolation_method 'linear' (the default) + assert_equal(np.percentile(range(10), 50, + interpolation='linear'), 4.5) + + def test_lower_higher(self): + + # interpolation_method 'lower'/'higher' + assert_equal(np.percentile(range(10), 50, + interpolation='lower'), 4) + assert_equal(np.percentile(range(10), 50, + interpolation='higher'), 5) + + def test_midpoint(self): + assert_equal(np.percentile(range(10), 51, + interpolation='midpoint'), 4.5) + assert_equal(np.percentile(range(11), 51, + interpolation='midpoint'), 5.5) + assert_equal(np.percentile(range(11), 50, + interpolation='midpoint'), 5) + + def test_nearest(self): + assert_equal(np.percentile(range(10), 51, + interpolation='nearest'), 5) + assert_equal(np.percentile(range(10), 49, + interpolation='nearest'), 4) + + def test_sequence(self): + x = np.arange(8) * 0.5 + assert_equal(np.percentile(x, [0, 100, 50]), [0, 3.5, 1.75]) + + def test_axis(self): + x = np.arange(12).reshape(3, 4) + + assert_equal(np.percentile(x, (25, 50, 100)), [2.75, 5.5, 11.0]) + + r0 = [[2, 3, 4, 5], [4, 5, 6, 7], [8, 9, 10, 11]] + assert_equal(np.percentile(x, (25, 50, 100), axis=0), r0) + + r1 = [[0.75, 1.5, 3], [4.75, 5.5, 7], [8.75, 9.5, 11]] + assert_equal(np.percentile(x, (25, 50, 100), axis=1), np.array(r1).T) + + # ensure qth axis is always first as with np.array(old_percentile(..)) + x = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6) + assert_equal(np.percentile(x, (25, 50)).shape, (2,)) + assert_equal(np.percentile(x, (25, 50, 75)).shape, (3,)) + assert_equal(np.percentile(x, 
(25, 50), axis=0).shape, (2, 4, 5, 6)) + assert_equal(np.percentile(x, (25, 50), axis=1).shape, (2, 3, 5, 6)) + assert_equal(np.percentile(x, (25, 50), axis=2).shape, (2, 3, 4, 6)) + assert_equal(np.percentile(x, (25, 50), axis=3).shape, (2, 3, 4, 5)) + assert_equal( + np.percentile(x, (25, 50, 75), axis=1).shape, (3, 3, 5, 6)) + assert_equal(np.percentile(x, (25, 50), + interpolation="higher").shape, (2,)) + assert_equal(np.percentile(x, (25, 50, 75), + interpolation="higher").shape, (3,)) + assert_equal(np.percentile(x, (25, 50), axis=0, + interpolation="higher").shape, (2, 4, 5, 6)) + assert_equal(np.percentile(x, (25, 50), axis=1, + interpolation="higher").shape, (2, 3, 5, 6)) + assert_equal(np.percentile(x, (25, 50), axis=2, + interpolation="higher").shape, (2, 3, 4, 6)) + assert_equal(np.percentile(x, (25, 50), axis=3, + interpolation="higher").shape, (2, 3, 4, 5)) + assert_equal(np.percentile(x, (25, 50, 75), axis=1, + interpolation="higher").shape, (3, 3, 5, 6)) + + def test_scalar_q(self): + # test for no empty dimensions for compatibility with old percentile + x = np.arange(12).reshape(3, 4) + assert_equal(np.percentile(x, 50), 5.5) + assert_(np.isscalar(np.percentile(x, 50))) + r0 = np.array([4., 5., 6., 7.]) + assert_equal(np.percentile(x, 50, axis=0), r0) + assert_equal(np.percentile(x, 50, axis=0).shape, r0.shape) + r1 = np.array([1.5, 5.5, 9.5]) + assert_almost_equal(np.percentile(x, 50, axis=1), r1) + assert_equal(np.percentile(x, 50, axis=1).shape, r1.shape) + + out = np.empty(1) + assert_equal(np.percentile(x, 50, out=out), 5.5) + assert_equal(out, 5.5) + out = np.empty(4) + assert_equal(np.percentile(x, 50, axis=0, out=out), r0) + assert_equal(out, r0) + out = np.empty(3) + assert_equal(np.percentile(x, 50, axis=1, out=out), r1) + assert_equal(out, r1) + + # test for no empty dimensions for compatibility with old percentile + x = np.arange(12).reshape(3, 4) + assert_equal(np.percentile(x, 50, interpolation='lower'), 5.) 
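+ # interpolation='lower' rounds the interpolation position down to an actual + # data point, giving 5.0 here instead of the linear-interpolated 5.5.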
+ assert_(np.isscalar(np.percentile(x, 50))) + r0 = np.array([4., 5., 6., 7.]) + c0 = np.percentile(x, 50, interpolation='lower', axis=0) + assert_equal(c0, r0) + assert_equal(c0.shape, r0.shape) + r1 = np.array([1., 5., 9.]) + c1 = np.percentile(x, 50, interpolation='lower', axis=1) + assert_almost_equal(c1, r1) + assert_equal(c1.shape, r1.shape) + + out = np.empty((), dtype=x.dtype) + c = np.percentile(x, 50, interpolation='lower', out=out) + assert_equal(c, 5) + assert_equal(out, 5) + out = np.empty(4, dtype=x.dtype) + c = np.percentile(x, 50, interpolation='lower', axis=0, out=out) + assert_equal(c, r0) + assert_equal(out, r0) + out = np.empty(3, dtype=x.dtype) + c = np.percentile(x, 50, interpolation='lower', axis=1, out=out) + assert_equal(c, r1) + assert_equal(out, r1) + + def test_exception(self): + assert_raises(ValueError, np.percentile, [1, 2], 56, + interpolation='foobar') + assert_raises(ValueError, np.percentile, [1], 101) + assert_raises(ValueError, np.percentile, [1], -1) + assert_raises(ValueError, np.percentile, [1], list(range(50)) + [101]) + assert_raises(ValueError, np.percentile, [1], list(range(50)) + [-0.1]) + + def test_percentile_list(self): + assert_equal(np.percentile([1, 2, 3], 0), 1) + + def test_percentile_out(self): + x = np.array([1, 2, 3]) + y = np.zeros((3,)) + p = (1, 2, 3) + np.percentile(x, p, out=y) + assert_equal(y, np.percentile(x, p)) + + x = np.array([[1, 2, 3], + [4, 5, 6]]) + + y = np.zeros((3, 3)) + np.percentile(x, p, axis=0, out=y) + assert_equal(y, np.percentile(x, p, axis=0)) + + y = np.zeros((3, 2)) + np.percentile(x, p, axis=1, out=y) + assert_equal(y, np.percentile(x, p, axis=1)) + + x = np.arange(12).reshape(3, 4) + # q.dim > 1, float + r0 = np.array([[2., 3., 4., 5.], [4., 5., 6., 7.]]) + out = np.empty((2, 4)) + assert_equal(np.percentile(x, (25, 50), axis=0, out=out), r0) + assert_equal(out, r0) + r1 = np.array([[0.75, 4.75, 8.75], [1.5, 5.5, 9.5]]) + out = np.empty((2, 3)) + assert_equal(np.percentile(x, (25, 50), axis=1, out=out), r1) + assert_equal(out, r1) + + # q.dim > 1, int + r0 = np.array([[0, 1, 2, 3], [4, 5, 6, 7]]) + out = np.empty((2, 4), dtype=x.dtype) + c = np.percentile(x, (25, 50), interpolation='lower', axis=0, out=out) + assert_equal(c, r0) + assert_equal(out, r0) + r1 = np.array([[0, 4, 8], [1, 5, 9]]) + out = np.empty((2, 3), dtype=x.dtype) + c = np.percentile(x, (25, 50), interpolation='lower', axis=1, out=out) + assert_equal(c, r1) + assert_equal(out, r1) + + def test_percentile_empty_dim(self): + # empty dims are preserved + d = np.arange(11 * 2).reshape(11, 1, 2, 1) + assert_array_equal(np.percentile(d, 50, axis=0).shape, (1, 2, 1)) + assert_array_equal(np.percentile(d, 50, axis=1).shape, (11, 2, 1)) + assert_array_equal(np.percentile(d, 50, axis=2).shape, (11, 1, 1)) + assert_array_equal(np.percentile(d, 50, axis=3).shape, (11, 1, 2)) + assert_array_equal(np.percentile(d, 50, axis=-1).shape, (11, 1, 2)) + assert_array_equal(np.percentile(d, 50, axis=-2).shape, (11, 1, 1)) + assert_array_equal(np.percentile(d, 50, axis=-3).shape, (11, 2, 1)) + assert_array_equal(np.percentile(d, 50, axis=-4).shape, (1, 2, 1)) + + assert_array_equal(np.percentile(d, 50, axis=2, + interpolation='midpoint').shape, + (11, 1, 1)) + assert_array_equal(np.percentile(d, 50, axis=-2, + interpolation='midpoint').shape, + (11, 1, 1)) + + assert_array_equal(np.array(np.percentile(d, [10, 50], axis=0)).shape, + (2, 1, 2, 1)) + assert_array_equal(np.array(np.percentile(d, [10, 50], axis=1)).shape, + (2, 11, 2, 1)) + 
assert_array_equal(np.array(np.percentile(d, [10, 50], axis=2)).shape, + (2, 11, 1, 1)) + assert_array_equal(np.array(np.percentile(d, [10, 50], axis=3)).shape, + (2, 11, 1, 2)) + + def test_percentile_no_overwrite(self): + a = np.array([2, 3, 4, 1]) + np.percentile(a, [50], overwrite_input=False) + assert_equal(a, np.array([2, 3, 4, 1])) + + a = np.array([2, 3, 4, 1]) + np.percentile(a, [50]) + assert_equal(a, np.array([2, 3, 4, 1])) + + def test_no_p_overwrite(self): + p = np.linspace(0., 100., num=5) + np.percentile(np.arange(100.), p, interpolation="midpoint") + assert_array_equal(p, np.linspace(0., 100., num=5)) + p = np.linspace(0., 100., num=5).tolist() + np.percentile(np.arange(100.), p, interpolation="midpoint") + assert_array_equal(p, np.linspace(0., 100., num=5).tolist()) + + def test_percentile_overwrite(self): + a = np.array([2, 3, 4, 1]) + b = np.percentile(a, [50], overwrite_input=True) + assert_equal(b, np.array([2.5])) + + b = np.percentile([2, 3, 4, 1], [50], overwrite_input=True) + assert_equal(b, np.array([2.5])) + + def test_extended_axis(self): + o = np.random.normal(size=(71, 23)) + x = np.dstack([o] * 10) + assert_equal(np.percentile(x, 30, axis=(0, 1)), np.percentile(o, 30)) + x = np.moveaxis(x, -1, 0) + assert_equal(np.percentile(x, 30, axis=(-2, -1)), np.percentile(o, 30)) + x = x.swapaxes(0, 1).copy() + assert_equal(np.percentile(x, 30, axis=(0, -1)), np.percentile(o, 30)) + x = x.swapaxes(0, 1).copy() + + assert_equal(np.percentile(x, [25, 60], axis=(0, 1, 2)), + np.percentile(x, [25, 60], axis=None)) + assert_equal(np.percentile(x, [25, 60], axis=(0,)), + np.percentile(x, [25, 60], axis=0)) + + d = np.arange(3 * 5 * 7 * 11).reshape((3, 5, 7, 11)) + np.random.shuffle(d.ravel()) + assert_equal(np.percentile(d, 25, axis=(0, 1, 2))[0], + np.percentile(d[:,:,:, 0].flatten(), 25)) + assert_equal(np.percentile(d, [10, 90], axis=(0, 1, 3))[:, 1], + np.percentile(d[:,:, 1,:].flatten(), [10, 90])) + assert_equal(np.percentile(d, 25, axis=(3, 1, -4))[2], + np.percentile(d[:,:, 2,:].flatten(), 25)) + assert_equal(np.percentile(d, 25, axis=(3, 1, 2))[2], + np.percentile(d[2,:,:,:].flatten(), 25)) + assert_equal(np.percentile(d, 25, axis=(3, 2))[2, 1], + np.percentile(d[2, 1,:,:].flatten(), 25)) + assert_equal(np.percentile(d, 25, axis=(1, -2))[2, 1], + np.percentile(d[2,:,:, 1].flatten(), 25)) + assert_equal(np.percentile(d, 25, axis=(1, 3))[2, 2], + np.percentile(d[2,:, 2,:].flatten(), 25)) + + def test_extended_axis_invalid(self): + d = np.ones((3, 5, 7, 11)) + assert_raises(np.AxisError, np.percentile, d, axis=-5, q=25) + assert_raises(np.AxisError, np.percentile, d, axis=(0, -5), q=25) + assert_raises(np.AxisError, np.percentile, d, axis=4, q=25) + assert_raises(np.AxisError, np.percentile, d, axis=(0, 4), q=25) + # each of these refers to the same axis twice + assert_raises(ValueError, np.percentile, d, axis=(1, 1), q=25) + assert_raises(ValueError, np.percentile, d, axis=(-1, -1), q=25) + assert_raises(ValueError, np.percentile, d, axis=(3, -1), q=25) + + def test_keepdims(self): + d = np.ones((3, 5, 7, 11)) + assert_equal(np.percentile(d, 7, axis=None, keepdims=True).shape, + (1, 1, 1, 1)) + assert_equal(np.percentile(d, 7, axis=(0, 1), keepdims=True).shape, + (1, 1, 7, 11)) + assert_equal(np.percentile(d, 7, axis=(0, 3), keepdims=True).shape, + (1, 5, 7, 1)) + assert_equal(np.percentile(d, 7, axis=(1,), keepdims=True).shape, + (3, 1, 7, 11)) + assert_equal(np.percentile(d, 7, (0, 1, 2, 3), keepdims=True).shape, + (1, 1, 1, 1)) + assert_equal(np.percentile(d, 7, 
axis=(0, 1, 3), keepdims=True).shape, + (1, 1, 7, 1)) + + assert_equal(np.percentile(d, [1, 7], axis=(0, 1, 3), + keepdims=True).shape, (2, 1, 1, 7, 1)) + assert_equal(np.percentile(d, [1, 7], axis=(0, 3), + keepdims=True).shape, (2, 1, 5, 7, 1)) + + def test_out(self): + o = np.zeros((4,)) + d = np.ones((3, 4)) + assert_equal(np.percentile(d, 0, 0, out=o), o) + assert_equal(np.percentile(d, 0, 0, interpolation='nearest', out=o), o) + o = np.zeros((3,)) + assert_equal(np.percentile(d, 1, 1, out=o), o) + assert_equal(np.percentile(d, 1, 1, interpolation='nearest', out=o), o) + + o = np.zeros(()) + assert_equal(np.percentile(d, 2, out=o), o) + assert_equal(np.percentile(d, 2, interpolation='nearest', out=o), o) + + def test_out_nan(self): + with warnings.catch_warnings(record=True): + warnings.filterwarnings('always', '', RuntimeWarning) + o = np.zeros((4,)) + d = np.ones((3, 4)) + d[2, 1] = np.nan + assert_equal(np.percentile(d, 0, 0, out=o), o) + assert_equal( + np.percentile(d, 0, 0, interpolation='nearest', out=o), o) + o = np.zeros((3,)) + assert_equal(np.percentile(d, 1, 1, out=o), o) + assert_equal( + np.percentile(d, 1, 1, interpolation='nearest', out=o), o) + o = np.zeros(()) + assert_equal(np.percentile(d, 1, out=o), o) + assert_equal( + np.percentile(d, 1, interpolation='nearest', out=o), o) + + def test_nan_behavior(self): + a = np.arange(24, dtype=float) + a[2] = np.nan + assert_equal(np.percentile(a, 0.3), np.nan) + assert_equal(np.percentile(a, 0.3, axis=0), np.nan) + assert_equal(np.percentile(a, [0.3, 0.6], axis=0), + np.array([np.nan] * 2)) + + a = np.arange(24, dtype=float).reshape(2, 3, 4) + a[1, 2, 3] = np.nan + a[1, 1, 2] = np.nan + + # no axis + assert_equal(np.percentile(a, 0.3), np.nan) + assert_equal(np.percentile(a, 0.3).ndim, 0) + + # axis0 zerod + b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, 0) + b[2, 3] = np.nan + b[1, 2] = np.nan + assert_equal(np.percentile(a, 0.3, 0), b) + + # axis0 not zerod + b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), + [0.3, 0.6], 0) + b[:, 2, 3] = np.nan + b[:, 1, 2] = np.nan + assert_equal(np.percentile(a, [0.3, 0.6], 0), b) + + # axis1 zerod + b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, 1) + b[1, 3] = np.nan + b[1, 2] = np.nan + assert_equal(np.percentile(a, 0.3, 1), b) + # axis1 not zerod + b = np.percentile( + np.arange(24, dtype=float).reshape(2, 3, 4), [0.3, 0.6], 1) + b[:, 1, 3] = np.nan + b[:, 1, 2] = np.nan + assert_equal(np.percentile(a, [0.3, 0.6], 1), b) + + # axis02 zerod + b = np.percentile( + np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, (0, 2)) + b[1] = np.nan + b[2] = np.nan + assert_equal(np.percentile(a, 0.3, (0, 2)), b) + # axis02 not zerod + b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), + [0.3, 0.6], (0, 2)) + b[:, 1] = np.nan + b[:, 2] = np.nan + assert_equal(np.percentile(a, [0.3, 0.6], (0, 2)), b) + # axis02 not zerod with nearest interpolation + b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), + [0.3, 0.6], (0, 2), interpolation='nearest') + b[:, 1] = np.nan + b[:, 2] = np.nan + assert_equal(np.percentile( + a, [0.3, 0.6], (0, 2), interpolation='nearest'), b) + + def test_nan_q(self): + # GH18830 + with pytest.raises(ValueError, match="Percentiles must be in"): + np.percentile([1, 2, 3, 4.0], np.nan) + with pytest.raises(ValueError, match="Percentiles must be in"): + np.percentile([1, 2, 3, 4.0], [np.nan]) + q = np.linspace(1.0, 99.0, 16) + q[0] = np.nan + with pytest.raises(ValueError, match="Percentiles must be 
in"): + np.percentile([1, 2, 3, 4.0], q) + +class TestQuantile: + # most of this is already tested by TestPercentile + + def test_basic(self): + x = np.arange(8) * 0.5 + assert_equal(np.quantile(x, 0), 0.) + assert_equal(np.quantile(x, 1), 3.5) + assert_equal(np.quantile(x, 0.5), 1.75) + + def test_correct_quantile_value(self): + a = np.array([True]) + tf_quant = np.quantile(True, False) + assert_equal(tf_quant, a[0]) + assert_equal(type(tf_quant), a.dtype) + a = np.array([False, True, True]) + quant_res = np.quantile(a, a) + assert_array_equal(quant_res, a) + assert_equal(a.dtype, quant_res.dtype) + + def test_fraction(self): + # fractional input, integral quantile + x = [Fraction(i, 2) for i in range(8)] + + q = np.quantile(x, 0) + assert_equal(q, 0) + assert_equal(type(q), Fraction) + + q = np.quantile(x, 1) + assert_equal(q, Fraction(7, 2)) + assert_equal(type(q), Fraction) + + q = np.quantile(x, Fraction(1, 2)) + assert_equal(q, Fraction(7, 4)) + assert_equal(type(q), Fraction) + + q = np.quantile(x, [Fraction(1, 2)]) + assert_equal(q, np.array([Fraction(7, 4)])) + assert_equal(type(q), np.ndarray) + + q = np.quantile(x, [[Fraction(1, 2)]]) + assert_equal(q, np.array([[Fraction(7, 4)]])) + assert_equal(type(q), np.ndarray) + + # repeat with integral input but fractional quantile + x = np.arange(8) + assert_equal(np.quantile(x, Fraction(1, 2)), Fraction(7, 2)) + + def test_no_p_overwrite(self): + # this is worth retesting, because quantile does not make a copy + p0 = np.array([0, 0.75, 0.25, 0.5, 1.0]) + p = p0.copy() + np.quantile(np.arange(100.), p, interpolation="midpoint") + assert_array_equal(p, p0) + + p0 = p0.tolist() + p = p.tolist() + np.quantile(np.arange(100.), p, interpolation="midpoint") + assert_array_equal(p, p0) + + def test_quantile_monotonic(self): + # GH 14685 + # test that the return value of quantile is monotonic if p0 is ordered + p0 = np.arange(0, 1, 0.01) + quantile = np.quantile(np.array([0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 1, 1, 9, 9, 9, + 8, 8, 7]) * 0.1, p0) + assert_equal(np.sort(quantile), quantile) + + @hypothesis.given( + arr=arrays(dtype=np.float64, + shape=st.integers(min_value=3, max_value=1000), + elements=st.floats(allow_infinity=False, allow_nan=False, + min_value=-1e300, max_value=1e300))) + def test_quantile_monotonic_hypo(self, arr): + p0 = np.arange(0, 1, 0.01) + quantile = np.quantile(arr, p0) + assert_equal(np.sort(quantile), quantile) + + +class TestLerp: + @hypothesis.given(t0=st.floats(allow_nan=False, allow_infinity=False, + min_value=0, max_value=1), + t1=st.floats(allow_nan=False, allow_infinity=False, + min_value=0, max_value=1), + a = st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300), + b = st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300)) + def test_lerp_monotonic(self, t0, t1, a, b): + l0 = np.lib.function_base._lerp(a, b, t0) + l1 = np.lib.function_base._lerp(a, b, t1) + if t0 == t1 or a == b: + assert l0 == l1 # uninteresting + elif (t0 < t1) == (a < b): + assert l0 <= l1 + else: + assert l0 >= l1 + + @hypothesis.given(t=st.floats(allow_nan=False, allow_infinity=False, + min_value=0, max_value=1), + a=st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300), + b=st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300)) + def test_lerp_bounded(self, t, a, b): + if a <= b: + assert a <= np.lib.function_base._lerp(a, b, t) <= b + else: + assert b <= np.lib.function_base._lerp(a, b, t) <= a + + 
@hypothesis.given(t=st.floats(allow_nan=False, allow_infinity=False, + min_value=0, max_value=1), + a=st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300), + b=st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300)) + def test_lerp_symmetric(self, t, a, b): + # double subtraction is needed to remove the extra precision of t < 0.5 + left = np.lib.function_base._lerp(a, b, 1 - (1 - t)) + right = np.lib.function_base._lerp(b, a, 1 - t) + assert left == right + + def test_lerp_0d_inputs(self): + a = np.array(2) + b = np.array(5) + t = np.array(0.2) + assert np.lib.function_base._lerp(a, b, t) == 2.6 + + +class TestMedian: + + def test_basic(self): + a0 = np.array(1) + a1 = np.arange(2) + a2 = np.arange(6).reshape(2, 3) + assert_equal(np.median(a0), 1) + assert_allclose(np.median(a1), 0.5) + assert_allclose(np.median(a2), 2.5) + assert_allclose(np.median(a2, axis=0), [1.5, 2.5, 3.5]) + assert_equal(np.median(a2, axis=1), [1, 4]) + assert_allclose(np.median(a2, axis=None), 2.5) + + a = np.array([0.0444502, 0.0463301, 0.141249, 0.0606775]) + assert_almost_equal((a[1] + a[3]) / 2., np.median(a)) + a = np.array([0.0463301, 0.0444502, 0.141249]) + assert_equal(a[0], np.median(a)) + a = np.array([0.0444502, 0.141249, 0.0463301]) + assert_equal(a[-1], np.median(a)) + # check array scalar result + assert_equal(np.median(a).ndim, 0) + a[1] = np.nan + assert_equal(np.median(a).ndim, 0) + + def test_axis_keyword(self): + a3 = np.array([[2, 3], + [0, 1], + [6, 7], + [4, 5]]) + for a in [a3, np.random.randint(0, 100, size=(2, 3, 4))]: + orig = a.copy() + np.median(a, axis=None) + for ax in range(a.ndim): + np.median(a, axis=ax) + assert_array_equal(a, orig) + + assert_allclose(np.median(a3, axis=0), [3, 4]) + assert_allclose(np.median(a3.T, axis=1), [3, 4]) + assert_allclose(np.median(a3), 3.5) + assert_allclose(np.median(a3, axis=None), 3.5) + assert_allclose(np.median(a3.T), 3.5) + + def test_overwrite_keyword(self): + a3 = np.array([[2, 3], + [0, 1], + [6, 7], + [4, 5]]) + a0 = np.array(1) + a1 = np.arange(2) + a2 = np.arange(6).reshape(2, 3) + assert_allclose(np.median(a0.copy(), overwrite_input=True), 1) + assert_allclose(np.median(a1.copy(), overwrite_input=True), 0.5) + assert_allclose(np.median(a2.copy(), overwrite_input=True), 2.5) + assert_allclose(np.median(a2.copy(), overwrite_input=True, axis=0), + [1.5, 2.5, 3.5]) + assert_allclose( + np.median(a2.copy(), overwrite_input=True, axis=1), [1, 4]) + assert_allclose( + np.median(a2.copy(), overwrite_input=True, axis=None), 2.5) + assert_allclose( + np.median(a3.copy(), overwrite_input=True, axis=0), [3, 4]) + assert_allclose(np.median(a3.T.copy(), overwrite_input=True, axis=1), + [3, 4]) + + a4 = np.arange(3 * 4 * 5, dtype=np.float32).reshape((3, 4, 5)) + np.random.shuffle(a4.ravel()) + assert_allclose(np.median(a4, axis=None), + np.median(a4.copy(), axis=None, overwrite_input=True)) + assert_allclose(np.median(a4, axis=0), + np.median(a4.copy(), axis=0, overwrite_input=True)) + assert_allclose(np.median(a4, axis=1), + np.median(a4.copy(), axis=1, overwrite_input=True)) + assert_allclose(np.median(a4, axis=2), + np.median(a4.copy(), axis=2, overwrite_input=True)) + + def test_array_like(self): + x = [1, 2, 3] + assert_almost_equal(np.median(x), 2) + x2 = [x] + assert_almost_equal(np.median(x2), 2) + assert_allclose(np.median(x2, axis=0), x) + + def test_subclass(self): + # gh-3846 + class MySubClass(np.ndarray): + + def __new__(cls, input_array, info=None): + obj = 
np.asarray(input_array).view(cls) + obj.info = info + return obj + + def mean(self, axis=None, dtype=None, out=None): + return -7 + + a = MySubClass([1, 2, 3]) + assert_equal(np.median(a), -7) + + def test_out(self): + o = np.zeros((4,)) + d = np.ones((3, 4)) + assert_equal(np.median(d, 0, out=o), o) + o = np.zeros((3,)) + assert_equal(np.median(d, 1, out=o), o) + o = np.zeros(()) + assert_equal(np.median(d, out=o), o) + + def test_out_nan(self): + with warnings.catch_warnings(record=True): + warnings.filterwarnings('always', '', RuntimeWarning) + o = np.zeros((4,)) + d = np.ones((3, 4)) + d[2, 1] = np.nan + assert_equal(np.median(d, 0, out=o), o) + o = np.zeros((3,)) + assert_equal(np.median(d, 1, out=o), o) + o = np.zeros(()) + assert_equal(np.median(d, out=o), o) + + def test_nan_behavior(self): + a = np.arange(24, dtype=float) + a[2] = np.nan + assert_equal(np.median(a), np.nan) + assert_equal(np.median(a, axis=0), np.nan) + + a = np.arange(24, dtype=float).reshape(2, 3, 4) + a[1, 2, 3] = np.nan + a[1, 1, 2] = np.nan + + # no axis + assert_equal(np.median(a), np.nan) + assert_equal(np.median(a).ndim, 0) + + # axis0 + b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), 0) + b[2, 3] = np.nan + b[1, 2] = np.nan + assert_equal(np.median(a, 0), b) + + # axis1 + b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), 1) + b[1, 3] = np.nan + b[1, 2] = np.nan + assert_equal(np.median(a, 1), b) + + # axis02 + b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), (0, 2)) + b[1] = np.nan + b[2] = np.nan + assert_equal(np.median(a, (0, 2)), b) + + def test_empty(self): + # mean(empty array) emits two warnings: empty slice and divide by 0 + a = np.array([], dtype=float) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.median(a), np.nan) + assert_(w[0].category is RuntimeWarning) + assert_equal(len(w), 2) + + # multiple dimensions + a = np.array([], dtype=float, ndmin=3) + # no axis + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.median(a), np.nan) + assert_(w[0].category is RuntimeWarning) + + # axis 0 and 1 + b = np.array([], dtype=float, ndmin=2) + assert_equal(np.median(a, axis=0), b) + assert_equal(np.median(a, axis=1), b) + + # axis 2 + b = np.array(np.nan, dtype=float, ndmin=2) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.median(a, axis=2), b) + assert_(w[0].category is RuntimeWarning) + + def test_object(self): + o = np.arange(7.) 
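# [Editor's note] The two assert_(type(...), float) calls below pass
# ``float`` as assert_'s second (message) argument, so they only check
# that type(...) is truthy -- which a type object always is. A stricter
# spelling would be assert_(type(np.median(o.astype(object))) is float);
# the vendored upstream code is kept as-is here.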
+ assert_(type(np.median(o.astype(object))), float) + o[2] = np.nan + assert_(type(np.median(o.astype(object))), float) + + def test_extended_axis(self): + o = np.random.normal(size=(71, 23)) + x = np.dstack([o] * 10) + assert_equal(np.median(x, axis=(0, 1)), np.median(o)) + x = np.moveaxis(x, -1, 0) + assert_equal(np.median(x, axis=(-2, -1)), np.median(o)) + x = x.swapaxes(0, 1).copy() + assert_equal(np.median(x, axis=(0, -1)), np.median(o)) + + assert_equal(np.median(x, axis=(0, 1, 2)), np.median(x, axis=None)) + assert_equal(np.median(x, axis=(0, )), np.median(x, axis=0)) + assert_equal(np.median(x, axis=(-1, )), np.median(x, axis=-1)) + + d = np.arange(3 * 5 * 7 * 11).reshape((3, 5, 7, 11)) + np.random.shuffle(d.ravel()) + assert_equal(np.median(d, axis=(0, 1, 2))[0], + np.median(d[:,:,:, 0].flatten())) + assert_equal(np.median(d, axis=(0, 1, 3))[1], + np.median(d[:,:, 1,:].flatten())) + assert_equal(np.median(d, axis=(3, 1, -4))[2], + np.median(d[:,:, 2,:].flatten())) + assert_equal(np.median(d, axis=(3, 1, 2))[2], + np.median(d[2,:,:,:].flatten())) + assert_equal(np.median(d, axis=(3, 2))[2, 1], + np.median(d[2, 1,:,:].flatten())) + assert_equal(np.median(d, axis=(1, -2))[2, 1], + np.median(d[2,:,:, 1].flatten())) + assert_equal(np.median(d, axis=(1, 3))[2, 2], + np.median(d[2,:, 2,:].flatten())) + + def test_extended_axis_invalid(self): + d = np.ones((3, 5, 7, 11)) + assert_raises(np.AxisError, np.median, d, axis=-5) + assert_raises(np.AxisError, np.median, d, axis=(0, -5)) + assert_raises(np.AxisError, np.median, d, axis=4) + assert_raises(np.AxisError, np.median, d, axis=(0, 4)) + assert_raises(ValueError, np.median, d, axis=(1, 1)) + + def test_keepdims(self): + d = np.ones((3, 5, 7, 11)) + assert_equal(np.median(d, axis=None, keepdims=True).shape, + (1, 1, 1, 1)) + assert_equal(np.median(d, axis=(0, 1), keepdims=True).shape, + (1, 1, 7, 11)) + assert_equal(np.median(d, axis=(0, 3), keepdims=True).shape, + (1, 5, 7, 1)) + assert_equal(np.median(d, axis=(1,), keepdims=True).shape, + (3, 1, 7, 11)) + assert_equal(np.median(d, axis=(0, 1, 2, 3), keepdims=True).shape, + (1, 1, 1, 1)) + assert_equal(np.median(d, axis=(0, 1, 3), keepdims=True).shape, + (1, 1, 7, 1)) + + +class TestAdd_newdoc_ufunc: + + def test_ufunc_arg(self): + assert_raises(TypeError, add_newdoc_ufunc, 2, "blah") + assert_raises(ValueError, add_newdoc_ufunc, np.add, "blah") + + def test_string_arg(self): + assert_raises(TypeError, add_newdoc_ufunc, np.add, 3) + + +class TestAdd_newdoc: + + @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") + def test_add_doc(self): + # test that np.add_newdoc did attach a docstring successfully: + tgt = "Current flat index into the array." + assert_equal(np.core.flatiter.index.__doc__[:len(tgt)], tgt) + assert_(len(np.core.ufunc.identity.__doc__) > 300) + assert_(len(np.lib.index_tricks.mgrid.__doc__) > 300) + + @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + def test_errors_are_ignored(self): + prev_doc = np.core.flatiter.index.__doc__ + # nothing changed, but error ignored, this should probably + # give a warning (or even error) in the future. + np.add_newdoc("numpy.core", "flatiter", ("index", "bad docstring")) + assert prev_doc == np.core.flatiter.index.__doc__ + + +class TestAddDocstring(): + # Test should possibly be moved, but it also fits to be close to + # the newdoc tests... 
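# [Editor's sketch -- annotation, not part of the vendored test file.]
# What the two tests below pin down: np.add_docstring accepts re-adding
# an object's *identical* docstring as a no-op, but raises RuntimeError
# for a *different* one. A standalone illustration (the helper name
# _doc_demo is hypothetical):
import numpy as np

def _doc_demo():
    """docstring"""

np.add_docstring(_doc_demo, _doc_demo.__doc__)        # identical: accepted
try:
    np.add_docstring(_doc_demo, "another docstring")  # different: rejected
except RuntimeError:
    pass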
+ @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + @pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc") + def test_add_same_docstring(self): + # test for attributes (which are C-level defined) + np.add_docstring(np.ndarray.flat, np.ndarray.flat.__doc__) + # And typical functions: + def func(): + """docstring""" + return + + np.add_docstring(func, func.__doc__) + + @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + def test_different_docstring_fails(self): + # test for attributes (which are C-level defined) + with assert_raises(RuntimeError): + np.add_docstring(np.ndarray.flat, "different docstring") + # And typical functions: + def func(): + """docstring""" + return + + with assert_raises(RuntimeError): + np.add_docstring(func, "different docstring") + + +class TestSortComplex: + + @pytest.mark.parametrize("type_in, type_out", [ + ('l', 'D'), + ('h', 'F'), + ('H', 'F'), + ('b', 'F'), + ('B', 'F'), + ('g', 'G'), + ]) + def test_sort_real(self, type_in, type_out): + # sort_complex() type casting for real input types + a = np.array([5, 3, 6, 2, 1], dtype=type_in) + actual = np.sort_complex(a) + expected = np.sort(a).astype(type_out) + assert_equal(actual, expected) + assert_equal(actual.dtype, expected.dtype) + + def test_sort_complex(self): + # sort_complex() handling of complex input + a = np.array([2 + 3j, 1 - 2j, 1 - 3j, 2 + 1j], dtype='D') + expected = np.array([1 - 3j, 1 - 2j, 2 + 1j, 2 + 3j], dtype='D') + actual = np.sort_complex(a) + assert_equal(actual, expected) + assert_equal(actual.dtype, expected.dtype) diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/test_histograms.py b/MLPY/Lib/site-packages/numpy/lib/tests/test_histograms.py new file mode 100644 index 0000000000000000000000000000000000000000..a46f6c56b6ecc7c15370eeec0f73ee41f2861eb0 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/tests/test_histograms.py @@ -0,0 +1,838 @@ +import numpy as np + +from numpy.lib.histograms import histogram, histogramdd, histogram_bin_edges +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, assert_almost_equal, + assert_array_almost_equal, assert_raises, assert_allclose, + assert_array_max_ulp, assert_raises_regex, suppress_warnings, + ) +import pytest + + +class TestHistogram: + + def setup(self): + pass + + def teardown(self): + pass + + def test_simple(self): + n = 100 + v = np.random.rand(n) + (a, b) = histogram(v) + # check if the sum of the bins equals the number of samples + assert_equal(np.sum(a, axis=0), n) + # check that the bin counts are evenly spaced when the data is from + # a linear function + (a, b) = histogram(np.linspace(0, 10, 100)) + assert_array_equal(a, 10) + + def test_one_bin(self): + # Ticket 632 + hist, edges = histogram([1, 2, 3, 4], [1, 2]) + assert_array_equal(hist, [2, ]) + assert_array_equal(edges, [1, 2]) + assert_raises(ValueError, histogram, [1, 2], bins=0) + h, e = histogram([1, 2], bins=1) + assert_equal(h, np.array([2])) + assert_allclose(e, np.array([1., 2.])) + + def test_normed(self): + sup = suppress_warnings() + with sup: + rec = sup.record(np.VisibleDeprecationWarning, '.*normed.*') + # Check that the integral of the density equals 1. 
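# [Editor's note] ``normed=True`` is the deprecated older spelling of
# ``density=True`` (density replaced it around NumPy 1.15); the
# suppress_warnings/record dance above exists to assert that exactly one
# VisibleDeprecationWarning is emitted while the alias keeps working.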
+ n = 100 + v = np.random.rand(n) + a, b = histogram(v, normed=True) + area = np.sum(a * np.diff(b)) + assert_almost_equal(area, 1) + assert_equal(len(rec), 1) + + sup = suppress_warnings() + with sup: + rec = sup.record(np.VisibleDeprecationWarning, '.*normed.*') + # Check with non-constant bin widths (buggy but backwards + # compatible) + v = np.arange(10) + bins = [0, 1, 5, 9, 10] + a, b = histogram(v, bins, normed=True) + area = np.sum(a * np.diff(b)) + assert_almost_equal(area, 1) + assert_equal(len(rec), 1) + + def test_density(self): + # Check that the integral of the density equals 1. + n = 100 + v = np.random.rand(n) + a, b = histogram(v, density=True) + area = np.sum(a * np.diff(b)) + assert_almost_equal(area, 1) + + # Check with non-constant bin widths + v = np.arange(10) + bins = [0, 1, 3, 6, 10] + a, b = histogram(v, bins, density=True) + assert_array_equal(a, .1) + assert_equal(np.sum(a * np.diff(b)), 1) + + # Test that passing False works too + a, b = histogram(v, bins, density=False) + assert_array_equal(a, [1, 2, 3, 4]) + + # Variable bin widths are especially useful to deal with + # infinities. + v = np.arange(10) + bins = [0, 1, 3, 6, np.inf] + a, b = histogram(v, bins, density=True) + assert_array_equal(a, [.1, .1, .1, 0.]) + + # Taken from a bug report from N. Becker on the numpy-discussion + # mailing list Aug. 6, 2010. + counts, dmy = np.histogram( + [1, 2, 3, 4], [0.5, 1.5, np.inf], density=True) + assert_equal(counts, [.25, 0]) + + def test_outliers(self): + # Check that outliers are not tallied + a = np.arange(10) + .5 + + # Lower outliers + h, b = histogram(a, range=[0, 9]) + assert_equal(h.sum(), 9) + + # Upper outliers + h, b = histogram(a, range=[1, 10]) + assert_equal(h.sum(), 9) + + # Normalization + h, b = histogram(a, range=[1, 9], density=True) + assert_almost_equal((h * np.diff(b)).sum(), 1, decimal=15) + + # Weights + w = np.arange(10) + .5 + h, b = histogram(a, range=[1, 9], weights=w, density=True) + assert_equal((h * np.diff(b)).sum(), 1) + + h, b = histogram(a, bins=8, range=[1, 9], weights=w) + assert_equal(h, w[1:-1]) + + def test_arr_weights_mismatch(self): + a = np.arange(10) + .5 + w = np.arange(11) + .5 + with assert_raises_regex(ValueError, "same shape as"): + h, b = histogram(a, range=[1, 9], weights=w, density=True) + + + def test_type(self): + # Check the type of the returned histogram + a = np.arange(10) + .5 + h, b = histogram(a) + assert_(np.issubdtype(h.dtype, np.integer)) + + h, b = histogram(a, density=True) + assert_(np.issubdtype(h.dtype, np.floating)) + + h, b = histogram(a, weights=np.ones(10, int)) + assert_(np.issubdtype(h.dtype, np.integer)) + + h, b = histogram(a, weights=np.ones(10, float)) + assert_(np.issubdtype(h.dtype, np.floating)) + + def test_f32_rounding(self): + # gh-4799, check that the rounding of the edges works with float32 + x = np.array([276.318359, -69.593948, 21.329449], dtype=np.float32) + y = np.array([5005.689453, 4481.327637, 6010.369629], dtype=np.float32) + counts_hist, xedges, yedges = np.histogram2d(x, y, bins=100) + assert_equal(counts_hist.sum(), 3.) 
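# [Editor's sketch -- annotation, not part of the vendored test file.]
# The density tests above reduce to one invariant: with density=True the
# result is a probability density, so counts times bin widths integrate
# to 1 even for non-uniform bins. Standalone check (illustrative values):
import numpy as np
hist, edges = np.histogram(np.arange(10), bins=[0, 1, 3, 6, 10],
                           density=True)
assert np.isclose((hist * np.diff(edges)).sum(), 1.0)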
+ + def test_bool_conversion(self): + # gh-12107 + # Reference integer histogram + a = np.array([1, 1, 0], dtype=np.uint8) + int_hist, int_edges = np.histogram(a) + + # Should raise an warning on booleans + # Ensure that the histograms are equivalent, need to suppress + # the warnings to get the actual outputs + with suppress_warnings() as sup: + rec = sup.record(RuntimeWarning, 'Converting input from .*') + hist, edges = np.histogram([True, True, False]) + # A warning should be issued + assert_equal(len(rec), 1) + assert_array_equal(hist, int_hist) + assert_array_equal(edges, int_edges) + + def test_weights(self): + v = np.random.rand(100) + w = np.ones(100) * 5 + a, b = histogram(v) + na, nb = histogram(v, density=True) + wa, wb = histogram(v, weights=w) + nwa, nwb = histogram(v, weights=w, density=True) + assert_array_almost_equal(a * 5, wa) + assert_array_almost_equal(na, nwa) + + # Check weights are properly applied. + v = np.linspace(0, 10, 10) + w = np.concatenate((np.zeros(5), np.ones(5))) + wa, wb = histogram(v, bins=np.arange(11), weights=w) + assert_array_almost_equal(wa, w) + + # Check with integer weights + wa, wb = histogram([1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1]) + assert_array_equal(wa, [4, 5, 0, 1]) + wa, wb = histogram( + [1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1], density=True) + assert_array_almost_equal(wa, np.array([4, 5, 0, 1]) / 10. / 3. * 4) + + # Check weights with non-uniform bin widths + a, b = histogram( + np.arange(9), [0, 1, 3, 6, 10], + weights=[2, 1, 1, 1, 1, 1, 1, 1, 1], density=True) + assert_almost_equal(a, [.2, .1, .1, .075]) + + def test_exotic_weights(self): + + # Test the use of weights that are not integer or floats, but e.g. + # complex numbers or object types. + + # Complex weights + values = np.array([1.3, 2.5, 2.3]) + weights = np.array([1, -1, 2]) + 1j * np.array([2, 1, 2]) + + # Check with custom bins + wa, wb = histogram(values, bins=[0, 2, 3], weights=weights) + assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3])) + + # Check with even bins + wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights) + assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3])) + + # Decimal weights + from decimal import Decimal + values = np.array([1.3, 2.5, 2.3]) + weights = np.array([Decimal(1), Decimal(2), Decimal(3)]) + + # Check with custom bins + wa, wb = histogram(values, bins=[0, 2, 3], weights=weights) + assert_array_almost_equal(wa, [Decimal(1), Decimal(5)]) + + # Check with even bins + wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights) + assert_array_almost_equal(wa, [Decimal(1), Decimal(5)]) + + def test_no_side_effects(self): + # This is a regression test that ensures that values passed to + # ``histogram`` are unchanged. 
+ values = np.array([1.3, 2.5, 2.3]) + np.histogram(values, range=[-10, 10], bins=100) + assert_array_almost_equal(values, [1.3, 2.5, 2.3]) + + def test_empty(self): + a, b = histogram([], bins=([0, 1])) + assert_array_equal(a, np.array([0])) + assert_array_equal(b, np.array([0, 1])) + + def test_error_binnum_type (self): + # Tests if right Error is raised if bins argument is float + vals = np.linspace(0.0, 1.0, num=100) + histogram(vals, 5) + assert_raises(TypeError, histogram, vals, 2.4) + + def test_finite_range(self): + # Normal ranges should be fine + vals = np.linspace(0.0, 1.0, num=100) + histogram(vals, range=[0.25,0.75]) + assert_raises(ValueError, histogram, vals, range=[np.nan,0.75]) + assert_raises(ValueError, histogram, vals, range=[0.25,np.inf]) + + def test_invalid_range(self): + # start of range must be < end of range + vals = np.linspace(0.0, 1.0, num=100) + with assert_raises_regex(ValueError, "max must be larger than"): + np.histogram(vals, range=[0.1, 0.01]) + + def test_bin_edge_cases(self): + # Ensure that floating-point computations correctly place edge cases. + arr = np.array([337, 404, 739, 806, 1007, 1811, 2012]) + hist, edges = np.histogram(arr, bins=8296, range=(2, 2280)) + mask = hist > 0 + left_edges = edges[:-1][mask] + right_edges = edges[1:][mask] + for x, left, right in zip(arr, left_edges, right_edges): + assert_(x >= left) + assert_(x < right) + + def test_last_bin_inclusive_range(self): + arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.]) + hist, edges = np.histogram(arr, bins=30, range=(-0.5, 5)) + assert_equal(hist[-1], 1) + + def test_bin_array_dims(self): + # gracefully handle bins object > 1 dimension + vals = np.linspace(0.0, 1.0, num=100) + bins = np.array([[0, 0.5], [0.6, 1.0]]) + with assert_raises_regex(ValueError, "must be 1d"): + np.histogram(vals, bins=bins) + + def test_unsigned_monotonicity_check(self): + # Ensures ValueError is raised if bins not increasing monotonically + # when bins contain unsigned values (see #9222) + arr = np.array([2]) + bins = np.array([1, 3, 1], dtype='uint64') + with assert_raises(ValueError): + hist, edges = np.histogram(arr, bins=bins) + + def test_object_array_of_0d(self): + # gh-7864 + assert_raises(ValueError, + histogram, [np.array(0.4) for i in range(10)] + [-np.inf]) + assert_raises(ValueError, + histogram, [np.array(0.4) for i in range(10)] + [np.inf]) + + # these should not crash + np.histogram([np.array(0.5) for i in range(10)] + [.500000000000001]) + np.histogram([np.array(0.5) for i in range(10)] + [.5]) + + def test_some_nan_values(self): + # gh-7503 + one_nan = np.array([0, 1, np.nan]) + all_nan = np.array([np.nan, np.nan]) + + # the internal comparisons with NaN give warnings + sup = suppress_warnings() + sup.filter(RuntimeWarning) + with sup: + # can't infer range with nan + assert_raises(ValueError, histogram, one_nan, bins='auto') + assert_raises(ValueError, histogram, all_nan, bins='auto') + + # explicit range solves the problem + h, b = histogram(one_nan, bins='auto', range=(0, 1)) + assert_equal(h.sum(), 2) # nan is not counted + h, b = histogram(all_nan, bins='auto', range=(0, 1)) + assert_equal(h.sum(), 0) # nan is not counted + + # as does an explicit set of bins + h, b = histogram(one_nan, bins=[0, 1]) + assert_equal(h.sum(), 2) # nan is not counted + h, b = histogram(all_nan, bins=[0, 1]) + assert_equal(h.sum(), 0) # nan is not counted + + def test_datetime(self): + begin = np.datetime64('2000-01-01', 'D') + offsets = np.array([0, 0, 1, 1, 2, 3, 5, 10, 20]) + bins = np.array([0, 2, 
7, 20]) + dates = begin + offsets + date_bins = begin + bins + + td = np.dtype('timedelta64[D]') + + # Results should be the same for integer offsets or datetime values. + # For now, only explicit bins are supported, since linspace does not + # work on datetimes or timedeltas + d_count, d_edge = histogram(dates, bins=date_bins) + t_count, t_edge = histogram(offsets.astype(td), bins=bins.astype(td)) + i_count, i_edge = histogram(offsets, bins=bins) + + assert_equal(d_count, i_count) + assert_equal(t_count, i_count) + + assert_equal((d_edge - begin).astype(int), i_edge) + assert_equal(t_edge.astype(int), i_edge) + + assert_equal(d_edge.dtype, dates.dtype) + assert_equal(t_edge.dtype, td) + + def do_signed_overflow_bounds(self, dtype): + exponent = 8 * np.dtype(dtype).itemsize - 1 + arr = np.array([-2**exponent + 4, 2**exponent - 4], dtype=dtype) + hist, e = histogram(arr, bins=2) + assert_equal(e, [-2**exponent + 4, 0, 2**exponent - 4]) + assert_equal(hist, [1, 1]) + + def test_signed_overflow_bounds(self): + self.do_signed_overflow_bounds(np.byte) + self.do_signed_overflow_bounds(np.short) + self.do_signed_overflow_bounds(np.intc) + self.do_signed_overflow_bounds(np.int_) + self.do_signed_overflow_bounds(np.longlong) + + def do_precision_lower_bound(self, float_small, float_large): + eps = np.finfo(float_large).eps + + arr = np.array([1.0], float_small) + range = np.array([1.0 + eps, 2.0], float_large) + + # test is looking for behavior when the bounds change between dtypes + if range.astype(float_small)[0] != 1: + return + + # previously crashed + count, x_loc = np.histogram(arr, bins=1, range=range) + assert_equal(count, [1]) + + # gh-10322 means that the type comes from arr - this may change + assert_equal(x_loc.dtype, float_small) + + def do_precision_upper_bound(self, float_small, float_large): + eps = np.finfo(float_large).eps + + arr = np.array([1.0], float_small) + range = np.array([0.0, 1.0 - eps], float_large) + + # test is looking for behavior when the bounds change between dtypes + if range.astype(float_small)[-1] != 1: + return + + # previously crashed + count, x_loc = np.histogram(arr, bins=1, range=range) + assert_equal(count, [1]) + + # gh-10322 means that the type comes from arr - this may change + assert_equal(x_loc.dtype, float_small) + + def do_precision(self, float_small, float_large): + self.do_precision_lower_bound(float_small, float_large) + self.do_precision_upper_bound(float_small, float_large) + + def test_precision(self): + # not looping results in a useful stack trace upon failure + self.do_precision(np.half, np.single) + self.do_precision(np.half, np.double) + self.do_precision(np.half, np.longdouble) + self.do_precision(np.single, np.double) + self.do_precision(np.single, np.longdouble) + self.do_precision(np.double, np.longdouble) + + def test_histogram_bin_edges(self): + hist, e = histogram([1, 2, 3, 4], [1, 2]) + edges = histogram_bin_edges([1, 2, 3, 4], [1, 2]) + assert_array_equal(edges, e) + + arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.]) + hist, e = histogram(arr, bins=30, range=(-0.5, 5)) + edges = histogram_bin_edges(arr, bins=30, range=(-0.5, 5)) + assert_array_equal(edges, e) + + hist, e = histogram(arr, bins='auto', range=(0, 1)) + edges = histogram_bin_edges(arr, bins='auto', range=(0, 1)) + assert_array_equal(edges, e) + + +class TestHistogramOptimBinNums: + """ + Provide test coverage when using provided estimators for optimal number of + bins + """ + + def test_empty(self): + estimator_list = ['fd', 'scott', 'rice', 'sturges', + 'doane', 
'sqrt', 'auto', 'stone'] + # check it can deal with empty data + for estimator in estimator_list: + a, b = histogram([], bins=estimator) + assert_array_equal(a, np.array([0])) + assert_array_equal(b, np.array([0, 1])) + + def test_simple(self): + """ + Straightforward testing with a mixture of linspace data (for + consistency). All test values have been precomputed and the values + shouldn't change. + """ + # Some basic sanity checking, with some fixed data. + # Checking for the correct number of bins + basic_test = {50: {'fd': 4, 'scott': 4, 'rice': 8, 'sturges': 7, + 'doane': 8, 'sqrt': 8, 'auto': 7, 'stone': 2}, + 500: {'fd': 8, 'scott': 8, 'rice': 16, 'sturges': 10, + 'doane': 12, 'sqrt': 23, 'auto': 10, 'stone': 9}, + 5000: {'fd': 17, 'scott': 17, 'rice': 35, 'sturges': 14, + 'doane': 17, 'sqrt': 71, 'auto': 17, 'stone': 20}} + + for testlen, expectedResults in basic_test.items(): + # Create some sort of non-uniform data to test with + # (2 peak uniform mixture) + x1 = np.linspace(-10, -1, testlen // 5 * 2) + x2 = np.linspace(1, 10, testlen // 5 * 3) + x = np.concatenate((x1, x2)) + for estimator, numbins in expectedResults.items(): + a, b = np.histogram(x, estimator) + assert_equal(len(a), numbins, err_msg="For the {0} estimator " + "with datasize of {1}".format(estimator, testlen)) + + def test_small(self): + """ + Smaller datasets have the potential to cause issues with the data + adaptive methods, especially the FD method. All bin numbers have been + precalculated. + """ + small_dat = {1: {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1, + 'doane': 1, 'sqrt': 1, 'stone': 1}, + 2: {'fd': 2, 'scott': 1, 'rice': 3, 'sturges': 2, + 'doane': 1, 'sqrt': 2, 'stone': 1}, + 3: {'fd': 2, 'scott': 2, 'rice': 3, 'sturges': 3, + 'doane': 3, 'sqrt': 2, 'stone': 1}} + + for testlen, expectedResults in small_dat.items(): + testdat = np.arange(testlen) + for estimator, expbins in expectedResults.items(): + a, b = np.histogram(testdat, estimator) + assert_equal(len(a), expbins, err_msg="For the {0} estimator " + "with datasize of {1}".format(estimator, testlen)) + + def test_incorrect_methods(self): + """ + Check a ValueError is thrown when an unknown string is passed in. + """ + check_list = ['mad', 'freeman', 'histograms', 'IQR'] + for estimator in check_list: + assert_raises(ValueError, histogram, [1, 2, 3], estimator) + + def test_novariance(self): + """ + Check that methods handle no variance in data. + Primarily for Scott and FD, as the SD and IQR are both 0 in this case. + """ + novar_dataset = np.ones(100) + novar_resultdict = {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1, + 'doane': 1, 'sqrt': 1, 'auto': 1, 'stone': 1} + + for estimator, numbins in novar_resultdict.items(): + a, b = np.histogram(novar_dataset, estimator) + assert_equal(len(a), numbins, err_msg="{0} estimator, " + "No Variance test".format(estimator)) + + def test_limited_variance(self): + """ + Check that when the IQR is 0 but variance exists, we return the + sturges value and not the fd value. + """ + lim_var_data = np.ones(1000) + lim_var_data[:3] = 0 + lim_var_data[-4:] = 100 + + edges_auto = histogram_bin_edges(lim_var_data, 'auto') + assert_equal(edges_auto, np.linspace(0, 100, 12)) + + edges_fd = histogram_bin_edges(lim_var_data, 'fd') + assert_equal(edges_fd, np.array([0, 100])) + + edges_sturges = histogram_bin_edges(lim_var_data, 'sturges') + assert_equal(edges_sturges, np.linspace(0, 100, 12)) + + def test_outlier(self): + """ + Check the FD, Scott and Doane with outliers. + + The FD estimates a smaller binwidth since it's less affected by + outliers. Since the range is so (artificially) large, this means more + bins, most of which will be empty, but the data of interest usually is + unaffected. The Scott estimator is more affected and returns fewer bins, + despite most of the variance being in one area of the data. The Doane + estimator lies somewhere between the other two. + """ + xcenter = np.linspace(-10, 10, 50) + outlier_dataset = np.hstack((np.linspace(-110, -100, 5), xcenter)) + + outlier_resultdict = {'fd': 21, 'scott': 5, 'doane': 11, 'stone': 6} + + for estimator, numbins in outlier_resultdict.items(): + a, b = np.histogram(outlier_dataset, estimator) + assert_equal(len(a), numbins) + + def test_scott_vs_stone(self): + """Verify that Scott's rule and Stone's rule converge for normally distributed data""" + + def nbins_ratio(seed, size): + rng = np.random.RandomState(seed) + x = rng.normal(loc=0, scale=2, size=size) + a, b = len(np.histogram(x, 'stone')[0]), len(np.histogram(x, 'scott')[0]) + return a / (a + b) + + ll = [[nbins_ratio(seed, size) for size in np.geomspace(start=10, stop=100, num=4).round().astype(int)] + for seed in range(10)] + + # the average difference between the two methods decreases as the dataset size increases. + avg = abs(np.mean(ll, axis=0) - 0.5) + assert_almost_equal(avg, [0.15, 0.09, 0.08, 0.03], decimal=2) + + def test_simple_range(self): + """ + Straightforward testing with a mixture of linspace data (for + consistency). Adding in a 3rd mixture that will then be + completely ignored. All test values have been precomputed and + they shouldn't change. + """ + # some basic sanity checking, with some fixed data. + # Checking for the correct number of bins + basic_test = { + 50: {'fd': 8, 'scott': 8, 'rice': 15, + 'sturges': 14, 'auto': 14, 'stone': 8}, + 500: {'fd': 15, 'scott': 16, 'rice': 32, + 'sturges': 20, 'auto': 20, 'stone': 80}, + 5000: {'fd': 33, 'scott': 33, 'rice': 69, + 'sturges': 27, 'auto': 33, 'stone': 80} + } + + for testlen, expectedResults in basic_test.items(): + # create some sort of non-uniform data to test with + # (3 peak uniform mixture) + x1 = np.linspace(-10, -1, testlen // 5 * 2) + x2 = np.linspace(1, 10, testlen // 5 * 3) + x3 = np.linspace(-100, -50, testlen) + x = np.hstack((x1, x2, x3)) + for estimator, numbins in expectedResults.items(): + a, b = np.histogram(x, estimator, range=(-20, 20)) + msg = "For the {0} estimator".format(estimator) + msg += " with datasize of {0}".format(testlen) + assert_equal(len(a), numbins, err_msg=msg) + + @pytest.mark.parametrize("bins", ['auto', 'fd', 'doane', 'scott', + 'stone', 'rice', 'sturges']) + def test_signed_integer_data(self, bins): + # Regression test for gh-14379.
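# [Editor's note] gh-14379: for the data below, the span
# 127 - (-2) = 129 overflows int8, which used to derail the automatic
# bin estimators for signed integer input. Comparing against the same
# values as int32, where no overflow can occur, checks that the computed
# edges no longer depend on the input's integer width.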
+ a = np.array([-2, 0, 127], dtype=np.int8) + hist, edges = np.histogram(a, bins=bins) + hist32, edges32 = np.histogram(a.astype(np.int32), bins=bins) + assert_array_equal(hist, hist32) + assert_array_equal(edges, edges32) + + def test_simple_weighted(self): + """ + Check that weighted data raises a TypeError + """ + estimator_list = ['fd', 'scott', 'rice', 'sturges', 'auto'] + for estimator in estimator_list: + assert_raises(TypeError, histogram, [1, 2, 3], + estimator, weights=[1, 2, 3]) + + +class TestHistogramdd: + + def test_simple(self): + x = np.array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5], + [.5, .5, 1.5], [.5, 1.5, 2.5], [.5, 2.5, 2.5]]) + H, edges = histogramdd(x, (2, 3, 3), + range=[[-1, 1], [0, 3], [0, 3]]) + answer = np.array([[[0, 1, 0], [0, 0, 1], [1, 0, 0]], + [[0, 1, 0], [0, 0, 1], [0, 0, 1]]]) + assert_array_equal(H, answer) + + # Check normalization + ed = [[-2, 0, 2], [0, 1, 2, 3], [0, 1, 2, 3]] + H, edges = histogramdd(x, bins=ed, density=True) + assert_(np.all(H == answer / 12.)) + + # Check that H has the correct shape. + H, edges = histogramdd(x, (2, 3, 4), + range=[[-1, 1], [0, 3], [0, 4]], + density=True) + answer = np.array([[[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]], + [[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0]]]) + assert_array_almost_equal(H, answer / 6., 4) + # Check that a sequence of arrays is accepted and H has the correct + # shape. + z = [np.squeeze(y) for y in np.split(x, 3, axis=1)] + H, edges = histogramdd( + z, bins=(4, 3, 2), range=[[-2, 2], [0, 3], [0, 2]]) + answer = np.array([[[0, 0], [0, 0], [0, 0]], + [[0, 1], [0, 0], [1, 0]], + [[0, 1], [0, 0], [0, 0]], + [[0, 0], [0, 0], [0, 0]]]) + assert_array_equal(H, answer) + + Z = np.zeros((5, 5, 5)) + Z[list(range(5)), list(range(5)), list(range(5))] = 1. + H, edges = histogramdd([np.arange(5), np.arange(5), np.arange(5)], 5) + assert_array_equal(H, Z) + + def test_shape_3d(self): + # All possible permutations for bins of different lengths in 3D. + bins = ((5, 4, 6), (6, 4, 5), (5, 6, 4), (4, 6, 5), (6, 5, 4), + (4, 5, 6)) + r = np.random.rand(10, 3) + for b in bins: + H, edges = histogramdd(r, b) + assert_(H.shape == b) + + def test_shape_4d(self): + # All possible permutations for bins of different lengths in 4D. 
+ bins = ((7, 4, 5, 6), (4, 5, 7, 6), (5, 6, 4, 7), (7, 6, 5, 4), + (5, 7, 6, 4), (4, 6, 7, 5), (6, 5, 7, 4), (7, 5, 4, 6), + (7, 4, 6, 5), (6, 4, 7, 5), (6, 7, 5, 4), (4, 6, 5, 7), + (4, 7, 5, 6), (5, 4, 6, 7), (5, 7, 4, 6), (6, 7, 4, 5), + (6, 5, 4, 7), (4, 7, 6, 5), (4, 5, 6, 7), (7, 6, 4, 5), + (5, 4, 7, 6), (5, 6, 7, 4), (6, 4, 5, 7), (7, 5, 6, 4)) + + r = np.random.rand(10, 4) + for b in bins: + H, edges = histogramdd(r, b) + assert_(H.shape == b) + + def test_weights(self): + v = np.random.rand(100, 2) + hist, edges = histogramdd(v) + n_hist, edges = histogramdd(v, density=True) + w_hist, edges = histogramdd(v, weights=np.ones(100)) + assert_array_equal(w_hist, hist) + w_hist, edges = histogramdd(v, weights=np.ones(100) * 2, density=True) + assert_array_equal(w_hist, n_hist) + w_hist, edges = histogramdd(v, weights=np.ones(100, int) * 2) + assert_array_equal(w_hist, 2 * hist) + + def test_identical_samples(self): + x = np.zeros((10, 2), int) + hist, edges = histogramdd(x, bins=2) + assert_array_equal(edges[0], np.array([-0.5, 0., 0.5])) + + def test_empty(self): + a, b = histogramdd([[], []], bins=([0, 1], [0, 1])) + assert_array_max_ulp(a, np.array([[0.]])) + a, b = np.histogramdd([[], [], []], bins=2) + assert_array_max_ulp(a, np.zeros((2, 2, 2))) + + def test_bins_errors(self): + # There are two ways to specify bins. Check for the right errors + # when mixing those. + x = np.arange(8).reshape(2, 4) + assert_raises(ValueError, np.histogramdd, x, bins=[-1, 2, 4, 5]) + assert_raises(ValueError, np.histogramdd, x, bins=[1, 0.99, 1, 1]) + assert_raises( + ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 3, -3]]) + assert_(np.histogramdd(x, bins=[1, 1, 1, [1, 2, 3, 4]])) + + def test_inf_edges(self): + # Test using +/-inf bin edges works. See #1788. + with np.errstate(invalid='ignore'): + x = np.arange(6).reshape(3, 2) + expected = np.array([[1, 0], [0, 1], [0, 1]]) + h, e = np.histogramdd(x, bins=[3, [-np.inf, 2, 10]]) + assert_allclose(h, expected) + h, e = np.histogramdd(x, bins=[3, np.array([-1, 2, np.inf])]) + assert_allclose(h, expected) + h, e = np.histogramdd(x, bins=[3, [-np.inf, 3, np.inf]]) + assert_allclose(h, expected) + + def test_rightmost_binedge(self): + # Test event very close to rightmost binedge. See Github issue #4266 + x = [0.9999999995] + bins = [[0., 0.5, 1.0]] + hist, _ = histogramdd(x, bins=bins) + assert_(hist[0] == 0.0) + assert_(hist[1] == 1.) + x = [1.0] + bins = [[0., 0.5, 1.0]] + hist, _ = histogramdd(x, bins=bins) + assert_(hist[0] == 0.0) + assert_(hist[1] == 1.) 
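# [Editor's note] The two cases above lie within range: the rightmost
# bin is closed on both sides, so a value exactly on the last edge (1.0)
# still lands in it. The two cases below fall past the last edge and
# must be dropped entirely rather than rounded into the final bin.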
+ x = [1.0000000001] + bins = [[0., 0.5, 1.0]] + hist, _ = histogramdd(x, bins=bins) + assert_(hist[0] == 0.0) + assert_(hist[1] == 0.0) + x = [1.0001] + bins = [[0., 0.5, 1.0]] + hist, _ = histogramdd(x, bins=bins) + assert_(hist[0] == 0.0) + assert_(hist[1] == 0.0) + + def test_finite_range(self): + vals = np.random.random((100, 3)) + histogramdd(vals, range=[[0.0, 1.0], [0.25, 0.75], [0.25, 0.5]]) + assert_raises(ValueError, histogramdd, vals, + range=[[0.0, 1.0], [0.25, 0.75], [0.25, np.inf]]) + assert_raises(ValueError, histogramdd, vals, + range=[[0.0, 1.0], [np.nan, 0.75], [0.25, 0.5]]) + + def test_equal_edges(self): + """ Test that adjacent entries in an edge array can be equal """ + x = np.array([0, 1, 2]) + y = np.array([0, 1, 2]) + x_edges = np.array([0, 2, 2]) + y_edges = 1 + hist, edges = histogramdd((x, y), bins=(x_edges, y_edges)) + + hist_expected = np.array([ + [2.], + [1.], # x == 2 falls in the final bin + ]) + assert_equal(hist, hist_expected) + + def test_edge_dtype(self): + """ Test that if an edge array is input, its type is preserved """ + x = np.array([0, 10, 20]) + y = x / 10 + x_edges = np.array([0, 5, 15, 20]) + y_edges = x_edges / 10 + hist, edges = histogramdd((x, y), bins=(x_edges, y_edges)) + + assert_equal(edges[0].dtype, x_edges.dtype) + assert_equal(edges[1].dtype, y_edges.dtype) + + def test_large_integers(self): + big = 2**60 # Too large to represent with a full precision float + + x = np.array([0], np.int64) + x_edges = np.array([-1, +1], np.int64) + y = big + x + y_edges = big + x_edges + + hist, edges = histogramdd((x, y), bins=(x_edges, y_edges)) + + assert_equal(hist[0, 0], 1) + + def test_density_non_uniform_2d(self): + # Defines the following grid: + # + # 0 2 8 + # 0+-+-----+ + # + | + + # + | + + # 6+-+-----+ + # 8+-+-----+ + x_edges = np.array([0, 2, 8]) + y_edges = np.array([0, 6, 8]) + relative_areas = np.array([ + [3, 9], + [1, 3]]) + + # ensure the number of points in each region is proportional to its area + x = np.array([1] + [1]*3 + [7]*3 + [7]*9) + y = np.array([7] + [1]*3 + [7]*3 + [1]*9) + + # sanity check that the above worked as intended + hist, edges = histogramdd((y, x), bins=(y_edges, x_edges)) + assert_equal(hist, relative_areas) + + # resulting histogram should be uniform, since counts and areas are proportional + hist, edges = histogramdd((y, x), bins=(y_edges, x_edges), density=True) + assert_equal(hist, 1 / (8*8)) + + def test_density_non_uniform_1d(self): + # compare to histogram to show the results are the same + v = np.arange(10) + bins = np.array([0, 1, 3, 6, 10]) + hist, edges = histogram(v, bins, density=True) + hist_dd, edges_dd = histogramdd((v,), (bins,), density=True) + assert_equal(hist, hist_dd) + assert_equal(edges, edges_dd[0]) + + def test_density_via_normed(self): + # normed should simply alias to density argument + v = np.arange(10) + bins = np.array([0, 1, 3, 6, 10]) + hist, edges = histogram(v, bins, density=True) + hist_dd, edges_dd = histogramdd((v,), (bins,), normed=True) + assert_equal(hist, hist_dd) + assert_equal(edges, edges_dd[0]) + + def test_density_normed_redundancy(self): + v = np.arange(10) + bins = np.array([0, 1, 3, 6, 10]) + with assert_raises_regex(TypeError, "Cannot specify both"): + hist_dd, edges_dd = histogramdd((v,), (bins,), + density=True, + normed=True) diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/test_index_tricks.py b/MLPY/Lib/site-packages/numpy/lib/tests/test_index_tricks.py new file mode 100644 index 
0000000000000000000000000000000000000000..9070d91a9f22b35667f6c24bf73bd8dafb856275 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/tests/test_index_tricks.py @@ -0,0 +1,518 @@ +import pytest + +import numpy as np +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, assert_almost_equal, + assert_array_almost_equal, assert_raises, assert_raises_regex, + assert_warns + ) +from numpy.lib.index_tricks import ( + mgrid, ogrid, ndenumerate, fill_diagonal, diag_indices, diag_indices_from, + index_exp, ndindex, r_, s_, ix_ + ) + + +class TestRavelUnravelIndex: + def test_basic(self): + assert_equal(np.unravel_index(2, (2, 2)), (1, 0)) + + # test that new shape argument works properly + assert_equal(np.unravel_index(indices=2, + shape=(2, 2)), + (1, 0)) + + # test that an invalid second keyword argument + # is properly handled, including the old name `dims`. + with assert_raises(TypeError): + np.unravel_index(indices=2, hape=(2, 2)) + + with assert_raises(TypeError): + np.unravel_index(2, hape=(2, 2)) + + with assert_raises(TypeError): + np.unravel_index(254, ims=(17, 94)) + + with assert_raises(TypeError): + np.unravel_index(254, dims=(17, 94)) + + assert_equal(np.ravel_multi_index((1, 0), (2, 2)), 2) + assert_equal(np.unravel_index(254, (17, 94)), (2, 66)) + assert_equal(np.ravel_multi_index((2, 66), (17, 94)), 254) + assert_raises(ValueError, np.unravel_index, -1, (2, 2)) + assert_raises(TypeError, np.unravel_index, 0.5, (2, 2)) + assert_raises(ValueError, np.unravel_index, 4, (2, 2)) + assert_raises(ValueError, np.ravel_multi_index, (-3, 1), (2, 2)) + assert_raises(ValueError, np.ravel_multi_index, (2, 1), (2, 2)) + assert_raises(ValueError, np.ravel_multi_index, (0, -3), (2, 2)) + assert_raises(ValueError, np.ravel_multi_index, (0, 2), (2, 2)) + assert_raises(TypeError, np.ravel_multi_index, (0.1, 0.), (2, 2)) + + assert_equal(np.unravel_index((2*3 + 1)*6 + 4, (4, 3, 6)), [2, 1, 4]) + assert_equal( + np.ravel_multi_index([2, 1, 4], (4, 3, 6)), (2*3 + 1)*6 + 4) + + arr = np.array([[3, 6, 6], [4, 5, 1]]) + assert_equal(np.ravel_multi_index(arr, (7, 6)), [22, 41, 37]) + assert_equal( + np.ravel_multi_index(arr, (7, 6), order='F'), [31, 41, 13]) + assert_equal( + np.ravel_multi_index(arr, (4, 6), mode='clip'), [22, 23, 19]) + assert_equal(np.ravel_multi_index(arr, (4, 4), mode=('clip', 'wrap')), + [12, 13, 13]) + assert_equal(np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9)), 1621) + + assert_equal(np.unravel_index(np.array([22, 41, 37]), (7, 6)), + [[3, 6, 6], [4, 5, 1]]) + assert_equal( + np.unravel_index(np.array([31, 41, 13]), (7, 6), order='F'), + [[3, 6, 6], [4, 5, 1]]) + assert_equal(np.unravel_index(1621, (6, 7, 8, 9)), [3, 1, 4, 1]) + + def test_empty_indices(self): + msg1 = 'indices must be integral: the provided empty sequence was' + msg2 = 'only int indices permitted' + assert_raises_regex(TypeError, msg1, np.unravel_index, [], (10, 3, 5)) + assert_raises_regex(TypeError, msg1, np.unravel_index, (), (10, 3, 5)) + assert_raises_regex(TypeError, msg2, np.unravel_index, np.array([]), + (10, 3, 5)) + assert_equal(np.unravel_index(np.array([],dtype=int), (10, 3, 5)), + [[], [], []]) + assert_raises_regex(TypeError, msg1, np.ravel_multi_index, ([], []), + (10, 3)) + assert_raises_regex(TypeError, msg1, np.ravel_multi_index, ([], ['abc']), + (10, 3)) + assert_raises_regex(TypeError, msg2, np.ravel_multi_index, + (np.array([]), np.array([])), (5, 3)) + assert_equal(np.ravel_multi_index( + (np.array([], dtype=int), np.array([], dtype=int)), (5, 3)), []) + 
assert_equal(np.ravel_multi_index(np.array([[], []], dtype=int), + (5, 3)), []) + + def test_big_indices(self): + # ravel_multi_index for big indices (issue #7546) + if np.intp == np.int64: + arr = ([1, 29], [3, 5], [3, 117], [19, 2], + [2379, 1284], [2, 2], [0, 1]) + assert_equal( + np.ravel_multi_index(arr, (41, 7, 120, 36, 2706, 8, 6)), + [5627771580, 117259570957]) + + # test unravel_index for big indices (issue #9538) + assert_raises(ValueError, np.unravel_index, 1, (2**32-1, 2**31+1)) + + # test overflow checking for too big array (issue #7546) + dummy_arr = ([0],[0]) + half_max = np.iinfo(np.intp).max // 2 + assert_equal( + np.ravel_multi_index(dummy_arr, (half_max, 2)), [0]) + assert_raises(ValueError, + np.ravel_multi_index, dummy_arr, (half_max+1, 2)) + assert_equal( + np.ravel_multi_index(dummy_arr, (half_max, 2), order='F'), [0]) + assert_raises(ValueError, + np.ravel_multi_index, dummy_arr, (half_max+1, 2), order='F') + + def test_dtypes(self): + # Test with different data types + for dtype in [np.int16, np.uint16, np.int32, + np.uint32, np.int64, np.uint64]: + coords = np.array( + [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0]], dtype=dtype) + shape = (5, 8) + uncoords = 8*coords[0]+coords[1] + assert_equal(np.ravel_multi_index(coords, shape), uncoords) + assert_equal(coords, np.unravel_index(uncoords, shape)) + uncoords = coords[0]+5*coords[1] + assert_equal( + np.ravel_multi_index(coords, shape, order='F'), uncoords) + assert_equal(coords, np.unravel_index(uncoords, shape, order='F')) + + coords = np.array( + [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0], [1, 3, 1, 0, 9, 5]], + dtype=dtype) + shape = (5, 8, 10) + uncoords = 10*(8*coords[0]+coords[1])+coords[2] + assert_equal(np.ravel_multi_index(coords, shape), uncoords) + assert_equal(coords, np.unravel_index(uncoords, shape)) + uncoords = coords[0]+5*(coords[1]+8*coords[2]) + assert_equal( + np.ravel_multi_index(coords, shape, order='F'), uncoords) + assert_equal(coords, np.unravel_index(uncoords, shape, order='F')) + + def test_clipmodes(self): + # Test clipmodes + assert_equal( + np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12), mode='wrap'), + np.ravel_multi_index([1, 1, 6, 2], (4, 3, 7, 12))) + assert_equal(np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12), + mode=( + 'wrap', 'raise', 'clip', 'raise')), + np.ravel_multi_index([1, 1, 0, 2], (4, 3, 7, 12))) + assert_raises( + ValueError, np.ravel_multi_index, [5, 1, -1, 2], (4, 3, 7, 12)) + + def test_writeability(self): + # See gh-7269 + x, y = np.unravel_index([1, 2, 3], (4, 5)) + assert_(x.flags.writeable) + assert_(y.flags.writeable) + + def test_0d(self): + # gh-580 + x = np.unravel_index(0, ()) + assert_equal(x, ()) + + assert_raises_regex(ValueError, "0d array", np.unravel_index, [0], ()) + assert_raises_regex( + ValueError, "out of bounds", np.unravel_index, [1], ()) + + @pytest.mark.parametrize("mode", ["clip", "wrap", "raise"]) + def test_empty_array_ravel(self, mode): + res = np.ravel_multi_index( + np.zeros((3, 0), dtype=np.intp), (2, 1, 0), mode=mode) + assert(res.shape == (0,)) + + with assert_raises(ValueError): + np.ravel_multi_index( + np.zeros((3, 1), dtype=np.intp), (2, 1, 0), mode=mode) + + def test_empty_array_unravel(self): + res = np.unravel_index(np.zeros(0, dtype=np.intp), (2, 1, 0)) + # res is a tuple of three empty arrays + assert(len(res) == 3) + assert(all(a.shape == (0,) for a in res)) + + with assert_raises(ValueError): + np.unravel_index([1], (2, 1, 0)) + +class TestGrid: + def test_basic(self): + a = mgrid[-1:1:10j] + b = mgrid[-1:1:0.1] + 
assert_(a.shape == (10,)) + assert_(b.shape == (20,)) + assert_(a[0] == -1) + assert_almost_equal(a[-1], 1) + assert_(b[0] == -1) + assert_almost_equal(b[1]-b[0], 0.1, 11) + assert_almost_equal(b[-1], b[0]+19*0.1, 11) + assert_almost_equal(a[1]-a[0], 2.0/9.0, 11) + + def test_linspace_equivalence(self): + y, st = np.linspace(2, 10, retstep=True) + assert_almost_equal(st, 8/49.0) + assert_array_almost_equal(y, mgrid[2:10:50j], 13) + + def test_nd(self): + c = mgrid[-1:1:10j, -2:2:10j] + d = mgrid[-1:1:0.1, -2:2:0.2] + assert_(c.shape == (2, 10, 10)) + assert_(d.shape == (2, 20, 20)) + assert_array_equal(c[0][0, :], -np.ones(10, 'd')) + assert_array_equal(c[1][:, 0], -2*np.ones(10, 'd')) + assert_array_almost_equal(c[0][-1, :], np.ones(10, 'd'), 11) + assert_array_almost_equal(c[1][:, -1], 2*np.ones(10, 'd'), 11) + assert_array_almost_equal(d[0, 1, :] - d[0, 0, :], + 0.1*np.ones(20, 'd'), 11) + assert_array_almost_equal(d[1, :, 1] - d[1, :, 0], + 0.2*np.ones(20, 'd'), 11) + + def test_sparse(self): + grid_full = mgrid[-1:1:10j, -2:2:10j] + grid_sparse = ogrid[-1:1:10j, -2:2:10j] + + # sparse grids can be made dense by broadcasting + grid_broadcast = np.broadcast_arrays(*grid_sparse) + for f, b in zip(grid_full, grid_broadcast): + assert_equal(f, b) + + @pytest.mark.parametrize("start, stop, step, expected", [ + (None, 10, 10j, (200, 10)), + (-10, 20, None, (1800, 30)), + ]) + def test_mgrid_size_none_handling(self, start, stop, step, expected): + # regression test None value handling for + # start and step values used by mgrid; + # internally, this aims to cover previously + # unexplored code paths in nd_grid() + grid = mgrid[start:stop:step, start:stop:step] + # need a smaller grid to explore one of the + # untested code paths + grid_small = mgrid[start:stop:step] + assert_equal(grid.size, expected[0]) + assert_equal(grid_small.size, expected[1]) + + def test_accepts_npfloating(self): + # regression test for #16466 + grid64 = mgrid[0.1:0.33:0.1, ] + grid32 = mgrid[np.float32(0.1):np.float32(0.33):np.float32(0.1), ] + assert_(grid32.dtype == np.float64) + assert_array_almost_equal(grid64, grid32) + + # different code path for single slice + grid64 = mgrid[0.1:0.33:0.1] + grid32 = mgrid[np.float32(0.1):np.float32(0.33):np.float32(0.1)] + assert_(grid32.dtype == np.float64) + assert_array_almost_equal(grid64, grid32) + + def test_accepts_npcomplexfloating(self): + # Related to #16466 + assert_array_almost_equal( + mgrid[0.1:0.3:3j, ], mgrid[0.1:0.3:np.complex64(3j), ] + ) + + # different code path for single slice + assert_array_almost_equal( + mgrid[0.1:0.3:3j], mgrid[0.1:0.3:np.complex64(3j)] + ) + +class TestConcatenator: + def test_1d(self): + assert_array_equal(r_[1, 2, 3, 4, 5, 6], np.array([1, 2, 3, 4, 5, 6])) + b = np.ones(5) + c = r_[b, 0, 0, b] + assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1]) + + def test_mixed_type(self): + g = r_[10.1, 1:10] + assert_(g.dtype == 'f8') + + def test_more_mixed_type(self): + g = r_[-10.1, np.array([1]), np.array([2, 3, 4]), 10.0] + assert_(g.dtype == 'f8') + + def test_complex_step(self): + # Regression test for #12262 + g = r_[0:36:100j] + assert_(g.shape == (100,)) + + # Related to #16466 + g = r_[0:36:np.complex64(100j)] + assert_(g.shape == (100,)) + + def test_2d(self): + b = np.random.rand(5, 5) + c = np.random.rand(5, 5) + d = r_['1', b, c] # append columns + assert_(d.shape == (5, 10)) + assert_array_equal(d[:, :5], b) + assert_array_equal(d[:, 5:], c) + d = r_[b, c] + assert_(d.shape == (10, 5)) + assert_array_equal(d[:5, :], b) 
+ assert_array_equal(d[5:, :], c) + + def test_0d(self): + assert_equal(r_[0, np.array(1), 2], [0, 1, 2]) + assert_equal(r_[[0, 1, 2], np.array(3)], [0, 1, 2, 3]) + assert_equal(r_[np.array(0), [1, 2, 3]], [0, 1, 2, 3]) + + +class TestNdenumerate: + def test_basic(self): + a = np.array([[1, 2], [3, 4]]) + assert_equal(list(ndenumerate(a)), + [((0, 0), 1), ((0, 1), 2), ((1, 0), 3), ((1, 1), 4)]) + + +class TestIndexExpression: + def test_regression_1(self): + # ticket #1196 + a = np.arange(2) + assert_equal(a[:-1], a[s_[:-1]]) + assert_equal(a[:-1], a[index_exp[:-1]]) + + def test_simple_1(self): + a = np.random.rand(4, 5, 6) + + assert_equal(a[:, :3, [1, 2]], a[index_exp[:, :3, [1, 2]]]) + assert_equal(a[:, :3, [1, 2]], a[s_[:, :3, [1, 2]]]) + + +class TestIx_: + def test_regression_1(self): + # Test empty untyped inputs create outputs of indexing type, gh-5804 + a, = np.ix_(range(0)) + assert_equal(a.dtype, np.intp) + + a, = np.ix_([]) + assert_equal(a.dtype, np.intp) + + # but if the type is specified, don't change it + a, = np.ix_(np.array([], dtype=np.float32)) + assert_equal(a.dtype, np.float32) + + def test_shape_and_dtype(self): + sizes = (4, 5, 3, 2) + # Test both lists and arrays + for func in (range, np.arange): + arrays = np.ix_(*[func(sz) for sz in sizes]) + for k, (a, sz) in enumerate(zip(arrays, sizes)): + assert_equal(a.shape[k], sz) + assert_(all(sh == 1 for j, sh in enumerate(a.shape) if j != k)) + assert_(np.issubdtype(a.dtype, np.integer)) + + def test_bool(self): + bool_a = [True, False, True, True] + int_a, = np.nonzero(bool_a) + assert_equal(np.ix_(bool_a)[0], int_a) + + def test_1d_only(self): + idx2d = [[1, 2, 3], [4, 5, 6]] + assert_raises(ValueError, np.ix_, idx2d) + + def test_repeated_input(self): + length_of_vector = 5 + x = np.arange(length_of_vector) + out = ix_(x, x) + assert_equal(out[0].shape, (length_of_vector, 1)) + assert_equal(out[1].shape, (1, length_of_vector)) + # check that input shape is not modified + assert_equal(x.shape, (length_of_vector,)) + + +def test_c_(): + a = np.c_[np.array([[1, 2, 3]]), 0, 0, np.array([[4, 5, 6]])] + assert_equal(a, [[1, 2, 3, 0, 0, 4, 5, 6]]) + + +class TestFillDiagonal: + def test_basic(self): + a = np.zeros((3, 3), int) + fill_diagonal(a, 5) + assert_array_equal( + a, np.array([[5, 0, 0], + [0, 5, 0], + [0, 0, 5]]) + ) + + def test_tall_matrix(self): + a = np.zeros((10, 3), int) + fill_diagonal(a, 5) + assert_array_equal( + a, np.array([[5, 0, 0], + [0, 5, 0], + [0, 0, 5], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0]]) + ) + + def test_tall_matrix_wrap(self): + a = np.zeros((10, 3), int) + fill_diagonal(a, 5, True) + assert_array_equal( + a, np.array([[5, 0, 0], + [0, 5, 0], + [0, 0, 5], + [0, 0, 0], + [5, 0, 0], + [0, 5, 0], + [0, 0, 5], + [0, 0, 0], + [5, 0, 0], + [0, 5, 0]]) + ) + + def test_wide_matrix(self): + a = np.zeros((3, 10), int) + fill_diagonal(a, 5) + assert_array_equal( + a, np.array([[5, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 5, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 5, 0, 0, 0, 0, 0, 0, 0]]) + ) + + def test_operate_4d_array(self): + a = np.zeros((3, 3, 3, 3), int) + fill_diagonal(a, 4) + i = np.array([0, 1, 2]) + assert_equal(np.where(a != 0), (i, i, i, i)) + + def test_low_dim_handling(self): + # raise error with low dimensionality + a = np.zeros(3, int) + with assert_raises_regex(ValueError, "at least 2-d"): + fill_diagonal(a, 5) + + def test_hetero_shape_handling(self): + # raise error with high dimensionality and + # shape mismatch + a = np.zeros((3,3,7,3), 
int) + with assert_raises_regex(ValueError, "equal length"): + fill_diagonal(a, 2) + + +def test_diag_indices(): + di = diag_indices(4) + a = np.array([[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16]]) + a[di] = 100 + assert_array_equal( + a, np.array([[100, 2, 3, 4], + [5, 100, 7, 8], + [9, 10, 100, 12], + [13, 14, 15, 100]]) + ) + + # Now, we create indices to manipulate a 3-d array: + d3 = diag_indices(2, 3) + + # And use it to set the diagonal of a zeros array to 1: + a = np.zeros((2, 2, 2), int) + a[d3] = 1 + assert_array_equal( + a, np.array([[[1, 0], + [0, 0]], + [[0, 0], + [0, 1]]]) + ) + + +class TestDiagIndicesFrom: + + def test_diag_indices_from(self): + x = np.random.random((4, 4)) + r, c = diag_indices_from(x) + assert_array_equal(r, np.arange(4)) + assert_array_equal(c, np.arange(4)) + + def test_error_small_input(self): + x = np.ones(7) + with assert_raises_regex(ValueError, "at least 2-d"): + diag_indices_from(x) + + def test_error_shape_mismatch(self): + x = np.zeros((3, 3, 2, 3), int) + with assert_raises_regex(ValueError, "equal length"): + diag_indices_from(x) + + +def test_ndindex(): + x = list(ndindex(1, 2, 3)) + expected = [ix for ix, e in ndenumerate(np.zeros((1, 2, 3)))] + assert_array_equal(x, expected) + + x = list(ndindex((1, 2, 3))) + assert_array_equal(x, expected) + + # Test use of scalars and tuples + x = list(ndindex((3,))) + assert_array_equal(x, list(ndindex(3))) + + # Make sure size argument is optional + x = list(ndindex()) + assert_equal(x, [()]) + + x = list(ndindex(())) + assert_equal(x, [()]) + + # Make sure 0-sized ndindex works correctly + x = list(ndindex(*[0])) + assert_equal(x, []) diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/test_io.py b/MLPY/Lib/site-packages/numpy/lib/tests/test_io.py new file mode 100644 index 0000000000000000000000000000000000000000..ebbcd9c7146077cab81667ea97b2f9a8599dc5df --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/tests/test_io.py @@ -0,0 +1,2660 @@ +import sys +import gc +import gzip +import os +import threading +import time +import warnings +import io +import re +import pytest +from pathlib import Path +from tempfile import NamedTemporaryFile +from io import BytesIO, StringIO +from datetime import datetime +import locale +from multiprocessing import Process, Value +from ctypes import c_bool + +import numpy as np +import numpy.ma as ma +from numpy.lib._iotools import ConverterError, ConversionWarning +from numpy.compat import asbytes +from numpy.ma.testutils import assert_equal +from numpy.testing import ( + assert_warns, assert_, assert_raises_regex, assert_raises, + assert_allclose, assert_array_equal, temppath, tempdir, IS_PYPY, + HAS_REFCOUNT, suppress_warnings, assert_no_gc_cycles, assert_no_warnings, + break_cycles + ) +from numpy.testing._private.utils import requires_memory + + +class TextIO(BytesIO): + """Helper IO class. + + Writes encode strings to bytes if needed, reads return bytes. + This makes it easier to emulate files opened in binary mode + without needing to explicitly convert strings to bytes in + setting up the test data. 
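+
+    For example, TextIO("1 2\n").read() returns b"1 2\n".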
+ + """ + def __init__(self, s=""): + BytesIO.__init__(self, asbytes(s)) + + def write(self, s): + BytesIO.write(self, asbytes(s)) + + def writelines(self, lines): + BytesIO.writelines(self, [asbytes(s) for s in lines]) + + +IS_64BIT = sys.maxsize > 2**32 +try: + import bz2 + HAS_BZ2 = True +except ImportError: + HAS_BZ2 = False +try: + import lzma + HAS_LZMA = True +except ImportError: + HAS_LZMA = False + + +def strptime(s, fmt=None): + """ + This function is available in the datetime module only from Python >= + 2.5. + + """ + if type(s) == bytes: + s = s.decode("latin1") + return datetime(*time.strptime(s, fmt)[:3]) + + +class RoundtripTest: + def roundtrip(self, save_func, *args, **kwargs): + """ + save_func : callable + Function used to save arrays to file. + file_on_disk : bool + If true, store the file on disk, instead of in a + string buffer. + save_kwds : dict + Parameters passed to `save_func`. + load_kwds : dict + Parameters passed to `numpy.load`. + args : tuple of arrays + Arrays stored to file. + + """ + save_kwds = kwargs.get('save_kwds', {}) + load_kwds = kwargs.get('load_kwds', {"allow_pickle": True}) + file_on_disk = kwargs.get('file_on_disk', False) + + if file_on_disk: + target_file = NamedTemporaryFile(delete=False) + load_file = target_file.name + else: + target_file = BytesIO() + load_file = target_file + + try: + arr = args + + save_func(target_file, *arr, **save_kwds) + target_file.flush() + target_file.seek(0) + + if sys.platform == 'win32' and not isinstance(target_file, BytesIO): + target_file.close() + + arr_reloaded = np.load(load_file, **load_kwds) + + self.arr = arr + self.arr_reloaded = arr_reloaded + finally: + if not isinstance(target_file, BytesIO): + target_file.close() + # holds an open file descriptor so it can't be deleted on win + if 'arr_reloaded' in locals(): + if not isinstance(arr_reloaded, np.lib.npyio.NpzFile): + os.remove(target_file.name) + + def check_roundtrips(self, a): + self.roundtrip(a) + self.roundtrip(a, file_on_disk=True) + self.roundtrip(np.asfortranarray(a)) + self.roundtrip(np.asfortranarray(a), file_on_disk=True) + if a.shape[0] > 1: + # neither C nor Fortran contiguous for 2D arrays or more + self.roundtrip(np.asfortranarray(a)[1:]) + self.roundtrip(np.asfortranarray(a)[1:], file_on_disk=True) + + def test_array(self): + a = np.array([], float) + self.check_roundtrips(a) + + a = np.array([[1, 2], [3, 4]], float) + self.check_roundtrips(a) + + a = np.array([[1, 2], [3, 4]], int) + self.check_roundtrips(a) + + a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.csingle) + self.check_roundtrips(a) + + a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.cdouble) + self.check_roundtrips(a) + + def test_array_object(self): + a = np.array([], object) + self.check_roundtrips(a) + + a = np.array([[1, 2], [3, 4]], object) + self.check_roundtrips(a) + + def test_1D(self): + a = np.array([1, 2, 3, 4], int) + self.roundtrip(a) + + @pytest.mark.skipif(sys.platform == 'win32', reason="Fails on Win32") + def test_mmap(self): + a = np.array([[1, 2.5], [4, 7.3]]) + self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'}) + + a = np.asfortranarray([[1, 2.5], [4, 7.3]]) + self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'}) + + def test_record(self): + a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) + self.check_roundtrips(a) + + @pytest.mark.slow + def test_format_2_0(self): + dt = [(("%d" % i) * 100, float) for i in range(500)] + a = np.ones(1000, dtype=dt) + with 
warnings.catch_warnings(record=True): + warnings.filterwarnings('always', '', UserWarning) + self.check_roundtrips(a) + + +class TestSaveLoad(RoundtripTest): + def roundtrip(self, *args, **kwargs): + RoundtripTest.roundtrip(self, np.save, *args, **kwargs) + assert_equal(self.arr[0], self.arr_reloaded) + assert_equal(self.arr[0].dtype, self.arr_reloaded.dtype) + assert_equal(self.arr[0].flags.fnc, self.arr_reloaded.flags.fnc) + + +class TestSavezLoad(RoundtripTest): + def roundtrip(self, *args, **kwargs): + RoundtripTest.roundtrip(self, np.savez, *args, **kwargs) + try: + for n, arr in enumerate(self.arr): + reloaded = self.arr_reloaded['arr_%d' % n] + assert_equal(arr, reloaded) + assert_equal(arr.dtype, reloaded.dtype) + assert_equal(arr.flags.fnc, reloaded.flags.fnc) + finally: + # delete tempfile, must be done here on windows + if self.arr_reloaded.fid: + self.arr_reloaded.fid.close() + os.remove(self.arr_reloaded.fid.name) + + @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform") + @pytest.mark.slow + def test_big_arrays(self): + L = (1 << 31) + 100000 + a = np.empty(L, dtype=np.uint8) + with temppath(prefix="numpy_test_big_arrays_", suffix=".npz") as tmp: + np.savez(tmp, a=a) + del a + npfile = np.load(tmp) + a = npfile['a'] # Should succeed + npfile.close() + del a # Avoid pyflakes unused variable warning. + + def test_multiple_arrays(self): + a = np.array([[1, 2], [3, 4]], float) + b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex) + self.roundtrip(a, b) + + def test_named_arrays(self): + a = np.array([[1, 2], [3, 4]], float) + b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex) + c = BytesIO() + np.savez(c, file_a=a, file_b=b) + c.seek(0) + l = np.load(c) + assert_equal(a, l['file_a']) + assert_equal(b, l['file_b']) + + def test_BagObj(self): + a = np.array([[1, 2], [3, 4]], float) + b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex) + c = BytesIO() + np.savez(c, file_a=a, file_b=b) + c.seek(0) + l = np.load(c) + assert_equal(sorted(dir(l.f)), ['file_a','file_b']) + assert_equal(a, l.f.file_a) + assert_equal(b, l.f.file_b) + + def test_savez_filename_clashes(self): + # Test that issue #852 is fixed + # and savez functions in multithreaded environment + + def writer(error_list): + with temppath(suffix='.npz') as tmp: + arr = np.random.randn(500, 500) + try: + np.savez(tmp, arr=arr) + except OSError as err: + error_list.append(err) + + errors = [] + threads = [threading.Thread(target=writer, args=(errors,)) + for j in range(3)] + for t in threads: + t.start() + for t in threads: + t.join() + + if errors: + raise AssertionError(errors) + + def test_not_closing_opened_fid(self): + # Test that issue #2178 is fixed: + # verify could seek on 'loaded' file + with temppath(suffix='.npz') as tmp: + with open(tmp, 'wb') as fp: + np.savez(fp, data='LOVELY LOAD') + with open(tmp, 'rb', 10000) as fp: + fp.seek(0) + assert_(not fp.closed) + np.load(fp)['data'] + # fp must not get closed by .load + assert_(not fp.closed) + fp.seek(0) + assert_(not fp.closed) + + @pytest.mark.slow_pypy + def test_closing_fid(self): + # Test that issue #1517 (too many opened files) remains closed + # It might be a "weak" test since failed to get triggered on + # e.g. 
Debian sid of 2012 Jul 05 but was reported to + # trigger the failure on Ubuntu 10.04: + # http://projects.scipy.org/numpy/ticket/1517#comment:2 + with temppath(suffix='.npz') as tmp: + np.savez(tmp, data='LOVELY LOAD') + # We need to check if the garbage collector can properly close + # numpy npz file returned by np.load when their reference count + # goes to zero. Python 3 running in debug mode raises a + # ResourceWarning when file closing is left to the garbage + # collector, so we catch the warnings. + with suppress_warnings() as sup: + sup.filter(ResourceWarning) # TODO: specify exact message + for i in range(1, 1025): + try: + np.load(tmp)["data"] + except Exception as e: + msg = "Failed to load data from a file: %s" % e + raise AssertionError(msg) + finally: + if IS_PYPY: + gc.collect() + + def test_closing_zipfile_after_load(self): + # Check that zipfile owns file and can close it. This needs to + # pass a file name to load for the test. On windows failure will + # cause a second error will be raised when the attempt to remove + # the open file is made. + prefix = 'numpy_test_closing_zipfile_after_load_' + with temppath(suffix='.npz', prefix=prefix) as tmp: + np.savez(tmp, lab='place holder') + data = np.load(tmp) + fp = data.zip.fp + data.close() + assert_(fp.closed) + + +class TestSaveTxt: + def test_array(self): + a = np.array([[1, 2], [3, 4]], float) + fmt = "%.18e" + c = BytesIO() + np.savetxt(c, a, fmt=fmt) + c.seek(0) + assert_equal(c.readlines(), + [asbytes((fmt + ' ' + fmt + '\n') % (1, 2)), + asbytes((fmt + ' ' + fmt + '\n') % (3, 4))]) + + a = np.array([[1, 2], [3, 4]], int) + c = BytesIO() + np.savetxt(c, a, fmt='%d') + c.seek(0) + assert_equal(c.readlines(), [b'1 2\n', b'3 4\n']) + + def test_1D(self): + a = np.array([1, 2, 3, 4], int) + c = BytesIO() + np.savetxt(c, a, fmt='%d') + c.seek(0) + lines = c.readlines() + assert_equal(lines, [b'1\n', b'2\n', b'3\n', b'4\n']) + + def test_0D_3D(self): + c = BytesIO() + assert_raises(ValueError, np.savetxt, c, np.array(1)) + assert_raises(ValueError, np.savetxt, c, np.array([[[1], [2]]])) + + def test_structured(self): + a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) + c = BytesIO() + np.savetxt(c, a, fmt='%d') + c.seek(0) + assert_equal(c.readlines(), [b'1 2\n', b'3 4\n']) + + def test_structured_padded(self): + # gh-13297 + a = np.array([(1, 2, 3),(4, 5, 6)], dtype=[ + ('foo', 'i4'), ('bar', 'i4'), ('baz', 'i4') + ]) + c = BytesIO() + np.savetxt(c, a[['foo', 'baz']], fmt='%d') + c.seek(0) + assert_equal(c.readlines(), [b'1 3\n', b'4 6\n']) + + def test_multifield_view(self): + a = np.ones(1, dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'f4')]) + v = a[['x', 'z']] + with temppath(suffix='.npy') as path: + path = Path(path) + np.save(path, v) + data = np.load(path) + assert_array_equal(data, v) + + def test_delimiter(self): + a = np.array([[1., 2.], [3., 4.]]) + c = BytesIO() + np.savetxt(c, a, delimiter=',', fmt='%d') + c.seek(0) + assert_equal(c.readlines(), [b'1,2\n', b'3,4\n']) + + def test_format(self): + a = np.array([(1, 2), (3, 4)]) + c = BytesIO() + # Sequence of formats + np.savetxt(c, a, fmt=['%02d', '%3.1f']) + c.seek(0) + assert_equal(c.readlines(), [b'01 2.0\n', b'03 4.0\n']) + + # A single multiformat string + c = BytesIO() + np.savetxt(c, a, fmt='%02d : %3.1f') + c.seek(0) + lines = c.readlines() + assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n']) + + # Specify delimiter, should be overridden + c = BytesIO() + np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',') + c.seek(0) + lines = 
c.readlines() + assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n']) + + # Bad fmt, should raise a ValueError + c = BytesIO() + assert_raises(ValueError, np.savetxt, c, a, fmt=99) + + def test_header_footer(self): + # Test the functionality of the header and footer keyword argument. + + c = BytesIO() + a = np.array([(1, 2), (3, 4)], dtype=int) + test_header_footer = 'Test header / footer' + # Test the header keyword argument + np.savetxt(c, a, fmt='%1d', header=test_header_footer) + c.seek(0) + assert_equal(c.read(), + asbytes('# ' + test_header_footer + '\n1 2\n3 4\n')) + # Test the footer keyword argument + c = BytesIO() + np.savetxt(c, a, fmt='%1d', footer=test_header_footer) + c.seek(0) + assert_equal(c.read(), + asbytes('1 2\n3 4\n# ' + test_header_footer + '\n')) + # Test the commentstr keyword argument used on the header + c = BytesIO() + commentstr = '% ' + np.savetxt(c, a, fmt='%1d', + header=test_header_footer, comments=commentstr) + c.seek(0) + assert_equal(c.read(), + asbytes(commentstr + test_header_footer + '\n' + '1 2\n3 4\n')) + # Test the commentstr keyword argument used on the footer + c = BytesIO() + commentstr = '% ' + np.savetxt(c, a, fmt='%1d', + footer=test_header_footer, comments=commentstr) + c.seek(0) + assert_equal(c.read(), + asbytes('1 2\n3 4\n' + commentstr + test_header_footer + '\n')) + + def test_file_roundtrip(self): + with temppath() as name: + a = np.array([(1, 2), (3, 4)]) + np.savetxt(name, a) + b = np.loadtxt(name) + assert_array_equal(a, b) + + def test_complex_arrays(self): + ncols = 2 + nrows = 2 + a = np.zeros((ncols, nrows), dtype=np.complex128) + re = np.pi + im = np.e + a[:] = re + 1.0j * im + + # One format only + c = BytesIO() + np.savetxt(c, a, fmt=' %+.3e') + c.seek(0) + lines = c.readlines() + assert_equal( + lines, + [b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n', + b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n']) + + # One format for each real and imaginary part + c = BytesIO() + np.savetxt(c, a, fmt=' %+.3e' * 2 * ncols) + c.seek(0) + lines = c.readlines() + assert_equal( + lines, + [b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n', + b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n']) + + # One format for each complex number + c = BytesIO() + np.savetxt(c, a, fmt=['(%.3e%+.3ej)'] * ncols) + c.seek(0) + lines = c.readlines() + assert_equal( + lines, + [b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n', + b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n']) + + def test_complex_negative_exponent(self): + # Previous to 1.15, some formats generated x+-yj, gh 7895 + ncols = 2 + nrows = 2 + a = np.zeros((ncols, nrows), dtype=np.complex128) + re = np.pi + im = np.e + a[:] = re - 1.0j * im + c = BytesIO() + np.savetxt(c, a, fmt='%.3e') + c.seek(0) + lines = c.readlines() + assert_equal( + lines, + [b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n', + b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n']) + + + def test_custom_writer(self): + + class CustomWriter(list): + def write(self, text): + self.extend(text.split(b'\n')) + + w = CustomWriter() + a = np.array([(1, 2), (3, 4)]) + np.savetxt(w, a) + b = np.loadtxt(w) + assert_array_equal(a, b) + + def test_unicode(self): + utf8 = b'\xcf\x96'.decode('UTF-8') + a = np.array([utf8], dtype=np.unicode_) + with tempdir() as tmpdir: + # set encoding as on windows it may not be unicode even on py3 + np.savetxt(os.path.join(tmpdir, 'test.csv'), a, fmt=['%s'], + encoding='UTF-8') + + def test_unicode_roundtrip(self): + utf8 = b'\xcf\x96'.decode('UTF-8') + a = 
np.array([utf8], dtype=np.unicode_) + # our gz wrapper support encoding + suffixes = ['', '.gz'] + if HAS_BZ2: + suffixes.append('.bz2') + if HAS_LZMA: + suffixes.extend(['.xz', '.lzma']) + with tempdir() as tmpdir: + for suffix in suffixes: + np.savetxt(os.path.join(tmpdir, 'test.csv' + suffix), a, + fmt=['%s'], encoding='UTF-16-LE') + b = np.loadtxt(os.path.join(tmpdir, 'test.csv' + suffix), + encoding='UTF-16-LE', dtype=np.unicode_) + assert_array_equal(a, b) + + def test_unicode_bytestream(self): + utf8 = b'\xcf\x96'.decode('UTF-8') + a = np.array([utf8], dtype=np.unicode_) + s = BytesIO() + np.savetxt(s, a, fmt=['%s'], encoding='UTF-8') + s.seek(0) + assert_equal(s.read().decode('UTF-8'), utf8 + '\n') + + def test_unicode_stringstream(self): + utf8 = b'\xcf\x96'.decode('UTF-8') + a = np.array([utf8], dtype=np.unicode_) + s = StringIO() + np.savetxt(s, a, fmt=['%s'], encoding='UTF-8') + s.seek(0) + assert_equal(s.read(), utf8 + '\n') + + @pytest.mark.parametrize("fmt", [u"%f", b"%f"]) + @pytest.mark.parametrize("iotype", [StringIO, BytesIO]) + def test_unicode_and_bytes_fmt(self, fmt, iotype): + # string type of fmt should not matter, see also gh-4053 + a = np.array([1.]) + s = iotype() + np.savetxt(s, a, fmt=fmt) + s.seek(0) + if iotype is StringIO: + assert_equal(s.read(), u"%f\n" % 1.) + else: + assert_equal(s.read(), b"%f\n" % 1.) + + @pytest.mark.skipif(sys.platform=='win32', reason="files>4GB may not work") + @pytest.mark.slow + @requires_memory(free_bytes=7e9) + def test_large_zip(self): + def check_large_zip(memoryerror_raised): + memoryerror_raised.value = False + try: + # The test takes at least 6GB of memory, writes a file larger + # than 4GB. This tests the ``allowZip64`` kwarg to ``zipfile`` + test_data = np.asarray([np.random.rand( + np.random.randint(50,100),4) + for i in range(800000)], dtype=object) + with tempdir() as tmpdir: + np.savez(os.path.join(tmpdir, 'test.npz'), + test_data=test_data) + except MemoryError: + memoryerror_raised.value = True + raise + # run in a subprocess to ensure memory is released on PyPy, see gh-15775 + # Use an object in shared memory to re-raise the MemoryError exception + # in our process if needed, see gh-16889 + memoryerror_raised = Value(c_bool) + p = Process(target=check_large_zip, args=(memoryerror_raised,)) + p.start() + p.join() + if memoryerror_raised.value: + raise MemoryError("Child process raised a MemoryError exception") + # -9 indicates a SIGKILL, probably an OOM. 
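+        # (SIGKILL cannot be caught inside the child, so no MemoryError
+        # propagates back; the exit code is the only evidence of the kill)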
+ if p.exitcode == -9: + pytest.xfail("subprocess got a SIGKILL, apparently free memory was not sufficient") + assert p.exitcode == 0 + +class LoadTxtBase: + def check_compressed(self, fopen, suffixes): + # Test that we can load data from a compressed file + wanted = np.arange(6).reshape((2, 3)) + linesep = ('\n', '\r\n', '\r') + for sep in linesep: + data = '0 1 2' + sep + '3 4 5' + for suffix in suffixes: + with temppath(suffix=suffix) as name: + with fopen(name, mode='wt', encoding='UTF-32-LE') as f: + f.write(data) + res = self.loadfunc(name, encoding='UTF-32-LE') + assert_array_equal(res, wanted) + with fopen(name, "rt", encoding='UTF-32-LE') as f: + res = self.loadfunc(f) + assert_array_equal(res, wanted) + + def test_compressed_gzip(self): + self.check_compressed(gzip.open, ('.gz',)) + + @pytest.mark.skipif(not HAS_BZ2, reason="Needs bz2") + def test_compressed_bz2(self): + self.check_compressed(bz2.open, ('.bz2',)) + + @pytest.mark.skipif(not HAS_LZMA, reason="Needs lzma") + def test_compressed_lzma(self): + self.check_compressed(lzma.open, ('.xz', '.lzma')) + + def test_encoding(self): + with temppath() as path: + with open(path, "wb") as f: + f.write('0.\n1.\n2.'.encode("UTF-16")) + x = self.loadfunc(path, encoding="UTF-16") + assert_array_equal(x, [0., 1., 2.]) + + def test_stringload(self): + # umlaute + nonascii = b'\xc3\xb6\xc3\xbc\xc3\xb6'.decode("UTF-8") + with temppath() as path: + with open(path, "wb") as f: + f.write(nonascii.encode("UTF-16")) + x = self.loadfunc(path, encoding="UTF-16", dtype=np.unicode_) + assert_array_equal(x, nonascii) + + def test_binary_decode(self): + utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04' + v = self.loadfunc(BytesIO(utf16), dtype=np.unicode_, encoding='UTF-16') + assert_array_equal(v, np.array(utf16.decode('UTF-16').split())) + + def test_converters_decode(self): + # test converters that decode strings + c = TextIO() + c.write(b'\xcf\x96') + c.seek(0) + x = self.loadfunc(c, dtype=np.unicode_, + converters={0: lambda x: x.decode('UTF-8')}) + a = np.array([b'\xcf\x96'.decode('UTF-8')]) + assert_array_equal(x, a) + + def test_converters_nodecode(self): + # test native string converters enabled by setting an encoding + utf8 = b'\xcf\x96'.decode('UTF-8') + with temppath() as path: + with io.open(path, 'wt', encoding='UTF-8') as f: + f.write(utf8) + x = self.loadfunc(path, dtype=np.unicode_, + converters={0: lambda x: x + 't'}, + encoding='UTF-8') + a = np.array([utf8 + 't']) + assert_array_equal(x, a) + + +class TestLoadTxt(LoadTxtBase): + loadfunc = staticmethod(np.loadtxt) + + def setup(self): + # lower chunksize for testing + self.orig_chunk = np.lib.npyio._loadtxt_chunksize + np.lib.npyio._loadtxt_chunksize = 1 + def teardown(self): + np.lib.npyio._loadtxt_chunksize = self.orig_chunk + + def test_record(self): + c = TextIO() + c.write('1 2\n3 4') + c.seek(0) + x = np.loadtxt(c, dtype=[('x', np.int32), ('y', np.int32)]) + a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) + assert_array_equal(x, a) + + d = TextIO() + d.write('M 64.0 75.0\nF 25.0 60.0') + d.seek(0) + mydescriptor = {'names': ('gender', 'age', 'weight'), + 'formats': ('S1', 'i4', 'f4')} + b = np.array([('M', 64.0, 75.0), + ('F', 25.0, 60.0)], dtype=mydescriptor) + y = np.loadtxt(d, dtype=mydescriptor) + assert_array_equal(y, b) + + def test_array(self): + c = TextIO() + c.write('1 2\n3 4') + + c.seek(0) + x = np.loadtxt(c, dtype=int) + a = np.array([[1, 2], [3, 4]], int) + assert_array_equal(x, a) + + c.seek(0) + x = np.loadtxt(c, dtype=float) + a = np.array([[1, 
2], [3, 4]], float) + assert_array_equal(x, a) + + def test_1D(self): + c = TextIO() + c.write('1\n2\n3\n4\n') + c.seek(0) + x = np.loadtxt(c, dtype=int) + a = np.array([1, 2, 3, 4], int) + assert_array_equal(x, a) + + c = TextIO() + c.write('1,2,3,4\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',') + a = np.array([1, 2, 3, 4], int) + assert_array_equal(x, a) + + def test_missing(self): + c = TextIO() + c.write('1,2,3,,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + converters={3: lambda s: int(s or - 999)}) + a = np.array([1, 2, 3, -999, 5], int) + assert_array_equal(x, a) + + def test_converters_with_usecols(self): + c = TextIO() + c.write('1,2,3,,5\n6,7,8,9,10\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + converters={3: lambda s: int(s or - 999)}, + usecols=(1, 3,)) + a = np.array([[2, -999], [7, 9]], int) + assert_array_equal(x, a) + + def test_comments_unicode(self): + c = TextIO() + c.write('# comment\n1,2,3,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + comments=u'#') + a = np.array([1, 2, 3, 5], int) + assert_array_equal(x, a) + + def test_comments_byte(self): + c = TextIO() + c.write('# comment\n1,2,3,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + comments=b'#') + a = np.array([1, 2, 3, 5], int) + assert_array_equal(x, a) + + def test_comments_multiple(self): + c = TextIO() + c.write('# comment\n1,2,3\n@ comment2\n4,5,6 // comment3') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + comments=['#', '@', '//']) + a = np.array([[1, 2, 3], [4, 5, 6]], int) + assert_array_equal(x, a) + + def test_comments_multi_chars(self): + c = TextIO() + c.write('/* comment\n1,2,3,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + comments='/*') + a = np.array([1, 2, 3, 5], int) + assert_array_equal(x, a) + + # Check that '/*' is not transformed to ['/', '*'] + c = TextIO() + c.write('*/ comment\n1,2,3,5\n') + c.seek(0) + assert_raises(ValueError, np.loadtxt, c, dtype=int, delimiter=',', + comments='/*') + + def test_skiprows(self): + c = TextIO() + c.write('comment\n1,2,3,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + skiprows=1) + a = np.array([1, 2, 3, 5], int) + assert_array_equal(x, a) + + c = TextIO() + c.write('# comment\n1,2,3,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + skiprows=1) + a = np.array([1, 2, 3, 5], int) + assert_array_equal(x, a) + + def test_usecols(self): + a = np.array([[1, 2], [3, 4]], float) + c = BytesIO() + np.savetxt(c, a) + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=(1,)) + assert_array_equal(x, a[:, 1]) + + a = np.array([[1, 2, 3], [3, 4, 5]], float) + c = BytesIO() + np.savetxt(c, a) + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=(1, 2)) + assert_array_equal(x, a[:, 1:]) + + # Testing with arrays instead of tuples. 
+ c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2])) + assert_array_equal(x, a[:, 1:]) + + # Testing with an integer instead of a sequence + for int_type in [int, np.int8, np.int16, + np.int32, np.int64, np.uint8, np.uint16, + np.uint32, np.uint64]: + to_read = int_type(1) + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=to_read) + assert_array_equal(x, a[:, 1]) + + # Testing with some crazy custom integer type + class CrazyInt: + def __index__(self): + return 1 + + crazy_int = CrazyInt() + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=crazy_int) + assert_array_equal(x, a[:, 1]) + + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=(crazy_int,)) + assert_array_equal(x, a[:, 1]) + + # Checking with dtypes defined converters. + data = '''JOE 70.1 25.3 + BOB 60.5 27.9 + ''' + c = TextIO(data) + names = ['stid', 'temp'] + dtypes = ['S4', 'f8'] + arr = np.loadtxt(c, usecols=(0, 2), dtype=list(zip(names, dtypes))) + assert_equal(arr['stid'], [b"JOE", b"BOB"]) + assert_equal(arr['temp'], [25.3, 27.9]) + + # Testing non-ints in usecols + c.seek(0) + bogus_idx = 1.5 + assert_raises_regex( + TypeError, + '^usecols must be.*%s' % type(bogus_idx), + np.loadtxt, c, usecols=bogus_idx + ) + + assert_raises_regex( + TypeError, + '^usecols must be.*%s' % type(bogus_idx), + np.loadtxt, c, usecols=[0, bogus_idx, 0] + ) + + def test_fancy_dtype(self): + c = TextIO() + c.write('1,2,3.0\n4,5,6.0\n') + c.seek(0) + dt = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) + x = np.loadtxt(c, dtype=dt, delimiter=',') + a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dt) + assert_array_equal(x, a) + + def test_shaped_dtype(self): + c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6") + dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), + ('block', int, (2, 3))]) + x = np.loadtxt(c, dtype=dt) + a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])], + dtype=dt) + assert_array_equal(x, a) + + def test_3d_shaped_dtype(self): + c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6 7 8 9 10 11 12") + dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), + ('block', int, (2, 2, 3))]) + x = np.loadtxt(c, dtype=dt) + a = np.array([('aaaa', 1.0, 8.0, + [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])], + dtype=dt) + assert_array_equal(x, a) + + def test_str_dtype(self): + # see gh-8033 + c = ["str1", "str2"] + + for dt in (str, np.bytes_): + a = np.array(["str1", "str2"], dtype=dt) + x = np.loadtxt(c, dtype=dt) + assert_array_equal(x, a) + + def test_empty_file(self): + with suppress_warnings() as sup: + sup.filter(message="loadtxt: Empty input file:") + c = TextIO() + x = np.loadtxt(c) + assert_equal(x.shape, (0,)) + x = np.loadtxt(c, dtype=np.int64) + assert_equal(x.shape, (0,)) + assert_(x.dtype == np.int64) + + def test_unused_converter(self): + c = TextIO() + c.writelines(['1 21\n', '3 42\n']) + c.seek(0) + data = np.loadtxt(c, usecols=(1,), + converters={0: lambda s: int(s, 16)}) + assert_array_equal(data, [21, 42]) + + c.seek(0) + data = np.loadtxt(c, usecols=(1,), + converters={1: lambda s: int(s, 16)}) + assert_array_equal(data, [33, 66]) + + def test_dtype_with_object(self): + # Test using an explicit dtype with an object + data = """ 1; 2001-01-01 + 2; 2002-01-31 """ + ndtype = [('idx', int), ('code', object)] + func = lambda s: strptime(s.strip(), "%Y-%m-%d") + converters = {1: func} + test = np.loadtxt(TextIO(data), delimiter=";", dtype=ndtype, + converters=converters) + control = np.array( + [(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))], + dtype=ndtype) + assert_equal(test, 
control) + + def test_uint64_type(self): + tgt = (9223372043271415339, 9223372043271415853) + c = TextIO() + c.write("%s %s" % tgt) + c.seek(0) + res = np.loadtxt(c, dtype=np.uint64) + assert_equal(res, tgt) + + def test_int64_type(self): + tgt = (-9223372036854775807, 9223372036854775807) + c = TextIO() + c.write("%s %s" % tgt) + c.seek(0) + res = np.loadtxt(c, dtype=np.int64) + assert_equal(res, tgt) + + def test_from_float_hex(self): + # IEEE doubles and floats only, otherwise the float32 + # conversion may fail. + tgt = np.logspace(-10, 10, 5).astype(np.float32) + tgt = np.hstack((tgt, -tgt)).astype(float) + inp = '\n'.join(map(float.hex, tgt)) + c = TextIO() + c.write(inp) + for dt in [float, np.float32]: + c.seek(0) + res = np.loadtxt(c, dtype=dt) + assert_equal(res, tgt, err_msg="%s" % dt) + + def test_from_complex(self): + tgt = (complex(1, 1), complex(1, -1)) + c = TextIO() + c.write("%s %s" % tgt) + c.seek(0) + res = np.loadtxt(c, dtype=complex) + assert_equal(res, tgt) + + def test_complex_misformatted(self): + # test for backward compatibility + # some complex formats used to generate x+-yj + a = np.zeros((2, 2), dtype=np.complex128) + re = np.pi + im = np.e + a[:] = re - 1.0j * im + c = BytesIO() + np.savetxt(c, a, fmt='%.16e') + c.seek(0) + txt = c.read() + c.seek(0) + # misformat the sign on the imaginary part, gh 7895 + txt_bad = txt.replace(b'e+00-', b'e00+-') + assert_(txt_bad != txt) + c.write(txt_bad) + c.seek(0) + res = np.loadtxt(c, dtype=complex) + assert_equal(res, a) + + def test_universal_newline(self): + with temppath() as name: + with open(name, 'w') as f: + f.write('1 21\r3 42\r') + data = np.loadtxt(name) + assert_array_equal(data, [[1, 21], [3, 42]]) + + def test_empty_field_after_tab(self): + c = TextIO() + c.write('1 \t2 \t3\tstart \n4\t5\t6\t \n7\t8\t9.5\t') + c.seek(0) + dt = {'names': ('x', 'y', 'z', 'comment'), + 'formats': (' num rows + c = TextIO() + c.write('comment\n1,2,3,5\n4,5,7,8\n2,1,4,5') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + skiprows=1, max_rows=6) + a = np.array([[1, 2, 3, 5], [4, 5, 7, 8], [2, 1, 4, 5]], int) + assert_array_equal(x, a) + +class Testfromregex: + def test_record(self): + c = TextIO() + c.write('1.312 foo\n1.534 bar\n4.444 qux') + c.seek(0) + + dt = [('num', np.float64), ('val', 'S3')] + x = np.fromregex(c, r"([0-9.]+)\s+(...)", dt) + a = np.array([(1.312, 'foo'), (1.534, 'bar'), (4.444, 'qux')], + dtype=dt) + assert_array_equal(x, a) + + def test_record_2(self): + c = TextIO() + c.write('1312 foo\n1534 bar\n4444 qux') + c.seek(0) + + dt = [('num', np.int32), ('val', 'S3')] + x = np.fromregex(c, r"(\d+)\s+(...)", dt) + a = np.array([(1312, 'foo'), (1534, 'bar'), (4444, 'qux')], + dtype=dt) + assert_array_equal(x, a) + + def test_record_3(self): + c = TextIO() + c.write('1312 foo\n1534 bar\n4444 qux') + c.seek(0) + + dt = [('num', np.float64)] + x = np.fromregex(c, r"(\d+)\s+...", dt) + a = np.array([(1312,), (1534,), (4444,)], dtype=dt) + assert_array_equal(x, a) + + def test_record_unicode(self): + utf8 = b'\xcf\x96' + with temppath() as path: + with open(path, 'wb') as f: + f.write(b'1.312 foo' + utf8 + b' \n1.534 bar\n4.444 qux') + + dt = [('num', np.float64), ('val', 'U4')] + x = np.fromregex(path, r"(?u)([0-9.]+)\s+(\w+)", dt, encoding='UTF-8') + a = np.array([(1.312, 'foo' + utf8.decode('UTF-8')), (1.534, 'bar'), + (4.444, 'qux')], dtype=dt) + assert_array_equal(x, a) + + regexp = re.compile(r"([0-9.]+)\s+(\w+)", re.UNICODE) + x = np.fromregex(path, regexp, dt, encoding='UTF-8') + 
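+        # a pre-compiled pattern object must give the same result as the
+        # equivalent pattern string: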
assert_array_equal(x, a) + + def test_compiled_bytes(self): + regexp = re.compile(b'(\\d)') + c = BytesIO(b'123') + dt = [('num', np.float64)] + a = np.array([1, 2, 3], dtype=dt) + x = np.fromregex(c, regexp, dt) + assert_array_equal(x, a) + +#####-------------------------------------------------------------------------- + + +class TestFromTxt(LoadTxtBase): + loadfunc = staticmethod(np.genfromtxt) + + def test_record(self): + # Test w/ explicit dtype + data = TextIO('1 2\n3 4') + test = np.genfromtxt(data, dtype=[('x', np.int32), ('y', np.int32)]) + control = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) + assert_equal(test, control) + # + data = TextIO('M 64.0 75.0\nF 25.0 60.0') + descriptor = {'names': ('gender', 'age', 'weight'), + 'formats': ('S1', 'i4', 'f4')} + control = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)], + dtype=descriptor) + test = np.genfromtxt(data, dtype=descriptor) + assert_equal(test, control) + + def test_array(self): + # Test outputting a standard ndarray + data = TextIO('1 2\n3 4') + control = np.array([[1, 2], [3, 4]], dtype=int) + test = np.genfromtxt(data, dtype=int) + assert_array_equal(test, control) + # + data.seek(0) + control = np.array([[1, 2], [3, 4]], dtype=float) + test = np.loadtxt(data, dtype=float) + assert_array_equal(test, control) + + def test_1D(self): + # Test squeezing to 1D + control = np.array([1, 2, 3, 4], int) + # + data = TextIO('1\n2\n3\n4\n') + test = np.genfromtxt(data, dtype=int) + assert_array_equal(test, control) + # + data = TextIO('1,2,3,4\n') + test = np.genfromtxt(data, dtype=int, delimiter=',') + assert_array_equal(test, control) + + def test_comments(self): + # Test the stripping of comments + control = np.array([1, 2, 3, 5], int) + # Comment on its own line + data = TextIO('# comment\n1,2,3,5\n') + test = np.genfromtxt(data, dtype=int, delimiter=',', comments='#') + assert_equal(test, control) + # Comment at the end of a line + data = TextIO('1,2,3,5# comment\n') + test = np.genfromtxt(data, dtype=int, delimiter=',', comments='#') + assert_equal(test, control) + + def test_skiprows(self): + # Test row skipping + control = np.array([1, 2, 3, 5], int) + kwargs = dict(dtype=int, delimiter=',') + # + data = TextIO('comment\n1,2,3,5\n') + test = np.genfromtxt(data, skip_header=1, **kwargs) + assert_equal(test, control) + # + data = TextIO('# comment\n1,2,3,5\n') + test = np.loadtxt(data, skiprows=1, **kwargs) + assert_equal(test, control) + + def test_skip_footer(self): + data = ["# %i" % i for i in range(1, 6)] + data.append("A, B, C") + data.extend(["%i,%3.1f,%03s" % (i, i, i) for i in range(51)]) + data[-1] = "99,99" + kwargs = dict(delimiter=",", names=True, skip_header=5, skip_footer=10) + test = np.genfromtxt(TextIO("\n".join(data)), **kwargs) + ctrl = np.array([("%f" % i, "%f" % i, "%f" % i) for i in range(41)], + dtype=[(_, float) for _ in "ABC"]) + assert_equal(test, ctrl) + + def test_skip_footer_with_invalid(self): + with suppress_warnings() as sup: + sup.filter(ConversionWarning) + basestr = '1 1\n2 2\n3 3\n4 4\n5 \n6 \n7 \n' + # Footer too small to get rid of all invalid values + assert_raises(ValueError, np.genfromtxt, + TextIO(basestr), skip_footer=1) + # except ValueError: + # pass + a = np.genfromtxt( + TextIO(basestr), skip_footer=1, invalid_raise=False) + assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])) + # + a = np.genfromtxt(TextIO(basestr), skip_footer=3) + assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])) + # + basestr = '1 1\n2 \n3 3\n4 4\n5 \n6 6\n7 
7\n' + a = np.genfromtxt( + TextIO(basestr), skip_footer=1, invalid_raise=False) + assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.], [6., 6.]])) + a = np.genfromtxt( + TextIO(basestr), skip_footer=3, invalid_raise=False) + assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.]])) + + def test_header(self): + # Test retrieving a header + data = TextIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0') + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.genfromtxt(data, dtype=None, names=True) + assert_(w[0].category is np.VisibleDeprecationWarning) + control = {'gender': np.array([b'M', b'F']), + 'age': np.array([64.0, 25.0]), + 'weight': np.array([75.0, 60.0])} + assert_equal(test['gender'], control['gender']) + assert_equal(test['age'], control['age']) + assert_equal(test['weight'], control['weight']) + + def test_auto_dtype(self): + # Test the automatic definition of the output dtype + data = TextIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False') + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.genfromtxt(data, dtype=None) + assert_(w[0].category is np.VisibleDeprecationWarning) + control = [np.array([b'A', b'BCD']), + np.array([64, 25]), + np.array([75.0, 60.0]), + np.array([3 + 4j, 5 + 6j]), + np.array([True, False]), ] + assert_equal(test.dtype.names, ['f0', 'f1', 'f2', 'f3', 'f4']) + for (i, ctrl) in enumerate(control): + assert_equal(test['f%i' % i], ctrl) + + def test_auto_dtype_uniform(self): + # Tests whether the output dtype can be uniformized + data = TextIO('1 2 3 4\n5 6 7 8\n') + test = np.genfromtxt(data, dtype=None) + control = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) + assert_equal(test, control) + + def test_fancy_dtype(self): + # Check that a nested dtype isn't MIA + data = TextIO('1,2,3.0\n4,5,6.0\n') + fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) + test = np.genfromtxt(data, dtype=fancydtype, delimiter=',') + control = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype) + assert_equal(test, control) + + def test_names_overwrite(self): + # Test overwriting the names of the dtype + descriptor = {'names': ('g', 'a', 'w'), + 'formats': ('S1', 'i4', 'f4')} + data = TextIO(b'M 64.0 75.0\nF 25.0 60.0') + names = ('gender', 'age', 'weight') + test = np.genfromtxt(data, dtype=descriptor, names=names) + descriptor['names'] = names + control = np.array([('M', 64.0, 75.0), + ('F', 25.0, 60.0)], dtype=descriptor) + assert_equal(test, control) + + def test_commented_header(self): + # Check that names can be retrieved even if the line is commented out. + data = TextIO(""" +#gender age weight +M 21 72.100000 +F 35 58.330000 +M 33 21.99 + """) + # The # is part of the first name and should be deleted automatically. 
+ with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.genfromtxt(data, names=True, dtype=None) + assert_(w[0].category is np.VisibleDeprecationWarning) + ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)], + dtype=[('gender', '|S1'), ('age', int), ('weight', float)]) + assert_equal(test, ctrl) + # Ditto, but we should get rid of the first element + data = TextIO(b""" +# gender age weight +M 21 72.100000 +F 35 58.330000 +M 33 21.99 + """) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.genfromtxt(data, names=True, dtype=None) + assert_(w[0].category is np.VisibleDeprecationWarning) + assert_equal(test, ctrl) + + def test_names_and_comments_none(self): + # Tests case when names is true but comments is None (gh-10780) + data = TextIO('col1 col2\n 1 2\n 3 4') + test = np.genfromtxt(data, dtype=(int, int), comments=None, names=True) + control = np.array([(1, 2), (3, 4)], dtype=[('col1', int), ('col2', int)]) + assert_equal(test, control) + + def test_file_is_closed_on_error(self): + # gh-13200 + with tempdir() as tmpdir: + fpath = os.path.join(tmpdir, "test.csv") + with open(fpath, "wb") as f: + f.write(u'\N{GREEK PI SYMBOL}'.encode('utf8')) + + # ResourceWarnings are emitted from a destructor, so won't be + # detected by regular propagation to errors. + with assert_no_warnings(): + with pytest.raises(UnicodeDecodeError): + np.genfromtxt(fpath, encoding="ascii") + + def test_autonames_and_usecols(self): + # Tests names and usecols + data = TextIO('A B C D\n aaaa 121 45 9.1') + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.genfromtxt(data, usecols=('A', 'C', 'D'), + names=True, dtype=None) + assert_(w[0].category is np.VisibleDeprecationWarning) + control = np.array(('aaaa', 45, 9.1), + dtype=[('A', '|S4'), ('C', int), ('D', float)]) + assert_equal(test, control) + + def test_converters_with_usecols(self): + # Test the combination user-defined converters and usecol + data = TextIO('1,2,3,,5\n6,7,8,9,10\n') + test = np.genfromtxt(data, dtype=int, delimiter=',', + converters={3: lambda s: int(s or - 999)}, + usecols=(1, 3,)) + control = np.array([[2, -999], [7, 9]], int) + assert_equal(test, control) + + def test_converters_with_usecols_and_names(self): + # Tests names and usecols + data = TextIO('A B C D\n aaaa 121 45 9.1') + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.genfromtxt(data, usecols=('A', 'C', 'D'), names=True, + dtype=None, + converters={'C': lambda s: 2 * int(s)}) + assert_(w[0].category is np.VisibleDeprecationWarning) + control = np.array(('aaaa', 90, 9.1), + dtype=[('A', '|S4'), ('C', int), ('D', float)]) + assert_equal(test, control) + + def test_converters_cornercases(self): + # Test the conversion to datetime. + converter = { + 'date': lambda s: strptime(s, '%Y-%m-%d %H:%M:%SZ')} + data = TextIO('2009-02-03 12:00:00Z, 72214.0') + test = np.genfromtxt(data, delimiter=',', dtype=None, + names=['date', 'stid'], converters=converter) + control = np.array((datetime(2009, 2, 3), 72214.), + dtype=[('date', np.object_), ('stid', float)]) + assert_equal(test, control) + + def test_converters_cornercases2(self): + # Test the conversion to datetime64. 
+ converter = { + 'date': lambda s: np.datetime64(strptime(s, '%Y-%m-%d %H:%M:%SZ'))} + data = TextIO('2009-02-03 12:00:00Z, 72214.0') + test = np.genfromtxt(data, delimiter=',', dtype=None, + names=['date', 'stid'], converters=converter) + control = np.array((datetime(2009, 2, 3), 72214.), + dtype=[('date', 'datetime64[us]'), ('stid', float)]) + assert_equal(test, control) + + def test_unused_converter(self): + # Test whether unused converters are forgotten + data = TextIO("1 21\n 3 42\n") + test = np.genfromtxt(data, usecols=(1,), + converters={0: lambda s: int(s, 16)}) + assert_equal(test, [21, 42]) + # + data.seek(0) + test = np.genfromtxt(data, usecols=(1,), + converters={1: lambda s: int(s, 16)}) + assert_equal(test, [33, 66]) + + def test_invalid_converter(self): + strip_rand = lambda x: float((b'r' in x.lower() and x.split()[-1]) or + (b'r' not in x.lower() and x.strip() or 0.0)) + strip_per = lambda x: float((b'%' in x.lower() and x.split()[0]) or + (b'%' not in x.lower() and x.strip() or 0.0)) + s = TextIO("D01N01,10/1/2003 ,1 %,R 75,400,600\r\n" + "L24U05,12/5/2003, 2 %,1,300, 150.5\r\n" + "D02N03,10/10/2004,R 1,,7,145.55") + kwargs = dict( + converters={2: strip_per, 3: strip_rand}, delimiter=",", + dtype=None) + assert_raises(ConverterError, np.genfromtxt, s, **kwargs) + + def test_tricky_converter_bug1666(self): + # Test some corner cases + s = TextIO('q1,2\nq3,4') + cnv = lambda s: float(s[1:]) + test = np.genfromtxt(s, delimiter=',', converters={0: cnv}) + control = np.array([[1., 2.], [3., 4.]]) + assert_equal(test, control) + + def test_dtype_with_converters(self): + dstr = "2009; 23; 46" + test = np.genfromtxt(TextIO(dstr,), + delimiter=";", dtype=float, converters={0: bytes}) + control = np.array([('2009', 23., 46)], + dtype=[('f0', '|S4'), ('f1', float), ('f2', float)]) + assert_equal(test, control) + test = np.genfromtxt(TextIO(dstr,), + delimiter=";", dtype=float, converters={0: float}) + control = np.array([2009., 23., 46],) + assert_equal(test, control) + + def test_dtype_with_converters_and_usecols(self): + dstr = "1,5,-1,1:1\n2,8,-1,1:n\n3,3,-2,m:n\n" + dmap = {'1:1':0, '1:n':1, 'm:1':2, 'm:n':3} + dtyp = [('e1','i4'),('e2','i4'),('e3','i2'),('n', 'i1')] + conv = {0: int, 1: int, 2: int, 3: lambda r: dmap[r.decode()]} + test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',', + names=None, converters=conv) + control = np.rec.array([(1,5,-1,0), (2,8,-1,1), (3,3,-2,3)], dtype=dtyp) + assert_equal(test, control) + dtyp = [('e1','i4'),('e2','i4'),('n', 'i1')] + test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',', + usecols=(0,1,3), names=None, converters=conv) + control = np.rec.array([(1,5,0), (2,8,1), (3,3,3)], dtype=dtyp) + assert_equal(test, control) + + def test_dtype_with_object(self): + # Test using an explicit dtype with an object + data = """ 1; 2001-01-01 + 2; 2002-01-31 """ + ndtype = [('idx', int), ('code', object)] + func = lambda s: strptime(s.strip(), "%Y-%m-%d") + converters = {1: func} + test = np.genfromtxt(TextIO(data), delimiter=";", dtype=ndtype, + converters=converters) + control = np.array( + [(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))], + dtype=ndtype) + assert_equal(test, control) + + ndtype = [('nest', [('idx', int), ('code', object)])] + with assert_raises_regex(NotImplementedError, + 'Nested fields.* not supported.*'): + test = np.genfromtxt(TextIO(data), delimiter=";", + dtype=ndtype, converters=converters) + + # nested but empty fields also aren't supported + ndtype = [('idx', int), ('code', object), ('nest', 
[])] + with assert_raises_regex(NotImplementedError, + 'Nested fields.* not supported.*'): + test = np.genfromtxt(TextIO(data), delimiter=";", + dtype=ndtype, converters=converters) + + def test_dtype_with_object_no_converter(self): + # Object without a converter uses bytes: + parsed = np.genfromtxt(TextIO("1"), dtype=object) + assert parsed[()] == b"1" + parsed = np.genfromtxt(TextIO("string"), dtype=object) + assert parsed[()] == b"string" + + def test_userconverters_with_explicit_dtype(self): + # Test user_converters w/ explicit (standard) dtype + data = TextIO('skip,skip,2001-01-01,1.0,skip') + test = np.genfromtxt(data, delimiter=",", names=None, dtype=float, + usecols=(2, 3), converters={2: bytes}) + control = np.array([('2001-01-01', 1.)], + dtype=[('', '|S10'), ('', float)]) + assert_equal(test, control) + + def test_utf8_userconverters_with_explicit_dtype(self): + utf8 = b'\xcf\x96' + with temppath() as path: + with open(path, 'wb') as f: + f.write(b'skip,skip,2001-01-01' + utf8 + b',1.0,skip') + test = np.genfromtxt(path, delimiter=",", names=None, dtype=float, + usecols=(2, 3), converters={2: np.compat.unicode}, + encoding='UTF-8') + control = np.array([('2001-01-01' + utf8.decode('UTF-8'), 1.)], + dtype=[('', '|U11'), ('', float)]) + assert_equal(test, control) + + def test_spacedelimiter(self): + # Test space delimiter + data = TextIO("1 2 3 4 5\n6 7 8 9 10") + test = np.genfromtxt(data) + control = np.array([[1., 2., 3., 4., 5.], + [6., 7., 8., 9., 10.]]) + assert_equal(test, control) + + def test_integer_delimiter(self): + # Test using an integer for delimiter + data = " 1 2 3\n 4 5 67\n890123 4" + test = np.genfromtxt(TextIO(data), delimiter=3) + control = np.array([[1, 2, 3], [4, 5, 67], [890, 123, 4]]) + assert_equal(test, control) + + def test_missing(self): + data = TextIO('1,2,3,,5\n') + test = np.genfromtxt(data, dtype=int, delimiter=',', + converters={3: lambda s: int(s or - 999)}) + control = np.array([1, 2, 3, -999, 5], int) + assert_equal(test, control) + + def test_missing_with_tabs(self): + # Test w/ a delimiter tab + txt = "1\t2\t3\n\t2\t\n1\t\t3" + test = np.genfromtxt(TextIO(txt), delimiter="\t", + usemask=True,) + ctrl_d = np.array([(1, 2, 3), (np.nan, 2, np.nan), (1, np.nan, 3)],) + ctrl_m = np.array([(0, 0, 0), (1, 0, 1), (0, 1, 0)], dtype=bool) + assert_equal(test.data, ctrl_d) + assert_equal(test.mask, ctrl_m) + + def test_usecols(self): + # Test the selection of columns + # Select 1 column + control = np.array([[1, 2], [3, 4]], float) + data = TextIO() + np.savetxt(data, control) + data.seek(0) + test = np.genfromtxt(data, dtype=float, usecols=(1,)) + assert_equal(test, control[:, 1]) + # + control = np.array([[1, 2, 3], [3, 4, 5]], float) + data = TextIO() + np.savetxt(data, control) + data.seek(0) + test = np.genfromtxt(data, dtype=float, usecols=(1, 2)) + assert_equal(test, control[:, 1:]) + # Testing with arrays instead of tuples. 
+ data.seek(0) + test = np.genfromtxt(data, dtype=float, usecols=np.array([1, 2])) + assert_equal(test, control[:, 1:]) + + def test_usecols_as_css(self): + # Test giving usecols with a comma-separated string + data = "1 2 3\n4 5 6" + test = np.genfromtxt(TextIO(data), + names="a, b, c", usecols="a, c") + ctrl = np.array([(1, 3), (4, 6)], dtype=[(_, float) for _ in "ac"]) + assert_equal(test, ctrl) + + def test_usecols_with_structured_dtype(self): + # Test usecols with an explicit structured dtype + data = TextIO("JOE 70.1 25.3\nBOB 60.5 27.9") + names = ['stid', 'temp'] + dtypes = ['S4', 'f8'] + test = np.genfromtxt( + data, usecols=(0, 2), dtype=list(zip(names, dtypes))) + assert_equal(test['stid'], [b"JOE", b"BOB"]) + assert_equal(test['temp'], [25.3, 27.9]) + + def test_usecols_with_integer(self): + # Test usecols with an integer + test = np.genfromtxt(TextIO(b"1 2 3\n4 5 6"), usecols=0) + assert_equal(test, np.array([1., 4.])) + + def test_usecols_with_named_columns(self): + # Test usecols with named columns + ctrl = np.array([(1, 3), (4, 6)], dtype=[('a', float), ('c', float)]) + data = "1 2 3\n4 5 6" + kwargs = dict(names="a, b, c") + test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs) + assert_equal(test, ctrl) + test = np.genfromtxt(TextIO(data), + usecols=('a', 'c'), **kwargs) + assert_equal(test, ctrl) + + def test_empty_file(self): + # Test that an empty file raises the proper warning. + with suppress_warnings() as sup: + sup.filter(message="genfromtxt: Empty input file:") + data = TextIO() + test = np.genfromtxt(data) + assert_equal(test, np.array([])) + + # when skip_header > 0 + test = np.genfromtxt(data, skip_header=1) + assert_equal(test, np.array([])) + + def test_fancy_dtype_alt(self): + # Check that a nested dtype isn't MIA + data = TextIO('1,2,3.0\n4,5,6.0\n') + fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) + test = np.genfromtxt(data, dtype=fancydtype, delimiter=',', usemask=True) + control = ma.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype) + assert_equal(test, control) + + def test_shaped_dtype(self): + c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6") + dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), + ('block', int, (2, 3))]) + x = np.genfromtxt(c, dtype=dt) + a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])], + dtype=dt) + assert_array_equal(x, a) + + def test_withmissing(self): + data = TextIO('A,B\n0,1\n2,N/A') + kwargs = dict(delimiter=",", missing_values="N/A", names=True) + test = np.genfromtxt(data, dtype=None, usemask=True, **kwargs) + control = ma.array([(0, 1), (2, -1)], + mask=[(False, False), (False, True)], + dtype=[('A', int), ('B', int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + # + data.seek(0) + test = np.genfromtxt(data, usemask=True, **kwargs) + control = ma.array([(0, 1), (2, -1)], + mask=[(False, False), (False, True)], + dtype=[('A', float), ('B', float)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + def test_user_missing_values(self): + data = "A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j" + basekwargs = dict(dtype=None, delimiter=",", names=True,) + mdtype = [('A', int), ('B', float), ('C', complex)] + # + test = np.genfromtxt(TextIO(data), missing_values="N/A", + **basekwargs) + control = ma.array([(0, 0.0, 0j), (1, -999, 1j), + (-9, 2.2, -999j), (3, -99, 3j)], + mask=[(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)], + dtype=mdtype) + assert_equal(test, control) + # + basekwargs['dtype'] = mdtype + test = 
np.genfromtxt(TextIO(data), + missing_values={0: -9, 1: -99, 2: -999j}, usemask=True, **basekwargs) + control = ma.array([(0, 0.0, 0j), (1, -999, 1j), + (-9, 2.2, -999j), (3, -99, 3j)], + mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)], + dtype=mdtype) + assert_equal(test, control) + # + test = np.genfromtxt(TextIO(data), + missing_values={0: -9, 'B': -99, 'C': -999j}, + usemask=True, + **basekwargs) + control = ma.array([(0, 0.0, 0j), (1, -999, 1j), + (-9, 2.2, -999j), (3, -99, 3j)], + mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)], + dtype=mdtype) + assert_equal(test, control) + + def test_user_filling_values(self): + # Test with missing and filling values + ctrl = np.array([(0, 3), (4, -999)], dtype=[('a', int), ('b', int)]) + data = "N/A, 2, 3\n4, ,???" + kwargs = dict(delimiter=",", + dtype=int, + names="a,b,c", + missing_values={0: "N/A", 'b': " ", 2: "???"}, + filling_values={0: 0, 'b': 0, 2: -999}) + test = np.genfromtxt(TextIO(data), **kwargs) + ctrl = np.array([(0, 2, 3), (4, 0, -999)], + dtype=[(_, int) for _ in "abc"]) + assert_equal(test, ctrl) + # + test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs) + ctrl = np.array([(0, 3), (4, -999)], dtype=[(_, int) for _ in "ac"]) + assert_equal(test, ctrl) + + data2 = "1,2,*,4\n5,*,7,8\n" + test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int, + missing_values="*", filling_values=0) + ctrl = np.array([[1, 2, 0, 4], [5, 0, 7, 8]]) + assert_equal(test, ctrl) + test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int, + missing_values="*", filling_values=-1) + ctrl = np.array([[1, 2, -1, 4], [5, -1, 7, 8]]) + assert_equal(test, ctrl) + + def test_withmissing_float(self): + data = TextIO('A,B\n0,1.5\n2,-999.00') + test = np.genfromtxt(data, dtype=None, delimiter=',', + missing_values='-999.0', names=True, usemask=True) + control = ma.array([(0, 1.5), (2, -1.)], + mask=[(False, False), (False, True)], + dtype=[('A', int), ('B', float)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + def test_with_masked_column_uniform(self): + # Test masked column + data = TextIO('1 2 3\n4 5 6\n') + test = np.genfromtxt(data, dtype=None, + missing_values='2,5', usemask=True) + control = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[0, 1, 0], [0, 1, 0]]) + assert_equal(test, control) + + def test_with_masked_column_various(self): + # Test masked column + data = TextIO('True 2 3\nFalse 5 6\n') + test = np.genfromtxt(data, dtype=None, + missing_values='2,5', usemask=True) + control = ma.array([(1, 2, 3), (0, 5, 6)], + mask=[(0, 1, 0), (0, 1, 0)], + dtype=[('f0', bool), ('f1', bool), ('f2', int)]) + assert_equal(test, control) + + def test_invalid_raise(self): + # Test invalid raise + data = ["1, 1, 1, 1, 1"] * 50 + for i in range(5): + data[10 * i] = "2, 2, 2, 2 2" + data.insert(0, "a, b, c, d, e") + mdata = TextIO("\n".join(data)) + + kwargs = dict(delimiter=",", dtype=None, names=True) + def f(): + return np.genfromtxt(mdata, invalid_raise=False, **kwargs) + mtest = assert_warns(ConversionWarning, f) + assert_equal(len(mtest), 45) + assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'abcde'])) + # + mdata.seek(0) + assert_raises(ValueError, np.genfromtxt, mdata, + delimiter=",", names=True) + + def test_invalid_raise_with_usecols(self): + # Test invalid_raise with usecols + data = ["1, 1, 1, 1, 1"] * 50 + for i in range(5): + data[10 * i] = "2, 2, 2, 2 2" + data.insert(0, "a, b, c, d, e") + mdata = TextIO("\n".join(data)) + + kwargs = dict(delimiter=",", dtype=None, names=True, + invalid_raise=False) + 
def f(): + return np.genfromtxt(mdata, usecols=(0, 4), **kwargs) + mtest = assert_warns(ConversionWarning, f) + assert_equal(len(mtest), 45) + assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'ae'])) + # + mdata.seek(0) + mtest = np.genfromtxt(mdata, usecols=(0, 1), **kwargs) + assert_equal(len(mtest), 50) + control = np.ones(50, dtype=[(_, int) for _ in 'ab']) + control[[10 * _ for _ in range(5)]] = (2, 2) + assert_equal(mtest, control) + + def test_inconsistent_dtype(self): + # Test inconsistent dtype + data = ["1, 1, 1, 1, -1.1"] * 50 + mdata = TextIO("\n".join(data)) + + converters = {4: lambda x: "(%s)" % x.decode()} + kwargs = dict(delimiter=",", converters=converters, + dtype=[(_, int) for _ in 'abcde'],) + assert_raises(ValueError, np.genfromtxt, mdata, **kwargs) + + def test_default_field_format(self): + # Test default format + data = "0, 1, 2.3\n4, 5, 6.7" + mtest = np.genfromtxt(TextIO(data), + delimiter=",", dtype=None, defaultfmt="f%02i") + ctrl = np.array([(0, 1, 2.3), (4, 5, 6.7)], + dtype=[("f00", int), ("f01", int), ("f02", float)]) + assert_equal(mtest, ctrl) + + def test_single_dtype_wo_names(self): + # Test single dtype w/o names + data = "0, 1, 2.3\n4, 5, 6.7" + mtest = np.genfromtxt(TextIO(data), + delimiter=",", dtype=float, defaultfmt="f%02i") + ctrl = np.array([[0., 1., 2.3], [4., 5., 6.7]], dtype=float) + assert_equal(mtest, ctrl) + + def test_single_dtype_w_explicit_names(self): + # Test single dtype w explicit names + data = "0, 1, 2.3\n4, 5, 6.7" + mtest = np.genfromtxt(TextIO(data), + delimiter=",", dtype=float, names="a, b, c") + ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)], + dtype=[(_, float) for _ in "abc"]) + assert_equal(mtest, ctrl) + + def test_single_dtype_w_implicit_names(self): + # Test single dtype w implicit names + data = "a, b, c\n0, 1, 2.3\n4, 5, 6.7" + mtest = np.genfromtxt(TextIO(data), + delimiter=",", dtype=float, names=True) + ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)], + dtype=[(_, float) for _ in "abc"]) + assert_equal(mtest, ctrl) + + def test_easy_structured_dtype(self): + # Test easy structured dtype + data = "0, 1, 2.3\n4, 5, 6.7" + mtest = np.genfromtxt(TextIO(data), delimiter=",", + dtype=(int, float, float), defaultfmt="f_%02i") + ctrl = np.array([(0, 1., 2.3), (4, 5., 6.7)], + dtype=[("f_00", int), ("f_01", float), ("f_02", float)]) + assert_equal(mtest, ctrl) + + def test_autostrip(self): + # Test autostrip + data = "01/01/2003 , 1.3, abcde" + kwargs = dict(delimiter=",", dtype=None) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + mtest = np.genfromtxt(TextIO(data), **kwargs) + assert_(w[0].category is np.VisibleDeprecationWarning) + ctrl = np.array([('01/01/2003 ', 1.3, ' abcde')], + dtype=[('f0', '|S12'), ('f1', float), ('f2', '|S8')]) + assert_equal(mtest, ctrl) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + mtest = np.genfromtxt(TextIO(data), autostrip=True, **kwargs) + assert_(w[0].category is np.VisibleDeprecationWarning) + ctrl = np.array([('01/01/2003', 1.3, 'abcde')], + dtype=[('f0', '|S10'), ('f1', float), ('f2', '|S5')]) + assert_equal(mtest, ctrl) + + def test_replace_space(self): + # Test the 'replace_space' option + txt = "A.A, B (B), C:C\n1, 2, 3.14" + # Test default: replace ' ' by '_' and delete non-alphanum chars + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=None) + ctrl_dtype = [("AA", int), ("B_B", int), ("CC", float)] + 
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype) + assert_equal(test, ctrl) + # Test: no replace, no delete + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=None, + replace_space='', deletechars='') + ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", float)] + ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype) + assert_equal(test, ctrl) + # Test: no delete (spaces are replaced by _) + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=None, + deletechars='') + ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", float)] + ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype) + assert_equal(test, ctrl) + + def test_replace_space_known_dtype(self): + # Test the 'replace_space' (and related) options when dtype != None + txt = "A.A, B (B), C:C\n1, 2, 3" + # Test default: replace ' ' by '_' and delete non-alphanum chars + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=int) + ctrl_dtype = [("AA", int), ("B_B", int), ("CC", int)] + ctrl = np.array((1, 2, 3), dtype=ctrl_dtype) + assert_equal(test, ctrl) + # Test: no replace, no delete + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=int, + replace_space='', deletechars='') + ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", int)] + ctrl = np.array((1, 2, 3), dtype=ctrl_dtype) + assert_equal(test, ctrl) + # Test: no delete (spaces are replaced by _) + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=int, + deletechars='') + ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", int)] + ctrl = np.array((1, 2, 3), dtype=ctrl_dtype) + assert_equal(test, ctrl) + + def test_incomplete_names(self): + # Test w/ incomplete names + data = "A,,C\n0,1,2\n3,4,5" + kwargs = dict(delimiter=",", names=True) + # w/ dtype=None + ctrl = np.array([(0, 1, 2), (3, 4, 5)], + dtype=[(_, int) for _ in ('A', 'f0', 'C')]) + test = np.genfromtxt(TextIO(data), dtype=None, **kwargs) + assert_equal(test, ctrl) + # w/ default dtype + ctrl = np.array([(0, 1, 2), (3, 4, 5)], + dtype=[(_, float) for _ in ('A', 'f0', 'C')]) + test = np.genfromtxt(TextIO(data), **kwargs) + + def test_names_auto_completion(self): + # Make sure that names are properly completed + data = "1 2 3\n 4 5 6" + test = np.genfromtxt(TextIO(data), + dtype=(int, float, int), names="a") + ctrl = np.array([(1, 2, 3), (4, 5, 6)], + dtype=[('a', int), ('f0', float), ('f1', int)]) + assert_equal(test, ctrl) + + def test_names_with_usecols_bug1636(self): + # Make sure we pick up the right names w/ usecols + data = "A,B,C,D,E\n0,1,2,3,4\n0,1,2,3,4\n0,1,2,3,4" + ctrl_names = ("A", "C", "E") + test = np.genfromtxt(TextIO(data), + dtype=(int, int, int), delimiter=",", + usecols=(0, 2, 4), names=True) + assert_equal(test.dtype.names, ctrl_names) + # + test = np.genfromtxt(TextIO(data), + dtype=(int, int, int), delimiter=",", + usecols=("A", "C", "E"), names=True) + assert_equal(test.dtype.names, ctrl_names) + # + test = np.genfromtxt(TextIO(data), + dtype=int, delimiter=",", + usecols=("A", "C", "E"), names=True) + assert_equal(test.dtype.names, ctrl_names) + + def test_fixed_width_names(self): + # Test fix-width w/ names + data = " A B C\n 0 1 2.3\n 45 67 9." 
+ kwargs = dict(delimiter=(5, 5, 4), names=True, dtype=None) + ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)], + dtype=[('A', int), ('B', int), ('C', float)]) + test = np.genfromtxt(TextIO(data), **kwargs) + assert_equal(test, ctrl) + # + kwargs = dict(delimiter=5, names=True, dtype=None) + ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)], + dtype=[('A', int), ('B', int), ('C', float)]) + test = np.genfromtxt(TextIO(data), **kwargs) + assert_equal(test, ctrl) + + def test_filling_values(self): + # Test missing values + data = b"1, 2, 3\n1, , 5\n0, 6, \n" + kwargs = dict(delimiter=",", dtype=None, filling_values=-999) + ctrl = np.array([[1, 2, 3], [1, -999, 5], [0, 6, -999]], dtype=int) + test = np.genfromtxt(TextIO(data), **kwargs) + assert_equal(test, ctrl) + + def test_comments_is_none(self): + # Github issue 329 (None was previously being converted to 'None'). + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.genfromtxt(TextIO("test1,testNonetherestofthedata"), + dtype=None, comments=None, delimiter=',') + assert_(w[0].category is np.VisibleDeprecationWarning) + assert_equal(test[1], b'testNonetherestofthedata') + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.genfromtxt(TextIO("test1, testNonetherestofthedata"), + dtype=None, comments=None, delimiter=',') + assert_(w[0].category is np.VisibleDeprecationWarning) + assert_equal(test[1], b' testNonetherestofthedata') + + def test_latin1(self): + latin1 = b'\xf6\xfc\xf6' + norm = b"norm1,norm2,norm3\n" + enc = b"test1,testNonethe" + latin1 + b",test3\n" + s = norm + enc + norm + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.genfromtxt(TextIO(s), + dtype=None, comments=None, delimiter=',') + assert_(w[0].category is np.VisibleDeprecationWarning) + assert_equal(test[1, 0], b"test1") + assert_equal(test[1, 1], b"testNonethe" + latin1) + assert_equal(test[1, 2], b"test3") + test = np.genfromtxt(TextIO(s), + dtype=None, comments=None, delimiter=',', + encoding='latin1') + assert_equal(test[1, 0], u"test1") + assert_equal(test[1, 1], u"testNonethe" + latin1.decode('latin1')) + assert_equal(test[1, 2], u"test3") + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.genfromtxt(TextIO(b"0,testNonethe" + latin1), + dtype=None, comments=None, delimiter=',') + assert_(w[0].category is np.VisibleDeprecationWarning) + assert_equal(test['f0'], 0) + assert_equal(test['f1'], b"testNonethe" + latin1) + + def test_binary_decode_autodtype(self): + utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04' + v = self.loadfunc(BytesIO(utf16), dtype=None, encoding='UTF-16') + assert_array_equal(v, np.array(utf16.decode('UTF-16').split())) + + def test_utf8_byte_encoding(self): + utf8 = b"\xcf\x96" + norm = b"norm1,norm2,norm3\n" + enc = b"test1,testNonethe" + utf8 + b",test3\n" + s = norm + enc + norm + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.genfromtxt(TextIO(s), + dtype=None, comments=None, delimiter=',') + assert_(w[0].category is np.VisibleDeprecationWarning) + ctl = np.array([ + [b'norm1', b'norm2', b'norm3'], + [b'test1', b'testNonethe' + utf8, b'test3'], + [b'norm1', b'norm2', b'norm3']]) + assert_array_equal(test, ctl) + + def test_utf8_file(self): + utf8 = 
b"\xcf\x96" + with temppath() as path: + with open(path, "wb") as f: + f.write((b"test1,testNonethe" + utf8 + b",test3\n") * 2) + test = np.genfromtxt(path, dtype=None, comments=None, + delimiter=',', encoding="UTF-8") + ctl = np.array([ + ["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"], + ["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"]], + dtype=np.unicode_) + assert_array_equal(test, ctl) + + # test a mixed dtype + with open(path, "wb") as f: + f.write(b"0,testNonethe" + utf8) + test = np.genfromtxt(path, dtype=None, comments=None, + delimiter=',', encoding="UTF-8") + assert_equal(test['f0'], 0) + assert_equal(test['f1'], "testNonethe" + utf8.decode("UTF-8")) + + def test_utf8_file_nodtype_unicode(self): + # bytes encoding with non-latin1 -> unicode upcast + utf8 = u'\u03d6' + latin1 = u'\xf6\xfc\xf6' + + # skip test if cannot encode utf8 test string with preferred + # encoding. The preferred encoding is assumed to be the default + # encoding of io.open. Will need to change this for PyTest, maybe + # using pytest.mark.xfail(raises=***). + try: + encoding = locale.getpreferredencoding() + utf8.encode(encoding) + except (UnicodeError, ImportError): + pytest.skip('Skipping test_utf8_file_nodtype_unicode, ' + 'unable to encode utf8 in preferred encoding') + + with temppath() as path: + with io.open(path, "wt") as f: + f.write(u"norm1,norm2,norm3\n") + f.write(u"norm1," + latin1 + u",norm3\n") + f.write(u"test1,testNonethe" + utf8 + u",test3\n") + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', + np.VisibleDeprecationWarning) + test = np.genfromtxt(path, dtype=None, comments=None, + delimiter=',') + # Check for warning when encoding not specified. + assert_(w[0].category is np.VisibleDeprecationWarning) + ctl = np.array([ + ["norm1", "norm2", "norm3"], + ["norm1", latin1, "norm3"], + ["test1", "testNonethe" + utf8, "test3"]], + dtype=np.unicode_) + assert_array_equal(test, ctl) + + def test_recfromtxt(self): + # + data = TextIO('A,B\n0,1\n2,3') + kwargs = dict(delimiter=",", missing_values="N/A", names=True) + test = np.recfromtxt(data, **kwargs) + control = np.array([(0, 1), (2, 3)], + dtype=[('A', int), ('B', int)]) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + # + data = TextIO('A,B\n0,1\n2,N/A') + test = np.recfromtxt(data, dtype=None, usemask=True, **kwargs) + control = ma.array([(0, 1), (2, -1)], + mask=[(False, False), (False, True)], + dtype=[('A', int), ('B', int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(test.A, [0, 2]) + + def test_recfromcsv(self): + # + data = TextIO('A,B\n0,1\n2,3') + kwargs = dict(missing_values="N/A", names=True, case_sensitive=True) + test = np.recfromcsv(data, dtype=None, **kwargs) + control = np.array([(0, 1), (2, 3)], + dtype=[('A', int), ('B', int)]) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + # + data = TextIO('A,B\n0,1\n2,N/A') + test = np.recfromcsv(data, dtype=None, usemask=True, **kwargs) + control = ma.array([(0, 1), (2, -1)], + mask=[(False, False), (False, True)], + dtype=[('A', int), ('B', int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(test.A, [0, 2]) + # + data = TextIO('A,B\n0,1\n2,3') + test = np.recfromcsv(data, missing_values='N/A',) + control = np.array([(0, 1), (2, 3)], + dtype=[('a', int), ('b', int)]) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + # + data = TextIO('A,B\n0,1\n2,3') + dtype = [('a', int), ('b', 
float)]
+        test = np.recfromcsv(data, missing_values='N/A', dtype=dtype)
+        control = np.array([(0, 1), (2, 3)],
+                           dtype=dtype)
+        assert_(isinstance(test, np.recarray))
+        assert_equal(test, control)
+
+        #gh-10394
+        data = TextIO('color\n"red"\n"blue"')
+        test = np.recfromcsv(data, converters={0: lambda x: x.strip(b'\"')})
+        control = np.array([('red',), ('blue',)], dtype=[('color', (bytes, 4))])
+        assert_equal(test.dtype, control.dtype)
+        assert_equal(test, control)
+
+    def test_max_rows(self):
+        # Test the `max_rows` keyword argument.
+        data = '1 2\n3 4\n5 6\n7 8\n9 10\n'
+        txt = TextIO(data)
+        a1 = np.genfromtxt(txt, max_rows=3)
+        a2 = np.genfromtxt(txt)
+        assert_equal(a1, [[1, 2], [3, 4], [5, 6]])
+        assert_equal(a2, [[7, 8], [9, 10]])
+
+        # max_rows must be at least 1.
+        assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=0)
+
+        # An input with several invalid rows.
+        data = '1 1\n2 2\n0 \n3 3\n4 4\n5 \n6 \n7 \n'
+
+        test = np.genfromtxt(TextIO(data), max_rows=2)
+        control = np.array([[1., 1.], [2., 2.]])
+        assert_equal(test, control)
+
+        # Test keywords conflict
+        assert_raises(ValueError, np.genfromtxt, TextIO(data), skip_footer=1,
+                      max_rows=4)
+
+        # Test with invalid value
+        assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=4)
+
+        # Test with invalid not raise
+        with suppress_warnings() as sup:
+            sup.filter(ConversionWarning)
+
+            test = np.genfromtxt(TextIO(data), max_rows=4, invalid_raise=False)
+            control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
+            assert_equal(test, control)
+
+            test = np.genfromtxt(TextIO(data), max_rows=5, invalid_raise=False)
+            control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
+            assert_equal(test, control)
+
+        # Structured array with field names.
+        data = 'a b\n#c d\n1 1\n2 2\n#0 \n3 3\n4 4\n5 5\n'
+
+        # Test with header, names and comments
+        txt = TextIO(data)
+        test = np.genfromtxt(txt, skip_header=1, max_rows=3, names=True)
+        control = np.array([(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)],
+                           dtype=[('c', '<f8'), ('d', '<f8')])
+        assert_equal(test, control)
+        # To continue reading the same "file", don't use skip_header or
+        # names, and use the previously determined dtype.
+        test = np.genfromtxt(txt, max_rows=None, dtype=control.dtype)
+        control = np.array([(4.0, 4.0), (5.0, 5.0)],
+                           dtype=control.dtype)
+        assert_equal(test, control)
+
+    def test_gft_using_filename(self):
+        # Test that we can load data from a filename as well as a file
+        # object
+        tgt = np.arange(6).reshape((2, 3))
+        linesep = ('\n', '\r\n', '\r')
+
+        for sep in linesep:
+            data = '0 1 2' + sep + '3 4 5'
+            with temppath() as name:
+                with open(name, 'w') as f:
+                    f.write(data)
+                res = np.genfromtxt(name)
+            assert_array_equal(res, tgt)
+
+    def test_gft_from_gzip(self):
+        # Test that we can load data from a gzipped file
+        wanted = np.arange(6).reshape((2, 3))
+        linesep = ('\n', '\r\n', '\r')
+
+        for sep in linesep:
+            data = '0 1 2' + sep + '3 4 5'
+            s = BytesIO()
+            with gzip.GzipFile(fileobj=s, mode='w') as g:
+                g.write(asbytes(data))
+
+            with temppath(suffix='.gz') as name:
+                with open(name, 'wb') as f:
+                    f.write(s.getvalue())
+                res = np.genfromtxt(name)
+            assert_array_equal(res, wanted)
+
+    def test_gft_using_generator(self):
+        # gft doesn't work with unicode.
+        def count():
+            for i in range(10):
+                yield asbytes("%d" % i)
+
+        res = np.genfromtxt(count())
+        assert_array_equal(res, np.arange(10))
+
+    def test_auto_dtype_largeint(self):
+        # Regression test for numpy/numpy#5635 whereby large integers could
+        # cause OverflowErrors.
+
+        # Test the automatic definition of the output dtype
+        #
+        # 2**66 = 73786976294838206464 => should convert to float
+        # 2**34 = 17179869184 => should convert to int64
+        # 2**10 = 1024 => should convert to int (int32 on 32-bit systems,
+        #                 int64 on 64-bit systems)
+
+        data = TextIO('73786976294838206464 17179869184 1024')
+
+        test = np.genfromtxt(data, dtype=None)
+
+        assert_equal(test.dtype.names, ['f0', 'f1', 'f2'])
+
+        assert_(test.dtype['f0'] == float)
+        assert_(test.dtype['f1'] == np.int64)
+        assert_(test.dtype['f2'] == np.int_)
+
+        assert_allclose(test['f0'], 73786976294838206464.)
+ assert_equal(test['f1'], 17179869184) + assert_equal(test['f2'], 1024) + + def test_unpack_structured(self): + # Regression test for gh-4341 + # Unpacking should work on structured arrays + txt = TextIO("M 21 72\nF 35 58") + dt = {'names': ('a', 'b', 'c'), 'formats': ('S1', 'i4', 'f4')} + a, b, c = np.genfromtxt(txt, dtype=dt, unpack=True) + assert_equal(a.dtype, np.dtype('S1')) + assert_equal(b.dtype, np.dtype('i4')) + assert_equal(c.dtype, np.dtype('f4')) + assert_array_equal(a, np.array([b'M', b'F'])) + assert_array_equal(b, np.array([21, 35])) + assert_array_equal(c, np.array([72., 58.])) + + def test_unpack_auto_dtype(self): + # Regression test for gh-4341 + # Unpacking should work when dtype=None + txt = TextIO("M 21 72.\nF 35 58.") + expected = (np.array(["M", "F"]), np.array([21, 35]), np.array([72., 58.])) + test = np.genfromtxt(txt, dtype=None, unpack=True, encoding="utf-8") + for arr, result in zip(expected, test): + assert_array_equal(arr, result) + assert_equal(arr.dtype, result.dtype) + + def test_unpack_single_name(self): + # Regression test for gh-4341 + # Unpacking should work when structured dtype has only one field + txt = TextIO("21\n35") + dt = {'names': ('a',), 'formats': ('i4',)} + expected = np.array([21, 35], dtype=np.int32) + test = np.genfromtxt(txt, dtype=dt, unpack=True) + assert_array_equal(expected, test) + assert_equal(expected.dtype, test.dtype) + + def test_squeeze_scalar(self): + # Regression test for gh-4341 + # Unpacking a scalar should give zero-dim output, + # even if dtype is structured + txt = TextIO("1") + dt = {'names': ('a',), 'formats': ('i4',)} + expected = np.array((1,), dtype=np.int32) + test = np.genfromtxt(txt, dtype=dt, unpack=True) + assert_array_equal(expected, test) + assert_equal((), test.shape) + assert_equal(expected.dtype, test.dtype) + + +class TestPathUsage: + # Test that pathlib.Path can be used + def test_loadtxt(self): + with temppath(suffix='.txt') as path: + path = Path(path) + a = np.array([[1.1, 2], [3, 4]]) + np.savetxt(path, a) + x = np.loadtxt(path) + assert_array_equal(x, a) + + def test_save_load(self): + # Test that pathlib.Path instances can be used with save. + with temppath(suffix='.npy') as path: + path = Path(path) + a = np.array([[1, 2], [3, 4]], int) + np.save(path, a) + data = np.load(path) + assert_array_equal(data, a) + + def test_save_load_memmap(self): + # Test that pathlib.Path instances can be loaded mem-mapped. + with temppath(suffix='.npy') as path: + path = Path(path) + a = np.array([[1, 2], [3, 4]], int) + np.save(path, a) + data = np.load(path, mmap_mode='r') + assert_array_equal(data, a) + # close the mem-mapped file + del data + if IS_PYPY: + break_cycles() + break_cycles() + + def test_save_load_memmap_readwrite(self): + # Test that pathlib.Path instances can be written mem-mapped. + with temppath(suffix='.npy') as path: + path = Path(path) + a = np.array([[1, 2], [3, 4]], int) + np.save(path, a) + b = np.load(path, mmap_mode='r+') + a[0][0] = 5 + b[0][0] = 5 + del b # closes the file + if IS_PYPY: + break_cycles() + break_cycles() + data = np.load(path) + assert_array_equal(data, a) + + def test_savez_load(self): + # Test that pathlib.Path instances can be used with savez. + with temppath(suffix='.npz') as path: + path = Path(path) + np.savez(path, lab='place holder') + with np.load(path) as data: + assert_array_equal(data['lab'], 'place holder') + + def test_savez_compressed_load(self): + # Test that pathlib.Path instances can be used with savez. 
+ with temppath(suffix='.npz') as path: + path = Path(path) + np.savez_compressed(path, lab='place holder') + data = np.load(path) + assert_array_equal(data['lab'], 'place holder') + data.close() + + def test_genfromtxt(self): + with temppath(suffix='.txt') as path: + path = Path(path) + a = np.array([(1, 2), (3, 4)]) + np.savetxt(path, a) + data = np.genfromtxt(path) + assert_array_equal(a, data) + + def test_ndfromtxt(self): + # Test outputting a standard ndarray + with temppath(suffix='.txt') as path: + path = Path(path) + with path.open('w') as f: + f.write(u'1 2\n3 4') + + control = np.array([[1, 2], [3, 4]], dtype=int) + test = np.genfromtxt(path, dtype=int) + assert_array_equal(test, control) + + def test_mafromtxt(self): + # From `test_fancy_dtype_alt` above + with temppath(suffix='.txt') as path: + path = Path(path) + with path.open('w') as f: + f.write(u'1,2,3.0\n4,5,6.0\n') + + test = np.genfromtxt(path, delimiter=',', usemask=True) + control = ma.array([(1.0, 2.0, 3.0), (4.0, 5.0, 6.0)]) + assert_equal(test, control) + + def test_recfromtxt(self): + with temppath(suffix='.txt') as path: + path = Path(path) + with path.open('w') as f: + f.write(u'A,B\n0,1\n2,3') + + kwargs = dict(delimiter=",", missing_values="N/A", names=True) + test = np.recfromtxt(path, **kwargs) + control = np.array([(0, 1), (2, 3)], + dtype=[('A', int), ('B', int)]) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + + def test_recfromcsv(self): + with temppath(suffix='.txt') as path: + path = Path(path) + with path.open('w') as f: + f.write(u'A,B\n0,1\n2,3') + + kwargs = dict(missing_values="N/A", names=True, case_sensitive=True) + test = np.recfromcsv(path, dtype=None, **kwargs) + control = np.array([(0, 1), (2, 3)], + dtype=[('A', int), ('B', int)]) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + + +def test_gzip_load(): + a = np.random.random((5, 5)) + + s = BytesIO() + f = gzip.GzipFile(fileobj=s, mode="w") + + np.save(f, a) + f.close() + s.seek(0) + + f = gzip.GzipFile(fileobj=s, mode="r") + assert_array_equal(np.load(f), a) + + +# These next two classes encode the minimal API needed to save()/load() arrays. +# The `test_ducktyping` ensures they work correctly +class JustWriter: + def __init__(self, base): + self.base = base + + def write(self, s): + return self.base.write(s) + + def flush(self): + return self.base.flush() + +class JustReader: + def __init__(self, base): + self.base = base + + def read(self, n): + return self.base.read(n) + + def seek(self, off, whence=0): + return self.base.seek(off, whence) + + +def test_ducktyping(): + a = np.random.random((5, 5)) + + s = BytesIO() + f = JustWriter(s) + + np.save(f, a) + f.flush() + s.seek(0) + + f = JustReader(s) + assert_array_equal(np.load(f), a) + + + +def test_gzip_loadtxt(): + # Thanks to another windows brokenness, we can't use + # NamedTemporaryFile: a file created from this function cannot be + # reopened by another open call. 
So we first put the gzipped string + # of the test reference array, write it to a securely opened file, + # which is then read from by the loadtxt function + s = BytesIO() + g = gzip.GzipFile(fileobj=s, mode='w') + g.write(b'1 2 3\n') + g.close() + + s.seek(0) + with temppath(suffix='.gz') as name: + with open(name, 'wb') as f: + f.write(s.read()) + res = np.loadtxt(name) + s.close() + + assert_array_equal(res, [1, 2, 3]) + + +def test_gzip_loadtxt_from_string(): + s = BytesIO() + f = gzip.GzipFile(fileobj=s, mode="w") + f.write(b'1 2 3\n') + f.close() + s.seek(0) + + f = gzip.GzipFile(fileobj=s, mode="r") + assert_array_equal(np.loadtxt(f), [1, 2, 3]) + + +def test_npzfile_dict(): + s = BytesIO() + x = np.zeros((3, 3)) + y = np.zeros((3, 3)) + + np.savez(s, x=x, y=y) + s.seek(0) + + z = np.load(s) + + assert_('x' in z) + assert_('y' in z) + assert_('x' in z.keys()) + assert_('y' in z.keys()) + + for f, a in z.items(): + assert_(f in ['x', 'y']) + assert_equal(a.shape, (3, 3)) + + assert_(len(z.items()) == 2) + + for f in z: + assert_(f in ['x', 'y']) + + assert_('x' in z.keys()) + + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_load_refcount(): + # Check that objects returned by np.load are directly freed based on + # their refcount, rather than needing the gc to collect them. + + f = BytesIO() + np.savez(f, [1, 2, 3]) + f.seek(0) + + with assert_no_gc_cycles(): + np.load(f) + + f.seek(0) + dt = [("a", 'u1', 2), ("b", 'u1', 2)] + with assert_no_gc_cycles(): + x = np.loadtxt(TextIO("0 1 2 3"), dtype=dt) + assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt)) diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/test_mixins.py b/MLPY/Lib/site-packages/numpy/lib/tests/test_mixins.py new file mode 100644 index 0000000000000000000000000000000000000000..3a7f5b86f4bdd5e2cb8ba3316761195dec8103df --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/tests/test_mixins.py @@ -0,0 +1,216 @@ +import numbers +import operator + +import numpy as np +from numpy.testing import assert_, assert_equal, assert_raises + + +# NOTE: This class should be kept as an exact copy of the example from the +# docstring for NDArrayOperatorsMixin. + +class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin): + def __init__(self, value): + self.value = np.asarray(value) + + # One might also consider adding the built-in list type to this + # list, to support operations like np.add(array_like, list) + _HANDLED_TYPES = (np.ndarray, numbers.Number) + + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + out = kwargs.get('out', ()) + for x in inputs + out: + # Only support operations with instances of _HANDLED_TYPES. + # Use ArrayLike instead of type(self) for isinstance to + # allow subclasses that don't override __array_ufunc__ to + # handle ArrayLike objects. + if not isinstance(x, self._HANDLED_TYPES + (ArrayLike,)): + return NotImplemented + + # Defer to the implementation of the ufunc on unwrapped values. 
+ inputs = tuple(x.value if isinstance(x, ArrayLike) else x + for x in inputs) + if out: + kwargs['out'] = tuple( + x.value if isinstance(x, ArrayLike) else x + for x in out) + result = getattr(ufunc, method)(*inputs, **kwargs) + + if type(result) is tuple: + # multiple return values + return tuple(type(self)(x) for x in result) + elif method == 'at': + # no return value + return None + else: + # one return value + return type(self)(result) + + def __repr__(self): + return '%s(%r)' % (type(self).__name__, self.value) + + +def wrap_array_like(result): + if type(result) is tuple: + return tuple(ArrayLike(r) for r in result) + else: + return ArrayLike(result) + + +def _assert_equal_type_and_value(result, expected, err_msg=None): + assert_equal(type(result), type(expected), err_msg=err_msg) + if isinstance(result, tuple): + assert_equal(len(result), len(expected), err_msg=err_msg) + for result_item, expected_item in zip(result, expected): + _assert_equal_type_and_value(result_item, expected_item, err_msg) + else: + assert_equal(result.value, expected.value, err_msg=err_msg) + assert_equal(getattr(result.value, 'dtype', None), + getattr(expected.value, 'dtype', None), err_msg=err_msg) + + +_ALL_BINARY_OPERATORS = [ + operator.lt, + operator.le, + operator.eq, + operator.ne, + operator.gt, + operator.ge, + operator.add, + operator.sub, + operator.mul, + operator.truediv, + operator.floordiv, + operator.mod, + divmod, + pow, + operator.lshift, + operator.rshift, + operator.and_, + operator.xor, + operator.or_, +] + + +class TestNDArrayOperatorsMixin: + + def test_array_like_add(self): + + def check(result): + _assert_equal_type_and_value(result, ArrayLike(0)) + + check(ArrayLike(0) + 0) + check(0 + ArrayLike(0)) + + check(ArrayLike(0) + np.array(0)) + check(np.array(0) + ArrayLike(0)) + + check(ArrayLike(np.array(0)) + 0) + check(0 + ArrayLike(np.array(0))) + + check(ArrayLike(np.array(0)) + np.array(0)) + check(np.array(0) + ArrayLike(np.array(0))) + + def test_inplace(self): + array_like = ArrayLike(np.array([0])) + array_like += 1 + _assert_equal_type_and_value(array_like, ArrayLike(np.array([1]))) + + array = np.array([0]) + array += ArrayLike(1) + _assert_equal_type_and_value(array, ArrayLike(np.array([1]))) + + def test_opt_out(self): + + class OptOut: + """Object that opts out of __array_ufunc__.""" + __array_ufunc__ = None + + def __add__(self, other): + return self + + def __radd__(self, other): + return self + + array_like = ArrayLike(1) + opt_out = OptOut() + + # supported operations + assert_(array_like + opt_out is opt_out) + assert_(opt_out + array_like is opt_out) + + # not supported + with assert_raises(TypeError): + # don't use the Python default, array_like = array_like + opt_out + array_like += opt_out + with assert_raises(TypeError): + array_like - opt_out + with assert_raises(TypeError): + opt_out - array_like + + def test_subclass(self): + + class SubArrayLike(ArrayLike): + """Should take precedence over ArrayLike.""" + + x = ArrayLike(0) + y = SubArrayLike(1) + _assert_equal_type_and_value(x + y, y) + _assert_equal_type_and_value(y + x, y) + + def test_object(self): + x = ArrayLike(0) + obj = object() + with assert_raises(TypeError): + x + obj + with assert_raises(TypeError): + obj + x + with assert_raises(TypeError): + x += obj + + def test_unary_methods(self): + array = np.array([-1, 0, 1, 2]) + array_like = ArrayLike(array) + for op in [operator.neg, + operator.pos, + abs, + operator.invert]: + _assert_equal_type_and_value(op(array_like), ArrayLike(op(array))) + + def 
test_forward_binary_methods(self): + array = np.array([-1, 0, 1, 2]) + array_like = ArrayLike(array) + for op in _ALL_BINARY_OPERATORS: + expected = wrap_array_like(op(array, 1)) + actual = op(array_like, 1) + err_msg = 'failed for operator {}'.format(op) + _assert_equal_type_and_value(expected, actual, err_msg=err_msg) + + def test_reflected_binary_methods(self): + for op in _ALL_BINARY_OPERATORS: + expected = wrap_array_like(op(2, 1)) + actual = op(2, ArrayLike(1)) + err_msg = 'failed for operator {}'.format(op) + _assert_equal_type_and_value(expected, actual, err_msg=err_msg) + + def test_matmul(self): + array = np.array([1, 2], dtype=np.float64) + array_like = ArrayLike(array) + expected = ArrayLike(np.float64(5)) + _assert_equal_type_and_value(expected, np.matmul(array_like, array)) + _assert_equal_type_and_value( + expected, operator.matmul(array_like, array)) + _assert_equal_type_and_value( + expected, operator.matmul(array, array_like)) + + def test_ufunc_at(self): + array = ArrayLike(np.array([1, 2, 3, 4])) + assert_(np.negative.at(array, np.array([0, 1])) is None) + _assert_equal_type_and_value(array, ArrayLike([-1, -2, 3, 4])) + + def test_ufunc_two_outputs(self): + mantissa, exponent = np.frexp(2 ** -3) + expected = (ArrayLike(mantissa), ArrayLike(exponent)) + _assert_equal_type_and_value( + np.frexp(ArrayLike(2 ** -3)), expected) + _assert_equal_type_and_value( + np.frexp(ArrayLike(np.array(2 ** -3))), expected) diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/test_nanfunctions.py b/MLPY/Lib/site-packages/numpy/lib/tests/test_nanfunctions.py new file mode 100644 index 0000000000000000000000000000000000000000..0b527e70872f1a3b42aafcbc29c8b6e2357acaeb --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/tests/test_nanfunctions.py @@ -0,0 +1,998 @@ +import warnings +import pytest + +import numpy as np +from numpy.lib.nanfunctions import _nan_mask, _replace_nan +from numpy.testing import ( + assert_, assert_equal, assert_almost_equal, assert_no_warnings, + assert_raises, assert_array_equal, suppress_warnings + ) + + +# Test data +_ndat = np.array([[0.6244, np.nan, 0.2692, 0.0116, np.nan, 0.1170], + [0.5351, -0.9403, np.nan, 0.2100, 0.4759, 0.2833], + [np.nan, np.nan, np.nan, 0.1042, np.nan, -0.5954], + [0.1610, np.nan, np.nan, 0.1859, 0.3146, np.nan]]) + + +# Rows of _ndat with nans removed +_rdat = [np.array([0.6244, 0.2692, 0.0116, 0.1170]), + np.array([0.5351, -0.9403, 0.2100, 0.4759, 0.2833]), + np.array([0.1042, -0.5954]), + np.array([0.1610, 0.1859, 0.3146])] + +# Rows of _ndat with nans converted to ones +_ndat_ones = np.array([[0.6244, 1.0, 0.2692, 0.0116, 1.0, 0.1170], + [0.5351, -0.9403, 1.0, 0.2100, 0.4759, 0.2833], + [1.0, 1.0, 1.0, 0.1042, 1.0, -0.5954], + [0.1610, 1.0, 1.0, 0.1859, 0.3146, 1.0]]) + +# Rows of _ndat with nans converted to zeros +_ndat_zeros = np.array([[0.6244, 0.0, 0.2692, 0.0116, 0.0, 0.1170], + [0.5351, -0.9403, 0.0, 0.2100, 0.4759, 0.2833], + [0.0, 0.0, 0.0, 0.1042, 0.0, -0.5954], + [0.1610, 0.0, 0.0, 0.1859, 0.3146, 0.0]]) + + +class TestNanFunctions_MinMax: + + nanfuncs = [np.nanmin, np.nanmax] + stdfuncs = [np.min, np.max] + + def test_mutation(self): + # Check that passed array is not modified. 
+ ndat = _ndat.copy() + for f in self.nanfuncs: + f(ndat) + assert_equal(ndat, _ndat) + + def test_keepdims(self): + mat = np.eye(3) + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for axis in [None, 0, 1]: + tgt = rf(mat, axis=axis, keepdims=True) + res = nf(mat, axis=axis, keepdims=True) + assert_(res.ndim == tgt.ndim) + + def test_out(self): + mat = np.eye(3) + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + resout = np.zeros(3) + tgt = rf(mat, axis=1) + res = nf(mat, axis=1, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + + def test_dtype_from_input(self): + codes = 'efdgFDG' + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for c in codes: + mat = np.eye(3, dtype=c) + tgt = rf(mat, axis=1).dtype.type + res = nf(mat, axis=1).dtype.type + assert_(res is tgt) + # scalar case + tgt = rf(mat, axis=None).dtype.type + res = nf(mat, axis=None).dtype.type + assert_(res is tgt) + + def test_result_values(self): + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + tgt = [rf(d) for d in _rdat] + res = nf(_ndat, axis=1) + assert_almost_equal(res, tgt) + + def test_allnans(self): + mat = np.array([np.nan]*9).reshape(3, 3) + for f in self.nanfuncs: + for axis in [None, 0, 1]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_(np.isnan(f(mat, axis=axis)).all()) + assert_(len(w) == 1, 'no warning raised') + assert_(issubclass(w[0].category, RuntimeWarning)) + # Check scalars + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_(np.isnan(f(np.nan))) + assert_(len(w) == 1, 'no warning raised') + assert_(issubclass(w[0].category, RuntimeWarning)) + + def test_masked(self): + mat = np.ma.fix_invalid(_ndat) + msk = mat._mask.copy() + for f in [np.nanmin]: + res = f(mat, axis=1) + tgt = f(_ndat, axis=1) + assert_equal(res, tgt) + assert_equal(mat._mask, msk) + assert_(not np.isinf(mat).any()) + + def test_scalar(self): + for f in self.nanfuncs: + assert_(f(0.) == 0.) 
+ + def test_subclass(self): + class MyNDArray(np.ndarray): + pass + + # Check that it works and that type and + # shape are preserved + mine = np.eye(3).view(MyNDArray) + for f in self.nanfuncs: + res = f(mine, axis=0) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == (3,)) + res = f(mine, axis=1) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == (3,)) + res = f(mine) + assert_(res.shape == ()) + + # check that rows of nan are dealt with for subclasses (#4628) + mine[1] = np.nan + for f in self.nanfuncs: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mine, axis=0) + assert_(isinstance(res, MyNDArray)) + assert_(not np.any(np.isnan(res))) + assert_(len(w) == 0) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mine, axis=1) + assert_(isinstance(res, MyNDArray)) + assert_(np.isnan(res[1]) and not np.isnan(res[0]) + and not np.isnan(res[2])) + assert_(len(w) == 1, 'no warning raised') + assert_(issubclass(w[0].category, RuntimeWarning)) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mine) + assert_(res.shape == ()) + assert_(res != np.nan) + assert_(len(w) == 0) + + def test_object_array(self): + arr = np.array([[1.0, 2.0], [np.nan, 4.0], [np.nan, np.nan]], dtype=object) + assert_equal(np.nanmin(arr), 1.0) + assert_equal(np.nanmin(arr, axis=0), [1.0, 2.0]) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + # assert_equal does not work on object arrays of nan + assert_equal(list(np.nanmin(arr, axis=1)), [1.0, 4.0, np.nan]) + assert_(len(w) == 1, 'no warning raised') + assert_(issubclass(w[0].category, RuntimeWarning)) + + +class TestNanFunctions_ArgminArgmax: + + nanfuncs = [np.nanargmin, np.nanargmax] + + def test_mutation(self): + # Check that passed array is not modified. + ndat = _ndat.copy() + for f in self.nanfuncs: + f(ndat) + assert_equal(ndat, _ndat) + + def test_result_values(self): + for f, fcmp in zip(self.nanfuncs, [np.greater, np.less]): + for row in _ndat: + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in") + ind = f(row) + val = row[ind] + # comparing with NaN is tricky as the result + # is always false except for NaN != NaN + assert_(not np.isnan(val)) + assert_(not fcmp(val, row).any()) + assert_(not np.equal(val, row[:ind]).any()) + + def test_allnans(self): + mat = np.array([np.nan]*9).reshape(3, 3) + for f in self.nanfuncs: + for axis in [None, 0, 1]: + assert_raises(ValueError, f, mat, axis=axis) + assert_raises(ValueError, f, np.nan) + + def test_empty(self): + mat = np.zeros((0, 3)) + for f in self.nanfuncs: + for axis in [0, None]: + assert_raises(ValueError, f, mat, axis=axis) + for axis in [1]: + res = f(mat, axis=axis) + assert_equal(res, np.zeros(0)) + + def test_scalar(self): + for f in self.nanfuncs: + assert_(f(0.) == 0.) 
+ + def test_subclass(self): + class MyNDArray(np.ndarray): + pass + + # Check that it works and that type and + # shape are preserved + mine = np.eye(3).view(MyNDArray) + for f in self.nanfuncs: + res = f(mine, axis=0) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == (3,)) + res = f(mine, axis=1) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == (3,)) + res = f(mine) + assert_(res.shape == ()) + + +class TestNanFunctions_IntTypes: + + int_types = (np.int8, np.int16, np.int32, np.int64, np.uint8, + np.uint16, np.uint32, np.uint64) + + mat = np.array([127, 39, 93, 87, 46]) + + def integer_arrays(self): + for dtype in self.int_types: + yield self.mat.astype(dtype) + + def test_nanmin(self): + tgt = np.min(self.mat) + for mat in self.integer_arrays(): + assert_equal(np.nanmin(mat), tgt) + + def test_nanmax(self): + tgt = np.max(self.mat) + for mat in self.integer_arrays(): + assert_equal(np.nanmax(mat), tgt) + + def test_nanargmin(self): + tgt = np.argmin(self.mat) + for mat in self.integer_arrays(): + assert_equal(np.nanargmin(mat), tgt) + + def test_nanargmax(self): + tgt = np.argmax(self.mat) + for mat in self.integer_arrays(): + assert_equal(np.nanargmax(mat), tgt) + + def test_nansum(self): + tgt = np.sum(self.mat) + for mat in self.integer_arrays(): + assert_equal(np.nansum(mat), tgt) + + def test_nanprod(self): + tgt = np.prod(self.mat) + for mat in self.integer_arrays(): + assert_equal(np.nanprod(mat), tgt) + + def test_nancumsum(self): + tgt = np.cumsum(self.mat) + for mat in self.integer_arrays(): + assert_equal(np.nancumsum(mat), tgt) + + def test_nancumprod(self): + tgt = np.cumprod(self.mat) + for mat in self.integer_arrays(): + assert_equal(np.nancumprod(mat), tgt) + + def test_nanmean(self): + tgt = np.mean(self.mat) + for mat in self.integer_arrays(): + assert_equal(np.nanmean(mat), tgt) + + def test_nanvar(self): + tgt = np.var(self.mat) + for mat in self.integer_arrays(): + assert_equal(np.nanvar(mat), tgt) + + tgt = np.var(mat, ddof=1) + for mat in self.integer_arrays(): + assert_equal(np.nanvar(mat, ddof=1), tgt) + + def test_nanstd(self): + tgt = np.std(self.mat) + for mat in self.integer_arrays(): + assert_equal(np.nanstd(mat), tgt) + + tgt = np.std(self.mat, ddof=1) + for mat in self.integer_arrays(): + assert_equal(np.nanstd(mat, ddof=1), tgt) + + +class SharedNanFunctionsTestsMixin: + def test_mutation(self): + # Check that passed array is not modified. 
+ ndat = _ndat.copy() + for f in self.nanfuncs: + f(ndat) + assert_equal(ndat, _ndat) + + def test_keepdims(self): + mat = np.eye(3) + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for axis in [None, 0, 1]: + tgt = rf(mat, axis=axis, keepdims=True) + res = nf(mat, axis=axis, keepdims=True) + assert_(res.ndim == tgt.ndim) + + def test_out(self): + mat = np.eye(3) + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + resout = np.zeros(3) + tgt = rf(mat, axis=1) + res = nf(mat, axis=1, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + + def test_dtype_from_dtype(self): + mat = np.eye(3) + codes = 'efdgFDG' + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for c in codes: + with suppress_warnings() as sup: + if nf in {np.nanstd, np.nanvar} and c in 'FDG': + # Giving the warning is a small bug, see gh-8000 + sup.filter(np.ComplexWarning) + tgt = rf(mat, dtype=np.dtype(c), axis=1).dtype.type + res = nf(mat, dtype=np.dtype(c), axis=1).dtype.type + assert_(res is tgt) + # scalar case + tgt = rf(mat, dtype=np.dtype(c), axis=None).dtype.type + res = nf(mat, dtype=np.dtype(c), axis=None).dtype.type + assert_(res is tgt) + + def test_dtype_from_char(self): + mat = np.eye(3) + codes = 'efdgFDG' + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for c in codes: + with suppress_warnings() as sup: + if nf in {np.nanstd, np.nanvar} and c in 'FDG': + # Giving the warning is a small bug, see gh-8000 + sup.filter(np.ComplexWarning) + tgt = rf(mat, dtype=c, axis=1).dtype.type + res = nf(mat, dtype=c, axis=1).dtype.type + assert_(res is tgt) + # scalar case + tgt = rf(mat, dtype=c, axis=None).dtype.type + res = nf(mat, dtype=c, axis=None).dtype.type + assert_(res is tgt) + + def test_dtype_from_input(self): + codes = 'efdgFDG' + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for c in codes: + mat = np.eye(3, dtype=c) + tgt = rf(mat, axis=1).dtype.type + res = nf(mat, axis=1).dtype.type + assert_(res is tgt, "res %s, tgt %s" % (res, tgt)) + # scalar case + tgt = rf(mat, axis=None).dtype.type + res = nf(mat, axis=None).dtype.type + assert_(res is tgt) + + def test_result_values(self): + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + tgt = [rf(d) for d in _rdat] + res = nf(_ndat, axis=1) + assert_almost_equal(res, tgt) + + def test_scalar(self): + for f in self.nanfuncs: + assert_(f(0.) == 0.) 
+ + def test_subclass(self): + class MyNDArray(np.ndarray): + pass + + # Check that it works and that type and + # shape are preserved + array = np.eye(3) + mine = array.view(MyNDArray) + for f in self.nanfuncs: + expected_shape = f(array, axis=0).shape + res = f(mine, axis=0) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == expected_shape) + expected_shape = f(array, axis=1).shape + res = f(mine, axis=1) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == expected_shape) + expected_shape = f(array).shape + res = f(mine) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == expected_shape) + + +class TestNanFunctions_SumProd(SharedNanFunctionsTestsMixin): + + nanfuncs = [np.nansum, np.nanprod] + stdfuncs = [np.sum, np.prod] + + def test_allnans(self): + # Check for FutureWarning + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = np.nansum([np.nan]*3, axis=None) + assert_(res == 0, 'result is not 0') + assert_(len(w) == 0, 'warning raised') + # Check scalar + res = np.nansum(np.nan) + assert_(res == 0, 'result is not 0') + assert_(len(w) == 0, 'warning raised') + # Check there is no warning for not all-nan + np.nansum([0]*3, axis=None) + assert_(len(w) == 0, 'unwanted warning raised') + + def test_empty(self): + for f, tgt_value in zip([np.nansum, np.nanprod], [0, 1]): + mat = np.zeros((0, 3)) + tgt = [tgt_value]*3 + res = f(mat, axis=0) + assert_equal(res, tgt) + tgt = [] + res = f(mat, axis=1) + assert_equal(res, tgt) + tgt = tgt_value + res = f(mat, axis=None) + assert_equal(res, tgt) + + +class TestNanFunctions_CumSumProd(SharedNanFunctionsTestsMixin): + + nanfuncs = [np.nancumsum, np.nancumprod] + stdfuncs = [np.cumsum, np.cumprod] + + def test_allnans(self): + for f, tgt_value in zip(self.nanfuncs, [0, 1]): + # Unlike other nan-functions, sum/prod/cumsum/cumprod don't warn on all nan input + with assert_no_warnings(): + res = f([np.nan]*3, axis=None) + tgt = tgt_value*np.ones((3)) + assert_(np.array_equal(res, tgt), 'result is not %s * np.ones((3))' % (tgt_value)) + # Check scalar + res = f(np.nan) + tgt = tgt_value*np.ones((1)) + assert_(np.array_equal(res, tgt), 'result is not %s * np.ones((1))' % (tgt_value)) + # Check there is no warning for not all-nan + f([0]*3, axis=None) + + def test_empty(self): + for f, tgt_value in zip(self.nanfuncs, [0, 1]): + mat = np.zeros((0, 3)) + tgt = tgt_value*np.ones((0, 3)) + res = f(mat, axis=0) + assert_equal(res, tgt) + tgt = mat + res = f(mat, axis=1) + assert_equal(res, tgt) + tgt = np.zeros((0)) + res = f(mat, axis=None) + assert_equal(res, tgt) + + def test_keepdims(self): + for f, g in zip(self.nanfuncs, self.stdfuncs): + mat = np.eye(3) + for axis in [None, 0, 1]: + tgt = f(mat, axis=axis, out=None) + res = g(mat, axis=axis, out=None) + assert_(res.ndim == tgt.ndim) + + for f in self.nanfuncs: + d = np.ones((3, 5, 7, 11)) + # Randomly set some elements to NaN: + rs = np.random.RandomState(0) + d[rs.rand(*d.shape) < 0.5] = np.nan + res = f(d, axis=None) + assert_equal(res.shape, (1155,)) + for axis in np.arange(4): + res = f(d, axis=axis) + assert_equal(res.shape, (3, 5, 7, 11)) + + def test_result_values(self): + for axis in (-2, -1, 0, 1, None): + tgt = np.cumprod(_ndat_ones, axis=axis) + res = np.nancumprod(_ndat, axis=axis) + assert_almost_equal(res, tgt) + tgt = np.cumsum(_ndat_zeros,axis=axis) + res = np.nancumsum(_ndat, axis=axis) + assert_almost_equal(res, tgt) + + def test_out(self): + mat = np.eye(3) + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + resout = 
np.eye(3) + for axis in (-2, -1, 0, 1): + tgt = rf(mat, axis=axis) + res = nf(mat, axis=axis, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + + +class TestNanFunctions_MeanVarStd(SharedNanFunctionsTestsMixin): + + nanfuncs = [np.nanmean, np.nanvar, np.nanstd] + stdfuncs = [np.mean, np.var, np.std] + + def test_dtype_error(self): + for f in self.nanfuncs: + for dtype in [np.bool_, np.int_, np.object_]: + assert_raises(TypeError, f, _ndat, axis=1, dtype=dtype) + + def test_out_dtype_error(self): + for f in self.nanfuncs: + for dtype in [np.bool_, np.int_, np.object_]: + out = np.empty(_ndat.shape[0], dtype=dtype) + assert_raises(TypeError, f, _ndat, axis=1, out=out) + + def test_ddof(self): + nanfuncs = [np.nanvar, np.nanstd] + stdfuncs = [np.var, np.std] + for nf, rf in zip(nanfuncs, stdfuncs): + for ddof in [0, 1]: + tgt = [rf(d, ddof=ddof) for d in _rdat] + res = nf(_ndat, axis=1, ddof=ddof) + assert_almost_equal(res, tgt) + + def test_ddof_too_big(self): + nanfuncs = [np.nanvar, np.nanstd] + stdfuncs = [np.var, np.std] + dsize = [len(d) for d in _rdat] + for nf, rf in zip(nanfuncs, stdfuncs): + for ddof in range(5): + with suppress_warnings() as sup: + sup.record(RuntimeWarning) + sup.filter(np.ComplexWarning) + tgt = [ddof >= d for d in dsize] + res = nf(_ndat, axis=1, ddof=ddof) + assert_equal(np.isnan(res), tgt) + if any(tgt): + assert_(len(sup.log) == 1) + else: + assert_(len(sup.log) == 0) + + def test_allnans(self): + mat = np.array([np.nan]*9).reshape(3, 3) + for f in self.nanfuncs: + for axis in [None, 0, 1]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_(np.isnan(f(mat, axis=axis)).all()) + assert_(len(w) == 1) + assert_(issubclass(w[0].category, RuntimeWarning)) + # Check scalar + assert_(np.isnan(f(np.nan))) + assert_(len(w) == 2) + assert_(issubclass(w[0].category, RuntimeWarning)) + + def test_empty(self): + mat = np.zeros((0, 3)) + for f in self.nanfuncs: + for axis in [0, None]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_(np.isnan(f(mat, axis=axis)).all()) + assert_(len(w) == 1) + assert_(issubclass(w[0].category, RuntimeWarning)) + for axis in [1]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_equal(f(mat, axis=axis), np.zeros([])) + assert_(len(w) == 0) + + +_TIME_UNITS = ( + "Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as" +) + +# All `inexact` + `timdelta64` type codes +_TYPE_CODES = list(np.typecodes["AllFloat"]) +_TYPE_CODES += [f"m8[{unit}]" for unit in _TIME_UNITS] + + +class TestNanFunctions_Median: + + def test_mutation(self): + # Check that passed array is not modified. 
+ ndat = _ndat.copy() + np.nanmedian(ndat) + assert_equal(ndat, _ndat) + + def test_keepdims(self): + mat = np.eye(3) + for axis in [None, 0, 1]: + tgt = np.median(mat, axis=axis, out=None, overwrite_input=False) + res = np.nanmedian(mat, axis=axis, out=None, overwrite_input=False) + assert_(res.ndim == tgt.ndim) + + d = np.ones((3, 5, 7, 11)) + # Randomly set some elements to NaN: + w = np.random.random((4, 200)) * np.array(d.shape)[:, None] + w = w.astype(np.intp) + d[tuple(w)] = np.nan + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + res = np.nanmedian(d, axis=None, keepdims=True) + assert_equal(res.shape, (1, 1, 1, 1)) + res = np.nanmedian(d, axis=(0, 1), keepdims=True) + assert_equal(res.shape, (1, 1, 7, 11)) + res = np.nanmedian(d, axis=(0, 3), keepdims=True) + assert_equal(res.shape, (1, 5, 7, 1)) + res = np.nanmedian(d, axis=(1,), keepdims=True) + assert_equal(res.shape, (3, 1, 7, 11)) + res = np.nanmedian(d, axis=(0, 1, 2, 3), keepdims=True) + assert_equal(res.shape, (1, 1, 1, 1)) + res = np.nanmedian(d, axis=(0, 1, 3), keepdims=True) + assert_equal(res.shape, (1, 1, 7, 1)) + + def test_out(self): + mat = np.random.rand(3, 3) + nan_mat = np.insert(mat, [0, 2], np.nan, axis=1) + resout = np.zeros(3) + tgt = np.median(mat, axis=1) + res = np.nanmedian(nan_mat, axis=1, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + # 0-d output: + resout = np.zeros(()) + tgt = np.median(mat, axis=None) + res = np.nanmedian(nan_mat, axis=None, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + res = np.nanmedian(nan_mat, axis=(0, 1), out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + + def test_small_large(self): + # test the small and large code paths, current cutoff 400 elements + for s in [5, 20, 51, 200, 1000]: + d = np.random.randn(4, s) + # Randomly set some elements to NaN: + w = np.random.randint(0, d.size, size=d.size // 5) + d.ravel()[w] = np.nan + d[:,0] = 1. 
# ensure at least one good value + # use normal median without nans to compare + tgt = [] + for x in d: + nonan = np.compress(~np.isnan(x), x) + tgt.append(np.median(nonan, overwrite_input=True)) + + assert_array_equal(np.nanmedian(d, axis=-1), tgt) + + def test_result_values(self): + tgt = [np.median(d) for d in _rdat] + res = np.nanmedian(_ndat, axis=1) + assert_almost_equal(res, tgt) + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", _TYPE_CODES) + def test_allnans(self, dtype, axis): + mat = np.full((3, 3), np.nan).astype(dtype) + with suppress_warnings() as sup: + sup.record(RuntimeWarning) + + output = np.nanmedian(mat, axis=axis) + assert output.dtype == mat.dtype + assert np.isnan(output).all() + + if axis is None: + assert_(len(sup.log) == 1) + else: + assert_(len(sup.log) == 3) + + # Check scalar + scalar = np.array(np.nan).astype(dtype)[()] + output_scalar = np.nanmedian(scalar) + assert output_scalar.dtype == scalar.dtype + assert np.isnan(output_scalar) + + if axis is None: + assert_(len(sup.log) == 2) + else: + assert_(len(sup.log) == 4) + + def test_empty(self): + mat = np.zeros((0, 3)) + for axis in [0, None]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_(np.isnan(np.nanmedian(mat, axis=axis)).all()) + assert_(len(w) == 1) + assert_(issubclass(w[0].category, RuntimeWarning)) + for axis in [1]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_equal(np.nanmedian(mat, axis=axis), np.zeros([])) + assert_(len(w) == 0) + + def test_scalar(self): + assert_(np.nanmedian(0.) == 0.) + + def test_extended_axis_invalid(self): + d = np.ones((3, 5, 7, 11)) + assert_raises(np.AxisError, np.nanmedian, d, axis=-5) + assert_raises(np.AxisError, np.nanmedian, d, axis=(0, -5)) + assert_raises(np.AxisError, np.nanmedian, d, axis=4) + assert_raises(np.AxisError, np.nanmedian, d, axis=(0, 4)) + assert_raises(ValueError, np.nanmedian, d, axis=(1, 1)) + + def test_float_special(self): + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + for inf in [np.inf, -np.inf]: + a = np.array([[inf, np.nan], [np.nan, np.nan]]) + assert_equal(np.nanmedian(a, axis=0), [inf, np.nan]) + assert_equal(np.nanmedian(a, axis=1), [inf, np.nan]) + assert_equal(np.nanmedian(a), inf) + + # minimum fill value check + a = np.array([[np.nan, np.nan, inf], + [np.nan, np.nan, inf]]) + assert_equal(np.nanmedian(a), inf) + assert_equal(np.nanmedian(a, axis=0), [np.nan, np.nan, inf]) + assert_equal(np.nanmedian(a, axis=1), inf) + + # no mask path + a = np.array([[inf, inf], [inf, inf]]) + assert_equal(np.nanmedian(a, axis=1), inf) + + a = np.array([[inf, 7, -inf, -9], + [-10, np.nan, np.nan, 5], + [4, np.nan, np.nan, inf]], + dtype=np.float32) + if inf > 0: + assert_equal(np.nanmedian(a, axis=0), [4., 7., -inf, 5.]) + assert_equal(np.nanmedian(a), 4.5) + else: + assert_equal(np.nanmedian(a, axis=0), [-10., 7., -inf, -9.]) + assert_equal(np.nanmedian(a), -2.5) + assert_equal(np.nanmedian(a, axis=-1), [-1., -2.5, inf]) + + for i in range(0, 10): + for j in range(1, 10): + a = np.array([([np.nan] * i) + ([inf] * j)] * 2) + assert_equal(np.nanmedian(a), inf) + assert_equal(np.nanmedian(a, axis=1), inf) + assert_equal(np.nanmedian(a, axis=0), + ([np.nan] * i) + [inf] * j) + + a = np.array([([np.nan] * i) + ([-inf] * j)] * 2) + assert_equal(np.nanmedian(a), -inf) + assert_equal(np.nanmedian(a, axis=1), -inf) + assert_equal(np.nanmedian(a, axis=0), + ([np.nan] * i) + [-inf] * j) + + +class 
TestNanFunctions_Percentile: + + def test_mutation(self): + # Check that passed array is not modified. + ndat = _ndat.copy() + np.nanpercentile(ndat, 30) + assert_equal(ndat, _ndat) + + def test_keepdims(self): + mat = np.eye(3) + for axis in [None, 0, 1]: + tgt = np.percentile(mat, 70, axis=axis, out=None, + overwrite_input=False) + res = np.nanpercentile(mat, 70, axis=axis, out=None, + overwrite_input=False) + assert_(res.ndim == tgt.ndim) + + d = np.ones((3, 5, 7, 11)) + # Randomly set some elements to NaN: + w = np.random.random((4, 200)) * np.array(d.shape)[:, None] + w = w.astype(np.intp) + d[tuple(w)] = np.nan + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + res = np.nanpercentile(d, 90, axis=None, keepdims=True) + assert_equal(res.shape, (1, 1, 1, 1)) + res = np.nanpercentile(d, 90, axis=(0, 1), keepdims=True) + assert_equal(res.shape, (1, 1, 7, 11)) + res = np.nanpercentile(d, 90, axis=(0, 3), keepdims=True) + assert_equal(res.shape, (1, 5, 7, 1)) + res = np.nanpercentile(d, 90, axis=(1,), keepdims=True) + assert_equal(res.shape, (3, 1, 7, 11)) + res = np.nanpercentile(d, 90, axis=(0, 1, 2, 3), keepdims=True) + assert_equal(res.shape, (1, 1, 1, 1)) + res = np.nanpercentile(d, 90, axis=(0, 1, 3), keepdims=True) + assert_equal(res.shape, (1, 1, 7, 1)) + + def test_out(self): + mat = np.random.rand(3, 3) + nan_mat = np.insert(mat, [0, 2], np.nan, axis=1) + resout = np.zeros(3) + tgt = np.percentile(mat, 42, axis=1) + res = np.nanpercentile(nan_mat, 42, axis=1, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + # 0-d output: + resout = np.zeros(()) + tgt = np.percentile(mat, 42, axis=None) + res = np.nanpercentile(nan_mat, 42, axis=None, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + res = np.nanpercentile(nan_mat, 42, axis=(0, 1), out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + + def test_result_values(self): + tgt = [np.percentile(d, 28) for d in _rdat] + res = np.nanpercentile(_ndat, 28, axis=1) + assert_almost_equal(res, tgt) + # Transpose the array to fit the output convention of numpy.percentile + tgt = np.transpose([np.percentile(d, (28, 98)) for d in _rdat]) + res = np.nanpercentile(_ndat, (28, 98), axis=1) + assert_almost_equal(res, tgt) + + def test_allnans(self): + mat = np.array([np.nan]*9).reshape(3, 3) + for axis in [None, 0, 1]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_(np.isnan(np.nanpercentile(mat, 60, axis=axis)).all()) + if axis is None: + assert_(len(w) == 1) + else: + assert_(len(w) == 3) + assert_(issubclass(w[0].category, RuntimeWarning)) + # Check scalar + assert_(np.isnan(np.nanpercentile(np.nan, 60))) + if axis is None: + assert_(len(w) == 2) + else: + assert_(len(w) == 4) + assert_(issubclass(w[0].category, RuntimeWarning)) + + def test_empty(self): + mat = np.zeros((0, 3)) + for axis in [0, None]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_(np.isnan(np.nanpercentile(mat, 40, axis=axis)).all()) + assert_(len(w) == 1) + assert_(issubclass(w[0].category, RuntimeWarning)) + for axis in [1]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_equal(np.nanpercentile(mat, 40, axis=axis), np.zeros([])) + assert_(len(w) == 0) + + def test_scalar(self): + assert_equal(np.nanpercentile(0., 100), 0.) 
+ a = np.arange(6) + r = np.nanpercentile(a, 50, axis=0) + assert_equal(r, 2.5) + assert_(np.isscalar(r)) + + def test_extended_axis_invalid(self): + d = np.ones((3, 5, 7, 11)) + assert_raises(np.AxisError, np.nanpercentile, d, q=5, axis=-5) + assert_raises(np.AxisError, np.nanpercentile, d, q=5, axis=(0, -5)) + assert_raises(np.AxisError, np.nanpercentile, d, q=5, axis=4) + assert_raises(np.AxisError, np.nanpercentile, d, q=5, axis=(0, 4)) + assert_raises(ValueError, np.nanpercentile, d, q=5, axis=(1, 1)) + + def test_multiple_percentiles(self): + perc = [50, 100] + mat = np.ones((4, 3)) + nan_mat = np.nan * mat + # For checking consistency in higher dimensional case + large_mat = np.ones((3, 4, 5)) + large_mat[:, 0:2:4, :] = 0 + large_mat[:, :, 3:] *= 2 + for axis in [None, 0, 1]: + for keepdim in [False, True]: + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "All-NaN slice encountered") + val = np.percentile(mat, perc, axis=axis, keepdims=keepdim) + nan_val = np.nanpercentile(nan_mat, perc, axis=axis, + keepdims=keepdim) + assert_equal(nan_val.shape, val.shape) + + val = np.percentile(large_mat, perc, axis=axis, + keepdims=keepdim) + nan_val = np.nanpercentile(large_mat, perc, axis=axis, + keepdims=keepdim) + assert_equal(nan_val, val) + + megamat = np.ones((3, 4, 5, 6)) + assert_equal(np.nanpercentile(megamat, perc, axis=(1, 2)).shape, (2, 3, 6)) + + +class TestNanFunctions_Quantile: + # most of this is already tested by TestPercentile + + def test_regression(self): + ar = np.arange(24).reshape(2, 3, 4).astype(float) + ar[0][1] = np.nan + + assert_equal(np.nanquantile(ar, q=0.5), np.nanpercentile(ar, q=50)) + assert_equal(np.nanquantile(ar, q=0.5, axis=0), + np.nanpercentile(ar, q=50, axis=0)) + assert_equal(np.nanquantile(ar, q=0.5, axis=1), + np.nanpercentile(ar, q=50, axis=1)) + assert_equal(np.nanquantile(ar, q=[0.5], axis=1), + np.nanpercentile(ar, q=[50], axis=1)) + assert_equal(np.nanquantile(ar, q=[0.25, 0.5, 0.75], axis=1), + np.nanpercentile(ar, q=[25, 50, 75], axis=1)) + + def test_basic(self): + x = np.arange(8) * 0.5 + assert_equal(np.nanquantile(x, 0), 0.) 
+ assert_equal(np.nanquantile(x, 1), 3.5) + assert_equal(np.nanquantile(x, 0.5), 1.75) + + def test_no_p_overwrite(self): + # this is worth retesting, because quantile does not make a copy + p0 = np.array([0, 0.75, 0.25, 0.5, 1.0]) + p = p0.copy() + np.nanquantile(np.arange(100.), p, interpolation="midpoint") + assert_array_equal(p, p0) + + p0 = p0.tolist() + p = p.tolist() + np.nanquantile(np.arange(100.), p, interpolation="midpoint") + assert_array_equal(p, p0) + +@pytest.mark.parametrize("arr, expected", [ + # array of floats with some nans + (np.array([np.nan, 5.0, np.nan, np.inf]), + np.array([False, True, False, True])), + # int64 array that can't possibly have nans + (np.array([1, 5, 7, 9], dtype=np.int64), + True), + # bool array that can't possibly have nans + (np.array([False, True, False, True]), + True), + # 2-D complex array with nans + (np.array([[np.nan, 5.0], + [np.nan, np.inf]], dtype=np.complex64), + np.array([[False, True], + [False, True]])), + ]) +def test__nan_mask(arr, expected): + for out in [None, np.empty(arr.shape, dtype=np.bool_)]: + actual = _nan_mask(arr, out=out) + assert_equal(actual, expected) + # the above won't distinguish between True proper + # and an array of True values; we want True proper + # for types that can't possibly contain NaN + if type(expected) is not np.ndarray: + assert actual is True + + +def test__replace_nan(): + """ Test that _replace_nan returns the original array if there are no + NaNs, not a copy. + """ + for dtype in [np.bool_, np.int32, np.int64]: + arr = np.array([0, 1], dtype=dtype) + result, mask = _replace_nan(arr, 0) + assert mask is None + # do not make a copy if there are no nans + assert result is arr + + for dtype in [np.float32, np.float64]: + arr = np.array([0, 1], dtype=dtype) + result, mask = _replace_nan(arr, 2) + assert (mask == False).all() + # mask is not None, so we make a copy + assert result is not arr + assert_equal(result, arr) + + arr_nan = np.array([0, 1, np.nan], dtype=dtype) + result_nan, mask_nan = _replace_nan(arr_nan, 2) + assert_equal(mask_nan, np.array([False, False, True])) + assert result_nan is not arr_nan + assert_equal(result_nan, np.array([0, 1, 2])) + assert np.isnan(arr_nan[-1]) diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/test_packbits.py b/MLPY/Lib/site-packages/numpy/lib/tests/test_packbits.py new file mode 100644 index 0000000000000000000000000000000000000000..c2f1d48d4b6a5a60d603198c253e8265f4ec6346 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/tests/test_packbits.py @@ -0,0 +1,376 @@ +import numpy as np +from numpy.testing import assert_array_equal, assert_equal, assert_raises +import pytest +from itertools import chain + +def test_packbits(): + # Copied from the docstring. + a = [[[1, 0, 1], [0, 1, 0]], + [[1, 1, 0], [0, 0, 1]]] + for dt in '?bBhHiIlLqQ': + arr = np.array(a, dtype=dt) + b = np.packbits(arr, axis=-1) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, np.array([[[160], [64]], [[192], [32]]])) + + assert_raises(TypeError, np.packbits, np.array(a, dtype=float)) + + +def test_packbits_empty(): + shapes = [ + (0,), (10, 20, 0), (10, 0, 20), (0, 10, 20), (20, 0, 0), (0, 20, 0), + (0, 0, 20), (0, 0, 0), + ] + for dt in '?bBhHiIlLqQ': + for shape in shapes: + a = np.empty(shape, dtype=dt) + b = np.packbits(a) + assert_equal(b.dtype, np.uint8) + assert_equal(b.shape, (0,)) + + +def test_packbits_empty_with_axis(): + # Original shapes and lists of packed shapes for different axes. 
+ shapes = [ + ((0,), [(0,)]), + ((10, 20, 0), [(2, 20, 0), (10, 3, 0), (10, 20, 0)]), + ((10, 0, 20), [(2, 0, 20), (10, 0, 20), (10, 0, 3)]), + ((0, 10, 20), [(0, 10, 20), (0, 2, 20), (0, 10, 3)]), + ((20, 0, 0), [(3, 0, 0), (20, 0, 0), (20, 0, 0)]), + ((0, 20, 0), [(0, 20, 0), (0, 3, 0), (0, 20, 0)]), + ((0, 0, 20), [(0, 0, 20), (0, 0, 20), (0, 0, 3)]), + ((0, 0, 0), [(0, 0, 0), (0, 0, 0), (0, 0, 0)]), + ] + for dt in '?bBhHiIlLqQ': + for in_shape, out_shapes in shapes: + for ax, out_shape in enumerate(out_shapes): + a = np.empty(in_shape, dtype=dt) + b = np.packbits(a, axis=ax) + assert_equal(b.dtype, np.uint8) + assert_equal(b.shape, out_shape) + +@pytest.mark.parametrize('bitorder', ('little', 'big')) +def test_packbits_large(bitorder): + # test data large enough for 16 byte vectorization + a = np.array([1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, + 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, + 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, + 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, + 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, + 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, + 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, + 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, + 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, + 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, + 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, + 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, + 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, + 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, + 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0]) + a = a.repeat(3) + for dtype in '?bBhHiIlLqQ': + arr = np.array(a, dtype=dtype) + b = np.packbits(arr, axis=None, bitorder=bitorder) + assert_equal(b.dtype, np.uint8) + r = [252, 127, 192, 3, 254, 7, 252, 0, 7, 31, 240, 0, 28, 1, 255, 252, + 113, 248, 3, 255, 192, 28, 15, 192, 28, 126, 0, 224, 127, 255, + 227, 142, 7, 31, 142, 63, 28, 126, 56, 227, 240, 0, 227, 128, 63, + 224, 14, 56, 252, 112, 56, 255, 241, 248, 3, 240, 56, 224, 112, + 63, 255, 255, 199, 224, 14, 0, 31, 143, 192, 3, 255, 199, 0, 1, + 255, 224, 1, 255, 252, 126, 63, 0, 1, 192, 252, 14, 63, 0, 15, + 199, 252, 113, 255, 3, 128, 56, 252, 14, 7, 0, 113, 255, 255, 142, 56, 227, + 129, 248, 227, 129, 199, 31, 128] + if bitorder == 'big': + assert_array_equal(b, r) + # equal for size being multiple of 8 + assert_array_equal(np.unpackbits(b, bitorder=bitorder)[:-4], a) + + # check last byte of different remainders (16 byte vectorization) + b = [np.packbits(arr[:-i], axis=None)[-1] for i in range(1, 16)] + assert_array_equal(b, [128, 128, 128, 31, 30, 28, 24, 16, 0, 0, 0, 199, + 198, 196, 192]) + + + arr = arr.reshape(36, 25) + b = np.packbits(arr, axis=0) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, [[190, 186, 178, 178, 150, 215, 87, 83, 83, 195, + 199, 206, 204, 204, 140, 140, 136, 136, 8, 40, 105, + 107, 75, 74, 88], + [72, 216, 248, 241, 227, 195, 202, 90, 90, 83, + 83, 119, 127, 109, 73, 64, 208, 244, 189, 45, + 41, 104, 122, 90, 18], + [113, 120, 248, 216, 152, 24, 60, 52, 182, 150, + 150, 150, 146, 210, 210, 246, 255, 255, 223, + 151, 21, 17, 17, 131, 163], + [214, 210, 210, 64, 68, 5, 5, 1, 72, 88, 92, + 92, 78, 110, 39, 181, 149, 220, 222, 218, 218, + 202, 234, 170, 168], + [0, 128, 128, 192, 80, 112, 48, 160, 160, 224, + 240, 208, 144, 128, 160, 224, 240, 208, 144, + 144, 176, 240, 224, 192, 
128]]) + + b = np.packbits(arr, axis=1) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, [[252, 127, 192, 0], + [ 7, 252, 15, 128], + [240, 0, 28, 0], + [255, 128, 0, 128], + [192, 31, 255, 128], + [142, 63, 0, 0], + [255, 240, 7, 0], + [ 7, 224, 14, 0], + [126, 0, 224, 0], + [255, 255, 199, 0], + [ 56, 28, 126, 0], + [113, 248, 227, 128], + [227, 142, 63, 0], + [ 0, 28, 112, 0], + [ 15, 248, 3, 128], + [ 28, 126, 56, 0], + [ 56, 255, 241, 128], + [240, 7, 224, 0], + [227, 129, 192, 128], + [255, 255, 254, 0], + [126, 0, 224, 0], + [ 3, 241, 248, 0], + [ 0, 255, 241, 128], + [128, 0, 255, 128], + [224, 1, 255, 128], + [248, 252, 126, 0], + [ 0, 7, 3, 128], + [224, 113, 248, 0], + [ 0, 252, 127, 128], + [142, 63, 224, 0], + [224, 14, 63, 0], + [ 7, 3, 128, 0], + [113, 255, 255, 128], + [ 28, 113, 199, 0], + [ 7, 227, 142, 0], + [ 14, 56, 252, 0]]) + + arr = arr.T.copy() + b = np.packbits(arr, axis=0) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, [[252, 7, 240, 255, 192, 142, 255, 7, 126, 255, + 56, 113, 227, 0, 15, 28, 56, 240, 227, 255, + 126, 3, 0, 128, 224, 248, 0, 224, 0, 142, 224, + 7, 113, 28, 7, 14], + [127, 252, 0, 128, 31, 63, 240, 224, 0, 255, + 28, 248, 142, 28, 248, 126, 255, 7, 129, 255, + 0, 241, 255, 0, 1, 252, 7, 113, 252, 63, 14, + 3, 255, 113, 227, 56], + [192, 15, 28, 0, 255, 0, 7, 14, 224, 199, 126, + 227, 63, 112, 3, 56, 241, 224, 192, 254, 224, + 248, 241, 255, 255, 126, 3, 248, 127, 224, 63, + 128, 255, 199, 142, 252], + [0, 128, 0, 128, 128, 0, 0, 0, 0, 0, 0, 128, 0, + 0, 128, 0, 128, 0, 128, 0, 0, 0, 128, 128, + 128, 0, 128, 0, 128, 0, 0, 0, 128, 0, 0, 0]]) + + b = np.packbits(arr, axis=1) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, [[190, 72, 113, 214, 0], + [186, 216, 120, 210, 128], + [178, 248, 248, 210, 128], + [178, 241, 216, 64, 192], + [150, 227, 152, 68, 80], + [215, 195, 24, 5, 112], + [ 87, 202, 60, 5, 48], + [ 83, 90, 52, 1, 160], + [ 83, 90, 182, 72, 160], + [195, 83, 150, 88, 224], + [199, 83, 150, 92, 240], + [206, 119, 150, 92, 208], + [204, 127, 146, 78, 144], + [204, 109, 210, 110, 128], + [140, 73, 210, 39, 160], + [140, 64, 246, 181, 224], + [136, 208, 255, 149, 240], + [136, 244, 255, 220, 208], + [ 8, 189, 223, 222, 144], + [ 40, 45, 151, 218, 144], + [105, 41, 21, 218, 176], + [107, 104, 17, 202, 240], + [ 75, 122, 17, 234, 224], + [ 74, 90, 131, 170, 192], + [ 88, 18, 163, 168, 128]]) + + + # result is the same if input is multiplied with a nonzero value + for dtype in 'bBhHiIlLqQ': + arr = np.array(a, dtype=dtype) + rnd = np.random.randint(low=np.iinfo(dtype).min, + high=np.iinfo(dtype).max, size=arr.size, + dtype=dtype) + rnd[rnd == 0] = 1 + arr *= rnd.astype(dtype) + b = np.packbits(arr, axis=-1) + assert_array_equal(np.unpackbits(b)[:-4], a) + + assert_raises(TypeError, np.packbits, np.array(a, dtype=float)) + + +def test_packbits_very_large(): + # test some with a larger arrays gh-8637 + # code is covered earlier but larger array makes crash on bug more likely + for s in range(950, 1050): + for dt in '?bBhHiIlLqQ': + x = np.ones((200, s), dtype=bool) + np.packbits(x, axis=1) + + +def test_unpackbits(): + # Copied from the docstring. 
+ a = np.array([[2], [7], [23]], dtype=np.uint8) + b = np.unpackbits(a, axis=1) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, np.array([[0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 0, 1, 0, 1, 1, 1]])) + +def test_pack_unpack_order(): + a = np.array([[2], [7], [23]], dtype=np.uint8) + b = np.unpackbits(a, axis=1) + assert_equal(b.dtype, np.uint8) + b_little = np.unpackbits(a, axis=1, bitorder='little') + b_big = np.unpackbits(a, axis=1, bitorder='big') + assert_array_equal(b, b_big) + assert_array_equal(a, np.packbits(b_little, axis=1, bitorder='little')) + assert_array_equal(b[:,::-1], b_little) + assert_array_equal(a, np.packbits(b_big, axis=1, bitorder='big')) + assert_raises(ValueError, np.unpackbits, a, bitorder='r') + assert_raises(TypeError, np.unpackbits, a, bitorder=10) + + + +def test_unpackbits_empty(): + a = np.empty((0,), dtype=np.uint8) + b = np.unpackbits(a) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, np.empty((0,))) + + +def test_unpackbits_empty_with_axis(): + # Lists of packed shapes for different axes and unpacked shapes. + shapes = [ + ([(0,)], (0,)), + ([(2, 24, 0), (16, 3, 0), (16, 24, 0)], (16, 24, 0)), + ([(2, 0, 24), (16, 0, 24), (16, 0, 3)], (16, 0, 24)), + ([(0, 16, 24), (0, 2, 24), (0, 16, 3)], (0, 16, 24)), + ([(3, 0, 0), (24, 0, 0), (24, 0, 0)], (24, 0, 0)), + ([(0, 24, 0), (0, 3, 0), (0, 24, 0)], (0, 24, 0)), + ([(0, 0, 24), (0, 0, 24), (0, 0, 3)], (0, 0, 24)), + ([(0, 0, 0), (0, 0, 0), (0, 0, 0)], (0, 0, 0)), + ] + for in_shapes, out_shape in shapes: + for ax, in_shape in enumerate(in_shapes): + a = np.empty(in_shape, dtype=np.uint8) + b = np.unpackbits(a, axis=ax) + assert_equal(b.dtype, np.uint8) + assert_equal(b.shape, out_shape) + + +def test_unpackbits_large(): + # test all possible numbers via comparison to already tested packbits + d = np.arange(277, dtype=np.uint8) + assert_array_equal(np.packbits(np.unpackbits(d)), d) + assert_array_equal(np.packbits(np.unpackbits(d[::2])), d[::2]) + d = np.tile(d, (3, 1)) + assert_array_equal(np.packbits(np.unpackbits(d, axis=1), axis=1), d) + d = d.T.copy() + assert_array_equal(np.packbits(np.unpackbits(d, axis=0), axis=0), d) + + +class TestCount(): + x = np.array([ + [1, 0, 1, 0, 0, 1, 0], + [0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 1, 1], + [1, 1, 0, 0, 0, 1, 1], + [1, 0, 1, 0, 1, 0, 1], + [0, 0, 1, 1, 1, 0, 0], + [0, 1, 0, 1, 0, 1, 0], + ], dtype=np.uint8) + padded1 = np.zeros(57, dtype=np.uint8) + padded1[:49] = x.ravel() + padded1b = np.zeros(57, dtype=np.uint8) + padded1b[:49] = x[::-1].copy().ravel() + padded2 = np.zeros((9, 9), dtype=np.uint8) + padded2[:7, :7] = x + + @pytest.mark.parametrize('bitorder', ('little', 'big')) + @pytest.mark.parametrize('count', chain(range(58), range(-1, -57, -1))) + def test_roundtrip(self, bitorder, count): + if count < 0: + # one extra zero of padding + cutoff = count - 1 + else: + cutoff = count + # test complete invertibility of packbits and unpackbits with count + packed = np.packbits(self.x, bitorder=bitorder) + unpacked = np.unpackbits(packed, count=count, bitorder=bitorder) + assert_equal(unpacked.dtype, np.uint8) + assert_array_equal(unpacked, self.padded1[:cutoff]) + + @pytest.mark.parametrize('kwargs', [ + {}, {'count': None}, + ]) + def test_count(self, kwargs): + packed = np.packbits(self.x) + unpacked = np.unpackbits(packed, **kwargs) + assert_equal(unpacked.dtype, np.uint8) + assert_array_equal(unpacked, self.padded1[:-1]) + + @pytest.mark.parametrize('bitorder', ('little', 'big')) + # delta==-1 when count<0 because 
one extra zero of padding + @pytest.mark.parametrize('count', chain(range(8), range(-1, -9, -1))) + def test_roundtrip_axis(self, bitorder, count): + if count < 0: + # one extra zero of padding + cutoff = count - 1 + else: + cutoff = count + packed0 = np.packbits(self.x, axis=0, bitorder=bitorder) + unpacked0 = np.unpackbits(packed0, axis=0, count=count, + bitorder=bitorder) + assert_equal(unpacked0.dtype, np.uint8) + assert_array_equal(unpacked0, self.padded2[:cutoff, :self.x.shape[1]]) + + packed1 = np.packbits(self.x, axis=1, bitorder=bitorder) + unpacked1 = np.unpackbits(packed1, axis=1, count=count, + bitorder=bitorder) + assert_equal(unpacked1.dtype, np.uint8) + assert_array_equal(unpacked1, self.padded2[:self.x.shape[0], :cutoff]) + + @pytest.mark.parametrize('kwargs', [ + {}, {'count': None}, + {'bitorder' : 'little'}, + {'bitorder': 'little', 'count': None}, + {'bitorder' : 'big'}, + {'bitorder': 'big', 'count': None}, + ]) + def test_axis_count(self, kwargs): + packed0 = np.packbits(self.x, axis=0) + unpacked0 = np.unpackbits(packed0, axis=0, **kwargs) + assert_equal(unpacked0.dtype, np.uint8) + if kwargs.get('bitorder', 'big') == 'big': + assert_array_equal(unpacked0, self.padded2[:-1, :self.x.shape[1]]) + else: + assert_array_equal(unpacked0[::-1, :], self.padded2[:-1, :self.x.shape[1]]) + + packed1 = np.packbits(self.x, axis=1) + unpacked1 = np.unpackbits(packed1, axis=1, **kwargs) + assert_equal(unpacked1.dtype, np.uint8) + if kwargs.get('bitorder', 'big') == 'big': + assert_array_equal(unpacked1, self.padded2[:self.x.shape[0], :-1]) + else: + assert_array_equal(unpacked1[:, ::-1], self.padded2[:self.x.shape[0], :-1]) + + def test_bad_count(self): + packed0 = np.packbits(self.x, axis=0) + assert_raises(ValueError, np.unpackbits, packed0, axis=0, count=-9) + packed1 = np.packbits(self.x, axis=1) + assert_raises(ValueError, np.unpackbits, packed1, axis=1, count=-9) + packed = np.packbits(self.x) + assert_raises(ValueError, np.unpackbits, packed, count=-57) diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/test_polynomial.py b/MLPY/Lib/site-packages/numpy/lib/tests/test_polynomial.py new file mode 100644 index 0000000000000000000000000000000000000000..c07ce9202fc8617444e20a80a5eb4477fb11b15c --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/tests/test_polynomial.py @@ -0,0 +1,282 @@ +import numpy as np +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, assert_almost_equal, + assert_array_almost_equal, assert_raises, assert_allclose + ) + + +class TestPolynomial: + def test_poly1d_str_and_repr(self): + p = np.poly1d([1., 2, 3]) + assert_equal(repr(p), 'poly1d([1., 2., 3.])') + assert_equal(str(p), + ' 2\n' + '1 x + 2 x + 3') + + q = np.poly1d([3., 2, 1]) + assert_equal(repr(q), 'poly1d([3., 2., 1.])') + assert_equal(str(q), + ' 2\n' + '3 x + 2 x + 1') + + r = np.poly1d([1.89999 + 2j, -3j, -5.12345678, 2 + 1j]) + assert_equal(str(r), + ' 3 2\n' + '(1.9 + 2j) x - 3j x - 5.123 x + (2 + 1j)') + + assert_equal(str(np.poly1d([-3, -2, -1])), + ' 2\n' + '-3 x - 2 x - 1') + + def test_poly1d_resolution(self): + p = np.poly1d([1., 2, 3]) + q = np.poly1d([3., 2, 1]) + assert_equal(p(0), 3.0) + assert_equal(p(5), 38.0) + assert_equal(q(0), 1.0) + assert_equal(q(5), 86.0) + + def test_poly1d_math(self): + # here we use some simple coeffs to make calculations easier + p = np.poly1d([1., 2, 4]) + q = np.poly1d([4., 2, 1]) + assert_equal(p/q, (np.poly1d([0.25]), np.poly1d([1.5, 3.75]))) + assert_equal(p.integ(), np.poly1d([1/3, 1., 4., 0.])) + assert_equal(p.integ(1), 
np.poly1d([1/3, 1., 4., 0.])) + + p = np.poly1d([1., 2, 3]) + q = np.poly1d([3., 2, 1]) + assert_equal(p * q, np.poly1d([3., 8., 14., 8., 3.])) + assert_equal(p + q, np.poly1d([4., 4., 4.])) + assert_equal(p - q, np.poly1d([-2., 0., 2.])) + assert_equal(p ** 4, np.poly1d([1., 8., 36., 104., 214., 312., 324., 216., 81.])) + assert_equal(p(q), np.poly1d([9., 12., 16., 8., 6.])) + assert_equal(q(p), np.poly1d([3., 12., 32., 40., 34.])) + assert_equal(p.deriv(), np.poly1d([2., 2.])) + assert_equal(p.deriv(2), np.poly1d([2.])) + assert_equal(np.polydiv(np.poly1d([1, 0, -1]), np.poly1d([1, 1])), + (np.poly1d([1., -1.]), np.poly1d([0.]))) + + def test_poly1d_misc(self): + p = np.poly1d([1., 2, 3]) + assert_equal(np.asarray(p), np.array([1., 2., 3.])) + assert_equal(len(p), 2) + assert_equal((p[0], p[1], p[2], p[3]), (3.0, 2.0, 1.0, 0)) + + def test_poly1d_variable_arg(self): + q = np.poly1d([1., 2, 3], variable='y') + assert_equal(str(q), + ' 2\n' + '1 y + 2 y + 3') + q = np.poly1d([1., 2, 3], variable='lambda') + assert_equal(str(q), + ' 2\n' + '1 lambda + 2 lambda + 3') + + def test_poly(self): + assert_array_almost_equal(np.poly([3, -np.sqrt(2), np.sqrt(2)]), + [1, -3, -2, 6]) + + # From matlab docs + A = [[1, 2, 3], [4, 5, 6], [7, 8, 0]] + assert_array_almost_equal(np.poly(A), [1, -6, -72, -27]) + + # Should produce real output for perfect conjugates + assert_(np.isrealobj(np.poly([+1.082j, +2.613j, -2.613j, -1.082j]))) + assert_(np.isrealobj(np.poly([0+1j, -0+-1j, 1+2j, + 1-2j, 1.+3.5j, 1-3.5j]))) + assert_(np.isrealobj(np.poly([1j, -1j, 1+2j, 1-2j, 1+3j, 1-3.j]))) + assert_(np.isrealobj(np.poly([1j, -1j, 1+2j, 1-2j]))) + assert_(np.isrealobj(np.poly([1j, -1j, 2j, -2j]))) + assert_(np.isrealobj(np.poly([1j, -1j]))) + assert_(np.isrealobj(np.poly([1, -1]))) + + assert_(np.iscomplexobj(np.poly([1j, -1.0000001j]))) + + np.random.seed(42) + a = np.random.randn(100) + 1j*np.random.randn(100) + assert_(np.isrealobj(np.poly(np.concatenate((a, np.conjugate(a)))))) + + def test_roots(self): + assert_array_equal(np.roots([1, 0, 0]), [0, 0]) + + def test_str_leading_zeros(self): + p = np.poly1d([4, 3, 2, 1]) + p[3] = 0 + assert_equal(str(p), + " 2\n" + "3 x + 2 x + 1") + + p = np.poly1d([1, 2]) + p[0] = 0 + p[1] = 0 + assert_equal(str(p), " \n0") + + def test_polyfit(self): + c = np.array([3., 2., 1.]) + x = np.linspace(0, 2, 7) + y = np.polyval(c, x) + err = [1, -1, 1, -1, 1, -1, 1] + weights = np.arange(8, 1, -1)**2/7.0 + + # Check exception when too few points for variance estimate. 
Note that
+        # the estimate requires the number of data points to exceed
+        # degree + 1
+        assert_raises(ValueError, np.polyfit,
+                      [1], [1], deg=0, cov=True)
+
+        # check 1D case
+        m, cov = np.polyfit(x, y+err, 2, cov=True)
+        est = [3.8571, 0.2857, 1.619]
+        assert_almost_equal(est, m, decimal=4)
+        val0 = [[ 1.4694, -2.9388, 0.8163],
+                [-2.9388, 6.3673, -2.1224],
+                [ 0.8163, -2.1224, 1.161 ]]
+        assert_almost_equal(val0, cov, decimal=4)
+
+        m2, cov2 = np.polyfit(x, y+err, 2, w=weights, cov=True)
+        assert_almost_equal([4.8927, -1.0177, 1.7768], m2, decimal=4)
+        val = [[ 4.3964, -5.0052, 0.4878],
+               [-5.0052, 6.8067, -0.9089],
+               [ 0.4878, -0.9089, 0.3337]]
+        assert_almost_equal(val, cov2, decimal=4)
+
+        m3, cov3 = np.polyfit(x, y+err, 2, w=weights, cov="unscaled")
+        assert_almost_equal([4.8927, -1.0177, 1.7768], m3, decimal=4)
+        val = [[ 0.1473, -0.1677, 0.0163],
+               [-0.1677, 0.228 , -0.0304],
+               [ 0.0163, -0.0304, 0.0112]]
+        assert_almost_equal(val, cov3, decimal=4)
+
+        # check 2D (n,1) case
+        y = y[:, np.newaxis]
+        c = c[:, np.newaxis]
+        assert_almost_equal(c, np.polyfit(x, y, 2))
+        # check 2D (n,2) case
+        yy = np.concatenate((y, y), axis=1)
+        cc = np.concatenate((c, c), axis=1)
+        assert_almost_equal(cc, np.polyfit(x, yy, 2))
+
+        m, cov = np.polyfit(x, yy + np.array(err)[:, np.newaxis], 2, cov=True)
+        assert_almost_equal(est, m[:, 0], decimal=4)
+        assert_almost_equal(est, m[:, 1], decimal=4)
+        assert_almost_equal(val0, cov[:, :, 0], decimal=4)
+        assert_almost_equal(val0, cov[:, :, 1], decimal=4)
+
+        # check order 1 (deg=0) case, where the analytic results are simple
+        np.random.seed(123)
+        y = np.random.normal(size=(4, 10000))
+        mean, cov = np.polyfit(np.zeros(y.shape[0]), y, deg=0, cov=True)
+        # Should get sigma_mean = sigma/sqrt(N) = 1./sqrt(4) = 0.5.
+        assert_allclose(mean.std(), 0.5, atol=0.01)
+        assert_allclose(np.sqrt(cov.mean()), 0.5, atol=0.01)
+        # Without scaling, since reduced chi2 is 1, the result should be the same.
+        mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=np.ones(y.shape[0]),
+                               deg=0, cov="unscaled")
+        assert_allclose(mean.std(), 0.5, atol=0.01)
+        assert_almost_equal(np.sqrt(cov.mean()), 0.5)
+        # If we estimate our errors wrong, no change with scaling:
+        w = np.full(y.shape[0], 1./0.5)
+        mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=w, deg=0, cov=True)
+        assert_allclose(mean.std(), 0.5, atol=0.01)
+        assert_allclose(np.sqrt(cov.mean()), 0.5, atol=0.01)
+        # But if we do not scale, our estimate for the error in the mean will
+        # differ.
+ mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=w, deg=0, cov="unscaled") + assert_allclose(mean.std(), 0.5, atol=0.01) + assert_almost_equal(np.sqrt(cov.mean()), 0.25) + + def test_objects(self): + from decimal import Decimal + p = np.poly1d([Decimal('4.0'), Decimal('3.0'), Decimal('2.0')]) + p2 = p * Decimal('1.333333333333333') + assert_(p2[1] == Decimal("3.9999999999999990")) + p2 = p.deriv() + assert_(p2[1] == Decimal('8.0')) + p2 = p.integ() + assert_(p2[3] == Decimal("1.333333333333333333333333333")) + assert_(p2[2] == Decimal('1.5')) + assert_(np.issubdtype(p2.coeffs.dtype, np.object_)) + p = np.poly([Decimal(1), Decimal(2)]) + assert_equal(np.poly([Decimal(1), Decimal(2)]), + [1, Decimal(-3), Decimal(2)]) + + def test_complex(self): + p = np.poly1d([3j, 2j, 1j]) + p2 = p.integ() + assert_((p2.coeffs == [1j, 1j, 1j, 0]).all()) + p2 = p.deriv() + assert_((p2.coeffs == [6j, 2j]).all()) + + def test_integ_coeffs(self): + p = np.poly1d([3, 2, 1]) + p2 = p.integ(3, k=[9, 7, 6]) + assert_( + (p2.coeffs == [1/4./5., 1/3./4., 1/2./3., 9/1./2., 7, 6]).all()) + + def test_zero_dims(self): + try: + np.poly(np.zeros((0, 0))) + except ValueError: + pass + + def test_poly_int_overflow(self): + """ + Regression test for gh-5096. + """ + v = np.arange(1, 21) + assert_almost_equal(np.poly(v), np.poly(np.diag(v))) + + def test_zero_poly_dtype(self): + """ + Regression test for gh-16354. + """ + z = np.array([0, 0, 0]) + p = np.poly1d(z.astype(np.int64)) + assert_equal(p.coeffs.dtype, np.int64) + + p = np.poly1d(z.astype(np.float32)) + assert_equal(p.coeffs.dtype, np.float32) + + p = np.poly1d(z.astype(np.complex64)) + assert_equal(p.coeffs.dtype, np.complex64) + + def test_poly_eq(self): + p = np.poly1d([1, 2, 3]) + p2 = np.poly1d([1, 2, 4]) + assert_equal(p == None, False) + assert_equal(p != None, True) + assert_equal(p == p, True) + assert_equal(p == p2, False) + assert_equal(p != p2, True) + + def test_polydiv(self): + b = np.poly1d([2, 6, 6, 1]) + a = np.poly1d([-1j, (1+2j), -(2+1j), 1]) + q, r = np.polydiv(b, a) + assert_equal(q.coeffs.dtype, np.complex128) + assert_equal(r.coeffs.dtype, np.complex128) + assert_equal(q*a + r, b) + + c = [1, 2, 3] + d = np.poly1d([1, 2, 3]) + s, t = np.polydiv(c, d) + assert isinstance(s, np.poly1d) + assert isinstance(t, np.poly1d) + u, v = np.polydiv(d, c) + assert isinstance(u, np.poly1d) + assert isinstance(v, np.poly1d) + + def test_poly_coeffs_mutable(self): + """ Coefficients should be modifiable """ + p = np.poly1d([1, 2, 3]) + + p.coeffs += 1 + assert_equal(p.coeffs, [2, 3, 4]) + + p.coeffs[2] += 10 + assert_equal(p.coeffs, [2, 3, 14]) + + # this never used to be allowed - let's not add features to deprecated + # APIs + assert_raises(AttributeError, setattr, p, 'coeffs', np.array(1)) diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/test_recfunctions.py b/MLPY/Lib/site-packages/numpy/lib/tests/test_recfunctions.py new file mode 100644 index 0000000000000000000000000000000000000000..1668d54e167a6b018e76781a4f16ef14e03c092c --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/tests/test_recfunctions.py @@ -0,0 +1,979 @@ +import pytest + +import numpy as np +import numpy.ma as ma +from numpy.ma.mrecords import MaskedRecords +from numpy.ma.testutils import assert_equal +from numpy.testing import assert_, assert_raises +from numpy.lib.recfunctions import ( + drop_fields, rename_fields, get_fieldstructure, recursive_fill_fields, + find_duplicates, merge_arrays, append_fields, stack_arrays, join_by, + repack_fields, unstructured_to_structured, 
structured_to_unstructured, + apply_along_fields, require_fields, assign_fields_by_name) +get_fieldspec = np.lib.recfunctions._get_fieldspec +get_names = np.lib.recfunctions.get_names +get_names_flat = np.lib.recfunctions.get_names_flat +zip_descr = np.lib.recfunctions._zip_descr +zip_dtype = np.lib.recfunctions._zip_dtype + + +class TestRecFunctions: + # Misc tests + + def setup(self): + x = np.array([1, 2, ]) + y = np.array([10, 20, 30]) + z = np.array([('A', 1.), ('B', 2.)], + dtype=[('A', '|S3'), ('B', float)]) + w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + self.data = (w, x, y, z) + + def test_zip_descr(self): + # Test zip_descr + (w, x, y, z) = self.data + + # Std array + test = zip_descr((x, x), flatten=True) + assert_equal(test, + np.dtype([('', int), ('', int)])) + test = zip_descr((x, x), flatten=False) + assert_equal(test, + np.dtype([('', int), ('', int)])) + + # Std & flexible-dtype + test = zip_descr((x, z), flatten=True) + assert_equal(test, + np.dtype([('', int), ('A', '|S3'), ('B', float)])) + test = zip_descr((x, z), flatten=False) + assert_equal(test, + np.dtype([('', int), + ('', [('A', '|S3'), ('B', float)])])) + + # Standard & nested dtype + test = zip_descr((x, w), flatten=True) + assert_equal(test, + np.dtype([('', int), + ('a', int), + ('ba', float), ('bb', int)])) + test = zip_descr((x, w), flatten=False) + assert_equal(test, + np.dtype([('', int), + ('', [('a', int), + ('b', [('ba', float), ('bb', int)])])])) + + def test_drop_fields(self): + # Test drop_fields + a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + + # A basic field + test = drop_fields(a, 'a') + control = np.array([((2, 3.0),), ((5, 6.0),)], + dtype=[('b', [('ba', float), ('bb', int)])]) + assert_equal(test, control) + + # Another basic field (but nesting two fields) + test = drop_fields(a, 'b') + control = np.array([(1,), (4,)], dtype=[('a', int)]) + assert_equal(test, control) + + # A nested sub-field + test = drop_fields(a, ['ba', ]) + control = np.array([(1, (3.0,)), (4, (6.0,))], + dtype=[('a', int), ('b', [('bb', int)])]) + assert_equal(test, control) + + # All the nested sub-field from a field: zap that field + test = drop_fields(a, ['ba', 'bb']) + control = np.array([(1,), (4,)], dtype=[('a', int)]) + assert_equal(test, control) + + # dropping all fields results in an array with no fields + test = drop_fields(a, ['a', 'b']) + control = np.array([(), ()], dtype=[]) + assert_equal(test, control) + + def test_rename_fields(self): + # Test rename fields + a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))], + dtype=[('a', int), + ('b', [('ba', float), ('bb', (float, 2))])]) + test = rename_fields(a, {'a': 'A', 'bb': 'BB'}) + newdtype = [('A', int), ('b', [('ba', float), ('BB', (float, 2))])] + control = a.view(newdtype) + assert_equal(test.dtype, newdtype) + assert_equal(test, control) + + def test_get_names(self): + # Test get_names + ndtype = np.dtype([('A', '|S3'), ('B', float)]) + test = get_names(ndtype) + assert_equal(test, ('A', 'B')) + + ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])]) + test = get_names(ndtype) + assert_equal(test, ('a', ('b', ('ba', 'bb')))) + + ndtype = np.dtype([('a', int), ('b', [])]) + test = get_names(ndtype) + assert_equal(test, ('a', ('b', ()))) + + ndtype = np.dtype([]) + test = get_names(ndtype) + assert_equal(test, ()) + + def test_get_names_flat(self): + # Test get_names_flat + ndtype = np.dtype([('A', '|S3'), ('B', 
float)]) + test = get_names_flat(ndtype) + assert_equal(test, ('A', 'B')) + + ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])]) + test = get_names_flat(ndtype) + assert_equal(test, ('a', 'b', 'ba', 'bb')) + + ndtype = np.dtype([('a', int), ('b', [])]) + test = get_names_flat(ndtype) + assert_equal(test, ('a', 'b')) + + ndtype = np.dtype([]) + test = get_names_flat(ndtype) + assert_equal(test, ()) + + def test_get_fieldstructure(self): + # Test get_fieldstructure + + # No nested fields + ndtype = np.dtype([('A', '|S3'), ('B', float)]) + test = get_fieldstructure(ndtype) + assert_equal(test, {'A': [], 'B': []}) + + # One 1-nested field + ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])]) + test = get_fieldstructure(ndtype) + assert_equal(test, {'A': [], 'B': [], 'BA': ['B', ], 'BB': ['B']}) + + # One 2-nested fields + ndtype = np.dtype([('A', int), + ('B', [('BA', int), + ('BB', [('BBA', int), ('BBB', int)])])]) + test = get_fieldstructure(ndtype) + control = {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], + 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']} + assert_equal(test, control) + + # 0 fields + ndtype = np.dtype([]) + test = get_fieldstructure(ndtype) + assert_equal(test, {}) + + def test_find_duplicates(self): + # Test find_duplicates + a = ma.array([(2, (2., 'B')), (1, (2., 'B')), (2, (2., 'B')), + (1, (1., 'B')), (2, (2., 'B')), (2, (2., 'C'))], + mask=[(0, (0, 0)), (0, (0, 0)), (0, (0, 0)), + (0, (0, 0)), (1, (0, 0)), (0, (1, 0))], + dtype=[('A', int), ('B', [('BA', float), ('BB', '|S1')])]) + + test = find_duplicates(a, ignoremask=False, return_index=True) + control = [0, 2] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, key='A', return_index=True) + control = [0, 1, 2, 3, 5] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, key='B', return_index=True) + control = [0, 1, 2, 4] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, key='BA', return_index=True) + control = [0, 1, 2, 4] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, key='BB', return_index=True) + control = [0, 1, 2, 3, 4] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + def test_find_duplicates_ignoremask(self): + # Test the ignoremask option of find_duplicates + ndtype = [('a', int)] + a = ma.array([1, 1, 1, 2, 2, 3, 3], + mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype) + test = find_duplicates(a, ignoremask=True, return_index=True) + control = [0, 1, 3, 4] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, ignoremask=False, return_index=True) + control = [0, 1, 2, 3, 4, 6] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + def test_repack_fields(self): + dt = np.dtype('u1,f4,i8', align=True) + a = np.zeros(2, dtype=dt) + + assert_equal(repack_fields(dt), np.dtype('u1,f4,i8')) + assert_equal(repack_fields(a).itemsize, 13) + assert_equal(repack_fields(repack_fields(dt), align=True), dt) + + # make sure type is preserved + dt = np.dtype((np.record, dt)) + assert_(repack_fields(dt).type is np.record) + + def test_structured_to_unstructured(self): + a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)]) + out = structured_to_unstructured(a) + assert_equal(out, np.zeros((4,5), dtype='f8')) + + b = np.array([(1, 2, 5), (4, 5, 7), 
(7, 8 ,11), (10, 11, 12)], + dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')]) + out = np.mean(structured_to_unstructured(b[['x', 'z']]), axis=-1) + assert_equal(out, np.array([ 3. , 5.5, 9. , 11. ])) + out = np.mean(structured_to_unstructured(b[['x']]), axis=-1) + assert_equal(out, np.array([ 1. , 4. , 7. , 10. ])) + + c = np.arange(20).reshape((4,5)) + out = unstructured_to_structured(c, a.dtype) + want = np.array([( 0, ( 1., 2), [ 3., 4.]), + ( 5, ( 6., 7), [ 8., 9.]), + (10, (11., 12), [13., 14.]), + (15, (16., 17), [18., 19.])], + dtype=[('a', 'i4'), + ('b', [('f0', 'f4'), ('f1', 'u2')]), + ('c', 'f4', (2,))]) + assert_equal(out, want) + + d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)], + dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')]) + assert_equal(apply_along_fields(np.mean, d), + np.array([ 8.0/3, 16.0/3, 26.0/3, 11. ])) + assert_equal(apply_along_fields(np.mean, d[['x', 'z']]), + np.array([ 3. , 5.5, 9. , 11. ])) + + # check that for uniform field dtypes we get a view, not a copy: + d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)], + dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'i4')]) + dd = structured_to_unstructured(d) + ddd = unstructured_to_structured(dd, d.dtype) + assert_(dd.base is d) + assert_(ddd.base is d) + + # including uniform fields with subarrays unpacked + d = np.array([(1, [2, 3], [[ 4, 5], [ 6, 7]]), + (8, [9, 10], [[11, 12], [13, 14]])], + dtype=[('x0', 'i4'), ('x1', ('i4', 2)), + ('x2', ('i4', (2, 2)))]) + dd = structured_to_unstructured(d) + ddd = unstructured_to_structured(dd, d.dtype) + assert_(dd.base is d) + assert_(ddd.base is d) + + # test that nested fields with identical names don't break anything + point = np.dtype([('x', int), ('y', int)]) + triangle = np.dtype([('a', point), ('b', point), ('c', point)]) + arr = np.zeros(10, triangle) + res = structured_to_unstructured(arr, dtype=int) + assert_equal(res, np.zeros((10, 6), dtype=int)) + + + # test nested combinations of subarrays and structured arrays, gh-13333 + def subarray(dt, shape): + return np.dtype((dt, shape)) + + def structured(*dts): + return np.dtype([('x{}'.format(i), dt) for i, dt in enumerate(dts)]) + + def inspect(dt, dtype=None): + arr = np.zeros((), dt) + ret = structured_to_unstructured(arr, dtype=dtype) + backarr = unstructured_to_structured(ret, dt) + return ret.shape, ret.dtype, backarr.dtype + + dt = structured(subarray(structured(np.int32, np.int32), 3)) + assert_equal(inspect(dt), ((6,), np.int32, dt)) + + dt = structured(subarray(subarray(np.int32, 2), 2)) + assert_equal(inspect(dt), ((4,), np.int32, dt)) + + dt = structured(np.int32) + assert_equal(inspect(dt), ((1,), np.int32, dt)) + + dt = structured(np.int32, subarray(subarray(np.int32, 2), 2)) + assert_equal(inspect(dt), ((5,), np.int32, dt)) + + dt = structured() + assert_raises(ValueError, structured_to_unstructured, np.zeros(3, dt)) + + # these currently don't work, but we may make it work in the future + assert_raises(NotImplementedError, structured_to_unstructured, + np.zeros(3, dt), dtype=np.int32) + assert_raises(NotImplementedError, unstructured_to_structured, + np.zeros((3,0), dtype=np.int32)) + + def test_field_assignment_by_name(self): + a = np.ones(2, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')]) + newdt = [('b', 'f4'), ('c', 'u1')] + + assert_equal(require_fields(a, newdt), np.ones(2, newdt)) + + b = np.array([(1,2), (3,4)], dtype=newdt) + assign_fields_by_name(a, b, zero_unassigned=False) + assert_equal(a, np.array([(1,1,2),(1,3,4)], dtype=a.dtype)) + assign_fields_by_name(a, b) + 
assert_equal(a, np.array([(0,1,2),(0,3,4)], dtype=a.dtype)) + + # test nested fields + a = np.ones(2, dtype=[('a', [('b', 'f8'), ('c', 'u1')])]) + newdt = [('a', [('c', 'u1')])] + assert_equal(require_fields(a, newdt), np.ones(2, newdt)) + b = np.array([((2,),), ((3,),)], dtype=newdt) + assign_fields_by_name(a, b, zero_unassigned=False) + assert_equal(a, np.array([((1,2),), ((1,3),)], dtype=a.dtype)) + assign_fields_by_name(a, b) + assert_equal(a, np.array([((0,2),), ((0,3),)], dtype=a.dtype)) + + # test unstructured code path for 0d arrays + a, b = np.array(3), np.array(0) + assign_fields_by_name(b, a) + assert_equal(b[()], 3) + + +class TestRecursiveFillFields: + # Test recursive_fill_fields. + def test_simple_flexible(self): + # Test recursive_fill_fields on flexible-array + a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)]) + b = np.zeros((3,), dtype=a.dtype) + test = recursive_fill_fields(a, b) + control = np.array([(1, 10.), (2, 20.), (0, 0.)], + dtype=[('A', int), ('B', float)]) + assert_equal(test, control) + + def test_masked_flexible(self): + # Test recursive_fill_fields on masked flexible-array + a = ma.array([(1, 10.), (2, 20.)], mask=[(0, 1), (1, 0)], + dtype=[('A', int), ('B', float)]) + b = ma.zeros((3,), dtype=a.dtype) + test = recursive_fill_fields(a, b) + control = ma.array([(1, 10.), (2, 20.), (0, 0.)], + mask=[(0, 1), (1, 0), (0, 0)], + dtype=[('A', int), ('B', float)]) + assert_equal(test, control) + + +class TestMergeArrays: + # Test merge_arrays + + def setup(self): + x = np.array([1, 2, ]) + y = np.array([10, 20, 30]) + z = np.array( + [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) + w = np.array( + [(1, (2, 3.0, ())), (4, (5, 6.0, ()))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int), ('bc', [])])]) + self.data = (w, x, y, z) + + def test_solo(self): + # Test merge_arrays on a single array. 
+ (_, x, _, z) = self.data + + test = merge_arrays(x) + control = np.array([(1,), (2,)], dtype=[('f0', int)]) + assert_equal(test, control) + test = merge_arrays((x,)) + assert_equal(test, control) + + test = merge_arrays(z, flatten=False) + assert_equal(test, z) + test = merge_arrays(z, flatten=True) + assert_equal(test, z) + + def test_solo_w_flatten(self): + # Test merge_arrays on a single array w & w/o flattening + w = self.data[0] + test = merge_arrays(w, flatten=False) + assert_equal(test, w) + + test = merge_arrays(w, flatten=True) + control = np.array([(1, 2, 3.0), (4, 5, 6.0)], + dtype=[('a', int), ('ba', float), ('bb', int)]) + assert_equal(test, control) + + def test_standard(self): + # Test standard & standard + # Test merge arrays + (_, x, y, _) = self.data + test = merge_arrays((x, y), usemask=False) + control = np.array([(1, 10), (2, 20), (-1, 30)], + dtype=[('f0', int), ('f1', int)]) + assert_equal(test, control) + + test = merge_arrays((x, y), usemask=True) + control = ma.array([(1, 10), (2, 20), (-1, 30)], + mask=[(0, 0), (0, 0), (1, 0)], + dtype=[('f0', int), ('f1', int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + def test_flatten(self): + # Test standard & flexible + (_, x, _, z) = self.data + test = merge_arrays((x, z), flatten=True) + control = np.array([(1, 'A', 1.), (2, 'B', 2.)], + dtype=[('f0', int), ('A', '|S3'), ('B', float)]) + assert_equal(test, control) + + test = merge_arrays((x, z), flatten=False) + control = np.array([(1, ('A', 1.)), (2, ('B', 2.))], + dtype=[('f0', int), + ('f1', [('A', '|S3'), ('B', float)])]) + assert_equal(test, control) + + def test_flatten_wflexible(self): + # Test flatten standard & nested + (w, x, _, _) = self.data + test = merge_arrays((x, w), flatten=True) + control = np.array([(1, 1, 2, 3.0), (2, 4, 5, 6.0)], + dtype=[('f0', int), + ('a', int), ('ba', float), ('bb', int)]) + assert_equal(test, control) + + test = merge_arrays((x, w), flatten=False) + controldtype = [('f0', int), + ('f1', [('a', int), + ('b', [('ba', float), ('bb', int), ('bc', [])])])] + control = np.array([(1., (1, (2, 3.0, ()))), (2, (4, (5, 6.0, ())))], + dtype=controldtype) + assert_equal(test, control) + + def test_wmasked_arrays(self): + # Test merge_arrays masked arrays + (_, x, _, _) = self.data + mx = ma.array([1, 2, 3], mask=[1, 0, 0]) + test = merge_arrays((x, mx), usemask=True) + control = ma.array([(1, 1), (2, 2), (-1, 3)], + mask=[(0, 1), (0, 0), (1, 0)], + dtype=[('f0', int), ('f1', int)]) + assert_equal(test, control) + test = merge_arrays((x, mx), usemask=True, asrecarray=True) + assert_equal(test, control) + assert_(isinstance(test, MaskedRecords)) + + def test_w_singlefield(self): + # Test single field + test = merge_arrays((np.array([1, 2]).view([('a', int)]), + np.array([10., 20., 30.])),) + control = ma.array([(1, 10.), (2, 20.), (-1, 30.)], + mask=[(0, 0), (0, 0), (1, 0)], + dtype=[('a', int), ('f1', float)]) + assert_equal(test, control) + + def test_w_shorter_flex(self): + # Test merge_arrays w/ a shorter flexndarray. 
+ z = self.data[-1] + + # Fixme, this test looks incomplete and broken + #test = merge_arrays((z, np.array([10, 20, 30]).view([('C', int)]))) + #control = np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)], + # dtype=[('A', '|S3'), ('B', float), ('C', int)]) + #assert_equal(test, control) + + # Hack to avoid pyflakes warnings about unused variables + merge_arrays((z, np.array([10, 20, 30]).view([('C', int)]))) + np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)], + dtype=[('A', '|S3'), ('B', float), ('C', int)]) + + def test_singlerecord(self): + (_, x, y, z) = self.data + test = merge_arrays((x[0], y[0], z[0]), usemask=False) + control = np.array([(1, 10, ('A', 1))], + dtype=[('f0', int), + ('f1', int), + ('f2', [('A', '|S3'), ('B', float)])]) + assert_equal(test, control) + + +class TestAppendFields: + # Test append_fields + + def setup(self): + x = np.array([1, 2, ]) + y = np.array([10, 20, 30]) + z = np.array( + [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) + w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + self.data = (w, x, y, z) + + def test_append_single(self): + # Test simple case + (_, x, _, _) = self.data + test = append_fields(x, 'A', data=[10, 20, 30]) + control = ma.array([(1, 10), (2, 20), (-1, 30)], + mask=[(0, 0), (0, 0), (1, 0)], + dtype=[('f0', int), ('A', int)],) + assert_equal(test, control) + + def test_append_double(self): + # Test simple case + (_, x, _, _) = self.data + test = append_fields(x, ('A', 'B'), data=[[10, 20, 30], [100, 200]]) + control = ma.array([(1, 10, 100), (2, 20, 200), (-1, 30, -1)], + mask=[(0, 0, 0), (0, 0, 0), (1, 0, 1)], + dtype=[('f0', int), ('A', int), ('B', int)],) + assert_equal(test, control) + + def test_append_on_flex(self): + # Test append_fields on flexible type arrays + z = self.data[-1] + test = append_fields(z, 'C', data=[10, 20, 30]) + control = ma.array([('A', 1., 10), ('B', 2., 20), (-1, -1., 30)], + mask=[(0, 0, 0), (0, 0, 0), (1, 1, 0)], + dtype=[('A', '|S3'), ('B', float), ('C', int)],) + assert_equal(test, control) + + def test_append_on_nested(self): + # Test append_fields on nested fields + w = self.data[0] + test = append_fields(w, 'C', data=[10, 20, 30]) + control = ma.array([(1, (2, 3.0), 10), + (4, (5, 6.0), 20), + (-1, (-1, -1.), 30)], + mask=[( + 0, (0, 0), 0), (0, (0, 0), 0), (1, (1, 1), 0)], + dtype=[('a', int), + ('b', [('ba', float), ('bb', int)]), + ('C', int)],) + assert_equal(test, control) + + +class TestStackArrays: + # Test stack_arrays + def setup(self): + x = np.array([1, 2, ]) + y = np.array([10, 20, 30]) + z = np.array( + [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) + w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + self.data = (w, x, y, z) + + def test_solo(self): + # Test stack_arrays on single arrays + (_, x, _, _) = self.data + test = stack_arrays((x,)) + assert_equal(test, x) + assert_(test is x) + + test = stack_arrays(x) + assert_equal(test, x) + assert_(test is x) + + def test_unnamed_fields(self): + # Tests combinations of arrays w/o named fields + (_, x, y, _) = self.data + + test = stack_arrays((x, x), usemask=False) + control = np.array([1, 2, 1, 2]) + assert_equal(test, control) + + test = stack_arrays((x, y), usemask=False) + control = np.array([1, 2, 10, 20, 30]) + assert_equal(test, control) + + test = stack_arrays((y, x), usemask=False) + control = np.array([10, 20, 30, 1, 2]) + assert_equal(test, control) + + def 
test_unnamed_and_named_fields(self): + # Test combination of arrays w/ & w/o named fields + (_, x, _, z) = self.data + + test = stack_arrays((x, z)) + control = ma.array([(1, -1, -1), (2, -1, -1), + (-1, 'A', 1), (-1, 'B', 2)], + mask=[(0, 1, 1), (0, 1, 1), + (1, 0, 0), (1, 0, 0)], + dtype=[('f0', int), ('A', '|S3'), ('B', float)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + test = stack_arrays((z, x)) + control = ma.array([('A', 1, -1), ('B', 2, -1), + (-1, -1, 1), (-1, -1, 2), ], + mask=[(0, 0, 1), (0, 0, 1), + (1, 1, 0), (1, 1, 0)], + dtype=[('A', '|S3'), ('B', float), ('f2', int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + test = stack_arrays((z, z, x)) + control = ma.array([('A', 1, -1), ('B', 2, -1), + ('A', 1, -1), ('B', 2, -1), + (-1, -1, 1), (-1, -1, 2), ], + mask=[(0, 0, 1), (0, 0, 1), + (0, 0, 1), (0, 0, 1), + (1, 1, 0), (1, 1, 0)], + dtype=[('A', '|S3'), ('B', float), ('f2', int)]) + assert_equal(test, control) + + def test_matching_named_fields(self): + # Test combination of arrays w/ matching field names + (_, x, _, z) = self.data + zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + dtype=[('A', '|S3'), ('B', float), ('C', float)]) + test = stack_arrays((z, zz)) + control = ma.array([('A', 1, -1), ('B', 2, -1), + ( + 'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + dtype=[('A', '|S3'), ('B', float), ('C', float)], + mask=[(0, 0, 1), (0, 0, 1), + (0, 0, 0), (0, 0, 0), (0, 0, 0)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + test = stack_arrays((z, zz, x)) + ndtype = [('A', '|S3'), ('B', float), ('C', float), ('f3', int)] + control = ma.array([('A', 1, -1, -1), ('B', 2, -1, -1), + ('a', 10., 100., -1), ('b', 20., 200., -1), + ('c', 30., 300., -1), + (-1, -1, -1, 1), (-1, -1, -1, 2)], + dtype=ndtype, + mask=[(0, 0, 1, 1), (0, 0, 1, 1), + (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), + (1, 1, 1, 0), (1, 1, 1, 0)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + def test_defaults(self): + # Test defaults: no exception raised if keys of defaults are not fields. 
+ (_, _, _, z) = self.data + zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + dtype=[('A', '|S3'), ('B', float), ('C', float)]) + defaults = {'A': '???', 'B': -999., 'C': -9999., 'D': -99999.} + test = stack_arrays((z, zz), defaults=defaults) + control = ma.array([('A', 1, -9999.), ('B', 2, -9999.), + ( + 'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + dtype=[('A', '|S3'), ('B', float), ('C', float)], + mask=[(0, 0, 1), (0, 0, 1), + (0, 0, 0), (0, 0, 0), (0, 0, 0)]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + def test_autoconversion(self): + # Tests autoconversion + adtype = [('A', int), ('B', bool), ('C', float)] + a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype) + bdtype = [('A', int), ('B', float), ('C', float)] + b = ma.array([(4, 5, 6)], dtype=bdtype) + control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)], + dtype=bdtype) + test = stack_arrays((a, b), autoconvert=True) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + with assert_raises(TypeError): + stack_arrays((a, b), autoconvert=False) + + def test_checktitles(self): + # Test using titles in the field names + adtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)] + a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype) + bdtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)] + b = ma.array([(4, 5, 6)], dtype=bdtype) + test = stack_arrays((a, b)) + control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)], + dtype=bdtype) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + def test_subdtype(self): + z = np.array([ + ('A', 1), ('B', 2) + ], dtype=[('A', '|S3'), ('B', float, (1,))]) + zz = np.array([ + ('a', [10.], 100.), ('b', [20.], 200.), ('c', [30.], 300.) 
+ ], dtype=[('A', '|S3'), ('B', float, (1,)), ('C', float)]) + + res = stack_arrays((z, zz)) + expected = ma.array( + data=[ + (b'A', [1.0], 0), + (b'B', [2.0], 0), + (b'a', [10.0], 100.0), + (b'b', [20.0], 200.0), + (b'c', [30.0], 300.0)], + mask=[ + (False, [False], True), + (False, [False], True), + (False, [False], False), + (False, [False], False), + (False, [False], False) + ], + dtype=zz.dtype + ) + assert_equal(res.dtype, expected.dtype) + assert_equal(res, expected) + assert_equal(res.mask, expected.mask) + + +class TestJoinBy: + def setup(self): + self.a = np.array(list(zip(np.arange(10), np.arange(50, 60), + np.arange(100, 110))), + dtype=[('a', int), ('b', int), ('c', int)]) + self.b = np.array(list(zip(np.arange(5, 15), np.arange(65, 75), + np.arange(100, 110))), + dtype=[('a', int), ('b', int), ('d', int)]) + + def test_inner_join(self): + # Basic test of join_by + a, b = self.a, self.b + + test = join_by('a', a, b, jointype='inner') + control = np.array([(5, 55, 65, 105, 100), (6, 56, 66, 106, 101), + (7, 57, 67, 107, 102), (8, 58, 68, 108, 103), + (9, 59, 69, 109, 104)], + dtype=[('a', int), ('b1', int), ('b2', int), + ('c', int), ('d', int)]) + assert_equal(test, control) + + def test_join(self): + a, b = self.a, self.b + + # Fixme, this test is broken + #test = join_by(('a', 'b'), a, b) + #control = np.array([(5, 55, 105, 100), (6, 56, 106, 101), + # (7, 57, 107, 102), (8, 58, 108, 103), + # (9, 59, 109, 104)], + # dtype=[('a', int), ('b', int), + # ('c', int), ('d', int)]) + #assert_equal(test, control) + + # Hack to avoid pyflakes unused variable warnings + join_by(('a', 'b'), a, b) + np.array([(5, 55, 105, 100), (6, 56, 106, 101), + (7, 57, 107, 102), (8, 58, 108, 103), + (9, 59, 109, 104)], + dtype=[('a', int), ('b', int), + ('c', int), ('d', int)]) + + def test_join_subdtype(self): + # tests the bug in https://stackoverflow.com/q/44769632/102441 + foo = np.array([(1,)], + dtype=[('key', int)]) + bar = np.array([(1, np.array([1,2,3]))], + dtype=[('key', int), ('value', 'uint16', 3)]) + res = join_by('key', foo, bar) + assert_equal(res, bar.view(ma.MaskedArray)) + + def test_outer_join(self): + a, b = self.a, self.b + + test = join_by(('a', 'b'), a, b, 'outer') + control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1), + (2, 52, 102, -1), (3, 53, 103, -1), + (4, 54, 104, -1), (5, 55, 105, -1), + (5, 65, -1, 100), (6, 56, 106, -1), + (6, 66, -1, 101), (7, 57, 107, -1), + (7, 67, -1, 102), (8, 58, 108, -1), + (8, 68, -1, 103), (9, 59, 109, -1), + (9, 69, -1, 104), (10, 70, -1, 105), + (11, 71, -1, 106), (12, 72, -1, 107), + (13, 73, -1, 108), (14, 74, -1, 109)], + mask=[(0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 1, 0), + (0, 0, 1, 0), (0, 0, 1, 0), + (0, 0, 1, 0), (0, 0, 1, 0)], + dtype=[('a', int), ('b', int), + ('c', int), ('d', int)]) + assert_equal(test, control) + + def test_leftouter_join(self): + a, b = self.a, self.b + + test = join_by(('a', 'b'), a, b, 'leftouter') + control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1), + (2, 52, 102, -1), (3, 53, 103, -1), + (4, 54, 104, -1), (5, 55, 105, -1), + (6, 56, 106, -1), (7, 57, 107, -1), + (8, 58, 108, -1), (9, 59, 109, -1)], + mask=[(0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1)], + dtype=[('a', int), ('b', int), ('c', int), ('d', 
int)])
+        assert_equal(test, control)
+
+    def test_different_field_order(self):
+        # gh-8940
+        a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')])
+        b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])
+        # this should not give a FutureWarning:
+        j = join_by(['c', 'b'], a, b, jointype='inner', usemask=False)
+        assert_equal(j.dtype.names, ['b', 'c', 'a1', 'a2'])
+
+    def test_duplicate_keys(self):
+        a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')])
+        b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])
+        assert_raises(ValueError, join_by, ['a', 'b', 'b'], a, b)
+
+    @pytest.mark.xfail(reason="See comment at gh-9343")
+    def test_same_name_different_dtypes_key(self):
+        a_dtype = np.dtype([('key', 'S5'), ('value', '<f4')])
+        b_dtype = np.dtype([('key', 'S10'), ('value', '<f4')])
+        expected_dtype = np.dtype([
+            ('key', 'S10'), ('value1', '<f4'), ('value2', '<f4')])
+
+        a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype)
+        b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype)
+        res = join_by('key', a, b)
+
+        assert_equal(res.dtype, expected_dtype)
diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/test_shape_base.py b/MLPY/Lib/site-packages/numpy/lib/tests/test_shape_base.py
new file mode 100644
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/lib/tests/test_shape_base.py
+import numpy as np
+import functools
+import sys
+import pytest
+
+from numpy.lib.shape_base import (
+    apply_along_axis, apply_over_axes, array_split, split, hsplit, dsplit,
+    vsplit, dstack, column_stack, expand_dims, kron, tile, take_along_axis,
+    put_along_axis
+    )
+from numpy.testing import (
+    assert_, assert_equal, assert_array_equal, assert_raises, assert_warns
+    )
+
+
+IS_64BIT = sys.maxsize > 2**32
+
+
+def _add_keepdims(func):
+    """ hack in keepdims behavior into a function taking an axis """
+    @functools.wraps(func)
+    def wrapped(a, axis, **kwargs):
+        res = func(a, axis=axis, **kwargs)
+        if axis is None:
+            axis = 0  # res is now a scalar, so we can insert this anywhere
+        return np.expand_dims(res, axis=axis)
+    return wrapped
+
+
+class TestTakeAlongAxis:
+    def test_argequivalent(self):
+        """ Test it translates from arg<func> to <func> """
+        from numpy.random import rand
+        a = rand(3, 4, 5)
+
+        funcs = [
+            (np.sort, np.argsort, dict()),
+            (_add_keepdims(np.min), _add_keepdims(np.argmin), dict()),
+            (_add_keepdims(np.max), _add_keepdims(np.argmax), dict()),
+            (np.partition, np.argpartition, dict(kth=2)),
+        ]
+
+        for func, argfunc, kwargs in funcs:
+            for axis in list(range(a.ndim)) + [None]:
+                a_func = func(a, axis=axis, **kwargs)
+                ai_func = argfunc(a, axis=axis, **kwargs)
+                assert_equal(a_func, take_along_axis(a, ai_func, axis=axis))
+
+    def test_invalid(self):
+        """ Test it errors when indices has too few dimensions """
+        a = np.ones((10, 10))
+        ai = np.ones((10, 2), dtype=np.intp)
+
+        # sanity check
+        take_along_axis(a, ai, axis=1)
+
+        # not enough indices
+        assert_raises(ValueError, take_along_axis, a, np.array(1), axis=1)
+        # bool arrays not allowed
+        assert_raises(IndexError, take_along_axis, a, ai.astype(bool), axis=1)
+        # float arrays not allowed
+        assert_raises(IndexError, take_along_axis, a, ai.astype(float), axis=1)
+        # invalid axis
+        assert_raises(np.AxisError, take_along_axis, a, ai, axis=10)
+
+    def test_empty(self):
+        """ Test everything is ok with empty results, even with inserted dims """
+        a = np.ones((3, 4, 5))
+        ai = np.ones((3, 0, 5), dtype=np.intp)
+
+        actual = take_along_axis(a, ai, axis=1)
+        assert_equal(actual.shape, ai.shape)
+
+    def test_broadcast(self):
+        """ Test that non-indexing dimensions are broadcast in both directions """
+        a = np.ones((3, 4, 1))
+        ai = np.ones((1, 2, 5), dtype=np.intp)
+        actual = take_along_axis(a, ai, axis=1)
+        assert_equal(actual.shape, (3, 2, 5))
+
+
+class TestPutAlongAxis:
+    def test_replace_max(self):
+        a_base = np.array([[10, 30, 20], [60, 40, 50]])
+
+        for axis in list(range(a_base.ndim)) + [None]:
+            # we mutate this in the loop
+            a = a_base.copy()
+
+            # replace the max with a small value
+            i_max = _add_keepdims(np.argmax)(a, axis=axis)
+            put_along_axis(a, i_max, -99, axis=axis)
+
+            # find the new minimum, which should be where the max was
+            i_min = _add_keepdims(np.argmin)(a, axis=axis)
+
+            assert_equal(i_min, i_max)
+
+    def test_broadcast(self):
+        """ Test that non-indexing dimensions are broadcast in both directions """
+        a = np.ones((3, 4, 1))
+        ai = np.arange(10, dtype=np.intp).reshape((1, 2, 5)) % 4
+        put_along_axis(a, ai, 20, axis=1)
+        assert_equal(take_along_axis(a, ai, axis=1), 20)
+
+
+class
TestApplyAlongAxis: + def test_simple(self): + a = np.ones((20, 10), 'd') + assert_array_equal( + apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1])) + + def test_simple101(self): + a = np.ones((10, 101), 'd') + assert_array_equal( + apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1])) + + def test_3d(self): + a = np.arange(27).reshape((3, 3, 3)) + assert_array_equal(apply_along_axis(np.sum, 0, a), + [[27, 30, 33], [36, 39, 42], [45, 48, 51]]) + + def test_preserve_subclass(self): + def double(row): + return row * 2 + + class MyNDArray(np.ndarray): + pass + + m = np.array([[0, 1], [2, 3]]).view(MyNDArray) + expected = np.array([[0, 2], [4, 6]]).view(MyNDArray) + + result = apply_along_axis(double, 0, m) + assert_(isinstance(result, MyNDArray)) + assert_array_equal(result, expected) + + result = apply_along_axis(double, 1, m) + assert_(isinstance(result, MyNDArray)) + assert_array_equal(result, expected) + + def test_subclass(self): + class MinimalSubclass(np.ndarray): + data = 1 + + def minimal_function(array): + return array.data + + a = np.zeros((6, 3)).view(MinimalSubclass) + + assert_array_equal( + apply_along_axis(minimal_function, 0, a), np.array([1, 1, 1]) + ) + + def test_scalar_array(self, cls=np.ndarray): + a = np.ones((6, 3)).view(cls) + res = apply_along_axis(np.sum, 0, a) + assert_(isinstance(res, cls)) + assert_array_equal(res, np.array([6, 6, 6]).view(cls)) + + def test_0d_array(self, cls=np.ndarray): + def sum_to_0d(x): + """ Sum x, returning a 0d array of the same class """ + assert_equal(x.ndim, 1) + return np.squeeze(np.sum(x, keepdims=True)) + a = np.ones((6, 3)).view(cls) + res = apply_along_axis(sum_to_0d, 0, a) + assert_(isinstance(res, cls)) + assert_array_equal(res, np.array([6, 6, 6]).view(cls)) + + res = apply_along_axis(sum_to_0d, 1, a) + assert_(isinstance(res, cls)) + assert_array_equal(res, np.array([3, 3, 3, 3, 3, 3]).view(cls)) + + def test_axis_insertion(self, cls=np.ndarray): + def f1to2(x): + """produces an asymmetric non-square matrix from x""" + assert_equal(x.ndim, 1) + return (x[::-1] * x[1:,None]).view(cls) + + a2d = np.arange(6*3).reshape((6, 3)) + + # 2d insertion along first axis + actual = apply_along_axis(f1to2, 0, a2d) + expected = np.stack([ + f1to2(a2d[:,i]) for i in range(a2d.shape[1]) + ], axis=-1).view(cls) + assert_equal(type(actual), type(expected)) + assert_equal(actual, expected) + + # 2d insertion along last axis + actual = apply_along_axis(f1to2, 1, a2d) + expected = np.stack([ + f1to2(a2d[i,:]) for i in range(a2d.shape[0]) + ], axis=0).view(cls) + assert_equal(type(actual), type(expected)) + assert_equal(actual, expected) + + # 3d insertion along middle axis + a3d = np.arange(6*5*3).reshape((6, 5, 3)) + + actual = apply_along_axis(f1to2, 1, a3d) + expected = np.stack([ + np.stack([ + f1to2(a3d[i,:,j]) for i in range(a3d.shape[0]) + ], axis=0) + for j in range(a3d.shape[2]) + ], axis=-1).view(cls) + assert_equal(type(actual), type(expected)) + assert_equal(actual, expected) + + def test_subclass_preservation(self): + class MinimalSubclass(np.ndarray): + pass + self.test_scalar_array(MinimalSubclass) + self.test_0d_array(MinimalSubclass) + self.test_axis_insertion(MinimalSubclass) + + def test_axis_insertion_ma(self): + def f1to2(x): + """produces an asymmetric non-square matrix from x""" + assert_equal(x.ndim, 1) + res = x[::-1] * x[1:,None] + return np.ma.masked_where(res%5==0, res) + a = np.arange(6*3).reshape((6, 3)) + res = apply_along_axis(f1to2, 0, a) + assert_(isinstance(res, np.ma.masked_array)) + 
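# f1to2 maps each length-6 column of a to a (5, 6) block, so res has shape (5, 6, 3) +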
assert_equal(res.ndim, 3) + assert_array_equal(res[:,:,0].mask, f1to2(a[:,0]).mask) + assert_array_equal(res[:,:,1].mask, f1to2(a[:,1]).mask) + assert_array_equal(res[:,:,2].mask, f1to2(a[:,2]).mask) + + def test_tuple_func1d(self): + def sample_1d(x): + return x[1], x[0] + res = np.apply_along_axis(sample_1d, 1, np.array([[1, 2], [3, 4]])) + assert_array_equal(res, np.array([[2, 1], [4, 3]])) + + def test_empty(self): + # can't apply_along_axis when there's no chance to call the function + def never_call(x): + assert_(False) # should never be reached + + a = np.empty((0, 0)) + assert_raises(ValueError, np.apply_along_axis, never_call, 0, a) + assert_raises(ValueError, np.apply_along_axis, never_call, 1, a) + + # but it's sometimes ok with some non-zero dimensions + def empty_to_1(x): + assert_(len(x) == 0) + return 1 + + a = np.empty((10, 0)) + actual = np.apply_along_axis(empty_to_1, 1, a) + assert_equal(actual, np.ones(10)) + assert_raises(ValueError, np.apply_along_axis, empty_to_1, 0, a) + + def test_with_iterable_object(self): + # from issue 5248 + d = np.array([ + [{1, 11}, {2, 22}, {3, 33}], + [{4, 44}, {5, 55}, {6, 66}] + ]) + actual = np.apply_along_axis(lambda a: set.union(*a), 0, d) + expected = np.array([{1, 11, 4, 44}, {2, 22, 5, 55}, {3, 33, 6, 66}]) + + assert_equal(actual, expected) + + # issue 8642 - assert_equal doesn't detect this! + for i in np.ndindex(actual.shape): + assert_equal(type(actual[i]), type(expected[i])) + + +class TestApplyOverAxes: + def test_simple(self): + a = np.arange(24).reshape(2, 3, 4) + aoa_a = apply_over_axes(np.sum, a, [0, 2]) + assert_array_equal(aoa_a, np.array([[[60], [92], [124]]])) + + +class TestExpandDims: + def test_functionality(self): + s = (2, 3, 4, 5) + a = np.empty(s) + for axis in range(-5, 4): + b = expand_dims(a, axis) + assert_(b.shape[axis] == 1) + assert_(np.squeeze(b).shape == s) + + def test_axis_tuple(self): + a = np.empty((3, 3, 3)) + assert np.expand_dims(a, axis=(0, 1, 2)).shape == (1, 1, 1, 3, 3, 3) + assert np.expand_dims(a, axis=(0, -1, -2)).shape == (1, 3, 3, 3, 1, 1) + assert np.expand_dims(a, axis=(0, 3, 5)).shape == (1, 3, 3, 1, 3, 1) + assert np.expand_dims(a, axis=(0, -3, -5)).shape == (1, 1, 3, 1, 3, 3) + + def test_axis_out_of_range(self): + s = (2, 3, 4, 5) + a = np.empty(s) + assert_raises(np.AxisError, expand_dims, a, -6) + assert_raises(np.AxisError, expand_dims, a, 5) + + a = np.empty((3, 3, 3)) + assert_raises(np.AxisError, expand_dims, a, (0, -6)) + assert_raises(np.AxisError, expand_dims, a, (0, 5)) + + def test_repeated_axis(self): + a = np.empty((3, 3, 3)) + assert_raises(ValueError, expand_dims, a, axis=(1, 1)) + + def test_subclasses(self): + a = np.arange(10).reshape((2, 5)) + a = np.ma.array(a, mask=a%3 == 0) + + expanded = np.expand_dims(a, axis=1) + assert_(isinstance(expanded, np.ma.MaskedArray)) + assert_equal(expanded.shape, (2, 1, 5)) + assert_equal(expanded.mask.shape, (2, 1, 5)) + + +class TestArraySplit: + def test_integer_0_split(self): + a = np.arange(10) + assert_raises(ValueError, array_split, a, 0) + + def test_integer_split(self): + a = np.arange(10) + res = array_split(a, 1) + desired = [np.arange(10)] + compare_results(res, desired) + + res = array_split(a, 2) + desired = [np.arange(5), np.arange(5, 10)] + compare_results(res, desired) + + res = array_split(a, 3) + desired = [np.arange(4), np.arange(4, 7), np.arange(7, 10)] + compare_results(res, desired) + + res = array_split(a, 4) + desired = [np.arange(3), np.arange(3, 6), np.arange(6, 8), + np.arange(8, 10)] + 
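# 10 elements into 4 pieces: the leading pieces absorb the remainder, giving sizes 3, 3, 2, 2 +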
compare_results(res, desired) + + res = array_split(a, 5) + desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6), + np.arange(6, 8), np.arange(8, 10)] + compare_results(res, desired) + + res = array_split(a, 6) + desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6), + np.arange(6, 8), np.arange(8, 9), np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 7) + desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6), + np.arange(6, 7), np.arange(7, 8), np.arange(8, 9), + np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 8) + desired = [np.arange(2), np.arange(2, 4), np.arange(4, 5), + np.arange(5, 6), np.arange(6, 7), np.arange(7, 8), + np.arange(8, 9), np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 9) + desired = [np.arange(2), np.arange(2, 3), np.arange(3, 4), + np.arange(4, 5), np.arange(5, 6), np.arange(6, 7), + np.arange(7, 8), np.arange(8, 9), np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 10) + desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3), + np.arange(3, 4), np.arange(4, 5), np.arange(5, 6), + np.arange(6, 7), np.arange(7, 8), np.arange(8, 9), + np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 11) + desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3), + np.arange(3, 4), np.arange(4, 5), np.arange(5, 6), + np.arange(6, 7), np.arange(7, 8), np.arange(8, 9), + np.arange(9, 10), np.array([])] + compare_results(res, desired) + + def test_integer_split_2D_rows(self): + a = np.array([np.arange(10), np.arange(10)]) + res = array_split(a, 3, axis=0) + tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]), + np.zeros((0, 10))] + compare_results(res, tgt) + assert_(a.dtype.type is res[-1].dtype.type) + + # Same thing for manual splits: + res = array_split(a, [0, 1, 2], axis=0) + tgt = [np.zeros((0, 10)), np.array([np.arange(10)]), + np.array([np.arange(10)])] + compare_results(res, tgt) + assert_(a.dtype.type is res[-1].dtype.type) + + def test_integer_split_2D_cols(self): + a = np.array([np.arange(10), np.arange(10)]) + res = array_split(a, 3, axis=-1) + desired = [np.array([np.arange(4), np.arange(4)]), + np.array([np.arange(4, 7), np.arange(4, 7)]), + np.array([np.arange(7, 10), np.arange(7, 10)])] + compare_results(res, desired) + + def test_integer_split_2D_default(self): + """ This will fail if we change default axis + """ + a = np.array([np.arange(10), np.arange(10)]) + res = array_split(a, 3) + tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]), + np.zeros((0, 10))] + compare_results(res, tgt) + assert_(a.dtype.type is res[-1].dtype.type) + # perhaps should check higher dimensions + + @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform") + def test_integer_split_2D_rows_greater_max_int32(self): + a = np.broadcast_to([0], (1 << 32, 2)) + res = array_split(a, 4) + chunk = np.broadcast_to([0], (1 << 30, 2)) + tgt = [chunk] * 4 + for i in range(len(tgt)): + assert_equal(res[i].shape, tgt[i].shape) + + def test_index_split_simple(self): + a = np.arange(10) + indices = [1, 5, 7] + res = array_split(a, indices, axis=-1) + desired = [np.arange(0, 1), np.arange(1, 5), np.arange(5, 7), + np.arange(7, 10)] + compare_results(res, desired) + + def test_index_split_low_bound(self): + a = np.arange(10) + indices = [0, 5, 7] + res = array_split(a, indices, axis=-1) + desired = [np.array([]), np.arange(0, 5), np.arange(5, 7), + np.arange(7, 10)] + compare_results(res, desired) + + def test_index_split_high_bound(self): + a 
= np.arange(10)
+        indices = [0, 5, 7, 10, 12]
+        res = array_split(a, indices, axis=-1)
+        desired = [np.array([]), np.arange(0, 5), np.arange(5, 7),
+                   np.arange(7, 10), np.array([]), np.array([])]
+        compare_results(res, desired)
+
+
+class TestSplit:
+    # The split function is essentially the same as array_split,
+    # except that it tests whether splitting will result in an
+    # equal split.  Only test for this case.
+
+    def test_equal_split(self):
+        a = np.arange(10)
+        res = split(a, 2)
+        desired = [np.arange(5), np.arange(5, 10)]
+        compare_results(res, desired)
+
+    def test_unequal_split(self):
+        a = np.arange(10)
+        assert_raises(ValueError, split, a, 3)
+
+
+class TestColumnStack:
+    def test_non_iterable(self):
+        assert_raises(TypeError, column_stack, 1)
+
+    def test_1D_arrays(self):
+        # example from docstring
+        a = np.array((1, 2, 3))
+        b = np.array((2, 3, 4))
+        expected = np.array([[1, 2],
+                             [2, 3],
+                             [3, 4]])
+        actual = np.column_stack((a, b))
+        assert_equal(actual, expected)
+
+    def test_2D_arrays(self):
+        # same as hstack 2D docstring example
+        a = np.array([[1], [2], [3]])
+        b = np.array([[2], [3], [4]])
+        expected = np.array([[1, 2],
+                             [2, 3],
+                             [3, 4]])
+        actual = np.column_stack((a, b))
+        assert_equal(actual, expected)
+
+    def test_generator(self):
+        with assert_warns(FutureWarning):
+            column_stack((np.arange(3) for _ in range(2)))
+
+
+class TestDstack:
+    def test_non_iterable(self):
+        assert_raises(TypeError, dstack, 1)
+
+    def test_0D_array(self):
+        a = np.array(1)
+        b = np.array(2)
+        res = dstack([a, b])
+        desired = np.array([[[1, 2]]])
+        assert_array_equal(res, desired)
+
+    def test_1D_array(self):
+        a = np.array([1])
+        b = np.array([2])
+        res = dstack([a, b])
+        desired = np.array([[[1, 2]]])
+        assert_array_equal(res, desired)
+
+    def test_2D_array(self):
+        a = np.array([[1], [2]])
+        b = np.array([[1], [2]])
+        res = dstack([a, b])
+        desired = np.array([[[1, 1]], [[2, 2]]])
+        assert_array_equal(res, desired)
+
+    def test_2D_array2(self):
+        a = np.array([1, 2])
+        b = np.array([1, 2])
+        res = dstack([a, b])
+        desired = np.array([[[1, 1], [2, 2]]])
+        assert_array_equal(res, desired)
+
+    def test_generator(self):
+        with assert_warns(FutureWarning):
+            dstack((np.arange(3) for _ in range(2)))
+
+
+# array_split has a more comprehensive test of splitting.
+# only do simple tests on hsplit, vsplit, and dsplit
+class TestHsplit:
+    """Only testing for integer splits.
+
+    """
+    def test_non_iterable(self):
+        assert_raises(ValueError, hsplit, 1, 1)
+
+    def test_0D_array(self):
+        a = np.array(1)
+        try:
+            hsplit(a, 2)
+            assert_(0)
+        except ValueError:
+            pass
+
+    def test_1D_array(self):
+        a = np.array([1, 2, 3, 4])
+        res = hsplit(a, 2)
+        desired = [np.array([1, 2]), np.array([3, 4])]
+        compare_results(res, desired)
+
+    def test_2D_array(self):
+        a = np.array([[1, 2, 3, 4],
+                      [1, 2, 3, 4]])
+        res = hsplit(a, 2)
+        desired = [np.array([[1, 2], [1, 2]]), np.array([[3, 4], [3, 4]])]
+        compare_results(res, desired)
+
+
+class TestVsplit:
+    """Only testing for integer splits.
+ + """ + def test_non_iterable(self): + assert_raises(ValueError, vsplit, 1, 1) + + def test_0D_array(self): + a = np.array(1) + assert_raises(ValueError, vsplit, a, 2) + + def test_1D_array(self): + a = np.array([1, 2, 3, 4]) + try: + vsplit(a, 2) + assert_(0) + except ValueError: + pass + + def test_2D_array(self): + a = np.array([[1, 2, 3, 4], + [1, 2, 3, 4]]) + res = vsplit(a, 2) + desired = [np.array([[1, 2, 3, 4]]), np.array([[1, 2, 3, 4]])] + compare_results(res, desired) + + +class TestDsplit: + # Only testing for integer splits. + def test_non_iterable(self): + assert_raises(ValueError, dsplit, 1, 1) + + def test_0D_array(self): + a = np.array(1) + assert_raises(ValueError, dsplit, a, 2) + + def test_1D_array(self): + a = np.array([1, 2, 3, 4]) + assert_raises(ValueError, dsplit, a, 2) + + def test_2D_array(self): + a = np.array([[1, 2, 3, 4], + [1, 2, 3, 4]]) + try: + dsplit(a, 2) + assert_(0) + except ValueError: + pass + + def test_3D_array(self): + a = np.array([[[1, 2, 3, 4], + [1, 2, 3, 4]], + [[1, 2, 3, 4], + [1, 2, 3, 4]]]) + res = dsplit(a, 2) + desired = [np.array([[[1, 2], [1, 2]], [[1, 2], [1, 2]]]), + np.array([[[3, 4], [3, 4]], [[3, 4], [3, 4]]])] + compare_results(res, desired) + + +class TestSqueeze: + def test_basic(self): + from numpy.random import rand + + a = rand(20, 10, 10, 1, 1) + b = rand(20, 1, 10, 1, 20) + c = rand(1, 1, 20, 10) + assert_array_equal(np.squeeze(a), np.reshape(a, (20, 10, 10))) + assert_array_equal(np.squeeze(b), np.reshape(b, (20, 10, 20))) + assert_array_equal(np.squeeze(c), np.reshape(c, (20, 10))) + + # Squeezing to 0-dim should still give an ndarray + a = [[[1.5]]] + res = np.squeeze(a) + assert_equal(res, 1.5) + assert_equal(res.ndim, 0) + assert_equal(type(res), np.ndarray) + + +class TestKron: + def test_return_type(self): + class myarray(np.ndarray): + __array_priority__ = 0.0 + + a = np.ones([2, 2]) + ma = myarray(a.shape, a.dtype, a.data) + assert_equal(type(kron(a, a)), np.ndarray) + assert_equal(type(kron(ma, ma)), myarray) + assert_equal(type(kron(a, ma)), np.ndarray) + assert_equal(type(kron(ma, a)), myarray) + + +class TestTile: + def test_basic(self): + a = np.array([0, 1, 2]) + b = [[1, 2], [3, 4]] + assert_equal(tile(a, 2), [0, 1, 2, 0, 1, 2]) + assert_equal(tile(a, (2, 2)), [[0, 1, 2, 0, 1, 2], [0, 1, 2, 0, 1, 2]]) + assert_equal(tile(a, (1, 2)), [[0, 1, 2, 0, 1, 2]]) + assert_equal(tile(b, 2), [[1, 2, 1, 2], [3, 4, 3, 4]]) + assert_equal(tile(b, (2, 1)), [[1, 2], [3, 4], [1, 2], [3, 4]]) + assert_equal(tile(b, (2, 2)), [[1, 2, 1, 2], [3, 4, 3, 4], + [1, 2, 1, 2], [3, 4, 3, 4]]) + + def test_tile_one_repetition_on_array_gh4679(self): + a = np.arange(5) + b = tile(a, 1) + b += 2 + assert_equal(a, np.arange(5)) + + def test_empty(self): + a = np.array([[[]]]) + b = np.array([[], []]) + c = tile(b, 2).shape + d = tile(a, (3, 2, 5)).shape + assert_equal(c, (2, 0)) + assert_equal(d, (3, 2, 0)) + + def test_kroncompare(self): + from numpy.random import randint + + reps = [(2,), (1, 2), (2, 1), (2, 2), (2, 3, 2), (3, 2)] + shape = [(3,), (2, 3), (3, 4, 3), (3, 2, 3), (4, 3, 2, 4), (2, 2)] + for s in shape: + b = randint(0, 10, size=s) + for r in reps: + a = np.ones(r, b.dtype) + large = tile(b, r) + klarge = kron(a, b) + assert_equal(large, klarge) + + +class TestMayShareMemory: + def test_basic(self): + d = np.ones((50, 60)) + d2 = np.ones((30, 60, 6)) + assert_(np.may_share_memory(d, d)) + assert_(np.may_share_memory(d, d[::-1])) + assert_(np.may_share_memory(d, d[::2])) + assert_(np.may_share_memory(d, d[1:, ::-1])) + + 
assert_(not np.may_share_memory(d[::-1], d2)) + assert_(not np.may_share_memory(d[::2], d2)) + assert_(not np.may_share_memory(d[1:, ::-1], d2)) + assert_(np.may_share_memory(d2[1:, ::-1], d2)) + + +# Utility +def compare_results(res, desired): + for i in range(len(desired)): + assert_array_equal(res[i], desired[i]) diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/test_stride_tricks.py b/MLPY/Lib/site-packages/numpy/lib/tests/test_stride_tricks.py new file mode 100644 index 0000000000000000000000000000000000000000..3904bc03704e6cfa8a57b284000929f3ec788e1f --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/tests/test_stride_tricks.py @@ -0,0 +1,645 @@ +import numpy as np +from numpy.core._rational_tests import rational +from numpy.testing import ( + assert_equal, assert_array_equal, assert_raises, assert_, + assert_raises_regex, assert_warns, + ) +from numpy.lib.stride_tricks import ( + as_strided, broadcast_arrays, _broadcast_shape, broadcast_to, + broadcast_shapes, sliding_window_view, + ) +import pytest + + +def assert_shapes_correct(input_shapes, expected_shape): + # Broadcast a list of arrays with the given input shapes and check the + # common output shape. + + inarrays = [np.zeros(s) for s in input_shapes] + outarrays = broadcast_arrays(*inarrays) + outshapes = [a.shape for a in outarrays] + expected = [expected_shape] * len(inarrays) + assert_equal(outshapes, expected) + + +def assert_incompatible_shapes_raise(input_shapes): + # Broadcast a list of arrays with the given (incompatible) input shapes + # and check that they raise a ValueError. + + inarrays = [np.zeros(s) for s in input_shapes] + assert_raises(ValueError, broadcast_arrays, *inarrays) + + +def assert_same_as_ufunc(shape0, shape1, transposed=False, flipped=False): + # Broadcast two shapes against each other and check that the data layout + # is the same as if a ufunc did the broadcasting. + + x0 = np.zeros(shape0, dtype=int) + # Note that multiply.reduce's identity element is 1.0, so when shape1==(), + # this gives the desired n==1. + n = int(np.multiply.reduce(shape1)) + x1 = np.arange(n).reshape(shape1) + if transposed: + x0 = x0.T + x1 = x1.T + if flipped: + x0 = x0[::-1] + x1 = x1[::-1] + # Use the add ufunc to do the broadcasting. Since we're adding 0s to x1, the + # result should be exactly the same as the broadcasted view of x1. + y = x0 + x1 + b0, b1 = broadcast_arrays(x0, x1) + assert_array_equal(y, b1) + + +def test_same(): + x = np.arange(10) + y = np.arange(10) + bx, by = broadcast_arrays(x, y) + assert_array_equal(x, bx) + assert_array_equal(y, by) + +def test_broadcast_kwargs(): + # ensure that a TypeError is appropriately raised when + # np.broadcast_arrays() is called with any keyword + # argument other than 'subok' + x = np.arange(10) + y = np.arange(10) + + with assert_raises_regex(TypeError, 'got an unexpected keyword'): + broadcast_arrays(x, y, dtype='float64') + + +def test_one_off(): + x = np.array([[1, 2, 3]]) + y = np.array([[1], [2], [3]]) + bx, by = broadcast_arrays(x, y) + bx0 = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]) + by0 = bx0.T + assert_array_equal(bx0, bx) + assert_array_equal(by0, by) + + +def test_same_input_shapes(): + # Check that the final shape is just the input shape. + + data = [ + (), + (1,), + (3,), + (0, 1), + (0, 3), + (1, 0), + (3, 0), + (1, 3), + (3, 1), + (3, 3), + ] + for shape in data: + input_shapes = [shape] + # Single input. + assert_shapes_correct(input_shapes, shape) + # Double input. 
+ input_shapes2 = [shape, shape] + assert_shapes_correct(input_shapes2, shape) + # Triple input. + input_shapes3 = [shape, shape, shape] + assert_shapes_correct(input_shapes3, shape) + + +def test_two_compatible_by_ones_input_shapes(): + # Check that two different input shapes of the same length, but some have + # ones, broadcast to the correct shape. + + data = [ + [[(1,), (3,)], (3,)], + [[(1, 3), (3, 3)], (3, 3)], + [[(3, 1), (3, 3)], (3, 3)], + [[(1, 3), (3, 1)], (3, 3)], + [[(1, 1), (3, 3)], (3, 3)], + [[(1, 1), (1, 3)], (1, 3)], + [[(1, 1), (3, 1)], (3, 1)], + [[(1, 0), (0, 0)], (0, 0)], + [[(0, 1), (0, 0)], (0, 0)], + [[(1, 0), (0, 1)], (0, 0)], + [[(1, 1), (0, 0)], (0, 0)], + [[(1, 1), (1, 0)], (1, 0)], + [[(1, 1), (0, 1)], (0, 1)], + ] + for input_shapes, expected_shape in data: + assert_shapes_correct(input_shapes, expected_shape) + # Reverse the input shapes since broadcasting should be symmetric. + assert_shapes_correct(input_shapes[::-1], expected_shape) + + +def test_two_compatible_by_prepending_ones_input_shapes(): + # Check that two different input shapes (of different lengths) broadcast + # to the correct shape. + + data = [ + [[(), (3,)], (3,)], + [[(3,), (3, 3)], (3, 3)], + [[(3,), (3, 1)], (3, 3)], + [[(1,), (3, 3)], (3, 3)], + [[(), (3, 3)], (3, 3)], + [[(1, 1), (3,)], (1, 3)], + [[(1,), (3, 1)], (3, 1)], + [[(1,), (1, 3)], (1, 3)], + [[(), (1, 3)], (1, 3)], + [[(), (3, 1)], (3, 1)], + [[(), (0,)], (0,)], + [[(0,), (0, 0)], (0, 0)], + [[(0,), (0, 1)], (0, 0)], + [[(1,), (0, 0)], (0, 0)], + [[(), (0, 0)], (0, 0)], + [[(1, 1), (0,)], (1, 0)], + [[(1,), (0, 1)], (0, 1)], + [[(1,), (1, 0)], (1, 0)], + [[(), (1, 0)], (1, 0)], + [[(), (0, 1)], (0, 1)], + ] + for input_shapes, expected_shape in data: + assert_shapes_correct(input_shapes, expected_shape) + # Reverse the input shapes since broadcasting should be symmetric. + assert_shapes_correct(input_shapes[::-1], expected_shape) + + +def test_incompatible_shapes_raise_valueerror(): + # Check that a ValueError is raised for incompatible shapes. + + data = [ + [(3,), (4,)], + [(2, 3), (2,)], + [(3,), (3,), (4,)], + [(1, 3, 4), (2, 3, 3)], + ] + for input_shapes in data: + assert_incompatible_shapes_raise(input_shapes) + # Reverse the input shapes since broadcasting should be symmetric. + assert_incompatible_shapes_raise(input_shapes[::-1]) + + +def test_same_as_ufunc(): + # Check that the data layout is the same as if a ufunc did the operation. 
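+    # only the input shape pairs are exercised below; the expected output
+    # shapes are already covered by the broadcast-shape tests above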
+
+    data = [
+        [[(1,), (3,)], (3,)],
+        [[(1, 3), (3, 3)], (3, 3)],
+        [[(3, 1), (3, 3)], (3, 3)],
+        [[(1, 3), (3, 1)], (3, 3)],
+        [[(1, 1), (3, 3)], (3, 3)],
+        [[(1, 1), (1, 3)], (1, 3)],
+        [[(1, 1), (3, 1)], (3, 1)],
+        [[(1, 0), (0, 0)], (0, 0)],
+        [[(0, 1), (0, 0)], (0, 0)],
+        [[(1, 0), (0, 1)], (0, 0)],
+        [[(1, 1), (0, 0)], (0, 0)],
+        [[(1, 1), (1, 0)], (1, 0)],
+        [[(1, 1), (0, 1)], (0, 1)],
+        [[(), (3,)], (3,)],
+        [[(3,), (3, 3)], (3, 3)],
+        [[(3,), (3, 1)], (3, 3)],
+        [[(1,), (3, 3)], (3, 3)],
+        [[(), (3, 3)], (3, 3)],
+        [[(1, 1), (3,)], (1, 3)],
+        [[(1,), (3, 1)], (3, 1)],
+        [[(1,), (1, 3)], (1, 3)],
+        [[(), (1, 3)], (1, 3)],
+        [[(), (3, 1)], (3, 1)],
+        [[(), (0,)], (0,)],
+        [[(0,), (0, 0)], (0, 0)],
+        [[(0,), (0, 1)], (0, 0)],
+        [[(1,), (0, 0)], (0, 0)],
+        [[(), (0, 0)], (0, 0)],
+        [[(1, 1), (0,)], (1, 0)],
+        [[(1,), (0, 1)], (0, 1)],
+        [[(1,), (1, 0)], (1, 0)],
+        [[(), (1, 0)], (1, 0)],
+        [[(), (0, 1)], (0, 1)],
+    ]
+    for input_shapes, expected_shape in data:
+        # assert_same_as_ufunc takes no message argument, so only the two
+        # shapes are passed (the third positional slot is `transposed`)
+        assert_same_as_ufunc(input_shapes[0], input_shapes[1])
+        # Reverse the input shapes since broadcasting should be symmetric.
+        assert_same_as_ufunc(input_shapes[1], input_shapes[0])
+        # Try them transposed, too.
+        assert_same_as_ufunc(input_shapes[0], input_shapes[1], True)
+        # ... and flipped for non-rank-0 inputs in order to test negative
+        # strides.
+        if () not in input_shapes:
+            assert_same_as_ufunc(input_shapes[0], input_shapes[1], False, True)
+            assert_same_as_ufunc(input_shapes[0], input_shapes[1], True, True)
+
+
+def test_broadcast_to_succeeds():
+    data = [
+        [np.array(0), (0,), np.array(0)],
+        [np.array(0), (1,), np.zeros(1)],
+        [np.array(0), (3,), np.zeros(3)],
+        [np.ones(1), (1,), np.ones(1)],
+        [np.ones(1), (2,), np.ones(2)],
+        [np.ones(1), (1, 2, 3), np.ones((1, 2, 3))],
+        [np.arange(3), (3,), np.arange(3)],
+        [np.arange(3), (1, 3), np.arange(3).reshape(1, -1)],
+        [np.arange(3), (2, 3), np.array([[0, 1, 2], [0, 1, 2]])],
+        # test if shape is not a tuple
+        [np.ones(0), 0, np.ones(0)],
+        [np.ones(1), 1, np.ones(1)],
+        [np.ones(1), 2, np.ones(2)],
+        # these cases with size 0 are strange, but they reproduce the behavior
+        # of broadcasting with ufuncs (see test_same_as_ufunc above)
+        [np.ones(1), (0,), np.ones(0)],
+        [np.ones((1, 2)), (0, 2), np.ones((0, 2))],
+        [np.ones((2, 1)), (2, 0), np.ones((2, 0))],
+    ]
+    for input_array, shape, expected in data:
+        actual = broadcast_to(input_array, shape)
+        assert_array_equal(expected, actual)
+
+
+def test_broadcast_to_raises():
+    data = [
+        [(0,), ()],
+        [(1,), ()],
+        [(3,), ()],
+        [(3,), (1,)],
+        [(3,), (2,)],
+        [(3,), (4,)],
+        [(1, 2), (2, 1)],
+        [(1, 1), (1,)],
+        [(1,), -1],
+        [(1,), (-1,)],
+        [(1, 2), (-1, 2)],
+    ]
+    for orig_shape, target_shape in data:
+        arr = np.zeros(orig_shape)
+        assert_raises(ValueError, lambda: broadcast_to(arr, target_shape))
+
+
+def test_broadcast_shape():
+    # tests internal _broadcast_shape
+    # _broadcast_shape is already exercised indirectly by broadcast_arrays
+    # _broadcast_shape is also exercised by the public broadcast_shapes function
+    assert_equal(_broadcast_shape(), ())
+    assert_equal(_broadcast_shape([1, 2]), (2,))
+    assert_equal(_broadcast_shape(np.ones((1, 1))), (1, 1))
+    assert_equal(_broadcast_shape(np.ones((1, 1)), np.ones((3, 4))), (3, 4))
+    assert_equal(_broadcast_shape(*([np.ones((1, 2))] * 32)), (1, 2))
+    assert_equal(_broadcast_shape(*([np.ones((1, 2))] * 100)), (1, 2))
+
+    # regression tests for gh-5862
+    assert_equal(_broadcast_shape(*([np.ones(2)] * 32 + [1])),
(2,)) + bad_args = [np.ones(2)] * 32 + [np.ones(3)] * 32 + assert_raises(ValueError, lambda: _broadcast_shape(*bad_args)) + + +def test_broadcast_shapes_succeeds(): + # tests public broadcast_shapes + data = [ + [[], ()], + [[()], ()], + [[(7,)], (7,)], + [[(1, 2), (2,)], (1, 2)], + [[(1, 1)], (1, 1)], + [[(1, 1), (3, 4)], (3, 4)], + [[(6, 7), (5, 6, 1), (7,), (5, 1, 7)], (5, 6, 7)], + [[(5, 6, 1)], (5, 6, 1)], + [[(1, 3), (3, 1)], (3, 3)], + [[(1, 0), (0, 0)], (0, 0)], + [[(0, 1), (0, 0)], (0, 0)], + [[(1, 0), (0, 1)], (0, 0)], + [[(1, 1), (0, 0)], (0, 0)], + [[(1, 1), (1, 0)], (1, 0)], + [[(1, 1), (0, 1)], (0, 1)], + [[(), (0,)], (0,)], + [[(0,), (0, 0)], (0, 0)], + [[(0,), (0, 1)], (0, 0)], + [[(1,), (0, 0)], (0, 0)], + [[(), (0, 0)], (0, 0)], + [[(1, 1), (0,)], (1, 0)], + [[(1,), (0, 1)], (0, 1)], + [[(1,), (1, 0)], (1, 0)], + [[(), (1, 0)], (1, 0)], + [[(), (0, 1)], (0, 1)], + [[(1,), (3,)], (3,)], + [[2, (3, 2)], (3, 2)], + ] + for input_shapes, target_shape in data: + assert_equal(broadcast_shapes(*input_shapes), target_shape) + + assert_equal(broadcast_shapes(*([(1, 2)] * 32)), (1, 2)) + assert_equal(broadcast_shapes(*([(1, 2)] * 100)), (1, 2)) + + # regression tests for gh-5862 + assert_equal(broadcast_shapes(*([(2,)] * 32)), (2,)) + + +def test_broadcast_shapes_raises(): + # tests public broadcast_shapes + data = [ + [(3,), (4,)], + [(2, 3), (2,)], + [(3,), (3,), (4,)], + [(1, 3, 4), (2, 3, 3)], + [(1, 2), (3,1), (3,2), (10, 5)], + [2, (2, 3)], + ] + for input_shapes in data: + assert_raises(ValueError, lambda: broadcast_shapes(*input_shapes)) + + bad_args = [(2,)] * 32 + [(3,)] * 32 + assert_raises(ValueError, lambda: broadcast_shapes(*bad_args)) + + +def test_as_strided(): + a = np.array([None]) + a_view = as_strided(a) + expected = np.array([None]) + assert_array_equal(a_view, np.array([None])) + + a = np.array([1, 2, 3, 4]) + a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,)) + expected = np.array([1, 3]) + assert_array_equal(a_view, expected) + + a = np.array([1, 2, 3, 4]) + a_view = as_strided(a, shape=(3, 4), strides=(0, 1 * a.itemsize)) + expected = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) + assert_array_equal(a_view, expected) + + # Regression test for gh-5081 + dt = np.dtype([('num', 'i4'), ('obj', 'O')]) + a = np.empty((4,), dtype=dt) + a['num'] = np.arange(1, 5) + a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize)) + expected_num = [[1, 2, 3, 4]] * 3 + expected_obj = [[None]*4]*3 + assert_equal(a_view.dtype, dt) + assert_array_equal(expected_num, a_view['num']) + assert_array_equal(expected_obj, a_view['obj']) + + # Make sure that void types without fields are kept unchanged + a = np.empty((4,), dtype='V4') + a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize)) + assert_equal(a.dtype, a_view.dtype) + + # Make sure that the only type that could fail is properly handled + dt = np.dtype({'names': [''], 'formats': ['V4']}) + a = np.empty((4,), dtype=dt) + a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize)) + assert_equal(a.dtype, a_view.dtype) + + # Custom dtypes should not be lost (gh-9161) + r = [rational(i) for i in range(4)] + a = np.array(r, dtype=rational) + a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize)) + assert_equal(a.dtype, a_view.dtype) + assert_array_equal([r] * 3, a_view) + + +class TestSlidingWindowView: + def test_1d(self): + arr = np.arange(5) + arr_view = sliding_window_view(arr, 2) + expected = np.array([[0, 1], + [1, 2], + [2, 3], + [3, 4]]) + assert_array_equal(arr_view, expected) + + 
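# the windows produced above are overlapping views into arr, not copies + # (test_writeable below writes through such a view) +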
def test_2d(self): + i, j = np.ogrid[:3, :4] + arr = 10*i + j + shape = (2, 2) + arr_view = sliding_window_view(arr, shape) + expected = np.array([[[[0, 1], [10, 11]], + [[1, 2], [11, 12]], + [[2, 3], [12, 13]]], + [[[10, 11], [20, 21]], + [[11, 12], [21, 22]], + [[12, 13], [22, 23]]]]) + assert_array_equal(arr_view, expected) + + def test_2d_with_axis(self): + i, j = np.ogrid[:3, :4] + arr = 10*i + j + arr_view = sliding_window_view(arr, 3, 0) + expected = np.array([[[0, 10, 20], + [1, 11, 21], + [2, 12, 22], + [3, 13, 23]]]) + assert_array_equal(arr_view, expected) + + def test_2d_repeated_axis(self): + i, j = np.ogrid[:3, :4] + arr = 10*i + j + arr_view = sliding_window_view(arr, (2, 3), (1, 1)) + expected = np.array([[[[0, 1, 2], + [1, 2, 3]]], + [[[10, 11, 12], + [11, 12, 13]]], + [[[20, 21, 22], + [21, 22, 23]]]]) + assert_array_equal(arr_view, expected) + + def test_2d_without_axis(self): + i, j = np.ogrid[:4, :4] + arr = 10*i + j + shape = (2, 3) + arr_view = sliding_window_view(arr, shape) + expected = np.array([[[[0, 1, 2], [10, 11, 12]], + [[1, 2, 3], [11, 12, 13]]], + [[[10, 11, 12], [20, 21, 22]], + [[11, 12, 13], [21, 22, 23]]], + [[[20, 21, 22], [30, 31, 32]], + [[21, 22, 23], [31, 32, 33]]]]) + assert_array_equal(arr_view, expected) + + def test_errors(self): + i, j = np.ogrid[:4, :4] + arr = 10*i + j + with pytest.raises(ValueError, match='cannot contain negative values'): + sliding_window_view(arr, (-1, 3)) + with pytest.raises( + ValueError, + match='must provide window_shape for all dimensions of `x`'): + sliding_window_view(arr, (1,)) + with pytest.raises( + ValueError, + match='Must provide matching length window_shape and axis'): + sliding_window_view(arr, (1, 3, 4), axis=(0, 1)) + with pytest.raises( + ValueError, + match='window shape cannot be larger than input array'): + sliding_window_view(arr, (5, 5)) + + def test_writeable(self): + arr = np.arange(5) + view = sliding_window_view(arr, 2, writeable=False) + assert_(not view.flags.writeable) + with pytest.raises( + ValueError, + match='assignment destination is read-only'): + view[0, 0] = 3 + view = sliding_window_view(arr, 2, writeable=True) + assert_(view.flags.writeable) + view[0, 1] = 3 + assert_array_equal(arr, np.array([0, 3, 2, 3, 4])) + + def test_subok(self): + class MyArray(np.ndarray): + pass + + arr = np.arange(5).view(MyArray) + assert_(not isinstance(sliding_window_view(arr, 2, + subok=False), + MyArray)) + assert_(isinstance(sliding_window_view(arr, 2, subok=True), MyArray)) + # Default behavior + assert_(not isinstance(sliding_window_view(arr, 2), MyArray)) + + +def as_strided_writeable(): + arr = np.ones(10) + view = as_strided(arr, writeable=False) + assert_(not view.flags.writeable) + + # Check that writeable also is fine: + view = as_strided(arr, writeable=True) + assert_(view.flags.writeable) + view[...] 
= 3 + assert_array_equal(arr, np.full_like(arr, 3)) + + # Test that things do not break down for readonly: + arr.flags.writeable = False + view = as_strided(arr, writeable=False) + view = as_strided(arr, writeable=True) + assert_(not view.flags.writeable) + + +class VerySimpleSubClass(np.ndarray): + def __new__(cls, *args, **kwargs): + return np.array(*args, subok=True, **kwargs).view(cls) + + +class SimpleSubClass(VerySimpleSubClass): + def __new__(cls, *args, **kwargs): + self = np.array(*args, subok=True, **kwargs).view(cls) + self.info = 'simple' + return self + + def __array_finalize__(self, obj): + self.info = getattr(obj, 'info', '') + ' finalized' + + +def test_subclasses(): + # test that subclass is preserved only if subok=True + a = VerySimpleSubClass([1, 2, 3, 4]) + assert_(type(a) is VerySimpleSubClass) + a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,)) + assert_(type(a_view) is np.ndarray) + a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True) + assert_(type(a_view) is VerySimpleSubClass) + # test that if a subclass has __array_finalize__, it is used + a = SimpleSubClass([1, 2, 3, 4]) + a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True) + assert_(type(a_view) is SimpleSubClass) + assert_(a_view.info == 'simple finalized') + + # similar tests for broadcast_arrays + b = np.arange(len(a)).reshape(-1, 1) + a_view, b_view = broadcast_arrays(a, b) + assert_(type(a_view) is np.ndarray) + assert_(type(b_view) is np.ndarray) + assert_(a_view.shape == b_view.shape) + a_view, b_view = broadcast_arrays(a, b, subok=True) + assert_(type(a_view) is SimpleSubClass) + assert_(a_view.info == 'simple finalized') + assert_(type(b_view) is np.ndarray) + assert_(a_view.shape == b_view.shape) + + # and for broadcast_to + shape = (2, 4) + a_view = broadcast_to(a, shape) + assert_(type(a_view) is np.ndarray) + assert_(a_view.shape == shape) + a_view = broadcast_to(a, shape, subok=True) + assert_(type(a_view) is SimpleSubClass) + assert_(a_view.info == 'simple finalized') + assert_(a_view.shape == shape) + + +def test_writeable(): + # broadcast_to should return a readonly array + original = np.array([1, 2, 3]) + result = broadcast_to(original, (2, 3)) + assert_equal(result.flags.writeable, False) + assert_raises(ValueError, result.__setitem__, slice(None), 0) + + # but the result of broadcast_arrays needs to be writeable, to + # preserve backwards compatibility + for is_broadcast, results in [(False, broadcast_arrays(original,)), + (True, broadcast_arrays(0, original))]: + for result in results: + # This will change to False in a future version + if is_broadcast: + with assert_warns(FutureWarning): + assert_equal(result.flags.writeable, True) + with assert_warns(DeprecationWarning): + result[:] = 0 + # Warning not emitted, writing to the array resets it + assert_equal(result.flags.writeable, True) + else: + # No warning: + assert_equal(result.flags.writeable, True) + + for results in [broadcast_arrays(original), + broadcast_arrays(0, original)]: + for result in results: + # resets the warn_on_write DeprecationWarning + result.flags.writeable = True + # check: no warning emitted + assert_equal(result.flags.writeable, True) + result[:] = 0 + + # keep readonly input readonly + original.flags.writeable = False + _, result = broadcast_arrays(0, original) + assert_equal(result.flags.writeable, False) + + # regression test for GH6491 + shape = (2,) + strides = [0] + tricky_array = as_strided(np.array(0), shape, strides) + other = np.zeros((1,)) + first, 
second = broadcast_arrays(tricky_array, other) + assert_(first.shape == second.shape) + + +def test_writeable_memoryview(): + # The result of broadcast_arrays exports as a non-writeable memoryview + # because otherwise there is no good way to opt in to the new behaviour + # (i.e. you would need to set writeable to False explicitly). + # See gh-13929. + original = np.array([1, 2, 3]) + + for is_broadcast, results in [(False, broadcast_arrays(original,)), + (True, broadcast_arrays(0, original))]: + for result in results: + # This will change to False in a future version + if is_broadcast: + # memoryview(result, writable=True) will give warning but cannot + # be tested using the python API. + assert memoryview(result).readonly + else: + assert not memoryview(result).readonly + + +def test_reference_types(): + input_array = np.array('a', dtype=object) + expected = np.array(['a'] * 3, dtype=object) + actual = broadcast_to(input_array, (3,)) + assert_array_equal(expected, actual) + + actual, _ = broadcast_arrays(input_array, np.ones(3)) + assert_array_equal(expected, actual) diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/test_twodim_base.py b/MLPY/Lib/site-packages/numpy/lib/tests/test_twodim_base.py new file mode 100644 index 0000000000000000000000000000000000000000..9118200dac671753dfc077903602499136f8c206 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/tests/test_twodim_base.py @@ -0,0 +1,532 @@ +"""Test functions for matrix module + +""" +from numpy.testing import ( + assert_equal, assert_array_equal, assert_array_max_ulp, + assert_array_almost_equal, assert_raises, assert_ + ) + +from numpy import ( + arange, add, fliplr, flipud, zeros, ones, eye, array, diag, histogram2d, + tri, mask_indices, triu_indices, triu_indices_from, tril_indices, + tril_indices_from, vander, + ) + +import numpy as np + + +from numpy.core.tests.test_overrides import requires_array_function + + +def get_mat(n): + data = arange(n) + data = add.outer(data, data) + return data + + +class TestEye: + def test_basic(self): + assert_equal(eye(4), + array([[1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1]])) + + assert_equal(eye(4, dtype='f'), + array([[1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1]], 'f')) + + assert_equal(eye(3) == 1, + eye(3, dtype=bool)) + + def test_diag(self): + assert_equal(eye(4, k=1), + array([[0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + [0, 0, 0, 0]])) + + assert_equal(eye(4, k=-1), + array([[0, 0, 0, 0], + [1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0]])) + + def test_2d(self): + assert_equal(eye(4, 3), + array([[1, 0, 0], + [0, 1, 0], + [0, 0, 1], + [0, 0, 0]])) + + assert_equal(eye(3, 4), + array([[1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0]])) + + def test_diag2d(self): + assert_equal(eye(3, 4, k=2), + array([[0, 0, 1, 0], + [0, 0, 0, 1], + [0, 0, 0, 0]])) + + assert_equal(eye(4, 3, k=-2), + array([[0, 0, 0], + [0, 0, 0], + [1, 0, 0], + [0, 1, 0]])) + + def test_eye_bounds(self): + assert_equal(eye(2, 2, 1), [[0, 1], [0, 0]]) + assert_equal(eye(2, 2, -1), [[0, 0], [1, 0]]) + assert_equal(eye(2, 2, 2), [[0, 0], [0, 0]]) + assert_equal(eye(2, 2, -2), [[0, 0], [0, 0]]) + assert_equal(eye(3, 2, 2), [[0, 0], [0, 0], [0, 0]]) + assert_equal(eye(3, 2, 1), [[0, 1], [0, 0], [0, 0]]) + assert_equal(eye(3, 2, -1), [[0, 0], [1, 0], [0, 1]]) + assert_equal(eye(3, 2, -2), [[0, 0], [0, 0], [1, 0]]) + assert_equal(eye(3, 2, -3), [[0, 0], [0, 0], [0, 0]]) + + def test_strings(self): + assert_equal(eye(2, 2, dtype='S3'), + [[b'1', b''], [b'', b'1']]) + + def test_bool(self): + 
assert_equal(eye(2, 2, dtype=bool), [[True, False], [False, True]]) + + def test_order(self): + mat_c = eye(4, 3, k=-1) + mat_f = eye(4, 3, k=-1, order='F') + assert_equal(mat_c, mat_f) + assert mat_c.flags.c_contiguous + assert not mat_c.flags.f_contiguous + assert not mat_f.flags.c_contiguous + assert mat_f.flags.f_contiguous + + +class TestDiag: + def test_vector(self): + vals = (100 * arange(5)).astype('l') + b = zeros((5, 5)) + for k in range(5): + b[k, k] = vals[k] + assert_equal(diag(vals), b) + b = zeros((7, 7)) + c = b.copy() + for k in range(5): + b[k, k + 2] = vals[k] + c[k + 2, k] = vals[k] + assert_equal(diag(vals, k=2), b) + assert_equal(diag(vals, k=-2), c) + + def test_matrix(self, vals=None): + if vals is None: + vals = (100 * get_mat(5) + 1).astype('l') + b = zeros((5,)) + for k in range(5): + b[k] = vals[k, k] + assert_equal(diag(vals), b) + b = b * 0 + for k in range(3): + b[k] = vals[k, k + 2] + assert_equal(diag(vals, 2), b[:3]) + for k in range(3): + b[k] = vals[k + 2, k] + assert_equal(diag(vals, -2), b[:3]) + + def test_fortran_order(self): + vals = array((100 * get_mat(5) + 1), order='F', dtype='l') + self.test_matrix(vals) + + def test_diag_bounds(self): + A = [[1, 2], [3, 4], [5, 6]] + assert_equal(diag(A, k=2), []) + assert_equal(diag(A, k=1), [2]) + assert_equal(diag(A, k=0), [1, 4]) + assert_equal(diag(A, k=-1), [3, 6]) + assert_equal(diag(A, k=-2), [5]) + assert_equal(diag(A, k=-3), []) + + def test_failure(self): + assert_raises(ValueError, diag, [[[1]]]) + + +class TestFliplr: + def test_basic(self): + assert_raises(ValueError, fliplr, ones(4)) + a = get_mat(4) + b = a[:, ::-1] + assert_equal(fliplr(a), b) + a = [[0, 1, 2], + [3, 4, 5]] + b = [[2, 1, 0], + [5, 4, 3]] + assert_equal(fliplr(a), b) + + +class TestFlipud: + def test_basic(self): + a = get_mat(4) + b = a[::-1, :] + assert_equal(flipud(a), b) + a = [[0, 1, 2], + [3, 4, 5]] + b = [[3, 4, 5], + [0, 1, 2]] + assert_equal(flipud(a), b) + + +class TestHistogram2d: + def test_simple(self): + x = array( + [0.41702200, 0.72032449, 1.1437481e-4, 0.302332573, 0.146755891]) + y = array( + [0.09233859, 0.18626021, 0.34556073, 0.39676747, 0.53881673]) + xedges = np.linspace(0, 1, 10) + yedges = np.linspace(0, 1, 10) + H = histogram2d(x, y, (xedges, yedges))[0] + answer = array( + [[0, 0, 0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 0, 1, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]]) + assert_array_equal(H.T, answer) + H = histogram2d(x, y, xedges)[0] + assert_array_equal(H.T, answer) + H, xedges, yedges = histogram2d(list(range(10)), list(range(10))) + assert_array_equal(H, eye(10, 10)) + assert_array_equal(xedges, np.linspace(0, 9, 11)) + assert_array_equal(yedges, np.linspace(0, 9, 11)) + + def test_asym(self): + x = array([1, 1, 2, 3, 4, 4, 4, 5]) + y = array([1, 3, 2, 0, 1, 2, 3, 4]) + H, xed, yed = histogram2d( + x, y, (6, 5), range=[[0, 6], [0, 5]], density=True) + answer = array( + [[0., 0, 0, 0, 0], + [0, 1, 0, 1, 0], + [0, 0, 1, 0, 0], + [1, 0, 0, 0, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 0, 1]]) + assert_array_almost_equal(H, answer/8., 3) + assert_array_equal(xed, np.linspace(0, 6, 7)) + assert_array_equal(yed, np.linspace(0, 5, 6)) + + def test_density(self): + x = array([1, 2, 3, 1, 2, 3, 1, 2, 3]) + y = array([1, 1, 1, 2, 2, 2, 3, 3, 3]) + H, xed, yed = histogram2d( + x, y, [[1, 2, 3, 5], [1, 2, 3, 5]], density=True) + answer = 
array([[1, 1, .5], + [1, 1, .5], + [.5, .5, .25]])/9. + assert_array_almost_equal(H, answer, 3) + + def test_all_outliers(self): + r = np.random.rand(100) + 1. + 1e6 # histogramdd rounds by decimal=6 + H, xed, yed = histogram2d(r, r, (4, 5), range=([0, 1], [0, 1])) + assert_array_equal(H, 0) + + def test_empty(self): + a, edge1, edge2 = histogram2d([], [], bins=([0, 1], [0, 1])) + assert_array_max_ulp(a, array([[0.]])) + + a, edge1, edge2 = histogram2d([], [], bins=4) + assert_array_max_ulp(a, np.zeros((4, 4))) + + def test_binparameter_combination(self): + x = array( + [0, 0.09207008, 0.64575234, 0.12875982, 0.47390599, + 0.59944483, 1]) + y = array( + [0, 0.14344267, 0.48988575, 0.30558665, 0.44700682, + 0.15886423, 1]) + edges = (0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1) + H, xe, ye = histogram2d(x, y, (edges, 4)) + answer = array( + [[2., 0., 0., 0.], + [0., 1., 0., 0.], + [0., 0., 0., 0.], + [0., 0., 0., 0.], + [0., 1., 0., 0.], + [1., 0., 0., 0.], + [0., 1., 0., 0.], + [0., 0., 0., 0.], + [0., 0., 0., 0.], + [0., 0., 0., 1.]]) + assert_array_equal(H, answer) + assert_array_equal(ye, array([0., 0.25, 0.5, 0.75, 1])) + H, xe, ye = histogram2d(x, y, (4, edges)) + answer = array( + [[1., 1., 0., 1., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 1., 0., 0., 0., 0., 0.], + [0., 1., 0., 0., 1., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]]) + assert_array_equal(H, answer) + assert_array_equal(xe, array([0., 0.25, 0.5, 0.75, 1])) + + @requires_array_function + def test_dispatch(self): + class ShouldDispatch: + def __array_function__(self, function, types, args, kwargs): + return types, args, kwargs + + xy = [1, 2] + s_d = ShouldDispatch() + r = histogram2d(s_d, xy) + # Cannot use assert_equal since that dispatches... + assert_(r == ((ShouldDispatch,), (s_d, xy), {})) + r = histogram2d(xy, s_d) + assert_(r == ((ShouldDispatch,), (xy, s_d), {})) + r = histogram2d(xy, xy, bins=s_d) + assert_(r, ((ShouldDispatch,), (xy, xy), dict(bins=s_d))) + r = histogram2d(xy, xy, bins=[s_d, 5]) + assert_(r, ((ShouldDispatch,), (xy, xy), dict(bins=[s_d, 5]))) + assert_raises(Exception, histogram2d, xy, xy, bins=[s_d]) + r = histogram2d(xy, xy, weights=s_d) + assert_(r, ((ShouldDispatch,), (xy, xy), dict(weights=s_d))) + + +class TestTri: + def test_dtype(self): + out = array([[1, 0, 0], + [1, 1, 0], + [1, 1, 1]]) + assert_array_equal(tri(3), out) + assert_array_equal(tri(3, dtype=bool), out.astype(bool)) + + +def test_tril_triu_ndim2(): + for dtype in np.typecodes['AllFloat'] + np.typecodes['AllInteger']: + a = np.ones((2, 2), dtype=dtype) + b = np.tril(a) + c = np.triu(a) + assert_array_equal(b, [[1, 0], [1, 1]]) + assert_array_equal(c, b.T) + # should return the same dtype as the original array + assert_equal(b.dtype, a.dtype) + assert_equal(c.dtype, a.dtype) + + +def test_tril_triu_ndim3(): + for dtype in np.typecodes['AllFloat'] + np.typecodes['AllInteger']: + a = np.array([ + [[1, 1], [1, 1]], + [[1, 1], [1, 0]], + [[1, 1], [0, 0]], + ], dtype=dtype) + a_tril_desired = np.array([ + [[1, 0], [1, 1]], + [[1, 0], [1, 0]], + [[1, 0], [0, 0]], + ], dtype=dtype) + a_triu_desired = np.array([ + [[1, 1], [0, 1]], + [[1, 1], [0, 0]], + [[1, 1], [0, 0]], + ], dtype=dtype) + a_triu_observed = np.triu(a) + a_tril_observed = np.tril(a) + assert_array_equal(a_triu_observed, a_triu_desired) + assert_array_equal(a_tril_observed, a_tril_desired) + assert_equal(a_triu_observed.dtype, a.dtype) + assert_equal(a_tril_observed.dtype, a.dtype) + + +def test_tril_triu_with_inf(): + # Issue 4859 + arr = 
np.array([[1, 1, np.inf], + [1, 1, 1], + [np.inf, 1, 1]]) + out_tril = np.array([[1, 0, 0], + [1, 1, 0], + [np.inf, 1, 1]]) + out_triu = out_tril.T + assert_array_equal(np.triu(arr), out_triu) + assert_array_equal(np.tril(arr), out_tril) + + +def test_tril_triu_dtype(): + # Issue 4916 + # tril and triu should return the same dtype as input + for c in np.typecodes['All']: + if c == 'V': + continue + arr = np.zeros((3, 3), dtype=c) + assert_equal(np.triu(arr).dtype, arr.dtype) + assert_equal(np.tril(arr).dtype, arr.dtype) + + # check special cases + arr = np.array([['2001-01-01T12:00', '2002-02-03T13:56'], + ['2004-01-01T12:00', '2003-01-03T13:45']], + dtype='datetime64') + assert_equal(np.triu(arr).dtype, arr.dtype) + assert_equal(np.tril(arr).dtype, arr.dtype) + + arr = np.zeros((3,3), dtype='f4,f4') + assert_equal(np.triu(arr).dtype, arr.dtype) + assert_equal(np.tril(arr).dtype, arr.dtype) + + +def test_mask_indices(): + # simple test without offset + iu = mask_indices(3, np.triu) + a = np.arange(9).reshape(3, 3) + assert_array_equal(a[iu], array([0, 1, 2, 4, 5, 8])) + # Now with an offset + iu1 = mask_indices(3, np.triu, 1) + assert_array_equal(a[iu1], array([1, 2, 5])) + + +def test_tril_indices(): + # indices without and with offset + il1 = tril_indices(4) + il2 = tril_indices(4, k=2) + il3 = tril_indices(4, m=5) + il4 = tril_indices(4, k=2, m=5) + + a = np.array([[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16]]) + b = np.arange(1, 21).reshape(4, 5) + + # indexing: + assert_array_equal(a[il1], + array([1, 5, 6, 9, 10, 11, 13, 14, 15, 16])) + assert_array_equal(b[il3], + array([1, 6, 7, 11, 12, 13, 16, 17, 18, 19])) + + # And for assigning values: + a[il1] = -1 + assert_array_equal(a, + array([[-1, 2, 3, 4], + [-1, -1, 7, 8], + [-1, -1, -1, 12], + [-1, -1, -1, -1]])) + b[il3] = -1 + assert_array_equal(b, + array([[-1, 2, 3, 4, 5], + [-1, -1, 8, 9, 10], + [-1, -1, -1, 14, 15], + [-1, -1, -1, -1, 20]])) + # These cover almost the whole array (two diagonals right of the main one): + a[il2] = -10 + assert_array_equal(a, + array([[-10, -10, -10, 4], + [-10, -10, -10, -10], + [-10, -10, -10, -10], + [-10, -10, -10, -10]])) + b[il4] = -10 + assert_array_equal(b, + array([[-10, -10, -10, 4, 5], + [-10, -10, -10, -10, 10], + [-10, -10, -10, -10, -10], + [-10, -10, -10, -10, -10]])) + + +class TestTriuIndices: + def test_triu_indices(self): + iu1 = triu_indices(4) + iu2 = triu_indices(4, k=2) + iu3 = triu_indices(4, m=5) + iu4 = triu_indices(4, k=2, m=5) + + a = np.array([[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16]]) + b = np.arange(1, 21).reshape(4, 5) + + # Both for indexing: + assert_array_equal(a[iu1], + array([1, 2, 3, 4, 6, 7, 8, 11, 12, 16])) + assert_array_equal(b[iu3], + array([1, 2, 3, 4, 5, 7, 8, 9, + 10, 13, 14, 15, 19, 20])) + + # And for assigning values: + a[iu1] = -1 + assert_array_equal(a, + array([[-1, -1, -1, -1], + [5, -1, -1, -1], + [9, 10, -1, -1], + [13, 14, 15, -1]])) + b[iu3] = -1 + assert_array_equal(b, + array([[-1, -1, -1, -1, -1], + [6, -1, -1, -1, -1], + [11, 12, -1, -1, -1], + [16, 17, 18, -1, -1]])) + + # These cover almost the whole array (two diagonals right of the + # main one): + a[iu2] = -10 + assert_array_equal(a, + array([[-1, -1, -10, -10], + [5, -1, -1, -10], + [9, 10, -1, -1], + [13, 14, 15, -1]])) + b[iu4] = -10 + assert_array_equal(b, + array([[-1, -1, -10, -10, -10], + [6, -1, -1, -10, -10], + [11, 12, -1, -1, -10], + [16, 17, 18, -1, -1]])) + + +class TestTrilIndicesFrom: + def test_exceptions(self): + 
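# tril_indices_from only accepts a 2-d array +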
assert_raises(ValueError, tril_indices_from, np.ones((2,))) + assert_raises(ValueError, tril_indices_from, np.ones((2, 2, 2))) + # assert_raises(ValueError, tril_indices_from, np.ones((2, 3))) + + +class TestTriuIndicesFrom: + def test_exceptions(self): + assert_raises(ValueError, triu_indices_from, np.ones((2,))) + assert_raises(ValueError, triu_indices_from, np.ones((2, 2, 2))) + # assert_raises(ValueError, triu_indices_from, np.ones((2, 3))) + + +class TestVander: + def test_basic(self): + c = np.array([0, 1, -2, 3]) + v = vander(c) + powers = np.array([[0, 0, 0, 0, 1], + [1, 1, 1, 1, 1], + [16, -8, 4, -2, 1], + [81, 27, 9, 3, 1]]) + # Check default value of N: + assert_array_equal(v, powers[:, 1:]) + # Check a range of N values, including 0 and 5 (greater than default) + m = powers.shape[1] + for n in range(6): + v = vander(c, N=n) + assert_array_equal(v, powers[:, m-n:m]) + + def test_dtypes(self): + c = array([11, -12, 13], dtype=np.int8) + v = vander(c) + expected = np.array([[121, 11, 1], + [144, -12, 1], + [169, 13, 1]]) + assert_array_equal(v, expected) + + c = array([1.0+1j, 1.0-1j]) + v = vander(c, N=3) + expected = np.array([[2j, 1+1j, 1], + [-2j, 1-1j, 1]]) + # The data is floating point, but the values are small integers, + # so assert_array_equal *should* be safe here (rather than, say, + # assert_array_almost_equal). + assert_array_equal(v, expected) diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/test_type_check.py b/MLPY/Lib/site-packages/numpy/lib/tests/test_type_check.py new file mode 100644 index 0000000000000000000000000000000000000000..10218200875628bdbfc83f68eb6b246da1016d6d --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/tests/test_type_check.py @@ -0,0 +1,478 @@ +import numpy as np +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, assert_raises + ) +from numpy.lib.type_check import ( + common_type, mintypecode, isreal, iscomplex, isposinf, isneginf, + nan_to_num, isrealobj, iscomplexobj, asfarray, real_if_close + ) + + +def assert_all(x): + assert_(np.all(x), x) + + +class TestCommonType: + def test_basic(self): + ai32 = np.array([[1, 2], [3, 4]], dtype=np.int32) + af16 = np.array([[1, 2], [3, 4]], dtype=np.float16) + af32 = np.array([[1, 2], [3, 4]], dtype=np.float32) + af64 = np.array([[1, 2], [3, 4]], dtype=np.float64) + acs = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.csingle) + acd = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.cdouble) + assert_(common_type(ai32) == np.float64) + assert_(common_type(af16) == np.float16) + assert_(common_type(af32) == np.float32) + assert_(common_type(af64) == np.float64) + assert_(common_type(acs) == np.csingle) + assert_(common_type(acd) == np.cdouble) + + +class TestMintypecode: + + def test_default_1(self): + for itype in '1bcsuwil': + assert_equal(mintypecode(itype), 'd') + assert_equal(mintypecode('f'), 'f') + assert_equal(mintypecode('d'), 'd') + assert_equal(mintypecode('F'), 'F') + assert_equal(mintypecode('D'), 'D') + + def test_default_2(self): + for itype in '1bcsuwil': + assert_equal(mintypecode(itype+'f'), 'f') + assert_equal(mintypecode(itype+'d'), 'd') + assert_equal(mintypecode(itype+'F'), 'F') + assert_equal(mintypecode(itype+'D'), 'D') + assert_equal(mintypecode('ff'), 'f') + assert_equal(mintypecode('fd'), 'd') + assert_equal(mintypecode('fF'), 'F') + assert_equal(mintypecode('fD'), 'D') + assert_equal(mintypecode('df'), 'd') + assert_equal(mintypecode('dd'), 'd') + #assert_equal(mintypecode('dF',savespace=1),'F') + assert_equal(mintypecode('dF'), 'D') + 
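# mixing double precision 'd' with any complex typecode promotes to complex double 'D' +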
assert_equal(mintypecode('dD'), 'D') + assert_equal(mintypecode('Ff'), 'F') + #assert_equal(mintypecode('Fd',savespace=1),'F') + assert_equal(mintypecode('Fd'), 'D') + assert_equal(mintypecode('FF'), 'F') + assert_equal(mintypecode('FD'), 'D') + assert_equal(mintypecode('Df'), 'D') + assert_equal(mintypecode('Dd'), 'D') + assert_equal(mintypecode('DF'), 'D') + assert_equal(mintypecode('DD'), 'D') + + def test_default_3(self): + assert_equal(mintypecode('fdF'), 'D') + #assert_equal(mintypecode('fdF',savespace=1),'F') + assert_equal(mintypecode('fdD'), 'D') + assert_equal(mintypecode('fFD'), 'D') + assert_equal(mintypecode('dFD'), 'D') + + assert_equal(mintypecode('ifd'), 'd') + assert_equal(mintypecode('ifF'), 'F') + assert_equal(mintypecode('ifD'), 'D') + assert_equal(mintypecode('idF'), 'D') + #assert_equal(mintypecode('idF',savespace=1),'F') + assert_equal(mintypecode('idD'), 'D') + + +class TestIsscalar: + + def test_basic(self): + assert_(np.isscalar(3)) + assert_(not np.isscalar([3])) + assert_(not np.isscalar((3,))) + assert_(np.isscalar(3j)) + assert_(np.isscalar(4.0)) + + +class TestReal: + + def test_real(self): + y = np.random.rand(10,) + assert_array_equal(y, np.real(y)) + + y = np.array(1) + out = np.real(y) + assert_array_equal(y, out) + assert_(isinstance(out, np.ndarray)) + + y = 1 + out = np.real(y) + assert_equal(y, out) + assert_(not isinstance(out, np.ndarray)) + + def test_cmplx(self): + y = np.random.rand(10,)+1j*np.random.rand(10,) + assert_array_equal(y.real, np.real(y)) + + y = np.array(1 + 1j) + out = np.real(y) + assert_array_equal(y.real, out) + assert_(isinstance(out, np.ndarray)) + + y = 1 + 1j + out = np.real(y) + assert_equal(1.0, out) + assert_(not isinstance(out, np.ndarray)) + + +class TestImag: + + def test_real(self): + y = np.random.rand(10,) + assert_array_equal(0, np.imag(y)) + + y = np.array(1) + out = np.imag(y) + assert_array_equal(0, out) + assert_(isinstance(out, np.ndarray)) + + y = 1 + out = np.imag(y) + assert_equal(0, out) + assert_(not isinstance(out, np.ndarray)) + + def test_cmplx(self): + y = np.random.rand(10,)+1j*np.random.rand(10,) + assert_array_equal(y.imag, np.imag(y)) + + y = np.array(1 + 1j) + out = np.imag(y) + assert_array_equal(y.imag, out) + assert_(isinstance(out, np.ndarray)) + + y = 1 + 1j + out = np.imag(y) + assert_equal(1.0, out) + assert_(not isinstance(out, np.ndarray)) + + +class TestIscomplex: + + def test_fail(self): + z = np.array([-1, 0, 1]) + res = iscomplex(z) + assert_(not np.sometrue(res, axis=0)) + + def test_pass(self): + z = np.array([-1j, 1, 0]) + res = iscomplex(z) + assert_array_equal(res, [1, 0, 0]) + + +class TestIsreal: + + def test_pass(self): + z = np.array([-1, 0, 1j]) + res = isreal(z) + assert_array_equal(res, [1, 1, 0]) + + def test_fail(self): + z = np.array([-1j, 1, 0]) + res = isreal(z) + assert_array_equal(res, [0, 1, 1]) + + +class TestIscomplexobj: + + def test_basic(self): + z = np.array([-1, 0, 1]) + assert_(not iscomplexobj(z)) + z = np.array([-1j, 0, -1]) + assert_(iscomplexobj(z)) + + def test_scalar(self): + assert_(not iscomplexobj(1.0)) + assert_(iscomplexobj(1+0j)) + + def test_list(self): + assert_(iscomplexobj([3, 1+0j, True])) + assert_(not iscomplexobj([3, 1, True])) + + def test_duck(self): + class DummyComplexArray: + @property + def dtype(self): + return np.dtype(complex) + dummy = DummyComplexArray() + assert_(iscomplexobj(dummy)) + + def test_pandas_duck(self): + # This tests a custom np.dtype duck-typed class, such as used by pandas + # (pandas.core.dtypes) + class 
PdComplex(np.complex128):
+            pass
+        class PdDtype:
+            name = 'category'
+            names = None
+            type = PdComplex
+            kind = 'c'
+            str = '<c16'
+            base = np.dtype('complex128')
+        dummy = PdDtype()
+        assert_(iscomplexobj(dummy))
+
+
+class TestNanToNum:
+
+    def test_generic(self):
+        with np.errstate(divide='ignore', invalid='ignore'):
+            vals = nan_to_num(np.array((-1., 0, 1))/0.)
+        assert_all(vals[0] < -1e10) and assert_all(np.isfinite(vals[0]))
+        assert_(vals[1] == 0)
+        assert_all(vals[2] > 1e10) and assert_all(np.isfinite(vals[2]))
+        assert_equal(type(vals), np.ndarray)
+
+        # perform the same tests but with nan, posinf and neginf keywords
+        with np.errstate(divide='ignore', invalid='ignore'):
+            vals = nan_to_num(np.array((-1., 0, 1))/0.,
+                              nan=10, posinf=20, neginf=30)
+        assert_equal(vals, [30, 10, 20])
+        assert_all(np.isfinite(vals[[0, 2]]))
+        assert_equal(type(vals), np.ndarray)
+
+        # perform the same test but in-place
+        with np.errstate(divide='ignore', invalid='ignore'):
+            vals = np.array((-1., 0, 1))/0.
+        result = nan_to_num(vals, copy=False)
+
+        assert_(result is vals)
+        assert_all(vals[0] < -1e10) and assert_all(np.isfinite(vals[0]))
+        assert_(vals[1] == 0)
+        assert_all(vals[2] > 1e10) and assert_all(np.isfinite(vals[2]))
+        assert_equal(type(vals), np.ndarray)
+
+        # perform the same test but in-place
+        with np.errstate(divide='ignore', invalid='ignore'):
+            vals = np.array((-1., 0, 1))/0.
+        result = nan_to_num(vals, copy=False, nan=10, posinf=20, neginf=30)
+
+        assert_(result is vals)
+        assert_equal(vals, [30, 10, 20])
+        assert_all(np.isfinite(vals[[0, 2]]))
+        assert_equal(type(vals), np.ndarray)
+
+    def test_array(self):
+        vals = nan_to_num([1])
+        assert_array_equal(vals, np.array([1], int))
+        assert_equal(type(vals), np.ndarray)
+        vals = nan_to_num([1], nan=10, posinf=20, neginf=30)
+        assert_array_equal(vals, np.array([1], int))
+        assert_equal(type(vals), np.ndarray)
+
+    def test_integer(self):
+        vals = nan_to_num(1)
+        assert_all(vals == 1)
+        assert_equal(type(vals), np.int_)
+        vals = nan_to_num(1, nan=10, posinf=20, neginf=30)
+        assert_all(vals == 1)
+        assert_equal(type(vals), np.int_)
+
+    def test_float(self):
+        vals = nan_to_num(1.0)
+        assert_all(vals == 1.0)
+        assert_equal(type(vals), np.float_)
+        vals = nan_to_num(1.1, nan=10, posinf=20, neginf=30)
+        assert_all(vals == 1.1)
+        assert_equal(type(vals), np.float_)
+
+    def test_complex_good(self):
+        vals = nan_to_num(1+1j)
+        assert_all(vals == 1+1j)
+        assert_equal(type(vals), np.complex_)
+        vals = nan_to_num(1+1j, nan=10, posinf=20, neginf=30)
+        assert_all(vals == 1+1j)
+        assert_equal(type(vals), np.complex_)
+
+    def test_complex_bad(self):
+        with np.errstate(divide='ignore', invalid='ignore'):
+            v = 1 + 1j
+            v += np.array(0+1.j)/0.
+        vals = nan_to_num(v)
+        # !! This is actually (unexpectedly) zero
+        assert_all(np.isfinite(vals))
+        assert_equal(type(vals), np.complex_)
+
+    def test_complex_bad2(self):
+        with np.errstate(divide='ignore', invalid='ignore'):
+            v = 1 + 1j
+            v += np.array(-1+1.j)/0.
+        vals = nan_to_num(v)
+        assert_all(np.isfinite(vals))
+        assert_equal(type(vals), np.complex_)
+        # Fixme
+        #assert_all(vals.imag > 1e10) and assert_all(np.isfinite(vals))
+        # !! This is actually (unexpectedly) positive
+        # !! inf.  Comment out for now, and see if it
+        # !! changes
+        #assert_all(vals.real < -1e10) and assert_all(np.isfinite(vals))
+
+    def test_do_not_rewrite_previous_keyword(self):
+        # This is done to test that when, for instance, nan=np.inf then these
+        # values are not rewritten by posinf keyword to the posinf value.
+ with np.errstate(divide='ignore', invalid='ignore'): + vals = nan_to_num(np.array((-1., 0, 1))/0., nan=np.inf, posinf=999) + assert_all(np.isfinite(vals[[0, 2]])) + assert_all(vals[0] < -1e10) + assert_equal(vals[[1, 2]], [np.inf, 999]) + assert_equal(type(vals), np.ndarray) + + +class TestRealIfClose: + + def test_basic(self): + a = np.random.rand(10) + b = real_if_close(a+1e-15j) + assert_all(isrealobj(b)) + assert_array_equal(a, b) + b = real_if_close(a+1e-7j) + assert_all(iscomplexobj(b)) + b = real_if_close(a+1e-7j, tol=1e-6) + assert_all(isrealobj(b)) + + +class TestArrayConversion: + + def test_asfarray(self): + a = asfarray(np.array([1, 2, 3])) + assert_equal(a.__class__, np.ndarray) + assert_(np.issubdtype(a.dtype, np.floating)) + + # previously this would infer dtypes from arrays, unlike every single + # other numpy function + assert_raises(TypeError, + asfarray, np.array([1, 2, 3]), dtype=np.array(1.0)) diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/test_ufunclike.py b/MLPY/Lib/site-packages/numpy/lib/tests/test_ufunclike.py new file mode 100644 index 0000000000000000000000000000000000000000..17bbc94f72969128aaf6a9b83db59ed1a18e9228 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/tests/test_ufunclike.py @@ -0,0 +1,104 @@ +import numpy as np +import numpy.core as nx +import numpy.lib.ufunclike as ufl +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, assert_warns, assert_raises +) + + +class TestUfunclike: + + def test_isposinf(self): + a = nx.array([nx.inf, -nx.inf, nx.nan, 0.0, 3.0, -3.0]) + out = nx.zeros(a.shape, bool) + tgt = nx.array([True, False, False, False, False, False]) + + res = ufl.isposinf(a) + assert_equal(res, tgt) + res = ufl.isposinf(a, out) + assert_equal(res, tgt) + assert_equal(out, tgt) + + a = a.astype(np.complex_) + with assert_raises(TypeError): + ufl.isposinf(a) + + def test_isneginf(self): + a = nx.array([nx.inf, -nx.inf, nx.nan, 0.0, 3.0, -3.0]) + out = nx.zeros(a.shape, bool) + tgt = nx.array([False, True, False, False, False, False]) + + res = ufl.isneginf(a) + assert_equal(res, tgt) + res = ufl.isneginf(a, out) + assert_equal(res, tgt) + assert_equal(out, tgt) + + a = a.astype(np.complex_) + with assert_raises(TypeError): + ufl.isneginf(a) + + def test_fix(self): + a = nx.array([[1.0, 1.1, 1.5, 1.8], [-1.0, -1.1, -1.5, -1.8]]) + out = nx.zeros(a.shape, float) + tgt = nx.array([[1., 1., 1., 1.], [-1., -1., -1., -1.]]) + + res = ufl.fix(a) + assert_equal(res, tgt) + res = ufl.fix(a, out) + assert_equal(res, tgt) + assert_equal(out, tgt) + assert_equal(ufl.fix(3.14), 3) + + def test_fix_with_subclass(self): + class MyArray(nx.ndarray): + def __new__(cls, data, metadata=None): + res = nx.array(data, copy=True).view(cls) + res.metadata = metadata + return res + + def __array_wrap__(self, obj, context=None): + if isinstance(obj, MyArray): + obj.metadata = self.metadata + return obj + + def __array_finalize__(self, obj): + self.metadata = getattr(obj, 'metadata', None) + return self + + a = nx.array([1.1, -1.1]) + m = MyArray(a, metadata='foo') + f = ufl.fix(m) + assert_array_equal(f, nx.array([1, -1])) + assert_(isinstance(f, MyArray)) + assert_equal(f.metadata, 'foo') + + # check 0d arrays don't decay to scalars + m0d = m[0,...] 
+ m0d.metadata = 'bar' + f0d = ufl.fix(m0d) + assert_(isinstance(f0d, MyArray)) + assert_equal(f0d.metadata, 'bar') + + def test_deprecated(self): + # NumPy 1.13.0, 2017-04-26 + assert_warns(DeprecationWarning, ufl.fix, [1, 2], y=nx.empty(2)) + assert_warns(DeprecationWarning, ufl.isposinf, [1, 2], y=nx.empty(2)) + assert_warns(DeprecationWarning, ufl.isneginf, [1, 2], y=nx.empty(2)) + + def test_scalar(self): + x = np.inf + actual = np.isposinf(x) + expected = np.True_ + assert_equal(actual, expected) + assert_equal(type(actual), type(expected)) + + x = -3.4 + actual = np.fix(x) + expected = np.float64(-3.0) + assert_equal(actual, expected) + assert_equal(type(actual), type(expected)) + + out = np.array(0.0) + actual = np.fix(x, out=out) + assert_(actual is out) diff --git a/MLPY/Lib/site-packages/numpy/lib/tests/test_utils.py b/MLPY/Lib/site-packages/numpy/lib/tests/test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7282d80d235ec269a544c67a52b3b66b2d8dbc44 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/tests/test_utils.py @@ -0,0 +1,174 @@ +import inspect +import sys +import pytest + +from numpy.core import arange +from numpy.testing import assert_, assert_equal, assert_raises_regex +from numpy.lib import deprecate, deprecate_with_doc +import numpy.lib.utils as utils + +from io import StringIO + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.skipif( + sys.version_info == (3, 10, 0, "candidate", 1), + reason="Broken as of bpo-44524", +) +def test_lookfor(): + out = StringIO() + utils.lookfor('eigenvalue', module='numpy', output=out, + import_modules=False) + out = out.getvalue() + assert_('numpy.linalg.eig' in out) + + +@deprecate +def old_func(self, x): + return x + + +@deprecate(message="Rather use new_func2") +def old_func2(self, x): + return x + + +def old_func3(self, x): + return x +new_func3 = deprecate(old_func3, old_name="old_func3", new_name="new_func3") + + +def old_func4(self, x): + """Summary. + + Further info. + """ + return x +new_func4 = deprecate(old_func4) + + +def old_func5(self, x): + """Summary. + + Bizarre indentation. + """ + return x +new_func5 = deprecate(old_func5, message="This function is\ndeprecated.") + + +def old_func6(self, x): + """ + Also in PEP-257. 
+ """ + return x +new_func6 = deprecate(old_func6) + + +@deprecate_with_doc(msg="Rather use new_func7") +def old_func7(self,x): + return x + + +def test_deprecate_decorator(): + assert_('deprecated' in old_func.__doc__) + + +def test_deprecate_decorator_message(): + assert_('Rather use new_func2' in old_func2.__doc__) + + +def test_deprecate_fn(): + assert_('old_func3' in new_func3.__doc__) + assert_('new_func3' in new_func3.__doc__) + + +def test_deprecate_with_doc_decorator_message(): + assert_('Rather use new_func7' in old_func7.__doc__) + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="-OO discards docstrings") +@pytest.mark.parametrize('old_func, new_func', [ + (old_func4, new_func4), + (old_func5, new_func5), + (old_func6, new_func6), +]) +def test_deprecate_help_indentation(old_func, new_func): + _compare_docs(old_func, new_func) + # Ensure we don't mess up the indentation + for knd, func in (('old', old_func), ('new', new_func)): + for li, line in enumerate(func.__doc__.split('\n')): + if li == 0: + assert line.startswith(' ') or not line.startswith(' '), knd + elif line: + assert line.startswith(' '), knd + + +def _compare_docs(old_func, new_func): + old_doc = inspect.getdoc(old_func) + new_doc = inspect.getdoc(new_func) + index = new_doc.index('\n\n') + 2 + assert_equal(new_doc[index:], old_doc) + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="-OO discards docstrings") +def test_deprecate_preserve_whitespace(): + assert_('\n Bizarre' in new_func5.__doc__) + + +def test_safe_eval_nameconstant(): + # Test if safe_eval supports Python 3.4 _ast.NameConstant + utils.safe_eval('None') + + +class TestByteBounds: + + def test_byte_bounds(self): + # pointer difference matches size * itemsize + # due to contiguity + a = arange(12).reshape(3, 4) + low, high = utils.byte_bounds(a) + assert_equal(high - low, a.size * a.itemsize) + + def test_unusual_order_positive_stride(self): + a = arange(12).reshape(3, 4) + b = a.T + low, high = utils.byte_bounds(b) + assert_equal(high - low, b.size * b.itemsize) + + def test_unusual_order_negative_stride(self): + a = arange(12).reshape(3, 4) + b = a.T[::-1] + low, high = utils.byte_bounds(b) + assert_equal(high - low, b.size * b.itemsize) + + def test_strided(self): + a = arange(12) + b = a[::2] + low, high = utils.byte_bounds(b) + # the largest pointer address is lost (even numbers only in the + # stride), and compensate addresses for striding by 2 + assert_equal(high - low, b.size * 2 * b.itemsize - b.itemsize) + + +def test_assert_raises_regex_context_manager(): + with assert_raises_regex(ValueError, 'no deprecation warning'): + raise ValueError('no deprecation warning') + + +def test_info_method_heading(): + # info(class) should only print "Methods:" heading if methods exist + + class NoPublicMethods: + pass + + class WithPublicMethods: + def first_method(): + pass + + def _has_method_heading(cls): + out = StringIO() + utils.info(cls, output=out) + return 'Methods:' in out.getvalue() + + assert _has_method_heading(WithPublicMethods) + assert not _has_method_heading(NoPublicMethods) diff --git a/MLPY/Lib/site-packages/numpy/lib/twodim_base.py b/MLPY/Lib/site-packages/numpy/lib/twodim_base.py new file mode 100644 index 0000000000000000000000000000000000000000..6622f778f2fb64522451ad90844a47547b9e4e73 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/twodim_base.py @@ -0,0 +1,1059 @@ +""" Basic functions for manipulating 2d arrays + +""" +import functools + +from numpy.core.numeric import ( + asanyarray, arange, zeros, greater_equal, 
multiply, ones,
+    asarray, where, int8, int16, int32, int64, intp, empty, promote_types,
+    diagonal, nonzero, indices
+    )
+from numpy.core.overrides import set_array_function_like_doc, set_module
+from numpy.core import overrides
+from numpy.core import iinfo
+from numpy.lib.stride_tricks import broadcast_to
+
+
+__all__ = [
+    'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'tri', 'triu',
+    'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
+    'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
+
+
+array_function_dispatch = functools.partial(
+    overrides.array_function_dispatch, module='numpy')
+
+
+i1 = iinfo(int8)
+i2 = iinfo(int16)
+i4 = iinfo(int32)
+
+
+def _min_int(low, high):
+    """ get small int that fits the range """
+    if high <= i1.max and low >= i1.min:
+        return int8
+    if high <= i2.max and low >= i2.min:
+        return int16
+    if high <= i4.max and low >= i4.min:
+        return int32
+    return int64
+
+
+def _flip_dispatcher(m):
+    return (m,)
+
+
+@array_function_dispatch(_flip_dispatcher)
+def fliplr(m):
+    """
+    Reverse the order of elements along axis 1 (left/right).
+
+    For a 2-D array, this flips the entries in each row in the left/right
+    direction. Columns are preserved, but appear in a different order than
+    before.
+
+    Parameters
+    ----------
+    m : array_like
+        Input array, must be at least 2-D.
+
+    Returns
+    -------
+    f : ndarray
+        A view of `m` with the columns reversed. Since a view
+        is returned, this operation is :math:`\\mathcal O(1)`.
+
+    See Also
+    --------
+    flipud : Flip array in the up/down direction.
+    flip : Flip array in one or more dimensions.
+    rot90 : Rotate array counterclockwise.
+
+    Notes
+    -----
+    Equivalent to ``m[:,::-1]`` or ``np.flip(m, axis=1)``.
+    Requires the array to be at least 2-D.
+
+    Examples
+    --------
+    >>> A = np.diag([1.,2.,3.])
+    >>> A
+    array([[1., 0., 0.],
+           [0., 2., 0.],
+           [0., 0., 3.]])
+    >>> np.fliplr(A)
+    array([[0., 0., 1.],
+           [0., 2., 0.],
+           [3., 0., 0.]])
+
+    >>> A = np.random.randn(2,3,5)
+    >>> np.all(np.fliplr(A) == A[:,::-1,...])
+    True
+
+    """
+    m = asanyarray(m)
+    if m.ndim < 2:
+        raise ValueError("Input must be >= 2-d.")
+    return m[:, ::-1]
+
+
+@array_function_dispatch(_flip_dispatcher)
+def flipud(m):
+    """
+    Reverse the order of elements along axis 0 (up/down).
+
+    For a 2-D array, this flips the entries in each column in the up/down
+    direction. Rows are preserved, but appear in a different order than before.
+
+    Parameters
+    ----------
+    m : array_like
+        Input array.
+
+    Returns
+    -------
+    out : array_like
+        A view of `m` with the rows reversed. Since a view is
+        returned, this operation is :math:`\\mathcal O(1)`.
+
+    See Also
+    --------
+    fliplr : Flip array in the left/right direction.
+    flip : Flip array in one or more dimensions.
+    rot90 : Rotate array counterclockwise.
+
+    Notes
+    -----
+    Equivalent to ``m[::-1, ...]`` or ``np.flip(m, axis=0)``.
+    Requires the array to be at least 1-D.
+
+    Examples
+    --------
+    >>> A = np.diag([1.0, 2, 3])
+    >>> A
+    array([[1., 0., 0.],
+           [0., 2., 0.],
+           [0., 0., 3.]])
+    >>> np.flipud(A)
+    array([[0., 0., 3.],
+           [0., 2., 0.],
+           [1., 0., 0.]])
+
+    >>> A = np.random.randn(2,3,5)
+    >>> np.all(np.flipud(A) == A[::-1,...])
+    True
+
+    >>> np.flipud([1,2])
+    array([2, 1])
+
+    """
+    m = asanyarray(m)
+    if m.ndim < 1:
+        raise ValueError("Input must be >= 1-d.")
+    return m[::-1, ...]
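+
+# A small usage sketch (illustrative only, not part of the upstream module):
+# because `fliplr` and `flipud` return views rather than copies, writing
+# through the flipped result modifies the original array:
+#
+#     >>> a = np.arange(4).reshape(2, 2)
+#     >>> np.flipud(a)[0, 0] = 99   # flipud(a)[0] aliases a[-1]
+#     >>> a
+#     array([[ 0,  1],
+#            [99,  3]])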
+ + +def _eye_dispatcher(N, M=None, k=None, dtype=None, order=None, *, like=None): + return (like,) + + +@set_array_function_like_doc +@set_module('numpy') +def eye(N, M=None, k=0, dtype=float, order='C', *, like=None): + """ + Return a 2-D array with ones on the diagonal and zeros elsewhere. + + Parameters + ---------- + N : int + Number of rows in the output. + M : int, optional + Number of columns in the output. If None, defaults to `N`. + k : int, optional + Index of the diagonal: 0 (the default) refers to the main diagonal, + a positive value refers to an upper diagonal, and a negative value + to a lower diagonal. + dtype : data-type, optional + Data-type of the returned array. + order : {'C', 'F'}, optional + Whether the output should be stored in row-major (C-style) or + column-major (Fortran-style) order in memory. + + .. versionadded:: 1.14.0 + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + I : ndarray of shape (N,M) + An array where all elements are equal to zero, except for the `k`-th + diagonal, whose values are equal to one. + + See Also + -------- + identity : (almost) equivalent function + diag : diagonal 2-D array from a 1-D array specified by the user. + + Examples + -------- + >>> np.eye(2, dtype=int) + array([[1, 0], + [0, 1]]) + >>> np.eye(3, k=1) + array([[0., 1., 0.], + [0., 0., 1.], + [0., 0., 0.]]) + + """ + if like is not None: + return _eye_with_like(N, M=M, k=k, dtype=dtype, order=order, like=like) + if M is None: + M = N + m = zeros((N, M), dtype=dtype, order=order) + if k >= M: + return m + if k >= 0: + i = k + else: + i = (-k) * M + m[:M-k].flat[i::M+1] = 1 + return m + + +_eye_with_like = array_function_dispatch( + _eye_dispatcher +)(eye) + + +def _diag_dispatcher(v, k=None): + return (v,) + + +@array_function_dispatch(_diag_dispatcher) +def diag(v, k=0): + """ + Extract a diagonal or construct a diagonal array. + + See the more detailed documentation for ``numpy.diagonal`` if you use this + function to extract a diagonal and wish to write to the resulting array; + whether it returns a copy or a view depends on what version of numpy you + are using. + + Parameters + ---------- + v : array_like + If `v` is a 2-D array, return a copy of its `k`-th diagonal. + If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th + diagonal. + k : int, optional + Diagonal in question. The default is 0. Use `k>0` for diagonals + above the main diagonal, and `k<0` for diagonals below the main + diagonal. + + Returns + ------- + out : ndarray + The extracted diagonal or constructed diagonal array. + + See Also + -------- + diagonal : Return specified diagonals. + diagflat : Create a 2-D array with the flattened input as a diagonal. + trace : Sum along diagonals. + triu : Upper triangle of an array. + tril : Lower triangle of an array. 
+ + Examples + -------- + >>> x = np.arange(9).reshape((3,3)) + >>> x + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + + >>> np.diag(x) + array([0, 4, 8]) + >>> np.diag(x, k=1) + array([1, 5]) + >>> np.diag(x, k=-1) + array([3, 7]) + + >>> np.diag(np.diag(x)) + array([[0, 0, 0], + [0, 4, 0], + [0, 0, 8]]) + + """ + v = asanyarray(v) + s = v.shape + if len(s) == 1: + n = s[0]+abs(k) + res = zeros((n, n), v.dtype) + if k >= 0: + i = k + else: + i = (-k) * n + res[:n-k].flat[i::n+1] = v + return res + elif len(s) == 2: + return diagonal(v, k) + else: + raise ValueError("Input must be 1- or 2-d.") + + +@array_function_dispatch(_diag_dispatcher) +def diagflat(v, k=0): + """ + Create a two-dimensional array with the flattened input as a diagonal. + + Parameters + ---------- + v : array_like + Input data, which is flattened and set as the `k`-th + diagonal of the output. + k : int, optional + Diagonal to set; 0, the default, corresponds to the "main" diagonal, + a positive (negative) `k` giving the number of the diagonal above + (below) the main. + + Returns + ------- + out : ndarray + The 2-D output array. + + See Also + -------- + diag : MATLAB work-alike for 1-D and 2-D arrays. + diagonal : Return specified diagonals. + trace : Sum along diagonals. + + Examples + -------- + >>> np.diagflat([[1,2], [3,4]]) + array([[1, 0, 0, 0], + [0, 2, 0, 0], + [0, 0, 3, 0], + [0, 0, 0, 4]]) + + >>> np.diagflat([1,2], 1) + array([[0, 1, 0], + [0, 0, 2], + [0, 0, 0]]) + + """ + try: + wrap = v.__array_wrap__ + except AttributeError: + wrap = None + v = asarray(v).ravel() + s = len(v) + n = s + abs(k) + res = zeros((n, n), v.dtype) + if (k >= 0): + i = arange(0, n-k, dtype=intp) + fi = i+k+i*n + else: + i = arange(0, n+k, dtype=intp) + fi = i+(i-k)*n + res.flat[fi] = v + if not wrap: + return res + return wrap(res) + + +def _tri_dispatcher(N, M=None, k=None, dtype=None, *, like=None): + return (like,) + + +@set_array_function_like_doc +@set_module('numpy') +def tri(N, M=None, k=0, dtype=float, *, like=None): + """ + An array with ones at and below the given diagonal and zeros elsewhere. + + Parameters + ---------- + N : int + Number of rows in the array. + M : int, optional + Number of columns in the array. + By default, `M` is taken equal to `N`. + k : int, optional + The sub-diagonal at and below which the array is filled. + `k` = 0 is the main diagonal, while `k` < 0 is below it, + and `k` > 0 is above. The default is 0. + dtype : dtype, optional + Data type of the returned array. The default is float. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + tri : ndarray of shape (N, M) + Array with its lower triangle filled with ones and zero elsewhere; + in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise. 
+ + Examples + -------- + >>> np.tri(3, 5, 2, dtype=int) + array([[1, 1, 1, 0, 0], + [1, 1, 1, 1, 0], + [1, 1, 1, 1, 1]]) + + >>> np.tri(3, 5, -1) + array([[0., 0., 0., 0., 0.], + [1., 0., 0., 0., 0.], + [1., 1., 0., 0., 0.]]) + + """ + if like is not None: + return _tri_with_like(N, M=M, k=k, dtype=dtype, like=like) + + if M is None: + M = N + + m = greater_equal.outer(arange(N, dtype=_min_int(0, N)), + arange(-k, M-k, dtype=_min_int(-k, M - k))) + + # Avoid making a copy if the requested type is already bool + m = m.astype(dtype, copy=False) + + return m + + +_tri_with_like = array_function_dispatch( + _tri_dispatcher +)(tri) + + +def _trilu_dispatcher(m, k=None): + return (m,) + + +@array_function_dispatch(_trilu_dispatcher) +def tril(m, k=0): + """ + Lower triangle of an array. + + Return a copy of an array with elements above the `k`-th diagonal zeroed. + + Parameters + ---------- + m : array_like, shape (M, N) + Input array. + k : int, optional + Diagonal above which to zero elements. `k = 0` (the default) is the + main diagonal, `k < 0` is below it and `k > 0` is above. + + Returns + ------- + tril : ndarray, shape (M, N) + Lower triangle of `m`, of same shape and data-type as `m`. + + See Also + -------- + triu : same thing, only for the upper triangle + + Examples + -------- + >>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) + array([[ 0, 0, 0], + [ 4, 0, 0], + [ 7, 8, 0], + [10, 11, 12]]) + + """ + m = asanyarray(m) + mask = tri(*m.shape[-2:], k=k, dtype=bool) + + return where(mask, m, zeros(1, m.dtype)) + + +@array_function_dispatch(_trilu_dispatcher) +def triu(m, k=0): + """ + Upper triangle of an array. + + Return a copy of an array with the elements below the `k`-th diagonal + zeroed. + + Please refer to the documentation for `tril` for further details. + + See Also + -------- + tril : lower triangle of an array + + Examples + -------- + >>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) + array([[ 1, 2, 3], + [ 4, 5, 6], + [ 0, 8, 9], + [ 0, 0, 12]]) + + """ + m = asanyarray(m) + mask = tri(*m.shape[-2:], k=k-1, dtype=bool) + + return where(mask, zeros(1, m.dtype), m) + + +def _vander_dispatcher(x, N=None, increasing=None): + return (x,) + + +# Originally borrowed from John Hunter and matplotlib +@array_function_dispatch(_vander_dispatcher) +def vander(x, N=None, increasing=False): + """ + Generate a Vandermonde matrix. + + The columns of the output matrix are powers of the input vector. The + order of the powers is determined by the `increasing` boolean argument. + Specifically, when `increasing` is False, the `i`-th output column is + the input vector raised element-wise to the power of ``N - i - 1``. Such + a matrix with a geometric progression in each row is named for Alexandre- + Theophile Vandermonde. + + Parameters + ---------- + x : array_like + 1-D input array. + N : int, optional + Number of columns in the output. If `N` is not specified, a square + array is returned (``N = len(x)``). + increasing : bool, optional + Order of the powers of the columns. If True, the powers increase + from left to right, if False (the default) they are reversed. + + .. versionadded:: 1.9.0 + + Returns + ------- + out : ndarray + Vandermonde matrix. If `increasing` is False, the first column is + ``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is + True, the columns are ``x^0, x^1, ..., x^(N-1)``. 
+ + See Also + -------- + polynomial.polynomial.polyvander + + Examples + -------- + >>> x = np.array([1, 2, 3, 5]) + >>> N = 3 + >>> np.vander(x, N) + array([[ 1, 1, 1], + [ 4, 2, 1], + [ 9, 3, 1], + [25, 5, 1]]) + + >>> np.column_stack([x**(N-1-i) for i in range(N)]) + array([[ 1, 1, 1], + [ 4, 2, 1], + [ 9, 3, 1], + [25, 5, 1]]) + + >>> x = np.array([1, 2, 3, 5]) + >>> np.vander(x) + array([[ 1, 1, 1, 1], + [ 8, 4, 2, 1], + [ 27, 9, 3, 1], + [125, 25, 5, 1]]) + >>> np.vander(x, increasing=True) + array([[ 1, 1, 1, 1], + [ 1, 2, 4, 8], + [ 1, 3, 9, 27], + [ 1, 5, 25, 125]]) + + The determinant of a square Vandermonde matrix is the product + of the differences between the values of the input vector: + + >>> np.linalg.det(np.vander(x)) + 48.000000000000043 # may vary + >>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1) + 48 + + """ + x = asarray(x) + if x.ndim != 1: + raise ValueError("x must be a one-dimensional array or sequence.") + if N is None: + N = len(x) + + v = empty((len(x), N), dtype=promote_types(x.dtype, int)) + tmp = v[:, ::-1] if not increasing else v + + if N > 0: + tmp[:, 0] = 1 + if N > 1: + tmp[:, 1:] = x[:, None] + multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1) + + return v + + +def _histogram2d_dispatcher(x, y, bins=None, range=None, normed=None, + weights=None, density=None): + yield x + yield y + + # This terrible logic is adapted from the checks in histogram2d + try: + N = len(bins) + except TypeError: + N = 1 + if N == 2: + yield from bins # bins=[x, y] + else: + yield bins + + yield weights + + +@array_function_dispatch(_histogram2d_dispatcher) +def histogram2d(x, y, bins=10, range=None, normed=None, weights=None, + density=None): + """ + Compute the bi-dimensional histogram of two data samples. + + Parameters + ---------- + x : array_like, shape (N,) + An array containing the x coordinates of the points to be + histogrammed. + y : array_like, shape (N,) + An array containing the y coordinates of the points to be + histogrammed. + bins : int or array_like or [int, int] or [array, array], optional + The bin specification: + + * If int, the number of bins for the two dimensions (nx=ny=bins). + * If array_like, the bin edges for the two dimensions + (x_edges=y_edges=bins). + * If [int, int], the number of bins in each dimension + (nx, ny = bins). + * If [array, array], the bin edges in each dimension + (x_edges, y_edges = bins). + * A combination [int, array] or [array, int], where int + is the number of bins and array is the bin edges. + + range : array_like, shape(2,2), optional + The leftmost and rightmost edges of the bins along each dimension + (if not specified explicitly in the `bins` parameters): + ``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range + will be considered outliers and not tallied in the histogram. + density : bool, optional + If False, the default, returns the number of samples in each bin. + If True, returns the probability *density* function at the bin, + ``bin_count / sample_count / bin_area``. + normed : bool, optional + An alias for the density argument that behaves identically. To avoid + confusion with the broken normed argument to `histogram`, `density` + should be preferred. + weights : array_like, shape(N,), optional + An array of values ``w_i`` weighing each sample ``(x_i, y_i)``. + Weights are normalized to 1 if `normed` is True. If `normed` is + False, the values of the returned histogram are equal to the sum of + the weights belonging to the samples falling into each bin. 
+
+    Returns
+    -------
+    H : ndarray, shape(nx, ny)
+        The bi-dimensional histogram of samples `x` and `y`. Values in `x`
+        are histogrammed along the first dimension and values in `y` are
+        histogrammed along the second dimension.
+    xedges : ndarray, shape(nx+1,)
+        The bin edges along the first dimension.
+    yedges : ndarray, shape(ny+1,)
+        The bin edges along the second dimension.
+
+    See Also
+    --------
+    histogram : 1D histogram
+    histogramdd : Multidimensional histogram
+
+    Notes
+    -----
+    When `normed` is True, then the returned histogram is the sample
+    density, defined such that the sum over bins of the product
+    ``bin_value * bin_area`` is 1.
+
+    Please note that the histogram does not follow the Cartesian convention
+    where `x` values are on the abscissa and `y` values on the ordinate
+    axis. Rather, `x` is histogrammed along the first dimension of the
+    array (vertical), and `y` along the second dimension of the array
+    (horizontal). This ensures compatibility with `histogramdd`.
+
+    Examples
+    --------
+    >>> from matplotlib.image import NonUniformImage
+    >>> import matplotlib.pyplot as plt
+
+    Construct a 2-D histogram with variable bin width. First define the bin
+    edges:
+
+    >>> xedges = [0, 1, 3, 5]
+    >>> yedges = [0, 2, 3, 4, 6]
+
+    Next we create a histogram H with random bin content:
+
+    >>> x = np.random.normal(2, 1, 100)
+    >>> y = np.random.normal(1, 1, 100)
+    >>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges))
+    >>> # Histogram does not follow Cartesian convention (see Notes),
+    >>> # therefore transpose H for visualization purposes.
+    >>> H = H.T
+
+    :func:`imshow <matplotlib.pyplot.imshow>` can only display square bins:
+
+    >>> fig = plt.figure(figsize=(7, 3))
+    >>> ax = fig.add_subplot(131, title='imshow: square bins')
+    >>> plt.imshow(H, interpolation='nearest', origin='lower',
+    ...         extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
+    <matplotlib.image.AxesImage object at 0x...>
+
+    :func:`pcolormesh <matplotlib.pyplot.pcolormesh>` can display actual edges:
+
+    >>> ax = fig.add_subplot(132, title='pcolormesh: actual edges',
+    ...         aspect='equal')
+    >>> X, Y = np.meshgrid(xedges, yedges)
+    >>> ax.pcolormesh(X, Y, H)
+    <matplotlib.collections.QuadMesh object at 0x...>
+
+    :class:`NonUniformImage <matplotlib.image.NonUniformImage>` can be used to
+    display actual bin edges with interpolation:
+
+    >>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated',
+    ...         aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]])
+    >>> im = NonUniformImage(ax, interpolation='bilinear')
+    >>> xcenters = (xedges[:-1] + xedges[1:]) / 2
+    >>> ycenters = (yedges[:-1] + yedges[1:]) / 2
+    >>> im.set_data(xcenters, ycenters, H)
+    >>> ax.images.append(im)
+    >>> plt.show()
+
+    """
+    from numpy import histogramdd
+
+    try:
+        N = len(bins)
+    except TypeError:
+        N = 1
+
+    if N != 1 and N != 2:
+        xedges = yedges = asarray(bins)
+        bins = [xedges, yedges]
+    hist, edges = histogramdd([x, y], bins, range, normed, weights, density)
+    return hist, edges[0], edges[1]
+
+
+@set_module('numpy')
+def mask_indices(n, mask_func, k=0):
+    """
+    Return the indices to access (n, n) arrays, given a masking function.
+
+    Assume `mask_func` is a function that, for a square array a of size
+    ``(n, n)`` with a possible offset argument `k`, when called as
+    ``mask_func(a, k)`` returns a new array with zeros in certain locations
+    (functions like `triu` or `tril` do precisely this). Then this function
+    returns the indices where the non-zero values would be located.
+
+    Parameters
+    ----------
+    n : int
+        The returned indices will be valid to access arrays of shape (n, n).
+    mask_func : callable
+        A function whose call signature is similar to that of `triu`, `tril`.
+ That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`. + `k` is an optional argument to the function. + k : scalar + An optional argument which is passed through to `mask_func`. Functions + like `triu`, `tril` take a second argument that is interpreted as an + offset. + + Returns + ------- + indices : tuple of arrays. + The `n` arrays of indices corresponding to the locations where + ``mask_func(np.ones((n, n)), k)`` is True. + + See Also + -------- + triu, tril, triu_indices, tril_indices + + Notes + ----- + .. versionadded:: 1.4.0 + + Examples + -------- + These are the indices that would allow you to access the upper triangular + part of any 3x3 array: + + >>> iu = np.mask_indices(3, np.triu) + + For example, if `a` is a 3x3 array: + + >>> a = np.arange(9).reshape(3, 3) + >>> a + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> a[iu] + array([0, 1, 2, 4, 5, 8]) + + An offset can be passed also to the masking function. This gets us the + indices starting on the first diagonal right of the main one: + + >>> iu1 = np.mask_indices(3, np.triu, 1) + + with which we now extract only three elements: + + >>> a[iu1] + array([1, 2, 5]) + + """ + m = ones((n, n), int) + a = mask_func(m, k) + return nonzero(a != 0) + + +@set_module('numpy') +def tril_indices(n, k=0, m=None): + """ + Return the indices for the lower-triangle of an (n, m) array. + + Parameters + ---------- + n : int + The row dimension of the arrays for which the returned + indices will be valid. + k : int, optional + Diagonal offset (see `tril` for details). + m : int, optional + .. versionadded:: 1.9.0 + + The column dimension of the arrays for which the returned + arrays will be valid. + By default `m` is taken equal to `n`. + + + Returns + ------- + inds : tuple of arrays + The indices for the triangle. The returned tuple contains two arrays, + each with the indices along one dimension of the array. + + See also + -------- + triu_indices : similar function, for upper-triangular. + mask_indices : generic function accepting an arbitrary mask function. + tril, triu + + Notes + ----- + .. versionadded:: 1.4.0 + + Examples + -------- + Compute two different sets of indices to access 4x4 arrays, one for the + lower triangular part starting at the main diagonal, and one starting two + diagonals further right: + + >>> il1 = np.tril_indices(4) + >>> il2 = np.tril_indices(4, 2) + + Here is how they can be used with a sample array: + + >>> a = np.arange(16).reshape(4, 4) + >>> a + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15]]) + + Both for indexing: + + >>> a[il1] + array([ 0, 4, 5, ..., 13, 14, 15]) + + And for assigning values: + + >>> a[il1] = -1 + >>> a + array([[-1, 1, 2, 3], + [-1, -1, 6, 7], + [-1, -1, -1, 11], + [-1, -1, -1, -1]]) + + These cover almost the whole array (two diagonals right of the main one): + + >>> a[il2] = -10 + >>> a + array([[-10, -10, -10, 3], + [-10, -10, -10, -10], + [-10, -10, -10, -10], + [-10, -10, -10, -10]]) + + """ + tri_ = tri(n, m, k=k, dtype=bool) + + return tuple(broadcast_to(inds, tri_.shape)[tri_] + for inds in indices(tri_.shape, sparse=True)) + + +def _trilu_indices_form_dispatcher(arr, k=None): + return (arr,) + + +@array_function_dispatch(_trilu_indices_form_dispatcher) +def tril_indices_from(arr, k=0): + """ + Return the indices for the lower-triangle of arr. + + See `tril_indices` for full details. + + Parameters + ---------- + arr : array_like + The indices will be valid for square arrays whose dimensions are + the same as arr. 
+ k : int, optional + Diagonal offset (see `tril` for details). + + See Also + -------- + tril_indices, tril + + Notes + ----- + .. versionadded:: 1.4.0 + + """ + if arr.ndim != 2: + raise ValueError("input array must be 2-d") + return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1]) + + +@set_module('numpy') +def triu_indices(n, k=0, m=None): + """ + Return the indices for the upper-triangle of an (n, m) array. + + Parameters + ---------- + n : int + The size of the arrays for which the returned indices will + be valid. + k : int, optional + Diagonal offset (see `triu` for details). + m : int, optional + .. versionadded:: 1.9.0 + + The column dimension of the arrays for which the returned + arrays will be valid. + By default `m` is taken equal to `n`. + + + Returns + ------- + inds : tuple, shape(2) of ndarrays, shape(`n`) + The indices for the triangle. The returned tuple contains two arrays, + each with the indices along one dimension of the array. Can be used + to slice a ndarray of shape(`n`, `n`). + + See also + -------- + tril_indices : similar function, for lower-triangular. + mask_indices : generic function accepting an arbitrary mask function. + triu, tril + + Notes + ----- + .. versionadded:: 1.4.0 + + Examples + -------- + Compute two different sets of indices to access 4x4 arrays, one for the + upper triangular part starting at the main diagonal, and one starting two + diagonals further right: + + >>> iu1 = np.triu_indices(4) + >>> iu2 = np.triu_indices(4, 2) + + Here is how they can be used with a sample array: + + >>> a = np.arange(16).reshape(4, 4) + >>> a + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15]]) + + Both for indexing: + + >>> a[iu1] + array([ 0, 1, 2, ..., 10, 11, 15]) + + And for assigning values: + + >>> a[iu1] = -1 + >>> a + array([[-1, -1, -1, -1], + [ 4, -1, -1, -1], + [ 8, 9, -1, -1], + [12, 13, 14, -1]]) + + These cover only a small part of the whole array (two diagonals right + of the main one): + + >>> a[iu2] = -10 + >>> a + array([[ -1, -1, -10, -10], + [ 4, -1, -1, -10], + [ 8, 9, -1, -1], + [ 12, 13, 14, -1]]) + + """ + tri_ = ~tri(n, m, k=k - 1, dtype=bool) + + return tuple(broadcast_to(inds, tri_.shape)[tri_] + for inds in indices(tri_.shape, sparse=True)) + + +@array_function_dispatch(_trilu_indices_form_dispatcher) +def triu_indices_from(arr, k=0): + """ + Return the indices for the upper-triangle of arr. + + See `triu_indices` for full details. + + Parameters + ---------- + arr : ndarray, shape(N, N) + The indices will be valid for square arrays. + k : int, optional + Diagonal offset (see `triu` for details). + + Returns + ------- + triu_indices_from : tuple, shape(2) of ndarray, shape(N) + Indices for the upper-triangle of `arr`. + + See Also + -------- + triu_indices, triu + + Notes + ----- + .. versionadded:: 1.4.0 + + """ + if arr.ndim != 2: + raise ValueError("input array must be 2-d") + return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1]) diff --git a/MLPY/Lib/site-packages/numpy/lib/twodim_base.pyi b/MLPY/Lib/site-packages/numpy/lib/twodim_base.pyi new file mode 100644 index 0000000000000000000000000000000000000000..7a88673d14998a0e9009515f5c61f2a7e70b8c23 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/twodim_base.pyi @@ -0,0 +1,32 @@ +from typing import List, Optional, Any + +from numpy import ndarray, _OrderCF +from numpy.typing import ArrayLike, DTypeLike + +__all__: List[str] + +def fliplr(m): ... +def flipud(m): ... 
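+
+# Hypothetical sketch (not the upstream stubs, which deliberately leave these
+# signatures untyped): fully annotated versions could look like
+#
+#     def fliplr(m: ArrayLike) -> ndarray[Any, Any]: ...
+#     def flipud(m: ArrayLike) -> ndarray[Any, Any]: ...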
+ +def eye( + N: int, + M: Optional[int] = ..., + k: int = ..., + dtype: DTypeLike = ..., + order: _OrderCF = ..., + *, + like: Optional[ArrayLike] = ... +) -> ndarray[Any, Any]: ... + +def diag(v, k=...): ... +def diagflat(v, k=...): ... +def tri(N, M=..., k=..., dtype = ..., *, like=...): ... +def tril(m, k=...): ... +def triu(m, k=...): ... +def vander(x, N=..., increasing=...): ... +def histogram2d(x, y, bins=..., range=..., normed=..., weights=..., density=...): ... +def mask_indices(n, mask_func, k=...): ... +def tril_indices(n, k=..., m=...): ... +def tril_indices_from(arr, k=...): ... +def triu_indices(n, k=..., m=...): ... +def triu_indices_from(arr, k=...): ... diff --git a/MLPY/Lib/site-packages/numpy/lib/type_check.py b/MLPY/Lib/site-packages/numpy/lib/type_check.py new file mode 100644 index 0000000000000000000000000000000000000000..376d09123a49a7654573282a5b6b9763d5bbc339 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/type_check.py @@ -0,0 +1,769 @@ +"""Automatically adapted for numpy Sep 19, 2005 by convertcode.py + +""" +import functools +import warnings + +__all__ = ['iscomplexobj', 'isrealobj', 'imag', 'iscomplex', + 'isreal', 'nan_to_num', 'real', 'real_if_close', + 'typename', 'asfarray', 'mintypecode', 'asscalar', + 'common_type'] + +import numpy.core.numeric as _nx +from numpy.core.numeric import asarray, asanyarray, isnan, zeros +from numpy.core.overrides import set_module +from numpy.core import overrides +from .ufunclike import isneginf, isposinf + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +_typecodes_by_elsize = 'GDFgdfQqLlIiHhBb?' + + +@set_module('numpy') +def mintypecode(typechars, typeset='GDFgdf', default='d'): + """ + Return the character for the minimum-size type to which given types can + be safely cast. + + The returned type character must represent the smallest size dtype such + that an array of the returned type can handle the data from an array of + all types in `typechars` (or if `typechars` is an array, then its + dtype.char). + + Parameters + ---------- + typechars : list of str or array_like + If a list of strings, each string should represent a dtype. + If array_like, the character representation of the array dtype is used. + typeset : str or list of str, optional + The set of characters that the returned character is chosen from. + The default set is 'GDFgdf'. + default : str, optional + The default character, this is returned if none of the characters in + `typechars` matches a character in `typeset`. + + Returns + ------- + typechar : str + The character representing the minimum-size type that was found. + + See Also + -------- + dtype, sctype2char, maximum_sctype + + Examples + -------- + >>> np.mintypecode(['d', 'f', 'S']) + 'd' + >>> x = np.array([1.1, 2-3.j]) + >>> np.mintypecode(x) + 'D' + + >>> np.mintypecode('abceh', default='G') + 'G' + + """ + typecodes = ((isinstance(t, str) and t) or asarray(t).dtype.char + for t in typechars) + intersection = set(t for t in typecodes if t in typeset) + if not intersection: + return default + if 'F' in intersection and 'd' in intersection: + return 'D' + return min(intersection, key=_typecodes_by_elsize.index) + + +def _asfarray_dispatcher(a, dtype=None): + return (a,) + + +@array_function_dispatch(_asfarray_dispatcher) +def asfarray(a, dtype=_nx.float_): + """ + Return an array converted to a float type. + + Parameters + ---------- + a : array_like + The input array. 
+ dtype : str or dtype object, optional + Float type code to coerce input array `a`. If `dtype` is one of the + 'int' dtypes, it is replaced with float64. + + Returns + ------- + out : ndarray + The input `a` as a float ndarray. + + Examples + -------- + >>> np.asfarray([2, 3]) + array([2., 3.]) + >>> np.asfarray([2, 3], dtype='float') + array([2., 3.]) + >>> np.asfarray([2, 3], dtype='int8') + array([2., 3.]) + + """ + if not _nx.issubdtype(dtype, _nx.inexact): + dtype = _nx.float_ + return asarray(a, dtype=dtype) + + +def _real_dispatcher(val): + return (val,) + + +@array_function_dispatch(_real_dispatcher) +def real(val): + """ + Return the real part of the complex argument. + + Parameters + ---------- + val : array_like + Input array. + + Returns + ------- + out : ndarray or scalar + The real component of the complex argument. If `val` is real, the type + of `val` is used for the output. If `val` has complex elements, the + returned type is float. + + See Also + -------- + real_if_close, imag, angle + + Examples + -------- + >>> a = np.array([1+2j, 3+4j, 5+6j]) + >>> a.real + array([1., 3., 5.]) + >>> a.real = 9 + >>> a + array([9.+2.j, 9.+4.j, 9.+6.j]) + >>> a.real = np.array([9, 8, 7]) + >>> a + array([9.+2.j, 8.+4.j, 7.+6.j]) + >>> np.real(1 + 1j) + 1.0 + + """ + try: + return val.real + except AttributeError: + return asanyarray(val).real + + +def _imag_dispatcher(val): + return (val,) + + +@array_function_dispatch(_imag_dispatcher) +def imag(val): + """ + Return the imaginary part of the complex argument. + + Parameters + ---------- + val : array_like + Input array. + + Returns + ------- + out : ndarray or scalar + The imaginary component of the complex argument. If `val` is real, + the type of `val` is used for the output. If `val` has complex + elements, the returned type is float. + + See Also + -------- + real, angle, real_if_close + + Examples + -------- + >>> a = np.array([1+2j, 3+4j, 5+6j]) + >>> a.imag + array([2., 4., 6.]) + >>> a.imag = np.array([8, 10, 12]) + >>> a + array([1. +8.j, 3.+10.j, 5.+12.j]) + >>> np.imag(1 + 1j) + 1.0 + + """ + try: + return val.imag + except AttributeError: + return asanyarray(val).imag + + +def _is_type_dispatcher(x): + return (x,) + + +@array_function_dispatch(_is_type_dispatcher) +def iscomplex(x): + """ + Returns a bool array, where True if input element is complex. + + What is tested is whether the input has a non-zero imaginary part, not if + the input type is complex. + + Parameters + ---------- + x : array_like + Input array. + + Returns + ------- + out : ndarray of bools + Output array. + + See Also + -------- + isreal + iscomplexobj : Return True if x is a complex type or an array of complex + numbers. + + Examples + -------- + >>> np.iscomplex([1+1j, 1+0j, 4.5, 3, 2, 2j]) + array([ True, False, False, False, False, True]) + + """ + ax = asanyarray(x) + if issubclass(ax.dtype.type, _nx.complexfloating): + return ax.imag != 0 + res = zeros(ax.shape, bool) + return res[()] # convert to scalar if needed + + +@array_function_dispatch(_is_type_dispatcher) +def isreal(x): + """ + Returns a bool array, where True if input element is real. + + If element has complex type with zero complex part, the return value + for that element is True. + + Parameters + ---------- + x : array_like + Input array. + + Returns + ------- + out : ndarray, bool + Boolean array of same shape as `x`. 
+ + Notes + ----- + `isreal` may behave unexpectedly for string or object arrays (see examples) + + See Also + -------- + iscomplex + isrealobj : Return True if x is not a complex type. + + Examples + -------- + >>> a = np.array([1+1j, 1+0j, 4.5, 3, 2, 2j], dtype=complex) + >>> np.isreal(a) + array([False, True, True, True, True, False]) + + The function does not work on string arrays. + + >>> a = np.array([2j, "a"], dtype="U") + >>> np.isreal(a) # Warns about non-elementwise comparison + False + + Returns True for all elements in input array of ``dtype=object`` even if + any of the elements is complex. + + >>> a = np.array([1, "2", 3+4j], dtype=object) + >>> np.isreal(a) + array([ True, True, True]) + + isreal should not be used with object arrays + + >>> a = np.array([1+2j, 2+1j], dtype=object) + >>> np.isreal(a) + array([ True, True]) + + """ + return imag(x) == 0 + + +@array_function_dispatch(_is_type_dispatcher) +def iscomplexobj(x): + """ + Check for a complex type or an array of complex numbers. + + The type of the input is checked, not the value. Even if the input + has an imaginary part equal to zero, `iscomplexobj` evaluates to True. + + Parameters + ---------- + x : any + The input can be of any type and shape. + + Returns + ------- + iscomplexobj : bool + The return value, True if `x` is of a complex type or has at least + one complex element. + + See Also + -------- + isrealobj, iscomplex + + Examples + -------- + >>> np.iscomplexobj(1) + False + >>> np.iscomplexobj(1+0j) + True + >>> np.iscomplexobj([3, 1+0j, True]) + True + + """ + try: + dtype = x.dtype + type_ = dtype.type + except AttributeError: + type_ = asarray(x).dtype.type + return issubclass(type_, _nx.complexfloating) + + +@array_function_dispatch(_is_type_dispatcher) +def isrealobj(x): + """ + Return True if x is a not complex type or an array of complex numbers. + + The type of the input is checked, not the value. So even if the input + has an imaginary part equal to zero, `isrealobj` evaluates to False + if the data type is complex. + + Parameters + ---------- + x : any + The input can be of any type and shape. + + Returns + ------- + y : bool + The return value, False if `x` is of a complex type. + + See Also + -------- + iscomplexobj, isreal + + Notes + ----- + The function is only meant for arrays with numerical values but it + accepts all other objects. Since it assumes array input, the return + value of other objects may be True. + + >>> np.isrealobj('A string') + True + >>> np.isrealobj(False) + True + >>> np.isrealobj(None) + True + + Examples + -------- + >>> np.isrealobj(1) + True + >>> np.isrealobj(1+0j) + False + >>> np.isrealobj([3, 1+0j, True]) + False + + """ + return not iscomplexobj(x) + +#----------------------------------------------------------------------------- + +def _getmaxmin(t): + from numpy.core import getlimits + f = getlimits.finfo(t) + return f.max, f.min + + +def _nan_to_num_dispatcher(x, copy=None, nan=None, posinf=None, neginf=None): + return (x,) + + +@array_function_dispatch(_nan_to_num_dispatcher) +def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None): + """ + Replace NaN with zero and infinity with large finite numbers (default + behaviour) or with the numbers defined by the user using the `nan`, + `posinf` and/or `neginf` keywords. 
+
+    If `x` is inexact, NaN is replaced by zero or by the user defined value in
+    `nan` keyword, infinity is replaced by the largest finite floating point
+    values representable by ``x.dtype`` or by the user defined value in
+    `posinf` keyword and -infinity is replaced by the most negative finite
+    floating point values representable by ``x.dtype`` or by the user defined
+    value in `neginf` keyword.
+
+    For complex dtypes, the above is applied to each of the real and
+    imaginary components of `x` separately.
+
+    If `x` is not inexact, then no replacements are made.
+
+    Parameters
+    ----------
+    x : scalar or array_like
+        Input data.
+    copy : bool, optional
+        Whether to create a copy of `x` (True) or to replace values
+        in-place (False). The in-place operation only occurs if
+        casting to an array does not require a copy.
+        Default is True.
+
+        .. versionadded:: 1.13
+    nan : int, float, optional
+        Value to be used to fill NaN values. If no value is passed
+        then NaN values will be replaced with 0.0.
+
+        .. versionadded:: 1.17
+    posinf : int, float, optional
+        Value to be used to fill positive infinity values. If no value is
+        passed then positive infinity values will be replaced with a very
+        large number.
+
+        .. versionadded:: 1.17
+    neginf : int, float, optional
+        Value to be used to fill negative infinity values. If no value is
+        passed then negative infinity values will be replaced with a very
+        small (or negative) number.
+
+        .. versionadded:: 1.17
+
+
+
+    Returns
+    -------
+    out : ndarray
+        `x`, with the non-finite values replaced. If `copy` is False, this may
+        be `x` itself.
+
+    See Also
+    --------
+    isinf : Shows which elements are positive or negative infinity.
+    isneginf : Shows which elements are negative infinity.
+    isposinf : Shows which elements are positive infinity.
+    isnan : Shows which elements are Not a Number (NaN).
+    isfinite : Shows which elements are finite (not NaN, not infinity)
+
+    Notes
+    -----
+    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
+    (IEEE 754). This means that Not a Number is not equivalent to infinity.
+
+    Examples
+    --------
+    >>> np.nan_to_num(np.inf)
+    1.7976931348623157e+308
+    >>> np.nan_to_num(-np.inf)
+    -1.7976931348623157e+308
+    >>> np.nan_to_num(np.nan)
+    0.0
+    >>> x = np.array([np.inf, -np.inf, np.nan, -128, 128])
+    >>> np.nan_to_num(x)
+    array([ 1.79769313e+308, -1.79769313e+308,  0.00000000e+000, # may vary
+           -1.28000000e+002,  1.28000000e+002])
+    >>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333)
+    array([ 3.3333333e+07,  3.3333333e+07, -9.9990000e+03,
+           -1.2800000e+02,  1.2800000e+02])
+    >>> y = np.array([complex(np.inf, np.nan), np.nan, complex(np.nan, np.inf)])
+    >>> np.nan_to_num(y)
+    array([ 1.79769313e+308 +0.00000000e+000j, # may vary
+            0.00000000e+000 +0.00000000e+000j,
+            0.00000000e+000 +1.79769313e+308j])
+    >>> np.nan_to_num(y, nan=111111, posinf=222222)
+    array([222222.+111111.j, 111111.
+0.j, 111111.+222222.j]) + """ + x = _nx.array(x, subok=True, copy=copy) + xtype = x.dtype.type + + isscalar = (x.ndim == 0) + + if not issubclass(xtype, _nx.inexact): + return x[()] if isscalar else x + + iscomplex = issubclass(xtype, _nx.complexfloating) + + dest = (x.real, x.imag) if iscomplex else (x,) + maxf, minf = _getmaxmin(x.real.dtype) + if posinf is not None: + maxf = posinf + if neginf is not None: + minf = neginf + for d in dest: + idx_nan = isnan(d) + idx_posinf = isposinf(d) + idx_neginf = isneginf(d) + _nx.copyto(d, nan, where=idx_nan) + _nx.copyto(d, maxf, where=idx_posinf) + _nx.copyto(d, minf, where=idx_neginf) + return x[()] if isscalar else x + +#----------------------------------------------------------------------------- + +def _real_if_close_dispatcher(a, tol=None): + return (a,) + + +@array_function_dispatch(_real_if_close_dispatcher) +def real_if_close(a, tol=100): + """ + If input is complex with all imaginary parts close to zero, return + real parts. + + "Close to zero" is defined as `tol` * (machine epsilon of the type for + `a`). + + Parameters + ---------- + a : array_like + Input array. + tol : float + Tolerance in machine epsilons for the complex part of the elements + in the array. + + Returns + ------- + out : ndarray + If `a` is real, the type of `a` is used for the output. If `a` + has complex elements, the returned type is float. + + See Also + -------- + real, imag, angle + + Notes + ----- + Machine epsilon varies from machine to machine and between data types + but Python floats on most platforms have a machine epsilon equal to + 2.2204460492503131e-16. You can use 'np.finfo(float).eps' to print + out the machine epsilon for floats. + + Examples + -------- + >>> np.finfo(float).eps + 2.2204460492503131e-16 # may vary + + >>> np.real_if_close([2.1 + 4e-14j, 5.2 + 3e-15j], tol=1000) + array([2.1, 5.2]) + >>> np.real_if_close([2.1 + 4e-13j, 5.2 + 3e-15j], tol=1000) + array([2.1+4.e-13j, 5.2 + 3e-15j]) + + """ + a = asanyarray(a) + if not issubclass(a.dtype.type, _nx.complexfloating): + return a + if tol > 1: + from numpy.core import getlimits + f = getlimits.finfo(a.dtype.type) + tol = f.eps * tol + if _nx.all(_nx.absolute(a.imag) < tol): + a = a.real + return a + + +def _asscalar_dispatcher(a): + # 2018-10-10, 1.16 + warnings.warn('np.asscalar(a) is deprecated since NumPy v1.16, use ' + 'a.item() instead', DeprecationWarning, stacklevel=3) + return (a,) + + +@array_function_dispatch(_asscalar_dispatcher) +def asscalar(a): + """ + Convert an array of size 1 to its scalar equivalent. + + .. deprecated:: 1.16 + + Deprecated, use `numpy.ndarray.item()` instead. + + Parameters + ---------- + a : ndarray + Input array of size 1. + + Returns + ------- + out : scalar + Scalar representation of `a`. The output data type is the same type + returned by the input's `item` method. 
+ + Examples + -------- + >>> np.asscalar(np.array([24])) + 24 + """ + return a.item() + +#----------------------------------------------------------------------------- + +_namefromtype = {'S1': 'character', + '?': 'bool', + 'b': 'signed char', + 'B': 'unsigned char', + 'h': 'short', + 'H': 'unsigned short', + 'i': 'integer', + 'I': 'unsigned integer', + 'l': 'long integer', + 'L': 'unsigned long integer', + 'q': 'long long integer', + 'Q': 'unsigned long long integer', + 'f': 'single precision', + 'd': 'double precision', + 'g': 'long precision', + 'F': 'complex single precision', + 'D': 'complex double precision', + 'G': 'complex long double precision', + 'S': 'string', + 'U': 'unicode', + 'V': 'void', + 'O': 'object' + } + +@set_module('numpy') +def typename(char): + """ + Return a description for the given data type code. + + Parameters + ---------- + char : str + Data type code. + + Returns + ------- + out : str + Description of the input data type code. + + See Also + -------- + dtype, typecodes + + Examples + -------- + >>> typechars = ['S1', '?', 'B', 'D', 'G', 'F', 'I', 'H', 'L', 'O', 'Q', + ... 'S', 'U', 'V', 'b', 'd', 'g', 'f', 'i', 'h', 'l', 'q'] + >>> for typechar in typechars: + ... print(typechar, ' : ', np.typename(typechar)) + ... + S1 : character + ? : bool + B : unsigned char + D : complex double precision + G : complex long double precision + F : complex single precision + I : unsigned integer + H : unsigned short + L : unsigned long integer + O : object + Q : unsigned long long integer + S : string + U : unicode + V : void + b : signed char + d : double precision + g : long precision + f : single precision + i : integer + h : short + l : long integer + q : long long integer + + """ + return _namefromtype[char] + +#----------------------------------------------------------------------------- + +#determine the "minimum common type" for a group of arrays. +array_type = [[_nx.half, _nx.single, _nx.double, _nx.longdouble], + [None, _nx.csingle, _nx.cdouble, _nx.clongdouble]] +array_precision = {_nx.half: 0, + _nx.single: 1, + _nx.double: 2, + _nx.longdouble: 3, + _nx.csingle: 1, + _nx.cdouble: 2, + _nx.clongdouble: 3} + + +def _common_type_dispatcher(*arrays): + return arrays + + +@array_function_dispatch(_common_type_dispatcher) +def common_type(*arrays): + """ + Return a scalar type which is common to the input arrays. + + The return type will always be an inexact (i.e. floating point) scalar + type, even if all the arrays are integer arrays. If one of the inputs is + an integer array, the minimum precision type that is returned is a + 64-bit floating point dtype. + + All input arrays except int64 and uint64 can be safely cast to the + returned dtype without loss of information. + + Parameters + ---------- + array1, array2, ... : ndarrays + Input arrays. + + Returns + ------- + out : data type code + Data type code. 
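The doctest outputs are missing from the ``common_type`` Examples just below (angle-bracketed ``<class ...>`` lines are easily dropped during extraction). On a typical build the three calls resolve as follows, which also shows the precision tables promoting any integer input to double:

    >>> np.common_type(np.arange(2, dtype=np.float32))
    <class 'numpy.float32'>
    >>> np.common_type(np.arange(2, dtype=np.float32), np.arange(2))
    <class 'numpy.float64'>
    >>> np.common_type(np.arange(4), np.array([45, 6.j]), np.array([45.0]))
    <class 'numpy.complex128'>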
+ + See Also + -------- + dtype, mintypecode + + Examples + -------- + >>> np.common_type(np.arange(2, dtype=np.float32)) + + >>> np.common_type(np.arange(2, dtype=np.float32), np.arange(2)) + + >>> np.common_type(np.arange(4), np.array([45, 6.j]), np.array([45.0])) + + + """ + is_complex = False + precision = 0 + for a in arrays: + t = a.dtype.type + if iscomplexobj(a): + is_complex = True + if issubclass(t, _nx.integer): + p = 2 # array_precision[_nx.double] + else: + p = array_precision.get(t, None) + if p is None: + raise TypeError("can't get common type for non-numeric array") + precision = max(precision, p) + if is_complex: + return array_type[1][precision] + else: + return array_type[0][precision] diff --git a/MLPY/Lib/site-packages/numpy/lib/type_check.pyi b/MLPY/Lib/site-packages/numpy/lib/type_check.pyi new file mode 100644 index 0000000000000000000000000000000000000000..005a082e428d8d011aca78eb9196ca713963d6f6 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/type_check.pyi @@ -0,0 +1,19 @@ +from typing import List + +__all__: List[str] + +def mintypecode(typechars, typeset=..., default=...): ... +def asfarray(a, dtype = ...): ... +def real(val): ... +def imag(val): ... +def iscomplex(x): ... +def isreal(x): ... +def iscomplexobj(x): ... +def isrealobj(x): ... +def nan_to_num(x, copy=..., nan=..., posinf=..., neginf=...): ... +def real_if_close(a, tol=...): ... +def typename(char): ... +def common_type(*arrays): ... + +# NOTE: Deprecated +# def asscalar(a): ... diff --git a/MLPY/Lib/site-packages/numpy/lib/ufunclike.py b/MLPY/Lib/site-packages/numpy/lib/ufunclike.py new file mode 100644 index 0000000000000000000000000000000000000000..afb6c4722870e0440ae37f008997558f20aceea9 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/ufunclike.py @@ -0,0 +1,268 @@ +""" +Module of functions that are like ufuncs in acting on arrays and optionally +storing results in an output array. + +""" +__all__ = ['fix', 'isneginf', 'isposinf'] + +import numpy.core.numeric as nx +from numpy.core.overrides import ( + array_function_dispatch, ARRAY_FUNCTION_ENABLED, +) +import warnings +import functools + + +def _deprecate_out_named_y(f): + """ + Allow the out argument to be passed as the name `y` (deprecated) + + In future, this decorator should be removed. + """ + @functools.wraps(f) + def func(x, out=None, **kwargs): + if 'y' in kwargs: + if 'out' in kwargs: + raise TypeError( + "{} got multiple values for argument 'out'/'y'" + .format(f.__name__) + ) + out = kwargs.pop('y') + # NumPy 1.13.0, 2017-04-26 + warnings.warn( + "The name of the out argument to {} has changed from `y` to " + "`out`, to match other ufuncs.".format(f.__name__), + DeprecationWarning, stacklevel=3) + return f(x, out=out, **kwargs) + + return func + + +def _fix_out_named_y(f): + """ + Allow the out argument to be passed as the name `y` (deprecated) + + This decorator should only be used if _deprecate_out_named_y is used on + a corresponding dispatcher function. + """ + @functools.wraps(f) + def func(x, out=None, **kwargs): + if 'y' in kwargs: + # we already did error checking in _deprecate_out_named_y + out = kwargs.pop('y') + return f(x, out=out, **kwargs) + + return func + + +def _fix_and_maybe_deprecate_out_named_y(f): + """ + Use the appropriate decorator, depending upon if dispatching is being used. 
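The pair of decorators above implement a keyword rename: a legacy ``y=`` argument is popped and forwarded as ``out=``, and the DeprecationWarning is issued exactly once, by the dispatcher when ``__array_function__`` dispatch is enabled and by the implementation itself otherwise. A minimal sketch of the same shim for an arbitrary function (hypothetical helper, not numpy code):

    import functools
    import warnings

    def accept_legacy_y(f):
        """Forward the deprecated keyword ``y`` to ``out``."""
        @functools.wraps(f)
        def wrapper(x, out=None, **kwargs):
            if 'y' in kwargs:
                if out is not None:
                    raise TypeError(f"{f.__name__} got values for both 'out' and 'y'")
                warnings.warn("the 'y' argument is deprecated, use 'out'",
                              DeprecationWarning, stacklevel=2)
                out = kwargs.pop('y')
            return f(x, out=out, **kwargs)
        return wrapper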
+ """ + if ARRAY_FUNCTION_ENABLED: + return _fix_out_named_y(f) + else: + return _deprecate_out_named_y(f) + + +@_deprecate_out_named_y +def _dispatcher(x, out=None): + return (x, out) + + +@array_function_dispatch(_dispatcher, verify=False, module='numpy') +@_fix_and_maybe_deprecate_out_named_y +def fix(x, out=None): + """ + Round to nearest integer towards zero. + + Round an array of floats element-wise to nearest integer towards zero. + The rounded values are returned as floats. + + Parameters + ---------- + x : array_like + An array of floats to be rounded + out : ndarray, optional + A location into which the result is stored. If provided, it must have + a shape that the input broadcasts to. If not provided or None, a + freshly-allocated array is returned. + + Returns + ------- + out : ndarray of floats + A float array with the same dimensions as the input. + If second argument is not supplied then a float array is returned + with the rounded values. + + If a second argument is supplied the result is stored there. + The return value `out` is then a reference to that array. + + See Also + -------- + rint, trunc, floor, ceil + around : Round to given number of decimals + + Examples + -------- + >>> np.fix(3.14) + 3.0 + >>> np.fix(3) + 3.0 + >>> np.fix([2.1, 2.9, -2.1, -2.9]) + array([ 2., 2., -2., -2.]) + + """ + # promote back to an array if flattened + res = nx.asanyarray(nx.ceil(x, out=out)) + res = nx.floor(x, out=res, where=nx.greater_equal(x, 0)) + + # when no out argument is passed and no subclasses are involved, flatten + # scalars + if out is None and type(res) is nx.ndarray: + res = res[()] + return res + + +@array_function_dispatch(_dispatcher, verify=False, module='numpy') +@_fix_and_maybe_deprecate_out_named_y +def isposinf(x, out=None): + """ + Test element-wise for positive infinity, return result as bool array. + + Parameters + ---------- + x : array_like + The input array. + out : array_like, optional + A location into which the result is stored. If provided, it must have a + shape that the input broadcasts to. If not provided or None, a + freshly-allocated boolean array is returned. + + Returns + ------- + out : ndarray + A boolean array with the same dimensions as the input. + If second argument is not supplied then a boolean array is returned + with values True where the corresponding element of the input is + positive infinity and values False where the element of the input is + not positive infinity. + + If a second argument is supplied the result is stored there. If the + type of that array is a numeric type the result is represented as zeros + and ones, if the type is boolean then as False and True. + The return value `out` is then a reference to that array. + + See Also + -------- + isinf, isneginf, isfinite, isnan + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). 
+ + Errors result if the second argument is also supplied when x is a scalar + input, if first and second arguments have different shapes, or if the + first argument has complex values + + Examples + -------- + >>> np.isposinf(np.PINF) + True + >>> np.isposinf(np.inf) + True + >>> np.isposinf(np.NINF) + False + >>> np.isposinf([-np.inf, 0., np.inf]) + array([False, False, True]) + + >>> x = np.array([-np.inf, 0., np.inf]) + >>> y = np.array([2, 2, 2]) + >>> np.isposinf(x, y) + array([0, 0, 1]) + >>> y + array([0, 0, 1]) + + """ + is_inf = nx.isinf(x) + try: + signbit = ~nx.signbit(x) + except TypeError as e: + dtype = nx.asanyarray(x).dtype + raise TypeError(f'This operation is not supported for {dtype} values ' + 'because it would be ambiguous.') from e + else: + return nx.logical_and(is_inf, signbit, out) + + +@array_function_dispatch(_dispatcher, verify=False, module='numpy') +@_fix_and_maybe_deprecate_out_named_y +def isneginf(x, out=None): + """ + Test element-wise for negative infinity, return result as bool array. + + Parameters + ---------- + x : array_like + The input array. + out : array_like, optional + A location into which the result is stored. If provided, it must have a + shape that the input broadcasts to. If not provided or None, a + freshly-allocated boolean array is returned. + + Returns + ------- + out : ndarray + A boolean array with the same dimensions as the input. + If second argument is not supplied then a numpy boolean array is + returned with values True where the corresponding element of the + input is negative infinity and values False where the element of + the input is not negative infinity. + + If a second argument is supplied the result is stored there. If the + type of that array is a numeric type the result is represented as + zeros and ones, if the type is boolean then as False and True. The + return value `out` is then a reference to that array. + + See Also + -------- + isinf, isposinf, isnan, isfinite + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). + + Errors result if the second argument is also supplied when x is a scalar + input, if first and second arguments have different shapes, or if the + first argument has complex values. 
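``isposinf`` above (and ``isneginf`` below) reduce to the same two-ufunc composition, ``isinf`` combined with the sign bit; that is also why complex input raises TypeError, since ``signbit`` is undefined for complex dtypes. The equivalent masks, spelled out:

    >>> x = np.array([-np.inf, -1.0, 0.0, np.inf])
    >>> np.logical_and(np.isinf(x), ~np.signbit(x))   # isposinf
    array([False, False, False,  True])
    >>> np.logical_and(np.isinf(x), np.signbit(x))    # isneginf
    array([ True, False, False, False])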
+ + Examples + -------- + >>> np.isneginf(np.NINF) + True + >>> np.isneginf(np.inf) + False + >>> np.isneginf(np.PINF) + False + >>> np.isneginf([-np.inf, 0., np.inf]) + array([ True, False, False]) + + >>> x = np.array([-np.inf, 0., np.inf]) + >>> y = np.array([2, 2, 2]) + >>> np.isneginf(x, y) + array([1, 0, 0]) + >>> y + array([1, 0, 0]) + + """ + is_inf = nx.isinf(x) + try: + signbit = nx.signbit(x) + except TypeError as e: + dtype = nx.asanyarray(x).dtype + raise TypeError(f'This operation is not supported for {dtype} values ' + 'because it would be ambiguous.') from e + else: + return nx.logical_and(is_inf, signbit, out) diff --git a/MLPY/Lib/site-packages/numpy/lib/ufunclike.pyi b/MLPY/Lib/site-packages/numpy/lib/ufunclike.pyi new file mode 100644 index 0000000000000000000000000000000000000000..4c75f955369e7f076c7e2d160a77c74fc62898a3 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/ufunclike.pyi @@ -0,0 +1,66 @@ +from typing import Any, overload, TypeVar, List, Union + +from numpy import floating, bool_, object_, ndarray +from numpy.typing import ( + NDArray, + _FloatLike_co, + _ArrayLikeFloat_co, + _ArrayLikeObject_co, +) + +_ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any]) + +__all__: List[str] + +@overload +def fix( # type: ignore[misc] + x: _FloatLike_co, + out: None = ..., +) -> floating[Any]: ... +@overload +def fix( + x: _ArrayLikeFloat_co, + out: None = ..., +) -> NDArray[floating[Any]]: ... +@overload +def fix( + x: _ArrayLikeObject_co, + out: None = ..., +) -> NDArray[object_]: ... +@overload +def fix( + x: Union[_ArrayLikeFloat_co, _ArrayLikeObject_co], + out: _ArrayType, +) -> _ArrayType: ... + +@overload +def isposinf( # type: ignore[misc] + x: _FloatLike_co, + out: None = ..., +) -> bool_: ... +@overload +def isposinf( + x: _ArrayLikeFloat_co, + out: None = ..., +) -> NDArray[bool_]: ... +@overload +def isposinf( + x: _ArrayLikeFloat_co, + out: _ArrayType, +) -> _ArrayType: ... + +@overload +def isneginf( # type: ignore[misc] + x: _FloatLike_co, + out: None = ..., +) -> bool_: ... +@overload +def isneginf( + x: _ArrayLikeFloat_co, + out: None = ..., +) -> NDArray[bool_]: ... +@overload +def isneginf( + x: _ArrayLikeFloat_co, + out: _ArrayType, +) -> _ArrayType: ... diff --git a/MLPY/Lib/site-packages/numpy/lib/user_array.py b/MLPY/Lib/site-packages/numpy/lib/user_array.py new file mode 100644 index 0000000000000000000000000000000000000000..3000b5ad77a2429af9ff4d0721c389603a342ccb --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/user_array.py @@ -0,0 +1,286 @@ +""" +Standard container-class for easy multiple-inheritance. + +Try to inherit from the ndarray instead of using this class as this is not +complete. + +""" +from numpy.core import ( + array, asarray, absolute, add, subtract, multiply, divide, + remainder, power, left_shift, right_shift, bitwise_and, bitwise_or, + bitwise_xor, invert, less, less_equal, not_equal, equal, greater, + greater_equal, shape, reshape, arange, sin, sqrt, transpose +) + + +class container: + """ + container(data, dtype=None, copy=True) + + Standard container-class for easy multiple-inheritance. 
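The stub file above uses a standard typing pattern: the ``out=None`` overloads return a freshly allocated ``NDArray[bool_]`` (or a scalar ``bool_``), while the ``out: _ArrayType`` overload uses a TypeVar bound to ``ndarray`` so the caller's own array type is echoed back. Roughly what a checker infers under those stubs (runtime-valid; the annotations are illustrative):

    import numpy as np
    import numpy.typing as npt

    x: npt.NDArray[np.float64] = np.array([-np.inf, 0.0, np.inf])
    mask = np.isposinf(x)            # inferred as NDArray[np.bool_]
    buf = np.empty(3, dtype=bool)
    same_buf = np.isposinf(x, buf)   # inferred as the type of `buf`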
+ + Methods + ------- + copy + tostring + byteswap + astype + + """ + def __init__(self, data, dtype=None, copy=True): + self.array = array(data, dtype, copy=copy) + + def __repr__(self): + if self.ndim > 0: + return self.__class__.__name__ + repr(self.array)[len("array"):] + else: + return self.__class__.__name__ + "(" + repr(self.array) + ")" + + def __array__(self, t=None): + if t: + return self.array.astype(t) + return self.array + + # Array as sequence + def __len__(self): + return len(self.array) + + def __getitem__(self, index): + return self._rc(self.array[index]) + + def __setitem__(self, index, value): + self.array[index] = asarray(value, self.dtype) + + def __abs__(self): + return self._rc(absolute(self.array)) + + def __neg__(self): + return self._rc(-self.array) + + def __add__(self, other): + return self._rc(self.array + asarray(other)) + + __radd__ = __add__ + + def __iadd__(self, other): + add(self.array, other, self.array) + return self + + def __sub__(self, other): + return self._rc(self.array - asarray(other)) + + def __rsub__(self, other): + return self._rc(asarray(other) - self.array) + + def __isub__(self, other): + subtract(self.array, other, self.array) + return self + + def __mul__(self, other): + return self._rc(multiply(self.array, asarray(other))) + + __rmul__ = __mul__ + + def __imul__(self, other): + multiply(self.array, other, self.array) + return self + + def __div__(self, other): + return self._rc(divide(self.array, asarray(other))) + + def __rdiv__(self, other): + return self._rc(divide(asarray(other), self.array)) + + def __idiv__(self, other): + divide(self.array, other, self.array) + return self + + def __mod__(self, other): + return self._rc(remainder(self.array, other)) + + def __rmod__(self, other): + return self._rc(remainder(other, self.array)) + + def __imod__(self, other): + remainder(self.array, other, self.array) + return self + + def __divmod__(self, other): + return (self._rc(divide(self.array, other)), + self._rc(remainder(self.array, other))) + + def __rdivmod__(self, other): + return (self._rc(divide(other, self.array)), + self._rc(remainder(other, self.array))) + + def __pow__(self, other): + return self._rc(power(self.array, asarray(other))) + + def __rpow__(self, other): + return self._rc(power(asarray(other), self.array)) + + def __ipow__(self, other): + power(self.array, other, self.array) + return self + + def __lshift__(self, other): + return self._rc(left_shift(self.array, other)) + + def __rshift__(self, other): + return self._rc(right_shift(self.array, other)) + + def __rlshift__(self, other): + return self._rc(left_shift(other, self.array)) + + def __rrshift__(self, other): + return self._rc(right_shift(other, self.array)) + + def __ilshift__(self, other): + left_shift(self.array, other, self.array) + return self + + def __irshift__(self, other): + right_shift(self.array, other, self.array) + return self + + def __and__(self, other): + return self._rc(bitwise_and(self.array, other)) + + def __rand__(self, other): + return self._rc(bitwise_and(other, self.array)) + + def __iand__(self, other): + bitwise_and(self.array, other, self.array) + return self + + def __xor__(self, other): + return self._rc(bitwise_xor(self.array, other)) + + def __rxor__(self, other): + return self._rc(bitwise_xor(other, self.array)) + + def __ixor__(self, other): + bitwise_xor(self.array, other, self.array) + return self + + def __or__(self, other): + return self._rc(bitwise_or(self.array, other)) + + def __ror__(self, other): + return 
self._rc(bitwise_or(other, self.array)) + + def __ior__(self, other): + bitwise_or(self.array, other, self.array) + return self + + def __pos__(self): + return self._rc(self.array) + + def __invert__(self): + return self._rc(invert(self.array)) + + def _scalarfunc(self, func): + if self.ndim == 0: + return func(self[0]) + else: + raise TypeError( + "only rank-0 arrays can be converted to Python scalars.") + + def __complex__(self): + return self._scalarfunc(complex) + + def __float__(self): + return self._scalarfunc(float) + + def __int__(self): + return self._scalarfunc(int) + + def __hex__(self): + return self._scalarfunc(hex) + + def __oct__(self): + return self._scalarfunc(oct) + + def __lt__(self, other): + return self._rc(less(self.array, other)) + + def __le__(self, other): + return self._rc(less_equal(self.array, other)) + + def __eq__(self, other): + return self._rc(equal(self.array, other)) + + def __ne__(self, other): + return self._rc(not_equal(self.array, other)) + + def __gt__(self, other): + return self._rc(greater(self.array, other)) + + def __ge__(self, other): + return self._rc(greater_equal(self.array, other)) + + def copy(self): + "" + return self._rc(self.array.copy()) + + def tostring(self): + "" + return self.array.tostring() + + def tobytes(self): + "" + return self.array.tobytes() + + def byteswap(self): + "" + return self._rc(self.array.byteswap()) + + def astype(self, typecode): + "" + return self._rc(self.array.astype(typecode)) + + def _rc(self, a): + if len(shape(a)) == 0: + return a + else: + return self.__class__(a) + + def __array_wrap__(self, *args): + return self.__class__(args[0]) + + def __setattr__(self, attr, value): + if attr == 'array': + object.__setattr__(self, attr, value) + return + try: + self.array.__setattr__(attr, value) + except AttributeError: + object.__setattr__(self, attr, value) + + # Only called after other approaches fail. + def __getattr__(self, attr): + if (attr == 'array'): + return object.__getattribute__(self, attr) + return self.array.__getattribute__(attr) + +############################################################# +# Test of class container +############################################################# +if __name__ == '__main__': + temp = reshape(arange(10000), (100, 100)) + + ua = container(temp) + # new object created begin test + print(dir(ua)) + print(shape(ua), ua.shape) # I have changed Numeric.py + + ua_small = ua[:3, :5] + print(ua_small) + # this did not change ua[0,0], which is not normal behavior + ua_small[0, 0] = 10 + print(ua_small[0, 0], ua[0, 0]) + print(sin(ua_small) / 3. * 6. 
+ sqrt(ua_small ** 2)) + print(less(ua_small, 103), type(less(ua_small, 103))) + print(type(ua_small * reshape(arange(15), shape(ua_small)))) + print(reshape(ua_small, (5, 3))) + print(transpose(ua_small)) diff --git a/MLPY/Lib/site-packages/numpy/lib/utils.py b/MLPY/Lib/site-packages/numpy/lib/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e49f06d66817cadb3e19c292555ab61cb7b9b2c0 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/utils.py @@ -0,0 +1,1071 @@ +import os +import sys +import textwrap +import types +import re +import warnings + +from numpy.core.numerictypes import issubclass_, issubsctype, issubdtype +from numpy.core.overrides import set_module +from numpy.core import ndarray, ufunc, asarray +import numpy as np + +__all__ = [ + 'issubclass_', 'issubsctype', 'issubdtype', 'deprecate', + 'deprecate_with_doc', 'get_include', 'info', 'source', 'who', + 'lookfor', 'byte_bounds', 'safe_eval' + ] + +def get_include(): + """ + Return the directory that contains the NumPy \\*.h header files. + + Extension modules that need to compile against NumPy should use this + function to locate the appropriate include directory. + + Notes + ----- + When using ``distutils``, for example in ``setup.py``. + :: + + import numpy as np + ... + Extension('extension_name', ... + include_dirs=[np.get_include()]) + ... + + """ + import numpy + if numpy.show_config is None: + # running from numpy source directory + d = os.path.join(os.path.dirname(numpy.__file__), 'core', 'include') + else: + # using installed numpy core headers + import numpy.core as core + d = os.path.join(os.path.dirname(core.__file__), 'include') + return d + + +def _set_function_name(func, name): + func.__name__ = name + return func + + +class _Deprecate: + """ + Decorator class to deprecate old functions. + + Refer to `deprecate` for details. + + See Also + -------- + deprecate + + """ + + def __init__(self, old_name=None, new_name=None, message=None): + self.old_name = old_name + self.new_name = new_name + self.message = message + + def __call__(self, func, *args, **kwargs): + """ + Decorator call. Refer to ``decorate``. + + """ + old_name = self.old_name + new_name = self.new_name + message = self.message + + if old_name is None: + try: + old_name = func.__name__ + except AttributeError: + old_name = func.__name__ + if new_name is None: + depdoc = "`%s` is deprecated!" % old_name + else: + depdoc = "`%s` is deprecated, use `%s` instead!" % \ + (old_name, new_name) + + if message is not None: + depdoc += "\n" + message + + def newfunc(*args,**kwds): + """`arrayrange` is deprecated, use `arange` instead!""" + warnings.warn(depdoc, DeprecationWarning, stacklevel=2) + return func(*args, **kwds) + + newfunc = _set_function_name(newfunc, old_name) + doc = func.__doc__ + if doc is None: + doc = depdoc + else: + lines = doc.expandtabs().split('\n') + indent = _get_indent(lines[1:]) + if lines[0].lstrip(): + # Indent the original first line to let inspect.cleandoc() + # dedent the docstring despite the deprecation notice. + doc = indent * ' ' + doc + else: + # Remove the same leading blank lines as cleandoc() would. 
+ skip = len(lines[0]) + 1 + for line in lines[1:]: + if len(line) > indent: + break + skip += len(line) + 1 + doc = doc[skip:] + depdoc = textwrap.indent(depdoc, ' ' * indent) + doc = '\n\n'.join([depdoc, doc]) + newfunc.__doc__ = doc + try: + d = func.__dict__ + except AttributeError: + pass + else: + newfunc.__dict__.update(d) + return newfunc + + +def _get_indent(lines): + """ + Determines the leading whitespace that could be removed from all the lines. + """ + indent = sys.maxsize + for line in lines: + content = len(line.lstrip()) + if content: + indent = min(indent, len(line) - content) + if indent == sys.maxsize: + indent = 0 + return indent + + +def deprecate(*args, **kwargs): + """ + Issues a DeprecationWarning, adds warning to `old_name`'s + docstring, rebinds ``old_name.__name__`` and returns the new + function object. + + This function may also be used as a decorator. + + Parameters + ---------- + func : function + The function to be deprecated. + old_name : str, optional + The name of the function to be deprecated. Default is None, in + which case the name of `func` is used. + new_name : str, optional + The new name for the function. Default is None, in which case the + deprecation message is that `old_name` is deprecated. If given, the + deprecation message is that `old_name` is deprecated and `new_name` + should be used instead. + message : str, optional + Additional explanation of the deprecation. Displayed in the + docstring after the warning. + + Returns + ------- + old_func : function + The deprecated function. + + Examples + -------- + Note that ``olduint`` returns a value after printing Deprecation + Warning: + + >>> olduint = np.deprecate(np.uint) + DeprecationWarning: `uint64` is deprecated! # may vary + >>> olduint(6) + 6 + + """ + # Deprecate may be run as a function or as a decorator + # If run as a function, we initialise the decorator class + # and execute its __call__ method. + + if args: + fn = args[0] + args = args[1:] + + return _Deprecate(*args, **kwargs)(fn) + else: + return _Deprecate(*args, **kwargs) + + +def deprecate_with_doc(msg): + """ + Deprecates a function and includes the deprecation in its docstring. + + This function is used as a decorator. It returns an object that can be + used to issue a DeprecationWarning, by passing the to-be decorated + function as argument, this adds warning to the to-be decorated function's + docstring and returns the new function object. + + See Also + -------- + deprecate : Decorate a function such that it issues a `DeprecationWarning` + + Parameters + ---------- + msg : str + Additional explanation of the deprecation. Displayed in the + docstring after the warning. + + Returns + ------- + obj : object + + """ + return _Deprecate(message=msg) + + +#-------------------------------------------- +# Determine if two arrays can share memory +#-------------------------------------------- + +def byte_bounds(a): + """ + Returns pointers to the end-points of an array. + + Parameters + ---------- + a : ndarray + Input array. It must conform to the Python-side of the array + interface. + + Returns + ------- + (low, high) : tuple of 2 integers + The first integer is the first byte of the array, the second + integer is just past the last byte of the array. If `a` is not + contiguous it will not use every byte between the (`low`, `high`) + values. 
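Because ``deprecate`` above instantiates ``_Deprecate`` either way, it works both as a plain call and as a decorator factory; the docstring surgery exists so ``inspect.cleandoc`` still dedents the wrapped function's help text despite the prepended notice. Typical decorator-form use (hypothetical function name):

    import numpy as np

    @np.deprecate(message="scheduled for removal")
    def legacy_helper(a):
        return a * 2

    legacy_helper(3)   # returns 6 after emitting: DeprecationWarning: `legacy_helper` is deprecated!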
+ + Examples + -------- + >>> I = np.eye(2, dtype='f'); I.dtype + dtype('float32') + >>> low, high = np.byte_bounds(I) + >>> high - low == I.size*I.itemsize + True + >>> I = np.eye(2); I.dtype + dtype('float64') + >>> low, high = np.byte_bounds(I) + >>> high - low == I.size*I.itemsize + True + + """ + ai = a.__array_interface__ + a_data = ai['data'][0] + astrides = ai['strides'] + ashape = ai['shape'] + bytes_a = asarray(a).dtype.itemsize + + a_low = a_high = a_data + if astrides is None: + # contiguous case + a_high += a.size * bytes_a + else: + for shape, stride in zip(ashape, astrides): + if stride < 0: + a_low += (shape-1)*stride + else: + a_high += (shape-1)*stride + a_high += bytes_a + return a_low, a_high + + +#----------------------------------------------------------------------------- +# Function for output and information on the variables used. +#----------------------------------------------------------------------------- + + +def who(vardict=None): + """ + Print the NumPy arrays in the given dictionary. + + If there is no dictionary passed in or `vardict` is None then returns + NumPy arrays in the globals() dictionary (all NumPy arrays in the + namespace). + + Parameters + ---------- + vardict : dict, optional + A dictionary possibly containing ndarrays. Default is globals(). + + Returns + ------- + out : None + Returns 'None'. + + Notes + ----- + Prints out the name, shape, bytes and type of all of the ndarrays + present in `vardict`. + + Examples + -------- + >>> a = np.arange(10) + >>> b = np.ones(20) + >>> np.who() + Name Shape Bytes Type + =========================================================== + a 10 80 int64 + b 20 160 float64 + Upper bound on total bytes = 240 + + >>> d = {'x': np.arange(2.0), 'y': np.arange(3.0), 'txt': 'Some str', + ... 'idx':5} + >>> np.who(d) + Name Shape Bytes Type + =========================================================== + x 2 16 float64 + y 3 24 float64 + Upper bound on total bytes = 40 + + """ + if vardict is None: + frame = sys._getframe().f_back + vardict = frame.f_globals + sta = [] + cache = {} + for name in vardict.keys(): + if isinstance(vardict[name], ndarray): + var = vardict[name] + idv = id(var) + if idv in cache.keys(): + namestr = name + " (%s)" % cache[idv] + original = 0 + else: + cache[idv] = name + namestr = name + original = 1 + shapestr = " x ".join(map(str, var.shape)) + bytestr = str(var.nbytes) + sta.append([namestr, shapestr, bytestr, var.dtype.name, + original]) + + maxname = 0 + maxshape = 0 + maxbyte = 0 + totalbytes = 0 + for k in range(len(sta)): + val = sta[k] + if maxname < len(val[0]): + maxname = len(val[0]) + if maxshape < len(val[1]): + maxshape = len(val[1]) + if maxbyte < len(val[2]): + maxbyte = len(val[2]) + if val[4]: + totalbytes += int(val[2]) + + if len(sta) > 0: + sp1 = max(10, maxname) + sp2 = max(10, maxshape) + sp3 = max(10, maxbyte) + prval = "Name %s Shape %s Bytes %s Type" % (sp1*' ', sp2*' ', sp3*' ') + print(prval + "\n" + "="*(len(prval)+5) + "\n") + + for k in range(len(sta)): + val = sta[k] + print("%s %s %s %s %s %s %s" % (val[0], ' '*(sp1-len(val[0])+4), + val[1], ' '*(sp2-len(val[1])+5), + val[2], ' '*(sp3-len(val[2])+5), + val[3])) + print("\nUpper bound on total bytes = %d" % totalbytes) + return + +#----------------------------------------------------------------------------- + + +# NOTE: pydoc defines a help function which works similarly to this +# except it uses a pager to take over the screen. 
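For a non-contiguous view, ``byte_bounds`` above walks the strides, folding negative strides into the low bound and positive ones into the high bound, so the returned window covers the view even though not every byte inside it belongs to it. For example:

    >>> a = np.arange(10, dtype=np.int64)
    >>> low, high = np.byte_bounds(a)
    >>> high - low == a.size * a.itemsize
    True
    >>> v = a[::2]                  # 5 elements, 16-byte stride
    >>> low_v, high_v = np.byte_bounds(v)
    >>> high_v - low_v              # spans 9 elements of the base buffer
    72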
+ +# combine name and arguments and split to multiple lines of width +# characters. End lines on a comma and begin argument list indented with +# the rest of the arguments. +def _split_line(name, arguments, width): + firstwidth = len(name) + k = firstwidth + newstr = name + sepstr = ", " + arglist = arguments.split(sepstr) + for argument in arglist: + if k == firstwidth: + addstr = "" + else: + addstr = sepstr + k = k + len(argument) + len(addstr) + if k > width: + k = firstwidth + 1 + len(argument) + newstr = newstr + ",\n" + " "*(firstwidth+2) + argument + else: + newstr = newstr + addstr + argument + return newstr + +_namedict = None +_dictlist = None + +# Traverse all module directories underneath globals +# to see if something is defined +def _makenamedict(module='numpy'): + module = __import__(module, globals(), locals(), []) + thedict = {module.__name__:module.__dict__} + dictlist = [module.__name__] + totraverse = [module.__dict__] + while True: + if len(totraverse) == 0: + break + thisdict = totraverse.pop(0) + for x in thisdict.keys(): + if isinstance(thisdict[x], types.ModuleType): + modname = thisdict[x].__name__ + if modname not in dictlist: + moddict = thisdict[x].__dict__ + dictlist.append(modname) + totraverse.append(moddict) + thedict[modname] = moddict + return thedict, dictlist + + +def _info(obj, output=sys.stdout): + """Provide information about ndarray obj. + + Parameters + ---------- + obj : ndarray + Must be ndarray, not checked. + output + Where printed output goes. + + Notes + ----- + Copied over from the numarray module prior to its removal. + Adapted somewhat as only numpy is an option now. + + Called by info. + + """ + extra = "" + tic = "" + bp = lambda x: x + cls = getattr(obj, '__class__', type(obj)) + nm = getattr(cls, '__name__', cls) + strides = obj.strides + endian = obj.dtype.byteorder + + print("class: ", nm, file=output) + print("shape: ", obj.shape, file=output) + print("strides: ", strides, file=output) + print("itemsize: ", obj.itemsize, file=output) + print("aligned: ", bp(obj.flags.aligned), file=output) + print("contiguous: ", bp(obj.flags.contiguous), file=output) + print("fortran: ", obj.flags.fortran, file=output) + print( + "data pointer: %s%s" % (hex(obj.ctypes._as_parameter_.value), extra), + file=output + ) + print("byteorder: ", end=' ', file=output) + if endian in ['|', '=']: + print("%s%s%s" % (tic, sys.byteorder, tic), file=output) + byteswap = False + elif endian == '>': + print("%sbig%s" % (tic, tic), file=output) + byteswap = sys.byteorder != "big" + else: + print("%slittle%s" % (tic, tic), file=output) + byteswap = sys.byteorder != "little" + print("byteswap: ", bp(byteswap), file=output) + print("type: %s" % obj.dtype, file=output) + + +@set_module('numpy') +def info(object=None, maxwidth=76, output=sys.stdout, toplevel='numpy'): + """ + Get help information for a function, class, or module. + + Parameters + ---------- + object : object or str, optional + Input object or name to get information about. If `object` is a + numpy object, its docstring is given. If it is a string, available + modules are searched for matching objects. If None, information + about `info` itself is returned. + maxwidth : int, optional + Printing width. + output : file like object, optional + File like object that the output is written to, default is + ``stdout``. The object has to be opened in 'w' or 'a' mode. + toplevel : str, optional + Start search at this level. 
+ + See Also + -------- + source, lookfor + + Notes + ----- + When used interactively with an object, ``np.info(obj)`` is equivalent + to ``help(obj)`` on the Python prompt or ``obj?`` on the IPython + prompt. + + Examples + -------- + >>> np.info(np.polyval) # doctest: +SKIP + polyval(p, x) + Evaluate the polynomial p at x. + ... + + When using a string for `object` it is possible to get multiple results. + + >>> np.info('fft') # doctest: +SKIP + *** Found in numpy *** + Core FFT routines + ... + *** Found in numpy.fft *** + fft(a, n=None, axis=-1) + ... + *** Repeat reference found in numpy.fft.fftpack *** + *** Total of 3 references found. *** + + """ + global _namedict, _dictlist + # Local import to speed up numpy's import time. + import pydoc + import inspect + + if (hasattr(object, '_ppimport_importer') or + hasattr(object, '_ppimport_module')): + object = object._ppimport_module + elif hasattr(object, '_ppimport_attr'): + object = object._ppimport_attr + + if object is None: + info(info) + elif isinstance(object, ndarray): + _info(object, output=output) + elif isinstance(object, str): + if _namedict is None: + _namedict, _dictlist = _makenamedict(toplevel) + numfound = 0 + objlist = [] + for namestr in _dictlist: + try: + obj = _namedict[namestr][object] + if id(obj) in objlist: + print("\n " + "*** Repeat reference found in %s *** " % namestr, + file=output + ) + else: + objlist.append(id(obj)) + print(" *** Found in %s ***" % namestr, file=output) + info(obj) + print("-"*maxwidth, file=output) + numfound += 1 + except KeyError: + pass + if numfound == 0: + print("Help for %s not found." % object, file=output) + else: + print("\n " + "*** Total of %d references found. ***" % numfound, + file=output + ) + + elif inspect.isfunction(object) or inspect.ismethod(object): + name = object.__name__ + try: + arguments = str(inspect.signature(object)) + except Exception: + arguments = "()" + + if len(name+arguments) > maxwidth: + argstr = _split_line(name, arguments, maxwidth) + else: + argstr = name + arguments + + print(" " + argstr + "\n", file=output) + print(inspect.getdoc(object), file=output) + + elif inspect.isclass(object): + name = object.__name__ + try: + arguments = str(inspect.signature(object)) + except Exception: + arguments = "()" + + if len(name+arguments) > maxwidth: + argstr = _split_line(name, arguments, maxwidth) + else: + argstr = name + arguments + + print(" " + argstr + "\n", file=output) + doc1 = inspect.getdoc(object) + if doc1 is None: + if hasattr(object, '__init__'): + print(inspect.getdoc(object.__init__), file=output) + else: + print(inspect.getdoc(object), file=output) + + methods = pydoc.allmethods(object) + + public_methods = [meth for meth in methods if meth[0] != '_'] + if public_methods: + print("\n\nMethods:\n", file=output) + for meth in public_methods: + thisobj = getattr(object, meth, None) + if thisobj is not None: + methstr, other = pydoc.splitdoc( + inspect.getdoc(thisobj) or "None" + ) + print(" %s -- %s" % (meth, methstr), file=output) + + elif hasattr(object, '__doc__'): + print(inspect.getdoc(object), file=output) + + +@set_module('numpy') +def source(object, output=sys.stdout): + """ + Print or write to a file the source code for a NumPy object. + + The source code is only returned for objects written in Python. Many + functions and classes are defined in C and will therefore not return + useful information. + + Parameters + ---------- + object : numpy object + Input object. This can be any object (function, class, module, + ...). 
+ output : file object, optional + If `output` not supplied then source code is printed to screen + (sys.stdout). File object must be created with either write 'w' or + append 'a' modes. + + See Also + -------- + lookfor, info + + Examples + -------- + >>> np.source(np.interp) #doctest: +SKIP + In file: /usr/lib/python2.6/dist-packages/numpy/lib/function_base.py + def interp(x, xp, fp, left=None, right=None): + \"\"\".... (full docstring printed)\"\"\" + if isinstance(x, (float, int, number)): + return compiled_interp([x], xp, fp, left, right).item() + else: + return compiled_interp(x, xp, fp, left, right) + + The source code is only returned for objects written in Python. + + >>> np.source(np.array) #doctest: +SKIP + Not available for this object. + + """ + # Local import to speed up numpy's import time. + import inspect + try: + print("In file: %s\n" % inspect.getsourcefile(object), file=output) + print(inspect.getsource(object), file=output) + except Exception: + print("Not available for this object.", file=output) + + +# Cache for lookfor: {id(module): {name: (docstring, kind, index), ...}...} +# where kind: "func", "class", "module", "object" +# and index: index in breadth-first namespace traversal +_lookfor_caches = {} + +# regexp whose match indicates that the string may contain a function +# signature +_function_signature_re = re.compile(r"[a-z0-9_]+\(.*[,=].*\)", re.I) + + +@set_module('numpy') +def lookfor(what, module=None, import_modules=True, regenerate=False, + output=None): + """ + Do a keyword search on docstrings. + + A list of objects that matched the search is displayed, + sorted by relevance. All given keywords need to be found in the + docstring for it to be returned as a result, but the order does + not matter. + + Parameters + ---------- + what : str + String containing words to look for. + module : str or list, optional + Name of module(s) whose docstrings to go through. + import_modules : bool, optional + Whether to import sub-modules in packages. Default is True. + regenerate : bool, optional + Whether to re-generate the docstring cache. Default is False. + output : file-like, optional + File-like object to write the output to. If omitted, use a pager. + + See Also + -------- + source, info + + Notes + ----- + Relevance is determined only roughly, by checking if the keywords occur + in the function name, at the start of a docstring, etc. + + Examples + -------- + >>> np.lookfor('binary representation') # doctest: +SKIP + Search results for 'binary representation' + ------------------------------------------ + numpy.binary_repr + Return the binary representation of the input number as a string. + numpy.core.setup_common.long_double_representation + Given a binary dump as given by GNU od -b, look for long double + numpy.base_repr + Return a string representation of a number in the given base system. + ... + + """ + import pydoc + + # Cache + cache = _lookfor_generate_cache(module, import_modules, regenerate) + + # Search + # XXX: maybe using a real stemming search engine would be better? 
+ found = [] + whats = str(what).lower().split() + if not whats: + return + + for name, (docstring, kind, index) in cache.items(): + if kind in ('module', 'object'): + # don't show modules or objects + continue + doc = docstring.lower() + if all(w in doc for w in whats): + found.append(name) + + # Relevance sort + # XXX: this is full Harrison-Stetson heuristics now, + # XXX: it probably could be improved + + kind_relevance = {'func': 1000, 'class': 1000, + 'module': -1000, 'object': -1000} + + def relevance(name, docstr, kind, index): + r = 0 + # do the keywords occur within the start of the docstring? + first_doc = "\n".join(docstr.lower().strip().split("\n")[:3]) + r += sum([200 for w in whats if w in first_doc]) + # do the keywords occur in the function name? + r += sum([30 for w in whats if w in name]) + # is the full name long? + r += -len(name) * 5 + # is the object of bad type? + r += kind_relevance.get(kind, -1000) + # is the object deep in namespace hierarchy? + r += -name.count('.') * 10 + r += max(-index / 100, -100) + return r + + def relevance_value(a): + return relevance(a, *cache[a]) + found.sort(key=relevance_value) + + # Pretty-print + s = "Search results for '%s'" % (' '.join(whats)) + help_text = [s, "-"*len(s)] + for name in found[::-1]: + doc, kind, ix = cache[name] + + doclines = [line.strip() for line in doc.strip().split("\n") + if line.strip()] + + # find a suitable short description + try: + first_doc = doclines[0].strip() + if _function_signature_re.search(first_doc): + first_doc = doclines[1].strip() + except IndexError: + first_doc = "" + help_text.append("%s\n %s" % (name, first_doc)) + + if not found: + help_text.append("Nothing found.") + + # Output + if output is not None: + output.write("\n".join(help_text)) + elif len(help_text) > 10: + pager = pydoc.getpager() + pager("\n".join(help_text)) + else: + print("\n".join(help_text)) + +def _lookfor_generate_cache(module, import_modules, regenerate): + """ + Generate docstring cache for given module. + + Parameters + ---------- + module : str, None, module + Module for which to generate docstring cache + import_modules : bool + Whether to import sub-modules in packages. + regenerate : bool + Re-generate the docstring cache + + Returns + ------- + cache : dict {obj_full_name: (docstring, kind, index), ...} + Docstring cache for the module, either cached one (regenerate=False) + or newly generated. + + """ + # Local import to speed up numpy's import time. 
+ import inspect + + from io import StringIO + + if module is None: + module = "numpy" + + if isinstance(module, str): + try: + __import__(module) + except ImportError: + return {} + module = sys.modules[module] + elif isinstance(module, list) or isinstance(module, tuple): + cache = {} + for mod in module: + cache.update(_lookfor_generate_cache(mod, import_modules, + regenerate)) + return cache + + if id(module) in _lookfor_caches and not regenerate: + return _lookfor_caches[id(module)] + + # walk items and collect docstrings + cache = {} + _lookfor_caches[id(module)] = cache + seen = {} + index = 0 + stack = [(module.__name__, module)] + while stack: + name, item = stack.pop(0) + if id(item) in seen: + continue + seen[id(item)] = True + + index += 1 + kind = "object" + + if inspect.ismodule(item): + kind = "module" + try: + _all = item.__all__ + except AttributeError: + _all = None + + # import sub-packages + if import_modules and hasattr(item, '__path__'): + for pth in item.__path__: + for mod_path in os.listdir(pth): + this_py = os.path.join(pth, mod_path) + init_py = os.path.join(pth, mod_path, '__init__.py') + if (os.path.isfile(this_py) and + mod_path.endswith('.py')): + to_import = mod_path[:-3] + elif os.path.isfile(init_py): + to_import = mod_path + else: + continue + if to_import == '__init__': + continue + + try: + old_stdout = sys.stdout + old_stderr = sys.stderr + try: + sys.stdout = StringIO() + sys.stderr = StringIO() + __import__("%s.%s" % (name, to_import)) + finally: + sys.stdout = old_stdout + sys.stderr = old_stderr + # Catch SystemExit, too + except BaseException: + continue + + for n, v in _getmembers(item): + try: + item_name = getattr(v, '__name__', "%s.%s" % (name, n)) + mod_name = getattr(v, '__module__', None) + except NameError: + # ref. SWIG's global cvars + # NameError: Unknown C global variable + item_name = "%s.%s" % (name, n) + mod_name = None + if '.' not in item_name and mod_name: + item_name = "%s.%s" % (mod_name, item_name) + + if not item_name.startswith(name + '.'): + # don't crawl "foreign" objects + if isinstance(v, ufunc): + # ... unless they are ufuncs + pass + else: + continue + elif not (inspect.ismodule(v) or _all is None or n in _all): + continue + stack.append(("%s.%s" % (name, n), v)) + elif inspect.isclass(item): + kind = "class" + for n, v in _getmembers(item): + stack.append(("%s.%s" % (name, n), v)) + elif hasattr(item, "__call__"): + kind = "func" + + try: + doc = inspect.getdoc(item) + except NameError: + # ref SWIG's NameError: Unknown C global variable + doc = None + if doc is not None: + cache[name] = (doc, kind, index) + + return cache + +def _getmembers(item): + import inspect + try: + members = inspect.getmembers(item) + except Exception: + members = [(x, getattr(item, x)) for x in dir(item) + if hasattr(item, x)] + return members + + +def safe_eval(source): + """ + Protected string evaluation. + + Evaluate a string containing a Python literal expression without + allowing the execution of arbitrary non-literal code. + + Parameters + ---------- + source : str + The string to evaluate. + + Returns + ------- + obj : object + The result of evaluating `source`. + + Raises + ------ + SyntaxError + If the code has invalid Python syntax, or if it contains + non-literal code. + + Examples + -------- + >>> np.safe_eval('1') + 1 + >>> np.safe_eval('[1, 2, 3]') + [1, 2, 3] + >>> np.safe_eval('{"foo": ("bar", 10.0)}') + {'foo': ('bar', 10.0)} + + >>> np.safe_eval('import os') + Traceback (most recent call last): + ... 
+ SyntaxError: invalid syntax + + >>> np.safe_eval('open("/home/user/.ssh/id_dsa").read()') + Traceback (most recent call last): + ... + ValueError: malformed node or string: <_ast.Call object at 0x...> + + """ + # Local import to speed up numpy's import time. + import ast + return ast.literal_eval(source) + + +def _median_nancheck(data, result, axis, out): + """ + Utility function to check median result from data for NaN values at the end + and return NaN in that case. Input result can also be a MaskedArray. + + Parameters + ---------- + data : array + Input data to median function + result : Array or MaskedArray + Result of median function + axis : int + Axis along which the median was computed. + out : ndarray, optional + Output array in which to place the result. + + Returns + ------- + median : scalar or ndarray + Median or NaN in axes which contained NaN in the input. + """ + if data.size == 0: + return result + n = np.isnan(data.take(-1, axis=axis)) + # masked NaN values are ok + if np.ma.isMaskedArray(n): + n = n.filled(False) + if result.ndim == 0: + if n == True: + if out is not None: + out[...] = data.dtype.type(np.nan) + result = out + else: + result = data.dtype.type(np.nan) + elif np.count_nonzero(n.ravel()) > 0: + result[n] = np.nan + return result + +def _opt_info(): + """ + Returns a string contains the supported CPU features by the current build. + + The string format can be explained as follows: + - dispatched features that are supported by the running machine + end with `*`. + - dispatched features that are "not" supported by the running machine + end with `?`. + - remained features are representing the baseline. + """ + from numpy.core._multiarray_umath import ( + __cpu_features__, __cpu_baseline__, __cpu_dispatch__ + ) + + if len(__cpu_baseline__) == 0 and len(__cpu_dispatch__) == 0: + return '' + + enabled_features = ' '.join(__cpu_baseline__) + for feature in __cpu_dispatch__: + if __cpu_features__[feature]: + enabled_features += f" {feature}*" + else: + enabled_features += f" {feature}?" + + return enabled_features +#----------------------------------------------------------------------------- diff --git a/MLPY/Lib/site-packages/numpy/lib/utils.pyi b/MLPY/Lib/site-packages/numpy/lib/utils.pyi new file mode 100644 index 0000000000000000000000000000000000000000..d62af57df1a2ac075c9dd87e81729a95bf4db9f6 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/lib/utils.pyi @@ -0,0 +1,99 @@ +import sys +from ast import AST +from typing import ( + Any, + Callable, + List, + Mapping, + Optional, + overload, + Sequence, + Tuple, + TypeVar, + Union, +) + +from numpy import ndarray, generic + +from numpy.core.numerictypes import ( + issubclass_ as issubclass_, + issubdtype as issubdtype, + issubsctype as issubsctype, +) + +if sys.version_info >= (3, 8): + from typing import Protocol +else: + from typing_extensions import Protocol + +_T_contra = TypeVar("_T_contra", contravariant=True) +_FuncType = TypeVar("_FuncType", bound=Callable[..., Any]) + +# A file-like object opened in `w` mode +class _SupportsWrite(Protocol[_T_contra]): + def write(self, __s: _T_contra) -> Any: ... + +__all__: List[str] + +class _Deprecate: + old_name: Optional[str] + new_name: Optional[str] + message: Optional[str] + def __init__( + self, + old_name: Optional[str] = ..., + new_name: Optional[str] = ..., + message: Optional[str] = ..., + ) -> None: ... 
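``_median_nancheck`` above is why ``np.median`` propagates NaN: after the sort used by median, NaNs land at the end of the axis, so testing only ``data.take(-1, axis=axis)`` is enough to flag the affected lanes. The observable effect:

    >>> a = np.array([[1.0, 2.0, np.nan],
    ...               [3.0, 4.0, 5.0]])
    >>> np.median(a, axis=1)
    array([nan,  4.])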
+ # NOTE: `__call__` can in principle take arbitrary `*args` and `**kwargs`, + # even though they aren't used for anything + def __call__(self, func: _FuncType) -> _FuncType: ... + +def get_include() -> str: ... + +@overload +def deprecate( + *, + old_name: Optional[str] = ..., + new_name: Optional[str] = ..., + message: Optional[str] = ..., +) -> _Deprecate: ... +@overload +def deprecate( + __func: _FuncType, + old_name: Optional[str] = ..., + new_name: Optional[str] = ..., + message: Optional[str] = ..., +) -> _FuncType: ... + +def deprecate_with_doc(msg: Optional[str]) -> _Deprecate: ... + +# NOTE: In practice `byte_bounds` can (potentially) take any object +# implementing the `__array_interface__` protocol. The caveat is +# that certain keys, marked as optional in the spec, must be present for +# `byte_bounds`. This concerns `"strides"` and `"data"`. +def byte_bounds(a: Union[generic, ndarray[Any, Any]]) -> Tuple[int, int]: ... + +def who(vardict: Optional[Mapping[str, ndarray[Any, Any]]] = ...) -> None: ... + +def info( + object: object = ..., + maxwidth: int = ..., + output: Optional[_SupportsWrite[str]] = ..., + toplevel: str = ..., +) -> None: ... + +def source( + object: object, + output: Optional[_SupportsWrite[str]] = ..., +) -> None: ... + +def lookfor( + what: str, + module: Union[None, str, Sequence[str]] = ..., + import_modules: bool = ..., + regenerate: bool = ..., + output: Optional[_SupportsWrite[str]] =..., +) -> None: ... + +def safe_eval(source: Union[str, AST]) -> Any: ... diff --git a/MLPY/Lib/site-packages/numpy/linalg/__init__.py b/MLPY/Lib/site-packages/numpy/linalg/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..efcbd918f9cb68609055bc30ad6d21e7e332977d --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/linalg/__init__.py @@ -0,0 +1,80 @@ +""" +``numpy.linalg`` +================ + +The NumPy linear algebra functions rely on BLAS and LAPACK to provide efficient +low level implementations of standard linear algebra algorithms. Those +libraries may be provided by NumPy itself using C versions of a subset of their +reference implementations but, when possible, highly optimized libraries that +take advantage of specialized processor functionality are preferred. Examples +of such libraries are OpenBLAS, MKL (TM), and ATLAS. Because those libraries +are multithreaded and processor dependent, environmental variables and external +packages such as threadpoolctl may be needed to control the number of threads +or specify the processor architecture. + +- OpenBLAS: https://www.openblas.net/ +- threadpoolctl: https://github.com/joblib/threadpoolctl + +Please note that the most-used linear algebra functions in NumPy are present in +the main ``numpy`` namespace rather than in ``numpy.linalg``. There are: +``dot``, ``vdot``, ``inner``, ``outer``, ``matmul``, ``tensordot``, ``einsum``, +``einsum_path`` and ``kron``. + +Functions present in numpy.linalg are listed below. + + +Matrix and vector products +-------------------------- + + multi_dot + matrix_power + +Decompositions +-------------- + + cholesky + qr + svd + +Matrix eigenvalues +------------------ + + eig + eigh + eigvals + eigvalsh + +Norms and other numbers +----------------------- + + norm + cond + det + matrix_rank + slogdet + +Solving equations and inverting matrices +---------------------------------------- + + solve + tensorsolve + lstsq + inv + pinv + tensorinv + +Exceptions +---------- + + LinAlgError + +""" +# To get sub-modules +from . 
import linalg +from .linalg import * + +__all__ = linalg.__all__.copy() + +from numpy._pytesttester import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/MLPY/Lib/site-packages/numpy/linalg/__init__.pyi b/MLPY/Lib/site-packages/numpy/linalg/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..b83f357f20d8849ed239b388540cbcf37583ddb6 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/linalg/__init__.pyi @@ -0,0 +1,26 @@ +from typing import Any, List + +__all__: List[str] + +class LinAlgError(Exception): ... + +def tensorsolve(a, b, axes=...): ... +def solve(a, b): ... +def tensorinv(a, ind=...): ... +def inv(a): ... +def matrix_power(a, n): ... +def cholesky(a): ... +def qr(a, mode=...): ... +def eigvals(a): ... +def eigvalsh(a, UPLO=...): ... +def eig(a): ... +def eigh(a, UPLO=...): ... +def svd(a, full_matrices=..., compute_uv=..., hermitian=...): ... +def cond(x, p=...): ... +def matrix_rank(M, tol=..., hermitian=...): ... +def pinv(a, rcond=..., hermitian=...): ... +def slogdet(a): ... +def det(a): ... +def lstsq(a, b, rcond=...): ... +def norm(x, ord=..., axis=..., keepdims=...): ... +def multi_dot(arrays, *, out=...): ... diff --git a/MLPY/Lib/site-packages/numpy/linalg/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/linalg/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..052567a8f58489e7d70f36e9f08ebac9f2799c33 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/linalg/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/linalg/__pycache__/linalg.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/linalg/__pycache__/linalg.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8aa8544d2e837de859b3ee08d7664c0cf34cc2b3 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/linalg/__pycache__/linalg.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/linalg/__pycache__/setup.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/linalg/__pycache__/setup.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..424e2b6ddb41846988c945ab9cdccb164c46a07a Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/linalg/__pycache__/setup.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/linalg/_umath_linalg.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/numpy/linalg/_umath_linalg.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..923cfe023ba3447ddba1f0b0d2da58852a017b56 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/linalg/_umath_linalg.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/numpy/linalg/lapack_lite.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/numpy/linalg/lapack_lite.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..9253c8f9690aae72e07d3ccb0ff537a157f58324 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/linalg/lapack_lite.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/numpy/linalg/linalg.py b/MLPY/Lib/site-packages/numpy/linalg/linalg.py new file mode 100644 index 0000000000000000000000000000000000000000..e559b12773106f46f8c0795e53024144b087cdad --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/linalg/linalg.py @@ -0,0 +1,2814 @@ +"""Lite version of scipy.linalg. + +Notes +----- +This module is a lite version of the linalg.py module in SciPy which +contains high-level Python interface to the LAPACK library. 
The lite
+version only accesses the following LAPACK functions: dgesv, zgesv,
+dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
+zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
+"""
+
+__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
+           'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
+           'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
+           'LinAlgError', 'multi_dot']
+
+import functools
+import operator
+import warnings
+
+from numpy.core import (
+    array, asarray, zeros, empty, empty_like, intc, single, double,
+    csingle, cdouble, inexact, complexfloating, newaxis, all, Inf, dot,
+    add, multiply, sqrt, fastCopyAndTranspose, sum, isfinite,
+    finfo, errstate, geterrobj, moveaxis, amin, amax, product, abs,
+    atleast_2d, intp, asanyarray, object_, matmul,
+    swapaxes, divide, count_nonzero, isnan, sign, argsort, sort
+)
+from numpy.core.multiarray import normalize_axis_index
+from numpy.core.overrides import set_module
+from numpy.core import overrides
+from numpy.lib.twodim_base import triu, eye
+from numpy.linalg import lapack_lite, _umath_linalg
+
+
+array_function_dispatch = functools.partial(
+    overrides.array_function_dispatch, module='numpy.linalg')
+
+
+fortran_int = intc
+
+
+@set_module('numpy.linalg')
+class LinAlgError(Exception):
+    """
+    Generic Python-exception-derived object raised by linalg functions.
+
+    General purpose exception class, derived from Python's exception.Exception
+    class, programmatically raised in linalg functions when a Linear
+    Algebra-related condition would prevent further correct execution of the
+    function.
+
+    Parameters
+    ----------
+    None
+
+    Examples
+    --------
+    >>> from numpy import linalg as LA
+    >>> LA.inv(np.zeros((2,2)))
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+      File "...linalg.py", line 350,
+        in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
+      File "...linalg.py", line 249,
+        in solve
+        raise LinAlgError('Singular matrix')
+    numpy.linalg.LinAlgError: Singular matrix
+
+    """
+
+
+def _determine_error_states():
+    errobj = geterrobj()
+    bufsize = errobj[0]
+
+    with errstate(invalid='call', over='ignore',
+                  divide='ignore', under='ignore'):
+        invalid_call_errmask = geterrobj()[1]
+
+    return [bufsize, invalid_call_errmask, None]
+
+# Dealing with errors in _umath_linalg
+_linalg_error_extobj = _determine_error_states()
+del _determine_error_states
+
+def _raise_linalgerror_singular(err, flag):
+    raise LinAlgError("Singular matrix")
+
+def _raise_linalgerror_nonposdef(err, flag):
+    raise LinAlgError("Matrix is not positive definite")
+
+def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
+    raise LinAlgError("Eigenvalues did not converge")
+
+def _raise_linalgerror_svd_nonconvergence(err, flag):
+    raise LinAlgError("SVD did not converge")
+
+def _raise_linalgerror_lstsq(err, flag):
+    raise LinAlgError("SVD did not converge in Linear Least Squares")
+
+def get_linalg_error_extobj(callback):
+    extobj = list(_linalg_error_extobj)  # make a copy
+    extobj[2] = callback
+    return extobj
+
+def _makearray(a):
+    new = asarray(a)
+    wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
+    return new, wrap
+
+def isComplexType(t):
+    return issubclass(t, complexfloating)
+
+_real_types_map = {single : single,
+                   double : double,
+                   csingle : single,
+                   cdouble : double}
+
+_complex_types_map = {single : csingle,
+                      double : cdouble,
+                      csingle : csingle,
+                      cdouble : cdouble}
+
+def _realType(t, default=double):
+    return _real_types_map.get(t,
default) + +def _complexType(t, default=cdouble): + return _complex_types_map.get(t, default) + +def _linalgRealType(t): + """Cast the type t to either double or cdouble.""" + return double + +def _commonType(*arrays): + # in lite version, use higher precision (always double or cdouble) + result_type = single + is_complex = False + for a in arrays: + if issubclass(a.dtype.type, inexact): + if isComplexType(a.dtype.type): + is_complex = True + rt = _realType(a.dtype.type, default=None) + if rt is None: + # unsupported inexact scalar + raise TypeError("array type %s is unsupported in linalg" % + (a.dtype.name,)) + else: + rt = double + if rt is double: + result_type = double + if is_complex: + t = cdouble + result_type = _complex_types_map[result_type] + else: + t = double + return t, result_type + + +# _fastCopyAndTranpose assumes the input is 2D (as all the calls in here are). + +_fastCT = fastCopyAndTranspose + +def _to_native_byte_order(*arrays): + ret = [] + for arr in arrays: + if arr.dtype.byteorder not in ('=', '|'): + ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('='))) + else: + ret.append(arr) + if len(ret) == 1: + return ret[0] + else: + return ret + +def _fastCopyAndTranspose(type, *arrays): + cast_arrays = () + for a in arrays: + if a.dtype.type is not type: + a = a.astype(type) + cast_arrays = cast_arrays + (_fastCT(a),) + if len(cast_arrays) == 1: + return cast_arrays[0] + else: + return cast_arrays + +def _assert_2d(*arrays): + for a in arrays: + if a.ndim != 2: + raise LinAlgError('%d-dimensional array given. Array must be ' + 'two-dimensional' % a.ndim) + +def _assert_stacked_2d(*arrays): + for a in arrays: + if a.ndim < 2: + raise LinAlgError('%d-dimensional array given. Array must be ' + 'at least two-dimensional' % a.ndim) + +def _assert_stacked_square(*arrays): + for a in arrays: + m, n = a.shape[-2:] + if m != n: + raise LinAlgError('Last 2 dimensions of the array must be square') + +def _assert_finite(*arrays): + for a in arrays: + if not isfinite(a).all(): + raise LinAlgError("Array must not contain infs or NaNs") + +def _is_empty_2d(arr): + # check size first for efficiency + return arr.size == 0 and product(arr.shape[-2:]) == 0 + + +def transpose(a): + """ + Transpose each matrix in a stack of matrices. + + Unlike np.transpose, this only swaps the last two axes, rather than all of + them + + Parameters + ---------- + a : (...,M,N) array_like + + Returns + ------- + aT : (...,N,M) ndarray + """ + return swapaxes(a, -1, -2) + +# Linear equations + +def _tensorsolve_dispatcher(a, b, axes=None): + return (a, b) + + +@array_function_dispatch(_tensorsolve_dispatcher) +def tensorsolve(a, b, axes=None): + """ + Solve the tensor equation ``a x = b`` for x. + + It is assumed that all indices of `x` are summed over in the product, + together with the rightmost indices of `a`, as is done in, for example, + ``tensordot(a, x, axes=b.ndim)``. + + Parameters + ---------- + a : array_like + Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals + the shape of that sub-tensor of `a` consisting of the appropriate + number of its rightmost indices, and must be such that + ``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be + 'square'). + b : array_like + Right-hand tensor, which can be of any shape. + axes : tuple of ints, optional + Axes in `a` to reorder to the right, before inversion. + If None (default), no reordering is done. 
+ + Returns + ------- + x : ndarray, shape Q + + Raises + ------ + LinAlgError + If `a` is singular or not 'square' (in the above sense). + + See Also + -------- + numpy.tensordot, tensorinv, numpy.einsum + + Examples + -------- + >>> a = np.eye(2*3*4) + >>> a.shape = (2*3, 4, 2, 3, 4) + >>> b = np.random.randn(2*3, 4) + >>> x = np.linalg.tensorsolve(a, b) + >>> x.shape + (2, 3, 4) + >>> np.allclose(np.tensordot(a, x, axes=3), b) + True + + """ + a, wrap = _makearray(a) + b = asarray(b) + an = a.ndim + + if axes is not None: + allaxes = list(range(0, an)) + for k in axes: + allaxes.remove(k) + allaxes.insert(an, k) + a = a.transpose(allaxes) + + oldshape = a.shape[-(an-b.ndim):] + prod = 1 + for k in oldshape: + prod *= k + + a = a.reshape(-1, prod) + b = b.ravel() + res = wrap(solve(a, b)) + res.shape = oldshape + return res + + +def _solve_dispatcher(a, b): + return (a, b) + + +@array_function_dispatch(_solve_dispatcher) +def solve(a, b): + """ + Solve a linear matrix equation, or system of linear scalar equations. + + Computes the "exact" solution, `x`, of the well-determined, i.e., full + rank, linear matrix equation `ax = b`. + + Parameters + ---------- + a : (..., M, M) array_like + Coefficient matrix. + b : {(..., M,), (..., M, K)}, array_like + Ordinate or "dependent variable" values. + + Returns + ------- + x : {(..., M,), (..., M, K)} ndarray + Solution to the system a x = b. Returned shape is identical to `b`. + + Raises + ------ + LinAlgError + If `a` is singular or not square. + + See Also + -------- + scipy.linalg.solve : Similar function in SciPy. + + Notes + ----- + + .. versionadded:: 1.8.0 + + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The solutions are computed using LAPACK routine ``_gesv``. + + `a` must be square and of full-rank, i.e., all rows (or, equivalently, + columns) must be linearly independent; if either is not true, use + `lstsq` for the least-squares best "solution" of the + system/equation. + + References + ---------- + .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, + FL, Academic Press, Inc., 1980, pg. 22. + + Examples + -------- + Solve the system of equations ``x0 + 2 * x1 = 1`` and ``3 * x0 + 5 * x1 = 2``: + + >>> a = np.array([[1, 2], [3, 5]]) + >>> b = np.array([1, 2]) + >>> x = np.linalg.solve(a, b) + >>> x + array([-1., 1.]) + + Check that the solution is correct: + + >>> np.allclose(np.dot(a, x), b) + True + + """ + a, _ = _makearray(a) + _assert_stacked_2d(a) + _assert_stacked_square(a) + b, wrap = _makearray(b) + t, result_t = _commonType(a, b) + + # We use the b = (..., M,) logic, only if the number of extra dimensions + # match exactly + if b.ndim == a.ndim - 1: + gufunc = _umath_linalg.solve1 + else: + gufunc = _umath_linalg.solve + + signature = 'DD->D' if isComplexType(t) else 'dd->d' + extobj = get_linalg_error_extobj(_raise_linalgerror_singular) + r = gufunc(a, b, signature=signature, extobj=extobj) + + return wrap(r.astype(result_t, copy=False)) + + +def _tensorinv_dispatcher(a, ind=None): + return (a,) + + +@array_function_dispatch(_tensorinv_dispatcher) +def tensorinv(a, ind=2): + """ + Compute the 'inverse' of an N-dimensional array. + + The result is an inverse for `a` relative to the tensordot operation + ``tensordot(a, b, ind)``, i. e., up to floating-point accuracy, + ``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the + tensordot operation. + + Parameters + ---------- + a : array_like + Tensor to 'invert'. Its shape must be 'square', i. 
e., + ``prod(a.shape[:ind]) == prod(a.shape[ind:])``. + ind : int, optional + Number of first indices that are involved in the inverse sum. + Must be a positive integer, default is 2. + + Returns + ------- + b : ndarray + `a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``. + + Raises + ------ + LinAlgError + If `a` is singular or not 'square' (in the above sense). + + See Also + -------- + numpy.tensordot, tensorsolve + + Examples + -------- + >>> a = np.eye(4*6) + >>> a.shape = (4, 6, 8, 3) + >>> ainv = np.linalg.tensorinv(a, ind=2) + >>> ainv.shape + (8, 3, 4, 6) + >>> b = np.random.randn(4, 6) + >>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b)) + True + + >>> a = np.eye(4*6) + >>> a.shape = (24, 8, 3) + >>> ainv = np.linalg.tensorinv(a, ind=1) + >>> ainv.shape + (8, 3, 24) + >>> b = np.random.randn(24) + >>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b)) + True + + """ + a = asarray(a) + oldshape = a.shape + prod = 1 + if ind > 0: + invshape = oldshape[ind:] + oldshape[:ind] + for k in oldshape[ind:]: + prod *= k + else: + raise ValueError("Invalid ind argument.") + a = a.reshape(prod, -1) + ia = inv(a) + return ia.reshape(*invshape) + + +# Matrix inversion + +def _unary_dispatcher(a): + return (a,) + + +@array_function_dispatch(_unary_dispatcher) +def inv(a): + """ + Compute the (multiplicative) inverse of a matrix. + + Given a square matrix `a`, return the matrix `ainv` satisfying + ``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``. + + Parameters + ---------- + a : (..., M, M) array_like + Matrix to be inverted. + + Returns + ------- + ainv : (..., M, M) ndarray or matrix + (Multiplicative) inverse of the matrix `a`. + + Raises + ------ + LinAlgError + If `a` is not square or inversion fails. + + See Also + -------- + scipy.linalg.inv : Similar function in SciPy. + + Notes + ----- + + .. versionadded:: 1.8.0 + + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + Examples + -------- + >>> from numpy.linalg import inv + >>> a = np.array([[1., 2.], [3., 4.]]) + >>> ainv = inv(a) + >>> np.allclose(np.dot(a, ainv), np.eye(2)) + True + >>> np.allclose(np.dot(ainv, a), np.eye(2)) + True + + If a is a matrix object, then the return value is a matrix as well: + + >>> ainv = inv(np.matrix(a)) + >>> ainv + matrix([[-2. , 1. ], + [ 1.5, -0.5]]) + + Inverses of several matrices can be computed at once: + + >>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]]) + >>> inv(a) + array([[[-2. , 1. ], + [ 1.5 , -0.5 ]], + [[-1.25, 0.75], + [ 0.75, -0.25]]]) + + """ + a, wrap = _makearray(a) + _assert_stacked_2d(a) + _assert_stacked_square(a) + t, result_t = _commonType(a) + + signature = 'D->D' if isComplexType(t) else 'd->d' + extobj = get_linalg_error_extobj(_raise_linalgerror_singular) + ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj) + return wrap(ainv.astype(result_t, copy=False)) + + +def _matrix_power_dispatcher(a, n): + return (a,) + + +@array_function_dispatch(_matrix_power_dispatcher) +def matrix_power(a, n): + """ + Raise a square matrix to the (integer) power `n`. + + For positive integers `n`, the power is computed by repeated matrix + squarings and matrix multiplications. If ``n == 0``, the identity matrix + of the same shape as M is returned. If ``n < 0``, the inverse + is computed and then raised to the ``abs(n)``. + + .. note:: Stacks of object matrices are not currently supported. + + Parameters + ---------- + a : (..., M, M) array_like + Matrix to be "powered". 
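To make the inv/tensorinv round trips described above concrete, a small self-checking sketch (plain NumPy, nothing beyond what the docstrings promise; the matrix_power parameters continue below):

import numpy as np

a = np.array([[1., 2.], [3., 4.]])
ainv = np.linalg.inv(a)
assert np.allclose(a @ ainv, np.eye(2))   # dot(a, ainv) == eye(2)

# tensorinv generalizes this: reshaped back to 2-D, the tensor inverse
# is just the matrix inverse of the flattened operator.
t = np.eye(4).reshape(2, 2, 2, 2)
tinv = np.linalg.tensorinv(t, ind=2)
assert np.allclose(np.tensordot(tinv, t, axes=2),
                   np.eye(4).reshape(2, 2, 2, 2))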
+ n : int + The exponent can be any integer or long integer, positive, + negative, or zero. + + Returns + ------- + a**n : (..., M, M) ndarray or matrix object + The return value is the same shape and type as `M`; + if the exponent is positive or zero then the type of the + elements is the same as those of `M`. If the exponent is + negative the elements are floating-point. + + Raises + ------ + LinAlgError + For matrices that are not square or that (for negative powers) cannot + be inverted numerically. + + Examples + -------- + >>> from numpy.linalg import matrix_power + >>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit + >>> matrix_power(i, 3) # should = -i + array([[ 0, -1], + [ 1, 0]]) + >>> matrix_power(i, 0) + array([[1, 0], + [0, 1]]) + >>> matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements + array([[ 0., 1.], + [-1., 0.]]) + + Somewhat more sophisticated example + + >>> q = np.zeros((4, 4)) + >>> q[0:2, 0:2] = -i + >>> q[2:4, 2:4] = i + >>> q # one of the three quaternion units not equal to 1 + array([[ 0., -1., 0., 0.], + [ 1., 0., 0., 0.], + [ 0., 0., 0., 1.], + [ 0., 0., -1., 0.]]) + >>> matrix_power(q, 2) # = -np.eye(4) + array([[-1., 0., 0., 0.], + [ 0., -1., 0., 0.], + [ 0., 0., -1., 0.], + [ 0., 0., 0., -1.]]) + + """ + a = asanyarray(a) + _assert_stacked_2d(a) + _assert_stacked_square(a) + + try: + n = operator.index(n) + except TypeError as e: + raise TypeError("exponent must be an integer") from e + + # Fall back on dot for object arrays. Object arrays are not supported by + # the current implementation of matmul using einsum + if a.dtype != object: + fmatmul = matmul + elif a.ndim == 2: + fmatmul = dot + else: + raise NotImplementedError( + "matrix_power not supported for stacks of object arrays") + + if n == 0: + a = empty_like(a) + a[...] = eye(a.shape[-2], dtype=a.dtype) + return a + + elif n < 0: + a = inv(a) + n = abs(n) + + # short-cuts. + if n == 1: + return a + + elif n == 2: + return fmatmul(a, a) + + elif n == 3: + return fmatmul(fmatmul(a, a), a) + + # Use binary decomposition to reduce the number of matrix multiplications. + # Here, we iterate over the bits of n, from LSB to MSB, raise `a` to + # increasing powers of 2, and multiply into the result as needed. + z = result = None + while n > 0: + z = a if z is None else fmatmul(z, z) + n, bit = divmod(n, 2) + if bit: + result = z if result is None else fmatmul(result, z) + + return result + + +# Cholesky decomposition + + +@array_function_dispatch(_unary_dispatcher) +def cholesky(a): + """ + Cholesky decomposition. + + Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`, + where `L` is lower-triangular and .H is the conjugate transpose operator + (which is the ordinary transpose if `a` is real-valued). `a` must be + Hermitian (symmetric if real-valued) and positive-definite. No + checking is performed to verify whether `a` is Hermitian or not. + In addition, only the lower-triangular and diagonal elements of `a` + are used. Only `L` is actually returned. + + Parameters + ---------- + a : (..., M, M) array_like + Hermitian (symmetric if all elements are real), positive-definite + input matrix. + + Returns + ------- + L : (..., M, M) array_like + Upper or lower-triangular Cholesky factor of `a`. Returns a + matrix object if `a` is a matrix object. + + Raises + ------ + LinAlgError + If the decomposition fails, for example, if `a` is not + positive-definite. + + See Also + -------- + scipy.linalg.cholesky : Similar function in SciPy. 
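One numeric check of the binary-decomposition loop implemented in matrix_power above: 13 = 0b1101, so the loop needs roughly five matmuls where naive repeated multiplication needs twelve. A sketch (illustrative only; the cholesky cross-references continue below):

import numpy as np
from numpy.linalg import matrix_power

a = np.array([[2., 1.], [0., 1.]])

# naive: twelve multiplications for a**13
naive = a.copy()
for _ in range(12):
    naive = naive @ a

# binary decomposition reaches the same result with ~5 multiplications
assert np.allclose(matrix_power(a, 13), naive)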
+    scipy.linalg.cholesky_banded : Cholesky decompose a banded Hermitian
+                                   positive-definite matrix.
+    scipy.linalg.cho_factor : Cholesky decomposition of a matrix, to use in
+                              `scipy.linalg.cho_solve`.
+
+    Notes
+    -----
+
+    .. versionadded:: 1.8.0
+
+    Broadcasting rules apply, see the `numpy.linalg` documentation for
+    details.
+
+    The Cholesky decomposition is often used as a fast way of solving
+
+    .. math:: A \\mathbf{x} = \\mathbf{b}
+
+    (when `A` is both Hermitian/symmetric and positive-definite).
+
+    First, we solve for :math:`\\mathbf{y}` in
+
+    .. math:: L \\mathbf{y} = \\mathbf{b},
+
+    and then for :math:`\\mathbf{x}` in
+
+    .. math:: L.H \\mathbf{x} = \\mathbf{y}.
+
+    Examples
+    --------
+    >>> A = np.array([[1,-2j],[2j,5]])
+    >>> A
+    array([[ 1.+0.j, -0.-2.j],
+           [ 0.+2.j,  5.+0.j]])
+    >>> L = np.linalg.cholesky(A)
+    >>> L
+    array([[1.+0.j, 0.+0.j],
+           [0.+2.j, 1.+0.j]])
+    >>> np.dot(L, L.T.conj()) # verify that L * L.H = A
+    array([[1.+0.j, 0.-2.j],
+           [0.+2.j, 5.+0.j]])
+    >>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
+    >>> np.linalg.cholesky(A) # an ndarray object is returned
+    array([[1.+0.j, 0.+0.j],
+           [0.+2.j, 1.+0.j]])
+    >>> # But a matrix object is returned if A is a matrix object
+    >>> np.linalg.cholesky(np.matrix(A))
+    matrix([[ 1.+0.j,  0.+0.j],
+            [ 0.+2.j,  1.+0.j]])
+
+    """
+    extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
+    gufunc = _umath_linalg.cholesky_lo
+    a, wrap = _makearray(a)
+    _assert_stacked_2d(a)
+    _assert_stacked_square(a)
+    t, result_t = _commonType(a)
+    signature = 'D->D' if isComplexType(t) else 'd->d'
+    r = gufunc(a, signature=signature, extobj=extobj)
+    return wrap(r.astype(result_t, copy=False))
+
+
+# QR decomposition
+
+def _qr_dispatcher(a, mode=None):
+    return (a,)
+
+
+@array_function_dispatch(_qr_dispatcher)
+def qr(a, mode='reduced'):
+    """
+    Compute the qr factorization of a matrix.
+
+    Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
+    upper-triangular.
+
+    Parameters
+    ----------
+    a : array_like, shape (M, N)
+        Matrix to be factored.
+    mode : {'reduced', 'complete', 'r', 'raw'}, optional
+        If K = min(M, N), then
+
+        * 'reduced'  : returns q, r with dimensions (M, K), (K, N) (default)
+        * 'complete' : returns q, r with dimensions (M, M), (M, N)
+        * 'r'        : returns r only with dimensions (K, N)
+        * 'raw'      : returns h, tau with dimensions (N, M), (K,)
+
+        The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
+        see the notes for more information. The default is 'reduced', and to
+        maintain backward compatibility with earlier versions of numpy both
+        it and the old default 'full' can be omitted. Note that array h
+        returned in 'raw' mode is transposed for calling Fortran. The
+        'economic' mode is deprecated. The modes 'full' and 'economic' may
+        be passed using only the first letter for backwards compatibility,
+        but all others must be spelled out. See the Notes for more
+        explanation.
+
+
+    Returns
+    -------
+    q : ndarray of float or complex, optional
+        A matrix with orthonormal columns. When mode = 'complete' the
+        result is an orthogonal/unitary matrix depending on whether or not
+        a is real/complex. The determinant may be either +/- 1 in that
+        case.
+    r : ndarray of float or complex, optional
+        The upper-triangular matrix.
+    (h, tau) : ndarrays of np.double or np.cdouble, optional
+        The array h contains the Householder reflectors that generate q
+        along with r. The tau array contains scaling factors for the
+        reflectors. In the deprecated 'economic' mode only h is returned.
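The two-triangular-solve recipe from the cholesky Notes above can be spelled out directly; a sketch assuming SciPy's solve_triangular is available (NumPy itself ships no triangular solver):

import numpy as np
from scipy.linalg import solve_triangular  # assumed optional dependency

A = np.array([[4., 2.], [2., 3.]])  # Hermitian positive-definite
b = np.array([1., 2.])

L = np.linalg.cholesky(A)
y = solve_triangular(L, b, lower=True)            # solve L y = b
x = solve_triangular(L.T.conj(), y, lower=False)  # solve L.H x = y
assert np.allclose(A @ x, b)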
+ + Raises + ------ + LinAlgError + If factoring fails. + + See Also + -------- + scipy.linalg.qr : Similar function in SciPy. + scipy.linalg.rq : Compute RQ decomposition of a matrix. + + Notes + ----- + This is an interface to the LAPACK routines ``dgeqrf``, ``zgeqrf``, + ``dorgqr``, and ``zungqr``. + + For more information on the qr factorization, see for example: + https://en.wikipedia.org/wiki/QR_factorization + + Subclasses of `ndarray` are preserved except for the 'raw' mode. So if + `a` is of type `matrix`, all the return values will be matrices too. + + New 'reduced', 'complete', and 'raw' options for mode were added in + NumPy 1.8.0 and the old option 'full' was made an alias of 'reduced'. In + addition the options 'full' and 'economic' were deprecated. Because + 'full' was the previous default and 'reduced' is the new default, + backward compatibility can be maintained by letting `mode` default. + The 'raw' option was added so that LAPACK routines that can multiply + arrays by q using the Householder reflectors can be used. Note that in + this case the returned arrays are of type np.double or np.cdouble and + the h array is transposed to be FORTRAN compatible. No routines using + the 'raw' return are currently exposed by numpy, but some are available + in lapack_lite and just await the necessary work. + + Examples + -------- + >>> a = np.random.randn(9, 6) + >>> q, r = np.linalg.qr(a) + >>> np.allclose(a, np.dot(q, r)) # a does equal qr + True + >>> r2 = np.linalg.qr(a, mode='r') + >>> np.allclose(r, r2) # mode='r' returns the same r as mode='full' + True + + Example illustrating a common use of `qr`: solving of least squares + problems + + What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for + the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points + and you'll see that it should be y0 = 0, m = 1.) The answer is provided + by solving the over-determined matrix equation ``Ax = b``, where:: + + A = array([[0, 1], [1, 1], [1, 1], [2, 1]]) + x = array([[y0], [m]]) + b = array([[1], [0], [2], [1]]) + + If A = qr such that q is orthonormal (which is always possible via + Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice, + however, we simply use `lstsq`.) + + >>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]]) + >>> A + array([[0, 1], + [1, 1], + [1, 1], + [2, 1]]) + >>> b = np.array([1, 0, 2, 1]) + >>> q, r = np.linalg.qr(A) + >>> p = np.dot(q.T, b) + >>> np.dot(np.linalg.inv(r), p) + array([ 1.1e-16, 1.0e+00]) + + """ + if mode not in ('reduced', 'complete', 'r', 'raw'): + if mode in ('f', 'full'): + # 2013-04-01, 1.8 + msg = "".join(( + "The 'full' option is deprecated in favor of 'reduced'.\n", + "For backward compatibility let mode default.")) + warnings.warn(msg, DeprecationWarning, stacklevel=3) + mode = 'reduced' + elif mode in ('e', 'economic'): + # 2013-04-01, 1.8 + msg = "The 'economic' option is deprecated." 
+ warnings.warn(msg, DeprecationWarning, stacklevel=3) + mode = 'economic' + else: + raise ValueError(f"Unrecognized mode '{mode}'") + + a, wrap = _makearray(a) + _assert_2d(a) + m, n = a.shape + t, result_t = _commonType(a) + a = _fastCopyAndTranspose(t, a) + a = _to_native_byte_order(a) + mn = min(m, n) + tau = zeros((mn,), t) + + if isComplexType(t): + lapack_routine = lapack_lite.zgeqrf + routine_name = 'zgeqrf' + else: + lapack_routine = lapack_lite.dgeqrf + routine_name = 'dgeqrf' + + # calculate optimal size of work data 'work' + lwork = 1 + work = zeros((lwork,), t) + results = lapack_routine(m, n, a, max(1, m), tau, work, -1, 0) + if results['info'] != 0: + raise LinAlgError('%s returns %d' % (routine_name, results['info'])) + + # do qr decomposition + lwork = max(1, n, int(abs(work[0]))) + work = zeros((lwork,), t) + results = lapack_routine(m, n, a, max(1, m), tau, work, lwork, 0) + if results['info'] != 0: + raise LinAlgError('%s returns %d' % (routine_name, results['info'])) + + # handle modes that don't return q + if mode == 'r': + r = _fastCopyAndTranspose(result_t, a[:, :mn]) + return wrap(triu(r)) + + if mode == 'raw': + return a, tau + + if mode == 'economic': + if t != result_t : + a = a.astype(result_t, copy=False) + return wrap(a.T) + + # generate q from a + if mode == 'complete' and m > n: + mc = m + q = empty((m, m), t) + else: + mc = mn + q = empty((n, m), t) + q[:n] = a + + if isComplexType(t): + lapack_routine = lapack_lite.zungqr + routine_name = 'zungqr' + else: + lapack_routine = lapack_lite.dorgqr + routine_name = 'dorgqr' + + # determine optimal lwork + lwork = 1 + work = zeros((lwork,), t) + results = lapack_routine(m, mc, mn, q, max(1, m), tau, work, -1, 0) + if results['info'] != 0: + raise LinAlgError('%s returns %d' % (routine_name, results['info'])) + + # compute q + lwork = max(1, n, int(abs(work[0]))) + work = zeros((lwork,), t) + results = lapack_routine(m, mc, mn, q, max(1, m), tau, work, lwork, 0) + if results['info'] != 0: + raise LinAlgError('%s returns %d' % (routine_name, results['info'])) + + q = _fastCopyAndTranspose(result_t, q[:mc]) + r = _fastCopyAndTranspose(result_t, a[:, :mc]) + + return wrap(q), wrap(triu(r)) + + +# Eigenvalues + + +@array_function_dispatch(_unary_dispatcher) +def eigvals(a): + """ + Compute the eigenvalues of a general matrix. + + Main difference between `eigvals` and `eig`: the eigenvectors aren't + returned. + + Parameters + ---------- + a : (..., M, M) array_like + A complex- or real-valued matrix whose eigenvalues will be computed. + + Returns + ------- + w : (..., M,) ndarray + The eigenvalues, each repeated according to its multiplicity. + They are not necessarily ordered, nor are they necessarily + real for real matrices. + + Raises + ------ + LinAlgError + If the eigenvalue computation does not converge. + + See Also + -------- + eig : eigenvalues and right eigenvectors of general arrays + eigvalsh : eigenvalues of real symmetric or complex Hermitian + (conjugate symmetric) arrays. + eigh : eigenvalues and eigenvectors of real symmetric or complex + Hermitian (conjugate symmetric) arrays. + scipy.linalg.eigvals : Similar function in SciPy. + + Notes + ----- + + .. versionadded:: 1.8.0 + + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + This is implemented using the ``_geev`` LAPACK routines which compute + the eigenvalues and eigenvectors of general square arrays. 
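The qr mode table above is easiest to absorb as shapes; a short sketch covering the three most common modes:

import numpy as np

a = np.random.randn(9, 6)

q, r = np.linalg.qr(a)                     # 'reduced': q (9, 6), r (6, 6)
qc, rc = np.linalg.qr(a, mode='complete')  # q (9, 9), r (9, 6)
r2 = np.linalg.qr(a, mode='r')             # r only, (6, 6)

assert np.allclose(a, q @ r)
assert np.allclose(a, qc @ rc)
assert np.allclose(r, r2)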
+ + Examples + -------- + Illustration, using the fact that the eigenvalues of a diagonal matrix + are its diagonal elements, that multiplying a matrix on the left + by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose + of `Q`), preserves the eigenvalues of the "middle" matrix. In other words, + if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as + ``A``: + + >>> from numpy import linalg as LA + >>> x = np.random.random() + >>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]]) + >>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :]) + (1.0, 1.0, 0.0) + + Now multiply a diagonal matrix by ``Q`` on one side and by ``Q.T`` on the other: + + >>> D = np.diag((-1,1)) + >>> LA.eigvals(D) + array([-1., 1.]) + >>> A = np.dot(Q, D) + >>> A = np.dot(A, Q.T) + >>> LA.eigvals(A) + array([ 1., -1.]) # random + + """ + a, wrap = _makearray(a) + _assert_stacked_2d(a) + _assert_stacked_square(a) + _assert_finite(a) + t, result_t = _commonType(a) + + extobj = get_linalg_error_extobj( + _raise_linalgerror_eigenvalues_nonconvergence) + signature = 'D->D' if isComplexType(t) else 'd->D' + w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj) + + if not isComplexType(t): + if all(w.imag == 0): + w = w.real + result_t = _realType(result_t) + else: + result_t = _complexType(result_t) + + return w.astype(result_t, copy=False) + + +def _eigvalsh_dispatcher(a, UPLO=None): + return (a,) + + +@array_function_dispatch(_eigvalsh_dispatcher) +def eigvalsh(a, UPLO='L'): + """ + Compute the eigenvalues of a complex Hermitian or real symmetric matrix. + + Main difference from eigh: the eigenvectors are not computed. + + Parameters + ---------- + a : (..., M, M) array_like + A complex- or real-valued matrix whose eigenvalues are to be + computed. + UPLO : {'L', 'U'}, optional + Specifies whether the calculation is done with the lower triangular + part of `a` ('L', default) or the upper triangular part ('U'). + Irrespective of this value only the real parts of the diagonal will + be considered in the computation to preserve the notion of a Hermitian + matrix. It therefore follows that the imaginary part of the diagonal + will always be treated as zero. + + Returns + ------- + w : (..., M,) ndarray + The eigenvalues in ascending order, each repeated according to + its multiplicity. + + Raises + ------ + LinAlgError + If the eigenvalue computation does not converge. + + See Also + -------- + eigh : eigenvalues and eigenvectors of real symmetric or complex Hermitian + (conjugate symmetric) arrays. + eigvals : eigenvalues of general real or complex arrays. + eig : eigenvalues and right eigenvectors of general real or complex + arrays. + scipy.linalg.eigvalsh : Similar function in SciPy. + + Notes + ----- + + .. versionadded:: 1.8.0 + + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The eigenvalues are computed using LAPACK routines ``_syevd``, ``_heevd``. 
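A compact illustration of the division of labour just described: on a symmetric input both routines apply, but eigvalsh takes the symmetric LAPACK path and guarantees real output in ascending order. A sketch:

import numpy as np
from numpy import linalg as LA

a = np.array([[2., 1.], [1., 2.]])    # real symmetric

w_h = LA.eigvalsh(a)                  # array([1., 3.]), real, ascending
w_g = np.sort(LA.eigvals(a).real)    # general path: no order guarantee

assert np.allclose(w_h, w_g)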
+ + Examples + -------- + >>> from numpy import linalg as LA + >>> a = np.array([[1, -2j], [2j, 5]]) + >>> LA.eigvalsh(a) + array([ 0.17157288, 5.82842712]) # may vary + + >>> # demonstrate the treatment of the imaginary part of the diagonal + >>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]]) + >>> a + array([[5.+2.j, 9.-2.j], + [0.+2.j, 2.-1.j]]) + >>> # with UPLO='L' this is numerically equivalent to using LA.eigvals() + >>> # with: + >>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]]) + >>> b + array([[5.+0.j, 0.-2.j], + [0.+2.j, 2.+0.j]]) + >>> wa = LA.eigvalsh(a) + >>> wb = LA.eigvals(b) + >>> wa; wb + array([1., 6.]) + array([6.+0.j, 1.+0.j]) + + """ + UPLO = UPLO.upper() + if UPLO not in ('L', 'U'): + raise ValueError("UPLO argument must be 'L' or 'U'") + + extobj = get_linalg_error_extobj( + _raise_linalgerror_eigenvalues_nonconvergence) + if UPLO == 'L': + gufunc = _umath_linalg.eigvalsh_lo + else: + gufunc = _umath_linalg.eigvalsh_up + + a, wrap = _makearray(a) + _assert_stacked_2d(a) + _assert_stacked_square(a) + t, result_t = _commonType(a) + signature = 'D->d' if isComplexType(t) else 'd->d' + w = gufunc(a, signature=signature, extobj=extobj) + return w.astype(_realType(result_t), copy=False) + +def _convertarray(a): + t, result_t = _commonType(a) + a = _fastCT(a.astype(t)) + return a, t, result_t + + +# Eigenvectors + + +@array_function_dispatch(_unary_dispatcher) +def eig(a): + """ + Compute the eigenvalues and right eigenvectors of a square array. + + Parameters + ---------- + a : (..., M, M) array + Matrices for which the eigenvalues and right eigenvectors will + be computed + + Returns + ------- + w : (..., M) array + The eigenvalues, each repeated according to its multiplicity. + The eigenvalues are not necessarily ordered. The resulting + array will be of complex type, unless the imaginary part is + zero in which case it will be cast to a real type. When `a` + is real the resulting eigenvalues will be real (0 imaginary + part) or occur in conjugate pairs + + v : (..., M, M) array + The normalized (unit "length") eigenvectors, such that the + column ``v[:,i]`` is the eigenvector corresponding to the + eigenvalue ``w[i]``. + + Raises + ------ + LinAlgError + If the eigenvalue computation does not converge. + + See Also + -------- + eigvals : eigenvalues of a non-symmetric array. + eigh : eigenvalues and eigenvectors of a real symmetric or complex + Hermitian (conjugate symmetric) array. + eigvalsh : eigenvalues of a real symmetric or complex Hermitian + (conjugate symmetric) array. + scipy.linalg.eig : Similar function in SciPy that also solves the + generalized eigenvalue problem. + scipy.linalg.schur : Best choice for unitary and other non-Hermitian + normal matrices. + + Notes + ----- + + .. versionadded:: 1.8.0 + + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + This is implemented using the ``_geev`` LAPACK routines which compute + the eigenvalues and eigenvectors of general square arrays. + + The number `w` is an eigenvalue of `a` if there exists a vector + `v` such that ``a @ v = w * v``. Thus, the arrays `a`, `w`, and + `v` satisfy the equations ``a @ v[:,i] = w[i] * v[:,i]`` + for :math:`i \\in \\{0,...,M-1\\}`. + + The array `v` of eigenvectors may not be of maximum rank, that is, some + of the columns may be linearly dependent, although round-off error may + obscure that fact. 
If the eigenvalues are all different, then theoretically + the eigenvectors are linearly independent and `a` can be diagonalized by + a similarity transformation using `v`, i.e, ``inv(v) @ a @ v`` is diagonal. + + For non-Hermitian normal matrices the SciPy function `scipy.linalg.schur` + is preferred because the matrix `v` is guaranteed to be unitary, which is + not the case when using `eig`. The Schur factorization produces an + upper triangular matrix rather than a diagonal matrix, but for normal + matrices only the diagonal of the upper triangular matrix is needed, the + rest is roundoff error. + + Finally, it is emphasized that `v` consists of the *right* (as in + right-hand side) eigenvectors of `a`. A vector `y` satisfying + ``y.T @ a = z * y.T`` for some number `z` is called a *left* + eigenvector of `a`, and, in general, the left and right eigenvectors + of a matrix are not necessarily the (perhaps conjugate) transposes + of each other. + + References + ---------- + G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL, + Academic Press, Inc., 1980, Various pp. + + Examples + -------- + >>> from numpy import linalg as LA + + (Almost) trivial example with real e-values and e-vectors. + + >>> w, v = LA.eig(np.diag((1, 2, 3))) + >>> w; v + array([1., 2., 3.]) + array([[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]]) + + Real matrix possessing complex e-values and e-vectors; note that the + e-values are complex conjugates of each other. + + >>> w, v = LA.eig(np.array([[1, -1], [1, 1]])) + >>> w; v + array([1.+1.j, 1.-1.j]) + array([[0.70710678+0.j , 0.70710678-0.j ], + [0. -0.70710678j, 0. +0.70710678j]]) + + Complex-valued matrix with real e-values (but complex-valued e-vectors); + note that ``a.conj().T == a``, i.e., `a` is Hermitian. + + >>> a = np.array([[1, 1j], [-1j, 1]]) + >>> w, v = LA.eig(a) + >>> w; v + array([2.+0.j, 0.+0.j]) + array([[ 0. +0.70710678j, 0.70710678+0.j ], # may vary + [ 0.70710678+0.j , -0. +0.70710678j]]) + + Be careful about round-off error! + + >>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]]) + >>> # Theor. e-values are 1 +/- 1e-9 + >>> w, v = LA.eig(a) + >>> w; v + array([1., 1.]) + array([[1., 0.], + [0., 1.]]) + + """ + a, wrap = _makearray(a) + _assert_stacked_2d(a) + _assert_stacked_square(a) + _assert_finite(a) + t, result_t = _commonType(a) + + extobj = get_linalg_error_extobj( + _raise_linalgerror_eigenvalues_nonconvergence) + signature = 'D->DD' if isComplexType(t) else 'd->DD' + w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj) + + if not isComplexType(t) and all(w.imag == 0.0): + w = w.real + vt = vt.real + result_t = _realType(result_t) + else: + result_t = _complexType(result_t) + + vt = vt.astype(result_t, copy=False) + return w.astype(result_t, copy=False), wrap(vt) + + +@array_function_dispatch(_eigvalsh_dispatcher) +def eigh(a, UPLO='L'): + """ + Return the eigenvalues and eigenvectors of a complex Hermitian + (conjugate symmetric) or a real symmetric matrix. + + Returns two objects, a 1-D array containing the eigenvalues of `a`, and + a 2-D square array or matrix (depending on the input type) of the + corresponding eigenvectors (in columns). + + Parameters + ---------- + a : (..., M, M) array + Hermitian or real symmetric matrices whose eigenvalues and + eigenvectors are to be computed. + UPLO : {'L', 'U'}, optional + Specifies whether the calculation is done with the lower triangular + part of `a` ('L', default) or the upper triangular part ('U'). 
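The identity ``a @ v[:, i] == w[i] * v[:, i]`` stated above, checked numerically on a matrix with distinct eigenvalues (so `v` is invertible and diagonalization succeeds):

import numpy as np
from numpy import linalg as LA

a = np.array([[1., 2.], [3., 0.]])    # eigenvalues 3 and -2
w, v = LA.eig(a)

# column i of v is the right eigenvector for w[i]
assert np.allclose(a @ v, v * w)

# distinct eigenvalues -> inv(v) @ a @ v is (numerically) diagonal
assert np.allclose(LA.inv(v) @ a @ v, np.diag(w))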
+ Irrespective of this value only the real parts of the diagonal will + be considered in the computation to preserve the notion of a Hermitian + matrix. It therefore follows that the imaginary part of the diagonal + will always be treated as zero. + + Returns + ------- + w : (..., M) ndarray + The eigenvalues in ascending order, each repeated according to + its multiplicity. + v : {(..., M, M) ndarray, (..., M, M) matrix} + The column ``v[:, i]`` is the normalized eigenvector corresponding + to the eigenvalue ``w[i]``. Will return a matrix object if `a` is + a matrix object. + + Raises + ------ + LinAlgError + If the eigenvalue computation does not converge. + + See Also + -------- + eigvalsh : eigenvalues of real symmetric or complex Hermitian + (conjugate symmetric) arrays. + eig : eigenvalues and right eigenvectors for non-symmetric arrays. + eigvals : eigenvalues of non-symmetric arrays. + scipy.linalg.eigh : Similar function in SciPy (but also solves the + generalized eigenvalue problem). + + Notes + ----- + + .. versionadded:: 1.8.0 + + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The eigenvalues/eigenvectors are computed using LAPACK routines ``_syevd``, + ``_heevd``. + + The eigenvalues of real symmetric or complex Hermitian matrices are + always real. [1]_ The array `v` of (column) eigenvectors is unitary + and `a`, `w`, and `v` satisfy the equations + ``dot(a, v[:, i]) = w[i] * v[:, i]``. + + References + ---------- + .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, + FL, Academic Press, Inc., 1980, pg. 222. + + Examples + -------- + >>> from numpy import linalg as LA + >>> a = np.array([[1, -2j], [2j, 5]]) + >>> a + array([[ 1.+0.j, -0.-2.j], + [ 0.+2.j, 5.+0.j]]) + >>> w, v = LA.eigh(a) + >>> w; v + array([0.17157288, 5.82842712]) + array([[-0.92387953+0.j , -0.38268343+0.j ], # may vary + [ 0. +0.38268343j, 0. -0.92387953j]]) + + >>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair + array([5.55111512e-17+0.0000000e+00j, 0.00000000e+00+1.2490009e-16j]) + >>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair + array([0.+0.j, 0.+0.j]) + + >>> A = np.matrix(a) # what happens if input is a matrix object + >>> A + matrix([[ 1.+0.j, -0.-2.j], + [ 0.+2.j, 5.+0.j]]) + >>> w, v = LA.eigh(A) + >>> w; v + array([0.17157288, 5.82842712]) + matrix([[-0.92387953+0.j , -0.38268343+0.j ], # may vary + [ 0. +0.38268343j, 0. -0.92387953j]]) + + >>> # demonstrate the treatment of the imaginary part of the diagonal + >>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]]) + >>> a + array([[5.+2.j, 9.-2.j], + [0.+2.j, 2.-1.j]]) + >>> # with UPLO='L' this is numerically equivalent to using LA.eig() with: + >>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]]) + >>> b + array([[5.+0.j, 0.-2.j], + [0.+2.j, 2.+0.j]]) + >>> wa, va = LA.eigh(a) + >>> wb, vb = LA.eig(b) + >>> wa; wb + array([1., 6.]) + array([6.+0.j, 1.+0.j]) + >>> va; vb + array([[-0.4472136 +0.j , -0.89442719+0.j ], # may vary + [ 0. +0.89442719j, 0. -0.4472136j ]]) + array([[ 0.89442719+0.j , -0. +0.4472136j], + [-0. 
+0.4472136j, 0.89442719+0.j ]]) + """ + UPLO = UPLO.upper() + if UPLO not in ('L', 'U'): + raise ValueError("UPLO argument must be 'L' or 'U'") + + a, wrap = _makearray(a) + _assert_stacked_2d(a) + _assert_stacked_square(a) + t, result_t = _commonType(a) + + extobj = get_linalg_error_extobj( + _raise_linalgerror_eigenvalues_nonconvergence) + if UPLO == 'L': + gufunc = _umath_linalg.eigh_lo + else: + gufunc = _umath_linalg.eigh_up + + signature = 'D->dD' if isComplexType(t) else 'd->dd' + w, vt = gufunc(a, signature=signature, extobj=extobj) + w = w.astype(_realType(result_t), copy=False) + vt = vt.astype(result_t, copy=False) + return w, wrap(vt) + + +# Singular value decomposition + +def _svd_dispatcher(a, full_matrices=None, compute_uv=None, hermitian=None): + return (a,) + + +@array_function_dispatch(_svd_dispatcher) +def svd(a, full_matrices=True, compute_uv=True, hermitian=False): + """ + Singular Value Decomposition. + + When `a` is a 2D array, it is factorized as ``u @ np.diag(s) @ vh + = (u * s) @ vh``, where `u` and `vh` are 2D unitary arrays and `s` is a 1D + array of `a`'s singular values. When `a` is higher-dimensional, SVD is + applied in stacked mode as explained below. + + Parameters + ---------- + a : (..., M, N) array_like + A real or complex array with ``a.ndim >= 2``. + full_matrices : bool, optional + If True (default), `u` and `vh` have the shapes ``(..., M, M)`` and + ``(..., N, N)``, respectively. Otherwise, the shapes are + ``(..., M, K)`` and ``(..., K, N)``, respectively, where + ``K = min(M, N)``. + compute_uv : bool, optional + Whether or not to compute `u` and `vh` in addition to `s`. True + by default. + hermitian : bool, optional + If True, `a` is assumed to be Hermitian (symmetric if real-valued), + enabling a more efficient method for finding singular values. + Defaults to False. + + .. versionadded:: 1.17.0 + + Returns + ------- + u : { (..., M, M), (..., M, K) } array + Unitary array(s). The first ``a.ndim - 2`` dimensions have the same + size as those of the input `a`. The size of the last two dimensions + depends on the value of `full_matrices`. Only returned when + `compute_uv` is True. + s : (..., K) array + Vector(s) with the singular values, within each vector sorted in + descending order. The first ``a.ndim - 2`` dimensions have the same + size as those of the input `a`. + vh : { (..., N, N), (..., K, N) } array + Unitary array(s). The first ``a.ndim - 2`` dimensions have the same + size as those of the input `a`. The size of the last two dimensions + depends on the value of `full_matrices`. Only returned when + `compute_uv` is True. + + Raises + ------ + LinAlgError + If SVD computation does not converge. + + See Also + -------- + scipy.linalg.svd : Similar function in SciPy. + scipy.linalg.svdvals : Compute singular values of a matrix. + + Notes + ----- + + .. versionchanged:: 1.8.0 + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The decomposition is performed using LAPACK routine ``_gesdd``. + + SVD is usually described for the factorization of a 2D matrix :math:`A`. + The higher-dimensional case will be discussed below. In the 2D case, SVD is + written as :math:`A = U S V^H`, where :math:`A = a`, :math:`U= u`, + :math:`S= \\mathtt{np.diag}(s)` and :math:`V^H = vh`. The 1D array `s` + contains the singular values of `a` and `u` and `vh` are unitary. The rows + of `vh` are the eigenvectors of :math:`A^H A` and the columns of `u` are + the eigenvectors of :math:`A A^H`. 
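The eigenvector relationship just stated is easy to confirm numerically; a sketch pairing svd with eigvalsh (eigvalsh sorts ascending and singular values come out descending, hence the flip):

import numpy as np

a = np.random.randn(5, 3)
u, s, vh = np.linalg.svd(a, full_matrices=False)

w = np.linalg.eigvalsh(a.T @ a)      # eigenvalues of A^H A, ascending
assert np.allclose(s**2, w[::-1])    # squared singular values, descending

assert np.allclose(a, (u * s) @ vh)  # reduced-SVD reconstruction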
In both cases the corresponding + (possibly non-zero) eigenvalues are given by ``s**2``. + + If `a` has more than two dimensions, then broadcasting rules apply, as + explained in :ref:`routines.linalg-broadcasting`. This means that SVD is + working in "stacked" mode: it iterates over all indices of the first + ``a.ndim - 2`` dimensions and for each combination SVD is applied to the + last two indices. The matrix `a` can be reconstructed from the + decomposition with either ``(u * s[..., None, :]) @ vh`` or + ``u @ (s[..., None] * vh)``. (The ``@`` operator can be replaced by the + function ``np.matmul`` for python versions below 3.5.) + + If `a` is a ``matrix`` object (as opposed to an ``ndarray``), then so are + all the return values. + + Examples + -------- + >>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6) + >>> b = np.random.randn(2, 7, 8, 3) + 1j*np.random.randn(2, 7, 8, 3) + + Reconstruction based on full SVD, 2D case: + + >>> u, s, vh = np.linalg.svd(a, full_matrices=True) + >>> u.shape, s.shape, vh.shape + ((9, 9), (6,), (6, 6)) + >>> np.allclose(a, np.dot(u[:, :6] * s, vh)) + True + >>> smat = np.zeros((9, 6), dtype=complex) + >>> smat[:6, :6] = np.diag(s) + >>> np.allclose(a, np.dot(u, np.dot(smat, vh))) + True + + Reconstruction based on reduced SVD, 2D case: + + >>> u, s, vh = np.linalg.svd(a, full_matrices=False) + >>> u.shape, s.shape, vh.shape + ((9, 6), (6,), (6, 6)) + >>> np.allclose(a, np.dot(u * s, vh)) + True + >>> smat = np.diag(s) + >>> np.allclose(a, np.dot(u, np.dot(smat, vh))) + True + + Reconstruction based on full SVD, 4D case: + + >>> u, s, vh = np.linalg.svd(b, full_matrices=True) + >>> u.shape, s.shape, vh.shape + ((2, 7, 8, 8), (2, 7, 3), (2, 7, 3, 3)) + >>> np.allclose(b, np.matmul(u[..., :3] * s[..., None, :], vh)) + True + >>> np.allclose(b, np.matmul(u[..., :3], s[..., None] * vh)) + True + + Reconstruction based on reduced SVD, 4D case: + + >>> u, s, vh = np.linalg.svd(b, full_matrices=False) + >>> u.shape, s.shape, vh.shape + ((2, 7, 8, 3), (2, 7, 3), (2, 7, 3, 3)) + >>> np.allclose(b, np.matmul(u * s[..., None, :], vh)) + True + >>> np.allclose(b, np.matmul(u, s[..., None] * vh)) + True + + """ + import numpy as _nx + a, wrap = _makearray(a) + + if hermitian: + # note: lapack svd returns eigenvalues with s ** 2 sorted descending, + # but eig returns s sorted ascending, so we re-order the eigenvalues + # and related arrays to have the correct order + if compute_uv: + s, u = eigh(a) + sgn = sign(s) + s = abs(s) + sidx = argsort(s)[..., ::-1] + sgn = _nx.take_along_axis(sgn, sidx, axis=-1) + s = _nx.take_along_axis(s, sidx, axis=-1) + u = _nx.take_along_axis(u, sidx[..., None, :], axis=-1) + # singular values are unsigned, move the sign into v + vt = transpose(u * sgn[..., None, :]).conjugate() + return wrap(u), s, wrap(vt) + else: + s = eigvalsh(a) + s = s[..., ::-1] + s = abs(s) + return sort(s)[..., ::-1] + + _assert_stacked_2d(a) + t, result_t = _commonType(a) + + extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence) + + m, n = a.shape[-2:] + if compute_uv: + if full_matrices: + if m < n: + gufunc = _umath_linalg.svd_m_f + else: + gufunc = _umath_linalg.svd_n_f + else: + if m < n: + gufunc = _umath_linalg.svd_m_s + else: + gufunc = _umath_linalg.svd_n_s + + signature = 'D->DdD' if isComplexType(t) else 'd->ddd' + u, s, vh = gufunc(a, signature=signature, extobj=extobj) + u = u.astype(result_t, copy=False) + s = s.astype(_realType(result_t), copy=False) + vh = vh.astype(result_t, copy=False) + return wrap(u), s, wrap(vh) + 
else: + if m < n: + gufunc = _umath_linalg.svd_m + else: + gufunc = _umath_linalg.svd_n + + signature = 'D->d' if isComplexType(t) else 'd->d' + s = gufunc(a, signature=signature, extobj=extobj) + s = s.astype(_realType(result_t), copy=False) + return s + + +def _cond_dispatcher(x, p=None): + return (x,) + + +@array_function_dispatch(_cond_dispatcher) +def cond(x, p=None): + """ + Compute the condition number of a matrix. + + This function is capable of returning the condition number using + one of seven different norms, depending on the value of `p` (see + Parameters below). + + Parameters + ---------- + x : (..., M, N) array_like + The matrix whose condition number is sought. + p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional + Order of the norm: + + ===== ============================ + p norm for matrices + ===== ============================ + None 2-norm, computed directly using the ``SVD`` + 'fro' Frobenius norm + inf max(sum(abs(x), axis=1)) + -inf min(sum(abs(x), axis=1)) + 1 max(sum(abs(x), axis=0)) + -1 min(sum(abs(x), axis=0)) + 2 2-norm (largest sing. value) + -2 smallest singular value + ===== ============================ + + inf means the numpy.inf object, and the Frobenius norm is + the root-of-sum-of-squares norm. + + Returns + ------- + c : {float, inf} + The condition number of the matrix. May be infinite. + + See Also + -------- + numpy.linalg.norm + + Notes + ----- + The condition number of `x` is defined as the norm of `x` times the + norm of the inverse of `x` [1]_; the norm can be the usual L2-norm + (root-of-sum-of-squares) or one of a number of other matrix norms. + + References + ---------- + .. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL, + Academic Press, Inc., 1980, pg. 285. + + Examples + -------- + >>> from numpy import linalg as LA + >>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]]) + >>> a + array([[ 1, 0, -1], + [ 0, 1, 0], + [ 1, 0, 1]]) + >>> LA.cond(a) + 1.4142135623730951 + >>> LA.cond(a, 'fro') + 3.1622776601683795 + >>> LA.cond(a, np.inf) + 2.0 + >>> LA.cond(a, -np.inf) + 1.0 + >>> LA.cond(a, 1) + 2.0 + >>> LA.cond(a, -1) + 1.0 + >>> LA.cond(a, 2) + 1.4142135623730951 + >>> LA.cond(a, -2) + 0.70710678118654746 # may vary + >>> min(LA.svd(a, compute_uv=False))*min(LA.svd(LA.inv(a), compute_uv=False)) + 0.70710678118654746 # may vary + + """ + x = asarray(x) # in case we have a matrix + if _is_empty_2d(x): + raise LinAlgError("cond is not defined on empty arrays") + if p is None or p == 2 or p == -2: + s = svd(x, compute_uv=False) + with errstate(all='ignore'): + if p == -2: + r = s[..., -1] / s[..., 0] + else: + r = s[..., 0] / s[..., -1] + else: + # Call inv(x) ignoring errors. The result array will + # contain nans in the entries where inversion failed. 
+ _assert_stacked_2d(x) + _assert_stacked_square(x) + t, result_t = _commonType(x) + signature = 'D->D' if isComplexType(t) else 'd->d' + with errstate(all='ignore'): + invx = _umath_linalg.inv(x, signature=signature) + r = norm(x, p, axis=(-2, -1)) * norm(invx, p, axis=(-2, -1)) + r = r.astype(result_t, copy=False) + + # Convert nans to infs unless the original array had nan entries + r = asarray(r) + nan_mask = isnan(r) + if nan_mask.any(): + nan_mask &= ~isnan(x).any(axis=(-2, -1)) + if r.ndim > 0: + r[nan_mask] = Inf + elif nan_mask: + r[()] = Inf + + # Convention is to return scalars instead of 0d arrays + if r.ndim == 0: + r = r[()] + + return r + + +def _matrix_rank_dispatcher(M, tol=None, hermitian=None): + return (M,) + + +@array_function_dispatch(_matrix_rank_dispatcher) +def matrix_rank(M, tol=None, hermitian=False): + """ + Return matrix rank of array using SVD method + + Rank of the array is the number of singular values of the array that are + greater than `tol`. + + .. versionchanged:: 1.14 + Can now operate on stacks of matrices + + Parameters + ---------- + M : {(M,), (..., M, N)} array_like + Input vector or stack of matrices. + tol : (...) array_like, float, optional + Threshold below which SVD values are considered zero. If `tol` is + None, and ``S`` is an array with singular values for `M`, and + ``eps`` is the epsilon value for datatype of ``S``, then `tol` is + set to ``S.max() * max(M.shape) * eps``. + + .. versionchanged:: 1.14 + Broadcasted against the stack of matrices + hermitian : bool, optional + If True, `M` is assumed to be Hermitian (symmetric if real-valued), + enabling a more efficient method for finding singular values. + Defaults to False. + + .. versionadded:: 1.14 + + Returns + ------- + rank : (...) array_like + Rank of M. + + Notes + ----- + The default threshold to detect rank deficiency is a test on the magnitude + of the singular values of `M`. By default, we identify singular values less + than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with + the symbols defined above). This is the algorithm MATLAB uses [1]. It also + appears in *Numerical recipes* in the discussion of SVD solutions for linear + least squares [2]. + + This default threshold is designed to detect rank deficiency accounting for + the numerical errors of the SVD computation. Imagine that there is a column + in `M` that is an exact (in floating point) linear combination of other + columns in `M`. Computing the SVD on `M` will not produce a singular value + exactly equal to 0 in general: any difference of the smallest SVD value from + 0 will be caused by numerical imprecision in the calculation of the SVD. + Our threshold for small SVD values takes this numerical imprecision into + account, and the default threshold will detect such numerical rank + deficiency. The threshold may declare a matrix `M` rank deficient even if + the linear combination of some columns of `M` is not exactly equal to + another column of `M` but only numerically very close to another column of + `M`. + + We chose our default threshold because it is in wide use. Other thresholds + are possible. For example, elsewhere in the 2007 edition of *Numerical + recipes* there is an alternative threshold of ``S.max() * + np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe + this threshold as being based on "expected roundoff error" (p 71). + + The thresholds above deal with floating point roundoff error in the + calculation of the SVD. 
However, you may have more information about the
+    sources of error in `M` that would make you consider other tolerance values
+    to detect *effective* rank deficiency. The most useful measure of the
+    tolerance depends on the operations you intend to use on your matrix. For
+    example, if your data come from uncertain measurements with uncertainties
+    greater than floating point epsilon, choosing a tolerance near that
+    uncertainty may be preferable. The tolerance may be absolute if the
+    uncertainties are absolute rather than relative.
+
+    References
+    ----------
+    .. [1] MATLAB reference documentation, "Rank"
+           https://www.mathworks.com/help/techdoc/ref/rank.html
+    .. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
+           "Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
+           page 795.
+
+    Examples
+    --------
+    >>> from numpy.linalg import matrix_rank
+    >>> matrix_rank(np.eye(4)) # Full rank matrix
+    4
+    >>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
+    >>> matrix_rank(I)
+    3
+    >>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
+    1
+    >>> matrix_rank(np.zeros((4,)))
+    0
+    """
+    M = asarray(M)
+    if M.ndim < 2:
+        return int(not all(M==0))
+    S = svd(M, compute_uv=False, hermitian=hermitian)
+    if tol is None:
+        tol = S.max(axis=-1, keepdims=True) * max(M.shape[-2:]) * finfo(S.dtype).eps
+    else:
+        tol = asarray(tol)[..., newaxis]
+    return count_nonzero(S > tol, axis=-1)
+
+
+# Generalized inverse
+
+def _pinv_dispatcher(a, rcond=None, hermitian=None):
+    return (a,)
+
+
+@array_function_dispatch(_pinv_dispatcher)
+def pinv(a, rcond=1e-15, hermitian=False):
+    """
+    Compute the (Moore-Penrose) pseudo-inverse of a matrix.
+
+    Calculate the generalized inverse of a matrix using its
+    singular-value decomposition (SVD) and including all
+    *large* singular values.
+
+    .. versionchanged:: 1.14
+       Can now operate on stacks of matrices
+
+    Parameters
+    ----------
+    a : (..., M, N) array_like
+        Matrix or stack of matrices to be pseudo-inverted.
+    rcond : (...) array_like of float
+        Cutoff for small singular values.
+        Singular values less than or equal to
+        ``rcond * largest_singular_value`` are set to zero.
+        Broadcasts against the stack of matrices.
+    hermitian : bool, optional
+        If True, `a` is assumed to be Hermitian (symmetric if real-valued),
+        enabling a more efficient method for finding singular values.
+        Defaults to False.
+
+        .. versionadded:: 1.17.0
+
+    Returns
+    -------
+    B : (..., N, M) ndarray
+        The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
+        is `B`.
+
+    Raises
+    ------
+    LinAlgError
+        If the SVD computation does not converge.
+
+    See Also
+    --------
+    scipy.linalg.pinv : Similar function in SciPy.
+    scipy.linalg.pinv2 : Similar function in SciPy (SVD-based).
+    scipy.linalg.pinvh : Compute the (Moore-Penrose) pseudo-inverse of a
+                         Hermitian matrix.
+
+    Notes
+    -----
+    The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
+    defined as: "the matrix that 'solves' [the least-squares problem]
+    :math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
+    :math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
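The defining property just quoted (:math:`\\bar{x} = A^+b` solves the least-squares problem) can be cross-checked against lstsq; a sketch:

import numpy as np

a = np.random.randn(9, 6)    # full column rank, almost surely
b = np.random.randn(9)

x_pinv = np.linalg.pinv(a) @ b
x_lstsq, *_ = np.linalg.lstsq(a, b, rcond=None)
assert np.allclose(x_pinv, x_lstsq)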
+ + It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular + value decomposition of A, then + :math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are + orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting + of A's so-called singular values, (followed, typically, by + zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix + consisting of the reciprocals of A's singular values + (again, followed by zeros). [1]_ + + References + ---------- + .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, + FL, Academic Press, Inc., 1980, pp. 139-142. + + Examples + -------- + The following example checks that ``a * a+ * a == a`` and + ``a+ * a * a+ == a+``: + + >>> a = np.random.randn(9, 6) + >>> B = np.linalg.pinv(a) + >>> np.allclose(a, np.dot(a, np.dot(B, a))) + True + >>> np.allclose(B, np.dot(B, np.dot(a, B))) + True + + """ + a, wrap = _makearray(a) + rcond = asarray(rcond) + if _is_empty_2d(a): + m, n = a.shape[-2:] + res = empty(a.shape[:-2] + (n, m), dtype=a.dtype) + return wrap(res) + a = a.conjugate() + u, s, vt = svd(a, full_matrices=False, hermitian=hermitian) + + # discard small singular values + cutoff = rcond[..., newaxis] * amax(s, axis=-1, keepdims=True) + large = s > cutoff + s = divide(1, s, where=large, out=s) + s[~large] = 0 + + res = matmul(transpose(vt), multiply(s[..., newaxis], transpose(u))) + return wrap(res) + + +# Determinant + + +@array_function_dispatch(_unary_dispatcher) +def slogdet(a): + """ + Compute the sign and (natural) logarithm of the determinant of an array. + + If an array has a very small or very large determinant, then a call to + `det` may overflow or underflow. This routine is more robust against such + issues, because it computes the logarithm of the determinant rather than + the determinant itself. + + Parameters + ---------- + a : (..., M, M) array_like + Input array, has to be a square 2-D array. + + Returns + ------- + sign : (...) array_like + A number representing the sign of the determinant. For a real matrix, + this is 1, 0, or -1. For a complex matrix, this is a complex number + with absolute value 1 (i.e., it is on the unit circle), or else 0. + logdet : (...) array_like + The natural log of the absolute value of the determinant. + + If the determinant is zero, then `sign` will be 0 and `logdet` will be + -Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``. + + See Also + -------- + det + + Notes + ----- + + .. versionadded:: 1.8.0 + + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + .. versionadded:: 1.6.0 + + The determinant is computed via LU factorization using the LAPACK + routine ``z/dgetrf``. 
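+
+    As a rough sanity check on the examples below: ``np.eye(500) * 0.1``
+    has determinant ``0.1**500``, which underflows to ``0.0`` in `det`,
+    while `slogdet` returns its logarithm, ``500 * np.log(0.1)``, about
+    ``-1151.29``.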
+
+
+    Examples
+    --------
+    The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
+
+    >>> a = np.array([[1, 2], [3, 4]])
+    >>> (sign, logdet) = np.linalg.slogdet(a)
+    >>> (sign, logdet)
+    (-1, 0.69314718055994529) # may vary
+    >>> sign * np.exp(logdet)
+    -2.0
+
+    Computing log-determinants for a stack of matrices:
+
+    >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
+    >>> a.shape
+    (3, 2, 2)
+    >>> sign, logdet = np.linalg.slogdet(a)
+    >>> (sign, logdet)
+    (array([-1., -1., -1.]), array([ 0.69314718,  1.09861229,  2.07944154]))
+    >>> sign * np.exp(logdet)
+    array([-2., -3., -8.])
+
+    This routine succeeds where ordinary `det` does not:
+
+    >>> np.linalg.det(np.eye(500) * 0.1)
+    0.0
+    >>> np.linalg.slogdet(np.eye(500) * 0.1)
+    (1, -1151.2925464970228)
+
+    """
+    a = asarray(a)
+    _assert_stacked_2d(a)
+    _assert_stacked_square(a)
+    t, result_t = _commonType(a)
+    real_t = _realType(result_t)
+    signature = 'D->Dd' if isComplexType(t) else 'd->dd'
+    sign, logdet = _umath_linalg.slogdet(a, signature=signature)
+    sign = sign.astype(result_t, copy=False)
+    logdet = logdet.astype(real_t, copy=False)
+    return sign, logdet
+
+
+@array_function_dispatch(_unary_dispatcher)
+def det(a):
+    """
+    Compute the determinant of an array.
+
+    Parameters
+    ----------
+    a : (..., M, M) array_like
+        Input array to compute determinants for.
+
+    Returns
+    -------
+    det : (...) array_like
+        Determinant of `a`.
+
+    See Also
+    --------
+    slogdet : Another way to represent the determinant, more suitable
+        for large matrices where underflow/overflow may occur.
+    scipy.linalg.det : Similar function in SciPy.
+
+    Notes
+    -----
+
+    .. versionadded:: 1.8.0
+
+    Broadcasting rules apply, see the `numpy.linalg` documentation for
+    details.
+
+    The determinant is computed via LU factorization using the LAPACK
+    routine ``z/dgetrf``.
+
+    Examples
+    --------
+    The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
+
+    >>> a = np.array([[1, 2], [3, 4]])
+    >>> np.linalg.det(a)
+    -2.0 # may vary
+
+    Computing determinants for a stack of matrices:
+
+    >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
+    >>> a.shape
+    (3, 2, 2)
+    >>> np.linalg.det(a)
+    array([-2., -3., -8.])
+
+    """
+    a = asarray(a)
+    _assert_stacked_2d(a)
+    _assert_stacked_square(a)
+    t, result_t = _commonType(a)
+    signature = 'D->D' if isComplexType(t) else 'd->d'
+    r = _umath_linalg.det(a, signature=signature)
+    r = r.astype(result_t, copy=False)
+    return r
+
+
+# Linear Least Squares
+
+def _lstsq_dispatcher(a, b, rcond=None):
+    return (a, b)
+
+
+@array_function_dispatch(_lstsq_dispatcher)
+def lstsq(a, b, rcond="warn"):
+    r"""
+    Return the least-squares solution to a linear matrix equation.
+
+    Computes the vector `x` that approximately solves the equation
+    ``a @ x = b``. The equation may be under-, well-, or over-determined
+    (i.e., the number of linearly independent rows of `a` can be less than,
+    equal to, or greater than its number of linearly independent columns).
+    If `a` is square and of full rank, then `x` (but for round-off error)
+    is the "exact" solution of the equation. Else, `x` minimizes the
+    Euclidean 2-norm :math:`||b - ax||`. If there are multiple minimizing
+    solutions, the one with the smallest 2-norm :math:`||x||` is returned.
+
+    Parameters
+    ----------
+    a : (M, N) array_like
+        "Coefficient" matrix.
+    b : {(M,), (M, K)} array_like
+        Ordinate or "dependent variable" values. 
If `b` is two-dimensional, + the least-squares solution is calculated for each of the `K` columns + of `b`. + rcond : float, optional + Cut-off ratio for small singular values of `a`. + For the purposes of rank determination, singular values are treated + as zero if they are smaller than `rcond` times the largest singular + value of `a`. + + .. versionchanged:: 1.14.0 + If not set, a FutureWarning is given. The previous default + of ``-1`` will use the machine precision as `rcond` parameter, + the new default will use the machine precision times `max(M, N)`. + To silence the warning and use the new default, use ``rcond=None``, + to keep using the old behavior, use ``rcond=-1``. + + Returns + ------- + x : {(N,), (N, K)} ndarray + Least-squares solution. If `b` is two-dimensional, + the solutions are in the `K` columns of `x`. + residuals : {(1,), (K,), (0,)} ndarray + Sums of squared residuals: Squared Euclidean 2-norm for each column in + ``b - a @ x``. + If the rank of `a` is < N or M <= N, this is an empty array. + If `b` is 1-dimensional, this is a (1,) shape array. + Otherwise the shape is (K,). + rank : int + Rank of matrix `a`. + s : (min(M, N),) ndarray + Singular values of `a`. + + Raises + ------ + LinAlgError + If computation does not converge. + + See Also + -------- + scipy.linalg.lstsq : Similar function in SciPy. + + Notes + ----- + If `b` is a matrix, then all array results are returned as matrices. + + Examples + -------- + Fit a line, ``y = mx + c``, through some noisy data-points: + + >>> x = np.array([0, 1, 2, 3]) + >>> y = np.array([-1, 0.2, 0.9, 2.1]) + + By examining the coefficients, we see that the line should have a + gradient of roughly 1 and cut the y-axis at, more or less, -1. + + We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]`` + and ``p = [[m], [c]]``. 
Now use `lstsq` to solve for `p`:
+
+    >>> A = np.vstack([x, np.ones(len(x))]).T
+    >>> A
+    array([[ 0.,  1.],
+           [ 1.,  1.],
+           [ 2.,  1.],
+           [ 3.,  1.]])
+
+    >>> m, c = np.linalg.lstsq(A, y, rcond=None)[0]
+    >>> m, c
+    (1.0, -0.95) # may vary
+
+    Plot the data along with the fitted line:
+
+    >>> import matplotlib.pyplot as plt
+    >>> _ = plt.plot(x, y, 'o', label='Original data', markersize=10)
+    >>> _ = plt.plot(x, m*x + c, 'r', label='Fitted line')
+    >>> _ = plt.legend()
+    >>> plt.show()
+
+    """
+    a, _ = _makearray(a)
+    b, wrap = _makearray(b)
+    is_1d = b.ndim == 1
+    if is_1d:
+        b = b[:, newaxis]
+    _assert_2d(a, b)
+    m, n = a.shape[-2:]
+    m2, n_rhs = b.shape[-2:]
+    if m != m2:
+        raise LinAlgError('Incompatible dimensions')
+
+    t, result_t = _commonType(a, b)
+    # FIXME: real_t is unused
+    real_t = _linalgRealType(t)
+    result_real_t = _realType(result_t)
+
+    # Determine default rcond value
+    if rcond == "warn":
+        # 2017-08-19, 1.14.0
+        warnings.warn("`rcond` parameter will change to the default of "
+                      "machine precision times ``max(M, N)`` where M and N "
+                      "are the input matrix dimensions.\n"
+                      "To use the future default and silence this warning "
+                      "we advise to pass `rcond=None`, to keep using the old, "
+                      "explicitly pass `rcond=-1`.",
+                      FutureWarning, stacklevel=3)
+        rcond = -1
+    if rcond is None:
+        rcond = finfo(t).eps * max(n, m)
+
+    if m <= n:
+        gufunc = _umath_linalg.lstsq_m
+    else:
+        gufunc = _umath_linalg.lstsq_n
+
+    signature = 'DDd->Ddid' if isComplexType(t) else 'ddd->ddid'
+    extobj = get_linalg_error_extobj(_raise_linalgerror_lstsq)
+    if n_rhs == 0:
+        # lapack can't handle n_rhs = 0 - so allocate the array one larger in that axis
+        b = zeros(b.shape[:-2] + (m, n_rhs + 1), dtype=b.dtype)
+    x, resids, rank, s = gufunc(a, b, rcond, signature=signature, extobj=extobj)
+    if m == 0:
+        x[...] = 0
+    if n_rhs == 0:
+        # remove the item we added
+        x = x[..., :n_rhs]
+        resids = resids[..., :n_rhs]
+
+    # remove the axis we added
+    if is_1d:
+        x = x.squeeze(axis=-1)
+        # we probably should squeeze resids too, but we can't
+        # without breaking compatibility.
+
+    # as documented
+    if rank != n or m <= n:
+        resids = array([], result_real_t)
+
+    # coerce output arrays
+    s = s.astype(result_real_t, copy=False)
+    resids = resids.astype(result_real_t, copy=False)
+    x = x.astype(result_t, copy=True)  # Copying lets the memory in r_parts be freed
+    return wrap(x), wrap(resids), rank, s
+
+
+def _multi_svd_norm(x, row_axis, col_axis, op):
+    """Compute a function of the singular values of the 2-D matrices in `x`.
+
+    This is a private utility function used by `numpy.linalg.norm()`.
+
+    Parameters
+    ----------
+    x : ndarray
+    row_axis, col_axis : int
+        The axes of `x` that hold the 2-D matrices.
+    op : callable
+        This should be either numpy.amin or `numpy.amax` or `numpy.sum`.
+
+    Returns
+    -------
+    result : float or ndarray
+        If `x` is 2-D, the return value is a float.
+        Otherwise, it is an array with ``x.ndim - 2`` dimensions.
+        The return values are either the minimum or maximum or sum of the
+        singular values of the matrices, depending on whether `op`
+        is `numpy.amin` or `numpy.amax` or `numpy.sum`.
+
+    """
+    y = moveaxis(x, (row_axis, col_axis), (-2, -1))
+    result = op(svd(y, compute_uv=False), axis=-1)
+    return result
+
+
+def _norm_dispatcher(x, ord=None, axis=None, keepdims=None):
+    return (x,)
+
+
+@array_function_dispatch(_norm_dispatcher)
+def norm(x, ord=None, axis=None, keepdims=False):
+    """
+    Matrix or vector norm. 
+ + This function is able to return one of eight different matrix norms, + or one of an infinite number of vector norms (described below), depending + on the value of the ``ord`` parameter. + + Parameters + ---------- + x : array_like + Input array. If `axis` is None, `x` must be 1-D or 2-D, unless `ord` + is None. If both `axis` and `ord` are None, the 2-norm of + ``x.ravel`` will be returned. + ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional + Order of the norm (see table under ``Notes``). inf means numpy's + `inf` object. The default is None. + axis : {None, int, 2-tuple of ints}, optional. + If `axis` is an integer, it specifies the axis of `x` along which to + compute the vector norms. If `axis` is a 2-tuple, it specifies the + axes that hold 2-D matrices, and the matrix norms of these matrices + are computed. If `axis` is None then either a vector norm (when `x` + is 1-D) or a matrix norm (when `x` is 2-D) is returned. The default + is None. + + .. versionadded:: 1.8.0 + + keepdims : bool, optional + If this is set to True, the axes which are normed over are left in the + result as dimensions with size one. With this option the result will + broadcast correctly against the original `x`. + + .. versionadded:: 1.10.0 + + Returns + ------- + n : float or ndarray + Norm of the matrix or vector(s). + + See Also + -------- + scipy.linalg.norm : Similar function in SciPy. + + Notes + ----- + For values of ``ord < 1``, the result is, strictly speaking, not a + mathematical 'norm', but it may still be useful for various numerical + purposes. + + The following norms can be calculated: + + ===== ============================ ========================== + ord norm for matrices norm for vectors + ===== ============================ ========================== + None Frobenius norm 2-norm + 'fro' Frobenius norm -- + 'nuc' nuclear norm -- + inf max(sum(abs(x), axis=1)) max(abs(x)) + -inf min(sum(abs(x), axis=1)) min(abs(x)) + 0 -- sum(x != 0) + 1 max(sum(abs(x), axis=0)) as below + -1 min(sum(abs(x), axis=0)) as below + 2 2-norm (largest sing. value) as below + -2 smallest singular value as below + other -- sum(abs(x)**ord)**(1./ord) + ===== ============================ ========================== + + The Frobenius norm is given by [1]_: + + :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}` + + The nuclear norm is the sum of the singular values. + + Both the Frobenius and nuclear norm orders are only defined for + matrices and raise a ValueError when ``x.ndim != 2``. + + References + ---------- + .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*, + Baltimore, MD, Johns Hopkins University Press, 1985, pg. 
15 + + Examples + -------- + >>> from numpy import linalg as LA + >>> a = np.arange(9) - 4 + >>> a + array([-4, -3, -2, ..., 2, 3, 4]) + >>> b = a.reshape((3, 3)) + >>> b + array([[-4, -3, -2], + [-1, 0, 1], + [ 2, 3, 4]]) + + >>> LA.norm(a) + 7.745966692414834 + >>> LA.norm(b) + 7.745966692414834 + >>> LA.norm(b, 'fro') + 7.745966692414834 + >>> LA.norm(a, np.inf) + 4.0 + >>> LA.norm(b, np.inf) + 9.0 + >>> LA.norm(a, -np.inf) + 0.0 + >>> LA.norm(b, -np.inf) + 2.0 + + >>> LA.norm(a, 1) + 20.0 + >>> LA.norm(b, 1) + 7.0 + >>> LA.norm(a, -1) + -4.6566128774142013e-010 + >>> LA.norm(b, -1) + 6.0 + >>> LA.norm(a, 2) + 7.745966692414834 + >>> LA.norm(b, 2) + 7.3484692283495345 + + >>> LA.norm(a, -2) + 0.0 + >>> LA.norm(b, -2) + 1.8570331885190563e-016 # may vary + >>> LA.norm(a, 3) + 5.8480354764257312 # may vary + >>> LA.norm(a, -3) + 0.0 + + Using the `axis` argument to compute vector norms: + + >>> c = np.array([[ 1, 2, 3], + ... [-1, 1, 4]]) + >>> LA.norm(c, axis=0) + array([ 1.41421356, 2.23606798, 5. ]) + >>> LA.norm(c, axis=1) + array([ 3.74165739, 4.24264069]) + >>> LA.norm(c, ord=1, axis=1) + array([ 6., 6.]) + + Using the `axis` argument to compute matrix norms: + + >>> m = np.arange(8).reshape(2,2,2) + >>> LA.norm(m, axis=(1,2)) + array([ 3.74165739, 11.22497216]) + >>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :]) + (3.7416573867739413, 11.224972160321824) + + """ + x = asarray(x) + + if not issubclass(x.dtype.type, (inexact, object_)): + x = x.astype(float) + + # Immediately handle some default, simple, fast, and common cases. + if axis is None: + ndim = x.ndim + if ((ord is None) or + (ord in ('f', 'fro') and ndim == 2) or + (ord == 2 and ndim == 1)): + + x = x.ravel(order='K') + if isComplexType(x.dtype.type): + sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag) + else: + sqnorm = dot(x, x) + ret = sqrt(sqnorm) + if keepdims: + ret = ret.reshape(ndim*[1]) + return ret + + # Normalize the `axis` argument to a tuple. 
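+    # After this normalization, `axis` is always a tuple: a 1-tuple selects
+    # the vector-norm branch below and a 2-tuple the matrix-norm branch.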
+ nd = x.ndim + if axis is None: + axis = tuple(range(nd)) + elif not isinstance(axis, tuple): + try: + axis = int(axis) + except Exception as e: + raise TypeError("'axis' must be None, an integer or a tuple of integers") from e + axis = (axis,) + + if len(axis) == 1: + if ord == Inf: + return abs(x).max(axis=axis, keepdims=keepdims) + elif ord == -Inf: + return abs(x).min(axis=axis, keepdims=keepdims) + elif ord == 0: + # Zero norm + return (x != 0).astype(x.real.dtype).sum(axis=axis, keepdims=keepdims) + elif ord == 1: + # special case for speedup + return add.reduce(abs(x), axis=axis, keepdims=keepdims) + elif ord is None or ord == 2: + # special case for speedup + s = (x.conj() * x).real + return sqrt(add.reduce(s, axis=axis, keepdims=keepdims)) + # None of the str-type keywords for ord ('fro', 'nuc') + # are valid for vectors + elif isinstance(ord, str): + raise ValueError(f"Invalid norm order '{ord}' for vectors") + else: + absx = abs(x) + absx **= ord + ret = add.reduce(absx, axis=axis, keepdims=keepdims) + ret **= (1 / ord) + return ret + elif len(axis) == 2: + row_axis, col_axis = axis + row_axis = normalize_axis_index(row_axis, nd) + col_axis = normalize_axis_index(col_axis, nd) + if row_axis == col_axis: + raise ValueError('Duplicate axes given.') + if ord == 2: + ret = _multi_svd_norm(x, row_axis, col_axis, amax) + elif ord == -2: + ret = _multi_svd_norm(x, row_axis, col_axis, amin) + elif ord == 1: + if col_axis > row_axis: + col_axis -= 1 + ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis) + elif ord == Inf: + if row_axis > col_axis: + row_axis -= 1 + ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis) + elif ord == -1: + if col_axis > row_axis: + col_axis -= 1 + ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis) + elif ord == -Inf: + if row_axis > col_axis: + row_axis -= 1 + ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis) + elif ord in [None, 'fro', 'f']: + ret = sqrt(add.reduce((x.conj() * x).real, axis=axis)) + elif ord == 'nuc': + ret = _multi_svd_norm(x, row_axis, col_axis, sum) + else: + raise ValueError("Invalid norm order for matrices.") + if keepdims: + ret_shape = list(x.shape) + ret_shape[axis[0]] = 1 + ret_shape[axis[1]] = 1 + ret = ret.reshape(ret_shape) + return ret + else: + raise ValueError("Improper number of dimensions to norm.") + + +# multi_dot + +def _multidot_dispatcher(arrays, *, out=None): + yield from arrays + yield out + + +@array_function_dispatch(_multidot_dispatcher) +def multi_dot(arrays, *, out=None): + """ + Compute the dot product of two or more arrays in a single function call, + while automatically selecting the fastest evaluation order. + + `multi_dot` chains `numpy.dot` and uses optimal parenthesization + of the matrices [1]_ [2]_. Depending on the shapes of the matrices, + this can speed up the multiplication a lot. + + If the first argument is 1-D it is treated as a row vector. + If the last argument is 1-D it is treated as a column vector. + The other arguments must be 2-D. + + Think of `multi_dot` as:: + + def multi_dot(arrays): return functools.reduce(np.dot, arrays) + + + Parameters + ---------- + arrays : sequence of array_like + If the first argument is 1-D it is treated as row vector. + If the last argument is 1-D it is treated as column vector. + The other arguments must be 2-D. + out : ndarray, optional + Output argument. This must have the exact kind that would be returned + if it was not used. 
In particular, it must have the right type, must be
+        C-contiguous, and its dtype must be the dtype that would be returned
+        for `dot(a, b)`. This is a performance feature. Therefore, if these
+        conditions are not met, an exception is raised, instead of attempting
+        to be flexible.
+
+        .. versionadded:: 1.19.0
+
+    Returns
+    -------
+    output : ndarray
+        Returns the dot product of the supplied arrays.
+
+    See Also
+    --------
+    numpy.dot : dot multiplication with two arguments.
+
+    References
+    ----------
+
+    .. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
+    .. [2] https://en.wikipedia.org/wiki/Matrix_chain_multiplication
+
+    Examples
+    --------
+    `multi_dot` allows you to write::
+
+    >>> from numpy.linalg import multi_dot
+    >>> # Prepare some data
+    >>> A = np.random.random((10000, 100))
+    >>> B = np.random.random((100, 1000))
+    >>> C = np.random.random((1000, 5))
+    >>> D = np.random.random((5, 333))
+    >>> # the actual dot multiplication
+    >>> _ = multi_dot([A, B, C, D])
+
+    instead of::
+
+    >>> _ = np.dot(np.dot(np.dot(A, B), C), D)
+    >>> # or
+    >>> _ = A.dot(B).dot(C).dot(D)
+
+    Notes
+    -----
+    The cost for a matrix multiplication can be calculated with the
+    following function::
+
+        def cost(A, B):
+            return A.shape[0] * A.shape[1] * B.shape[1]
+
+    Assume we have three matrices
+    :math:`A_{10x100}, B_{100x5}, C_{5x50}`.
+
+    The costs for the two different parenthesizations are as follows::
+
+        cost((AB)C) = 10*100*5 + 10*5*50   = 5000 + 2500   = 7500
+        cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000
+
+    """
+    n = len(arrays)
+    # optimization only makes sense for len(arrays) > 2
+    if n < 2:
+        raise ValueError("Expecting at least two arrays.")
+    elif n == 2:
+        return dot(arrays[0], arrays[1], out=out)
+
+    arrays = [asanyarray(a) for a in arrays]
+
+    # save original ndim to reshape the result array into the proper form later
+    ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim
+    # Explicitly convert vectors to 2D arrays to keep the logic of the internal
+    # _multi_dot_* functions as simple as possible.
+    if arrays[0].ndim == 1:
+        arrays[0] = atleast_2d(arrays[0])
+    if arrays[-1].ndim == 1:
+        arrays[-1] = atleast_2d(arrays[-1]).T
+    _assert_2d(*arrays)
+
+    # _multi_dot_three is much faster than _multi_dot_matrix_chain_order
+    if n == 3:
+        result = _multi_dot_three(arrays[0], arrays[1], arrays[2], out=out)
+    else:
+        order = _multi_dot_matrix_chain_order(arrays)
+        result = _multi_dot(arrays, order, 0, n - 1, out=out)
+
+    # return proper shape
+    if ndim_first == 1 and ndim_last == 1:
+        return result[0, 0]  # scalar
+    elif ndim_first == 1 or ndim_last == 1:
+        return result.ravel()  # 1-D
+    else:
+        return result
+
+
+def _multi_dot_three(A, B, C, out=None):
+    """
+    Find the best order for three arrays and do the multiplication.
+
+    For three arguments `_multi_dot_three` is approximately 15 times faster
+    than `_multi_dot_matrix_chain_order`
+
+    """
+    a0, a1b0 = A.shape
+    b1c0, c1 = C.shape
+    # cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1
+    cost1 = a0 * b1c0 * (a1b0 + c1)
+    # cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1
+    cost2 = a1b0 * c1 * (a0 + b1c0)
+
+    if cost1 < cost2:
+        return dot(dot(A, B), C, out=out)
+    else:
+        return dot(A, dot(B, C), out=out)
+
+
+def _multi_dot_matrix_chain_order(arrays, return_costs=False):
+    """
+    Return a np.array that encodes the optimal order of multiplications.
+
+    The optimal order array is then used by `_multi_dot()` to do the
+    multiplication. 
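+
+    For the three matrices in `multi_dot`'s docstring example
+    (:math:`A_{10x100}, B_{100x5}, C_{5x50}`), the cheapest order is
+    ``(AB)C`` at 7500 scalar multiplications, so the returned split index
+    is ``s[0, 2] == 1``.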
+ + Also return the cost matrix if `return_costs` is `True` + + The implementation CLOSELY follows Cormen, "Introduction to Algorithms", + Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices. + + cost[i, j] = min([ + cost[prefix] + cost[suffix] + cost_mult(prefix, suffix) + for k in range(i, j)]) + + """ + n = len(arrays) + # p stores the dimensions of the matrices + # Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50] + p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]] + # m is a matrix of costs of the subproblems + # m[i,j]: min number of scalar multiplications needed to compute A_{i..j} + m = zeros((n, n), dtype=double) + # s is the actual ordering + # s[i, j] is the value of k at which we split the product A_i..A_j + s = empty((n, n), dtype=intp) + + for l in range(1, n): + for i in range(n - l): + j = i + l + m[i, j] = Inf + for k in range(i, j): + q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1] + if q < m[i, j]: + m[i, j] = q + s[i, j] = k # Note that Cormen uses 1-based index + + return (s, m) if return_costs else s + + +def _multi_dot(arrays, order, i, j, out=None): + """Actually do the multiplication with the given order.""" + if i == j: + # the initial call with non-None out should never get here + assert out is None + + return arrays[i] + else: + return dot(_multi_dot(arrays, order, i, order[i, j]), + _multi_dot(arrays, order, order[i, j] + 1, j), + out=out) diff --git a/MLPY/Lib/site-packages/numpy/linalg/setup.py b/MLPY/Lib/site-packages/numpy/linalg/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..b83393d23db0cc37bd9072786289942734f863dd --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/linalg/setup.py @@ -0,0 +1,83 @@ +import os +import sys + +def configuration(parent_package='', top_path=None): + from numpy.distutils.misc_util import Configuration + from numpy.distutils.system_info import ( + get_info, system_info, lapack_opt_info, blas_opt_info) + config = Configuration('linalg', parent_package, top_path) + + config.add_subpackage('tests') + + # Configure lapack_lite + + src_dir = 'lapack_lite' + lapack_lite_src = [ + os.path.join(src_dir, 'python_xerbla.c'), + os.path.join(src_dir, 'f2c_z_lapack.c'), + os.path.join(src_dir, 'f2c_c_lapack.c'), + os.path.join(src_dir, 'f2c_d_lapack.c'), + os.path.join(src_dir, 'f2c_s_lapack.c'), + os.path.join(src_dir, 'f2c_lapack.c'), + os.path.join(src_dir, 'f2c_blas.c'), + os.path.join(src_dir, 'f2c_config.c'), + os.path.join(src_dir, 'f2c.c'), + ] + all_sources = config.paths(lapack_lite_src) + + if os.environ.get('NPY_USE_BLAS_ILP64', "0") != "0": + lapack_info = get_info('lapack_ilp64_opt', 2) + else: + lapack_info = get_info('lapack_opt', 0) # and {} + + use_lapack_lite = not lapack_info + + if use_lapack_lite: + # This makes numpy.distutils write the fact that lapack_lite + # is being used to numpy.__config__ + class numpy_linalg_lapack_lite(system_info): + def calc_info(self): + info = {'language': 'c'} + if sys.maxsize > 2**32: + # Build lapack-lite in 64-bit integer mode. + # The suffix is arbitrary (lapack_lite symbols follow it), + # but use the "64_" convention here. 
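+                    # (HAVE_BLAS_ILP64 enables the 64-bit integer code paths
+                    # and BLAS_SYMBOL_SUFFIX appends '64_' to the generated
+                    # symbol names, keeping them distinct from any 32-bit
+                    # system BLAS.)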
+ info['define_macros'] = [ + ('HAVE_BLAS_ILP64', None), + ('BLAS_SYMBOL_SUFFIX', '64_') + ] + self.set_info(**info) + + lapack_info = numpy_linalg_lapack_lite().get_info(2) + + def get_lapack_lite_sources(ext, build_dir): + if use_lapack_lite: + print("### Warning: Using unoptimized lapack ###") + return all_sources + else: + if sys.platform == 'win32': + print("### Warning: python_xerbla.c is disabled ###") + return [] + return [all_sources[0]] + + config.add_extension( + 'lapack_lite', + sources=['lapack_litemodule.c', get_lapack_lite_sources], + depends=['lapack_lite/f2c.h'], + extra_info=lapack_info, + ) + + # umath_linalg module + config.add_extension( + '_umath_linalg', + sources=['umath_linalg.c.src', get_lapack_lite_sources], + depends=['lapack_lite/f2c.h'], + extra_info=lapack_info, + libraries=['npymath'], + ) + config.add_data_files('*.pyi') + return config + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(configuration=configuration) diff --git a/MLPY/Lib/site-packages/numpy/linalg/tests/__init__.py b/MLPY/Lib/site-packages/numpy/linalg/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/numpy/linalg/tests/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/linalg/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d7d3715f2cca00698d2ebb0d991bbf3804d62767 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/linalg/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/linalg/tests/__pycache__/test_build.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/linalg/tests/__pycache__/test_build.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..266d3f4522ea19d9b862d7ca22d5d9c7e8bda293 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/linalg/tests/__pycache__/test_build.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/linalg/tests/__pycache__/test_deprecations.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/linalg/tests/__pycache__/test_deprecations.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1b425834902670ec99245b9bdd9171c927ca1b1 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/linalg/tests/__pycache__/test_deprecations.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/linalg/tests/__pycache__/test_linalg.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/linalg/tests/__pycache__/test_linalg.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc3f8730dcf07679fba5b568b0ecd6f6f4d4c79a Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/linalg/tests/__pycache__/test_linalg.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/linalg/tests/__pycache__/test_regression.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/linalg/tests/__pycache__/test_regression.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b78ce85ee4b29e8a2453c1226a53999a6a150a7 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/linalg/tests/__pycache__/test_regression.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/linalg/tests/test_build.py b/MLPY/Lib/site-packages/numpy/linalg/tests/test_build.py new file mode 100644 index 0000000000000000000000000000000000000000..293d639525b533289d0a32badd181aadd40e0b94 --- /dev/null +++ 
b/MLPY/Lib/site-packages/numpy/linalg/tests/test_build.py @@ -0,0 +1,53 @@ +from subprocess import PIPE, Popen +import sys +import re +import pytest + +from numpy.linalg import lapack_lite +from numpy.testing import assert_ + + +class FindDependenciesLdd: + + def __init__(self): + self.cmd = ['ldd'] + + try: + p = Popen(self.cmd, stdout=PIPE, stderr=PIPE) + stdout, stderr = p.communicate() + except OSError as e: + raise RuntimeError(f'command {self.cmd} cannot be run') from e + + def get_dependencies(self, lfile): + p = Popen(self.cmd + [lfile], stdout=PIPE, stderr=PIPE) + stdout, stderr = p.communicate() + if not (p.returncode == 0): + raise RuntimeError(f'failed dependencies check for {lfile}') + + return stdout + + def grep_dependencies(self, lfile, deps): + stdout = self.get_dependencies(lfile) + + rdeps = dict([(dep, re.compile(dep)) for dep in deps]) + founds = [] + for l in stdout.splitlines(): + for k, v in rdeps.items(): + if v.search(l): + founds.append(k) + + return founds + + +class TestF77Mismatch: + + @pytest.mark.skipif(not(sys.platform[:5] == 'linux'), + reason="no fortran compiler on non-Linux platform") + def test_lapack(self): + f = FindDependenciesLdd() + deps = f.grep_dependencies(lapack_lite.__file__, + [b'libg2c', b'libgfortran']) + assert_(len(deps) <= 1, + """Both g77 and gfortran runtimes linked in lapack_lite ! This is likely to +cause random crashes and wrong results. See numpy INSTALL.txt for more +information.""") diff --git a/MLPY/Lib/site-packages/numpy/linalg/tests/test_deprecations.py b/MLPY/Lib/site-packages/numpy/linalg/tests/test_deprecations.py new file mode 100644 index 0000000000000000000000000000000000000000..3dad16f01fdb78b5f703b2d2591746dbcba99aaf --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/linalg/tests/test_deprecations.py @@ -0,0 +1,20 @@ +"""Test deprecation and future warnings. + +""" +import numpy as np +from numpy.testing import assert_warns + + +def test_qr_mode_full_future_warning(): + """Check mode='full' FutureWarning. + + In numpy 1.8 the mode options 'full' and 'economic' in linalg.qr were + deprecated. The release date will probably be sometime in the summer + of 2013. 
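+    (The deprecated modes currently raise ``DeprecationWarning``, which is
+    what the assertions below check.)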
+ + """ + a = np.eye(2) + assert_warns(DeprecationWarning, np.linalg.qr, a, mode='full') + assert_warns(DeprecationWarning, np.linalg.qr, a, mode='f') + assert_warns(DeprecationWarning, np.linalg.qr, a, mode='economic') + assert_warns(DeprecationWarning, np.linalg.qr, a, mode='e') diff --git a/MLPY/Lib/site-packages/numpy/linalg/tests/test_linalg.py b/MLPY/Lib/site-packages/numpy/linalg/tests/test_linalg.py new file mode 100644 index 0000000000000000000000000000000000000000..fdea50ffc8a77aea50bfc2572a54f792838a3f60 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/linalg/tests/test_linalg.py @@ -0,0 +1,2092 @@ +""" Test functions for linalg module + +""" +import os +import sys +import itertools +import traceback +import textwrap +import subprocess +import pytest + +import numpy as np +from numpy import array, single, double, csingle, cdouble, dot, identity, matmul +from numpy import multiply, atleast_2d, inf, asarray +from numpy import linalg +from numpy.linalg import matrix_power, norm, matrix_rank, multi_dot, LinAlgError +from numpy.linalg.linalg import _multi_dot_matrix_chain_order +from numpy.testing import ( + assert_, assert_equal, assert_raises, assert_array_equal, + assert_almost_equal, assert_allclose, suppress_warnings, + assert_raises_regex, HAS_LAPACK64, + ) +from numpy.testing._private.utils import requires_memory + + +def consistent_subclass(out, in_): + # For ndarray subclass input, our output should have the same subclass + # (non-ndarray input gets converted to ndarray). + return type(out) is (type(in_) if isinstance(in_, np.ndarray) + else np.ndarray) + + +old_assert_almost_equal = assert_almost_equal + + +def assert_almost_equal(a, b, single_decimal=6, double_decimal=12, **kw): + if asarray(a).dtype.type in (single, csingle): + decimal = single_decimal + else: + decimal = double_decimal + old_assert_almost_equal(a, b, decimal=decimal, **kw) + + +def get_real_dtype(dtype): + return {single: single, double: double, + csingle: single, cdouble: double}[dtype] + + +def get_complex_dtype(dtype): + return {single: csingle, double: cdouble, + csingle: csingle, cdouble: cdouble}[dtype] + + +def get_rtol(dtype): + # Choose a safe rtol + if dtype in (single, csingle): + return 1e-5 + else: + return 1e-11 + + +# used to categorize tests +all_tags = { + 'square', 'nonsquare', 'hermitian', # mutually exclusive + 'generalized', 'size-0', 'strided' # optional additions +} + + +class LinalgCase: + def __init__(self, name, a, b, tags=set()): + """ + A bundle of arguments to be passed to a test case, with an identifying + name, the operands a and b, and a set of tags to filter the tests + """ + assert_(isinstance(name, str)) + self.name = name + self.a = a + self.b = b + self.tags = frozenset(tags) # prevent shared tags + + def check(self, do): + """ + Run the function `do` on this test case, expanding arguments + """ + do(self.a, self.b, tags=self.tags) + + def __repr__(self): + return f'' + + +def apply_tag(tag, cases): + """ + Add the given tag (a string) to each of the cases (a list of LinalgCase + objects) + """ + assert tag in all_tags, "Invalid tag" + for case in cases: + case.tags = case.tags | {tag} + return cases + + +# +# Base test cases +# + +np.random.seed(1234) + +CASES = [] + +# square test cases +CASES += apply_tag('square', [ + LinalgCase("single", + array([[1., 2.], [3., 4.]], dtype=single), + array([2., 1.], dtype=single)), + LinalgCase("double", + array([[1., 2.], [3., 4.]], dtype=double), + array([2., 1.], dtype=double)), + LinalgCase("double_2", + array([[1., 2.], [3., 
4.]], dtype=double), + array([[2., 1., 4.], [3., 4., 6.]], dtype=double)), + LinalgCase("csingle", + array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=csingle), + array([2. + 1j, 1. + 2j], dtype=csingle)), + LinalgCase("cdouble", + array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble), + array([2. + 1j, 1. + 2j], dtype=cdouble)), + LinalgCase("cdouble_2", + array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble), + array([[2. + 1j, 1. + 2j, 1 + 3j], [1 - 2j, 1 - 3j, 1 - 6j]], dtype=cdouble)), + LinalgCase("0x0", + np.empty((0, 0), dtype=double), + np.empty((0,), dtype=double), + tags={'size-0'}), + LinalgCase("8x8", + np.random.rand(8, 8), + np.random.rand(8)), + LinalgCase("1x1", + np.random.rand(1, 1), + np.random.rand(1)), + LinalgCase("nonarray", + [[1, 2], [3, 4]], + [2, 1]), +]) + +# non-square test-cases +CASES += apply_tag('nonsquare', [ + LinalgCase("single_nsq_1", + array([[1., 2., 3.], [3., 4., 6.]], dtype=single), + array([2., 1.], dtype=single)), + LinalgCase("single_nsq_2", + array([[1., 2.], [3., 4.], [5., 6.]], dtype=single), + array([2., 1., 3.], dtype=single)), + LinalgCase("double_nsq_1", + array([[1., 2., 3.], [3., 4., 6.]], dtype=double), + array([2., 1.], dtype=double)), + LinalgCase("double_nsq_2", + array([[1., 2.], [3., 4.], [5., 6.]], dtype=double), + array([2., 1., 3.], dtype=double)), + LinalgCase("csingle_nsq_1", + array( + [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=csingle), + array([2. + 1j, 1. + 2j], dtype=csingle)), + LinalgCase("csingle_nsq_2", + array( + [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=csingle), + array([2. + 1j, 1. + 2j, 3. - 3j], dtype=csingle)), + LinalgCase("cdouble_nsq_1", + array( + [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble), + array([2. + 1j, 1. + 2j], dtype=cdouble)), + LinalgCase("cdouble_nsq_2", + array( + [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble), + array([2. + 1j, 1. + 2j, 3. - 3j], dtype=cdouble)), + LinalgCase("cdouble_nsq_1_2", + array( + [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble), + array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)), + LinalgCase("cdouble_nsq_2_2", + array( + [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble), + array([[2. + 1j, 1. 
+ 2j], [1 - 1j, 2 - 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)), + LinalgCase("8x11", + np.random.rand(8, 11), + np.random.rand(8)), + LinalgCase("1x5", + np.random.rand(1, 5), + np.random.rand(1)), + LinalgCase("5x1", + np.random.rand(5, 1), + np.random.rand(5)), + LinalgCase("0x4", + np.random.rand(0, 4), + np.random.rand(0), + tags={'size-0'}), + LinalgCase("4x0", + np.random.rand(4, 0), + np.random.rand(4), + tags={'size-0'}), +]) + +# hermitian test-cases +CASES += apply_tag('hermitian', [ + LinalgCase("hsingle", + array([[1., 2.], [2., 1.]], dtype=single), + None), + LinalgCase("hdouble", + array([[1., 2.], [2., 1.]], dtype=double), + None), + LinalgCase("hcsingle", + array([[1., 2 + 3j], [2 - 3j, 1]], dtype=csingle), + None), + LinalgCase("hcdouble", + array([[1., 2 + 3j], [2 - 3j, 1]], dtype=cdouble), + None), + LinalgCase("hempty", + np.empty((0, 0), dtype=double), + None, + tags={'size-0'}), + LinalgCase("hnonarray", + [[1, 2], [2, 1]], + None), + LinalgCase("matrix_b_only", + array([[1., 2.], [2., 1.]]), + None), + LinalgCase("hmatrix_1x1", + np.random.rand(1, 1), + None), +]) + + +# +# Gufunc test cases +# +def _make_generalized_cases(): + new_cases = [] + + for case in CASES: + if not isinstance(case.a, np.ndarray): + continue + + a = np.array([case.a, 2 * case.a, 3 * case.a]) + if case.b is None: + b = None + else: + b = np.array([case.b, 7 * case.b, 6 * case.b]) + new_case = LinalgCase(case.name + "_tile3", a, b, + tags=case.tags | {'generalized'}) + new_cases.append(new_case) + + a = np.array([case.a] * 2 * 3).reshape((3, 2) + case.a.shape) + if case.b is None: + b = None + else: + b = np.array([case.b] * 2 * 3).reshape((3, 2) + case.b.shape) + new_case = LinalgCase(case.name + "_tile213", a, b, + tags=case.tags | {'generalized'}) + new_cases.append(new_case) + + return new_cases + + +CASES += _make_generalized_cases() + + +# +# Generate stride combination variations of the above +# +def _stride_comb_iter(x): + """ + Generate cartesian product of strides for all axes + """ + + if not isinstance(x, np.ndarray): + yield x, "nop" + return + + stride_set = [(1,)] * x.ndim + stride_set[-1] = (1, 3, -4) + if x.ndim > 1: + stride_set[-2] = (1, 3, -4) + if x.ndim > 2: + stride_set[-3] = (1, -4) + + for repeats in itertools.product(*tuple(stride_set)): + new_shape = [abs(a * b) for a, b in zip(x.shape, repeats)] + slices = tuple([slice(None, None, repeat) for repeat in repeats]) + + # new array with different strides, but same data + xi = np.empty(new_shape, dtype=x.dtype) + xi.view(np.uint32).fill(0xdeadbeef) + xi = xi[slices] + xi[...] 
= x + xi = xi.view(x.__class__) + assert_(np.all(xi == x)) + yield xi, "stride_" + "_".join(["%+d" % j for j in repeats]) + + # generate also zero strides if possible + if x.ndim >= 1 and x.shape[-1] == 1: + s = list(x.strides) + s[-1] = 0 + xi = np.lib.stride_tricks.as_strided(x, strides=s) + yield xi, "stride_xxx_0" + if x.ndim >= 2 and x.shape[-2] == 1: + s = list(x.strides) + s[-2] = 0 + xi = np.lib.stride_tricks.as_strided(x, strides=s) + yield xi, "stride_xxx_0_x" + if x.ndim >= 2 and x.shape[:-2] == (1, 1): + s = list(x.strides) + s[-1] = 0 + s[-2] = 0 + xi = np.lib.stride_tricks.as_strided(x, strides=s) + yield xi, "stride_xxx_0_0" + + +def _make_strided_cases(): + new_cases = [] + for case in CASES: + for a, a_label in _stride_comb_iter(case.a): + for b, b_label in _stride_comb_iter(case.b): + new_case = LinalgCase(case.name + "_" + a_label + "_" + b_label, a, b, + tags=case.tags | {'strided'}) + new_cases.append(new_case) + return new_cases + + +CASES += _make_strided_cases() + + +# +# Test different routines against the above cases +# +class LinalgTestCase: + TEST_CASES = CASES + + def check_cases(self, require=set(), exclude=set()): + """ + Run func on each of the cases with all of the tags in require, and none + of the tags in exclude + """ + for case in self.TEST_CASES: + # filter by require and exclude + if case.tags & require != require: + continue + if case.tags & exclude: + continue + + try: + case.check(self.do) + except Exception as e: + msg = f'In test case: {case!r}\n\n' + msg += traceback.format_exc() + raise AssertionError(msg) from e + + +class LinalgSquareTestCase(LinalgTestCase): + + def test_sq_cases(self): + self.check_cases(require={'square'}, + exclude={'generalized', 'size-0'}) + + def test_empty_sq_cases(self): + self.check_cases(require={'square', 'size-0'}, + exclude={'generalized'}) + + +class LinalgNonsquareTestCase(LinalgTestCase): + + def test_nonsq_cases(self): + self.check_cases(require={'nonsquare'}, + exclude={'generalized', 'size-0'}) + + def test_empty_nonsq_cases(self): + self.check_cases(require={'nonsquare', 'size-0'}, + exclude={'generalized'}) + + +class HermitianTestCase(LinalgTestCase): + + def test_herm_cases(self): + self.check_cases(require={'hermitian'}, + exclude={'generalized', 'size-0'}) + + def test_empty_herm_cases(self): + self.check_cases(require={'hermitian', 'size-0'}, + exclude={'generalized'}) + + +class LinalgGeneralizedSquareTestCase(LinalgTestCase): + + @pytest.mark.slow + def test_generalized_sq_cases(self): + self.check_cases(require={'generalized', 'square'}, + exclude={'size-0'}) + + @pytest.mark.slow + def test_generalized_empty_sq_cases(self): + self.check_cases(require={'generalized', 'square', 'size-0'}) + + +class LinalgGeneralizedNonsquareTestCase(LinalgTestCase): + + @pytest.mark.slow + def test_generalized_nonsq_cases(self): + self.check_cases(require={'generalized', 'nonsquare'}, + exclude={'size-0'}) + + @pytest.mark.slow + def test_generalized_empty_nonsq_cases(self): + self.check_cases(require={'generalized', 'nonsquare', 'size-0'}) + + +class HermitianGeneralizedTestCase(LinalgTestCase): + + @pytest.mark.slow + def test_generalized_herm_cases(self): + self.check_cases(require={'generalized', 'hermitian'}, + exclude={'size-0'}) + + @pytest.mark.slow + def test_generalized_empty_herm_cases(self): + self.check_cases(require={'generalized', 'hermitian', 'size-0'}, + exclude={'none'}) + + +def dot_generalized(a, b): + a = asarray(a) + if a.ndim >= 3: + if a.ndim == b.ndim: + # matrix x matrix + new_shape = 
a.shape[:-1] + b.shape[-1:] + elif a.ndim == b.ndim + 1: + # matrix x vector + new_shape = a.shape[:-1] + else: + raise ValueError("Not implemented...") + r = np.empty(new_shape, dtype=np.common_type(a, b)) + for c in itertools.product(*map(range, a.shape[:-2])): + r[c] = dot(a[c], b[c]) + return r + else: + return dot(a, b) + + +def identity_like_generalized(a): + a = asarray(a) + if a.ndim >= 3: + r = np.empty(a.shape, dtype=a.dtype) + r[...] = identity(a.shape[-2]) + return r + else: + return identity(a.shape[0]) + + +class SolveCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + # kept apart from TestSolve for use for testing with matrices. + def do(self, a, b, tags): + x = linalg.solve(a, b) + assert_almost_equal(b, dot_generalized(a, x)) + assert_(consistent_subclass(x, b)) + + +class TestSolve(SolveCases): + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + assert_equal(linalg.solve(x, x).dtype, dtype) + + def test_0_size(self): + class ArraySubclass(np.ndarray): + pass + # Test system of 0x0 matrices + a = np.arange(8).reshape(2, 2, 2) + b = np.arange(6).reshape(1, 2, 3).view(ArraySubclass) + + expected = linalg.solve(a, b)[:, 0:0, :] + result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, :]) + assert_array_equal(result, expected) + assert_(isinstance(result, ArraySubclass)) + + # Test errors for non-square and only b's dimension being 0 + assert_raises(linalg.LinAlgError, linalg.solve, a[:, 0:0, 0:1], b) + assert_raises(ValueError, linalg.solve, a, b[:, 0:0, :]) + + # Test broadcasting error + b = np.arange(6).reshape(1, 3, 2) # broadcasting error + assert_raises(ValueError, linalg.solve, a, b) + assert_raises(ValueError, linalg.solve, a[0:0], b[0:0]) + + # Test zero "single equations" with 0x0 matrices. + b = np.arange(2).reshape(1, 2).view(ArraySubclass) + expected = linalg.solve(a, b)[:, 0:0] + result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0]) + assert_array_equal(result, expected) + assert_(isinstance(result, ArraySubclass)) + + b = np.arange(3).reshape(1, 3) + assert_raises(ValueError, linalg.solve, a, b) + assert_raises(ValueError, linalg.solve, a[0:0], b[0:0]) + assert_raises(ValueError, linalg.solve, a[:, 0:0, 0:0], b) + + def test_0_size_k(self): + # test zero multiple equation (K=0) case. + class ArraySubclass(np.ndarray): + pass + a = np.arange(4).reshape(1, 2, 2) + b = np.arange(6).reshape(3, 2, 1).view(ArraySubclass) + + expected = linalg.solve(a, b)[:, :, 0:0] + result = linalg.solve(a, b[:, :, 0:0]) + assert_array_equal(result, expected) + assert_(isinstance(result, ArraySubclass)) + + # test both zero. 
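+        # (0-sized matrix block and 0-sized right-hand sides simultaneously)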
+ expected = linalg.solve(a, b)[:, 0:0, 0:0] + result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, 0:0]) + assert_array_equal(result, expected) + assert_(isinstance(result, ArraySubclass)) + + +class InvCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + + def do(self, a, b, tags): + a_inv = linalg.inv(a) + assert_almost_equal(dot_generalized(a, a_inv), + identity_like_generalized(a)) + assert_(consistent_subclass(a_inv, a)) + + +class TestInv(InvCases): + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + assert_equal(linalg.inv(x).dtype, dtype) + + def test_0_size(self): + # Check that all kinds of 0-sized arrays work + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res = linalg.inv(a) + assert_(res.dtype.type is np.float64) + assert_equal(a.shape, res.shape) + assert_(isinstance(res, ArraySubclass)) + + a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) + res = linalg.inv(a) + assert_(res.dtype.type is np.complex64) + assert_equal(a.shape, res.shape) + assert_(isinstance(res, ArraySubclass)) + + +class EigvalsCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + + def do(self, a, b, tags): + ev = linalg.eigvals(a) + evalues, evectors = linalg.eig(a) + assert_almost_equal(ev, evalues) + + +class TestEigvals(EigvalsCases): + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + assert_equal(linalg.eigvals(x).dtype, dtype) + x = np.array([[1, 0.5], [-1, 1]], dtype=dtype) + assert_equal(linalg.eigvals(x).dtype, get_complex_dtype(dtype)) + + def test_0_size(self): + # Check that all kinds of 0-sized arrays work + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res = linalg.eigvals(a) + assert_(res.dtype.type is np.float64) + assert_equal((0, 1), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(res, np.ndarray)) + + a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) + res = linalg.eigvals(a) + assert_(res.dtype.type is np.complex64) + assert_equal((0,), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(res, np.ndarray)) + + +class EigCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + + def do(self, a, b, tags): + evalues, evectors = linalg.eig(a) + assert_allclose(dot_generalized(a, evectors), + np.asarray(evectors) * np.asarray(evalues)[..., None, :], + rtol=get_rtol(evalues.dtype)) + assert_(consistent_subclass(evectors, a)) + + +class TestEig(EigCases): + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + w, v = np.linalg.eig(x) + assert_equal(w.dtype, dtype) + assert_equal(v.dtype, dtype) + + x = np.array([[1, 0.5], [-1, 1]], dtype=dtype) + w, v = np.linalg.eig(x) + assert_equal(w.dtype, get_complex_dtype(dtype)) + assert_equal(v.dtype, get_complex_dtype(dtype)) + + def test_0_size(self): + # Check that all kinds of 0-sized arrays work + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res, res_v = linalg.eig(a) + assert_(res_v.dtype.type is np.float64) + assert_(res.dtype.type is np.float64) + assert_equal(a.shape, res_v.shape) + assert_equal((0, 1), res.shape) + # This is just for 
documentation, it might make sense to change: + assert_(isinstance(a, np.ndarray)) + + a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) + res, res_v = linalg.eig(a) + assert_(res_v.dtype.type is np.complex64) + assert_(res.dtype.type is np.complex64) + assert_equal(a.shape, res_v.shape) + assert_equal((0,), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(a, np.ndarray)) + + +class SVDBaseTests: + hermitian = False + + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + u, s, vh = linalg.svd(x) + assert_equal(u.dtype, dtype) + assert_equal(s.dtype, get_real_dtype(dtype)) + assert_equal(vh.dtype, dtype) + s = linalg.svd(x, compute_uv=False, hermitian=self.hermitian) + assert_equal(s.dtype, get_real_dtype(dtype)) + + +class SVDCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + + def do(self, a, b, tags): + u, s, vt = linalg.svd(a, False) + assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[..., None, :], + np.asarray(vt)), + rtol=get_rtol(u.dtype)) + assert_(consistent_subclass(u, a)) + assert_(consistent_subclass(vt, a)) + + +class TestSVD(SVDCases, SVDBaseTests): + def test_empty_identity(self): + """ Empty input should put an identity matrix in u or vh """ + x = np.empty((4, 0)) + u, s, vh = linalg.svd(x, compute_uv=True, hermitian=self.hermitian) + assert_equal(u.shape, (4, 4)) + assert_equal(vh.shape, (0, 0)) + assert_equal(u, np.eye(4)) + + x = np.empty((0, 4)) + u, s, vh = linalg.svd(x, compute_uv=True, hermitian=self.hermitian) + assert_equal(u.shape, (0, 0)) + assert_equal(vh.shape, (4, 4)) + assert_equal(vh, np.eye(4)) + + +class SVDHermitianCases(HermitianTestCase, HermitianGeneralizedTestCase): + + def do(self, a, b, tags): + u, s, vt = linalg.svd(a, False, hermitian=True) + assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[..., None, :], + np.asarray(vt)), + rtol=get_rtol(u.dtype)) + def hermitian(mat): + axes = list(range(mat.ndim)) + axes[-1], axes[-2] = axes[-2], axes[-1] + return np.conj(np.transpose(mat, axes=axes)) + + assert_almost_equal(np.matmul(u, hermitian(u)), np.broadcast_to(np.eye(u.shape[-1]), u.shape)) + assert_almost_equal(np.matmul(vt, hermitian(vt)), np.broadcast_to(np.eye(vt.shape[-1]), vt.shape)) + assert_equal(np.sort(s)[..., ::-1], s) + assert_(consistent_subclass(u, a)) + assert_(consistent_subclass(vt, a)) + + +class TestSVDHermitian(SVDHermitianCases, SVDBaseTests): + hermitian = True + + +class CondCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + # cond(x, p) for p in (None, 2, -2) + + def do(self, a, b, tags): + c = asarray(a) # a might be a matrix + if 'size-0' in tags: + assert_raises(LinAlgError, linalg.cond, c) + return + + # +-2 norms + s = linalg.svd(c, compute_uv=False) + assert_almost_equal( + linalg.cond(a), s[..., 0] / s[..., -1], + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, 2), s[..., 0] / s[..., -1], + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, -2), s[..., -1] / s[..., 0], + single_decimal=5, double_decimal=11) + + # Other norms + cinv = np.linalg.inv(c) + assert_almost_equal( + linalg.cond(a, 1), + abs(c).sum(-2).max(-1) * abs(cinv).sum(-2).max(-1), + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, -1), + abs(c).sum(-2).min(-1) * abs(cinv).sum(-2).min(-1), + single_decimal=5, double_decimal=11) + assert_almost_equal( + 
linalg.cond(a, np.inf), + abs(c).sum(-1).max(-1) * abs(cinv).sum(-1).max(-1), + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, -np.inf), + abs(c).sum(-1).min(-1) * abs(cinv).sum(-1).min(-1), + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, 'fro'), + np.sqrt((abs(c)**2).sum(-1).sum(-1) + * (abs(cinv)**2).sum(-1).sum(-1)), + single_decimal=5, double_decimal=11) + + +class TestCond(CondCases): + def test_basic_nonsvd(self): + # Smoketest the non-svd norms + A = array([[1., 0, 1], [0, -2., 0], [0, 0, 3.]]) + assert_almost_equal(linalg.cond(A, inf), 4) + assert_almost_equal(linalg.cond(A, -inf), 2/3) + assert_almost_equal(linalg.cond(A, 1), 4) + assert_almost_equal(linalg.cond(A, -1), 0.5) + assert_almost_equal(linalg.cond(A, 'fro'), np.sqrt(265 / 12)) + + def test_singular(self): + # Singular matrices have infinite condition number for + # positive norms, and negative norms shouldn't raise + # exceptions + As = [np.zeros((2, 2)), np.ones((2, 2))] + p_pos = [None, 1, 2, 'fro'] + p_neg = [-1, -2] + for A, p in itertools.product(As, p_pos): + # Inversion may not hit exact infinity, so just check the + # number is large + assert_(linalg.cond(A, p) > 1e15) + for A, p in itertools.product(As, p_neg): + linalg.cond(A, p) + + @pytest.mark.xfail(True, run=False, + reason="Platform/LAPACK-dependent failure, " + "see gh-18914") + def test_nan(self): + # nans should be passed through, not converted to infs + ps = [None, 1, -1, 2, -2, 'fro'] + p_pos = [None, 1, 2, 'fro'] + + A = np.ones((2, 2)) + A[0,1] = np.nan + for p in ps: + c = linalg.cond(A, p) + assert_(isinstance(c, np.float_)) + assert_(np.isnan(c)) + + A = np.ones((3, 2, 2)) + A[1,0,1] = np.nan + for p in ps: + c = linalg.cond(A, p) + assert_(np.isnan(c[1])) + if p in p_pos: + assert_(c[0] > 1e15) + assert_(c[2] > 1e15) + else: + assert_(not np.isnan(c[0])) + assert_(not np.isnan(c[2])) + + def test_stacked_singular(self): + # Check behavior when only some of the stacked matrices are + # singular + np.random.seed(1234) + A = np.random.rand(2, 2, 2, 2) + A[0,0] = 0 + A[1,1] = 0 + + for p in (None, 1, 2, 'fro', -1, -2): + c = linalg.cond(A, p) + assert_equal(c[0,0], np.inf) + assert_equal(c[1,1], np.inf) + assert_(np.isfinite(c[0,1])) + assert_(np.isfinite(c[1,0])) + + +class PinvCases(LinalgSquareTestCase, + LinalgNonsquareTestCase, + LinalgGeneralizedSquareTestCase, + LinalgGeneralizedNonsquareTestCase): + + def do(self, a, b, tags): + a_ginv = linalg.pinv(a) + # `a @ a_ginv == I` does not hold if a is singular + dot = dot_generalized + assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11) + assert_(consistent_subclass(a_ginv, a)) + + +class TestPinv(PinvCases): + pass + + +class PinvHermitianCases(HermitianTestCase, HermitianGeneralizedTestCase): + + def do(self, a, b, tags): + a_ginv = linalg.pinv(a, hermitian=True) + # `a @ a_ginv == I` does not hold if a is singular + dot = dot_generalized + assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11) + assert_(consistent_subclass(a_ginv, a)) + + +class TestPinvHermitian(PinvHermitianCases): + pass + + +class DetCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + + def do(self, a, b, tags): + d = linalg.det(a) + (s, ld) = linalg.slogdet(a) + if asarray(a).dtype.type in (single, double): + ad = asarray(a).astype(double) + else: + ad = asarray(a).astype(cdouble) + ev = linalg.eigvals(ad) + assert_almost_equal(d, multiply.reduce(ev, axis=-1)) + assert_almost_equal(s * 
np.exp(ld), multiply.reduce(ev, axis=-1)) + + s = np.atleast_1d(s) + ld = np.atleast_1d(ld) + m = (s != 0) + assert_almost_equal(np.abs(s[m]), 1) + assert_equal(ld[~m], -inf) + + +class TestDet(DetCases): + def test_zero(self): + assert_equal(linalg.det([[0.0]]), 0.0) + assert_equal(type(linalg.det([[0.0]])), double) + assert_equal(linalg.det([[0.0j]]), 0.0) + assert_equal(type(linalg.det([[0.0j]])), cdouble) + + assert_equal(linalg.slogdet([[0.0]]), (0.0, -inf)) + assert_equal(type(linalg.slogdet([[0.0]])[0]), double) + assert_equal(type(linalg.slogdet([[0.0]])[1]), double) + assert_equal(linalg.slogdet([[0.0j]]), (0.0j, -inf)) + assert_equal(type(linalg.slogdet([[0.0j]])[0]), cdouble) + assert_equal(type(linalg.slogdet([[0.0j]])[1]), double) + + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + assert_equal(np.linalg.det(x).dtype, dtype) + ph, s = np.linalg.slogdet(x) + assert_equal(s.dtype, get_real_dtype(dtype)) + assert_equal(ph.dtype, dtype) + + def test_0_size(self): + a = np.zeros((0, 0), dtype=np.complex64) + res = linalg.det(a) + assert_equal(res, 1.) + assert_(res.dtype.type is np.complex64) + res = linalg.slogdet(a) + assert_equal(res, (1, 0)) + assert_(res[0].dtype.type is np.complex64) + assert_(res[1].dtype.type is np.float32) + + a = np.zeros((0, 0), dtype=np.float64) + res = linalg.det(a) + assert_equal(res, 1.) + assert_(res.dtype.type is np.float64) + res = linalg.slogdet(a) + assert_equal(res, (1, 0)) + assert_(res[0].dtype.type is np.float64) + assert_(res[1].dtype.type is np.float64) + + +class LstsqCases(LinalgSquareTestCase, LinalgNonsquareTestCase): + + def do(self, a, b, tags): + arr = np.asarray(a) + m, n = arr.shape + u, s, vt = linalg.svd(a, False) + x, residuals, rank, sv = linalg.lstsq(a, b, rcond=-1) + if m == 0: + assert_((x == 0).all()) + if m <= n: + assert_almost_equal(b, dot(a, x)) + assert_equal(rank, m) + else: + assert_equal(rank, n) + assert_almost_equal(sv, sv.__array_wrap__(s)) + if rank == n and m > n: + expect_resids = ( + np.asarray(abs(np.dot(a, x) - b)) ** 2).sum(axis=0) + expect_resids = np.asarray(expect_resids) + if np.asarray(b).ndim == 1: + expect_resids.shape = (1,) + assert_equal(residuals.shape, expect_resids.shape) + else: + expect_resids = np.array([]).view(type(x)) + assert_almost_equal(residuals, expect_resids) + assert_(np.issubdtype(residuals.dtype, np.floating)) + assert_(consistent_subclass(x, b)) + assert_(consistent_subclass(residuals, b)) + + +class TestLstsq(LstsqCases): + def test_future_rcond(self): + a = np.array([[0., 1., 0., 1., 2., 0.], + [0., 2., 0., 0., 1., 0.], + [1., 0., 1., 0., 0., 4.], + [0., 0., 0., 2., 3., 0.]]).T + + b = np.array([1, 0, 0, 0, 0, 0]) + with suppress_warnings() as sup: + w = sup.record(FutureWarning, "`rcond` parameter will change") + x, residuals, rank, s = linalg.lstsq(a, b) + assert_(rank == 4) + x, residuals, rank, s = linalg.lstsq(a, b, rcond=-1) + assert_(rank == 4) + x, residuals, rank, s = linalg.lstsq(a, b, rcond=None) + assert_(rank == 3) + # Warning should be raised exactly once (first command) + assert_(len(w) == 1) + + @pytest.mark.parametrize(["m", "n", "n_rhs"], [ + (4, 2, 2), + (0, 4, 1), + (0, 4, 2), + (4, 0, 1), + (4, 0, 2), + (4, 2, 0), + (0, 0, 0) + ]) + def test_empty_a_b(self, m, n, n_rhs): + a = np.arange(m * n).reshape(m, n) + b = np.ones((m, n_rhs)) + x, residuals, rank, s = linalg.lstsq(a, b, rcond=None) + if m == 0: + assert_((x == 0).all()) + assert_equal(x.shape, 
(n, n_rhs)) + assert_equal(residuals.shape, ((n_rhs,) if m > n else (0,))) + if m > n and n_rhs > 0: + # residuals are exactly the squared norms of b's columns + r = b - np.dot(a, x) + assert_almost_equal(residuals, (r * r).sum(axis=-2)) + assert_equal(rank, min(m, n)) + assert_equal(s.shape, (min(m, n),)) + + def test_incompatible_dims(self): + # use modified version of docstring example + x = np.array([0, 1, 2, 3]) + y = np.array([-1, 0.2, 0.9, 2.1, 3.3]) + A = np.vstack([x, np.ones(len(x))]).T + with assert_raises_regex(LinAlgError, "Incompatible dimensions"): + linalg.lstsq(A, y, rcond=None) + + +@pytest.mark.parametrize('dt', [np.dtype(c) for c in '?bBhHiIqQefdgFDGO']) +class TestMatrixPower: + + rshft_0 = np.eye(4) + rshft_1 = rshft_0[[3, 0, 1, 2]] + rshft_2 = rshft_0[[2, 3, 0, 1]] + rshft_3 = rshft_0[[1, 2, 3, 0]] + rshft_all = [rshft_0, rshft_1, rshft_2, rshft_3] + noninv = array([[1, 0], [0, 0]]) + stacked = np.block([[[rshft_0]]]*2) + #FIXME the 'e' dtype might work in future + dtnoinv = [object, np.dtype('e'), np.dtype('g'), np.dtype('G')] + + def test_large_power(self, dt): + rshft = self.rshft_1.astype(dt) + assert_equal( + matrix_power(rshft, 2**100 + 2**10 + 2**5 + 0), self.rshft_0) + assert_equal( + matrix_power(rshft, 2**100 + 2**10 + 2**5 + 1), self.rshft_1) + assert_equal( + matrix_power(rshft, 2**100 + 2**10 + 2**5 + 2), self.rshft_2) + assert_equal( + matrix_power(rshft, 2**100 + 2**10 + 2**5 + 3), self.rshft_3) + + def test_power_is_zero(self, dt): + def tz(M): + mz = matrix_power(M, 0) + assert_equal(mz, identity_like_generalized(M)) + assert_equal(mz.dtype, M.dtype) + + for mat in self.rshft_all: + tz(mat.astype(dt)) + if dt != object: + tz(self.stacked.astype(dt)) + + def test_power_is_one(self, dt): + def tz(mat): + mz = matrix_power(mat, 1) + assert_equal(mz, mat) + assert_equal(mz.dtype, mat.dtype) + + for mat in self.rshft_all: + tz(mat.astype(dt)) + if dt != object: + tz(self.stacked.astype(dt)) + + def test_power_is_two(self, dt): + def tz(mat): + mz = matrix_power(mat, 2) + mmul = matmul if mat.dtype != object else dot + assert_equal(mz, mmul(mat, mat)) + assert_equal(mz.dtype, mat.dtype) + + for mat in self.rshft_all: + tz(mat.astype(dt)) + if dt != object: + tz(self.stacked.astype(dt)) + + def test_power_is_minus_one(self, dt): + def tz(mat): + invmat = matrix_power(mat, -1) + mmul = matmul if mat.dtype != object else dot + assert_almost_equal( + mmul(invmat, mat), identity_like_generalized(mat)) + + for mat in self.rshft_all: + if dt not in self.dtnoinv: + tz(mat.astype(dt)) + + def test_exceptions_bad_power(self, dt): + mat = self.rshft_0.astype(dt) + assert_raises(TypeError, matrix_power, mat, 1.5) + assert_raises(TypeError, matrix_power, mat, [1]) + + def test_exceptions_non_square(self, dt): + assert_raises(LinAlgError, matrix_power, np.array([1], dt), 1) + assert_raises(LinAlgError, matrix_power, np.array([[1], [2]], dt), 1) + assert_raises(LinAlgError, matrix_power, np.ones((4, 3, 2), dt), 1) + + def test_exceptions_not_invertible(self, dt): + if dt in self.dtnoinv: + return + mat = self.noninv.astype(dt) + assert_raises(LinAlgError, matrix_power, mat, -1) + + + +class TestEigvalshCases(HermitianTestCase, HermitianGeneralizedTestCase): + + def do(self, a, b, tags): + # note that eigenvalue arrays returned by eig must be sorted since + # their order isn't guaranteed. 
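+        # Descriptive note (added): eigvalsh returns eigenvalues in ascending
+        # order, whereas eig makes no ordering guarantee, so the eig result
+        # is sorted before the two are compared.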
+ ev = linalg.eigvalsh(a, 'L') + evalues, evectors = linalg.eig(a) + evalues.sort(axis=-1) + assert_allclose(ev, evalues, rtol=get_rtol(ev.dtype)) + + ev2 = linalg.eigvalsh(a, 'U') + assert_allclose(ev2, evalues, rtol=get_rtol(ev.dtype)) + + +class TestEigvalsh: + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + w = np.linalg.eigvalsh(x) + assert_equal(w.dtype, get_real_dtype(dtype)) + + def test_invalid(self): + x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32) + assert_raises(ValueError, np.linalg.eigvalsh, x, UPLO="lrong") + assert_raises(ValueError, np.linalg.eigvalsh, x, "lower") + assert_raises(ValueError, np.linalg.eigvalsh, x, "upper") + + def test_UPLO(self): + Klo = np.array([[0, 0], [1, 0]], dtype=np.double) + Kup = np.array([[0, 1], [0, 0]], dtype=np.double) + tgt = np.array([-1, 1], dtype=np.double) + rtol = get_rtol(np.double) + + # Check default is 'L' + w = np.linalg.eigvalsh(Klo) + assert_allclose(w, tgt, rtol=rtol) + # Check 'L' + w = np.linalg.eigvalsh(Klo, UPLO='L') + assert_allclose(w, tgt, rtol=rtol) + # Check 'l' + w = np.linalg.eigvalsh(Klo, UPLO='l') + assert_allclose(w, tgt, rtol=rtol) + # Check 'U' + w = np.linalg.eigvalsh(Kup, UPLO='U') + assert_allclose(w, tgt, rtol=rtol) + # Check 'u' + w = np.linalg.eigvalsh(Kup, UPLO='u') + assert_allclose(w, tgt, rtol=rtol) + + def test_0_size(self): + # Check that all kinds of 0-sized arrays work + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res = linalg.eigvalsh(a) + assert_(res.dtype.type is np.float64) + assert_equal((0, 1), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(res, np.ndarray)) + + a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) + res = linalg.eigvalsh(a) + assert_(res.dtype.type is np.float32) + assert_equal((0,), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(res, np.ndarray)) + + +class TestEighCases(HermitianTestCase, HermitianGeneralizedTestCase): + + def do(self, a, b, tags): + # note that eigenvalue arrays returned by eig must be sorted since + # their order isn't guaranteed. 
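+        # Descriptive note (added): eigh returns (eigenvalues, eigenvectors);
+        # each eigenpair must satisfy a @ v == w * v, which the broadcasting
+        # check below verifies for every matrix in the stack.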
+ ev, evc = linalg.eigh(a) + evalues, evectors = linalg.eig(a) + evalues.sort(axis=-1) + assert_almost_equal(ev, evalues) + + assert_allclose(dot_generalized(a, evc), + np.asarray(ev)[..., None, :] * np.asarray(evc), + rtol=get_rtol(ev.dtype)) + + ev2, evc2 = linalg.eigh(a, 'U') + assert_almost_equal(ev2, evalues) + + assert_allclose(dot_generalized(a, evc2), + np.asarray(ev2)[..., None, :] * np.asarray(evc2), + rtol=get_rtol(ev.dtype), err_msg=repr(a)) + + +class TestEigh: + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + w, v = np.linalg.eigh(x) + assert_equal(w.dtype, get_real_dtype(dtype)) + assert_equal(v.dtype, dtype) + + def test_invalid(self): + x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32) + assert_raises(ValueError, np.linalg.eigh, x, UPLO="lrong") + assert_raises(ValueError, np.linalg.eigh, x, "lower") + assert_raises(ValueError, np.linalg.eigh, x, "upper") + + def test_UPLO(self): + Klo = np.array([[0, 0], [1, 0]], dtype=np.double) + Kup = np.array([[0, 1], [0, 0]], dtype=np.double) + tgt = np.array([-1, 1], dtype=np.double) + rtol = get_rtol(np.double) + + # Check default is 'L' + w, v = np.linalg.eigh(Klo) + assert_allclose(w, tgt, rtol=rtol) + # Check 'L' + w, v = np.linalg.eigh(Klo, UPLO='L') + assert_allclose(w, tgt, rtol=rtol) + # Check 'l' + w, v = np.linalg.eigh(Klo, UPLO='l') + assert_allclose(w, tgt, rtol=rtol) + # Check 'U' + w, v = np.linalg.eigh(Kup, UPLO='U') + assert_allclose(w, tgt, rtol=rtol) + # Check 'u' + w, v = np.linalg.eigh(Kup, UPLO='u') + assert_allclose(w, tgt, rtol=rtol) + + def test_0_size(self): + # Check that all kinds of 0-sized arrays work + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res, res_v = linalg.eigh(a) + assert_(res_v.dtype.type is np.float64) + assert_(res.dtype.type is np.float64) + assert_equal(a.shape, res_v.shape) + assert_equal((0, 1), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(a, np.ndarray)) + + a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) + res, res_v = linalg.eigh(a) + assert_(res_v.dtype.type is np.complex64) + assert_(res.dtype.type is np.float32) + assert_equal(a.shape, res_v.shape) + assert_equal((0,), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(a, np.ndarray)) + + +class _TestNormBase: + dt = None + dec = None + + +class _TestNormGeneral(_TestNormBase): + + def test_empty(self): + assert_equal(norm([]), 0.0) + assert_equal(norm(array([], dtype=self.dt)), 0.0) + assert_equal(norm(atleast_2d(array([], dtype=self.dt))), 0.0) + + def test_vector_return_type(self): + a = np.array([1, 0, 1]) + + exact_types = np.typecodes['AllInteger'] + inexact_types = np.typecodes['AllFloat'] + + all_types = exact_types + inexact_types + + for each_inexact_types in all_types: + at = a.astype(each_inexact_types) + + an = norm(at, -np.inf) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 0.0) + + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "divide by zero encountered") + an = norm(at, -1) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 0.0) + + an = norm(at, 0) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 2) + + an = norm(at, 1) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 2.0) + + an = norm(at, 2) + 
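+            # Descriptive note (added): the 2-norm of [1, 0, 1] is sqrt(2);
+            # the expected value below is computed in the result's own dtype
+            # so that low-precision floats are compared at their own precision.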
assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0/2.0)) + + an = norm(at, 4) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0/4.0)) + + an = norm(at, np.inf) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 1.0) + + def test_vector(self): + a = [1, 2, 3, 4] + b = [-1, -2, -3, -4] + c = [-1, 2, -3, 4] + + def _test(v): + np.testing.assert_almost_equal(norm(v), 30 ** 0.5, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, inf), 4.0, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, -inf), 1.0, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, 1), 10.0, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, -1), 12.0 / 25, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, 2), 30 ** 0.5, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, -2), ((205. / 144) ** -0.5), + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, 0), 4, + decimal=self.dec) + + for v in (a, b, c,): + _test(v) + + for v in (array(a, dtype=self.dt), array(b, dtype=self.dt), + array(c, dtype=self.dt)): + _test(v) + + def test_axis(self): + # Vector norms. + # Compare the use of `axis` with computing the norm of each row + # or column separately. + A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt) + for order in [None, -1, 0, 1, 2, 3, np.Inf, -np.Inf]: + expected0 = [norm(A[:, k], ord=order) for k in range(A.shape[1])] + assert_almost_equal(norm(A, ord=order, axis=0), expected0) + expected1 = [norm(A[k, :], ord=order) for k in range(A.shape[0])] + assert_almost_equal(norm(A, ord=order, axis=1), expected1) + + # Matrix norms. + B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4) + nd = B.ndim + for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro']: + for axis in itertools.combinations(range(-nd, nd), 2): + row_axis, col_axis = axis + if row_axis < 0: + row_axis += nd + if col_axis < 0: + col_axis += nd + if row_axis == col_axis: + assert_raises(ValueError, norm, B, ord=order, axis=axis) + else: + n = norm(B, ord=order, axis=axis) + + # The logic using k_index only works for nd = 3. + # This has to be changed if nd is increased. + k_index = nd - (row_axis + col_axis) + if row_axis < col_axis: + expected = [norm(B[:].take(k, axis=k_index), ord=order) + for k in range(B.shape[k_index])] + else: + expected = [norm(B[:].take(k, axis=k_index).T, ord=order) + for k in range(B.shape[k_index])] + assert_almost_equal(n, expected) + + def test_keepdims(self): + A = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4) + + allclose_err = 'order {0}, axis = {1}' + shape_err = 'Shape mismatch found {0}, expected {1}, order={2}, axis={3}' + + # check the order=None, axis=None case + expected = norm(A, ord=None, axis=None) + found = norm(A, ord=None, axis=None, keepdims=True) + assert_allclose(np.squeeze(found), expected, + err_msg=allclose_err.format(None, None)) + expected_shape = (1, 1, 1) + assert_(found.shape == expected_shape, + shape_err.format(found.shape, expected_shape, None, None)) + + # Vector norms. 
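+        # Descriptive note (added): with keepdims=True the reduced axes are
+        # kept with length 1, so np.squeeze should recover the ordinary
+        # (keepdims=False) result while ndim stays unchanged.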
+ for order in [None, -1, 0, 1, 2, 3, np.Inf, -np.Inf]: + for k in range(A.ndim): + expected = norm(A, ord=order, axis=k) + found = norm(A, ord=order, axis=k, keepdims=True) + assert_allclose(np.squeeze(found), expected, + err_msg=allclose_err.format(order, k)) + expected_shape = list(A.shape) + expected_shape[k] = 1 + expected_shape = tuple(expected_shape) + assert_(found.shape == expected_shape, + shape_err.format(found.shape, expected_shape, order, k)) + + # Matrix norms. + for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro', 'nuc']: + for k in itertools.permutations(range(A.ndim), 2): + expected = norm(A, ord=order, axis=k) + found = norm(A, ord=order, axis=k, keepdims=True) + assert_allclose(np.squeeze(found), expected, + err_msg=allclose_err.format(order, k)) + expected_shape = list(A.shape) + expected_shape[k[0]] = 1 + expected_shape[k[1]] = 1 + expected_shape = tuple(expected_shape) + assert_(found.shape == expected_shape, + shape_err.format(found.shape, expected_shape, order, k)) + + +class _TestNorm2D(_TestNormBase): + # Define the part for 2d arrays separately, so we can subclass this + # and run the tests using np.matrix in matrixlib.tests.test_matrix_linalg. + array = np.array + + def test_matrix_empty(self): + assert_equal(norm(self.array([[]], dtype=self.dt)), 0.0) + + def test_matrix_return_type(self): + a = self.array([[1, 0, 1], [0, 1, 1]]) + + exact_types = np.typecodes['AllInteger'] + + # float32, complex64, float64, complex128 types are the only types + # allowed by `linalg`, which performs the matrix operations used + # within `norm`. + inexact_types = 'fdFD' + + all_types = exact_types + inexact_types + + for each_inexact_types in all_types: + at = a.astype(each_inexact_types) + + an = norm(at, -np.inf) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 2.0) + + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "divide by zero encountered") + an = norm(at, -1) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 1.0) + + an = norm(at, 1) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 2.0) + + an = norm(at, 2) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 3.0**(1.0/2.0)) + + an = norm(at, -2) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 1.0) + + an = norm(at, np.inf) + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 2.0) + + an = norm(at, 'fro') + assert_(issubclass(an.dtype.type, np.floating)) + assert_almost_equal(an, 2.0) + + an = norm(at, 'nuc') + assert_(issubclass(an.dtype.type, np.floating)) + # Lower bar needed to support low precision floats. + # They end up being off by 1 in the 7th place. 
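+            # Descriptive note (added): 'nuc' is the nuclear norm, the sum of
+            # the singular values; for this matrix that is 1 + sqrt(3),
+            # i.e. 2.7320508...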
+ np.testing.assert_almost_equal(an, 2.7320508075688772, decimal=6) + + def test_matrix_2x2(self): + A = self.array([[1, 3], [5, 7]], dtype=self.dt) + assert_almost_equal(norm(A), 84 ** 0.5) + assert_almost_equal(norm(A, 'fro'), 84 ** 0.5) + assert_almost_equal(norm(A, 'nuc'), 10.0) + assert_almost_equal(norm(A, inf), 12.0) + assert_almost_equal(norm(A, -inf), 4.0) + assert_almost_equal(norm(A, 1), 10.0) + assert_almost_equal(norm(A, -1), 6.0) + assert_almost_equal(norm(A, 2), 9.1231056256176615) + assert_almost_equal(norm(A, -2), 0.87689437438234041) + + assert_raises(ValueError, norm, A, 'nofro') + assert_raises(ValueError, norm, A, -3) + assert_raises(ValueError, norm, A, 0) + + def test_matrix_3x3(self): + # This test has been added because the 2x2 example + # happened to have equal nuclear norm and induced 1-norm. + # The 1/10 scaling factor accommodates the absolute tolerance + # used in assert_almost_equal. + A = (1 / 10) * \ + self.array([[1, 2, 3], [6, 0, 5], [3, 2, 1]], dtype=self.dt) + assert_almost_equal(norm(A), (1 / 10) * 89 ** 0.5) + assert_almost_equal(norm(A, 'fro'), (1 / 10) * 89 ** 0.5) + assert_almost_equal(norm(A, 'nuc'), 1.3366836911774836) + assert_almost_equal(norm(A, inf), 1.1) + assert_almost_equal(norm(A, -inf), 0.6) + assert_almost_equal(norm(A, 1), 1.0) + assert_almost_equal(norm(A, -1), 0.4) + assert_almost_equal(norm(A, 2), 0.88722940323461277) + assert_almost_equal(norm(A, -2), 0.19456584790481812) + + def test_bad_args(self): + # Check that bad arguments raise the appropriate exceptions. + + A = self.array([[1, 2, 3], [4, 5, 6]], dtype=self.dt) + B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4) + + # Using `axis=` or passing in a 1-D array implies vector + # norms are being computed, so also using `ord='fro'` + # or `ord='nuc'` or any other string raises a ValueError. + assert_raises(ValueError, norm, A, 'fro', 0) + assert_raises(ValueError, norm, A, 'nuc', 0) + assert_raises(ValueError, norm, [3, 4], 'fro', None) + assert_raises(ValueError, norm, [3, 4], 'nuc', None) + assert_raises(ValueError, norm, [3, 4], 'test', None) + + # Similarly, norm should raise an exception when ord is any finite + # number other than 1, 2, -1 or -2 when computing matrix norms. + for order in [0, 3]: + assert_raises(ValueError, norm, A, order, None) + assert_raises(ValueError, norm, A, order, (0, 1)) + assert_raises(ValueError, norm, B, order, (1, 2)) + + # Invalid axis + assert_raises(np.AxisError, norm, B, None, 3) + assert_raises(np.AxisError, norm, B, None, (2, 3)) + assert_raises(ValueError, norm, B, None, (0, 1, 2)) + + +class _TestNorm(_TestNorm2D, _TestNormGeneral): + pass + + +class TestNorm_NonSystematic: + + def test_longdouble_norm(self): + # Non-regression test: p-norm of longdouble would previously raise + # UnboundLocalError. + x = np.arange(10, dtype=np.longdouble) + old_assert_almost_equal(norm(x, ord=3), 12.65, decimal=2) + + def test_intmin(self): + # Non-regression test: p-norm of signed integer would previously do + # float cast and abs in the wrong order. 
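+        # Descriptive note (added): abs(-2**31) does not fit in int32, so the
+        # cast to float has to happen before the absolute value is taken.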
+ x = np.array([-2 ** 31], dtype=np.int32) + old_assert_almost_equal(norm(x, ord=3), 2 ** 31, decimal=5) + + def test_complex_high_ord(self): + # gh-4156 + d = np.empty((2,), dtype=np.clongdouble) + d[0] = 6 + 7j + d[1] = -6 + 7j + res = 11.615898132184 + old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=10) + d = d.astype(np.complex128) + old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=9) + d = d.astype(np.complex64) + old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=5) + + +# Separate definitions so we can use them for matrix tests. +class _TestNormDoubleBase(_TestNormBase): + dt = np.double + dec = 12 + + +class _TestNormSingleBase(_TestNormBase): + dt = np.float32 + dec = 6 + + +class _TestNormInt64Base(_TestNormBase): + dt = np.int64 + dec = 12 + + +class TestNormDouble(_TestNorm, _TestNormDoubleBase): + pass + + +class TestNormSingle(_TestNorm, _TestNormSingleBase): + pass + + +class TestNormInt64(_TestNorm, _TestNormInt64Base): + pass + + +class TestMatrixRank: + + def test_matrix_rank(self): + # Full rank matrix + assert_equal(4, matrix_rank(np.eye(4))) + # rank deficient matrix + I = np.eye(4) + I[-1, -1] = 0. + assert_equal(matrix_rank(I), 3) + # All zeros - zero rank + assert_equal(matrix_rank(np.zeros((4, 4))), 0) + # 1 dimension - rank 1 unless all 0 + assert_equal(matrix_rank([1, 0, 0, 0]), 1) + assert_equal(matrix_rank(np.zeros((4,))), 0) + # accepts array-like + assert_equal(matrix_rank([1]), 1) + # greater than 2 dimensions treated as stacked matrices + ms = np.array([I, np.eye(4), np.zeros((4,4))]) + assert_equal(matrix_rank(ms), np.array([3, 4, 0])) + # works on scalar + assert_equal(matrix_rank(1), 1) + + def test_symmetric_rank(self): + assert_equal(4, matrix_rank(np.eye(4), hermitian=True)) + assert_equal(1, matrix_rank(np.ones((4, 4)), hermitian=True)) + assert_equal(0, matrix_rank(np.zeros((4, 4)), hermitian=True)) + # rank deficient matrix + I = np.eye(4) + I[-1, -1] = 0. + assert_equal(3, matrix_rank(I, hermitian=True)) + # manually supplied tolerance + I[-1, -1] = 1e-8 + assert_equal(4, matrix_rank(I, hermitian=True, tol=0.99e-8)) + assert_equal(3, matrix_rank(I, hermitian=True, tol=1.01e-8)) + + +def test_reduced_rank(): + # Test matrices with reduced rank + rng = np.random.RandomState(20120714) + for i in range(100): + # Make a rank deficient matrix + X = rng.normal(size=(40, 10)) + X[:, 0] = X[:, 1] + X[:, 2] + # Assert that matrix_rank detected deficiency + assert_equal(matrix_rank(X), 9) + X[:, 3] = X[:, 4] + X[:, 5] + assert_equal(matrix_rank(X), 8) + + +class TestQR: + # Define the array class here, so run this on matrices elsewhere. + array = np.array + + def check_qr(self, a): + # This test expects the argument `a` to be an ndarray or + # a subclass of an ndarray of inexact type. 
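+        # Descriptive note (added): the modes exercised below are 'complete'
+        # (q is m x m), 'reduced' (q is m x k with k = min(m, n)), and 'r'
+        # (only the r factor is returned).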
+ a_type = type(a) + a_dtype = a.dtype + m, n = a.shape + k = min(m, n) + + # mode == 'complete' + q, r = linalg.qr(a, mode='complete') + assert_(q.dtype == a_dtype) + assert_(r.dtype == a_dtype) + assert_(isinstance(q, a_type)) + assert_(isinstance(r, a_type)) + assert_(q.shape == (m, m)) + assert_(r.shape == (m, n)) + assert_almost_equal(dot(q, r), a) + assert_almost_equal(dot(q.T.conj(), q), np.eye(m)) + assert_almost_equal(np.triu(r), r) + + # mode == 'reduced' + q1, r1 = linalg.qr(a, mode='reduced') + assert_(q1.dtype == a_dtype) + assert_(r1.dtype == a_dtype) + assert_(isinstance(q1, a_type)) + assert_(isinstance(r1, a_type)) + assert_(q1.shape == (m, k)) + assert_(r1.shape == (k, n)) + assert_almost_equal(dot(q1, r1), a) + assert_almost_equal(dot(q1.T.conj(), q1), np.eye(k)) + assert_almost_equal(np.triu(r1), r1) + + # mode == 'r' + r2 = linalg.qr(a, mode='r') + assert_(r2.dtype == a_dtype) + assert_(isinstance(r2, a_type)) + assert_almost_equal(r2, r1) + + + @pytest.mark.parametrize(["m", "n"], [ + (3, 0), + (0, 3), + (0, 0) + ]) + def test_qr_empty(self, m, n): + k = min(m, n) + a = np.empty((m, n)) + + self.check_qr(a) + + h, tau = np.linalg.qr(a, mode='raw') + assert_equal(h.dtype, np.double) + assert_equal(tau.dtype, np.double) + assert_equal(h.shape, (n, m)) + assert_equal(tau.shape, (k,)) + + def test_mode_raw(self): + # The factorization is not unique and varies between libraries, + # so it is not possible to check against known values. Functional + # testing is a possibility, but awaits the exposure of more + # of the functions in lapack_lite. Consequently, this test is + # very limited in scope. Note that the results are in FORTRAN + # order, hence the h arrays are transposed. + a = self.array([[1, 2], [3, 4], [5, 6]], dtype=np.double) + + # Test double + h, tau = linalg.qr(a, mode='raw') + assert_(h.dtype == np.double) + assert_(tau.dtype == np.double) + assert_(h.shape == (2, 3)) + assert_(tau.shape == (2,)) + + h, tau = linalg.qr(a.T, mode='raw') + assert_(h.dtype == np.double) + assert_(tau.dtype == np.double) + assert_(h.shape == (3, 2)) + assert_(tau.shape == (2,)) + + def test_mode_all_but_economic(self): + a = self.array([[1, 2], [3, 4]]) + b = self.array([[1, 2], [3, 4], [5, 6]]) + for dt in "fd": + m1 = a.astype(dt) + m2 = b.astype(dt) + self.check_qr(m1) + self.check_qr(m2) + self.check_qr(m2.T) + + for dt in "fd": + m1 = 1 + 1j * a.astype(dt) + m2 = 1 + 1j * b.astype(dt) + self.check_qr(m1) + self.check_qr(m2) + self.check_qr(m2.T) + + +class TestCholesky: + # TODO: are there no other tests for cholesky? 
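+    # Descriptive note (added): test_basic_property below builds random
+    # Hermitian matrices as B^H @ B (in practice positive definite for
+    # random B) and checks that the returned factor reproduces A = L @ L^H.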
+ + def test_basic_property(self): + # Check A = L L^H + shapes = [(1, 1), (2, 2), (3, 3), (50, 50), (3, 10, 10)] + dtypes = (np.float32, np.float64, np.complex64, np.complex128) + + for shape, dtype in itertools.product(shapes, dtypes): + np.random.seed(1) + a = np.random.randn(*shape) + if np.issubdtype(dtype, np.complexfloating): + a = a + 1j*np.random.randn(*shape) + + t = list(range(len(shape))) + t[-2:] = -1, -2 + + a = np.matmul(a.transpose(t).conj(), a) + a = np.asarray(a, dtype=dtype) + + c = np.linalg.cholesky(a) + + b = np.matmul(c, c.transpose(t).conj()) + assert_allclose(b, a, + err_msg=f'{shape} {dtype}\n{a}\n{c}', + atol=500 * a.shape[0] * np.finfo(dtype).eps) + + def test_0_size(self): + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res = linalg.cholesky(a) + assert_equal(a.shape, res.shape) + assert_(res.dtype.type is np.float64) + # for documentation purpose: + assert_(isinstance(res, np.ndarray)) + + a = np.zeros((1, 0, 0), dtype=np.complex64).view(ArraySubclass) + res = linalg.cholesky(a) + assert_equal(a.shape, res.shape) + assert_(res.dtype.type is np.complex64) + assert_(isinstance(res, np.ndarray)) + + +def test_byteorder_check(): + # Byte order check should pass for native order + if sys.byteorder == 'little': + native = '<' + else: + native = '>' + + for dtt in (np.float32, np.float64): + arr = np.eye(4, dtype=dtt) + n_arr = arr.newbyteorder(native) + sw_arr = arr.newbyteorder('S').byteswap() + assert_equal(arr.dtype.byteorder, '=') + for routine in (linalg.inv, linalg.det, linalg.pinv): + # Normal call + res = routine(arr) + # Native but not '=' + assert_array_equal(res, routine(n_arr)) + # Swapped + assert_array_equal(res, routine(sw_arr)) + + +def test_generalized_raise_multiloop(): + # It should raise an error even if the error doesn't occur in the + # last iteration of the ufunc inner loop + + invertible = np.array([[1, 2], [3, 4]]) + non_invertible = np.array([[1, 1], [1, 1]]) + + x = np.zeros([4, 4, 2, 2])[1::2] + x[...] = invertible + x[0, 0] = non_invertible + + assert_raises(np.linalg.LinAlgError, np.linalg.inv, x) + + +def test_xerbla_override(): + # Check that our xerbla has been successfully linked in. If it is not, + # the default xerbla routine is called, which prints a message to stdout + # and may, or may not, abort the process depending on the LAPACK package. + + XERBLA_OK = 255 + + try: + pid = os.fork() + except (OSError, AttributeError): + # fork failed, or not running on POSIX + pytest.skip("Not POSIX or fork failed.") + + if pid == 0: + # child; close i/o file handles + os.close(1) + os.close(0) + # Avoid producing core files. + import resource + resource.setrlimit(resource.RLIMIT_CORE, (0, 0)) + # These calls may abort. + try: + np.linalg.lapack_lite.xerbla() + except ValueError: + pass + except Exception: + os._exit(os.EX_CONFIG) + + try: + a = np.array([[1.]]) + np.linalg.lapack_lite.dorgqr( + 1, 1, 1, a, + 0, # <- invalid value + a, a, 0, 0) + except ValueError as e: + if "DORGQR parameter number 5" in str(e): + # success, reuse error code to mark success as + # FORTRAN STOP returns as success. + os._exit(XERBLA_OK) + + # Did not abort, but our xerbla was not linked in. 
+        os._exit(os.EX_CONFIG)
+    else:
+        # parent
+        pid, status = os.wait()
+        if os.WEXITSTATUS(status) != XERBLA_OK:
+            pytest.skip('Numpy xerbla not linked in.')
+
+
+@pytest.mark.slow
+def test_sdot_bug_8577():
+    # Regression test that loading certain other libraries does not
+    # lead to wrong results in float32 linear algebra.
+    #
+    # There's a bug gh-8577 on OSX that can trigger this, and perhaps
+    # there are also other situations in which it occurs.
+    #
+    # Do the check in a separate process.
+
+    bad_libs = ['PyQt5.QtWidgets', 'IPython']
+
+    template = textwrap.dedent("""
+        import sys
+        {before}
+        try:
+            import {bad_lib}
+        except ImportError:
+            sys.exit(0)
+        {after}
+        x = np.ones(2, dtype=np.float32)
+        sys.exit(0 if np.allclose(x.dot(x), 2.0) else 1)
+        """)
+
+    for bad_lib in bad_libs:
+        code = template.format(before="import numpy as np", after="",
+                               bad_lib=bad_lib)
+        subprocess.check_call([sys.executable, "-c", code])
+
+        # Swapped import order
+        code = template.format(after="import numpy as np", before="",
+                               bad_lib=bad_lib)
+        subprocess.check_call([sys.executable, "-c", code])
+
+
+class TestMultiDot:
+
+    def test_basic_function_with_three_arguments(self):
+        # multi_dot with three arguments uses a fast hand-coded algorithm to
+        # determine the optimal order. Therefore test it separately.
+        A = np.random.random((6, 2))
+        B = np.random.random((2, 6))
+        C = np.random.random((6, 2))
+
+        assert_almost_equal(multi_dot([A, B, C]), A.dot(B).dot(C))
+        assert_almost_equal(multi_dot([A, B, C]), np.dot(A, np.dot(B, C)))
+
+    def test_basic_function_with_two_arguments(self):
+        # separate code path with two arguments
+        A = np.random.random((6, 2))
+        B = np.random.random((2, 6))
+
+        assert_almost_equal(multi_dot([A, B]), A.dot(B))
+        assert_almost_equal(multi_dot([A, B]), np.dot(A, B))
+
+    def test_basic_function_with_dynamic_programming_optimization(self):
+        # multi_dot with four or more arguments uses the dynamic programming
+        # optimization and therefore deserves a separate test.
+        A = np.random.random((6, 2))
+        B = np.random.random((2, 6))
+        C = np.random.random((6, 2))
+        D = np.random.random((2, 1))
+        assert_almost_equal(multi_dot([A, B, C, D]), A.dot(B).dot(C).dot(D))
+
+    def test_vector_as_first_argument(self):
+        # The first argument can be 1-D
+        A1d = np.random.random(2)  # 1-D
+        B = np.random.random((2, 6))
+        C = np.random.random((6, 2))
+        D = np.random.random((2, 2))
+
+        # the result should be 1-D
+        assert_equal(multi_dot([A1d, B, C, D]).shape, (2,))
+
+    def test_vector_as_last_argument(self):
+        # The last argument can be 1-D
+        A = np.random.random((6, 2))
+        B = np.random.random((2, 6))
+        C = np.random.random((6, 2))
+        D1d = np.random.random(2)  # 1-D
+
+        # the result should be 1-D
+        assert_equal(multi_dot([A, B, C, D1d]).shape, (6,))
+
+    def test_vector_as_first_and_last_argument(self):
+        # The first and last arguments can be 1-D
+        A1d = np.random.random(2)  # 1-D
+        B = np.random.random((2, 6))
+        C = np.random.random((6, 2))
+        D1d = np.random.random(2)  # 1-D
+
+        # the result should be a scalar
+        assert_equal(multi_dot([A1d, B, C, D1d]).shape, ())
+
+    def test_three_arguments_and_out(self):
+        # multi_dot with three arguments uses a fast hand-coded algorithm to
+        # determine the optimal order. Therefore test it separately.
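+        # Descriptive note (added): `out` must already have the shape of the
+        # final product, (6, 2) here; multi_dot writes the result into it
+        # and returns the same array.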
+        A = np.random.random((6, 2))
+        B = np.random.random((2, 6))
+        C = np.random.random((6, 2))
+
+        out = np.zeros((6, 2))
+        ret = multi_dot([A, B, C], out=out)
+        assert out is ret
+        assert_almost_equal(out, A.dot(B).dot(C))
+        assert_almost_equal(out, np.dot(A, np.dot(B, C)))
+
+    def test_two_arguments_and_out(self):
+        # separate code path with two arguments
+        A = np.random.random((6, 2))
+        B = np.random.random((2, 6))
+        out = np.zeros((6, 6))
+        ret = multi_dot([A, B], out=out)
+        assert out is ret
+        assert_almost_equal(out, A.dot(B))
+        assert_almost_equal(out, np.dot(A, B))
+
+    def test_dynamic_programming_optimization_and_out(self):
+        # multi_dot with four or more arguments uses the dynamic programming
+        # optimization and therefore deserves a separate test
+        A = np.random.random((6, 2))
+        B = np.random.random((2, 6))
+        C = np.random.random((6, 2))
+        D = np.random.random((2, 1))
+        out = np.zeros((6, 1))
+        ret = multi_dot([A, B, C, D], out=out)
+        assert out is ret
+        assert_almost_equal(out, A.dot(B).dot(C).dot(D))
+
+    def test_dynamic_programming_logic(self):
+        # Test for the dynamic programming part
+        # This test is directly taken from Cormen page 376.
+        arrays = [np.random.random((30, 35)),
+                  np.random.random((35, 15)),
+                  np.random.random((15, 5)),
+                  np.random.random((5, 10)),
+                  np.random.random((10, 20)),
+                  np.random.random((20, 25))]
+        m_expected = np.array([[0., 15750., 7875., 9375., 11875., 15125.],
+                               [0., 0., 2625., 4375., 7125., 10500.],
+                               [0., 0., 0., 750., 2500., 5375.],
+                               [0., 0., 0., 0., 1000., 3500.],
+                               [0., 0., 0., 0., 0., 5000.],
+                               [0., 0., 0., 0., 0., 0.]])
+        s_expected = np.array([[0, 1, 1, 3, 3, 3],
+                               [0, 0, 2, 3, 3, 3],
+                               [0, 0, 0, 3, 3, 3],
+                               [0, 0, 0, 0, 4, 5],
+                               [0, 0, 0, 0, 0, 5],
+                               [0, 0, 0, 0, 0, 0]], dtype=int)
+        s_expected -= 1  # Cormen uses 1-based index, python does not.
+
+        s, m = _multi_dot_matrix_chain_order(arrays, return_costs=True)
+
+        # Only the upper triangular part (without the diagonal) is interesting.
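+        # Descriptive note (added): m holds the minimal scalar-multiplication
+        # costs and s the optimal split points, matching the classic
+        # matrix-chain-order tables from Cormen.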
+ assert_almost_equal(np.triu(s[:-1, 1:]), + np.triu(s_expected[:-1, 1:])) + assert_almost_equal(np.triu(m), np.triu(m_expected)) + + def test_too_few_input_arrays(self): + assert_raises(ValueError, multi_dot, []) + assert_raises(ValueError, multi_dot, [np.random.random((3, 3))]) + + +class TestTensorinv: + + @pytest.mark.parametrize("arr, ind", [ + (np.ones((4, 6, 8, 2)), 2), + (np.ones((3, 3, 2)), 1), + ]) + def test_non_square_handling(self, arr, ind): + with assert_raises(LinAlgError): + linalg.tensorinv(arr, ind=ind) + + @pytest.mark.parametrize("shape, ind", [ + # examples from docstring + ((4, 6, 8, 3), 2), + ((24, 8, 3), 1), + ]) + def test_tensorinv_shape(self, shape, ind): + a = np.eye(24) + a.shape = shape + ainv = linalg.tensorinv(a=a, ind=ind) + expected = a.shape[ind:] + a.shape[:ind] + actual = ainv.shape + assert_equal(actual, expected) + + @pytest.mark.parametrize("ind", [ + 0, -2, + ]) + def test_tensorinv_ind_limit(self, ind): + a = np.eye(24) + a.shape = (4, 6, 8, 3) + with assert_raises(ValueError): + linalg.tensorinv(a=a, ind=ind) + + def test_tensorinv_result(self): + # mimic a docstring example + a = np.eye(24) + a.shape = (24, 8, 3) + ainv = linalg.tensorinv(a, ind=1) + b = np.ones(24) + assert_allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b)) + + +def test_unsupported_commontype(): + # linalg gracefully handles unsupported type + arr = np.array([[1, -2], [2, 5]], dtype='float16') + with assert_raises_regex(TypeError, "unsupported in linalg"): + linalg.cholesky(arr) + + +@pytest.mark.slow +@pytest.mark.xfail(not HAS_LAPACK64, run=False, + reason="Numpy not compiled with 64-bit BLAS/LAPACK") +@requires_memory(free_bytes=16e9) +def test_blas64_dot(): + n = 2**32 + a = np.zeros([1, n], dtype=np.float32) + b = np.ones([1, 1], dtype=np.float32) + a[0,-1] = 1 + c = np.dot(b, a) + assert_equal(c[0,-1], 1) + + +@pytest.mark.xfail(not HAS_LAPACK64, + reason="Numpy not compiled with 64-bit BLAS/LAPACK") +def test_blas64_geqrf_lwork_smoketest(): + # Smoke test LAPACK geqrf lwork call with 64-bit integers + dtype = np.float64 + lapack_routine = np.linalg.lapack_lite.dgeqrf + + m = 2**32 + 1 + n = 2**32 + 1 + lda = m + + # Dummy arrays, not referenced by the lapack routine, so don't + # need to be of the right size + a = np.zeros([1, 1], dtype=dtype) + work = np.zeros([1], dtype=dtype) + tau = np.zeros([1], dtype=dtype) + + # Size query + results = lapack_routine(m, n, a, lda, tau, work, -1, 0) + assert_equal(results['info'], 0) + assert_equal(results['m'], m) + assert_equal(results['n'], m) + + # Should result to an integer of a reasonable size + lwork = int(work.item()) + assert_(2**32 < lwork < 2**42) diff --git a/MLPY/Lib/site-packages/numpy/linalg/tests/test_regression.py b/MLPY/Lib/site-packages/numpy/linalg/tests/test_regression.py new file mode 100644 index 0000000000000000000000000000000000000000..0ffdb6d29ae5ff17102acae52287e310f9057ed7 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/linalg/tests/test_regression.py @@ -0,0 +1,148 @@ +""" Test functions for linalg module +""" +import warnings + +import numpy as np +from numpy import linalg, arange, float64, array, dot, transpose +from numpy.testing import ( + assert_, assert_raises, assert_equal, assert_array_equal, + assert_array_almost_equal, assert_array_less +) + + +class TestRegression: + + def test_eig_build(self): + # Ticket #652 + rva = array([1.03221168e+02 + 0.j, + -1.91843603e+01 + 0.j, + -6.04004526e-01 + 15.84422474j, + -6.04004526e-01 - 15.84422474j, + -1.13692929e+01 + 0.j, + 
-6.57612485e-01 + 10.41755503j, + -6.57612485e-01 - 10.41755503j, + 1.82126812e+01 + 0.j, + 1.06011014e+01 + 0.j, + 7.80732773e+00 + 0.j, + -7.65390898e-01 + 0.j, + 1.51971555e-15 + 0.j, + -1.51308713e-15 + 0.j]) + a = arange(13 * 13, dtype=float64) + a.shape = (13, 13) + a = a % 17 + va, ve = linalg.eig(a) + va.sort() + rva.sort() + assert_array_almost_equal(va, rva) + + def test_eigh_build(self): + # Ticket 662. + rvals = [68.60568999, 89.57756725, 106.67185574] + + cov = array([[77.70273908, 3.51489954, 15.64602427], + [3.51489954, 88.97013878, -1.07431931], + [15.64602427, -1.07431931, 98.18223512]]) + + vals, vecs = linalg.eigh(cov) + assert_array_almost_equal(vals, rvals) + + def test_svd_build(self): + # Ticket 627. + a = array([[0., 1.], [1., 1.], [2., 1.], [3., 1.]]) + m, n = a.shape + u, s, vh = linalg.svd(a) + + b = dot(transpose(u[:, n:]), a) + + assert_array_almost_equal(b, np.zeros((2, 2))) + + def test_norm_vector_badarg(self): + # Regression for #786: Frobenius norm for vectors raises + # ValueError. + assert_raises(ValueError, linalg.norm, array([1., 2., 3.]), 'fro') + + def test_lapack_endian(self): + # For bug #1482 + a = array([[5.7998084, -2.1825367], + [-2.1825367, 9.85910595]], dtype='>f8') + b = array(a, dtype=' 0.5) + assert_equal(c, 1) + assert_equal(np.linalg.matrix_rank(a), 1) + assert_array_less(1, np.linalg.norm(a, ord=2)) + + def test_norm_object_array(self): + # gh-7575 + testvector = np.array([np.array([0, 1]), 0, 0], dtype=object) + + norm = linalg.norm(testvector) + assert_array_equal(norm, [0, 1]) + assert_(norm.dtype == np.dtype('float64')) + + norm = linalg.norm(testvector, ord=1) + assert_array_equal(norm, [0, 1]) + assert_(norm.dtype != np.dtype('float64')) + + norm = linalg.norm(testvector, ord=2) + assert_array_equal(norm, [0, 1]) + assert_(norm.dtype == np.dtype('float64')) + + assert_raises(ValueError, linalg.norm, testvector, ord='fro') + assert_raises(ValueError, linalg.norm, testvector, ord='nuc') + assert_raises(ValueError, linalg.norm, testvector, ord=np.inf) + assert_raises(ValueError, linalg.norm, testvector, ord=-np.inf) + with warnings.catch_warnings(): + warnings.simplefilter("error", DeprecationWarning) + assert_raises((AttributeError, DeprecationWarning), + linalg.norm, testvector, ord=0) + assert_raises(ValueError, linalg.norm, testvector, ord=-1) + assert_raises(ValueError, linalg.norm, testvector, ord=-2) + + testmatrix = np.array([[np.array([0, 1]), 0, 0], + [0, 0, 0]], dtype=object) + + norm = linalg.norm(testmatrix) + assert_array_equal(norm, [0, 1]) + assert_(norm.dtype == np.dtype('float64')) + + norm = linalg.norm(testmatrix, ord='fro') + assert_array_equal(norm, [0, 1]) + assert_(norm.dtype == np.dtype('float64')) + + assert_raises(TypeError, linalg.norm, testmatrix, ord='nuc') + assert_raises(ValueError, linalg.norm, testmatrix, ord=np.inf) + assert_raises(ValueError, linalg.norm, testmatrix, ord=-np.inf) + assert_raises(ValueError, linalg.norm, testmatrix, ord=0) + assert_raises(ValueError, linalg.norm, testmatrix, ord=1) + assert_raises(ValueError, linalg.norm, testmatrix, ord=-1) + assert_raises(TypeError, linalg.norm, testmatrix, ord=2) + assert_raises(TypeError, linalg.norm, testmatrix, ord=-2) + assert_raises(ValueError, linalg.norm, testmatrix, ord=3) + + def test_lstsq_complex_larger_rhs(self): + # gh-9891 + size = 20 + n_rhs = 70 + G = np.random.randn(size, size) + 1j * np.random.randn(size, size) + u = np.random.randn(size, n_rhs) + 1j * np.random.randn(size, n_rhs) + b = G.dot(u) + # This should work without 
segmentation fault. + u_lstsq, res, rank, sv = linalg.lstsq(G, b, rcond=None) + # check results just in case + assert_array_almost_equal(u_lstsq, u) diff --git a/MLPY/Lib/site-packages/numpy/ma/__init__.py b/MLPY/Lib/site-packages/numpy/ma/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b666aaeb6889a369b1bba08fae97d489a5e4c3a4 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/ma/__init__.py @@ -0,0 +1,54 @@ +""" +============= +Masked Arrays +============= + +Arrays sometimes contain invalid or missing data. When doing operations +on such arrays, we wish to suppress invalid values, which is the purpose masked +arrays fulfill (an example of typical use is given below). + +For example, examine the following array: + +>>> x = np.array([2, 1, 3, np.nan, 5, 2, 3, np.nan]) + +When we try to calculate the mean of the data, the result is undetermined: + +>>> np.mean(x) +nan + +The mean is calculated using roughly ``np.sum(x)/len(x)``, but since +any number added to ``NaN`` [1]_ produces ``NaN``, this doesn't work. Enter +masked arrays: + +>>> m = np.ma.masked_array(x, np.isnan(x)) +>>> m +masked_array(data = [2.0 1.0 3.0 -- 5.0 2.0 3.0 --], + mask = [False False False True False False False True], + fill_value=1e+20) + +Here, we construct a masked array that suppress all ``NaN`` values. We +may now proceed to calculate the mean of the other values: + +>>> np.mean(m) +2.6666666666666665 + +.. [1] Not-a-Number, a floating point value that is the result of an + invalid operation. + +.. moduleauthor:: Pierre Gerard-Marchant +.. moduleauthor:: Jarrod Millman + +""" +from . import core +from .core import * + +from . import extras +from .extras import * + +__all__ = ['core', 'extras'] +__all__ += core.__all__ +__all__ += extras.__all__ + +from numpy._pytesttester import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/MLPY/Lib/site-packages/numpy/ma/__init__.pyi b/MLPY/Lib/site-packages/numpy/ma/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..b35d4a3d88464d6369081a835029f7390807f4b8 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/ma/__init__.pyi @@ -0,0 +1,232 @@ +from typing import Any, List + +from numpy.ma import extras as extras + +from numpy.ma.core import ( + MAError as MAError, + MaskError as MaskError, + MaskType as MaskType, + MaskedArray as MaskedArray, + abs as abs, + absolute as absolute, + add as add, + all as all, + allclose as allclose, + allequal as allequal, + alltrue as alltrue, + amax as amax, + amin as amin, + angle as angle, + anom as anom, + anomalies as anomalies, + any as any, + append as append, + arange as arange, + arccos as arccos, + arccosh as arccosh, + arcsin as arcsin, + arcsinh as arcsinh, + arctan as arctan, + arctan2 as arctan2, + arctanh as arctanh, + argmax as argmax, + argmin as argmin, + argsort as argsort, + around as around, + array as array, + asanyarray as asanyarray, + asarray as asarray, + bitwise_and as bitwise_and, + bitwise_or as bitwise_or, + bitwise_xor as bitwise_xor, + bool_ as bool_, + ceil as ceil, + choose as choose, + clip as clip, + common_fill_value as common_fill_value, + compress as compress, + compressed as compressed, + concatenate as concatenate, + conjugate as conjugate, + convolve as convolve, + copy as copy, + correlate as correlate, + cos as cos, + cosh as cosh, + count as count, + cumprod as cumprod, + cumsum as cumsum, + default_fill_value as default_fill_value, + diag as diag, + diagonal as diagonal, + diff as diff, + divide as divide, + empty 
as empty, + empty_like as empty_like, + equal as equal, + exp as exp, + expand_dims as expand_dims, + fabs as fabs, + filled as filled, + fix_invalid as fix_invalid, + flatten_mask as flatten_mask, + flatten_structured_array as flatten_structured_array, + floor as floor, + floor_divide as floor_divide, + fmod as fmod, + frombuffer as frombuffer, + fromflex as fromflex, + fromfunction as fromfunction, + getdata as getdata, + getmask as getmask, + getmaskarray as getmaskarray, + greater as greater, + greater_equal as greater_equal, + harden_mask as harden_mask, + hypot as hypot, + identity as identity, + ids as ids, + indices as indices, + inner as inner, + innerproduct as innerproduct, + isMA as isMA, + isMaskedArray as isMaskedArray, + is_mask as is_mask, + is_masked as is_masked, + isarray as isarray, + left_shift as left_shift, + less as less, + less_equal as less_equal, + log as log, + log10 as log10, + log2 as log2, + logical_and as logical_and, + logical_not as logical_not, + logical_or as logical_or, + logical_xor as logical_xor, + make_mask as make_mask, + make_mask_descr as make_mask_descr, + make_mask_none as make_mask_none, + mask_or as mask_or, + masked as masked, + masked_array as masked_array, + masked_equal as masked_equal, + masked_greater as masked_greater, + masked_greater_equal as masked_greater_equal, + masked_inside as masked_inside, + masked_invalid as masked_invalid, + masked_less as masked_less, + masked_less_equal as masked_less_equal, + masked_not_equal as masked_not_equal, + masked_object as masked_object, + masked_outside as masked_outside, + masked_print_option as masked_print_option, + masked_singleton as masked_singleton, + masked_values as masked_values, + masked_where as masked_where, + max as max, + maximum as maximum, + maximum_fill_value as maximum_fill_value, + mean as mean, + min as min, + minimum as minimum, + minimum_fill_value as minimum_fill_value, + mod as mod, + multiply as multiply, + mvoid as mvoid, + ndim as ndim, + negative as negative, + nomask as nomask, + nonzero as nonzero, + not_equal as not_equal, + ones as ones, + outer as outer, + outerproduct as outerproduct, + power as power, + prod as prod, + product as product, + ptp as ptp, + put as put, + putmask as putmask, + ravel as ravel, + remainder as remainder, + repeat as repeat, + reshape as reshape, + resize as resize, + right_shift as right_shift, + round as round, + round_ as round_, + set_fill_value as set_fill_value, + shape as shape, + sin as sin, + sinh as sinh, + size as size, + soften_mask as soften_mask, + sometrue as sometrue, + sort as sort, + sqrt as sqrt, + squeeze as squeeze, + std as std, + subtract as subtract, + sum as sum, + swapaxes as swapaxes, + take as take, + tan as tan, + tanh as tanh, + trace as trace, + transpose as transpose, + true_divide as true_divide, + var as var, + where as where, + zeros as zeros, +) + +from numpy.ma.extras import ( + apply_along_axis as apply_along_axis, + apply_over_axes as apply_over_axes, + atleast_1d as atleast_1d, + atleast_2d as atleast_2d, + atleast_3d as atleast_3d, + average as average, + clump_masked as clump_masked, + clump_unmasked as clump_unmasked, + column_stack as column_stack, + compress_cols as compress_cols, + compress_nd as compress_nd, + compress_rowcols as compress_rowcols, + compress_rows as compress_rows, + count_masked as count_masked, + corrcoef as corrcoef, + cov as cov, + diagflat as diagflat, + dot as dot, + dstack as dstack, + ediff1d as ediff1d, + flatnotmasked_contiguous as flatnotmasked_contiguous, + 
flatnotmasked_edges as flatnotmasked_edges, + hsplit as hsplit, + hstack as hstack, + isin as isin, + in1d as in1d, + intersect1d as intersect1d, + mask_cols as mask_cols, + mask_rowcols as mask_rowcols, + mask_rows as mask_rows, + masked_all as masked_all, + masked_all_like as masked_all_like, + median as median, + mr_ as mr_, + notmasked_contiguous as notmasked_contiguous, + notmasked_edges as notmasked_edges, + polyfit as polyfit, + row_stack as row_stack, + setdiff1d as setdiff1d, + setxor1d as setxor1d, + stack as stack, + unique as unique, + union1d as union1d, + vander as vander, + vstack as vstack, +) + +__all__: List[str] diff --git a/MLPY/Lib/site-packages/numpy/ma/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/ma/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..25f4bb63073d0f9c21238945484948eafd15d3a1 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/ma/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/ma/__pycache__/bench.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/ma/__pycache__/bench.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..695ef7f34a568bf2600e2672f6168c97aa1e689b Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/ma/__pycache__/bench.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/ma/__pycache__/core.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/ma/__pycache__/core.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..54f330d4c9caa4b6001a6dfbfaffc82583561b84 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/ma/__pycache__/core.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/ma/__pycache__/extras.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/ma/__pycache__/extras.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6e854ecac619ccbf366cf8305d2cfd2e6a4eeb2 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/ma/__pycache__/extras.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/ma/__pycache__/mrecords.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/ma/__pycache__/mrecords.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..09f864dd237b0cc5ce3aafc62d78cce82eeea7b5 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/ma/__pycache__/mrecords.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/ma/__pycache__/setup.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/ma/__pycache__/setup.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c7a4450689d12535d1cf7ee651c9cb1b9a360b77 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/ma/__pycache__/setup.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/ma/__pycache__/testutils.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/ma/__pycache__/testutils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..30ec6f851f35062d90230334b9bafe66f4c7cda4 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/ma/__pycache__/testutils.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/ma/__pycache__/timer_comparison.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/ma/__pycache__/timer_comparison.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0700f2aa3f4006d2a55a3cc0422a7c01b4639150 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/ma/__pycache__/timer_comparison.cpython-39.pyc differ diff 
--git a/MLPY/Lib/site-packages/numpy/ma/bench.py b/MLPY/Lib/site-packages/numpy/ma/bench.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e0e14fcfa4515c9fda1e8b8c17b6a4b3ee1aa9a
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/ma/bench.py
@@ -0,0 +1,131 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import timeit
+import numpy
+
+
+###############################################################################
+#                              Global variables                               #
+###############################################################################
+
+
+# Small arrays
+xs = numpy.random.uniform(-1, 1, 6).reshape(2, 3)
+ys = numpy.random.uniform(-1, 1, 6).reshape(2, 3)
+zs = xs + 1j * ys
+m1 = [[True, False, False], [False, False, True]]
+m2 = [[True, False, True], [False, False, True]]
+nmxs = numpy.ma.array(xs, mask=m1)
+nmys = numpy.ma.array(ys, mask=m2)
+nmzs = numpy.ma.array(zs, mask=m1)
+
+# Big arrays
+xl = numpy.random.uniform(-1, 1, 100*100).reshape(100, 100)
+yl = numpy.random.uniform(-1, 1, 100*100).reshape(100, 100)
+zl = xl + 1j * yl
+maskx = xl > 0.8
+masky = yl < -0.8
+nmxl = numpy.ma.array(xl, mask=maskx)
+nmyl = numpy.ma.array(yl, mask=masky)
+nmzl = numpy.ma.array(zl, mask=maskx)
+
+
+###############################################################################
+#                                  Functions                                  #
+###############################################################################
+
+
+def timer(s, v='', nloop=500, nrep=3):
+    units = ["s", "ms", "µs", "ns"]
+    scaling = [1, 1e3, 1e6, 1e9]
+    print("%s : %-50s : " % (v, s), end=' ')
+    varnames = ["%ss,nm%ss,%sl,nm%sl" % tuple(x*4) for x in 'xyz']
+    setup = 'from __main__ import numpy, %s' % ','.join(varnames)
+    Timer = timeit.Timer(stmt=s, setup=setup)
+    best = min(Timer.repeat(nrep, nloop)) / nloop
+    if best > 0.0:
+        order = min(-int(numpy.floor(numpy.log10(best)) // 3), 3)
+    else:
+        order = 3
+    print("%d loops, best of %d: %.*g %s per loop" % (nloop, nrep,
+                                                      3,
+                                                      best * scaling[order],
+                                                      units[order]))
+
+
+def compare_functions_1v(func, nloop=500,
+                         xs=xs, nmxs=nmxs, xl=xl, nmxl=nmxl):
+    funcname = func.__name__
+    print("-"*50)
+    print(f'{funcname} on small arrays')
+    module, data = "numpy.ma", "nmxs"
+    timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop)
+
+    print(f'{funcname} on large arrays')
+    module, data = "numpy.ma", "nmxl"
+    timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop)
+    return
+
+def compare_methods(methodname, args, vars='x', nloop=500, test=True,
+                    xs=xs, nmxs=nmxs, xl=xl, nmxl=nmxl):
+    print("-"*50)
+    print(f'{methodname} on small arrays')
+    data, ver = f'nm{vars}s', 'numpy.ma'
+    timer("%(data)s.%(methodname)s(%(args)s)" % locals(), v=ver, nloop=nloop)
+
+    print(f'{methodname} on large arrays')
+    data, ver = f'nm{vars}l', 'numpy.ma'
+    timer("%(data)s.%(methodname)s(%(args)s)" % locals(), v=ver, nloop=nloop)
+    return
+
+def compare_functions_2v(func, nloop=500, test=True,
+                         xs=xs, nmxs=nmxs,
+                         ys=ys, nmys=nmys,
+                         xl=xl, nmxl=nmxl,
+                         yl=yl, nmyl=nmyl):
+    funcname = func.__name__
+    print("-"*50)
+    print(f'{funcname} on small arrays')
+    module, data = "numpy.ma", "nmxs,nmys"
+    timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop)
+
+    print(f'{funcname} on large arrays')
+    module, data = "numpy.ma", "nmxl,nmyl"
+    timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop)
+    return
+
+
+if __name__ == '__main__':
+    compare_functions_1v(numpy.sin)
+    compare_functions_1v(numpy.log)
compare_functions_1v(numpy.sqrt) + + compare_functions_2v(numpy.multiply) + compare_functions_2v(numpy.divide) + compare_functions_2v(numpy.power) + + compare_methods('ravel', '', nloop=1000) + compare_methods('conjugate', '', 'z', nloop=1000) + compare_methods('transpose', '', nloop=1000) + compare_methods('compressed', '', nloop=1000) + compare_methods('__getitem__', '0', nloop=1000) + compare_methods('__getitem__', '(0,0)', nloop=1000) + compare_methods('__getitem__', '[0,-1]', nloop=1000) + compare_methods('__setitem__', '0, 17', nloop=1000, test=False) + compare_methods('__setitem__', '(0,0), 17', nloop=1000, test=False) + + print("-"*50) + print("__setitem__ on small arrays") + timer('nmxs.__setitem__((-1,0),numpy.ma.masked)', 'numpy.ma ', nloop=10000) + + print("-"*50) + print("__setitem__ on large arrays") + timer('nmxl.__setitem__((-1,0),numpy.ma.masked)', 'numpy.ma ', nloop=10000) + + print("-"*50) + print("where on small arrays") + timer('numpy.ma.where(nmxs>2,nmxs,nmys)', 'numpy.ma ', nloop=1000) + print("-"*50) + print("where on large arrays") + timer('numpy.ma.where(nmxl>2,nmxl,nmyl)', 'numpy.ma ', nloop=100) diff --git a/MLPY/Lib/site-packages/numpy/ma/core.py b/MLPY/Lib/site-packages/numpy/ma/core.py new file mode 100644 index 0000000000000000000000000000000000000000..e6c2dbd058283e87685eb5d854c5c9781bac5961 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/ma/core.py @@ -0,0 +1,8186 @@ +""" +numpy.ma : a package to handle missing or invalid values. + +This package was initially written for numarray by Paul F. Dubois +at Lawrence Livermore National Laboratory. +In 2006, the package was completely rewritten by Pierre Gerard-Marchant +(University of Georgia) to make the MaskedArray class a subclass of ndarray, +and to improve support of structured arrays. + + +Copyright 1999, 2000, 2001 Regents of the University of California. +Released for unlimited redistribution. + +* Adapted for numpy_core 2005 by Travis Oliphant and (mainly) Paul Dubois. +* Subclassing of the base `ndarray` 2006 by Pierre Gerard-Marchant + (pgmdevlist_AT_gmail_DOT_com) +* Improvements suggested by Reggie Dugard (reggie_AT_merfinllc_DOT_com) + +.. 
moduleauthor:: Pierre Gerard-Marchant + +""" +# pylint: disable-msg=E1002 +import builtins +import inspect +import operator +import warnings +import textwrap +import re +from functools import reduce + +import numpy as np +import numpy.core.umath as umath +import numpy.core.numerictypes as ntypes +from numpy import ndarray, amax, amin, iscomplexobj, bool_, _NoValue +from numpy import array as narray +from numpy.lib.function_base import angle +from numpy.compat import ( + getargspec, formatargspec, long, unicode, bytes + ) +from numpy import expand_dims +from numpy.core.numeric import normalize_axis_tuple +from numpy.core._internal import recursive +from numpy.compat import pickle + + +__all__ = [ + 'MAError', 'MaskError', 'MaskType', 'MaskedArray', 'abs', 'absolute', + 'add', 'all', 'allclose', 'allequal', 'alltrue', 'amax', 'amin', + 'angle', 'anom', 'anomalies', 'any', 'append', 'arange', 'arccos', + 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', + 'argmax', 'argmin', 'argsort', 'around', 'array', 'asanyarray', + 'asarray', 'bitwise_and', 'bitwise_or', 'bitwise_xor', 'bool_', 'ceil', + 'choose', 'clip', 'common_fill_value', 'compress', 'compressed', + 'concatenate', 'conjugate', 'convolve', 'copy', 'correlate', 'cos', 'cosh', + 'count', 'cumprod', 'cumsum', 'default_fill_value', 'diag', 'diagonal', + 'diff', 'divide', 'empty', 'empty_like', 'equal', 'exp', + 'expand_dims', 'fabs', 'filled', 'fix_invalid', 'flatten_mask', + 'flatten_structured_array', 'floor', 'floor_divide', 'fmod', + 'frombuffer', 'fromflex', 'fromfunction', 'getdata', 'getmask', + 'getmaskarray', 'greater', 'greater_equal', 'harden_mask', 'hypot', + 'identity', 'ids', 'indices', 'inner', 'innerproduct', 'isMA', + 'isMaskedArray', 'is_mask', 'is_masked', 'isarray', 'left_shift', + 'less', 'less_equal', 'log', 'log10', 'log2', + 'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'make_mask', + 'make_mask_descr', 'make_mask_none', 'mask_or', 'masked', + 'masked_array', 'masked_equal', 'masked_greater', + 'masked_greater_equal', 'masked_inside', 'masked_invalid', + 'masked_less', 'masked_less_equal', 'masked_not_equal', + 'masked_object', 'masked_outside', 'masked_print_option', + 'masked_singleton', 'masked_values', 'masked_where', 'max', 'maximum', + 'maximum_fill_value', 'mean', 'min', 'minimum', 'minimum_fill_value', + 'mod', 'multiply', 'mvoid', 'ndim', 'negative', 'nomask', 'nonzero', + 'not_equal', 'ones', 'outer', 'outerproduct', 'power', 'prod', + 'product', 'ptp', 'put', 'putmask', 'ravel', 'remainder', + 'repeat', 'reshape', 'resize', 'right_shift', 'round', 'round_', + 'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'soften_mask', + 'sometrue', 'sort', 'sqrt', 'squeeze', 'std', 'subtract', 'sum', + 'swapaxes', 'take', 'tan', 'tanh', 'trace', 'transpose', 'true_divide', + 'var', 'where', 'zeros', + ] + +MaskType = np.bool_ +nomask = MaskType(0) + +class MaskedArrayFutureWarning(FutureWarning): + pass + +def _deprecate_argsort_axis(arr): + """ + Adjust the axis passed to argsort, warning if necessary + + Parameters + ---------- + arr + The array which argsort was called on + + np.ma.argsort has a long-term bug where the default of the axis argument + is wrong (gh-8701), which now must be kept for backwards compatibility. + Thankfully, this only makes a difference when arrays are 2- or more- + dimensional, so we only need a warning then. 
+ """ + if arr.ndim <= 1: + # no warning needed - but switch to -1 anyway, to avoid surprising + # subclasses, which are more likely to implement scalar axes. + return -1 + else: + # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default + warnings.warn( + "In the future the default for argsort will be axis=-1, not the " + "current None, to match its documentation and np.argsort. " + "Explicitly pass -1 or None to silence this warning.", + MaskedArrayFutureWarning, stacklevel=3) + return None + + +def doc_note(initialdoc, note): + """ + Adds a Notes section to an existing docstring. + + """ + if initialdoc is None: + return + if note is None: + return initialdoc + + notesplit = re.split(r'\n\s*?Notes\n\s*?-----', inspect.cleandoc(initialdoc)) + notedoc = "\n\nNotes\n-----\n%s\n" % inspect.cleandoc(note) + + return ''.join(notesplit[:1] + [notedoc] + notesplit[1:]) + + +def get_object_signature(obj): + """ + Get the signature from obj + + """ + try: + sig = formatargspec(*getargspec(obj)) + except TypeError: + sig = '' + return sig + + +############################################################################### +# Exceptions # +############################################################################### + + +class MAError(Exception): + """ + Class for masked array related errors. + + """ + pass + + +class MaskError(MAError): + """ + Class for mask related errors. + + """ + pass + + +############################################################################### +# Filling options # +############################################################################### + + +# b: boolean - c: complex - f: floats - i: integer - O: object - S: string +default_filler = {'b': True, + 'c': 1.e20 + 0.0j, + 'f': 1.e20, + 'i': 999999, + 'O': '?', + 'S': b'N/A', + 'u': 999999, + 'V': b'???', + 'U': u'N/A' + } + +# Add datetime64 and timedelta64 types +for v in ["Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", + "fs", "as"]: + default_filler["M8[" + v + "]"] = np.datetime64("NaT", v) + default_filler["m8[" + v + "]"] = np.timedelta64("NaT", v) + +float_types_list = [np.half, np.single, np.double, np.longdouble, + np.csingle, np.cdouble, np.clongdouble] +max_filler = ntypes._minvals +max_filler.update([(k, -np.inf) for k in float_types_list[:4]]) +max_filler.update([(k, complex(-np.inf, -np.inf)) for k in float_types_list[-3:]]) + +min_filler = ntypes._maxvals +min_filler.update([(k, +np.inf) for k in float_types_list[:4]]) +min_filler.update([(k, complex(+np.inf, +np.inf)) for k in float_types_list[-3:]]) + +del float_types_list + +def _recursive_fill_value(dtype, f): + """ + Recursively produce a fill value for `dtype`, calling f on scalar dtypes + """ + if dtype.names is not None: + vals = tuple(_recursive_fill_value(dtype[name], f) for name in dtype.names) + return np.array(vals, dtype=dtype)[()] # decay to void scalar from 0d + elif dtype.subdtype: + subtype, shape = dtype.subdtype + subval = _recursive_fill_value(subtype, f) + return np.full(shape, subval) + else: + return f(dtype) + + +def _get_dtype_of(obj): + """ Convert the argument for *_fill_value into a dtype """ + if isinstance(obj, np.dtype): + return obj + elif hasattr(obj, 'dtype'): + return obj.dtype + else: + return np.asanyarray(obj).dtype + + +def default_fill_value(obj): + """ + Return the default fill value for the argument object. 
+ + The default filling value depends on the datatype of the input + array or the type of the input scalar: + + ======== ======== + datatype default + ======== ======== + bool True + int 999999 + float 1.e20 + complex 1.e20+0j + object '?' + string 'N/A' + ======== ======== + + For structured types, a structured scalar is returned, with each field the + default fill value for its type. + + For subarray types, the fill value is an array of the same size containing + the default scalar fill value. + + Parameters + ---------- + obj : ndarray, dtype or scalar + The array data-type or scalar for which the default fill value + is returned. + + Returns + ------- + fill_value : scalar + The default fill value. + + Examples + -------- + >>> np.ma.default_fill_value(1) + 999999 + >>> np.ma.default_fill_value(np.array([1.1, 2., np.pi])) + 1e+20 + >>> np.ma.default_fill_value(np.dtype(complex)) + (1e+20+0j) + + """ + def _scalar_fill_value(dtype): + if dtype.kind in 'Mm': + return default_filler.get(dtype.str[1:], '?') + else: + return default_filler.get(dtype.kind, '?') + + dtype = _get_dtype_of(obj) + return _recursive_fill_value(dtype, _scalar_fill_value) + + +def _extremum_fill_value(obj, extremum, extremum_name): + + def _scalar_fill_value(dtype): + try: + return extremum[dtype] + except KeyError as e: + raise TypeError( + f"Unsuitable type {dtype} for calculating {extremum_name}." + ) from None + + dtype = _get_dtype_of(obj) + return _recursive_fill_value(dtype, _scalar_fill_value) + + +def minimum_fill_value(obj): + """ + Return the maximum value that can be represented by the dtype of an object. + + This function is useful for calculating a fill value suitable for + taking the minimum of an array with a given dtype. + + Parameters + ---------- + obj : ndarray, dtype or scalar + An object that can be queried for it's numeric type. + + Returns + ------- + val : scalar + The maximum representable value. + + Raises + ------ + TypeError + If `obj` isn't a suitable numeric type. + + See Also + -------- + maximum_fill_value : The inverse function. + set_fill_value : Set the filling value of a masked array. + MaskedArray.fill_value : Return current fill value. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.int8() + >>> ma.minimum_fill_value(a) + 127 + >>> a = np.int32() + >>> ma.minimum_fill_value(a) + 2147483647 + + An array of numeric data can also be passed. + + >>> a = np.array([1, 2, 3], dtype=np.int8) + >>> ma.minimum_fill_value(a) + 127 + >>> a = np.array([1, 2, 3], dtype=np.float32) + >>> ma.minimum_fill_value(a) + inf + + """ + return _extremum_fill_value(obj, min_filler, "minimum") + + +def maximum_fill_value(obj): + """ + Return the minimum value that can be represented by the dtype of an object. + + This function is useful for calculating a fill value suitable for + taking the maximum of an array with a given dtype. + + Parameters + ---------- + obj : ndarray, dtype or scalar + An object that can be queried for it's numeric type. + + Returns + ------- + val : scalar + The minimum representable value. + + Raises + ------ + TypeError + If `obj` isn't a suitable numeric type. + + See Also + -------- + minimum_fill_value : The inverse function. + set_fill_value : Set the filling value of a masked array. + MaskedArray.fill_value : Return current fill value. 
+ + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.int8() + >>> ma.maximum_fill_value(a) + -128 + >>> a = np.int32() + >>> ma.maximum_fill_value(a) + -2147483648 + + An array of numeric data can also be passed. + + >>> a = np.array([1, 2, 3], dtype=np.int8) + >>> ma.maximum_fill_value(a) + -128 + >>> a = np.array([1, 2, 3], dtype=np.float32) + >>> ma.maximum_fill_value(a) + -inf + + """ + return _extremum_fill_value(obj, max_filler, "maximum") + + +def _recursive_set_fill_value(fillvalue, dt): + """ + Create a fill value for a structured dtype. + + Parameters + ---------- + fillvalue : scalar or array_like + Scalar or array representing the fill value. If it is of shorter + length than the number of fields in dt, it will be resized. + dt : dtype + The structured dtype for which to create the fill value. + + Returns + ------- + val: tuple + A tuple of values corresponding to the structured fill value. + + """ + fillvalue = np.resize(fillvalue, len(dt.names)) + output_value = [] + for (fval, name) in zip(fillvalue, dt.names): + cdtype = dt[name] + if cdtype.subdtype: + cdtype = cdtype.subdtype[0] + + if cdtype.names is not None: + output_value.append(tuple(_recursive_set_fill_value(fval, cdtype))) + else: + output_value.append(np.array(fval, dtype=cdtype).item()) + return tuple(output_value) + + +def _check_fill_value(fill_value, ndtype): + """ + Private function validating the given `fill_value` for the given dtype. + + If fill_value is None, it is set to the default corresponding to the dtype. + + If fill_value is not None, its value is forced to the given dtype. + + The result is always a 0d array. + + """ + ndtype = np.dtype(ndtype) + if fill_value is None: + fill_value = default_fill_value(ndtype) + elif ndtype.names is not None: + if isinstance(fill_value, (ndarray, np.void)): + try: + fill_value = np.array(fill_value, copy=False, dtype=ndtype) + except ValueError as e: + err_msg = "Unable to transform %s to dtype %s" + raise ValueError(err_msg % (fill_value, ndtype)) from e + else: + fill_value = np.asarray(fill_value, dtype=object) + fill_value = np.array(_recursive_set_fill_value(fill_value, ndtype), + dtype=ndtype) + else: + if isinstance(fill_value, str) and (ndtype.char not in 'OSVU'): + # Note this check doesn't work if fill_value is not a scalar + err_msg = "Cannot set fill value of string with array of dtype %s" + raise TypeError(err_msg % ndtype) + else: + # In case we want to convert 1e20 to int. + # Also in case of converting string arrays. + try: + fill_value = np.array(fill_value, copy=False, dtype=ndtype) + except (OverflowError, ValueError) as e: + # Raise TypeError instead of OverflowError or ValueError. + # OverflowError is seldom used, and the real problem here is + # that the passed fill_value is not compatible with the ndtype. + err_msg = "Cannot convert fill_value %s to dtype %s" + raise TypeError(err_msg % (fill_value, ndtype)) from e + return np.array(fill_value) + + +def set_fill_value(a, fill_value): + """ + Set the filling value of a, if a is a masked array. + + This function changes the fill value of the masked array `a` in place. + If `a` is not a masked array, the function returns silently, without + doing anything. + + Parameters + ---------- + a : array_like + Input array. + fill_value : dtype + Filling value. A consistency test is performed to make sure + the value is compatible with the dtype of `a`. + + Returns + ------- + None + Nothing returned by this function. 
+ + See Also + -------- + maximum_fill_value : Return the default fill value for a dtype. + MaskedArray.fill_value : Return current fill value. + MaskedArray.set_fill_value : Equivalent method. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(5) + >>> a + array([0, 1, 2, 3, 4]) + >>> a = ma.masked_where(a < 3, a) + >>> a + masked_array(data=[--, --, --, 3, 4], + mask=[ True, True, True, False, False], + fill_value=999999) + >>> ma.set_fill_value(a, -999) + >>> a + masked_array(data=[--, --, --, 3, 4], + mask=[ True, True, True, False, False], + fill_value=-999) + + Nothing happens if `a` is not a masked array. + + >>> a = list(range(5)) + >>> a + [0, 1, 2, 3, 4] + >>> ma.set_fill_value(a, 100) + >>> a + [0, 1, 2, 3, 4] + >>> a = np.arange(5) + >>> a + array([0, 1, 2, 3, 4]) + >>> ma.set_fill_value(a, 100) + >>> a + array([0, 1, 2, 3, 4]) + + """ + if isinstance(a, MaskedArray): + a.set_fill_value(fill_value) + return + + +def get_fill_value(a): + """ + Return the filling value of a, if any. Otherwise, returns the + default filling value for that type. + + """ + if isinstance(a, MaskedArray): + result = a.fill_value + else: + result = default_fill_value(a) + return result + + +def common_fill_value(a, b): + """ + Return the common filling value of two masked arrays, if any. + + If ``a.fill_value == b.fill_value``, return the fill value, + otherwise return None. + + Parameters + ---------- + a, b : MaskedArray + The masked arrays for which to compare fill values. + + Returns + ------- + fill_value : scalar or None + The common fill value, or None. + + Examples + -------- + >>> x = np.ma.array([0, 1.], fill_value=3) + >>> y = np.ma.array([0, 1.], fill_value=3) + >>> np.ma.common_fill_value(x, y) + 3.0 + + """ + t1 = get_fill_value(a) + t2 = get_fill_value(b) + if t1 == t2: + return t1 + return None + + +def filled(a, fill_value=None): + """ + Return input as an array with masked data replaced by a fill value. + + If `a` is not a `MaskedArray`, `a` itself is returned. + If `a` is a `MaskedArray` and `fill_value` is None, `fill_value` is set to + ``a.fill_value``. + + Parameters + ---------- + a : MaskedArray or array_like + An input object. + fill_value : array_like, optional. + Can be scalar or non-scalar. If non-scalar, the + resulting filled array should be broadcastable + over input array. Default is None. + + Returns + ------- + a : ndarray + The filled array. + + See Also + -------- + compressed + + Examples + -------- + >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], + ... [1, 0, 0], + ... [0, 0, 0]]) + >>> x.filled() + array([[999999, 1, 2], + [999999, 4, 5], + [ 6, 7, 8]]) + >>> x.filled(fill_value=333) + array([[333, 1, 2], + [333, 4, 5], + [ 6, 7, 8]]) + >>> x.filled(fill_value=np.arange(3)) + array([[0, 1, 2], + [0, 4, 5], + [6, 7, 8]]) + + """ + if hasattr(a, 'filled'): + return a.filled(fill_value) + + elif isinstance(a, ndarray): + # Should we check for contiguity ? and a.flags['CONTIGUOUS']: + return a + elif isinstance(a, dict): + return np.array(a, 'O') + else: + return np.array(a) + + +def get_masked_subclass(*arrays): + """ + Return the youngest subclass of MaskedArray from a list of (masked) arrays. + + In case of siblings, the first listed takes over. 
+ + """ + if len(arrays) == 1: + arr = arrays[0] + if isinstance(arr, MaskedArray): + rcls = type(arr) + else: + rcls = MaskedArray + else: + arrcls = [type(a) for a in arrays] + rcls = arrcls[0] + if not issubclass(rcls, MaskedArray): + rcls = MaskedArray + for cls in arrcls[1:]: + if issubclass(cls, rcls): + rcls = cls + # Don't return MaskedConstant as result: revert to MaskedArray + if rcls.__name__ == 'MaskedConstant': + return MaskedArray + return rcls + + +def getdata(a, subok=True): + """ + Return the data of a masked array as an ndarray. + + Return the data of `a` (if any) as an ndarray if `a` is a ``MaskedArray``, + else return `a` as a ndarray or subclass (depending on `subok`) if not. + + Parameters + ---------- + a : array_like + Input ``MaskedArray``, alternatively a ndarray or a subclass thereof. + subok : bool + Whether to force the output to be a `pure` ndarray (False) or to + return a subclass of ndarray if appropriate (True, default). + + See Also + -------- + getmask : Return the mask of a masked array, or nomask. + getmaskarray : Return the mask of a masked array, or full array of False. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = ma.masked_equal([[1,2],[3,4]], 2) + >>> a + masked_array( + data=[[1, --], + [3, 4]], + mask=[[False, True], + [False, False]], + fill_value=2) + >>> ma.getdata(a) + array([[1, 2], + [3, 4]]) + + Equivalently use the ``MaskedArray`` `data` attribute. + + >>> a.data + array([[1, 2], + [3, 4]]) + + """ + try: + data = a._data + except AttributeError: + data = np.array(a, copy=False, subok=subok) + if not subok: + return data.view(ndarray) + return data + + +get_data = getdata + + +def fix_invalid(a, mask=nomask, copy=True, fill_value=None): + """ + Return input with invalid data masked and replaced by a fill value. + + Invalid data means values of `nan`, `inf`, etc. + + Parameters + ---------- + a : array_like + Input array, a (subclass of) ndarray. + mask : sequence, optional + Mask. Must be convertible to an array of booleans with the same + shape as `data`. True indicates a masked (i.e. invalid) data. + copy : bool, optional + Whether to use a copy of `a` (True) or to fix `a` in place (False). + Default is True. + fill_value : scalar, optional + Value used for fixing invalid data. Default is None, in which case + the ``a.fill_value`` is used. + + Returns + ------- + b : MaskedArray + The input array with invalid entries fixed. + + Notes + ----- + A copy is performed by default. 
+ + Examples + -------- + >>> x = np.ma.array([1., -1, np.nan, np.inf], mask=[1] + [0]*3) + >>> x + masked_array(data=[--, -1.0, nan, inf], + mask=[ True, False, False, False], + fill_value=1e+20) + >>> np.ma.fix_invalid(x) + masked_array(data=[--, -1.0, --, --], + mask=[ True, False, True, True], + fill_value=1e+20) + + >>> fixed = np.ma.fix_invalid(x) + >>> fixed.data + array([ 1.e+00, -1.e+00, 1.e+20, 1.e+20]) + >>> x.data + array([ 1., -1., nan, inf]) + + """ + a = masked_array(a, copy=copy, mask=mask, subok=True) + invalid = np.logical_not(np.isfinite(a._data)) + if not invalid.any(): + return a + a._mask |= invalid + if fill_value is None: + fill_value = a.fill_value + a._data[invalid] = fill_value + return a + +def is_string_or_list_of_strings(val): + return (isinstance(val, str) or + (isinstance(val, list) and val and + builtins.all(isinstance(s, str) for s in val))) + +############################################################################### +# Ufuncs # +############################################################################### + + +ufunc_domain = {} +ufunc_fills = {} + + +class _DomainCheckInterval: + """ + Define a valid interval, so that : + + ``domain_check_interval(a,b)(x) == True`` where + ``x < a`` or ``x > b``. + + """ + + def __init__(self, a, b): + "domain_check_interval(a,b)(x) = true where x < a or y > b" + if a > b: + (a, b) = (b, a) + self.a = a + self.b = b + + def __call__(self, x): + "Execute the call behavior." + # nans at masked positions cause RuntimeWarnings, even though + # they are masked. To avoid this we suppress warnings. + with np.errstate(invalid='ignore'): + return umath.logical_or(umath.greater(x, self.b), + umath.less(x, self.a)) + + +class _DomainTan: + """ + Define a valid interval for the `tan` function, so that: + + ``domain_tan(eps) = True`` where ``abs(cos(x)) < eps`` + + """ + + def __init__(self, eps): + "domain_tan(eps) = true where abs(cos(x)) < eps)" + self.eps = eps + + def __call__(self, x): + "Executes the call behavior." + with np.errstate(invalid='ignore'): + return umath.less(umath.absolute(umath.cos(x)), self.eps) + + +class _DomainSafeDivide: + """ + Define a domain for safe division. + + """ + + def __init__(self, tolerance=None): + self.tolerance = tolerance + + def __call__(self, a, b): + # Delay the selection of the tolerance to here in order to reduce numpy + # import times. The calculation of these parameters is a substantial + # component of numpy's import time. + if self.tolerance is None: + self.tolerance = np.finfo(float).tiny + # don't call ma ufuncs from __array_wrap__ which would fail for scalars + a, b = np.asarray(a), np.asarray(b) + with np.errstate(invalid='ignore'): + return umath.absolute(a) * self.tolerance >= umath.absolute(b) + + +class _DomainGreater: + """ + DomainGreater(v)(x) is True where x <= v. + + """ + + def __init__(self, critical_value): + "DomainGreater(v)(x) = true where x <= v" + self.critical_value = critical_value + + def __call__(self, x): + "Executes the call behavior." + with np.errstate(invalid='ignore'): + return umath.less_equal(x, self.critical_value) + + +class _DomainGreaterEqual: + """ + DomainGreaterEqual(v)(x) is True where x < v. + + """ + + def __init__(self, critical_value): + "DomainGreaterEqual(v)(x) = true where x < v" + self.critical_value = critical_value + + def __call__(self, x): + "Executes the call behavior." 
+ with np.errstate(invalid='ignore'): + return umath.less(x, self.critical_value) + + +class _MaskedUFunc: + def __init__(self, ufunc): + self.f = ufunc + self.__doc__ = ufunc.__doc__ + self.__name__ = ufunc.__name__ + + def __str__(self): + return f"Masked version of {self.f}" + + +class _MaskedUnaryOperation(_MaskedUFunc): + """ + Defines masked version of unary operations, where invalid values are + pre-masked. + + Parameters + ---------- + mufunc : callable + The function for which to define a masked version. Made available + as ``_MaskedUnaryOperation.f``. + fill : scalar, optional + Filling value, default is 0. + domain : class instance + Domain for the function. Should be one of the ``_Domain*`` + classes. Default is None. + + """ + + def __init__(self, mufunc, fill=0, domain=None): + super().__init__(mufunc) + self.fill = fill + self.domain = domain + ufunc_domain[mufunc] = domain + ufunc_fills[mufunc] = fill + + def __call__(self, a, *args, **kwargs): + """ + Execute the call behavior. + + """ + d = getdata(a) + # Deal with domain + if self.domain is not None: + # Case 1.1. : Domained function + # nans at masked positions cause RuntimeWarnings, even though + # they are masked. To avoid this we suppress warnings. + with np.errstate(divide='ignore', invalid='ignore'): + result = self.f(d, *args, **kwargs) + # Make a mask + m = ~umath.isfinite(result) + m |= self.domain(d) + m |= getmask(a) + else: + # Case 1.2. : Function without a domain + # Get the result and the mask + with np.errstate(divide='ignore', invalid='ignore'): + result = self.f(d, *args, **kwargs) + m = getmask(a) + + if not result.ndim: + # Case 2.1. : The result is scalarscalar + if m: + return masked + return result + + if m is not nomask: + # Case 2.2. The result is an array + # We need to fill the invalid data back w/ the input Now, + # that's plain silly: in C, we would just skip the element and + # keep the original, but we do have to do it that way in Python + + # In case result has a lower dtype than the inputs (as in + # equal) + try: + np.copyto(result, d, where=m) + except TypeError: + pass + # Transform to + masked_result = result.view(get_masked_subclass(a)) + masked_result._mask = m + masked_result._update_from(a) + return masked_result + + +class _MaskedBinaryOperation(_MaskedUFunc): + """ + Define masked version of binary operations, where invalid + values are pre-masked. + + Parameters + ---------- + mbfunc : function + The function for which to define a masked version. Made available + as ``_MaskedBinaryOperation.f``. + domain : class instance + Default domain for the function. Should be one of the ``_Domain*`` + classes. Default is None. + fillx : scalar, optional + Filling value for the first argument, default is 0. + filly : scalar, optional + Filling value for the second argument, default is 0. + + """ + + def __init__(self, mbfunc, fillx=0, filly=0): + """ + abfunc(fillx, filly) must be defined. + + abfunc(x, filly) = x for all x to enable reduce. + + """ + super().__init__(mbfunc) + self.fillx = fillx + self.filly = filly + ufunc_domain[mbfunc] = None + ufunc_fills[mbfunc] = (fillx, filly) + + def __call__(self, a, b, *args, **kwargs): + """ + Execute the call behavior. 
+ + """ + # Get the data, as ndarray + (da, db) = (getdata(a), getdata(b)) + # Get the result + with np.errstate(): + np.seterr(divide='ignore', invalid='ignore') + result = self.f(da, db, *args, **kwargs) + # Get the mask for the result + (ma, mb) = (getmask(a), getmask(b)) + if ma is nomask: + if mb is nomask: + m = nomask + else: + m = umath.logical_or(getmaskarray(a), mb) + elif mb is nomask: + m = umath.logical_or(ma, getmaskarray(b)) + else: + m = umath.logical_or(ma, mb) + + # Case 1. : scalar + if not result.ndim: + if m: + return masked + return result + + # Case 2. : array + # Revert result to da where masked + if m is not nomask and m.any(): + # any errors, just abort; impossible to guarantee masked values + try: + np.copyto(result, da, casting='unsafe', where=m) + except Exception: + pass + + # Transforms to a (subclass of) MaskedArray + masked_result = result.view(get_masked_subclass(a, b)) + masked_result._mask = m + if isinstance(a, MaskedArray): + masked_result._update_from(a) + elif isinstance(b, MaskedArray): + masked_result._update_from(b) + return masked_result + + def reduce(self, target, axis=0, dtype=None): + """ + Reduce `target` along the given `axis`. + + """ + tclass = get_masked_subclass(target) + m = getmask(target) + t = filled(target, self.filly) + if t.shape == (): + t = t.reshape(1) + if m is not nomask: + m = make_mask(m, copy=True) + m.shape = (1,) + + if m is nomask: + tr = self.f.reduce(t, axis) + mr = nomask + else: + tr = self.f.reduce(t, axis, dtype=dtype or t.dtype) + mr = umath.logical_and.reduce(m, axis) + + if not tr.shape: + if mr: + return masked + else: + return tr + masked_tr = tr.view(tclass) + masked_tr._mask = mr + return masked_tr + + def outer(self, a, b): + """ + Return the function applied to the outer product of a and b. + + """ + (da, db) = (getdata(a), getdata(b)) + d = self.f.outer(da, db) + ma = getmask(a) + mb = getmask(b) + if ma is nomask and mb is nomask: + m = nomask + else: + ma = getmaskarray(a) + mb = getmaskarray(b) + m = umath.logical_or.outer(ma, mb) + if (not m.ndim) and m: + return masked + if m is not nomask: + np.copyto(d, da, where=m) + if not d.shape: + return d + masked_d = d.view(get_masked_subclass(a, b)) + masked_d._mask = m + return masked_d + + def accumulate(self, target, axis=0): + """Accumulate `target` along `axis` after filling with y fill + value. + + """ + tclass = get_masked_subclass(target) + t = filled(target, self.filly) + result = self.f.accumulate(t, axis) + masked_result = result.view(tclass) + return masked_result + + + +class _DomainedBinaryOperation(_MaskedUFunc): + """ + Define binary operations that have a domain, like divide. + + They have no reduce, outer or accumulate. + + Parameters + ---------- + mbfunc : function + The function for which to define a masked version. Made available + as ``_DomainedBinaryOperation.f``. + domain : class instance + Default domain for the function. Should be one of the ``_Domain*`` + classes. + fillx : scalar, optional + Filling value for the first argument, default is 0. + filly : scalar, optional + Filling value for the second argument, default is 0. + + """ + + def __init__(self, dbfunc, domain, fillx=0, filly=0): + """abfunc(fillx, filly) must be defined. + abfunc(x, filly) = x for all x to enable reduce. + """ + super().__init__(dbfunc) + self.domain = domain + self.fillx = fillx + self.filly = filly + ufunc_domain[dbfunc] = domain + ufunc_fills[dbfunc] = (fillx, filly) + + def __call__(self, a, b, *args, **kwargs): + "Execute the call behavior." 
+ # Get the data + (da, db) = (getdata(a), getdata(b)) + # Get the result + with np.errstate(divide='ignore', invalid='ignore'): + result = self.f(da, db, *args, **kwargs) + # Get the mask as a combination of the source masks and invalid + m = ~umath.isfinite(result) + m |= getmask(a) + m |= getmask(b) + # Apply the domain + domain = ufunc_domain.get(self.f, None) + if domain is not None: + m |= domain(da, db) + # Take care of the scalar case first + if not m.ndim: + if m: + return masked + else: + return result + # When the mask is True, put back da if possible + # any errors, just abort; impossible to guarantee masked values + try: + np.copyto(result, 0, casting='unsafe', where=m) + # avoid using "*" since this may be overlaid + masked_da = umath.multiply(m, da) + # only add back if it can be cast safely + if np.can_cast(masked_da.dtype, result.dtype, casting='safe'): + result += masked_da + except Exception: + pass + + # Transforms to a (subclass of) MaskedArray + masked_result = result.view(get_masked_subclass(a, b)) + masked_result._mask = m + if isinstance(a, MaskedArray): + masked_result._update_from(a) + elif isinstance(b, MaskedArray): + masked_result._update_from(b) + return masked_result + + +# Unary ufuncs +exp = _MaskedUnaryOperation(umath.exp) +conjugate = _MaskedUnaryOperation(umath.conjugate) +sin = _MaskedUnaryOperation(umath.sin) +cos = _MaskedUnaryOperation(umath.cos) +arctan = _MaskedUnaryOperation(umath.arctan) +arcsinh = _MaskedUnaryOperation(umath.arcsinh) +sinh = _MaskedUnaryOperation(umath.sinh) +cosh = _MaskedUnaryOperation(umath.cosh) +tanh = _MaskedUnaryOperation(umath.tanh) +abs = absolute = _MaskedUnaryOperation(umath.absolute) +angle = _MaskedUnaryOperation(angle) # from numpy.lib.function_base +fabs = _MaskedUnaryOperation(umath.fabs) +negative = _MaskedUnaryOperation(umath.negative) +floor = _MaskedUnaryOperation(umath.floor) +ceil = _MaskedUnaryOperation(umath.ceil) +around = _MaskedUnaryOperation(np.round_) +logical_not = _MaskedUnaryOperation(umath.logical_not) + +# Domained unary ufuncs +sqrt = _MaskedUnaryOperation(umath.sqrt, 0.0, + _DomainGreaterEqual(0.0)) +log = _MaskedUnaryOperation(umath.log, 1.0, + _DomainGreater(0.0)) +log2 = _MaskedUnaryOperation(umath.log2, 1.0, + _DomainGreater(0.0)) +log10 = _MaskedUnaryOperation(umath.log10, 1.0, + _DomainGreater(0.0)) +tan = _MaskedUnaryOperation(umath.tan, 0.0, + _DomainTan(1e-35)) +arcsin = _MaskedUnaryOperation(umath.arcsin, 0.0, + _DomainCheckInterval(-1.0, 1.0)) +arccos = _MaskedUnaryOperation(umath.arccos, 0.0, + _DomainCheckInterval(-1.0, 1.0)) +arccosh = _MaskedUnaryOperation(umath.arccosh, 1.0, + _DomainGreaterEqual(1.0)) +arctanh = _MaskedUnaryOperation(umath.arctanh, 0.0, + _DomainCheckInterval(-1.0 + 1e-15, 1.0 - 1e-15)) + +# Binary ufuncs +add = _MaskedBinaryOperation(umath.add) +subtract = _MaskedBinaryOperation(umath.subtract) +multiply = _MaskedBinaryOperation(umath.multiply, 1, 1) +arctan2 = _MaskedBinaryOperation(umath.arctan2, 0.0, 1.0) +equal = _MaskedBinaryOperation(umath.equal) +equal.reduce = None +not_equal = _MaskedBinaryOperation(umath.not_equal) +not_equal.reduce = None +less_equal = _MaskedBinaryOperation(umath.less_equal) +less_equal.reduce = None +greater_equal = _MaskedBinaryOperation(umath.greater_equal) +greater_equal.reduce = None +less = _MaskedBinaryOperation(umath.less) +less.reduce = None +greater = _MaskedBinaryOperation(umath.greater) +greater.reduce = None +logical_and = _MaskedBinaryOperation(umath.logical_and) +alltrue = _MaskedBinaryOperation(umath.logical_and, 
                                 1, 1).reduce
+logical_or = _MaskedBinaryOperation(umath.logical_or)
+sometrue = logical_or.reduce
+logical_xor = _MaskedBinaryOperation(umath.logical_xor)
+bitwise_and = _MaskedBinaryOperation(umath.bitwise_and)
+bitwise_or = _MaskedBinaryOperation(umath.bitwise_or)
+bitwise_xor = _MaskedBinaryOperation(umath.bitwise_xor)
+hypot = _MaskedBinaryOperation(umath.hypot)
+
+# Domained binary ufuncs
+divide = _DomainedBinaryOperation(umath.divide, _DomainSafeDivide(), 0, 1)
+true_divide = _DomainedBinaryOperation(umath.true_divide,
+                                       _DomainSafeDivide(), 0, 1)
+floor_divide = _DomainedBinaryOperation(umath.floor_divide,
+                                        _DomainSafeDivide(), 0, 1)
+remainder = _DomainedBinaryOperation(umath.remainder,
+                                     _DomainSafeDivide(), 0, 1)
+fmod = _DomainedBinaryOperation(umath.fmod, _DomainSafeDivide(), 0, 1)
+mod = _DomainedBinaryOperation(umath.mod, _DomainSafeDivide(), 0, 1)
+
+
+###############################################################################
+#                          Mask creation functions                            #
+###############################################################################
+
+
+def _replace_dtype_fields_recursive(dtype, primitive_dtype):
+    "Private function allowing recursion in _replace_dtype_fields."
+    _recurse = _replace_dtype_fields_recursive
+
+    # Do we have some name fields ?
+    if dtype.names is not None:
+        descr = []
+        for name in dtype.names:
+            field = dtype.fields[name]
+            if len(field) == 3:
+                # Prepend the title to the name
+                name = (field[-1], name)
+            descr.append((name, _recurse(field[0], primitive_dtype)))
+        new_dtype = np.dtype(descr)
+
+    # Is this some kind of composite a la (float,2)
+    elif dtype.subdtype:
+        descr = list(dtype.subdtype)
+        descr[0] = _recurse(dtype.subdtype[0], primitive_dtype)
+        new_dtype = np.dtype(tuple(descr))
+
+    # this is a primitive type, so do a direct replacement
+    else:
+        new_dtype = primitive_dtype
+
+    # preserve identity of dtypes
+    if new_dtype == dtype:
+        new_dtype = dtype
+
+    return new_dtype
+
+
+def _replace_dtype_fields(dtype, primitive_dtype):
+    """
+    Construct a dtype description list from a given dtype.
+
+    Returns a new dtype object, with all fields and subtypes in the given type
+    recursively replaced with `primitive_dtype`.
+
+    Arguments are coerced to dtypes first.
+    """
+    dtype = np.dtype(dtype)
+    primitive_dtype = np.dtype(primitive_dtype)
+    return _replace_dtype_fields_recursive(dtype, primitive_dtype)
+
+
+def make_mask_descr(ndtype):
+    """
+    Construct a dtype description list from a given dtype.
+
+    Returns a new dtype object, with the type of all fields in `ndtype`
+    converted to a boolean type. Field names are not altered.
+
+    Parameters
+    ----------
+    ndtype : dtype
+        The dtype to convert.
+
+    Returns
+    -------
+    result : dtype
+        A dtype that looks like `ndtype`, the type of all fields is boolean.
+
+    Examples
+    --------
+    >>> import numpy.ma as ma
+    >>> dtype = np.dtype({'names':['foo', 'bar'],
+    ...                   'formats':[np.float32, np.int64]})
+    >>> dtype
+    dtype([('foo', '<f4'), ('bar', '<i8')])
+    >>> ma.make_mask_descr(dtype)
+    dtype([('foo', '|b1'), ('bar', '|b1')])
+    >>> ma.make_mask_descr(np.float32)
+    dtype('bool')
+
+    """
+    return _replace_dtype_fields(ndtype, MaskType)
+
+
+def getmask(a):
+    """
+    Return the mask of a masked array, or nomask.
+
+    Return the mask of `a` as an ndarray if `a` is a `MaskedArray` and the
+    mask is not `nomask`, else return `nomask`. To guarantee a full array
+    of booleans of the same shape as a, use `getmaskarray`.
+
+    Parameters
+    ----------
+    a : array_like
+        Input `MaskedArray` for which the mask is required.
+ + See Also + -------- + getdata : Return the data of a masked array as an ndarray. + getmaskarray : Return the mask of a masked array, or full array of False. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = ma.masked_equal([[1,2],[3,4]], 2) + >>> a + masked_array( + data=[[1, --], + [3, 4]], + mask=[[False, True], + [False, False]], + fill_value=2) + >>> ma.getmask(a) + array([[False, True], + [False, False]]) + + Equivalently use the `MaskedArray` `mask` attribute. + + >>> a.mask + array([[False, True], + [False, False]]) + + Result when mask == `nomask` + + >>> b = ma.masked_array([[1,2],[3,4]]) + >>> b + masked_array( + data=[[1, 2], + [3, 4]], + mask=False, + fill_value=999999) + >>> ma.nomask + False + >>> ma.getmask(b) == ma.nomask + True + >>> b.mask == ma.nomask + True + + """ + return getattr(a, '_mask', nomask) + + +get_mask = getmask + + +def getmaskarray(arr): + """ + Return the mask of a masked array, or full boolean array of False. + + Return the mask of `arr` as an ndarray if `arr` is a `MaskedArray` and + the mask is not `nomask`, else return a full boolean array of False of + the same shape as `arr`. + + Parameters + ---------- + arr : array_like + Input `MaskedArray` for which the mask is required. + + See Also + -------- + getmask : Return the mask of a masked array, or nomask. + getdata : Return the data of a masked array as an ndarray. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = ma.masked_equal([[1,2],[3,4]], 2) + >>> a + masked_array( + data=[[1, --], + [3, 4]], + mask=[[False, True], + [False, False]], + fill_value=2) + >>> ma.getmaskarray(a) + array([[False, True], + [False, False]]) + + Result when mask == ``nomask`` + + >>> b = ma.masked_array([[1,2],[3,4]]) + >>> b + masked_array( + data=[[1, 2], + [3, 4]], + mask=False, + fill_value=999999) + >>> ma.getmaskarray(b) + array([[False, False], + [False, False]]) + + """ + mask = getmask(arr) + if mask is nomask: + mask = make_mask_none(np.shape(arr), getattr(arr, 'dtype', None)) + return mask + + +def is_mask(m): + """ + Return True if m is a valid, standard mask. + + This function does not check the contents of the input, only that the + type is MaskType. In particular, this function returns False if the + mask has a flexible dtype. + + Parameters + ---------- + m : array_like + Array to test. + + Returns + ------- + result : bool + True if `m.dtype.type` is MaskType, False otherwise. + + See Also + -------- + ma.isMaskedArray : Test whether input is an instance of MaskedArray. + + Examples + -------- + >>> import numpy.ma as ma + >>> m = ma.masked_equal([0, 1, 0, 2, 3], 0) + >>> m + masked_array(data=[--, 1, --, 2, 3], + mask=[ True, False, True, False, False], + fill_value=0) + >>> ma.is_mask(m) + False + >>> ma.is_mask(m.mask) + True + + Input must be an ndarray (or have similar attributes) + for it to be considered a valid mask. + + >>> m = [False, True, False] + >>> ma.is_mask(m) + False + >>> m = np.array([False, True, False]) + >>> m + array([False, True, False]) + >>> ma.is_mask(m) + True + + Arrays with complex dtypes don't return True. + + >>> dtype = np.dtype({'names':['monty', 'pithon'], + ... 'formats':[bool, bool]}) + >>> dtype + dtype([('monty', '|b1'), ('pithon', '|b1')]) + >>> m = np.array([(True, False), (False, True), (True, False)], + ... 
                  dtype=dtype)
+    >>> m
+    array([( True, False), (False,  True), ( True, False)],
+          dtype=[('monty', '?'), ('pithon', '?')])
+    >>> ma.is_mask(m)
+    False
+
+    """
+    try:
+        return m.dtype.type is MaskType
+    except AttributeError:
+        return False
+
+
+def _shrink_mask(m):
+    """
+    Shrink a mask to nomask if possible
+    """
+    if m.dtype.names is None and not m.any():
+        return nomask
+    else:
+        return m
+
+
+def make_mask(m, copy=False, shrink=True, dtype=MaskType):
+    """
+    Create a boolean mask from an array.
+
+    Return `m` as a boolean mask, creating a copy if necessary or requested.
+    The function can accept any sequence that is convertible to integers,
+    or ``nomask``. Does not require that contents must be 0s and 1s, values
+    of 0 are interpreted as False, everything else as True.
+
+    Parameters
+    ----------
+    m : array_like
+        Potential mask.
+    copy : bool, optional
+        Whether to return a copy of `m` (True) or `m` itself (False).
+    shrink : bool, optional
+        Whether to shrink `m` to ``nomask`` if all its values are False.
+    dtype : dtype, optional
+        Data-type of the output mask. By default, the output mask has a
+        dtype of MaskType (bool). If the dtype is flexible, each field has
+        a boolean dtype. This is ignored when `m` is ``nomask``, in which
+        case ``nomask`` is always returned.
+
+    Returns
+    -------
+    result : ndarray
+        A boolean mask derived from `m`.
+
+    Examples
+    --------
+    >>> import numpy.ma as ma
+    >>> m = [True, False, True, True]
+    >>> ma.make_mask(m)
+    array([ True, False,  True,  True])
+    >>> m = [1, 0, 1, 1]
+    >>> ma.make_mask(m)
+    array([ True, False,  True,  True])
+    >>> m = [1, 0, 2, -3]
+    >>> ma.make_mask(m)
+    array([ True, False,  True,  True])
+
+    Effect of the `shrink` parameter.
+
+    >>> m = np.zeros(4)
+    >>> m
+    array([0., 0., 0., 0.])
+    >>> ma.make_mask(m)
+    False
+    >>> ma.make_mask(m, shrink=False)
+    array([False, False, False, False])
+
+    Using a flexible `dtype`.
+
+    >>> m = [1, 0, 1, 1]
+    >>> n = [0, 1, 0, 0]
+    >>> arr = []
+    >>> for man, mouse in zip(m, n):
+    ...     arr.append((man, mouse))
+    >>> arr
+    [(1, 0), (0, 1), (1, 0), (1, 0)]
+    >>> dtype = np.dtype({'names':['man', 'mouse'],
+    ...                   'formats':[np.int64, np.int64]})
+    >>> arr = np.array(arr, dtype=dtype)
+    >>> arr
+    array([(1, 0), (0, 1), (1, 0), (1, 0)],
+          dtype=[('man', '<i8'), ('mouse', '<i8')])
+    >>> ma.make_mask(arr, dtype=dtype)
+    array([(True, False), (False, True), (True, False), (True, False)],
+          dtype=[('man', '|b1'), ('mouse', '|b1')])
+
+    """
+    if m is nomask:
+        return nomask
+
+    # Make sure the input dtype is valid.
+    dtype = make_mask_descr(dtype)
+
+    # legacy boolean special case: "existence of fields implies true"
+    if isinstance(m, ndarray) and m.dtype.fields and dtype == np.bool_:
+        return np.ones(m.shape, dtype=dtype)
+
+    # Fill the mask in case there are missing data; turn it into an ndarray.
+    result = np.array(filled(m, True), copy=copy, dtype=dtype, subok=True)
+    # Bas les masques !
+    if shrink:
+        result = _shrink_mask(result)
+    return result
+
+
+def make_mask_none(newshape, dtype=None):
+    """
+    Return a boolean mask of the given shape, filled with False.
+
+    This function returns a boolean ndarray with all entries False, that can
+    be used in common mask manipulations. If a complex dtype is specified, the
+    type of each field is converted to a boolean type.
+
+    Parameters
+    ----------
+    newshape : tuple
+        A tuple indicating the shape of the mask.
+    dtype : {None, dtype}, optional
+        If None, use a MaskType instance. Otherwise, use a new datatype with
+        the same fields as `dtype`, converted to boolean types.
+
+    Returns
+    -------
+    result : ndarray
+        An ndarray of appropriate shape and dtype, filled with False.
+
+    See Also
+    --------
+    make_mask : Create a boolean mask from an array.
+    make_mask_descr : Construct a dtype description list from a given dtype.
+
+    Examples
+    --------
+    >>> import numpy.ma as ma
+    >>> ma.make_mask_none((3,))
+    array([False, False, False])
+
+    Defining a more complex dtype.
+
+    >>> dtype = np.dtype({'names':['foo', 'bar'],
+    ...                   'formats':[np.float32, np.int64]})
+    >>> dtype
+    dtype([('foo', '<f4'), ('bar', '<i8')])
+    >>> ma.make_mask_none((3,), dtype=dtype)
+    array([(False, False), (False, False), (False, False)],
+          dtype=[('foo', '|b1'), ('bar', '|b1')])
+
+    """
+    if dtype is None:
+        result = np.zeros(newshape, dtype=MaskType)
+    else:
+        result = np.zeros(newshape, dtype=make_mask_descr(dtype))
+    return result
+
+
+def mask_or(m1, m2, copy=False, shrink=True):
+    """
+    Combine two masks with the ``logical_or`` operator.
+
+    The result may be a view on `m1` or `m2` if the other is `nomask`
+    (i.e. False).
+
+    Parameters
+    ----------
+    m1, m2 : array_like
+        Input masks.
+    copy : bool, optional
+        If copy is False and one of the inputs is `nomask`, return a view
+        of the other input mask. Defaults to False.
+    shrink : bool, optional
+        Whether to shrink the output to `nomask` if all its values are
+        False. Defaults to True.
+
+    Returns
+    -------
+    mask : output mask
+        The result masks values that are masked in either `m1` or `m2`.
+
+    Raises
+    ------
+    ValueError
+        If `m1` and `m2` have different flexible dtypes.
+
+    Examples
+    --------
+    >>> m1 = np.ma.make_mask([0, 1, 1, 0])
+    >>> m2 = np.ma.make_mask([1, 0, 0, 0])
+    >>> np.ma.mask_or(m1, m2)
+    array([ True,  True,  True, False])
+
+    """
+
+    @recursive
+    def _recursive_mask_or(self, m1, m2, newmask):
+        names = m1.dtype.names
+        for name in names:
+            current1 = m1[name]
+            if current1.dtype.names is not None:
+                self(current1, m2[name], newmask[name])
+            else:
+                umath.logical_or(current1, m2[name], newmask[name])
+        return
+
+    if (m1 is nomask) or (m1 is False):
+        dtype = getattr(m2, 'dtype', MaskType)
+        return make_mask(m2, copy=copy, shrink=shrink, dtype=dtype)
+    if (m2 is nomask) or (m2 is False):
+        dtype = getattr(m1, 'dtype', MaskType)
+        return make_mask(m1, copy=copy, shrink=shrink, dtype=dtype)
+    if m1 is m2 and is_mask(m1):
+        return m1
+    (dtype1, dtype2) = (getattr(m1, 'dtype', None), getattr(m2, 'dtype', None))
+    if dtype1 != dtype2:
+        raise ValueError("Incompatible dtypes '%s'<>'%s'" % (dtype1, dtype2))
+    if dtype1.names is not None:
+        # Allocate an output mask array with the properly broadcast shape.
+        newmask = np.empty(np.broadcast(m1, m2).shape, dtype1)
+        _recursive_mask_or(m1, m2, newmask)
+        return newmask
+    return make_mask(umath.logical_or(m1, m2), copy=copy, shrink=shrink)
+
+
+def flatten_mask(mask):
+    """
+    Returns a completely flattened version of the mask, where nested fields
+    are collapsed.
+
+    Parameters
+    ----------
+    mask : array_like
+        Input array, which will be interpreted as booleans.
+
+    Returns
+    -------
+    flattened_mask : ndarray of bools
+        The flattened input.
+ + Examples + -------- + >>> mask = np.array([0, 0, 1]) + >>> np.ma.flatten_mask(mask) + array([False, False, True]) + + >>> mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)]) + >>> np.ma.flatten_mask(mask) + array([False, False, False, True]) + + >>> mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] + >>> mask = np.array([(0, (0, 0)), (0, (0, 1))], dtype=mdtype) + >>> np.ma.flatten_mask(mask) + array([False, False, False, False, False, True]) + + """ + + def _flatmask(mask): + "Flatten the mask and returns a (maybe nested) sequence of booleans." + mnames = mask.dtype.names + if mnames is not None: + return [flatten_mask(mask[name]) for name in mnames] + else: + return mask + + def _flatsequence(sequence): + "Generates a flattened version of the sequence." + try: + for element in sequence: + if hasattr(element, '__iter__'): + yield from _flatsequence(element) + else: + yield element + except TypeError: + yield sequence + + mask = np.asarray(mask) + flattened = _flatsequence(_flatmask(mask)) + return np.array([_ for _ in flattened], dtype=bool) + + +def _check_mask_axis(mask, axis, keepdims=np._NoValue): + "Check whether there are masked values along the given axis" + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + if mask is not nomask: + return mask.all(axis=axis, **kwargs) + return nomask + + +############################################################################### +# Masking functions # +############################################################################### + +def masked_where(condition, a, copy=True): + """ + Mask an array where a condition is met. + + Return `a` as an array masked where `condition` is True. + Any masked values of `a` or `condition` are also masked in the output. + + Parameters + ---------- + condition : array_like + Masking condition. When `condition` tests floating point values for + equality, consider using ``masked_values`` instead. + a : array_like + Array to mask. + copy : bool + If True (default) make a copy of `a` in the result. If False modify + `a` in place and return a view. + + Returns + ------- + result : MaskedArray + The result of masking `a` where `condition` is True. + + See Also + -------- + masked_values : Mask using floating point equality. + masked_equal : Mask where equal to a given value. + masked_not_equal : Mask where `not` equal to a given value. + masked_less_equal : Mask where less than or equal to a given value. + masked_greater_equal : Mask where greater than or equal to a given value. + masked_less : Mask where less than a given value. + masked_greater : Mask where greater than a given value. + masked_inside : Mask inside a given interval. + masked_outside : Mask outside a given interval. + masked_invalid : Mask invalid values (NaNs or infs). + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_where(a <= 2, a) + masked_array(data=[--, --, --, 3], + mask=[ True, True, True, False], + fill_value=999999) + + Mask array `b` conditional on `a`. 
+
+    >>> b = ['a', 'b', 'c', 'd']
+    >>> ma.masked_where(a == 2, b)
+    masked_array(data=['a', 'b', --, 'd'],
+                 mask=[False, False,  True, False],
+           fill_value='N/A',
+                dtype='<U1')
+
+    Effect of the `copy` argument.
+
+    >>> c = ma.masked_where(a <= 2, a)
+    >>> c
+    masked_array(data=[--, --, --, 3],
+                 mask=[ True,  True,  True, False],
+           fill_value=999999)
+    >>> c[0] = 99
+    >>> c
+    masked_array(data=[99, --, --, 3],
+                 mask=[False,  True,  True, False],
+           fill_value=999999)
+    >>> a
+    array([0, 1, 2, 3])
+    >>> c = ma.masked_where(a <= 2, a, copy=False)
+    >>> c[0] = 99
+    >>> c
+    masked_array(data=[99, --, --, 3],
+                 mask=[False,  True,  True, False],
+           fill_value=999999)
+    >>> a
+    array([99, 1, 2, 3])
+
+    When `condition` or `a` contain masked values.
+
+    >>> a = np.arange(4)
+    >>> a = ma.masked_where(a == 2, a)
+    >>> a
+    masked_array(data=[0, 1, --, 3],
+                 mask=[False, False,  True, False],
+           fill_value=999999)
+    >>> b = np.arange(4)
+    >>> b = ma.masked_where(b == 0, b)
+    >>> b
+    masked_array(data=[--, 1, 2, 3],
+                 mask=[ True, False, False, False],
+           fill_value=999999)
+    >>> ma.masked_where(a == 3, b)
+    masked_array(data=[--, 1, --, --],
+                 mask=[ True, False,  True,  True],
+           fill_value=999999)
+
+    """
+    # Make sure that condition is a valid standard-type mask.
+    cond = make_mask(condition, shrink=False)
+    a = np.array(a, copy=copy, subok=True)
+
+    (cshape, ashape) = (cond.shape, a.shape)
+    if cshape and cshape != ashape:
+        raise IndexError("Inconsistent shape between the condition and the input"
+                         " (got %s and %s)" % (cshape, ashape))
+    if hasattr(a, '_mask'):
+        cond = mask_or(cond, a._mask)
+        cls = type(a)
+    else:
+        cls = MaskedArray
+    result = a.view(cls)
+    # Assign to *.mask so that structured masks are handled correctly.
+    result.mask = _shrink_mask(cond)
+    # There is no view of a boolean so when 'a' is a MaskedArray with nomask
+    # the update to the result's mask has no effect.
+    if not copy and hasattr(a, '_mask') and getmask(a) is nomask:
+        a._mask = result._mask.view()
+    return result
+
+
+def masked_greater(x, value, copy=True):
+    """
+    Mask an array where greater than a given value.
+
+    This function is a shortcut to ``masked_where``, with
+    `condition` = (x > value).
+
+    See Also
+    --------
+    masked_where : Mask where a condition is met.
+
+    Examples
+    --------
+    >>> import numpy.ma as ma
+    >>> a = np.arange(4)
+    >>> a
+    array([0, 1, 2, 3])
+    >>> ma.masked_greater(a, 2)
+    masked_array(data=[0, 1, 2, --],
+                 mask=[False, False, False,  True],
+           fill_value=999999)
+
+    """
+    return masked_where(greater(x, value), x, copy=copy)
+
+
+def masked_greater_equal(x, value, copy=True):
+    """
+    Mask an array where greater than or equal to a given value.
+
+    This function is a shortcut to ``masked_where``, with
+    `condition` = (x >= value).
+
+    See Also
+    --------
+    masked_where : Mask where a condition is met.
+
+    Examples
+    --------
+    >>> import numpy.ma as ma
+    >>> a = np.arange(4)
+    >>> a
+    array([0, 1, 2, 3])
+    >>> ma.masked_greater_equal(a, 2)
+    masked_array(data=[0, 1, --, --],
+                 mask=[False, False,  True,  True],
+           fill_value=999999)
+
+    """
+    return masked_where(greater_equal(x, value), x, copy=copy)
+
+
+def masked_less(x, value, copy=True):
+    """
+    Mask an array where less than a given value.
+
+    This function is a shortcut to ``masked_where``, with
+    `condition` = (x < value).
+
+    See Also
+    --------
+    masked_where : Mask where a condition is met.
+ + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_less(a, 2) + masked_array(data=[--, --, 2, 3], + mask=[ True, True, False, False], + fill_value=999999) + + """ + return masked_where(less(x, value), x, copy=copy) + + +def masked_less_equal(x, value, copy=True): + """ + Mask an array where less than or equal to a given value. + + This function is a shortcut to ``masked_where``, with + `condition` = (x <= value). + + See Also + -------- + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_less_equal(a, 2) + masked_array(data=[--, --, --, 3], + mask=[ True, True, True, False], + fill_value=999999) + + """ + return masked_where(less_equal(x, value), x, copy=copy) + + +def masked_not_equal(x, value, copy=True): + """ + Mask an array where `not` equal to a given value. + + This function is a shortcut to ``masked_where``, with + `condition` = (x != value). + + See Also + -------- + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_not_equal(a, 2) + masked_array(data=[--, --, 2, --], + mask=[ True, True, False, True], + fill_value=999999) + + """ + return masked_where(not_equal(x, value), x, copy=copy) + + +def masked_equal(x, value, copy=True): + """ + Mask an array where equal to a given value. + + This function is a shortcut to ``masked_where``, with + `condition` = (x == value). For floating point arrays, + consider using ``masked_values(x, value)``. + + See Also + -------- + masked_where : Mask where a condition is met. + masked_values : Mask using floating point equality. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_equal(a, 2) + masked_array(data=[0, 1, --, 3], + mask=[False, False, True, False], + fill_value=2) + + """ + output = masked_where(equal(x, value), x, copy=copy) + output.fill_value = value + return output + + +def masked_inside(x, v1, v2, copy=True): + """ + Mask an array inside a given interval. + + Shortcut to ``masked_where``, where `condition` is True for `x` inside + the interval [v1,v2] (v1 <= x <= v2). The boundaries `v1` and `v2` + can be given in either order. + + See Also + -------- + masked_where : Mask where a condition is met. + + Notes + ----- + The array `x` is prefilled with its filling value. + + Examples + -------- + >>> import numpy.ma as ma + >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1] + >>> ma.masked_inside(x, -0.3, 0.3) + masked_array(data=[0.31, 1.2, --, --, -0.4, -1.1], + mask=[False, False, True, True, False, False], + fill_value=1e+20) + + The order of `v1` and `v2` doesn't matter. + + >>> ma.masked_inside(x, 0.3, -0.3) + masked_array(data=[0.31, 1.2, --, --, -0.4, -1.1], + mask=[False, False, True, True, False, False], + fill_value=1e+20) + + """ + if v2 < v1: + (v1, v2) = (v2, v1) + xf = filled(x) + condition = (xf >= v1) & (xf <= v2) + return masked_where(condition, x, copy=copy) + + +def masked_outside(x, v1, v2, copy=True): + """ + Mask an array outside a given interval. + + Shortcut to ``masked_where``, where `condition` is True for `x` outside + the interval [v1,v2] (x < v1)|(x > v2). + The boundaries `v1` and `v2` can be given in either order. + + See Also + -------- + masked_where : Mask where a condition is met. 
+ + Notes + ----- + The array `x` is prefilled with its filling value. + + Examples + -------- + >>> import numpy.ma as ma + >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1] + >>> ma.masked_outside(x, -0.3, 0.3) + masked_array(data=[--, --, 0.01, 0.2, --, --], + mask=[ True, True, False, False, True, True], + fill_value=1e+20) + + The order of `v1` and `v2` doesn't matter. + + >>> ma.masked_outside(x, 0.3, -0.3) + masked_array(data=[--, --, 0.01, 0.2, --, --], + mask=[ True, True, False, False, True, True], + fill_value=1e+20) + + """ + if v2 < v1: + (v1, v2) = (v2, v1) + xf = filled(x) + condition = (xf < v1) | (xf > v2) + return masked_where(condition, x, copy=copy) + + +def masked_object(x, value, copy=True, shrink=True): + """ + Mask the array `x` where the data are exactly equal to value. + + This function is similar to `masked_values`, but only suitable + for object arrays: for floating point, use `masked_values` instead. + + Parameters + ---------- + x : array_like + Array to mask + value : object + Comparison value + copy : {True, False}, optional + Whether to return a copy of `x`. + shrink : {True, False}, optional + Whether to collapse a mask full of False to nomask + + Returns + ------- + result : MaskedArray + The result of masking `x` where equal to `value`. + + See Also + -------- + masked_where : Mask where a condition is met. + masked_equal : Mask where equal to a given value (integers). + masked_values : Mask using floating point equality. + + Examples + -------- + >>> import numpy.ma as ma + >>> food = np.array(['green_eggs', 'ham'], dtype=object) + >>> # don't eat spoiled food + >>> eat = ma.masked_object(food, 'green_eggs') + >>> eat + masked_array(data=[--, 'ham'], + mask=[ True, False], + fill_value='green_eggs', + dtype=object) + >>> # plain ol` ham is boring + >>> fresh_food = np.array(['cheese', 'ham', 'pineapple'], dtype=object) + >>> eat = ma.masked_object(fresh_food, 'green_eggs') + >>> eat + masked_array(data=['cheese', 'ham', 'pineapple'], + mask=False, + fill_value='green_eggs', + dtype=object) + + Note that `mask` is set to ``nomask`` if possible. + + >>> eat + masked_array(data=['cheese', 'ham', 'pineapple'], + mask=False, + fill_value='green_eggs', + dtype=object) + + """ + if isMaskedArray(x): + condition = umath.equal(x._data, value) + mask = x._mask + else: + condition = umath.equal(np.asarray(x), value) + mask = nomask + mask = mask_or(mask, make_mask(condition, shrink=shrink)) + return masked_array(x, mask=mask, copy=copy, fill_value=value) + + +def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True): + """ + Mask using floating point equality. + + Return a MaskedArray, masked where the data in array `x` are approximately + equal to `value`, determined using `isclose`. The default tolerances for + `masked_values` are the same as those for `isclose`. + + For integer types, exact equality is used, in the same way as + `masked_equal`. + + The fill_value is set to `value` and the mask is set to ``nomask`` if + possible. + + Parameters + ---------- + x : array_like + Array to mask. + value : float + Masking value. + rtol, atol : float, optional + Tolerance parameters passed on to `isclose` + copy : bool, optional + Whether to return a copy of `x`. + shrink : bool, optional + Whether to collapse a mask full of False to ``nomask``. + + Returns + ------- + result : MaskedArray + The result of masking `x` where approximately equal to `value`. + + See Also + -------- + masked_where : Mask where a condition is met. 
+ masked_equal : Mask where equal to a given value (integers). + + Examples + -------- + >>> import numpy.ma as ma + >>> x = np.array([1, 1.1, 2, 1.1, 3]) + >>> ma.masked_values(x, 1.1) + masked_array(data=[1.0, --, 2.0, --, 3.0], + mask=[False, True, False, True, False], + fill_value=1.1) + + Note that `mask` is set to ``nomask`` if possible. + + >>> ma.masked_values(x, 1.5) + masked_array(data=[1. , 1.1, 2. , 1.1, 3. ], + mask=False, + fill_value=1.5) + + For integers, the fill value will be different in general to the + result of ``masked_equal``. + + >>> x = np.arange(5) + >>> x + array([0, 1, 2, 3, 4]) + >>> ma.masked_values(x, 2) + masked_array(data=[0, 1, --, 3, 4], + mask=[False, False, True, False, False], + fill_value=2) + >>> ma.masked_equal(x, 2) + masked_array(data=[0, 1, --, 3, 4], + mask=[False, False, True, False, False], + fill_value=2) + + """ + xnew = filled(x, value) + if np.issubdtype(xnew.dtype, np.floating): + mask = np.isclose(xnew, value, atol=atol, rtol=rtol) + else: + mask = umath.equal(xnew, value) + ret = masked_array(xnew, mask=mask, copy=copy, fill_value=value) + if shrink: + ret.shrink_mask() + return ret + + +def masked_invalid(a, copy=True): + """ + Mask an array where invalid values occur (NaNs or infs). + + This function is a shortcut to ``masked_where``, with + `condition` = ~(np.isfinite(a)). Any pre-existing mask is conserved. + Only applies to arrays with a dtype where NaNs or infs make sense + (i.e. floating point types), but accepts any array_like object. + + See Also + -------- + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(5, dtype=float) + >>> a[2] = np.NaN + >>> a[3] = np.PINF + >>> a + array([ 0., 1., nan, inf, 4.]) + >>> ma.masked_invalid(a) + masked_array(data=[0.0, 1.0, --, --, 4.0], + mask=[False, False, True, True, False], + fill_value=1e+20) + + """ + a = np.array(a, copy=copy, subok=True) + mask = getattr(a, '_mask', None) + if mask is not None: + condition = ~(np.isfinite(getdata(a))) + if mask is not nomask: + condition |= mask + cls = type(a) + else: + condition = ~(np.isfinite(a)) + cls = MaskedArray + result = a.view(cls) + result._mask = condition + return result + + +############################################################################### +# Printing options # +############################################################################### + + +class _MaskedPrintOption: + """ + Handle the string used to represent missing data in a masked array. + + """ + + def __init__(self, display): + """ + Create the masked_print_option object. + + """ + self._display = display + self._enabled = True + + def display(self): + """ + Display the string to print for masked values. + + """ + return self._display + + def set_display(self, s): + """ + Set the string to print for masked values. + + """ + self._display = s + + def enabled(self): + """ + Is the use of the display value enabled? + + """ + return self._enabled + + def enable(self, shrink=1): + """ + Set the enabling shrink to `shrink`. + + """ + self._enabled = shrink + + def __str__(self): + return str(self._display) + + __repr__ = __str__ + +# if you single index into a masked location you get this object. +masked_print_option = _MaskedPrintOption('--') + + +def _recursive_printoption(result, mask, printopt): + """ + Puts printoptions in result where mask is True. 
+ + Private function allowing for recursion + + """ + names = result.dtype.names + if names is not None: + for name in names: + curdata = result[name] + curmask = mask[name] + _recursive_printoption(curdata, curmask, printopt) + else: + np.copyto(result, printopt, where=mask) + return + +# For better or worse, these end in a newline +_legacy_print_templates = dict( + long_std=textwrap.dedent("""\ + masked_%(name)s(data = + %(data)s, + %(nlen)s mask = + %(mask)s, + %(nlen)s fill_value = %(fill)s) + """), + long_flx=textwrap.dedent("""\ + masked_%(name)s(data = + %(data)s, + %(nlen)s mask = + %(mask)s, + %(nlen)s fill_value = %(fill)s, + %(nlen)s dtype = %(dtype)s) + """), + short_std=textwrap.dedent("""\ + masked_%(name)s(data = %(data)s, + %(nlen)s mask = %(mask)s, + %(nlen)s fill_value = %(fill)s) + """), + short_flx=textwrap.dedent("""\ + masked_%(name)s(data = %(data)s, + %(nlen)s mask = %(mask)s, + %(nlen)s fill_value = %(fill)s, + %(nlen)s dtype = %(dtype)s) + """) +) + +############################################################################### +# MaskedArray class # +############################################################################### + + +def _recursive_filled(a, mask, fill_value): + """ + Recursively fill `a` with `fill_value`. + + """ + names = a.dtype.names + for name in names: + current = a[name] + if current.dtype.names is not None: + _recursive_filled(current, mask[name], fill_value[name]) + else: + np.copyto(current, fill_value[name], where=mask[name]) + + +def flatten_structured_array(a): + """ + Flatten a structured array. + + The data type of the output is chosen such that it can represent all of the + (nested) fields. + + Parameters + ---------- + a : structured array + + Returns + ------- + output : masked array or ndarray + A flattened masked array if the input is a masked array, otherwise a + standard ndarray. + + Examples + -------- + >>> ndtype = [('a', int), ('b', float)] + >>> a = np.array([(1, 1), (2, 2)], dtype=ndtype) + >>> np.ma.flatten_structured_array(a) + array([[1., 1.], + [2., 2.]]) + + """ + + def flatten_sequence(iterable): + """ + Flattens a compound of nested iterables. + + """ + for elm in iter(iterable): + if hasattr(elm, '__iter__'): + yield from flatten_sequence(elm) + else: + yield elm + + a = np.asanyarray(a) + inishape = a.shape + a = a.ravel() + if isinstance(a, MaskedArray): + out = np.array([tuple(flatten_sequence(d.item())) for d in a._data]) + out = out.view(MaskedArray) + out._mask = np.array([tuple(flatten_sequence(d.item())) + for d in getmaskarray(a)]) + else: + out = np.array([tuple(flatten_sequence(d.item())) for d in a]) + if len(inishape) > 1: + newshape = list(out.shape) + newshape[0] = inishape + out.shape = tuple(flatten_sequence(newshape)) + return out + + +def _arraymethod(funcname, onmask=True): + """ + Return a class method wrapper around a basic array method. + + Creates a class method which returns a masked array, where the new + ``_data`` array is the output of the corresponding basic method called + on the original ``_data``. + + If `onmask` is True, the new mask is the output of the method called + on the initial mask. Otherwise, the new mask is just a reference + to the initial mask. + + Parameters + ---------- + funcname : str + Name of the function to apply on data. + onmask : bool + Whether the mask must be processed also (True) or left + alone (False). Default is True. Make available as `_onmask` + attribute. 
+ + Returns + ------- + method : instancemethod + Class method wrapper of the specified basic array method. + + """ + def wrapped_method(self, *args, **params): + result = getattr(self._data, funcname)(*args, **params) + result = result.view(type(self)) + result._update_from(self) + mask = self._mask + if not onmask: + result.__setmask__(mask) + elif mask is not nomask: + # __setmask__ makes a copy, which we don't want + result._mask = getattr(mask, funcname)(*args, **params) + return result + methdoc = getattr(ndarray, funcname, None) or getattr(np, funcname, None) + if methdoc is not None: + wrapped_method.__doc__ = methdoc.__doc__ + wrapped_method.__name__ = funcname + return wrapped_method + + + class MaskedIterator: + """ + Flat iterator object to iterate over masked arrays. + + A `MaskedIterator` iterator is returned by ``x.flat`` for any masked array + `x`. It allows iterating over the array as if it were a 1-D array, + either in a for-loop or by calling its `next` method. + + Iteration is done in C-contiguous style, with the last index varying the + fastest. The iterator can also be indexed using basic slicing or + advanced indexing. + + See Also + -------- + MaskedArray.flat : Return a flat iterator over an array. + MaskedArray.flatten : Returns a flattened copy of an array. + + Notes + ----- + `MaskedIterator` is not exported by the `ma` module. Instead of + instantiating a `MaskedIterator` directly, use `MaskedArray.flat`. + + Examples + -------- + >>> x = np.ma.array(np.arange(6).reshape(2, 3)) + >>> fl = x.flat + >>> type(fl) + <class 'numpy.ma.core.MaskedIterator'> + >>> for item in fl: + ... print(item) + ... + 0 + 1 + 2 + 3 + 4 + 5 + + Extracting more than a single element by indexing the `MaskedIterator` + returns a masked array: + + >>> fl[2:4] + masked_array(data = [2 3], + mask = False, + fill_value = 999999) + + """ + + def __init__(self, ma): + self.ma = ma + self.dataiter = ma._data.flat + + if ma._mask is nomask: + self.maskiter = None + else: + self.maskiter = ma._mask.flat + + def __iter__(self): + return self + + def __getitem__(self, indx): + result = self.dataiter.__getitem__(indx).view(type(self.ma)) + if self.maskiter is not None: + _mask = self.maskiter.__getitem__(indx) + if isinstance(_mask, ndarray): + # set shape to match that of data; this is needed for matrices + _mask.shape = result.shape + result._mask = _mask + elif isinstance(_mask, np.void): + return mvoid(result, mask=_mask, hardmask=self.ma._hardmask) + elif _mask: # Just a scalar, masked + return masked + return result + + # This won't work if ravel makes a copy + def __setitem__(self, index, value): + self.dataiter[index] = getdata(value) + if self.maskiter is not None: + self.maskiter[index] = getmaskarray(value) + + def __next__(self): + """ + Return the next value, or raise StopIteration. + + Examples + -------- + >>> x = np.ma.array([3, 2], mask=[0, 1]) + >>> fl = x.flat + >>> next(fl) + 3 + >>> next(fl) + masked + >>> next(fl) + Traceback (most recent call last): + ... + StopIteration + + """ + d = next(self.dataiter) + if self.maskiter is not None: + m = next(self.maskiter) + if isinstance(m, np.void): + return mvoid(d, mask=m, hardmask=self.ma._hardmask) + elif m: # Just a scalar, masked + return masked + return d + + + class MaskedArray(ndarray): + """ + An array class with possibly masked values. + + Masked values of True exclude the corresponding element from any + computation.
+ + Construction:: + + x = MaskedArray(data, mask=nomask, dtype=None, copy=False, subok=True, + ndmin=0, fill_value=None, keep_mask=True, hard_mask=None, + shrink=True, order=None) + + Parameters + ---------- + data : array_like + Input data. + mask : sequence, optional + Mask. Must be convertible to an array of booleans with the same + shape as `data`. True indicates a masked (i.e. invalid) data. + dtype : dtype, optional + Data type of the output. + If `dtype` is None, the type of the data argument (``data.dtype``) + is used. If `dtype` is not None and different from ``data.dtype``, + a copy is performed. + copy : bool, optional + Whether to copy the input data (True), or to use a reference instead. + Default is False. + subok : bool, optional + Whether to return a subclass of `MaskedArray` if possible (True) or a + plain `MaskedArray`. Default is True. + ndmin : int, optional + Minimum number of dimensions. Default is 0. + fill_value : scalar, optional + Value used to fill in the masked values when necessary. + If None, a default based on the data-type is used. + keep_mask : bool, optional + Whether to combine `mask` with the mask of the input data, if any + (True), or to use only `mask` for the output (False). Default is True. + hard_mask : bool, optional + Whether to use a hard mask or not. With a hard mask, masked values + cannot be unmasked. Default is False. + shrink : bool, optional + Whether to force compression of an empty mask. Default is True. + order : {'C', 'F', 'A'}, optional + Specify the order of the array. If order is 'C', then the array + will be in C-contiguous order (last-index varies the fastest). + If order is 'F', then the returned array will be in + Fortran-contiguous order (first-index varies the fastest). + If order is 'A' (default), then the returned array may be + in any order (either C-, Fortran-contiguous, or even discontiguous), + unless a copy is required, in which case it will be C-contiguous. + + Examples + -------- + + The ``mask`` can be initialized with an array of boolean values + with the same shape as ``data``. + + >>> data = np.arange(6).reshape((2, 3)) + >>> np.ma.MaskedArray(data, mask=[[False, True, False], + ... [False, False, True]]) + masked_array( + data=[[0, --, 2], + [3, 4, --]], + mask=[[False, True, False], + [False, False, True]], + fill_value=999999) + + Alternatively, the ``mask`` can be initialized to homogeneous boolean + array with the same shape as ``data`` by passing in a scalar + boolean value: + + >>> np.ma.MaskedArray(data, mask=False) + masked_array( + data=[[0, 1, 2], + [3, 4, 5]], + mask=[[False, False, False], + [False, False, False]], + fill_value=999999) + + >>> np.ma.MaskedArray(data, mask=True) + masked_array( + data=[[--, --, --], + [--, --, --]], + mask=[[ True, True, True], + [ True, True, True]], + fill_value=999999, + dtype=int64) + + .. note:: + The recommended practice for initializing ``mask`` with a scalar + boolean value is to use ``True``/``False`` rather than + ``np.True_``/``np.False_``. The reason is :attr:`nomask` + is represented internally as ``np.False_``. + + >>> np.False_ is np.ma.nomask + True + + """ + + __array_priority__ = 15 + _defaultmask = nomask + _defaulthardmask = False + _baseclass = ndarray + + # Maximum number of elements per axis used when printing an array. The + # 1d case is handled separately because we need more values in this case. 
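+ # (These are class-level defaults: _insert_masked_print() below consults + # them to extract just the corners of a large array before converting it + # to object dtype for display.)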
+ _print_width = 100 + _print_width_1d = 1500 + + def __new__(cls, data=None, mask=nomask, dtype=None, copy=False, + subok=True, ndmin=0, fill_value=None, keep_mask=True, + hard_mask=None, shrink=True, order=None): + """ + Create a new masked array from scratch. + + Notes + ----- + A masked array can also be created by taking a .view(MaskedArray). + + """ + # Process data. + _data = np.array(data, dtype=dtype, copy=copy, + order=order, subok=True, ndmin=ndmin) + _baseclass = getattr(data, '_baseclass', type(_data)) + # Check that we're not erasing the mask. + if isinstance(data, MaskedArray) and (data.shape != _data.shape): + copy = True + + # Here, we copy the _view_, so that we can attach new properties to it + # we must never do .view(MaskedConstant), as that would create a new + # instance of np.ma.masked, which make identity comparison fail + if isinstance(data, cls) and subok and not isinstance(data, MaskedConstant): + _data = ndarray.view(_data, type(data)) + else: + _data = ndarray.view(_data, cls) + # Backwards compatibility w/ numpy.core.ma. + if hasattr(data, '_mask') and not isinstance(data, ndarray): + _data._mask = data._mask + # FIXME _sharedmask is never used. + _sharedmask = True + # Process mask. + # Type of the mask + mdtype = make_mask_descr(_data.dtype) + + if mask is nomask: + # Case 1. : no mask in input. + # Erase the current mask ? + if not keep_mask: + # With a reduced version + if shrink: + _data._mask = nomask + # With full version + else: + _data._mask = np.zeros(_data.shape, dtype=mdtype) + # Check whether we missed something + elif isinstance(data, (tuple, list)): + try: + # If data is a sequence of masked array + mask = np.array( + [getmaskarray(np.asanyarray(m, dtype=_data.dtype)) + for m in data], dtype=mdtype) + except ValueError: + # If data is nested + mask = nomask + # Force shrinking of the mask if needed (and possible) + if (mdtype == MaskType) and mask.any(): + _data._mask = mask + _data._sharedmask = False + else: + _data._sharedmask = not copy + if copy: + _data._mask = _data._mask.copy() + # Reset the shape of the original mask + if getmask(data) is not nomask: + data._mask.shape = data.shape + else: + # Case 2. : With a mask in input. + # If mask is boolean, create an array of True or False + if mask is True and mdtype == MaskType: + mask = np.ones(_data.shape, dtype=mdtype) + elif mask is False and mdtype == MaskType: + mask = np.zeros(_data.shape, dtype=mdtype) + else: + # Read the mask with the current mdtype + try: + mask = np.array(mask, copy=copy, dtype=mdtype) + # Or assume it's a sequence of bool/int + except TypeError: + mask = np.array([tuple([m] * len(mdtype)) for m in mask], + dtype=mdtype) + # Make sure the mask and the data have the same shape + if mask.shape != _data.shape: + (nd, nm) = (_data.size, mask.size) + if nm == 1: + mask = np.resize(mask, _data.shape) + elif nm == nd: + mask = np.reshape(mask, _data.shape) + else: + msg = "Mask and data not compatible: data size is %i, " + \ + "mask size is %i." 
+ raise MaskError(msg % (nd, nm)) + copy = True + # Set the mask to the new value + if _data._mask is nomask: + _data._mask = mask + _data._sharedmask = not copy + else: + if not keep_mask: + _data._mask = mask + _data._sharedmask = not copy + else: + if _data.dtype.names is not None: + def _recursive_or(a, b): + "do a|=b on each field of a, recursively" + for name in a.dtype.names: + (af, bf) = (a[name], b[name]) + if af.dtype.names is not None: + _recursive_or(af, bf) + else: + af |= bf + + _recursive_or(_data._mask, mask) + else: + _data._mask = np.logical_or(mask, _data._mask) + _data._sharedmask = False + # Update fill_value. + if fill_value is None: + fill_value = getattr(data, '_fill_value', None) + # But don't run the check unless we have something to check. + if fill_value is not None: + _data._fill_value = _check_fill_value(fill_value, _data.dtype) + # Process extra options .. + if hard_mask is None: + _data._hardmask = getattr(data, '_hardmask', False) + else: + _data._hardmask = hard_mask + _data._baseclass = _baseclass + return _data + + + def _update_from(self, obj): + """ + Copies some attributes of obj to self. + + """ + if isinstance(obj, ndarray): + _baseclass = type(obj) + else: + _baseclass = ndarray + # We need to copy the _basedict to avoid backward propagation + _optinfo = {} + _optinfo.update(getattr(obj, '_optinfo', {})) + _optinfo.update(getattr(obj, '_basedict', {})) + if not isinstance(obj, MaskedArray): + _optinfo.update(getattr(obj, '__dict__', {})) + _dict = dict(_fill_value=getattr(obj, '_fill_value', None), + _hardmask=getattr(obj, '_hardmask', False), + _sharedmask=getattr(obj, '_sharedmask', False), + _isfield=getattr(obj, '_isfield', False), + _baseclass=getattr(obj, '_baseclass', _baseclass), + _optinfo=_optinfo, + _basedict=_optinfo) + self.__dict__.update(_dict) + self.__dict__.update(_optinfo) + return + + def __array_finalize__(self, obj): + """ + Finalizes the masked array. + + """ + # Get main attributes. + self._update_from(obj) + + # We have to decide how to initialize self.mask, based on + # obj.mask. This is very difficult. There might be some + # correspondence between the elements in the array we are being + # created from (= obj) and us. Or there might not. This method can + # be called in all kinds of places for all kinds of reasons -- could + # be empty_like, could be slicing, could be a ufunc, could be a view. + # The numpy subclassing interface simply doesn't give us any way + # to know, which means that at best this method will be based on + # guesswork and heuristics. To make things worse, there isn't even any + # clear consensus about what the desired behavior is. For instance, + # most users think that np.empty_like(marr) -- which goes via this + # method -- should return a masked array with an empty mask (see + # gh-3404 and linked discussions), but others disagree, and they have + # existing code which depends on empty_like returning an array that + # matches the input mask. + # + # Historically our algorithm was: if the template object mask had the + # same *number of elements* as us, then we used *its mask object + # itself* as our mask, so that writes to us would also write to the + # original array. This is horribly broken in multiple ways. + # + # Now what we do instead is, if the template object mask has the same + # number of elements as us, and we do not have the same base pointer + # as the template object (b/c views like arr[...]
should keep the same + # mask), then we make a copy of the template object mask and use + # that. This is also horribly broken but somewhat less so. Maybe. + if isinstance(obj, ndarray): + # XX: This looks like a bug -- shouldn't it check self.dtype + # instead? + if obj.dtype.names is not None: + _mask = getmaskarray(obj) + else: + _mask = getmask(obj) + + # If self and obj point to exactly the same data, then probably + # self is a simple view of obj (e.g., self = obj[...]), so they + # should share the same mask. (This isn't 100% reliable, e.g. self + # could be the first row of obj, or have strange strides, but as a + # heuristic it's not bad.) In all other cases, we make a copy of + # the mask, so that future modifications to 'self' do not end up + # side-effecting 'obj' as well. + if (_mask is not nomask and obj.__array_interface__["data"][0] + != self.__array_interface__["data"][0]): + # We should make a copy. But we could get here via astype, + # in which case the mask might need a new dtype as well + # (e.g., changing to or from a structured dtype), and the + # order could have changed. So, change the mask type if + # needed and use astype instead of copy. + if self.dtype == obj.dtype: + _mask_dtype = _mask.dtype + else: + _mask_dtype = make_mask_descr(self.dtype) + + if self.flags.c_contiguous: + order = "C" + elif self.flags.f_contiguous: + order = "F" + else: + order = "K" + + _mask = _mask.astype(_mask_dtype, order) + else: + # Take a view so shape changes, etc., do not propagate back. + _mask = _mask.view() + else: + _mask = nomask + + self._mask = _mask + # Finalize the mask + if self._mask is not nomask: + try: + self._mask.shape = self.shape + except ValueError: + self._mask = nomask + except (TypeError, AttributeError): + # When _mask.shape is not writable (because it's a void) + pass + + # Finalize the fill_value + if self._fill_value is not None: + self._fill_value = _check_fill_value(self._fill_value, self.dtype) + elif self.dtype.names is not None: + # Finalize the default fill_value for structured arrays + self._fill_value = _check_fill_value(None, self.dtype) + + def __array_wrap__(self, obj, context=None): + """ + Special hook for ufuncs. + + Wraps the numpy array and sets the mask according to context. 
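+ + In outline (matching the code below): when ``context`` is provided it is + unpacked as ``(func, args, out_i)``, the masks of the ufunc inputs are + OR-ed together, and for ufuncs with a restricted domain the result is + filled and masked wherever the domain callable flags the inputs as + invalid.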
+ + """ + if obj is self: # for in-place operations + result = obj + else: + result = obj.view(type(self)) + result._update_from(self) + + if context is not None: + result._mask = result._mask.copy() + func, args, out_i = context + # args sometimes contains outputs (gh-10459), which we don't want + input_args = args[:func.nin] + m = reduce(mask_or, [getmaskarray(arg) for arg in input_args]) + # Get the domain mask + domain = ufunc_domain.get(func, None) + if domain is not None: + # Take the domain, and make sure it's a ndarray + with np.errstate(divide='ignore', invalid='ignore'): + d = filled(domain(*input_args), True) + + if d.any(): + # Fill the result where the domain is wrong + try: + # Binary domain: take the last value + fill_value = ufunc_fills[func][-1] + except TypeError: + # Unary domain: just use this one + fill_value = ufunc_fills[func] + except KeyError: + # Domain not recognized, use fill_value instead + fill_value = self.fill_value + + np.copyto(result, fill_value, where=d) + + # Update the mask + if m is nomask: + m = d + else: + # Don't modify inplace, we risk back-propagation + m = (m | d) + + # Make sure the mask has the proper size + if result is not self and result.shape == () and m: + return masked + else: + result._mask = m + result._sharedmask = False + + return result + + def view(self, dtype=None, type=None, fill_value=None): + """ + Return a view of the MaskedArray data. + + Parameters + ---------- + dtype : data-type or ndarray sub-class, optional + Data-type descriptor of the returned view, e.g., float32 or int16. + The default, None, results in the view having the same data-type + as `a`. As with ``ndarray.view``, dtype can also be specified as + an ndarray sub-class, which then specifies the type of the + returned object (this is equivalent to setting the ``type`` + parameter). + type : Python type, optional + Type of the returned view, either ndarray or a subclass. The + default None results in type preservation. + fill_value : scalar, optional + The value to use for invalid entries (None by default). + If None, then this argument is inferred from the passed `dtype`, or + in its absence the original array, as discussed in the notes below. + + See Also + -------- + numpy.ndarray.view : Equivalent method on ndarray object. + + Notes + ----- + + ``a.view()`` is used two different ways: + + ``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view + of the array's memory with a different data-type. This can cause a + reinterpretation of the bytes of memory. + + ``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just + returns an instance of `ndarray_subclass` that looks at the same array + (same shape, dtype, etc.) This does not cause a reinterpretation of the + memory. + + If `fill_value` is not specified, but `dtype` is specified (and is not + an ndarray sub-class), the `fill_value` of the MaskedArray will be + reset. If neither `fill_value` nor `dtype` are specified (or if + `dtype` is an ndarray sub-class), then the fill value is preserved. + Finally, if `fill_value` is specified, but `dtype` is not, the fill + value is set to the specified value. + + For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of + bytes per entry than the previous dtype (for example, converting a + regular array to a structured array), then the behavior of the view + cannot be predicted just from the superficial appearance of ``a`` (shown + by ``print(a)``). It also depends on exactly how ``a`` is stored in + memory. 
Therefore if ``a`` is C-ordered versus fortran-ordered, versus + defined as a slice or transpose, etc., the view may give different + results. + """ + + if dtype is None: + if type is None: + output = ndarray.view(self) + else: + output = ndarray.view(self, type) + elif type is None: + try: + if issubclass(dtype, ndarray): + output = ndarray.view(self, dtype) + dtype = None + else: + output = ndarray.view(self, dtype) + except TypeError: + output = ndarray.view(self, dtype) + else: + output = ndarray.view(self, dtype, type) + + # also make the mask be a view (so attr changes to the view's + # mask do not affect original object's mask) + # (especially important to avoid affecting the np.ma.masked singleton) + if getmask(output) is not nomask: + output._mask = output._mask.view() + + # Make sure to reset the _fill_value if needed + if getattr(output, '_fill_value', None) is not None: + if fill_value is None: + if dtype is None: + pass # leave _fill_value as is + else: + output._fill_value = None + else: + output.fill_value = fill_value + return output + + def __getitem__(self, indx): + """ + x.__getitem__(y) <==> x[y] + + Return the item described by i, as a masked array. + + """ + # We could directly use ndarray.__getitem__ on self. + # But then we would have to modify __array_finalize__ to prevent the + # mask from being reshaped if it hasn't been set up properly yet + # So it's easier to stick to the current version + dout = self.data[indx] + _mask = self._mask + + def _is_scalar(m): + return not isinstance(m, np.ndarray) + + def _scalar_heuristic(arr, elem): + """ + Return whether `elem` is a scalar result of indexing `arr`, or None + if undecidable without promoting nomask to a full mask + """ + # obviously a scalar + if not isinstance(elem, np.ndarray): + return True + + # object array scalar indexing can return anything + elif arr.dtype.type is np.object_: + if arr.dtype is not elem.dtype: + # elem is an array, but dtypes do not match, so must be + # an element + return True + + # well-behaved subclass that only returns 0d arrays when + # expected - this is not a scalar + elif type(arr).__getitem__ == ndarray.__getitem__: + return False + + return None + + if _mask is not nomask: + # _mask cannot be a subclass, so it tells us whether we should + # expect a scalar. It also cannot be of dtype object. + mout = _mask[indx] + scalar_expected = _is_scalar(mout) + + else: + # attempt to apply the heuristic to avoid constructing a full mask + mout = nomask + scalar_expected = _scalar_heuristic(self.data, dout) + if scalar_expected is None: + # heuristics have failed + # construct a full array, so we can be certain. This is costly. + # we could also fall back on ndarray.__getitem__(self.data, indx) + scalar_expected = _is_scalar(getmaskarray(self)[indx]) + + # Did we extract a single item? + if scalar_expected: + # A record + if isinstance(dout, np.void): + # We should always re-cast to mvoid, otherwise users can + # change masks on rows that already have masked values, but not + # on rows that have no masked values, which is inconsistent. + return mvoid(dout, mask=mout, hardmask=self._hardmask) + + # special case introduced in gh-5962 + elif (self.dtype.type is np.object_ and + isinstance(dout, np.ndarray) and + dout is not masked): + # If masked, turn into a MaskedArray, with everything masked.
+ if mout: + return MaskedArray(dout, mask=True) + else: + return dout + + # Just a scalar + else: + if mout: + return masked + else: + return dout + else: + # Force dout to MA + dout = dout.view(type(self)) + # Inherit attributes from self + dout._update_from(self) + # Check the fill_value + if is_string_or_list_of_strings(indx): + if self._fill_value is not None: + dout._fill_value = self._fill_value[indx] + + # Something like gh-15895 has happened if this check fails. + # _fill_value should always be an ndarray. + if not isinstance(dout._fill_value, np.ndarray): + raise RuntimeError('Internal NumPy error.') + # If we're indexing a multidimensional field in a + # structured array (such as dtype("(2,)i2,(2,)i1")), + # dimensionality goes up (M[field].ndim == M.ndim + + # M.dtype[field].ndim). That's fine for + # M[field] but problematic for M[field].fill_value + # which should have shape () to avoid breaking several + # methods. There is no great way out, so set to + # first element. See issue #6723. + if dout._fill_value.ndim > 0: + if not (dout._fill_value == + dout._fill_value.flat[0]).all(): + warnings.warn( + "Upon accessing multidimensional field " + f"{indx!s}, need to keep dimensionality " + "of fill_value at 0. Discarding " + "heterogeneous fill_value and setting " + f"all to {dout._fill_value[0]!s}.", + stacklevel=2) + # Need to use `.flat[0:1].squeeze(...)` instead of just + # `.flat[0]` to ensure the result is a 0d array and not + # a scalar. + dout._fill_value = dout._fill_value.flat[0:1].squeeze(axis=0) + dout._isfield = True + # Update the mask if needed + if mout is not nomask: + # set shape to match that of data; this is needed for matrices + dout._mask = reshape(mout, dout.shape) + dout._sharedmask = True + # Note: Don't try to check for m.any(), that'll take too long + return dout + + def __setitem__(self, indx, value): + """ + x.__setitem__(i, y) <==> x[i]=y + + Set item described by index. If value is masked, masks those + locations. + + """ + if self is masked: + raise MaskError('Cannot alter the masked element.') + _data = self._data + _mask = self._mask + if isinstance(indx, str): + _data[indx] = value + if _mask is nomask: + self._mask = _mask = make_mask_none(self.shape, self.dtype) + _mask[indx] = getmask(value) + return + + _dtype = _data.dtype + + if value is masked: + # The mask wasn't set: create a full version. + if _mask is nomask: + _mask = self._mask = make_mask_none(self.shape, _dtype) + # Now, set the mask to its value. + if _dtype.names is not None: + _mask[indx] = tuple([True] * len(_dtype.names)) + else: + _mask[indx] = True + return + + # Get the _data part of the new value + dval = getattr(value, '_data', value) + # Get the _mask part of the new value + mval = getmask(value) + if _dtype.names is not None and mval is nomask: + mval = tuple([False] * len(_dtype.names)) + if _mask is nomask: + # Set the data, then the mask + _data[indx] = dval + if mval is not nomask: + _mask = self._mask = make_mask_none(self.shape, _dtype) + _mask[indx] = mval + elif not self._hardmask: + # Set the data, then the mask + _data[indx] = dval + _mask[indx] = mval + elif hasattr(indx, 'dtype') and (indx.dtype == MaskType): + indx = indx * umath.logical_not(_mask) + _data[indx] = dval + else: + if _dtype.names is not None: + err_msg = "Flexible 'hard' masks are not yet supported." 
+ raise NotImplementedError(err_msg) + mindx = mask_or(_mask[indx], mval, copy=True) + dindx = self._data[indx] + if dindx.size > 1: + np.copyto(dindx, dval, where=~mindx) + elif mindx is nomask: + dindx = dval + _data[indx] = dindx + _mask[indx] = mindx + return + + # Define so that we can overwrite the setter. + @property + def dtype(self): + return super().dtype + + @dtype.setter + def dtype(self, dtype): + super(MaskedArray, type(self)).dtype.__set__(self, dtype) + if self._mask is not nomask: + self._mask = self._mask.view(make_mask_descr(dtype), ndarray) + # Try to reset the shape of the mask (if we don't have a void). + # This raises a ValueError if the dtype change won't work. + try: + self._mask.shape = self.shape + except (AttributeError, TypeError): + pass + + @property + def shape(self): + return super().shape + + @shape.setter + def shape(self, shape): + super(MaskedArray, type(self)).shape.__set__(self, shape) + # Cannot use self._mask, since it may not (yet) exist when a + # masked matrix sets the shape. + if getmask(self) is not nomask: + self._mask.shape = self.shape + + def __setmask__(self, mask, copy=False): + """ + Set the mask. + + """ + idtype = self.dtype + current_mask = self._mask + if mask is masked: + mask = True + + if current_mask is nomask: + # Make sure the mask is set + # Just don't do anything if there's nothing to do. + if mask is nomask: + return + current_mask = self._mask = make_mask_none(self.shape, idtype) + + if idtype.names is None: + # No named fields. + # Hardmask: don't unmask the data + if self._hardmask: + current_mask |= mask + # Softmask: set everything to False + # If it's obviously a compatible scalar, use a quick update + # method. + elif isinstance(mask, (int, float, np.bool_, np.number)): + current_mask[...] = mask + # Otherwise fall back to the slower, general purpose way. + else: + current_mask.flat = mask + else: + # Named fields w/ + mdtype = current_mask.dtype + mask = np.array(mask, copy=False) + # Mask is a singleton + if not mask.ndim: + # It's a boolean : make a record + if mask.dtype.kind == 'b': + mask = np.array(tuple([mask.item()] * len(mdtype)), + dtype=mdtype) + # It's a record: make sure the dtype is correct + else: + mask = mask.astype(mdtype) + # Mask is a sequence + else: + # Make sure the new mask is a ndarray with the proper dtype + try: + mask = np.array(mask, copy=copy, dtype=mdtype) + # Or assume it's a sequence of bool/int + except TypeError: + mask = np.array([tuple([m] * len(mdtype)) for m in mask], + dtype=mdtype) + # Hardmask: don't unmask the data + if self._hardmask: + for n in idtype.names: + current_mask[n] |= mask[n] + # Softmask: set everything to False + # If it's obviously a compatible scalar, use a quick update + # method. + elif isinstance(mask, (int, float, np.bool_, np.number)): + current_mask[...] = mask + # Otherwise fall back to the slower, general purpose way. + else: + current_mask.flat = mask + # Reshape if needed + if current_mask.shape: + current_mask.shape = self.shape + return + + _set_mask = __setmask__ + + @property + def mask(self): + """ Current mask. """ + + # We could try to force a reshape, but that wouldn't work in some + # cases. + # Return a view so that the dtype and shape cannot be changed in place + # This still preserves nomask by identity + return self._mask.view() + + @mask.setter + def mask(self, value): + self.__setmask__(value) + + @property + def recordmask(self): + """ + Get or set the mask of the array if it has no named fields. 
For + structured arrays, returns a ndarray of booleans where entries are + ``True`` if **all** the fields are masked, ``False`` otherwise: + + >>> x = np.ma.array([(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)], + ... mask=[(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)], + ... dtype=[('a', int), ('b', int)]) + >>> x.recordmask + array([False, False, True, False, False]) + """ + + _mask = self._mask.view(ndarray) + if _mask.dtype.names is None: + return _mask + return np.all(flatten_structured_array(_mask), axis=-1) + + @recordmask.setter + def recordmask(self, mask): + raise NotImplementedError("Coming soon: setting the mask per records!") + + def harden_mask(self): + """ + Force the mask to hard. + + Whether the mask of a masked array is hard or soft is determined by + its `~ma.MaskedArray.hardmask` property. `harden_mask` sets + `~ma.MaskedArray.hardmask` to ``True``. + + See Also + -------- + ma.MaskedArray.hardmask + + """ + self._hardmask = True + return self + + def soften_mask(self): + """ + Force the mask to soft. + + Whether the mask of a masked array is hard or soft is determined by + its `~ma.MaskedArray.hardmask` property. `soften_mask` sets + `~ma.MaskedArray.hardmask` to ``False``. + + See Also + -------- + ma.MaskedArray.hardmask + + """ + self._hardmask = False + return self + + @property + def hardmask(self): + """ Hardness of the mask """ + return self._hardmask + + def unshare_mask(self): + """ + Copy the mask and set the sharedmask flag to False. + + Whether the mask is shared between masked arrays can be seen from + the `sharedmask` property. `unshare_mask` ensures the mask is not shared. + A copy of the mask is only made if it was shared. + + See Also + -------- + sharedmask + + """ + if self._sharedmask: + self._mask = self._mask.copy() + self._sharedmask = False + return self + + @property + def sharedmask(self): + """ Share status of the mask (read-only). """ + return self._sharedmask + + def shrink_mask(self): + """ + Reduce a mask to nomask when possible. + + Parameters + ---------- + None + + Returns + ------- + None + + Examples + -------- + >>> x = np.ma.array([[1,2 ], [3, 4]], mask=[0]*4) + >>> x.mask + array([[False, False], + [False, False]]) + >>> x.shrink_mask() + masked_array( + data=[[1, 2], + [3, 4]], + mask=False, + fill_value=999999) + >>> x.mask + False + + """ + self._mask = _shrink_mask(self._mask) + return self + + @property + def baseclass(self): + """ Class of the underlying data (read-only). """ + return self._baseclass + + def _get_data(self): + """ + Returns the underlying data, as a view of the masked array. + + If the underlying data is a subclass of :class:`numpy.ndarray`, it is + returned as such. + + >>> x = np.ma.array(np.matrix([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]]) + >>> x.data + matrix([[1, 2], + [3, 4]]) + + The type of the data can be accessed through the :attr:`baseclass` + attribute. + """ + return ndarray.view(self, self._baseclass) + + _data = property(fget=_get_data) + data = property(fget=_get_data) + + @property + def flat(self): + """ Return a flat iterator, or set a flattened version of self to value. """ + return MaskedIterator(self) + + @flat.setter + def flat(self, value): + y = self.ravel() + y[:] = value + + @property + def fill_value(self): + """ + The filling value of the masked array is a scalar. When setting, None + will set to a default based on the data type. + + Examples + -------- + >>> for dt in [np.int32, np.int64, np.float64, np.complex128]: + ... np.ma.array([0, 1], dtype=dt).get_fill_value() + ... 
+ 999999 + 999999 + 1e+20 + (1e+20+0j) + + >>> x = np.ma.array([0, 1.], fill_value=-np.inf) + >>> x.fill_value + -inf + >>> x.fill_value = np.pi + >>> x.fill_value + 3.1415926535897931 # may vary + + Reset to default: + + >>> x.fill_value = None + >>> x.fill_value + 1e+20 + + """ + if self._fill_value is None: + self._fill_value = _check_fill_value(None, self.dtype) + + # Temporary workaround to account for the fact that str and bytes + # scalars cannot be indexed with (), whereas all other numpy + # scalars can. See issues #7259 and #7267. + # The if-block can be removed after #7267 has been fixed. + if isinstance(self._fill_value, ndarray): + return self._fill_value[()] + return self._fill_value + + @fill_value.setter + def fill_value(self, value=None): + target = _check_fill_value(value, self.dtype) + if not target.ndim == 0: + # 2019-11-12, 1.18.0 + warnings.warn( + "Non-scalar arrays for the fill value are deprecated. Use " + "arrays with scalar values instead. The filled function " + "still supports any array as `fill_value`.", + DeprecationWarning, stacklevel=2) + + _fill_value = self._fill_value + if _fill_value is None: + # Create the attribute if it was undefined + self._fill_value = target + else: + # Don't overwrite the attribute, just fill it (for propagation) + _fill_value[()] = target + + # kept for compatibility + get_fill_value = fill_value.fget + set_fill_value = fill_value.fset + + def filled(self, fill_value=None): + """ + Return a copy of self, with masked values filled with a given value. + **However**, if there are no masked values to fill, self will be + returned instead as an ndarray. + + Parameters + ---------- + fill_value : array_like, optional + The value to use for invalid entries. Can be scalar or non-scalar. + If non-scalar, the resulting ndarray must be broadcastable over + input array. Default is None, in which case, the `fill_value` + attribute of the array is used instead. + + Returns + ------- + filled_array : ndarray + A copy of ``self`` with invalid entries replaced by *fill_value* + (be it the function argument or the attribute of ``self``), or + ``self`` itself as an ndarray if there are no invalid entries to + be replaced. + + Notes + ----- + The result is **not** a MaskedArray! + + Examples + -------- + >>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999) + >>> x.filled() + array([ 1, 2, -999, 4, -999]) + >>> x.filled(fill_value=1000) + array([ 1, 2, 1000, 4, 1000]) + >>> type(x.filled()) + + + Subclassing is preserved. This means that if, e.g., the data part of + the masked array is a recarray, `filled` returns a recarray: + + >>> x = np.array([(-1, 2), (-3, 4)], dtype='i8,i8').view(np.recarray) + >>> m = np.ma.array(x, mask=[(True, False), (False, True)]) + >>> m.filled() + rec.array([(999999, 2), ( -3, 999999)], + dtype=[('f0', '>> x = np.ma.array(np.arange(5), mask=[0]*2 + [1]*3) + >>> x.compressed() + array([0, 1]) + >>> type(x.compressed()) + + + """ + data = ndarray.ravel(self._data) + if self._mask is not nomask: + data = data.compress(np.logical_not(ndarray.ravel(self._mask))) + return data + + def compress(self, condition, axis=None, out=None): + """ + Return `a` where condition is ``True``. + + If condition is a `~ma.MaskedArray`, missing values are considered + as ``False``. + + Parameters + ---------- + condition : var + Boolean 1-d array selecting which entries to return. If len(condition) + is less than the size of a along the axis, then output is truncated + to length of condition array. 
+ axis : {None, int}, optional + Axis along which the operation must be performed. + out : {None, ndarray}, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output but the type will be cast if + necessary. + + Returns + ------- + result : MaskedArray + A :class:`~ma.MaskedArray` object. + + Notes + ----- + Please note the difference with :meth:`compressed` ! + The output of :meth:`compress` has a mask, the output of + :meth:`compressed` does not. + + Examples + -------- + >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) + >>> x + masked_array( + data=[[1, --, 3], + [--, 5, --], + [7, --, 9]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + >>> x.compress([1, 0, 1]) + masked_array(data=[1, 3], + mask=[False, False], + fill_value=999999) + + >>> x.compress([1, 0, 1], axis=1) + masked_array( + data=[[1, 3], + [--, --], + [7, 9]], + mask=[[False, False], + [ True, True], + [False, False]], + fill_value=999999) + + """ + # Get the basic components + (_data, _mask) = (self._data, self._mask) + + # Force the condition to a regular ndarray and forget the missing + # values. + condition = np.asarray(condition) + + _new = _data.compress(condition, axis=axis, out=out).view(type(self)) + _new._update_from(self) + if _mask is not nomask: + _new._mask = _mask.compress(condition, axis=axis) + return _new + + def _insert_masked_print(self): + """ + Replace masked values with masked_print_option, casting all innermost + dtypes to object. + """ + if masked_print_option.enabled(): + mask = self._mask + if mask is nomask: + res = self._data + else: + # convert to object array to make filled work + data = self._data + # For big arrays, to avoid a costly conversion to the + # object dtype, extract the corners before the conversion. + print_width = (self._print_width if self.ndim > 1 + else self._print_width_1d) + for axis in range(self.ndim): + if data.shape[axis] > print_width: + ind = print_width // 2 + arr = np.split(data, (ind, -ind), axis=axis) + data = np.concatenate((arr[0], arr[2]), axis=axis) + arr = np.split(mask, (ind, -ind), axis=axis) + mask = np.concatenate((arr[0], arr[2]), axis=axis) + + rdtype = _replace_dtype_fields(self.dtype, "O") + res = data.astype(rdtype) + _recursive_printoption(res, mask, masked_print_option) + else: + res = self.filled(self.fill_value) + return res + + def __str__(self): + return str(self._insert_masked_print()) + + def __repr__(self): + """ + Literal string representation. 
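+ + Uses the legacy print templates when the ``legacy='1.13'`` print + option is active, and the indented ``masked_array(...)`` layout + otherwise.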
+ + """ + if self._baseclass is np.ndarray: + name = 'array' + else: + name = self._baseclass.__name__ + + + # 2016-11-19: Demoted to legacy format + if np.get_printoptions()['legacy'] == '1.13': + is_long = self.ndim > 1 + parameters = dict( + name=name, + nlen=" " * len(name), + data=str(self), + mask=str(self._mask), + fill=str(self.fill_value), + dtype=str(self.dtype) + ) + is_structured = bool(self.dtype.names) + key = '{}_{}'.format( + 'long' if is_long else 'short', + 'flx' if is_structured else 'std' + ) + return _legacy_print_templates[key] % parameters + + prefix = f"masked_{name}(" + + dtype_needed = ( + not np.core.arrayprint.dtype_is_implied(self.dtype) or + np.all(self.mask) or + self.size == 0 + ) + + # determine which keyword args need to be shown + keys = ['data', 'mask', 'fill_value'] + if dtype_needed: + keys.append('dtype') + + # array has only one row (non-column) + is_one_row = builtins.all(dim == 1 for dim in self.shape[:-1]) + + # choose what to indent each keyword with + min_indent = 2 + if is_one_row: + # first key on the same line as the type, remaining keys + # aligned by equals + indents = {} + indents[keys[0]] = prefix + for k in keys[1:]: + n = builtins.max(min_indent, len(prefix + keys[0]) - len(k)) + indents[k] = ' ' * n + prefix = '' # absorbed into the first indent + else: + # each key on its own line, indented by two spaces + indents = {k: ' ' * min_indent for k in keys} + prefix = prefix + '\n' # first key on the next line + + # format the field values + reprs = {} + reprs['data'] = np.array2string( + self._insert_masked_print(), + separator=", ", + prefix=indents['data'] + 'data=', + suffix=',') + reprs['mask'] = np.array2string( + self._mask, + separator=", ", + prefix=indents['mask'] + 'mask=', + suffix=',') + reprs['fill_value'] = repr(self.fill_value) + if dtype_needed: + reprs['dtype'] = np.core.arrayprint.dtype_short_repr(self.dtype) + + # join keys with values and indentations + result = ',\n'.join( + '{}{}={}'.format(indents[k], k, reprs[k]) + for k in keys + ) + return prefix + result + ')' + + def _delegate_binop(self, other): + # This emulates the logic in + # private/binop_override.h:forward_binop_should_defer + if isinstance(other, type(self)): + return False + array_ufunc = getattr(other, "__array_ufunc__", False) + if array_ufunc is False: + other_priority = getattr(other, "__array_priority__", -1000000) + return self.__array_priority__ < other_priority + else: + # If array_ufunc is not None, it will be called inside the ufunc; + # None explicitly tells us to not call the ufunc, i.e., defer. + return array_ufunc is None + + def _comparison(self, other, compare): + """Compare self with other using operator.eq or operator.ne. + + When either of the elements is masked, the result is masked as well, + but the underlying boolean data are still set, with self and other + considered equal if both are masked, and unequal otherwise. + + For structured arrays, all fields are combined, with masked values + ignored. The result is masked if all fields were masked, with self + and other considered equal only if both were fully masked. + """ + omask = getmask(other) + smask = self.mask + mask = mask_or(smask, omask, copy=True) + + odata = getdata(other) + if mask.dtype.names is not None: + # For possibly masked structured arrays we need to be careful, + # since the standard structured array comparison will use all + # fields, masked or not. 
To avoid masked fields influencing the + # outcome, we set all masked fields in self to other, so they'll + # count as equal. To prepare, we ensure we have the right shape. + broadcast_shape = np.broadcast(self, odata).shape + sbroadcast = np.broadcast_to(self, broadcast_shape, subok=True) + sbroadcast._mask = mask + sdata = sbroadcast.filled(odata) + # Now take care of the mask; the merged mask should have an item + # masked if all fields were masked (in one and/or other). + mask = (mask == np.ones((), mask.dtype)) + + else: + # For regular arrays, just use the data as they come. + sdata = self.data + + check = compare(sdata, odata) + + if isinstance(check, (np.bool_, bool)): + return masked if mask else check + + if mask is not nomask: + # Adjust elements that were masked, which should be treated + # as equal if masked in both, unequal if masked in one. + # Note that this works automatically for structured arrays too. + check = np.where(mask, compare(smask, omask), check) + if mask.shape != check.shape: + # Guarantee consistency of the shape, making a copy since + # the mask may need to get written to later. + mask = np.broadcast_to(mask, check.shape).copy() + + check = check.view(type(self)) + check._update_from(self) + check._mask = mask + + # Cast fill value to bool_ if needed. If it cannot be cast, the + # default boolean fill value is used. + if check._fill_value is not None: + try: + fill = _check_fill_value(check._fill_value, np.bool_) + except (TypeError, ValueError): + fill = _check_fill_value(None, np.bool_) + check._fill_value = fill + + return check + + def __eq__(self, other): + """Check whether other equals self elementwise. + + When either of the elements is masked, the result is masked as well, + but the underlying boolean data are still set, with self and other + considered equal if both are masked, and unequal otherwise. + + For structured arrays, all fields are combined, with masked values + ignored. The result is masked if all fields were masked, with self + and other considered equal only if both were fully masked. + """ + return self._comparison(other, operator.eq) + + def __ne__(self, other): + """Check whether other does not equal self elementwise. + + When either of the elements is masked, the result is masked as well, + but the underlying boolean data are still set, with self and other + considered equal if both are masked, and unequal otherwise. + + For structured arrays, all fields are combined, with masked values + ignored. The result is masked if all fields were masked, with self + and other considered equal only if both were fully masked. + """ + return self._comparison(other, operator.ne) + + def __add__(self, other): + """ + Add self to other, and return a new masked array. + + """ + if self._delegate_binop(other): + return NotImplemented + return add(self, other) + + def __radd__(self, other): + """ + Add other to self, and return a new masked array. + + """ + # In analogy with __rsub__ and __rdiv__, use original order: + # we get here from `other + self`. + return add(other, self) + + def __sub__(self, other): + """ + Subtract other from self, and return a new masked array. + + """ + if self._delegate_binop(other): + return NotImplemented + return subtract(self, other) + + def __rsub__(self, other): + """ + Subtract self from other, and return a new masked array. + + """ + return subtract(other, self) + + def __mul__(self, other): + "Multiply self by other, and return a new masked array."
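+ # Illustrative sketch (values made up for this comment): masked entries + # propagate through arithmetic, e.g. + # np.ma.array([1, 2], mask=[0, 1]) * 3 + # -> masked_array(data=[3, --], mask=[False, True], fill_value=999999)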
+ if self._delegate_binop(other): + return NotImplemented + return multiply(self, other) + + def __rmul__(self, other): + """ + Multiply other by self, and return a new masked array. + + """ + # In analogy with __rsub__ and __rdiv__, use original order: + # we get here from `other * self`. + return multiply(other, self) + + def __div__(self, other): + """ + Divide other into self, and return a new masked array. + + """ + if self._delegate_binop(other): + return NotImplemented + return divide(self, other) + + def __truediv__(self, other): + """ + Divide other into self, and return a new masked array. + + """ + if self._delegate_binop(other): + return NotImplemented + return true_divide(self, other) + + def __rtruediv__(self, other): + """ + Divide self into other, and return a new masked array. + + """ + return true_divide(other, self) + + def __floordiv__(self, other): + """ + Divide other into self, and return a new masked array. + + """ + if self._delegate_binop(other): + return NotImplemented + return floor_divide(self, other) + + def __rfloordiv__(self, other): + """ + Divide self into other, and return a new masked array. + + """ + return floor_divide(other, self) + + def __pow__(self, other): + """ + Raise self to the power other, masking the potential NaNs/Infs + + """ + if self._delegate_binop(other): + return NotImplemented + return power(self, other) + + def __rpow__(self, other): + """ + Raise other to the power self, masking the potential NaNs/Infs + + """ + return power(other, self) + + def __iadd__(self, other): + """ + Add other to self in-place. + + """ + m = getmask(other) + if self._mask is nomask: + if m is not nomask and m.any(): + self._mask = make_mask_none(self.shape, self.dtype) + self._mask += m + else: + if m is not nomask: + self._mask += m + self._data.__iadd__(np.where(self._mask, self.dtype.type(0), + getdata(other))) + return self + + def __isub__(self, other): + """ + Subtract other from self in-place. + + """ + m = getmask(other) + if self._mask is nomask: + if m is not nomask and m.any(): + self._mask = make_mask_none(self.shape, self.dtype) + self._mask += m + elif m is not nomask: + self._mask += m + self._data.__isub__(np.where(self._mask, self.dtype.type(0), + getdata(other))) + return self + + def __imul__(self, other): + """ + Multiply self by other in-place. + + """ + m = getmask(other) + if self._mask is nomask: + if m is not nomask and m.any(): + self._mask = make_mask_none(self.shape, self.dtype) + self._mask += m + elif m is not nomask: + self._mask += m + self._data.__imul__(np.where(self._mask, self.dtype.type(1), + getdata(other))) + return self + + def __idiv__(self, other): + """ + Divide self by other in-place. + + """ + other_data = getdata(other) + dom_mask = _DomainSafeDivide().__call__(self._data, other_data) + other_mask = getmask(other) + new_mask = mask_or(other_mask, dom_mask) + # The following 3 lines control the domain filling + if dom_mask.any(): + (_, fval) = ufunc_fills[np.divide] + other_data = np.where(dom_mask, fval, other_data) + self._mask |= new_mask + self._data.__idiv__(np.where(self._mask, self.dtype.type(1), + other_data)) + return self + + def __ifloordiv__(self, other): + """ + Floor divide self by other in-place. 
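+ + Entries for which the division domain is violated (e.g. a zero + divisor) are masked via ``_DomainSafeDivide``, mirroring ``__idiv__`` + above.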
+ + """ + other_data = getdata(other) + dom_mask = _DomainSafeDivide().__call__(self._data, other_data) + other_mask = getmask(other) + new_mask = mask_or(other_mask, dom_mask) + # The following 3 lines control the domain filling + if dom_mask.any(): + (_, fval) = ufunc_fills[np.floor_divide] + other_data = np.where(dom_mask, fval, other_data) + self._mask |= new_mask + self._data.__ifloordiv__(np.where(self._mask, self.dtype.type(1), + other_data)) + return self + + def __itruediv__(self, other): + """ + True divide self by other in-place. + + """ + other_data = getdata(other) + dom_mask = _DomainSafeDivide().__call__(self._data, other_data) + other_mask = getmask(other) + new_mask = mask_or(other_mask, dom_mask) + # The following 3 lines control the domain filling + if dom_mask.any(): + (_, fval) = ufunc_fills[np.true_divide] + other_data = np.where(dom_mask, fval, other_data) + self._mask |= new_mask + self._data.__itruediv__(np.where(self._mask, self.dtype.type(1), + other_data)) + return self + + def __ipow__(self, other): + """ + Raise self to the power other, in place. + + """ + other_data = getdata(other) + other_mask = getmask(other) + with np.errstate(divide='ignore', invalid='ignore'): + self._data.__ipow__(np.where(self._mask, self.dtype.type(1), + other_data)) + invalid = np.logical_not(np.isfinite(self._data)) + if invalid.any(): + if self._mask is not nomask: + self._mask |= invalid + else: + self._mask = invalid + np.copyto(self._data, self.fill_value, where=invalid) + new_mask = mask_or(other_mask, invalid) + self._mask = mask_or(self._mask, new_mask) + return self + + def __float__(self): + """ + Convert to float. + + """ + if self.size > 1: + raise TypeError("Only length-1 arrays can be converted " + "to Python scalars") + elif self._mask: + warnings.warn("Warning: converting a masked element to nan.", stacklevel=2) + return np.nan + return float(self.item()) + + def __int__(self): + """ + Convert to int. + + """ + if self.size > 1: + raise TypeError("Only length-1 arrays can be converted " + "to Python scalars") + elif self._mask: + raise MaskError('Cannot convert masked element to a Python int.') + return int(self.item()) + + @property + def imag(self): + """ + The imaginary part of the masked array. + + This property is a view on the imaginary part of this `MaskedArray`. + + See Also + -------- + real + + Examples + -------- + >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) + >>> x.imag + masked_array(data=[1.0, --, 1.6], + mask=[False, True, False], + fill_value=1e+20) + + """ + result = self._data.imag.view(type(self)) + result.__setmask__(self._mask) + return result + + # kept for compatibility + get_imag = imag.fget + + @property + def real(self): + """ + The real part of the masked array. + + This property is a view on the real part of this `MaskedArray`. + + See Also + -------- + imag + + Examples + -------- + >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) + >>> x.real + masked_array(data=[1.0, --, 3.45], + mask=[False, True, False], + fill_value=1e+20) + + """ + result = self._data.real.view(type(self)) + result.__setmask__(self._mask) + return result + + # kept for compatibility + get_real = real.fget + + def count(self, axis=None, keepdims=np._NoValue): + """ + Count the non-masked elements of the array along the given axis. + + Parameters + ---------- + axis : None or int or tuple of ints, optional + Axis or axes along which the count is performed. 
+ The default, None, performs the count over all + the dimensions of the input array. `axis` may be negative, in + which case it counts from the last to the first axis. + + .. versionadded:: 1.10.0 + + If this is a tuple of ints, the count is performed on multiple + axes, instead of a single axis or all the axes as before. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + Returns + ------- + result : ndarray or scalar + An array with the same shape as the input array, with the specified + axis removed. If the array is a 0-d array, or if `axis` is None, a + scalar is returned. + + See Also + -------- + ma.count_masked : Count masked elements in array or along a given axis. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = ma.arange(6).reshape((2, 3)) + >>> a[1, :] = ma.masked + >>> a + masked_array( + data=[[0, 1, 2], + [--, --, --]], + mask=[[False, False, False], + [ True, True, True]], + fill_value=999999) + >>> a.count() + 3 + + When the `axis` keyword is specified an array of appropriate size is + returned. + + >>> a.count(axis=0) + array([1, 1, 1]) + >>> a.count(axis=1) + array([3, 0]) + + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + m = self._mask + # special case for matrices (we assume no other subclasses modify + # their dimensions) + if isinstance(self.data, np.matrix): + if m is nomask: + m = np.zeros(self.shape, dtype=np.bool_) + m = m.view(type(self.data)) + + if m is nomask: + # compare to _count_reduce_items in _methods.py + + if self.shape == (): + if axis not in (None, 0): + raise np.AxisError(axis=axis, ndim=self.ndim) + return 1 + elif axis is None: + if kwargs.get('keepdims', False): + return np.array(self.size, dtype=np.intp, ndmin=self.ndim) + return self.size + + axes = normalize_axis_tuple(axis, self.ndim) + items = 1 + for ax in axes: + items *= self.shape[ax] + + if kwargs.get('keepdims', False): + out_dims = list(self.shape) + for a in axes: + out_dims[a] = 1 + else: + out_dims = [d for n, d in enumerate(self.shape) + if n not in axes] + # make sure to return a 0-d array if axis is supplied + return np.full(out_dims, items, dtype=np.intp) + + # take care of the masked singleton + if self is masked: + return 0 + + return (~m).sum(axis=axis, dtype=np.intp, **kwargs) + + def ravel(self, order='C'): + """ + Returns a 1D version of self, as a view. + + Parameters + ---------- + order : {'C', 'F', 'A', 'K'}, optional + The elements of `a` are read using this index order. 'C' means to + index the elements in C-like order, with the last axis index + changing fastest, back to the first axis index changing slowest. + 'F' means to index the elements in Fortran-like index order, with + the first index changing fastest, and the last index changing + slowest. Note that the 'C' and 'F' options take no account of the + memory layout of the underlying array, and only refer to the order + of axis indexing. 'A' means to read the elements in Fortran-like + index order if `m` is Fortran *contiguous* in memory, C-like order + otherwise. 'K' means to read the elements in the order they occur + in memory, except for reversing the data when strides are negative. + By default, 'C' index order is used. + + Returns + ------- + MaskedArray + Output view is of shape ``(self.size,)`` (or + ``(np.ma.product(self.shape),)``). 
+ + Examples + -------- + >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) + >>> x + masked_array( + data=[[1, --, 3], + [--, 5, --], + [7, --, 9]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + >>> x.ravel() + masked_array(data=[1, --, 3, --, 5, --, 7, --, 9], + mask=[False, True, False, True, False, True, False, True, + False], + fill_value=999999) + + """ + r = ndarray.ravel(self._data, order=order).view(type(self)) + r._update_from(self) + if self._mask is not nomask: + r._mask = ndarray.ravel(self._mask, order=order).reshape(r.shape) + else: + r._mask = nomask + return r + + + def reshape(self, *s, **kwargs): + """ + Give a new shape to the array without changing its data. + + Returns a masked array containing the same data, but with a new shape. + The result is a view on the original array; if this is not possible, a + ValueError is raised. + + Parameters + ---------- + shape : int or tuple of ints + The new shape should be compatible with the original shape. If an + integer is supplied, then the result will be a 1-D array of that + length. + order : {'C', 'F'}, optional + Determines whether the array data should be viewed as in C + (row-major) or FORTRAN (column-major) order. + + Returns + ------- + reshaped_array : array + A new view on the array. + + See Also + -------- + reshape : Equivalent function in the masked array module. + numpy.ndarray.reshape : Equivalent method on ndarray object. + numpy.reshape : Equivalent function in the NumPy module. + + Notes + ----- + The reshaping operation cannot guarantee that a copy will not be made, + to modify the shape in place, use ``a.shape = s`` + + Examples + -------- + >>> x = np.ma.array([[1,2],[3,4]], mask=[1,0,0,1]) + >>> x + masked_array( + data=[[--, 2], + [3, --]], + mask=[[ True, False], + [False, True]], + fill_value=999999) + >>> x = x.reshape((4,1)) + >>> x + masked_array( + data=[[--], + [2], + [3], + [--]], + mask=[[ True], + [False], + [False], + [ True]], + fill_value=999999) + + """ + kwargs.update(order=kwargs.get('order', 'C')) + result = self._data.reshape(*s, **kwargs).view(type(self)) + result._update_from(self) + mask = self._mask + if mask is not nomask: + result._mask = mask.reshape(*s, **kwargs) + return result + + def resize(self, newshape, refcheck=True, order=False): + """ + .. warning:: + + This method does nothing, except raise a ValueError exception. A + masked array does not own its data and therefore cannot safely be + resized in place. Use the `numpy.ma.resize` function instead. + + This method is difficult to implement safely and may be deprecated in + future releases of NumPy. + + """ + # Note : the 'order' keyword looks broken, let's just drop it + errmsg = "A masked array does not own its data "\ + "and therefore cannot be resized.\n" \ + "Use the numpy.ma.resize function instead." + raise ValueError(errmsg) + + def put(self, indices, values, mode='raise'): + """ + Set storage-indexed locations to corresponding values. + + Sets self._data.flat[n] = values[n] for each n in indices. + If `values` is shorter than `indices` then it will repeat. + If `values` has some masked values, the initial mask is updated + in consequence, else the corresponding values are unmasked. + + Parameters + ---------- + indices : 1-D array_like + Target indices, interpreted as integers. + values : array_like + Values to place in self._data copy at target indices. 
+ mode : {'raise', 'wrap', 'clip'}, optional + Specifies how out-of-bounds indices will behave. + 'raise' : raise an error. + 'wrap' : wrap around. + 'clip' : clip to the range. + + Notes + ----- + `values` can be a scalar or length 1 array. + + Examples + -------- + >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) + >>> x + masked_array( + data=[[1, --, 3], + [--, 5, --], + [7, --, 9]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + >>> x.put([0,4,8],[10,20,30]) + >>> x + masked_array( + data=[[10, --, 3], + [--, 20, --], + [7, --, 30]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + + >>> x.put(4,999) + >>> x + masked_array( + data=[[10, --, 3], + [--, 999, --], + [7, --, 30]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + + """ + # Hard mask: Get rid of the values/indices that fall on masked data + if self._hardmask and self._mask is not nomask: + mask = self._mask[indices] + indices = narray(indices, copy=False) + values = narray(values, copy=False, subok=True) + values.resize(indices.shape) + indices = indices[~mask] + values = values[~mask] + + self._data.put(indices, values, mode=mode) + + # short circuit if neither self nor values are masked + if self._mask is nomask and getmask(values) is nomask: + return + + m = getmaskarray(self) + + if getmask(values) is nomask: + m.put(indices, False, mode=mode) + else: + m.put(indices, values._mask, mode=mode) + m = make_mask(m, copy=False, shrink=True) + self._mask = m + return + + def ids(self): + """ + Return the addresses of the data and mask areas. + + Parameters + ---------- + None + + Examples + -------- + >>> x = np.ma.array([1, 2, 3], mask=[0, 1, 1]) + >>> x.ids() + (166670640, 166659832) # may vary + + If the array has no mask, the address of `nomask` is returned. This address + is typically not close to the data in memory: + + >>> x = np.ma.array([1, 2, 3]) + >>> x.ids() + (166691080, 3083169284) # may vary + + """ + if self._mask is nomask: + return (self.ctypes.data, id(nomask)) + return (self.ctypes.data, self._mask.ctypes.data) + + def iscontiguous(self): + """ + Return a boolean indicating whether the data is contiguous. + + Parameters + ---------- + None + + Examples + -------- + >>> x = np.ma.array([1, 2, 3]) + >>> x.iscontiguous() + True + + `iscontiguous` returns one of the flags of the masked array: + + >>> x.flags + C_CONTIGUOUS : True + F_CONTIGUOUS : True + OWNDATA : False + WRITEABLE : True + ALIGNED : True + WRITEBACKIFCOPY : False + UPDATEIFCOPY : False + + """ + return self.flags['CONTIGUOUS'] + + def all(self, axis=None, out=None, keepdims=np._NoValue): + """ + Returns True if all elements evaluate to True. + + The output array is masked where all the values along the given axis + are masked: if the output would have been a scalar and that all the + values are masked, then the output is `masked`. + + Refer to `numpy.all` for full documentation. 
+ + See Also + -------- + numpy.ndarray.all : corresponding function for ndarrays + numpy.all : equivalent function + + Examples + -------- + >>> np.ma.array([1,2,3]).all() + True + >>> a = np.ma.array([1,2,3], mask=True) + >>> (a.all() is np.ma.masked) + True + + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + mask = _check_mask_axis(self._mask, axis, **kwargs) + if out is None: + d = self.filled(True).all(axis=axis, **kwargs).view(type(self)) + if d.ndim: + d.__setmask__(mask) + elif mask: + return masked + return d + self.filled(True).all(axis=axis, out=out, **kwargs) + if isinstance(out, MaskedArray): + if out.ndim or mask: + out.__setmask__(mask) + return out + + def any(self, axis=None, out=None, keepdims=np._NoValue): + """ + Returns True if any of the elements of `a` evaluate to True. + + Masked values are considered as False during computation. + + Refer to `numpy.any` for full documentation. + + See Also + -------- + numpy.ndarray.any : corresponding function for ndarrays + numpy.any : equivalent function + + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + mask = _check_mask_axis(self._mask, axis, **kwargs) + if out is None: + d = self.filled(False).any(axis=axis, **kwargs).view(type(self)) + if d.ndim: + d.__setmask__(mask) + elif mask: + d = masked + return d + self.filled(False).any(axis=axis, out=out, **kwargs) + if isinstance(out, MaskedArray): + if out.ndim or mask: + out.__setmask__(mask) + return out + + def nonzero(self): + """ + Return the indices of unmasked elements that are not zero. + + Returns a tuple of arrays, one for each dimension, containing the + indices of the non-zero elements in that dimension. The corresponding + non-zero values can be obtained with:: + + a[a.nonzero()] + + To group the indices by element, rather than dimension, use + instead:: + + np.transpose(a.nonzero()) + + The result of this is always a 2d array, with a row for each non-zero + element. + + Parameters + ---------- + None + + Returns + ------- + tuple_of_arrays : tuple + Indices of elements that are non-zero. + + See Also + -------- + numpy.nonzero : + Function operating on ndarrays. + flatnonzero : + Return indices that are non-zero in the flattened version of the input + array. + numpy.ndarray.nonzero : + Equivalent ndarray method. + count_nonzero : + Counts the number of non-zero elements in the input array. + + Examples + -------- + >>> import numpy.ma as ma + >>> x = ma.array(np.eye(3)) + >>> x + masked_array( + data=[[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]], + mask=False, + fill_value=1e+20) + >>> x.nonzero() + (array([0, 1, 2]), array([0, 1, 2])) + + Masked elements are ignored. + + >>> x[1, 1] = ma.masked + >>> x + masked_array( + data=[[1.0, 0.0, 0.0], + [0.0, --, 0.0], + [0.0, 0.0, 1.0]], + mask=[[False, False, False], + [False, True, False], + [False, False, False]], + fill_value=1e+20) + >>> x.nonzero() + (array([0, 2]), array([0, 2])) + + Indices can also be grouped by element. + + >>> np.transpose(x.nonzero()) + array([[0, 0], + [2, 2]]) + + A common use for ``nonzero`` is to find the indices of an array, where + a condition is True. Given an array `a`, the condition `a` > 3 is a + boolean array and since False is interpreted as 0, ma.nonzero(a > 3) + yields the indices of the `a` where the condition is true. 
+ + >>> a = ma.array([[1,2,3],[4,5,6],[7,8,9]]) + >>> a > 3 + masked_array( + data=[[False, False, False], + [ True, True, True], + [ True, True, True]], + mask=False, + fill_value=True) + >>> ma.nonzero(a > 3) + (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) + + The ``nonzero`` method of the condition array can also be called. + + >>> (a > 3).nonzero() + (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) + + """ + return narray(self.filled(0), copy=False).nonzero() + + def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None): + """ + (this docstring should be overwritten) + """ + #!!!: implement out + test! + m = self._mask + if m is nomask: + result = super().trace(offset=offset, axis1=axis1, axis2=axis2, + out=out) + return result.astype(dtype) + else: + D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2) + return D.astype(dtype).filled(0).sum(axis=-1, out=out) + trace.__doc__ = ndarray.trace.__doc__ + + def dot(self, b, out=None, strict=False): + """ + a.dot(b, out=None) + + Masked dot product of two arrays. Note that `out` and `strict` are + located in different positions than in `ma.dot`. In order to + maintain compatibility with the functional version, it is + recommended that the optional arguments be treated as keyword only. + At some point that may be mandatory. + + .. versionadded:: 1.10.0 + + Parameters + ---------- + b : masked_array_like + Inputs array. + out : masked_array, optional + Output argument. This must have the exact kind that would be + returned if it was not used. In particular, it must have the + right type, must be C-contiguous, and its dtype must be the + dtype that would be returned for `ma.dot(a,b)`. This is a + performance feature. Therefore, if these conditions are not + met, an exception is raised, instead of attempting to be + flexible. + strict : bool, optional + Whether masked data are propagated (True) or set to 0 (False) + for the computation. Default is False. Propagating the mask + means that if a masked value appears in a row or column, the + whole row or column is considered masked. + + .. versionadded:: 1.10.2 + + See Also + -------- + numpy.ma.dot : equivalent function + + """ + return dot(self, b, out=out, strict=strict) + + def sum(self, axis=None, dtype=None, out=None, keepdims=np._NoValue): + """ + Return the sum of the array elements over the given axis. + + Masked elements are set to 0 internally. + + Refer to `numpy.sum` for full documentation. 
+
+        See Also
+        --------
+        numpy.ndarray.sum : corresponding function for ndarrays
+        numpy.sum : equivalent function
+
+        Examples
+        --------
+        >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
+        >>> x
+        masked_array(
+          data=[[1, --, 3],
+                [--, 5, --],
+                [7, --, 9]],
+          mask=[[False, True, False],
+                [ True, False, True],
+                [False, True, False]],
+          fill_value=999999)
+        >>> x.sum()
+        25
+        >>> x.sum(axis=1)
+        masked_array(data=[4, 5, 16],
+                     mask=[False, False, False],
+               fill_value=999999)
+        >>> x.sum(axis=0)
+        masked_array(data=[8, 5, 12],
+                     mask=[False, False, False],
+               fill_value=999999)
+        >>> print(type(x.sum(axis=0, dtype=np.int64)[0]))
+        <class 'numpy.int64'>
+
+        """
+        kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
+
+        _mask = self._mask
+        newmask = _check_mask_axis(_mask, axis, **kwargs)
+        # No explicit output
+        if out is None:
+            result = self.filled(0).sum(axis, dtype=dtype, **kwargs)
+            rndim = getattr(result, 'ndim', 0)
+            if rndim:
+                result = result.view(type(self))
+                result.__setmask__(newmask)
+            elif newmask:
+                result = masked
+            return result
+        # Explicit output
+        result = self.filled(0).sum(axis, dtype=dtype, out=out, **kwargs)
+        if isinstance(out, MaskedArray):
+            outmask = getmask(out)
+            if outmask is nomask:
+                outmask = out._mask = make_mask_none(out.shape)
+            outmask.flat = newmask
+        return out
+
+    def cumsum(self, axis=None, dtype=None, out=None):
+        """
+        Return the cumulative sum of the array elements over the given axis.
+
+        Masked values are set to 0 internally during the computation.
+        However, their position is saved, and the result will be masked at
+        the same locations.
+
+        Refer to `numpy.cumsum` for full documentation.
+
+        Notes
+        -----
+        The mask is lost if `out` is not a valid :class:`ma.MaskedArray` !
+
+        Arithmetic is modular when using integer types, and no error is
+        raised on overflow.
+
+        See Also
+        --------
+        numpy.ndarray.cumsum : corresponding function for ndarrays
+        numpy.cumsum : equivalent function
+
+        Examples
+        --------
+        >>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0])
+        >>> marr.cumsum()
+        masked_array(data=[0, 1, 3, --, --, --, 9, 16, 24, 33],
+                     mask=[False, False, False, True, True, True, False, False,
+                           False, False],
+               fill_value=999999)
+
+        """
+        result = self.filled(0).cumsum(axis=axis, dtype=dtype, out=out)
+        if out is not None:
+            if isinstance(out, MaskedArray):
+                out.__setmask__(self.mask)
+            return out
+        result = result.view(type(self))
+        result.__setmask__(self._mask)
+        return result
+
+    def prod(self, axis=None, dtype=None, out=None, keepdims=np._NoValue):
+        """
+        Return the product of the array elements over the given axis.
+
+        Masked elements are set to 1 internally for computation.
+
+        Refer to `numpy.prod` for full documentation.
+
+        Notes
+        -----
+        Arithmetic is modular when using integer types, and no error is raised
+        on overflow.
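+
+        For instance (a minimal illustrative example, values assumed),
+        masked entries do not contribute to the product because they are
+        filled with 1 before multiplying:
+
+        >>> np.ma.array([2, 3, 4], mask=[0, 1, 0]).prod()
+        8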
+ + See Also + -------- + numpy.ndarray.prod : corresponding function for ndarrays + numpy.prod : equivalent function + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + _mask = self._mask + newmask = _check_mask_axis(_mask, axis, **kwargs) + # No explicit output + if out is None: + result = self.filled(1).prod(axis, dtype=dtype, **kwargs) + rndim = getattr(result, 'ndim', 0) + if rndim: + result = result.view(type(self)) + result.__setmask__(newmask) + elif newmask: + result = masked + return result + # Explicit output + result = self.filled(1).prod(axis, dtype=dtype, out=out, **kwargs) + if isinstance(out, MaskedArray): + outmask = getmask(out) + if outmask is nomask: + outmask = out._mask = make_mask_none(out.shape) + outmask.flat = newmask + return out + product = prod + + def cumprod(self, axis=None, dtype=None, out=None): + """ + Return the cumulative product of the array elements over the given axis. + + Masked values are set to 1 internally during the computation. + However, their position is saved, and the result will be masked at + the same locations. + + Refer to `numpy.cumprod` for full documentation. + + Notes + ----- + The mask is lost if `out` is not a valid MaskedArray ! + + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + See Also + -------- + numpy.ndarray.cumprod : corresponding function for ndarrays + numpy.cumprod : equivalent function + """ + result = self.filled(1).cumprod(axis=axis, dtype=dtype, out=out) + if out is not None: + if isinstance(out, MaskedArray): + out.__setmask__(self._mask) + return out + result = result.view(type(self)) + result.__setmask__(self._mask) + return result + + def mean(self, axis=None, dtype=None, out=None, keepdims=np._NoValue): + """ + Returns the average of the array elements along given axis. + + Masked entries are ignored, and result elements which are not + finite will be masked. + + Refer to `numpy.mean` for full documentation. + + See Also + -------- + numpy.ndarray.mean : corresponding function for ndarrays + numpy.mean : Equivalent function + numpy.ma.average : Weighted average. + + Examples + -------- + >>> a = np.ma.array([1,2,3], mask=[False, False, True]) + >>> a + masked_array(data=[1, 2, --], + mask=[False, False, True], + fill_value=999999) + >>> a.mean() + 1.5 + + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + if self._mask is nomask: + result = super().mean(axis=axis, dtype=dtype, **kwargs)[()] + else: + dsum = self.sum(axis=axis, dtype=dtype, **kwargs) + cnt = self.count(axis=axis, **kwargs) + if cnt.shape == () and (cnt == 0): + result = masked + else: + result = dsum * 1. / cnt + if out is not None: + out.flat = result + if isinstance(out, MaskedArray): + outmask = getmask(out) + if outmask is nomask: + outmask = out._mask = make_mask_none(out.shape) + outmask.flat = getmask(result) + return out + return result + + def anom(self, axis=None, dtype=None): + """ + Compute the anomalies (deviations from the arithmetic mean) + along the given axis. + + Returns an array of anomalies, with the same shape as the input and + where the arithmetic mean is computed along the given axis. + + Parameters + ---------- + axis : int, optional + Axis over which the anomalies are taken. + The default is to use the mean of the flattened array as reference. + dtype : dtype, optional + Type to use in computing the variance. For arrays of integer type + the default is float32; for arrays of float types it is the same as + the array type. 
+ + See Also + -------- + mean : Compute the mean of the array. + + Examples + -------- + >>> a = np.ma.array([1,2,3]) + >>> a.anom() + masked_array(data=[-1., 0., 1.], + mask=False, + fill_value=1e+20) + + """ + m = self.mean(axis, dtype) + if m is masked: + return m + + if not axis: + return self - m + else: + return self - expand_dims(m, axis) + + def var(self, axis=None, dtype=None, out=None, ddof=0, + keepdims=np._NoValue): + """ + Returns the variance of the array elements along given axis. + + Masked entries are ignored, and result elements which are not + finite will be masked. + + Refer to `numpy.var` for full documentation. + + See Also + -------- + numpy.ndarray.var : corresponding function for ndarrays + numpy.var : Equivalent function + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + # Easy case: nomask, business as usual + if self._mask is nomask: + ret = super().var(axis=axis, dtype=dtype, out=out, ddof=ddof, + **kwargs)[()] + if out is not None: + if isinstance(out, MaskedArray): + out.__setmask__(nomask) + return out + return ret + + # Some data are masked, yay! + cnt = self.count(axis=axis, **kwargs) - ddof + danom = self - self.mean(axis, dtype, keepdims=True) + if iscomplexobj(self): + danom = umath.absolute(danom) ** 2 + else: + danom *= danom + dvar = divide(danom.sum(axis, **kwargs), cnt).view(type(self)) + # Apply the mask if it's not a scalar + if dvar.ndim: + dvar._mask = mask_or(self._mask.all(axis, **kwargs), (cnt <= 0)) + dvar._update_from(self) + elif getmask(dvar): + # Make sure that masked is returned when the scalar is masked. + dvar = masked + if out is not None: + if isinstance(out, MaskedArray): + out.flat = 0 + out.__setmask__(True) + elif out.dtype.kind in 'biu': + errmsg = "Masked data information would be lost in one or "\ + "more location." + raise MaskError(errmsg) + else: + out.flat = np.nan + return out + # In case with have an explicit output + if out is not None: + # Set the data + out.flat = dvar + # Set the mask if needed + if isinstance(out, MaskedArray): + out.__setmask__(dvar.mask) + return out + return dvar + var.__doc__ = np.var.__doc__ + + def std(self, axis=None, dtype=None, out=None, ddof=0, + keepdims=np._NoValue): + """ + Returns the standard deviation of the array elements along given axis. + + Masked entries are ignored. + + Refer to `numpy.std` for full documentation. + + See Also + -------- + numpy.ndarray.std : corresponding function for ndarrays + numpy.std : Equivalent function + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + dvar = self.var(axis, dtype, out, ddof, **kwargs) + if dvar is not masked: + if out is not None: + np.power(out, 0.5, out=out, casting='unsafe') + return out + dvar = sqrt(dvar) + return dvar + + def round(self, decimals=0, out=None): + """ + Return each element rounded to the given number of decimals. + + Refer to `numpy.around` for full documentation. 
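+
+        A small illustrative sketch (example values assumed); masked
+        entries remain masked after rounding:
+
+        >>> x = np.ma.array([1.11, 2.55], mask=[0, 1])
+        >>> y = x.round(decimals=1)    # masked_array([1.1, --])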
+ + See Also + -------- + numpy.ndarray.round : corresponding function for ndarrays + numpy.around : equivalent function + """ + result = self._data.round(decimals=decimals, out=out).view(type(self)) + if result.ndim > 0: + result._mask = self._mask + result._update_from(self) + elif self._mask: + # Return masked when the scalar is masked + result = masked + # No explicit output: we're done + if out is None: + return result + if isinstance(out, MaskedArray): + out.__setmask__(self._mask) + return out + + def argsort(self, axis=np._NoValue, kind=None, order=None, + endwith=True, fill_value=None): + """ + Return an ndarray of indices that sort the array along the + specified axis. Masked values are filled beforehand to + `fill_value`. + + Parameters + ---------- + axis : int, optional + Axis along which to sort. If None, the default, the flattened array + is used. + + .. versionchanged:: 1.13.0 + Previously, the default was documented to be -1, but that was + in error. At some future date, the default will change to -1, as + originally intended. + Until then, the axis should be given explicitly when + ``arr.ndim > 1``, to avoid a FutureWarning. + kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional + The sorting algorithm used. + order : list, optional + When `a` is an array with fields defined, this argument specifies + which fields to compare first, second, etc. Not all fields need be + specified. + endwith : {True, False}, optional + Whether missing values (if any) should be treated as the largest values + (True) or the smallest values (False) + When the array contains unmasked values at the same extremes of the + datatype, the ordering of these values and the masked values is + undefined. + fill_value : scalar or None, optional + Value used internally for the masked values. + If ``fill_value`` is not None, it supersedes ``endwith``. + + Returns + ------- + index_array : ndarray, int + Array of indices that sort `a` along the specified axis. + In other words, ``a[index_array]`` yields a sorted `a`. + + See Also + -------- + ma.MaskedArray.sort : Describes sorting algorithms used. + lexsort : Indirect stable sort with multiple keys. + numpy.ndarray.sort : Inplace sort. + + Notes + ----- + See `sort` for notes on the different sorting algorithms. + + Examples + -------- + >>> a = np.ma.array([3,2,1], mask=[False, False, True]) + >>> a + masked_array(data=[3, 2, --], + mask=[False, False, True], + fill_value=999999) + >>> a.argsort() + array([1, 0, 2]) + + """ + + # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default + if axis is np._NoValue: + axis = _deprecate_argsort_axis(self) + + if fill_value is None: + if endwith: + # nan > inf + if np.issubdtype(self.dtype, np.floating): + fill_value = np.nan + else: + fill_value = minimum_fill_value(self) + else: + fill_value = maximum_fill_value(self) + + filled = self.filled(fill_value) + return filled.argsort(axis=axis, kind=kind, order=order) + + def argmin(self, axis=None, fill_value=None, out=None): + """ + Return array of indices to the minimum values along the given axis. + + Parameters + ---------- + axis : {None, integer} + If None, the index is into the flattened array, otherwise along + the specified axis + fill_value : scalar or None, optional + Value used to fill in the masked values. If None, the output of + minimum_fill_value(self._data) is used instead. + out : {None, array}, optional + Array into which the result can be placed. Its type is preserved + and it must be of the right shape to hold the output. 
+ + Returns + ------- + ndarray or scalar + If multi-dimension input, returns a new ndarray of indices to the + minimum values along the given axis. Otherwise, returns a scalar + of index to the minimum values along the given axis. + + Examples + -------- + >>> x = np.ma.array(np.arange(4), mask=[1,1,0,0]) + >>> x.shape = (2,2) + >>> x + masked_array( + data=[[--, --], + [2, 3]], + mask=[[ True, True], + [False, False]], + fill_value=999999) + >>> x.argmin(axis=0, fill_value=-1) + array([0, 0]) + >>> x.argmin(axis=0, fill_value=9) + array([1, 1]) + + """ + if fill_value is None: + fill_value = minimum_fill_value(self) + d = self.filled(fill_value).view(ndarray) + return d.argmin(axis, out=out) + + def argmax(self, axis=None, fill_value=None, out=None): + """ + Returns array of indices of the maximum values along the given axis. + Masked values are treated as if they had the value fill_value. + + Parameters + ---------- + axis : {None, integer} + If None, the index is into the flattened array, otherwise along + the specified axis + fill_value : scalar or None, optional + Value used to fill in the masked values. If None, the output of + maximum_fill_value(self._data) is used instead. + out : {None, array}, optional + Array into which the result can be placed. Its type is preserved + and it must be of the right shape to hold the output. + + Returns + ------- + index_array : {integer_array} + + Examples + -------- + >>> a = np.arange(6).reshape(2,3) + >>> a.argmax() + 5 + >>> a.argmax(0) + array([1, 1, 1]) + >>> a.argmax(1) + array([2, 2]) + + """ + if fill_value is None: + fill_value = maximum_fill_value(self._data) + d = self.filled(fill_value).view(ndarray) + return d.argmax(axis, out=out) + + def sort(self, axis=-1, kind=None, order=None, + endwith=True, fill_value=None): + """ + Sort the array, in-place + + Parameters + ---------- + a : array_like + Array to be sorted. + axis : int, optional + Axis along which to sort. If None, the array is flattened before + sorting. The default is -1, which sorts along the last axis. + kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional + The sorting algorithm used. + order : list, optional + When `a` is a structured array, this argument specifies which fields + to compare first, second, and so on. This list does not need to + include all of the fields. + endwith : {True, False}, optional + Whether missing values (if any) should be treated as the largest values + (True) or the smallest values (False) + When the array contains unmasked values sorting at the same extremes of the + datatype, the ordering of these values and the masked values is + undefined. + fill_value : scalar or None, optional + Value used internally for the masked values. + If ``fill_value`` is not None, it supersedes ``endwith``. + + Returns + ------- + sorted_array : ndarray + Array of the same type and shape as `a`. + + See Also + -------- + numpy.ndarray.sort : Method to sort an array in-place. + argsort : Indirect sort. + lexsort : Indirect stable sort on multiple keys. + searchsorted : Find elements in a sorted array. + + Notes + ----- + See ``sort`` for notes on the different sorting algorithms. 
+ + Examples + -------- + >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) + >>> # Default + >>> a.sort() + >>> a + masked_array(data=[1, 3, 5, --, --], + mask=[False, False, False, True, True], + fill_value=999999) + + >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) + >>> # Put missing values in the front + >>> a.sort(endwith=False) + >>> a + masked_array(data=[--, --, 1, 3, 5], + mask=[ True, True, False, False, False], + fill_value=999999) + + >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) + >>> # fill_value takes over endwith + >>> a.sort(endwith=False, fill_value=3) + >>> a + masked_array(data=[1, --, --, 3, 5], + mask=[False, True, True, False, False], + fill_value=999999) + + """ + if self._mask is nomask: + ndarray.sort(self, axis=axis, kind=kind, order=order) + return + + if self is masked: + return + + sidx = self.argsort(axis=axis, kind=kind, order=order, + fill_value=fill_value, endwith=endwith) + + self[...] = np.take_along_axis(self, sidx, axis=axis) + + def min(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue): + """ + Return the minimum along a given axis. + + Parameters + ---------- + axis : {None, int}, optional + Axis along which to operate. By default, ``axis`` is None and the + flattened input is used. + out : array_like, optional + Alternative output array in which to place the result. Must be of + the same shape and buffer length as the expected output. + fill_value : scalar or None, optional + Value used to fill in the masked values. + If None, use the output of `minimum_fill_value`. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + Returns + ------- + amin : array_like + New array holding the result. + If ``out`` was specified, ``out`` is returned. + + See Also + -------- + ma.minimum_fill_value + Returns the minimum filling value for a given datatype. + + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + _mask = self._mask + newmask = _check_mask_axis(_mask, axis, **kwargs) + if fill_value is None: + fill_value = minimum_fill_value(self) + # No explicit output + if out is None: + result = self.filled(fill_value).min( + axis=axis, out=out, **kwargs).view(type(self)) + if result.ndim: + # Set the mask + result.__setmask__(newmask) + # Get rid of Infs + if newmask.ndim: + np.copyto(result, result.fill_value, where=newmask) + elif newmask: + result = masked + return result + # Explicit output + result = self.filled(fill_value).min(axis=axis, out=out, **kwargs) + if isinstance(out, MaskedArray): + outmask = getmask(out) + if outmask is nomask: + outmask = out._mask = make_mask_none(out.shape) + outmask.flat = newmask + else: + if out.dtype.kind in 'biu': + errmsg = "Masked data information would be lost in one or more"\ + " location." + raise MaskError(errmsg) + np.copyto(out, np.nan, where=newmask) + return out + + # unique to masked arrays + def mini(self, axis=None): + """ + Return the array minimum along the specified axis. + + .. deprecated:: 1.13.0 + This function is identical to both: + + * ``self.min(keepdims=True, axis=axis).squeeze(axis=axis)`` + * ``np.ma.minimum.reduce(self, axis=axis)`` + + Typically though, ``self.min(axis=axis)`` is sufficient. + + Parameters + ---------- + axis : int, optional + The axis along which to find the minima. Default is None, in which case + the minimum value in the whole array is returned. 
+ + Returns + ------- + min : scalar or MaskedArray + If `axis` is None, the result is a scalar. Otherwise, if `axis` is + given and the array is at least 2-D, the result is a masked array with + dimension one smaller than the array on which `mini` is called. + + Examples + -------- + >>> x = np.ma.array(np.arange(6), mask=[0 ,1, 0, 0, 0 ,1]).reshape(3, 2) + >>> x + masked_array( + data=[[0, --], + [2, 3], + [4, --]], + mask=[[False, True], + [False, False], + [False, True]], + fill_value=999999) + >>> x.mini() + masked_array(data=0, + mask=False, + fill_value=999999) + >>> x.mini(axis=0) + masked_array(data=[0, 3], + mask=[False, False], + fill_value=999999) + >>> x.mini(axis=1) + masked_array(data=[0, 2, 4], + mask=[False, False, False], + fill_value=999999) + + There is a small difference between `mini` and `min`: + + >>> x[:,1].mini(axis=0) + masked_array(data=3, + mask=False, + fill_value=999999) + >>> x[:,1].min(axis=0) + 3 + """ + + # 2016-04-13, 1.13.0, gh-8764 + warnings.warn( + "`mini` is deprecated; use the `min` method or " + "`np.ma.minimum.reduce instead.", + DeprecationWarning, stacklevel=2) + return minimum.reduce(self, axis) + + def max(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue): + """ + Return the maximum along a given axis. + + Parameters + ---------- + axis : {None, int}, optional + Axis along which to operate. By default, ``axis`` is None and the + flattened input is used. + out : array_like, optional + Alternative output array in which to place the result. Must + be of the same shape and buffer length as the expected output. + fill_value : scalar or None, optional + Value used to fill in the masked values. + If None, use the output of maximum_fill_value(). + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + Returns + ------- + amax : array_like + New array holding the result. + If ``out`` was specified, ``out`` is returned. + + See Also + -------- + ma.maximum_fill_value + Returns the maximum filling value for a given datatype. + + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + _mask = self._mask + newmask = _check_mask_axis(_mask, axis, **kwargs) + if fill_value is None: + fill_value = maximum_fill_value(self) + # No explicit output + if out is None: + result = self.filled(fill_value).max( + axis=axis, out=out, **kwargs).view(type(self)) + if result.ndim: + # Set the mask + result.__setmask__(newmask) + # Get rid of Infs + if newmask.ndim: + np.copyto(result, result.fill_value, where=newmask) + elif newmask: + result = masked + return result + # Explicit output + result = self.filled(fill_value).max(axis=axis, out=out, **kwargs) + if isinstance(out, MaskedArray): + outmask = getmask(out) + if outmask is nomask: + outmask = out._mask = make_mask_none(out.shape) + outmask.flat = newmask + else: + + if out.dtype.kind in 'biu': + errmsg = "Masked data information would be lost in one or more"\ + " location." + raise MaskError(errmsg) + np.copyto(out, np.nan, where=newmask) + return out + + def ptp(self, axis=None, out=None, fill_value=None, keepdims=False): + """ + Return (maximum - minimum) along the given dimension + (i.e. peak-to-peak value). + + .. warning:: + `ptp` preserves the data type of the array. This means the + return value for an input of signed integers with n bits + (e.g. `np.int8`, `np.int16`, etc) is also a signed integer + with n bits. 
In that case, peak-to-peak values greater than + ``2**(n-1)-1`` will be returned as negative values. An example + with a work-around is shown below. + + Parameters + ---------- + axis : {None, int}, optional + Axis along which to find the peaks. If None (default) the + flattened array is used. + out : {None, array_like}, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type will be cast if necessary. + fill_value : scalar or None, optional + Value used to fill in the masked values. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + Returns + ------- + ptp : ndarray. + A new array holding the result, unless ``out`` was + specified, in which case a reference to ``out`` is returned. + + Examples + -------- + >>> x = np.ma.MaskedArray([[4, 9, 2, 10], + ... [6, 9, 7, 12]]) + + >>> x.ptp(axis=1) + masked_array(data=[8, 6], + mask=False, + fill_value=999999) + + >>> x.ptp(axis=0) + masked_array(data=[2, 0, 5, 2], + mask=False, + fill_value=999999) + + >>> x.ptp() + 10 + + This example shows that a negative value can be returned when + the input is an array of signed integers. + + >>> y = np.ma.MaskedArray([[1, 127], + ... [0, 127], + ... [-1, 127], + ... [-2, 127]], dtype=np.int8) + >>> y.ptp(axis=1) + masked_array(data=[ 126, 127, -128, -127], + mask=False, + fill_value=999999, + dtype=int8) + + A work-around is to use the `view()` method to view the result as + unsigned integers with the same bit width: + + >>> y.ptp(axis=1).view(np.uint8) + masked_array(data=[126, 127, 128, 129], + mask=False, + fill_value=999999, + dtype=uint8) + """ + if out is None: + result = self.max(axis=axis, fill_value=fill_value, + keepdims=keepdims) + result -= self.min(axis=axis, fill_value=fill_value, + keepdims=keepdims) + return result + out.flat = self.max(axis=axis, out=out, fill_value=fill_value, + keepdims=keepdims) + min_value = self.min(axis=axis, fill_value=fill_value, + keepdims=keepdims) + np.subtract(out, min_value, out=out, casting='unsafe') + return out + + def partition(self, *args, **kwargs): + warnings.warn("Warning: 'partition' will ignore the 'mask' " + f"of the {self.__class__.__name__}.", + stacklevel=2) + return super().partition(*args, **kwargs) + + def argpartition(self, *args, **kwargs): + warnings.warn("Warning: 'argpartition' will ignore the 'mask' " + f"of the {self.__class__.__name__}.", + stacklevel=2) + return super().argpartition(*args, **kwargs) + + def take(self, indices, axis=None, out=None, mode='raise'): + """ + """ + (_data, _mask) = (self._data, self._mask) + cls = type(self) + # Make sure the indices are not masked + maskindices = getmask(indices) + if maskindices is not nomask: + indices = indices.filled(0) + # Get the data, promoting scalars to 0d arrays with [...] 
so that + # .view works correctly + if out is None: + out = _data.take(indices, axis=axis, mode=mode)[...].view(cls) + else: + np.take(_data, indices, axis=axis, mode=mode, out=out) + # Get the mask + if isinstance(out, MaskedArray): + if _mask is nomask: + outmask = maskindices + else: + outmask = _mask.take(indices, axis=axis, mode=mode) + outmask |= maskindices + out.__setmask__(outmask) + # demote 0d arrays back to scalars, for consistency with ndarray.take + return out[()] + + # Array methods + copy = _arraymethod('copy') + diagonal = _arraymethod('diagonal') + flatten = _arraymethod('flatten') + repeat = _arraymethod('repeat') + squeeze = _arraymethod('squeeze') + swapaxes = _arraymethod('swapaxes') + T = property(fget=lambda self: self.transpose()) + transpose = _arraymethod('transpose') + + def tolist(self, fill_value=None): + """ + Return the data portion of the masked array as a hierarchical Python list. + + Data items are converted to the nearest compatible Python type. + Masked values are converted to `fill_value`. If `fill_value` is None, + the corresponding entries in the output list will be ``None``. + + Parameters + ---------- + fill_value : scalar, optional + The value to use for invalid entries. Default is None. + + Returns + ------- + result : list + The Python list representation of the masked array. + + Examples + -------- + >>> x = np.ma.array([[1,2,3], [4,5,6], [7,8,9]], mask=[0] + [1,0]*4) + >>> x.tolist() + [[1, None, 3], [None, 5, None], [7, None, 9]] + >>> x.tolist(-999) + [[1, -999, 3], [-999, 5, -999], [7, -999, 9]] + + """ + _mask = self._mask + # No mask ? Just return .data.tolist ? + if _mask is nomask: + return self._data.tolist() + # Explicit fill_value: fill the array and get the list + if fill_value is not None: + return self.filled(fill_value).tolist() + # Structured array. + names = self.dtype.names + if names: + result = self._data.astype([(_, object) for _ in names]) + for n in names: + result[n][_mask[n]] = None + return result.tolist() + # Standard arrays. + if _mask is nomask: + return [None] + # Set temps to save time when dealing w/ marrays. + inishape = self.shape + result = np.array(self._data.ravel(), dtype=object) + result[_mask.ravel()] = None + result.shape = inishape + return result.tolist() + + def tostring(self, fill_value=None, order='C'): + r""" + A compatibility alias for `tobytes`, with exactly the same behavior. + + Despite its name, it returns `bytes` not `str`\ s. + + .. deprecated:: 1.19.0 + """ + # 2020-03-30, Numpy 1.19.0 + warnings.warn( + "tostring() is deprecated. Use tobytes() instead.", + DeprecationWarning, stacklevel=2) + + return self.tobytes(fill_value, order=order) + + def tobytes(self, fill_value=None, order='C'): + """ + Return the array data as a string containing the raw bytes in the array. + + The array is filled with a fill value before the string conversion. + + .. versionadded:: 1.9.0 + + Parameters + ---------- + fill_value : scalar, optional + Value used to fill in the masked values. Default is None, in which + case `MaskedArray.fill_value` is used. + order : {'C','F','A'}, optional + Order of the data item in the copy. Default is 'C'. + + - 'C' -- C order (row major). + - 'F' -- Fortran order (column major). + - 'A' -- Any, current order of array. + - None -- Same as 'A'. + + See Also + -------- + numpy.ndarray.tobytes + tolist, tofile + + Notes + ----- + As for `ndarray.tobytes`, information about the shape, dtype, etc., + but also about `fill_value`, will be lost. 
+
+        Examples
+        --------
+        >>> x = np.ma.array(np.array([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]])
+        >>> x.tobytes()
+        b'\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00?B\\x0f\\x00\\x00\\x00\\x00\\x00?B\\x0f\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x00\\x00\\x00\\x00'
+
+        """
+        return self.filled(fill_value).tobytes(order=order)
+
+    def tofile(self, fid, sep="", format="%s"):
+        """
+        Save a masked array to a file in binary format.
+
+        .. warning::
+           This function is not implemented yet.
+
+        Raises
+        ------
+        NotImplementedError
+            When `tofile` is called.
+
+        """
+        raise NotImplementedError("MaskedArray.tofile() not implemented yet.")
+
+    def toflex(self):
+        """
+        Transforms a masked array into a flexible-type array.
+
+        The flexible type array that is returned will have two fields:
+
+        * the ``_data`` field stores the ``_data`` part of the array.
+        * the ``_mask`` field stores the ``_mask`` part of the array.
+
+        Parameters
+        ----------
+        None
+
+        Returns
+        -------
+        record : ndarray
+            A new flexible-type `ndarray` with two fields: the first element
+            containing a value, the second element containing the corresponding
+            mask boolean. The returned record shape matches self.shape.
+
+        Notes
+        -----
+        A side-effect of transforming a masked array into a flexible `ndarray` is
+        that meta information (``fill_value``, ...) will be lost.
+
+        Examples
+        --------
+        >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
+        >>> x
+        masked_array(
+          data=[[1, --, 3],
+                [--, 5, --],
+                [7, --, 9]],
+          mask=[[False, True, False],
+                [ True, False, True],
+                [False, True, False]],
+          fill_value=999999)
+        >>> x.toflex()
+        array([[(1, False), (2, True), (3, False)],
+               [(4, True), (5, False), (6, True)],
+               [(7, False), (8, True), (9, False)]],
+              dtype=[('_data', '<i8'), ('_mask', '?')])
+
+        """
+        # Get the basic dtype.
+        ddtype = self.dtype
+        # Make sure we have a mask
+        _mask = self._mask
+        if _mask is None:
+            _mask = make_mask_none(self.shape, ddtype)
+        # And get its dtype
+        mdtype = self._mask.dtype
+
+        record = np.ndarray(shape=self.shape,
+                            dtype=[('_data', ddtype), ('_mask', mdtype)])
+        record['_data'] = self._data
+        record['_mask'] = self._mask
+        return record
+    torecords = toflex
+
+    # Pickling
+    def __getstate__(self):
+        """Return the internal state of the masked array, for pickling
+        purposes.
+
+        """
+        cf = 'CF'[self.flags.fnc]
+        data_state = super().__reduce__()[2]
+        return data_state + (getmaskarray(self).tobytes(cf), self._fill_value)
+
+    def __setstate__(self, state):
+        """Restore the internal state of the masked array, for
+        pickling purposes.  ``state`` is typically the output of the
+        ``__getstate__`` output, and is a 5-tuple:
+
+        - class name
+        - a tuple giving the shape of the data
+        - a typecode for the data
+        - a binary string for the data
+        - a binary string for the mask.
+
+        """
+        (_, shp, typ, isf, raw, msk, flv) = state
+        super().__setstate__((shp, typ, isf, raw))
+        self._mask.__setstate__((shp, make_mask_descr(typ), isf, msk))
+        self.fill_value = flv
+
+    def __reduce__(self):
+        """Return a 3-tuple for pickling a MaskedArray.
+
+        """
+        return (_mareconstruct,
+                (self.__class__, self._baseclass, (0,), 'b',),
+                self.__getstate__())
+
+    def __deepcopy__(self, memo=None):
+        from copy import deepcopy
+        copied = MaskedArray.__new__(type(self), self, copy=True)
+        if memo is None:
+            memo = {}
+        memo[id(self)] = copied
+        for (k, v) in self.__dict__.items():
+            copied.__dict__[k] = deepcopy(v, memo)
+        return copied
+
+
+def _mareconstruct(subtype, baseclass, baseshape, basetype,):
+    """Internal function that builds a new MaskedArray from the
+    information stored in a pickle.
+
+    """
+    _data = ndarray.__new__(baseclass, baseshape, basetype)
+    _mask = ndarray.__new__(ndarray, baseshape, make_mask_descr(basetype))
+    return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,)
+
+
+class mvoid(MaskedArray):
+    """
+    Fake a 'void' object to use for masked array with structured dtypes.
+    """
+
+    def __new__(self, data, mask=nomask, dtype=None, fill_value=None,
+                hardmask=False, copy=False, subok=True):
+        _data = np.array(data, copy=copy, subok=subok, dtype=dtype)
+        _data = _data.view(self)
+        _data._hardmask = hardmask
+        if mask is not nomask:
+            if isinstance(mask, np.void):
+                _data._mask = mask
+            else:
+                try:
+                    # Mask is already a 0D array
+                    _data._mask = np.void(mask)
+                except TypeError:
+                    # Transform the mask to a void
+                    mdtype = make_mask_descr(dtype)
+                    _data._mask = np.array(mask, dtype=mdtype)[()]
+        if fill_value is not None:
+            _data.fill_value = fill_value
+        return _data
+
+    @property
+    def _data(self):
+        # Make sure that the _data part is a np.void
+        return super()._data[()]
+
+    def __getitem__(self, indx):
+        """
+        Get the index.
+
+        """
+        m = self._mask
+        if isinstance(m[indx], ndarray):
+            # Can happen when indx is a multi-dimensional field:
+            # A = ma.masked_array(data=[([0,1],)], mask=[([True,
+            #     False],)], dtype=[("A", ">i2", (2,))])
+            # x = A[0]; y = x["A"]; then y.mask["A"].size==2
+            # and we can not say masked/unmasked.
+            # The result is no longer mvoid!
+            # See also issue #6724.
+            return masked_array(
+                data=self._data[indx], mask=m[indx],
+                fill_value=self._fill_value[indx],
+                hard_mask=self._hardmask)
+        if m is not nomask and m[indx]:
+            return masked
+        return self._data[indx]
+
+    def __setitem__(self, indx, value):
+        self._data[indx] = value
+        if self._hardmask:
+            self._mask[indx] |= getattr(value, "_mask", False)
+        else:
+            self._mask[indx] = getattr(value, "_mask", False)
+
+    def __str__(self):
+        m = self._mask
+        if m is nomask:
+            return str(self._data)
+
+        rdtype = _replace_dtype_fields(self._data.dtype, "O")
+        data_arr = super()._data
+        res = data_arr.astype(rdtype)
+        _recursive_printoption(res, self._mask, masked_print_option)
+        return str(res)
+
+    __repr__ = __str__
+
+    def __iter__(self):
+        "Defines an iterator for mvoid"
+        (_data, _mask) = (self._data, self._mask)
+        if _mask is nomask:
+            yield from _data
+        else:
+            for (d, m) in zip(_data, _mask):
+                if m:
+                    yield masked
+                else:
+                    yield d
+
+    def __len__(self):
+        return self._data.__len__()
+
+    def filled(self, fill_value=None):
+        """
+        Return a copy with masked fields filled with a given value.
+
+        Parameters
+        ----------
+        fill_value : array_like, optional
+            The value to use for invalid entries. Can be scalar or
+            non-scalar. If latter is the case, the filled array should
+            be broadcastable over input array. Default is None, in
+            which case the `fill_value` attribute is used instead.
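+
+        A minimal illustrative sketch (field names and values assumed):
+
+        >>> v = np.ma.array([(1, 2.0)], mask=[(True, False)],
+        ...                 dtype=[('a', int), ('b', float)])[0]
+        >>> w = v.filled(-1)    # np.void: (-1, 2.0)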
+ + Returns + ------- + filled_void + A `np.void` object + + See Also + -------- + MaskedArray.filled + + """ + return asarray(self).filled(fill_value)[()] + + def tolist(self): + """ + Transforms the mvoid object into a tuple. + + Masked fields are replaced by None. + + Returns + ------- + returned_tuple + Tuple of fields + """ + _mask = self._mask + if _mask is nomask: + return self._data.tolist() + result = [] + for (d, m) in zip(self._data, self._mask): + if m: + result.append(None) + else: + # .item() makes sure we return a standard Python object + result.append(d.item()) + return tuple(result) + + +############################################################################## +# Shortcuts # +############################################################################## + + +def isMaskedArray(x): + """ + Test whether input is an instance of MaskedArray. + + This function returns True if `x` is an instance of MaskedArray + and returns False otherwise. Any object is accepted as input. + + Parameters + ---------- + x : object + Object to test. + + Returns + ------- + result : bool + True if `x` is a MaskedArray. + + See Also + -------- + isMA : Alias to isMaskedArray. + isarray : Alias to isMaskedArray. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.eye(3, 3) + >>> a + array([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]]) + >>> m = ma.masked_values(a, 0) + >>> m + masked_array( + data=[[1.0, --, --], + [--, 1.0, --], + [--, --, 1.0]], + mask=[[False, True, True], + [ True, False, True], + [ True, True, False]], + fill_value=0.0) + >>> ma.isMaskedArray(a) + False + >>> ma.isMaskedArray(m) + True + >>> ma.isMaskedArray([0, 1, 2]) + False + + """ + return isinstance(x, MaskedArray) + + +isarray = isMaskedArray +isMA = isMaskedArray # backward compatibility + + +class MaskedConstant(MaskedArray): + # the lone np.ma.masked instance + __singleton = None + + @classmethod + def __has_singleton(cls): + # second case ensures `cls.__singleton` is not just a view on the + # superclass singleton + return cls.__singleton is not None and type(cls.__singleton) is cls + + def __new__(cls): + if not cls.__has_singleton(): + # We define the masked singleton as a float for higher precedence. + # Note that it can be tricky sometimes w/ type comparison + data = np.array(0.) + mask = np.array(True) + + # prevent any modifications + data.flags.writeable = False + mask.flags.writeable = False + + # don't fall back on MaskedArray.__new__(MaskedConstant), since + # that might confuse it - this way, the construction is entirely + # within our control + cls.__singleton = MaskedArray(data, mask=mask).view(cls) + + return cls.__singleton + + def __array_finalize__(self, obj): + if not self.__has_singleton(): + # this handles the `.view` in __new__, which we want to copy across + # properties normally + return super().__array_finalize__(obj) + elif self is self.__singleton: + # not clear how this can happen, play it safe + pass + else: + # everywhere else, we want to downcast to MaskedArray, to prevent a + # duplicate maskedconstant. 
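+            # (instances created through view-casting or slicing end up
+            # here; demoting them to MaskedArray keeps ``np.ma.masked``
+            # the only MaskedConstant instance)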
+ self.__class__ = MaskedArray + MaskedArray.__array_finalize__(self, obj) + + def __array_prepare__(self, obj, context=None): + return self.view(MaskedArray).__array_prepare__(obj, context) + + def __array_wrap__(self, obj, context=None): + return self.view(MaskedArray).__array_wrap__(obj, context) + + def __str__(self): + return str(masked_print_option._display) + + def __repr__(self): + if self is MaskedConstant.__singleton: + return 'masked' + else: + # it's a subclass, or something is wrong, make it obvious + return object.__repr__(self) + + def __format__(self, format_spec): + # Replace ndarray.__format__ with the default, which supports no format characters. + # Supporting format characters is unwise here, because we do not know what type + # the user was expecting - better to not guess. + try: + return object.__format__(self, format_spec) + except TypeError: + # 2020-03-23, NumPy 1.19.0 + warnings.warn( + "Format strings passed to MaskedConstant are ignored, but in future may " + "error or produce different behavior", + FutureWarning, stacklevel=2 + ) + return object.__format__(self, "") + + def __reduce__(self): + """Override of MaskedArray's __reduce__. + """ + return (self.__class__, ()) + + # inplace operations have no effect. We have to override them to avoid + # trying to modify the readonly data and mask arrays + def __iop__(self, other): + return self + __iadd__ = \ + __isub__ = \ + __imul__ = \ + __ifloordiv__ = \ + __itruediv__ = \ + __ipow__ = \ + __iop__ + del __iop__ # don't leave this around + + def copy(self, *args, **kwargs): + """ Copy is a no-op on the maskedconstant, as it is a scalar """ + # maskedconstant is a scalar, so copy doesn't need to copy. There's + # precedent for this with `np.bool_` scalars. + return self + + def __copy__(self): + return self + + def __deepcopy__(self, memo): + return self + + def __setattr__(self, attr, value): + if not self.__has_singleton(): + # allow the singleton to be initialized + return super().__setattr__(attr, value) + elif self is self.__singleton: + raise AttributeError( + f"attributes of {self!r} are not writeable") + else: + # duplicate instance - we can end up here from __array_finalize__, + # where we set the __class__ attribute + return super().__setattr__(attr, value) + + +masked = masked_singleton = MaskedConstant() +masked_array = MaskedArray + + +def array(data, dtype=None, copy=False, order=None, + mask=nomask, fill_value=None, keep_mask=True, + hard_mask=False, shrink=True, subok=True, ndmin=0): + """ + Shortcut to MaskedArray. + + The options are in a different order for convenience and backwards + compatibility. + + """ + return MaskedArray(data, mask=mask, dtype=dtype, copy=copy, + subok=subok, keep_mask=keep_mask, + hard_mask=hard_mask, fill_value=fill_value, + ndmin=ndmin, shrink=shrink, order=order) +array.__doc__ = masked_array.__doc__ + + +def is_masked(x): + """ + Determine whether input has masked values. + + Accepts any object as input, but always returns False unless the + input is a MaskedArray containing masked values. + + Parameters + ---------- + x : array_like + Array to check for masked values. + + Returns + ------- + result : bool + True if `x` is a MaskedArray with masked values, False otherwise. 
+ + Examples + -------- + >>> import numpy.ma as ma + >>> x = ma.masked_equal([0, 1, 0, 2, 3], 0) + >>> x + masked_array(data=[--, 1, --, 2, 3], + mask=[ True, False, True, False, False], + fill_value=0) + >>> ma.is_masked(x) + True + >>> x = ma.masked_equal([0, 1, 0, 2, 3], 42) + >>> x + masked_array(data=[0, 1, 0, 2, 3], + mask=False, + fill_value=42) + >>> ma.is_masked(x) + False + + Always returns False if `x` isn't a MaskedArray. + + >>> x = [False, True, False] + >>> ma.is_masked(x) + False + >>> x = 'a string' + >>> ma.is_masked(x) + False + + """ + m = getmask(x) + if m is nomask: + return False + elif m.any(): + return True + return False + + +############################################################################## +# Extrema functions # +############################################################################## + + +class _extrema_operation(_MaskedUFunc): + """ + Generic class for maximum/minimum functions. + + .. note:: + This is the base class for `_maximum_operation` and + `_minimum_operation`. + + """ + def __init__(self, ufunc, compare, fill_value): + super().__init__(ufunc) + self.compare = compare + self.fill_value_func = fill_value + + def __call__(self, a, b=None): + "Executes the call behavior." + if b is None: + # 2016-04-13, 1.13.0 + warnings.warn( + f"Single-argument form of np.ma.{self.__name__} is deprecated. Use " + f"np.ma.{self.__name__}.reduce instead.", + DeprecationWarning, stacklevel=2) + return self.reduce(a) + return where(self.compare(a, b), a, b) + + def reduce(self, target, axis=np._NoValue): + "Reduce target along the given axis." + target = narray(target, copy=False, subok=True) + m = getmask(target) + + if axis is np._NoValue and target.ndim > 1: + # 2017-05-06, Numpy 1.13.0: warn on axis default + warnings.warn( + f"In the future the default for ma.{self.__name__}.reduce will be axis=0, " + f"not the current None, to match np.{self.__name__}.reduce. " + "Explicitly pass 0 or None to silence this warning.", + MaskedArrayFutureWarning, stacklevel=2) + axis = None + + if axis is not np._NoValue: + kwargs = dict(axis=axis) + else: + kwargs = dict() + + if m is nomask: + t = self.f.reduce(target, **kwargs) + else: + target = target.filled( + self.fill_value_func(target)).view(type(target)) + t = self.f.reduce(target, **kwargs) + m = umath.logical_and.reduce(m, **kwargs) + if hasattr(t, '_mask'): + t._mask = m + elif m: + t = masked + return t + + def outer(self, a, b): + "Return the function applied to the outer product of a and b." 
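+        # Output cell (i, j) combines a[i] and b[j]; it is masked whenever
+        # either input element is masked, so the result mask computed
+        # below is the outer logical OR of the two input masks.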
+ ma = getmask(a) + mb = getmask(b) + if ma is nomask and mb is nomask: + m = nomask + else: + ma = getmaskarray(a) + mb = getmaskarray(b) + m = logical_or.outer(ma, mb) + result = self.f.outer(filled(a), filled(b)) + if not isinstance(result, MaskedArray): + result = result.view(MaskedArray) + result._mask = m + return result + +def min(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + try: + return obj.min(axis=axis, fill_value=fill_value, out=out, **kwargs) + except (AttributeError, TypeError): + # If obj doesn't have a min method, or if the method doesn't accept a + # fill_value argument + return asanyarray(obj).min(axis=axis, fill_value=fill_value, + out=out, **kwargs) +min.__doc__ = MaskedArray.min.__doc__ + +def max(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + try: + return obj.max(axis=axis, fill_value=fill_value, out=out, **kwargs) + except (AttributeError, TypeError): + # If obj doesn't have a max method, or if the method doesn't accept a + # fill_value argument + return asanyarray(obj).max(axis=axis, fill_value=fill_value, + out=out, **kwargs) +max.__doc__ = MaskedArray.max.__doc__ + + +def ptp(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + try: + return obj.ptp(axis, out=out, fill_value=fill_value, **kwargs) + except (AttributeError, TypeError): + # If obj doesn't have a ptp method or if the method doesn't accept + # a fill_value argument + return asanyarray(obj).ptp(axis=axis, fill_value=fill_value, + out=out, **kwargs) +ptp.__doc__ = MaskedArray.ptp.__doc__ + + +############################################################################## +# Definition of functions from the corresponding methods # +############################################################################## + + +class _frommethod: + """ + Define functions from existing MaskedArray methods. + + Parameters + ---------- + methodname : str + Name of the method to transform. + + """ + + def __init__(self, methodname, reversed=False): + self.__name__ = methodname + self.__doc__ = self.getdoc() + self.reversed = reversed + + def getdoc(self): + "Return the doc of the function (from the doc of the method)." 
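+        # Look the name up on MaskedArray first and fall back to the
+        # top-level numpy function of the same name.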
+        meth = getattr(MaskedArray, self.__name__, None) or\
+               getattr(np, self.__name__, None)
+        if meth is not None:
+            signature = self.__name__ + get_object_signature(meth)
+            doc = """    %s\n%s""" % (
+                signature, getattr(meth, '__doc__', None))
+            return doc
+
+    def __call__(self, a, *args, **params):
+        if self.reversed:
+            args = list(args)
+            a, args[0] = args[0], a
+
+        marr = asanyarray(a)
+        method_name = self.__name__
+        method = getattr(type(marr), method_name, None)
+        if method is None:
+            # use the corresponding np function
+            method = getattr(np, method_name)
+
+        return method(marr, *args, **params)
+
+
+all = _frommethod('all')
+anomalies = anom = _frommethod('anom')
+any = _frommethod('any')
+compress = _frommethod('compress', reversed=True)
+cumprod = _frommethod('cumprod')
+cumsum = _frommethod('cumsum')
+copy = _frommethod('copy')
+diagonal = _frommethod('diagonal')
+harden_mask = _frommethod('harden_mask')
+ids = _frommethod('ids')
+maximum = _extrema_operation(umath.maximum, greater, maximum_fill_value)
+mean = _frommethod('mean')
+minimum = _extrema_operation(umath.minimum, less, minimum_fill_value)
+nonzero = _frommethod('nonzero')
+prod = _frommethod('prod')
+product = _frommethod('prod')
+ravel = _frommethod('ravel')
+repeat = _frommethod('repeat')
+shrink_mask = _frommethod('shrink_mask')
+soften_mask = _frommethod('soften_mask')
+std = _frommethod('std')
+sum = _frommethod('sum')
+swapaxes = _frommethod('swapaxes')
+#take = _frommethod('take')
+trace = _frommethod('trace')
+var = _frommethod('var')
+
+count = _frommethod('count')
+
+def take(a, indices, axis=None, out=None, mode='raise'):
+    """
+    Take elements from a masked array along an axis.
+
+    This is the function version of `MaskedArray.take`; see that method
+    for details.
+    """
+    a = masked_array(a)
+    return a.take(indices, axis=axis, out=out, mode=mode)
+
+
+def power(a, b, third=None):
+    """
+    Returns the element-wise base array raised to the power from the
+    second array.
+
+    This is the masked array version of `numpy.power`. For details see
+    `numpy.power`.
+
+    See Also
+    --------
+    numpy.power
+
+    Notes
+    -----
+    The *out* argument to `numpy.power` is not supported, `third` has to be
+    None.
+
+    """
+    if third is not None:
+        raise MaskError("3-argument power not supported.")
+    # Get the masks
+    ma = getmask(a)
+    mb = getmask(b)
+    m = mask_or(ma, mb)
+    # Get the rawdata
+    fa = getdata(a)
+    fb = getdata(b)
+    # Get the type of the result (so that we preserve subclasses)
+    if isinstance(a, MaskedArray):
+        basetype = type(a)
+    else:
+        basetype = MaskedArray
+    # Get the result and view it as a (subclass of) MaskedArray
+    with np.errstate(divide='ignore', invalid='ignore'):
+        result = np.where(m, fa, umath.power(fa, fb)).view(basetype)
+    result._update_from(a)
+    # Find where we're in trouble w/ NaNs and Infs
+    invalid = np.logical_not(np.isfinite(result.view(ndarray)))
+    # Add the initial mask
+    if m is not nomask:
+        if not result.ndim:
+            return masked
+        result._mask = np.logical_or(m, invalid)
+    # Fix the invalid parts
+    if invalid.any():
+        if not result.ndim:
+            return masked
+        elif result._mask is nomask:
+            result._mask = invalid
+        result._data[invalid] = result.fill_value
+    return result
+
+argmin = _frommethod('argmin')
+argmax = _frommethod('argmax')
+
+def argsort(a, axis=np._NoValue, kind=None, order=None, endwith=True, fill_value=None):
+    "Function version of the eponymous method."
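+    # Delegate to MaskedArray.argsort when possible so that the masked
+    # entries are handled through `endwith` and `fill_value`.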
+ a = np.asanyarray(a) + + # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default + if axis is np._NoValue: + axis = _deprecate_argsort_axis(a) + + if isinstance(a, MaskedArray): + return a.argsort(axis=axis, kind=kind, order=order, + endwith=endwith, fill_value=fill_value) + else: + return a.argsort(axis=axis, kind=kind, order=order) +argsort.__doc__ = MaskedArray.argsort.__doc__ + +def sort(a, axis=-1, kind=None, order=None, endwith=True, fill_value=None): + """ + Return a sorted copy of the masked array. + + Equivalent to creating a copy of the array + and applying the MaskedArray ``sort()`` method. + + Refer to ``MaskedArray.sort`` for the full documentation + + See Also + -------- + MaskedArray.sort : equivalent method + """ + a = np.array(a, copy=True, subok=True) + if axis is None: + a = a.flatten() + axis = 0 + + if isinstance(a, MaskedArray): + a.sort(axis=axis, kind=kind, order=order, + endwith=endwith, fill_value=fill_value) + else: + a.sort(axis=axis, kind=kind, order=order) + return a + + +def compressed(x): + """ + Return all the non-masked data as a 1-D array. + + This function is equivalent to calling the "compressed" method of a + `ma.MaskedArray`, see `ma.MaskedArray.compressed` for details. + + See Also + -------- + ma.MaskedArray.compressed : Equivalent method. + + """ + return asanyarray(x).compressed() + + +def concatenate(arrays, axis=0): + """ + Concatenate a sequence of arrays along the given axis. + + Parameters + ---------- + arrays : sequence of array_like + The arrays must have the same shape, except in the dimension + corresponding to `axis` (the first, by default). + axis : int, optional + The axis along which the arrays will be joined. Default is 0. + + Returns + ------- + result : MaskedArray + The concatenated array with any masked entries preserved. + + See Also + -------- + numpy.concatenate : Equivalent function in the top-level NumPy module. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = ma.arange(3) + >>> a[1] = ma.masked + >>> b = ma.arange(2, 5) + >>> a + masked_array(data=[0, --, 2], + mask=[False, True, False], + fill_value=999999) + >>> b + masked_array(data=[2, 3, 4], + mask=False, + fill_value=999999) + >>> ma.concatenate([a, b]) + masked_array(data=[0, --, 2, 2, 3, 4], + mask=[False, True, False, False, False, False], + fill_value=999999) + + """ + d = np.concatenate([getdata(a) for a in arrays], axis) + rcls = get_masked_subclass(*arrays) + data = d.view(rcls) + # Check whether one of the arrays has a non-empty mask. + for x in arrays: + if getmask(x) is not nomask: + break + else: + return data + # OK, so we have to concatenate the masks + dm = np.concatenate([getmaskarray(a) for a in arrays], axis) + dm = dm.reshape(d.shape) + + # If we decide to keep a '_shrinkmask' option, we want to check that + # all of them are True, and then check for dm.any() + data._mask = _shrink_mask(dm) + return data + + +def diag(v, k=0): + """ + Extract a diagonal or construct a diagonal array. + + This function is the equivalent of `numpy.diag` that takes masked + values into account, see `numpy.diag` for details. + + See Also + -------- + numpy.diag : Equivalent function for ndarrays. + + """ + output = np.diag(v, k).view(MaskedArray) + if getmask(v) is not nomask: + output._mask = np.diag(v._mask, k) + return output + + +def left_shift(a, n): + """ + Shift the bits of an integer to the left. + + This is the masked array version of `numpy.left_shift`, for details + see that function. 
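+
+    Parameters
+    ----------
+    a : array_like
+        Input array of integers.
+    n : int
+        The number of bits to shift to the left.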
+ + See Also + -------- + numpy.left_shift + + """ + m = getmask(a) + if m is nomask: + d = umath.left_shift(filled(a), n) + return masked_array(d) + else: + d = umath.left_shift(filled(a, 0), n) + return masked_array(d, mask=m) + + +def right_shift(a, n): + """ + Shift the bits of an integer to the right. + + This is the masked array version of `numpy.right_shift`, for details + see that function. + + See Also + -------- + numpy.right_shift + + """ + m = getmask(a) + if m is nomask: + d = umath.right_shift(filled(a), n) + return masked_array(d) + else: + d = umath.right_shift(filled(a, 0), n) + return masked_array(d, mask=m) + + +def put(a, indices, values, mode='raise'): + """ + Set storage-indexed locations to corresponding values. + + This function is equivalent to `MaskedArray.put`, see that method + for details. + + See Also + -------- + MaskedArray.put + + """ + # We can't use 'frommethod', the order of arguments is different + try: + return a.put(indices, values, mode=mode) + except AttributeError: + return narray(a, copy=False).put(indices, values, mode=mode) + + +def putmask(a, mask, values): # , mode='raise'): + """ + Changes elements of an array based on conditional and input values. + + This is the masked array version of `numpy.putmask`, for details see + `numpy.putmask`. + + See Also + -------- + numpy.putmask + + Notes + ----- + Using a masked array as `values` will **not** transform a `ndarray` into + a `MaskedArray`. + + """ + # We can't use 'frommethod', the order of arguments is different + if not isinstance(a, MaskedArray): + a = a.view(MaskedArray) + (valdata, valmask) = (getdata(values), getmask(values)) + if getmask(a) is nomask: + if valmask is not nomask: + a._sharedmask = True + a._mask = make_mask_none(a.shape, a.dtype) + np.copyto(a._mask, valmask, where=mask) + elif a._hardmask: + if valmask is not nomask: + m = a._mask.copy() + np.copyto(m, valmask, where=mask) + a.mask |= m + else: + if valmask is nomask: + valmask = getmaskarray(values) + np.copyto(a._mask, valmask, where=mask) + np.copyto(a._data, valdata, where=mask) + return + + +def transpose(a, axes=None): + """ + Permute the dimensions of an array. + + This function is exactly equivalent to `numpy.transpose`. + + See Also + -------- + numpy.transpose : Equivalent function in top-level NumPy module. + + Examples + -------- + >>> import numpy.ma as ma + >>> x = ma.arange(4).reshape((2,2)) + >>> x[1, 1] = ma.masked + >>> x + masked_array( + data=[[0, 1], + [2, --]], + mask=[[False, False], + [False, True]], + fill_value=999999) + + >>> ma.transpose(x) + masked_array( + data=[[0, 2], + [1, --]], + mask=[[False, False], + [False, True]], + fill_value=999999) + """ + # We can't use 'frommethod', as 'transpose' doesn't take keywords + try: + return a.transpose(axes) + except AttributeError: + return narray(a, copy=False).transpose(axes).view(MaskedArray) + + +def reshape(a, new_shape, order='C'): + """ + Returns an array containing the same data with a new shape. + + Refer to `MaskedArray.reshape` for full documentation. + + See Also + -------- + MaskedArray.reshape : equivalent function + + """ + # We can't use 'frommethod', it whine about some parameters. Dmmit. + try: + return a.reshape(new_shape, order=order) + except AttributeError: + _tmp = narray(a, copy=False).reshape(new_shape, order=order) + return _tmp.view(MaskedArray) + + +def resize(x, new_shape): + """ + Return a new masked array with the specified size and shape. + + This is the masked equivalent of the `numpy.resize` function. 
The new + array is filled with repeated copies of `x` (in the order that the + data are stored in memory). If `x` is masked, the new array will be + masked, and the new mask will be a repetition of the old one. + + See Also + -------- + numpy.resize : Equivalent function in the top level NumPy module. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = ma.array([[1, 2] ,[3, 4]]) + >>> a[0, 1] = ma.masked + >>> a + masked_array( + data=[[1, --], + [3, 4]], + mask=[[False, True], + [False, False]], + fill_value=999999) + >>> np.resize(a, (3, 3)) + masked_array( + data=[[1, 2, 3], + [4, 1, 2], + [3, 4, 1]], + mask=False, + fill_value=999999) + >>> ma.resize(a, (3, 3)) + masked_array( + data=[[1, --, 3], + [4, 1, --], + [3, 4, 1]], + mask=[[False, True, False], + [False, False, True], + [False, False, False]], + fill_value=999999) + + A MaskedArray is always returned, regardless of the input type. + + >>> a = np.array([[1, 2] ,[3, 4]]) + >>> ma.resize(a, (3, 3)) + masked_array( + data=[[1, 2, 3], + [4, 1, 2], + [3, 4, 1]], + mask=False, + fill_value=999999) + + """ + # We can't use _frommethods here, as N.resize is notoriously whiny. + m = getmask(x) + if m is not nomask: + m = np.resize(m, new_shape) + result = np.resize(x, new_shape).view(get_masked_subclass(x)) + if result.ndim: + result._mask = m + return result + + +def ndim(obj): + """ + maskedarray version of the numpy function. + + """ + return np.ndim(getdata(obj)) + +ndim.__doc__ = np.ndim.__doc__ + + +def shape(obj): + "maskedarray version of the numpy function." + return np.shape(getdata(obj)) +shape.__doc__ = np.shape.__doc__ + + +def size(obj, axis=None): + "maskedarray version of the numpy function." + return np.size(getdata(obj), axis) +size.__doc__ = np.size.__doc__ + + +############################################################################## +# Extra functions # +############################################################################## + + +def where(condition, x=_NoValue, y=_NoValue): + """ + Return a masked array with elements from `x` or `y`, depending on condition. + + .. note:: + When only `condition` is provided, this function is identical to + `nonzero`. The rest of this documentation covers only the case where + all three arguments are provided. + + Parameters + ---------- + condition : array_like, bool + Where True, yield `x`, otherwise yield `y`. + x, y : array_like, optional + Values from which to choose. `x`, `y` and `condition` need to be + broadcastable to some shape. + + Returns + ------- + out : MaskedArray + An masked array with `masked` elements where the condition is masked, + elements from `x` where `condition` is True, and elements from `y` + elsewhere. + + See Also + -------- + numpy.where : Equivalent function in the top-level NumPy module. + nonzero : The function that is called when x and y are omitted + + Examples + -------- + >>> x = np.ma.array(np.arange(9.).reshape(3, 3), mask=[[0, 1, 0], + ... [1, 0, 1], + ... 
[0, 1, 0]]) + >>> x + masked_array( + data=[[0.0, --, 2.0], + [--, 4.0, --], + [6.0, --, 8.0]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=1e+20) + >>> np.ma.where(x > 5, x, -3.1416) + masked_array( + data=[[-3.1416, --, -3.1416], + [--, -3.1416, --], + [6.0, --, 8.0]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=1e+20) + + """ + + # handle the single-argument case + missing = (x is _NoValue, y is _NoValue).count(True) + if missing == 1: + raise ValueError("Must provide both 'x' and 'y' or neither.") + if missing == 2: + return nonzero(condition) + + # we only care if the condition is true - false or masked pick y + cf = filled(condition, False) + xd = getdata(x) + yd = getdata(y) + + # we need the full arrays here for correct final dimensions + cm = getmaskarray(condition) + xm = getmaskarray(x) + ym = getmaskarray(y) + + # deal with the fact that masked.dtype == float64, but we don't actually + # want to treat it as that. + if x is masked and y is not masked: + xd = np.zeros((), dtype=yd.dtype) + xm = np.ones((), dtype=ym.dtype) + elif y is masked and x is not masked: + yd = np.zeros((), dtype=xd.dtype) + ym = np.ones((), dtype=xm.dtype) + + data = np.where(cf, xd, yd) + mask = np.where(cf, xm, ym) + mask = np.where(cm, np.ones((), dtype=mask.dtype), mask) + + # collapse the mask, for backwards compatibility + mask = _shrink_mask(mask) + + return masked_array(data, mask=mask) + + +def choose(indices, choices, out=None, mode='raise'): + """ + Use an index array to construct a new array from a list of choices. + + Given an array of integers and a list of n choice arrays, this method + will create a new array that merges each of the choice arrays. Where a + value in `index` is i, the new array will have the value that choices[i] + contains in the same place. + + Parameters + ---------- + indices : ndarray of ints + This array must contain integers in ``[0, n-1]``, where n is the + number of choices. + choices : sequence of arrays + Choice arrays. The index array and all of the choices should be + broadcastable to the same shape. + out : array, optional + If provided, the result will be inserted into this array. It should + be of the appropriate shape and `dtype`. + mode : {'raise', 'wrap', 'clip'}, optional + Specifies how out-of-bounds indices will behave. + + * 'raise' : raise an error + * 'wrap' : wrap around + * 'clip' : clip to the range + + Returns + ------- + merged_array : array + + See Also + -------- + choose : equivalent function + + Examples + -------- + >>> choice = np.array([[1,1,1], [2,2,2], [3,3,3]]) + >>> a = np.array([2, 1, 0]) + >>> np.ma.choose(a, choice) + masked_array(data=[3, 2, 1], + mask=False, + fill_value=999999) + + """ + def fmask(x): + "Returns the filled array, or True if masked." + if x is masked: + return True + return filled(x) + + def nmask(x): + "Returns the mask, True if ``masked``, False if ``nomask``." + if x is masked: + return True + return getmask(x) + # Get the indices. + c = filled(indices, 0) + # Get the masks. + masks = [nmask(x) for x in choices] + data = [fmask(x) for x in choices] + # Construct the mask + outputmask = np.choose(c, masks, mode=mode) + outputmask = make_mask(mask_or(outputmask, getmask(indices)), + copy=False, shrink=True) + # Get the choices. 
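+    # Apply np.choose to the filled data and attach the combined mask,
+    # writing through `out` when an output array was supplied.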
+    d = np.choose(c, data, mode=mode, out=out).view(MaskedArray)
+    if out is not None:
+        if isinstance(out, MaskedArray):
+            out.__setmask__(outputmask)
+        return out
+    d.__setmask__(outputmask)
+    return d
+
+
+def round_(a, decimals=0, out=None):
+    """
+    Return a copy of a, rounded to 'decimals' places.
+
+    When 'decimals' is negative, it specifies the number of positions
+    to the left of the decimal point. The real and imaginary parts of
+    complex numbers are rounded separately. Nothing is done if the
+    array is not of float type and 'decimals' is greater than or equal
+    to 0.
+
+    Parameters
+    ----------
+    decimals : int
+        Number of decimals to round to. May be negative.
+    out : array_like
+        Existing array to use for output.
+        If not given, returns a default copy of a.
+
+    Notes
+    -----
+    If out is given and does not have a mask attribute, the mask of a
+    is lost!
+
+    """
+    if out is None:
+        return np.round_(a, decimals, out)
+    else:
+        np.round_(getdata(a), decimals, out)
+        if hasattr(out, '_mask'):
+            out._mask = getmask(a)
+        return out
+round = round_
+
+
+# Needed by dot, so move here from extras.py. It will still be exported
+# from extras.py for compatibility.
+def mask_rowcols(a, axis=None):
+    """
+    Mask rows and/or columns of a 2D array that contain masked values.
+
+    Mask whole rows and/or columns of a 2D array that contain
+    masked values. The masking behavior is selected using the
+    `axis` parameter.
+
+      - If `axis` is None, rows *and* columns are masked.
+      - If `axis` is 0, only rows are masked.
+      - If `axis` is 1 or -1, only columns are masked.
+
+    Parameters
+    ----------
+    a : array_like, MaskedArray
+        The array to mask. If not a MaskedArray instance (or if no array
+        elements are masked), the result is a MaskedArray with `mask` set
+        to `nomask` (False). Must be a 2D array.
+    axis : int, optional
+        Axis along which to perform the operation. If None, applies to a
+        flattened version of the array.
+
+    Returns
+    -------
+    a : MaskedArray
+        A modified version of the input array, masked depending on the value
+        of the `axis` parameter.
+
+    Raises
+    ------
+    NotImplementedError
+        If input array `a` is not 2D.
+
+    See Also
+    --------
+    mask_rows : Mask rows of a 2D array that contain masked values.
+    mask_cols : Mask cols of a 2D array that contain masked values.
+    masked_where : Mask where a condition is met.
+
+    Notes
+    -----
+    The input array's mask is modified by this function.
+
+    Examples
+    --------
+    >>> import numpy.ma as ma
+    >>> a = np.zeros((3, 3), dtype=int)
+    >>> a[1, 1] = 1
+    >>> a
+    array([[0, 0, 0],
+           [0, 1, 0],
+           [0, 0, 0]])
+    >>> a = ma.masked_equal(a, 1)
+    >>> a
+    masked_array(
+      data=[[0, 0, 0],
+            [0, --, 0],
+            [0, 0, 0]],
+      mask=[[False, False, False],
+            [False,  True, False],
+            [False, False, False]],
+      fill_value=1)
+    >>> ma.mask_rowcols(a)
+    masked_array(
+      data=[[0, --, 0],
+            [--, --, --],
+            [0, --, 0]],
+      mask=[[False,  True, False],
+            [ True,  True,  True],
+            [False,  True, False]],
+      fill_value=1)
+
+    """
+    a = array(a, subok=False)
+    if a.ndim != 2:
+        raise NotImplementedError("mask_rowcols works for 2D arrays only.")
+    m = getmask(a)
+    # Nothing is masked: return a
+    if m is nomask or not m.any():
+        return a
+    maskedval = m.nonzero()
+    a._mask = a._mask.copy()
+    if not axis:
+        a[np.unique(maskedval[0])] = masked
+    if axis in [None, 1, -1]:
+        a[:, np.unique(maskedval[1])] = masked
+    return a
+
+
+# Include masked dot here to avoid import problems in getting it from
+# extras.py. Note that it is not included in __all__, but rather exported
+# from extras in order to avoid backward compatibility problems.
+def dot(a, b, strict=False, out=None):
+    """
+    Return the dot product of two arrays.
+
+    This function is the equivalent of `numpy.dot` that takes masked values
+    into account. Note that `strict` and `out` are in different positions
+    than in the method version. In order to maintain compatibility with the
+    corresponding method, it is recommended that the optional arguments be
+    treated as keyword only. At some point that may be mandatory.
+
+    .. note::
+      Works only with 2-D arrays at the moment.
+
+
+    Parameters
+    ----------
+    a, b : masked_array_like
+        Input arrays.
+    strict : bool, optional
+        Whether masked data are propagated (True) or set to 0 (False) for
+        the computation. Default is False. Propagating the mask means that
+        if a masked value appears in a row or column, the whole row or
+        column is considered masked.
+    out : masked_array, optional
+        Output argument. This must have the exact kind that would be returned
+        if it was not used. In particular, it must have the right type, must be
+        C-contiguous, and its dtype must be the dtype that would be returned
+        for `dot(a,b)`. This is a performance feature. Therefore, if these
+        conditions are not met, an exception is raised, instead of attempting
+        to be flexible.
+
+        .. versionadded:: 1.10.2
+
+    See Also
+    --------
+    numpy.dot : Equivalent function for ndarrays.
+
+    Examples
+    --------
+    >>> a = np.ma.array([[1, 2, 3], [4, 5, 6]], mask=[[1, 0, 0], [0, 0, 0]])
+    >>> b = np.ma.array([[1, 2], [3, 4], [5, 6]], mask=[[1, 0], [0, 0], [0, 0]])
+    >>> np.ma.dot(a, b)
+    masked_array(
+      data=[[21, 26],
+            [45, 64]],
+      mask=[[False, False],
+            [False, False]],
+      fill_value=999999)
+    >>> np.ma.dot(a, b, strict=True)
+    masked_array(
+      data=[[--, --],
+            [--, 64]],
+      mask=[[ True,  True],
+            [ True, False]],
+      fill_value=999999)
+
+    """
+    # !!!: Works only with 2D arrays. There should be a way to get it to run
+    # with higher dimension
+    if strict and (a.ndim == 2) and (b.ndim == 2):
+        a = mask_rowcols(a, 0)
+        b = mask_rowcols(b, 1)
+    am = ~getmaskarray(a)
+    bm = ~getmaskarray(b)
+
+    if out is None:
+        d = np.dot(filled(a, 0), filled(b, 0))
+        m = ~np.dot(am, bm)
+        if d.ndim == 0:
+            d = np.asarray(d)
+        r = d.view(get_masked_subclass(a, b))
+        r.__setmask__(m)
+        return r
+    else:
+        d = np.dot(filled(a, 0), filled(b, 0), out._data)
+        if out.mask.shape != d.shape:
+            out._mask = np.empty(d.shape, MaskType)
+        np.dot(am, bm, out._mask)
+        np.logical_not(out._mask, out._mask)
+        return out
+
+
+def inner(a, b):
+    """
+    Returns the inner product of a and b for arrays of floating point types.
+
+    Like the generic NumPy equivalent, the product sum is over the last
+    dimension of `a` and `b`. The first argument is not conjugated.
+
+    """
+    fa = filled(a, 0)
+    fb = filled(b, 0)
+    if fa.ndim == 0:
+        fa.shape = (1,)
+    if fb.ndim == 0:
+        fb.shape = (1,)
+    return np.inner(fa, fb).view(MaskedArray)
+inner.__doc__ = doc_note(np.inner.__doc__,
+                         "Masked values are replaced by 0.")
+innerproduct = inner
+
+
+def outer(a, b):
+    "maskedarray version of the numpy function."
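+    # Masked entries are replaced by 0 in the data; the result mask is
+    # the logical-or outer product of the two input masks.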
+    fa = filled(a, 0).ravel()
+    fb = filled(b, 0).ravel()
+    d = np.outer(fa, fb)
+    ma = getmask(a)
+    mb = getmask(b)
+    if ma is nomask and mb is nomask:
+        return masked_array(d)
+    ma = getmaskarray(a)
+    mb = getmaskarray(b)
+    m = make_mask(1 - np.outer(1 - ma, 1 - mb), copy=False)
+    return masked_array(d, mask=m)
+outer.__doc__ = doc_note(np.outer.__doc__,
+                         "Masked values are replaced by 0.")
+outerproduct = outer
+
+
+def _convolve_or_correlate(f, a, v, mode, propagate_mask):
+    """
+    Helper function for ma.correlate and ma.convolve
+    """
+    if propagate_mask:
+        # results which are contributed to by either item in any pair being invalid
+        mask = (
+            f(getmaskarray(a), np.ones(np.shape(v), dtype=bool), mode=mode)
+          | f(np.ones(np.shape(a), dtype=bool), getmaskarray(v), mode=mode)
+        )
+        data = f(getdata(a), getdata(v), mode=mode)
+    else:
+        # results which are not contributed to by any pair of valid elements
+        mask = ~f(~getmaskarray(a), ~getmaskarray(v), mode=mode)
+        data = f(filled(a, 0), filled(v, 0), mode=mode)
+
+    return masked_array(data, mask=mask)
+
+
+def correlate(a, v, mode='valid', propagate_mask=True):
+    """
+    Cross-correlation of two 1-dimensional sequences.
+
+    Parameters
+    ----------
+    a, v : array_like
+        Input sequences.
+    mode : {'valid', 'same', 'full'}, optional
+        Refer to the `np.convolve` docstring. Note that the default
+        is 'valid', unlike `convolve`, which uses 'full'.
+    propagate_mask : bool
+        If True, then a result element is masked if any masked element
+        contributes towards it.
+        If False, then a result element is only masked if no non-masked
+        element contributes towards it.
+
+    Returns
+    -------
+    out : MaskedArray
+        Discrete cross-correlation of `a` and `v`.
+
+    See Also
+    --------
+    numpy.correlate : Equivalent function in the top-level NumPy module.
+    """
+    return _convolve_or_correlate(np.correlate, a, v, mode, propagate_mask)
+
+
+def convolve(a, v, mode='full', propagate_mask=True):
+    """
+    Returns the discrete, linear convolution of two one-dimensional sequences.
+
+    Parameters
+    ----------
+    a, v : array_like
+        Input sequences.
+    mode : {'valid', 'same', 'full'}, optional
+        Refer to the `np.convolve` docstring.
+    propagate_mask : bool
+        If True, then if any masked element is included in the sum for a result
+        element, then the result is masked.
+        If False, then the result element is only masked if no non-masked cells
+        contribute towards it.
+
+    Returns
+    -------
+    out : MaskedArray
+        Discrete, linear convolution of `a` and `v`.
+
+    See Also
+    --------
+    numpy.convolve : Equivalent function in the top-level NumPy module.
+    """
+    return _convolve_or_correlate(np.convolve, a, v, mode, propagate_mask)
+
+
+def allequal(a, b, fill_value=True):
+    """
+    Return True if all entries of a and b are equal, using
+    fill_value as a truth value where either or both are masked.
+
+    Parameters
+    ----------
+    a, b : array_like
+        Input arrays to compare.
+    fill_value : bool, optional
+        Whether masked values in a or b are considered equal (True) or not
+        (False).
+
+    Returns
+    -------
+    y : bool
+        Returns True if all entries of the two arrays are equal (with
+        masked values handled according to `fill_value`), False
+        otherwise. If either array contains NaN, then False is returned.
+ + See Also + -------- + all, any + numpy.ma.allclose + + Examples + -------- + >>> a = np.ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1]) + >>> a + masked_array(data=[10000000000.0, 1e-07, --], + mask=[False, False, True], + fill_value=1e+20) + + >>> b = np.array([1e10, 1e-7, -42.0]) + >>> b + array([ 1.00000000e+10, 1.00000000e-07, -4.20000000e+01]) + >>> np.ma.allequal(a, b, fill_value=False) + False + >>> np.ma.allequal(a, b) + True + + """ + m = mask_or(getmask(a), getmask(b)) + if m is nomask: + x = getdata(a) + y = getdata(b) + d = umath.equal(x, y) + return d.all() + elif fill_value: + x = getdata(a) + y = getdata(b) + d = umath.equal(x, y) + dm = array(d, mask=m, copy=False) + return dm.filled(True).all(None) + else: + return False + + +def allclose(a, b, masked_equal=True, rtol=1e-5, atol=1e-8): + """ + Returns True if two arrays are element-wise equal within a tolerance. + + This function is equivalent to `allclose` except that masked values + are treated as equal (default) or unequal, depending on the `masked_equal` + argument. + + Parameters + ---------- + a, b : array_like + Input arrays to compare. + masked_equal : bool, optional + Whether masked values in `a` and `b` are considered equal (True) or not + (False). They are considered equal by default. + rtol : float, optional + Relative tolerance. The relative difference is equal to ``rtol * b``. + Default is 1e-5. + atol : float, optional + Absolute tolerance. The absolute difference is equal to `atol`. + Default is 1e-8. + + Returns + ------- + y : bool + Returns True if the two arrays are equal within the given + tolerance, False otherwise. If either array contains NaN, then + False is returned. + + See Also + -------- + all, any + numpy.allclose : the non-masked `allclose`. + + Notes + ----- + If the following equation is element-wise True, then `allclose` returns + True:: + + absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) + + Return True if all elements of `a` and `b` are equal subject to + given tolerances. + + Examples + -------- + >>> a = np.ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1]) + >>> a + masked_array(data=[10000000000.0, 1e-07, --], + mask=[False, False, True], + fill_value=1e+20) + >>> b = np.ma.array([1e10, 1e-8, -42.0], mask=[0, 0, 1]) + >>> np.ma.allclose(a, b) + False + + >>> a = np.ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1]) + >>> b = np.ma.array([1.00001e10, 1e-9, -42.0], mask=[0, 0, 1]) + >>> np.ma.allclose(a, b) + True + >>> np.ma.allclose(a, b, masked_equal=False) + False + + Masked values are not compared directly. + + >>> a = np.ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1]) + >>> b = np.ma.array([1.00001e10, 1e-9, 42.0], mask=[0, 0, 1]) + >>> np.ma.allclose(a, b) + True + >>> np.ma.allclose(a, b, masked_equal=False) + False + + """ + x = masked_array(a, copy=False) + y = masked_array(b, copy=False) + + # make sure y is an inexact type to avoid abs(MIN_INT); will cause + # casting of x later. + # NOTE: We explicitly allow timedelta, which used to work. This could + # possibly be deprecated. See also gh-18286. + # timedelta works if `atol` is an integer or also a timedelta. + # Although, the default tolerances are unlikely to be useful + if y.dtype.kind != "m": + dtype = np.result_type(y, 1.) + if y.dtype != dtype: + y = masked_array(y, dtype=dtype, copy=False) + + m = mask_or(getmask(x), getmask(y)) + xinf = np.isinf(masked_array(x, copy=False, mask=m)).filled(False) + # If we have some infs, they should fall at the same place. 
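+    # (the tolerance test below is meaningless at infinities, since
+    # inf - inf is nan, so infinite entries are compared for position
+    # and value first and then excluded from the tolerance check)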
+    if not np.all(xinf == filled(np.isinf(y), False)):
+        return False
+    # No infs at all
+    if not np.any(xinf):
+        d = filled(less_equal(absolute(x - y), atol + rtol * absolute(y)),
+                   masked_equal)
+        return np.all(d)
+
+    if not np.all(filled(x[xinf] == y[xinf], masked_equal)):
+        return False
+    x = x[~xinf]
+    y = y[~xinf]
+
+    d = filled(less_equal(absolute(x - y), atol + rtol * absolute(y)),
+               masked_equal)
+
+    return np.all(d)
+
+
+def asarray(a, dtype=None, order=None):
+    """
+    Convert the input to a masked array of the given data-type.
+
+    No copy is performed if the input is already an `ndarray`. If `a` is
+    a subclass of `MaskedArray`, a base class `MaskedArray` is returned.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data, in any form that can be converted to a masked array. This
+        includes lists, lists of tuples, tuples, tuples of tuples, tuples
+        of lists, ndarrays and masked arrays.
+    dtype : dtype, optional
+        By default, the data-type is inferred from the input data.
+    order : {'C', 'F'}, optional
+        Whether to use row-major ('C') or column-major ('FORTRAN') memory
+        representation. Default is 'C'.
+
+    Returns
+    -------
+    out : MaskedArray
+        Masked array interpretation of `a`.
+
+    See Also
+    --------
+    asanyarray : Similar to `asarray`, but conserves subclasses.
+
+    Examples
+    --------
+    >>> x = np.arange(10.).reshape(2, 5)
+    >>> x
+    array([[0., 1., 2., 3., 4.],
+           [5., 6., 7., 8., 9.]])
+    >>> np.ma.asarray(x)
+    masked_array(
+      data=[[0., 1., 2., 3., 4.],
+            [5., 6., 7., 8., 9.]],
+      mask=False,
+      fill_value=1e+20)
+    >>> type(np.ma.asarray(x))
+    <class 'numpy.ma.core.MaskedArray'>
+
+    """
+    order = order or 'C'
+    return masked_array(a, dtype=dtype, copy=False, keep_mask=True,
+                        subok=False, order=order)
+
+
+def asanyarray(a, dtype=None):
+    """
+    Convert the input to a masked array, conserving subclasses.
+
+    If `a` is a subclass of `MaskedArray`, its class is conserved.
+    No copy is performed if the input is already an `ndarray`.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data, in any form that can be converted to an array.
+    dtype : dtype, optional
+        By default, the data-type is inferred from the input data.
+
+    Returns
+    -------
+    out : MaskedArray
+        MaskedArray interpretation of `a`.
+
+    See Also
+    --------
+    asarray : Similar to `asanyarray`, but does not conserve subclass.
+
+    Examples
+    --------
+    >>> x = np.arange(10.).reshape(2, 5)
+    >>> x
+    array([[0., 1., 2., 3., 4.],
+           [5., 6., 7., 8., 9.]])
+    >>> np.ma.asanyarray(x)
+    masked_array(
+      data=[[0., 1., 2., 3., 4.],
+            [5., 6., 7., 8., 9.]],
+      mask=False,
+      fill_value=1e+20)
+    >>> type(np.ma.asanyarray(x))
+    <class 'numpy.ma.core.MaskedArray'>
+
+    """
+    # workaround for #8666, to preserve identity. Ideally the bottom line
+    # would handle this for us.
+    if isinstance(a, MaskedArray) and (dtype is None or dtype == a.dtype):
+        return a
+    return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True)
+
+
+##############################################################################
+#                               Pickling                                     #
+##############################################################################
+
+def _pickle_warn(method):
+    # NumPy 1.15.0, 2017-12-10
+    warnings.warn(
+        f"np.ma.{method} is deprecated, use pickle.{method} instead",
+        DeprecationWarning, stacklevel=3)
+
+
+def fromfile(file, dtype=float, count=-1, sep=''):
+    raise NotImplementedError(
+        "fromfile() not yet implemented for a MaskedArray.")
+
+
+def fromflex(fxarray):
+    """
+    Build a masked array from a suitable flexible-type array.
+
+    The input array has to have a data-type with ``_data`` and ``_mask``
+    fields. This type of array is output by `MaskedArray.toflex`.
+
+    Parameters
+    ----------
+    fxarray : ndarray
+        The structured input array, containing ``_data`` and ``_mask``
+        fields. If present, other fields are discarded.
+
+    Returns
+    -------
+    result : MaskedArray
+        The constructed masked array.
+
+    See Also
+    --------
+    MaskedArray.toflex : Build a flexible-type array from a masked array.
+
+    Examples
+    --------
+    >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[0] + [1, 0] * 4)
+    >>> rec = x.toflex()
+    >>> rec
+    array([[(0, False), (1, True), (2, False)],
+           [(3, True), (4, False), (5, True)],
+           [(6, False), (7, True), (8, False)]],
+          dtype=[('_data', '<i8'), ('_mask', '?')])
+    >>> x2 = np.ma.fromflex(rec)
+    >>> x2
+    masked_array(
+      data=[[0, --, 2],
+            [--, 4, --],
+            [6, --, 8]],
+      mask=[[False,  True, False],
+            [ True, False,  True],
+            [False,  True, False]],
+      fill_value=999999)
+
+    Extra fields can be present in the structured array but are discarded:
+
+    >>> dt = [('_data', '<i4'), ('_mask', '|b1'), ('field3', '<f4')]
+    >>> rec2 = np.zeros((2, 2), dtype=dt)
+    >>> rec2
+    array([[(0, False, 0.), (0, False, 0.)],
+           [(0, False, 0.), (0, False, 0.)]],
+          dtype=[('_data', '<i4'), ('_mask', '?'), ('field3', '<f4')])
+    >>> y = np.ma.fromflex(rec2)
+    >>> y
+    masked_array(
+      data=[[0, 0],
+            [0, 0]],
+      mask=[[False, False],
+            [False, False]],
+      fill_value=999999,
+      dtype=int32)
+
+    """
+    return masked_array(fxarray['_data'], mask=fxarray['_mask'])
+
+
+class _convert2ma:
+
+    """
+    Convert functions from numpy to numpy.ma.
+
+    Parameters
+    ----------
+    funcname : str
+        Name of the function to transform.
+
+    """
+    __doc__ = None
+
+    def __init__(self, funcname, params=None):
+        self._func = getattr(np, funcname)
+        self.__doc__ = self.getdoc()
+        self._extras = params or {}
+
+    def getdoc(self):
+        "Return the doc of the function (from the doc of the method)."
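+        # Reuse the wrapped numpy function's docstring, prefixed with
+        # its signature when one can be recovered.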
+ doc = getattr(self._func, '__doc__', None) + sig = get_object_signature(self._func) + if doc: + # Add the signature of the function at the beginning of the doc + if sig: + sig = "%s%s\n" % (self._func.__name__, sig) + doc = sig + doc + return doc + + def __call__(self, *args, **params): + # Find the common parameters to the call and the definition + _extras = self._extras + common_params = set(params).intersection(_extras) + # Drop the common parameters from the call + for p in common_params: + _extras[p] = params.pop(p) + # Get the result + result = self._func.__call__(*args, **params).view(MaskedArray) + if "fill_value" in common_params: + result.fill_value = _extras.get("fill_value", None) + if "hardmask" in common_params: + result._hardmask = bool(_extras.get("hard_mask", False)) + return result + +arange = _convert2ma('arange', params=dict(fill_value=None, hardmask=False)) +clip = np.clip +diff = np.diff +empty = _convert2ma('empty', params=dict(fill_value=None, hardmask=False)) +empty_like = _convert2ma('empty_like') +frombuffer = _convert2ma('frombuffer') +fromfunction = _convert2ma('fromfunction') +identity = _convert2ma( + 'identity', params=dict(fill_value=None, hardmask=False)) +indices = np.indices +ones = _convert2ma('ones', params=dict(fill_value=None, hardmask=False)) +ones_like = np.ones_like +squeeze = np.squeeze +zeros = _convert2ma('zeros', params=dict(fill_value=None, hardmask=False)) +zeros_like = np.zeros_like + + +def append(a, b, axis=None): + """Append values to the end of an array. + + .. versionadded:: 1.9.0 + + Parameters + ---------- + a : array_like + Values are appended to a copy of this array. + b : array_like + These values are appended to a copy of `a`. It must be of the + correct shape (the same shape as `a`, excluding `axis`). If `axis` + is not specified, `b` can be any shape and will be flattened + before use. + axis : int, optional + The axis along which `v` are appended. If `axis` is not given, + both `a` and `b` are flattened before use. + + Returns + ------- + append : MaskedArray + A copy of `a` with `b` appended to `axis`. Note that `append` + does not occur in-place: a new array is allocated and filled. If + `axis` is None, the result is a flattened array. + + See Also + -------- + numpy.append : Equivalent function in the top-level NumPy module. 
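+
+    Notes
+    -----
+    This is a thin wrapper around ``concatenate([a, b], axis)``, so the
+    masks of both inputs are preserved in the result.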
+ + Examples + -------- + >>> import numpy.ma as ma + >>> a = ma.masked_values([1, 2, 3], 2) + >>> b = ma.masked_values([[4, 5, 6], [7, 8, 9]], 7) + >>> ma.append(a, b) + masked_array(data=[1, --, 3, 4, 5, 6, --, 8, 9], + mask=[False, True, False, False, False, False, True, False, + False], + fill_value=999999) + """ + return concatenate([a, b], axis) diff --git a/MLPY/Lib/site-packages/numpy/ma/core.pyi b/MLPY/Lib/site-packages/numpy/ma/core.pyi new file mode 100644 index 0000000000000000000000000000000000000000..c14f9b2ad5360bef1f0a2aa7208fc8883e5f1e00 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/ma/core.pyi @@ -0,0 +1,468 @@ +from typing import Any, List, TypeVar, Callable +from numpy import ndarray, dtype, float64 + +from numpy import ( + amax as amax, + amin as amin, + bool_ as bool_, + expand_dims as expand_dims, + diff as diff, + clip as clip, + indices as indices, + ones_like as ones_like, + squeeze as squeeze, + zeros_like as zeros_like, +) + +from numpy.lib.function_base import ( + angle as angle, +) + +# TODO: Set the `bound` to something more suitable once we +# have proper shape support +_ShapeType = TypeVar("_ShapeType", bound=Any) +_DType_co = TypeVar("_DType_co", bound=dtype[Any], covariant=True) + +__all__: List[str] + +MaskType = bool_ +nomask: bool_ + +class MaskedArrayFutureWarning(FutureWarning): ... +class MAError(Exception): ... +class MaskError(MAError): ... + +def default_fill_value(obj): ... +def minimum_fill_value(obj): ... +def maximum_fill_value(obj): ... +def set_fill_value(a, fill_value): ... +def common_fill_value(a, b): ... +def filled(a, fill_value=...): ... +def getdata(a, subok=...): ... +get_data = getdata + +def fix_invalid(a, mask=..., copy=..., fill_value=...): ... + +class _MaskedUFunc: + f: Any + __doc__: Any + __name__: Any + def __init__(self, ufunc): ... + +class _MaskedUnaryOperation(_MaskedUFunc): + fill: Any + domain: Any + def __init__(self, mufunc, fill=..., domain=...): ... + def __call__(self, a, *args, **kwargs): ... + +class _MaskedBinaryOperation(_MaskedUFunc): + fillx: Any + filly: Any + def __init__(self, mbfunc, fillx=..., filly=...): ... + def __call__(self, a, b, *args, **kwargs): ... + def reduce(self, target, axis=..., dtype=...): ... + def outer(self, a, b): ... + def accumulate(self, target, axis=...): ... + +class _DomainedBinaryOperation(_MaskedUFunc): + domain: Any + fillx: Any + filly: Any + def __init__(self, dbfunc, domain, fillx=..., filly=...): ... + def __call__(self, a, b, *args, **kwargs): ... 
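+
+# Module-level masked ufuncs, instances of the wrapper classes above: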
+ +exp: _MaskedUnaryOperation +conjugate: _MaskedUnaryOperation +sin: _MaskedUnaryOperation +cos: _MaskedUnaryOperation +arctan: _MaskedUnaryOperation +arcsinh: _MaskedUnaryOperation +sinh: _MaskedUnaryOperation +cosh: _MaskedUnaryOperation +tanh: _MaskedUnaryOperation +abs: _MaskedUnaryOperation +absolute: _MaskedUnaryOperation +fabs: _MaskedUnaryOperation +negative: _MaskedUnaryOperation +floor: _MaskedUnaryOperation +ceil: _MaskedUnaryOperation +around: _MaskedUnaryOperation +logical_not: _MaskedUnaryOperation +sqrt: _MaskedUnaryOperation +log: _MaskedUnaryOperation +log2: _MaskedUnaryOperation +log10: _MaskedUnaryOperation +tan: _MaskedUnaryOperation +arcsin: _MaskedUnaryOperation +arccos: _MaskedUnaryOperation +arccosh: _MaskedUnaryOperation +arctanh: _MaskedUnaryOperation + +add: _MaskedBinaryOperation +subtract: _MaskedBinaryOperation +multiply: _MaskedBinaryOperation +arctan2: _MaskedBinaryOperation +equal: _MaskedBinaryOperation +not_equal: _MaskedBinaryOperation +less_equal: _MaskedBinaryOperation +greater_equal: _MaskedBinaryOperation +less: _MaskedBinaryOperation +greater: _MaskedBinaryOperation +logical_and: _MaskedBinaryOperation +alltrue: _MaskedBinaryOperation +logical_or: _MaskedBinaryOperation +sometrue: Callable[..., Any] +logical_xor: _MaskedBinaryOperation +bitwise_and: _MaskedBinaryOperation +bitwise_or: _MaskedBinaryOperation +bitwise_xor: _MaskedBinaryOperation +hypot: _MaskedBinaryOperation +divide: _MaskedBinaryOperation +true_divide: _MaskedBinaryOperation +floor_divide: _MaskedBinaryOperation +remainder: _MaskedBinaryOperation +fmod: _MaskedBinaryOperation +mod: _MaskedBinaryOperation + +def make_mask_descr(ndtype): ... +def getmask(a): ... +get_mask = getmask + +def getmaskarray(arr): ... +def is_mask(m): ... +def make_mask(m, copy=..., shrink=..., dtype=...): ... +def make_mask_none(newshape, dtype=...): ... +def mask_or(m1, m2, copy=..., shrink=...): ... +def flatten_mask(mask): ... +def masked_where(condition, a, copy=...): ... +def masked_greater(x, value, copy=...): ... +def masked_greater_equal(x, value, copy=...): ... +def masked_less(x, value, copy=...): ... +def masked_less_equal(x, value, copy=...): ... +def masked_not_equal(x, value, copy=...): ... +def masked_equal(x, value, copy=...): ... +def masked_inside(x, v1, v2, copy=...): ... +def masked_outside(x, v1, v2, copy=...): ... +def masked_object(x, value, copy=..., shrink=...): ... +def masked_values(x, value, rtol=..., atol=..., copy=..., shrink=...): ... +def masked_invalid(a, copy=...): ... + +class _MaskedPrintOption: + def __init__(self, display): ... + def display(self): ... + def set_display(self, s): ... + def enabled(self): ... + def enable(self, shrink=...): ... + +masked_print_option: _MaskedPrintOption + +def flatten_structured_array(a): ... + +class MaskedIterator: + ma: Any + dataiter: Any + maskiter: Any + def __init__(self, ma): ... + def __iter__(self): ... + def __getitem__(self, indx): ... + def __setitem__(self, index, value): ... + def __next__(self): ... + +class MaskedArray(ndarray[_ShapeType, _DType_co]): + __array_priority__: Any + def __new__(cls, data=..., mask=..., dtype=..., copy=..., subok=..., ndmin=..., fill_value=..., keep_mask=..., hard_mask=..., shrink=..., order=...): ... + def __array_finalize__(self, obj): ... + def __array_wrap__(self, obj, context=...): ... + def view(self, dtype=..., type=..., fill_value=...): ... + def __getitem__(self, indx): ... + def __setitem__(self, indx, value): ... + @property + def dtype(self): ... 
+ @dtype.setter + def dtype(self, dtype): ... + @property + def shape(self): ... + @shape.setter + def shape(self, shape): ... + def __setmask__(self, mask, copy=...): ... + @property + def mask(self): ... + @mask.setter + def mask(self, value): ... + @property + def recordmask(self): ... + @recordmask.setter + def recordmask(self, mask): ... + def harden_mask(self): ... + def soften_mask(self): ... + @property + def hardmask(self): ... + def unshare_mask(self): ... + @property + def sharedmask(self): ... + def shrink_mask(self): ... + @property + def baseclass(self): ... + data: Any + @property + def flat(self): ... + @flat.setter + def flat(self, value): ... + @property + def fill_value(self): ... + @fill_value.setter + def fill_value(self, value=...): ... + get_fill_value: Any + set_fill_value: Any + def filled(self, fill_value=...): ... + def compressed(self): ... + def compress(self, condition, axis=..., out=...): ... + def __eq__(self, other): ... + def __ne__(self, other): ... + def __add__(self, other): ... + def __radd__(self, other): ... + def __sub__(self, other): ... + def __rsub__(self, other): ... + def __mul__(self, other): ... + def __rmul__(self, other): ... + def __div__(self, other): ... + def __truediv__(self, other): ... + def __rtruediv__(self, other): ... + def __floordiv__(self, other): ... + def __rfloordiv__(self, other): ... + def __pow__(self, other): ... + def __rpow__(self, other): ... + def __iadd__(self, other): ... + def __isub__(self, other): ... + def __imul__(self, other): ... + def __idiv__(self, other): ... + def __ifloordiv__(self, other): ... + def __itruediv__(self, other): ... + def __ipow__(self, other): ... + def __float__(self): ... + def __int__(self): ... + @property # type: ignore[misc] + def imag(self): ... + get_imag: Any + @property # type: ignore[misc] + def real(self): ... + get_real: Any + def count(self, axis=..., keepdims=...): ... + def ravel(self, order=...): ... + def reshape(self, *s, **kwargs): ... + def resize(self, newshape, refcheck=..., order=...): ... + def put(self, indices, values, mode=...): ... + def ids(self): ... + def iscontiguous(self): ... + def all(self, axis=..., out=..., keepdims=...): ... + def any(self, axis=..., out=..., keepdims=...): ... + def nonzero(self): ... + def trace(self, offset=..., axis1=..., axis2=..., dtype=..., out=...): ... + def dot(self, b, out=..., strict=...): ... + def sum(self, axis=..., dtype=..., out=..., keepdims=...): ... + def cumsum(self, axis=..., dtype=..., out=...): ... + def prod(self, axis=..., dtype=..., out=..., keepdims=...): ... + product: Any + def cumprod(self, axis=..., dtype=..., out=...): ... + def mean(self, axis=..., dtype=..., out=..., keepdims=...): ... + def anom(self, axis=..., dtype=...): ... + def var(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... + def std(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... + def round(self, decimals=..., out=...): ... + def argsort(self, axis=..., kind=..., order=..., endwith=..., fill_value=...): ... + def argmin(self, axis=..., fill_value=..., out=...): ... + def argmax(self, axis=..., fill_value=..., out=...): ... + def sort(self, axis=..., kind=..., order=..., endwith=..., fill_value=...): ... + def min(self, axis=..., out=..., fill_value=..., keepdims=...): ... + # NOTE: deprecated + # def mini(self, axis=...): ... + # def tostring(self, fill_value=..., order=...): ... + def max(self, axis=..., out=..., fill_value=..., keepdims=...): ... 
+ def ptp(self, axis=..., out=..., fill_value=..., keepdims=...): ... + def partition(self, *args, **kwargs): ... + def argpartition(self, *args, **kwargs): ... + def take(self, indices, axis=..., out=..., mode=...): ... + copy: Any + diagonal: Any + flatten: Any + repeat: Any + squeeze: Any + swapaxes: Any + T: Any + transpose: Any + def tolist(self, fill_value=...): ... + def tobytes(self, fill_value=..., order=...): ... + def tofile(self, fid, sep=..., format=...): ... + def toflex(self): ... + torecords: Any + def __reduce__(self): ... + def __deepcopy__(self, memo=...): ... + +class mvoid(MaskedArray[_ShapeType, _DType_co]): + def __new__( + self, + data, + mask=..., + dtype=..., + fill_value=..., + hardmask=..., + copy=..., + subok=..., + ): ... + def __getitem__(self, indx): ... + def __setitem__(self, indx, value): ... + def __iter__(self): ... + def __len__(self): ... + def filled(self, fill_value=...): ... + def tolist(self): ... + +def isMaskedArray(x): ... +isarray = isMaskedArray +isMA = isMaskedArray + +# 0D float64 array +class MaskedConstant(MaskedArray[Any, dtype[float64]]): + def __new__(cls): ... + __class__: Any + def __array_finalize__(self, obj): ... + def __array_prepare__(self, obj, context=...): ... + def __array_wrap__(self, obj, context=...): ... + def __format__(self, format_spec): ... + def __reduce__(self): ... + def __iop__(self, other): ... + __iadd__: Any + __isub__: Any + __imul__: Any + __ifloordiv__: Any + __itruediv__: Any + __ipow__: Any + def copy(self, *args, **kwargs): ... + def __copy__(self): ... + def __deepcopy__(self, memo): ... + def __setattr__(self, attr, value): ... + +masked: MaskedConstant +masked_singleton: MaskedConstant +masked_array = MaskedArray + +def array( + data, + dtype=..., + copy=..., + order=..., + mask=..., + fill_value=..., + keep_mask=..., + hard_mask=..., + shrink=..., + subok=..., + ndmin=..., +): ... +def is_masked(x): ... + +class _extrema_operation(_MaskedUFunc): + compare: Any + fill_value_func: Any + def __init__(self, ufunc, compare, fill_value): ... + # NOTE: in practice `b` has a default value, but users should + # explicitly provide a value here as the default is deprecated + def __call__(self, a, b): ... + def reduce(self, target, axis=...): ... + def outer(self, a, b): ... + +def min(obj, axis=..., out=..., fill_value=..., keepdims=...): ... +def max(obj, axis=..., out=..., fill_value=..., keepdims=...): ... +def ptp(obj, axis=..., out=..., fill_value=..., keepdims=...): ... + +class _frommethod: + __name__: Any + __doc__: Any + reversed: Any + def __init__(self, methodname, reversed=...): ... + def getdoc(self): ... + def __call__(self, a, *args, **params): ... + +all: _frommethod +anomalies: _frommethod +anom: _frommethod +any: _frommethod +compress: _frommethod +cumprod: _frommethod +cumsum: _frommethod +copy: _frommethod +diagonal: _frommethod +harden_mask: _frommethod +ids: _frommethod +mean: _frommethod +nonzero: _frommethod +prod: _frommethod +product: _frommethod +ravel: _frommethod +repeat: _frommethod +soften_mask: _frommethod +std: _frommethod +sum: _frommethod +swapaxes: _frommethod +trace: _frommethod +var: _frommethod +count: _frommethod +argmin: _frommethod +argmax: _frommethod + +minimum: _extrema_operation +maximum: _extrema_operation + +def take(a, indices, axis=..., out=..., mode=...): ... +def power(a, b, third=...): ... +def argsort(a, axis=..., kind=..., order=..., endwith=..., fill_value=...): ... +def sort(a, axis=..., kind=..., order=..., endwith=..., fill_value=...): ... 
+def compressed(x): ... +def concatenate(arrays, axis=...): ... +def diag(v, k=...): ... +def left_shift(a, n): ... +def right_shift(a, n): ... +def put(a, indices, values, mode=...): ... +def putmask(a, mask, values): ... +def transpose(a, axes=...): ... +def reshape(a, new_shape, order=...): ... +def resize(x, new_shape): ... +def ndim(obj): ... +def shape(obj): ... +def size(obj, axis=...): ... +def where(condition, x=..., y=...): ... +def choose(indices, choices, out=..., mode=...): ... +def round_(a, decimals=..., out=...): ... +round = round_ + +def inner(a, b): ... +innerproduct = inner + +def outer(a, b): ... +outerproduct = outer + +def correlate(a, v, mode=..., propagate_mask=...): ... +def convolve(a, v, mode=..., propagate_mask=...): ... +def allequal(a, b, fill_value=...): ... +def allclose(a, b, masked_equal=..., rtol=..., atol=...): ... +def asarray(a, dtype=..., order=...): ... +def asanyarray(a, dtype=...): ... +def fromflex(fxarray): ... + +class _convert2ma: + __doc__: Any + def __init__(self, funcname, params=...): ... + def getdoc(self): ... + def __call__(self, *args, **params): ... + +arange: _convert2ma +empty: _convert2ma +empty_like: _convert2ma +frombuffer: _convert2ma +fromfunction: _convert2ma +identity: _convert2ma +ones: _convert2ma +zeros: _convert2ma + +def append(a, b, axis=...): ... +def dot(a, b, strict=..., out=...): ... +def mask_rowcols(a, axis=...): ... diff --git a/MLPY/Lib/site-packages/numpy/ma/extras.py b/MLPY/Lib/site-packages/numpy/ma/extras.py new file mode 100644 index 0000000000000000000000000000000000000000..1ea03ca5963164f3116152a78e9241542e72e430 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/ma/extras.py @@ -0,0 +1,1923 @@ +""" +Masked arrays add-ons. + +A collection of utilities for `numpy.ma`. + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu +:version: $Id: extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $ + +""" +__all__ = [ + 'apply_along_axis', 'apply_over_axes', 'atleast_1d', 'atleast_2d', + 'atleast_3d', 'average', 'clump_masked', 'clump_unmasked', + 'column_stack', 'compress_cols', 'compress_nd', 'compress_rowcols', + 'compress_rows', 'count_masked', 'corrcoef', 'cov', 'diagflat', 'dot', + 'dstack', 'ediff1d', 'flatnotmasked_contiguous', 'flatnotmasked_edges', + 'hsplit', 'hstack', 'isin', 'in1d', 'intersect1d', 'mask_cols', 'mask_rowcols', + 'mask_rows', 'masked_all', 'masked_all_like', 'median', 'mr_', + 'notmasked_contiguous', 'notmasked_edges', 'polyfit', 'row_stack', + 'setdiff1d', 'setxor1d', 'stack', 'unique', 'union1d', 'vander', 'vstack', + ] + +import itertools +import warnings + +from . import core as ma +from .core import ( + MaskedArray, MAError, add, array, asarray, concatenate, filled, count, + getmask, getmaskarray, make_mask_descr, masked, masked_array, mask_or, + nomask, ones, sort, zeros, getdata, get_masked_subclass, dot, + mask_rowcols + ) + +import numpy as np +from numpy import ndarray, array as nxarray +import numpy.core.umath as umath +from numpy.core.multiarray import normalize_axis_index +from numpy.core.numeric import normalize_axis_tuple +from numpy.lib.function_base import _ureduce +from numpy.lib.index_tricks import AxisConcatenator + + +def issequence(seq): + """ + Is seq a sequence (ndarray, list or tuple)? + + """ + return isinstance(seq, (ndarray, tuple, list)) + + +def count_masked(arr, axis=None): + """ + Count the number of masked elements along the given axis. + + Parameters + ---------- + arr : array_like + An array with (possibly) masked elements. 
+ axis : int, optional + Axis along which to count. If None (default), a flattened + version of the array is used. + + Returns + ------- + count : int, ndarray + The total number of masked elements (axis=None) or the number + of masked elements along each slice of the given axis. + + See Also + -------- + MaskedArray.count : Count non-masked elements. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(9).reshape((3,3)) + >>> a = ma.array(a) + >>> a[1, 0] = ma.masked + >>> a[1, 2] = ma.masked + >>> a[2, 1] = ma.masked + >>> a + masked_array( + data=[[0, 1, 2], + [--, 4, --], + [6, --, 8]], + mask=[[False, False, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + >>> ma.count_masked(a) + 3 + + When the `axis` keyword is used an array is returned. + + >>> ma.count_masked(a, axis=0) + array([1, 1, 1]) + >>> ma.count_masked(a, axis=1) + array([0, 2, 1]) + + """ + m = getmaskarray(arr) + return m.sum(axis) + + +def masked_all(shape, dtype=float): + """ + Empty masked array with all elements masked. + + Return an empty masked array of the given shape and dtype, where all the + data are masked. + + Parameters + ---------- + shape : tuple + Shape of the required MaskedArray. + dtype : dtype, optional + Data type of the output. + + Returns + ------- + a : MaskedArray + A masked array with all data masked. + + See Also + -------- + masked_all_like : Empty masked array modelled on an existing array. + + Examples + -------- + >>> import numpy.ma as ma + >>> ma.masked_all((3, 3)) + masked_array( + data=[[--, --, --], + [--, --, --], + [--, --, --]], + mask=[[ True, True, True], + [ True, True, True], + [ True, True, True]], + fill_value=1e+20, + dtype=float64) + + The `dtype` parameter defines the underlying data type. + + >>> a = ma.masked_all((3, 3)) + >>> a.dtype + dtype('float64') + >>> a = ma.masked_all((3, 3), dtype=np.int32) + >>> a.dtype + dtype('int32') + + """ + a = masked_array(np.empty(shape, dtype), + mask=np.ones(shape, make_mask_descr(dtype))) + return a + + +def masked_all_like(arr): + """ + Empty masked array with the properties of an existing array. + + Return an empty masked array of the same shape and dtype as + the array `arr`, where all the data are masked. + + Parameters + ---------- + arr : ndarray + An array describing the shape and dtype of the required MaskedArray. + + Returns + ------- + a : MaskedArray + A masked array with all data masked. + + Raises + ------ + AttributeError + If `arr` doesn't have a shape attribute (i.e. not an ndarray) + + See Also + -------- + masked_all : Empty masked array with all elements masked. + + Examples + -------- + >>> import numpy.ma as ma + >>> arr = np.zeros((2, 3), dtype=np.float32) + >>> arr + array([[0., 0., 0.], + [0., 0., 0.]], dtype=float32) + >>> ma.masked_all_like(arr) + masked_array( + data=[[--, --, --], + [--, --, --]], + mask=[[ True, True, True], + [ True, True, True]], + fill_value=1e+20, + dtype=float32) + + The dtype of the masked array matches the dtype of `arr`. + + >>> arr.dtype + dtype('float32') + >>> ma.masked_all_like(arr).dtype + dtype('float32') + + """ + a = np.empty_like(arr).view(MaskedArray) + a._mask = np.ones(a.shape, dtype=make_mask_descr(a.dtype)) + return a + + +#####-------------------------------------------------------------------------- +#---- --- Standard functions --- +#####-------------------------------------------------------------------------- +class _fromnxfunction: + """ + Defines a wrapper to adapt NumPy functions to masked arrays. 
+ + + An instance of `_fromnxfunction` can be called with the same parameters + as the wrapped NumPy function. The docstring of `newfunc` is adapted from + the wrapped function as well, see `getdoc`. + + This class should not be used directly. Instead, one of its extensions that + provides support for a specific type of input should be used. + + Parameters + ---------- + funcname : str + The name of the function to be adapted. The function should be + in the NumPy namespace (i.e. ``np.funcname``). + + """ + + def __init__(self, funcname): + self.__name__ = funcname + self.__doc__ = self.getdoc() + + def getdoc(self): + """ + Retrieve the docstring and signature from the function. + + The ``__doc__`` attribute of the function is used as the docstring for + the new masked array version of the function. A note on application + of the function to the mask is appended. + + Parameters + ---------- + None + + """ + npfunc = getattr(np, self.__name__, None) + doc = getattr(npfunc, '__doc__', None) + if doc: + sig = self.__name__ + ma.get_object_signature(npfunc) + doc = ma.doc_note(doc, "The function is applied to both the _data " + "and the _mask, if any.") + return '\n\n'.join((sig, doc)) + return + + def __call__(self, *args, **params): + pass + + +class _fromnxfunction_single(_fromnxfunction): + """ + A version of `_fromnxfunction` that is called with a single array + argument followed by auxiliary args that are passed verbatim for + both the data and mask calls. + """ + def __call__(self, x, *args, **params): + func = getattr(np, self.__name__) + if isinstance(x, ndarray): + _d = func(x.__array__(), *args, **params) + _m = func(getmaskarray(x), *args, **params) + return masked_array(_d, mask=_m) + else: + _d = func(np.asarray(x), *args, **params) + _m = func(getmaskarray(x), *args, **params) + return masked_array(_d, mask=_m) + + +class _fromnxfunction_seq(_fromnxfunction): + """ + A version of `_fromnxfunction` that is called with a single sequence + of arrays followed by auxiliary args that are passed verbatim for + both the data and mask calls. + """ + def __call__(self, x, *args, **params): + func = getattr(np, self.__name__) + _d = func(tuple([np.asarray(a) for a in x]), *args, **params) + _m = func(tuple([getmaskarray(a) for a in x]), *args, **params) + return masked_array(_d, mask=_m) + + +class _fromnxfunction_args(_fromnxfunction): + """ + A version of `_fromnxfunction` that is called with multiple array + arguments. The first non-array-like input marks the beginning of the + arguments that are passed verbatim for both the data and mask calls. + Array arguments are processed independently and the results are + returned in a list. If only one array is found, the return value is + just the processed array instead of a list. + """ + def __call__(self, *args, **params): + func = getattr(np, self.__name__) + arrays = [] + args = list(args) + while len(args) > 0 and issequence(args[0]): + arrays.append(args.pop(0)) + res = [] + for x in arrays: + _d = func(np.asarray(x), *args, **params) + _m = func(getmaskarray(x), *args, **params) + res.append(masked_array(_d, mask=_m)) + if len(arrays) == 1: + return res[0] + return res + + +class _fromnxfunction_allargs(_fromnxfunction): + """ + A version of `_fromnxfunction` that is called with multiple array + arguments. Similar to `_fromnxfunction_args` except that all args + are converted to arrays even if they are not so already. This makes + it possible to process scalars as 1-D arrays. 
Only keyword arguments
+    are passed through verbatim for the data and mask calls. Array
+    arguments are processed independently and the results are returned
+    in a list. If only one arg is present, the return value is just the
+    processed array instead of a list.
+    """
+    def __call__(self, *args, **params):
+        func = getattr(np, self.__name__)
+        res = []
+        for x in args:
+            _d = func(np.asarray(x), **params)
+            _m = func(getmaskarray(x), **params)
+            res.append(masked_array(_d, mask=_m))
+        if len(args) == 1:
+            return res[0]
+        return res
+
+
+atleast_1d = _fromnxfunction_allargs('atleast_1d')
+atleast_2d = _fromnxfunction_allargs('atleast_2d')
+atleast_3d = _fromnxfunction_allargs('atleast_3d')
+
+vstack = row_stack = _fromnxfunction_seq('vstack')
+hstack = _fromnxfunction_seq('hstack')
+column_stack = _fromnxfunction_seq('column_stack')
+dstack = _fromnxfunction_seq('dstack')
+stack = _fromnxfunction_seq('stack')
+
+hsplit = _fromnxfunction_single('hsplit')
+
+diagflat = _fromnxfunction_single('diagflat')
+
+
+#####--------------------------------------------------------------------------
+#----
+#####--------------------------------------------------------------------------
+def flatten_inplace(seq):
+    """Flatten a sequence in place."""
+    k = 0
+    while (k != len(seq)):
+        while hasattr(seq[k], '__iter__'):
+            seq[k:(k + 1)] = seq[k]
+        k += 1
+    return seq
+
+
+def apply_along_axis(func1d, axis, arr, *args, **kwargs):
+    """
+    (This docstring should be overwritten)
+    """
+    arr = array(arr, copy=False, subok=True)
+    nd = arr.ndim
+    axis = normalize_axis_index(axis, nd)
+    ind = [0] * (nd - 1)
+    i = np.zeros(nd, 'O')
+    indlist = list(range(nd))
+    indlist.remove(axis)
+    i[axis] = slice(None, None)
+    outshape = np.asarray(arr.shape).take(indlist)
+    i.put(indlist, ind)
+    res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
+    # if res is a number, then we have a smaller output array
+    asscalar = np.isscalar(res)
+    if not asscalar:
+        try:
+            len(res)
+        except TypeError:
+            asscalar = True
+    # Note: we shouldn't set the dtype of the output from the first result
+    # so we force the type to object, and build a list of dtypes.
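The wrapper classes above call the wrapped NumPy function twice, once on the data and once on the mask, so the names bound here behave like their `np` counterparts while keeping masks in sync. A minimal sketch, assuming only the public `numpy.ma` API:

    import numpy as np

    a = np.ma.array([1, 2, 3], mask=[0, 1, 0])
    b = np.ma.atleast_2d(a)        # data and mask are promoted together
    print(b.shape, b.mask.shape)   # (1, 3) (1, 3)
    s = np.ma.vstack([a, a])       # stacking is applied to the mask as well
    print(s.mask[1])               # [False  True False]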
We'll + # just take the largest, to avoid some downcasting + dtypes = [] + if asscalar: + dtypes.append(np.asarray(res).dtype) + outarr = zeros(outshape, object) + outarr[tuple(ind)] = res + Ntot = np.product(outshape) + k = 1 + while k < Ntot: + # increment the index + ind[-1] += 1 + n = -1 + while (ind[n] >= outshape[n]) and (n > (1 - nd)): + ind[n - 1] += 1 + ind[n] = 0 + n -= 1 + i.put(indlist, ind) + res = func1d(arr[tuple(i.tolist())], *args, **kwargs) + outarr[tuple(ind)] = res + dtypes.append(asarray(res).dtype) + k += 1 + else: + res = array(res, copy=False, subok=True) + j = i.copy() + j[axis] = ([slice(None, None)] * res.ndim) + j.put(indlist, ind) + Ntot = np.product(outshape) + holdshape = outshape + outshape = list(arr.shape) + outshape[axis] = res.shape + dtypes.append(asarray(res).dtype) + outshape = flatten_inplace(outshape) + outarr = zeros(outshape, object) + outarr[tuple(flatten_inplace(j.tolist()))] = res + k = 1 + while k < Ntot: + # increment the index + ind[-1] += 1 + n = -1 + while (ind[n] >= holdshape[n]) and (n > (1 - nd)): + ind[n - 1] += 1 + ind[n] = 0 + n -= 1 + i.put(indlist, ind) + j.put(indlist, ind) + res = func1d(arr[tuple(i.tolist())], *args, **kwargs) + outarr[tuple(flatten_inplace(j.tolist()))] = res + dtypes.append(asarray(res).dtype) + k += 1 + max_dtypes = np.dtype(np.asarray(dtypes).max()) + if not hasattr(arr, '_mask'): + result = np.asarray(outarr, dtype=max_dtypes) + else: + result = asarray(outarr, dtype=max_dtypes) + result.fill_value = ma.default_fill_value(result) + return result +apply_along_axis.__doc__ = np.apply_along_axis.__doc__ + + +def apply_over_axes(func, a, axes): + """ + (This docstring will be overwritten) + """ + val = asarray(a) + N = a.ndim + if array(axes).ndim == 0: + axes = (axes,) + for axis in axes: + if axis < 0: + axis = N + axis + args = (val, axis) + res = func(*args) + if res.ndim == val.ndim: + val = res + else: + res = ma.expand_dims(res, axis) + if res.ndim == val.ndim: + val = res + else: + raise ValueError("function is not returning " + "an array of the correct shape") + return val + +if apply_over_axes.__doc__ is not None: + apply_over_axes.__doc__ = np.apply_over_axes.__doc__[ + :np.apply_over_axes.__doc__.find('Notes')].rstrip() + \ + """ + + Examples + -------- + >>> a = np.ma.arange(24).reshape(2,3,4) + >>> a[:,0,1] = np.ma.masked + >>> a[:,1,:] = np.ma.masked + >>> a + masked_array( + data=[[[0, --, 2, 3], + [--, --, --, --], + [8, 9, 10, 11]], + [[12, --, 14, 15], + [--, --, --, --], + [20, 21, 22, 23]]], + mask=[[[False, True, False, False], + [ True, True, True, True], + [False, False, False, False]], + [[False, True, False, False], + [ True, True, True, True], + [False, False, False, False]]], + fill_value=999999) + >>> np.ma.apply_over_axes(np.ma.sum, a, [0,2]) + masked_array( + data=[[[46], + [--], + [124]]], + mask=[[[False], + [ True], + [False]]], + fill_value=999999) + + Tuple axis arguments to ufuncs are equivalent: + + >>> np.ma.sum(a, axis=(0,2)).reshape((1,-1,1)) + masked_array( + data=[[[46], + [--], + [124]]], + mask=[[[False], + [ True], + [False]]], + fill_value=999999) + """ + + +def average(a, axis=None, weights=None, returned=False): + """ + Return the weighted average of array over the given axis. + + Parameters + ---------- + a : array_like + Data to be averaged. + Masked entries are not taken into account in the computation. + axis : int, optional + Axis along which to average `a`. If None, averaging is done over + the flattened array. 
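A short usage sketch of the masked `apply_along_axis` implemented above (its docstring is copied from `np.apply_along_axis` at import time):

    import numpy as np

    x = np.ma.array(np.arange(6).reshape(2, 3), mask=[[0, 0, 1], [0, 0, 0]])
    # Row sums that respect the mask: the masked value 2 drops out of row 0.
    print(np.ma.apply_along_axis(np.ma.sum, 1, x))   # [1 12]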
+ weights : array_like, optional + The importance that each element has in the computation of the average. + The weights array can either be 1-D (in which case its length must be + the size of `a` along the given axis) or of the same shape as `a`. + If ``weights=None``, then all data in `a` are assumed to have a + weight equal to one. The 1-D calculation is:: + + avg = sum(a * weights) / sum(weights) + + The only constraint on `weights` is that `sum(weights)` must not be 0. + returned : bool, optional + Flag indicating whether a tuple ``(result, sum of weights)`` + should be returned as output (True), or just the result (False). + Default is False. + + Returns + ------- + average, [sum_of_weights] : (tuple of) scalar or MaskedArray + The average along the specified axis. When returned is `True`, + return a tuple with the average as the first element and the sum + of the weights as the second element. The return type is `np.float64` + if `a` is of integer type and floats smaller than `float64`, or the + input data-type, otherwise. If returned, `sum_of_weights` is always + `float64`. + + Examples + -------- + >>> a = np.ma.array([1., 2., 3., 4.], mask=[False, False, True, True]) + >>> np.ma.average(a, weights=[3, 1, 0, 0]) + 1.25 + + >>> x = np.ma.arange(6.).reshape(3, 2) + >>> x + masked_array( + data=[[0., 1.], + [2., 3.], + [4., 5.]], + mask=False, + fill_value=1e+20) + >>> avg, sumweights = np.ma.average(x, axis=0, weights=[1, 2, 3], + ... returned=True) + >>> avg + masked_array(data=[2.6666666666666665, 3.6666666666666665], + mask=[False, False], + fill_value=1e+20) + + """ + a = asarray(a) + m = getmask(a) + + # inspired by 'average' in numpy/lib/function_base.py + + if weights is None: + avg = a.mean(axis) + scl = avg.dtype.type(a.count(axis)) + else: + wgt = np.asanyarray(weights) + + if issubclass(a.dtype.type, (np.integer, np.bool_)): + result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8') + else: + result_dtype = np.result_type(a.dtype, wgt.dtype) + + # Sanity checks + if a.shape != wgt.shape: + if axis is None: + raise TypeError( + "Axis must be specified when shapes of a and weights " + "differ.") + if wgt.ndim != 1: + raise TypeError( + "1D weights expected when shapes of a and weights differ.") + if wgt.shape[0] != a.shape[axis]: + raise ValueError( + "Length of weights not compatible with specified axis.") + + # setup wgt to broadcast along axis + wgt = np.broadcast_to(wgt, (a.ndim-1)*(1,) + wgt.shape, subok=True) + wgt = wgt.swapaxes(-1, axis) + + if m is not nomask: + wgt = wgt*(~a.mask) + + scl = wgt.sum(axis=axis, dtype=result_dtype) + avg = np.multiply(a, wgt, dtype=result_dtype).sum(axis)/scl + + if returned: + if scl.shape != avg.shape: + scl = np.broadcast_to(scl, avg.shape).copy() + return avg, scl + else: + return avg + + +def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): + """ + Compute the median along the specified axis. + + Returns the median of the array elements. + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + axis : int, optional + Axis along which the medians are computed. The default (None) is + to compute the median along a flattened version of the array. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type will be cast if necessary. + overwrite_input : bool, optional + If True, then allow use of memory of input array (a) for + calculations. 
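One implementation detail of `average` worth noting: ``wgt = wgt*(~a.mask)`` zeroes the weights at masked positions, so a masked entry contributes to neither the weighted sum nor ``sum(weights)``. A minimal sketch:

    import numpy as np

    a = np.ma.array([1.0, 2.0, 3.0], mask=[False, False, True])
    # The weight 100 sits on a masked entry and is ignored entirely.
    print(np.ma.average(a, weights=[1, 1, 100]))   # 1.5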
The input array will be modified by the call to + median. This will save memory when you do not need to preserve + the contents of the input array. Treat the input as undefined, + but it will probably be fully or partially sorted. Default is + False. Note that, if `overwrite_input` is True, and the input + is not already an `ndarray`, an error will be raised. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + .. versionadded:: 1.10.0 + + Returns + ------- + median : ndarray + A new array holding the result is returned unless out is + specified, in which case a reference to out is returned. + Return data-type is `float64` for integers and floats smaller than + `float64`, or the input data-type, otherwise. + + See Also + -------- + mean + + Notes + ----- + Given a vector ``V`` with ``N`` non masked values, the median of ``V`` + is the middle value of a sorted copy of ``V`` (``Vs``) - i.e. + ``Vs[(N-1)/2]``, when ``N`` is odd, or ``{Vs[N/2 - 1] + Vs[N/2]}/2`` + when ``N`` is even. + + Examples + -------- + >>> x = np.ma.array(np.arange(8), mask=[0]*4 + [1]*4) + >>> np.ma.median(x) + 1.5 + + >>> x = np.ma.array(np.arange(10).reshape(2, 5), mask=[0]*6 + [1]*4) + >>> np.ma.median(x) + 2.5 + >>> np.ma.median(x, axis=-1, overwrite_input=True) + masked_array(data=[2.0, 5.0], + mask=[False, False], + fill_value=1e+20) + + """ + if not hasattr(a, 'mask'): + m = np.median(getdata(a, subok=True), axis=axis, + out=out, overwrite_input=overwrite_input, + keepdims=keepdims) + if isinstance(m, np.ndarray) and 1 <= m.ndim: + return masked_array(m, copy=False) + else: + return m + + r, k = _ureduce(a, func=_median, axis=axis, out=out, + overwrite_input=overwrite_input) + if keepdims: + return r.reshape(k) + else: + return r + +def _median(a, axis=None, out=None, overwrite_input=False): + # when an unmasked NaN is present return it, so we need to sort the NaN + # values behind the mask + if np.issubdtype(a.dtype, np.inexact): + fill_value = np.inf + else: + fill_value = None + if overwrite_input: + if axis is None: + asorted = a.ravel() + asorted.sort(fill_value=fill_value) + else: + a.sort(axis=axis, fill_value=fill_value) + asorted = a + else: + asorted = sort(a, axis=axis, fill_value=fill_value) + + if axis is None: + axis = 0 + else: + axis = normalize_axis_index(axis, asorted.ndim) + + if asorted.shape[axis] == 0: + # for empty axis integer indices fail so use slicing to get same result + # as median (which is mean of empty slice = nan) + indexer = [slice(None)] * asorted.ndim + indexer[axis] = slice(0, 0) + indexer = tuple(indexer) + return np.ma.mean(asorted[indexer], axis=axis, out=out) + + if asorted.ndim == 1: + counts = count(asorted) + idx, odd = divmod(count(asorted), 2) + mid = asorted[idx + odd - 1:idx + 1] + if np.issubdtype(asorted.dtype, np.inexact) and asorted.size > 0: + # avoid inf / x = masked + s = mid.sum(out=out) + if not odd: + s = np.true_divide(s, 2., casting='safe', out=out) + s = np.lib.utils._median_nancheck(asorted, s, axis, out) + else: + s = mid.mean(out=out) + + # if result is masked either the input contained enough + # minimum_fill_value so that it would be the median or all values + # masked + if np.ma.is_masked(s) and not np.all(asorted.mask): + return np.ma.minimum_fill_value(asorted) + return s + + counts = count(asorted, axis=axis, keepdims=True) + h = counts // 2 + + # duplicate high if odd number of 
elements so mean does nothing + odd = counts % 2 == 1 + l = np.where(odd, h, h-1) + + lh = np.concatenate([l,h], axis=axis) + + # get low and high median + low_high = np.take_along_axis(asorted, lh, axis=axis) + + def replace_masked(s): + # Replace masked entries with minimum_full_value unless it all values + # are masked. This is required as the sort order of values equal or + # larger than the fill value is undefined and a valid value placed + # elsewhere, e.g. [4, --, inf]. + if np.ma.is_masked(s): + rep = (~np.all(asorted.mask, axis=axis, keepdims=True)) & s.mask + s.data[rep] = np.ma.minimum_fill_value(asorted) + s.mask[rep] = False + + replace_masked(low_high) + + if np.issubdtype(asorted.dtype, np.inexact): + # avoid inf / x = masked + s = np.ma.sum(low_high, axis=axis, out=out) + np.true_divide(s.data, 2., casting='unsafe', out=s.data) + + s = np.lib.utils._median_nancheck(asorted, s, axis, out) + else: + s = np.ma.mean(low_high, axis=axis, out=out) + + return s + + +def compress_nd(x, axis=None): + """Suppress slices from multiple dimensions which contain masked values. + + Parameters + ---------- + x : array_like, MaskedArray + The array to operate on. If not a MaskedArray instance (or if no array + elements are masked), `x` is interpreted as a MaskedArray with `mask` + set to `nomask`. + axis : tuple of ints or int, optional + Which dimensions to suppress slices from can be configured with this + parameter. + - If axis is a tuple of ints, those are the axes to suppress slices from. + - If axis is an int, then that is the only axis to suppress slices from. + - If axis is None, all axis are selected. + + Returns + ------- + compress_array : ndarray + The compressed array. + """ + x = asarray(x) + m = getmask(x) + # Set axis to tuple of ints + if axis is None: + axis = tuple(range(x.ndim)) + else: + axis = normalize_axis_tuple(axis, x.ndim) + + # Nothing is masked: return x + if m is nomask or not m.any(): + return x._data + # All is masked: return empty + if m.all(): + return nxarray([]) + # Filter elements through boolean indexing + data = x._data + for ax in axis: + axes = tuple(list(range(ax)) + list(range(ax + 1, x.ndim))) + data = data[(slice(None),)*ax + (~m.any(axis=axes),)] + return data + +def compress_rowcols(x, axis=None): + """ + Suppress the rows and/or columns of a 2-D array that contain + masked values. + + The suppression behavior is selected with the `axis` parameter. + + - If axis is None, both rows and columns are suppressed. + - If axis is 0, only rows are suppressed. + - If axis is 1 or -1, only columns are suppressed. + + Parameters + ---------- + x : array_like, MaskedArray + The array to operate on. If not a MaskedArray instance (or if no array + elements are masked), `x` is interpreted as a MaskedArray with + `mask` set to `nomask`. Must be a 2D array. + axis : int, optional + Axis along which to perform the operation. Default is None. + + Returns + ------- + compressed_array : ndarray + The compressed array. + + Examples + -------- + >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], + ... [1, 0, 0], + ... 
[0, 0, 0]]) + >>> x + masked_array( + data=[[--, 1, 2], + [--, 4, 5], + [6, 7, 8]], + mask=[[ True, False, False], + [ True, False, False], + [False, False, False]], + fill_value=999999) + + >>> np.ma.compress_rowcols(x) + array([[7, 8]]) + >>> np.ma.compress_rowcols(x, 0) + array([[6, 7, 8]]) + >>> np.ma.compress_rowcols(x, 1) + array([[1, 2], + [4, 5], + [7, 8]]) + + """ + if asarray(x).ndim != 2: + raise NotImplementedError("compress_rowcols works for 2D arrays only.") + return compress_nd(x, axis=axis) + + +def compress_rows(a): + """ + Suppress whole rows of a 2-D array that contain masked values. + + This is equivalent to ``np.ma.compress_rowcols(a, 0)``, see + `compress_rowcols` for details. + + See Also + -------- + compress_rowcols + + """ + a = asarray(a) + if a.ndim != 2: + raise NotImplementedError("compress_rows works for 2D arrays only.") + return compress_rowcols(a, 0) + +def compress_cols(a): + """ + Suppress whole columns of a 2-D array that contain masked values. + + This is equivalent to ``np.ma.compress_rowcols(a, 1)``, see + `compress_rowcols` for details. + + See Also + -------- + compress_rowcols + + """ + a = asarray(a) + if a.ndim != 2: + raise NotImplementedError("compress_cols works for 2D arrays only.") + return compress_rowcols(a, 1) + +def mask_rows(a, axis=np._NoValue): + """ + Mask rows of a 2D array that contain masked values. + + This function is a shortcut to ``mask_rowcols`` with `axis` equal to 0. + + See Also + -------- + mask_rowcols : Mask rows and/or columns of a 2D array. + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.zeros((3, 3), dtype=int) + >>> a[1, 1] = 1 + >>> a + array([[0, 0, 0], + [0, 1, 0], + [0, 0, 0]]) + >>> a = ma.masked_equal(a, 1) + >>> a + masked_array( + data=[[0, 0, 0], + [0, --, 0], + [0, 0, 0]], + mask=[[False, False, False], + [False, True, False], + [False, False, False]], + fill_value=1) + + >>> ma.mask_rows(a) + masked_array( + data=[[0, 0, 0], + [--, --, --], + [0, 0, 0]], + mask=[[False, False, False], + [ True, True, True], + [False, False, False]], + fill_value=1) + + """ + if axis is not np._NoValue: + # remove the axis argument when this deprecation expires + # NumPy 1.18.0, 2019-11-28 + warnings.warn( + "The axis argument has always been ignored, in future passing it " + "will raise TypeError", DeprecationWarning, stacklevel=2) + return mask_rowcols(a, 0) + +def mask_cols(a, axis=np._NoValue): + """ + Mask columns of a 2D array that contain masked values. + + This function is a shortcut to ``mask_rowcols`` with `axis` equal to 1. + + See Also + -------- + mask_rowcols : Mask rows and/or columns of a 2D array. + masked_where : Mask where a condition is met. 
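A compact sketch contrasting the two families defined here: `compress_rows` drops every row containing a masked value, while `mask_rows` keeps the shape and masks such rows instead:

    import numpy as np

    x = np.ma.array([[1, 2], [3, 4]], mask=[[0, 1], [0, 0]])
    print(np.ma.compress_rows(x))    # [[3 4]]  -- row 0 is removed
    print(np.ma.mask_rows(x).mask)   # [[ True  True]
                                     #  [False False]]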
+ + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.zeros((3, 3), dtype=int) + >>> a[1, 1] = 1 + >>> a + array([[0, 0, 0], + [0, 1, 0], + [0, 0, 0]]) + >>> a = ma.masked_equal(a, 1) + >>> a + masked_array( + data=[[0, 0, 0], + [0, --, 0], + [0, 0, 0]], + mask=[[False, False, False], + [False, True, False], + [False, False, False]], + fill_value=1) + >>> ma.mask_cols(a) + masked_array( + data=[[0, --, 0], + [0, --, 0], + [0, --, 0]], + mask=[[False, True, False], + [False, True, False], + [False, True, False]], + fill_value=1) + + """ + if axis is not np._NoValue: + # remove the axis argument when this deprecation expires + # NumPy 1.18.0, 2019-11-28 + warnings.warn( + "The axis argument has always been ignored, in future passing it " + "will raise TypeError", DeprecationWarning, stacklevel=2) + return mask_rowcols(a, 1) + + +#####-------------------------------------------------------------------------- +#---- --- arraysetops --- +#####-------------------------------------------------------------------------- + +def ediff1d(arr, to_end=None, to_begin=None): + """ + Compute the differences between consecutive elements of an array. + + This function is the equivalent of `numpy.ediff1d` that takes masked + values into account, see `numpy.ediff1d` for details. + + See Also + -------- + numpy.ediff1d : Equivalent function for ndarrays. + + """ + arr = ma.asanyarray(arr).flat + ed = arr[1:] - arr[:-1] + arrays = [ed] + # + if to_begin is not None: + arrays.insert(0, to_begin) + if to_end is not None: + arrays.append(to_end) + # + if len(arrays) != 1: + # We'll save ourselves a copy of a potentially large array in the common + # case where neither to_begin or to_end was given. + ed = hstack(arrays) + # + return ed + + +def unique(ar1, return_index=False, return_inverse=False): + """ + Finds the unique elements of an array. + + Masked values are considered the same element (masked). The output array + is always a masked array. See `numpy.unique` for more details. + + See Also + -------- + numpy.unique : Equivalent function for ndarrays. + + """ + output = np.unique(ar1, + return_index=return_index, + return_inverse=return_inverse) + if isinstance(output, tuple): + output = list(output) + output[0] = output[0].view(MaskedArray) + output = tuple(output) + else: + output = output.view(MaskedArray) + return output + + +def intersect1d(ar1, ar2, assume_unique=False): + """ + Returns the unique elements common to both arrays. + + Masked values are considered equal one to the other. + The output is always a masked array. + + See `numpy.intersect1d` for more details. + + See Also + -------- + numpy.intersect1d : Equivalent function for ndarrays. + + Examples + -------- + >>> x = np.ma.array([1, 3, 3, 3], mask=[0, 0, 0, 1]) + >>> y = np.ma.array([3, 1, 1, 1], mask=[0, 0, 0, 1]) + >>> np.ma.intersect1d(x, y) + masked_array(data=[1, 3, --], + mask=[False, False, True], + fill_value=999999) + + """ + if assume_unique: + aux = ma.concatenate((ar1, ar2)) + else: + # Might be faster than unique( intersect1d( ar1, ar2 ) )? + aux = ma.concatenate((unique(ar1), unique(ar2))) + aux.sort() + return aux[:-1][aux[1:] == aux[:-1]] + + +def setxor1d(ar1, ar2, assume_unique=False): + """ + Set exclusive-or of 1-D arrays with unique elements. + + The output is always a masked array. See `numpy.setxor1d` for more details. + + See Also + -------- + numpy.setxor1d : Equivalent function for ndarrays. 
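A sketch of the arraysetops behaviour described above; all masked values are treated as one element, `masked`:

    import numpy as np

    x = np.ma.array([1, 1, 2, 4], mask=[0, 0, 0, 1])
    print(np.ma.unique(x))    # [1 2 --]  -- the masked entry counts once
    print(np.ma.ediff1d(x))   # [0 1 --]  -- differences touching a mask are masked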
+ + """ + if not assume_unique: + ar1 = unique(ar1) + ar2 = unique(ar2) + + aux = ma.concatenate((ar1, ar2)) + if aux.size == 0: + return aux + aux.sort() + auxf = aux.filled() +# flag = ediff1d( aux, to_end = 1, to_begin = 1 ) == 0 + flag = ma.concatenate(([True], (auxf[1:] != auxf[:-1]), [True])) +# flag2 = ediff1d( flag ) == 0 + flag2 = (flag[1:] == flag[:-1]) + return aux[flag2] + + +def in1d(ar1, ar2, assume_unique=False, invert=False): + """ + Test whether each element of an array is also present in a second + array. + + The output is always a masked array. See `numpy.in1d` for more details. + + We recommend using :func:`isin` instead of `in1d` for new code. + + See Also + -------- + isin : Version of this function that preserves the shape of ar1. + numpy.in1d : Equivalent function for ndarrays. + + Notes + ----- + .. versionadded:: 1.4.0 + + """ + if not assume_unique: + ar1, rev_idx = unique(ar1, return_inverse=True) + ar2 = unique(ar2) + + ar = ma.concatenate((ar1, ar2)) + # We need this to be a stable sort, so always use 'mergesort' + # here. The values from the first array should always come before + # the values from the second array. + order = ar.argsort(kind='mergesort') + sar = ar[order] + if invert: + bool_ar = (sar[1:] != sar[:-1]) + else: + bool_ar = (sar[1:] == sar[:-1]) + flag = ma.concatenate((bool_ar, [invert])) + indx = order.argsort(kind='mergesort')[:len(ar1)] + + if assume_unique: + return flag[indx] + else: + return flag[indx][rev_idx] + + +def isin(element, test_elements, assume_unique=False, invert=False): + """ + Calculates `element in test_elements`, broadcasting over + `element` only. + + The output is always a masked array of the same shape as `element`. + See `numpy.isin` for more details. + + See Also + -------- + in1d : Flattened version of this function. + numpy.isin : Equivalent function for ndarrays. + + Notes + ----- + .. versionadded:: 1.13.0 + + """ + element = ma.asarray(element) + return in1d(element, test_elements, assume_unique=assume_unique, + invert=invert).reshape(element.shape) + + +def union1d(ar1, ar2): + """ + Union of two arrays. + + The output is always a masked array. See `numpy.union1d` for more details. + + See Also + -------- + numpy.union1d : Equivalent function for ndarrays. + + """ + return unique(ma.concatenate((ar1, ar2), axis=None)) + + +def setdiff1d(ar1, ar2, assume_unique=False): + """ + Set difference of 1D arrays with unique elements. + + The output is always a masked array. See `numpy.setdiff1d` for more + details. + + See Also + -------- + numpy.setdiff1d : Equivalent function for ndarrays. + + Examples + -------- + >>> x = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 1]) + >>> np.ma.setdiff1d(x, [1, 2]) + masked_array(data=[3, --], + mask=[False, True], + fill_value=999999) + + """ + if assume_unique: + ar1 = ma.asarray(ar1).ravel() + else: + ar1 = unique(ar1) + ar2 = unique(ar2) + return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)] + + +############################################################################### +# Covariance # +############################################################################### + + +def _covhelper(x, y=None, rowvar=True, allow_masked=True): + """ + Private function for the computation of covariance and correlation + coefficients. 
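`isin` simply reshapes the flat `in1d` result back to the shape of `element`; a small, hypothetical example:

    import numpy as np

    el = np.ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1])
    # Membership is tested element-wise; here the masked slot reports False.
    print(np.ma.isin(el, [2, 3]))   # [False  True  True False]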
+ + """ + x = ma.array(x, ndmin=2, copy=True, dtype=float) + xmask = ma.getmaskarray(x) + # Quick exit if we can't process masked data + if not allow_masked and xmask.any(): + raise ValueError("Cannot process masked data.") + # + if x.shape[0] == 1: + rowvar = True + # Make sure that rowvar is either 0 or 1 + rowvar = int(bool(rowvar)) + axis = 1 - rowvar + if rowvar: + tup = (slice(None), None) + else: + tup = (None, slice(None)) + # + if y is None: + xnotmask = np.logical_not(xmask).astype(int) + else: + y = array(y, copy=False, ndmin=2, dtype=float) + ymask = ma.getmaskarray(y) + if not allow_masked and ymask.any(): + raise ValueError("Cannot process masked data.") + if xmask.any() or ymask.any(): + if y.shape == x.shape: + # Define some common mask + common_mask = np.logical_or(xmask, ymask) + if common_mask is not nomask: + xmask = x._mask = y._mask = ymask = common_mask + x._sharedmask = False + y._sharedmask = False + x = ma.concatenate((x, y), axis) + xnotmask = np.logical_not(np.concatenate((xmask, ymask), axis)).astype(int) + x -= x.mean(axis=rowvar)[tup] + return (x, xnotmask, rowvar) + + +def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): + """ + Estimate the covariance matrix. + + Except for the handling of missing data this function does the same as + `numpy.cov`. For more details and examples, see `numpy.cov`. + + By default, masked values are recognized as such. If `x` and `y` have the + same shape, a common mask is allocated: if ``x[i,j]`` is masked, then + ``y[i,j]`` will also be masked. + Setting `allow_masked` to False will raise an exception if values are + missing in either of the input arrays. + + Parameters + ---------- + x : array_like + A 1-D or 2-D array containing multiple variables and observations. + Each row of `x` represents a variable, and each column a single + observation of all those variables. Also see `rowvar` below. + y : array_like, optional + An additional set of variables and observations. `y` has the same + shape as `x`. + rowvar : bool, optional + If `rowvar` is True (default), then each row represents a + variable, with observations in the columns. Otherwise, the relationship + is transposed: each column represents a variable, while the rows + contain observations. + bias : bool, optional + Default normalization (False) is by ``(N-1)``, where ``N`` is the + number of observations given (unbiased estimate). If `bias` is True, + then normalization is by ``N``. This keyword can be overridden by + the keyword ``ddof`` in numpy versions >= 1.5. + allow_masked : bool, optional + If True, masked values are propagated pair-wise: if a value is masked + in `x`, the corresponding value is masked in `y`. + If False, raises a `ValueError` exception when some values are missing. + ddof : {None, int}, optional + If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is + the number of observations; this overrides the value implied by + ``bias``. The default value is ``None``. + + .. versionadded:: 1.5 + + Raises + ------ + ValueError + Raised if some values are missing and `allow_masked` is False. + + See Also + -------- + numpy.cov + + """ + # Check inputs + if ddof is not None and ddof != int(ddof): + raise ValueError("ddof must be an integer") + # Set up ddof + if ddof is None: + if bias: + ddof = 0 + else: + ddof = 1 + + (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked) + if not rowvar: + fact = np.dot(xnotmask.T, xnotmask) * 1. 
- ddof + result = (dot(x.T, x.conj(), strict=False) / fact).squeeze() + else: + fact = np.dot(xnotmask, xnotmask.T) * 1. - ddof + result = (dot(x, x.T.conj(), strict=False) / fact).squeeze() + return result + + +def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True, + ddof=np._NoValue): + """ + Return Pearson product-moment correlation coefficients. + + Except for the handling of missing data this function does the same as + `numpy.corrcoef`. For more details and examples, see `numpy.corrcoef`. + + Parameters + ---------- + x : array_like + A 1-D or 2-D array containing multiple variables and observations. + Each row of `x` represents a variable, and each column a single + observation of all those variables. Also see `rowvar` below. + y : array_like, optional + An additional set of variables and observations. `y` has the same + shape as `x`. + rowvar : bool, optional + If `rowvar` is True (default), then each row represents a + variable, with observations in the columns. Otherwise, the relationship + is transposed: each column represents a variable, while the rows + contain observations. + bias : _NoValue, optional + Has no effect, do not use. + + .. deprecated:: 1.10.0 + allow_masked : bool, optional + If True, masked values are propagated pair-wise: if a value is masked + in `x`, the corresponding value is masked in `y`. + If False, raises an exception. Because `bias` is deprecated, this + argument needs to be treated as keyword only to avoid a warning. + ddof : _NoValue, optional + Has no effect, do not use. + + .. deprecated:: 1.10.0 + + See Also + -------- + numpy.corrcoef : Equivalent function in top-level NumPy module. + cov : Estimate the covariance matrix. + + Notes + ----- + This function accepts but discards arguments `bias` and `ddof`. This is + for backwards compatibility with previous versions of this function. These + arguments had no effect on the return values of the function and can be + safely ignored in this and previous versions of numpy. + """ + msg = 'bias and ddof have no effect and are deprecated' + if bias is not np._NoValue or ddof is not np._NoValue: + # 2015-03-15, 1.10 + warnings.warn(msg, DeprecationWarning, stacklevel=2) + # Get the data + (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked) + # Compute the covariance matrix + if not rowvar: + fact = np.dot(xnotmask.T, xnotmask) * 1. + c = (dot(x.T, x.conj(), strict=False) / fact).squeeze() + else: + fact = np.dot(xnotmask, xnotmask.T) * 1. + c = (dot(x, x.T.conj(), strict=False) / fact).squeeze() + # Check whether we have a scalar + try: + diag = ma.diagonal(c) + except ValueError: + return 1 + # + if xnotmask.all(): + _denom = ma.sqrt(ma.multiply.outer(diag, diag)) + else: + _denom = diagflat(diag) + _denom._sharedmask = False # We know return is always a copy + n = x.shape[1 - rowvar] + if rowvar: + for i in range(n - 1): + for j in range(i + 1, n): + _x = mask_cols(vstack((x[i], x[j]))).var(axis=1) + _denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x)) + else: + for i in range(n - 1): + for j in range(i + 1, n): + _x = mask_cols( + vstack((x[:, i], x[:, j]))).var(axis=1) + _denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x)) + return c / _denom + +#####-------------------------------------------------------------------------- +#---- --- Concatenation helpers --- +#####-------------------------------------------------------------------------- + +class MAxisConcatenator(AxisConcatenator): + """ + Translate slice objects to concatenation along an axis. 
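A short sketch of the pairwise masking that `_covhelper` sets up for `cov` and `corrcoef`: a value masked in `x` also removes the paired observation in `y`:

    import numpy as np

    x = np.ma.array([1.0, 2.0, 3.0, 4.0], mask=[0, 0, 0, 1])
    y = np.ma.array([2.0, 4.0, 6.0, 8.0])
    # Only the three complete pairs enter the estimate (ddof=1 by default),
    # giving [[1.0, 2.0], [2.0, 4.0]] here.
    print(np.ma.cov(x, y))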
+ + For documentation on usage, see `mr_class`. + + See Also + -------- + mr_class + + """ + concatenate = staticmethod(concatenate) + + @classmethod + def makemat(cls, arr): + # There used to be a view as np.matrix here, but we may eventually + # deprecate that class. In preparation, we use the unmasked version + # to construct the matrix (with copy=False for backwards compatibility + # with the .view) + data = super().makemat(arr.data, copy=False) + return array(data, mask=arr.mask) + + def __getitem__(self, key): + # matrix builder syntax, like 'a, b; c, d' + if isinstance(key, str): + raise MAError("Unavailable for masked array.") + + return super().__getitem__(key) + + +class mr_class(MAxisConcatenator): + """ + Translate slice objects to concatenation along the first axis. + + This is the masked array version of `lib.index_tricks.RClass`. + + See Also + -------- + lib.index_tricks.RClass + + Examples + -------- + >>> np.ma.mr_[np.ma.array([1,2,3]), 0, 0, np.ma.array([4,5,6])] + masked_array(data=[1, 2, 3, ..., 4, 5, 6], + mask=False, + fill_value=999999) + + """ + def __init__(self): + MAxisConcatenator.__init__(self, 0) + +mr_ = mr_class() + +#####-------------------------------------------------------------------------- +#---- Find unmasked data --- +#####-------------------------------------------------------------------------- + +def flatnotmasked_edges(a): + """ + Find the indices of the first and last unmasked values. + + Expects a 1-D `MaskedArray`, returns None if all values are masked. + + Parameters + ---------- + a : array_like + Input 1-D `MaskedArray` + + Returns + ------- + edges : ndarray or None + The indices of first and last non-masked value in the array. + Returns None if all values are masked. + + See Also + -------- + flatnotmasked_contiguous, notmasked_contiguous, notmasked_edges + clump_masked, clump_unmasked + + Notes + ----- + Only accepts 1-D arrays. + + Examples + -------- + >>> a = np.ma.arange(10) + >>> np.ma.flatnotmasked_edges(a) + array([0, 9]) + + >>> mask = (a < 3) | (a > 8) | (a == 5) + >>> a[mask] = np.ma.masked + >>> np.array(a[~a.mask]) + array([3, 4, 6, 7, 8]) + + >>> np.ma.flatnotmasked_edges(a) + array([3, 8]) + + >>> a[:] = np.ma.masked + >>> print(np.ma.flatnotmasked_edges(a)) + None + + """ + m = getmask(a) + if m is nomask or not np.any(m): + return np.array([0, a.size - 1]) + unmasked = np.flatnonzero(~m) + if len(unmasked) > 0: + return unmasked[[0, -1]] + else: + return None + + +def notmasked_edges(a, axis=None): + """ + Find the indices of the first and last unmasked values along an axis. + + If all values are masked, return None. Otherwise, return a list + of two tuples, corresponding to the indices of the first and last + unmasked values respectively. + + Parameters + ---------- + a : array_like + The input array. + axis : int, optional + Axis along which to perform the operation. + If None (default), applies to a flattened version of the array. + + Returns + ------- + edges : ndarray or list + An array of start and end indexes if there are any masked data in + the array. If there are no masked data in the array, `edges` is a + list of the first and last index. 
+ + See Also + -------- + flatnotmasked_contiguous, flatnotmasked_edges, notmasked_contiguous + clump_masked, clump_unmasked + + Examples + -------- + >>> a = np.arange(9).reshape((3, 3)) + >>> m = np.zeros_like(a) + >>> m[1:, 1:] = 1 + + >>> am = np.ma.array(a, mask=m) + >>> np.array(am[~am.mask]) + array([0, 1, 2, 3, 6]) + + >>> np.ma.notmasked_edges(am) + array([0, 6]) + + """ + a = asarray(a) + if axis is None or a.ndim == 1: + return flatnotmasked_edges(a) + m = getmaskarray(a) + idx = array(np.indices(a.shape), mask=np.asarray([m] * a.ndim)) + return [tuple([idx[i].min(axis).compressed() for i in range(a.ndim)]), + tuple([idx[i].max(axis).compressed() for i in range(a.ndim)]), ] + + +def flatnotmasked_contiguous(a): + """ + Find contiguous unmasked data in a masked array along the given axis. + + Parameters + ---------- + a : narray + The input array. + + Returns + ------- + slice_list : list + A sorted sequence of `slice` objects (start index, end index). + + .. versionchanged:: 1.15.0 + Now returns an empty list instead of None for a fully masked array + + See Also + -------- + flatnotmasked_edges, notmasked_contiguous, notmasked_edges + clump_masked, clump_unmasked + + Notes + ----- + Only accepts 2-D arrays at most. + + Examples + -------- + >>> a = np.ma.arange(10) + >>> np.ma.flatnotmasked_contiguous(a) + [slice(0, 10, None)] + + >>> mask = (a < 3) | (a > 8) | (a == 5) + >>> a[mask] = np.ma.masked + >>> np.array(a[~a.mask]) + array([3, 4, 6, 7, 8]) + + >>> np.ma.flatnotmasked_contiguous(a) + [slice(3, 5, None), slice(6, 9, None)] + >>> a[:] = np.ma.masked + >>> np.ma.flatnotmasked_contiguous(a) + [] + + """ + m = getmask(a) + if m is nomask: + return [slice(0, a.size)] + i = 0 + result = [] + for (k, g) in itertools.groupby(m.ravel()): + n = len(list(g)) + if not k: + result.append(slice(i, i + n)) + i += n + return result + +def notmasked_contiguous(a, axis=None): + """ + Find contiguous unmasked data in a masked array along the given axis. + + Parameters + ---------- + a : array_like + The input array. + axis : int, optional + Axis along which to perform the operation. + If None (default), applies to a flattened version of the array, and this + is the same as `flatnotmasked_contiguous`. + + Returns + ------- + endpoints : list + A list of slices (start and end indexes) of unmasked indexes + in the array. + + If the input is 2d and axis is specified, the result is a list of lists. + + See Also + -------- + flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges + clump_masked, clump_unmasked + + Notes + ----- + Only accepts 2-D arrays at most. 
+
+    Examples
+    --------
+    >>> a = np.arange(12).reshape((3, 4))
+    >>> mask = np.zeros_like(a)
+    >>> mask[1:, :-1] = 1; mask[0, 1] = 1; mask[-1, 0] = 0
+    >>> ma = np.ma.array(a, mask=mask)
+    >>> ma
+    masked_array(
+      data=[[0, --, 2, 3],
+            [--, --, --, 7],
+            [8, --, --, 11]],
+      mask=[[False,  True, False, False],
+            [ True,  True,  True, False],
+            [False,  True,  True, False]],
+      fill_value=999999)
+    >>> np.array(ma[~ma.mask])
+    array([ 0,  2,  3,  7,  8, 11])
+
+    >>> np.ma.notmasked_contiguous(ma)
+    [slice(0, 1, None), slice(2, 4, None), slice(7, 9, None), slice(11, 12, None)]
+
+    >>> np.ma.notmasked_contiguous(ma, axis=0)
+    [[slice(0, 1, None), slice(2, 3, None)], [], [slice(0, 1, None)], [slice(0, 3, None)]]
+
+    >>> np.ma.notmasked_contiguous(ma, axis=1)
+    [[slice(0, 1, None), slice(2, 4, None)], [slice(3, 4, None)], [slice(0, 1, None), slice(3, 4, None)]]
+
+    """
+    a = asarray(a)
+    nd = a.ndim
+    if nd > 2:
+        raise NotImplementedError("Currently limited to at most 2D arrays.")
+    if axis is None or nd == 1:
+        return flatnotmasked_contiguous(a)
+    #
+    result = []
+    #
+    other = (axis + 1) % 2
+    idx = [0, 0]
+    idx[axis] = slice(None, None)
+    #
+    for i in range(a.shape[other]):
+        idx[other] = i
+        result.append(flatnotmasked_contiguous(a[tuple(idx)]))
+    return result
+
+
+def _ezclump(mask):
+    """
+    Finds the clumps (groups of data with the same values) for a 1D bool array.
+
+    Returns a series of slices.
+    """
+    if mask.ndim > 1:
+        mask = mask.ravel()
+    idx = (mask[1:] ^ mask[:-1]).nonzero()
+    idx = idx[0] + 1
+
+    if mask[0]:
+        if len(idx) == 0:
+            return [slice(0, mask.size)]
+
+        r = [slice(0, idx[0])]
+        r.extend((slice(left, right)
+                  for left, right in zip(idx[1:-1:2], idx[2::2])))
+    else:
+        if len(idx) == 0:
+            return []
+
+        r = [slice(left, right) for left, right in zip(idx[:-1:2], idx[1::2])]
+
+    if mask[-1]:
+        r.append(slice(idx[-1], mask.size))
+    return r
+
+
+def clump_unmasked(a):
+    """
+    Return list of slices corresponding to the unmasked clumps of a 1-D array.
+    (A "clump" is defined as a contiguous region of the array).
+
+    Parameters
+    ----------
+    a : ndarray
+        A one-dimensional masked array.
+
+    Returns
+    -------
+    slices : list of slice
+        The list of slices, one for each continuous region of unmasked
+        elements in `a`.
+
+    Notes
+    -----
+    .. versionadded:: 1.4.0
+
+    See Also
+    --------
+    flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges
+    notmasked_contiguous, clump_masked
+
+    Examples
+    --------
+    >>> a = np.ma.masked_array(np.arange(10))
+    >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked
+    >>> np.ma.clump_unmasked(a)
+    [slice(3, 6, None), slice(7, 8, None)]
+
+    """
+    mask = getattr(a, '_mask', nomask)
+    if mask is nomask:
+        return [slice(0, a.size)]
+    return _ezclump(~mask)
+
+
+def clump_masked(a):
+    """
+    Returns a list of slices corresponding to the masked clumps of a 1-D array.
+    (A "clump" is defined as a contiguous region of the array).
+
+    Parameters
+    ----------
+    a : ndarray
+        A one-dimensional masked array.
+
+    Returns
+    -------
+    slices : list of slice
+        The list of slices, one for each continuous region of masked elements
+        in `a`.
+
+    Notes
+    -----
+    .. versionadded:: 1.4.0
+
+    See Also
+    --------
+    flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges
+    notmasked_contiguous, clump_unmasked
+
+    Examples
+    --------
+    >>> a = np.ma.masked_array(np.arange(10))
+    >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked
+    >>> np.ma.clump_masked(a)
+    [slice(0, 3, None), slice(6, 7, None), slice(8, 10, None)]
+
+    """
+    mask = ma.getmask(a)
+    if mask is nomask:
+        return []
+    return _ezclump(mask)
+
+
+###############################################################################
+#                            Polynomial fit                                   #
+###############################################################################
+
+
+def vander(x, n=None):
+    """
+    Masked values in the input array result in rows of zeros.
+
+    """
+    _vander = np.vander(x, n)
+    m = getmask(x)
+    if m is not nomask:
+        _vander[m] = 0
+    return _vander
+
+vander.__doc__ = ma.doc_note(np.vander.__doc__, vander.__doc__)
+
+
+def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
+    """
+    Any masked values in x are propagated in y, and vice versa.
+
+    """
+    x = asarray(x)
+    y = asarray(y)
+
+    m = getmask(x)
+    if y.ndim == 1:
+        m = mask_or(m, getmask(y))
+    elif y.ndim == 2:
+        my = getmask(mask_rows(y))
+        if my is not nomask:
+            m = mask_or(m, my[:, 0])
+    else:
+        raise TypeError("Expected a 1D or 2D array for y!")
+
+    if w is not None:
+        w = asarray(w)
+        if w.ndim != 1:
+            raise TypeError("expected a 1-d array for weights")
+        if w.shape[0] != y.shape[0]:
+            raise TypeError("expected w and y to have the same length")
+        m = mask_or(m, getmask(w))
+
+    if m is not nomask:
+        not_m = ~m
+        if w is not None:
+            w = w[not_m]
+        return np.polyfit(x[not_m], y[not_m], deg, rcond, full, w, cov)
+    else:
+        return np.polyfit(x, y, deg, rcond, full, w, cov)
+
+polyfit.__doc__ = ma.doc_note(np.polyfit.__doc__, polyfit.__doc__)
diff --git a/MLPY/Lib/site-packages/numpy/ma/extras.pyi b/MLPY/Lib/site-packages/numpy/ma/extras.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..f52f8244cc0d1d8960d7318cafa6d4b0039ef757
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/ma/extras.pyi
@@ -0,0 +1,84 @@
+from typing import Any, List
+from numpy.lib.index_tricks import AxisConcatenator
+
+from numpy.ma.core import (
+    dot as dot,
+    mask_rowcols as mask_rowcols,
+)
+
+__all__: List[str]
+
+def count_masked(arr, axis=...): ...
+def masked_all(shape, dtype = ...): ...
+def masked_all_like(arr): ...
+
+class _fromnxfunction:
+    __name__: Any
+    __doc__: Any
+    def __init__(self, funcname): ...
+    def getdoc(self): ...
+    def __call__(self, *args, **params): ...
+
+class _fromnxfunction_single(_fromnxfunction):
+    def __call__(self, x, *args, **params): ...
+
+class _fromnxfunction_seq(_fromnxfunction):
+    def __call__(self, x, *args, **params): ...
+
+class _fromnxfunction_allargs(_fromnxfunction):
+    def __call__(self, *args, **params): ...
+
+atleast_1d: _fromnxfunction_allargs
+atleast_2d: _fromnxfunction_allargs
+atleast_3d: _fromnxfunction_allargs
+
+vstack: _fromnxfunction_seq
+row_stack: _fromnxfunction_seq
+hstack: _fromnxfunction_seq
+column_stack: _fromnxfunction_seq
+dstack: _fromnxfunction_seq
+stack: _fromnxfunction_seq
+
+hsplit: _fromnxfunction_single
+diagflat: _fromnxfunction_single
+
+def apply_along_axis(func1d, axis, arr, *args, **kwargs): ...
+def apply_over_axes(func, a, axes): ...
+def average(a, axis=..., weights=..., returned=...): ...
+def median(a, axis=..., out=..., overwrite_input=..., keepdims=...): ...
+def compress_nd(x, axis=...): ...
+def compress_rowcols(x, axis=...): ...
+def compress_rows(a): ...
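Referring back to `polyfit` above: masked points in either input are excised before delegating to `np.polyfit`. A minimal sketch:

    import numpy as np

    x = np.ma.array([0.0, 1.0, 2.0, 3.0], mask=[0, 0, 0, 1])
    y = np.ma.array([1.0, 3.0, 5.0, 99.0])   # the outlier is dropped via x's mask
    print(np.ma.polyfit(x, y, 1))            # approximately [2. 1.]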
+def compress_cols(a): ...
+def mask_rows(a, axis = ...): ...
+def mask_cols(a, axis = ...): ...
+def ediff1d(arr, to_end=..., to_begin=...): ...
+def unique(ar1, return_index=..., return_inverse=...): ...
+def intersect1d(ar1, ar2, assume_unique=...): ...
+def setxor1d(ar1, ar2, assume_unique=...): ...
+def in1d(ar1, ar2, assume_unique=..., invert=...): ...
+def isin(element, test_elements, assume_unique=..., invert=...): ...
+def union1d(ar1, ar2): ...
+def setdiff1d(ar1, ar2, assume_unique=...): ...
+def cov(x, y=..., rowvar=..., bias=..., allow_masked=..., ddof=...): ...
+def corrcoef(x, y=..., rowvar=..., bias = ..., allow_masked=..., ddof = ...): ...
+
+class MAxisConcatenator(AxisConcatenator):
+    concatenate: Any
+    @classmethod
+    def makemat(cls, arr): ...
+    def __getitem__(self, key): ...
+
+class mr_class(MAxisConcatenator):
+    def __init__(self): ...
+
+mr_: mr_class
+
+def flatnotmasked_edges(a): ...
+def notmasked_edges(a, axis=...): ...
+def flatnotmasked_contiguous(a): ...
+def notmasked_contiguous(a, axis=...): ...
+def clump_unmasked(a): ...
+def clump_masked(a): ...
+def vander(x, n=...): ...
+def polyfit(x, y, deg, rcond=..., full=..., w=..., cov=...): ...
diff --git a/MLPY/Lib/site-packages/numpy/ma/mrecords.py b/MLPY/Lib/site-packages/numpy/ma/mrecords.py
new file mode 100644
index 0000000000000000000000000000000000000000..13bf7fe7f6e0c103c166a4bae997cb1c857e8622
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/ma/mrecords.py
@@ -0,0 +1,774 @@
+""":mod:`numpy.ma.mrecords`
+
+Defines the equivalent of :class:`numpy.recarrays` for masked arrays,
+where fields can be accessed as attributes.
+Note that :class:`numpy.ma.MaskedArray` already supports structured datatypes
+and the masking of individual fields.
+
+.. moduleauthor:: Pierre Gerard-Marchant
+
+"""
+# We should make sure that no field is called '_mask', 'mask', '_fieldmask',
+# or whatever restricted keywords. An idea would be to not bother in the
+# first place, and then rename the invalid fields with a trailing
+# underscore. Maybe we could just overload the parser function?
+
+from numpy.ma import (
+    MAError, MaskedArray, masked, nomask, masked_array, getdata,
+    getmaskarray, filled
+)
+import numpy.ma as ma
+import warnings
+
+import numpy as np
+from numpy import (
+    bool_, dtype, ndarray, recarray, array as narray
+)
+from numpy.core.records import (
+    fromarrays as recfromarrays, fromrecords as recfromrecords
+)
+
+_byteorderconv = np.core.records._byteorderconv
+
+
+_check_fill_value = ma.core._check_fill_value
+
+
+__all__ = [
+    'MaskedRecords', 'mrecarray', 'fromarrays', 'fromrecords',
+    'fromtextfile', 'addfield',
+]
+
+reserved_fields = ['_data', '_mask', '_fieldmask', 'dtype']
+
+
+def _checknames(descr, names=None):
+    """
+    Checks that field names ``descr`` are not reserved keywords.
+
+    If this is the case, a default 'f%i' is substituted. If the argument
+    `names` is not None, updates the field names to valid names.
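As a quick illustration of the per-field masking this module provides (a sketch using `fromarrays`, defined later in this file):

    import numpy as np
    from numpy.ma import mrecords

    x = np.ma.array([1, 2], mask=[0, 1])
    y = np.ma.array([3.0, 4.0], mask=[1, 0])
    r = mrecords.fromarrays([x, y], names='a,b')
    print(r.a)           # [1 --]  -- each field keeps its own mask
    print(r._fieldmask)  # one boolean flag per field, per record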
+ + """ + ndescr = len(descr) + default_names = ['f%i' % i for i in range(ndescr)] + if names is None: + new_names = default_names + else: + if isinstance(names, (tuple, list)): + new_names = names + elif isinstance(names, str): + new_names = names.split(',') + else: + raise NameError(f'illegal input names {names!r}') + nnames = len(new_names) + if nnames < ndescr: + new_names += default_names[nnames:] + ndescr = [] + for (n, d, t) in zip(new_names, default_names, descr.descr): + if n in reserved_fields: + if t[0] in reserved_fields: + ndescr.append((d, t[1])) + else: + ndescr.append(t) + else: + ndescr.append((n, t[1])) + return np.dtype(ndescr) + + +def _get_fieldmask(self): + mdescr = [(n, '|b1') for n in self.dtype.names] + fdmask = np.empty(self.shape, dtype=mdescr) + fdmask.flat = tuple([False] * len(mdescr)) + return fdmask + + +class MaskedRecords(MaskedArray): + """ + + Attributes + ---------- + _data : recarray + Underlying data, as a record array. + _mask : boolean array + Mask of the records. A record is masked when all its fields are + masked. + _fieldmask : boolean recarray + Record array of booleans, setting the mask of each individual field + of each record. + _fill_value : record + Filling values for each field. + + """ + + def __new__(cls, shape, dtype=None, buf=None, offset=0, strides=None, + formats=None, names=None, titles=None, + byteorder=None, aligned=False, + mask=nomask, hard_mask=False, fill_value=None, keep_mask=True, + copy=False, + **options): + + self = recarray.__new__(cls, shape, dtype=dtype, buf=buf, offset=offset, + strides=strides, formats=formats, names=names, + titles=titles, byteorder=byteorder, + aligned=aligned,) + + mdtype = ma.make_mask_descr(self.dtype) + if mask is nomask or not np.size(mask): + if not keep_mask: + self._mask = tuple([False] * len(mdtype)) + else: + mask = np.array(mask, copy=copy) + if mask.shape != self.shape: + (nd, nm) = (self.size, mask.size) + if nm == 1: + mask = np.resize(mask, self.shape) + elif nm == nd: + mask = np.reshape(mask, self.shape) + else: + msg = "Mask and data not compatible: data size is %i, " + \ + "mask size is %i." + raise MAError(msg % (nd, nm)) + copy = True + if not keep_mask: + self.__setmask__(mask) + self._sharedmask = True + else: + if mask.dtype == mdtype: + _mask = mask + else: + _mask = np.array([tuple([m] * len(mdtype)) for m in mask], + dtype=mdtype) + self._mask = _mask + return self + + def __array_finalize__(self, obj): + # Make sure we have a _fieldmask by default + _mask = getattr(obj, '_mask', None) + if _mask is None: + objmask = getattr(obj, '_mask', nomask) + _dtype = ndarray.__getattribute__(self, 'dtype') + if objmask is nomask: + _mask = ma.make_mask_none(self.shape, dtype=_dtype) + else: + mdescr = ma.make_mask_descr(_dtype) + _mask = narray([tuple([m] * len(mdescr)) for m in objmask], + dtype=mdescr).view(recarray) + # Update some of the attributes + _dict = self.__dict__ + _dict.update(_mask=_mask) + self._update_from(obj) + if _dict['_baseclass'] == ndarray: + _dict['_baseclass'] = recarray + return + + @property + def _data(self): + """ + Returns the data as a recarray. + + """ + return ndarray.view(self, recarray) + + @property + def _fieldmask(self): + """ + Alias to mask. 
+ + """ + return self._mask + + def __len__(self): + """ + Returns the length + + """ + # We have more than one record + if self.ndim: + return len(self._data) + # We have only one record: return the nb of fields + return len(self.dtype) + + def __getattribute__(self, attr): + try: + return object.__getattribute__(self, attr) + except AttributeError: + # attr must be a fieldname + pass + fielddict = ndarray.__getattribute__(self, 'dtype').fields + try: + res = fielddict[attr][:2] + except (TypeError, KeyError) as e: + raise AttributeError( + f'record array has no attribute {attr}') from e + # So far, so good + _localdict = ndarray.__getattribute__(self, '__dict__') + _data = ndarray.view(self, _localdict['_baseclass']) + obj = _data.getfield(*res) + if obj.dtype.names is not None: + raise NotImplementedError("MaskedRecords is currently limited to" + "simple records.") + # Get some special attributes + # Reset the object's mask + hasmasked = False + _mask = _localdict.get('_mask', None) + if _mask is not None: + try: + _mask = _mask[attr] + except IndexError: + # Couldn't find a mask: use the default (nomask) + pass + tp_len = len(_mask.dtype) + hasmasked = _mask.view((bool, ((tp_len,) if tp_len else ()))).any() + if (obj.shape or hasmasked): + obj = obj.view(MaskedArray) + obj._baseclass = ndarray + obj._isfield = True + obj._mask = _mask + # Reset the field values + _fill_value = _localdict.get('_fill_value', None) + if _fill_value is not None: + try: + obj._fill_value = _fill_value[attr] + except ValueError: + obj._fill_value = None + else: + obj = obj.item() + return obj + + def __setattr__(self, attr, val): + """ + Sets the attribute attr to the value val. + + """ + # Should we call __setmask__ first ? + if attr in ['mask', 'fieldmask']: + self.__setmask__(val) + return + # Create a shortcut (so that we don't have to call getattr all the time) + _localdict = object.__getattribute__(self, '__dict__') + # Check whether we're creating a new field + newattr = attr not in _localdict + try: + # Is attr a generic attribute ? + ret = object.__setattr__(self, attr, val) + except Exception: + # Not a generic attribute: exit if it's not a valid field + fielddict = ndarray.__getattribute__(self, 'dtype').fields or {} + optinfo = ndarray.__getattribute__(self, '_optinfo') or {} + if not (attr in fielddict or attr in optinfo): + raise + else: + # Get the list of names + fielddict = ndarray.__getattribute__(self, 'dtype').fields or {} + # Check the attribute + if attr not in fielddict: + return ret + if newattr: + # We just added this one or this setattr worked on an + # internal attribute. + try: + object.__delattr__(self, attr) + except Exception: + return ret + # Let's try to set the field + try: + res = fielddict[attr][:2] + except (TypeError, KeyError) as e: + raise AttributeError( + f'record array has no attribute {attr}') from e + + if val is masked: + _fill_value = _localdict['_fill_value'] + if _fill_value is not None: + dval = _localdict['_fill_value'][attr] + else: + dval = val + mval = True + else: + dval = filled(val) + mval = getmaskarray(val) + obj = ndarray.__getattribute__(self, '_data').setfield(dval, *res) + _localdict['_mask'].__setitem__(attr, mval) + return obj + + def __getitem__(self, indx): + """ + Returns all the fields sharing the same fieldname base. + + The fieldname base is either `_data` or `_mask`. 
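A sketch of the attribute machinery above: reading a field goes through `__getattribute__`, and assigning a masked array to a field updates the per-field mask via `__setattr__`:

    import numpy as np
    from numpy.ma import mrecords

    r = mrecords.fromarrays([[1, 2], [1.5, 2.5]], names='i,f')
    r.i = np.ma.array([7, 8], mask=[0, 1])   # field assignment
    print(r.i)   # [7 --]
    print(r.f)   # [1.5 2.5]  -- other fields are untouched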
+ + """ + _localdict = self.__dict__ + _mask = ndarray.__getattribute__(self, '_mask') + _data = ndarray.view(self, _localdict['_baseclass']) + # We want a field + if isinstance(indx, str): + # Make sure _sharedmask is True to propagate back to _fieldmask + # Don't use _set_mask, there are some copies being made that + # break propagation Don't force the mask to nomask, that wreaks + # easy masking + obj = _data[indx].view(MaskedArray) + obj._mask = _mask[indx] + obj._sharedmask = True + fval = _localdict['_fill_value'] + if fval is not None: + obj._fill_value = fval[indx] + # Force to masked if the mask is True + if not obj.ndim and obj._mask: + return masked + return obj + # We want some elements. + # First, the data. + obj = np.array(_data[indx], copy=False).view(mrecarray) + obj._mask = np.array(_mask[indx], copy=False).view(recarray) + return obj + + def __setitem__(self, indx, value): + """ + Sets the given record to value. + + """ + MaskedArray.__setitem__(self, indx, value) + if isinstance(indx, str): + self._mask[indx] = ma.getmaskarray(value) + + def __str__(self): + """ + Calculates the string representation. + + """ + if self.size > 1: + mstr = [f"({','.join([str(i) for i in s])})" + for s in zip(*[getattr(self, f) for f in self.dtype.names])] + return f"[{', '.join(mstr)}]" + else: + mstr = [f"{','.join([str(i) for i in s])}" + for s in zip([getattr(self, f) for f in self.dtype.names])] + return f"({', '.join(mstr)})" + + def __repr__(self): + """ + Calculates the repr representation. + + """ + _names = self.dtype.names + fmt = "%%%is : %%s" % (max([len(n) for n in _names]) + 4,) + reprstr = [fmt % (f, getattr(self, f)) for f in self.dtype.names] + reprstr.insert(0, 'masked_records(') + reprstr.extend([fmt % (' fill_value', self.fill_value), + ' )']) + return str("\n".join(reprstr)) + + def view(self, dtype=None, type=None): + """ + Returns a view of the mrecarray. + + """ + # OK, basic copy-paste from MaskedArray.view. + if dtype is None: + if type is None: + output = ndarray.view(self) + else: + output = ndarray.view(self, type) + # Here again. + elif type is None: + try: + if issubclass(dtype, ndarray): + output = ndarray.view(self, dtype) + dtype = None + else: + output = ndarray.view(self, dtype) + # OK, there's the change + except TypeError: + dtype = np.dtype(dtype) + # we need to revert to MaskedArray, but keeping the possibility + # of subclasses (eg, TimeSeriesRecords), so we'll force a type + # set to the first parent + if dtype.fields is None: + basetype = self.__class__.__bases__[0] + output = self.__array__().view(dtype, basetype) + output._update_from(self) + else: + output = ndarray.view(self, dtype) + output._fill_value = None + else: + output = ndarray.view(self, dtype, type) + # Update the mask, just like in MaskedArray.view + if (getattr(output, '_mask', nomask) is not nomask): + mdtype = ma.make_mask_descr(output.dtype) + output._mask = self._mask.view(mdtype, ndarray) + output._mask.shape = output.shape + return output + + def harden_mask(self): + """ + Forces the mask to hard. + + """ + self._hardmask = True + + def soften_mask(self): + """ + Forces the mask to soft + + """ + self._hardmask = False + + def copy(self): + """ + Returns a copy of the masked record. + + """ + copied = self._data.copy().view(type(self)) + copied._mask = self._mask.copy() + return copied + + def tolist(self, fill_value=None): + """ + Return the data portion of the array as a list. + + Data items are converted to the nearest compatible Python type. 
+        Masked values are converted to fill_value. If fill_value is None,
+        the corresponding entries in the output list will be ``None``.
+
+        """
+        if fill_value is not None:
+            return self.filled(fill_value).tolist()
+        result = narray(self.filled().tolist(), dtype=object)
+        mask = narray(self._mask.tolist())
+        result[mask] = None
+        return result.tolist()
+
+    def __getstate__(self):
+        """Return the internal state of the masked array.
+
+        This is for pickling.
+
+        """
+        state = (1,
+                 self.shape,
+                 self.dtype,
+                 self.flags.fnc,
+                 self._data.tobytes(),
+                 self._mask.tobytes(),
+                 self._fill_value,
+                 )
+        return state
+
+    def __setstate__(self, state):
+        """
+        Restore the internal state of the masked array.
+
+        This is for pickling. ``state`` is typically the output of
+        ``__getstate__``, and is a 7-tuple:
+
+        - a version number (currently 1)
+        - a tuple giving the shape of the data
+        - a typecode for the data
+        - a flag indicating whether the data is Fortran-contiguous
+        - a binary string for the data
+        - a binary string for the mask
+        - the fill_value.
+
+        """
+        (ver, shp, typ, isf, raw, msk, flv) = state
+        ndarray.__setstate__(self, (shp, typ, isf, raw))
+        mdtype = dtype([(k, bool_) for (k, _) in self.dtype.descr])
+        self.__dict__['_mask'].__setstate__((shp, mdtype, isf, msk))
+        self.fill_value = flv
+
+    def __reduce__(self):
+        """
+        Return a 3-tuple for pickling a MaskedArray.
+
+        """
+        return (_mrreconstruct,
+                (self.__class__, self._baseclass, (0,), 'b',),
+                self.__getstate__())
+
+
+def _mrreconstruct(subtype, baseclass, baseshape, basetype,):
+    """
+    Build a new MaskedArray from the information stored in a pickle.
+
+    """
+    _data = ndarray.__new__(baseclass, baseshape, basetype).view(subtype)
+    _mask = ndarray.__new__(ndarray, baseshape, 'b1')
+    return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,)
+
+
+mrecarray = MaskedRecords
+
+
+###############################################################################
+#                               Constructors                                  #
+###############################################################################
+
+
+def fromarrays(arraylist, dtype=None, shape=None, formats=None,
+               names=None, titles=None, aligned=False, byteorder=None,
+               fill_value=None):
+    """
+    Creates an mrecarray from a (flat) list of masked arrays.
+
+    Parameters
+    ----------
+    arraylist : sequence
+        A list of (masked) arrays. Each element of the sequence is first
+        converted to a masked array if needed. If a 2D array is passed as
+        argument, it is processed line by line.
+    dtype : {None, dtype}, optional
+        Data type descriptor.
+    shape : {None, integer}, optional
+        Number of records. If None, shape is defined from the shape of the
+        first array in the list.
+    formats : {None, sequence}, optional
+        Sequence of formats for each individual field. If None, the formats
+        will be autodetected by inspecting the fields and selecting the
+        highest dtype possible.
+    names : {None, sequence}, optional
+        Sequence of the names of each field.
+    fill_value : {None, sequence}, optional
+        Sequence of data to be used as filling values.
+
+    Notes
+    -----
+    Lists of tuples should be preferred over lists of lists for faster
+    processing.
+ + """ + datalist = [getdata(x) for x in arraylist] + masklist = [np.atleast_1d(getmaskarray(x)) for x in arraylist] + _array = recfromarrays(datalist, + dtype=dtype, shape=shape, formats=formats, + names=names, titles=titles, aligned=aligned, + byteorder=byteorder).view(mrecarray) + _array._mask.flat = list(zip(*masklist)) + if fill_value is not None: + _array.fill_value = fill_value + return _array + + +def fromrecords(reclist, dtype=None, shape=None, formats=None, names=None, + titles=None, aligned=False, byteorder=None, + fill_value=None, mask=nomask): + """ + Creates a MaskedRecords from a list of records. + + Parameters + ---------- + reclist : sequence + A list of records. Each element of the sequence is first converted + to a masked array if needed. If a 2D array is passed as argument, it is + processed line by line + dtype : {None, dtype}, optional + Data type descriptor. + shape : {None,int}, optional + Number of records. If None, ``shape`` is defined from the shape of the + first array in the list. + formats : {None, sequence}, optional + Sequence of formats for each individual field. If None, the formats will + be autodetected by inspecting the fields and selecting the highest dtype + possible. + names : {None, sequence}, optional + Sequence of the names of each field. + fill_value : {None, sequence}, optional + Sequence of data to be used as filling values. + mask : {nomask, sequence}, optional. + External mask to apply on the data. + + Notes + ----- + Lists of tuples should be preferred over lists of lists for faster processing. + + """ + # Grab the initial _fieldmask, if needed: + _mask = getattr(reclist, '_mask', None) + # Get the list of records. + if isinstance(reclist, ndarray): + # Make sure we don't have some hidden mask + if isinstance(reclist, MaskedArray): + reclist = reclist.filled().view(ndarray) + # Grab the initial dtype, just in case + if dtype is None: + dtype = reclist.dtype + reclist = reclist.tolist() + mrec = recfromrecords(reclist, dtype=dtype, shape=shape, formats=formats, + names=names, titles=titles, + aligned=aligned, byteorder=byteorder).view(mrecarray) + # Set the fill_value if needed + if fill_value is not None: + mrec.fill_value = fill_value + # Now, let's deal w/ the mask + if mask is not nomask: + mask = np.array(mask, copy=False) + maskrecordlength = len(mask.dtype) + if maskrecordlength: + mrec._mask.flat = mask + elif mask.ndim == 2: + mrec._mask.flat = [tuple(m) for m in mask] + else: + mrec.__setmask__(mask) + if _mask is not None: + mrec._mask[:] = _mask + return mrec + + +def _guessvartypes(arr): + """ + Tries to guess the dtypes of the str_ ndarray `arr`. + + Guesses by testing element-wise conversion. Returns a list of dtypes. + The array is first converted to ndarray. If the array is 2D, the test + is performed on the first line. An exception is raised if the file is + 3D or more. + + """ + vartypes = [] + arr = np.asarray(arr) + if arr.ndim == 2: + arr = arr[0] + elif arr.ndim > 2: + raise ValueError("The array should be 2D at most!") + # Start the conversion loop. + for f in arr: + try: + int(f) + except (ValueError, TypeError): + try: + float(f) + except (ValueError, TypeError): + try: + complex(f) + except (ValueError, TypeError): + vartypes.append(arr.dtype) + else: + vartypes.append(np.dtype(complex)) + else: + vartypes.append(np.dtype(float)) + else: + vartypes.append(np.dtype(int)) + return vartypes + + +def openfile(fname): + """ + Opens the file handle of file `fname`. 
+ + """ + # A file handle + if hasattr(fname, 'readline'): + return fname + # Try to open the file and guess its type + try: + f = open(fname) + except IOError as e: + raise IOError(f"No such file: '{fname}'") from e + if f.readline()[:2] != "\\x": + f.seek(0, 0) + return f + f.close() + raise NotImplementedError("Wow, binary file") + + +def fromtextfile(fname, delimitor=None, commentchar='#', missingchar='', + varnames=None, vartypes=None): + """ + Creates a mrecarray from data stored in the file `filename`. + + Parameters + ---------- + fname : {file name/handle} + Handle of an opened file. + delimitor : {None, string}, optional + Alphanumeric character used to separate columns in the file. + If None, any (group of) white spacestring(s) will be used. + commentchar : {'#', string}, optional + Alphanumeric character used to mark the start of a comment. + missingchar : {'', string}, optional + String indicating missing data, and used to create the masks. + varnames : {None, sequence}, optional + Sequence of the variable names. If None, a list will be created from + the first non empty line of the file. + vartypes : {None, sequence}, optional + Sequence of the variables dtypes. If None, it will be estimated from + the first non-commented line. + + + Ultra simple: the varnames are in the header, one line""" + # Try to open the file. + ftext = openfile(fname) + + # Get the first non-empty line as the varnames + while True: + line = ftext.readline() + firstline = line[:line.find(commentchar)].strip() + _varnames = firstline.split(delimitor) + if len(_varnames) > 1: + break + if varnames is None: + varnames = _varnames + + # Get the data. + _variables = masked_array([line.strip().split(delimitor) for line in ftext + if line[0] != commentchar and len(line) > 1]) + (_, nfields) = _variables.shape + ftext.close() + + # Try to guess the dtype. + if vartypes is None: + vartypes = _guessvartypes(_variables[0]) + else: + vartypes = [np.dtype(v) for v in vartypes] + if len(vartypes) != nfields: + msg = "Attempting to %i dtypes for %i fields!" + msg += " Reverting to default." + warnings.warn(msg % (len(vartypes), nfields), stacklevel=2) + vartypes = _guessvartypes(_variables[0]) + + # Construct the descriptor. + mdescr = [(n, f) for (n, f) in zip(varnames, vartypes)] + mfillv = [ma.default_fill_value(f) for f in vartypes] + + # Get the data and the mask. + # We just need a list of masked_arrays. It's easier to create it like that: + _mask = (_variables.T == missingchar) + _datalist = [masked_array(a, mask=m, dtype=t, fill_value=f) + for (a, m, t, f) in zip(_variables.T, _mask, vartypes, mfillv)] + + return fromarrays(_datalist, dtype=mdescr) + + +def addfield(mrecord, newfield, newfieldname=None): + """Adds a new field to the masked record array + + Uses `newfield` as data and `newfieldname` as name. If `newfieldname` + is None, the new field name is set to 'fi', where `i` is the number of + existing fields. + + """ + _data = mrecord._data + _mask = mrecord._mask + if newfieldname is None or newfieldname in reserved_fields: + newfieldname = 'f%i' % len(_data.dtype) + newfield = ma.array(newfield) + # Get the new data. 
+ # Create a new empty recarray + newdtype = np.dtype(_data.dtype.descr + [(newfieldname, newfield.dtype)]) + newdata = recarray(_data.shape, newdtype) + # Add the existing field + [newdata.setfield(_data.getfield(*f), *f) + for f in _data.dtype.fields.values()] + # Add the new field + newdata.setfield(newfield._data, *newdata.dtype.fields[newfieldname]) + newdata = newdata.view(MaskedRecords) + # Get the new mask + # Create a new empty recarray + newmdtype = np.dtype([(n, bool_) for n in newdtype.names]) + newmask = recarray(_data.shape, newmdtype) + # Add the old masks + [newmask.setfield(_mask.getfield(*f), *f) + for f in _mask.dtype.fields.values()] + # Add the mask of the new field + newmask.setfield(getmaskarray(newfield), + *newmask.dtype.fields[newfieldname]) + newdata._mask = newmask + return newdata diff --git a/MLPY/Lib/site-packages/numpy/ma/mrecords.pyi b/MLPY/Lib/site-packages/numpy/ma/mrecords.pyi new file mode 100644 index 0000000000000000000000000000000000000000..5fdf3928218da9939f0090931d8b2665cb0bc6e2 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/ma/mrecords.pyi @@ -0,0 +1,88 @@ +from typing import List, Any, TypeVar + +from numpy import dtype +from numpy.ma import MaskedArray + +__all__: List[str] + +# TODO: Set the `bound` to something more suitable once we +# have proper shape support +_ShapeType = TypeVar("_ShapeType", bound=Any) +_DType_co = TypeVar("_DType_co", bound=dtype[Any], covariant=True) + +class MaskedRecords(MaskedArray[_ShapeType, _DType_co]): + def __new__( + cls, + shape, + dtype=..., + buf=..., + offset=..., + strides=..., + formats=..., + names=..., + titles=..., + byteorder=..., + aligned=..., + mask=..., + hard_mask=..., + fill_value=..., + keep_mask=..., + copy=..., + **options, + ): ... + _mask: Any + _fill_value: Any + @property + def _data(self): ... + @property + def _fieldmask(self): ... + def __array_finalize__(self, obj): ... + def __len__(self): ... + def __getattribute__(self, attr): ... + def __setattr__(self, attr, val): ... + def __getitem__(self, indx): ... + def __setitem__(self, indx, value): ... + def view(self, dtype=..., type=...): ... + def harden_mask(self): ... + def soften_mask(self): ... + def copy(self): ... + def tolist(self, fill_value=...): ... + def __reduce__(self): ... + +mrecarray = MaskedRecords + +def fromarrays( + arraylist, + dtype=..., + shape=..., + formats=..., + names=..., + titles=..., + aligned=..., + byteorder=..., + fill_value=..., +): ... + +def fromrecords( + reclist, + dtype=..., + shape=..., + formats=..., + names=..., + titles=..., + aligned=..., + byteorder=..., + fill_value=..., + mask=..., +): ... + +def fromtextfile( + fname, + delimitor=..., + commentchar=..., + missingchar=..., + varnames=..., + vartypes=..., +): ... + +def addfield(mrecord, newfield, newfieldname=...): ... 
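For orientation, here is a minimal usage sketch of the mrecords module vendored above; the field names and values are invented for illustration and are not part of this diff:

# Minimal sketch: build a masked record array from per-field masked arrays,
# then append a column with addfield(). Names and data are illustrative only.
import numpy.ma as ma
from numpy.ma.mrecords import fromarrays, addfield

age = ma.array([25, 45, 63], mask=[0, 1, 0])
weight = ma.array([72.5, 81.0, 64.2], mask=[0, 0, 1])
rec = fromarrays([age, weight], names='age,weight')

print(rec.age)           # [25 -- 63]: field access goes through __getattribute__
print(rec[1])            # a single record whose 'age' field is masked

# addfield() returns a new MaskedRecords with the extra column appended.
rec2 = addfield(rec, ma.array([1, 2, 3]), newfieldname='rank')
print(rec2.dtype.names)  # ('age', 'weight', 'rank')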
diff --git a/MLPY/Lib/site-packages/numpy/ma/setup.py b/MLPY/Lib/site-packages/numpy/ma/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..9b7f75602e42e9bf3e3844e82f9e930eca14d20b --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/ma/setup.py @@ -0,0 +1,12 @@ +#!/usr/bin/env python3 +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('ma', parent_package, top_path) + config.add_subpackage('tests') + config.add_data_files('*.pyi') + return config + +if __name__ == "__main__": + from numpy.distutils.core import setup + config = configuration(top_path='').todict() + setup(**config) diff --git a/MLPY/Lib/site-packages/numpy/ma/tests/__init__.py b/MLPY/Lib/site-packages/numpy/ma/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/numpy/ma/tests/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/ma/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d0b78e16ebec344dce1e711f4882f4a25c1927f Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/ma/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/ma/tests/__pycache__/test_core.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/ma/tests/__pycache__/test_core.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..17ac80136062036b97b4f035efbb1f749e8e7fe2 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/ma/tests/__pycache__/test_core.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/ma/tests/__pycache__/test_deprecations.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/ma/tests/__pycache__/test_deprecations.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6756f9a5a370d8a5414faf59fbc2398a2606f026 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/ma/tests/__pycache__/test_deprecations.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/ma/tests/__pycache__/test_extras.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/ma/tests/__pycache__/test_extras.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f4f66084c84533a3d98f372ffecad9a85e63f160 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/ma/tests/__pycache__/test_extras.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/ma/tests/__pycache__/test_mrecords.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/ma/tests/__pycache__/test_mrecords.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ab17ca0c05a7ee7a2b18e7c8382f08eaa4db1d8 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/ma/tests/__pycache__/test_mrecords.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/ma/tests/__pycache__/test_old_ma.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/ma/tests/__pycache__/test_old_ma.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e138104cd8dcd66d247db02df2903f63d669e066 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/ma/tests/__pycache__/test_old_ma.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/ma/tests/__pycache__/test_regression.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/ma/tests/__pycache__/test_regression.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..58fe97f68e0bb1af0e7e35c757dd4b36111a3aa8 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/ma/tests/__pycache__/test_regression.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/ma/tests/__pycache__/test_subclassing.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/ma/tests/__pycache__/test_subclassing.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a810d8be01a08e9826d8d8b5265d473eda6c0c5 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/ma/tests/__pycache__/test_subclassing.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/ma/tests/test_core.py b/MLPY/Lib/site-packages/numpy/ma/tests/test_core.py new file mode 100644 index 0000000000000000000000000000000000000000..61c9b420ae119db344ba70cd4026c5bbade3a6e7 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/ma/tests/test_core.py @@ -0,0 +1,5364 @@ +# pylint: disable-msg=W0400,W0511,W0611,W0612,W0614,R0201,E1102 +"""Tests suite for MaskedArray & subclassing. + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu +""" +__author__ = "Pierre GF Gerard-Marchant" + +import sys +import warnings +import operator +import itertools +import textwrap +import pytest + +from functools import reduce + + +import numpy as np +import numpy.ma.core +import numpy.core.fromnumeric as fromnumeric +import numpy.core.umath as umath +from numpy.testing import ( + assert_raises, assert_warns, suppress_warnings + ) +from numpy import ndarray +from numpy.compat import asbytes +from numpy.ma.testutils import ( + assert_, assert_array_equal, assert_equal, assert_almost_equal, + assert_equal_records, fail_if_equal, assert_not_equal, + assert_mask_equal + ) +from numpy.ma.core import ( + MAError, MaskError, MaskType, MaskedArray, abs, absolute, add, all, + allclose, allequal, alltrue, angle, anom, arange, arccos, arccosh, arctan2, + arcsin, arctan, argsort, array, asarray, choose, concatenate, + conjugate, cos, cosh, count, default_fill_value, diag, divide, doc_note, + empty, empty_like, equal, exp, flatten_mask, filled, fix_invalid, + flatten_structured_array, fromflex, getmask, getmaskarray, greater, + greater_equal, identity, inner, isMaskedArray, less, less_equal, log, + log10, make_mask, make_mask_descr, mask_or, masked, masked_array, + masked_equal, masked_greater, masked_greater_equal, masked_inside, + masked_less, masked_less_equal, masked_not_equal, masked_outside, + masked_print_option, masked_values, masked_where, max, maximum, + maximum_fill_value, min, minimum, minimum_fill_value, mod, multiply, + mvoid, nomask, not_equal, ones, outer, power, product, put, putmask, + ravel, repeat, reshape, resize, shape, sin, sinh, sometrue, sort, sqrt, + subtract, sum, take, tan, tanh, transpose, where, zeros, + ) +from numpy.compat import pickle + +pi = np.pi + + +suppress_copy_mask_on_assignment = suppress_warnings() +suppress_copy_mask_on_assignment.filter( + numpy.ma.core.MaskedArrayFutureWarning, + "setting an item on a masked array which has a shared mask will not copy") + + +# For parametrized numeric testing +num_dts = [np.dtype(dt_) for dt_ in '?bhilqBHILQefdgFD'] +num_ids = [dt_.char for dt_ in num_dts] + + +class TestMaskedArray: + # Base test class for MaskedArrays. + + def setup(self): + # Base data definition. + x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) + y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) + a10 = 10. 
+ m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] + xm = masked_array(x, mask=m1) + ym = masked_array(y, mask=m2) + z = np.array([-.5, 0., .5, .8]) + zm = masked_array(z, mask=[0, 1, 0, 0]) + xf = np.where(m1, 1e+20, x) + xm.set_fill_value(1e+20) + self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf) + + def test_basicattributes(self): + # Tests some basic array attributes. + a = array([1, 3, 2]) + b = array([1, 3, 2], mask=[1, 0, 1]) + assert_equal(a.ndim, 1) + assert_equal(b.ndim, 1) + assert_equal(a.size, 3) + assert_equal(b.size, 3) + assert_equal(a.shape, (3,)) + assert_equal(b.shape, (3,)) + + def test_basic0d(self): + # Checks masking a scalar + x = masked_array(0) + assert_equal(str(x), '0') + x = masked_array(0, mask=True) + assert_equal(str(x), str(masked_print_option)) + x = masked_array(0, mask=False) + assert_equal(str(x), '0') + x = array(0, mask=1) + assert_(x.filled().dtype is x._data.dtype) + + def test_basic1d(self): + # Test of basic array creation and properties in 1 dimension. + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + assert_(not isMaskedArray(x)) + assert_(isMaskedArray(xm)) + assert_((xm - ym).filled(0).any()) + fail_if_equal(xm.mask.astype(int), ym.mask.astype(int)) + s = x.shape + assert_equal(np.shape(xm), s) + assert_equal(xm.shape, s) + assert_equal(xm.dtype, x.dtype) + assert_equal(zm.dtype, z.dtype) + assert_equal(xm.size, reduce(lambda x, y:x * y, s)) + assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1)) + assert_array_equal(xm, xf) + assert_array_equal(filled(xm, 1.e20), xf) + assert_array_equal(x, xm) + + def test_basic2d(self): + # Test of basic array creation and properties in 2 dimensions. + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + for s in [(4, 3), (6, 2)]: + x.shape = s + y.shape = s + xm.shape = s + ym.shape = s + xf.shape = s + + assert_(not isMaskedArray(x)) + assert_(isMaskedArray(xm)) + assert_equal(shape(xm), s) + assert_equal(xm.shape, s) + assert_equal(xm.size, reduce(lambda x, y:x * y, s)) + assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1)) + assert_equal(xm, xf) + assert_equal(filled(xm, 1.e20), xf) + assert_equal(x, xm) + + def test_concatenate_basic(self): + # Tests concatenations. + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + # basic concatenation + assert_equal(np.concatenate((x, y)), concatenate((xm, ym))) + assert_equal(np.concatenate((x, y)), concatenate((x, y))) + assert_equal(np.concatenate((x, y)), concatenate((xm, y))) + assert_equal(np.concatenate((x, y, x)), concatenate((x, ym, x))) + + def test_concatenate_alongaxis(self): + # Tests concatenations. + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + # Concatenation along an axis + s = (3, 4) + x.shape = y.shape = xm.shape = ym.shape = s + assert_equal(xm.mask, np.reshape(m1, s)) + assert_equal(ym.mask, np.reshape(m2, s)) + xmym = concatenate((xm, ym), 1) + assert_equal(np.concatenate((x, y), 1), xmym) + assert_equal(np.concatenate((xm.mask, ym.mask), 1), xmym._mask) + + x = zeros(2) + y = array(ones(2), mask=[False, True]) + z = concatenate((x, y)) + assert_array_equal(z, [0, 0, 1, 1]) + assert_array_equal(z.mask, [False, False, False, True]) + z = concatenate((y, x)) + assert_array_equal(z, [1, 1, 0, 0]) + assert_array_equal(z.mask, [False, True, False, False]) + + def test_concatenate_flexible(self): + # Tests the concatenation on flexible arrays. 
+ data = masked_array(list(zip(np.random.rand(10), + np.arange(10))), + dtype=[('a', float), ('b', int)]) + + test = concatenate([data[:5], data[5:]]) + assert_equal_records(test, data) + + def test_creation_ndmin(self): + # Check the use of ndmin + x = array([1, 2, 3], mask=[1, 0, 0], ndmin=2) + assert_equal(x.shape, (1, 3)) + assert_equal(x._data, [[1, 2, 3]]) + assert_equal(x._mask, [[1, 0, 0]]) + + def test_creation_ndmin_from_maskedarray(self): + # Make sure we're not losing the original mask w/ ndmin + x = array([1, 2, 3]) + x[-1] = masked + xx = array(x, ndmin=2, dtype=float) + assert_equal(x.shape, x._mask.shape) + assert_equal(xx.shape, xx._mask.shape) + + def test_creation_maskcreation(self): + # Tests how masks are initialized at the creation of Maskedarrays. + data = arange(24, dtype=float) + data[[3, 6, 15]] = masked + dma_1 = MaskedArray(data) + assert_equal(dma_1.mask, data.mask) + dma_2 = MaskedArray(dma_1) + assert_equal(dma_2.mask, dma_1.mask) + dma_3 = MaskedArray(dma_1, mask=[1, 0, 0, 0] * 6) + fail_if_equal(dma_3.mask, dma_1.mask) + + x = array([1, 2, 3], mask=True) + assert_equal(x._mask, [True, True, True]) + x = array([1, 2, 3], mask=False) + assert_equal(x._mask, [False, False, False]) + y = array([1, 2, 3], mask=x._mask, copy=False) + assert_(np.may_share_memory(x.mask, y.mask)) + y = array([1, 2, 3], mask=x._mask, copy=True) + assert_(not np.may_share_memory(x.mask, y.mask)) + + def test_masked_singleton_array_creation_warns(self): + # The first works, but should not (ideally), there may be no way + # to solve this, however, as long as `np.ma.masked` is an ndarray. + np.array(np.ma.masked) + with pytest.warns(UserWarning): + # Tries to create a float array, using `float(np.ma.masked)`. + # We may want to define this is invalid behaviour in the future! + # (requiring np.ma.masked to be a known NumPy scalar probably + # with a DType.) + np.array([3., np.ma.masked]) + + def test_creation_with_list_of_maskedarrays(self): + # Tests creating a masked array from a list of masked arrays. 
+ x = array(np.arange(5), mask=[1, 0, 0, 0, 0]) + data = array((x, x[::-1])) + assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]]) + assert_equal(data._mask, [[1, 0, 0, 0, 0], [0, 0, 0, 0, 1]]) + + x.mask = nomask + data = array((x, x[::-1])) + assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]]) + assert_(data.mask is nomask) + + def test_creation_with_list_of_maskedarrays_no_bool_cast(self): + # Tests the regression in gh-18551 + masked_str = np.ma.masked_array(['a', 'b'], mask=[True, False]) + normal_int = np.arange(2) + res = np.ma.asarray([masked_str, normal_int], dtype="U21") + assert_array_equal(res.mask, [[True, False], [False, False]]) + + # The above only failed due a long chain of oddity, try also with + # an object array that cannot be converted to bool always: + class NotBool(): + def __bool__(self): + raise ValueError("not a bool!") + masked_obj = np.ma.masked_array([NotBool(), 'b'], mask=[True, False]) + # Check that the NotBool actually fails like we would expect: + with pytest.raises(ValueError, match="not a bool!"): + np.asarray([masked_obj], dtype=bool) + + res = np.ma.asarray([masked_obj, normal_int]) + assert_array_equal(res.mask, [[True, False], [False, False]]) + + def test_creation_from_ndarray_with_padding(self): + x = np.array([('A', 0)], dtype={'names':['f0','f1'], + 'formats':['S4','i8'], + 'offsets':[0,8]}) + array(x) # used to fail due to 'V' padding field in x.dtype.descr + + def test_unknown_keyword_parameter(self): + with pytest.raises(TypeError, match="unexpected keyword argument"): + MaskedArray([1, 2, 3], maks=[0, 1, 0]) # `mask` is misspelled. + + def test_asarray(self): + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + xm.fill_value = -9999 + xm._hardmask = True + xmm = asarray(xm) + assert_equal(xmm._data, xm._data) + assert_equal(xmm._mask, xm._mask) + assert_equal(xmm.fill_value, xm.fill_value) + assert_equal(xmm._hardmask, xm._hardmask) + + def test_asarray_default_order(self): + # See Issue #6646 + m = np.eye(3).T + assert_(not m.flags.c_contiguous) + + new_m = asarray(m) + assert_(new_m.flags.c_contiguous) + + def test_asarray_enforce_order(self): + # See Issue #6646 + m = np.eye(3).T + assert_(not m.flags.c_contiguous) + + new_m = asarray(m, order='C') + assert_(new_m.flags.c_contiguous) + + def test_fix_invalid(self): + # Checks fix_invalid. + with np.errstate(invalid='ignore'): + data = masked_array([np.nan, 0., 1.], mask=[0, 0, 1]) + data_fixed = fix_invalid(data) + assert_equal(data_fixed._data, [data.fill_value, 0., 1.]) + assert_equal(data_fixed._mask, [1., 0., 1.]) + + def test_maskedelement(self): + # Test of masked element + x = arange(6) + x[1] = masked + assert_(str(masked) == '--') + assert_(x[1] is masked) + assert_equal(filled(x[1], 0), 0) + + def test_set_element_as_object(self): + # Tests setting elements with object + a = empty(1, dtype=object) + x = (1, 2, 3, 4, 5) + a[0] = x + assert_equal(a[0], x) + assert_(a[0] is x) + + import datetime + dt = datetime.datetime.now() + a[0] = dt + assert_(a[0] is dt) + + def test_indexing(self): + # Tests conversions and indexing + x1 = np.array([1, 2, 4, 3]) + x2 = array(x1, mask=[1, 0, 0, 0]) + x3 = array(x1, mask=[0, 1, 0, 1]) + x4 = array(x1) + # test conversion to strings + str(x2) # raises? + repr(x2) # raises? 
+ assert_equal(np.sort(x1), sort(x2, endwith=False)) + # tests of indexing + assert_(type(x2[1]) is type(x1[1])) + assert_(x1[1] == x2[1]) + assert_(x2[0] is masked) + assert_equal(x1[2], x2[2]) + assert_equal(x1[2:5], x2[2:5]) + assert_equal(x1[:], x2[:]) + assert_equal(x1[1:], x3[1:]) + x1[2] = 9 + x2[2] = 9 + assert_equal(x1, x2) + x1[1:3] = 99 + x2[1:3] = 99 + assert_equal(x1, x2) + x2[1] = masked + assert_equal(x1, x2) + x2[1:3] = masked + assert_equal(x1, x2) + x2[:] = x1 + x2[1] = masked + assert_(allequal(getmask(x2), array([0, 1, 0, 0]))) + x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) + assert_(allequal(getmask(x3), array([0, 1, 1, 0]))) + x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) + assert_(allequal(getmask(x4), array([0, 1, 1, 0]))) + assert_(allequal(x4, array([1, 2, 3, 4]))) + x1 = np.arange(5) * 1.0 + x2 = masked_values(x1, 3.0) + assert_equal(x1, x2) + assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask)) + assert_equal(3.0, x2.fill_value) + x1 = array([1, 'hello', 2, 3], object) + x2 = np.array([1, 'hello', 2, 3], object) + s1 = x1[1] + s2 = x2[1] + assert_equal(type(s2), str) + assert_equal(type(s1), str) + assert_equal(s1, s2) + assert_(x1[1:1].shape == (0,)) + + @suppress_copy_mask_on_assignment + def test_copy(self): + # Tests of some subtle points of copying and sizing. + n = [0, 0, 1, 0, 0] + m = make_mask(n) + m2 = make_mask(m) + assert_(m is m2) + m3 = make_mask(m, copy=True) + assert_(m is not m3) + + x1 = np.arange(5) + y1 = array(x1, mask=m) + assert_equal(y1._data.__array_interface__, x1.__array_interface__) + assert_(allequal(x1, y1.data)) + assert_equal(y1._mask.__array_interface__, m.__array_interface__) + + y1a = array(y1) + # Default for masked array is not to copy; see gh-10318. + assert_(y1a._data.__array_interface__ == + y1._data.__array_interface__) + assert_(y1a._mask.__array_interface__ == + y1._mask.__array_interface__) + + y2 = array(x1, mask=m3) + assert_(y2._data.__array_interface__ == x1.__array_interface__) + assert_(y2._mask.__array_interface__ == m3.__array_interface__) + assert_(y2[2] is masked) + y2[2] = 9 + assert_(y2[2] is not masked) + assert_(y2._mask.__array_interface__ == m3.__array_interface__) + assert_(allequal(y2.mask, 0)) + + y2a = array(x1, mask=m, copy=1) + assert_(y2a._data.__array_interface__ != x1.__array_interface__) + #assert_( y2a._mask is not m) + assert_(y2a._mask.__array_interface__ != m.__array_interface__) + assert_(y2a[2] is masked) + y2a[2] = 9 + assert_(y2a[2] is not masked) + #assert_( y2a._mask is not m) + assert_(y2a._mask.__array_interface__ != m.__array_interface__) + assert_(allequal(y2a.mask, 0)) + + y3 = array(x1 * 1.0, mask=m) + assert_(filled(y3).dtype is (x1 * 1.0).dtype) + + x4 = arange(4) + x4[2] = masked + y4 = resize(x4, (8,)) + assert_equal(concatenate([x4, x4]), y4) + assert_equal(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]) + y5 = repeat(x4, (2, 2, 2, 2), axis=0) + assert_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3]) + y6 = repeat(x4, 2, axis=0) + assert_equal(y5, y6) + y7 = x4.repeat((2, 2, 2, 2), axis=0) + assert_equal(y5, y7) + y8 = x4.repeat(2, 0) + assert_equal(y5, y8) + + y9 = x4.copy() + assert_equal(y9._data, x4._data) + assert_equal(y9._mask, x4._mask) + + x = masked_array([1, 2, 3], mask=[0, 1, 0]) + # Copy is False by default + y = masked_array(x) + assert_equal(y._data.ctypes.data, x._data.ctypes.data) + assert_equal(y._mask.ctypes.data, x._mask.ctypes.data) + y = masked_array(x, copy=True) + assert_not_equal(y._data.ctypes.data, x._data.ctypes.data) + 
assert_not_equal(y._mask.ctypes.data, x._mask.ctypes.data) + + def test_copy_0d(self): + # gh-9430 + x = np.ma.array(43, mask=True) + xc = x.copy() + assert_equal(xc.mask, True) + + def test_copy_on_python_builtins(self): + # Tests copy works on python builtins (issue#8019) + assert_(isMaskedArray(np.ma.copy([1,2,3]))) + assert_(isMaskedArray(np.ma.copy((1,2,3)))) + + def test_copy_immutable(self): + # Tests that the copy method is immutable, GitHub issue #5247 + a = np.ma.array([1, 2, 3]) + b = np.ma.array([4, 5, 6]) + a_copy_method = a.copy + b.copy + assert_equal(a_copy_method(), [1, 2, 3]) + + def test_deepcopy(self): + from copy import deepcopy + a = array([0, 1, 2], mask=[False, True, False]) + copied = deepcopy(a) + assert_equal(copied.mask, a.mask) + assert_not_equal(id(a._mask), id(copied._mask)) + + copied[1] = 1 + assert_equal(copied.mask, [0, 0, 0]) + assert_equal(a.mask, [0, 1, 0]) + + copied = deepcopy(a) + assert_equal(copied.mask, a.mask) + copied.mask[1] = False + assert_equal(copied.mask, [0, 0, 0]) + assert_equal(a.mask, [0, 1, 0]) + + def test_format(self): + a = array([0, 1, 2], mask=[False, True, False]) + assert_equal(format(a), "[0 -- 2]") + assert_equal(format(masked), "--") + assert_equal(format(masked, ""), "--") + + # Postponed from PR #15410, perhaps address in the future. + # assert_equal(format(masked, " >5"), " --") + # assert_equal(format(masked, " <5"), "-- ") + + # Expect a FutureWarning for using format_spec with MaskedElement + with assert_warns(FutureWarning): + with_format_string = format(masked, " >5") + assert_equal(with_format_string, "--") + + def test_str_repr(self): + a = array([0, 1, 2], mask=[False, True, False]) + assert_equal(str(a), '[0 -- 2]') + assert_equal( + repr(a), + textwrap.dedent('''\ + masked_array(data=[0, --, 2], + mask=[False, True, False], + fill_value=999999)''') + ) + + # arrays with a continuation + a = np.ma.arange(2000) + a[1:50] = np.ma.masked + assert_equal( + repr(a), + textwrap.dedent('''\ + masked_array(data=[0, --, --, ..., 1997, 1998, 1999], + mask=[False, True, True, ..., False, False, False], + fill_value=999999)''') + ) + + # line-wrapped 1d arrays are correctly aligned + a = np.ma.arange(20) + assert_equal( + repr(a), + textwrap.dedent('''\ + masked_array(data=[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 14, 15, 16, 17, 18, 19], + mask=False, + fill_value=999999)''') + ) + + # 2d arrays cause wrapping + a = array([[1, 2, 3], [4, 5, 6]], dtype=np.int8) + a[1,1] = np.ma.masked + assert_equal( + repr(a), + textwrap.dedent('''\ + masked_array( + data=[[1, 2, 3], + [4, --, 6]], + mask=[[False, False, False], + [False, True, False]], + fill_value=999999, + dtype=int8)''') + ) + + # but not it they're a row vector + assert_equal( + repr(a[:1]), + textwrap.dedent('''\ + masked_array(data=[[1, 2, 3]], + mask=[[False, False, False]], + fill_value=999999, + dtype=int8)''') + ) + + # dtype=int is implied, so not shown + assert_equal( + repr(a.astype(int)), + textwrap.dedent('''\ + masked_array( + data=[[1, 2, 3], + [4, --, 6]], + mask=[[False, False, False], + [False, True, False]], + fill_value=999999)''') + ) + + def test_str_repr_legacy(self): + oldopts = np.get_printoptions() + np.set_printoptions(legacy='1.13') + try: + a = array([0, 1, 2], mask=[False, True, False]) + assert_equal(str(a), '[0 -- 2]') + assert_equal(repr(a), 'masked_array(data = [0 -- 2],\n' + ' mask = [False True False],\n' + ' fill_value = 999999)\n') + + a = np.ma.arange(2000) + a[1:50] = np.ma.masked + assert_equal( + repr(a), + 
'masked_array(data = [0 -- -- ..., 1997 1998 1999],\n' + ' mask = [False True True ..., False False False],\n' + ' fill_value = 999999)\n' + ) + finally: + np.set_printoptions(**oldopts) + + def test_0d_unicode(self): + u = u'caf\xe9' + utype = type(u) + + arr_nomask = np.ma.array(u) + arr_masked = np.ma.array(u, mask=True) + + assert_equal(utype(arr_nomask), u) + assert_equal(utype(arr_masked), u'--') + + def test_pickling(self): + # Tests pickling + for dtype in (int, float, str, object): + a = arange(10).astype(dtype) + a.fill_value = 999 + + masks = ([0, 0, 0, 1, 0, 1, 0, 1, 0, 1], # partially masked + True, # Fully masked + False) # Fully unmasked + + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + for mask in masks: + a.mask = mask + a_pickled = pickle.loads(pickle.dumps(a, protocol=proto)) + assert_equal(a_pickled._mask, a._mask) + assert_equal(a_pickled._data, a._data) + if dtype in (object, int): + assert_equal(a_pickled.fill_value, 999) + else: + assert_equal(a_pickled.fill_value, dtype(999)) + assert_array_equal(a_pickled.mask, mask) + + def test_pickling_subbaseclass(self): + # Test pickling w/ a subclass of ndarray + x = np.array([(1.0, 2), (3.0, 4)], + dtype=[('x', float), ('y', int)]).view(np.recarray) + a = masked_array(x, mask=[(True, False), (False, True)]) + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + a_pickled = pickle.loads(pickle.dumps(a, protocol=proto)) + assert_equal(a_pickled._mask, a._mask) + assert_equal(a_pickled, a) + assert_(isinstance(a_pickled._data, np.recarray)) + + def test_pickling_maskedconstant(self): + # Test pickling MaskedConstant + mc = np.ma.masked + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + mc_pickled = pickle.loads(pickle.dumps(mc, protocol=proto)) + assert_equal(mc_pickled._baseclass, mc._baseclass) + assert_equal(mc_pickled._mask, mc._mask) + assert_equal(mc_pickled._data, mc._data) + + def test_pickling_wstructured(self): + # Tests pickling w/ structured array + a = array([(1, 1.), (2, 2.)], mask=[(0, 0), (0, 1)], + dtype=[('a', int), ('b', float)]) + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + a_pickled = pickle.loads(pickle.dumps(a, protocol=proto)) + assert_equal(a_pickled._mask, a._mask) + assert_equal(a_pickled, a) + + def test_pickling_keepalignment(self): + # Tests pickling w/ F_CONTIGUOUS arrays + a = arange(10) + a.shape = (-1, 2) + b = a.T + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + test = pickle.loads(pickle.dumps(b, protocol=proto)) + assert_equal(test, b) + + def test_single_element_subscript(self): + # Tests single element subscripts of Maskedarrays. + a = array([1, 3, 2]) + b = array([1, 3, 2], mask=[1, 0, 1]) + assert_equal(a[0].shape, ()) + assert_equal(b[0].shape, ()) + assert_equal(b[1].shape, ()) + + def test_topython(self): + # Tests some communication issues with Python. + assert_equal(1, int(array(1))) + assert_equal(1.0, float(array(1))) + assert_equal(1, int(array([[[1]]]))) + assert_equal(1.0, float(array([[1]]))) + assert_raises(TypeError, float, array([1, 1])) + + with suppress_warnings() as sup: + sup.filter(UserWarning, 'Warning: converting a masked element') + assert_(np.isnan(float(array([1], mask=[1])))) + + a = array([1, 2, 3], mask=[1, 0, 0]) + assert_raises(TypeError, lambda: float(a)) + assert_equal(float(a[-1]), 3.) 
+ assert_(np.isnan(float(a[0]))) + assert_raises(TypeError, int, a) + assert_equal(int(a[-1]), 3) + assert_raises(MAError, lambda:int(a[0])) + + def test_oddfeatures_1(self): + # Test of other odd features + x = arange(20) + x = x.reshape(4, 5) + x.flat[5] = 12 + assert_(x[1, 0] == 12) + z = x + 10j * x + assert_equal(z.real, x) + assert_equal(z.imag, 10 * x) + assert_equal((z * conjugate(z)).real, 101 * x * x) + z.imag[...] = 0.0 + + x = arange(10) + x[3] = masked + assert_(str(x[3]) == str(masked)) + c = x >= 8 + assert_(count(where(c, masked, masked)) == 0) + assert_(shape(where(c, masked, masked)) == c.shape) + + z = masked_where(c, x) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is not masked) + assert_(z[7] is not masked) + assert_(z[8] is masked) + assert_(z[9] is masked) + assert_equal(x, z) + + def test_oddfeatures_2(self): + # Tests some more features. + x = array([1., 2., 3., 4., 5.]) + c = array([1, 1, 1, 0, 0]) + x[2] = masked + z = where(c, x, -x) + assert_equal(z, [1., 2., 0., -4., -5]) + c[0] = masked + z = where(c, x, -x) + assert_equal(z, [1., 2., 0., -4., -5]) + assert_(z[0] is masked) + assert_(z[1] is not masked) + assert_(z[2] is masked) + + @suppress_copy_mask_on_assignment + def test_oddfeatures_3(self): + # Tests some generic features + atest = array([10], mask=True) + btest = array([20]) + idx = atest.mask + atest[idx] = btest[idx] + assert_equal(atest, [20]) + + def test_filled_with_object_dtype(self): + a = np.ma.masked_all(1, dtype='O') + assert_equal(a.filled('x')[0], 'x') + + def test_filled_with_flexible_dtype(self): + # Test filled w/ flexible dtype + flexi = array([(1, 1, 1)], + dtype=[('i', int), ('s', '|S8'), ('f', float)]) + flexi[0] = masked + assert_equal(flexi.filled(), + np.array([(default_fill_value(0), + default_fill_value('0'), + default_fill_value(0.),)], dtype=flexi.dtype)) + flexi[0] = masked + assert_equal(flexi.filled(1), + np.array([(1, '1', 1.)], dtype=flexi.dtype)) + + def test_filled_with_mvoid(self): + # Test filled w/ mvoid + ndtype = [('a', int), ('b', float)] + a = mvoid((1, 2.), mask=[(0, 1)], dtype=ndtype) + # Filled using default + test = a.filled() + assert_equal(tuple(test), (1, default_fill_value(1.))) + # Explicit fill_value + test = a.filled((-1, -1)) + assert_equal(tuple(test), (1, -1)) + # Using predefined filling values + a.fill_value = (-999, -999) + assert_equal(tuple(a.filled()), (1, -999)) + + def test_filled_with_nested_dtype(self): + # Test filled w/ nested dtype + ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])] + a = array([(1, (1, 1)), (2, (2, 2))], + mask=[(0, (1, 0)), (0, (0, 1))], dtype=ndtype) + test = a.filled(0) + control = np.array([(1, (0, 1)), (2, (2, 0))], dtype=ndtype) + assert_equal(test, control) + + test = a['B'].filled(0) + control = np.array([(0, 1), (2, 0)], dtype=a['B'].dtype) + assert_equal(test, control) + + # test if mask gets set correctly (see #6760) + Z = numpy.ma.zeros(2, numpy.dtype([("A", "(2,2)i1,(2,2)i1", (2,2))])) + assert_equal(Z.data.dtype, numpy.dtype([('A', [('f0', 'i1', (2, 2)), + ('f1', 'i1', (2, 2))], (2, 2))])) + assert_equal(Z.mask.dtype, numpy.dtype([('A', [('f0', '?', (2, 2)), + ('f1', '?', (2, 2))], (2, 2))])) + + def test_filled_with_f_order(self): + # Test filled w/ F-contiguous array + a = array(np.array([(0, 1, 2), (4, 5, 6)], order='F'), + mask=np.array([(0, 0, 1), (1, 0, 0)], order='F'), + order='F') # this is currently ignored + assert_(a.flags['F_CONTIGUOUS']) + assert_(a.filled(0).flags['F_CONTIGUOUS']) + + def 
test_optinfo_propagation(self): + # Checks that _optinfo dictionary isn't back-propagated + x = array([1, 2, 3, ], dtype=float) + x._optinfo['info'] = '???' + y = x.copy() + assert_equal(y._optinfo['info'], '???') + y._optinfo['info'] = '!!!' + assert_equal(x._optinfo['info'], '???') + + def test_optinfo_forward_propagation(self): + a = array([1,2,2,4]) + a._optinfo["key"] = "value" + assert_equal(a._optinfo["key"], (a == 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a != 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a > 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a >= 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a <= 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a + 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a - 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a * 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a / 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], a[:2]._optinfo["key"]) + assert_equal(a._optinfo["key"], a[[0,0,2]]._optinfo["key"]) + assert_equal(a._optinfo["key"], np.exp(a)._optinfo["key"]) + assert_equal(a._optinfo["key"], np.abs(a)._optinfo["key"]) + assert_equal(a._optinfo["key"], array(a, copy=True)._optinfo["key"]) + assert_equal(a._optinfo["key"], np.zeros_like(a)._optinfo["key"]) + + def test_fancy_printoptions(self): + # Test printing a masked array w/ fancy dtype. + fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) + test = array([(1, (2, 3.0)), (4, (5, 6.0))], + mask=[(1, (0, 1)), (0, (1, 0))], + dtype=fancydtype) + control = "[(--, (2, --)) (4, (--, 6.0))]" + assert_equal(str(test), control) + + # Test 0-d array with multi-dimensional dtype + t_2d0 = masked_array(data = (0, [[0.0, 0.0, 0.0], + [0.0, 0.0, 0.0]], + 0.0), + mask = (False, [[True, False, True], + [False, False, True]], + False), + dtype = "int, (2,3)float, float") + control = "(0, [[--, 0.0, --], [0.0, 0.0, --]], 0.0)" + assert_equal(str(t_2d0), control) + + def test_flatten_structured_array(self): + # Test flatten_structured_array on arrays + # On ndarray + ndtype = [('a', int), ('b', float)] + a = np.array([(1, 1), (2, 2)], dtype=ndtype) + test = flatten_structured_array(a) + control = np.array([[1., 1.], [2., 2.]], dtype=float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + # On masked_array + a = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) + test = flatten_structured_array(a) + control = array([[1., 1.], [2., 2.]], + mask=[[0, 1], [1, 0]], dtype=float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + assert_equal(test.mask, control.mask) + # On masked array with nested structure + ndtype = [('a', int), ('b', [('ba', int), ('bb', float)])] + a = array([(1, (1, 1.1)), (2, (2, 2.2))], + mask=[(0, (1, 0)), (1, (0, 1))], dtype=ndtype) + test = flatten_structured_array(a) + control = array([[1., 1., 1.1], [2., 2., 2.2]], + mask=[[0, 1, 0], [1, 0, 1]], dtype=float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + assert_equal(test.mask, control.mask) + # Keeping the initial shape + ndtype = [('a', int), ('b', float)] + a = np.array([[(1, 1), ], [(2, 2), ]], dtype=ndtype) + test = flatten_structured_array(a) + control = np.array([[[1., 1.], ], [[2., 2.], ]], dtype=float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + + def test_void0d(self): + # Test creating a mvoid object + ndtype = [('a', int), ('b', int)] + a = np.array([(1, 2,)], dtype=ndtype)[0] + f = mvoid(a) + 
assert_(isinstance(f, mvoid)) + + a = masked_array([(1, 2)], mask=[(1, 0)], dtype=ndtype)[0] + assert_(isinstance(a, mvoid)) + + a = masked_array([(1, 2), (1, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype) + f = mvoid(a._data[0], a._mask[0]) + assert_(isinstance(f, mvoid)) + + def test_mvoid_getitem(self): + # Test mvoid.__getitem__ + ndtype = [('a', int), ('b', int)] + a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)], + dtype=ndtype) + # w/o mask + f = a[0] + assert_(isinstance(f, mvoid)) + assert_equal((f[0], f['a']), (1, 1)) + assert_equal(f['b'], 2) + # w/ mask + f = a[1] + assert_(isinstance(f, mvoid)) + assert_(f[0] is masked) + assert_(f['a'] is masked) + assert_equal(f[1], 4) + + # exotic dtype + A = masked_array(data=[([0,1],)], + mask=[([True, False],)], + dtype=[("A", ">i2", (2,))]) + assert_equal(A[0]["A"], A["A"][0]) + assert_equal(A[0]["A"], masked_array(data=[0, 1], + mask=[True, False], dtype=">i2")) + + def test_mvoid_iter(self): + # Test iteration on __getitem__ + ndtype = [('a', int), ('b', int)] + a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)], + dtype=ndtype) + # w/o mask + assert_equal(list(a[0]), [1, 2]) + # w/ mask + assert_equal(list(a[1]), [masked, 4]) + + def test_mvoid_print(self): + # Test printing a mvoid + mx = array([(1, 1), (2, 2)], dtype=[('a', int), ('b', int)]) + assert_equal(str(mx[0]), "(1, 1)") + mx['b'][0] = masked + ini_display = masked_print_option._display + masked_print_option.set_display("-X-") + try: + assert_equal(str(mx[0]), "(1, -X-)") + assert_equal(repr(mx[0]), "(1, -X-)") + finally: + masked_print_option.set_display(ini_display) + + # also check if there are object datatypes (see gh-7493) + mx = array([(1,), (2,)], dtype=[('a', 'O')]) + assert_equal(str(mx[0]), "(1,)") + + def test_mvoid_multidim_print(self): + + # regression test for gh-6019 + t_ma = masked_array(data = [([1, 2, 3],)], + mask = [([False, True, False],)], + fill_value = ([999999, 999999, 999999],), + dtype = [('a', ' 1: + assert_equal(np.concatenate((x, y), 1), concatenate((xm, ym), 1)) + assert_equal(np.add.reduce(x, 1), add.reduce(x, 1)) + assert_equal(np.sum(x, 1), sum(x, 1)) + assert_equal(np.product(x, 1), product(x, 1)) + + def test_binops_d2D(self): + # Test binary operations on 2D data + a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]]) + b = array([[2., 3.], [4., 5.], [6., 7.]]) + + test = a * b + control = array([[2., 3.], [2., 2.], [3., 3.]], + mask=[[0, 0], [1, 1], [1, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + test = b * a + control = array([[2., 3.], [4., 5.], [6., 7.]], + mask=[[0, 0], [1, 1], [1, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + a = array([[1.], [2.], [3.]]) + b = array([[2., 3.], [4., 5.], [6., 7.]], + mask=[[0, 0], [0, 0], [0, 1]]) + test = a * b + control = array([[2, 3], [8, 10], [18, 3]], + mask=[[0, 0], [0, 0], [0, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + test = b * a + control = array([[2, 3], [8, 10], [18, 7]], + mask=[[0, 0], [0, 0], [0, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + def test_domained_binops_d2D(self): + # Test domained binary operations on 2D data + a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]]) + b = array([[2., 3.], [4., 5.], [6., 7.]]) + + test = a / b + control = array([[1. 
/ 2., 1. / 3.], [2., 2.], [3., 3.]], + mask=[[0, 0], [1, 1], [1, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + test = b / a + control = array([[2. / 1., 3. / 1.], [4., 5.], [6., 7.]], + mask=[[0, 0], [1, 1], [1, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + a = array([[1.], [2.], [3.]]) + b = array([[2., 3.], [4., 5.], [6., 7.]], + mask=[[0, 0], [0, 0], [0, 1]]) + test = a / b + control = array([[1. / 2, 1. / 3], [2. / 4, 2. / 5], [3. / 6, 3]], + mask=[[0, 0], [0, 0], [0, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + test = b / a + control = array([[2 / 1., 3 / 1.], [4 / 2., 5 / 2.], [6 / 3., 7]], + mask=[[0, 0], [0, 0], [0, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + def test_noshrinking(self): + # Check that we don't shrink a mask when not wanted + # Binary operations + a = masked_array([1., 2., 3.], mask=[False, False, False], + shrink=False) + b = a + 1 + assert_equal(b.mask, [0, 0, 0]) + # In place binary operation + a += 1 + assert_equal(a.mask, [0, 0, 0]) + # Domained binary operation + b = a / 1. + assert_equal(b.mask, [0, 0, 0]) + # In place binary operation + a /= 1. + assert_equal(a.mask, [0, 0, 0]) + + def test_ufunc_nomask(self): + # check the case ufuncs should set the mask to false + m = np.ma.array([1]) + # check we don't get array([False], dtype=bool) + assert_equal(np.true_divide(m, 5).mask.shape, ()) + + def test_noshink_on_creation(self): + # Check that the mask is not shrunk on array creation when not wanted + a = np.ma.masked_values([1., 2.5, 3.1], 1.5, shrink=False) + assert_equal(a.mask, [0, 0, 0]) + + def test_mod(self): + # Tests mod + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + assert_equal(mod(x, y), mod(xm, ym)) + test = mod(ym, xm) + assert_equal(test, np.mod(ym, xm)) + assert_equal(test.mask, mask_or(xm.mask, ym.mask)) + test = mod(xm, ym) + assert_equal(test, np.mod(xm, ym)) + assert_equal(test.mask, mask_or(mask_or(xm.mask, ym.mask), (ym == 0))) + + def test_TakeTransposeInnerOuter(self): + # Test of take, transpose, inner, outer products + x = arange(24) + y = np.arange(24) + x[5:6] = masked + x = x.reshape(2, 3, 4) + y = y.reshape(2, 3, 4) + assert_equal(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1))) + assert_equal(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1)) + assert_equal(np.inner(filled(x, 0), filled(y, 0)), + inner(x, y)) + assert_equal(np.outer(filled(x, 0), filled(y, 0)), + outer(x, y)) + y = array(['abc', 1, 'def', 2, 3], object) + y[2] = masked + t = take(y, [0, 3, 4]) + assert_(t[0] == 'abc') + assert_(t[1] == 2) + assert_(t[2] == 3) + + def test_imag_real(self): + # Check complex + xx = array([1 + 10j, 20 + 2j], mask=[1, 0]) + assert_equal(xx.imag, [10, 2]) + assert_equal(xx.imag.filled(), [1e+20, 2]) + assert_equal(xx.imag.dtype, xx._data.imag.dtype) + assert_equal(xx.real, [1, 20]) + assert_equal(xx.real.filled(), [1e+20, 20]) + assert_equal(xx.real.dtype, xx._data.real.dtype) + + def test_methods_with_output(self): + xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4) + xm[:, 0] = xm[0] = xm[-1, -1] = masked + + funclist = ('sum', 'prod', 'var', 'std', 'max', 'min', 'ptp', 'mean',) + + for funcname in funclist: + npfunc = getattr(np, funcname) + xmmeth = getattr(xm, funcname) + # A ndarray as explicit input + output = np.empty(4, 
dtype=float) + output.fill(-9999) + result = npfunc(xm, axis=0, out=output) + # ... the result should be the given output + assert_(result is output) + assert_equal(result, xmmeth(axis=0, out=output)) + + output = empty(4, dtype=int) + result = xmmeth(axis=0, out=output) + assert_(result is output) + assert_(output[0] is masked) + + def test_eq_on_structured(self): + # Test the equality of structured arrays + ndtype = [('A', int), ('B', int)] + a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype) + + test = (a == a) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + test = (a == a[0]) + assert_equal(test.data, [True, False]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype) + test = (a == b) + assert_equal(test.data, [False, True]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + test = (a[0] == b) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) + test = (a == b) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + # complicated dtype, 2-dimensional array. + ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])] + a = array([[(1, (1, 1)), (2, (2, 2))], + [(3, (3, 3)), (4, (4, 4))]], + mask=[[(0, (1, 0)), (0, (0, 1))], + [(1, (0, 0)), (1, (1, 1))]], dtype=ndtype) + test = (a[0, 0] == a) + assert_equal(test.data, [[True, False], [False, False]]) + assert_equal(test.mask, [[False, False], [False, True]]) + assert_(test.fill_value == True) + + def test_ne_on_structured(self): + # Test the equality of structured arrays + ndtype = [('A', int), ('B', int)] + a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype) + + test = (a != a) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + test = (a != a[0]) + assert_equal(test.data, [False, True]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype) + test = (a != b) + assert_equal(test.data, [True, False]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + test = (a[0] != b) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) + test = (a != b) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + # complicated dtype, 2-dimensional array. + ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])] + a = array([[(1, (1, 1)), (2, (2, 2))], + [(3, (3, 3)), (4, (4, 4))]], + mask=[[(0, (1, 0)), (0, (0, 1))], + [(1, (0, 0)), (1, (1, 1))]], dtype=ndtype) + test = (a[0, 0] != a) + assert_equal(test.data, [[False, True], [True, True]]) + assert_equal(test.mask, [[False, False], [False, True]]) + assert_(test.fill_value == True) + + def test_eq_ne_structured_extra(self): + # ensure simple examples are symmetric and make sense. 
+ # from https://github.com/numpy/numpy/pull/8590#discussion_r101126465 + dt = np.dtype('i4,i4') + for m1 in (mvoid((1, 2), mask=(0, 0), dtype=dt), + mvoid((1, 2), mask=(0, 1), dtype=dt), + mvoid((1, 2), mask=(1, 0), dtype=dt), + mvoid((1, 2), mask=(1, 1), dtype=dt)): + ma1 = m1.view(MaskedArray) + r1 = ma1.view('2i4') + for m2 in (np.array((1, 1), dtype=dt), + mvoid((1, 1), dtype=dt), + mvoid((1, 0), mask=(0, 1), dtype=dt), + mvoid((3, 2), mask=(0, 1), dtype=dt)): + ma2 = m2.view(MaskedArray) + r2 = ma2.view('2i4') + eq_expected = (r1 == r2).all() + assert_equal(m1 == m2, eq_expected) + assert_equal(m2 == m1, eq_expected) + assert_equal(ma1 == m2, eq_expected) + assert_equal(m1 == ma2, eq_expected) + assert_equal(ma1 == ma2, eq_expected) + # Also check it is the same if we do it element by element. + el_by_el = [m1[name] == m2[name] for name in dt.names] + assert_equal(array(el_by_el, dtype=bool).all(), eq_expected) + ne_expected = (r1 != r2).any() + assert_equal(m1 != m2, ne_expected) + assert_equal(m2 != m1, ne_expected) + assert_equal(ma1 != m2, ne_expected) + assert_equal(m1 != ma2, ne_expected) + assert_equal(ma1 != ma2, ne_expected) + el_by_el = [m1[name] != m2[name] for name in dt.names] + assert_equal(array(el_by_el, dtype=bool).any(), ne_expected) + + @pytest.mark.parametrize('dt', ['S', 'U']) + @pytest.mark.parametrize('fill', [None, 'A']) + def test_eq_for_strings(self, dt, fill): + # Test the equality of string arrays + a = array(['a', 'b'], dtype=dt, mask=[0, 1], fill_value=fill) + + test = (a == a) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + test = (a == a[0]) + assert_equal(test.data, [True, False]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + b = array(['a', 'b'], dtype=dt, mask=[1, 0], fill_value=fill) + test = (a == b) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, True]) + assert_(test.fill_value == True) + + test = (a[0] == b) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + test = (b == a[0]) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + @pytest.mark.parametrize('dt', ['S', 'U']) + @pytest.mark.parametrize('fill', [None, 'A']) + def test_ne_for_strings(self, dt, fill): + # Test the inequality of string arrays + a = array(['a', 'b'], dtype=dt, mask=[0, 1], fill_value=fill) + + test = (a != a) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + test = (a != a[0]) + assert_equal(test.data, [False, True]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + b = array(['a', 'b'], dtype=dt, mask=[1, 0], fill_value=fill) + test = (a != b) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, True]) + assert_(test.fill_value == True) + + test = (a[0] != b) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + test = (b != a[0]) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + @pytest.mark.parametrize('dt1', num_dts, ids=num_ids) + @pytest.mark.parametrize('dt2', num_dts, ids=num_ids) + @pytest.mark.parametrize('fill', [None, 1]) + def test_eq_for_numeric(self, dt1, dt2, fill): + # Test the equality of numeric arrays + a = array([0, 1],
dtype=dt1, mask=[0, 1], fill_value=fill) + + test = (a == a) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + test = (a == a[0]) + assert_equal(test.data, [True, False]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + b = array([0, 1], dtype=dt2, mask=[1, 0], fill_value=fill) + test = (a == b) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, True]) + assert_(test.fill_value == True) + + test = (a[0] == b) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + test = (b == a[0]) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + @pytest.mark.parametrize('dt1', num_dts, ids=num_ids) + @pytest.mark.parametrize('dt2', num_dts, ids=num_ids) + @pytest.mark.parametrize('fill', [None, 1]) + def test_ne_for_numeric(self, dt1, dt2, fill): + # Test the inequality of numeric arrays + a = array([0, 1], dtype=dt1, mask=[0, 1], fill_value=fill) + + test = (a != a) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + test = (a != a[0]) + assert_equal(test.data, [False, True]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + b = array([0, 1], dtype=dt2, mask=[1, 0], fill_value=fill) + test = (a != b) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, True]) + assert_(test.fill_value == True) + + test = (a[0] != b) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + test = (b != a[0]) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + def test_eq_with_None(self): + # Really, comparisons with None should not be done, but check them + # anyway. Note that pep8 will flag these tests. + # Deprecation is in place for arrays, and when it happens this + # test will fail (and have to be changed accordingly). + + # With partial mask + with suppress_warnings() as sup: + sup.filter(FutureWarning, "Comparison to `None`") + a = array([None, 1], mask=[0, 1]) + assert_equal(a == None, array([True, False], mask=[0, 1])) + assert_equal(a.data == None, [True, False]) + assert_equal(a != None, array([False, True], mask=[0, 1])) + # With nomask + a = array([None, 1], mask=False) + assert_equal(a == None, [True, False]) + assert_equal(a != None, [False, True]) + # With complete mask + a = array([None, 2], mask=True) + assert_equal(a == None, array([False, True], mask=True)) + assert_equal(a != None, array([True, False], mask=True)) + # Fully masked, even comparison to None should return "masked" + a = masked + assert_equal(a == None, masked) + + def test_eq_with_scalar(self): + a = array(1) + assert_equal(a == 1, True) + assert_equal(a == 0, False) + assert_equal(a != 1, False) + assert_equal(a != 0, True) + b = array(1, mask=True) + assert_equal(b == 0, masked) + assert_equal(b == 1, masked) + assert_equal(b != 0, masked) + assert_equal(b != 1, masked) + + def test_eq_different_dimensions(self): + m1 = array([1, 1], mask=[0, 1]) + # test comparison with both masked and regular arrays.
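+ # (the 1-d m1 is broadcast against each 2-d m2; its mask broadcasts + # along with the data, which is what the expected masks below encode)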
+ for m2 in (array([[0, 1], [1, 2]]), + np.array([[0, 1], [1, 2]])): + test = (m1 == m2) + assert_equal(test.data, [[False, False], + [True, False]]) + assert_equal(test.mask, [[False, True], + [False, True]]) + + def test_numpyarithmetics(self): + # Check that the mask is not back-propagated when using numpy functions + a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1]) + control = masked_array([np.nan, np.nan, 0, np.log(2), -1], + mask=[1, 1, 0, 0, 1]) + + test = log(a) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(a.mask, [0, 0, 0, 0, 1]) + + test = np.log(a) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(a.mask, [0, 0, 0, 0, 1]) + + +class TestMaskedArrayAttributes: + + def test_keepmask(self): + # Tests the keep mask flag + x = masked_array([1, 2, 3], mask=[1, 0, 0]) + mx = masked_array(x) + assert_equal(mx.mask, x.mask) + mx = masked_array(x, mask=[0, 1, 0], keep_mask=False) + assert_equal(mx.mask, [0, 1, 0]) + mx = masked_array(x, mask=[0, 1, 0], keep_mask=True) + assert_equal(mx.mask, [1, 1, 0]) + # We default to true + mx = masked_array(x, mask=[0, 1, 0]) + assert_equal(mx.mask, [1, 1, 0]) + + def test_hardmask(self): + # Test hard_mask + d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + xh = array(d, mask=m, hard_mask=True) + # We need to copy, to avoid updating d in xh ! + xs = array(d, mask=m, hard_mask=False, copy=True) + xh[[1, 4]] = [10, 40] + xs[[1, 4]] = [10, 40] + assert_equal(xh._data, [0, 10, 2, 3, 4]) + assert_equal(xs._data, [0, 10, 2, 3, 40]) + assert_equal(xs.mask, [0, 0, 0, 1, 0]) + assert_(xh._hardmask) + assert_(not xs._hardmask) + xh[1:4] = [10, 20, 30] + xs[1:4] = [10, 20, 30] + assert_equal(xh._data, [0, 10, 20, 3, 4]) + assert_equal(xs._data, [0, 10, 20, 30, 40]) + assert_equal(xs.mask, nomask) + xh[0] = masked + xs[0] = masked + assert_equal(xh.mask, [1, 0, 0, 1, 1]) + assert_equal(xs.mask, [1, 0, 0, 0, 0]) + xh[:] = 1 + xs[:] = 1 + assert_equal(xh._data, [0, 1, 1, 3, 4]) + assert_equal(xs._data, [1, 1, 1, 1, 1]) + assert_equal(xh.mask, [1, 0, 0, 1, 1]) + assert_equal(xs.mask, nomask) + # Switch to soft mask + xh.soften_mask() + xh[:] = arange(5) + assert_equal(xh._data, [0, 1, 2, 3, 4]) + assert_equal(xh.mask, nomask) + # Switch back to hard mask + xh.harden_mask() + xh[xh < 3] = masked + assert_equal(xh._data, [0, 1, 2, 3, 4]) + assert_equal(xh._mask, [1, 1, 1, 0, 0]) + xh[filled(xh > 1, False)] = 5 + assert_equal(xh._data, [0, 1, 2, 5, 5]) + assert_equal(xh._mask, [1, 1, 1, 0, 0]) + + xh = array([[1, 2], [3, 4]], mask=[[1, 0], [0, 0]], hard_mask=True) + xh[0] = 0 + assert_equal(xh._data, [[1, 0], [3, 4]]) + assert_equal(xh._mask, [[1, 0], [0, 0]]) + xh[-1, -1] = 5 + assert_equal(xh._data, [[1, 0], [3, 5]]) + assert_equal(xh._mask, [[1, 0], [0, 0]]) + xh[filled(xh < 5, False)] = 2 + assert_equal(xh._data, [[1, 2], [2, 5]]) + assert_equal(xh._mask, [[1, 0], [0, 0]]) + + def test_hardmask_again(self): + # Another test of hardmask + d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + xh = array(d, mask=m, hard_mask=True) + xh[4:5] = 999 + xh[0:1] = 999 + assert_equal(xh._data, [999, 1, 2, 3, 4]) + + def test_hardmask_oncemore_yay(self): + # OK, yet another test of hardmask + # Make sure that harden_mask/soften_mask//unshare_mask returns self + a = array([1, 2, 3], mask=[1, 0, 0]) + b = a.harden_mask() + assert_equal(a, b) + b[0] = 0 + assert_equal(a, b) + assert_equal(b, array([1, 2, 3], mask=[1, 0, 0])) + a = b.soften_mask() + a[0] = 0 + assert_equal(a, b) + 
assert_equal(b, array([0, 2, 3], mask=[0, 0, 0])) + + def test_smallmask(self): + # Checks the behaviour of _smallmask + a = arange(10) + a[1] = masked + a[1] = 1 + assert_equal(a._mask, nomask) + a = arange(10) + a._smallmask = False + a[1] = masked + a[1] = 1 + assert_equal(a._mask, zeros(10)) + + def test_shrink_mask(self): + # Tests .shrink_mask() + a = array([1, 2, 3], mask=[0, 0, 0]) + b = a.shrink_mask() + assert_equal(a, b) + assert_equal(a.mask, nomask) + + # Mask cannot be shrunk on structured types, so is a no-op + a = np.ma.array([(1, 2.0)], [('a', int), ('b', float)]) + b = a.copy() + a.shrink_mask() + assert_equal(a.mask, b.mask) + + def test_flat(self): + # Test that flat can return all types of items [#4585, #4615] + # test 2-D record array + # ... on structured array w/ masked records + x = array([[(1, 1.1, 'one'), (2, 2.2, 'two'), (3, 3.3, 'thr')], + [(4, 4.4, 'fou'), (5, 5.5, 'fiv'), (6, 6.6, 'six')]], + dtype=[('a', int), ('b', float), ('c', '|S8')]) + x['a'][0, 1] = masked + x['b'][1, 0] = masked + x['c'][0, 2] = masked + x[-1, -1] = masked + xflat = x.flat + assert_equal(xflat[0], x[0, 0]) + assert_equal(xflat[1], x[0, 1]) + assert_equal(xflat[2], x[0, 2]) + assert_equal(xflat[:3], x[0]) + assert_equal(xflat[3], x[1, 0]) + assert_equal(xflat[4], x[1, 1]) + assert_equal(xflat[5], x[1, 2]) + assert_equal(xflat[3:], x[1]) + assert_equal(xflat[-1], x[-1, -1]) + i = 0 + j = 0 + for xf in xflat: + assert_equal(xf, x[j, i]) + i += 1 + if i >= x.shape[-1]: + i = 0 + j += 1 + + def test_assign_dtype(self): + # check that the mask's dtype is updated when dtype is changed + a = np.zeros(4, dtype='f4,i4') + + m = np.ma.array(a) + m.dtype = np.dtype('f4') + repr(m) # raises? + assert_equal(m.dtype, np.dtype('f4')) + + # check that dtype changes that change shape of mask too much + # are not allowed + def assign(): + m = np.ma.array(a) + m.dtype = np.dtype('f8') + assert_raises(ValueError, assign) + + b = a.view(dtype='f4', type=np.ma.MaskedArray) # raises? 
+ assert_equal(b.dtype, np.dtype('f4')) + + # check that nomask is preserved + a = np.zeros(4, dtype='f4') + m = np.ma.array(a) + m.dtype = np.dtype('f4,i4') + assert_equal(m.dtype, np.dtype('f4,i4')) + assert_equal(m._mask, np.ma.nomask) + + +class TestFillingValues: + + def test_check_on_scalar(self): + # Test _check_fill_value set to valid and invalid values + _check_fill_value = np.ma.core._check_fill_value + + fval = _check_fill_value(0, int) + assert_equal(fval, 0) + fval = _check_fill_value(None, int) + assert_equal(fval, default_fill_value(0)) + + fval = _check_fill_value(0, "|S3") + assert_equal(fval, b"0") + fval = _check_fill_value(None, "|S3") + assert_equal(fval, default_fill_value(b"camelot!")) + assert_raises(TypeError, _check_fill_value, 1e+20, int) + assert_raises(TypeError, _check_fill_value, 'stuff', int) + + def test_check_on_fields(self): + # Tests _check_fill_value with records + _check_fill_value = np.ma.core._check_fill_value + ndtype = [('a', int), ('b', float), ('c', "|S3")] + # A check on a list should return a single record + fval = _check_fill_value([-999, -12345678.9, "???"], ndtype) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), [-999, -12345678.9, b"???"]) + # A check on None should output the defaults + fval = _check_fill_value(None, ndtype) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), [default_fill_value(0), + default_fill_value(0.), + asbytes(default_fill_value("0"))]) + #.....Using a structured type as fill_value should work + fill_val = np.array((-999, -12345678.9, "???"), dtype=ndtype) + fval = _check_fill_value(fill_val, ndtype) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), [-999, -12345678.9, b"???"]) + + #.....Using a flexible type w/ a different type shouldn't matter + # BEHAVIOR in 1.5 and earlier, and 1.13 and later: match structured + # types by position + fill_val = np.array((-999, -12345678.9, "???"), + dtype=[("A", int), ("B", float), ("C", "|S3")]) + fval = _check_fill_value(fill_val, ndtype) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), [-999, -12345678.9, b"???"]) + + #.....Using an object-array shouldn't matter either + fill_val = np.ndarray(shape=(1,), dtype=object) + fill_val[0] = (-999, -12345678.9, b"???") + fval = _check_fill_value(fill_val, object) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), [-999, -12345678.9, b"???"]) + # NOTE: This test was never run properly as "fill_value" rather than + # "fill_val" was assigned. Written properly, it fails. 
+ #fill_val = np.array((-999, -12345678.9, "???")) + #fval = _check_fill_value(fill_val, ndtype) + #assert_(isinstance(fval, ndarray)) + #assert_equal(fval.item(), [-999, -12345678.9, b"???"]) + #.....One-field-only flexible type should work as well + ndtype = [("a", int)] + fval = _check_fill_value(-999999999, ndtype) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), (-999999999,)) + + def test_fillvalue_conversion(self): + # Tests the behavior of fill_value during conversion + # We had a tailored comment to make sure special attributes are + # properly dealt with + a = array([b'3', b'4', b'5']) + a._optinfo.update({'comment':"updated!"}) + + b = array(a, dtype=int) + assert_equal(b._data, [3, 4, 5]) + assert_equal(b.fill_value, default_fill_value(0)) + + b = array(a, dtype=float) + assert_equal(b._data, [3, 4, 5]) + assert_equal(b.fill_value, default_fill_value(0.)) + + b = a.astype(int) + assert_equal(b._data, [3, 4, 5]) + assert_equal(b.fill_value, default_fill_value(0)) + assert_equal(b._optinfo['comment'], "updated!") + + b = a.astype([('a', '|S3')]) + assert_equal(b['a']._data, a._data) + assert_equal(b['a'].fill_value, a.fill_value) + + def test_default_fill_value(self): + # check all calling conventions + f1 = default_fill_value(1.) + f2 = default_fill_value(np.array(1.)) + f3 = default_fill_value(np.array(1.).dtype) + assert_equal(f1, f2) + assert_equal(f1, f3) + + def test_default_fill_value_structured(self): + fields = array([(1, 1, 1)], + dtype=[('i', int), ('s', '|S8'), ('f', float)]) + + f1 = default_fill_value(fields) + f2 = default_fill_value(fields.dtype) + expected = np.array((default_fill_value(0), + default_fill_value('0'), + default_fill_value(0.)), dtype=fields.dtype) + assert_equal(f1, expected) + assert_equal(f2, expected) + + def test_default_fill_value_void(self): + dt = np.dtype([('v', 'V7')]) + f = default_fill_value(dt) + assert_equal(f['v'], np.array(default_fill_value(dt['v']), dt['v'])) + + def test_fillvalue(self): + # Yet more fun with the fill_value + data = masked_array([1, 2, 3], fill_value=-999) + series = data[[0, 2, 1]] + assert_equal(series._fill_value, data._fill_value) + + mtype = [('f', float), ('s', '|S3')] + x = array([(1, 'a'), (2, 'b'), (pi, 'pi')], dtype=mtype) + x.fill_value = 999 + assert_equal(x.fill_value.item(), [999., b'999']) + assert_equal(x['f'].fill_value, 999) + assert_equal(x['s'].fill_value, b'999') + + x.fill_value = (9, '???') + assert_equal(x.fill_value.item(), (9, b'???')) + assert_equal(x['f'].fill_value, 9) + assert_equal(x['s'].fill_value, b'???') + + x = array([1, 2, 3.1]) + x.fill_value = 999 + assert_equal(np.asarray(x.fill_value).dtype, float) + assert_equal(x.fill_value, 999.) 
+ assert_equal(x._fill_value, np.array(999.)) + + def test_subarray_fillvalue(self): + # gh-10483 test multi-field index fill value + fields = array([(1, 1, 1)], + dtype=[('i', int), ('s', '|S8'), ('f', float)]) + with suppress_warnings() as sup: + sup.filter(FutureWarning, "Numpy has detected") + subfields = fields[['i', 'f']] + assert_equal(tuple(subfields.fill_value), (999999, 1.e+20)) + # test comparison does not raise: + subfields[1:] == subfields[:-1] + + def test_fillvalue_exotic_dtype(self): + # Tests yet more exotic flexible dtypes + _check_fill_value = np.ma.core._check_fill_value + ndtype = [('i', int), ('s', '|S8'), ('f', float)] + control = np.array((default_fill_value(0), + default_fill_value('0'), + default_fill_value(0.),), + dtype=ndtype) + assert_equal(_check_fill_value(None, ndtype), control) + # The shape shouldn't matter + ndtype = [('f0', float, (2, 2))] + control = np.array((default_fill_value(0.),), + dtype=[('f0', float)]).astype(ndtype) + assert_equal(_check_fill_value(None, ndtype), control) + control = np.array((0,), dtype=[('f0', float)]).astype(ndtype) + assert_equal(_check_fill_value(0, ndtype), control) + + ndtype = np.dtype("int, (2,3)float, float") + control = np.array((default_fill_value(0), + default_fill_value(0.), + default_fill_value(0.),), + dtype="int, float, float").astype(ndtype) + test = _check_fill_value(None, ndtype) + assert_equal(test, control) + control = np.array((0, 0, 0), dtype="int, float, float").astype(ndtype) + assert_equal(_check_fill_value(0, ndtype), control) + # but when indexing, fill value should become scalar not tuple + # See issue #6723 + M = masked_array(control) + assert_equal(M["f1"].fill_value.ndim, 0) + + def test_fillvalue_datetime_timedelta(self): + # Test default fillvalue for datetime64 and timedelta64 types. + # See issue #4476, this would return '?' which would cause errors + # elsewhere + + for timecode in ("as", "fs", "ps", "ns", "us", "ms", "s", "m", + "h", "D", "W", "M", "Y"): + control = numpy.datetime64("NaT", timecode) + test = default_fill_value(numpy.dtype("<M8[" + timecode + "]")) + np.testing.assert_equal(test, control) + + control = numpy.timedelta64("NaT", timecode) + test = default_fill_value(numpy.dtype("<m8[" + timecode + "]")) + np.testing.assert_equal(test, control) + + +class TestUfuncs: + + def test_no_masked_nan_warnings(self): + # check that a nan in a masked position does not + # cause ufunc warnings + m = np.ma.array([0.5, np.nan], mask=[0, 1]) + + with warnings.catch_warnings(): + warnings.filterwarnings("error") + + # test unary and binary ufuncs + exp(m) + add(m, 1) + m > 0 + + # test different unary domains + sqrt(m) + log(m) + tan(m) + arcsin(m) + arccos(m) + arccosh(m) + + # test binary domains + divide(m, 2) + + # also check that allclose uses ma ufuncs, to avoid warning + allclose(m, 0.5) + +class TestMaskedArrayInPlaceArithmetics: + # Test MaskedArray Arithmetics + + def setup(self): + x = arange(10) + y = arange(10) + xm = arange(10) + xm[2] = masked + self.intdata = (x, y, xm) + self.floatdata = (x.astype(float), y.astype(float), xm.astype(float)) + self.othertypes = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + self.othertypes = [np.dtype(_).type for _ in self.othertypes] + self.uint8data = ( + x.astype(np.uint8), + y.astype(np.uint8), + xm.astype(np.uint8) + ) + + def test_inplace_addition_scalar(self): + # Test of inplace additions + (x, y, xm) = self.intdata + xm[2] = masked + x += 1 + assert_equal(x, y + 1) + xm += 1 + assert_equal(xm, y + 1) + + (x, _, xm) = self.floatdata + id1 = x.data.ctypes.data + x += 1. + assert_(id1 == x.data.ctypes.data) + assert_equal(x, y + 1.)
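+ # A small illustration of the behaviour exercised below (not itself part + # of the suite): in-place arithmetic propagates the other operand's mask + # into the target, e.g. + # a = masked_array([1., 2., 3.]) + # a += masked_array([1, 1, 1], mask=[0, 1, 0]) + # assert_equal(a.mask, [0, 1, 0])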
+ + def test_inplace_addition_array(self): + # Test of inplace additions + (x, y, xm) = self.intdata + m = xm.mask + a = arange(10, dtype=np.int16) + a[-1] = masked + x += a + xm += a + assert_equal(x, y + a) + assert_equal(xm, y + a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_subtraction_scalar(self): + # Test of inplace subtractions + (x, y, xm) = self.intdata + x -= 1 + assert_equal(x, y - 1) + xm -= 1 + assert_equal(xm, y - 1) + + def test_inplace_subtraction_array(self): + # Test of inplace subtractions + (x, y, xm) = self.floatdata + m = xm.mask + a = arange(10, dtype=float) + a[-1] = masked + x -= a + xm -= a + assert_equal(x, y - a) + assert_equal(xm, y - a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_multiplication_scalar(self): + # Test of inplace multiplication + (x, y, xm) = self.floatdata + x *= 2.0 + assert_equal(x, y * 2) + xm *= 2.0 + assert_equal(xm, y * 2) + + def test_inplace_multiplication_array(self): + # Test of inplace multiplication + (x, y, xm) = self.floatdata + m = xm.mask + a = arange(10, dtype=float) + a[-1] = masked + x *= a + xm *= a + assert_equal(x, y * a) + assert_equal(xm, y * a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_division_scalar_int(self): + # Test of inplace division + (x, y, xm) = self.intdata + x = arange(10) * 2 + xm = arange(10) * 2 + xm[2] = masked + x //= 2 + assert_equal(x, y) + xm //= 2 + assert_equal(xm, y) + + def test_inplace_division_scalar_float(self): + # Test of inplace division + (x, y, xm) = self.floatdata + x /= 2.0 + assert_equal(x, y / 2.0) + xm /= arange(10) + assert_equal(xm, ones((10,))) + + def test_inplace_division_array_float(self): + # Test of inplace division + (x, y, xm) = self.floatdata + m = xm.mask + a = arange(10, dtype=float) + a[-1] = masked + x /= a + xm /= a + assert_equal(x, y / a) + assert_equal(xm, y / a) + assert_equal(xm.mask, mask_or(mask_or(m, a.mask), (a == 0))) + + def test_inplace_division_misc(self): + + x = [1., 1., 1., -2., pi / 2., 4., 5., -10., 10., 1., 2., 3.] + y = [5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.] 
+ m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] + xm = masked_array(x, mask=m1) + ym = masked_array(y, mask=m2) + + z = xm / ym + assert_equal(z._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1]) + assert_equal(z._data, + [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.]) + + xm = xm.copy() + xm /= ym + assert_equal(xm._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1]) + assert_equal(xm._data, + [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.]) + + def test_datafriendly_add(self): + # Test keeping data w/ (inplace) addition + x = array([1, 2, 3], mask=[0, 0, 1]) + # Test add w/ scalar + xx = x + 1 + assert_equal(xx.data, [2, 3, 3]) + assert_equal(xx.mask, [0, 0, 1]) + # Test iadd w/ scalar + x += 1 + assert_equal(x.data, [2, 3, 3]) + assert_equal(x.mask, [0, 0, 1]) + # Test add w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x + array([1, 2, 3], mask=[1, 0, 0]) + assert_equal(xx.data, [1, 4, 3]) + assert_equal(xx.mask, [1, 0, 1]) + # Test iadd w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + x += array([1, 2, 3], mask=[1, 0, 0]) + assert_equal(x.data, [1, 4, 3]) + assert_equal(x.mask, [1, 0, 1]) + + def test_datafriendly_sub(self): + # Test keeping data w/ (inplace) subtraction + # Test sub w/ scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x - 1 + assert_equal(xx.data, [0, 1, 3]) + assert_equal(xx.mask, [0, 0, 1]) + # Test isub w/ scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + x -= 1 + assert_equal(x.data, [0, 1, 3]) + assert_equal(x.mask, [0, 0, 1]) + # Test sub w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x - array([1, 2, 3], mask=[1, 0, 0]) + assert_equal(xx.data, [1, 0, 3]) + assert_equal(xx.mask, [1, 0, 1]) + # Test isub w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + x -= array([1, 2, 3], mask=[1, 0, 0]) + assert_equal(x.data, [1, 0, 3]) + assert_equal(x.mask, [1, 0, 1]) + + def test_datafriendly_mul(self): + # Test keeping data w/ (inplace) multiplication + # Test mul w/ scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x * 2 + assert_equal(xx.data, [2, 4, 3]) + assert_equal(xx.mask, [0, 0, 1]) + # Test imul w/ scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + x *= 2 + assert_equal(x.data, [2, 4, 3]) + assert_equal(x.mask, [0, 0, 1]) + # Test mul w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x * array([10, 20, 30], mask=[1, 0, 0]) + assert_equal(xx.data, [1, 40, 3]) + assert_equal(xx.mask, [1, 0, 1]) + # Test imul w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + x *= array([10, 20, 30], mask=[1, 0, 0]) + assert_equal(x.data, [1, 40, 3]) + assert_equal(x.mask, [1, 0, 1]) + + def test_datafriendly_div(self): + # Test keeping data w/ (inplace) division + # Test div on scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x / 2. + assert_equal(xx.data, [1 / 2., 2 / 2., 3]) + assert_equal(xx.mask, [0, 0, 1]) + # Test idiv on scalar + x = array([1., 2., 3.], mask=[0, 0, 1]) + x /= 2. + assert_equal(x.data, [1 / 2., 2 / 2., 3]) + assert_equal(x.mask, [0, 0, 1]) + # Test div on array + x = array([1., 2., 3.], mask=[0, 0, 1]) + xx = x / array([10., 20., 30.], mask=[1, 0, 0]) + assert_equal(xx.data, [1., 2.
/ 20., 3.]) + assert_equal(xx.mask, [1, 0, 1]) + # Test idiv on array + x = array([1., 2., 3.], mask=[0, 0, 1]) + x /= array([10., 20., 30.], mask=[1, 0, 0]) + assert_equal(x.data, [1., 2 / 20., 3.]) + assert_equal(x.mask, [1, 0, 1]) + + def test_datafriendly_pow(self): + # Test keeping data w/ (inplace) power + # Test pow on scalar + x = array([1., 2., 3.], mask=[0, 0, 1]) + xx = x ** 2.5 + assert_equal(xx.data, [1., 2. ** 2.5, 3.]) + assert_equal(xx.mask, [0, 0, 1]) + # Test ipow on scalar + x **= 2.5 + assert_equal(x.data, [1., 2. ** 2.5, 3]) + assert_equal(x.mask, [0, 0, 1]) + + def test_datafriendly_add_arrays(self): + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 0]) + a += b + assert_equal(a, [[2, 2], [4, 4]]) + if a.mask is not nomask: + assert_equal(a.mask, [[0, 0], [0, 0]]) + + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 1]) + a += b + assert_equal(a, [[2, 2], [4, 4]]) + assert_equal(a.mask, [[0, 1], [0, 1]]) + + def test_datafriendly_sub_arrays(self): + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 0]) + a -= b + assert_equal(a, [[0, 0], [2, 2]]) + if a.mask is not nomask: + assert_equal(a.mask, [[0, 0], [0, 0]]) + + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 1]) + a -= b + assert_equal(a, [[0, 0], [2, 2]]) + assert_equal(a.mask, [[0, 1], [0, 1]]) + + def test_datafriendly_mul_arrays(self): + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 0]) + a *= b + assert_equal(a, [[1, 1], [3, 3]]) + if a.mask is not nomask: + assert_equal(a.mask, [[0, 0], [0, 0]]) + + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 1]) + a *= b + assert_equal(a, [[1, 1], [3, 3]]) + assert_equal(a.mask, [[0, 1], [0, 1]]) + + def test_inplace_addition_scalar_type(self): + # Test of inplace additions + for t in self.othertypes: + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings("always") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + xm[2] = masked + x += t(1) + assert_equal(x, y + t(1)) + xm += t(1) + assert_equal(xm, y + t(1)) + + assert_equal(len(w), 0, f'Failed on type={t}.') + + def test_inplace_addition_array_type(self): + # Test of inplace additions + for t in self.othertypes: + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings("always") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + m = xm.mask + a = arange(10, dtype=t) + a[-1] = masked + x += a + xm += a + assert_equal(x, y + a) + assert_equal(xm, y + a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + assert_equal(len(w), 0, f'Failed on type={t}.') + + def test_inplace_subtraction_scalar_type(self): + # Test of inplace subtractions + for t in self.othertypes: + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings("always") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x -= t(1) + assert_equal(x, y - t(1)) + xm -= t(1) + assert_equal(xm, y - t(1)) + + assert_equal(len(w), 0, f'Failed on type={t}.') + + def test_inplace_subtraction_array_type(self): + # Test of inplace subtractions + for t in self.othertypes: + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings("always") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + m = xm.mask + a = arange(10, dtype=t) + a[-1] = masked + x -= a + xm -= a + assert_equal(x, y - a) + assert_equal(xm, y - a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + assert_equal(len(w), 0, f'Failed on type={t}.') + + def test_inplace_multiplication_scalar_type(self): + # Test of inplace multiplication + for t in self.othertypes: + with 
warnings.catch_warnings(record=True) as w: + warnings.filterwarnings("always") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x *= t(2) + assert_equal(x, y * t(2)) + xm *= t(2) + assert_equal(xm, y * t(2)) + + assert_equal(len(w), 0, f'Failed on type={t}.') + + def test_inplace_multiplication_array_type(self): + # Test of inplace multiplication + for t in self.othertypes: + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings("always") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + m = xm.mask + a = arange(10, dtype=t) + a[-1] = masked + x *= a + xm *= a + assert_equal(x, y * a) + assert_equal(xm, y * a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + assert_equal(len(w), 0, f'Failed on type={t}.') + + def test_inplace_floor_division_scalar_type(self): + # Test of inplace division + for t in self.othertypes: + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings("always") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x = arange(10, dtype=t) * t(2) + xm = arange(10, dtype=t) * t(2) + xm[2] = masked + x //= t(2) + xm //= t(2) + assert_equal(x, y) + assert_equal(xm, y) + + assert_equal(len(w), 0, "Failed on type=%s." % t) + + def test_inplace_floor_division_array_type(self): + # Test of inplace division + for t in self.othertypes: + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings("always") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + m = xm.mask + a = arange(10, dtype=t) + a[-1] = masked + x //= a + xm //= a + assert_equal(x, y // a) + assert_equal(xm, y // a) + assert_equal( + xm.mask, + mask_or(mask_or(m, a.mask), (a == t(0))) + ) + + assert_equal(len(w), 0, f'Failed on type={t}.') + + def test_inplace_division_scalar_type(self): + # Test of inplace division + for t in self.othertypes: + with suppress_warnings() as sup: + sup.record(UserWarning) + + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x = arange(10, dtype=t) * t(2) + xm = arange(10, dtype=t) * t(2) + xm[2] = masked + + # May get a DeprecationWarning or a TypeError. + # + # This is a consequence of the fact that this is true divide + # and will require casting to float for calculation and + # casting back to the original type. This will only be raised + # with integers. Whether it is an error or warning is only + # dependent on how stringent the casting rules are. + # + # Will handle the same way. + try: + x /= t(2) + assert_equal(x, y) + except (DeprecationWarning, TypeError) as e: + warnings.warn(str(e), stacklevel=1) + try: + xm /= t(2) + assert_equal(xm, y) + except (DeprecationWarning, TypeError) as e: + warnings.warn(str(e), stacklevel=1) + + if issubclass(t, np.integer): + assert_equal(len(sup.log), 2, f'Failed on type={t}.') + else: + assert_equal(len(sup.log), 0, f'Failed on type={t}.') + + def test_inplace_division_array_type(self): + # Test of inplace division + for t in self.othertypes: + with suppress_warnings() as sup: + sup.record(UserWarning) + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + m = xm.mask + a = arange(10, dtype=t) + a[-1] = masked + + # May get a DeprecationWarning or a TypeError. + # + # This is a consequence of the fact that this is true divide + # and will require casting to float for calculation and + # casting back to the original type. This will only be raised + # with integers. Whether it is an error or warning is only + # dependent on how stringent the casting rules are. + # + # Will handle the same way. 
+ try: + x /= a + assert_equal(x, y / a) + except (DeprecationWarning, TypeError) as e: + warnings.warn(str(e), stacklevel=1) + try: + xm /= a + assert_equal(xm, y / a) + assert_equal( + xm.mask, + mask_or(mask_or(m, a.mask), (a == t(0))) + ) + except (DeprecationWarning, TypeError) as e: + warnings.warn(str(e), stacklevel=1) + + if issubclass(t, np.integer): + assert_equal(len(sup.log), 2, f'Failed on type={t}.') + else: + assert_equal(len(sup.log), 0, f'Failed on type={t}.') + + def test_inplace_pow_type(self): + # Test keeping data w/ (inplace) power + for t in self.othertypes: + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings("always") + # Test pow on scalar + x = array([1, 2, 3], mask=[0, 0, 1], dtype=t) + xx = x ** t(2) + xx_r = array([1, 2 ** 2, 3], mask=[0, 0, 1], dtype=t) + assert_equal(xx.data, xx_r.data) + assert_equal(xx.mask, xx_r.mask) + # Test ipow on scalar + x **= t(2) + assert_equal(x.data, xx_r.data) + assert_equal(x.mask, xx_r.mask) + + assert_equal(len(w), 0, f'Failed on type={t}.') + + +class TestMaskedArrayMethods: + # Test class for miscellaneous MaskedArrays methods. + def setup(self): + # Base data definition. + x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + X = x.reshape(6, 6) + XX = x.reshape(3, 2, 2, 3) + + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mx = array(data=x, mask=m) + mX = array(data=X, mask=m.reshape(X.shape)) + mXX = array(data=XX, mask=m.reshape(XX.shape)) + + m2 = np.array([1, 1, 0, 1, 0, 0, + 1, 1, 1, 1, 0, 1, + 0, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 1, 0, + 0, 0, 1, 0, 1, 1]) + m2x = array(data=x, mask=m2) + m2X = array(data=X, mask=m2.reshape(X.shape)) + m2XX = array(data=XX, mask=m2.reshape(XX.shape)) + self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) + + def test_generic_methods(self): + # Tests some MaskedArray methods. 
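+ # (with no mask set, each method should agree with the same method + # called on the underlying _data)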
+ a = array([1, 3, 2]) + assert_equal(a.any(), a._data.any()) + assert_equal(a.all(), a._data.all()) + assert_equal(a.argmax(), a._data.argmax()) + assert_equal(a.argmin(), a._data.argmin()) + assert_equal(a.choose(0, 1, 2, 3, 4), a._data.choose(0, 1, 2, 3, 4)) + assert_equal(a.compress([1, 0, 1]), a._data.compress([1, 0, 1])) + assert_equal(a.conj(), a._data.conj()) + assert_equal(a.conjugate(), a._data.conjugate()) + + m = array([[1, 2], [3, 4]]) + assert_equal(m.diagonal(), m._data.diagonal()) + assert_equal(a.sum(), a._data.sum()) + assert_equal(a.take([1, 2]), a._data.take([1, 2])) + assert_equal(m.transpose(), m._data.transpose()) + + def test_allclose(self): + # Tests allclose on arrays + a = np.random.rand(10) + b = a + np.random.rand(10) * 1e-8 + assert_(allclose(a, b)) + # Test allclose w/ infs + a[0] = np.inf + assert_(not allclose(a, b)) + b[0] = np.inf + assert_(allclose(a, b)) + # Test allclose w/ masked + a = masked_array(a) + a[-1] = masked + assert_(allclose(a, b, masked_equal=True)) + assert_(not allclose(a, b, masked_equal=False)) + # Test comparison w/ scalar + a *= 1e-8 + a[0] = 0 + assert_(allclose(a, 0, masked_equal=True)) + + # Test that the function works for MIN_INT integer typed arrays + a = masked_array([np.iinfo(np.int_).min], dtype=np.int_) + assert_(allclose(a, a)) + + def test_allclose_timedelta(self): + # Allclose currently works for timedelta64 as long as `atol` is + # an integer or also a timedelta64 + a = np.array([[1, 2, 3, 4]], dtype="m8[ns]") + assert allclose(a, a, atol=0) + assert allclose(a, a, atol=np.timedelta64(1, "ns")) + + def test_allany(self): + # Checks the any/all methods/functions. + x = np.array([[0.13, 0.26, 0.90], + [0.28, 0.33, 0.63], + [0.31, 0.87, 0.70]]) + m = np.array([[True, False, False], + [False, False, False], + [True, True, False]], dtype=np.bool_) + mx = masked_array(x, mask=m) + mxbig = (mx > 0.5) + mxsmall = (mx < 0.5) + + assert_(not mxbig.all()) + assert_(mxbig.any()) + assert_equal(mxbig.all(0), [False, False, True]) + assert_equal(mxbig.all(1), [False, False, True]) + assert_equal(mxbig.any(0), [False, False, True]) + assert_equal(mxbig.any(1), [True, True, True]) + + assert_(not mxsmall.all()) + assert_(mxsmall.any()) + assert_equal(mxsmall.all(0), [True, True, False]) + assert_equal(mxsmall.all(1), [False, False, False]) + assert_equal(mxsmall.any(0), [True, True, False]) + assert_equal(mxsmall.any(1), [True, True, False]) + + def test_allany_oddities(self): + # Some fun with all and any + store = empty((), dtype=bool) + full = array([1, 2, 3], mask=True) + + assert_(full.all() is masked) + full.all(out=store) + assert_(store) + assert_(store._mask, True) + assert_(store is not masked) + + store = empty((), dtype=bool) + assert_(full.any() is masked) + full.any(out=store) + assert_(not store) + assert_(store._mask, True) + assert_(store is not masked) + + def test_argmax_argmin(self): + # Tests argmin & argmax on MaskedArrays. 
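+ # (masked entries are ignored, so argmin/argmax return the index of + # an unmasked element)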
+ (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + + assert_equal(mx.argmin(), 35) + assert_equal(mX.argmin(), 35) + assert_equal(m2x.argmin(), 4) + assert_equal(m2X.argmin(), 4) + assert_equal(mx.argmax(), 28) + assert_equal(mX.argmax(), 28) + assert_equal(m2x.argmax(), 31) + assert_equal(m2X.argmax(), 31) + + assert_equal(mX.argmin(0), [2, 2, 2, 5, 0, 5]) + assert_equal(m2X.argmin(0), [2, 2, 4, 5, 0, 4]) + assert_equal(mX.argmax(0), [0, 5, 0, 5, 4, 0]) + assert_equal(m2X.argmax(0), [5, 5, 0, 5, 1, 0]) + + assert_equal(mX.argmin(1), [4, 1, 0, 0, 5, 5, ]) + assert_equal(m2X.argmin(1), [4, 4, 0, 0, 5, 3]) + assert_equal(mX.argmax(1), [2, 4, 1, 1, 4, 1]) + assert_equal(m2X.argmax(1), [2, 4, 1, 1, 1, 1]) + + def test_clip(self): + # Tests clip on MaskedArrays. + x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + m = np.array([0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0]) + mx = array(x, mask=m) + clipped = mx.clip(2, 8) + assert_equal(clipped.mask, mx.mask) + assert_equal(clipped._data, x.clip(2, 8)) + assert_equal(clipped._data, mx._data.clip(2, 8)) + + def test_clip_out(self): + # gh-14140 + a = np.arange(10) + m = np.ma.MaskedArray(a, mask=[0, 1] * 5) + m.clip(0, 5, out=m) + assert_equal(m.mask, [0, 1] * 5) + + def test_compress(self): + # test compress + a = masked_array([1., 2., 3., 4., 5.], fill_value=9999) + condition = (a > 1.5) & (a < 3.5) + assert_equal(a.compress(condition), [2., 3.]) + + a[[2, 3]] = masked + b = a.compress(condition) + assert_equal(b._data, [2., 3.]) + assert_equal(b._mask, [0, 1]) + assert_equal(b.fill_value, 9999) + assert_equal(b, a[condition]) + + condition = (a < 4.) + b = a.compress(condition) + assert_equal(b._data, [1., 2., 3.]) + assert_equal(b._mask, [0, 0, 1]) + assert_equal(b.fill_value, 9999) + assert_equal(b, a[condition]) + + a = masked_array([[10, 20, 30], [40, 50, 60]], + mask=[[0, 0, 1], [1, 0, 0]]) + b = a.compress(a.ravel() >= 22) + assert_equal(b._data, [30, 40, 50, 60]) + assert_equal(b._mask, [1, 1, 0, 0]) + + x = np.array([3, 1, 2]) + b = a.compress(x >= 2, axis=1) + assert_equal(b._data, [[10, 30], [40, 60]]) + assert_equal(b._mask, [[0, 1], [1, 0]]) + + def test_compressed(self): + # Tests compressed + a = array([1, 2, 3, 4], mask=[0, 0, 0, 0]) + b = a.compressed() + assert_equal(b, a) + a[0] = masked + b = a.compressed() + assert_equal(b, [2, 3, 4]) + + def test_empty(self): + # Tests empty/like + datatype = [('a', int), ('b', float), ('c', '|S8')] + a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')], + dtype=datatype) + assert_equal(len(a.fill_value.item()), len(datatype)) + + b = empty_like(a) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + b = empty(len(a), dtype=datatype) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + # check empty_like mask handling + a = masked_array([1, 2, 3], mask=[False, True, False]) + b = empty_like(a) + assert_(not np.may_share_memory(a.mask, b.mask)) + b = a.view(masked_array) + assert_(np.may_share_memory(a.mask, b.mask)) + + @suppress_copy_mask_on_assignment + def test_put(self): + # Tests put. 
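+ # (put() writes through the mask: storing a plain value unmasks that + # slot, while storing masked masks it)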
+ d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + x = array(d, mask=m) + assert_(x[3] is masked) + assert_(x[4] is masked) + x[[1, 4]] = [10, 40] + assert_(x[3] is masked) + assert_(x[4] is not masked) + assert_equal(x, [0, 10, 2, -1, 40]) + + x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2) + i = [0, 2, 4, 6] + x.put(i, [6, 4, 2, 0]) + assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ])) + assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]) + x.put(i, masked_array([0, 2, 4, 6], [1, 0, 1, 0])) + assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ]) + assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0]) + + x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2) + put(x, i, [6, 4, 2, 0]) + assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ])) + assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]) + put(x, i, masked_array([0, 2, 4, 6], [1, 0, 1, 0])) + assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ]) + assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0]) + + def test_put_nomask(self): + # GitHub issue 6425 + x = zeros(10) + z = array([3., -1.], mask=[False, True]) + + x.put([1, 2], z) + assert_(x[0] is not masked) + assert_equal(x[0], 0) + assert_(x[1] is not masked) + assert_equal(x[1], 3) + assert_(x[2] is masked) + assert_(x[3] is not masked) + assert_equal(x[3], 0) + + def test_put_hardmask(self): + # Tests put on hardmask + d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + xh = array(d + 1, mask=m, hard_mask=True, copy=True) + xh.put([4, 2, 0, 1, 3], [1, 2, 3, 4, 5]) + assert_equal(xh._data, [3, 4, 2, 4, 5]) + + def test_putmask(self): + x = arange(6) + 1 + mx = array(x, mask=[0, 0, 0, 1, 1, 1]) + mask = [0, 0, 1, 0, 0, 1] + # w/o mask, w/o masked values + xx = x.copy() + putmask(xx, mask, 99) + assert_equal(xx, [1, 2, 99, 4, 5, 99]) + # w/ mask, w/o masked values + mxx = mx.copy() + putmask(mxx, mask, 99) + assert_equal(mxx._data, [1, 2, 99, 4, 5, 99]) + assert_equal(mxx._mask, [0, 0, 0, 1, 1, 0]) + # w/o mask, w/ masked values + values = array([10, 20, 30, 40, 50, 60], mask=[1, 1, 1, 0, 0, 0]) + xx = x.copy() + putmask(xx, mask, values) + assert_equal(xx._data, [1, 2, 30, 4, 5, 60]) + assert_equal(xx._mask, [0, 0, 1, 0, 0, 0]) + # w/ mask, w/ masked values + mxx = mx.copy() + putmask(mxx, mask, values) + assert_equal(mxx._data, [1, 2, 30, 4, 5, 60]) + assert_equal(mxx._mask, [0, 0, 1, 1, 1, 0]) + # w/ mask, w/ masked values + hardmask + mxx = mx.copy() + mxx.harden_mask() + putmask(mxx, mask, values) + assert_equal(mxx, [1, 2, 30, 4, 5, 60]) + + def test_ravel(self): + # Tests ravel + a = array([[1, 2, 3, 4, 5]], mask=[[0, 1, 0, 0, 0]]) + aravel = a.ravel() + assert_equal(aravel._mask.shape, aravel.shape) + a = array([0, 0], mask=[1, 1]) + aravel = a.ravel() + assert_equal(aravel._mask.shape, a.shape) + # Checks that small_mask is preserved + a = array([1, 2, 3, 4], mask=[0, 0, 0, 0], shrink=False) + assert_equal(a.ravel()._mask, [0, 0, 0, 0]) + # Test that the fill_value is preserved + a.fill_value = -99 + a.shape = (2, 2) + ar = a.ravel() + assert_equal(ar._mask, [0, 0, 0, 0]) + assert_equal(ar._data, [1, 2, 3, 4]) + assert_equal(ar.fill_value, -99) + # Test index ordering + assert_equal(a.ravel(order='C'), [1, 2, 3, 4]) + assert_equal(a.ravel(order='F'), [1, 3, 2, 4]) + + def test_reshape(self): + # Tests reshape + x = arange(4) + x[0] = masked + y = x.reshape(2, 2) + assert_equal(y.shape, (2, 2,)) + assert_equal(y._mask.shape, (2, 2,)) + assert_equal(x.shape, (4,)) + assert_equal(x._mask.shape, (4,)) + + def test_sort(self): + # Test sort 
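+ # (with the default endwith=True, masked values sort to the end of the + # array; endwith=False moves them to the front instead)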
+ x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8) + + sortedx = sort(x) + assert_equal(sortedx._data, [1, 2, 3, 4]) + assert_equal(sortedx._mask, [0, 0, 0, 1]) + + sortedx = sort(x, endwith=False) + assert_equal(sortedx._data, [4, 1, 2, 3]) + assert_equal(sortedx._mask, [1, 0, 0, 0]) + + x.sort() + assert_equal(x._data, [1, 2, 3, 4]) + assert_equal(x._mask, [0, 0, 0, 1]) + + x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8) + x.sort(endwith=False) + assert_equal(x._data, [4, 1, 2, 3]) + assert_equal(x._mask, [1, 0, 0, 0]) + + x = [1, 4, 2, 3] + sortedx = sort(x) + assert_(not isinstance(sortedx, MaskedArray)) + + x = array([0, 1, -1, -2, 2], mask=nomask, dtype=np.int8) + sortedx = sort(x, endwith=False) + assert_equal(sortedx._data, [-2, -1, 0, 1, 2]) + x = array([0, 1, -1, -2, 2], mask=[0, 1, 0, 0, 1], dtype=np.int8) + sortedx = sort(x, endwith=False) + assert_equal(sortedx._data, [1, 2, -2, -1, 0]) + assert_equal(sortedx._mask, [1, 1, 0, 0, 0]) + + def test_stable_sort(self): + x = array([1, 2, 3, 1, 2, 3], dtype=np.uint8) + expected = array([0, 3, 1, 4, 2, 5]) + computed = argsort(x, kind='stable') + assert_equal(computed, expected) + + def test_argsort_matches_sort(self): + x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8) + + for kwargs in [dict(), + dict(endwith=True), + dict(endwith=False), + dict(fill_value=2), + dict(fill_value=2, endwith=True), + dict(fill_value=2, endwith=False)]: + sortedx = sort(x, **kwargs) + argsortedx = x[argsort(x, **kwargs)] + assert_equal(sortedx._data, argsortedx._data) + assert_equal(sortedx._mask, argsortedx._mask) + + def test_sort_2d(self): + # Check sort of 2D array. + # 2D array w/o mask + a = masked_array([[8, 4, 1], [2, 0, 9]]) + a.sort(0) + assert_equal(a, [[2, 0, 1], [8, 4, 9]]) + a = masked_array([[8, 4, 1], [2, 0, 9]]) + a.sort(1) + assert_equal(a, [[1, 4, 8], [0, 2, 9]]) + # 2D array w/mask + a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]]) + a.sort(0) + assert_equal(a, [[2, 0, 1], [8, 4, 9]]) + assert_equal(a._mask, [[0, 0, 0], [1, 0, 1]]) + a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]]) + a.sort(1) + assert_equal(a, [[1, 4, 8], [0, 2, 9]]) + assert_equal(a._mask, [[0, 0, 1], [0, 0, 1]]) + # 3D + a = masked_array([[[7, 8, 9], [4, 5, 6], [1, 2, 3]], + [[1, 2, 3], [7, 8, 9], [4, 5, 6]], + [[7, 8, 9], [1, 2, 3], [4, 5, 6]], + [[4, 5, 6], [1, 2, 3], [7, 8, 9]]]) + a[a % 4 == 0] = masked + am = a.copy() + an = a.filled(99) + am.sort(0) + an.sort(0) + assert_equal(am, an) + am = a.copy() + an = a.filled(99) + am.sort(1) + an.sort(1) + assert_equal(am, an) + am = a.copy() + an = a.filled(99) + am.sort(2) + an.sort(2) + assert_equal(am, an) + + def test_sort_flexible(self): + # Test sort on structured dtype.
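+ # (records compare field by field; entries with masked components are + # placed according to endwith, as the mask_last/mask_first orderings + # below encode)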
+ a = array( + data=[(3, 3), (3, 2), (2, 2), (2, 1), (1, 0), (1, 1), (1, 2)], + mask=[(0, 0), (0, 1), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0)], + dtype=[('A', int), ('B', int)]) + mask_last = array( + data=[(1, 1), (1, 2), (2, 1), (2, 2), (3, 3), (3, 2), (1, 0)], + mask=[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (1, 0)], + dtype=[('A', int), ('B', int)]) + mask_first = array( + data=[(1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (3, 2), (3, 3)], + mask=[(1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (0, 0)], + dtype=[('A', int), ('B', int)]) + + test = sort(a) + assert_equal(test, mask_last) + assert_equal(test.mask, mask_last.mask) + + test = sort(a, endwith=False) + assert_equal(test, mask_first) + assert_equal(test.mask, mask_first.mask) + + # Test sort on dtype with subarray (gh-8069) + # Just check that the sort does not error, structured array subarrays + # are treated as byte strings and that leads to differing behavior + # depending on endianess and `endwith`. + dt = np.dtype([('v', int, 2)]) + a = a.view(dt) + test = sort(a) + test = sort(a, endwith=False) + + def test_argsort(self): + # Test argsort + a = array([1, 5, 2, 4, 3], mask=[1, 0, 0, 1, 0]) + assert_equal(np.argsort(a), argsort(a)) + + def test_squeeze(self): + # Check squeeze + data = masked_array([[1, 2, 3]]) + assert_equal(data.squeeze(), [1, 2, 3]) + data = masked_array([[1, 2, 3]], mask=[[1, 1, 1]]) + assert_equal(data.squeeze(), [1, 2, 3]) + assert_equal(data.squeeze()._mask, [1, 1, 1]) + + # normal ndarrays return a view + arr = np.array([[1]]) + arr_sq = arr.squeeze() + assert_equal(arr_sq, 1) + arr_sq[...] = 2 + assert_equal(arr[0,0], 2) + + # so maskedarrays should too + m_arr = masked_array([[1]], mask=True) + m_arr_sq = m_arr.squeeze() + assert_(m_arr_sq is not np.ma.masked) + assert_equal(m_arr_sq.mask, True) + m_arr_sq[...] = 2 + assert_equal(m_arr[0,0], 2) + + def test_swapaxes(self): + # Tests swapaxes on MaskedArrays. 
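+ # (the mask must be swapped together with the data so the two stay + # aligned)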
+ x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mX = array(x, mask=m).reshape(6, 6) + mXX = mX.reshape(3, 2, 2, 3) + + mXswapped = mX.swapaxes(0, 1) + assert_equal(mXswapped[-1], mX[:, -1]) + + mXXswapped = mXX.swapaxes(0, 2) + assert_equal(mXXswapped.shape, (2, 2, 3, 3)) + + def test_take(self): + # Tests take + x = masked_array([10, 20, 30, 40], [0, 1, 0, 1]) + assert_equal(x.take([0, 0, 3]), masked_array([10, 10, 40], [0, 0, 1])) + assert_equal(x.take([0, 0, 3]), x[[0, 0, 3]]) + assert_equal(x.take([[0, 1], [0, 1]]), + masked_array([[10, 20], [10, 20]], [[0, 1], [0, 1]])) + + # assert_equal crashes when passed np.ma.mask + assert_(x[1] is np.ma.masked) + assert_(x.take(1) is np.ma.masked) + + x = array([[10, 20, 30], [40, 50, 60]], mask=[[0, 0, 1], [1, 0, 0, ]]) + assert_equal(x.take([0, 2], axis=1), + array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]])) + assert_equal(take(x, [0, 2], axis=1), + array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]])) + + def test_take_masked_indices(self): + # Test take w/ masked indices + a = np.array((40, 18, 37, 9, 22)) + indices = np.arange(3)[None,:] + np.arange(5)[:, None] + mindices = array(indices, mask=(indices >= len(a))) + # No mask + test = take(a, mindices, mode='clip') + ctrl = array([[40, 18, 37], + [18, 37, 9], + [37, 9, 22], + [9, 22, 22], + [22, 22, 22]]) + assert_equal(test, ctrl) + # Masked indices + test = take(a, mindices) + ctrl = array([[40, 18, 37], + [18, 37, 9], + [37, 9, 22], + [9, 22, 40], + [22, 40, 40]]) + ctrl[3, 2] = ctrl[4, 1] = ctrl[4, 2] = masked + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + # Masked input + masked indices + a = array((40, 18, 37, 9, 22), mask=(0, 1, 0, 0, 0)) + test = take(a, mindices) + ctrl[0, 1] = ctrl[1, 0] = masked + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + + def test_tolist(self): + # Tests to list + # ... on 1D + x = array(np.arange(12)) + x[[1, -2]] = masked + xlist = x.tolist() + assert_(xlist[1] is None) + assert_(xlist[-2] is None) + # ... on 2D + x.shape = (3, 4) + xlist = x.tolist() + ctrl = [[0, None, 2, 3], [4, 5, 6, 7], [8, 9, None, 11]] + assert_equal(xlist[0], [0, None, 2, 3]) + assert_equal(xlist[1], [4, 5, 6, 7]) + assert_equal(xlist[2], [8, 9, None, 11]) + assert_equal(xlist, ctrl) + # ... on structured array w/ masked records + x = array(list(zip([1, 2, 3], + [1.1, 2.2, 3.3], + ['one', 'two', 'thr'])), + dtype=[('a', int), ('b', float), ('c', '|S8')]) + x[-1] = masked + assert_equal(x.tolist(), + [(1, 1.1, b'one'), + (2, 2.2, b'two'), + (None, None, None)]) + # ... on structured array w/ masked fields + a = array([(1, 2,), (3, 4)], mask=[(0, 1), (0, 0)], + dtype=[('a', int), ('b', int)]) + test = a.tolist() + assert_equal(test, [[1, None], [3, 4]]) + # ... 
on mvoid + a = a[0] + test = a.tolist() + assert_equal(test, [1, None]) + + def test_tolist_specialcase(self): + # Test mvoid.tolist: make sure we return a standard Python object + a = array([(0, 1), (2, 3)], dtype=[('a', int), ('b', int)]) + # w/o mask: each entry is a np.void whose elements are standard Python + for entry in a: + for item in entry.tolist(): + assert_(not isinstance(item, np.generic)) + # w/ mask: each entry is a ma.void whose elements should be + # standard Python + a.mask[0] = (0, 1) + for entry in a: + for item in entry.tolist(): + assert_(not isinstance(item, np.generic)) + + def test_toflex(self): + # Test the conversion to records + data = arange(10) + record = data.toflex() + assert_equal(record['_data'], data._data) + assert_equal(record['_mask'], data._mask) + + data[[0, 1, 2, -1]] = masked + record = data.toflex() + assert_equal(record['_data'], data._data) + assert_equal(record['_mask'], data._mask) + + ndtype = [('i', int), ('s', '|S3'), ('f', float)] + data = array([(i, s, f) for (i, s, f) in zip(np.arange(10), + 'ABCDEFGHIJKLM', + np.random.rand(10))], + dtype=ndtype) + data[[0, 1, 2, -1]] = masked + record = data.toflex() + assert_equal(record['_data'], data._data) + assert_equal(record['_mask'], data._mask) + + ndtype = np.dtype("int, (2,3)float, float") + data = array([(i, f, ff) for (i, f, ff) in zip(np.arange(10), + np.random.rand(10), + np.random.rand(10))], + dtype=ndtype) + data[[0, 1, 2, -1]] = masked + record = data.toflex() + assert_equal_records(record['_data'], data._data) + assert_equal_records(record['_mask'], data._mask) + + def test_fromflex(self): + # Test the reconstruction of a masked_array from a record + a = array([1, 2, 3]) + test = fromflex(a.toflex()) + assert_equal(test, a) + assert_equal(test.mask, a.mask) + + a = array([1, 2, 3], mask=[0, 0, 1]) + test = fromflex(a.toflex()) + assert_equal(test, a) + assert_equal(test.mask, a.mask) + + a = array([(1, 1.), (2, 2.), (3, 3.)], mask=[(1, 0), (0, 0), (0, 1)], + dtype=[('A', int), ('B', float)]) + test = fromflex(a.toflex()) + assert_equal(test, a) + assert_equal(test.data, a.data) + + def test_arraymethod(self): + # Test a _arraymethod w/ n argument + marray = masked_array([[1, 2, 3, 4, 5]], mask=[0, 0, 1, 0, 0]) + control = masked_array([[1], [2], [3], [4], [5]], + mask=[0, 0, 1, 0, 0]) + assert_equal(marray.T, control) + assert_equal(marray.transpose(), control) + + assert_equal(MaskedArray.cumsum(marray.T, 0), control.cumsum(0)) + + def test_arraymethod_0d(self): + # gh-9430 + x = np.ma.array(42, mask=True) + assert_equal(x.T.mask, x.mask) + assert_equal(x.T.data, x.data) + + def test_transpose_view(self): + x = np.ma.array([[1, 2, 3], [4, 5, 6]]) + x[0,1] = np.ma.masked + xt = x.T + + xt[1,0] = 10 + xt[0,1] = np.ma.masked + + assert_equal(x.data, xt.T.data) + assert_equal(x.mask, xt.T.mask) + + def test_diagonal_view(self): + x = np.ma.zeros((3,3)) + x[0,0] = 10 + x[1,1] = np.ma.masked + x[2,2] = 20 + xd = x.diagonal() + x[1,1] = 15 + assert_equal(xd.mask, x.diagonal().mask) + assert_equal(xd.data, x.diagonal().data) + + +class TestMaskedArrayMathMethods: + + def setup(self): + # Base data definition. 
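+ # (x is the flat base data, X and XX are reshaped views of it, and m + # and m2 are two different mask layouts applied to each shape)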
+ x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + X = x.reshape(6, 6) + XX = x.reshape(3, 2, 2, 3) + + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mx = array(data=x, mask=m) + mX = array(data=X, mask=m.reshape(X.shape)) + mXX = array(data=XX, mask=m.reshape(XX.shape)) + + m2 = np.array([1, 1, 0, 1, 0, 0, + 1, 1, 1, 1, 0, 1, + 0, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 1, 0, + 0, 0, 1, 0, 1, 1]) + m2x = array(data=x, mask=m2) + m2X = array(data=X, mask=m2.reshape(X.shape)) + m2XX = array(data=XX, mask=m2.reshape(XX.shape)) + self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) + + def test_cumsumprod(self): + # Tests cumsum & cumprod on MaskedArrays. + (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + mXcp = mX.cumsum(0) + assert_equal(mXcp._data, mX.filled(0).cumsum(0)) + mXcp = mX.cumsum(1) + assert_equal(mXcp._data, mX.filled(0).cumsum(1)) + + mXcp = mX.cumprod(0) + assert_equal(mXcp._data, mX.filled(1).cumprod(0)) + mXcp = mX.cumprod(1) + assert_equal(mXcp._data, mX.filled(1).cumprod(1)) + + def test_cumsumprod_with_output(self): + # Tests cumsum/cumprod w/ output + xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4) + xm[:, 0] = xm[0] = xm[-1, -1] = masked + + for funcname in ('cumsum', 'cumprod'): + npfunc = getattr(np, funcname) + xmmeth = getattr(xm, funcname) + + # A ndarray as explicit input + output = np.empty((3, 4), dtype=float) + output.fill(-9999) + result = npfunc(xm, axis=0, out=output) + # ... the result should be the given output + assert_(result is output) + assert_equal(result, xmmeth(axis=0, out=output)) + + output = empty((3, 4), dtype=int) + result = xmmeth(axis=0, out=output) + assert_(result is output) + + def test_ptp(self): + # Tests ptp on MaskedArrays. + (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + (n, m) = X.shape + assert_equal(mx.ptp(), mx.compressed().ptp()) + rows = np.zeros(n, float) + cols = np.zeros(m, float) + for k in range(m): + cols[k] = mX[:, k].compressed().ptp() + for k in range(n): + rows[k] = mX[k].compressed().ptp() + assert_equal(mX.ptp(0), cols) + assert_equal(mX.ptp(1), rows) + + def test_add_object(self): + x = masked_array(['a', 'b'], mask=[1, 0], dtype=object) + y = x + 'x' + assert_equal(y[1], 'bx') + assert_(y.mask[0]) + + def test_sum_object(self): + # Test sum on object dtype + a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=object) + assert_equal(a.sum(), 5) + a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object) + assert_equal(a.sum(axis=0), [5, 7, 9]) + + def test_prod_object(self): + # Test prod on object dtype + a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=object) + assert_equal(a.prod(), 2 * 3) + a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object) + assert_equal(a.prod(axis=0), [4, 10, 18]) + + def test_meananom_object(self): + # Test mean/anom on object dtype + a = masked_array([1, 2, 3], dtype=object) + assert_equal(a.mean(), 2) + assert_equal(a.anom(), [-1, 0, 1]) + + def test_trace(self): + # Tests trace on MaskedArrays. 
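+ # (the trace of a masked matrix sums only the unmasked diagonal + # entries, as the compressed().sum() comparison below makes explicit)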
+ (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + mXdiag = mX.diagonal() + assert_equal(mX.trace(), mX.diagonal().compressed().sum()) + assert_almost_equal(mX.trace(), + X.trace() - sum(mXdiag.mask * X.diagonal(), + axis=0)) + assert_equal(np.trace(mX), mX.trace()) + + # gh-5560 + arr = np.arange(2*4*4).reshape(2,4,4) + m_arr = np.ma.masked_array(arr, False) + assert_equal(arr.trace(axis1=1, axis2=2), m_arr.trace(axis1=1, axis2=2)) + + def test_dot(self): + # Tests dot on MaskedArrays. + (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + fx = mx.filled(0) + r = mx.dot(mx) + assert_almost_equal(r.filled(0), fx.dot(fx)) + assert_(r.mask is nomask) + + fX = mX.filled(0) + r = mX.dot(mX) + assert_almost_equal(r.filled(0), fX.dot(fX)) + assert_(r.mask[1,3]) + r1 = empty_like(r) + mX.dot(mX, out=r1) + assert_almost_equal(r, r1) + + mYY = mXX.swapaxes(-1, -2) + fXX, fYY = mXX.filled(0), mYY.filled(0) + r = mXX.dot(mYY) + assert_almost_equal(r.filled(0), fXX.dot(fYY)) + r1 = empty_like(r) + mXX.dot(mYY, out=r1) + assert_almost_equal(r, r1) + + def test_dot_shape_mismatch(self): + # regression test + x = masked_array([[1,2],[3,4]], mask=[[0,1],[0,0]]) + y = masked_array([[1,2],[3,4]], mask=[[0,1],[0,0]]) + z = masked_array([[0,1],[3,3]]) + x.dot(y, out=z) + assert_almost_equal(z.filled(0), [[1, 0], [15, 16]]) + assert_almost_equal(z.mask, [[0, 1], [0, 0]]) + + def test_varmean_nomask(self): + # gh-5769 + foo = array([1,2,3,4], dtype='f8') + bar = array([1,2,3,4], dtype='f8') + assert_equal(type(foo.mean()), np.float64) + assert_equal(type(foo.var()), np.float64) + assert((foo.mean() == bar.mean()) is np.bool_(True)) + + # check array type is preserved and out works + foo = array(np.arange(16).reshape((4,4)), dtype='f8') + bar = empty(4, dtype='f4') + assert_equal(type(foo.mean(axis=1)), MaskedArray) + assert_equal(type(foo.var(axis=1)), MaskedArray) + assert_(foo.mean(axis=1, out=bar) is bar) + assert_(foo.var(axis=1, out=bar) is bar) + + def test_varstd(self): + # Tests var & std on MaskedArrays. 
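+ # Masked entries are excluded from the statistics, so var/std must
+ # agree with the same reductions over mX.compressed(), including when
+ # a ddof (delta degrees of freedom) correction is requested.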
+ (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + assert_almost_equal(mX.var(axis=None), mX.compressed().var()) + assert_almost_equal(mX.std(axis=None), mX.compressed().std()) + assert_almost_equal(mX.std(axis=None, ddof=1), + mX.compressed().std(ddof=1)) + assert_almost_equal(mX.var(axis=None, ddof=1), + mX.compressed().var(ddof=1)) + assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape) + assert_equal(mX.var().shape, X.var().shape) + (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1)) + assert_almost_equal(mX.var(axis=None, ddof=2), + mX.compressed().var(ddof=2)) + assert_almost_equal(mX.std(axis=None, ddof=2), + mX.compressed().std(ddof=2)) + for k in range(6): + assert_almost_equal(mXvar1[k], mX[k].compressed().var()) + assert_almost_equal(mXvar0[k], mX[:, k].compressed().var()) + assert_almost_equal(np.sqrt(mXvar0[k]), + mX[:, k].compressed().std()) + + @suppress_copy_mask_on_assignment + def test_varstd_specialcases(self): + # Test a special case for var + nout = np.array(-1, dtype=float) + mout = array(-1, dtype=float) + + x = array(arange(10), mask=True) + for methodname in ('var', 'std'): + method = getattr(x, methodname) + assert_(method() is masked) + assert_(method(0) is masked) + assert_(method(-1) is masked) + # Using a masked array as explicit output + method(out=mout) + assert_(mout is not masked) + assert_equal(mout.mask, True) + # Using a ndarray as explicit output + method(out=nout) + assert_(np.isnan(nout)) + + x = array(arange(10), mask=True) + x[-1] = 9 + for methodname in ('var', 'std'): + method = getattr(x, methodname) + assert_(method(ddof=1) is masked) + assert_(method(0, ddof=1) is masked) + assert_(method(-1, ddof=1) is masked) + # Using a masked array as explicit output + method(out=mout, ddof=1) + assert_(mout is not masked) + assert_equal(mout.mask, True) + # Using a ndarray as explicit output + method(out=nout, ddof=1) + assert_(np.isnan(nout)) + + def test_varstd_ddof(self): + a = array([[1, 1, 0], [1, 1, 0]], mask=[[0, 0, 1], [0, 0, 1]]) + test = a.std(axis=0, ddof=0) + assert_equal(test.filled(0), [0, 0, 0]) + assert_equal(test.mask, [0, 0, 1]) + test = a.std(axis=0, ddof=1) + assert_equal(test.filled(0), [0, 0, 0]) + assert_equal(test.mask, [0, 0, 1]) + test = a.std(axis=0, ddof=2) + assert_equal(test.filled(0), [0, 0, 0]) + assert_equal(test.mask, [1, 1, 1]) + + def test_diag(self): + # Test diag + x = arange(9).reshape((3, 3)) + x[1, 1] = masked + out = np.diag(x) + assert_equal(out, [0, 4, 8]) + out = diag(x) + assert_equal(out, [0, 4, 8]) + assert_equal(out.mask, [0, 1, 0]) + out = diag(out) + control = array([[0, 0, 0], [0, 4, 0], [0, 0, 8]], + mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(out, control) + + def test_axis_methods_nomask(self): + # Test the combination nomask & methods w/ axis + a = array([[1, 2, 3], [4, 5, 6]]) + + assert_equal(a.sum(0), [5, 7, 9]) + assert_equal(a.sum(-1), [6, 15]) + assert_equal(a.sum(1), [6, 15]) + + assert_equal(a.prod(0), [4, 10, 18]) + assert_equal(a.prod(-1), [6, 120]) + assert_equal(a.prod(1), [6, 120]) + + assert_equal(a.min(0), [1, 2, 3]) + assert_equal(a.min(-1), [1, 4]) + assert_equal(a.min(1), [1, 4]) + + assert_equal(a.max(0), [4, 5, 6]) + assert_equal(a.max(-1), [3, 6]) + assert_equal(a.max(1), [3, 6]) + + +class TestMaskedArrayMathMethodsComplex: + # Test class for miscellaneous MaskedArrays methods. + def setup(self): + # Base data definition. 
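+ # Same layout as TestMaskedArrayMathMethods.setup, but with some
+ # purely imaginary entries to exercise the complex-valued code paths.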
+ x = np.array([8.375j, 7.545j, 8.828j, 8.5j, 1.757j, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479j, + 7.189j, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993j]) + X = x.reshape(6, 6) + XX = x.reshape(3, 2, 2, 3) + + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mx = array(data=x, mask=m) + mX = array(data=X, mask=m.reshape(X.shape)) + mXX = array(data=XX, mask=m.reshape(XX.shape)) + + m2 = np.array([1, 1, 0, 1, 0, 0, + 1, 1, 1, 1, 0, 1, + 0, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 1, 0, + 0, 0, 1, 0, 1, 1]) + m2x = array(data=x, mask=m2) + m2X = array(data=X, mask=m2.reshape(X.shape)) + m2XX = array(data=XX, mask=m2.reshape(XX.shape)) + self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) + + def test_varstd(self): + # Tests var & std on MaskedArrays. + (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + assert_almost_equal(mX.var(axis=None), mX.compressed().var()) + assert_almost_equal(mX.std(axis=None), mX.compressed().std()) + assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape) + assert_equal(mX.var().shape, X.var().shape) + (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1)) + assert_almost_equal(mX.var(axis=None, ddof=2), + mX.compressed().var(ddof=2)) + assert_almost_equal(mX.std(axis=None, ddof=2), + mX.compressed().std(ddof=2)) + for k in range(6): + assert_almost_equal(mXvar1[k], mX[k].compressed().var()) + assert_almost_equal(mXvar0[k], mX[:, k].compressed().var()) + assert_almost_equal(np.sqrt(mXvar0[k]), + mX[:, k].compressed().std()) + + +class TestMaskedArrayFunctions: + # Test class for miscellaneous functions. + + def setup(self): + x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) + y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) + m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] + xm = masked_array(x, mask=m1) + ym = masked_array(y, mask=m2) + xm.set_fill_value(1e+20) + self.info = (xm, ym) + + def test_masked_where_bool(self): + x = [1, 2] + y = masked_where(False, x) + assert_equal(y, [1, 2]) + assert_equal(y[1], 2) + + def test_masked_equal_wlist(self): + x = [1, 2, 3] + mx = masked_equal(x, 3) + assert_equal(mx, x) + assert_equal(mx._mask, [0, 0, 1]) + mx = masked_not_equal(x, 3) + assert_equal(mx, x) + assert_equal(mx._mask, [1, 1, 0]) + + def test_masked_equal_fill_value(self): + x = [1, 2, 3] + mx = masked_equal(x, 3) + assert_equal(mx._mask, [0, 0, 1]) + assert_equal(mx.fill_value, 3) + + def test_masked_where_condition(self): + # Tests masking functions. + x = array([1., 2., 3., 4., 5.]) + x[2] = masked + assert_equal(masked_where(greater(x, 2), x), masked_greater(x, 2)) + assert_equal(masked_where(greater_equal(x, 2), x), + masked_greater_equal(x, 2)) + assert_equal(masked_where(less(x, 2), x), masked_less(x, 2)) + assert_equal(masked_where(less_equal(x, 2), x), + masked_less_equal(x, 2)) + assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)) + assert_equal(masked_where(equal(x, 2), x), masked_equal(x, 2)) + assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)) + assert_equal(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]), + [99, 99, 3, 4, 5]) + + def test_masked_where_oddities(self): + # Tests some generic features. 
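+ # An all-False condition array should leave every element unmasked,
+ # so the result compares equal to the original data.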
+ atest = ones((10, 10, 10), dtype=float)
+ btest = zeros(atest.shape, MaskType)
+ ctest = masked_where(btest, atest)
+ assert_equal(atest, ctest)
+
+ def test_masked_where_shape_constraint(self):
+ a = arange(10)
+ with assert_raises(IndexError):
+ masked_equal(1, a)
+ test = masked_equal(a, 1)
+ assert_equal(test.mask, [0, 1, 0, 0, 0, 0, 0, 0, 0, 0])
+
+ def test_masked_where_structured(self):
+ # test that masked_where on a structured array sets a structured
+ # mask (see issue #2972)
+ a = np.zeros(10, dtype=[("A", "<f2"), ("B", "<f4")])
+ am = np.ma.masked_where(a["A"] < 5, a)
+ assert_equal(am.mask.dtype.names, am.dtype.names)
+ assert_equal(am["A"],
+ np.ma.masked_array(np.zeros(10), np.ones(10)))
+
+ def test_masked_where_mismatch(self):
+ # gh-4520
+ x = np.arange(10)
+ y = np.arange(5)
+ assert_raises(IndexError, np.ma.masked_where, y > 6, x)
+
+ def test_masked_otherfunctions(self):
+ assert_equal(masked_inside(list(range(5)), 1, 3),
+ [0, 199, 199, 199, 4])
+ assert_equal(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199])
+ assert_equal(masked_inside(array(list(range(5)),
+ mask=[1, 0, 0, 0, 0]), 1, 3).mask,
+ [1, 1, 1, 1, 0])
+ assert_equal(masked_outside(array(list(range(5)),
+ mask=[0, 1, 0, 0, 0]), 1, 3).mask,
+ [1, 1, 0, 0, 1])
+ assert_equal(masked_equal(array(list(range(5)),
+ mask=[1, 0, 0, 0, 0]), 2).mask,
+ [1, 0, 1, 0, 0])
+ assert_equal(masked_not_equal(array([2, 2, 1, 2, 1],
+ mask=[1, 0, 0, 0, 0]), 2).mask,
+ [1, 0, 1, 0, 1])
+
+ def test_round(self):
+ a = array([1.23456, 2.34567, 3.45678, 4.56789, 5.67890],
+ mask=[0, 1, 0, 0, 0])
+ assert_equal(a.round(), [1., 2., 3., 5., 6.])
+ assert_equal(a.round(1), [1.2, 2.3, 3.5, 4.6, 5.7])
+ assert_equal(a.round(3), [1.235, 2.346, 3.457, 4.568, 5.679])
+ b = empty_like(a)
+ a.round(out=b)
+ assert_equal(b, [1., 2., 3., 5., 6.])
+
+ x = array([1., 2., 3., 4., 5.])
+ c = array([1, 1, 1, 0, 0])
+ x[2] = masked
+ z = where(c, x, -x)
+ assert_equal(z, [1., 2., 0., -4., -5])
+ c[0] = masked
+ z = where(c, x, -x)
+ assert_equal(z, [1., 2., 0., -4., -5])
+ assert_(z[0] is masked)
+ assert_(z[1] is not masked)
+ assert_(z[2] is masked)
+
+ def test_round_with_output(self):
+ # Testing round with an explicit output
+
+ xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
+ xm[:, 0] = xm[0] = xm[-1, -1] = masked
+
+ # A ndarray as explicit input
+ output = np.empty((3, 4), dtype=float)
+ output.fill(-9999)
+ result = np.round(xm, decimals=2, out=output)
+ # ...
the result should be the given output + assert_(result is output) + assert_equal(result, xm.round(decimals=2, out=output)) + + output = empty((3, 4), dtype=float) + result = xm.round(decimals=2, out=output) + assert_(result is output) + + def test_round_with_scalar(self): + # Testing round with scalar/zero dimension input + # GH issue 2244 + a = array(1.1, mask=[False]) + assert_equal(a.round(), 1) + + a = array(1.1, mask=[True]) + assert_(a.round() is masked) + + a = array(1.1, mask=[False]) + output = np.empty(1, dtype=float) + output.fill(-9999) + a.round(out=output) + assert_equal(output, 1) + + a = array(1.1, mask=[False]) + output = array(-9999., mask=[True]) + a.round(out=output) + assert_equal(output[()], 1) + + a = array(1.1, mask=[True]) + output = array(-9999., mask=[False]) + a.round(out=output) + assert_(output[()] is masked) + + def test_identity(self): + a = identity(5) + assert_(isinstance(a, MaskedArray)) + assert_equal(a, np.identity(5)) + + def test_power(self): + x = -1.1 + assert_almost_equal(power(x, 2.), 1.21) + assert_(power(x, masked) is masked) + x = array([-1.1, -1.1, 1.1, 1.1, 0.]) + b = array([0.5, 2., 0.5, 2., -1.], mask=[0, 0, 0, 0, 1]) + y = power(x, b) + assert_almost_equal(y, [0, 1.21, 1.04880884817, 1.21, 0.]) + assert_equal(y._mask, [1, 0, 0, 0, 1]) + b.mask = nomask + y = power(x, b) + assert_equal(y._mask, [1, 0, 0, 0, 1]) + z = x ** b + assert_equal(z._mask, y._mask) + assert_almost_equal(z, y) + assert_almost_equal(z._data, y._data) + x **= b + assert_equal(x._mask, y._mask) + assert_almost_equal(x, y) + assert_almost_equal(x._data, y._data) + + def test_power_with_broadcasting(self): + # Test power w/ broadcasting + a2 = np.array([[1., 2., 3.], [4., 5., 6.]]) + a2m = array(a2, mask=[[1, 0, 0], [0, 0, 1]]) + b1 = np.array([2, 4, 3]) + b2 = np.array([b1, b1]) + b2m = array(b2, mask=[[0, 1, 0], [0, 1, 0]]) + + ctrl = array([[1 ** 2, 2 ** 4, 3 ** 3], [4 ** 2, 5 ** 4, 6 ** 3]], + mask=[[1, 1, 0], [0, 1, 1]]) + # No broadcasting, base & exp w/ mask + test = a2m ** b2m + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + # No broadcasting, base w/ mask, exp w/o mask + test = a2m ** b2 + assert_equal(test, ctrl) + assert_equal(test.mask, a2m.mask) + # No broadcasting, base w/o mask, exp w/ mask + test = a2 ** b2m + assert_equal(test, ctrl) + assert_equal(test.mask, b2m.mask) + + ctrl = array([[2 ** 2, 4 ** 4, 3 ** 3], [2 ** 2, 4 ** 4, 3 ** 3]], + mask=[[0, 1, 0], [0, 1, 0]]) + test = b1 ** b2m + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + test = b2m ** b1 + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + + def test_where(self): + # Test the where function + x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) + y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) + m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] + xm = masked_array(x, mask=m1) + ym = masked_array(y, mask=m2) + xm.set_fill_value(1e+20) + + d = where(xm > 2, xm, -9) + assert_equal(d, [-9., -9., -9., -9., -9., 4., + -9., -9., 10., -9., -9., 3.]) + assert_equal(d._mask, xm._mask) + d = where(xm > 2, -9, ym) + assert_equal(d, [5., 0., 3., 2., -1., -9., + -9., -10., -9., 1., 0., -9.]) + assert_equal(d._mask, [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0]) + d = where(xm > 2, xm, masked) + assert_equal(d, [-9., -9., -9., -9., -9., 4., + -9., -9., 10., -9., -9., 3.]) + tmp = xm._mask.copy() + tmp[(xm <= 2).filled(True)] = True + assert_equal(d._mask, tmp) + + ixm = xm.astype(int) + d = 
where(ixm > 2, ixm, masked) + assert_equal(d, [-9, -9, -9, -9, -9, 4, -9, -9, 10, -9, -9, 3]) + assert_equal(d.dtype, ixm.dtype) + + def test_where_object(self): + a = np.array(None) + b = masked_array(None) + r = b.copy() + assert_equal(np.ma.where(True, a, a), r) + assert_equal(np.ma.where(True, b, b), r) + + def test_where_with_masked_choice(self): + x = arange(10) + x[3] = masked + c = x >= 8 + # Set False to masked + z = where(c, x, masked) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is masked) + assert_(z[7] is masked) + assert_(z[8] is not masked) + assert_(z[9] is not masked) + assert_equal(x, z) + # Set True to masked + z = where(c, masked, x) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is not masked) + assert_(z[7] is not masked) + assert_(z[8] is masked) + assert_(z[9] is masked) + + def test_where_with_masked_condition(self): + x = array([1., 2., 3., 4., 5.]) + c = array([1, 1, 1, 0, 0]) + x[2] = masked + z = where(c, x, -x) + assert_equal(z, [1., 2., 0., -4., -5]) + c[0] = masked + z = where(c, x, -x) + assert_equal(z, [1., 2., 0., -4., -5]) + assert_(z[0] is masked) + assert_(z[1] is not masked) + assert_(z[2] is masked) + + x = arange(1, 6) + x[-1] = masked + y = arange(1, 6) * 10 + y[2] = masked + c = array([1, 1, 1, 0, 0], mask=[1, 0, 0, 0, 0]) + cm = c.filled(1) + z = where(c, x, y) + zm = where(cm, x, y) + assert_equal(z, zm) + assert_(getmask(zm) is nomask) + assert_equal(zm, [1, 2, 3, 40, 50]) + z = where(c, masked, 1) + assert_equal(z, [99, 99, 99, 1, 1]) + z = where(c, 1, masked) + assert_equal(z, [99, 1, 1, 99, 99]) + + def test_where_type(self): + # Test the type conservation with where + x = np.arange(4, dtype=np.int32) + y = np.arange(4, dtype=np.float32) * 2.2 + test = where(x > 1.5, y, x).dtype + control = np.find_common_type([np.int32, np.float32], []) + assert_equal(test, control) + + def test_where_broadcast(self): + # Issue 8599 + x = np.arange(9).reshape(3, 3) + y = np.zeros(3) + core = np.where([1, 0, 1], x, y) + ma = where([1, 0, 1], x, y) + + assert_equal(core, ma) + assert_equal(core.dtype, ma.dtype) + + def test_where_structured(self): + # Issue 8600 + dt = np.dtype([('a', int), ('b', int)]) + x = np.array([(1, 2), (3, 4), (5, 6)], dtype=dt) + y = np.array((10, 20), dtype=dt) + core = np.where([0, 1, 1], x, y) + ma = np.where([0, 1, 1], x, y) + + assert_equal(core, ma) + assert_equal(core.dtype, ma.dtype) + + def test_where_structured_masked(self): + dt = np.dtype([('a', int), ('b', int)]) + x = np.array([(1, 2), (3, 4), (5, 6)], dtype=dt) + + ma = where([0, 1, 1], x, masked) + expected = masked_where([1, 0, 0], x) + + assert_equal(ma.dtype, expected.dtype) + assert_equal(ma, expected) + assert_equal(ma.mask, expected.mask) + + def test_choose(self): + # Test choose + choices = [[0, 1, 2, 3], [10, 11, 12, 13], + [20, 21, 22, 23], [30, 31, 32, 33]] + chosen = choose([2, 3, 1, 0], choices) + assert_equal(chosen, array([20, 31, 12, 3])) + chosen = choose([2, 4, 1, 0], choices, mode='clip') + assert_equal(chosen, array([20, 31, 12, 3])) + chosen = choose([2, 4, 1, 0], choices, mode='wrap') + assert_equal(chosen, array([20, 1, 12, 3])) + # Check with some masked indices + indices_ = array([2, 4, 1, 0], mask=[1, 0, 0, 1]) + chosen = choose(indices_, choices, mode='wrap') + assert_equal(chosen, array([99, 1, 12, 99])) + assert_equal(chosen.mask, [1, 0, 0, 1]) + # Check with some masked choices + choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1], + [1, 0, 0, 0], [0, 0, 0, 0]]) + indices_ = [2, 3, 
1, 0] + chosen = choose(indices_, choices, mode='wrap') + assert_equal(chosen, array([20, 31, 12, 3])) + assert_equal(chosen.mask, [1, 0, 0, 1]) + + def test_choose_with_out(self): + # Test choose with an explicit out keyword + choices = [[0, 1, 2, 3], [10, 11, 12, 13], + [20, 21, 22, 23], [30, 31, 32, 33]] + store = empty(4, dtype=int) + chosen = choose([2, 3, 1, 0], choices, out=store) + assert_equal(store, array([20, 31, 12, 3])) + assert_(store is chosen) + # Check with some masked indices + out + store = empty(4, dtype=int) + indices_ = array([2, 3, 1, 0], mask=[1, 0, 0, 1]) + chosen = choose(indices_, choices, mode='wrap', out=store) + assert_equal(store, array([99, 31, 12, 99])) + assert_equal(store.mask, [1, 0, 0, 1]) + # Check with some masked choices + out ina ndarray ! + choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1], + [1, 0, 0, 0], [0, 0, 0, 0]]) + indices_ = [2, 3, 1, 0] + store = empty(4, dtype=int).view(ndarray) + chosen = choose(indices_, choices, mode='wrap', out=store) + assert_equal(store, array([999999, 31, 12, 999999])) + + def test_reshape(self): + a = arange(10) + a[0] = masked + # Try the default + b = a.reshape((5, 2)) + assert_equal(b.shape, (5, 2)) + assert_(b.flags['C']) + # Try w/ arguments as list instead of tuple + b = a.reshape(5, 2) + assert_equal(b.shape, (5, 2)) + assert_(b.flags['C']) + # Try w/ order + b = a.reshape((5, 2), order='F') + assert_equal(b.shape, (5, 2)) + assert_(b.flags['F']) + # Try w/ order + b = a.reshape(5, 2, order='F') + assert_equal(b.shape, (5, 2)) + assert_(b.flags['F']) + + c = np.reshape(a, (2, 5)) + assert_(isinstance(c, MaskedArray)) + assert_equal(c.shape, (2, 5)) + assert_(c[0, 0] is masked) + assert_(c.flags['C']) + + def test_make_mask_descr(self): + # Flexible + ntype = [('a', float), ('b', float)] + test = make_mask_descr(ntype) + assert_equal(test, [('a', bool), ('b', bool)]) + assert_(test is make_mask_descr(test)) + + # Standard w/ shape + ntype = (float, 2) + test = make_mask_descr(ntype) + assert_equal(test, (bool, 2)) + assert_(test is make_mask_descr(test)) + + # Standard standard + ntype = float + test = make_mask_descr(ntype) + assert_equal(test, np.dtype(bool)) + assert_(test is make_mask_descr(test)) + + # Nested + ntype = [('a', float), ('b', [('ba', float), ('bb', float)])] + test = make_mask_descr(ntype) + control = np.dtype([('a', 'b1'), ('b', [('ba', 'b1'), ('bb', 'b1')])]) + assert_equal(test, control) + assert_(test is make_mask_descr(test)) + + # Named+ shape + ntype = [('a', (float, 2))] + test = make_mask_descr(ntype) + assert_equal(test, np.dtype([('a', (bool, 2))])) + assert_(test is make_mask_descr(test)) + + # 2 names + ntype = [(('A', 'a'), float)] + test = make_mask_descr(ntype) + assert_equal(test, np.dtype([(('A', 'a'), bool)])) + assert_(test is make_mask_descr(test)) + + # nested boolean types should preserve identity + base_type = np.dtype([('a', int, 3)]) + base_mtype = make_mask_descr(base_type) + sub_type = np.dtype([('a', int), ('b', base_mtype)]) + test = make_mask_descr(sub_type) + assert_equal(test, np.dtype([('a', bool), ('b', [('a', bool, 3)])])) + assert_(test.fields['b'][0] is base_mtype) + + def test_make_mask(self): + # Test make_mask + # w/ a list as an input + mask = [0, 1] + test = make_mask(mask) + assert_equal(test.dtype, MaskType) + assert_equal(test, [0, 1]) + # w/ a ndarray as an input + mask = np.array([0, 1], dtype=bool) + test = make_mask(mask) + assert_equal(test.dtype, MaskType) + assert_equal(test, [0, 1]) + # w/ a flexible-type ndarray as an input - 
use default + mdtype = [('a', bool), ('b', bool)] + mask = np.array([(0, 0), (0, 1)], dtype=mdtype) + test = make_mask(mask) + assert_equal(test.dtype, MaskType) + assert_equal(test, [1, 1]) + # w/ a flexible-type ndarray as an input - use input dtype + mdtype = [('a', bool), ('b', bool)] + mask = np.array([(0, 0), (0, 1)], dtype=mdtype) + test = make_mask(mask, dtype=mask.dtype) + assert_equal(test.dtype, mdtype) + assert_equal(test, mask) + # w/ a flexible-type ndarray as an input - use input dtype + mdtype = [('a', float), ('b', float)] + bdtype = [('a', bool), ('b', bool)] + mask = np.array([(0, 0), (0, 1)], dtype=mdtype) + test = make_mask(mask, dtype=mask.dtype) + assert_equal(test.dtype, bdtype) + assert_equal(test, np.array([(0, 0), (0, 1)], dtype=bdtype)) + # Ensure this also works for void + mask = np.array((False, True), dtype='?,?')[()] + assert_(isinstance(mask, np.void)) + test = make_mask(mask, dtype=mask.dtype) + assert_equal(test, mask) + assert_(test is not mask) + mask = np.array((0, 1), dtype='i4,i4')[()] + test2 = make_mask(mask, dtype=mask.dtype) + assert_equal(test2, test) + # test that nomask is returned when m is nomask. + bools = [True, False] + dtypes = [MaskType, float] + msgformat = 'copy=%s, shrink=%s, dtype=%s' + for cpy, shr, dt in itertools.product(bools, bools, dtypes): + res = make_mask(nomask, copy=cpy, shrink=shr, dtype=dt) + assert_(res is nomask, msgformat % (cpy, shr, dt)) + + def test_mask_or(self): + # Initialize + mtype = [('a', bool), ('b', bool)] + mask = np.array([(0, 0), (0, 1), (1, 0), (0, 0)], dtype=mtype) + # Test using nomask as input + test = mask_or(mask, nomask) + assert_equal(test, mask) + test = mask_or(nomask, mask) + assert_equal(test, mask) + # Using False as input + test = mask_or(mask, False) + assert_equal(test, mask) + # Using another array w / the same dtype + other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=mtype) + test = mask_or(mask, other) + control = np.array([(0, 1), (0, 1), (1, 1), (0, 1)], dtype=mtype) + assert_equal(test, control) + # Using another array w / a different dtype + othertype = [('A', bool), ('B', bool)] + other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=othertype) + try: + test = mask_or(mask, other) + except ValueError: + pass + # Using nested arrays + dtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] + amask = np.array([(0, (1, 0)), (0, (1, 0))], dtype=dtype) + bmask = np.array([(1, (0, 1)), (0, (0, 0))], dtype=dtype) + cntrl = np.array([(1, (1, 1)), (0, (1, 0))], dtype=dtype) + assert_equal(mask_or(amask, bmask), cntrl) + + def test_flatten_mask(self): + # Tests flatten mask + # Standard dtype + mask = np.array([0, 0, 1], dtype=bool) + assert_equal(flatten_mask(mask), mask) + # Flexible dtype + mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)]) + test = flatten_mask(mask) + control = np.array([0, 0, 0, 1], dtype=bool) + assert_equal(test, control) + + mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] + data = [(0, (0, 0)), (0, (0, 1))] + mask = np.array(data, dtype=mdtype) + test = flatten_mask(mask) + control = np.array([0, 0, 0, 0, 0, 1], dtype=bool) + assert_equal(test, control) + + def test_on_ndarray(self): + # Test functions on ndarrays + a = np.array([1, 2, 3, 4]) + m = array(a, mask=False) + test = anom(a) + assert_equal(test, m.anom()) + test = reshape(a, (2, 2)) + assert_equal(test, m.reshape(2, 2)) + + def test_compress(self): + # Test compress function on ndarray and masked array + # Address Github #2495. 
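+ # Note the argument order: np.ma.compress(condition, arr, axis=...)
+ # follows np.compress, e.g. np.ma.compress([1, 0, 1], np.arange(3))
+ # keeps elements 0 and 2 - unlike the arr.compress(condition) method.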
+ arr = np.arange(8) + arr.shape = 4, 2 + cond = np.array([True, False, True, True]) + control = arr[[0, 2, 3]] + test = np.ma.compress(cond, arr, axis=0) + assert_equal(test, control) + marr = np.ma.array(arr) + test = np.ma.compress(cond, marr, axis=0) + assert_equal(test, control) + + def test_compressed(self): + # Test ma.compressed function. + # Address gh-4026 + a = np.ma.array([1, 2]) + test = np.ma.compressed(a) + assert_(type(test) is np.ndarray) + + # Test case when input data is ndarray subclass + class A(np.ndarray): + pass + + a = np.ma.array(A(shape=0)) + test = np.ma.compressed(a) + assert_(type(test) is A) + + # Test that compress flattens + test = np.ma.compressed([[1],[2]]) + assert_equal(test.ndim, 1) + test = np.ma.compressed([[[[[1]]]]]) + assert_equal(test.ndim, 1) + + # Test case when input is MaskedArray subclass + class M(MaskedArray): + pass + + test = np.ma.compressed(M([[[]], [[]]])) + assert_equal(test.ndim, 1) + + # with .compressed() overridden + class M(MaskedArray): + def compressed(self): + return 42 + + test = np.ma.compressed(M([[[]], [[]]])) + assert_equal(test, 42) + + def test_convolve(self): + a = masked_equal(np.arange(5), 2) + b = np.array([1, 1]) + test = np.ma.convolve(a, b) + assert_equal(test, masked_equal([0, 1, -1, -1, 7, 4], -1)) + + test = np.ma.convolve(a, b, propagate_mask=False) + assert_equal(test, masked_equal([0, 1, 1, 3, 7, 4], -1)) + + test = np.ma.convolve([1, 1], [1, 1, 1]) + assert_equal(test, masked_equal([1, 2, 2, 1], -1)) + + a = [1, 1] + b = masked_equal([1, -1, -1, 1], -1) + test = np.ma.convolve(a, b, propagate_mask=False) + assert_equal(test, masked_equal([1, 1, -1, 1, 1], -1)) + test = np.ma.convolve(a, b, propagate_mask=True) + assert_equal(test, masked_equal([-1, -1, -1, -1, -1], -1)) + + +class TestMaskedFields: + + def setup(self): + ilist = [1, 2, 3, 4, 5] + flist = [1.1, 2.2, 3.3, 4.4, 5.5] + slist = ['one', 'two', 'three', 'four', 'five'] + ddtype = [('a', int), ('b', float), ('c', '|S8')] + mdtype = [('a', bool), ('b', bool), ('c', bool)] + mask = [0, 1, 0, 0, 1] + base = array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype) + self.data = dict(base=base, mask=mask, ddtype=ddtype, mdtype=mdtype) + + def test_set_records_masks(self): + base = self.data['base'] + mdtype = self.data['mdtype'] + # Set w/ nomask or masked + base.mask = nomask + assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype)) + base.mask = masked + assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype)) + # Set w/ simple boolean + base.mask = False + assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype)) + base.mask = True + assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype)) + # Set w/ list + base.mask = [0, 0, 0, 1, 1] + assert_equal_records(base._mask, + np.array([(x, x, x) for x in [0, 0, 0, 1, 1]], + dtype=mdtype)) + + def test_set_record_element(self): + # Check setting an element of a record) + base = self.data['base'] + (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) + base[0] = (pi, pi, 'pi') + + assert_equal(base_a.dtype, int) + assert_equal(base_a._data, [3, 2, 3, 4, 5]) + + assert_equal(base_b.dtype, float) + assert_equal(base_b._data, [pi, 2.2, 3.3, 4.4, 5.5]) + + assert_equal(base_c.dtype, '|S8') + assert_equal(base_c._data, + [b'pi', b'two', b'three', b'four', b'five']) + + def test_set_record_slice(self): + base = self.data['base'] + (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) + base[:3] = (pi, pi, 'pi') + + assert_equal(base_a.dtype, 
int) + assert_equal(base_a._data, [3, 3, 3, 4, 5]) + + assert_equal(base_b.dtype, float) + assert_equal(base_b._data, [pi, pi, pi, 4.4, 5.5]) + + assert_equal(base_c.dtype, '|S8') + assert_equal(base_c._data, + [b'pi', b'pi', b'pi', b'four', b'five']) + + def test_mask_element(self): + "Check record access" + base = self.data['base'] + base[0] = masked + + for n in ('a', 'b', 'c'): + assert_equal(base[n].mask, [1, 1, 0, 0, 1]) + assert_equal(base[n]._data, base._data[n]) + + def test_getmaskarray(self): + # Test getmaskarray on flexible dtype + ndtype = [('a', int), ('b', float)] + test = empty(3, dtype=ndtype) + assert_equal(getmaskarray(test), + np.array([(0, 0), (0, 0), (0, 0)], + dtype=[('a', '|b1'), ('b', '|b1')])) + test[:] = masked + assert_equal(getmaskarray(test), + np.array([(1, 1), (1, 1), (1, 1)], + dtype=[('a', '|b1'), ('b', '|b1')])) + + def test_view(self): + # Test view w/ flexible dtype + iterator = list(zip(np.arange(10), np.random.rand(10))) + data = np.array(iterator) + a = array(iterator, dtype=[('a', float), ('b', float)]) + a.mask[0] = (1, 0) + controlmask = np.array([1] + 19 * [0], dtype=bool) + # Transform globally to simple dtype + test = a.view(float) + assert_equal(test, data.ravel()) + assert_equal(test.mask, controlmask) + # Transform globally to dty + test = a.view((float, 2)) + assert_equal(test, data) + assert_equal(test.mask, controlmask.reshape(-1, 2)) + + def test_getitem(self): + ndtype = [('a', float), ('b', float)] + a = array(list(zip(np.random.rand(10), np.arange(10))), dtype=ndtype) + a.mask = np.array(list(zip([0, 0, 0, 0, 0, 0, 0, 0, 1, 1], + [1, 0, 0, 0, 0, 0, 0, 0, 1, 0])), + dtype=[('a', bool), ('b', bool)]) + + def _test_index(i): + assert_equal(type(a[i]), mvoid) + assert_equal_records(a[i]._data, a._data[i]) + assert_equal_records(a[i]._mask, a._mask[i]) + + assert_equal(type(a[i, ...]), MaskedArray) + assert_equal_records(a[i,...]._data, a._data[i,...]) + assert_equal_records(a[i,...]._mask, a._mask[i,...]) + + _test_index(1) # No mask + _test_index(0) # One element masked + _test_index(-2) # All element masked + + def test_setitem(self): + # Issue 4866: check that one can set individual items in [record][col] + # and [col][record] order + ndtype = np.dtype([('a', float), ('b', int)]) + ma = np.ma.MaskedArray([(1.0, 1), (2.0, 2)], dtype=ndtype) + ma['a'][1] = 3.0 + assert_equal(ma['a'], np.array([1.0, 3.0])) + ma[1]['a'] = 4.0 + assert_equal(ma['a'], np.array([1.0, 4.0])) + # Issue 2403 + mdtype = np.dtype([('a', bool), ('b', bool)]) + # soft mask + control = np.array([(False, True), (True, True)], dtype=mdtype) + a = np.ma.masked_all((2,), dtype=ndtype) + a['a'][0] = 2 + assert_equal(a.mask, control) + a = np.ma.masked_all((2,), dtype=ndtype) + a[0]['a'] = 2 + assert_equal(a.mask, control) + # hard mask + control = np.array([(True, True), (True, True)], dtype=mdtype) + a = np.ma.masked_all((2,), dtype=ndtype) + a.harden_mask() + a['a'][0] = 2 + assert_equal(a.mask, control) + a = np.ma.masked_all((2,), dtype=ndtype) + a.harden_mask() + a[0]['a'] = 2 + assert_equal(a.mask, control) + + def test_setitem_scalar(self): + # 8510 + mask_0d = np.ma.masked_array(1, mask=True) + arr = np.ma.arange(3) + arr[0] = mask_0d + assert_array_equal(arr.mask, [True, False, False]) + + def test_element_len(self): + # check that len() works for mvoid (Github issue #576) + for rec in self.data['base']: + assert_equal(len(rec), len(self.data['ddtype'])) + + +class TestMaskedObjectArray: + + def test_getitem(self): + arr = np.ma.array([None, None]) + for dt in 
[float, object]: + a0 = np.eye(2).astype(dt) + a1 = np.eye(3).astype(dt) + arr[0] = a0 + arr[1] = a1 + + assert_(arr[0] is a0) + assert_(arr[1] is a1) + assert_(isinstance(arr[0,...], MaskedArray)) + assert_(isinstance(arr[1,...], MaskedArray)) + assert_(arr[0,...][()] is a0) + assert_(arr[1,...][()] is a1) + + arr[0] = np.ma.masked + + assert_(arr[1] is a1) + assert_(isinstance(arr[0,...], MaskedArray)) + assert_(isinstance(arr[1,...], MaskedArray)) + assert_equal(arr[0,...].mask, True) + assert_(arr[1,...][()] is a1) + + # gh-5962 - object arrays of arrays do something special + assert_equal(arr[0].data, a0) + assert_equal(arr[0].mask, True) + assert_equal(arr[0,...][()].data, a0) + assert_equal(arr[0,...][()].mask, True) + + def test_nested_ma(self): + + arr = np.ma.array([None, None]) + # set the first object to be an unmasked masked constant. A little fiddly + arr[0,...] = np.array([np.ma.masked], object)[0,...] + + # check the above line did what we were aiming for + assert_(arr.data[0] is np.ma.masked) + + # test that getitem returned the value by identity + assert_(arr[0] is np.ma.masked) + + # now mask the masked value! + arr[0] = np.ma.masked + assert_(arr[0] is np.ma.masked) + + +class TestMaskedView: + + def setup(self): + iterator = list(zip(np.arange(10), np.random.rand(10))) + data = np.array(iterator) + a = array(iterator, dtype=[('a', float), ('b', float)]) + a.mask[0] = (1, 0) + controlmask = np.array([1] + 19 * [0], dtype=bool) + self.data = (data, a, controlmask) + + def test_view_to_nothing(self): + (data, a, controlmask) = self.data + test = a.view() + assert_(isinstance(test, MaskedArray)) + assert_equal(test._data, a._data) + assert_equal(test._mask, a._mask) + + def test_view_to_type(self): + (data, a, controlmask) = self.data + test = a.view(np.ndarray) + assert_(not isinstance(test, MaskedArray)) + assert_equal(test, a._data) + assert_equal_records(test, data.view(a.dtype).squeeze()) + + def test_view_to_simple_dtype(self): + (data, a, controlmask) = self.data + # View globally + test = a.view(float) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, data.ravel()) + assert_equal(test.mask, controlmask) + + def test_view_to_flexible_dtype(self): + (data, a, controlmask) = self.data + + test = a.view([('A', float), ('B', float)]) + assert_equal(test.mask.dtype.names, ('A', 'B')) + assert_equal(test['A'], a['a']) + assert_equal(test['B'], a['b']) + + test = a[0].view([('A', float), ('B', float)]) + assert_(isinstance(test, MaskedArray)) + assert_equal(test.mask.dtype.names, ('A', 'B')) + assert_equal(test['A'], a['a'][0]) + assert_equal(test['B'], a['b'][0]) + + test = a[-1].view([('A', float), ('B', float)]) + assert_(isinstance(test, MaskedArray)) + assert_equal(test.dtype.names, ('A', 'B')) + assert_equal(test['A'], a['a'][-1]) + assert_equal(test['B'], a['b'][-1]) + + def test_view_to_subdtype(self): + (data, a, controlmask) = self.data + # View globally + test = a.view((float, 2)) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, data) + assert_equal(test.mask, controlmask.reshape(-1, 2)) + # View on 1 masked element + test = a[0].view((float, 2)) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, data[0]) + assert_equal(test.mask, (1, 0)) + # View on 1 unmasked element + test = a[-1].view((float, 2)) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, data[-1]) + + def test_view_to_dtype_and_type(self): + (data, a, controlmask) = self.data + + test = a.view((float, 2), np.recarray) + assert_equal(test, data) + 
assert_(isinstance(test, np.recarray)) + assert_(not isinstance(test, MaskedArray)) + + +class TestOptionalArgs: + def test_ndarrayfuncs(self): + # test axis arg behaves the same as ndarray (including multiple axes) + + d = np.arange(24.0).reshape((2,3,4)) + m = np.zeros(24, dtype=bool).reshape((2,3,4)) + # mask out last element of last dimension + m[:,:,-1] = True + a = np.ma.array(d, mask=m) + + def testaxis(f, a, d): + numpy_f = numpy.__getattribute__(f) + ma_f = np.ma.__getattribute__(f) + + # test axis arg + assert_equal(ma_f(a, axis=1)[...,:-1], numpy_f(d[...,:-1], axis=1)) + assert_equal(ma_f(a, axis=(0,1))[...,:-1], + numpy_f(d[...,:-1], axis=(0,1))) + + def testkeepdims(f, a, d): + numpy_f = numpy.__getattribute__(f) + ma_f = np.ma.__getattribute__(f) + + # test keepdims arg + assert_equal(ma_f(a, keepdims=True).shape, + numpy_f(d, keepdims=True).shape) + assert_equal(ma_f(a, keepdims=False).shape, + numpy_f(d, keepdims=False).shape) + + # test both at once + assert_equal(ma_f(a, axis=1, keepdims=True)[...,:-1], + numpy_f(d[...,:-1], axis=1, keepdims=True)) + assert_equal(ma_f(a, axis=(0,1), keepdims=True)[...,:-1], + numpy_f(d[...,:-1], axis=(0,1), keepdims=True)) + + for f in ['sum', 'prod', 'mean', 'var', 'std']: + testaxis(f, a, d) + testkeepdims(f, a, d) + + for f in ['min', 'max']: + testaxis(f, a, d) + + d = (np.arange(24).reshape((2,3,4))%2 == 0) + a = np.ma.array(d, mask=m) + for f in ['all', 'any']: + testaxis(f, a, d) + testkeepdims(f, a, d) + + def test_count(self): + # test np.ma.count specially + + d = np.arange(24.0).reshape((2,3,4)) + m = np.zeros(24, dtype=bool).reshape((2,3,4)) + m[:,0,:] = True + a = np.ma.array(d, mask=m) + + assert_equal(count(a), 16) + assert_equal(count(a, axis=1), 2*ones((2,4))) + assert_equal(count(a, axis=(0,1)), 4*ones((4,))) + assert_equal(count(a, keepdims=True), 16*ones((1,1,1))) + assert_equal(count(a, axis=1, keepdims=True), 2*ones((2,1,4))) + assert_equal(count(a, axis=(0,1), keepdims=True), 4*ones((1,1,4))) + assert_equal(count(a, axis=-2), 2*ones((2,4))) + assert_raises(ValueError, count, a, axis=(1,1)) + assert_raises(np.AxisError, count, a, axis=3) + + # check the 'nomask' path + a = np.ma.array(d, mask=nomask) + + assert_equal(count(a), 24) + assert_equal(count(a, axis=1), 3*ones((2,4))) + assert_equal(count(a, axis=(0,1)), 6*ones((4,))) + assert_equal(count(a, keepdims=True), 24*ones((1,1,1))) + assert_equal(np.ndim(count(a, keepdims=True)), 3) + assert_equal(count(a, axis=1, keepdims=True), 3*ones((2,1,4))) + assert_equal(count(a, axis=(0,1), keepdims=True), 6*ones((1,1,4))) + assert_equal(count(a, axis=-2), 3*ones((2,4))) + assert_raises(ValueError, count, a, axis=(1,1)) + assert_raises(np.AxisError, count, a, axis=3) + + # check the 'masked' singleton + assert_equal(count(np.ma.masked), 0) + + # check 0-d arrays do not allow axis > 0 + assert_raises(np.AxisError, count, np.ma.array(1), axis=1) + + +class TestMaskedConstant: + def _do_add_test(self, add): + # sanity check + assert_(add(np.ma.masked, 1) is np.ma.masked) + + # now try with a vector + vector = np.array([1, 2, 3]) + result = add(np.ma.masked, vector) + + # lots of things could go wrong here + assert_(result is not np.ma.masked) + assert_(not isinstance(result, np.ma.core.MaskedConstant)) + assert_equal(result.shape, vector.shape) + assert_equal(np.ma.getmask(result), np.ones(vector.shape, dtype=bool)) + + def test_ufunc(self): + self._do_add_test(np.add) + + def test_operator(self): + self._do_add_test(lambda a, b: a + b) + + def test_ctor(self): + m = 
np.ma.array(np.ma.masked) + + # most importantly, we do not want to create a new MaskedConstant + # instance + assert_(not isinstance(m, np.ma.core.MaskedConstant)) + assert_(m is not np.ma.masked) + + def test_repr(self): + # copies should not exist, but if they do, it should be obvious that + # something is wrong + assert_equal(repr(np.ma.masked), 'masked') + + # create a new instance in a weird way + masked2 = np.ma.MaskedArray.__new__(np.ma.core.MaskedConstant) + assert_not_equal(repr(masked2), 'masked') + + def test_pickle(self): + from io import BytesIO + + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + with BytesIO() as f: + pickle.dump(np.ma.masked, f, protocol=proto) + f.seek(0) + res = pickle.load(f) + assert_(res is np.ma.masked) + + def test_copy(self): + # gh-9328 + # copy is a no-op, like it is with np.True_ + assert_equal( + np.ma.masked.copy() is np.ma.masked, + np.True_.copy() is np.True_) + + def test__copy(self): + import copy + assert_( + copy.copy(np.ma.masked) is np.ma.masked) + + def test_deepcopy(self): + import copy + assert_( + copy.deepcopy(np.ma.masked) is np.ma.masked) + + def test_immutable(self): + orig = np.ma.masked + assert_raises(np.ma.core.MaskError, operator.setitem, orig, (), 1) + assert_raises(ValueError,operator.setitem, orig.data, (), 1) + assert_raises(ValueError, operator.setitem, orig.mask, (), False) + + view = np.ma.masked.view(np.ma.MaskedArray) + assert_raises(ValueError, operator.setitem, view, (), 1) + assert_raises(ValueError, operator.setitem, view.data, (), 1) + assert_raises(ValueError, operator.setitem, view.mask, (), False) + + def test_coercion_int(self): + a_i = np.zeros((), int) + assert_raises(MaskError, operator.setitem, a_i, (), np.ma.masked) + assert_raises(MaskError, int, np.ma.masked) + + def test_coercion_float(self): + a_f = np.zeros((), float) + assert_warns(UserWarning, operator.setitem, a_f, (), np.ma.masked) + assert_(np.isnan(a_f[()])) + + @pytest.mark.xfail(reason="See gh-9750") + def test_coercion_unicode(self): + a_u = np.zeros((), 'U10') + a_u[()] = np.ma.masked + assert_equal(a_u[()], u'--') + + @pytest.mark.xfail(reason="See gh-9750") + def test_coercion_bytes(self): + a_b = np.zeros((), 'S10') + a_b[()] = np.ma.masked + assert_equal(a_b[()], b'--') + + def test_subclass(self): + # https://github.com/astropy/astropy/issues/6645 + class Sub(type(np.ma.masked)): pass + + a = Sub() + assert_(a is Sub()) + assert_(a is not np.ma.masked) + assert_not_equal(repr(a), 'masked') + + def test_attributes_readonly(self): + assert_raises(AttributeError, setattr, np.ma.masked, 'shape', (1,)) + assert_raises(AttributeError, setattr, np.ma.masked, 'dtype', np.int64) + + +class TestMaskedWhereAliases: + + # TODO: Test masked_object, masked_equal, ... 
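+ # masked_values(x, value) masks entries approximately equal to value
+ # (within rtol/atol, like np.isclose); with shrink=True an all-False
+ # mask collapses to nomask.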
+ + def test_masked_values(self): + res = masked_values(np.array([-32768.0]), np.int16(-32768)) + assert_equal(res.mask, [True]) + + res = masked_values(np.inf, np.inf) + assert_equal(res.mask, True) + + res = np.ma.masked_values(np.inf, -np.inf) + assert_equal(res.mask, False) + + res = np.ma.masked_values([1, 2, 3, 4], 5, shrink=True) + assert_(res.mask is np.ma.nomask) + + res = np.ma.masked_values([1, 2, 3, 4], 5, shrink=False) + assert_equal(res.mask, [False] * 4) + + +def test_masked_array(): + a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0]) + assert_equal(np.argwhere(a), [[1], [3]]) + +def test_masked_array_no_copy(): + # check nomask array is updated in place + a = np.ma.array([1, 2, 3, 4]) + _ = np.ma.masked_where(a == 3, a, copy=False) + assert_array_equal(a.mask, [False, False, True, False]) + # check masked array is updated in place + a = np.ma.array([1, 2, 3, 4], mask=[1, 0, 0, 0]) + _ = np.ma.masked_where(a == 3, a, copy=False) + assert_array_equal(a.mask, [True, False, True, False]) + +def test_append_masked_array(): + a = np.ma.masked_equal([1,2,3], value=2) + b = np.ma.masked_equal([4,3,2], value=2) + + result = np.ma.append(a, b) + expected_data = [1, 2, 3, 4, 3, 2] + expected_mask = [False, True, False, False, False, True] + assert_array_equal(result.data, expected_data) + assert_array_equal(result.mask, expected_mask) + + a = np.ma.masked_all((2,2)) + b = np.ma.ones((3,1)) + + result = np.ma.append(a, b) + expected_data = [1] * 3 + expected_mask = [True] * 4 + [False] * 3 + assert_array_equal(result.data[-3], expected_data) + assert_array_equal(result.mask, expected_mask) + + result = np.ma.append(a, b, axis=None) + assert_array_equal(result.data[-3], expected_data) + assert_array_equal(result.mask, expected_mask) + + +def test_append_masked_array_along_axis(): + a = np.ma.masked_equal([1,2,3], value=2) + b = np.ma.masked_values([[4, 5, 6], [7, 8, 9]], 7) + + # When `axis` is specified, `values` must have the correct shape. + assert_raises(ValueError, np.ma.append, a, b, axis=0) + + result = np.ma.append(a[np.newaxis,:], b, axis=0) + expected = np.ma.arange(1, 10) + expected[[1, 6]] = np.ma.masked + expected = expected.reshape((3,3)) + assert_array_equal(result.data, expected.data) + assert_array_equal(result.mask, expected.mask) + +def test_default_fill_value_complex(): + # regression test for Python 3, where 'unicode' was not defined + assert_(default_fill_value(1 + 1j) == 1.e20 + 0.0j) + + +def test_ufunc_with_output(): + # check that giving an output argument always returns that output. + # Regression test for gh-8416. 
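+ # np.add(x, 1., out=x) must return the very same masked array object
+ # that was supplied as out, not a fresh copy.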
+ x = array([1., 2., 3.], mask=[0, 0, 1]) + y = np.add(x, 1., out=x) + assert_(y is x) + + +def test_ufunc_with_out_varied(): + """ Test that masked arrays are immune to gh-10459 """ + # the mask of the output should not affect the result, however it is passed + a = array([ 1, 2, 3], mask=[1, 0, 0]) + b = array([10, 20, 30], mask=[1, 0, 0]) + out = array([ 0, 0, 0], mask=[0, 0, 1]) + expected = array([11, 22, 33], mask=[1, 0, 0]) + + out_pos = out.copy() + res_pos = np.add(a, b, out_pos) + + out_kw = out.copy() + res_kw = np.add(a, b, out=out_kw) + + out_tup = out.copy() + res_tup = np.add(a, b, out=(out_tup,)) + + assert_equal(res_kw.mask, expected.mask) + assert_equal(res_kw.data, expected.data) + assert_equal(res_tup.mask, expected.mask) + assert_equal(res_tup.data, expected.data) + assert_equal(res_pos.mask, expected.mask) + assert_equal(res_pos.data, expected.data) + + +def test_astype_mask_ordering(): + descr = [('v', int, 3), ('x', [('y', float)])] + x = array([ + [([1, 2, 3], (1.0,)), ([1, 2, 3], (2.0,))], + [([1, 2, 3], (3.0,)), ([1, 2, 3], (4.0,))]], dtype=descr) + x[0]['v'][0] = np.ma.masked + + x_a = x.astype(descr) + assert x_a.dtype.names == np.dtype(descr).names + assert x_a.mask.dtype.names == np.dtype(descr).names + assert_equal(x, x_a) + + assert_(x is x.astype(x.dtype, copy=False)) + assert_equal(type(x.astype(x.dtype, subok=False)), np.ndarray) + + x_f = x.astype(x.dtype, order='F') + assert_(x_f.flags.f_contiguous) + assert_(x_f.mask.flags.f_contiguous) + + # Also test the same indirectly, via np.array + x_a2 = np.array(x, dtype=descr, subok=True) + assert x_a2.dtype.names == np.dtype(descr).names + assert x_a2.mask.dtype.names == np.dtype(descr).names + assert_equal(x, x_a2) + + assert_(x is np.array(x, dtype=descr, copy=False, subok=True)) + + x_f2 = np.array(x, dtype=x.dtype, order='F', subok=True) + assert_(x_f2.flags.f_contiguous) + assert_(x_f2.mask.flags.f_contiguous) + + +@pytest.mark.parametrize('dt1', num_dts, ids=num_ids) +@pytest.mark.parametrize('dt2', num_dts, ids=num_ids) +@pytest.mark.filterwarnings('ignore::numpy.ComplexWarning') +def test_astype_basic(dt1, dt2): + # See gh-12070 + src = np.ma.array(ones(3, dt1), fill_value=1) + dst = src.astype(dt2) + + assert_(src.fill_value == 1) + assert_(src.dtype == dt1) + assert_(src.fill_value.dtype == dt1) + + assert_(dst.fill_value == 1) + assert_(dst.dtype == dt2) + assert_(dst.fill_value.dtype == dt2) + + assert_equal(src, dst) + + +def test_fieldless_void(): + dt = np.dtype([]) # a void dtype with no fields + x = np.empty(4, dt) + + # these arrays contain no values, so there's little to test - but this + # shouldn't crash + mx = np.ma.array(x) + assert_equal(mx.dtype, x.dtype) + assert_equal(mx.shape, x.shape) + + mx = np.ma.array(x, mask=x) + assert_equal(mx.dtype, x.dtype) + assert_equal(mx.shape, x.shape) + + +def test_mask_shape_assignment_does_not_break_masked(): + a = np.ma.masked + b = np.ma.array(1, mask=a.mask) + b.shape = (1,) + assert_equal(a.mask.shape, ()) + +@pytest.mark.skipif(sys.flags.optimize > 1, + reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1") +def test_doc_note(): + def method(self): + """This docstring + + Has multiple lines + + And notes + + Notes + ----- + original note + """ + pass + + expected_doc = """This docstring + +Has multiple lines + +And notes + +Notes +----- +note + +original note""" + + assert_equal(np.ma.core.doc_note(method.__doc__, "note"), expected_doc) diff --git a/MLPY/Lib/site-packages/numpy/ma/tests/test_deprecations.py 
b/MLPY/Lib/site-packages/numpy/ma/tests/test_deprecations.py new file mode 100644 index 0000000000000000000000000000000000000000..fc553b252ae14ea3303d7894450e7d667c9fe49e --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/ma/tests/test_deprecations.py @@ -0,0 +1,68 @@ +"""Test deprecation and future warnings. + +""" +import numpy as np +from numpy.testing import assert_warns +from numpy.ma.testutils import assert_equal +from numpy.ma.core import MaskedArrayFutureWarning + +class TestArgsort: + """ gh-8701 """ + def _test_base(self, argsort, cls): + arr_0d = np.array(1).view(cls) + argsort(arr_0d) + + arr_1d = np.array([1, 2, 3]).view(cls) + argsort(arr_1d) + + # argsort has a bad default for >1d arrays + arr_2d = np.array([[1, 2], [3, 4]]).view(cls) + result = assert_warns( + np.ma.core.MaskedArrayFutureWarning, argsort, arr_2d) + assert_equal(result, argsort(arr_2d, axis=None)) + + # should be no warnings for explicitly specifying it + argsort(arr_2d, axis=None) + argsort(arr_2d, axis=-1) + + def test_function_ndarray(self): + return self._test_base(np.ma.argsort, np.ndarray) + + def test_function_maskedarray(self): + return self._test_base(np.ma.argsort, np.ma.MaskedArray) + + def test_method(self): + return self._test_base(np.ma.MaskedArray.argsort, np.ma.MaskedArray) + + +class TestMinimumMaximum: + def test_minimum(self): + assert_warns(DeprecationWarning, np.ma.minimum, np.ma.array([1, 2])) + + def test_maximum(self): + assert_warns(DeprecationWarning, np.ma.maximum, np.ma.array([1, 2])) + + def test_axis_default(self): + # NumPy 1.13, 2017-05-06 + + data1d = np.ma.arange(6) + data2d = data1d.reshape(2, 3) + + ma_min = np.ma.minimum.reduce + ma_max = np.ma.maximum.reduce + + # check that the default axis is still None, but warns on 2d arrays + result = assert_warns(MaskedArrayFutureWarning, ma_max, data2d) + assert_equal(result, ma_max(data2d, axis=None)) + + result = assert_warns(MaskedArrayFutureWarning, ma_min, data2d) + assert_equal(result, ma_min(data2d, axis=None)) + + # no warnings on 1d, as both new and old defaults are equivalent + result = ma_min(data1d) + assert_equal(result, ma_min(data1d, axis=None)) + assert_equal(result, ma_min(data1d, axis=0)) + + result = ma_max(data1d) + assert_equal(result, ma_max(data1d, axis=None)) + assert_equal(result, ma_max(data1d, axis=0)) diff --git a/MLPY/Lib/site-packages/numpy/ma/tests/test_extras.py b/MLPY/Lib/site-packages/numpy/ma/tests/test_extras.py new file mode 100644 index 0000000000000000000000000000000000000000..634f5767686f1c7059cb80de3eda4d96ec4885e0 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/ma/tests/test_extras.py @@ -0,0 +1,1705 @@ +# pylint: disable-msg=W0611, W0612, W0511 +"""Tests suite for MaskedArray. 
+Adapted from the original test_ma by Pierre Gerard-Marchant + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu +:version: $Id: test_extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $ + +""" +import warnings +import itertools +import pytest + +import numpy as np +from numpy.testing import ( + assert_warns, suppress_warnings + ) +from numpy.ma.testutils import ( + assert_, assert_array_equal, assert_equal, assert_almost_equal + ) +from numpy.ma.core import ( + array, arange, masked, MaskedArray, masked_array, getmaskarray, shape, + nomask, ones, zeros, count + ) +from numpy.ma.extras import ( + atleast_1d, atleast_2d, atleast_3d, mr_, dot, polyfit, cov, corrcoef, + median, average, unique, setxor1d, setdiff1d, union1d, intersect1d, in1d, + ediff1d, apply_over_axes, apply_along_axis, compress_nd, compress_rowcols, + mask_rowcols, clump_masked, clump_unmasked, flatnotmasked_contiguous, + notmasked_contiguous, notmasked_edges, masked_all, masked_all_like, isin, + diagflat, stack, vstack + ) + + +class TestGeneric: + # + def test_masked_all(self): + # Tests masked_all + # Standard dtype + test = masked_all((2,), dtype=float) + control = array([1, 1], mask=[1, 1], dtype=float) + assert_equal(test, control) + # Flexible dtype + dt = np.dtype({'names': ['a', 'b'], 'formats': ['f', 'f']}) + test = masked_all((2,), dtype=dt) + control = array([(0, 0), (0, 0)], mask=[(1, 1), (1, 1)], dtype=dt) + assert_equal(test, control) + test = masked_all((2, 2), dtype=dt) + control = array([[(0, 0), (0, 0)], [(0, 0), (0, 0)]], + mask=[[(1, 1), (1, 1)], [(1, 1), (1, 1)]], + dtype=dt) + assert_equal(test, control) + # Nested dtype + dt = np.dtype([('a', 'f'), ('b', [('ba', 'f'), ('bb', 'f')])]) + test = masked_all((2,), dtype=dt) + control = array([(1, (1, 1)), (1, (1, 1))], + mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt) + assert_equal(test, control) + test = masked_all((2,), dtype=dt) + control = array([(1, (1, 1)), (1, (1, 1))], + mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt) + assert_equal(test, control) + test = masked_all((1, 1), dtype=dt) + control = array([[(1, (1, 1))]], mask=[[(1, (1, 1))]], dtype=dt) + assert_equal(test, control) + + def test_masked_all_with_object_nested(self): + # Test masked_all works with nested array with dtype of an 'object' + # refers to issue #15895 + my_dtype = np.dtype([('b', ([('c', object)], (1,)))]) + masked_arr = np.ma.masked_all((1,), my_dtype) + + assert_equal(type(masked_arr['b']), np.ma.core.MaskedArray) + assert_equal(type(masked_arr['b']['c']), np.ma.core.MaskedArray) + assert_equal(len(masked_arr['b']['c']), 1) + assert_equal(masked_arr['b']['c'].shape, (1, 1)) + assert_equal(masked_arr['b']['c']._fill_value.shape, ()) + + def test_masked_all_with_object(self): + # same as above except that the array is not nested + my_dtype = np.dtype([('b', (object, (1,)))]) + masked_arr = np.ma.masked_all((1,), my_dtype) + + assert_equal(type(masked_arr['b']), np.ma.core.MaskedArray) + assert_equal(len(masked_arr['b']), 1) + assert_equal(masked_arr['b'].shape, (1, 1)) + assert_equal(masked_arr['b']._fill_value.shape, ()) + + def test_masked_all_like(self): + # Tests masked_all + # Standard dtype + base = array([1, 2], dtype=float) + test = masked_all_like(base) + control = array([1, 1], mask=[1, 1], dtype=float) + assert_equal(test, control) + # Flexible dtype + dt = np.dtype({'names': ['a', 'b'], 'formats': ['f', 'f']}) + base = array([(0, 0), (0, 0)], mask=[(1, 1), (1, 1)], dtype=dt) + test = masked_all_like(base) + control = array([(10, 10), (10, 10)], mask=[(1, 
1), (1, 1)], dtype=dt) + assert_equal(test, control) + # Nested dtype + dt = np.dtype([('a', 'f'), ('b', [('ba', 'f'), ('bb', 'f')])]) + control = array([(1, (1, 1)), (1, (1, 1))], + mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt) + test = masked_all_like(control) + assert_equal(test, control) + + def check_clump(self, f): + for i in range(1, 7): + for j in range(2**i): + k = np.arange(i, dtype=int) + ja = np.full(i, j, dtype=int) + a = masked_array(2**k) + a.mask = (ja & (2**k)) != 0 + s = 0 + for sl in f(a): + s += a.data[sl].sum() + if f == clump_unmasked: + assert_equal(a.compressed().sum(), s) + else: + a.mask = ~a.mask + assert_equal(a.compressed().sum(), s) + + def test_clump_masked(self): + # Test clump_masked + a = masked_array(np.arange(10)) + a[[0, 1, 2, 6, 8, 9]] = masked + # + test = clump_masked(a) + control = [slice(0, 3), slice(6, 7), slice(8, 10)] + assert_equal(test, control) + + self.check_clump(clump_masked) + + def test_clump_unmasked(self): + # Test clump_unmasked + a = masked_array(np.arange(10)) + a[[0, 1, 2, 6, 8, 9]] = masked + test = clump_unmasked(a) + control = [slice(3, 6), slice(7, 8), ] + assert_equal(test, control) + + self.check_clump(clump_unmasked) + + def test_flatnotmasked_contiguous(self): + # Test flatnotmasked_contiguous + a = arange(10) + # No mask + test = flatnotmasked_contiguous(a) + assert_equal(test, [slice(0, a.size)]) + # mask of all false + a.mask = np.zeros(10, dtype=bool) + assert_equal(test, [slice(0, a.size)]) + # Some mask + a[(a < 3) | (a > 8) | (a == 5)] = masked + test = flatnotmasked_contiguous(a) + assert_equal(test, [slice(3, 5), slice(6, 9)]) + # + a[:] = masked + test = flatnotmasked_contiguous(a) + assert_equal(test, []) + + +class TestAverage: + # Several tests of average. Why so many ? Good point... + def test_testAverage1(self): + # Test of average. + ott = array([0., 1., 2., 3.], mask=[True, False, False, False]) + assert_equal(2.0, average(ott, axis=0)) + assert_equal(2.0, average(ott, weights=[1., 1., 2., 1.])) + result, wts = average(ott, weights=[1., 1., 2., 1.], returned=True) + assert_equal(2.0, result) + assert_(wts == 4.0) + ott[:] = masked + assert_equal(average(ott, axis=0).mask, [True]) + ott = array([0., 1., 2., 3.], mask=[True, False, False, False]) + ott = ott.reshape(2, 2) + ott[:, 1] = masked + assert_equal(average(ott, axis=0), [2.0, 0.0]) + assert_equal(average(ott, axis=1).mask[0], [True]) + assert_equal([2., 0.], average(ott, axis=0)) + result, wts = average(ott, axis=0, returned=True) + assert_equal(wts, [1., 0.]) + + def test_testAverage2(self): + # More tests of average. + w1 = [0, 1, 1, 1, 1, 0] + w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]] + x = arange(6, dtype=np.float_) + assert_equal(average(x, axis=0), 2.5) + assert_equal(average(x, axis=0, weights=w1), 2.5) + y = array([arange(6, dtype=np.float_), 2.0 * arange(6)]) + assert_equal(average(y, None), np.add.reduce(np.arange(6)) * 3. / 12.) + assert_equal(average(y, axis=0), np.arange(6) * 3. / 2.) + assert_equal(average(y, axis=1), + [average(x, axis=0), average(x, axis=0) * 2.0]) + assert_equal(average(y, None, weights=w2), 20. / 6.) 
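+ # Column-wise weighted average: columns where only one row carries a
+ # nonzero weight reduce to that row's value (hence the trailing 10.).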
+ assert_equal(average(y, axis=0, weights=w2), + [0., 1., 2., 3., 4., 10.]) + assert_equal(average(y, axis=1), + [average(x, axis=0), average(x, axis=0) * 2.0]) + m1 = zeros(6) + m2 = [0, 0, 1, 1, 0, 0] + m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] + m4 = ones(6) + m5 = [0, 1, 1, 1, 1, 1] + assert_equal(average(masked_array(x, m1), axis=0), 2.5) + assert_equal(average(masked_array(x, m2), axis=0), 2.5) + assert_equal(average(masked_array(x, m4), axis=0).mask, [True]) + assert_equal(average(masked_array(x, m5), axis=0), 0.0) + assert_equal(count(average(masked_array(x, m4), axis=0)), 0) + z = masked_array(y, m3) + assert_equal(average(z, None), 20. / 6.) + assert_equal(average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5]) + assert_equal(average(z, axis=1), [2.5, 5.0]) + assert_equal(average(z, axis=0, weights=w2), + [0., 1., 99., 99., 4.0, 10.0]) + + def test_testAverage3(self): + # Yet more tests of average! + a = arange(6) + b = arange(6) * 3 + r1, w1 = average([[a, b], [b, a]], axis=1, returned=True) + assert_equal(shape(r1), shape(w1)) + assert_equal(r1.shape, w1.shape) + r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=True) + assert_equal(shape(w2), shape(r2)) + r2, w2 = average(ones((2, 2, 3)), returned=True) + assert_equal(shape(w2), shape(r2)) + r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=True) + assert_equal(shape(w2), shape(r2)) + a2d = array([[1, 2], [0, 4]], float) + a2dm = masked_array(a2d, [[False, False], [True, False]]) + a2da = average(a2d, axis=0) + assert_equal(a2da, [0.5, 3.0]) + a2dma = average(a2dm, axis=0) + assert_equal(a2dma, [1.0, 3.0]) + a2dma = average(a2dm, axis=None) + assert_equal(a2dma, 7. / 3.) + a2dma = average(a2dm, axis=1) + assert_equal(a2dma, [1.5, 4.0]) + + def test_onintegers_with_mask(self): + # Test average on integers with mask + a = average(array([1, 2])) + assert_equal(a, 1.5) + a = average(array([1, 2, 3, 4], mask=[False, False, True, True])) + assert_equal(a, 1.5) + + def test_complex(self): + # Test with complex data. + # (Regression test for https://github.com/numpy/numpy/issues/2684) + mask = np.array([[0, 0, 0, 1, 0], + [0, 1, 0, 0, 0]], dtype=bool) + a = masked_array([[0, 1+2j, 3+4j, 5+6j, 7+8j], + [9j, 0+1j, 2+3j, 4+5j, 7+7j]], + mask=mask) + + av = average(a) + expected = np.average(a.compressed()) + assert_almost_equal(av.real, expected.real) + assert_almost_equal(av.imag, expected.imag) + + av0 = average(a, axis=0) + expected0 = average(a.real, axis=0) + average(a.imag, axis=0)*1j + assert_almost_equal(av0.real, expected0.real) + assert_almost_equal(av0.imag, expected0.imag) + + av1 = average(a, axis=1) + expected1 = average(a.real, axis=1) + average(a.imag, axis=1)*1j + assert_almost_equal(av1.real, expected1.real) + assert_almost_equal(av1.imag, expected1.imag) + + # Test with the 'weights' argument. 
+ wts = np.array([[0.5, 1.0, 2.0, 1.0, 0.5], + [1.0, 1.0, 1.0, 1.0, 1.0]]) + wav = average(a, weights=wts) + expected = np.average(a.compressed(), weights=wts[~mask]) + assert_almost_equal(wav.real, expected.real) + assert_almost_equal(wav.imag, expected.imag) + + wav0 = average(a, weights=wts, axis=0) + expected0 = (average(a.real, weights=wts, axis=0) + + average(a.imag, weights=wts, axis=0)*1j) + assert_almost_equal(wav0.real, expected0.real) + assert_almost_equal(wav0.imag, expected0.imag) + + wav1 = average(a, weights=wts, axis=1) + expected1 = (average(a.real, weights=wts, axis=1) + + average(a.imag, weights=wts, axis=1)*1j) + assert_almost_equal(wav1.real, expected1.real) + assert_almost_equal(wav1.imag, expected1.imag) + + def test_masked_weights(self): + # Test with masked weights. + # (Regression test for https://github.com/numpy/numpy/issues/10438) + a = np.ma.array(np.arange(9).reshape(3, 3), + mask=[[1, 0, 0], [1, 0, 0], [0, 0, 0]]) + weights_unmasked = masked_array([5, 28, 31], mask=False) + weights_masked = masked_array([5, 28, 31], mask=[1, 0, 0]) + + avg_unmasked = average(a, axis=0, + weights=weights_unmasked, returned=False) + expected_unmasked = np.array([6.0, 5.21875, 6.21875]) + assert_almost_equal(avg_unmasked, expected_unmasked) + + avg_masked = average(a, axis=0, weights=weights_masked, returned=False) + expected_masked = np.array([6.0, 5.576271186440678, 6.576271186440678]) + assert_almost_equal(avg_masked, expected_masked) + + +class TestConcatenator: + # Tests for mr_, the equivalent of r_ for masked arrays. + + def test_1d(self): + # Tests mr_ on 1D arrays. + assert_array_equal(mr_[1, 2, 3, 4, 5, 6], array([1, 2, 3, 4, 5, 6])) + b = ones(5) + m = [1, 0, 0, 0, 0] + d = masked_array(b, mask=m) + c = mr_[d, 0, 0, d] + assert_(isinstance(c, MaskedArray)) + assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1]) + assert_array_equal(c.mask, mr_[m, 0, 0, m]) + + def test_2d(self): + # Tests mr_ on 2D arrays. + a_1 = np.random.rand(5, 5) + a_2 = np.random.rand(5, 5) + m_1 = np.round_(np.random.rand(5, 5), 0) + m_2 = np.round_(np.random.rand(5, 5), 0) + b_1 = masked_array(a_1, mask=m_1) + b_2 = masked_array(a_2, mask=m_2) + # append columns + d = mr_['1', b_1, b_2] + assert_(d.shape == (5, 10)) + assert_array_equal(d[:, :5], b_1) + assert_array_equal(d[:, 5:], b_2) + assert_array_equal(d.mask, np.r_['1', m_1, m_2]) + d = mr_[b_1, b_2] + assert_(d.shape == (10, 5)) + assert_array_equal(d[:5,:], b_1) + assert_array_equal(d[5:,:], b_2) + assert_array_equal(d.mask, np.r_[m_1, m_2]) + + def test_masked_constant(self): + actual = mr_[np.ma.masked, 1] + assert_equal(actual.mask, [True, False]) + assert_equal(actual.data[1], 1) + + actual = mr_[[1, 2], np.ma.masked] + assert_equal(actual.mask, [False, False, True]) + assert_equal(actual.data[:2], [1, 2]) + + +class TestNotMasked: + # Tests notmasked_edges and notmasked_contiguous. 
+ + def test_edges(self): + # Tests unmasked_edges + data = masked_array(np.arange(25).reshape(5, 5), + mask=[[0, 0, 1, 0, 0], + [0, 0, 0, 1, 1], + [1, 1, 0, 0, 0], + [0, 0, 0, 0, 0], + [1, 1, 1, 0, 0]],) + test = notmasked_edges(data, None) + assert_equal(test, [0, 24]) + test = notmasked_edges(data, 0) + assert_equal(test[0], [(0, 0, 1, 0, 0), (0, 1, 2, 3, 4)]) + assert_equal(test[1], [(3, 3, 3, 4, 4), (0, 1, 2, 3, 4)]) + test = notmasked_edges(data, 1) + assert_equal(test[0], [(0, 1, 2, 3, 4), (0, 0, 2, 0, 3)]) + assert_equal(test[1], [(0, 1, 2, 3, 4), (4, 2, 4, 4, 4)]) + # + test = notmasked_edges(data.data, None) + assert_equal(test, [0, 24]) + test = notmasked_edges(data.data, 0) + assert_equal(test[0], [(0, 0, 0, 0, 0), (0, 1, 2, 3, 4)]) + assert_equal(test[1], [(4, 4, 4, 4, 4), (0, 1, 2, 3, 4)]) + test = notmasked_edges(data.data, -1) + assert_equal(test[0], [(0, 1, 2, 3, 4), (0, 0, 0, 0, 0)]) + assert_equal(test[1], [(0, 1, 2, 3, 4), (4, 4, 4, 4, 4)]) + # + data[-2] = masked + test = notmasked_edges(data, 0) + assert_equal(test[0], [(0, 0, 1, 0, 0), (0, 1, 2, 3, 4)]) + assert_equal(test[1], [(1, 1, 2, 4, 4), (0, 1, 2, 3, 4)]) + test = notmasked_edges(data, -1) + assert_equal(test[0], [(0, 1, 2, 4), (0, 0, 2, 3)]) + assert_equal(test[1], [(0, 1, 2, 4), (4, 2, 4, 4)]) + + def test_contiguous(self): + # Tests notmasked_contiguous + a = masked_array(np.arange(24).reshape(3, 8), + mask=[[0, 0, 0, 0, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1], + [0, 0, 0, 0, 0, 0, 1, 0]]) + tmp = notmasked_contiguous(a, None) + assert_equal(tmp, [ + slice(0, 4, None), + slice(16, 22, None), + slice(23, 24, None) + ]) + + tmp = notmasked_contiguous(a, 0) + assert_equal(tmp, [ + [slice(0, 1, None), slice(2, 3, None)], + [slice(0, 1, None), slice(2, 3, None)], + [slice(0, 1, None), slice(2, 3, None)], + [slice(0, 1, None), slice(2, 3, None)], + [slice(2, 3, None)], + [slice(2, 3, None)], + [], + [slice(2, 3, None)] + ]) + # + tmp = notmasked_contiguous(a, 1) + assert_equal(tmp, [ + [slice(0, 4, None)], + [], + [slice(0, 6, None), slice(7, 8, None)] + ]) + + +class TestCompressFunctions: + + def test_compress_nd(self): + # Tests compress_nd + x = np.array(list(range(3*4*5))).reshape(3, 4, 5) + m = np.zeros((3,4,5)).astype(bool) + m[1,1,1] = True + x = array(x, mask=m) + + # axis=None + a = compress_nd(x) + assert_equal(a, [[[ 0, 2, 3, 4], + [10, 12, 13, 14], + [15, 17, 18, 19]], + [[40, 42, 43, 44], + [50, 52, 53, 54], + [55, 57, 58, 59]]]) + + # axis=0 + a = compress_nd(x, 0) + assert_equal(a, [[[ 0, 1, 2, 3, 4], + [ 5, 6, 7, 8, 9], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19]], + [[40, 41, 42, 43, 44], + [45, 46, 47, 48, 49], + [50, 51, 52, 53, 54], + [55, 56, 57, 58, 59]]]) + + # axis=1 + a = compress_nd(x, 1) + assert_equal(a, [[[ 0, 1, 2, 3, 4], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19]], + [[20, 21, 22, 23, 24], + [30, 31, 32, 33, 34], + [35, 36, 37, 38, 39]], + [[40, 41, 42, 43, 44], + [50, 51, 52, 53, 54], + [55, 56, 57, 58, 59]]]) + + a2 = compress_nd(x, (1,)) + a3 = compress_nd(x, -2) + a4 = compress_nd(x, (-2,)) + assert_equal(a, a2) + assert_equal(a, a3) + assert_equal(a, a4) + + # axis=2 + a = compress_nd(x, 2) + assert_equal(a, [[[ 0, 2, 3, 4], + [ 5, 7, 8, 9], + [10, 12, 13, 14], + [15, 17, 18, 19]], + [[20, 22, 23, 24], + [25, 27, 28, 29], + [30, 32, 33, 34], + [35, 37, 38, 39]], + [[40, 42, 43, 44], + [45, 47, 48, 49], + [50, 52, 53, 54], + [55, 57, 58, 59]]]) + + a2 = compress_nd(x, (2,)) + a3 = compress_nd(x, -1) + a4 = compress_nd(x, (-1,)) + assert_equal(a, a2) + assert_equal(a, a3) 
+ assert_equal(a, a4) + + # axis=(0, 1) + a = compress_nd(x, (0, 1)) + assert_equal(a, [[[ 0, 1, 2, 3, 4], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19]], + [[40, 41, 42, 43, 44], + [50, 51, 52, 53, 54], + [55, 56, 57, 58, 59]]]) + a2 = compress_nd(x, (0, -2)) + assert_equal(a, a2) + + # axis=(1, 2) + a = compress_nd(x, (1, 2)) + assert_equal(a, [[[ 0, 2, 3, 4], + [10, 12, 13, 14], + [15, 17, 18, 19]], + [[20, 22, 23, 24], + [30, 32, 33, 34], + [35, 37, 38, 39]], + [[40, 42, 43, 44], + [50, 52, 53, 54], + [55, 57, 58, 59]]]) + + a2 = compress_nd(x, (-2, 2)) + a3 = compress_nd(x, (1, -1)) + a4 = compress_nd(x, (-2, -1)) + assert_equal(a, a2) + assert_equal(a, a3) + assert_equal(a, a4) + + # axis=(0, 2) + a = compress_nd(x, (0, 2)) + assert_equal(a, [[[ 0, 2, 3, 4], + [ 5, 7, 8, 9], + [10, 12, 13, 14], + [15, 17, 18, 19]], + [[40, 42, 43, 44], + [45, 47, 48, 49], + [50, 52, 53, 54], + [55, 57, 58, 59]]]) + + a2 = compress_nd(x, (0, -1)) + assert_equal(a, a2) + + def test_compress_rowcols(self): + # Tests compress_rowcols + x = array(np.arange(9).reshape(3, 3), + mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) + assert_equal(compress_rowcols(x), [[4, 5], [7, 8]]) + assert_equal(compress_rowcols(x, 0), [[3, 4, 5], [6, 7, 8]]) + assert_equal(compress_rowcols(x, 1), [[1, 2], [4, 5], [7, 8]]) + x = array(x._data, mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(compress_rowcols(x), [[0, 2], [6, 8]]) + assert_equal(compress_rowcols(x, 0), [[0, 1, 2], [6, 7, 8]]) + assert_equal(compress_rowcols(x, 1), [[0, 2], [3, 5], [6, 8]]) + x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(compress_rowcols(x), [[8]]) + assert_equal(compress_rowcols(x, 0), [[6, 7, 8]]) + assert_equal(compress_rowcols(x, 1,), [[2], [5], [8]]) + x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + assert_equal(compress_rowcols(x).size, 0) + assert_equal(compress_rowcols(x, 0).size, 0) + assert_equal(compress_rowcols(x, 1).size, 0) + + def test_mask_rowcols(self): + # Tests mask_rowcols. 
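+        # mask_rowcols masks every row and/or column containing at least
+        # one masked entry (axis=0 masks rows, axis=1 masks columns,
+        # axis=None masks both). Sketch: a single masked cell at (0, 0)
+        # of a 2x2 array yields mask [[1, 1], [1, 0]] for axis=None.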
+ x = array(np.arange(9).reshape(3, 3), + mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) + assert_equal(mask_rowcols(x).mask, + [[1, 1, 1], [1, 0, 0], [1, 0, 0]]) + assert_equal(mask_rowcols(x, 0).mask, + [[1, 1, 1], [0, 0, 0], [0, 0, 0]]) + assert_equal(mask_rowcols(x, 1).mask, + [[1, 0, 0], [1, 0, 0], [1, 0, 0]]) + x = array(x._data, mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(mask_rowcols(x).mask, + [[0, 1, 0], [1, 1, 1], [0, 1, 0]]) + assert_equal(mask_rowcols(x, 0).mask, + [[0, 0, 0], [1, 1, 1], [0, 0, 0]]) + assert_equal(mask_rowcols(x, 1).mask, + [[0, 1, 0], [0, 1, 0], [0, 1, 0]]) + x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(mask_rowcols(x).mask, + [[1, 1, 1], [1, 1, 1], [1, 1, 0]]) + assert_equal(mask_rowcols(x, 0).mask, + [[1, 1, 1], [1, 1, 1], [0, 0, 0]]) + assert_equal(mask_rowcols(x, 1,).mask, + [[1, 1, 0], [1, 1, 0], [1, 1, 0]]) + x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + assert_(mask_rowcols(x).all() is masked) + assert_(mask_rowcols(x, 0).all() is masked) + assert_(mask_rowcols(x, 1).all() is masked) + assert_(mask_rowcols(x).mask.all()) + assert_(mask_rowcols(x, 0).mask.all()) + assert_(mask_rowcols(x, 1).mask.all()) + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize(["func", "rowcols_axis"], + [(np.ma.mask_rows, 0), (np.ma.mask_cols, 1)]) + def test_mask_row_cols_axis_deprecation(self, axis, func, rowcols_axis): + # Test deprecation of the axis argument to `mask_rows` and `mask_cols` + x = array(np.arange(9).reshape(3, 3), + mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) + + with assert_warns(DeprecationWarning): + res = func(x, axis=axis) + assert_equal(res, mask_rowcols(x, rowcols_axis)) + + def test_dot(self): + # Tests dot product + n = np.arange(1, 7) + # + m = [1, 0, 0, 0, 0, 0] + a = masked_array(n, mask=m).reshape(2, 3) + b = masked_array(n, mask=m).reshape(3, 2) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[1, 1], [1, 0]]) + c = dot(b, a, strict=True) + assert_equal(c.mask, [[1, 1, 1], [1, 0, 0], [1, 0, 0]]) + c = dot(a, b, strict=False) + assert_equal(c, np.dot(a.filled(0), b.filled(0))) + c = dot(b, a, strict=False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) + # + m = [0, 0, 0, 0, 0, 1] + a = masked_array(n, mask=m).reshape(2, 3) + b = masked_array(n, mask=m).reshape(3, 2) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[0, 1], [1, 1]]) + c = dot(b, a, strict=True) + assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [1, 1, 1]]) + c = dot(a, b, strict=False) + assert_equal(c, np.dot(a.filled(0), b.filled(0))) + assert_equal(c, dot(a, b)) + c = dot(b, a, strict=False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) + # + m = [0, 0, 0, 0, 0, 0] + a = masked_array(n, mask=m).reshape(2, 3) + b = masked_array(n, mask=m).reshape(3, 2) + c = dot(a, b) + assert_equal(c.mask, nomask) + c = dot(b, a) + assert_equal(c.mask, nomask) + # + a = masked_array(n, mask=[1, 0, 0, 0, 0, 0]).reshape(2, 3) + b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[1, 1], [0, 0]]) + c = dot(a, b, strict=False) + assert_equal(c, np.dot(a.filled(0), b.filled(0))) + c = dot(b, a, strict=True) + assert_equal(c.mask, [[1, 0, 0], [1, 0, 0], [1, 0, 0]]) + c = dot(b, a, strict=False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) + # + a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3) + b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[0, 0], [1, 1]]) + c = dot(a, b) + 
assert_equal(c, np.dot(a.filled(0), b.filled(0)))
+        c = dot(b, a, strict=True)
+        assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [0, 0, 1]])
+        c = dot(b, a, strict=False)
+        assert_equal(c, np.dot(b.filled(0), a.filled(0)))
+        #
+        a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3)
+        b = masked_array(n, mask=[0, 0, 1, 0, 0, 0]).reshape(3, 2)
+        c = dot(a, b, strict=True)
+        assert_equal(c.mask, [[1, 0], [1, 1]])
+        c = dot(a, b, strict=False)
+        assert_equal(c, np.dot(a.filled(0), b.filled(0)))
+        c = dot(b, a, strict=True)
+        assert_equal(c.mask, [[0, 0, 1], [1, 1, 1], [0, 0, 1]])
+        c = dot(b, a, strict=False)
+        assert_equal(c, np.dot(b.filled(0), a.filled(0)))
+
+    def test_dot_returns_maskedarray(self):
+        # See gh-6611
+        a = np.eye(3)
+        b = array(a)
+        assert_(type(dot(a, a)) is MaskedArray)
+        assert_(type(dot(a, b)) is MaskedArray)
+        assert_(type(dot(b, a)) is MaskedArray)
+        assert_(type(dot(b, b)) is MaskedArray)
+
+    def test_dot_out(self):
+        a = array(np.eye(3))
+        out = array(np.zeros((3, 3)))
+        res = dot(a, a, out=out)
+        assert_(res is out)
+        assert_equal(a, res)
+
+
+class TestApplyAlongAxis:
+    # Tests 2D functions
+    def test_3d(self):
+        a = arange(12.).reshape(2, 2, 3)
+
+        def myfunc(b):
+            return b[1]
+
+        xa = apply_along_axis(myfunc, 2, a)
+        assert_equal(xa, [[1, 4], [7, 10]])
+
+    # Tests kwargs functions
+    def test_3d_kwargs(self):
+        a = arange(12).reshape(2, 2, 3)
+
+        def myfunc(b, offset=0):
+            return b[1+offset]
+
+        xa = apply_along_axis(myfunc, 2, a, offset=1)
+        assert_equal(xa, [[2, 5], [8, 11]])
+
+
+class TestApplyOverAxes:
+    # Tests apply_over_axes
+    def test_basic(self):
+        a = arange(24).reshape(2, 3, 4)
+        test = apply_over_axes(np.sum, a, [0, 2])
+        ctrl = np.array([[[60], [92], [124]]])
+        assert_equal(test, ctrl)
+        a[(a % 2).astype(bool)] = masked
+        test = apply_over_axes(np.sum, a, [0, 2])
+        ctrl = np.array([[[28], [44], [60]]])
+        assert_equal(test, ctrl)
+
+
+class TestMedian:
+    def test_pytype(self):
+        r = np.ma.median([[np.inf, np.inf], [np.inf, np.inf]], axis=-1)
+        assert_equal(r, np.inf)
+
+    def test_inf(self):
+        # test that the even-length median code path handles all-inf
+        # slices, including the all-masked case
+        r = np.ma.median(np.ma.masked_array([[np.inf, np.inf],
+                                             [np.inf, np.inf]]), axis=-1)
+        assert_equal(r, np.inf)
+        r = np.ma.median(np.ma.masked_array([[np.inf, np.inf],
+                                             [np.inf, np.inf]]), axis=None)
+        assert_equal(r, np.inf)
+        # all masked
+        r = np.ma.median(np.ma.masked_array([[np.inf, np.inf],
+                                             [np.inf, np.inf]], mask=True),
+                         axis=-1)
+        assert_equal(r.mask, True)
+        r = np.ma.median(np.ma.masked_array([[np.inf, np.inf],
+                                             [np.inf, np.inf]], mask=True),
+                         axis=None)
+        assert_equal(r.mask, True)
+
+    def test_non_masked(self):
+        x = np.arange(9)
+        assert_equal(np.ma.median(x), 4.)
+        assert_(type(np.ma.median(x)) is not MaskedArray)
+        x = range(8)
+        assert_equal(np.ma.median(x), 3.5)
+        assert_(type(np.ma.median(x)) is not MaskedArray)
+        x = 5
+        assert_equal(np.ma.median(x), 5.)
+ assert_(type(np.ma.median(x)) is not MaskedArray) + # integer + x = np.arange(9 * 8).reshape(9, 8) + assert_equal(np.ma.median(x, axis=0), np.median(x, axis=0)) + assert_equal(np.ma.median(x, axis=1), np.median(x, axis=1)) + assert_(np.ma.median(x, axis=1) is not MaskedArray) + # float + x = np.arange(9 * 8.).reshape(9, 8) + assert_equal(np.ma.median(x, axis=0), np.median(x, axis=0)) + assert_equal(np.ma.median(x, axis=1), np.median(x, axis=1)) + assert_(np.ma.median(x, axis=1) is not MaskedArray) + + def test_docstring_examples(self): + "test the examples given in the docstring of ma.median" + x = array(np.arange(8), mask=[0]*4 + [1]*4) + assert_equal(np.ma.median(x), 1.5) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + x = array(np.arange(10).reshape(2, 5), mask=[0]*6 + [1]*4) + assert_equal(np.ma.median(x), 2.5) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + ma_x = np.ma.median(x, axis=-1, overwrite_input=True) + assert_equal(ma_x, [2., 5.]) + assert_equal(ma_x.shape, (2,), "shape mismatch") + assert_(type(ma_x) is MaskedArray) + + def test_axis_argument_errors(self): + msg = "mask = %s, ndim = %s, axis = %s, overwrite_input = %s" + for ndmin in range(5): + for mask in [False, True]: + x = array(1, ndmin=ndmin, mask=mask) + + # Valid axis values should not raise exception + args = itertools.product(range(-ndmin, ndmin), [False, True]) + for axis, over in args: + try: + np.ma.median(x, axis=axis, overwrite_input=over) + except Exception: + raise AssertionError(msg % (mask, ndmin, axis, over)) + + # Invalid axis values should raise exception + args = itertools.product([-(ndmin + 1), ndmin], [False, True]) + for axis, over in args: + try: + np.ma.median(x, axis=axis, overwrite_input=over) + except np.AxisError: + pass + else: + raise AssertionError(msg % (mask, ndmin, axis, over)) + + def test_masked_0d(self): + # Check values + x = array(1, mask=False) + assert_equal(np.ma.median(x), 1) + x = array(1, mask=True) + assert_equal(np.ma.median(x), np.ma.masked) + + def test_masked_1d(self): + x = array(np.arange(5), mask=True) + assert_equal(np.ma.median(x), np.ma.masked) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is np.ma.core.MaskedConstant) + x = array(np.arange(5), mask=False) + assert_equal(np.ma.median(x), 2.) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + x = array(np.arange(5), mask=[0,1,0,0,0]) + assert_equal(np.ma.median(x), 2.5) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + x = array(np.arange(5), mask=[0,1,1,1,1]) + assert_equal(np.ma.median(x), 0.) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + # integer + x = array(np.arange(5), mask=[0,1,1,0,0]) + assert_equal(np.ma.median(x), 3.) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + # float + x = array(np.arange(5.), mask=[0,1,1,0,0]) + assert_equal(np.ma.median(x), 3.) 
+ assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + # integer + x = array(np.arange(6), mask=[0,1,1,1,1,0]) + assert_equal(np.ma.median(x), 2.5) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + # float + x = array(np.arange(6.), mask=[0,1,1,1,1,0]) + assert_equal(np.ma.median(x), 2.5) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + + def test_1d_shape_consistency(self): + assert_equal(np.ma.median(array([1,2,3],mask=[0,0,0])).shape, + np.ma.median(array([1,2,3],mask=[0,1,0])).shape ) + + def test_2d(self): + # Tests median w/ 2D + (n, p) = (101, 30) + x = masked_array(np.linspace(-1., 1., n),) + x[:10] = x[-10:] = masked + z = masked_array(np.empty((n, p), dtype=float)) + z[:, 0] = x[:] + idx = np.arange(len(x)) + for i in range(1, p): + np.random.shuffle(idx) + z[:, i] = x[idx] + assert_equal(median(z[:, 0]), 0) + assert_equal(median(z), 0) + assert_equal(median(z, axis=0), np.zeros(p)) + assert_equal(median(z.T, axis=1), np.zeros(p)) + + def test_2d_waxis(self): + # Tests median w/ 2D arrays and different axis. + x = masked_array(np.arange(30).reshape(10, 3)) + x[:3] = x[-3:] = masked + assert_equal(median(x), 14.5) + assert_(type(np.ma.median(x)) is not MaskedArray) + assert_equal(median(x, axis=0), [13.5, 14.5, 15.5]) + assert_(type(np.ma.median(x, axis=0)) is MaskedArray) + assert_equal(median(x, axis=1), [0, 0, 0, 10, 13, 16, 19, 0, 0, 0]) + assert_(type(np.ma.median(x, axis=1)) is MaskedArray) + assert_equal(median(x, axis=1).mask, [1, 1, 1, 0, 0, 0, 0, 1, 1, 1]) + + def test_3d(self): + # Tests median w/ 3D + x = np.ma.arange(24).reshape(3, 4, 2) + x[x % 3 == 0] = masked + assert_equal(median(x, 0), [[12, 9], [6, 15], [12, 9], [18, 15]]) + x.shape = (4, 3, 2) + assert_equal(median(x, 0), [[99, 10], [11, 99], [13, 14]]) + x = np.ma.arange(24).reshape(4, 3, 2) + x[x % 5 == 0] = masked + assert_equal(median(x, 0), [[12, 10], [8, 9], [16, 17]]) + + def test_neg_axis(self): + x = masked_array(np.arange(30).reshape(10, 3)) + x[:3] = x[-3:] = masked + assert_equal(median(x, axis=-1), median(x, axis=1)) + + def test_out_1d(self): + # integer float even odd + for v in (30, 30., 31, 31.): + x = masked_array(np.arange(v)) + x[:3] = x[-3:] = masked + out = masked_array(np.ones(())) + r = median(x, out=out) + if v == 30: + assert_equal(out, 14.5) + else: + assert_equal(out, 15.) + assert_(r is out) + assert_(type(r) is MaskedArray) + + def test_out(self): + # integer float even odd + for v in (40, 40., 30, 30.): + x = masked_array(np.arange(v).reshape(10, -1)) + x[:3] = x[-3:] = masked + out = masked_array(np.ones(10)) + r = median(x, axis=1, out=out) + if v == 30: + e = masked_array([0.]*3 + [10, 13, 16, 19] + [0.]*3, + mask=[True] * 3 + [False] * 4 + [True] * 3) + else: + e = masked_array([0.]*3 + [13.5, 17.5, 21.5, 25.5] + [0.]*3, + mask=[True]*3 + [False]*4 + [True]*3) + assert_equal(r, e) + assert_(r is out) + assert_(type(r) is MaskedArray) + + def test_single_non_masked_value_on_axis(self): + data = [[1., 0.], + [0., 3.], + [0., 0.]] + masked_arr = np.ma.masked_equal(data, 0) + expected = [1., 3.] 
+ assert_array_equal(np.ma.median(masked_arr, axis=0), + expected) + + def test_nan(self): + for mask in (False, np.zeros(6, dtype=bool)): + dm = np.ma.array([[1, np.nan, 3], [1, 2, 3]]) + dm.mask = mask + + # scalar result + r = np.ma.median(dm, axis=None) + assert_(np.isscalar(r)) + assert_array_equal(r, np.nan) + r = np.ma.median(dm.ravel(), axis=0) + assert_(np.isscalar(r)) + assert_array_equal(r, np.nan) + + r = np.ma.median(dm, axis=0) + assert_equal(type(r), MaskedArray) + assert_array_equal(r, [1, np.nan, 3]) + r = np.ma.median(dm, axis=1) + assert_equal(type(r), MaskedArray) + assert_array_equal(r, [np.nan, 2]) + r = np.ma.median(dm, axis=-1) + assert_equal(type(r), MaskedArray) + assert_array_equal(r, [np.nan, 2]) + + dm = np.ma.array([[1, np.nan, 3], [1, 2, 3]]) + dm[:, 2] = np.ma.masked + assert_array_equal(np.ma.median(dm, axis=None), np.nan) + assert_array_equal(np.ma.median(dm, axis=0), [1, np.nan, 3]) + assert_array_equal(np.ma.median(dm, axis=1), [np.nan, 1.5]) + + def test_out_nan(self): + o = np.ma.masked_array(np.zeros((4,))) + d = np.ma.masked_array(np.ones((3, 4))) + d[2, 1] = np.nan + d[2, 2] = np.ma.masked + assert_equal(np.ma.median(d, 0, out=o), o) + o = np.ma.masked_array(np.zeros((3,))) + assert_equal(np.ma.median(d, 1, out=o), o) + o = np.ma.masked_array(np.zeros(())) + assert_equal(np.ma.median(d, out=o), o) + + def test_nan_behavior(self): + a = np.ma.masked_array(np.arange(24, dtype=float)) + a[::3] = np.ma.masked + a[2] = np.nan + assert_array_equal(np.ma.median(a), np.nan) + assert_array_equal(np.ma.median(a, axis=0), np.nan) + + a = np.ma.masked_array(np.arange(24, dtype=float).reshape(2, 3, 4)) + a.mask = np.arange(a.size) % 2 == 1 + aorig = a.copy() + a[1, 2, 3] = np.nan + a[1, 1, 2] = np.nan + + # no axis + assert_array_equal(np.ma.median(a), np.nan) + assert_(np.isscalar(np.ma.median(a))) + + # axis0 + b = np.ma.median(aorig, axis=0) + b[2, 3] = np.nan + b[1, 2] = np.nan + assert_equal(np.ma.median(a, 0), b) + + # axis1 + b = np.ma.median(aorig, axis=1) + b[1, 3] = np.nan + b[1, 2] = np.nan + assert_equal(np.ma.median(a, 1), b) + + # axis02 + b = np.ma.median(aorig, axis=(0, 2)) + b[1] = np.nan + b[2] = np.nan + assert_equal(np.ma.median(a, (0, 2)), b) + + def test_ambigous_fill(self): + # 255 is max value, used as filler for sort + a = np.array([[3, 3, 255], [3, 3, 255]], dtype=np.uint8) + a = np.ma.masked_array(a, mask=a == 3) + assert_array_equal(np.ma.median(a, axis=1), 255) + assert_array_equal(np.ma.median(a, axis=1).mask, False) + assert_array_equal(np.ma.median(a, axis=0), a[0]) + assert_array_equal(np.ma.median(a), 255) + + def test_special(self): + for inf in [np.inf, -np.inf]: + a = np.array([[inf, np.nan], [np.nan, np.nan]]) + a = np.ma.masked_array(a, mask=np.isnan(a)) + assert_equal(np.ma.median(a, axis=0), [inf, np.nan]) + assert_equal(np.ma.median(a, axis=1), [inf, np.nan]) + assert_equal(np.ma.median(a), inf) + + a = np.array([[np.nan, np.nan, inf], [np.nan, np.nan, inf]]) + a = np.ma.masked_array(a, mask=np.isnan(a)) + assert_array_equal(np.ma.median(a, axis=1), inf) + assert_array_equal(np.ma.median(a, axis=1).mask, False) + assert_array_equal(np.ma.median(a, axis=0), a[0]) + assert_array_equal(np.ma.median(a), inf) + + # no mask + a = np.array([[inf, inf], [inf, inf]]) + assert_equal(np.ma.median(a), inf) + assert_equal(np.ma.median(a, axis=0), inf) + assert_equal(np.ma.median(a, axis=1), inf) + + a = np.array([[inf, 7, -inf, -9], + [-10, np.nan, np.nan, 5], + [4, np.nan, np.nan, inf]], + dtype=np.float32) + a = 
np.ma.masked_array(a, mask=np.isnan(a))
+            if inf > 0:
+                assert_equal(np.ma.median(a, axis=0), [4., 7., -inf, 5.])
+                assert_equal(np.ma.median(a), 4.5)
+            else:
+                assert_equal(np.ma.median(a, axis=0), [-10., 7., -inf, -9.])
+                assert_equal(np.ma.median(a), -2.5)
+            assert_equal(np.ma.median(a, axis=1), [-1., -2.5, inf])
+
+            for i in range(0, 10):
+                for j in range(1, 10):
+                    a = np.array([([np.nan] * i) + ([inf] * j)] * 2)
+                    a = np.ma.masked_array(a, mask=np.isnan(a))
+                    assert_equal(np.ma.median(a), inf)
+                    assert_equal(np.ma.median(a, axis=1), inf)
+                    assert_equal(np.ma.median(a, axis=0),
+                                 ([np.nan] * i) + [inf] * j)
+
+    def test_empty(self):
+        # empty arrays
+        a = np.ma.masked_array(np.array([], dtype=float))
+        with suppress_warnings() as w:
+            w.record(RuntimeWarning)
+            assert_array_equal(np.ma.median(a), np.nan)
+            assert_(w.log[0].category is RuntimeWarning)
+
+        # multiple dimensions
+        a = np.ma.masked_array(np.array([], dtype=float, ndmin=3))
+        # no axis
+        with suppress_warnings() as w:
+            w.record(RuntimeWarning)
+            warnings.filterwarnings('always', '', RuntimeWarning)
+            assert_array_equal(np.ma.median(a), np.nan)
+            assert_(w.log[0].category is RuntimeWarning)
+
+        # axis 0 and 1
+        b = np.ma.masked_array(np.array([], dtype=float, ndmin=2))
+        assert_equal(np.ma.median(a, axis=0), b)
+        assert_equal(np.ma.median(a, axis=1), b)
+
+        # axis 2
+        b = np.ma.masked_array(np.array(np.nan, dtype=float, ndmin=2))
+        with warnings.catch_warnings(record=True) as w:
+            warnings.filterwarnings('always', '', RuntimeWarning)
+            assert_equal(np.ma.median(a, axis=2), b)
+            assert_(w[0].category is RuntimeWarning)
+
+    def test_object(self):
+        o = np.ma.masked_array(np.arange(7.))
+        assert_(isinstance(np.ma.median(o.astype(object)), float))
+        o[2] = np.nan
+        assert_(isinstance(np.ma.median(o.astype(object)), float))
+
+
+class TestCov:
+
+    def setup(self):
+        self.data = array(np.random.rand(12))
+
+    def test_1d_without_missing(self):
+        # Test cov on 1D variable w/o missing values
+        x = self.data
+        assert_almost_equal(np.cov(x), cov(x))
+        assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False))
+        assert_almost_equal(np.cov(x, rowvar=False, bias=True),
+                            cov(x, rowvar=False, bias=True))
+
+    def test_2d_without_missing(self):
+        # Test cov on one 2D variable w/o missing values
+        x = self.data.reshape(3, 4)
+        assert_almost_equal(np.cov(x), cov(x))
+        assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False))
+        assert_almost_equal(np.cov(x, rowvar=False, bias=True),
+                            cov(x, rowvar=False, bias=True))
+
+    def test_1d_with_missing(self):
+        # Test cov on a 1D variable w/ missing values
+        x = self.data
+        x[-1] = masked
+        x -= x.mean()
+        nx = x.compressed()
+        assert_almost_equal(np.cov(nx), cov(x))
+        assert_almost_equal(np.cov(nx, rowvar=False), cov(x, rowvar=False))
+        assert_almost_equal(np.cov(nx, rowvar=False, bias=True),
+                            cov(x, rowvar=False, bias=True))
+        #
+        try:
+            cov(x, allow_masked=False)
+        except ValueError:
+            pass
+        else:
+            raise AssertionError("cov should raise ValueError when masked "
+                                 "values are present and allow_masked=False")
+        #
+        # 2 1D variables w/ missing values
+        nx = x[1:-1]
+        assert_almost_equal(np.cov(nx, nx[::-1]), cov(x, x[::-1]))
+        assert_almost_equal(np.cov(nx, nx[::-1], rowvar=False),
+                            cov(x, x[::-1], rowvar=False))
+        assert_almost_equal(np.cov(nx, nx[::-1], rowvar=False, bias=True),
+                            cov(x, x[::-1], rowvar=False, bias=True))
+
+    def test_2d_with_missing(self):
+        # Test cov on 2D variable w/ missing value
+        x = self.data
+        x[-1] = masked
+        x = x.reshape(3, 4)
+        valid = np.logical_not(getmaskarray(x)).astype(int)
+        frac = np.dot(valid, valid.T)
+        xf = (x - x.mean(1)[:, None]).filled(0)
+        assert_almost_equal(cov(x),
np.cov(xf) * (x.shape[1] - 1) / (frac - 1.)) + assert_almost_equal(cov(x, bias=True), + np.cov(xf, bias=True) * x.shape[1] / frac) + frac = np.dot(valid.T, valid) + xf = (x - x.mean(0)).filled(0) + assert_almost_equal(cov(x, rowvar=False), + (np.cov(xf, rowvar=False) * + (x.shape[0] - 1) / (frac - 1.))) + assert_almost_equal(cov(x, rowvar=False, bias=True), + (np.cov(xf, rowvar=False, bias=True) * + x.shape[0] / frac)) + + +class TestCorrcoef: + + def setup(self): + self.data = array(np.random.rand(12)) + self.data2 = array(np.random.rand(12)) + + def test_ddof(self): + # ddof raises DeprecationWarning + x, y = self.data, self.data2 + expected = np.corrcoef(x) + expected2 = np.corrcoef(x, y) + with suppress_warnings() as sup: + warnings.simplefilter("always") + assert_warns(DeprecationWarning, corrcoef, x, ddof=-1) + sup.filter(DeprecationWarning, "bias and ddof have no effect") + # ddof has no or negligible effect on the function + assert_almost_equal(np.corrcoef(x, ddof=0), corrcoef(x, ddof=0)) + assert_almost_equal(corrcoef(x, ddof=-1), expected) + assert_almost_equal(corrcoef(x, y, ddof=-1), expected2) + assert_almost_equal(corrcoef(x, ddof=3), expected) + assert_almost_equal(corrcoef(x, y, ddof=3), expected2) + + def test_bias(self): + x, y = self.data, self.data2 + expected = np.corrcoef(x) + # bias raises DeprecationWarning + with suppress_warnings() as sup: + warnings.simplefilter("always") + assert_warns(DeprecationWarning, corrcoef, x, y, True, False) + assert_warns(DeprecationWarning, corrcoef, x, y, True, True) + assert_warns(DeprecationWarning, corrcoef, x, bias=False) + sup.filter(DeprecationWarning, "bias and ddof have no effect") + # bias has no or negligible effect on the function + assert_almost_equal(corrcoef(x, bias=1), expected) + + def test_1d_without_missing(self): + # Test cov on 1D variable w/o missing values + x = self.data + assert_almost_equal(np.corrcoef(x), corrcoef(x)) + assert_almost_equal(np.corrcoef(x, rowvar=False), + corrcoef(x, rowvar=False)) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "bias and ddof have no effect") + assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), + corrcoef(x, rowvar=False, bias=True)) + + def test_2d_without_missing(self): + # Test corrcoef on 1 2D variable w/o missing values + x = self.data.reshape(3, 4) + assert_almost_equal(np.corrcoef(x), corrcoef(x)) + assert_almost_equal(np.corrcoef(x, rowvar=False), + corrcoef(x, rowvar=False)) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "bias and ddof have no effect") + assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), + corrcoef(x, rowvar=False, bias=True)) + + def test_1d_with_missing(self): + # Test corrcoef 1 1D variable w/missing values + x = self.data + x[-1] = masked + x -= x.mean() + nx = x.compressed() + assert_almost_equal(np.corrcoef(nx), corrcoef(x)) + assert_almost_equal(np.corrcoef(nx, rowvar=False), + corrcoef(x, rowvar=False)) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "bias and ddof have no effect") + assert_almost_equal(np.corrcoef(nx, rowvar=False, bias=True), + corrcoef(x, rowvar=False, bias=True)) + try: + corrcoef(x, allow_masked=False) + except ValueError: + pass + # 2 1D variables w/ missing values + nx = x[1:-1] + assert_almost_equal(np.corrcoef(nx, nx[::-1]), corrcoef(x, x[::-1])) + assert_almost_equal(np.corrcoef(nx, nx[::-1], rowvar=False), + corrcoef(x, x[::-1], rowvar=False)) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "bias and ddof have no 
effect") + # ddof and bias have no or negligible effect on the function + assert_almost_equal(np.corrcoef(nx, nx[::-1]), + corrcoef(x, x[::-1], bias=1)) + assert_almost_equal(np.corrcoef(nx, nx[::-1]), + corrcoef(x, x[::-1], ddof=2)) + + def test_2d_with_missing(self): + # Test corrcoef on 2D variable w/ missing value + x = self.data + x[-1] = masked + x = x.reshape(3, 4) + + test = corrcoef(x) + control = np.corrcoef(x) + assert_almost_equal(test[:-1, :-1], control[:-1, :-1]) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "bias and ddof have no effect") + # ddof and bias have no or negligible effect on the function + assert_almost_equal(corrcoef(x, ddof=-2)[:-1, :-1], + control[:-1, :-1]) + assert_almost_equal(corrcoef(x, ddof=3)[:-1, :-1], + control[:-1, :-1]) + assert_almost_equal(corrcoef(x, bias=1)[:-1, :-1], + control[:-1, :-1]) + + +class TestPolynomial: + # + def test_polyfit(self): + # Tests polyfit + # On ndarrays + x = np.random.rand(10) + y = np.random.rand(20).reshape(-1, 2) + assert_almost_equal(polyfit(x, y, 3), np.polyfit(x, y, 3)) + # ON 1D maskedarrays + x = x.view(MaskedArray) + x[0] = masked + y = y.view(MaskedArray) + y[0, 0] = y[-1, -1] = masked + # + (C, R, K, S, D) = polyfit(x, y[:, 0], 3, full=True) + (c, r, k, s, d) = np.polyfit(x[1:], y[1:, 0].compressed(), 3, + full=True) + for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): + assert_almost_equal(a, a_) + # + (C, R, K, S, D) = polyfit(x, y[:, -1], 3, full=True) + (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1, -1], 3, full=True) + for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): + assert_almost_equal(a, a_) + # + (C, R, K, S, D) = polyfit(x, y, 3, full=True) + (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1,:], 3, full=True) + for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): + assert_almost_equal(a, a_) + # + w = np.random.rand(10) + 1 + wo = w.copy() + xs = x[1:-1] + ys = y[1:-1] + ws = w[1:-1] + (C, R, K, S, D) = polyfit(x, y, 3, full=True, w=w) + (c, r, k, s, d) = np.polyfit(xs, ys, 3, full=True, w=ws) + assert_equal(w, wo) + for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): + assert_almost_equal(a, a_) + + def test_polyfit_with_masked_NaNs(self): + x = np.random.rand(10) + y = np.random.rand(20).reshape(-1, 2) + + x[0] = np.nan + y[-1,-1] = np.nan + x = x.view(MaskedArray) + y = y.view(MaskedArray) + x[0] = masked + y[-1,-1] = masked + + (C, R, K, S, D) = polyfit(x, y, 3, full=True) + (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1,:], 3, full=True) + for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): + assert_almost_equal(a, a_) + + +class TestArraySetOps: + + def test_unique_onlist(self): + # Test unique on list + data = [1, 1, 1, 2, 2, 3] + test = unique(data, return_index=True, return_inverse=True) + assert_(isinstance(test[0], MaskedArray)) + assert_equal(test[0], masked_array([1, 2, 3], mask=[0, 0, 0])) + assert_equal(test[1], [0, 3, 5]) + assert_equal(test[2], [0, 0, 0, 1, 1, 2]) + + def test_unique_onmaskedarray(self): + # Test unique on masked data w/use_mask=True + data = masked_array([1, 1, 1, 2, 2, 3], mask=[0, 0, 1, 0, 1, 0]) + test = unique(data, return_index=True, return_inverse=True) + assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1])) + assert_equal(test[1], [0, 3, 5, 2]) + assert_equal(test[2], [0, 0, 3, 1, 3, 2]) + # + data.fill_value = 3 + data = masked_array(data=[1, 1, 1, 2, 2, 3], + mask=[0, 0, 1, 0, 1, 0], fill_value=3) + test = unique(data, return_index=True, return_inverse=True) + assert_equal(test[0], masked_array([1, 2, 3, -1], 
mask=[0, 0, 0, 1]))
+        assert_equal(test[1], [0, 3, 5, 2])
+        assert_equal(test[2], [0, 0, 3, 1, 3, 2])
+
+    def test_unique_allmasked(self):
+        # Test all masked
+        data = masked_array([1, 1, 1], mask=True)
+        test = unique(data, return_index=True, return_inverse=True)
+        assert_equal(test[0], masked_array([1, ], mask=[True]))
+        assert_equal(test[1], [0])
+        assert_equal(test[2], [0, 0, 0])
+        #
+        # Test masked
+        data = masked
+        test = unique(data, return_index=True, return_inverse=True)
+        assert_equal(test[0], masked_array(masked))
+        assert_equal(test[1], [0])
+        assert_equal(test[2], [0])
+
+    def test_ediff1d(self):
+        # Tests ediff1d
+        x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1])
+        control = array([1, 1, 1, 4], mask=[1, 0, 0, 1])
+        test = ediff1d(x)
+        assert_equal(test, control)
+        assert_equal(test.filled(0), control.filled(0))
+        assert_equal(test.mask, control.mask)
+
+    def test_ediff1d_tobegin(self):
+        # Test ediff1d w/ to_begin
+        x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1])
+        test = ediff1d(x, to_begin=masked)
+        control = array([0, 1, 1, 1, 4], mask=[1, 1, 0, 0, 1])
+        assert_equal(test, control)
+        assert_equal(test.filled(0), control.filled(0))
+        assert_equal(test.mask, control.mask)
+        #
+        test = ediff1d(x, to_begin=[1, 2, 3])
+        control = array([1, 2, 3, 1, 1, 1, 4], mask=[0, 0, 0, 1, 0, 0, 1])
+        assert_equal(test, control)
+        assert_equal(test.filled(0), control.filled(0))
+        assert_equal(test.mask, control.mask)
+
+    def test_ediff1d_toend(self):
+        # Test ediff1d w/ to_end
+        x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1])
+        test = ediff1d(x, to_end=masked)
+        control = array([1, 1, 1, 4, 0], mask=[1, 0, 0, 1, 1])
+        assert_equal(test, control)
+        assert_equal(test.filled(0), control.filled(0))
+        assert_equal(test.mask, control.mask)
+        #
+        test = ediff1d(x, to_end=[1, 2, 3])
+        control = array([1, 1, 1, 4, 1, 2, 3], mask=[1, 0, 0, 1, 0, 0, 0])
+        assert_equal(test, control)
+        assert_equal(test.filled(0), control.filled(0))
+        assert_equal(test.mask, control.mask)
+
+    def test_ediff1d_tobegin_toend(self):
+        # Test ediff1d w/ to_begin and to_end
+        x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1])
+        test = ediff1d(x, to_end=masked, to_begin=masked)
+        control = array([0, 1, 1, 1, 4, 0], mask=[1, 1, 0, 0, 1, 1])
+        assert_equal(test, control)
+        assert_equal(test.filled(0), control.filled(0))
+        assert_equal(test.mask, control.mask)
+        #
+        test = ediff1d(x, to_end=[1, 2, 3], to_begin=masked)
+        control = array([0, 1, 1, 1, 4, 1, 2, 3],
+                        mask=[1, 1, 0, 0, 1, 0, 0, 0])
+        assert_equal(test, control)
+        assert_equal(test.filled(0), control.filled(0))
+        assert_equal(test.mask, control.mask)
+
+    def test_ediff1d_ndarray(self):
+        # Test ediff1d w/ an ndarray
+        x = np.arange(5)
+        test = ediff1d(x)
+        control = array([1, 1, 1, 1], mask=[0, 0, 0, 0])
+        assert_equal(test, control)
+        assert_(isinstance(test, MaskedArray))
+        assert_equal(test.filled(0), control.filled(0))
+        assert_equal(test.mask, control.mask)
+        #
+        test = ediff1d(x, to_end=masked, to_begin=masked)
+        control = array([0, 1, 1, 1, 1, 0], mask=[1, 0, 0, 0, 0, 1])
+        assert_(isinstance(test, MaskedArray))
+        assert_equal(test.filled(0), control.filled(0))
+        assert_equal(test.mask, control.mask)
+
+    def test_intersect1d(self):
+        # Test intersect1d
+        x = array([1, 3, 3, 3], mask=[0, 0, 0, 1])
+        y = array([3, 1, 1, 1], mask=[0, 0, 0, 1])
+        test = intersect1d(x, y)
+        control = array([1, 3, -1], mask=[0, 0, 1])
+        assert_equal(test, control)
+
+    def test_setxor1d(self):
+        # Test setxor1d
+        a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
+        b =
array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + test = setxor1d(a, b) + assert_equal(test, array([3, 4, 7])) + # + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = [1, 2, 3, 4, 5] + test = setxor1d(a, b) + assert_equal(test, array([3, 4, 7, -1], mask=[0, 0, 0, 1])) + # + a = array([1, 2, 3]) + b = array([6, 5, 4]) + test = setxor1d(a, b) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, [1, 2, 3, 4, 5, 6]) + # + a = array([1, 8, 2, 3], mask=[0, 1, 0, 0]) + b = array([6, 5, 4, 8], mask=[0, 0, 0, 1]) + test = setxor1d(a, b) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, [1, 2, 3, 4, 5, 6]) + # + assert_array_equal([], setxor1d([], [])) + + def test_isin(self): + # the tests for in1d cover most of isin's behavior + # if in1d is removed, would need to change those tests to test + # isin instead. + a = np.arange(24).reshape([2, 3, 4]) + mask = np.zeros([2, 3, 4]) + mask[1, 2, 0] = 1 + a = array(a, mask=mask) + b = array(data=[0, 10, 20, 30, 1, 3, 11, 22, 33], + mask=[0, 1, 0, 1, 0, 1, 0, 1, 0]) + ec = zeros((2, 3, 4), dtype=bool) + ec[0, 0, 0] = True + ec[0, 0, 1] = True + ec[0, 2, 3] = True + c = isin(a, b) + assert_(isinstance(c, MaskedArray)) + assert_array_equal(c, ec) + #compare results of np.isin to ma.isin + d = np.isin(a, b[~b.mask]) & ~a.mask + assert_array_equal(c, d) + + def test_in1d(self): + # Test in1d + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + test = in1d(a, b) + assert_equal(test, [True, True, True, False, True]) + # + a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 5, -1], mask=[0, 0, 1]) + test = in1d(a, b) + assert_equal(test, [True, True, False, True, True]) + # + assert_array_equal([], in1d([], [])) + + def test_in1d_invert(self): + # Test in1d's invert parameter + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True)) + + a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 5, -1], mask=[0, 0, 1]) + assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True)) + + assert_array_equal([], in1d([], [], invert=True)) + + def test_union1d(self): + # Test union1d + a = array([1, 2, 5, 7, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + test = union1d(a, b) + control = array([1, 2, 3, 4, 5, 7, -1], mask=[0, 0, 0, 0, 0, 0, 1]) + assert_equal(test, control) + + # Tests gh-10340, arguments to union1d should be + # flattened if they are not already 1D + x = array([[0, 1, 2], [3, 4, 5]], mask=[[0, 0, 0], [0, 0, 1]]) + y = array([0, 1, 2, 3, 4], mask=[0, 0, 0, 0, 1]) + ez = array([0, 1, 2, 3, 4, 5], mask=[0, 0, 0, 0, 0, 1]) + z = union1d(x, y) + assert_equal(z, ez) + # + assert_array_equal([], union1d([], [])) + + def test_setdiff1d(self): + # Test setdiff1d + a = array([6, 5, 4, 7, 7, 1, 2, 1], mask=[0, 0, 0, 0, 0, 0, 0, 1]) + b = array([2, 4, 3, 3, 2, 1, 5]) + test = setdiff1d(a, b) + assert_equal(test, array([6, 7, -1], mask=[0, 0, 1])) + # + a = arange(10) + b = arange(8) + assert_equal(setdiff1d(a, b), array([8, 9])) + a = array([], np.uint32, mask=[]) + assert_equal(setdiff1d(a, []).dtype, np.uint32) + + def test_setdiff1d_char_array(self): + # Test setdiff1d_charray + a = np.array(['a', 'b', 'c']) + b = np.array(['a', 'b', 's']) + assert_array_equal(setdiff1d(a, b), np.array(['c'])) + + +class TestShapeBase: + + def test_atleast_2d(self): + # Test atleast_2d + a = masked_array([0, 1, 
2], mask=[0, 1, 0]) + b = atleast_2d(a) + assert_equal(b.shape, (1, 3)) + assert_equal(b.mask.shape, b.data.shape) + assert_equal(a.shape, (3,)) + assert_equal(a.mask.shape, a.data.shape) + assert_equal(b.mask.shape, b.data.shape) + + def test_shape_scalar(self): + # the atleast and diagflat function should work with scalars + # GitHub issue #3367 + # Additionally, the atleast functions should accept multiple scalars + # correctly + b = atleast_1d(1.0) + assert_equal(b.shape, (1,)) + assert_equal(b.mask.shape, b.shape) + assert_equal(b.data.shape, b.shape) + + b = atleast_1d(1.0, 2.0) + for a in b: + assert_equal(a.shape, (1,)) + assert_equal(a.mask.shape, a.shape) + assert_equal(a.data.shape, a.shape) + + b = atleast_2d(1.0) + assert_equal(b.shape, (1, 1)) + assert_equal(b.mask.shape, b.shape) + assert_equal(b.data.shape, b.shape) + + b = atleast_2d(1.0, 2.0) + for a in b: + assert_equal(a.shape, (1, 1)) + assert_equal(a.mask.shape, a.shape) + assert_equal(a.data.shape, a.shape) + + b = atleast_3d(1.0) + assert_equal(b.shape, (1, 1, 1)) + assert_equal(b.mask.shape, b.shape) + assert_equal(b.data.shape, b.shape) + + b = atleast_3d(1.0, 2.0) + for a in b: + assert_equal(a.shape, (1, 1, 1)) + assert_equal(a.mask.shape, a.shape) + assert_equal(a.data.shape, a.shape) + + + b = diagflat(1.0) + assert_equal(b.shape, (1, 1)) + assert_equal(b.mask.shape, b.data.shape) + + +class TestStack: + + def test_stack_1d(self): + a = masked_array([0, 1, 2], mask=[0, 1, 0]) + b = masked_array([9, 8, 7], mask=[1, 0, 0]) + + c = stack([a, b], axis=0) + assert_equal(c.shape, (2, 3)) + assert_array_equal(a.mask, c[0].mask) + assert_array_equal(b.mask, c[1].mask) + + d = vstack([a, b]) + assert_array_equal(c.data, d.data) + assert_array_equal(c.mask, d.mask) + + c = stack([a, b], axis=1) + assert_equal(c.shape, (3, 2)) + assert_array_equal(a.mask, c[:, 0].mask) + assert_array_equal(b.mask, c[:, 1].mask) + + def test_stack_masks(self): + a = masked_array([0, 1, 2], mask=True) + b = masked_array([9, 8, 7], mask=False) + + c = stack([a, b], axis=0) + assert_equal(c.shape, (2, 3)) + assert_array_equal(a.mask, c[0].mask) + assert_array_equal(b.mask, c[1].mask) + + d = vstack([a, b]) + assert_array_equal(c.data, d.data) + assert_array_equal(c.mask, d.mask) + + c = stack([a, b], axis=1) + assert_equal(c.shape, (3, 2)) + assert_array_equal(a.mask, c[:, 0].mask) + assert_array_equal(b.mask, c[:, 1].mask) + + def test_stack_nd(self): + # 2D + shp = (3, 2) + d1 = np.random.randint(0, 10, shp) + d2 = np.random.randint(0, 10, shp) + m1 = np.random.randint(0, 2, shp).astype(bool) + m2 = np.random.randint(0, 2, shp).astype(bool) + a1 = masked_array(d1, mask=m1) + a2 = masked_array(d2, mask=m2) + + c = stack([a1, a2], axis=0) + c_shp = (2,) + shp + assert_equal(c.shape, c_shp) + assert_array_equal(a1.mask, c[0].mask) + assert_array_equal(a2.mask, c[1].mask) + + c = stack([a1, a2], axis=-1) + c_shp = shp + (2,) + assert_equal(c.shape, c_shp) + assert_array_equal(a1.mask, c[..., 0].mask) + assert_array_equal(a2.mask, c[..., 1].mask) + + # 4D + shp = (3, 2, 4, 5,) + d1 = np.random.randint(0, 10, shp) + d2 = np.random.randint(0, 10, shp) + m1 = np.random.randint(0, 2, shp).astype(bool) + m2 = np.random.randint(0, 2, shp).astype(bool) + a1 = masked_array(d1, mask=m1) + a2 = masked_array(d2, mask=m2) + + c = stack([a1, a2], axis=0) + c_shp = (2,) + shp + assert_equal(c.shape, c_shp) + assert_array_equal(a1.mask, c[0].mask) + assert_array_equal(a2.mask, c[1].mask) + + c = stack([a1, a2], axis=-1) + c_shp = shp + (2,) + 
assert_equal(c.shape, c_shp) + assert_array_equal(a1.mask, c[..., 0].mask) + assert_array_equal(a2.mask, c[..., 1].mask) diff --git a/MLPY/Lib/site-packages/numpy/ma/tests/test_mrecords.py b/MLPY/Lib/site-packages/numpy/ma/tests/test_mrecords.py new file mode 100644 index 0000000000000000000000000000000000000000..51326f76a934084fde5dcc3a679948d194b22bdb --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/ma/tests/test_mrecords.py @@ -0,0 +1,493 @@ +# pylint: disable-msg=W0611, W0612, W0511,R0201 +"""Tests suite for mrecords. + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu + +""" +import numpy as np +import numpy.ma as ma +from numpy import recarray +from numpy.ma import masked, nomask +from numpy.testing import temppath +from numpy.core.records import ( + fromrecords as recfromrecords, fromarrays as recfromarrays + ) +from numpy.ma.mrecords import ( + MaskedRecords, mrecarray, fromarrays, fromtextfile, fromrecords, + addfield + ) +from numpy.ma.testutils import ( + assert_, assert_equal, + assert_equal_records, + ) +from numpy.compat import pickle + + +class TestMRecords: + + ilist = [1, 2, 3, 4, 5] + flist = [1.1, 2.2, 3.3, 4.4, 5.5] + slist = [b'one', b'two', b'three', b'four', b'five'] + ddtype = [('a', int), ('b', float), ('c', '|S8')] + mask = [0, 1, 0, 0, 1] + base = ma.array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype) + + def test_byview(self): + # Test creation by view + base = self.base + mbase = base.view(mrecarray) + assert_equal(mbase.recordmask, base.recordmask) + assert_equal_records(mbase._mask, base._mask) + assert_(isinstance(mbase._data, recarray)) + assert_equal_records(mbase._data, base._data.view(recarray)) + for field in ('a', 'b', 'c'): + assert_equal(base[field], mbase[field]) + assert_equal_records(mbase.view(mrecarray), mbase) + + def test_get(self): + # Tests fields retrieval + base = self.base.copy() + mbase = base.view(mrecarray) + # As fields.......... + for field in ('a', 'b', 'c'): + assert_equal(getattr(mbase, field), mbase[field]) + assert_equal(base[field], mbase[field]) + # as elements ....... + mbase_first = mbase[0] + assert_(isinstance(mbase_first, mrecarray)) + assert_equal(mbase_first.dtype, mbase.dtype) + assert_equal(mbase_first.tolist(), (1, 1.1, b'one')) + # Used to be mask, now it's recordmask + assert_equal(mbase_first.recordmask, nomask) + assert_equal(mbase_first._mask.item(), (False, False, False)) + assert_equal(mbase_first['a'], mbase['a'][0]) + mbase_last = mbase[-1] + assert_(isinstance(mbase_last, mrecarray)) + assert_equal(mbase_last.dtype, mbase.dtype) + assert_equal(mbase_last.tolist(), (None, None, None)) + # Used to be mask, now it's recordmask + assert_equal(mbase_last.recordmask, True) + assert_equal(mbase_last._mask.item(), (True, True, True)) + assert_equal(mbase_last['a'], mbase['a'][-1]) + assert_((mbase_last['a'] is masked)) + # as slice .......... + mbase_sl = mbase[:2] + assert_(isinstance(mbase_sl, mrecarray)) + assert_equal(mbase_sl.dtype, mbase.dtype) + # Used to be mask, now it's recordmask + assert_equal(mbase_sl.recordmask, [0, 1]) + assert_equal_records(mbase_sl.mask, + np.array([(False, False, False), + (True, True, True)], + dtype=mbase._mask.dtype)) + assert_equal_records(mbase_sl, base[:2].view(mrecarray)) + for field in ('a', 'b', 'c'): + assert_equal(getattr(mbase_sl, field), base[:2][field]) + + def test_set_fields(self): + # Tests setting fields. 
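+        # Two distinct behaviours are pinned down below: writing through
+        # ``mbase.a._data`` updates the data buffer but leaves the field
+        # mask untouched, while attribute assignment (``mbase.a = 1``)
+        # also clears the field's mask.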
+ base = self.base.copy() + mbase = base.view(mrecarray) + mbase = mbase.copy() + mbase.fill_value = (999999, 1e20, 'N/A') + # Change the data, the mask should be conserved + mbase.a._data[:] = 5 + assert_equal(mbase['a']._data, [5, 5, 5, 5, 5]) + assert_equal(mbase['a']._mask, [0, 1, 0, 0, 1]) + # Change the elements, and the mask will follow + mbase.a = 1 + assert_equal(mbase['a']._data, [1]*5) + assert_equal(ma.getmaskarray(mbase['a']), [0]*5) + # Use to be _mask, now it's recordmask + assert_equal(mbase.recordmask, [False]*5) + assert_equal(mbase._mask.tolist(), + np.array([(0, 0, 0), + (0, 1, 1), + (0, 0, 0), + (0, 0, 0), + (0, 1, 1)], + dtype=bool)) + # Set a field to mask ........................ + mbase.c = masked + # Use to be mask, and now it's still mask ! + assert_equal(mbase.c.mask, [1]*5) + assert_equal(mbase.c.recordmask, [1]*5) + assert_equal(ma.getmaskarray(mbase['c']), [1]*5) + assert_equal(ma.getdata(mbase['c']), [b'N/A']*5) + assert_equal(mbase._mask.tolist(), + np.array([(0, 0, 1), + (0, 1, 1), + (0, 0, 1), + (0, 0, 1), + (0, 1, 1)], + dtype=bool)) + # Set fields by slices ....................... + mbase = base.view(mrecarray).copy() + mbase.a[3:] = 5 + assert_equal(mbase.a, [1, 2, 3, 5, 5]) + assert_equal(mbase.a._mask, [0, 1, 0, 0, 0]) + mbase.b[3:] = masked + assert_equal(mbase.b, base['b']) + assert_equal(mbase.b._mask, [0, 1, 0, 1, 1]) + # Set fields globally.......................... + ndtype = [('alpha', '|S1'), ('num', int)] + data = ma.array([('a', 1), ('b', 2), ('c', 3)], dtype=ndtype) + rdata = data.view(MaskedRecords) + val = ma.array([10, 20, 30], mask=[1, 0, 0]) + + rdata['num'] = val + assert_equal(rdata.num, val) + assert_equal(rdata.num.mask, [1, 0, 0]) + + def test_set_fields_mask(self): + # Tests setting the mask of a field. + base = self.base.copy() + # This one has already a mask.... + mbase = base.view(mrecarray) + mbase['a'][-2] = masked + assert_equal(mbase.a, [1, 2, 3, 4, 5]) + assert_equal(mbase.a._mask, [0, 1, 0, 1, 1]) + # This one has not yet + mbase = fromarrays([np.arange(5), np.random.rand(5)], + dtype=[('a', int), ('b', float)]) + mbase['a'][-2] = masked + assert_equal(mbase.a, [0, 1, 2, 3, 4]) + assert_equal(mbase.a._mask, [0, 0, 0, 1, 0]) + + def test_set_mask(self): + base = self.base.copy() + mbase = base.view(mrecarray) + # Set the mask to True ....................... + mbase.mask = masked + assert_equal(ma.getmaskarray(mbase['b']), [1]*5) + assert_equal(mbase['a']._mask, mbase['b']._mask) + assert_equal(mbase['a']._mask, mbase['c']._mask) + assert_equal(mbase._mask.tolist(), + np.array([(1, 1, 1)]*5, dtype=bool)) + # Delete the mask ............................ + mbase.mask = nomask + assert_equal(ma.getmaskarray(mbase['c']), [0]*5) + assert_equal(mbase._mask.tolist(), + np.array([(0, 0, 0)]*5, dtype=bool)) + + def test_set_mask_fromarray(self): + base = self.base.copy() + mbase = base.view(mrecarray) + # Sets the mask w/ an array + mbase.mask = [1, 0, 0, 0, 1] + assert_equal(mbase.a.mask, [1, 0, 0, 0, 1]) + assert_equal(mbase.b.mask, [1, 0, 0, 0, 1]) + assert_equal(mbase.c.mask, [1, 0, 0, 0, 1]) + # Yay, once more ! 
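+        # (a 1-D mask assigned to the whole record array is broadcast to
+        # every field, which is why a, b and c all pick up the same mask)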
+ mbase.mask = [0, 0, 0, 0, 1] + assert_equal(mbase.a.mask, [0, 0, 0, 0, 1]) + assert_equal(mbase.b.mask, [0, 0, 0, 0, 1]) + assert_equal(mbase.c.mask, [0, 0, 0, 0, 1]) + + def test_set_mask_fromfields(self): + mbase = self.base.copy().view(mrecarray) + + nmask = np.array( + [(0, 1, 0), (0, 1, 0), (1, 0, 1), (1, 0, 1), (0, 0, 0)], + dtype=[('a', bool), ('b', bool), ('c', bool)]) + mbase.mask = nmask + assert_equal(mbase.a.mask, [0, 0, 1, 1, 0]) + assert_equal(mbase.b.mask, [1, 1, 0, 0, 0]) + assert_equal(mbase.c.mask, [0, 0, 1, 1, 0]) + # Reinitialize and redo + mbase.mask = False + mbase.fieldmask = nmask + assert_equal(mbase.a.mask, [0, 0, 1, 1, 0]) + assert_equal(mbase.b.mask, [1, 1, 0, 0, 0]) + assert_equal(mbase.c.mask, [0, 0, 1, 1, 0]) + + def test_set_elements(self): + base = self.base.copy() + # Set an element to mask ..................... + mbase = base.view(mrecarray).copy() + mbase[-2] = masked + assert_equal( + mbase._mask.tolist(), + np.array([(0, 0, 0), (1, 1, 1), (0, 0, 0), (1, 1, 1), (1, 1, 1)], + dtype=bool)) + # Used to be mask, now it's recordmask! + assert_equal(mbase.recordmask, [0, 1, 0, 1, 1]) + # Set slices ................................. + mbase = base.view(mrecarray).copy() + mbase[:2] = (5, 5, 5) + assert_equal(mbase.a._data, [5, 5, 3, 4, 5]) + assert_equal(mbase.a._mask, [0, 0, 0, 0, 1]) + assert_equal(mbase.b._data, [5., 5., 3.3, 4.4, 5.5]) + assert_equal(mbase.b._mask, [0, 0, 0, 0, 1]) + assert_equal(mbase.c._data, + [b'5', b'5', b'three', b'four', b'five']) + assert_equal(mbase.b._mask, [0, 0, 0, 0, 1]) + + mbase = base.view(mrecarray).copy() + mbase[:2] = masked + assert_equal(mbase.a._data, [1, 2, 3, 4, 5]) + assert_equal(mbase.a._mask, [1, 1, 0, 0, 1]) + assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 4.4, 5.5]) + assert_equal(mbase.b._mask, [1, 1, 0, 0, 1]) + assert_equal(mbase.c._data, + [b'one', b'two', b'three', b'four', b'five']) + assert_equal(mbase.b._mask, [1, 1, 0, 0, 1]) + + def test_setslices_hardmask(self): + # Tests setting slices w/ hardmask. + base = self.base.copy() + mbase = base.view(mrecarray) + mbase.harden_mask() + try: + mbase[-2:] = (5, 5, 5) + assert_equal(mbase.a._data, [1, 2, 3, 5, 5]) + assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 5, 5.5]) + assert_equal(mbase.c._data, + [b'one', b'two', b'three', b'5', b'five']) + assert_equal(mbase.a._mask, [0, 1, 0, 0, 1]) + assert_equal(mbase.b._mask, mbase.a._mask) + assert_equal(mbase.b._mask, mbase.c._mask) + except NotImplementedError: + # OK, not implemented yet... + pass + except AssertionError: + raise + else: + raise Exception("Flexible hard masks should be supported !") + # Not using a tuple should crash + try: + mbase[-2:] = 3 + except (NotImplementedError, TypeError): + pass + else: + raise TypeError("Should have expected a readable buffer object!") + + def test_hardmask(self): + # Test hardmask + base = self.base.copy() + mbase = base.view(mrecarray) + mbase.harden_mask() + assert_(mbase._hardmask) + mbase.mask = nomask + assert_equal_records(mbase._mask, base._mask) + mbase.soften_mask() + assert_(not mbase._hardmask) + mbase.mask = nomask + # So, the mask of a field is no longer set to nomask... 
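+        # ...each field instead keeps an all-False mask array, which
+        # make_mask() collapses back to nomask: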
+        assert_equal_records(mbase._mask,
+                             ma.make_mask_none(base.shape, base.dtype))
+        assert_(ma.make_mask(mbase['b']._mask) is nomask)
+        assert_equal(mbase['a']._mask, mbase['b']._mask)
+
+    def test_pickling(self):
+        # Test pickling
+        base = self.base.copy()
+        mrec = base.view(mrecarray)
+        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+            _ = pickle.dumps(mrec, protocol=proto)
+            mrec_ = pickle.loads(_)
+            assert_equal(mrec_.dtype, mrec.dtype)
+            assert_equal_records(mrec_._data, mrec._data)
+            assert_equal(mrec_._mask, mrec._mask)
+            assert_equal_records(mrec_._mask, mrec._mask)
+
+    def test_filled(self):
+        # Test filling the array
+        _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
+        _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
+        _c = ma.array(['one', 'two', 'three'], mask=[0, 0, 1], dtype='|S8')
+        ddtype = [('a', int), ('b', float), ('c', '|S8')]
+        mrec = fromarrays([_a, _b, _c], dtype=ddtype,
+                          fill_value=(99999, 99999., 'N/A'))
+        mrecfilled = mrec.filled()
+        assert_equal(mrecfilled['a'], np.array((1, 2, 99999), dtype=int))
+        assert_equal(mrecfilled['b'], np.array((1.1, 2.2, 99999.),
+                                               dtype=float))
+        assert_equal(mrecfilled['c'], np.array(('one', 'two', 'N/A'),
+                                               dtype='|S8'))
+
+    def test_tolist(self):
+        # Test tolist.
+        _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
+        _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
+        _c = ma.array(['one', 'two', 'three'], mask=[1, 0, 0], dtype='|S8')
+        ddtype = [('a', int), ('b', float), ('c', '|S8')]
+        mrec = fromarrays([_a, _b, _c], dtype=ddtype,
+                          fill_value=(99999, 99999., 'N/A'))
+
+        assert_equal(mrec.tolist(),
+                     [(1, 1.1, None), (2, 2.2, b'two'),
+                      (None, None, b'three')])
+
+    def test_withnames(self):
+        # Test the creation w/ format and names
+        x = mrecarray(1, formats=float, names='base')
+        x[0]['base'] = 10
+        assert_equal(x['base'][0], 10)
+
+    def test_exotic_formats(self):
+        # Test that 'exotic' formats are processed properly
+        easy = mrecarray(1, dtype=[('i', int), ('s', '|S8'), ('f', float)])
+        easy[0] = masked
+        assert_equal(easy.filled(1).item(), (1, b'1', 1.))
+
+        solo = mrecarray(1, dtype=[('f0', '<f8', (2, 2))])
+        if len(s) > 1:
+            assert_(eq(np.concatenate((x, y), 1),
+                       concatenate((xm, ym), 1)))
+            assert_(eq(np.add.reduce(x, 1), add.reduce(x, 1)))
+            assert_(eq(np.sum(x, 1), sum(x, 1)))
+            assert_(eq(np.product(x, 1), product(x, 1)))
+
+    def test_testCI(self):
+        # Test of conversions and indexing
+        x1 = np.array([1, 2, 4, 3])
+        x2 = array(x1, mask=[1, 0, 0, 0])
+        x3 = array(x1, mask=[0, 1, 0, 1])
+        x4 = array(x1)
+        # test conversion to strings
+        str(x2)  # raises?
+        repr(x2)  # raises?
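+        # With fill_value=0 the masked entry sorts to the front; eq() skips
+        # masked slots, so only the unmasked ordering is actually compared.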
+        assert_(eq(np.sort(x1), sort(x2, fill_value=0)))
+        # tests of indexing
+        assert_(type(x2[1]) is type(x1[1]))
+        assert_(x1[1] == x2[1])
+        assert_(x2[0] is masked)
+        assert_(eq(x1[2], x2[2]))
+        assert_(eq(x1[2:5], x2[2:5]))
+        assert_(eq(x1[:], x2[:]))
+        assert_(eq(x1[1:], x3[1:]))
+        x1[2] = 9
+        x2[2] = 9
+        assert_(eq(x1, x2))
+        x1[1:3] = 99
+        x2[1:3] = 99
+        assert_(eq(x1, x2))
+        x2[1] = masked
+        assert_(eq(x1, x2))
+        x2[1:3] = masked
+        assert_(eq(x1, x2))
+        x2[:] = x1
+        x2[1] = masked
+        assert_(allequal(getmask(x2), array([0, 1, 0, 0])))
+        x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
+        assert_(allequal(getmask(x3), array([0, 1, 1, 0])))
+        x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
+        assert_(allequal(getmask(x4), array([0, 1, 1, 0])))
+        assert_(allequal(x4, array([1, 2, 3, 4])))
+        x1 = np.arange(5) * 1.0
+        x2 = masked_values(x1, 3.0)
+        assert_(eq(x1, x2))
+        assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask))
+        assert_(eq(3.0, x2.fill_value))
+        x1 = array([1, 'hello', 2, 3], object)
+        x2 = np.array([1, 'hello', 2, 3], object)
+        s1 = x1[1]
+        s2 = x2[1]
+        assert_equal(type(s2), str)
+        assert_equal(type(s1), str)
+        assert_equal(s1, s2)
+        assert_(x1[1:1].shape == (0,))
+
+    def test_testCopySize(self):
+        # Tests of some subtle points of copying and sizing.
+        n = [0, 0, 1, 0, 0]
+        m = make_mask(n)
+        m2 = make_mask(m)
+        assert_(m is m2)
+        m3 = make_mask(m, copy=True)
+        assert_(m is not m3)
+
+        x1 = np.arange(5)
+        y1 = array(x1, mask=m)
+        assert_(y1._data is not x1)
+        assert_(allequal(x1, y1._data))
+        assert_(y1._mask is m)
+
+        y1a = array(y1, copy=0)
+        # For copy=False, one might expect that the array would just
+        # be passed on, i.e., that it would be "is" instead of "==".
+        # See gh-4043 for discussion.
+        assert_(y1a._mask.__array_interface__ ==
+                y1._mask.__array_interface__)
+
+        y2 = array(x1, mask=m3, copy=0)
+        assert_(y2._mask is m3)
+        assert_(y2[2] is masked)
+        y2[2] = 9
+        assert_(y2[2] is not masked)
+        assert_(y2._mask is m3)
+        assert_(allequal(y2.mask, 0))
+
+        y2a = array(x1, mask=m, copy=1)
+        assert_(y2a._mask is not m)
+        assert_(y2a[2] is masked)
+        y2a[2] = 9
+        assert_(y2a[2] is not masked)
+        assert_(y2a._mask is not m)
+        assert_(allequal(y2a.mask, 0))
+
+        y3 = array(x1 * 1.0, mask=m)
+        assert_(filled(y3).dtype is (x1 * 1.0).dtype)
+
+        x4 = arange(4)
+        x4[2] = masked
+        y4 = resize(x4, (8,))
+        assert_(eq(concatenate([x4, x4]), y4))
+        assert_(eq(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]))
+        y5 = repeat(x4, (2, 2, 2, 2), axis=0)
+        assert_(eq(y5, [0, 0, 1, 1, 2, 2, 3, 3]))
+        y6 = repeat(x4, 2, axis=0)
+        assert_(eq(y5, y6))
+
+    def test_testPut(self):
+        # Test of put
+        d = arange(5)
+        n = [0, 0, 0, 1, 1]
+        m = make_mask(n)
+        m2 = m.copy()
+        x = array(d, mask=m)
+        assert_(x[3] is masked)
+        assert_(x[4] is masked)
+        x[[1, 4]] = [10, 40]
+        assert_(x._mask is m)
+        assert_(x[3] is masked)
+        assert_(x[4] is not masked)
+        assert_(eq(x, [0, 10, 2, -1, 40]))
+
+        x = array(d, mask=m2, copy=True)
+        x.put([0, 1, 2], [-1, 100, 200])
+        assert_(x._mask is not m2)
+        assert_(x[3] is masked)
+        assert_(x[4] is masked)
+        assert_(eq(x, [-1, 100, 200, 0, 0]))
+
+    def test_testPut2(self):
+        # Test of put
+        d = arange(5)
+        x = array(d, mask=[0, 0, 0, 0, 0])
+        z = array([10, 40], mask=[1, 0])
+        assert_(x[2] is not masked)
+        assert_(x[3] is not masked)
+        x[2:4] = z
+        assert_(x[2] is masked)
+        assert_(x[3] is not masked)
+        assert_(eq(x, [0, 1, 10, 40, 4]))
+
+        d = arange(5)
+        x = array(d, mask=[0, 0, 0, 0, 0])
+        y = x[2:4]
+        z = array([10, 40], mask=[1, 0])
+        assert_(x[2] is not masked)
+        assert_(x[3] is
not masked) + y[:] = z + assert_(y[0] is masked) + assert_(y[1] is not masked) + assert_(eq(y, [10, 40])) + assert_(x[2] is masked) + assert_(x[3] is not masked) + assert_(eq(x, [0, 1, 10, 40, 4])) + + def test_testMaPut(self): + (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d + m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1] + i = np.nonzero(m)[0] + put(ym, i, zm) + assert_(all(take(ym, i, axis=0) == zm)) + + def test_testOddFeatures(self): + # Test of other odd features + x = arange(20) + x = x.reshape(4, 5) + x.flat[5] = 12 + assert_(x[1, 0] == 12) + z = x + 10j * x + assert_(eq(z.real, x)) + assert_(eq(z.imag, 10 * x)) + assert_(eq((z * conjugate(z)).real, 101 * x * x)) + z.imag[...] = 0.0 + + x = arange(10) + x[3] = masked + assert_(str(x[3]) == str(masked)) + c = x >= 8 + assert_(count(where(c, masked, masked)) == 0) + assert_(shape(where(c, masked, masked)) == c.shape) + z = where(c, x, masked) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is masked) + assert_(z[7] is masked) + assert_(z[8] is not masked) + assert_(z[9] is not masked) + assert_(eq(x, z)) + z = where(c, masked, x) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is not masked) + assert_(z[7] is not masked) + assert_(z[8] is masked) + assert_(z[9] is masked) + z = masked_where(c, x) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is not masked) + assert_(z[7] is not masked) + assert_(z[8] is masked) + assert_(z[9] is masked) + assert_(eq(x, z)) + x = array([1., 2., 3., 4., 5.]) + c = array([1, 1, 1, 0, 0]) + x[2] = masked + z = where(c, x, -x) + assert_(eq(z, [1., 2., 0., -4., -5])) + c[0] = masked + z = where(c, x, -x) + assert_(eq(z, [1., 2., 0., -4., -5])) + assert_(z[0] is masked) + assert_(z[1] is not masked) + assert_(z[2] is masked) + assert_(eq(masked_where(greater(x, 2), x), masked_greater(x, 2))) + assert_(eq(masked_where(greater_equal(x, 2), x), + masked_greater_equal(x, 2))) + assert_(eq(masked_where(less(x, 2), x), masked_less(x, 2))) + assert_(eq(masked_where(less_equal(x, 2), x), masked_less_equal(x, 2))) + assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))) + assert_(eq(masked_where(equal(x, 2), x), masked_equal(x, 2))) + assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))) + assert_(eq(masked_inside(list(range(5)), 1, 3), [0, 199, 199, 199, 4])) + assert_(eq(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199])) + assert_(eq(masked_inside(array(list(range(5)), + mask=[1, 0, 0, 0, 0]), 1, 3).mask, + [1, 1, 1, 1, 0])) + assert_(eq(masked_outside(array(list(range(5)), + mask=[0, 1, 0, 0, 0]), 1, 3).mask, + [1, 1, 0, 0, 1])) + assert_(eq(masked_equal(array(list(range(5)), + mask=[1, 0, 0, 0, 0]), 2).mask, + [1, 0, 1, 0, 0])) + assert_(eq(masked_not_equal(array([2, 2, 1, 2, 1], + mask=[1, 0, 0, 0, 0]), 2).mask, + [1, 0, 1, 0, 1])) + assert_(eq(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]), + [99, 99, 3, 4, 5])) + atest = ones((10, 10, 10), dtype=np.float32) + btest = zeros(atest.shape, MaskType) + ctest = masked_where(btest, atest) + assert_(eq(atest, ctest)) + z = choose(c, (-x, x)) + assert_(eq(z, [1., 2., 0., -4., -5])) + assert_(z[0] is masked) + assert_(z[1] is not masked) + assert_(z[2] is masked) + x = arange(6) + x[5] = masked + y = arange(6) * 10 + y[2] = masked + c = array([1, 1, 1, 0, 0, 0], mask=[1, 0, 0, 0, 0, 0]) + cm = c.filled(1) + z = where(c, x, y) + zm = where(cm, x, y) + assert_(eq(z, zm)) + assert_(getmask(zm) is nomask) + assert_(eq(zm, [0, 1, 2, 30, 40, 50])) + z = where(c, 
masked, 1)
+        assert_(eq(z, [99, 99, 99, 1, 1, 1]))
+        z = where(c, 1, masked)
+        assert_(eq(z, [99, 1, 1, 99, 99, 99]))
+
+    def test_testMinMax2(self):
+        # Test of minimum, maximum.
+        assert_(eq(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3]))
+        assert_(eq(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9]))
+        x = arange(5)
+        y = arange(5) - 2
+        x[3] = masked
+        y[0] = masked
+        assert_(eq(minimum(x, y), where(less(x, y), x, y)))
+        assert_(eq(maximum(x, y), where(greater(x, y), x, y)))
+        assert_(minimum.reduce(x) == 0)
+        assert_(maximum.reduce(x) == 4)
+
+    def test_testTakeTransposeInnerOuter(self):
+        # Test of take, transpose, inner, outer products
+        x = arange(24)
+        y = np.arange(24)
+        x[5:6] = masked
+        x = x.reshape(2, 3, 4)
+        y = y.reshape(2, 3, 4)
+        assert_(eq(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1))))
+        assert_(eq(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1)))
+        assert_(eq(np.inner(filled(x, 0), filled(y, 0)),
+                   inner(x, y)))
+        assert_(eq(np.outer(filled(x, 0), filled(y, 0)),
+                   outer(x, y)))
+        y = array(['abc', 1, 'def', 2, 3], object)
+        y[2] = masked
+        t = take(y, [0, 3, 4])
+        assert_(t[0] == 'abc')
+        assert_(t[1] == 2)
+        assert_(t[2] == 3)
+
+    def test_testInplace(self):
+        # Test of inplace operations and rich comparisons
+        y = arange(10)
+
+        x = arange(10)
+        xm = arange(10)
+        xm[2] = masked
+        x += 1
+        assert_(eq(x, y + 1))
+        xm += 1
+        assert_(eq(xm, y + 1))
+
+        x = arange(10)
+        xm = arange(10)
+        xm[2] = masked
+        x -= 1
+        assert_(eq(x, y - 1))
+        xm -= 1
+        assert_(eq(xm, y - 1))
+
+        x = arange(10) * 1.0
+        xm = arange(10) * 1.0
+        xm[2] = masked
+        x *= 2.0
+        assert_(eq(x, y * 2))
+        xm *= 2.0
+        assert_(eq(xm, y * 2))
+
+        x = arange(10) * 2
+        xm = arange(10) * 2
+        xm[2] = masked
+        x //= 2
+        assert_(eq(x, y))
+        xm //= 2
+        assert_(eq(xm, y))
+
+        x = arange(10) * 1.0
+        xm = arange(10) * 1.0
+        xm[2] = masked
+        x /= 2.0
+        assert_(eq(x, y / 2.0))
+        xm /= arange(10)
+        assert_(eq(xm, ones((10,))))
+
+        x = arange(10).astype(np.float32)
+        xm = arange(10)
+        xm[2] = masked
+        x += 1.
+        assert_(eq(x, y + 1.))
+
+    def test_testPickle(self):
+        # Test of pickling
+        x = arange(12)
+        x[4:10:2] = masked
+        x = x.reshape(4, 3)
+        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+            s = pickle.dumps(x, protocol=proto)
+            y = pickle.loads(s)
+            assert_(eq(x, y))
+
+    def test_testMasked(self):
+        # Test of masked element
+        xx = arange(6)
+        xx[1] = masked
+        assert_(str(masked) == '--')
+        assert_(xx[1] is masked)
+        assert_equal(filled(xx[1], 0), 0)
+
+    def test_testAverage1(self):
+        # Test of average.
+        ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
+        assert_(eq(2.0, average(ott, axis=0)))
+        assert_(eq(2.0, average(ott, weights=[1., 1., 2., 1.])))
+        result, wts = average(ott, weights=[1., 1., 2., 1.], returned=True)
+        assert_(eq(2.0, result))
+        assert_(wts == 4.0)
+        ott[:] = masked
+        assert_(average(ott, axis=0) is masked)
+        ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
+        ott = ott.reshape(2, 2)
+        ott[:, 1] = masked
+        assert_(eq(average(ott, axis=0), [2.0, 0.0]))
+        assert_(average(ott, axis=1)[0] is masked)
+        assert_(eq([2., 0.], average(ott, axis=0)))
+        result, wts = average(ott, axis=0, returned=True)
+        assert_(eq(wts, [1., 0.]))
+
+    def test_testAverage2(self):
+        # More tests of average.
+        w1 = [0, 1, 1, 1, 1, 0]
+        w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]]
+        x = arange(6)
+        assert_(allclose(average(x, axis=0), 2.5))
+        assert_(allclose(average(x, axis=0, weights=w1), 2.5))
+        y = array([arange(6), 2.0 * arange(6)])
+        assert_(allclose(average(y, None),
+                         np.add.reduce(np.arange(6)) * 3.
/ 12.)) + assert_(allclose(average(y, axis=0), np.arange(6) * 3. / 2.)) + assert_(allclose(average(y, axis=1), + [average(x, axis=0), average(x, axis=0)*2.0])) + assert_(allclose(average(y, None, weights=w2), 20. / 6.)) + assert_(allclose(average(y, axis=0, weights=w2), + [0., 1., 2., 3., 4., 10.])) + assert_(allclose(average(y, axis=1), + [average(x, axis=0), average(x, axis=0)*2.0])) + m1 = zeros(6) + m2 = [0, 0, 1, 1, 0, 0] + m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] + m4 = ones(6) + m5 = [0, 1, 1, 1, 1, 1] + assert_(allclose(average(masked_array(x, m1), axis=0), 2.5)) + assert_(allclose(average(masked_array(x, m2), axis=0), 2.5)) + assert_(average(masked_array(x, m4), axis=0) is masked) + assert_equal(average(masked_array(x, m5), axis=0), 0.0) + assert_equal(count(average(masked_array(x, m4), axis=0)), 0) + z = masked_array(y, m3) + assert_(allclose(average(z, None), 20. / 6.)) + assert_(allclose(average(z, axis=0), + [0., 1., 99., 99., 4.0, 7.5])) + assert_(allclose(average(z, axis=1), [2.5, 5.0])) + assert_(allclose(average(z, axis=0, weights=w2), + [0., 1., 99., 99., 4.0, 10.0])) + + a = arange(6) + b = arange(6) * 3 + r1, w1 = average([[a, b], [b, a]], axis=1, returned=True) + assert_equal(shape(r1), shape(w1)) + assert_equal(r1.shape, w1.shape) + r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=True) + assert_equal(shape(w2), shape(r2)) + r2, w2 = average(ones((2, 2, 3)), returned=True) + assert_equal(shape(w2), shape(r2)) + r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=True) + assert_(shape(w2) == shape(r2)) + a2d = array([[1, 2], [0, 4]], float) + a2dm = masked_array(a2d, [[0, 0], [1, 0]]) + a2da = average(a2d, axis=0) + assert_(eq(a2da, [0.5, 3.0])) + a2dma = average(a2dm, axis=0) + assert_(eq(a2dma, [1.0, 3.0])) + a2dma = average(a2dm, axis=None) + assert_(eq(a2dma, 7. 
/ 3.))
+        a2dma = average(a2dm, axis=1)
+        assert_(eq(a2dma, [1.5, 4.0]))
+
+    def test_testToPython(self):
+        assert_equal(1, int(array(1)))
+        assert_equal(1.0, float(array(1)))
+        assert_equal(1, int(array([[[1]]])))
+        assert_equal(1.0, float(array([[1]])))
+        assert_raises(TypeError, float, array([1, 1]))
+        assert_raises(ValueError, bool, array([0, 1]))
+        assert_raises(ValueError, bool, array([0, 0], mask=[0, 1]))
+
+    def test_testScalarArithmetic(self):
+        xm = array(0, mask=1)
+        # TODO FIXME: Find out why the following raises a warning in r8247
+        with np.errstate(divide='ignore'):
+            assert_((1 / array(0)).mask)
+        assert_((1 + xm).mask)
+        assert_((-xm).mask)
+        assert_((-xm).mask)
+        assert_(maximum(xm, xm).mask)
+        assert_(minimum(xm, xm).mask)
+        assert_(xm.filled().dtype is xm._data.dtype)
+        x = array(0, mask=0)
+        assert_(x.filled() == x._data)
+        assert_equal(str(xm), str(masked_print_option))
+
+    def test_testArrayMethods(self):
+        a = array([1, 3, 2])
+        assert_(eq(a.any(), a._data.any()))
+        assert_(eq(a.all(), a._data.all()))
+        assert_(eq(a.argmax(), a._data.argmax()))
+        assert_(eq(a.argmin(), a._data.argmin()))
+        assert_(eq(a.choose(0, 1, 2, 3, 4),
+                   a._data.choose(0, 1, 2, 3, 4)))
+        assert_(eq(a.compress([1, 0, 1]), a._data.compress([1, 0, 1])))
+        assert_(eq(a.conj(), a._data.conj()))
+        assert_(eq(a.conjugate(), a._data.conjugate()))
+        m = array([[1, 2], [3, 4]])
+        assert_(eq(m.diagonal(), m._data.diagonal()))
+        assert_(eq(a.sum(), a._data.sum()))
+        assert_(eq(a.take([1, 2]), a._data.take([1, 2])))
+        assert_(eq(m.transpose(), m._data.transpose()))
+
+    def test_testArrayAttributes(self):
+        a = array([1, 3, 2])
+        assert_equal(a.ndim, 1)
+
+    def test_testAPI(self):
+        assert_(not [m for m in dir(np.ndarray)
+                     if m not in dir(MaskedArray) and
+                     not m.startswith('_')])
+
+    def test_testSingleElementSubscript(self):
+        a = array([1, 3, 2])
+        b = array([1, 3, 2], mask=[1, 0, 1])
+        assert_equal(a[0].shape, ())
+        assert_equal(b[0].shape, ())
+        assert_equal(b[1].shape, ())
+
+
+class TestUfuncs:
+    def setup(self):
+        self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6),
+                  array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),)
+
+    def test_testUfuncRegression(self):
+        f_invalid_ignore = [
+            'sqrt', 'arctanh', 'arcsin', 'arccos',
+            'arccosh', 'arctanh', 'log', 'log10', 'divide',
+            'true_divide', 'floor_divide', 'remainder', 'fmod']
+        for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
+                  'sin', 'cos', 'tan',
+                  'arcsin', 'arccos', 'arctan',
+                  'sinh', 'cosh', 'tanh',
+                  'arcsinh',
+                  'arccosh',
+                  'arctanh',
+                  'absolute', 'fabs', 'negative',
+                  'floor', 'ceil',
+                  'logical_not',
+                  'add', 'subtract', 'multiply',
+                  'divide', 'true_divide', 'floor_divide',
+                  'remainder', 'fmod', 'hypot', 'arctan2',
+                  'equal', 'not_equal', 'less_equal', 'greater_equal',
+                  'less', 'greater',
+                  'logical_and', 'logical_or', 'logical_xor']:
+            try:
+                uf = getattr(umath, f)
+            except AttributeError:
+                uf = getattr(fromnumeric, f)
+            mf = getattr(np.ma, f)
+            args = self.d[:uf.nin]
+            with np.errstate():
+                if f in f_invalid_ignore:
+                    np.seterr(invalid='ignore')
+                if f in ['arctanh', 'log', 'log10']:
+                    np.seterr(divide='ignore')
+                ur = uf(*args)
+                mr = mf(*args)
+            assert_(eq(ur.filled(0), mr.filled(0), f))
+            assert_(eqmask(ur.mask, mr.mask))
+
+    def test_reduce(self):
+        a = self.d[0]
+        assert_(not alltrue(a, axis=0))
+        assert_(sometrue(a, axis=0))
+        assert_equal(sum(a[:3], axis=0), 0)
+        assert_equal(product(a, axis=0), 0)
+
+    def test_minmax(self):
+        a = arange(1, 13).reshape(3, 4)
+        amask = masked_where(a < 5, a)
+        
assert_equal(amask.max(), a.max()) + assert_equal(amask.min(), 5) + assert_((amask.max(0) == a.max(0)).all()) + assert_((amask.min(0) == [5, 6, 7, 8]).all()) + assert_(amask.max(1)[0].mask) + assert_(amask.min(1)[0].mask) + + def test_nonzero(self): + for t in "?bhilqpBHILQPfdgFDGO": + x = array([1, 0, 2, 0], mask=[0, 0, 1, 1]) + assert_(eq(nonzero(x), [0])) + + +class TestArrayMethods: + + def setup(self): + x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + X = x.reshape(6, 6) + XX = x.reshape(3, 2, 2, 3) + + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mx = array(data=x, mask=m) + mX = array(data=X, mask=m.reshape(X.shape)) + mXX = array(data=XX, mask=m.reshape(XX.shape)) + + self.d = (x, X, XX, m, mx, mX, mXX) + + def test_trace(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + mXdiag = mX.diagonal() + assert_equal(mX.trace(), mX.diagonal().compressed().sum()) + assert_(eq(mX.trace(), + X.trace() - sum(mXdiag.mask * X.diagonal(), + axis=0))) + + def test_clip(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + clipped = mx.clip(2, 8) + assert_(eq(clipped.mask, mx.mask)) + assert_(eq(clipped._data, x.clip(2, 8))) + assert_(eq(clipped._data, mx._data.clip(2, 8))) + + def test_ptp(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + (n, m) = X.shape + assert_equal(mx.ptp(), mx.compressed().ptp()) + rows = np.zeros(n, np.float_) + cols = np.zeros(m, np.float_) + for k in range(m): + cols[k] = mX[:, k].compressed().ptp() + for k in range(n): + rows[k] = mX[k].compressed().ptp() + assert_(eq(mX.ptp(0), cols)) + assert_(eq(mX.ptp(1), rows)) + + def test_swapaxes(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + mXswapped = mX.swapaxes(0, 1) + assert_(eq(mXswapped[-1], mX[:, -1])) + mXXswapped = mXX.swapaxes(0, 2) + assert_equal(mXXswapped.shape, (2, 2, 3, 3)) + + def test_cumprod(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + mXcp = mX.cumprod(0) + assert_(eq(mXcp._data, mX.filled(1).cumprod(0))) + mXcp = mX.cumprod(1) + assert_(eq(mXcp._data, mX.filled(1).cumprod(1))) + + def test_cumsum(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + mXcp = mX.cumsum(0) + assert_(eq(mXcp._data, mX.filled(0).cumsum(0))) + mXcp = mX.cumsum(1) + assert_(eq(mXcp._data, mX.filled(0).cumsum(1))) + + def test_varstd(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + assert_(eq(mX.var(axis=None), mX.compressed().var())) + assert_(eq(mX.std(axis=None), mX.compressed().std())) + assert_(eq(mXX.var(axis=3).shape, XX.var(axis=3).shape)) + assert_(eq(mX.var().shape, X.var().shape)) + (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1)) + for k in range(6): + assert_(eq(mXvar1[k], mX[k].compressed().var())) + assert_(eq(mXvar0[k], mX[:, k].compressed().var())) + assert_(eq(np.sqrt(mXvar0[k]), + mX[:, k].compressed().std())) + + +def eqmask(m1, m2): + if m1 is nomask: + return m2 is nomask + if m2 is nomask: + return m1 is nomask + return (m1 == m2).all() diff --git a/MLPY/Lib/site-packages/numpy/ma/tests/test_regression.py b/MLPY/Lib/site-packages/numpy/ma/tests/test_regression.py new file mode 100644 index 0000000000000000000000000000000000000000..f10b82107e72ecf887a0bab2489017ac997e3c4b --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/ma/tests/test_regression.py @@ -0,0 +1,91 @@ +import numpy as np +from numpy.testing import ( + 
assert_, assert_array_equal, assert_allclose, suppress_warnings + ) + + +class TestRegression: + def test_masked_array_create(self): + # Ticket #17 + x = np.ma.masked_array([0, 1, 2, 3, 0, 4, 5, 6], + mask=[0, 0, 0, 1, 1, 1, 0, 0]) + assert_array_equal(np.ma.nonzero(x), [[1, 2, 6, 7]]) + + def test_masked_array(self): + # Ticket #61 + np.ma.array(1, mask=[1]) + + def test_mem_masked_where(self): + # Ticket #62 + from numpy.ma import masked_where, MaskType + a = np.zeros((1, 1)) + b = np.zeros(a.shape, MaskType) + c = masked_where(b, a) + a-c + + def test_masked_array_multiply(self): + # Ticket #254 + a = np.ma.zeros((4, 1)) + a[2, 0] = np.ma.masked + b = np.zeros((4, 2)) + a*b + b*a + + def test_masked_array_repeat(self): + # Ticket #271 + np.ma.array([1], mask=False).repeat(10) + + def test_masked_array_repr_unicode(self): + # Ticket #1256 + repr(np.ma.array(u"Unicode")) + + def test_atleast_2d(self): + # Ticket #1559 + a = np.ma.masked_array([0.0, 1.2, 3.5], mask=[False, True, False]) + b = np.atleast_2d(a) + assert_(a.mask.ndim == 1) + assert_(b.mask.ndim == 2) + + def test_set_fill_value_unicode_py3(self): + # Ticket #2733 + a = np.ma.masked_array(['a', 'b', 'c'], mask=[1, 0, 0]) + a.fill_value = 'X' + assert_(a.fill_value == 'X') + + def test_var_sets_maskedarray_scalar(self): + # Issue gh-2757 + a = np.ma.array(np.arange(5), mask=True) + mout = np.ma.array(-1, dtype=float) + a.var(out=mout) + assert_(mout._data == 0) + + def test_ddof_corrcoef(self): + # See gh-3336 + x = np.ma.masked_equal([1, 2, 3, 4, 5], 4) + y = np.array([2, 2.5, 3.1, 3, 5]) + # this test can be removed after deprecation. + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "bias and ddof have no effect") + r0 = np.ma.corrcoef(x, y, ddof=0) + r1 = np.ma.corrcoef(x, y, ddof=1) + # ddof should not have an effect (it gets cancelled out) + assert_allclose(r0.data, r1.data) + + def test_mask_not_backmangled(self): + # See gh-10314. Test case taken from gh-3140. + a = np.ma.MaskedArray([1., 2.], mask=[False, False]) + assert_(a.mask.shape == (2,)) + b = np.tile(a, (2, 1)) + # Check that the above no longer changes a.shape to (1, 2) + assert_(a.mask.shape == (2,)) + assert_(b.shape == (2, 2)) + assert_(b.mask.shape == (2, 2)) + + def test_empty_list_on_structured(self): + # See gh-12464. Indexing with empty list should give empty result. + ma = np.ma.MaskedArray([(1, 1.), (2, 2.), (3, 3.)], dtype='i4,f4') + assert_array_equal(ma[[]], ma[:0]) + + def test_masked_array_tobytes_fortran(self): + ma = np.ma.arange(4).reshape((2,2)) + assert_array_equal(ma.tobytes(order='F'), ma.T.tobytes()) diff --git a/MLPY/Lib/site-packages/numpy/ma/tests/test_subclassing.py b/MLPY/Lib/site-packages/numpy/ma/tests/test_subclassing.py new file mode 100644 index 0000000000000000000000000000000000000000..37cfda4f5741baab2e07771b466ace4fe3f3a928 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/ma/tests/test_subclassing.py @@ -0,0 +1,345 @@ +# pylint: disable-msg=W0611, W0612, W0511,R0201 +"""Tests suite for MaskedArray & subclassing. 
+ +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu +:version: $Id: test_subclassing.py 3473 2007-10-29 15:18:13Z jarrod.millman $ + +""" +import numpy as np +from numpy.testing import assert_, assert_raises +from numpy.ma.testutils import assert_equal +from numpy.ma.core import ( + array, arange, masked, MaskedArray, masked_array, log, add, hypot, + divide, asarray, asanyarray, nomask + ) +# from numpy.ma.core import ( + +def assert_startswith(a, b): + # produces a better error message than assert_(a.startswith(b)) + assert_equal(a[:len(b)], b) + +class SubArray(np.ndarray): + # Defines a generic np.ndarray subclass, that stores some metadata + # in the dictionary `info`. + def __new__(cls,arr,info={}): + x = np.asanyarray(arr).view(cls) + x.info = info.copy() + return x + + def __array_finalize__(self, obj): + if callable(getattr(super(), '__array_finalize__', None)): + super().__array_finalize__(obj) + self.info = getattr(obj, 'info', {}).copy() + return + + def __add__(self, other): + result = super().__add__(other) + result.info['added'] = result.info.get('added', 0) + 1 + return result + + def __iadd__(self, other): + result = super().__iadd__(other) + result.info['iadded'] = result.info.get('iadded', 0) + 1 + return result + + +subarray = SubArray + + +class SubMaskedArray(MaskedArray): + """Pure subclass of MaskedArray, keeping some info on subclass.""" + def __new__(cls, info=None, **kwargs): + obj = super().__new__(cls, **kwargs) + obj._optinfo['info'] = info + return obj + + +class MSubArray(SubArray, MaskedArray): + + def __new__(cls, data, info={}, mask=nomask): + subarr = SubArray(data, info) + _data = MaskedArray.__new__(cls, data=subarr, mask=mask) + _data.info = subarr.info + return _data + + @property + def _series(self): + _view = self.view(MaskedArray) + _view._sharedmask = False + return _view + +msubarray = MSubArray + + +# Also a subclass that overrides __str__, __repr__ and __setitem__, disallowing +# setting to non-class values (and thus np.ma.core.masked_print_option) +# and overrides __array_wrap__, updating the info dict, to check that this +# doesn't get destroyed by MaskedArray._update_from. But this one also needs +# its own iterator... 
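+# CSAIterator below provides that: it wraps the plain ndarray flat iterator
+# and views everything it returns (and validates everything it is asked to
+# write) as the original subclass.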
+class CSAIterator: + """ + Flat iterator object that uses its own setter/getter + (works around ndarray.flat not propagating subclass setters/getters + see https://github.com/numpy/numpy/issues/4564) + roughly following MaskedIterator + """ + def __init__(self, a): + self._original = a + self._dataiter = a.view(np.ndarray).flat + + def __iter__(self): + return self + + def __getitem__(self, indx): + out = self._dataiter.__getitem__(indx) + if not isinstance(out, np.ndarray): + out = out.__array__() + out = out.view(type(self._original)) + return out + + def __setitem__(self, index, value): + self._dataiter[index] = self._original._validate_input(value) + + def __next__(self): + return next(self._dataiter).__array__().view(type(self._original)) + + +class ComplicatedSubArray(SubArray): + + def __str__(self): + return f'myprefix {self.view(SubArray)} mypostfix' + + def __repr__(self): + # Return a repr that does not start with 'name(' + return f'<{self.__class__.__name__} {self}>' + + def _validate_input(self, value): + if not isinstance(value, ComplicatedSubArray): + raise ValueError("Can only set to MySubArray values") + return value + + def __setitem__(self, item, value): + # validation ensures direct assignment with ndarray or + # masked_print_option will fail + super().__setitem__(item, self._validate_input(value)) + + def __getitem__(self, item): + # ensure getter returns our own class also for scalars + value = super().__getitem__(item) + if not isinstance(value, np.ndarray): # scalar + value = value.__array__().view(ComplicatedSubArray) + return value + + @property + def flat(self): + return CSAIterator(self) + + @flat.setter + def flat(self, value): + y = self.ravel() + y[:] = value + + def __array_wrap__(self, obj, context=None): + obj = super().__array_wrap__(obj, context) + if context is not None and context[0] is np.multiply: + obj.info['multiplied'] = obj.info.get('multiplied', 0) + 1 + + return obj + + +class TestSubclassing: + # Test suite for masked subclasses of ndarray. + + def setup(self): + x = np.arange(5, dtype='float') + mx = msubarray(x, mask=[0, 1, 0, 0, 0]) + self.data = (x, mx) + + def test_data_subclassing(self): + # Tests whether the subclass is kept. 
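+        # i.e. masked_array should wrap the SubArray without casting its
+        # data back to a plain ndarray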
+ x = np.arange(5) + m = [0, 0, 1, 0, 0] + xsub = SubArray(x) + xmsub = masked_array(xsub, mask=m) + assert_(isinstance(xmsub, MaskedArray)) + assert_equal(xmsub._data, xsub) + assert_(isinstance(xmsub._data, SubArray)) + + def test_maskedarray_subclassing(self): + # Tests subclassing MaskedArray + (x, mx) = self.data + assert_(isinstance(mx._data, subarray)) + + def test_masked_unary_operations(self): + # Tests masked_unary_operation + (x, mx) = self.data + with np.errstate(divide='ignore'): + assert_(isinstance(log(mx), msubarray)) + assert_equal(log(x), np.log(x)) + + def test_masked_binary_operations(self): + # Tests masked_binary_operation + (x, mx) = self.data + # Result should be a msubarray + assert_(isinstance(add(mx, mx), msubarray)) + assert_(isinstance(add(mx, x), msubarray)) + # Result should work + assert_equal(add(mx, x), mx+x) + assert_(isinstance(add(mx, mx)._data, subarray)) + assert_(isinstance(add.outer(mx, mx), msubarray)) + assert_(isinstance(hypot(mx, mx), msubarray)) + assert_(isinstance(hypot(mx, x), msubarray)) + + def test_masked_binary_operations2(self): + # Tests domained_masked_binary_operation + (x, mx) = self.data + xmx = masked_array(mx.data.__array__(), mask=mx.mask) + assert_(isinstance(divide(mx, mx), msubarray)) + assert_(isinstance(divide(mx, x), msubarray)) + assert_equal(divide(mx, mx), divide(xmx, xmx)) + + def test_attributepropagation(self): + x = array(arange(5), mask=[0]+[1]*4) + my = masked_array(subarray(x)) + ym = msubarray(x) + # + z = (my+1) + assert_(isinstance(z, MaskedArray)) + assert_(not isinstance(z, MSubArray)) + assert_(isinstance(z._data, SubArray)) + assert_equal(z._data.info, {}) + # + z = (ym+1) + assert_(isinstance(z, MaskedArray)) + assert_(isinstance(z, MSubArray)) + assert_(isinstance(z._data, SubArray)) + assert_(z._data.info['added'] > 0) + # Test that inplace methods from data get used (gh-4617) + ym += 1 + assert_(isinstance(ym, MaskedArray)) + assert_(isinstance(ym, MSubArray)) + assert_(isinstance(ym._data, SubArray)) + assert_(ym._data.info['iadded'] > 0) + # + ym._set_mask([1, 0, 0, 0, 1]) + assert_equal(ym._mask, [1, 0, 0, 0, 1]) + ym._series._set_mask([0, 0, 0, 0, 1]) + assert_equal(ym._mask, [0, 0, 0, 0, 1]) + # + xsub = subarray(x, info={'name':'x'}) + mxsub = masked_array(xsub) + assert_(hasattr(mxsub, 'info')) + assert_equal(mxsub.info, xsub.info) + + def test_subclasspreservation(self): + # Checks that masked_array(...,subok=True) preserves the class. 
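+        # With subok=False (or asarray) the result degrades to a plain
+        # MaskedArray; with subok=True (or asanyarray) the MSubArray type
+        # and its info dict survive.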
+ x = np.arange(5) + m = [0, 0, 1, 0, 0] + xinfo = [(i, j) for (i, j) in zip(x, m)] + xsub = MSubArray(x, mask=m, info={'xsub':xinfo}) + # + mxsub = masked_array(xsub, subok=False) + assert_(not isinstance(mxsub, MSubArray)) + assert_(isinstance(mxsub, MaskedArray)) + assert_equal(mxsub._mask, m) + # + mxsub = asarray(xsub) + assert_(not isinstance(mxsub, MSubArray)) + assert_(isinstance(mxsub, MaskedArray)) + assert_equal(mxsub._mask, m) + # + mxsub = masked_array(xsub, subok=True) + assert_(isinstance(mxsub, MSubArray)) + assert_equal(mxsub.info, xsub.info) + assert_equal(mxsub._mask, xsub._mask) + # + mxsub = asanyarray(xsub) + assert_(isinstance(mxsub, MSubArray)) + assert_equal(mxsub.info, xsub.info) + assert_equal(mxsub._mask, m) + + def test_subclass_items(self): + """test that getter and setter go via baseclass""" + x = np.arange(5) + xcsub = ComplicatedSubArray(x) + mxcsub = masked_array(xcsub, mask=[True, False, True, False, False]) + # getter should return a ComplicatedSubArray, even for single item + # first check we wrote ComplicatedSubArray correctly + assert_(isinstance(xcsub[1], ComplicatedSubArray)) + assert_(isinstance(xcsub[1,...], ComplicatedSubArray)) + assert_(isinstance(xcsub[1:4], ComplicatedSubArray)) + + # now that it propagates inside the MaskedArray + assert_(isinstance(mxcsub[1], ComplicatedSubArray)) + assert_(isinstance(mxcsub[1,...].data, ComplicatedSubArray)) + assert_(mxcsub[0] is masked) + assert_(isinstance(mxcsub[0,...].data, ComplicatedSubArray)) + assert_(isinstance(mxcsub[1:4].data, ComplicatedSubArray)) + + # also for flattened version (which goes via MaskedIterator) + assert_(isinstance(mxcsub.flat[1].data, ComplicatedSubArray)) + assert_(mxcsub.flat[0] is masked) + assert_(isinstance(mxcsub.flat[1:4].base, ComplicatedSubArray)) + + # setter should only work with ComplicatedSubArray input + # first check we wrote ComplicatedSubArray correctly + assert_raises(ValueError, xcsub.__setitem__, 1, x[4]) + # now that it propagates inside the MaskedArray + assert_raises(ValueError, mxcsub.__setitem__, 1, x[4]) + assert_raises(ValueError, mxcsub.__setitem__, slice(1, 4), x[1:4]) + mxcsub[1] = xcsub[4] + mxcsub[1:4] = xcsub[1:4] + # also for flattened version (which goes via MaskedIterator) + assert_raises(ValueError, mxcsub.flat.__setitem__, 1, x[4]) + assert_raises(ValueError, mxcsub.flat.__setitem__, slice(1, 4), x[1:4]) + mxcsub.flat[1] = xcsub[4] + mxcsub.flat[1:4] = xcsub[1:4] + + def test_subclass_nomask_items(self): + x = np.arange(5) + xcsub = ComplicatedSubArray(x) + mxcsub_nomask = masked_array(xcsub) + + assert_(isinstance(mxcsub_nomask[1,...].data, ComplicatedSubArray)) + assert_(isinstance(mxcsub_nomask[0,...].data, ComplicatedSubArray)) + + assert_(isinstance(mxcsub_nomask[1], ComplicatedSubArray)) + assert_(isinstance(mxcsub_nomask[0], ComplicatedSubArray)) + + def test_subclass_repr(self): + """test that repr uses the name of the subclass + and 'array' for np.ndarray""" + x = np.arange(5) + mx = masked_array(x, mask=[True, False, True, False, False]) + assert_startswith(repr(mx), 'masked_array') + xsub = SubArray(x) + mxsub = masked_array(xsub, mask=[True, False, True, False, False]) + assert_startswith(repr(mxsub), + f'masked_{SubArray.__name__}(data=[--, 1, --, 3, 4]') + + def test_subclass_str(self): + """test str with subclass that has overridden str, setitem""" + # first without override + x = np.arange(5) + xsub = SubArray(x) + mxsub = masked_array(xsub, mask=[True, False, True, False, False]) + assert_equal(str(mxsub), '[-- 1 -- 3 
4]')
+
+        xcsub = ComplicatedSubArray(x)
+        assert_raises(ValueError, xcsub.__setitem__, 0,
+                      np.ma.core.masked_print_option)
+        mxcsub = masked_array(xcsub, mask=[True, False, True, False, False])
+        assert_equal(str(mxcsub), 'myprefix [-- 1 -- 3 4] mypostfix')
+
+    def test_pure_subclass_info_preservation(self):
+        # Test that ufuncs and methods conserve extra information consistently;
+        # see gh-7122.
+        arr1 = SubMaskedArray('test', data=[1,2,3,4,5,6])
+        arr2 = SubMaskedArray(data=[0,1,2,3,4,5])
+        diff1 = np.subtract(arr1, arr2)
+        assert_('info' in diff1._optinfo)
+        assert_(diff1._optinfo['info'] == 'test')
+        diff2 = arr1 - arr2
+        assert_('info' in diff2._optinfo)
+        assert_(diff2._optinfo['info'] == 'test')
diff --git a/MLPY/Lib/site-packages/numpy/ma/testutils.py b/MLPY/Lib/site-packages/numpy/ma/testutils.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5ef27f47f63b1c59fdc697145bd47e38ab3142d
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/ma/testutils.py
@@ -0,0 +1,288 @@
+"""Miscellaneous functions for testing masked arrays and subclasses
+
+:author: Pierre Gerard-Marchant
+:contact: pierregm_at_uga_dot_edu
+:version: $Id: testutils.py 3529 2007-11-13 08:01:14Z jarrod.millman $
+
+"""
+import operator
+
+import numpy as np
+from numpy import ndarray, float_
+import numpy.core.umath as umath
+import numpy.testing
+from numpy.testing import (
+    assert_, assert_allclose, assert_array_almost_equal_nulp,
+    assert_raises, build_err_msg
+    )
+from .core import mask_or, getmask, masked_array, nomask, masked, filled
+
+__all__masked = [
+    'almost', 'approx', 'assert_almost_equal', 'assert_array_almost_equal',
+    'assert_array_approx_equal', 'assert_array_compare',
+    'assert_array_equal', 'assert_array_less', 'assert_close',
+    'assert_equal', 'assert_equal_records', 'assert_mask_equal',
+    'assert_not_equal', 'fail_if_array_equal',
+    ]
+
+# Include some normal test functions to avoid breaking other projects that
+# have mistakenly included them from this file. SciPy is one. That is
+# unfortunate, as some of these functions are not intended to work with
+# masked arrays. But there was no way to tell before.
+from unittest import TestCase
+__some__from_testing = [
+    'TestCase', 'assert_', 'assert_allclose', 'assert_array_almost_equal_nulp',
+    'assert_raises'
+    ]
+
+__all__ = __all__masked + __some__from_testing
+
+
+def approx(a, b, fill_value=True, rtol=1e-5, atol=1e-8):
+    """
+    Returns true if all components of a and b are equal within the given
+    tolerances.
+
+    If fill_value is True, masked values are considered equal. Otherwise,
+    masked values are considered unequal. The relative error rtol should
+    be positive and << 1.0. The absolute error atol comes into play for
+    those elements of b that are very small or zero; it says how small a
+    must also be.
+
+    """
+    m = mask_or(getmask(a), getmask(b))
+    d1 = filled(a)
+    d2 = filled(b)
+    if d1.dtype.char == "O" or d2.dtype.char == "O":
+        return np.equal(d1, d2).ravel()
+    x = filled(masked_array(d1, copy=False, mask=m), fill_value).astype(float_)
+    y = filled(masked_array(d2, copy=False, mask=m), 1).astype(float_)
+    d = np.less_equal(umath.absolute(x - y), atol + rtol * umath.absolute(y))
+    return d.ravel()
+
+
+def almost(a, b, decimal=6, fill_value=True):
+    """
+    Returns True if a and b are equal up to decimal places.
+
+    If fill_value is True, masked values are considered equal. Otherwise,
+    masked values are considered unequal.
+ + """ + m = mask_or(getmask(a), getmask(b)) + d1 = filled(a) + d2 = filled(b) + if d1.dtype.char == "O" or d2.dtype.char == "O": + return np.equal(d1, d2).ravel() + x = filled(masked_array(d1, copy=False, mask=m), fill_value).astype(float_) + y = filled(masked_array(d2, copy=False, mask=m), 1).astype(float_) + d = np.around(np.abs(x - y), decimal) <= 10.0 ** (-decimal) + return d.ravel() + + +def _assert_equal_on_sequences(actual, desired, err_msg=''): + """ + Asserts the equality of two non-array sequences. + + """ + assert_equal(len(actual), len(desired), err_msg) + for k in range(len(desired)): + assert_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}') + return + + +def assert_equal_records(a, b): + """ + Asserts that two records are equal. + + Pretty crude for now. + + """ + assert_equal(a.dtype, b.dtype) + for f in a.dtype.names: + (af, bf) = (operator.getitem(a, f), operator.getitem(b, f)) + if not (af is masked) and not (bf is masked): + assert_equal(operator.getitem(a, f), operator.getitem(b, f)) + return + + +def assert_equal(actual, desired, err_msg=''): + """ + Asserts that two items are equal. + + """ + # Case #1: dictionary ..... + if isinstance(desired, dict): + if not isinstance(actual, dict): + raise AssertionError(repr(type(actual))) + assert_equal(len(actual), len(desired), err_msg) + for k, i in desired.items(): + if k not in actual: + raise AssertionError(f"{k} not in {actual}") + assert_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}') + return + # Case #2: lists ..... + if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): + return _assert_equal_on_sequences(actual, desired, err_msg='') + if not (isinstance(actual, ndarray) or isinstance(desired, ndarray)): + msg = build_err_msg([actual, desired], err_msg,) + if not desired == actual: + raise AssertionError(msg) + return + # Case #4. arrays or equivalent + if ((actual is masked) and not (desired is masked)) or \ + ((desired is masked) and not (actual is masked)): + msg = build_err_msg([actual, desired], + err_msg, header='', names=('x', 'y')) + raise ValueError(msg) + actual = np.asanyarray(actual) + desired = np.asanyarray(desired) + (actual_dtype, desired_dtype) = (actual.dtype, desired.dtype) + if actual_dtype.char == "S" and desired_dtype.char == "S": + return _assert_equal_on_sequences(actual.tolist(), + desired.tolist(), + err_msg='') + return assert_array_equal(actual, desired, err_msg) + + +def fail_if_equal(actual, desired, err_msg='',): + """ + Raises an assertion error if two items are equal. + + """ + if isinstance(desired, dict): + if not isinstance(actual, dict): + raise AssertionError(repr(type(actual))) + fail_if_equal(len(actual), len(desired), err_msg) + for k, i in desired.items(): + if k not in actual: + raise AssertionError(repr(k)) + fail_if_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}') + return + if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): + fail_if_equal(len(actual), len(desired), err_msg) + for k in range(len(desired)): + fail_if_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}') + return + if isinstance(actual, np.ndarray) or isinstance(desired, np.ndarray): + return fail_if_array_equal(actual, desired, err_msg) + msg = build_err_msg([actual, desired], err_msg) + if not desired != actual: + raise AssertionError(msg) + + +assert_not_equal = fail_if_equal + + +def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True): + """ + Asserts that two items are almost equal. 
+
+    The test is equivalent to abs(desired-actual) < 0.5 * 10**(-decimal).
+
+    """
+    if isinstance(actual, np.ndarray) or isinstance(desired, np.ndarray):
+        return assert_array_almost_equal(actual, desired, decimal=decimal,
+                                         err_msg=err_msg, verbose=verbose)
+    msg = build_err_msg([actual, desired],
+                        err_msg=err_msg, verbose=verbose)
+    if not round(abs(desired - actual), decimal) == 0:
+        raise AssertionError(msg)
+
+
+assert_close = assert_almost_equal
+
+
+def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='',
+                         fill_value=True):
+    """
+    Asserts that comparison between two masked arrays is satisfied.
+
+    The comparison is elementwise.
+
+    """
+    # Allocate a common mask and refill
+    m = mask_or(getmask(x), getmask(y))
+    x = masked_array(x, copy=False, mask=m, keep_mask=False, subok=False)
+    y = masked_array(y, copy=False, mask=m, keep_mask=False, subok=False)
+    if ((x is masked) and not (y is masked)) or \
+            ((y is masked) and not (x is masked)):
+        msg = build_err_msg([x, y], err_msg=err_msg, verbose=verbose,
+                            header=header, names=('x', 'y'))
+        raise ValueError(msg)
+    # OK, now run the basic tests on filled versions
+    return np.testing.assert_array_compare(comparison,
+                                           x.filled(fill_value),
+                                           y.filled(fill_value),
+                                           err_msg=err_msg,
+                                           verbose=verbose, header=header)
+
+
+def assert_array_equal(x, y, err_msg='', verbose=True):
+    """
+    Checks the elementwise equality of two masked arrays.
+
+    """
+    assert_array_compare(operator.__eq__, x, y,
+                         err_msg=err_msg, verbose=verbose,
+                         header='Arrays are not equal')
+
+
+def fail_if_array_equal(x, y, err_msg='', verbose=True):
+    """
+    Raises an assertion error if two masked arrays are not equal elementwise.
+
+    """
+    def compare(x, y):
+        return (not np.alltrue(approx(x, y)))
+    assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
+                         header='Arrays are not equal')
+
+
+def assert_array_approx_equal(x, y, decimal=6, err_msg='', verbose=True):
+    """
+    Checks the equality of two masked arrays, up to a given number of
+    decimals.
+
+    The equality is checked elementwise.
+
+    """
+    def compare(x, y):
+        "Returns the result of the loose comparison between x and y."
+        return approx(x, y, rtol=10. ** -decimal)
+    assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
+                         header='Arrays are not almost equal')
+
+
+def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
+    """
+    Checks the equality of two masked arrays, up to a given number of
+    decimals.
+
+    The equality is checked elementwise.
+
+    """
+    def compare(x, y):
+        "Returns the result of the loose comparison between x and y."
+        return almost(x, y, decimal)
+    assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
+                         header='Arrays are not almost equal')
+
+
+def assert_array_less(x, y, err_msg='', verbose=True):
+    """
+    Checks that x is smaller than y elementwise.
+
+    """
+    assert_array_compare(operator.__lt__, x, y,
+                         err_msg=err_msg, verbose=verbose,
+                         header='Arrays are not less-ordered')
+
+
+def assert_mask_equal(m1, m2, err_msg=''):
+    """
+    Asserts the equality of two masks.
+ + """ + if m1 is nomask: + assert_(m2 is nomask) + if m2 is nomask: + assert_(m1 is nomask) + assert_array_equal(m1, m2, err_msg=err_msg) diff --git a/MLPY/Lib/site-packages/numpy/ma/timer_comparison.py b/MLPY/Lib/site-packages/numpy/ma/timer_comparison.py new file mode 100644 index 0000000000000000000000000000000000000000..8664e864ee2c06ec6dfc42471772b4f4b98418b0 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/ma/timer_comparison.py @@ -0,0 +1,443 @@ +import timeit +from functools import reduce + +import numpy as np +from numpy import float_ +import numpy.core.fromnumeric as fromnumeric + +from numpy.testing import build_err_msg + + +pi = np.pi + +class ModuleTester: + def __init__(self, module): + self.module = module + self.allequal = module.allequal + self.arange = module.arange + self.array = module.array + self.concatenate = module.concatenate + self.count = module.count + self.equal = module.equal + self.filled = module.filled + self.getmask = module.getmask + self.getmaskarray = module.getmaskarray + self.id = id + self.inner = module.inner + self.make_mask = module.make_mask + self.masked = module.masked + self.masked_array = module.masked_array + self.masked_values = module.masked_values + self.mask_or = module.mask_or + self.nomask = module.nomask + self.ones = module.ones + self.outer = module.outer + self.repeat = module.repeat + self.resize = module.resize + self.sort = module.sort + self.take = module.take + self.transpose = module.transpose + self.zeros = module.zeros + self.MaskType = module.MaskType + try: + self.umath = module.umath + except AttributeError: + self.umath = module.core.umath + self.testnames = [] + + def assert_array_compare(self, comparison, x, y, err_msg='', header='', + fill_value=True): + """ + Assert that a comparison of two masked arrays is satisfied elementwise. + + """ + xf = self.filled(x) + yf = self.filled(y) + m = self.mask_or(self.getmask(x), self.getmask(y)) + + x = self.filled(self.masked_array(xf, mask=m), fill_value) + y = self.filled(self.masked_array(yf, mask=m), fill_value) + if (x.dtype.char != "O"): + x = x.astype(float_) + if isinstance(x, np.ndarray) and x.size > 1: + x[np.isnan(x)] = 0 + elif np.isnan(x): + x = 0 + if (y.dtype.char != "O"): + y = y.astype(float_) + if isinstance(y, np.ndarray) and y.size > 1: + y[np.isnan(y)] = 0 + elif np.isnan(y): + y = 0 + try: + cond = (x.shape == () or y.shape == ()) or x.shape == y.shape + if not cond: + msg = build_err_msg([x, y], + err_msg + + f'\n(shapes {x.shape}, {y.shape} mismatch)', + header=header, + names=('x', 'y')) + assert cond, msg + val = comparison(x, y) + if m is not self.nomask and fill_value: + val = self.masked_array(val, mask=m) + if isinstance(val, bool): + cond = val + reduced = [0] + else: + reduced = val.ravel() + cond = reduced.all() + reduced = reduced.tolist() + if not cond: + match = 100-100.0*reduced.count(1)/len(reduced) + msg = build_err_msg([x, y], + err_msg + + '\n(mismatch %s%%)' % (match,), + header=header, + names=('x', 'y')) + assert cond, msg + except ValueError as e: + msg = build_err_msg([x, y], err_msg, header=header, names=('x', 'y')) + raise ValueError(msg) from e + + def assert_array_equal(self, x, y, err_msg=''): + """ + Checks the elementwise equality of two masked arrays. 
+ + """ + self.assert_array_compare(self.equal, x, y, err_msg=err_msg, + header='Arrays are not equal') + + @np.errstate(all='ignore') + def test_0(self): + """ + Tests creation + + """ + x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) + m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + xm = self.masked_array(x, mask=m) + xm[0] + + @np.errstate(all='ignore') + def test_1(self): + """ + Tests creation + + """ + x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) + y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) + m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] + xm = self.masked_array(x, mask=m1) + ym = self.masked_array(y, mask=m2) + xf = np.where(m1, 1.e+20, x) + xm.set_fill_value(1.e+20) + + assert((xm-ym).filled(0).any()) + s = x.shape + assert(xm.size == reduce(lambda x, y:x*y, s)) + assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1)) + + for s in [(4, 3), (6, 2)]: + x.shape = s + y.shape = s + xm.shape = s + ym.shape = s + xf.shape = s + assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1)) + + @np.errstate(all='ignore') + def test_2(self): + """ + Tests conversions and indexing. + + """ + x1 = np.array([1, 2, 4, 3]) + x2 = self.array(x1, mask=[1, 0, 0, 0]) + x3 = self.array(x1, mask=[0, 1, 0, 1]) + x4 = self.array(x1) + # test conversion to strings, no errors + str(x2) + repr(x2) + # tests of indexing + assert type(x2[1]) is type(x1[1]) + assert x1[1] == x2[1] + x1[2] = 9 + x2[2] = 9 + self.assert_array_equal(x1, x2) + x1[1:3] = 99 + x2[1:3] = 99 + x2[1] = self.masked + x2[1:3] = self.masked + x2[:] = x1 + x2[1] = self.masked + x3[:] = self.masked_array([1, 2, 3, 4], [0, 1, 1, 0]) + x4[:] = self.masked_array([1, 2, 3, 4], [0, 1, 1, 0]) + x1 = np.arange(5)*1.0 + x2 = self.masked_values(x1, 3.0) + x1 = self.array([1, 'hello', 2, 3], object) + x2 = np.array([1, 'hello', 2, 3], object) + # check that no error occurs. + x1[1] + x2[1] + assert x1[1:1].shape == (0,) + # Tests copy-size + n = [0, 0, 1, 0, 0] + m = self.make_mask(n) + m2 = self.make_mask(m) + assert(m is m2) + m3 = self.make_mask(m, copy=1) + assert(m is not m3) + + @np.errstate(all='ignore') + def test_3(self): + """ + Tests resize/repeat + + """ + x4 = self.arange(4) + x4[2] = self.masked + y4 = self.resize(x4, (8,)) + assert self.allequal(self.concatenate([x4, x4]), y4) + assert self.allequal(self.getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]) + y5 = self.repeat(x4, (2, 2, 2, 2), axis=0) + self.assert_array_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3]) + y6 = self.repeat(x4, 2, axis=0) + assert self.allequal(y5, y6) + y7 = x4.repeat((2, 2, 2, 2), axis=0) + assert self.allequal(y5, y7) + y8 = x4.repeat(2, 0) + assert self.allequal(y5, y8) + + @np.errstate(all='ignore') + def test_4(self): + """ + Test of take, transpose, inner, outer products. 
+ + """ + x = self.arange(24) + y = np.arange(24) + x[5:6] = self.masked + x = x.reshape(2, 3, 4) + y = y.reshape(2, 3, 4) + assert self.allequal(np.transpose(y, (2, 0, 1)), self.transpose(x, (2, 0, 1))) + assert self.allequal(np.take(y, (2, 0, 1), 1), self.take(x, (2, 0, 1), 1)) + assert self.allequal(np.inner(self.filled(x, 0), self.filled(y, 0)), + self.inner(x, y)) + assert self.allequal(np.outer(self.filled(x, 0), self.filled(y, 0)), + self.outer(x, y)) + y = self.array(['abc', 1, 'def', 2, 3], object) + y[2] = self.masked + t = self.take(y, [0, 3, 4]) + assert t[0] == 'abc' + assert t[1] == 2 + assert t[2] == 3 + + @np.errstate(all='ignore') + def test_5(self): + """ + Tests inplace w/ scalar + + """ + x = self.arange(10) + y = self.arange(10) + xm = self.arange(10) + xm[2] = self.masked + x += 1 + assert self.allequal(x, y+1) + xm += 1 + assert self.allequal(xm, y+1) + + x = self.arange(10) + xm = self.arange(10) + xm[2] = self.masked + x -= 1 + assert self.allequal(x, y-1) + xm -= 1 + assert self.allequal(xm, y-1) + + x = self.arange(10)*1.0 + xm = self.arange(10)*1.0 + xm[2] = self.masked + x *= 2.0 + assert self.allequal(x, y*2) + xm *= 2.0 + assert self.allequal(xm, y*2) + + x = self.arange(10)*2 + xm = self.arange(10)*2 + xm[2] = self.masked + x /= 2 + assert self.allequal(x, y) + xm /= 2 + assert self.allequal(xm, y) + + x = self.arange(10)*1.0 + xm = self.arange(10)*1.0 + xm[2] = self.masked + x /= 2.0 + assert self.allequal(x, y/2.0) + xm /= self.arange(10) + self.assert_array_equal(xm, self.ones((10,))) + + x = self.arange(10).astype(float_) + xm = self.arange(10) + xm[2] = self.masked + x += 1. + assert self.allequal(x, y + 1.) + + @np.errstate(all='ignore') + def test_6(self): + """ + Tests inplace w/ array + + """ + x = self.arange(10, dtype=float_) + y = self.arange(10) + xm = self.arange(10, dtype=float_) + xm[2] = self.masked + m = xm.mask + a = self.arange(10, dtype=float_) + a[-1] = self.masked + x += a + xm += a + assert self.allequal(x, y+a) + assert self.allequal(xm, y+a) + assert self.allequal(xm.mask, self.mask_or(m, a.mask)) + + x = self.arange(10, dtype=float_) + xm = self.arange(10, dtype=float_) + xm[2] = self.masked + m = xm.mask + a = self.arange(10, dtype=float_) + a[-1] = self.masked + x -= a + xm -= a + assert self.allequal(x, y-a) + assert self.allequal(xm, y-a) + assert self.allequal(xm.mask, self.mask_or(m, a.mask)) + + x = self.arange(10, dtype=float_) + xm = self.arange(10, dtype=float_) + xm[2] = self.masked + m = xm.mask + a = self.arange(10, dtype=float_) + a[-1] = self.masked + x *= a + xm *= a + assert self.allequal(x, y*a) + assert self.allequal(xm, y*a) + assert self.allequal(xm.mask, self.mask_or(m, a.mask)) + + x = self.arange(10, dtype=float_) + xm = self.arange(10, dtype=float_) + xm[2] = self.masked + m = xm.mask + a = self.arange(10, dtype=float_) + a[-1] = self.masked + x /= a + xm /= a + + @np.errstate(all='ignore') + def test_7(self): + "Tests ufunc" + d = (self.array([1.0, 0, -1, pi/2]*2, mask=[0, 1]+[0]*6), + self.array([1.0, 0, -1, pi/2]*2, mask=[1, 0]+[0]*6),) + for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate', +# 'sin', 'cos', 'tan', +# 'arcsin', 'arccos', 'arctan', +# 'sinh', 'cosh', 'tanh', +# 'arcsinh', +# 'arccosh', +# 'arctanh', +# 'absolute', 'fabs', 'negative', +# # 'nonzero', 'around', +# 'floor', 'ceil', +# # 'sometrue', 'alltrue', +# 'logical_not', +# 'add', 'subtract', 'multiply', +# 'divide', 'true_divide', 'floor_divide', +# 'remainder', 'fmod', 'hypot', 'arctan2', +# 'equal', 'not_equal', 'less_equal', 
'greater_equal', +# 'less', 'greater', +# 'logical_and', 'logical_or', 'logical_xor', + ]: + try: + uf = getattr(self.umath, f) + except AttributeError: + uf = getattr(fromnumeric, f) + mf = getattr(self.module, f) + args = d[:uf.nin] + ur = uf(*args) + mr = mf(*args) + self.assert_array_equal(ur.filled(0), mr.filled(0), f) + self.assert_array_equal(ur._mask, mr._mask) + + @np.errstate(all='ignore') + def test_99(self): + # test average + ott = self.array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) + self.assert_array_equal(2.0, self.average(ott, axis=0)) + self.assert_array_equal(2.0, self.average(ott, weights=[1., 1., 2., 1.])) + result, wts = self.average(ott, weights=[1., 1., 2., 1.], returned=1) + self.assert_array_equal(2.0, result) + assert(wts == 4.0) + ott[:] = self.masked + assert(self.average(ott, axis=0) is self.masked) + ott = self.array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) + ott = ott.reshape(2, 2) + ott[:, 1] = self.masked + self.assert_array_equal(self.average(ott, axis=0), [2.0, 0.0]) + assert(self.average(ott, axis=1)[0] is self.masked) + self.assert_array_equal([2., 0.], self.average(ott, axis=0)) + result, wts = self.average(ott, axis=0, returned=1) + self.assert_array_equal(wts, [1., 0.]) + w1 = [0, 1, 1, 1, 1, 0] + w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]] + x = self.arange(6) + self.assert_array_equal(self.average(x, axis=0), 2.5) + self.assert_array_equal(self.average(x, axis=0, weights=w1), 2.5) + y = self.array([self.arange(6), 2.0*self.arange(6)]) + self.assert_array_equal(self.average(y, None), np.add.reduce(np.arange(6))*3./12.) + self.assert_array_equal(self.average(y, axis=0), np.arange(6) * 3./2.) + self.assert_array_equal(self.average(y, axis=1), [self.average(x, axis=0), self.average(x, axis=0) * 2.0]) + self.assert_array_equal(self.average(y, None, weights=w2), 20./6.) + self.assert_array_equal(self.average(y, axis=0, weights=w2), [0., 1., 2., 3., 4., 10.]) + self.assert_array_equal(self.average(y, axis=1), [self.average(x, axis=0), self.average(x, axis=0) * 2.0]) + m1 = self.zeros(6) + m2 = [0, 0, 1, 1, 0, 0] + m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] + m4 = self.ones(6) + m5 = [0, 1, 1, 1, 1, 1] + self.assert_array_equal(self.average(self.masked_array(x, m1), axis=0), 2.5) + self.assert_array_equal(self.average(self.masked_array(x, m2), axis=0), 2.5) + self.assert_array_equal(self.average(self.masked_array(x, m5), axis=0), 0.0) + self.assert_array_equal(self.count(self.average(self.masked_array(x, m4), axis=0)), 0) + z = self.masked_array(y, m3) + self.assert_array_equal(self.average(z, None), 20./6.) 
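+        # The 99. entries below sit at fully masked columns; the masked
+        # comparison in assert_array_equal ignores them, so they are just
+        # placeholders.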
+        self.assert_array_equal(self.average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5])
+        self.assert_array_equal(self.average(z, axis=1), [2.5, 5.0])
+        self.assert_array_equal(self.average(z, axis=0, weights=w2), [0., 1., 99., 99., 4.0, 10.0])
+
+    @np.errstate(all='ignore')
+    def test_A(self):
+        x = self.arange(24)
+        x[5:6] = self.masked
+        x = x.reshape(2, 3, 4)
+
+
+if __name__ == '__main__':
+    setup_base = ("from __main__ import ModuleTester \n"
+                  "import numpy\n"
+                  "tester = ModuleTester(module)\n")
+    setup_cur = "import numpy.ma.core as module\n" + setup_base
+    (nrepeat, nloop) = (10, 10)
+
+    for i in range(1, 8):
+        func = 'tester.test_%i()' % i
+        cur = timeit.Timer(func, setup_cur).repeat(nrepeat, nloop*10)
+        cur = np.sort(cur)
+        print("#%i" % i + 50*'.')
+        print(eval("ModuleTester.test_%i.__doc__" % i))
+        print(f'core_current : {cur[0]:.3f} - {cur[1]:.3f}')
diff --git a/MLPY/Lib/site-packages/numpy/matlib.py b/MLPY/Lib/site-packages/numpy/matlib.py
new file mode 100644
index 0000000000000000000000000000000000000000..93f6bc81ebf925d204b8b090f1f69f198d956c36
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/matlib.py
@@ -0,0 +1,376 @@
+import warnings
+
+# 2018-05-29, PendingDeprecationWarning added to matrix.__new__
+# 2020-01-23, numpy 1.19.0 PendingDeprecationWarning
+warnings.warn("Importing from numpy.matlib is deprecated since 1.19.0. "
+              "The matrix subclass is not the recommended way to represent "
+              "matrices or deal with linear algebra (see "
+              "https://docs.scipy.org/doc/numpy/user/numpy-for-matlab-users.html). "
+              "Please adjust your code to use regular ndarray. ",
+              PendingDeprecationWarning, stacklevel=2)
+
+import numpy as np
+from numpy.matrixlib.defmatrix import matrix, asmatrix
+# Matlib.py contains all functions in the numpy namespace with a few
+# replacements. See doc/source/reference/routines.matlib.rst for details.
+# Need * as we're copying the numpy namespace.
+from numpy import *  # noqa: F403
+
+__version__ = np.__version__
+
+__all__ = np.__all__[:]  # copy numpy namespace
+__all__ += ['rand', 'randn', 'repmat']
+
+def empty(shape, dtype=None, order='C'):
+    """Return a new matrix of given shape and type, without initializing entries.
+
+    Parameters
+    ----------
+    shape : int or tuple of int
+        Shape of the empty matrix.
+    dtype : data-type, optional
+        Desired output data-type.
+    order : {'C', 'F'}, optional
+        Whether to store multi-dimensional data in row-major
+        (C-style) or column-major (Fortran-style) order in
+        memory.
+
+    See Also
+    --------
+    empty_like, zeros
+
+    Notes
+    -----
+    `empty`, unlike `zeros`, does not set the matrix values to zero,
+    and may therefore be marginally faster.  On the other hand, it requires
+    the user to manually set all the values in the array, and should be
+    used with caution.
+
+    Examples
+    --------
+    >>> import numpy.matlib
+    >>> np.matlib.empty((2, 2))    # filled with random data
+    matrix([[  6.76425276e-320,   9.79033856e-307], # random
+            [  7.39337286e-309,   3.22135945e-309]])
+    >>> np.matlib.empty((2, 2), dtype=int)
+    matrix([[ 6600475,        0], # random
+            [ 6586976, 22740995]])
+
+    """
+    return ndarray.__new__(matrix, shape, dtype, order=order)
+
+def ones(shape, dtype=None, order='C'):
+    """
+    Matrix of ones.
+
+    Return a matrix of given shape and type, filled with ones.
+
+    Parameters
+    ----------
+    shape : {sequence of ints, int}
+        Shape of the matrix
+    dtype : data-type, optional
+        The desired data-type for the matrix, default is np.float64.
+ order : {'C', 'F'}, optional + Whether to store matrix in C- or Fortran-contiguous order, + default is 'C'. + + Returns + ------- + out : matrix + Matrix of ones of given shape, dtype, and order. + + See Also + -------- + ones : Array of ones. + matlib.zeros : Zero matrix. + + Notes + ----- + If `shape` has length one i.e. ``(N,)``, or is a scalar ``N``, + `out` becomes a single row matrix of shape ``(1,N)``. + + Examples + -------- + >>> np.matlib.ones((2,3)) + matrix([[1., 1., 1.], + [1., 1., 1.]]) + + >>> np.matlib.ones(2) + matrix([[1., 1.]]) + + """ + a = ndarray.__new__(matrix, shape, dtype, order=order) + a.fill(1) + return a + +def zeros(shape, dtype=None, order='C'): + """ + Return a matrix of given shape and type, filled with zeros. + + Parameters + ---------- + shape : int or sequence of ints + Shape of the matrix + dtype : data-type, optional + The desired data-type for the matrix, default is float. + order : {'C', 'F'}, optional + Whether to store the result in C- or Fortran-contiguous order, + default is 'C'. + + Returns + ------- + out : matrix + Zero matrix of given shape, dtype, and order. + + See Also + -------- + numpy.zeros : Equivalent array function. + matlib.ones : Return a matrix of ones. + + Notes + ----- + If `shape` has length one i.e. ``(N,)``, or is a scalar ``N``, + `out` becomes a single row matrix of shape ``(1,N)``. + + Examples + -------- + >>> import numpy.matlib + >>> np.matlib.zeros((2, 3)) + matrix([[0., 0., 0.], + [0., 0., 0.]]) + + >>> np.matlib.zeros(2) + matrix([[0., 0.]]) + + """ + a = ndarray.__new__(matrix, shape, dtype, order=order) + a.fill(0) + return a + +def identity(n,dtype=None): + """ + Returns the square identity matrix of given size. + + Parameters + ---------- + n : int + Size of the returned identity matrix. + dtype : data-type, optional + Data-type of the output. Defaults to ``float``. + + Returns + ------- + out : matrix + `n` x `n` matrix with its main diagonal set to one, + and all other elements zero. + + See Also + -------- + numpy.identity : Equivalent array function. + matlib.eye : More general matrix identity function. + + Examples + -------- + >>> import numpy.matlib + >>> np.matlib.identity(3, dtype=int) + matrix([[1, 0, 0], + [0, 1, 0], + [0, 0, 1]]) + + """ + a = array([1]+n*[0], dtype=dtype) + b = empty((n, n), dtype=dtype) + b.flat = a + return b + +def eye(n,M=None, k=0, dtype=float, order='C'): + """ + Return a matrix with ones on the diagonal and zeros elsewhere. + + Parameters + ---------- + n : int + Number of rows in the output. + M : int, optional + Number of columns in the output, defaults to `n`. + k : int, optional + Index of the diagonal: 0 refers to the main diagonal, + a positive value refers to an upper diagonal, + and a negative value to a lower diagonal. + dtype : dtype, optional + Data-type of the returned matrix. + order : {'C', 'F'}, optional + Whether the output should be stored in row-major (C-style) or + column-major (Fortran-style) order in memory. + + .. versionadded:: 1.14.0 + + Returns + ------- + I : matrix + A `n` x `M` matrix where all elements are equal to zero, + except for the `k`-th diagonal, whose values are equal to one. + + See Also + -------- + numpy.eye : Equivalent array function. + identity : Square identity matrix. 
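+
+    Notes
+    -----
+    Thin wrapper: the result of `numpy.eye` is simply viewed as a `matrix`
+    via `asmatrix`, so all parameters behave exactly as in `numpy.eye`.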
+
+    Examples
+    --------
+    >>> import numpy.matlib
+    >>> np.matlib.eye(3, k=1, dtype=float)
+    matrix([[0., 1., 0.],
+            [0., 0., 1.],
+            [0., 0., 0.]])
+
+    """
+    return asmatrix(np.eye(n, M=M, k=k, dtype=dtype, order=order))
+
+def rand(*args):
+    """
+    Return a matrix of random values with given shape.
+
+    Create a matrix of the given shape and populate it with
+    random samples from a uniform distribution over ``[0, 1)``.
+
+    Parameters
+    ----------
+    \\*args : Arguments
+        Shape of the output.
+        If given as N integers, each integer specifies the size of one
+        dimension.
+        If given as a tuple, this tuple gives the complete shape.
+
+    Returns
+    -------
+    out : ndarray
+        The matrix of random values with shape given by `\\*args`.
+
+    See Also
+    --------
+    randn, numpy.random.RandomState.rand
+
+    Examples
+    --------
+    >>> np.random.seed(123)
+    >>> import numpy.matlib
+    >>> np.matlib.rand(2, 3)
+    matrix([[0.69646919, 0.28613933, 0.22685145],
+            [0.55131477, 0.71946897, 0.42310646]])
+    >>> np.matlib.rand((2, 3))
+    matrix([[0.9807642 , 0.68482974, 0.4809319 ],
+            [0.39211752, 0.34317802, 0.72904971]])
+
+    If the first argument is a tuple, other arguments are ignored:
+
+    >>> np.matlib.rand((2, 3), 4)
+    matrix([[0.43857224, 0.0596779 , 0.39804426],
+            [0.73799541, 0.18249173, 0.17545176]])
+
+    """
+    if isinstance(args[0], tuple):
+        args = args[0]
+    return asmatrix(np.random.rand(*args))
+
+def randn(*args):
+    """
+    Return a random matrix with data from the "standard normal" distribution.
+
+    `randn` generates a matrix filled with random floats sampled from a
+    univariate "normal" (Gaussian) distribution of mean 0 and variance 1.
+
+    Parameters
+    ----------
+    \\*args : Arguments
+        Shape of the output.
+        If given as N integers, each integer specifies the size of one
+        dimension. If given as a tuple, this tuple gives the complete shape.
+
+    Returns
+    -------
+    Z : matrix of floats
+        A matrix of floating-point samples drawn from the standard normal
+        distribution.
+
+    See Also
+    --------
+    rand, numpy.random.RandomState.randn
+
+    Notes
+    -----
+    For random samples from :math:`N(\\mu, \\sigma^2)`, use:
+
+    ``sigma * np.matlib.randn(...) + mu``
+
+    Examples
+    --------
+    >>> np.random.seed(123)
+    >>> import numpy.matlib
+    >>> np.matlib.randn(1)
+    matrix([[-1.0856306]])
+    >>> np.matlib.randn(1, 2, 3)
+    matrix([[ 0.99734545,  0.2829785 , -1.50629471],
+            [-0.57860025,  1.65143654, -2.42667924]])
+
+    Two-by-four matrix of samples from :math:`N(3, 6.25)`:
+
+    >>> 2.5 * np.matlib.randn((2, 4)) + 3
+    matrix([[1.92771843, 6.16484065, 0.83314899, 1.30278462],
+            [2.76322758, 6.72847407, 1.40274501, 1.8900451 ]])
+
+    """
+    if isinstance(args[0], tuple):
+        args = args[0]
+    return asmatrix(np.random.randn(*args))
+
+def repmat(a, m, n):
+    """
+    Repeat a 0-D to 2-D array or matrix MxN times.
+
+    Parameters
+    ----------
+    a : array_like
+        The array or matrix to be repeated.
+    m, n : int
+        The number of times `a` is repeated along the first and second axes.
+
+    Returns
+    -------
+    out : ndarray
+        The result of repeating `a`.
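+
+    Notes
+    -----
+    For the shapes handled here, this is roughly equivalent to tiling the
+    2-D-promoted input: ``np.tile(np.atleast_2d(a), (m, n))``.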
+ + Examples + -------- + >>> import numpy.matlib + >>> a0 = np.array(1) + >>> np.matlib.repmat(a0, 2, 3) + array([[1, 1, 1], + [1, 1, 1]]) + + >>> a1 = np.arange(4) + >>> np.matlib.repmat(a1, 2, 2) + array([[0, 1, 2, 3, 0, 1, 2, 3], + [0, 1, 2, 3, 0, 1, 2, 3]]) + + >>> a2 = np.asmatrix(np.arange(6).reshape(2, 3)) + >>> np.matlib.repmat(a2, 2, 3) + matrix([[0, 1, 2, 0, 1, 2, 0, 1, 2], + [3, 4, 5, 3, 4, 5, 3, 4, 5], + [0, 1, 2, 0, 1, 2, 0, 1, 2], + [3, 4, 5, 3, 4, 5, 3, 4, 5]]) + + """ + a = asanyarray(a) + ndim = a.ndim + if ndim == 0: + origrows, origcols = (1, 1) + elif ndim == 1: + origrows, origcols = (1, a.shape[0]) + else: + origrows, origcols = a.shape + rows = origrows * m + cols = origcols * n + c = a.reshape(1, a.size).repeat(m, 0).reshape(rows, origcols).repeat(n, 0) + return c.reshape(rows, cols) diff --git a/MLPY/Lib/site-packages/numpy/matrixlib/__init__.py b/MLPY/Lib/site-packages/numpy/matrixlib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fbd98119ee5b99da9710db4f29a721fe69fe316c --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/matrixlib/__init__.py @@ -0,0 +1,10 @@ +"""Sub-package containing the matrix class and related functions. + +""" +from .defmatrix import * + +__all__ = defmatrix.__all__ + +from numpy._pytesttester import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/MLPY/Lib/site-packages/numpy/matrixlib/__init__.pyi b/MLPY/Lib/site-packages/numpy/matrixlib/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..93e37574f74d8e396517c2570356e2fb500bce8b --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/matrixlib/__init__.pyi @@ -0,0 +1,11 @@ +from typing import Any, List + +from numpy import ( + matrix as matrix, +) + +__all__: List[str] + +def bmat(obj, ldict=..., gdict=...): ... +def asmatrix(data, dtype=...): ... 
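+# Runtime alias (the stub mirrors `mat = asmatrix` in defmatrix.py):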
+mat = asmatrix diff --git a/MLPY/Lib/site-packages/numpy/matrixlib/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/matrixlib/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d39b7b68d522fdcd280bd05d8be6d7b4fe6ad18 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/matrixlib/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/matrixlib/__pycache__/defmatrix.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/matrixlib/__pycache__/defmatrix.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1054c60a808a0b13260829eaf5107babc31c7b11 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/matrixlib/__pycache__/defmatrix.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/matrixlib/__pycache__/setup.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/matrixlib/__pycache__/setup.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee828960a1501cea8bcadf24782571bb1852040d Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/matrixlib/__pycache__/setup.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/matrixlib/defmatrix.py b/MLPY/Lib/site-packages/numpy/matrixlib/defmatrix.py new file mode 100644 index 0000000000000000000000000000000000000000..b324108ef698aa44b10e9989a4a55febc57eb7c7 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/matrixlib/defmatrix.py @@ -0,0 +1,1113 @@ +__all__ = ['matrix', 'bmat', 'mat', 'asmatrix'] + +import sys +import warnings +import ast +import numpy.core.numeric as N +from numpy.core.numeric import concatenate, isscalar +from numpy.core.overrides import set_module +# While not in __all__, matrix_power used to be defined here, so we import +# it for backward compatibility. +from numpy.linalg import matrix_power + + +def _convert_from_string(data): + for char in '[]': + data = data.replace(char, '') + + rows = data.split(';') + newdata = [] + count = 0 + for row in rows: + trow = row.split(',') + newrow = [] + for col in trow: + temp = col.split() + newrow.extend(map(ast.literal_eval, temp)) + if count == 0: + Ncols = len(newrow) + elif len(newrow) != Ncols: + raise ValueError("Rows not the same size.") + count += 1 + newdata.append(newrow) + return newdata + + +@set_module('numpy') +def asmatrix(data, dtype=None): + """ + Interpret the input as a matrix. + + Unlike `matrix`, `asmatrix` does not make a copy if the input is already + a matrix or an ndarray. Equivalent to ``matrix(data, copy=False)``. + + Parameters + ---------- + data : array_like + Input data. + dtype : data-type + Data-type of the output matrix. + + Returns + ------- + mat : matrix + `data` interpreted as a matrix. + + Examples + -------- + >>> x = np.array([[1, 2], [3, 4]]) + + >>> m = np.asmatrix(x) + + >>> x[0,0] = 5 + + >>> m + matrix([[5, 2], + [3, 4]]) + + """ + return matrix(data, dtype=dtype, copy=False) + + +@set_module('numpy') +class matrix(N.ndarray): + """ + matrix(data, dtype=None, copy=True) + + .. note:: It is no longer recommended to use this class, even for linear + algebra. Instead use regular arrays. The class may be removed + in the future. + + Returns a matrix from an array-like object, or from a string of data. + A matrix is a specialized 2-D array that retains its 2-D nature + through operations. It has certain special operators, such as ``*`` + (matrix multiplication) and ``**`` (matrix power). 
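+    For example, ``np.matrix([[1, 2]]) * np.matrix([[3], [4]])`` is a true
+    matrix product and evaluates to ``matrix([[11]])``.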
+ + Parameters + ---------- + data : array_like or string + If `data` is a string, it is interpreted as a matrix with commas + or spaces separating columns, and semicolons separating rows. + dtype : data-type + Data-type of the output matrix. + copy : bool + If `data` is already an `ndarray`, then this flag determines + whether the data is copied (the default), or whether a view is + constructed. + + See Also + -------- + array + + Examples + -------- + >>> a = np.matrix('1 2; 3 4') + >>> a + matrix([[1, 2], + [3, 4]]) + + >>> np.matrix([[1, 2], [3, 4]]) + matrix([[1, 2], + [3, 4]]) + + """ + __array_priority__ = 10.0 + def __new__(subtype, data, dtype=None, copy=True): + warnings.warn('the matrix subclass is not the recommended way to ' + 'represent matrices or deal with linear algebra (see ' + 'https://docs.scipy.org/doc/numpy/user/' + 'numpy-for-matlab-users.html). ' + 'Please adjust your code to use regular ndarray.', + PendingDeprecationWarning, stacklevel=2) + if isinstance(data, matrix): + dtype2 = data.dtype + if (dtype is None): + dtype = dtype2 + if (dtype2 == dtype) and (not copy): + return data + return data.astype(dtype) + + if isinstance(data, N.ndarray): + if dtype is None: + intype = data.dtype + else: + intype = N.dtype(dtype) + new = data.view(subtype) + if intype != data.dtype: + return new.astype(intype) + if copy: return new.copy() + else: return new + + if isinstance(data, str): + data = _convert_from_string(data) + + # now convert data to an array + arr = N.array(data, dtype=dtype, copy=copy) + ndim = arr.ndim + shape = arr.shape + if (ndim > 2): + raise ValueError("matrix must be 2-dimensional") + elif ndim == 0: + shape = (1, 1) + elif ndim == 1: + shape = (1, shape[0]) + + order = 'C' + if (ndim == 2) and arr.flags.fortran: + order = 'F' + + if not (order or arr.flags.contiguous): + arr = arr.copy() + + ret = N.ndarray.__new__(subtype, shape, arr.dtype, + buffer=arr, + order=order) + return ret + + def __array_finalize__(self, obj): + self._getitem = False + if (isinstance(obj, matrix) and obj._getitem): return + ndim = self.ndim + if (ndim == 2): + return + if (ndim > 2): + newshape = tuple([x for x in self.shape if x > 1]) + ndim = len(newshape) + if ndim == 2: + self.shape = newshape + return + elif (ndim > 2): + raise ValueError("shape too large to be a matrix.") + else: + newshape = self.shape + if ndim == 0: + self.shape = (1, 1) + elif ndim == 1: + self.shape = (1, newshape[0]) + return + + def __getitem__(self, index): + self._getitem = True + + try: + out = N.ndarray.__getitem__(self, index) + finally: + self._getitem = False + + if not isinstance(out, N.ndarray): + return out + + if out.ndim == 0: + return out[()] + if out.ndim == 1: + sh = out.shape[0] + # Determine when we should have a column array + try: + n = len(index) + except Exception: + n = 0 + if n > 1 and isscalar(index[1]): + out.shape = (sh, 1) + else: + out.shape = (1, sh) + return out + + def __mul__(self, other): + if isinstance(other, (N.ndarray, list, tuple)) : + # This promotes 1-D vectors to row vectors + return N.dot(self, asmatrix(other)) + if isscalar(other) or not hasattr(other, '__rmul__') : + return N.dot(self, other) + return NotImplemented + + def __rmul__(self, other): + return N.dot(other, self) + + def __imul__(self, other): + self[:] = self * other + return self + + def __pow__(self, other): + return matrix_power(self, other) + + def __ipow__(self, other): + self[:] = self ** other + return self + + def __rpow__(self, other): + return NotImplemented + + def _align(self, 
axis): + """A convenience function for operations that need to preserve axis + orientation. + """ + if axis is None: + return self[0, 0] + elif axis==0: + return self + elif axis==1: + return self.transpose() + else: + raise ValueError("unsupported axis") + + def _collapse(self, axis): + """A convenience function for operations that want to collapse + to a scalar like _align, but are using keepdims=True + """ + if axis is None: + return self[0, 0] + else: + return self + + # Necessary because base-class tolist expects dimension + # reduction by x[0] + def tolist(self): + """ + Return the matrix as a (possibly nested) list. + + See `ndarray.tolist` for full documentation. + + See Also + -------- + ndarray.tolist + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.tolist() + [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]] + + """ + return self.__array__().tolist() + + # To preserve orientation of result... + def sum(self, axis=None, dtype=None, out=None): + """ + Returns the sum of the matrix elements, along the given axis. + + Refer to `numpy.sum` for full documentation. + + See Also + -------- + numpy.sum + + Notes + ----- + This is the same as `ndarray.sum`, except that where an `ndarray` would + be returned, a `matrix` object is returned instead. + + Examples + -------- + >>> x = np.matrix([[1, 2], [4, 3]]) + >>> x.sum() + 10 + >>> x.sum(axis=1) + matrix([[3], + [7]]) + >>> x.sum(axis=1, dtype='float') + matrix([[3.], + [7.]]) + >>> out = np.zeros((2, 1), dtype='float') + >>> x.sum(axis=1, dtype='float', out=np.asmatrix(out)) + matrix([[3.], + [7.]]) + + """ + return N.ndarray.sum(self, axis, dtype, out, keepdims=True)._collapse(axis) + + + # To update docstring from array to matrix... + def squeeze(self, axis=None): + """ + Return a possibly reshaped matrix. + + Refer to `numpy.squeeze` for more documentation. + + Parameters + ---------- + axis : None or int or tuple of ints, optional + Selects a subset of the axes of length one in the shape. + If an axis is selected with shape entry greater than one, + an error is raised. + + Returns + ------- + squeezed : matrix + The matrix, but as a (1, N) matrix if it had shape (N, 1). + + See Also + -------- + numpy.squeeze : related function + + Notes + ----- + If `m` has a single column then that column is returned + as the single row of a matrix. Otherwise `m` is returned. + The returned matrix is always either `m` itself or a view into `m`. + Supplying an axis keyword argument will not affect the returned matrix + but it may cause an error to be raised. + + Examples + -------- + >>> c = np.matrix([[1], [2]]) + >>> c + matrix([[1], + [2]]) + >>> c.squeeze() + matrix([[1, 2]]) + >>> r = c.T + >>> r + matrix([[1, 2]]) + >>> r.squeeze() + matrix([[1, 2]]) + >>> m = np.matrix([[1, 2], [3, 4]]) + >>> m.squeeze() + matrix([[1, 2], + [3, 4]]) + + """ + return N.ndarray.squeeze(self, axis=axis) + + + # To update docstring from array to matrix... + def flatten(self, order='C'): + """ + Return a flattened copy of the matrix. + + All `N` elements of the matrix are placed into a single row. + + Parameters + ---------- + order : {'C', 'F', 'A', 'K'}, optional + 'C' means to flatten in row-major (C-style) order. 'F' means to + flatten in column-major (Fortran-style) order. 'A' means to + flatten in column-major order if `m` is Fortran *contiguous* in + memory, row-major order otherwise. 'K' means to flatten `m` in + the order the elements occur in memory. 
The default is 'C'. + + Returns + ------- + y : matrix + A copy of the matrix, flattened to a `(1, N)` matrix where `N` + is the number of elements in the original matrix. + + See Also + -------- + ravel : Return a flattened array. + flat : A 1-D flat iterator over the matrix. + + Examples + -------- + >>> m = np.matrix([[1,2], [3,4]]) + >>> m.flatten() + matrix([[1, 2, 3, 4]]) + >>> m.flatten('F') + matrix([[1, 3, 2, 4]]) + + """ + return N.ndarray.flatten(self, order=order) + + def mean(self, axis=None, dtype=None, out=None): + """ + Returns the average of the matrix elements along the given axis. + + Refer to `numpy.mean` for full documentation. + + See Also + -------- + numpy.mean + + Notes + ----- + Same as `ndarray.mean` except that, where that returns an `ndarray`, + this returns a `matrix` object. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3, 4))) + >>> x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.mean() + 5.5 + >>> x.mean(0) + matrix([[4., 5., 6., 7.]]) + >>> x.mean(1) + matrix([[ 1.5], + [ 5.5], + [ 9.5]]) + + """ + return N.ndarray.mean(self, axis, dtype, out, keepdims=True)._collapse(axis) + + def std(self, axis=None, dtype=None, out=None, ddof=0): + """ + Return the standard deviation of the array elements along the given axis. + + Refer to `numpy.std` for full documentation. + + See Also + -------- + numpy.std + + Notes + ----- + This is the same as `ndarray.std`, except that where an `ndarray` would + be returned, a `matrix` object is returned instead. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3, 4))) + >>> x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.std() + 3.4520525295346629 # may vary + >>> x.std(0) + matrix([[ 3.26598632, 3.26598632, 3.26598632, 3.26598632]]) # may vary + >>> x.std(1) + matrix([[ 1.11803399], + [ 1.11803399], + [ 1.11803399]]) + + """ + return N.ndarray.std(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis) + + def var(self, axis=None, dtype=None, out=None, ddof=0): + """ + Returns the variance of the matrix elements, along the given axis. + + Refer to `numpy.var` for full documentation. + + See Also + -------- + numpy.var + + Notes + ----- + This is the same as `ndarray.var`, except that where an `ndarray` would + be returned, a `matrix` object is returned instead. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3, 4))) + >>> x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.var() + 11.916666666666666 + >>> x.var(0) + matrix([[ 10.66666667, 10.66666667, 10.66666667, 10.66666667]]) # may vary + >>> x.var(1) + matrix([[1.25], + [1.25], + [1.25]]) + + """ + return N.ndarray.var(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis) + + def prod(self, axis=None, dtype=None, out=None): + """ + Return the product of the array elements over the given axis. + + Refer to `prod` for full documentation. + + See Also + -------- + prod, ndarray.prod + + Notes + ----- + Same as `ndarray.prod`, except, where that returns an `ndarray`, this + returns a `matrix` object instead. 
+ + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.prod() + 0 + >>> x.prod(0) + matrix([[ 0, 45, 120, 231]]) + >>> x.prod(1) + matrix([[ 0], + [ 840], + [7920]]) + + """ + return N.ndarray.prod(self, axis, dtype, out, keepdims=True)._collapse(axis) + + def any(self, axis=None, out=None): + """ + Test whether any array element along a given axis evaluates to True. + + Refer to `numpy.any` for full documentation. + + Parameters + ---------- + axis : int, optional + Axis along which logical OR is performed + out : ndarray, optional + Output to existing array instead of creating new one, must have + same shape as expected output + + Returns + ------- + any : bool, ndarray + Returns a single bool if `axis` is ``None``; otherwise, + returns `ndarray` + + """ + return N.ndarray.any(self, axis, out, keepdims=True)._collapse(axis) + + def all(self, axis=None, out=None): + """ + Test whether all matrix elements along a given axis evaluate to True. + + Parameters + ---------- + See `numpy.all` for complete descriptions + + See Also + -------- + numpy.all + + Notes + ----- + This is the same as `ndarray.all`, but it returns a `matrix` object. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> y = x[0]; y + matrix([[0, 1, 2, 3]]) + >>> (x == y) + matrix([[ True, True, True, True], + [False, False, False, False], + [False, False, False, False]]) + >>> (x == y).all() + False + >>> (x == y).all(0) + matrix([[False, False, False, False]]) + >>> (x == y).all(1) + matrix([[ True], + [False], + [False]]) + + """ + return N.ndarray.all(self, axis, out, keepdims=True)._collapse(axis) + + def max(self, axis=None, out=None): + """ + Return the maximum value along an axis. + + Parameters + ---------- + See `amax` for complete descriptions + + See Also + -------- + amax, ndarray.max + + Notes + ----- + This is the same as `ndarray.max`, but returns a `matrix` object + where `ndarray.max` would return an ndarray. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.max() + 11 + >>> x.max(0) + matrix([[ 8, 9, 10, 11]]) + >>> x.max(1) + matrix([[ 3], + [ 7], + [11]]) + + """ + return N.ndarray.max(self, axis, out, keepdims=True)._collapse(axis) + + def argmax(self, axis=None, out=None): + """ + Indexes of the maximum values along an axis. + + Return the indexes of the first occurrences of the maximum values + along the specified axis. If axis is None, the index is for the + flattened matrix. + + Parameters + ---------- + See `numpy.argmax` for complete descriptions + + See Also + -------- + numpy.argmax + + Notes + ----- + This is the same as `ndarray.argmax`, but returns a `matrix` object + where `ndarray.argmax` would return an `ndarray`. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.argmax() + 11 + >>> x.argmax(0) + matrix([[2, 2, 2, 2]]) + >>> x.argmax(1) + matrix([[3], + [3], + [3]]) + + """ + return N.ndarray.argmax(self, axis, out)._align(axis) + + def min(self, axis=None, out=None): + """ + Return the minimum value along an axis. + + Parameters + ---------- + See `amin` for complete descriptions. 
+ + See Also + -------- + amin, ndarray.min + + Notes + ----- + This is the same as `ndarray.min`, but returns a `matrix` object + where `ndarray.min` would return an ndarray. + + Examples + -------- + >>> x = -np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, -1, -2, -3], + [ -4, -5, -6, -7], + [ -8, -9, -10, -11]]) + >>> x.min() + -11 + >>> x.min(0) + matrix([[ -8, -9, -10, -11]]) + >>> x.min(1) + matrix([[ -3], + [ -7], + [-11]]) + + """ + return N.ndarray.min(self, axis, out, keepdims=True)._collapse(axis) + + def argmin(self, axis=None, out=None): + """ + Indexes of the minimum values along an axis. + + Return the indexes of the first occurrences of the minimum values + along the specified axis. If axis is None, the index is for the + flattened matrix. + + Parameters + ---------- + See `numpy.argmin` for complete descriptions. + + See Also + -------- + numpy.argmin + + Notes + ----- + This is the same as `ndarray.argmin`, but returns a `matrix` object + where `ndarray.argmin` would return an `ndarray`. + + Examples + -------- + >>> x = -np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, -1, -2, -3], + [ -4, -5, -6, -7], + [ -8, -9, -10, -11]]) + >>> x.argmin() + 11 + >>> x.argmin(0) + matrix([[2, 2, 2, 2]]) + >>> x.argmin(1) + matrix([[3], + [3], + [3]]) + + """ + return N.ndarray.argmin(self, axis, out)._align(axis) + + def ptp(self, axis=None, out=None): + """ + Peak-to-peak (maximum - minimum) value along the given axis. + + Refer to `numpy.ptp` for full documentation. + + See Also + -------- + numpy.ptp + + Notes + ----- + Same as `ndarray.ptp`, except, where that would return an `ndarray` object, + this returns a `matrix` object. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.ptp() + 11 + >>> x.ptp(0) + matrix([[8, 8, 8, 8]]) + >>> x.ptp(1) + matrix([[3], + [3], + [3]]) + + """ + return N.ndarray.ptp(self, axis, out)._align(axis) + + @property + def I(self): + """ + Returns the (multiplicative) inverse of invertible `self`. + + Parameters + ---------- + None + + Returns + ------- + ret : matrix object + If `self` is non-singular, `ret` is such that ``ret * self`` == + ``self * ret`` == ``np.matrix(np.eye(self[0,:].size))`` all return + ``True``. + + Raises + ------ + numpy.linalg.LinAlgError: Singular matrix + If `self` is singular. + + See Also + -------- + linalg.inv + + Examples + -------- + >>> m = np.matrix('[1, 2; 3, 4]'); m + matrix([[1, 2], + [3, 4]]) + >>> m.getI() + matrix([[-2. , 1. ], + [ 1.5, -0.5]]) + >>> m.getI() * m + matrix([[ 1., 0.], # may vary + [ 0., 1.]]) + + """ + M, N = self.shape + if M == N: + from numpy.linalg import inv as func + else: + from numpy.linalg import pinv as func + return asmatrix(func(self)) + + @property + def A(self): + """ + Return `self` as an `ndarray` object. + + Equivalent to ``np.asarray(self)``. + + Parameters + ---------- + None + + Returns + ------- + ret : ndarray + `self` as an `ndarray` + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.getA() + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + + """ + return self.__array__() + + @property + def A1(self): + """ + Return `self` as a flattened `ndarray`. 
+ + Equivalent to ``np.asarray(x).ravel()`` + + Parameters + ---------- + None + + Returns + ------- + ret : ndarray + `self`, 1-D, as an `ndarray` + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.getA1() + array([ 0, 1, 2, ..., 9, 10, 11]) + + + """ + return self.__array__().ravel() + + + def ravel(self, order='C'): + """ + Return a flattened matrix. + + Refer to `numpy.ravel` for more documentation. + + Parameters + ---------- + order : {'C', 'F', 'A', 'K'}, optional + The elements of `m` are read using this index order. 'C' means to + index the elements in C-like order, with the last axis index + changing fastest, back to the first axis index changing slowest. + 'F' means to index the elements in Fortran-like index order, with + the first index changing fastest, and the last index changing + slowest. Note that the 'C' and 'F' options take no account of the + memory layout of the underlying array, and only refer to the order + of axis indexing. 'A' means to read the elements in Fortran-like + index order if `m` is Fortran *contiguous* in memory, C-like order + otherwise. 'K' means to read the elements in the order they occur + in memory, except for reversing the data when strides are negative. + By default, 'C' index order is used. + + Returns + ------- + ret : matrix + Return the matrix flattened to shape `(1, N)` where `N` + is the number of elements in the original matrix. + A copy is made only if necessary. + + See Also + -------- + matrix.flatten : returns a similar output matrix but always a copy + matrix.flat : a flat iterator on the array. + numpy.ravel : related function which returns an ndarray + + """ + return N.ndarray.ravel(self, order=order) + + @property + def T(self): + """ + Returns the transpose of the matrix. + + Does *not* conjugate! For the complex conjugate transpose, use ``.H``. + + Parameters + ---------- + None + + Returns + ------- + ret : matrix object + The (non-conjugated) transpose of the matrix. + + See Also + -------- + transpose, getH + + Examples + -------- + >>> m = np.matrix('[1, 2; 3, 4]') + >>> m + matrix([[1, 2], + [3, 4]]) + >>> m.getT() + matrix([[1, 3], + [2, 4]]) + + """ + return self.transpose() + + @property + def H(self): + """ + Returns the (complex) conjugate transpose of `self`. + + Equivalent to ``np.transpose(self)`` if `self` is real-valued. + + Parameters + ---------- + None + + Returns + ------- + ret : matrix object + complex conjugate transpose of `self` + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))) + >>> z = x - 1j*x; z + matrix([[ 0. +0.j, 1. -1.j, 2. -2.j, 3. -3.j], + [ 4. -4.j, 5. -5.j, 6. -6.j, 7. -7.j], + [ 8. -8.j, 9. -9.j, 10.-10.j, 11.-11.j]]) + >>> z.getH() + matrix([[ 0. -0.j, 4. +4.j, 8. +8.j], + [ 1. +1.j, 5. +5.j, 9. +9.j], + [ 2. +2.j, 6. +6.j, 10.+10.j], + [ 3. +3.j, 7. 
+7.j, 11.+11.j]]) + + """ + if issubclass(self.dtype.type, N.complexfloating): + return self.transpose().conjugate() + else: + return self.transpose() + + # kept for compatibility + getT = T.fget + getA = A.fget + getA1 = A1.fget + getH = H.fget + getI = I.fget + +def _from_string(str, gdict, ldict): + rows = str.split(';') + rowtup = [] + for row in rows: + trow = row.split(',') + newrow = [] + for x in trow: + newrow.extend(x.split()) + trow = newrow + coltup = [] + for col in trow: + col = col.strip() + try: + thismat = ldict[col] + except KeyError: + try: + thismat = gdict[col] + except KeyError as e: + raise NameError(f"name {col!r} is not defined") from None + + coltup.append(thismat) + rowtup.append(concatenate(coltup, axis=-1)) + return concatenate(rowtup, axis=0) + + +@set_module('numpy') +def bmat(obj, ldict=None, gdict=None): + """ + Build a matrix object from a string, nested sequence, or array. + + Parameters + ---------- + obj : str or array_like + Input data. If a string, variables in the current scope may be + referenced by name. + ldict : dict, optional + A dictionary that replaces local operands in current frame. + Ignored if `obj` is not a string or `gdict` is None. + gdict : dict, optional + A dictionary that replaces global operands in current frame. + Ignored if `obj` is not a string. + + Returns + ------- + out : matrix + Returns a matrix object, which is a specialized 2-D array. + + See Also + -------- + block : + A generalization of this function for N-d arrays, that returns normal + ndarrays. + + Examples + -------- + >>> A = np.mat('1 1; 1 1') + >>> B = np.mat('2 2; 2 2') + >>> C = np.mat('3 4; 5 6') + >>> D = np.mat('7 8; 9 0') + + All the following expressions construct the same block matrix: + + >>> np.bmat([[A, B], [C, D]]) + matrix([[1, 1, 2, 2], + [1, 1, 2, 2], + [3, 4, 7, 8], + [5, 6, 9, 0]]) + >>> np.bmat(np.r_[np.c_[A, B], np.c_[C, D]]) + matrix([[1, 1, 2, 2], + [1, 1, 2, 2], + [3, 4, 7, 8], + [5, 6, 9, 0]]) + >>> np.bmat('A,B; C,D') + matrix([[1, 1, 2, 2], + [1, 1, 2, 2], + [3, 4, 7, 8], + [5, 6, 9, 0]]) + + """ + if isinstance(obj, str): + if gdict is None: + # get previous frame + frame = sys._getframe().f_back + glob_dict = frame.f_globals + loc_dict = frame.f_locals + else: + glob_dict = gdict + loc_dict = ldict + + return matrix(_from_string(obj, glob_dict, loc_dict)) + + if isinstance(obj, (tuple, list)): + # [[A,B],[C,D]] + arr_rows = [] + for row in obj: + if isinstance(row, N.ndarray): # not 2-d + return matrix(concatenate(obj, axis=-1)) + else: + arr_rows.append(concatenate(row, axis=-1)) + return matrix(concatenate(arr_rows, axis=0)) + if isinstance(obj, N.ndarray): + return matrix(obj) + +mat = asmatrix diff --git a/MLPY/Lib/site-packages/numpy/matrixlib/setup.py b/MLPY/Lib/site-packages/numpy/matrixlib/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..eb4c96adadd31ff8193391e1e52ee17acd017d57 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/matrixlib/setup.py @@ -0,0 +1,12 @@ +#!/usr/bin/env python3 +def configuration(parent_package='', top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('matrixlib', parent_package, top_path) + config.add_subpackage('tests') + config.add_data_files('*.pyi') + return config + +if __name__ == "__main__": + from numpy.distutils.core import setup + config = configuration(top_path='').todict() + setup(**config) diff --git a/MLPY/Lib/site-packages/numpy/matrixlib/tests/__init__.py b/MLPY/Lib/site-packages/numpy/matrixlib/tests/__init__.py new 
file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/numpy/matrixlib/tests/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/matrixlib/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e0e6517581098c0270608c011a3c1b9513df8198 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/matrixlib/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_defmatrix.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_defmatrix.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..13e9f47f61cb5fb7f49799fcca605b866ec5fed3 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_defmatrix.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_interaction.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_interaction.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6053c50d052d55bb543605a781d980df07274b45 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_interaction.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_masked_matrix.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_masked_matrix.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e6d9b802f04fcf5969c7d66dfc3a488ecfa0083 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_masked_matrix.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_matrix_linalg.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_matrix_linalg.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..caff0e4be4f3f46e55d6aa5ba873b83fca4279ec Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_matrix_linalg.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_multiarray.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_multiarray.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c0cc49ae938fd05b2aa7fb59731d8bd3b25c92b7 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_multiarray.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_numeric.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_numeric.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..22dc74d0f7b5c58778d94f3c4690c5dc793e12c1 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_numeric.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_regression.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_regression.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5623d8f093a2e6c3b69158cf2996e8b9a4eece3 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/matrixlib/tests/__pycache__/test_regression.cpython-39.pyc differ diff --git 
a/MLPY/Lib/site-packages/numpy/matrixlib/tests/test_defmatrix.py b/MLPY/Lib/site-packages/numpy/matrixlib/tests/test_defmatrix.py new file mode 100644 index 0000000000000000000000000000000000000000..dfa4496ecfb55c0d16ee6b5ffb7939433f04b455 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/matrixlib/tests/test_defmatrix.py @@ -0,0 +1,453 @@ +import collections.abc + +import numpy as np +from numpy import matrix, asmatrix, bmat +from numpy.testing import ( + assert_, assert_equal, assert_almost_equal, assert_array_equal, + assert_array_almost_equal, assert_raises + ) +from numpy.linalg import matrix_power +from numpy.matrixlib import mat + +class TestCtor: + def test_basic(self): + A = np.array([[1, 2], [3, 4]]) + mA = matrix(A) + assert_(np.all(mA.A == A)) + + B = bmat("A,A;A,A") + C = bmat([[A, A], [A, A]]) + D = np.array([[1, 2, 1, 2], + [3, 4, 3, 4], + [1, 2, 1, 2], + [3, 4, 3, 4]]) + assert_(np.all(B.A == D)) + assert_(np.all(C.A == D)) + + E = np.array([[5, 6], [7, 8]]) + AEresult = matrix([[1, 2, 5, 6], [3, 4, 7, 8]]) + assert_(np.all(bmat([A, E]) == AEresult)) + + vec = np.arange(5) + mvec = matrix(vec) + assert_(mvec.shape == (1, 5)) + + def test_exceptions(self): + # Check for ValueError when called with invalid string data. + assert_raises(ValueError, matrix, "invalid") + + def test_bmat_nondefault_str(self): + A = np.array([[1, 2], [3, 4]]) + B = np.array([[5, 6], [7, 8]]) + Aresult = np.array([[1, 2, 1, 2], + [3, 4, 3, 4], + [1, 2, 1, 2], + [3, 4, 3, 4]]) + mixresult = np.array([[1, 2, 5, 6], + [3, 4, 7, 8], + [5, 6, 1, 2], + [7, 8, 3, 4]]) + assert_(np.all(bmat("A,A;A,A") == Aresult)) + assert_(np.all(bmat("A,A;A,A", ldict={'A':B}) == Aresult)) + assert_raises(TypeError, bmat, "A,A;A,A", gdict={'A':B}) + assert_( + np.all(bmat("A,A;A,A", ldict={'A':A}, gdict={'A':B}) == Aresult)) + b2 = bmat("A,B;C,D", ldict={'A':A,'B':B}, gdict={'C':B,'D':A}) + assert_(np.all(b2 == mixresult)) + + +class TestProperties: + def test_sum(self): + """Test whether matrix.sum(axis=1) preserves orientation. + Fails in NumPy <= 0.9.6.2127. 
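+        For example, ``matrix([[1, 2], [3, 4]]).sum(axis=1)`` must give the
+        column matrix ``[[3], [7]]`` rather than a flat array.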
+ """ + M = matrix([[1, 2, 0, 0], + [3, 4, 0, 0], + [1, 2, 1, 2], + [3, 4, 3, 4]]) + sum0 = matrix([8, 12, 4, 6]) + sum1 = matrix([3, 7, 6, 14]).T + sumall = 30 + assert_array_equal(sum0, M.sum(axis=0)) + assert_array_equal(sum1, M.sum(axis=1)) + assert_equal(sumall, M.sum()) + + assert_array_equal(sum0, np.sum(M, axis=0)) + assert_array_equal(sum1, np.sum(M, axis=1)) + assert_equal(sumall, np.sum(M)) + + def test_prod(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x.prod(), 720) + assert_equal(x.prod(0), matrix([[4, 10, 18]])) + assert_equal(x.prod(1), matrix([[6], [120]])) + + assert_equal(np.prod(x), 720) + assert_equal(np.prod(x, axis=0), matrix([[4, 10, 18]])) + assert_equal(np.prod(x, axis=1), matrix([[6], [120]])) + + y = matrix([0, 1, 3]) + assert_(y.prod() == 0) + + def test_max(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x.max(), 6) + assert_equal(x.max(0), matrix([[4, 5, 6]])) + assert_equal(x.max(1), matrix([[3], [6]])) + + assert_equal(np.max(x), 6) + assert_equal(np.max(x, axis=0), matrix([[4, 5, 6]])) + assert_equal(np.max(x, axis=1), matrix([[3], [6]])) + + def test_min(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x.min(), 1) + assert_equal(x.min(0), matrix([[1, 2, 3]])) + assert_equal(x.min(1), matrix([[1], [4]])) + + assert_equal(np.min(x), 1) + assert_equal(np.min(x, axis=0), matrix([[1, 2, 3]])) + assert_equal(np.min(x, axis=1), matrix([[1], [4]])) + + def test_ptp(self): + x = np.arange(4).reshape((2, 2)) + assert_(x.ptp() == 3) + assert_(np.all(x.ptp(0) == np.array([2, 2]))) + assert_(np.all(x.ptp(1) == np.array([1, 1]))) + + def test_var(self): + x = np.arange(9).reshape((3, 3)) + mx = x.view(np.matrix) + assert_equal(x.var(ddof=0), mx.var(ddof=0)) + assert_equal(x.var(ddof=1), mx.var(ddof=1)) + + def test_basic(self): + import numpy.linalg as linalg + + A = np.array([[1., 2.], + [3., 4.]]) + mA = matrix(A) + assert_(np.allclose(linalg.inv(A), mA.I)) + assert_(np.all(np.array(np.transpose(A) == mA.T))) + assert_(np.all(np.array(np.transpose(A) == mA.H))) + assert_(np.all(A == mA.A)) + + B = A + 2j*A + mB = matrix(B) + assert_(np.allclose(linalg.inv(B), mB.I)) + assert_(np.all(np.array(np.transpose(B) == mB.T))) + assert_(np.all(np.array(np.transpose(B).conj() == mB.H))) + + def test_pinv(self): + x = matrix(np.arange(6).reshape(2, 3)) + xpinv = matrix([[-0.77777778, 0.27777778], + [-0.11111111, 0.11111111], + [ 0.55555556, -0.05555556]]) + assert_almost_equal(x.I, xpinv) + + def test_comparisons(self): + A = np.arange(100).reshape(10, 10) + mA = matrix(A) + mB = matrix(A) + 0.1 + assert_(np.all(mB == A+0.1)) + assert_(np.all(mB == matrix(A+0.1))) + assert_(not np.any(mB == matrix(A-0.1))) + assert_(np.all(mA < mB)) + assert_(np.all(mA <= mB)) + assert_(np.all(mA <= mA)) + assert_(not np.any(mA < mA)) + + assert_(not np.any(mB < mA)) + assert_(np.all(mB >= mA)) + assert_(np.all(mB >= mB)) + assert_(not np.any(mB > mB)) + + assert_(np.all(mA == mA)) + assert_(not np.any(mA == mB)) + assert_(np.all(mB != mA)) + + assert_(not np.all(abs(mA) > 0)) + assert_(np.all(abs(mB > 0))) + + def test_asmatrix(self): + A = np.arange(100).reshape(10, 10) + mA = asmatrix(A) + A[0, 0] = -10 + assert_(A[0, 0] == mA[0, 0]) + + def test_noaxis(self): + A = matrix([[1, 0], [0, 1]]) + assert_(A.sum() == matrix(2)) + assert_(A.mean() == matrix(0.5)) + + def test_repr(self): + A = matrix([[1, 0], [0, 1]]) + assert_(repr(A) == "matrix([[1, 0],\n [0, 1]])") + + def test_make_bool_matrix_from_str(self): + A = matrix('True; True; False') + B = 
matrix([[True], [True], [False]]) + assert_array_equal(A, B) + +class TestCasting: + def test_basic(self): + A = np.arange(100).reshape(10, 10) + mA = matrix(A) + + mB = mA.copy() + O = np.ones((10, 10), np.float64) * 0.1 + mB = mB + O + assert_(mB.dtype.type == np.float64) + assert_(np.all(mA != mB)) + assert_(np.all(mB == mA+0.1)) + + mC = mA.copy() + O = np.ones((10, 10), np.complex128) + mC = mC * O + assert_(mC.dtype.type == np.complex128) + assert_(np.all(mA != mB)) + + +class TestAlgebra: + def test_basic(self): + import numpy.linalg as linalg + + A = np.array([[1., 2.], [3., 4.]]) + mA = matrix(A) + + B = np.identity(2) + for i in range(6): + assert_(np.allclose((mA ** i).A, B)) + B = np.dot(B, A) + + Ainv = linalg.inv(A) + B = np.identity(2) + for i in range(6): + assert_(np.allclose((mA ** -i).A, B)) + B = np.dot(B, Ainv) + + assert_(np.allclose((mA * mA).A, np.dot(A, A))) + assert_(np.allclose((mA + mA).A, (A + A))) + assert_(np.allclose((3*mA).A, (3*A))) + + mA2 = matrix(A) + mA2 *= 3 + assert_(np.allclose(mA2.A, 3*A)) + + def test_pow(self): + """Test raising a matrix to an integer power works as expected.""" + m = matrix("1. 2.; 3. 4.") + m2 = m.copy() + m2 **= 2 + mi = m.copy() + mi **= -1 + m4 = m2.copy() + m4 **= 2 + assert_array_almost_equal(m2, m**2) + assert_array_almost_equal(m4, np.dot(m2, m2)) + assert_array_almost_equal(np.dot(mi, m), np.eye(2)) + + def test_scalar_type_pow(self): + m = matrix([[1, 2], [3, 4]]) + for scalar_t in [np.int8, np.uint8]: + two = scalar_t(2) + assert_array_almost_equal(m ** 2, m ** two) + + def test_notimplemented(self): + '''Check that 'not implemented' operations produce a failure.''' + A = matrix([[1., 2.], + [3., 4.]]) + + # __rpow__ + with assert_raises(TypeError): + 1.0**A + + # __mul__ with something not a list, ndarray, tuple, or scalar + with assert_raises(TypeError): + A*object() + + +class TestMatrixReturn: + def test_instance_methods(self): + a = matrix([1.0], dtype='f8') + methodargs = { + 'astype': ('intc',), + 'clip': (0.0, 1.0), + 'compress': ([1],), + 'repeat': (1,), + 'reshape': (1,), + 'swapaxes': (0, 0), + 'dot': np.array([1.0]), + } + excluded_methods = [ + 'argmin', 'choose', 'dump', 'dumps', 'fill', 'getfield', + 'getA', 'getA1', 'item', 'nonzero', 'put', 'putmask', 'resize', + 'searchsorted', 'setflags', 'setfield', 'sort', + 'partition', 'argpartition', + 'take', 'tofile', 'tolist', 'tostring', 'tobytes', 'all', 'any', + 'sum', 'argmax', 'argmin', 'min', 'max', 'mean', 'var', 'ptp', + 'prod', 'std', 'ctypes', 'itemset', + ] + for attrib in dir(a): + if attrib.startswith('_') or attrib in excluded_methods: + continue + f = getattr(a, attrib) + if isinstance(f, collections.abc.Callable): + # reset contents of a + a.astype('f8') + a.fill(1.0) + if attrib in methodargs: + args = methodargs[attrib] + else: + args = () + b = f(*args) + assert_(type(b) is matrix, "%s" % attrib) + assert_(type(a.real) is matrix) + assert_(type(a.imag) is matrix) + c, d = matrix([0.0]).nonzero() + assert_(type(c) is np.ndarray) + assert_(type(d) is np.ndarray) + + +class TestIndexing: + def test_basic(self): + x = asmatrix(np.zeros((3, 2), float)) + y = np.zeros((3, 1), float) + y[:, 0] = [0.8, 0.2, 0.3] + x[:, 1] = y > 0.5 + assert_equal(x, [[0, 1], [0, 0], [0, 0]]) + + +class TestNewScalarIndexing: + a = matrix([[1, 2], [3, 4]]) + + def test_dimesions(self): + a = self.a + x = a[0] + assert_equal(x.ndim, 2) + + def test_array_from_matrix_list(self): + a = self.a + x = np.array([a, a]) + assert_equal(x.shape, [2, 2, 2]) + + def 
test_array_to_list(self): + a = self.a + assert_equal(a.tolist(), [[1, 2], [3, 4]]) + + def test_fancy_indexing(self): + a = self.a + x = a[1, [0, 1, 0]] + assert_(isinstance(x, matrix)) + assert_equal(x, matrix([[3, 4, 3]])) + x = a[[1, 0]] + assert_(isinstance(x, matrix)) + assert_equal(x, matrix([[3, 4], [1, 2]])) + x = a[[[1], [0]], [[1, 0], [0, 1]]] + assert_(isinstance(x, matrix)) + assert_equal(x, matrix([[4, 3], [1, 2]])) + + def test_matrix_element(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x[0][0], matrix([[1, 2, 3]])) + assert_equal(x[0][0].shape, (1, 3)) + assert_equal(x[0].shape, (1, 3)) + assert_equal(x[:, 0].shape, (2, 1)) + + x = matrix(0) + assert_equal(x[0, 0], 0) + assert_equal(x[0], 0) + assert_equal(x[:, 0].shape, x.shape) + + def test_scalar_indexing(self): + x = asmatrix(np.zeros((3, 2), float)) + assert_equal(x[0, 0], x[0][0]) + + def test_row_column_indexing(self): + x = asmatrix(np.eye(2)) + assert_array_equal(x[0,:], [[1, 0]]) + assert_array_equal(x[1,:], [[0, 1]]) + assert_array_equal(x[:, 0], [[1], [0]]) + assert_array_equal(x[:, 1], [[0], [1]]) + + def test_boolean_indexing(self): + A = np.arange(6) + A.shape = (3, 2) + x = asmatrix(A) + assert_array_equal(x[:, np.array([True, False])], x[:, 0]) + assert_array_equal(x[np.array([True, False, False]),:], x[0,:]) + + def test_list_indexing(self): + A = np.arange(6) + A.shape = (3, 2) + x = asmatrix(A) + assert_array_equal(x[:, [1, 0]], x[:, ::-1]) + assert_array_equal(x[[2, 1, 0],:], x[::-1,:]) + + +class TestPower: + def test_returntype(self): + a = np.array([[0, 1], [0, 0]]) + assert_(type(matrix_power(a, 2)) is np.ndarray) + a = mat(a) + assert_(type(matrix_power(a, 2)) is matrix) + + def test_list(self): + assert_array_equal(matrix_power([[0, 1], [0, 0]], 2), [[0, 0], [0, 0]]) + + +class TestShape: + + a = np.array([[1], [2]]) + m = matrix([[1], [2]]) + + def test_shape(self): + assert_equal(self.a.shape, (2, 1)) + assert_equal(self.m.shape, (2, 1)) + + def test_numpy_ravel(self): + assert_equal(np.ravel(self.a).shape, (2,)) + assert_equal(np.ravel(self.m).shape, (2,)) + + def test_member_ravel(self): + assert_equal(self.a.ravel().shape, (2,)) + assert_equal(self.m.ravel().shape, (1, 2)) + + def test_member_flatten(self): + assert_equal(self.a.flatten().shape, (2,)) + assert_equal(self.m.flatten().shape, (1, 2)) + + def test_numpy_ravel_order(self): + x = np.array([[1, 2, 3], [4, 5, 6]]) + assert_equal(np.ravel(x), [1, 2, 3, 4, 5, 6]) + assert_equal(np.ravel(x, order='F'), [1, 4, 2, 5, 3, 6]) + assert_equal(np.ravel(x.T), [1, 4, 2, 5, 3, 6]) + assert_equal(np.ravel(x.T, order='A'), [1, 2, 3, 4, 5, 6]) + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(np.ravel(x), [1, 2, 3, 4, 5, 6]) + assert_equal(np.ravel(x, order='F'), [1, 4, 2, 5, 3, 6]) + assert_equal(np.ravel(x.T), [1, 4, 2, 5, 3, 6]) + assert_equal(np.ravel(x.T, order='A'), [1, 2, 3, 4, 5, 6]) + + def test_matrix_ravel_order(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x.ravel(), [[1, 2, 3, 4, 5, 6]]) + assert_equal(x.ravel(order='F'), [[1, 4, 2, 5, 3, 6]]) + assert_equal(x.T.ravel(), [[1, 4, 2, 5, 3, 6]]) + assert_equal(x.T.ravel(order='A'), [[1, 2, 3, 4, 5, 6]]) + + def test_array_memory_sharing(self): + assert_(np.may_share_memory(self.a, self.a.ravel())) + assert_(not np.may_share_memory(self.a, self.a.flatten())) + + def test_matrix_memory_sharing(self): + assert_(np.may_share_memory(self.m, self.m.ravel())) + assert_(not np.may_share_memory(self.m, self.m.flatten())) + + def test_expand_dims_matrix(self): + # 
matrices are always 2d - so expand_dims only makes sense when the + # type is changed away from matrix. + a = np.arange(10).reshape((2, 5)).view(np.matrix) + expanded = np.expand_dims(a, axis=1) + assert_equal(expanded.ndim, 3) + assert_(not isinstance(expanded, np.matrix)) diff --git a/MLPY/Lib/site-packages/numpy/matrixlib/tests/test_interaction.py b/MLPY/Lib/site-packages/numpy/matrixlib/tests/test_interaction.py new file mode 100644 index 0000000000000000000000000000000000000000..0f24c92dcab548cdbb2adbe0b56d93e54fbdfd4f --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/matrixlib/tests/test_interaction.py @@ -0,0 +1,354 @@ +"""Tests of interaction of matrix with other parts of numpy. + +Note that tests with MaskedArray and linalg are done in separate files. +""" +import pytest + +import textwrap +import warnings + +import numpy as np +from numpy.testing import (assert_, assert_equal, assert_raises, + assert_raises_regex, assert_array_equal, + assert_almost_equal, assert_array_almost_equal) + + +def test_fancy_indexing(): + # The matrix class messes with the shape. While this is always + # weird (getitem is not used, it does not have setitem nor knows + # about fancy indexing), this tests gh-3110 + # 2018-04-29: moved here from core.tests.test_index. + m = np.matrix([[1, 2], [3, 4]]) + + assert_(isinstance(m[[0, 1, 0], :], np.matrix)) + + # gh-3110. Note the transpose currently because matrices do *not* + # support dimension fixing for fancy indexing correctly. + x = np.asmatrix(np.arange(50).reshape(5, 10)) + assert_equal(x[:2, np.array(-1)], x[:2, -1].T) + + +def test_polynomial_mapdomain(): + # test that polynomial preserved matrix subtype. + # 2018-04-29: moved here from polynomial.tests.polyutils. + dom1 = [0, 4] + dom2 = [1, 3] + x = np.matrix([dom1, dom1]) + res = np.polynomial.polyutils.mapdomain(x, dom1, dom2) + assert_(isinstance(res, np.matrix)) + + +def test_sort_matrix_none(): + # 2018-04-29: moved here from core.tests.test_multiarray + a = np.matrix([[2, 1, 0]]) + actual = np.sort(a, axis=None) + expected = np.matrix([[0, 1, 2]]) + assert_equal(actual, expected) + assert_(type(expected) is np.matrix) + + +def test_partition_matrix_none(): + # gh-4301 + # 2018-04-29: moved here from core.tests.test_multiarray + a = np.matrix([[2, 1, 0]]) + actual = np.partition(a, 1, axis=None) + expected = np.matrix([[0, 1, 2]]) + assert_equal(actual, expected) + assert_(type(expected) is np.matrix) + + +def test_dot_scalar_and_matrix_of_objects(): + # Ticket #2469 + # 2018-04-29: moved here from core.tests.test_multiarray + arr = np.matrix([1, 2], dtype=object) + desired = np.matrix([[3, 6]], dtype=object) + assert_equal(np.dot(arr, 3), desired) + assert_equal(np.dot(3, arr), desired) + + +def test_inner_scalar_and_matrix(): + # 2018-04-29: moved here from core.tests.test_multiarray + for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': + sca = np.array(3, dtype=dt)[()] + arr = np.matrix([[1, 2], [3, 4]], dtype=dt) + desired = np.matrix([[3, 6], [9, 12]], dtype=dt) + assert_equal(np.inner(arr, sca), desired) + assert_equal(np.inner(sca, arr), desired) + + +def test_inner_scalar_and_matrix_of_objects(): + # Ticket #4482 + # 2018-04-29: moved here from core.tests.test_multiarray + arr = np.matrix([1, 2], dtype=object) + desired = np.matrix([[3, 6]], dtype=object) + assert_equal(np.inner(arr, 3), desired) + assert_equal(np.inner(3, arr), desired) + + +def test_iter_allocate_output_subtype(): + # Make sure that the subtype with priority wins + # 2018-04-29: moved here from 
core.tests.test_nditer, given the + # matrix specific shape test. + + # matrix vs ndarray + a = np.matrix([[1, 2], [3, 4]]) + b = np.arange(4).reshape(2, 2).T + i = np.nditer([a, b, None], [], + [['readonly'], ['readonly'], ['writeonly', 'allocate']]) + assert_(type(i.operands[2]) is np.matrix) + assert_(type(i.operands[2]) is not np.ndarray) + assert_equal(i.operands[2].shape, (2, 2)) + + # matrix always wants things to be 2D + b = np.arange(4).reshape(1, 2, 2) + assert_raises(RuntimeError, np.nditer, [a, b, None], [], + [['readonly'], ['readonly'], ['writeonly', 'allocate']]) + # but if subtypes are disabled, the result can still work + i = np.nditer([a, b, None], [], + [['readonly'], ['readonly'], + ['writeonly', 'allocate', 'no_subtype']]) + assert_(type(i.operands[2]) is np.ndarray) + assert_(type(i.operands[2]) is not np.matrix) + assert_equal(i.operands[2].shape, (1, 2, 2)) + + +def like_function(): + # 2018-04-29: moved here from core.tests.test_numeric + a = np.matrix([[1, 2], [3, 4]]) + for like_function in np.zeros_like, np.ones_like, np.empty_like: + b = like_function(a) + assert_(type(b) is np.matrix) + + c = like_function(a, subok=False) + assert_(type(c) is not np.matrix) + + +def test_array_astype(): + # 2018-04-29: copied here from core.tests.test_api + # subok=True passes through a matrix + a = np.matrix([[0, 1, 2], [3, 4, 5]], dtype='f4') + b = a.astype('f4', subok=True, copy=False) + assert_(a is b) + + # subok=True is default, and creates a subtype on a cast + b = a.astype('i4', copy=False) + assert_equal(a, b) + assert_equal(type(b), np.matrix) + + # subok=False never returns a matrix + b = a.astype('f4', subok=False, copy=False) + assert_equal(a, b) + assert_(not (a is b)) + assert_(type(b) is not np.matrix) + + +def test_stack(): + # 2018-04-29: copied here from core.tests.test_shape_base + # check np.matrix cannot be stacked + m = np.matrix([[1, 2], [3, 4]]) + assert_raises_regex(ValueError, 'shape too large to be a matrix', + np.stack, [m, m]) + + +def test_object_scalar_multiply(): + # Tickets #2469 and #4482 + # 2018-04-29: moved here from core.tests.test_ufunc + arr = np.matrix([1, 2], dtype=object) + desired = np.matrix([[3, 6]], dtype=object) + assert_equal(np.multiply(arr, 3), desired) + assert_equal(np.multiply(3, arr), desired) + + +def test_nanfunctions_matrices(): + # Check that it works and that type and + # shape are preserved + # 2018-04-29: moved here from core.tests.test_nanfunctions + mat = np.matrix(np.eye(3)) + for f in [np.nanmin, np.nanmax]: + res = f(mat, axis=0) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (1, 3)) + res = f(mat, axis=1) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (3, 1)) + res = f(mat) + assert_(np.isscalar(res)) + # check that rows of nan are dealt with for subclasses (#4628) + mat[1] = np.nan + for f in [np.nanmin, np.nanmax]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mat, axis=0) + assert_(isinstance(res, np.matrix)) + assert_(not np.any(np.isnan(res))) + assert_(len(w) == 0) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mat, axis=1) + assert_(isinstance(res, np.matrix)) + assert_(np.isnan(res[1, 0]) and not np.isnan(res[0, 0]) + and not np.isnan(res[2, 0])) + assert_(len(w) == 1, 'no warning raised') + assert_(issubclass(w[0].category, RuntimeWarning)) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mat) + assert_(np.isscalar(res)) + 
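+            # NOTE: NaN compares unequal to everything, including itself,
+            # so the assertion below is vacuous as written;
+            # `assert_(not np.isnan(res))` would express the intent.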
assert_(res != np.nan) + assert_(len(w) == 0) + + +def test_nanfunctions_matrices_general(): + # Check that it works and that type and + # shape are preserved + # 2018-04-29: moved here from core.tests.test_nanfunctions + mat = np.matrix(np.eye(3)) + for f in (np.nanargmin, np.nanargmax, np.nansum, np.nanprod, + np.nanmean, np.nanvar, np.nanstd): + res = f(mat, axis=0) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (1, 3)) + res = f(mat, axis=1) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (3, 1)) + res = f(mat) + assert_(np.isscalar(res)) + + for f in np.nancumsum, np.nancumprod: + res = f(mat, axis=0) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (3, 3)) + res = f(mat, axis=1) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (3, 3)) + res = f(mat) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (1, 3*3)) + + +def test_average_matrix(): + # 2018-04-29: moved here from core.tests.test_function_base. + y = np.matrix(np.random.rand(5, 5)) + assert_array_equal(y.mean(0), np.average(y, 0)) + + a = np.matrix([[1, 2], [3, 4]]) + w = np.matrix([[1, 2], [3, 4]]) + + r = np.average(a, axis=0, weights=w) + assert_equal(type(r), np.matrix) + assert_equal(r, [[2.5, 10.0/3]]) + + +def test_trapz_matrix(): + # Test to make sure matrices give the same answer as ndarrays + # 2018-04-29: moved here from core.tests.test_function_base. + x = np.linspace(0, 5) + y = x * x + r = np.trapz(y, x) + mx = np.matrix(x) + my = np.matrix(y) + mr = np.trapz(my, mx) + assert_almost_equal(mr, r) + + +def test_ediff1d_matrix(): + # 2018-04-29: moved here from core.tests.test_arraysetops. + assert(isinstance(np.ediff1d(np.matrix(1)), np.matrix)) + assert(isinstance(np.ediff1d(np.matrix(1), to_begin=1), np.matrix)) + + +def test_apply_along_axis_matrix(): + # this test is particularly malicious because matrix + # refuses to become 1d + # 2018-04-29: moved here from core.tests.test_shape_base. + def double(row): + return row * 2 + + m = np.matrix([[0, 1], [2, 3]]) + expected = np.matrix([[0, 2], [4, 6]]) + + result = np.apply_along_axis(double, 0, m) + assert_(isinstance(result, np.matrix)) + assert_array_equal(result, expected) + + result = np.apply_along_axis(double, 1, m) + assert_(isinstance(result, np.matrix)) + assert_array_equal(result, expected) + + +def test_kron_matrix(): + # 2018-04-29: moved here from core.tests.test_shape_base. + a = np.ones([2, 2]) + m = np.asmatrix(a) + assert_equal(type(np.kron(a, a)), np.ndarray) + assert_equal(type(np.kron(m, m)), np.matrix) + assert_equal(type(np.kron(a, m)), np.matrix) + assert_equal(type(np.kron(m, a)), np.matrix) + + +class TestConcatenatorMatrix: + # 2018-04-29: moved here from core.tests.test_index_tricks. 
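+    # A leading 'r' or 'c' string directive makes np.r_ return a row or
+    # column np.matrix instead of a 1-D array; 'rc' is not a valid
+    # directive and is expected to raise, as exercised below.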
+ def test_matrix(self): + a = [1, 2] + b = [3, 4] + + ab_r = np.r_['r', a, b] + ab_c = np.r_['c', a, b] + + assert_equal(type(ab_r), np.matrix) + assert_equal(type(ab_c), np.matrix) + + assert_equal(np.array(ab_r), [[1, 2, 3, 4]]) + assert_equal(np.array(ab_c), [[1], [2], [3], [4]]) + + assert_raises(ValueError, lambda: np.r_['rc', a, b]) + + def test_matrix_scalar(self): + r = np.r_['r', [1, 2], 3] + assert_equal(type(r), np.matrix) + assert_equal(np.array(r), [[1, 2, 3]]) + + def test_matrix_builder(self): + a = np.array([1]) + b = np.array([2]) + c = np.array([3]) + d = np.array([4]) + actual = np.r_['a, b; c, d'] + expected = np.bmat([[a, b], [c, d]]) + + assert_equal(actual, expected) + assert_equal(type(actual), type(expected)) + + +def test_array_equal_error_message_matrix(): + # 2018-04-29: moved here from testing.tests.test_utils. + with pytest.raises(AssertionError) as exc_info: + assert_equal(np.array([1, 2]), np.matrix([1, 2])) + msg = str(exc_info.value) + msg_reference = textwrap.dedent("""\ + + Arrays are not equal + + (shapes (2,), (1, 2) mismatch) + x: array([1, 2]) + y: matrix([[1, 2]])""") + assert_equal(msg, msg_reference) + + +def test_array_almost_equal_matrix(): + # Matrix slicing keeps things 2-D, while array does not necessarily. + # See gh-8452. + # 2018-04-29: moved here from testing.tests.test_utils. + m1 = np.matrix([[1., 2.]]) + m2 = np.matrix([[1., np.nan]]) + m3 = np.matrix([[1., -np.inf]]) + m4 = np.matrix([[np.nan, np.inf]]) + m5 = np.matrix([[1., 2.], [np.nan, np.inf]]) + for assert_func in assert_array_almost_equal, assert_almost_equal: + for m in m1, m2, m3, m4, m5: + assert_func(m, m) + a = np.array(m) + assert_func(a, m) + assert_func(m, a) diff --git a/MLPY/Lib/site-packages/numpy/matrixlib/tests/test_masked_matrix.py b/MLPY/Lib/site-packages/numpy/matrixlib/tests/test_masked_matrix.py new file mode 100644 index 0000000000000000000000000000000000000000..60ab8d808def1e24a78af40dab1f773eda3fa8d1 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/matrixlib/tests/test_masked_matrix.py @@ -0,0 +1,231 @@ +import numpy as np +from numpy.testing import assert_warns +from numpy.ma.testutils import (assert_, assert_equal, assert_raises, + assert_array_equal) +from numpy.ma.core import (masked_array, masked_values, masked, allequal, + MaskType, getmask, MaskedArray, nomask, + log, add, hypot, divide) +from numpy.ma.extras import mr_ +from numpy.compat import pickle + + +class MMatrix(MaskedArray, np.matrix,): + + def __new__(cls, data, mask=nomask): + mat = np.matrix(data) + _data = MaskedArray.__new__(cls, data=mat, mask=mask) + return _data + + def __array_finalize__(self, obj): + np.matrix.__array_finalize__(self, obj) + MaskedArray.__array_finalize__(self, obj) + return + + @property + def _series(self): + _view = self.view(MaskedArray) + _view._sharedmask = False + return _view + + +class TestMaskedMatrix: + def test_matrix_indexing(self): + # Tests conversions and indexing + x1 = np.matrix([[1, 2, 3], [4, 3, 2]]) + x2 = masked_array(x1, mask=[[1, 0, 0], [0, 1, 0]]) + x3 = masked_array(x1, mask=[[0, 1, 0], [1, 0, 0]]) + x4 = masked_array(x1) + # test conversion to strings + str(x2) # raises? + repr(x2) # raises? 
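+        # the str/repr calls above are smoke tests: stringifying a masked
+        # matrix must simply not raise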
+ # tests of indexing + assert_(type(x2[1, 0]) is type(x1[1, 0])) + assert_(x1[1, 0] == x2[1, 0]) + assert_(x2[1, 1] is masked) + assert_equal(x1[0, 2], x2[0, 2]) + assert_equal(x1[0, 1:], x2[0, 1:]) + assert_equal(x1[:, 2], x2[:, 2]) + assert_equal(x1[:], x2[:]) + assert_equal(x1[1:], x3[1:]) + x1[0, 2] = 9 + x2[0, 2] = 9 + assert_equal(x1, x2) + x1[0, 1:] = 99 + x2[0, 1:] = 99 + assert_equal(x1, x2) + x2[0, 1] = masked + assert_equal(x1, x2) + x2[0, 1:] = masked + assert_equal(x1, x2) + x2[0, :] = x1[0, :] + x2[0, 1] = masked + assert_(allequal(getmask(x2), np.array([[0, 1, 0], [0, 1, 0]]))) + x3[1, :] = masked_array([1, 2, 3], [1, 1, 0]) + assert_(allequal(getmask(x3)[1], masked_array([1, 1, 0]))) + assert_(allequal(getmask(x3[1]), masked_array([1, 1, 0]))) + x4[1, :] = masked_array([1, 2, 3], [1, 1, 0]) + assert_(allequal(getmask(x4[1]), masked_array([1, 1, 0]))) + assert_(allequal(x4[1], masked_array([1, 2, 3]))) + x1 = np.matrix(np.arange(5) * 1.0) + x2 = masked_values(x1, 3.0) + assert_equal(x1, x2) + assert_(allequal(masked_array([0, 0, 0, 1, 0], dtype=MaskType), + x2.mask)) + assert_equal(3.0, x2.fill_value) + + def test_pickling_subbaseclass(self): + # Test pickling w/ a subclass of ndarray + a = masked_array(np.matrix(list(range(10))), mask=[1, 0, 1, 0, 0] * 2) + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + a_pickled = pickle.loads(pickle.dumps(a, protocol=proto)) + assert_equal(a_pickled._mask, a._mask) + assert_equal(a_pickled, a) + assert_(isinstance(a_pickled._data, np.matrix)) + + def test_count_mean_with_matrix(self): + m = masked_array(np.matrix([[1, 2], [3, 4]]), mask=np.zeros((2, 2))) + + assert_equal(m.count(axis=0).shape, (1, 2)) + assert_equal(m.count(axis=1).shape, (2, 1)) + + # Make sure broadcasting inside mean and var work + assert_equal(m.mean(axis=0), [[2., 3.]]) + assert_equal(m.mean(axis=1), [[1.5], [3.5]]) + + def test_flat(self): + # Test that flat can return items even for matrices [#4585, #4615] + # test simple access + test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) + assert_equal(test.flat[1], 2) + assert_equal(test.flat[2], masked) + assert_(np.all(test.flat[0:2] == test[0, 0:2])) + # Test flat on masked_matrices + test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) + test.flat = masked_array([3, 2, 1], mask=[1, 0, 0]) + control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0]) + assert_equal(test, control) + # Test setting + test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) + testflat = test.flat + testflat[:] = testflat[[2, 1, 0]] + assert_equal(test, control) + testflat[0] = 9 + # test that matrices keep the correct shape (#4615) + a = masked_array(np.matrix(np.eye(2)), mask=0) + b = a.flat + b01 = b[:2] + assert_equal(b01.data, np.array([[1., 0.]])) + assert_equal(b01.mask, np.array([[False, False]])) + + def test_allany_onmatrices(self): + x = np.array([[0.13, 0.26, 0.90], + [0.28, 0.33, 0.63], + [0.31, 0.87, 0.70]]) + X = np.matrix(x) + m = np.array([[True, False, False], + [False, False, False], + [True, True, False]], dtype=np.bool_) + mX = masked_array(X, mask=m) + mXbig = (mX > 0.5) + mXsmall = (mX < 0.5) + + assert_(not mXbig.all()) + assert_(mXbig.any()) + assert_equal(mXbig.all(0), np.matrix([False, False, True])) + assert_equal(mXbig.all(1), np.matrix([False, False, True]).T) + assert_equal(mXbig.any(0), np.matrix([False, False, True])) + assert_equal(mXbig.any(1), np.matrix([True, True, True]).T) + + assert_(not mXsmall.all()) + assert_(mXsmall.any()) + assert_equal(mXsmall.all(0), 
np.matrix([True, True, False])) + assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T) + assert_equal(mXsmall.any(0), np.matrix([True, True, False])) + assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T) + + def test_compressed(self): + a = masked_array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0]) + b = a.compressed() + assert_equal(b, a) + assert_(isinstance(b, np.matrix)) + a[0, 0] = masked + b = a.compressed() + assert_equal(b, [[2, 3, 4]]) + + def test_ravel(self): + a = masked_array(np.matrix([1, 2, 3, 4, 5]), mask=[[0, 1, 0, 0, 0]]) + aravel = a.ravel() + assert_equal(aravel.shape, (1, 5)) + assert_equal(aravel._mask.shape, a.shape) + + def test_view(self): + # Test view w/ flexible dtype + iterator = list(zip(np.arange(10), np.random.rand(10))) + data = np.array(iterator) + a = masked_array(iterator, dtype=[('a', float), ('b', float)]) + a.mask[0] = (1, 0) + test = a.view((float, 2), np.matrix) + assert_equal(test, data) + assert_(isinstance(test, np.matrix)) + assert_(not isinstance(test, MaskedArray)) + + +class TestSubclassing: + # Test suite for masked subclasses of ndarray. + + def setup(self): + x = np.arange(5, dtype='float') + mx = MMatrix(x, mask=[0, 1, 0, 0, 0]) + self.data = (x, mx) + + def test_maskedarray_subclassing(self): + # Tests subclassing MaskedArray + (x, mx) = self.data + assert_(isinstance(mx._data, np.matrix)) + + def test_masked_unary_operations(self): + # Tests masked_unary_operation + (x, mx) = self.data + with np.errstate(divide='ignore'): + assert_(isinstance(log(mx), MMatrix)) + assert_equal(log(x), np.log(x)) + + def test_masked_binary_operations(self): + # Tests masked_binary_operation + (x, mx) = self.data + # Result should be a MMatrix + assert_(isinstance(add(mx, mx), MMatrix)) + assert_(isinstance(add(mx, x), MMatrix)) + # Result should work + assert_equal(add(mx, x), mx+x) + assert_(isinstance(add(mx, mx)._data, np.matrix)) + with assert_warns(DeprecationWarning): + assert_(isinstance(add.outer(mx, mx), MMatrix)) + assert_(isinstance(hypot(mx, mx), MMatrix)) + assert_(isinstance(hypot(mx, x), MMatrix)) + + def test_masked_binary_operations2(self): + # Tests domained_masked_binary_operation + (x, mx) = self.data + xmx = masked_array(mx.data.__array__(), mask=mx.mask) + assert_(isinstance(divide(mx, mx), MMatrix)) + assert_(isinstance(divide(mx, x), MMatrix)) + assert_equal(divide(mx, mx), divide(xmx, xmx)) + +class TestConcatenator: + # Tests for mr_, the equivalent of r_ for masked arrays. + + def test_matrix_builder(self): + assert_raises(np.ma.MAError, lambda: mr_['1, 2; 3, 4']) + + def test_matrix(self): + # Test consistency with unmasked version. If we ever deprecate + # matrix, this test should either still pass, or both actual and + # expected should fail to be build. 
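+        # mr_['r', ...] is the masked analogue of np.r_['r', ...]: both
+        # build a 1x3 row matrix, with the masked version wrapped in a
+        # MaskedArray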
+ actual = mr_['r', 1, 2, 3] + expected = np.ma.array(np.r_['r', 1, 2, 3]) + assert_array_equal(actual, expected) + + # outer type is masked array, inner type is matrix + assert_equal(type(actual), type(expected)) + assert_equal(type(actual.data), type(expected.data)) diff --git a/MLPY/Lib/site-packages/numpy/matrixlib/tests/test_matrix_linalg.py b/MLPY/Lib/site-packages/numpy/matrixlib/tests/test_matrix_linalg.py new file mode 100644 index 0000000000000000000000000000000000000000..abfa0183f24778c4cec37674b1210c148f48c04b --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/matrixlib/tests/test_matrix_linalg.py @@ -0,0 +1,93 @@ +""" Test functions for linalg module using the matrix class.""" +import numpy as np + +from numpy.linalg.tests.test_linalg import ( + LinalgCase, apply_tag, TestQR as _TestQR, LinalgTestCase, + _TestNorm2D, _TestNormDoubleBase, _TestNormSingleBase, _TestNormInt64Base, + SolveCases, InvCases, EigvalsCases, EigCases, SVDCases, CondCases, + PinvCases, DetCases, LstsqCases) + + +CASES = [] + +# square test cases +CASES += apply_tag('square', [ + LinalgCase("0x0_matrix", + np.empty((0, 0), dtype=np.double).view(np.matrix), + np.empty((0, 1), dtype=np.double).view(np.matrix), + tags={'size-0'}), + LinalgCase("matrix_b_only", + np.array([[1., 2.], [3., 4.]]), + np.matrix([2., 1.]).T), + LinalgCase("matrix_a_and_b", + np.matrix([[1., 2.], [3., 4.]]), + np.matrix([2., 1.]).T), +]) + +# hermitian test-cases +CASES += apply_tag('hermitian', [ + LinalgCase("hmatrix_a_and_b", + np.matrix([[1., 2.], [2., 1.]]), + None), +]) +# No need to make generalized or strided cases for matrices. + + +class MatrixTestCase(LinalgTestCase): + TEST_CASES = CASES + + +class TestSolveMatrix(SolveCases, MatrixTestCase): + pass + + +class TestInvMatrix(InvCases, MatrixTestCase): + pass + + +class TestEigvalsMatrix(EigvalsCases, MatrixTestCase): + pass + + +class TestEigMatrix(EigCases, MatrixTestCase): + pass + + +class TestSVDMatrix(SVDCases, MatrixTestCase): + pass + + +class TestCondMatrix(CondCases, MatrixTestCase): + pass + + +class TestPinvMatrix(PinvCases, MatrixTestCase): + pass + + +class TestDetMatrix(DetCases, MatrixTestCase): + pass + + +class TestLstsqMatrix(LstsqCases, MatrixTestCase): + pass + + +class _TestNorm2DMatrix(_TestNorm2D): + array = np.matrix + + +class TestNormDoubleMatrix(_TestNorm2DMatrix, _TestNormDoubleBase): + pass + + +class TestNormSingleMatrix(_TestNorm2DMatrix, _TestNormSingleBase): + pass + + +class TestNormInt64Matrix(_TestNorm2DMatrix, _TestNormInt64Base): + pass + + +class TestQRMatrix(_TestQR): + array = np.matrix diff --git a/MLPY/Lib/site-packages/numpy/matrixlib/tests/test_multiarray.py b/MLPY/Lib/site-packages/numpy/matrixlib/tests/test_multiarray.py new file mode 100644 index 0000000000000000000000000000000000000000..71198be5e56370462d5a4d3bb4b5a86b7202a052 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/matrixlib/tests/test_multiarray.py @@ -0,0 +1,16 @@ +import numpy as np +from numpy.testing import assert_, assert_equal, assert_array_equal + +class TestView: + def test_type(self): + x = np.array([1, 2, 3]) + assert_(isinstance(x.view(np.matrix), np.matrix)) + + def test_keywords(self): + x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)]) + # We must be specific about the endianness here: + y = x.view(dtype='>> from numpy.polynomial import Chebyshev + >>> c = Chebyshev.fit(xdata, ydata, deg=1) + +is preferred over the `chebyshev.chebfit` function from the +``np.polynomial.chebyshev`` module:: + + >>> from numpy.polynomial.chebyshev import 
chebfit + >>> c = chebfit(xdata, ydata, deg=1) + +See :doc:`routines.polynomials.classes` for more details. + +Convenience Classes +=================== + +The following lists the various constants and methods common to all of +the classes representing the various kinds of polynomials. In the following, +the term ``Poly`` represents any one of the convenience classes (e.g. +`~polynomial.Polynomial`, `~chebyshev.Chebyshev`, `~hermite.Hermite`, etc.) +while the lowercase ``p`` represents an **instance** of a polynomial class. + +Constants +--------- + +- ``Poly.domain`` -- Default domain +- ``Poly.window`` -- Default window +- ``Poly.basis_name`` -- String used to represent the basis +- ``Poly.maxpower`` -- Maximum value ``n`` such that ``p**n`` is allowed +- ``Poly.nickname`` -- String used in printing + +Creation +-------- + +Methods for creating polynomial instances. + +- ``Poly.basis(degree)`` -- Basis polynomial of given degree +- ``Poly.identity()`` -- ``p`` where ``p(x) = x`` for all ``x`` +- ``Poly.fit(x, y, deg)`` -- ``p`` of degree ``deg`` with coefficients + determined by the least-squares fit to the data ``x``, ``y`` +- ``Poly.fromroots(roots)`` -- ``p`` with specified roots +- ``p.copy()`` -- Create a copy of ``p`` + +Conversion +---------- + +Methods for converting a polynomial instance of one kind to another. + +- ``p.cast(Poly)`` -- Convert ``p`` to instance of kind ``Poly`` +- ``p.convert(Poly)`` -- Convert ``p`` to instance of kind ``Poly`` or map + between ``domain`` and ``window`` + +Calculus +-------- +- ``p.deriv()`` -- Take the derivative of ``p`` +- ``p.integ()`` -- Integrate ``p`` + +Validation +---------- +- ``Poly.has_samecoef(p1, p2)`` -- Check if coefficients match +- ``Poly.has_samedomain(p1, p2)`` -- Check if domains match +- ``Poly.has_sametype(p1, p2)`` -- Check if types match +- ``Poly.has_samewindow(p1, p2)`` -- Check if windows match + +Misc +---- +- ``p.linspace()`` -- Return ``x, p(x)`` at equally-spaced points in ``domain`` +- ``p.mapparms()`` -- Return the parameters for the linear mapping between + ``domain`` and ``window``. +- ``p.roots()`` -- Return the roots of `p`. +- ``p.trim()`` -- Remove trailing coefficients. +- ``p.cutdeg(degree)`` -- Truncate p to given degree +- ``p.truncate(size)`` -- Truncate p to given size + +""" +from .polynomial import Polynomial +from .chebyshev import Chebyshev +from .legendre import Legendre +from .hermite import Hermite +from .hermite_e import HermiteE +from .laguerre import Laguerre + +__all__ = [ + "set_default_printstyle", + "polynomial", "Polynomial", + "chebyshev", "Chebyshev", + "legendre", "Legendre", + "hermite", "Hermite", + "hermite_e", "HermiteE", + "laguerre", "Laguerre", +] + + +def set_default_printstyle(style): + """ + Set the default format for the string representation of polynomials. + + Values for ``style`` must be valid inputs to ``__format__``, i.e. 'ascii' + or 'unicode'. + + Parameters + ---------- + style : str + Format string for default printing style. Must be either 'ascii' or + 'unicode'. + + Notes + ----- + The default format depends on the platform: 'unicode' is used on + Unix-based systems and 'ascii' on Windows. This determination is based on + default font support for the unicode superscript and subscript ranges. 
+ + Examples + -------- + >>> p = np.polynomial.Polynomial([1, 2, 3]) + >>> c = np.polynomial.Chebyshev([1, 2, 3]) + >>> np.polynomial.set_default_printstyle('unicode') + >>> print(p) + 1.0 + 2.0·x¹ + 3.0·x² + >>> print(c) + 1.0 + 2.0·T₁(x) + 3.0·T₂(x) + >>> np.polynomial.set_default_printstyle('ascii') + >>> print(p) + 1.0 + 2.0 x**1 + 3.0 x**2 + >>> print(c) + 1.0 + 2.0 T_1(x) + 3.0 T_2(x) + >>> # Formatting supercedes all class/package-level defaults + >>> print(f"{p:unicode}") + 1.0 + 2.0·x¹ + 3.0·x² + """ + if style not in ('unicode', 'ascii'): + raise ValueError( + f"Unsupported format string '{style}'. Valid options are 'ascii' " + f"and 'unicode'" + ) + _use_unicode = True + if style == 'ascii': + _use_unicode = False + from ._polybase import ABCPolyBase + ABCPolyBase._use_unicode = _use_unicode + + +from numpy._pytesttester import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/MLPY/Lib/site-packages/numpy/polynomial/__init__.pyi b/MLPY/Lib/site-packages/numpy/polynomial/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..aa4c01696cca12b681b4635511321bb68f872a4b --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/polynomial/__init__.pyi @@ -0,0 +1,20 @@ +from typing import List + +from numpy.polynomial import ( + chebyshev as chebyshev, + hermite as hermite, + hermite_e as hermite_e, + laguerre as laguerre, + legendre as legendre, + polynomial as polynomial, +) +from numpy.polynomial.chebyshev import Chebyshev as Chebyshev +from numpy.polynomial.hermite import Hermite as Hermite +from numpy.polynomial.hermite_e import HermiteE as HermiteE +from numpy.polynomial.laguerre import Laguerre as Laguerre +from numpy.polynomial.legendre import Legendre as Legendre +from numpy.polynomial.polynomial import Polynomial as Polynomial + +__all__: List[str] + +def set_default_printstyle(style): ... 
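For reference, the workflow that the ``numpy.polynomial`` package docstring above recommends looks like this in practice; a minimal sketch, assuming made-up sample data (``xdata``, ``ydata``) and illustrative variable names:

    import numpy as np
    from numpy.polynomial import Chebyshev, Polynomial

    # made-up noisy samples of y = x**2 on [-1, 1]
    xdata = np.linspace(-1, 1, 50)
    ydata = xdata**2 + 0.1 * np.random.default_rng(0).normal(size=50)

    c = Chebyshev.fit(xdata, ydata, deg=2)  # least-squares fit, Chebyshev basis
    p = c.convert(kind=Polynomial)          # same curve, power-basis coefficients
    dc = c.deriv()                          # derivative, still a Chebyshev instance
    off, scl = c.mapparms()                 # linear map between domain and window
    xs, ys = c.linspace(9)                  # nine equally spaced (x, c(x)) pairs

Working through the class API keeps the domain/window mapping consistent across fit, conversion, and calculus, which is the conditioning advantage the docstring cites over the module-level ``chebfit``.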
diff --git a/MLPY/Lib/site-packages/numpy/polynomial/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/polynomial/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..38129ad5a1b54d7b6a9a96b3258589b45cb190bc Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/polynomial/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/polynomial/__pycache__/_polybase.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/polynomial/__pycache__/_polybase.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a690235140506c8329534f904bd8847cb180dd5 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/polynomial/__pycache__/_polybase.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/polynomial/__pycache__/chebyshev.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/polynomial/__pycache__/chebyshev.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d0ff3737e2c14ab8b92c3a37cf9ad7f3b5c35b3 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/polynomial/__pycache__/chebyshev.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/polynomial/__pycache__/hermite.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/polynomial/__pycache__/hermite.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d82dcdf75521cc4926e2feb050b314bdfbe0422 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/polynomial/__pycache__/hermite.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/polynomial/__pycache__/hermite_e.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/polynomial/__pycache__/hermite_e.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a6419f1ec178fa37616e339aaca95aa57771337 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/polynomial/__pycache__/hermite_e.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/polynomial/__pycache__/laguerre.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/polynomial/__pycache__/laguerre.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..272cc61490d73a248cd79f915bde509ef6ea50d6 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/polynomial/__pycache__/laguerre.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/polynomial/__pycache__/legendre.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/polynomial/__pycache__/legendre.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39625cc286a95219b0beaed99cc09a169205d9a0 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/polynomial/__pycache__/legendre.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/polynomial/__pycache__/polynomial.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/polynomial/__pycache__/polynomial.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..871fb6f2e1013e4125c7fd3ec53c77e590e50b01 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/polynomial/__pycache__/polynomial.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/polynomial/__pycache__/polyutils.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/polynomial/__pycache__/polyutils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4127ca711ab9902fe1181f61e9305c4fe6fe39bd Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/polynomial/__pycache__/polyutils.cpython-39.pyc differ diff --git 
a/MLPY/Lib/site-packages/numpy/polynomial/__pycache__/setup.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/polynomial/__pycache__/setup.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..abe5c3a4aae3f7e71be9b7a5b76d0f6efd433ffe Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/polynomial/__pycache__/setup.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/polynomial/_polybase.py b/MLPY/Lib/site-packages/numpy/polynomial/_polybase.py new file mode 100644 index 0000000000000000000000000000000000000000..9bc0c81bca6db9134bc5b0c968765ca26581fc0b --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/polynomial/_polybase.py @@ -0,0 +1,1141 @@ +""" +Abstract base class for the various polynomial Classes. + +The ABCPolyBase class provides the methods needed to implement the common API +for the various polynomial classes. It operates as a mixin, but uses the +abc module from the stdlib, hence it is only available for Python >= 2.6. + +""" +import os +import abc +import numbers + +import numpy as np +from . import polyutils as pu + +__all__ = ['ABCPolyBase'] + +class ABCPolyBase(abc.ABC): + """An abstract base class for immutable series classes. + + ABCPolyBase provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' along with the + methods listed below. + + .. versionadded:: 1.9.0 + + Parameters + ---------- + coef : array_like + Series coefficients in order of increasing degree, i.e., + ``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``, where + ``P_i`` is the basis polynomials of degree ``i``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is the derived class domain. + window : (2,) array_like, optional + Window, see domain for its use. The default value is the + derived class window. + + Attributes + ---------- + coef : (N,) ndarray + Series coefficients in order of increasing degree. + domain : (2,) ndarray + Domain that is mapped to window. + window : (2,) ndarray + Window that domain is mapped to. + + Class Attributes + ---------------- + maxpower : int + Maximum power allowed, i.e., the largest number ``n`` such that + ``p(x)**n`` is allowed. This is to limit runaway polynomial size. + domain : (2,) ndarray + Default domain of the class. + window : (2,) ndarray + Default window of the class. + + """ + + # Not hashable + __hash__ = None + + # Opt out of numpy ufuncs and Python ops with ndarray subclasses. + __array_ufunc__ = None + + # Limit runaway size. T_n^m has degree n*m + maxpower = 100 + + # Unicode character mappings for improved __str__ + _superscript_mapping = str.maketrans({ + "0": "⁰", + "1": "¹", + "2": "²", + "3": "³", + "4": "⁴", + "5": "⁵", + "6": "⁶", + "7": "⁷", + "8": "⁸", + "9": "⁹" + }) + _subscript_mapping = str.maketrans({ + "0": "₀", + "1": "₁", + "2": "₂", + "3": "₃", + "4": "₄", + "5": "₅", + "6": "₆", + "7": "₇", + "8": "₈", + "9": "₉" + }) + # Some fonts don't support full unicode character ranges necessary for + # the full set of superscripts and subscripts, including common/default + # fonts in Windows shells/terminals. Therefore, default to ascii-only + # printing on windows. 
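+    # This default can be overridden per call via __format__ (e.g.
+    # format(p, 'ascii')) or package-wide via set_default_printstyle().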
+ _use_unicode = not os.name == 'nt' + + @property + @abc.abstractmethod + def domain(self): + pass + + @property + @abc.abstractmethod + def window(self): + pass + + @property + @abc.abstractmethod + def basis_name(self): + pass + + @staticmethod + @abc.abstractmethod + def _add(c1, c2): + pass + + @staticmethod + @abc.abstractmethod + def _sub(c1, c2): + pass + + @staticmethod + @abc.abstractmethod + def _mul(c1, c2): + pass + + @staticmethod + @abc.abstractmethod + def _div(c1, c2): + pass + + @staticmethod + @abc.abstractmethod + def _pow(c, pow, maxpower=None): + pass + + @staticmethod + @abc.abstractmethod + def _val(x, c): + pass + + @staticmethod + @abc.abstractmethod + def _int(c, m, k, lbnd, scl): + pass + + @staticmethod + @abc.abstractmethod + def _der(c, m, scl): + pass + + @staticmethod + @abc.abstractmethod + def _fit(x, y, deg, rcond, full): + pass + + @staticmethod + @abc.abstractmethod + def _line(off, scl): + pass + + @staticmethod + @abc.abstractmethod + def _roots(c): + pass + + @staticmethod + @abc.abstractmethod + def _fromroots(r): + pass + + def has_samecoef(self, other): + """Check if coefficients match. + + .. versionadded:: 1.6.0 + + Parameters + ---------- + other : class instance + The other class must have the ``coef`` attribute. + + Returns + ------- + bool : boolean + True if the coefficients are the same, False otherwise. + + """ + if len(self.coef) != len(other.coef): + return False + elif not np.all(self.coef == other.coef): + return False + else: + return True + + def has_samedomain(self, other): + """Check if domains match. + + .. versionadded:: 1.6.0 + + Parameters + ---------- + other : class instance + The other class must have the ``domain`` attribute. + + Returns + ------- + bool : boolean + True if the domains are the same, False otherwise. + + """ + return np.all(self.domain == other.domain) + + def has_samewindow(self, other): + """Check if windows match. + + .. versionadded:: 1.6.0 + + Parameters + ---------- + other : class instance + The other class must have the ``window`` attribute. + + Returns + ------- + bool : boolean + True if the windows are the same, False otherwise. + + """ + return np.all(self.window == other.window) + + def has_sametype(self, other): + """Check if types match. + + .. versionadded:: 1.7.0 + + Parameters + ---------- + other : object + Class instance. + + Returns + ------- + bool : boolean + True if other is same class as self + + """ + return isinstance(other, self.__class__) + + def _get_coefficients(self, other): + """Interpret other as polynomial coefficients. + + The `other` argument is checked to see if it is of the same + class as self with identical domain and window. If so, + return its coefficients, otherwise return `other`. + + .. versionadded:: 1.9.0 + + Parameters + ---------- + other : anything + Object to be checked. + + Returns + ------- + coef + The coefficients of`other` if it is a compatible instance, + of ABCPolyBase, otherwise `other`. + + Raises + ------ + TypeError + When `other` is an incompatible instance of ABCPolyBase. 
+ + """ + if isinstance(other, ABCPolyBase): + if not isinstance(other, self.__class__): + raise TypeError("Polynomial types differ") + elif not np.all(self.domain == other.domain): + raise TypeError("Domains differ") + elif not np.all(self.window == other.window): + raise TypeError("Windows differ") + return other.coef + return other + + def __init__(self, coef, domain=None, window=None): + [coef] = pu.as_series([coef], trim=False) + self.coef = coef + + if domain is not None: + [domain] = pu.as_series([domain], trim=False) + if len(domain) != 2: + raise ValueError("Domain has wrong number of elements.") + self.domain = domain + + if window is not None: + [window] = pu.as_series([window], trim=False) + if len(window) != 2: + raise ValueError("Window has wrong number of elements.") + self.window = window + + def __repr__(self): + coef = repr(self.coef)[6:-1] + domain = repr(self.domain)[6:-1] + window = repr(self.window)[6:-1] + name = self.__class__.__name__ + return f"{name}({coef}, domain={domain}, window={window})" + + def __format__(self, fmt_str): + if fmt_str == '': + return self.__str__() + if fmt_str not in ('ascii', 'unicode'): + raise ValueError( + f"Unsupported format string '{fmt_str}' passed to " + f"{self.__class__}.__format__. Valid options are " + f"'ascii' and 'unicode'" + ) + if fmt_str == 'ascii': + return self._generate_string(self._str_term_ascii) + return self._generate_string(self._str_term_unicode) + + def __str__(self): + if self._use_unicode: + return self._generate_string(self._str_term_unicode) + return self._generate_string(self._str_term_ascii) + + def _generate_string(self, term_method): + """ + Generate the full string representation of the polynomial, using + ``term_method`` to generate each polynomial term. + """ + # Get configuration for line breaks + linewidth = np.get_printoptions().get('linewidth', 75) + if linewidth < 1: + linewidth = 1 + out = f"{self.coef[0]}" + for i, coef in enumerate(self.coef[1:]): + out += " " + power = str(i + 1) + # Polynomial coefficient + # The coefficient array can be an object array with elements that + # will raise a TypeError with >= 0 (e.g. strings or Python + # complex). In this case, represent the coeficient as-is. + try: + if coef >= 0: + next_term = f"+ {coef}" + else: + next_term = f"- {-coef}" + except TypeError: + next_term = f"+ {coef}" + # Polynomial term + next_term += term_method(power, "x") + # Length of the current line with next term added + line_len = len(out.split('\n')[-1]) + len(next_term) + # If not the last term in the polynomial, it will be two + # characters longer due to the +/- with the next term + if i < len(self.coef[1:]) - 1: + line_len += 2 + # Handle linebreaking + if line_len >= linewidth: + next_term = next_term.replace(" ", "\n", 1) + out += next_term + return out + + @classmethod + def _str_term_unicode(cls, i, arg_str): + """ + String representation of single polynomial term using unicode + characters for superscripts and subscripts. + """ + if cls.basis_name is None: + raise NotImplementedError( + "Subclasses must define either a basis_name, or override " + "_str_term_unicode(cls, i, arg_str)" + ) + return (f"·{cls.basis_name}{i.translate(cls._subscript_mapping)}" + f"({arg_str})") + + @classmethod + def _str_term_ascii(cls, i, arg_str): + """ + String representation of a single polynomial term using ** and _ to + represent superscripts and subscripts, respectively. 
+ """ + if cls.basis_name is None: + raise NotImplementedError( + "Subclasses must define either a basis_name, or override " + "_str_term_ascii(cls, i, arg_str)" + ) + return f" {cls.basis_name}_{i}({arg_str})" + + @classmethod + def _repr_latex_term(cls, i, arg_str, needs_parens): + if cls.basis_name is None: + raise NotImplementedError( + "Subclasses must define either a basis name, or override " + "_repr_latex_term(i, arg_str, needs_parens)") + # since we always add parens, we don't care if the expression needs them + return f"{{{cls.basis_name}}}_{{{i}}}({arg_str})" + + @staticmethod + def _repr_latex_scalar(x): + # TODO: we're stuck with disabling math formatting until we handle + # exponents in this function + return r'\text{{{}}}'.format(x) + + def _repr_latex_(self): + # get the scaled argument string to the basis functions + off, scale = self.mapparms() + if off == 0 and scale == 1: + term = 'x' + needs_parens = False + elif scale == 1: + term = f"{self._repr_latex_scalar(off)} + x" + needs_parens = True + elif off == 0: + term = f"{self._repr_latex_scalar(scale)}x" + needs_parens = True + else: + term = ( + f"{self._repr_latex_scalar(off)} + " + f"{self._repr_latex_scalar(scale)}x" + ) + needs_parens = True + + mute = r"\color{{LightGray}}{{{}}}".format + + parts = [] + for i, c in enumerate(self.coef): + # prevent duplication of + and - signs + if i == 0: + coef_str = f"{self._repr_latex_scalar(c)}" + elif not isinstance(c, numbers.Real): + coef_str = f" + ({self._repr_latex_scalar(c)})" + elif not np.signbit(c): + coef_str = f" + {self._repr_latex_scalar(c)}" + else: + coef_str = f" - {self._repr_latex_scalar(-c)}" + + # produce the string for the term + term_str = self._repr_latex_term(i, term, needs_parens) + if term_str == '1': + part = coef_str + else: + part = rf"{coef_str}\,{term_str}" + + if c == 0: + part = mute(part) + + parts.append(part) + + if parts: + body = ''.join(parts) + else: + # in case somehow there are no coefficients at all + body = '0' + + return rf"$x \mapsto {body}$" + + + + # Pickle and copy + + def __getstate__(self): + ret = self.__dict__.copy() + ret['coef'] = self.coef.copy() + ret['domain'] = self.domain.copy() + ret['window'] = self.window.copy() + return ret + + def __setstate__(self, dict): + self.__dict__ = dict + + # Call + + def __call__(self, arg): + off, scl = pu.mapparms(self.domain, self.window) + arg = off + scl*arg + return self._val(arg, self.coef) + + def __iter__(self): + return iter(self.coef) + + def __len__(self): + return len(self.coef) + + # Numeric properties. + + def __neg__(self): + return self.__class__(-self.coef, self.domain, self.window) + + def __pos__(self): + return self + + def __add__(self, other): + othercoef = self._get_coefficients(other) + try: + coef = self._add(self.coef, othercoef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window) + + def __sub__(self, other): + othercoef = self._get_coefficients(other) + try: + coef = self._sub(self.coef, othercoef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window) + + def __mul__(self, other): + othercoef = self._get_coefficients(other) + try: + coef = self._mul(self.coef, othercoef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window) + + def __truediv__(self, other): + # there is no true divide if the rhs is not a Number, although it + # could return the first n elements of an infinite series. 
+ # It is hard to see where n would come from, though. + if not isinstance(other, numbers.Number) or isinstance(other, bool): + raise TypeError( + f"unsupported types for true division: " + f"'{type(self)}', '{type(other)}'" + ) + return self.__floordiv__(other) + + def __floordiv__(self, other): + res = self.__divmod__(other) + if res is NotImplemented: + return res + return res[0] + + def __mod__(self, other): + res = self.__divmod__(other) + if res is NotImplemented: + return res + return res[1] + + def __divmod__(self, other): + othercoef = self._get_coefficients(other) + try: + quo, rem = self._div(self.coef, othercoef) + except ZeroDivisionError: + raise + except Exception: + return NotImplemented + quo = self.__class__(quo, self.domain, self.window) + rem = self.__class__(rem, self.domain, self.window) + return quo, rem + + def __pow__(self, other): + coef = self._pow(self.coef, other, maxpower=self.maxpower) + res = self.__class__(coef, self.domain, self.window) + return res + + def __radd__(self, other): + try: + coef = self._add(other, self.coef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window) + + def __rsub__(self, other): + try: + coef = self._sub(other, self.coef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window) + + def __rmul__(self, other): + try: + coef = self._mul(other, self.coef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window) + + def __rdiv__(self, other): + # set to __floordiv__ /. + return self.__rfloordiv__(other) + + def __rtruediv__(self, other): + # An instance of ABCPolyBase is not considered a + # Number. + return NotImplemented + + def __rfloordiv__(self, other): + res = self.__rdivmod__(other) + if res is NotImplemented: + return res + return res[0] + + def __rmod__(self, other): + res = self.__rdivmod__(other) + if res is NotImplemented: + return res + return res[1] + + def __rdivmod__(self, other): + try: + quo, rem = self._div(other, self.coef) + except ZeroDivisionError: + raise + except Exception: + return NotImplemented + quo = self.__class__(quo, self.domain, self.window) + rem = self.__class__(rem, self.domain, self.window) + return quo, rem + + def __eq__(self, other): + res = (isinstance(other, self.__class__) and + np.all(self.domain == other.domain) and + np.all(self.window == other.window) and + (self.coef.shape == other.coef.shape) and + np.all(self.coef == other.coef)) + return res + + def __ne__(self, other): + return not self.__eq__(other) + + # + # Extra methods. + # + + def copy(self): + """Return a copy. + + Returns + ------- + new_series : series + Copy of self. + + """ + return self.__class__(self.coef, self.domain, self.window) + + def degree(self): + """The degree of the series. + + .. versionadded:: 1.5.0 + + Returns + ------- + degree : int + Degree of the series, one less than the number of coefficients. + + """ + return len(self) - 1 + + def cutdeg(self, deg): + """Truncate series to the given degree. + + Reduce the degree of the series to `deg` by discarding the + high order terms. If `deg` is greater than the current degree a + copy of the current series is returned. This can be useful in least + squares where the coefficients of the high degree terms may be very + small. + + .. versionadded:: 1.5.0 + + Parameters + ---------- + deg : non-negative int + The series is reduced to degree `deg` by discarding the high + order terms. The value of `deg` must be a non-negative integer. 
+ + Returns + ------- + new_series : series + New instance of series with reduced degree. + + """ + return self.truncate(deg + 1) + + def trim(self, tol=0): + """Remove trailing coefficients + + Remove trailing coefficients until a coefficient is reached whose + absolute value greater than `tol` or the beginning of the series is + reached. If all the coefficients would be removed the series is set + to ``[0]``. A new series instance is returned with the new + coefficients. The current instance remains unchanged. + + Parameters + ---------- + tol : non-negative number. + All trailing coefficients less than `tol` will be removed. + + Returns + ------- + new_series : series + New instance of series with trimmed coefficients. + + """ + coef = pu.trimcoef(self.coef, tol) + return self.__class__(coef, self.domain, self.window) + + def truncate(self, size): + """Truncate series to length `size`. + + Reduce the series to length `size` by discarding the high + degree terms. The value of `size` must be a positive integer. This + can be useful in least squares where the coefficients of the + high degree terms may be very small. + + Parameters + ---------- + size : positive int + The series is reduced to length `size` by discarding the high + degree terms. The value of `size` must be a positive integer. + + Returns + ------- + new_series : series + New instance of series with truncated coefficients. + + """ + isize = int(size) + if isize != size or isize < 1: + raise ValueError("size must be a positive integer") + if isize >= len(self.coef): + coef = self.coef + else: + coef = self.coef[:isize] + return self.__class__(coef, self.domain, self.window) + + def convert(self, domain=None, kind=None, window=None): + """Convert series to a different kind and/or domain and/or window. + + Parameters + ---------- + domain : array_like, optional + The domain of the converted series. If the value is None, + the default domain of `kind` is used. + kind : class, optional + The polynomial series type class to which the current instance + should be converted. If kind is None, then the class of the + current instance is used. + window : array_like, optional + The window of the converted series. If the value is None, + the default window of `kind` is used. + + Returns + ------- + new_series : series + The returned class can be of different type than the current + instance and/or have a different domain and/or different + window. + + Notes + ----- + Conversion between domains and class types can result in + numerically ill defined series. + + """ + if kind is None: + kind = self.__class__ + if domain is None: + domain = kind.domain + if window is None: + window = kind.window + return self(kind.identity(domain, window=window)) + + def mapparms(self): + """Return the mapping parameters. + + The returned values define a linear map ``off + scl*x`` that is + applied to the input arguments before the series is evaluated. The + map depends on the ``domain`` and ``window``; if the current + ``domain`` is equal to the ``window`` the resulting map is the + identity. If the coefficients of the series instance are to be + used by themselves outside this class, then the linear function + must be substituted for the ``x`` in the standard representation of + the base polynomials. + + Returns + ------- + off, scl : float or complex + The mapping function is defined by ``off + scl*x``. 
+ + Notes + ----- + If the current domain is the interval ``[l1, r1]`` and the window + is ``[l2, r2]``, then the linear mapping function ``L`` is + defined by the equations:: + + L(l1) = l2 + L(r1) = r2 + + """ + return pu.mapparms(self.domain, self.window) + + def integ(self, m=1, k=[], lbnd=None): + """Integrate. + + Return a series instance that is the definite integral of the + current series. + + Parameters + ---------- + m : non-negative int + The number of integrations to perform. + k : array_like + Integration constants. The first constant is applied to the + first integration, the second to the second, and so on. The + list of values must less than or equal to `m` in length and any + missing values are set to zero. + lbnd : Scalar + The lower bound of the definite integral. + + Returns + ------- + new_series : series + A new series representing the integral. The domain is the same + as the domain of the integrated series. + + """ + off, scl = self.mapparms() + if lbnd is None: + lbnd = 0 + else: + lbnd = off + scl*lbnd + coef = self._int(self.coef, m, k, lbnd, 1./scl) + return self.__class__(coef, self.domain, self.window) + + def deriv(self, m=1): + """Differentiate. + + Return a series instance of that is the derivative of the current + series. + + Parameters + ---------- + m : non-negative int + Find the derivative of order `m`. + + Returns + ------- + new_series : series + A new series representing the derivative. The domain is the same + as the domain of the differentiated series. + + """ + off, scl = self.mapparms() + coef = self._der(self.coef, m, scl) + return self.__class__(coef, self.domain, self.window) + + def roots(self): + """Return the roots of the series polynomial. + + Compute the roots for the series. Note that the accuracy of the + roots decrease the further outside the domain they lie. + + Returns + ------- + roots : ndarray + Array containing the roots of the series. + + """ + roots = self._roots(self.coef) + return pu.mapdomain(roots, self.window, self.domain) + + def linspace(self, n=100, domain=None): + """Return x, y values at equally spaced points in domain. + + Returns the x, y values at `n` linearly spaced points across the + domain. Here y is the value of the polynomial at the points x. By + default the domain is the same as that of the series instance. + This method is intended mostly as a plotting aid. + + .. versionadded:: 1.5.0 + + Parameters + ---------- + n : int, optional + Number of point pairs to return. The default value is 100. + domain : {None, array_like}, optional + If not None, the specified domain is used instead of that of + the calling instance. It should be of the form ``[beg,end]``. + The default is None which case the class domain is used. + + Returns + ------- + x, y : ndarray + x is equal to linspace(self.domain[0], self.domain[1], n) and + y is the series evaluated at element of x. + + """ + if domain is None: + domain = self.domain + x = np.linspace(domain[0], domain[1], n) + y = self(x) + return x, y + + @classmethod + def fit(cls, x, y, deg, domain=None, rcond=None, full=False, w=None, + window=None): + """Least squares fit to data. + + Return a series instance that is the least squares fit to the data + `y` sampled at `x`. The domain of the returned instance can be + specified and this will often result in a superior fit with less + chance of ill conditioning. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. 
+ y : array_like, shape (M,) + y-coordinates of the M sample points ``(x[i], y[i])``. + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. For NumPy versions >= 1.11.0 a list of integers specifying the + degrees of the terms to include may be used instead. + domain : {None, [beg, end], []}, optional + Domain to use for the returned series. If ``None``, + then a minimal domain that covers the points `x` is chosen. If + ``[]`` the class domain is used. The default value was the + class domain in NumPy 1.4 and ``None`` in later versions. + The ``[]`` option was added in numpy 1.5.0. + rcond : float, optional + Relative condition number of the fit. Singular values smaller + than this relative to the largest singular value will be + ignored. The default value is len(x)*eps, where eps is the + relative precision of the float type, about 2e-16 in most + cases. + full : bool, optional + Switch determining nature of return value. When it is False + (the default) just the coefficients are returned, when True + diagnostic information from the singular value decomposition is + also returned. + w : array_like, shape (M,), optional + Weights. If not None the contribution of each point + ``(x[i],y[i])`` to the fit is weighted by ``w[i]``. Ideally the + weights are chosen so that the errors of the products + ``w[i]*y[i]`` all have the same variance. The default value is + None. + + .. versionadded:: 1.5.0 + window : {[beg, end]}, optional + Window to use for the returned series. The default + value is the default class domain + + .. versionadded:: 1.6.0 + + Returns + ------- + new_series : series + A series that represents the least squares fit to the data and + has the domain and window specified in the call. If the + coefficients for the unscaled and unshifted basis polynomials are + of interest, do ``new_series.convert().coef``. + + [resid, rank, sv, rcond] : list + These values are only returned if `full` = True + + resid -- sum of squared residuals of the least squares fit + rank -- the numerical rank of the scaled Vandermonde matrix + sv -- singular values of the scaled Vandermonde matrix + rcond -- value of `rcond`. + + For more details, see `linalg.lstsq`. + + """ + if domain is None: + domain = pu.getdomain(x) + elif type(domain) is list and len(domain) == 0: + domain = cls.domain + + if window is None: + window = cls.window + + xnew = pu.mapdomain(x, domain, window) + res = cls._fit(xnew, y, deg, w=w, rcond=rcond, full=full) + if full: + [coef, status] = res + return cls(coef, domain=domain, window=window), status + else: + coef = res + return cls(coef, domain=domain, window=window) + + @classmethod + def fromroots(cls, roots, domain=[], window=None): + """Return series instance that has the specified roots. + + Returns a series representing the product + ``(x - r[0])*(x - r[1])*...*(x - r[n-1])``, where ``r`` is a + list of roots. + + Parameters + ---------- + roots : array_like + List of roots. + domain : {[], None, array_like}, optional + Domain for the resulting series. If None the domain is the + interval from the smallest root to the largest. If [] the + domain is the class domain. The default is []. + window : {None, array_like}, optional + Window for the returned series. If None the class window is + used. The default is None. + + Returns + ------- + new_series : series + Series with the specified roots. 
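+
+        Examples
+        --------
+        Illustrative, using the ``Polynomial`` subclass:
+
+        >>> from numpy.polynomial import Polynomial
+        >>> Polynomial.fromroots([-1, 1]).coef  # (x + 1)*(x - 1) = x**2 - 1
+        array([-1.,  0.,  1.])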
+ + """ + [roots] = pu.as_series([roots], trim=False) + if domain is None: + domain = pu.getdomain(roots) + elif type(domain) is list and len(domain) == 0: + domain = cls.domain + + if window is None: + window = cls.window + + deg = len(roots) + off, scl = pu.mapparms(domain, window) + rnew = off + scl*roots + coef = cls._fromroots(rnew) / scl**deg + return cls(coef, domain=domain, window=window) + + @classmethod + def identity(cls, domain=None, window=None): + """Identity function. + + If ``p`` is the returned series, then ``p(x) == x`` for all + values of x. + + Parameters + ---------- + domain : {None, array_like}, optional + If given, the array must be of the form ``[beg, end]``, where + ``beg`` and ``end`` are the endpoints of the domain. If None is + given then the class domain is used. The default is None. + window : {None, array_like}, optional + If given, the resulting array must be if the form + ``[beg, end]``, where ``beg`` and ``end`` are the endpoints of + the window. If None is given then the class window is used. The + default is None. + + Returns + ------- + new_series : series + Series of representing the identity. + + """ + if domain is None: + domain = cls.domain + if window is None: + window = cls.window + off, scl = pu.mapparms(window, domain) + coef = cls._line(off, scl) + return cls(coef, domain, window) + + @classmethod + def basis(cls, deg, domain=None, window=None): + """Series basis polynomial of degree `deg`. + + Returns the series representing the basis polynomial of degree `deg`. + + .. versionadded:: 1.7.0 + + Parameters + ---------- + deg : int + Degree of the basis polynomial for the series. Must be >= 0. + domain : {None, array_like}, optional + If given, the array must be of the form ``[beg, end]``, where + ``beg`` and ``end`` are the endpoints of the domain. If None is + given then the class domain is used. The default is None. + window : {None, array_like}, optional + If given, the resulting array must be if the form + ``[beg, end]``, where ``beg`` and ``end`` are the endpoints of + the window. If None is given then the class window is used. The + default is None. + + Returns + ------- + new_series : series + A series with the coefficient of the `deg` term set to one and + all others zero. + + """ + if domain is None: + domain = cls.domain + if window is None: + window = cls.window + ideg = int(deg) + + if ideg != deg or ideg < 0: + raise ValueError("deg must be non-negative integer") + return cls([0]*ideg + [1], domain, window) + + @classmethod + def cast(cls, series, domain=None, window=None): + """Convert series to series of this class. + + The `series` is expected to be an instance of some polynomial + series of one of the types supported by by the numpy.polynomial + module, but could be some other class that supports the convert + method. + + .. versionadded:: 1.7.0 + + Parameters + ---------- + series : series + The series instance to be converted. + domain : {None, array_like}, optional + If given, the array must be of the form ``[beg, end]``, where + ``beg`` and ``end`` are the endpoints of the domain. If None is + given then the class domain is used. The default is None. + window : {None, array_like}, optional + If given, the resulting array must be if the form + ``[beg, end]``, where ``beg`` and ``end`` are the endpoints of + the window. If None is given then the class window is used. The + default is None. + + Returns + ------- + new_series : series + A series of the same kind as the calling class and equal to + `series` when evaluated. 
+ + See Also + -------- + convert : similar instance method + + """ + if domain is None: + domain = cls.domain + if window is None: + window = cls.window + return series.convert(domain, cls, window) diff --git a/MLPY/Lib/site-packages/numpy/polynomial/_polybase.pyi b/MLPY/Lib/site-packages/numpy/polynomial/_polybase.pyi new file mode 100644 index 0000000000000000000000000000000000000000..136f2391303cef06ab4272ca2370918be0b9ae88 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/polynomial/_polybase.pyi @@ -0,0 +1,69 @@ +import abc +from typing import Any, List, ClassVar + +__all__: List[str] + +class ABCPolyBase(abc.ABC): + __hash__: ClassVar[None] # type: ignore[assignment] + __array_ufunc__: ClassVar[None] + maxpower: ClassVar[int] + coef: Any + @property + @abc.abstractmethod + def domain(self): ... + @property + @abc.abstractmethod + def window(self): ... + @property + @abc.abstractmethod + def basis_name(self): ... + def has_samecoef(self, other): ... + def has_samedomain(self, other): ... + def has_samewindow(self, other): ... + def has_sametype(self, other): ... + def __init__(self, coef, domain=..., window=...): ... + def __format__(self, fmt_str): ... + def __call__(self, arg): ... + def __iter__(self): ... + def __len__(self): ... + def __neg__(self): ... + def __pos__(self): ... + def __add__(self, other): ... + def __sub__(self, other): ... + def __mul__(self, other): ... + def __truediv__(self, other): ... + def __floordiv__(self, other): ... + def __mod__(self, other): ... + def __divmod__(self, other): ... + def __pow__(self, other): ... + def __radd__(self, other): ... + def __rsub__(self, other): ... + def __rmul__(self, other): ... + def __rdiv__(self, other): ... + def __rtruediv__(self, other): ... + def __rfloordiv__(self, other): ... + def __rmod__(self, other): ... + def __rdivmod__(self, other): ... + def __eq__(self, other): ... + def __ne__(self, other): ... + def copy(self): ... + def degree(self): ... + def cutdeg(self, deg): ... + def trim(self, tol=...): ... + def truncate(self, size): ... + def convert(self, domain=..., kind=..., window=...): ... + def mapparms(self): ... + def integ(self, m=..., k = ..., lbnd=...): ... + def deriv(self, m=...): ... + def roots(self): ... + def linspace(self, n=..., domain=...): ... + @classmethod + def fit(cls, x, y, deg, domain=..., rcond=..., full=..., w=..., window=...): ... + @classmethod + def fromroots(cls, roots, domain = ..., window=...): ... + @classmethod + def identity(cls, domain=..., window=...): ... + @classmethod + def basis(cls, deg, domain=..., window=...): ... + @classmethod + def cast(cls, series, domain=..., window=...): ... diff --git a/MLPY/Lib/site-packages/numpy/polynomial/chebyshev.py b/MLPY/Lib/site-packages/numpy/polynomial/chebyshev.py new file mode 100644 index 0000000000000000000000000000000000000000..498234bbd486c8b8a18796e6942ac256b6e0ddd1 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/polynomial/chebyshev.py @@ -0,0 +1,2074 @@ +""" +==================================================== +Chebyshev Series (:mod:`numpy.polynomial.chebyshev`) +==================================================== + +This module provides a number of objects (mostly functions) useful for +dealing with Chebyshev series, including a `Chebyshev` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Classes +------- + +.. 
autosummary:: + :toctree: generated/ + + Chebyshev + + +Constants +--------- + +.. autosummary:: + :toctree: generated/ + + chebdomain + chebzero + chebone + chebx + +Arithmetic +---------- + +.. autosummary:: + :toctree: generated/ + + chebadd + chebsub + chebmulx + chebmul + chebdiv + chebpow + chebval + chebval2d + chebval3d + chebgrid2d + chebgrid3d + +Calculus +-------- + +.. autosummary:: + :toctree: generated/ + + chebder + chebint + +Misc Functions +-------------- + +.. autosummary:: + :toctree: generated/ + + chebfromroots + chebroots + chebvander + chebvander2d + chebvander3d + chebgauss + chebweight + chebcompanion + chebfit + chebpts1 + chebpts2 + chebtrim + chebline + cheb2poly + poly2cheb + chebinterpolate + +See also +-------- +`numpy.polynomial` + +Notes +----- +The implementations of multiplication, division, integration, and +differentiation use the algebraic identities [1]_: + +.. math :: + T_n(x) = \\frac{z^n + z^{-n}}{2} \\\\ + z\\frac{dx}{dz} = \\frac{z - z^{-1}}{2}. + +where + +.. math :: x = \\frac{z + z^{-1}}{2}. + +These identities allow a Chebyshev series to be expressed as a finite, +symmetric Laurent series. In this module, this sort of Laurent series +is referred to as a "z-series." + +References +---------- +.. [1] A. T. Benjamin, et al., "Combinatorial Trigonometry with Chebyshev + Polynomials," *Journal of Statistical Planning and Inference 14*, 2008 + (https://web.archive.org/web/20080221202153/https://www.math.hmc.edu/~benjamin/papers/CombTrig.pdf, pg. 4) + +""" +import numpy as np +import numpy.linalg as la +from numpy.core.multiarray import normalize_axis_index + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'chebzero', 'chebone', 'chebx', 'chebdomain', 'chebline', 'chebadd', + 'chebsub', 'chebmulx', 'chebmul', 'chebdiv', 'chebpow', 'chebval', + 'chebder', 'chebint', 'cheb2poly', 'poly2cheb', 'chebfromroots', + 'chebvander', 'chebfit', 'chebtrim', 'chebroots', 'chebpts1', + 'chebpts2', 'Chebyshev', 'chebval2d', 'chebval3d', 'chebgrid2d', + 'chebgrid3d', 'chebvander2d', 'chebvander3d', 'chebcompanion', + 'chebgauss', 'chebweight', 'chebinterpolate'] + +chebtrim = pu.trimcoef + +# +# A collection of functions for manipulating z-series. These are private +# functions and do minimal error checking. +# + +def _cseries_to_zseries(c): + """Convert Chebyshev series to z-series. + + Convert a Chebyshev series to the equivalent z-series. The result is + never an empty array. The dtype of the return is the same as that of + the input. No checks are run on the arguments as this routine is for + internal use. + + Parameters + ---------- + c : 1-D ndarray + Chebyshev coefficients, ordered from low to high + + Returns + ------- + zs : 1-D ndarray + Odd length symmetric z-series, ordered from low to high. + + """ + n = c.size + zs = np.zeros(2*n-1, dtype=c.dtype) + zs[n-1:] = c/2 + return zs + zs[::-1] + + +def _zseries_to_cseries(zs): + """Convert z-series to a Chebyshev series. + + Convert a z-series to the equivalent Chebyshev series. The result is + never an empty array. The dtype of the return is the same as that of + the input. No checks are run on the arguments as this routine is for + internal use. + + Parameters + ---------- + zs : 1-D ndarray + Odd length symmetric z-series, ordered from low to high. + + Returns + ------- + c : 1-D ndarray + Chebyshev coefficients, ordered from low to high. + + """ + n = (zs.size + 1)//2 + c = zs[n-1:].copy() + c[1:n] *= 2 + return c + + +def _zseries_mul(z1, z2): + """Multiply two z-series.
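The z-series representation described above can be checked from the outside. This sketch re-implements the two private converters locally (under the same conventions as `_cseries_to_zseries`) and verifies that convolving z-series reproduces `chebmul`; it is illustrative only, not part of the vendored file:

```python
import numpy as np
from numpy.polynomial import chebyshev as C

def to_z(c):
    # Chebyshev coefficients -> symmetric z-series (same scheme as
    # the private _cseries_to_zseries above).
    c = np.asarray(c, dtype=float)
    zs = np.zeros(2*len(c) - 1)
    zs[len(c) - 1:] = c/2
    return zs + zs[::-1]

def from_z(zs):
    # Symmetric z-series -> Chebyshev coefficients.
    n = (len(zs) + 1)//2
    c = zs[n - 1:].copy()
    c[1:] *= 2
    return c

c1, c2 = [1, 2, 3], [3, 2, 1]
prod = from_z(np.convolve(to_z(c1), to_z(c2)))  # multiplication is convolution
print(prod)                   # [ 6.5 12.  12.   4.   1.5]
print(C.chebmul(c1, c2))      # same result
```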
+ + Multiply two z-series to produce a z-series. + + Parameters + ---------- + z1, z2 : 1-D ndarray + The arrays must be 1-D but this is not checked. + + Returns + ------- + product : 1-D ndarray + The product z-series. + + Notes + ----- + This is simply convolution. If symmetric/anti-symmetric z-series are + denoted by S/A then the following rules apply: + + S*S, A*A -> S + S*A, A*S -> A + + """ + return np.convolve(z1, z2) + + +def _zseries_div(z1, z2): + """Divide the first z-series by the second. + + Divide `z1` by `z2` and return the quotient and remainder as z-series. + Warning: this implementation only applies when both z1 and z2 have the + same symmetry, which is sufficient for present purposes. + + Parameters + ---------- + z1, z2 : 1-D ndarray + The arrays must be 1-D and have the same symmetry, but this is not + checked. + + Returns + ------- + + (quotient, remainder) : 1-D ndarrays + Quotient and remainder as z-series. + + Notes + ----- + This is not the same as polynomial division on account of the desired form + of the remainder. If symmetric/anti-symmetric z-series are denoted by S/A + then the following rules apply: + + S/S -> S,S + A/A -> S,A + + The restriction to types of the same symmetry could be fixed but seems like + unneeded generality. There is no natural form for the remainder in the case + where there is no symmetry. + + """ + z1 = z1.copy() + z2 = z2.copy() + lc1 = len(z1) + lc2 = len(z2) + if lc2 == 1: + z1 /= z2 + return z1, z1[:1]*0 + elif lc1 < lc2: + return z1[:1]*0, z1 + else: + dlen = lc1 - lc2 + scl = z2[0] + z2 /= scl + quo = np.empty(dlen + 1, dtype=z1.dtype) + i = 0 + j = dlen + while i < j: + r = z1[i] + quo[i] = z1[i] + quo[dlen - i] = r + tmp = r*z2 + z1[i:i+lc2] -= tmp + z1[j:j+lc2] -= tmp + i += 1 + j -= 1 + r = z1[i] + quo[i] = r + tmp = r*z2 + z1[i:i+lc2] -= tmp + quo /= scl + rem = z1[i+1:i-1+lc2].copy() + return quo, rem + + +def _zseries_der(zs): + """Differentiate a z-series. + + The derivative is with respect to x, not z. This is achieved using the + chain rule and the value of dx/dz given in the module notes. + + Parameters + ---------- + zs : z-series + The z-series to differentiate. + + Returns + ------- + derivative : z-series + The derivative + + Notes + ----- + The zseries for x (ns) has been multiplied by two in order to avoid + using floats that are incompatible with Decimal and likely other + specialized scalar types. This scaling has been compensated by + multiplying the value of zs by two also so that the two cancels in the + division. + + """ + n = len(zs)//2 + ns = np.array([-1, 0, 1], dtype=zs.dtype) + zs *= np.arange(-n, n+1)*2 + d, r = _zseries_div(zs, ns) + return d + + +def _zseries_int(zs): + """Integrate a z-series. + + The integral is with respect to x, not z. This is achieved by a change + of variable using dx/dz given in the module notes. + + Parameters + ---------- + zs : z-series + The z-series to integrate + + Returns + ------- + integral : z-series + The indefinite integral + + Notes + ----- + The zseries for x (ns) has been multiplied by two in order to avoid + using floats that are incompatible with Decimal and likely other + specialized scalar types. This scaling has been compensated by + dividing the resulting zs by two. 
+ + """ + n = 1 + len(zs)//2 + ns = np.array([-1, 0, 1], dtype=zs.dtype) + zs = _zseries_mul(zs, ns) + div = np.arange(-n, n+1)*2 + zs[:n] /= div[:n] + zs[n+1:] /= div[n+1:] + zs[n] = 0 + return zs + +# +# Chebyshev series functions +# + + +def poly2cheb(pol): + """ + Convert a polynomial to a Chebyshev series. + + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Chebyshev series, ordered + from lowest to highest degree. + + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Chebyshev + series. + + See Also + -------- + cheb2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy import polynomial as P + >>> p = P.Polynomial(range(4)) + >>> p + Polynomial([0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1]) + >>> c = p.convert(kind=P.Chebyshev) + >>> c + Chebyshev([1. , 3.25, 1. , 0.75], domain=[-1., 1.], window=[-1., 1.]) + >>> P.chebyshev.poly2cheb(range(4)) + array([1. , 3.25, 1. , 0.75]) + + """ + [pol] = pu.as_series([pol]) + deg = len(pol) - 1 + res = 0 + for i in range(deg, -1, -1): + res = chebadd(chebmulx(res), pol[i]) + return res + + +def cheb2poly(c): + """ + Convert a Chebyshev series to a polynomial. + + Convert an array representing the coefficients of a Chebyshev series, + ordered from lowest degree to highest, to an array of the coefficients + of the equivalent polynomial (relative to the "standard" basis) ordered + from lowest to highest degree. + + Parameters + ---------- + c : array_like + 1-D array containing the Chebyshev series coefficients, ordered + from lowest order term to highest. + + Returns + ------- + pol : ndarray + 1-D array containing the coefficients of the equivalent polynomial + (relative to the "standard" basis) ordered from lowest order term + to highest. + + See Also + -------- + poly2cheb + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy import polynomial as P + >>> c = P.Chebyshev(range(4)) + >>> c + Chebyshev([0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1]) + >>> p = c.convert(kind=P.Polynomial) + >>> p + Polynomial([-2., -8., 4., 12.], domain=[-1., 1.], window=[-1., 1.]) + >>> P.chebyshev.cheb2poly(range(4)) + array([-2., -8., 4., 12.]) + + """ + from .polynomial import polyadd, polysub, polymulx + + [c] = pu.as_series([c]) + n = len(c) + if n < 3: + return c + else: + c0 = c[-2] + c1 = c[-1] + # i is the current degree of c1 + for i in range(n - 1, 1, -1): + tmp = c0 + c0 = polysub(c[i - 2], c1) + c1 = polyadd(tmp, polymulx(c1)*2) + return polyadd(c0, polymulx(c1)) + + +# +# These are constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Chebyshev default domain. +chebdomain = np.array([-1, 1]) + +# Chebyshev coefficients representing zero. +chebzero = np.array([0]) + +# Chebyshev coefficients representing one. +chebone = np.array([1]) + +# Chebyshev coefficients representing the identity x. +chebx = np.array([0, 1]) + + +def chebline(off, scl): + """ + Chebyshev series whose graph is a straight line. 
+ + Parameters + ---------- + off, scl : scalars + The specified line is given by ``off + scl*x``. + + Returns + ------- + y : ndarray + This module's representation of the Chebyshev series for + ``off + scl*x``. + + See Also + -------- + numpy.polynomial.polynomial.polyline + numpy.polynomial.legendre.legline + numpy.polynomial.laguerre.lagline + numpy.polynomial.hermite.hermline + numpy.polynomial.hermite_e.hermeline + + Examples + -------- + >>> import numpy.polynomial.chebyshev as C + >>> C.chebline(3,2) + array([3, 2]) + >>> C.chebval(-3, C.chebline(3,2)) # should be -3 + -3.0 + + """ + if scl != 0: + return np.array([off, scl]) + else: + return np.array([off]) + + +def chebfromroots(roots): + """ + Generate a Chebyshev series with given roots. + + The function returns the coefficients of the polynomial + + .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), + + in Chebyshev form, where the `r_n` are the roots specified in `roots`. + If a zero has multiplicity n, then it must appear in `roots` n times. + For instance, if 2 is a root of multiplicity three and 3 is a root of + multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The + roots can appear in any order. + + If the returned coefficients are `c`, then + + .. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x) + + The coefficient of the last term is not generally 1 for monic + polynomials in Chebyshev form. + + Parameters + ---------- + roots : array_like + Sequence containing the roots. + + Returns + ------- + out : ndarray + 1-D array of coefficients. If all roots are real then `out` is a + real array, if some of the roots are complex, then `out` is complex + even if all the coefficients in the result are real (see Examples + below). + + See Also + -------- + numpy.polynomial.polynomial.polyfromroots + numpy.polynomial.legendre.legfromroots + numpy.polynomial.laguerre.lagfromroots + numpy.polynomial.hermite.hermfromroots + numpy.polynomial.hermite_e.hermefromroots + + Examples + -------- + >>> import numpy.polynomial.chebyshev as C + >>> C.chebfromroots((-1,0,1)) # x^3 - x relative to the standard basis + array([ 0. , -0.25, 0. , 0.25]) + >>> j = complex(0,1) + >>> C.chebfromroots((-j,j)) # x^2 + 1 relative to the standard basis + array([1.5+0.j, 0. +0.j, 0.5+0.j]) + + """ + return pu._fromroots(chebline, chebmul, roots) + + +def chebadd(c1, c2): + """ + Add one Chebyshev series to another. + + Returns the sum of two Chebyshev series `c1` + `c2`. The arguments + are sequences of coefficients ordered from lowest order term to + highest, i.e., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Chebyshev series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the Chebyshev series of their sum. + + See Also + -------- + chebsub, chebmulx, chebmul, chebdiv, chebpow + + Notes + ----- + Unlike multiplication, division, etc., the sum of two Chebyshev series + is a Chebyshev series (without having to "reproject" the result onto + the basis set) so addition, just like that of "standard" polynomials, + is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> C.chebadd(c1,c2) + array([4., 4., 4.]) + + """ + return pu._add(c1, c2) + + +def chebsub(c1, c2): + """ + Subtract one Chebyshev series from another. + + Returns the difference of two Chebyshev series `c1` - `c2`. 
The + sequences of coefficients are from lowest order term to highest, i.e., + [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Chebyshev series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Chebyshev series coefficients representing their difference. + + See Also + -------- + chebadd, chebmulx, chebmul, chebdiv, chebpow + + Notes + ----- + Unlike multiplication, division, etc., the difference of two Chebyshev + series is a Chebyshev series (without having to "reproject" the result + onto the basis set) so subtraction, just like that of "standard" + polynomials, is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> C.chebsub(c1,c2) + array([-2., 0., 2.]) + >>> C.chebsub(c2,c1) # -C.chebsub(c1,c2) + array([ 2., 0., -2.]) + + """ + return pu._sub(c1, c2) + + +def chebmulx(c): + """Multiply a Chebyshev series by x. + + Multiply the polynomial `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of Chebyshev series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + Notes + ----- + + .. versionadded:: 1.5.0 + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> C.chebmulx([1,2,3]) + array([1. , 2.5, 1. , 1.5]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0]*0 + prd[1] = c[0] + if len(c) > 1: + tmp = c[1:]/2 + prd[2:] = tmp + prd[0:-2] += tmp + return prd + + +def chebmul(c1, c2): + """ + Multiply one Chebyshev series by another. + + Returns the product of two Chebyshev series `c1` * `c2`. The arguments + are sequences of coefficients, from lowest order "term" to highest, + e.g., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Chebyshev series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Chebyshev series coefficients representing their product. + + See Also + -------- + chebadd, chebsub, chebmulx, chebdiv, chebpow + + Notes + ----- + In general, the (polynomial) product of two C-series results in terms + that are not in the Chebyshev polynomial basis set. Thus, to express + the product as a C-series, it is typically necessary to "reproject" + the product onto said basis set, which typically produces + "unintuitive" (but correct) results; see Examples section below. + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> C.chebmul(c1,c2) # multiplication requires "reprojection" + array([ 6.5, 12. , 12. , 4. , 1.5]) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + z1 = _cseries_to_zseries(c1) + z2 = _cseries_to_zseries(c2) + prd = _zseries_mul(z1, z2) + ret = _zseries_to_cseries(prd) + return pu.trimseq(ret) + + +def chebdiv(c1, c2): + """ + Divide one Chebyshev series by another. + + Returns the quotient-with-remainder of two Chebyshev series + `c1` / `c2`. The arguments are sequences of coefficients from lowest + order "term" to highest, e.g., [1,2,3] represents the series + ``T_0 + 2*T_1 + 3*T_2``.
+ + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Chebyshev series coefficients ordered from low to + high. + + Returns + ------- + [quo, rem] : ndarrays + Of Chebyshev series coefficients representing the quotient and + remainder. + + See Also + -------- + chebadd, chebsub, chebmulx, chebmul, chebpow + + Notes + ----- + In general, the (polynomial) division of one C-series by another + results in quotient and remainder terms that are not in the Chebyshev + polynomial basis set. Thus, to express these results as C-series, it + is typically necessary to "reproject" the results onto said basis + set, which typically produces "unintuitive" (but correct) results; + see Examples section below. + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> C.chebdiv(c1,c2) # quotient "intuitive," remainder not + (array([3.]), array([-8., -4.])) + >>> c2 = (0,1,2,3) + >>> C.chebdiv(c2,c1) # neither "intuitive" + (array([0., 2.]), array([-2., -4.])) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if c2[-1] == 0: + raise ZeroDivisionError() + + # note: this is more efficient than `pu._div(chebmul, c1, c2)` + lc1 = len(c1) + lc2 = len(c2) + if lc1 < lc2: + return c1[:1]*0, c1 + elif lc2 == 1: + return c1/c2[-1], c1[:1]*0 + else: + z1 = _cseries_to_zseries(c1) + z2 = _cseries_to_zseries(c2) + quo, rem = _zseries_div(z1, z2) + quo = pu.trimseq(_zseries_to_cseries(quo)) + rem = pu.trimseq(_zseries_to_cseries(rem)) + return quo, rem + + +def chebpow(c, pow, maxpower=16): + """Raise a Chebyshev series to a power. + + Returns the Chebyshev series `c` raised to the power `pow`. The + argument `c` is a sequence of coefficients ordered from low to high. + i.e., [1,2,3] is the series ``T_0 + 2*T_1 + 3*T_2.`` + + Parameters + ---------- + c : array_like + 1-D array of Chebyshev series coefficients ordered from low to + high. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Chebyshev series of power. + + See Also + -------- + chebadd, chebsub, chebmulx, chebmul, chebdiv + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> C.chebpow([1, 2, 3, 4], 2) + array([15.5, 22. , 16. , ..., 12.5, 12. , 8. ]) + + """ + # note: this is more efficient than `pu._pow(chebmul, c1, c2)`, as it + # avoids converting between z and c series repeatedly + + # c is a trimmed copy + [c] = pu.as_series([c]) + power = int(pow) + if power != pow or power < 0: + raise ValueError("Power must be a non-negative integer.") + elif maxpower is not None and power > maxpower: + raise ValueError("Power is too large") + elif power == 0: + return np.array([1], dtype=c.dtype) + elif power == 1: + return c + else: + # This can be made more efficient by using powers of two + # in the usual way. + zs = _cseries_to_zseries(c) + prd = zs + for i in range(2, power + 1): + prd = np.convolve(prd, zs) + return _zseries_to_cseries(prd) + + +def chebder(c, m=1, scl=1, axis=0): + """ + Differentiate a Chebyshev series. + + Returns the Chebyshev series coefficients `c` differentiated `m` times + along `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). 
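A small consistency check for `chebdiv` and `chebpow` (usage sketch, not part of the vendored file): the quotient and remainder recombine to the dividend, and squaring agrees with self-multiplication:

```python
import numpy as np
from numpy.polynomial import chebyshev as C

c1, c2 = (0, 1, 2, 3), (1, 2, 3)
quo, rem = C.chebdiv(c1, c2)
# quotient*divisor + remainder reproduces the dividend
print(np.allclose(C.chebadd(C.chebmul(quo, c2), rem), c1))   # True
# chebpow(c, 2) agrees with chebmul(c, c)
print(np.allclose(C.chebpow(c2, 2), C.chebmul(c2, c2)))      # True
```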
The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*T_0 + 2*T_1 + 3*T_2`` + while [[1,2],[1,2]] represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) + + 2*T_0(x)*T_1(y) + 2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is + ``y``. + + Parameters + ---------- + c : array_like + Array of Chebyshev series coefficients. If c is multidimensional + the different axes correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + der : ndarray + Chebyshev series of the derivative. + + See Also + -------- + chebint + + Notes + ----- + In general, the result of differentiating a C-series needs to be + "reprojected" onto the C-series basis set. Thus, typically, the + result of this function is "unintuitive," albeit correct; see Examples + section below. + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c = (1,2,3,4) + >>> C.chebder(c) + array([14., 12., 24.]) + >>> C.chebder(c,3) + array([96.]) + >>> C.chebder(c,scl=-1) + array([-14., -12., -24.]) + >>> C.chebder(c,2,-1) + array([12., 96.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + cnt = pu._deprecate_as_int(m, "the order of derivation") + iaxis = pu._deprecate_as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + c = c[:1]*0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 2, -1): + der[j - 1] = (2*j)*c[j] + c[j - 2] += (j*c[j])/(j - 2) + if n > 1: + der[1] = 4*c[2] + der[0] = c[1] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Chebyshev series. + + Returns the Chebyshev series coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``T_0 + 2*T_1 + 3*T_2`` while [[1,2],[1,2]] + represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) + 2*T_0(x)*T_1(y) + + 2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + Parameters + ---------- + c : array_like + Array of Chebyshev series coefficients. If c is multidimensional + the different axes correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s).
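The derivative recurrence implemented above can be cross-checked against differentiation in the power basis (usage sketch, not part of the vendored file):

```python
import numpy as np
from numpy.polynomial import chebyshev as C
from numpy.polynomial import polynomial as P

c = (1, 2, 3, 4)
dc = C.chebder(c)                       # [14., 12., 24.]
# Differentiating in the power basis gives the same polynomial.
print(np.allclose(C.cheb2poly(dc), P.polyder(C.cheb2poly(c))))   # True
```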
The value of the first integral at zero + is the first value in the list, the value of the second integral + at zero is the second value, etc. If ``k == []`` (the default), + all constants are set to zero. If ``m == 1``, a single scalar can + be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + S : ndarray + C-series coefficients of the integral. + + Raises + ------ + ValueError + If ``m < 1``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + chebder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a`- perhaps not what one would have first thought. + + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c = (1,2,3) + >>> C.chebint(c) + array([ 0.5, -0.5, 0.5, 0.5]) + >>> C.chebint(c,3) + array([ 0.03125 , -0.1875 , 0.04166667, -0.05208333, 0.01041667, # may vary + 0.00625 ]) + >>> C.chebint(c, k=3) + array([ 3.5, -0.5, 0.5, 0.5]) + >>> C.chebint(c,lbnd=-2) + array([ 8.5, -0.5, 0.5, 0.5]) + >>> C.chebint(c,scl=-2) + array([-1., 1., -1., -1.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if not np.iterable(k): + k = [k] + cnt = pu._deprecate_as_int(m, "the order of integration") + iaxis = pu._deprecate_as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + k = list(k) + [0]*(cnt - len(k)) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) + tmp[0] = c[0]*0 + tmp[1] = c[0] + if n > 1: + tmp[2] = c[1]/4 + for j in range(2, n): + tmp[j + 1] = c[j]/(2*(j + 1)) + tmp[j - 1] -= c[j]/(2*(j - 1)) + tmp[0] += k[i] - chebval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def chebval(x, c, tensor=True): + """ + Evaluate a Chebyshev series at points x. + + If `c` is of length `n + 1`, this function returns the value: + + .. math:: p(x) = c_0 * T_0(x) + c_1 * T_1(x) + ... + c_n * T_n(x) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. 
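A round-trip usage sketch for `chebint` (not part of the vendored file): because `chebint` pins the value of the antiderivative at `lbnd` to the constant `k`, integrating a derivative with the right constant recovers the original series:

```python
import numpy as np
from numpy.polynomial import chebyshev as C

c = (1, 2, 3)
# Integrate the derivative, pinning the value at lbnd=0 to the
# original series value there; this recovers c exactly.
back = C.chebint(C.chebder(c), lbnd=0, k=C.chebval(0, c))
print(np.allclose(back, c))   # True
```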
If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + .. versionadded:: 1.7.0 + + Returns + ------- + values : ndarray, algebra_like + The shape of the return value is described above. + + See Also + -------- + chebval2d, chebgrid2d, chebval3d, chebgrid3d + + Notes + ----- + The evaluation uses Clenshaw recursion, aka synthetic division. + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,)*x.ndim) + + if len(c) == 1: + c0 = c[0] + c1 = 0 + elif len(c) == 2: + c0 = c[0] + c1 = c[1] + else: + x2 = 2*x + c0 = c[-2] + c1 = c[-1] + for i in range(3, len(c) + 1): + tmp = c0 + c0 = c[-i] - c1 + c1 = tmp + c1*x2 + return c0 + c1*x + + +def chebval2d(x, y, c): + """ + Evaluate a 2-D Chebyshev series at points (x, y). + + This function returns the values: + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * T_i(x) * T_j(y) + + The parameters `x` and `y` are converted to arrays only if they are + tuples or lists, otherwise they are treated as scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` is a 1-D array a one is implicitly appended to its shape to make + it 2-D. The shape of the result will be c.shape[2:] + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points `(x, y)`, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in ``c[i,j]``. If `c` has + dimension greater than 2 the remaining indices enumerate multiple + sets of coefficients.
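The Clenshaw recursion used by `chebval` can be written out directly. This sketch mirrors the loop above for a scalar `x` and a coefficient sequence of length at least 2; it is an illustration, not an alternative API:

```python
import numpy as np
from numpy.polynomial import chebyshev as C

def clenshaw(x, c):
    # Same recursion as the loop in chebval; assumes len(c) >= 2.
    c0, c1 = c[-2], c[-1]
    for ck in c[-3::-1]:               # remaining coefficients, high to low
        c0, c1 = ck - c1, c0 + c1*(2*x)
    return c0 + c1*x

coef = [1.0, 2.0, 3.0, 4.0]
print(clenshaw(0.3, coef))             # -4.028
print(C.chebval(0.3, coef))            # identical value
```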
+ + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional Chebyshev series at points formed + from pairs of corresponding values from `x` and `y`. + + See Also + -------- + chebval, chebgrid2d, chebval3d, chebgrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + return pu._valnd(chebval, c, x, y) + + +def chebgrid2d(x, y, c): + """ + Evaluate a 2-D Chebyshev series on the Cartesian product of x and y. + + This function returns the values: + + .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * T_i(a) * T_j(b), + + where the points `(a, b)` consist of all pairs formed by taking + `a` from `x` and `b` from `y`. The resulting points form a grid with + `x` in the first dimension and `y` in the second. + + The parameters `x` and `y` are converted to arrays only if they are + tuples or lists, otherwise they are treated as scalars. In either + case, either `x` and `y` or their elements must support multiplication + and addition both with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape + y.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points in the + Cartesian product of `x` and `y`. If `x` or `y` is a list or + tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j is contained in `c[i,j]`. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional Chebyshev series at points in the + Cartesian product of `x` and `y`. + + See Also + -------- + chebval, chebval2d, chebval3d, chebgrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + return pu._gridnd(chebval, c, x, y) + + +def chebval3d(x, y, z, c): + """ + Evaluate a 3-D Chebyshev series at points (x, y, z). + + This function returns the values: + + .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * T_i(x) * T_j(y) * T_k(z) + + The parameters `x`, `y`, and `z` are converted to arrays only if + they are tuples or lists, otherwise they are treated as scalars and + they must have the same shape after conversion. In either case, either + `x`, `y`, and `z` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than 3 dimensions, ones are implicitly appended to its + shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape. + + Parameters + ---------- + x, y, z : array_like, compatible object + The three dimensional series is evaluated at the points + `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If + any of `x`, `y`, or `z` is a list or tuple, it is first converted + to an ndarray, otherwise it is left unchanged and if it isn't an + ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension + greater than 3 the remaining indices enumerate multiple sets of + coefficients.
+ + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + triples of corresponding values from `x`, `y`, and `z`. + + See Also + -------- + chebval, chebval2d, chebgrid2d, chebgrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + return pu._valnd(chebval, c, x, y, z) + + +def chebgrid3d(x, y, z, c): + """ + Evaluate a 3-D Chebyshev series on the Cartesian product of x, y, and z. + + This function returns the values: + + .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * T_i(a) * T_j(b) * T_k(c) + + where the points `(a, b, c)` consist of all triples formed by taking + `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form + a grid with `x` in the first dimension, `y` in the second, and `z` in + the third. + + The parameters `x`, `y`, and `z` are converted to arrays only if they + are tuples or lists, otherwise they are treated as scalars. In + either case, either `x`, `y`, and `z` or their elements must support + multiplication and addition both with themselves and with the elements + of `c`. + + If `c` has fewer than three dimensions, ones are implicitly appended to + its shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape + y.shape + z.shape. + + Parameters + ---------- + x, y, z : array_like, compatible objects + The three dimensional series is evaluated at the points in the + Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a + list or tuple, it is first converted to an ndarray, otherwise it is + left unchanged and, if it isn't an ndarray, it is treated as a + scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension + greater than three the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the three dimensional polynomial at points in the Cartesian + product of `x`, `y`, and `z`. + + See Also + -------- + chebval, chebval2d, chebgrid2d, chebval3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + return pu._gridnd(chebval, c, x, y, z) + + +def chebvander(x, deg): + """Pseudo-Vandermonde matrix of given degree. + + Returns the pseudo-Vandermonde matrix of degree `deg` and sample points + `x`. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., i] = T_i(x), + + where `0 <= i <= deg`. The leading indices of `V` index the elements of + `x` and the last index is the degree of the Chebyshev polynomial. + + If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the + matrix ``V = chebvander(x, n)``, then ``np.dot(V, c)`` and + ``chebval(x, c)`` are the same up to roundoff. This equivalence is + useful both for least squares fitting and for the evaluation of a large + number of Chebyshev series of the same degree and sample points. + + Parameters + ---------- + x : array_like + Array of points. The dtype is converted to float64 or complex128 + depending on whether any of the elements are complex. If `x` is + scalar it is converted to a 1-D array. + deg : int + Degree of the resulting matrix. + + Returns + ------- + vander : ndarray + The pseudo Vandermonde matrix. The shape of the returned matrix is + ``x.shape + (deg + 1,)``, where the last index is the degree of the + corresponding Chebyshev polynomial. The dtype will be the same as + the converted `x`.
+ + """ + ideg = pu._deprecate_as_int(deg, "deg") + if ideg < 0: + raise ValueError("deg must be non-negative") + + x = np.array(x, copy=False, ndmin=1) + 0.0 + dims = (ideg + 1,) + x.shape + dtyp = x.dtype + v = np.empty(dims, dtype=dtyp) + # Use forward recursion to generate the entries. + v[0] = x*0 + 1 + if ideg > 0: + x2 = 2*x + v[1] = x + for i in range(2, ideg + 1): + v[i] = v[i-1]*x2 - v[i-2] + return np.moveaxis(v, 0, -1) + + +def chebvander2d(x, y, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points `(x, y)`. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (deg[1] + 1)*i + j] = T_i(x) * T_j(y), + + where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of + `V` index the points `(x, y)` and the last index encodes the degrees of + the Chebyshev polynomials. + + If ``V = chebvander2d(x, y, [xdeg, ydeg])``, then the columns of `V` + correspond to the elements of a 2-D coefficient array `c` of shape + (xdeg + 1, ydeg + 1) in the order + + .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... + + and ``np.dot(V, c.flat)`` and ``chebval2d(x, y, c)`` will be the same + up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 2-D Chebyshev + series of the same degrees and sample points. + + Parameters + ---------- + x, y : array_like + Arrays of point coordinates, all of the same shape. The dtypes + will be converted to either float64 or complex128 depending on + whether any of the elements are complex. Scalars are converted to + 1-D arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg]. + + Returns + ------- + vander2d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same + as the converted `x` and `y`. + + See Also + -------- + chebvander, chebvander3d, chebval2d, chebval3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + return pu._vander_nd_flat((chebvander, chebvander), (x, y), deg) + + +def chebvander3d(x, y, z, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`, + then The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = T_i(x)*T_j(y)*T_k(z), + + where `0 <= i <= l`, `0 <= j <= m`, and `0 <= j <= n`. The leading + indices of `V` index the points `(x, y, z)` and the last index encodes + the degrees of the Chebyshev polynomials. + + If ``V = chebvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns + of `V` correspond to the elements of a 3-D coefficient array `c` of + shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order + + .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},... + + and ``np.dot(V, c.flat)`` and ``chebval3d(x, y, z, c)`` will be the + same up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 3-D Chebyshev + series of the same degrees and sample points. + + Parameters + ---------- + x, y, z : array_like + Arrays of point coordinates, all of the same shape. The dtypes will + be converted to either float64 or complex128 depending on whether + any of the elements are complex. Scalars are converted to 1-D + arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg, z_deg]. 
+ + Returns + ------- + vander3d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will + be the same as the converted `x`, `y`, and `z`. + + See Also + -------- + chebvander, chebvander2d, chebval2d, chebval3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + return pu._vander_nd_flat((chebvander, chebvander, chebvander), (x, y, z), deg) + + +def chebfit(x, y, deg, rcond=None, full=False, w=None): + """ + Least squares fit of Chebyshev series to data. + + Return the coefficients of a Chebyshev series of degree `deg` that is the + least squares fit to the data values `y` given at points `x`. If `y` is + 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple + fits are done, one for each column of `y`, and the resulting + coefficients are stored in the corresponding columns of a 2-D return. + The fitted polynomial(s) are in the form + + .. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x), + + where `n` is `deg`. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) or (M, K) + y-coordinates of the sample points. Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer, + all terms up to and including the `deg`'th term are included in the + fit. For NumPy versions >= 1.11.0 a list of integers specifying the + degrees of the terms to include may be used instead. + rcond : float, optional + Relative condition number of the fit. Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is len(x)*eps, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (`M`,), optional + Weights. If not None, the contribution of each point + ``(x[i],y[i])`` to the fit is weighted by ``w[i]``. Ideally the + weights are chosen so that the errors of the products ``w[i]*y[i]`` + all have the same variance. The default value is None. + + .. versionadded:: 1.5.0 + + Returns + ------- + coef : ndarray, shape (deg + 1,) or (deg + 1, K) + Chebyshev coefficients ordered from low to high. If `y` was 2-D, + the coefficients for the data in column k of `y` are in column + `k`. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if `full` = True + + residuals -- sum of squared residuals of the least squares fit + rank -- the numerical rank of the scaled Vandermonde matrix + singular_values -- singular values of the scaled Vandermonde matrix + rcond -- value of `rcond`. + + For more details, see `numpy.linalg.lstsq`. + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if `full` = False.
The + warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', np.RankWarning) + + See Also + -------- + numpy.polynomial.polynomial.polyfit + numpy.polynomial.legendre.legfit + numpy.polynomial.laguerre.lagfit + numpy.polynomial.hermite.hermfit + numpy.polynomial.hermite_e.hermefit + chebval : Evaluates a Chebyshev series. + chebvander : Vandermonde matrix of Chebyshev series. + chebweight : Chebyshev weight function. + numpy.linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the Chebyshev series `p` that + minimizes the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where :math:`w_j` are the weights. This problem is solved by setting up + the (typically) overdetermined matrix equation + + .. math:: V(x) * c = w * y, + + where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the + coefficients to be solved for, `w` are the weights, and `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected, then a `RankWarning` will be issued. This means that the + coefficient values may be poorly determined. Using a lower order fit + will usually get rid of the warning. The `rcond` parameter can also be + set to a value smaller than its default, but the resulting fit may be + spurious and have large contributions from roundoff error. + + Fits using Chebyshev series are usually better conditioned than fits + using power series, but much can depend on the distribution of the + sample points and the smoothness of the data. If the quality of the fit + is inadequate, splines may be a good alternative. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + https://en.wikipedia.org/wiki/Curve_fitting + + Examples + -------- + + """ + return pu._fit(chebvander, x, y, deg, rcond, full, w) + + +def chebcompanion(c): + """Return the scaled companion matrix of c. + + The basis polynomials are scaled so that the companion matrix is + symmetric when `c` is a Chebyshev basis polynomial. This provides + better eigenvalue estimates than the unscaled case and for basis + polynomials the eigenvalues are guaranteed to be real if + `numpy.linalg.eigvalsh` is used to obtain them. + + Parameters + ---------- + c : array_like + 1-D array of Chebyshev series coefficients ordered from low to high + degree. + + Returns + ------- + mat : ndarray + Scaled companion matrix of dimensions (deg, deg). + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + raise ValueError('Series must have maximum degree of at least 1.') + if len(c) == 2: + return np.array([[-c[0]/c[1]]]) + + n = len(c) - 1 + mat = np.zeros((n, n), dtype=c.dtype) + scl = np.array([1.] + [np.sqrt(.5)]*(n-1)) + top = mat.reshape(-1)[1::n+1] + bot = mat.reshape(-1)[n::n+1] + top[0] = np.sqrt(.5) + top[1:] = 1/2 + bot[...] = top + mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*.5 + return mat + + +def chebroots(c): + """ + Compute the roots of a Chebyshev series. + + Return the roots (a.k.a. "zeros") of the polynomial + + .. math:: p(x) = \\sum_i c[i] * T_i(x). + + Parameters + ---------- + c : 1-D array_like + 1-D array of coefficients. + + Returns + ------- + out : ndarray + Array of the roots of the series.
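The Examples section of `chebfit` above is empty in this vendored copy, so here is a minimal usage sketch (not part of the file itself); x**3 - x has the exact Chebyshev expansion 0.25*T_3 - 0.25*T_1:

```python
import numpy as np
from numpy.polynomial import chebyshev as C

x = np.linspace(-1, 1, 51)
y = x**3 - x
print(np.round(C.chebfit(x, y, 3), 4))   # [ 0.   -0.25  0.    0.25]
```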
If all the roots are real, + then `out` is also real, otherwise it is complex. + + See Also + -------- + numpy.polynomial.polynomial.polyroots + numpy.polynomial.legendre.legroots + numpy.polynomial.laguerre.lagroots + numpy.polynomial.hermite.hermroots + numpy.polynomial.hermite_e.hermeroots + + Notes + ----- + The root estimates are obtained as the eigenvalues of the companion + matrix. Roots far from the origin of the complex plane may have large + errors due to the numerical instability of the series for such + values. Roots with multiplicity greater than 1 will also show larger + errors as the value of the series near such points is relatively + insensitive to errors in the roots. Isolated roots near the origin can + be improved by a few iterations of Newton's method. + + The Chebyshev series basis polynomials aren't powers of `x` so the + results of this function may seem unintuitive. + + Examples + -------- + >>> import numpy.polynomial.chebyshev as cheb + >>> cheb.chebroots((-1, 1,-1, 1)) # T3 - T2 + T1 - T0 has real roots + array([ -5.00000000e-01, 2.60860684e-17, 1.00000000e+00]) # may vary + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + return np.array([], dtype=c.dtype) + if len(c) == 2: + return np.array([-c[0]/c[1]]) + + # rotated companion matrix reduces error + m = chebcompanion(c)[::-1,::-1] + r = la.eigvals(m) + r.sort() + return r + + +def chebinterpolate(func, deg, args=()): + """Interpolate a function at the Chebyshev points of the first kind. + + Returns the Chebyshev series that interpolates `func` at the Chebyshev + points of the first kind in the interval [-1, 1]. The interpolating + series tends to a minmax approximation to `func` with increasing `deg` + if the function is continuous in the interval. + + .. versionadded:: 1.14.0 + + Parameters + ---------- + func : function + The function to be approximated. It must be a function of a single + variable of the form ``f(x, a, b, c...)``, where ``a, b, c...`` are + extra arguments passed in the `args` parameter. + deg : int + Degree of the interpolating polynomial + args : tuple, optional + Extra arguments to be used in the function call. Default is no extra + arguments. + + Returns + ------- + coef : ndarray, shape (deg + 1,) + Chebyshev coefficients of the interpolating series ordered from low to + high. + + Examples + -------- + >>> import numpy.polynomial.chebyshev as C + >>> C.chebinterpolate(lambda x: np.tanh(x) + 0.5, 8) + array([ 5.00000000e-01, 8.11675684e-01, -9.86864911e-17, + -5.42457905e-02, -2.71387850e-16, 4.51658839e-03, + 2.46716228e-17, -3.79694221e-04, -3.26899002e-16]) + + Notes + ----- + + The Chebyshev polynomials used in the interpolation are orthogonal when + sampled at the Chebyshev points of the first kind. If it is desired to + constrain some of the coefficients they can simply be set to the desired + value after the interpolation, no new interpolation or fit is needed. This + is especially useful if it is known a priori that some of the coefficients + are zero. For instance, if the function is even then the coefficients of + the terms of odd degree in the result can be set to zero. + + """ + deg = np.asarray(deg) + + # check arguments.
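As the Notes above describe, the roots are eigenvalue estimates of the scaled companion matrix. This sketch checks that directly (illustrative only; the reversal applied inside `chebroots` is a similarity transform, so the spectrum is unchanged up to roundoff):

```python
import numpy as np
from numpy.polynomial import chebyshev as C

c = np.array([-1.0, 1.0, -1.0, 1.0])      # T3 - T2 + T1 - T0
eig = np.sort(np.linalg.eigvals(C.chebcompanion(c)))
print(np.allclose(np.sort(C.chebroots(c)), eig))   # True
```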
+ if deg.ndim > 0 or deg.dtype.kind not in 'iu' or deg.size == 0: + raise TypeError("deg must be an int") + if deg < 0: + raise ValueError("expected deg >= 0") + + order = deg + 1 + xcheb = chebpts1(order) + yfunc = func(xcheb, *args) + m = chebvander(xcheb, deg) + c = np.dot(m.T, yfunc) + c[0] /= order + c[1:] /= 0.5*order + + return c + + +def chebgauss(deg): + """ + Gauss-Chebyshev quadrature. + + Computes the sample points and weights for Gauss-Chebyshev quadrature. + These sample points and weights will correctly integrate polynomials of + degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with + the weight function :math:`f(x) = 1/\\sqrt{1 - x^2}`. + + Parameters + ---------- + deg : int + Number of sample points and weights. It must be >= 1. + + Returns + ------- + x : ndarray + 1-D ndarray containing the sample points. + y : ndarray + 1-D ndarray containing the weights. + + Notes + ----- + + .. versionadded:: 1.7.0 + + The results have only been tested up to degree 100, higher degrees may + be problematic. For Gauss-Chebyshev there are closed form solutions for + the sample points and weights. If n = `deg`, then + + .. math:: x_i = \\cos(\\pi (2 i - 1) / (2 n)) + + .. math:: w_i = \\pi / n + + """ + ideg = pu._deprecate_as_int(deg, "deg") + if ideg <= 0: + raise ValueError("deg must be a positive integer") + + x = np.cos(np.pi * np.arange(1, 2*ideg, 2) / (2.0*ideg)) + w = np.ones(ideg)*(np.pi/ideg) + + return x, w + + +def chebweight(x): + """ + The weight function of the Chebyshev polynomials. + + The weight function is :math:`1/\\sqrt{1 - x^2}` and the interval of + integration is :math:`[-1, 1]`. The Chebyshev polynomials are + orthogonal, but not normalized, with respect to this weight function. + + Parameters + ---------- + x : array_like + Values at which the weight function will be computed. + + Returns + ------- + w : ndarray + The weight function at `x`. + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + w = 1./(np.sqrt(1. + x) * np.sqrt(1. - x)) + return w + + +def chebpts1(npts): + """ + Chebyshev points of the first kind. + + The Chebyshev points of the first kind are the points ``cos(x)``, + where ``x = [pi*(k + .5)/npts for k in range(npts)]``. + + Parameters + ---------- + npts : int + Number of sample points desired. + + Returns + ------- + pts : ndarray + The Chebyshev points of the first kind. + + See Also + -------- + chebpts2 + + Notes + ----- + + .. versionadded:: 1.5.0 + + """ + _npts = int(npts) + if _npts != npts: + raise ValueError("npts must be integer") + if _npts < 1: + raise ValueError("npts must be >= 1") + + x = 0.5 * np.pi / _npts * np.arange(-_npts+1, _npts+1, 2) + return np.sin(x) + + +def chebpts2(npts): + """ + Chebyshev points of the second kind. + + The Chebyshev points of the second kind are the points ``cos(x)``, + where ``x = [pi*k/(npts - 1) for k in range(npts)]``. + + Parameters + ---------- + npts : int + Number of sample points desired. + + Returns + ------- + pts : ndarray + The Chebyshev points of the second kind. + + Notes + ----- + + .. versionadded:: 1.5.0 + + """ + _npts = int(npts) + if _npts != npts: + raise ValueError("npts must be integer") + if _npts < 2: + raise ValueError("npts must be >= 2") + + x = np.linspace(-np.pi, 0, _npts) + return np.cos(x) + + +# +# Chebyshev series class +# + +class Chebyshev(ABCPolyBase): + """A Chebyshev series class. 
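A usage sketch for `chebgauss` (not part of the vendored file): with the weight 1/sqrt(1 - x**2), the nodes and weights integrate x**2 over [-1, 1] to exactly pi/2:

```python
import numpy as np
from numpy.polynomial import chebyshev as C

x, w = C.chebgauss(10)
# Integral of x**2 / sqrt(1 - x**2) over [-1, 1] is pi/2.
print(np.sum(w * x**2), np.pi/2)   # agree to machine precision
```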
+
+ The Chebyshev class provides the standard Python numerical methods
+ '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
+ methods listed below.
+
+ Parameters
+ ----------
+ coef : array_like
+ Chebyshev coefficients in order of increasing degree, i.e.,
+ ``(1, 2, 3)`` gives ``1*T_0(x) + 2*T_1(x) + 3*T_2(x)``.
+ domain : (2,) array_like, optional
+ Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
+ to the interval ``[window[0], window[1]]`` by shifting and scaling.
+ The default value is [-1, 1].
+ window : (2,) array_like, optional
+ Window, see `domain` for its use. The default value is [-1, 1].
+
+ .. versionadded:: 1.6.0
+
+ """
+ # Virtual Functions
+ _add = staticmethod(chebadd)
+ _sub = staticmethod(chebsub)
+ _mul = staticmethod(chebmul)
+ _div = staticmethod(chebdiv)
+ _pow = staticmethod(chebpow)
+ _val = staticmethod(chebval)
+ _int = staticmethod(chebint)
+ _der = staticmethod(chebder)
+ _fit = staticmethod(chebfit)
+ _line = staticmethod(chebline)
+ _roots = staticmethod(chebroots)
+ _fromroots = staticmethod(chebfromroots)
+
+ @classmethod
+ def interpolate(cls, func, deg, domain=None, args=()):
+ """Interpolate a function at the Chebyshev points of the first kind.
+
+ Returns the series that interpolates `func` at the Chebyshev points of
+ the first kind scaled and shifted to the `domain`. The resulting series
+ tends to a minimax approximation of `func` when the function is
+ continuous in the domain.
+
+ .. versionadded:: 1.14.0
+
+ Parameters
+ ----------
+ func : function
+ The function to be interpolated. It must be a function of a single
+ variable of the form ``f(x, a, b, c...)``, where ``a, b, c...`` are
+ extra arguments passed in the `args` parameter.
+ deg : int
+ Degree of the interpolating polynomial.
+ domain : {None, [beg, end]}, optional
+ Domain over which `func` is interpolated. The default is None, in
+ which case the domain is [-1, 1].
+ args : tuple, optional
+ Extra arguments to be used in the function call. Default is no
+ extra arguments.
+
+ Returns
+ -------
+ polynomial : Chebyshev instance
+ Interpolating Chebyshev instance.
+
+ Notes
+ -----
+ See `numpy.polynomial.chebyshev.chebinterpolate` for more details.
+
+ """
+ if domain is None:
+ domain = cls.domain
+ xfunc = lambda x: func(pu.mapdomain(x, cls.window, domain), *args)
+ coef = chebinterpolate(xfunc, deg)
+ return cls(coef, domain=domain)
+
+ # Virtual properties
+ domain = np.array(chebdomain)
+ window = np.array(chebdomain)
+ basis_name = 'T'
diff --git a/MLPY/Lib/site-packages/numpy/polynomial/chebyshev.pyi b/MLPY/Lib/site-packages/numpy/polynomial/chebyshev.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..b762da612998d9a82acc95b43590429b1388ba73
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/polynomial/chebyshev.pyi
@@ -0,0 +1,51 @@
+from typing import Any, List
+
+from numpy import ndarray, dtype, int_
+from numpy.polynomial._polybase import ABCPolyBase
+from numpy.polynomial.polyutils import trimcoef
+
+__all__: List[str]
+
+chebtrim = trimcoef
+
+def poly2cheb(pol): ...
+def cheb2poly(c): ...
+
+chebdomain: ndarray[Any, dtype[int_]]
+chebzero: ndarray[Any, dtype[int_]]
+chebone: ndarray[Any, dtype[int_]]
+chebx: ndarray[Any, dtype[int_]]
+
+def chebline(off, scl): ...
+def chebfromroots(roots): ...
+def chebadd(c1, c2): ...
+def chebsub(c1, c2): ...
+def chebmulx(c): ...
+def chebmul(c1, c2): ...
+def chebdiv(c1, c2): ...
+def chebpow(c, pow, maxpower=...): ...
+def chebder(c, m=..., scl=..., axis=...): ...
+def chebint(c, m=..., k = ..., lbnd=..., scl=..., axis=...): ... +def chebval(x, c, tensor=...): ... +def chebval2d(x, y, c): ... +def chebgrid2d(x, y, c): ... +def chebval3d(x, y, z, c): ... +def chebgrid3d(x, y, z, c): ... +def chebvander(x, deg): ... +def chebvander2d(x, y, deg): ... +def chebvander3d(x, y, z, deg): ... +def chebfit(x, y, deg, rcond=..., full=..., w=...): ... +def chebcompanion(c): ... +def chebroots(c): ... +def chebinterpolate(func, deg, args = ...): ... +def chebgauss(deg): ... +def chebweight(x): ... +def chebpts1(npts): ... +def chebpts2(npts): ... + +class Chebyshev(ABCPolyBase): + @classmethod + def interpolate(cls, func, deg, domain=..., args = ...): ... + domain: Any + window: Any + basis_name: Any diff --git a/MLPY/Lib/site-packages/numpy/polynomial/hermite.py b/MLPY/Lib/site-packages/numpy/polynomial/hermite.py new file mode 100644 index 0000000000000000000000000000000000000000..2339e78d2f12792a8b6ba4857c0d27ab39b41f48 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/polynomial/hermite.py @@ -0,0 +1,1696 @@ +""" +============================================================== +Hermite Series, "Physicists" (:mod:`numpy.polynomial.hermite`) +============================================================== + +This module provides a number of objects (mostly functions) useful for +dealing with Hermite series, including a `Hermite` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Classes +------- +.. autosummary:: + :toctree: generated/ + + Hermite + +Constants +--------- +.. autosummary:: + :toctree: generated/ + + hermdomain + hermzero + hermone + hermx + +Arithmetic +---------- +.. autosummary:: + :toctree: generated/ + + hermadd + hermsub + hermmulx + hermmul + hermdiv + hermpow + hermval + hermval2d + hermval3d + hermgrid2d + hermgrid3d + +Calculus +-------- +.. autosummary:: + :toctree: generated/ + + hermder + hermint + +Misc Functions +-------------- +.. autosummary:: + :toctree: generated/ + + hermfromroots + hermroots + hermvander + hermvander2d + hermvander3d + hermgauss + hermweight + hermcompanion + hermfit + hermtrim + hermline + herm2poly + poly2herm + +See also +-------- +`numpy.polynomial` + +""" +import numpy as np +import numpy.linalg as la +from numpy.core.multiarray import normalize_axis_index + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'hermzero', 'hermone', 'hermx', 'hermdomain', 'hermline', 'hermadd', + 'hermsub', 'hermmulx', 'hermmul', 'hermdiv', 'hermpow', 'hermval', + 'hermder', 'hermint', 'herm2poly', 'poly2herm', 'hermfromroots', + 'hermvander', 'hermfit', 'hermtrim', 'hermroots', 'Hermite', + 'hermval2d', 'hermval3d', 'hermgrid2d', 'hermgrid3d', 'hermvander2d', + 'hermvander3d', 'hermcompanion', 'hermgauss', 'hermweight'] + +hermtrim = pu.trimcoef + + +def poly2herm(pol): + """ + poly2herm(pol) + + Convert a polynomial to a Hermite series. + + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Hermite series, ordered + from lowest to highest degree. + + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Hermite + series. 
+ + See Also + -------- + herm2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy.polynomial.hermite import poly2herm + >>> poly2herm(np.arange(4)) + array([1. , 2.75 , 0.5 , 0.375]) + + """ + [pol] = pu.as_series([pol]) + deg = len(pol) - 1 + res = 0 + for i in range(deg, -1, -1): + res = hermadd(hermmulx(res), pol[i]) + return res + + +def herm2poly(c): + """ + Convert a Hermite series to a polynomial. + + Convert an array representing the coefficients of a Hermite series, + ordered from lowest degree to highest, to an array of the coefficients + of the equivalent polynomial (relative to the "standard" basis) ordered + from lowest to highest degree. + + Parameters + ---------- + c : array_like + 1-D array containing the Hermite series coefficients, ordered + from lowest order term to highest. + + Returns + ------- + pol : ndarray + 1-D array containing the coefficients of the equivalent polynomial + (relative to the "standard" basis) ordered from lowest order term + to highest. + + See Also + -------- + poly2herm + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy.polynomial.hermite import herm2poly + >>> herm2poly([ 1. , 2.75 , 0.5 , 0.375]) + array([0., 1., 2., 3.]) + + """ + from .polynomial import polyadd, polysub, polymulx + + [c] = pu.as_series([c]) + n = len(c) + if n == 1: + return c + if n == 2: + c[1] *= 2 + return c + else: + c0 = c[-2] + c1 = c[-1] + # i is the current degree of c1 + for i in range(n - 1, 1, -1): + tmp = c0 + c0 = polysub(c[i - 2], c1*(2*(i - 1))) + c1 = polyadd(tmp, polymulx(c1)*2) + return polyadd(c0, polymulx(c1)*2) + +# +# These are constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Hermite +hermdomain = np.array([-1, 1]) + +# Hermite coefficients representing zero. +hermzero = np.array([0]) + +# Hermite coefficients representing one. +hermone = np.array([1]) + +# Hermite coefficients representing the identity x. +hermx = np.array([0, 1/2]) + + +def hermline(off, scl): + """ + Hermite series whose graph is a straight line. + + + + Parameters + ---------- + off, scl : scalars + The specified line is given by ``off + scl*x``. + + Returns + ------- + y : ndarray + This module's representation of the Hermite series for + ``off + scl*x``. + + See Also + -------- + numpy.polynomial.polynomial.polyline + numpy.polynomial.chebyshev.chebline + numpy.polynomial.legendre.legline + numpy.polynomial.laguerre.lagline + numpy.polynomial.hermite_e.hermeline + + Examples + -------- + >>> from numpy.polynomial.hermite import hermline, hermval + >>> hermval(0,hermline(3, 2)) + 3.0 + >>> hermval(1,hermline(3, 2)) + 5.0 + + """ + if scl != 0: + return np.array([off, scl/2]) + else: + return np.array([off]) + + +def hermfromroots(roots): + """ + Generate a Hermite series with given roots. + + The function returns the coefficients of the polynomial + + .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), + + in Hermite form, where the `r_n` are the roots specified in `roots`. + If a zero has multiplicity n, then it must appear in `roots` n times. + For instance, if 2 is a root of multiplicity three and 3 is a root of + multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The + roots can appear in any order. 
+ + If the returned coefficients are `c`, then + + .. math:: p(x) = c_0 + c_1 * H_1(x) + ... + c_n * H_n(x) + + The coefficient of the last term is not generally 1 for monic + polynomials in Hermite form. + + Parameters + ---------- + roots : array_like + Sequence containing the roots. + + Returns + ------- + out : ndarray + 1-D array of coefficients. If all roots are real then `out` is a + real array, if some of the roots are complex, then `out` is complex + even if all the coefficients in the result are real (see Examples + below). + + See Also + -------- + numpy.polynomial.polynomial.polyfromroots + numpy.polynomial.legendre.legfromroots + numpy.polynomial.laguerre.lagfromroots + numpy.polynomial.chebyshev.chebfromroots + numpy.polynomial.hermite_e.hermefromroots + + Examples + -------- + >>> from numpy.polynomial.hermite import hermfromroots, hermval + >>> coef = hermfromroots((-1, 0, 1)) + >>> hermval((-1, 0, 1), coef) + array([0., 0., 0.]) + >>> coef = hermfromroots((-1j, 1j)) + >>> hermval((-1j, 1j), coef) + array([0.+0.j, 0.+0.j]) + + """ + return pu._fromroots(hermline, hermmul, roots) + + +def hermadd(c1, c2): + """ + Add one Hermite series to another. + + Returns the sum of two Hermite series `c1` + `c2`. The arguments + are sequences of coefficients ordered from lowest order term to + highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the Hermite series of their sum. + + See Also + -------- + hermsub, hermmulx, hermmul, hermdiv, hermpow + + Notes + ----- + Unlike multiplication, division, etc., the sum of two Hermite series + is a Hermite series (without having to "reproject" the result onto + the basis set) so addition, just like that of "standard" polynomials, + is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.hermite import hermadd + >>> hermadd([1, 2, 3], [1, 2, 3, 4]) + array([2., 4., 6., 4.]) + + """ + return pu._add(c1, c2) + + +def hermsub(c1, c2): + """ + Subtract one Hermite series from another. + + Returns the difference of two Hermite series `c1` - `c2`. The + sequences of coefficients are from lowest order term to highest, i.e., + [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Hermite series coefficients representing their difference. + + See Also + -------- + hermadd, hermmulx, hermmul, hermdiv, hermpow + + Notes + ----- + Unlike multiplication, division, etc., the difference of two Hermite + series is a Hermite series (without having to "reproject" the result + onto the basis set) so subtraction, just like that of "standard" + polynomials, is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.hermite import hermsub + >>> hermsub([1, 2, 3, 4], [1, 2, 3]) + array([0., 0., 0., 4.]) + + """ + return pu._sub(c1, c2) + + +def hermmulx(c): + """Multiply a Hermite series by x. + + Multiply the Hermite series `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the result of the multiplication. 
+ + See Also + -------- + hermadd, hermsub, hermmul, hermdiv, hermpow + + Notes + ----- + The multiplication uses the recursion relationship for Hermite + polynomials in the form + + .. math:: + + xP_i(x) = (P_{i + 1}(x)/2 + i*P_{i - 1}(x)) + + Examples + -------- + >>> from numpy.polynomial.hermite import hermmulx + >>> hermmulx([1, 2, 3]) + array([2. , 6.5, 1. , 1.5]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0]*0 + prd[1] = c[0]/2 + for i in range(1, len(c)): + prd[i + 1] = c[i]/2 + prd[i - 1] += c[i]*i + return prd + + +def hermmul(c1, c2): + """ + Multiply one Hermite series by another. + + Returns the product of two Hermite series `c1` * `c2`. The arguments + are sequences of coefficients, from lowest order "term" to highest, + e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Hermite series coefficients representing their product. + + See Also + -------- + hermadd, hermsub, hermmulx, hermdiv, hermpow + + Notes + ----- + In general, the (polynomial) product of two C-series results in terms + that are not in the Hermite polynomial basis set. Thus, to express + the product as a Hermite series, it is necessary to "reproject" the + product onto said basis set, which may produce "unintuitive" (but + correct) results; see Examples section below. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermmul + >>> hermmul([1, 2, 3], [0, 1, 2]) + array([52., 29., 52., 7., 6.]) + + """ + # s1, s2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + + if len(c1) > len(c2): + c = c2 + xs = c1 + else: + c = c1 + xs = c2 + + if len(c) == 1: + c0 = c[0]*xs + c1 = 0 + elif len(c) == 2: + c0 = c[0]*xs + c1 = c[1]*xs + else: + nd = len(c) + c0 = c[-2]*xs + c1 = c[-1]*xs + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = hermsub(c[-i]*xs, c1*(2*(nd - 1))) + c1 = hermadd(tmp, hermmulx(c1)*2) + return hermadd(c0, hermmulx(c1)*2) + + +def hermdiv(c1, c2): + """ + Divide one Hermite series by another. + + Returns the quotient-with-remainder of two Hermite series + `c1` / `c2`. The arguments are sequences of coefficients from lowest + order "term" to highest, e.g., [1,2,3] represents the series + ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + [quo, rem] : ndarrays + Of Hermite series coefficients representing the quotient and + remainder. + + See Also + -------- + hermadd, hermsub, hermmulx, hermmul, hermpow + + Notes + ----- + In general, the (polynomial) division of one Hermite series by another + results in quotient and remainder terms that are not in the Hermite + polynomial basis set. Thus, to express these results as a Hermite + series, it is necessary to "reproject" the results onto the Hermite + basis set, which may produce "unintuitive" (but correct) results; see + Examples section below. 
+ + Examples + -------- + >>> from numpy.polynomial.hermite import hermdiv + >>> hermdiv([ 52., 29., 52., 7., 6.], [0, 1, 2]) + (array([1., 2., 3.]), array([0.])) + >>> hermdiv([ 54., 31., 52., 7., 6.], [0, 1, 2]) + (array([1., 2., 3.]), array([2., 2.])) + >>> hermdiv([ 53., 30., 52., 7., 6.], [0, 1, 2]) + (array([1., 2., 3.]), array([1., 1.])) + + """ + return pu._div(hermmul, c1, c2) + + +def hermpow(c, pow, maxpower=16): + """Raise a Hermite series to a power. + + Returns the Hermite series `c` raised to the power `pow`. The + argument `c` is a sequence of coefficients ordered from low to high. + i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` + + Parameters + ---------- + c : array_like + 1-D array of Hermite series coefficients ordered from low to + high. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Hermite series of power. + + See Also + -------- + hermadd, hermsub, hermmulx, hermmul, hermdiv + + Examples + -------- + >>> from numpy.polynomial.hermite import hermpow + >>> hermpow([1, 2, 3], 2) + array([81., 52., 82., 12., 9.]) + + """ + return pu._pow(hermmul, c, pow, maxpower) + + +def hermder(c, m=1, scl=1, axis=0): + """ + Differentiate a Hermite series. + + Returns the Hermite series coefficients `c` differentiated `m` times + along `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*H_0 + 2*H_1 + 3*H_2`` + while [[1,2],[1,2]] represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + + 2*H_0(x)*H_1(y) + 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is + ``y``. + + Parameters + ---------- + c : array_like + Array of Hermite series coefficients. If `c` is multidimensional the + different axis correspond to different variables with the degree in + each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + der : ndarray + Hermite series of the derivative. + + See Also + -------- + hermint + + Notes + ----- + In general, the result of differentiating a Hermite series does not + resemble the same operation on a power series. Thus the result of this + function may be "unintuitive," albeit correct; see Examples section + below. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermder + >>> hermder([ 1. 
, 0.5, 0.5, 0.5]) + array([1., 2., 3.]) + >>> hermder([-0.5, 1./2., 1./8., 1./12., 1./16.], m=2) + array([1., 2., 3.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + cnt = pu._deprecate_as_int(m, "the order of derivation") + iaxis = pu._deprecate_as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + c = c[:1]*0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 0, -1): + der[j - 1] = (2*j)*c[j] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Hermite series. + + Returns the Hermite series coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]] + represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) + + 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + Parameters + ---------- + c : array_like + Array of Hermite series coefficients. If c is multidimensional the + different axis correspond to different variables with the degree in + each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at + ``lbnd`` is the first value in the list, the value of the second + integral at ``lbnd`` is the second value, etc. If ``k == []`` (the + default), all constants are set to zero. If ``m == 1``, a single + scalar can be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + S : ndarray + Hermite series coefficients of the integral. + + Raises + ------ + ValueError + If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + hermder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a` - perhaps not what one would have first thought. + + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermint + >>> hermint([1,2,3]) # integrate once, value 0 at 0. 
+ array([1. , 0.5, 0.5, 0.5]) + >>> hermint([1,2,3], m=2) # integrate twice, value & deriv 0 at 0 + array([-0.5 , 0.5 , 0.125 , 0.08333333, 0.0625 ]) # may vary + >>> hermint([1,2,3], k=1) # integrate once, value 1 at 0. + array([2. , 0.5, 0.5, 0.5]) + >>> hermint([1,2,3], lbnd=-1) # integrate once, value 0 at -1 + array([-2. , 0.5, 0.5, 0.5]) + >>> hermint([1,2,3], m=2, k=[1,2], lbnd=-1) + array([ 1.66666667, -0.5 , 0.125 , 0.08333333, 0.0625 ]) # may vary + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if not np.iterable(k): + k = [k] + cnt = pu._deprecate_as_int(m, "the order of integration") + iaxis = pu._deprecate_as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + k = list(k) + [0]*(cnt - len(k)) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) + tmp[0] = c[0]*0 + tmp[1] = c[0]/2 + for j in range(1, n): + tmp[j + 1] = c[j]/(2*(j + 1)) + tmp[0] += k[i] - hermval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def hermval(x, c, tensor=True): + """ + Evaluate an Hermite series at points x. + + If `c` is of length `n + 1`, this function returns the value: + + .. math:: p(x) = c_0 * H_0(x) + c_1 * H_1(x) + ... + c_n * H_n(x) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + with themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + .. 
versionadded:: 1.7.0
+
+ Returns
+ -------
+ values : ndarray, algebra_like
+ The shape of the return value is described above.
+
+ See Also
+ --------
+ hermval2d, hermgrid2d, hermval3d, hermgrid3d
+
+ Notes
+ -----
+ The evaluation uses Clenshaw recursion, aka synthetic division.
+
+ Examples
+ --------
+ >>> from numpy.polynomial.hermite import hermval
+ >>> coef = [1,2,3]
+ >>> hermval(1, coef)
+ 11.0
+ >>> hermval([[1,2],[3,4]], coef)
+ array([[ 11., 51.],
+ [115., 203.]])
+
+ """
+ c = np.array(c, ndmin=1, copy=False)
+ if c.dtype.char in '?bBhHiIlLqQpP':
+ c = c.astype(np.double)
+ if isinstance(x, (tuple, list)):
+ x = np.asarray(x)
+ if isinstance(x, np.ndarray) and tensor:
+ c = c.reshape(c.shape + (1,)*x.ndim)
+
+ x2 = x*2
+ if len(c) == 1:
+ c0 = c[0]
+ c1 = 0
+ elif len(c) == 2:
+ c0 = c[0]
+ c1 = c[1]
+ else:
+ nd = len(c)
+ c0 = c[-2]
+ c1 = c[-1]
+ for i in range(3, len(c) + 1):
+ tmp = c0
+ nd = nd - 1
+ c0 = c[-i] - c1*(2*(nd - 1))
+ c1 = tmp + c1*x2
+ return c0 + c1*x2
+
+
+def hermval2d(x, y, c):
+ """
+ Evaluate a 2-D Hermite series at points (x, y).
+
+ This function returns the values:
+
+ .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * H_i(x) * H_j(y)
+
+ The parameters `x` and `y` are converted to arrays only if they are
+ tuples or lists, otherwise they are treated as scalars and they
+ must have the same shape after conversion. In either case, either `x`
+ and `y` or their elements must support multiplication and addition both
+ with themselves and with the elements of `c`.
+
+ If `c` is a 1-D array, a one is implicitly appended to its shape to make
+ it 2-D. The shape of the result will be c.shape[2:] + x.shape.
+
+ Parameters
+ ----------
+ x, y : array_like, compatible objects
+ The two dimensional series is evaluated at the points `(x, y)`,
+ where `x` and `y` must have the same shape. If `x` or `y` is a list
+ or tuple, it is first converted to an ndarray, otherwise it is left
+ unchanged and, if it isn't an ndarray, it is treated as a scalar.
+ c : array_like
+ Array of coefficients ordered so that the coefficient of the term
+ of multi-degree i,j is contained in ``c[i,j]``. If `c` has
+ dimension greater than two the remaining indices enumerate multiple
+ sets of coefficients.
+
+ Returns
+ -------
+ values : ndarray, compatible object
+ The values of the two dimensional polynomial at points formed with
+ pairs of corresponding values from `x` and `y`.
+
+ See Also
+ --------
+ hermval, hermgrid2d, hermval3d, hermgrid3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ """
+ return pu._valnd(hermval, c, x, y)
+
+
+def hermgrid2d(x, y, c):
+ """
+ Evaluate a 2-D Hermite series on the Cartesian product of x and y.
+
+ This function returns the values:
+
+ .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * H_i(a) * H_j(b)
+
+ where the points `(a, b)` consist of all pairs formed by taking
+ `a` from `x` and `b` from `y`. The resulting points form a grid with
+ `x` in the first dimension and `y` in the second.
+
+ The parameters `x` and `y` are converted to arrays only if they are
+ tuples or lists, otherwise they are treated as scalars. In either
+ case, either `x` and `y` or their elements must support multiplication
+ and addition both with themselves and with the elements of `c`.
+
+ If `c` has fewer than two dimensions, ones are implicitly appended to
+ its shape to make it 2-D. The shape of the result will be c.shape[2:] +
+ x.shape + y.shape.
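+ For example, if `x` has shape (3,) and `y` has shape (5,) and `c` is
+ two dimensional, the result has shape (3, 5).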
+ + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points in the + Cartesian product of `x` and `y`. If `x` or `y` is a list or + tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j are contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points in the Cartesian + product of `x` and `y`. + + See Also + -------- + hermval, hermval2d, hermval3d, hermgrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + return pu._gridnd(hermval, c, x, y) + + +def hermval3d(x, y, z, c): + """ + Evaluate a 3-D Hermite series at points (x, y, z). + + This function returns the values: + + .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * H_i(x) * H_j(y) * H_k(z) + + The parameters `x`, `y`, and `z` are converted to arrays only if + they are tuples or a lists, otherwise they are treated as a scalars and + they must have the same shape after conversion. In either case, either + `x`, `y`, and `z` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than 3 dimensions, ones are implicitly appended to its + shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape. + + Parameters + ---------- + x, y, z : array_like, compatible object + The three dimensional series is evaluated at the points + `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If + any of `x`, `y`, or `z` is a list or tuple, it is first converted + to an ndarray, otherwise it is left unchanged and if it isn't an + ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension + greater than 3 the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + triples of corresponding values from `x`, `y`, and `z`. + + See Also + -------- + hermval, hermval2d, hermgrid2d, hermgrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + return pu._valnd(hermval, c, x, y, z) + + +def hermgrid3d(x, y, z, c): + """ + Evaluate a 3-D Hermite series on the Cartesian product of x, y, and z. + + This function returns the values: + + .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * H_i(a) * H_j(b) * H_k(c) + + where the points `(a, b, c)` consist of all triples formed by taking + `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form + a grid with `x` in the first dimension, `y` in the second, and `z` in + the third. + + The parameters `x`, `y`, and `z` are converted to arrays only if they + are tuples or a lists, otherwise they are treated as a scalars. In + either case, either `x`, `y`, and `z` or their elements must support + multiplication and addition both with themselves and with the elements + of `c`. + + If `c` has fewer than three dimensions, ones are implicitly appended to + its shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape + y.shape + z.shape. 
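+ For example, if `x`, `y`, and `z` each have shape (2,) and `c` is
+ three dimensional, the result has shape (2, 2, 2).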
+
+ Parameters
+ ----------
+ x, y, z : array_like, compatible objects
+ The three dimensional series is evaluated at the points in the
+ Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a
+ list or tuple, it is first converted to an ndarray, otherwise it is
+ left unchanged and, if it isn't an ndarray, it is treated as a
+ scalar.
+ c : array_like
+ Array of coefficients ordered so that the coefficients for terms of
+ degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension
+ greater than three the remaining indices enumerate multiple sets of
+ coefficients.
+
+ Returns
+ -------
+ values : ndarray, compatible object
+ The values of the three dimensional polynomial at points in the
+ Cartesian product of `x`, `y`, and `z`.
+
+ See Also
+ --------
+ hermval, hermval2d, hermgrid2d, hermval3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ """
+ return pu._gridnd(hermval, c, x, y, z)
+
+
+def hermvander(x, deg):
+ """Pseudo-Vandermonde matrix of given degree.
+
+ Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
+ `x`. The pseudo-Vandermonde matrix is defined by
+
+ .. math:: V[..., i] = H_i(x),
+
+ where `0 <= i <= deg`. The leading indices of `V` index the elements of
+ `x` and the last index is the degree of the Hermite polynomial.
+
+ If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
+ array ``V = hermvander(x, n)``, then ``np.dot(V, c)`` and
+ ``hermval(x, c)`` are the same up to roundoff. This equivalence is
+ useful both for least squares fitting and for the evaluation of a large
+ number of Hermite series of the same degree and sample points.
+
+ Parameters
+ ----------
+ x : array_like
+ Array of points. The dtype is converted to float64 or complex128
+ depending on whether any of the elements are complex. If `x` is
+ scalar it is converted to a 1-D array.
+ deg : int
+ Degree of the resulting matrix.
+
+ Returns
+ -------
+ vander : ndarray
+ The pseudo-Vandermonde matrix. The shape of the returned matrix is
+ ``x.shape + (deg + 1,)``, where the last index is the degree of the
+ corresponding Hermite polynomial. The dtype will be the same as
+ the converted `x`.
+
+ Examples
+ --------
+ >>> from numpy.polynomial.hermite import hermvander
+ >>> x = np.array([-1, 0, 1])
+ >>> hermvander(x, 3)
+ array([[ 1., -2., 2., 4.],
+ [ 1., 0., -2., -0.],
+ [ 1., 2., 2., -4.]])
+
+ """
+ ideg = pu._deprecate_as_int(deg, "deg")
+ if ideg < 0:
+ raise ValueError("deg must be non-negative")
+
+ x = np.array(x, copy=False, ndmin=1) + 0.0
+ dims = (ideg + 1,) + x.shape
+ dtyp = x.dtype
+ v = np.empty(dims, dtype=dtyp)
+ v[0] = x*0 + 1
+ if ideg > 0:
+ x2 = x*2
+ v[1] = x2
+ for i in range(2, ideg + 1):
+ v[i] = (v[i-1]*x2 - v[i-2]*(2*(i - 1)))
+ return np.moveaxis(v, 0, -1)
+
+
+def hermvander2d(x, y, deg):
+ """Pseudo-Vandermonde matrix of given degrees.
+
+ Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+ points `(x, y)`. The pseudo-Vandermonde matrix is defined by
+
+ .. math:: V[..., (deg[1] + 1)*i + j] = H_i(x) * H_j(y),
+
+ where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
+ `V` index the points `(x, y)` and the last index encodes the degrees of
+ the Hermite polynomials.
+
+ If ``V = hermvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
+ correspond to the elements of a 2-D coefficient array `c` of shape
+ (xdeg + 1, ydeg + 1) in the order
+
+ .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
+
+ and ``np.dot(V, c.flat)`` and ``hermval2d(x, y, c)`` will be the same
+ up to roundoff. This equivalence is useful both for least squares
+ fitting and for the evaluation of a large number of 2-D Hermite
+ series of the same degrees and sample points.
+
+ Parameters
+ ----------
+ x, y : array_like
+ Arrays of point coordinates, all of the same shape. The dtypes
+ will be converted to either float64 or complex128 depending on
+ whether any of the elements are complex. Scalars are converted to 1-D
+ arrays.
+ deg : list of ints
+ List of maximum degrees of the form [x_deg, y_deg].
+
+ Returns
+ -------
+ vander2d : ndarray
+ The shape of the returned matrix is ``x.shape + (order,)``, where
+ :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
+ as the converted `x` and `y`.
+
+ See Also
+ --------
+ hermvander, hermvander3d, hermval2d, hermval3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ """
+ return pu._vander_nd_flat((hermvander, hermvander), (x, y), deg)
+
+
+def hermvander3d(x, y, z, deg):
+ """Pseudo-Vandermonde matrix of given degrees.
+
+ Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+ points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
+ then the pseudo-Vandermonde matrix is defined by
+
+ .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = H_i(x)*H_j(y)*H_k(z),
+
+ where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
+ indices of `V` index the points `(x, y, z)` and the last index encodes
+ the degrees of the Hermite polynomials.
+
+ If ``V = hermvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
+ of `V` correspond to the elements of a 3-D coefficient array `c` of
+ shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
+
+ .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
+
+ and ``np.dot(V, c.flat)`` and ``hermval3d(x, y, z, c)`` will be the
+ same up to roundoff. This equivalence is useful both for least squares
+ fitting and for the evaluation of a large number of 3-D Hermite
+ series of the same degrees and sample points.
+
+ Parameters
+ ----------
+ x, y, z : array_like
+ Arrays of point coordinates, all of the same shape. The dtypes will
+ be converted to either float64 or complex128 depending on whether
+ any of the elements are complex. Scalars are converted to 1-D
+ arrays.
+ deg : list of ints
+ List of maximum degrees of the form [x_deg, y_deg, z_deg].
+
+ Returns
+ -------
+ vander3d : ndarray
+ The shape of the returned matrix is ``x.shape + (order,)``, where
+ :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
+ be the same as the converted `x`, `y`, and `z`.
+
+ See Also
+ --------
+ hermvander, hermvander2d, hermval2d, hermval3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ """
+ return pu._vander_nd_flat((hermvander, hermvander, hermvander), (x, y, z), deg)
+
+
+def hermfit(x, y, deg, rcond=None, full=False, w=None):
+ """
+ Least squares fit of Hermite series to data.
+
+ Return the coefficients of a Hermite series of degree `deg` that is the
+ least squares fit to the data values `y` given at points `x`. If `y` is
+ 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
+ fits are done, one for each column of `y`, and the resulting
+ coefficients are stored in the corresponding columns of a 2-D return.
+ The fitted polynomial(s) are in the form
+
+ .. math:: p(x) = c_0 + c_1 * H_1(x) + ... + c_n * H_n(x),
+
+ where `n` is `deg`.
+ + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) or (M, K) + y-coordinates of the sample points. Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. For NumPy versions >= 1.11.0 a list of integers specifying the + degrees of the terms to include may be used instead. + rcond : float, optional + Relative condition number of the fit. Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is len(x)*eps, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (`M`,), optional + Weights. If not None, the contribution of each point + ``(x[i],y[i])`` to the fit is weighted by ``w[i]``. Ideally the + weights are chosen so that the errors of the products ``w[i]*y[i]`` + all have the same variance. The default value is None. + + Returns + ------- + coef : ndarray, shape (M,) or (M, K) + Hermite coefficients ordered from low to high. If `y` was 2-D, + the coefficients for the data in column k of `y` are in column + `k`. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if `full` = True + + resid -- sum of squared residuals of the least squares fit + rank -- the numerical rank of the scaled Vandermonde matrix + sv -- singular values of the scaled Vandermonde matrix + rcond -- value of `rcond`. + + For more details, see `numpy.linalg.lstsq`. + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if `full` = False. The + warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', np.RankWarning) + + See Also + -------- + numpy.polynomial.chebyshev.chebfit + numpy.polynomial.legendre.legfit + numpy.polynomial.laguerre.lagfit + numpy.polynomial.polynomial.polyfit + numpy.polynomial.hermite_e.hermefit + hermval : Evaluates a Hermite series. + hermvander : Vandermonde matrix of Hermite series. + hermweight : Hermite weight function + numpy.linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the Hermite series `p` that + minimizes the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where the :math:`w_j` are the weights. This problem is solved by + setting up the (typically) overdetermined matrix equation + + .. math:: V(x) * c = w * y, + + where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the + coefficients to be solved for, `w` are the weights, `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected, then a `RankWarning` will be issued. This means that the + coefficient values may be poorly determined. 
Using a lower order fit
+ will usually get rid of the warning. The `rcond` parameter can also be
+ set to a value smaller than its default, but the resulting fit may be
+ spurious and have large contributions from roundoff error.
+
+ Fits using Hermite series are probably most useful when the data can be
+ approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the Hermite
+ weight. In that case the weight ``sqrt(w(x[i]))`` should be used
+ together with data values ``y[i]/sqrt(w(x[i]))``. The weight function is
+ available as `hermweight`.
+
+ References
+ ----------
+ .. [1] Wikipedia, "Curve fitting",
+ https://en.wikipedia.org/wiki/Curve_fitting
+
+ Examples
+ --------
+ >>> from numpy.polynomial.hermite import hermfit, hermval
+ >>> x = np.linspace(-10, 10)
+ >>> err = np.random.randn(len(x))/10
+ >>> y = hermval(x, [1, 2, 3]) + err
+ >>> hermfit(x, y, 2)
+ array([1.0218, 1.9986, 2.9999]) # may vary
+
+ """
+ return pu._fit(hermvander, x, y, deg, rcond, full, w)
+
+
+def hermcompanion(c):
+ """Return the scaled companion matrix of c.
+
+ The basis polynomials are scaled so that the companion matrix is
+ symmetric when `c` is a Hermite basis polynomial. This provides
+ better eigenvalue estimates than the unscaled case, and for basis
+ polynomials the eigenvalues are guaranteed to be real if
+ `numpy.linalg.eigvalsh` is used to obtain them.
+
+ Parameters
+ ----------
+ c : array_like
+ 1-D array of Hermite series coefficients ordered from low to high
+ degree.
+
+ Returns
+ -------
+ mat : ndarray
+ Scaled companion matrix of dimensions (deg, deg).
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ """
+ # c is a trimmed copy
+ [c] = pu.as_series([c])
+ if len(c) < 2:
+ raise ValueError('Series must have maximum degree of at least 1.')
+ if len(c) == 2:
+ return np.array([[-.5*c[0]/c[1]]])
+
+ n = len(c) - 1
+ mat = np.zeros((n, n), dtype=c.dtype)
+ scl = np.hstack((1., 1./np.sqrt(2.*np.arange(n - 1, 0, -1))))
+ scl = np.multiply.accumulate(scl)[::-1]
+ top = mat.reshape(-1)[1::n+1]
+ bot = mat.reshape(-1)[n::n+1]
+ top[...] = np.sqrt(.5*np.arange(1, n))
+ bot[...] = top
+ mat[:, -1] -= scl*c[:-1]/(2.0*c[-1])
+ return mat
+
+
+def hermroots(c):
+ """
+ Compute the roots of a Hermite series.
+
+ Return the roots (a.k.a. "zeros") of the polynomial
+
+ .. math:: p(x) = \\sum_i c[i] * H_i(x).
+
+ Parameters
+ ----------
+ c : 1-D array_like
+ 1-D array of coefficients.
+
+ Returns
+ -------
+ out : ndarray
+ Array of the roots of the series. If all the roots are real,
+ then `out` is also real, otherwise it is complex.
+
+ See Also
+ --------
+ numpy.polynomial.polynomial.polyroots
+ numpy.polynomial.legendre.legroots
+ numpy.polynomial.laguerre.lagroots
+ numpy.polynomial.chebyshev.chebroots
+ numpy.polynomial.hermite_e.hermeroots
+
+ Notes
+ -----
+ The root estimates are obtained as the eigenvalues of the companion
+ matrix. Roots far from the origin of the complex plane may have large
+ errors due to the numerical instability of the series for such
+ values. Roots with multiplicity greater than 1 will also show larger
+ errors as the value of the series near such points is relatively
+ insensitive to errors in the roots. Isolated roots near the origin can
+ be improved by a few iterations of Newton's method.
+
+ The Hermite series basis polynomials aren't powers of `x` so the
+ results of this function may seem unintuitive.
+
+ Examples
+ --------
+ >>> from numpy.polynomial.hermite import hermroots, hermfromroots
+ >>> coef = hermfromroots([-1, 0, 1])
+ >>> coef
+ array([0. , 0.25 , 0. 
, 0.125]) + >>> hermroots(coef) + array([-1.00000000e+00, -1.38777878e-17, 1.00000000e+00]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) <= 1: + return np.array([], dtype=c.dtype) + if len(c) == 2: + return np.array([-.5*c[0]/c[1]]) + + # rotated companion matrix reduces error + m = hermcompanion(c)[::-1,::-1] + r = la.eigvals(m) + r.sort() + return r + + +def _normed_hermite_n(x, n): + """ + Evaluate a normalized Hermite polynomial. + + Compute the value of the normalized Hermite polynomial of degree ``n`` + at the points ``x``. + + + Parameters + ---------- + x : ndarray of double. + Points at which to evaluate the function + n : int + Degree of the normalized Hermite function to be evaluated. + + Returns + ------- + values : ndarray + The shape of the return value is described above. + + Notes + ----- + .. versionadded:: 1.10.0 + + This function is needed for finding the Gauss points and integration + weights for high degrees. The values of the standard Hermite functions + overflow when n >= 207. + + """ + if n == 0: + return np.full(x.shape, 1/np.sqrt(np.sqrt(np.pi))) + + c0 = 0. + c1 = 1./np.sqrt(np.sqrt(np.pi)) + nd = float(n) + for i in range(n - 1): + tmp = c0 + c0 = -c1*np.sqrt((nd - 1.)/nd) + c1 = tmp + c1*x*np.sqrt(2./nd) + nd = nd - 1.0 + return c0 + c1*x*np.sqrt(2) + + +def hermgauss(deg): + """ + Gauss-Hermite quadrature. + + Computes the sample points and weights for Gauss-Hermite quadrature. + These sample points and weights will correctly integrate polynomials of + degree :math:`2*deg - 1` or less over the interval :math:`[-\\inf, \\inf]` + with the weight function :math:`f(x) = \\exp(-x^2)`. + + Parameters + ---------- + deg : int + Number of sample points and weights. It must be >= 1. + + Returns + ------- + x : ndarray + 1-D ndarray containing the sample points. + y : ndarray + 1-D ndarray containing the weights. + + Notes + ----- + + .. versionadded:: 1.7.0 + + The results have only been tested up to degree 100, higher degrees may + be problematic. The weights are determined by using the fact that + + .. math:: w_k = c / (H'_n(x_k) * H_{n-1}(x_k)) + + where :math:`c` is a constant independent of :math:`k` and :math:`x_k` + is the k'th root of :math:`H_n`, and then scaling the results to get + the right value when integrating 1. + + """ + ideg = pu._deprecate_as_int(deg, "deg") + if ideg <= 0: + raise ValueError("deg must be a positive integer") + + # first approximation of roots. We use the fact that the companion + # matrix is symmetric in this case in order to obtain better zeros. + c = np.array([0]*deg + [1], dtype=np.float64) + m = hermcompanion(c) + x = la.eigvalsh(m) + + # improve roots by one application of Newton + dy = _normed_hermite_n(x, ideg) + df = _normed_hermite_n(x, ideg - 1) * np.sqrt(2*ideg) + x -= dy/df + + # compute the weights. We scale the factor to avoid possible numerical + # overflow. + fm = _normed_hermite_n(x, ideg - 1) + fm /= np.abs(fm).max() + w = 1/(fm * fm) + + # for Hermite we can also symmetrize + w = (w + w[::-1])/2 + x = (x - x[::-1])/2 + + # scale w to get the right value + w *= np.sqrt(np.pi) / w.sum() + + return x, w + + +def hermweight(x): + """ + Weight function of the Hermite polynomials. + + The weight function is :math:`\\exp(-x^2)` and the interval of + integration is :math:`[-\\inf, \\inf]`. the Hermite polynomials are + orthogonal, but not normalized, with respect to this weight function. + + Parameters + ---------- + x : array_like + Values at which the weight function will be computed. 
+
+ Returns
+ -------
+ w : ndarray
+ The weight function at `x`.
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ """
+ w = np.exp(-x**2)
+ return w
+
+
+#
+# Hermite series class
+#
+
+class Hermite(ABCPolyBase):
+ """A Hermite series class.
+
+ The Hermite class provides the standard Python numerical methods
+ '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
+ attributes and methods listed in the `ABCPolyBase` documentation.
+
+ Parameters
+ ----------
+ coef : array_like
+ Hermite coefficients in order of increasing degree, i.e.,
+ ``(1, 2, 3)`` gives ``1*H_0(x) + 2*H_1(x) + 3*H_2(x)``.
+ domain : (2,) array_like, optional
+ Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
+ to the interval ``[window[0], window[1]]`` by shifting and scaling.
+ The default value is [-1, 1].
+ window : (2,) array_like, optional
+ Window, see `domain` for its use. The default value is [-1, 1].
+
+ .. versionadded:: 1.6.0
+
+ """
+ # Virtual Functions
+ _add = staticmethod(hermadd)
+ _sub = staticmethod(hermsub)
+ _mul = staticmethod(hermmul)
+ _div = staticmethod(hermdiv)
+ _pow = staticmethod(hermpow)
+ _val = staticmethod(hermval)
+ _int = staticmethod(hermint)
+ _der = staticmethod(hermder)
+ _fit = staticmethod(hermfit)
+ _line = staticmethod(hermline)
+ _roots = staticmethod(hermroots)
+ _fromroots = staticmethod(hermfromroots)
+
+ # Virtual properties
+ domain = np.array(hermdomain)
+ window = np.array(hermdomain)
+ basis_name = 'H'
diff --git a/MLPY/Lib/site-packages/numpy/polynomial/hermite.pyi b/MLPY/Lib/site-packages/numpy/polynomial/hermite.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..663f4fb35104088b157ce69edd86fdd8e97f19a7
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/polynomial/hermite.pyi
@@ -0,0 +1,46 @@
+from typing import Any, List
+
+from numpy import ndarray, dtype, int_, float_
+from numpy.polynomial._polybase import ABCPolyBase
+from numpy.polynomial.polyutils import trimcoef
+
+__all__: List[str]
+
+hermtrim = trimcoef
+
+def poly2herm(pol): ...
+def herm2poly(c): ...
+
+hermdomain: ndarray[Any, dtype[int_]]
+hermzero: ndarray[Any, dtype[int_]]
+hermone: ndarray[Any, dtype[int_]]
+hermx: ndarray[Any, dtype[float_]]
+
+def hermline(off, scl): ...
+def hermfromroots(roots): ...
+def hermadd(c1, c2): ...
+def hermsub(c1, c2): ...
+def hermmulx(c): ...
+def hermmul(c1, c2): ...
+def hermdiv(c1, c2): ...
+def hermpow(c, pow, maxpower=...): ...
+def hermder(c, m=..., scl=..., axis=...): ...
+def hermint(c, m=..., k = ..., lbnd=..., scl=..., axis=...): ...
+def hermval(x, c, tensor=...): ...
+def hermval2d(x, y, c): ...
+def hermgrid2d(x, y, c): ...
+def hermval3d(x, y, z, c): ...
+def hermgrid3d(x, y, z, c): ...
+def hermvander(x, deg): ...
+def hermvander2d(x, y, deg): ...
+def hermvander3d(x, y, z, deg): ...
+def hermfit(x, y, deg, rcond=..., full=..., w=...): ...
+def hermcompanion(c): ...
+def hermroots(c): ...
+def hermgauss(deg): ...
+def hermweight(x): ...
+ +class Hermite(ABCPolyBase): + domain: Any + window: Any + basis_name: Any diff --git a/MLPY/Lib/site-packages/numpy/polynomial/hermite_e.py b/MLPY/Lib/site-packages/numpy/polynomial/hermite_e.py new file mode 100644 index 0000000000000000000000000000000000000000..22a080b45364588f9a2c67de9fe57a99fcaa8783 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/polynomial/hermite_e.py @@ -0,0 +1,1688 @@ +""" +=================================================================== +HermiteE Series, "Probabilists" (:mod:`numpy.polynomial.hermite_e`) +=================================================================== + +This module provides a number of objects (mostly functions) useful for +dealing with Hermite_e series, including a `HermiteE` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Classes +------- +.. autosummary:: + :toctree: generated/ + + HermiteE + +Constants +--------- +.. autosummary:: + :toctree: generated/ + + hermedomain + hermezero + hermeone + hermex + +Arithmetic +---------- +.. autosummary:: + :toctree: generated/ + + hermeadd + hermesub + hermemulx + hermemul + hermediv + hermepow + hermeval + hermeval2d + hermeval3d + hermegrid2d + hermegrid3d + +Calculus +-------- +.. autosummary:: + :toctree: generated/ + + hermeder + hermeint + +Misc Functions +-------------- +.. autosummary:: + :toctree: generated/ + + hermefromroots + hermeroots + hermevander + hermevander2d + hermevander3d + hermegauss + hermeweight + hermecompanion + hermefit + hermetrim + hermeline + herme2poly + poly2herme + +See also +-------- +`numpy.polynomial` + +""" +import numpy as np +import numpy.linalg as la +from numpy.core.multiarray import normalize_axis_index + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'hermezero', 'hermeone', 'hermex', 'hermedomain', 'hermeline', + 'hermeadd', 'hermesub', 'hermemulx', 'hermemul', 'hermediv', + 'hermepow', 'hermeval', 'hermeder', 'hermeint', 'herme2poly', + 'poly2herme', 'hermefromroots', 'hermevander', 'hermefit', 'hermetrim', + 'hermeroots', 'HermiteE', 'hermeval2d', 'hermeval3d', 'hermegrid2d', + 'hermegrid3d', 'hermevander2d', 'hermevander3d', 'hermecompanion', + 'hermegauss', 'hermeweight'] + +hermetrim = pu.trimcoef + + +def poly2herme(pol): + """ + poly2herme(pol) + + Convert a polynomial to a Hermite series. + + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Hermite series, ordered + from lowest to highest degree. + + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Hermite + series. + + See Also + -------- + herme2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import poly2herme + >>> poly2herme(np.arange(4)) + array([ 2., 10., 2., 3.]) + + """ + [pol] = pu.as_series([pol]) + deg = len(pol) - 1 + res = 0 + for i in range(deg, -1, -1): + res = hermeadd(hermemulx(res), pol[i]) + return res + + +def herme2poly(c): + """ + Convert a Hermite series to a polynomial. 
+
+    Convert an array representing the coefficients of a Hermite series,
+    ordered from lowest degree to highest, to an array of the coefficients
+    of the equivalent polynomial (relative to the "standard" basis) ordered
+    from lowest to highest degree.
+
+    Parameters
+    ----------
+    c : array_like
+        1-D array containing the Hermite series coefficients, ordered
+        from lowest order term to highest.
+
+    Returns
+    -------
+    pol : ndarray
+        1-D array containing the coefficients of the equivalent polynomial
+        (relative to the "standard" basis) ordered from lowest order term
+        to highest.
+
+    See Also
+    --------
+    poly2herme
+
+    Notes
+    -----
+    The easy way to do conversions between polynomial basis sets
+    is to use the convert method of a class instance.
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite_e import herme2poly
+    >>> herme2poly([ 2., 10., 2., 3.])
+    array([0., 1., 2., 3.])
+
+    """
+    from .polynomial import polyadd, polysub, polymulx
+
+    [c] = pu.as_series([c])
+    n = len(c)
+    if n == 1:
+        return c
+    if n == 2:
+        return c
+    else:
+        c0 = c[-2]
+        c1 = c[-1]
+        # i is the current degree of c1
+        for i in range(n - 1, 1, -1):
+            tmp = c0
+            c0 = polysub(c[i - 2], c1*(i - 1))
+            c1 = polyadd(tmp, polymulx(c1))
+        return polyadd(c0, polymulx(c1))
+
+#
+# These are constant arrays of integer type so as to be compatible
+# with the widest range of other types, such as Decimal.
+#
+
+# Hermite
+hermedomain = np.array([-1, 1])
+
+# Hermite coefficients representing zero.
+hermezero = np.array([0])
+
+# Hermite coefficients representing one.
+hermeone = np.array([1])
+
+# Hermite coefficients representing the identity x.
+hermex = np.array([0, 1])
+
+
+def hermeline(off, scl):
+    """
+    Hermite series whose graph is a straight line.
+
+    Parameters
+    ----------
+    off, scl : scalars
+        The specified line is given by ``off + scl*x``.
+
+    Returns
+    -------
+    y : ndarray
+        This module's representation of the Hermite series for
+        ``off + scl*x``.
+
+    See Also
+    --------
+    numpy.polynomial.polynomial.polyline
+    numpy.polynomial.chebyshev.chebline
+    numpy.polynomial.legendre.legline
+    numpy.polynomial.laguerre.lagline
+    numpy.polynomial.hermite.hermline
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite_e import hermeline, hermeval
+    >>> hermeval(0,hermeline(3, 2))
+    3.0
+    >>> hermeval(1,hermeline(3, 2))
+    5.0
+
+    """
+    if scl != 0:
+        return np.array([off, scl])
+    else:
+        return np.array([off])
+
+
+def hermefromroots(roots):
+    """
+    Generate a HermiteE series with given roots.
+
+    The function returns the coefficients of the polynomial
+
+    .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
+
+    in HermiteE form, where the `r_n` are the roots specified in `roots`.
+    If a zero has multiplicity n, then it must appear in `roots` n times.
+    For instance, if 2 is a root of multiplicity three and 3 is a root of
+    multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
+    roots can appear in any order.
+
+    If the returned coefficients are `c`, then
+
+    .. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x)
+
+    The coefficient of the last term is not generally 1 for monic
+    polynomials in HermiteE form.
+
+    Parameters
+    ----------
+    roots : array_like
+        Sequence containing the roots.
+
+    Returns
+    -------
+    out : ndarray
+        1-D array of coefficients.
If all roots are real then `out` is a + real array, if some of the roots are complex, then `out` is complex + even if all the coefficients in the result are real (see Examples + below). + + See Also + -------- + numpy.polynomial.polynomial.polyfromroots + numpy.polynomial.legendre.legfromroots + numpy.polynomial.laguerre.lagfromroots + numpy.polynomial.hermite.hermfromroots + numpy.polynomial.chebyshev.chebfromroots + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermefromroots, hermeval + >>> coef = hermefromroots((-1, 0, 1)) + >>> hermeval((-1, 0, 1), coef) + array([0., 0., 0.]) + >>> coef = hermefromroots((-1j, 1j)) + >>> hermeval((-1j, 1j), coef) + array([0.+0.j, 0.+0.j]) + + """ + return pu._fromroots(hermeline, hermemul, roots) + + +def hermeadd(c1, c2): + """ + Add one Hermite series to another. + + Returns the sum of two Hermite series `c1` + `c2`. The arguments + are sequences of coefficients ordered from lowest order term to + highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the Hermite series of their sum. + + See Also + -------- + hermesub, hermemulx, hermemul, hermediv, hermepow + + Notes + ----- + Unlike multiplication, division, etc., the sum of two Hermite series + is a Hermite series (without having to "reproject" the result onto + the basis set) so addition, just like that of "standard" polynomials, + is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermeadd + >>> hermeadd([1, 2, 3], [1, 2, 3, 4]) + array([2., 4., 6., 4.]) + + """ + return pu._add(c1, c2) + + +def hermesub(c1, c2): + """ + Subtract one Hermite series from another. + + Returns the difference of two Hermite series `c1` - `c2`. The + sequences of coefficients are from lowest order term to highest, i.e., + [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Hermite series coefficients representing their difference. + + See Also + -------- + hermeadd, hermemulx, hermemul, hermediv, hermepow + + Notes + ----- + Unlike multiplication, division, etc., the difference of two Hermite + series is a Hermite series (without having to "reproject" the result + onto the basis set) so subtraction, just like that of "standard" + polynomials, is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermesub + >>> hermesub([1, 2, 3, 4], [1, 2, 3]) + array([0., 0., 0., 4.]) + + """ + return pu._sub(c1, c2) + + +def hermemulx(c): + """Multiply a Hermite series by x. + + Multiply the Hermite series `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + Notes + ----- + The multiplication uses the recursion relationship for Hermite + polynomials in the form + + .. 
+ math::
+
+        xP_i(x) = (P_{i + 1}(x) + iP_{i - 1}(x))
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite_e import hermemulx
+    >>> hermemulx([1, 2, 3])
+    array([2., 7., 2., 3.])
+
+    """
+    # c is a trimmed copy
+    [c] = pu.as_series([c])
+    # The zero series needs special treatment
+    if len(c) == 1 and c[0] == 0:
+        return c
+
+    prd = np.empty(len(c) + 1, dtype=c.dtype)
+    prd[0] = c[0]*0
+    prd[1] = c[0]
+    for i in range(1, len(c)):
+        prd[i + 1] = c[i]
+        prd[i - 1] += c[i]*i
+    return prd
+
+
+def hermemul(c1, c2):
+    """
+    Multiply one Hermite series by another.
+
+    Returns the product of two Hermite series `c1` * `c2`. The arguments
+    are sequences of coefficients, from lowest order "term" to highest,
+    e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
+
+    Parameters
+    ----------
+    c1, c2 : array_like
+        1-D arrays of Hermite series coefficients ordered from low to
+        high.
+
+    Returns
+    -------
+    out : ndarray
+        Of Hermite series coefficients representing their product.
+
+    See Also
+    --------
+    hermeadd, hermesub, hermemulx, hermediv, hermepow
+
+    Notes
+    -----
+    In general, the (polynomial) product of two C-series results in terms
+    that are not in the Hermite polynomial basis set. Thus, to express
+    the product as a Hermite series, it is necessary to "reproject" the
+    product onto said basis set, which may produce "unintuitive" (but
+    correct) results; see Examples section below.
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite_e import hermemul
+    >>> hermemul([1, 2, 3], [0, 1, 2])
+    array([14., 15., 28.,  7.,  6.])
+
+    """
+    # c1, c2 are trimmed copies
+    [c1, c2] = pu.as_series([c1, c2])
+
+    if len(c1) > len(c2):
+        c = c2
+        xs = c1
+    else:
+        c = c1
+        xs = c2
+
+    if len(c) == 1:
+        c0 = c[0]*xs
+        c1 = 0
+    elif len(c) == 2:
+        c0 = c[0]*xs
+        c1 = c[1]*xs
+    else:
+        nd = len(c)
+        c0 = c[-2]*xs
+        c1 = c[-1]*xs
+        for i in range(3, len(c) + 1):
+            tmp = c0
+            nd = nd - 1
+            c0 = hermesub(c[-i]*xs, c1*(nd - 1))
+            c1 = hermeadd(tmp, hermemulx(c1))
+    return hermeadd(c0, hermemulx(c1))
+
+
+def hermediv(c1, c2):
+    """
+    Divide one Hermite series by another.
+
+    Returns the quotient-with-remainder of two Hermite series
+    `c1` / `c2`. The arguments are sequences of coefficients from lowest
+    order "term" to highest, e.g., [1,2,3] represents the series
+    ``P_0 + 2*P_1 + 3*P_2``.
+
+    Parameters
+    ----------
+    c1, c2 : array_like
+        1-D arrays of Hermite series coefficients ordered from low to
+        high.
+
+    Returns
+    -------
+    [quo, rem] : ndarrays
+        Of Hermite series coefficients representing the quotient and
+        remainder.
+
+    See Also
+    --------
+    hermeadd, hermesub, hermemulx, hermemul, hermepow
+
+    Notes
+    -----
+    In general, the (polynomial) division of one Hermite series by another
+    results in quotient and remainder terms that are not in the Hermite
+    polynomial basis set. Thus, to express these results as a Hermite
+    series, it is necessary to "reproject" the results onto the Hermite
+    basis set, which may produce "unintuitive" (but correct) results; see
+    Examples section below.
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite_e import hermediv
+    >>> hermediv([ 14.,  15.,  28.,   7.,   6.], [0, 1, 2])
+    (array([1., 2., 3.]), array([0.]))
+    >>> hermediv([ 15.,  17.,  28.,   7.,   6.], [0, 1, 2])
+    (array([1., 2., 3.]), array([1., 2.]))
+
+    """
+    return pu._div(hermemul, c1, c2)
+
+
+def hermepow(c, pow, maxpower=16):
+    """Raise a Hermite series to a power.
+
+    Returns the Hermite series `c` raised to the power `pow`.
The + argument `c` is a sequence of coefficients ordered from low to high. + i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` + + Parameters + ---------- + c : array_like + 1-D array of Hermite series coefficients ordered from low to + high. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Hermite series of power. + + See Also + -------- + hermeadd, hermesub, hermemulx, hermemul, hermediv + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermepow + >>> hermepow([1, 2, 3], 2) + array([23., 28., 46., 12., 9.]) + + """ + return pu._pow(hermemul, c, pow, maxpower) + + +def hermeder(c, m=1, scl=1, axis=0): + """ + Differentiate a Hermite_e series. + + Returns the series coefficients `c` differentiated `m` times along + `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*He_0 + 2*He_1 + 3*He_2`` + while [[1,2],[1,2]] represents ``1*He_0(x)*He_0(y) + 1*He_1(x)*He_0(y) + + 2*He_0(x)*He_1(y) + 2*He_1(x)*He_1(y)`` if axis=0 is ``x`` and axis=1 + is ``y``. + + Parameters + ---------- + c : array_like + Array of Hermite_e series coefficients. If `c` is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + der : ndarray + Hermite series of the derivative. + + See Also + -------- + hermeint + + Notes + ----- + In general, the result of differentiating a Hermite series does not + resemble the same operation on a power series. Thus the result of this + function may be "unintuitive," albeit correct; see Examples section + below. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermeder + >>> hermeder([ 1., 1., 1., 1.]) + array([1., 2., 3.]) + >>> hermeder([-0.25, 1., 1./2., 1./3., 1./4 ], m=2) + array([1., 2., 3.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + cnt = pu._deprecate_as_int(m, "the order of derivation") + iaxis = pu._deprecate_as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + return c[:1]*0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 0, -1): + der[j - 1] = j*c[j] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Hermite_e series. + + Returns the Hermite_e series coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. 
+ The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]] + represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) + + 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + Parameters + ---------- + c : array_like + Array of Hermite_e series coefficients. If c is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at + ``lbnd`` is the first value in the list, the value of the second + integral at ``lbnd`` is the second value, etc. If ``k == []`` (the + default), all constants are set to zero. If ``m == 1``, a single + scalar can be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + S : ndarray + Hermite_e series coefficients of the integral. + + Raises + ------ + ValueError + If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + hermeder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a` - perhaps not what one would have first thought. + + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermeint + >>> hermeint([1, 2, 3]) # integrate once, value 0 at 0. + array([1., 1., 1., 1.]) + >>> hermeint([1, 2, 3], m=2) # integrate twice, value & deriv 0 at 0 + array([-0.25 , 1. , 0.5 , 0.33333333, 0.25 ]) # may vary + >>> hermeint([1, 2, 3], k=1) # integrate once, value 1 at 0. + array([2., 1., 1., 1.]) + >>> hermeint([1, 2, 3], lbnd=-1) # integrate once, value 0 at -1 + array([-1., 1., 1., 1.]) + >>> hermeint([1, 2, 3], m=2, k=[1, 2], lbnd=-1) + array([ 1.83333333, 0. 
, 0.5 , 0.33333333, 0.25 ]) # may vary + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if not np.iterable(k): + k = [k] + cnt = pu._deprecate_as_int(m, "the order of integration") + iaxis = pu._deprecate_as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + k = list(k) + [0]*(cnt - len(k)) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) + tmp[0] = c[0]*0 + tmp[1] = c[0] + for j in range(1, n): + tmp[j + 1] = c[j]/(j + 1) + tmp[0] += k[i] - hermeval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def hermeval(x, c, tensor=True): + """ + Evaluate an HermiteE series at points x. + + If `c` is of length `n + 1`, this function returns the value: + + .. math:: p(x) = c_0 * He_0(x) + c_1 * He_1(x) + ... + c_n * He_n(x) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + with themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + .. versionadded:: 1.7.0 + + Returns + ------- + values : ndarray, algebra_like + The shape of the return value is described above. + + See Also + -------- + hermeval2d, hermegrid2d, hermeval3d, hermegrid3d + + Notes + ----- + The evaluation uses Clenshaw recursion, aka synthetic division. 
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite_e import hermeval
+    >>> coef = [1,2,3]
+    >>> hermeval(1, coef)
+    3.0
+    >>> hermeval([[1,2],[3,4]], coef)
+    array([[ 3., 14.],
+           [31., 54.]])
+
+    """
+    c = np.array(c, ndmin=1, copy=False)
+    if c.dtype.char in '?bBhHiIlLqQpP':
+        c = c.astype(np.double)
+    if isinstance(x, (tuple, list)):
+        x = np.asarray(x)
+    if isinstance(x, np.ndarray) and tensor:
+        c = c.reshape(c.shape + (1,)*x.ndim)
+
+    if len(c) == 1:
+        c0 = c[0]
+        c1 = 0
+    elif len(c) == 2:
+        c0 = c[0]
+        c1 = c[1]
+    else:
+        nd = len(c)
+        c0 = c[-2]
+        c1 = c[-1]
+        for i in range(3, len(c) + 1):
+            tmp = c0
+            nd = nd - 1
+            c0 = c[-i] - c1*(nd - 1)
+            c1 = tmp + c1*x
+    return c0 + c1*x
+
+
+def hermeval2d(x, y, c):
+    """
+    Evaluate a 2-D HermiteE series at points (x, y).
+
+    This function returns the values:
+
+    .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * He_i(x) * He_j(y)
+
+    The parameters `x` and `y` are converted to arrays only if they are
+    tuples or lists, otherwise they are treated as scalars and they
+    must have the same shape after conversion. In either case, either `x`
+    and `y` or their elements must support multiplication and addition both
+    with themselves and with the elements of `c`.
+
+    If `c` is a 1-D array a one is implicitly appended to its shape to make
+    it 2-D. The shape of the result will be c.shape[2:] + x.shape.
+
+    Parameters
+    ----------
+    x, y : array_like, compatible objects
+        The two dimensional series is evaluated at the points `(x, y)`,
+        where `x` and `y` must have the same shape. If `x` or `y` is a list
+        or tuple, it is first converted to an ndarray, otherwise it is left
+        unchanged and if it isn't an ndarray it is treated as a scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficient of the term
+        of multi-degree i,j is contained in ``c[i,j]``. If `c` has
+        dimension greater than two the remaining indices enumerate multiple
+        sets of coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the two dimensional polynomial at points formed with
+        pairs of corresponding values from `x` and `y`.
+
+    See Also
+    --------
+    hermeval, hermegrid2d, hermeval3d, hermegrid3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    return pu._valnd(hermeval, c, x, y)
+
+
+def hermegrid2d(x, y, c):
+    """
+    Evaluate a 2-D HermiteE series on the Cartesian product of x and y.
+
+    This function returns the values:
+
+    .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * He_i(a) * He_j(b)
+
+    where the points `(a, b)` consist of all pairs formed by taking
+    `a` from `x` and `b` from `y`. The resulting points form a grid with
+    `x` in the first dimension and `y` in the second.
+
+    The parameters `x` and `y` are converted to arrays only if they are
+    tuples or lists, otherwise they are treated as scalars. In either
+    case, either `x` and `y` or their elements must support multiplication
+    and addition both with themselves and with the elements of `c`.
+
+    If `c` has fewer than two dimensions, ones are implicitly appended to
+    its shape to make it 2-D. The shape of the result will be c.shape[2:] +
+    x.shape.
+
+    Parameters
+    ----------
+    x, y : array_like, compatible objects
+        The two dimensional series is evaluated at the points in the
+        Cartesian product of `x` and `y`. If `x` or `y` is a list or
+        tuple, it is first converted to an ndarray, otherwise it is left
+        unchanged and, if it isn't an ndarray, it is treated as a scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficients for terms of
+        degree i,j are contained in ``c[i,j]``. If `c` has dimension
+        greater than two the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the two dimensional polynomial at points in the
+        Cartesian product of `x` and `y`.
+
+    See Also
+    --------
+    hermeval, hermeval2d, hermeval3d, hermegrid3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    return pu._gridnd(hermeval, c, x, y)
+
+
+def hermeval3d(x, y, z, c):
+    """
+    Evaluate a 3-D Hermite_e series at points (x, y, z).
+
+    This function returns the values:
+
+    .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * He_i(x) * He_j(y) * He_k(z)
+
+    The parameters `x`, `y`, and `z` are converted to arrays only if
+    they are tuples or lists, otherwise they are treated as scalars and
+    they must have the same shape after conversion. In either case, either
+    `x`, `y`, and `z` or their elements must support multiplication and
+    addition both with themselves and with the elements of `c`.
+
+    If `c` has fewer than 3 dimensions, ones are implicitly appended to its
+    shape to make it 3-D. The shape of the result will be c.shape[3:] +
+    x.shape.
+
+    Parameters
+    ----------
+    x, y, z : array_like, compatible object
+        The three dimensional series is evaluated at the points
+        `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
+        any of `x`, `y`, or `z` is a list or tuple, it is first converted
+        to an ndarray, otherwise it is left unchanged and if it isn't an
+        ndarray it is treated as a scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficient of the term of
+        multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
+        greater than 3 the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the multidimensional polynomial on points formed with
+        triples of corresponding values from `x`, `y`, and `z`.
+
+    See Also
+    --------
+    hermeval, hermeval2d, hermegrid2d, hermegrid3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    return pu._valnd(hermeval, c, x, y, z)
+
+
+def hermegrid3d(x, y, z, c):
+    """
+    Evaluate a 3-D HermiteE series on the Cartesian product of x, y, and z.
+
+    This function returns the values:
+
+    .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * He_i(a) * He_j(b) * He_k(c)
+
+    where the points `(a, b, c)` consist of all triples formed by taking
+    `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
+    a grid with `x` in the first dimension, `y` in the second, and `z` in
+    the third.
+
+    The parameters `x`, `y`, and `z` are converted to arrays only if they
+    are tuples or lists, otherwise they are treated as scalars. In
+    either case, either `x`, `y`, and `z` or their elements must support
+    multiplication and addition both with themselves and with the elements
+    of `c`.
+
+    If `c` has fewer than three dimensions, ones are implicitly appended to
+    its shape to make it 3-D. The shape of the result will be c.shape[3:] +
+    x.shape + y.shape + z.shape.
+
+    Parameters
+    ----------
+    x, y, z : array_like, compatible objects
+        The three dimensional series is evaluated at the points in the
+        Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
+        list or tuple, it is first converted to an ndarray, otherwise it is
+        left unchanged and, if it isn't an ndarray, it is treated as a
+        scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficients for terms of
+        degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension
+        greater than three the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the three dimensional polynomial at points in the
+        Cartesian product of `x`, `y`, and `z`.
+
+    See Also
+    --------
+    hermeval, hermeval2d, hermegrid2d, hermeval3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    return pu._gridnd(hermeval, c, x, y, z)
+
+
+def hermevander(x, deg):
+    """Pseudo-Vandermonde matrix of given degree.
+
+    Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
+    `x`. The pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., i] = He_i(x),
+
+    where `0 <= i <= deg`. The leading indices of `V` index the elements of
+    `x` and the last index is the degree of the HermiteE polynomial.
+
+    If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
+    array ``V = hermevander(x, n)``, then ``np.dot(V, c)`` and
+    ``hermeval(x, c)`` are the same up to roundoff. This equivalence is
+    useful both for least squares fitting and for the evaluation of a large
+    number of HermiteE series of the same degree and sample points.
+
+    Parameters
+    ----------
+    x : array_like
+        Array of points. The dtype is converted to float64 or complex128
+        depending on whether any of the elements are complex. If `x` is
+        scalar it is converted to a 1-D array.
+    deg : int
+        Degree of the resulting matrix.
+
+    Returns
+    -------
+    vander : ndarray
+        The pseudo-Vandermonde matrix. The shape of the returned matrix is
+        ``x.shape + (deg + 1,)``, where the last index is the degree of the
+        corresponding HermiteE polynomial. The dtype will be the same as
+        the converted `x`.
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite_e import hermevander
+    >>> x = np.array([-1, 0, 1])
+    >>> hermevander(x, 3)
+    array([[ 1., -1.,  0.,  2.],
+           [ 1.,  0., -1., -0.],
+           [ 1.,  1.,  0., -2.]])
+
+    """
+    ideg = pu._deprecate_as_int(deg, "deg")
+    if ideg < 0:
+        raise ValueError("deg must be non-negative")
+
+    x = np.array(x, copy=False, ndmin=1) + 0.0
+    dims = (ideg + 1,) + x.shape
+    dtyp = x.dtype
+    v = np.empty(dims, dtype=dtyp)
+    v[0] = x*0 + 1
+    if ideg > 0:
+        v[1] = x
+        for i in range(2, ideg + 1):
+            v[i] = (v[i-1]*x - v[i-2]*(i - 1))
+    return np.moveaxis(v, 0, -1)
+
+
+def hermevander2d(x, y, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points `(x, y)`. The pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (deg[1] + 1)*i + j] = He_i(x) * He_j(y),
+
+    where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
+    `V` index the points `(x, y)` and the last index encodes the degrees of
+    the HermiteE polynomials.
+
+    If ``V = hermevander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
+    correspond to the elements of a 2-D coefficient array `c` of shape
+    (xdeg + 1, ydeg + 1) in the order
+
+    .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
+
+    and ``np.dot(V, c.flat)`` and ``hermeval2d(x, y, c)`` will be the same
+    up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 2-D HermiteE
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes
+        will be converted to either float64 or complex128 depending on
+        whether any of the elements are complex. Scalars are converted to
+        1-D arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg].
+
+    Returns
+    -------
+    vander2d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
+        as the converted `x` and `y`.
+
+    See Also
+    --------
+    hermevander, hermevander3d, hermeval2d, hermeval3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    return pu._vander_nd_flat((hermevander, hermevander), (x, y), deg)
+
+
+def hermevander3d(x, y, z, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
+    then the pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = He_i(x)*He_j(y)*He_k(z),
+
+    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
+    indices of `V` index the points `(x, y, z)` and the last index encodes
+    the degrees of the HermiteE polynomials.
+
+    If ``V = hermevander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
+    of `V` correspond to the elements of a 3-D coefficient array `c` of
+    shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
+
+    .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
+
+    and ``np.dot(V, c.flat)`` and ``hermeval3d(x, y, z, c)`` will be the
+    same up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 3-D HermiteE
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y, z : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes will
+        be converted to either float64 or complex128 depending on whether
+        any of the elements are complex. Scalars are converted to 1-D
+        arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg, z_deg].
+
+    Returns
+    -------
+    vander3d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
+        be the same as the converted `x`, `y`, and `z`.
+
+    See Also
+    --------
+    hermevander, hermevander2d, hermeval2d, hermeval3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    return pu._vander_nd_flat((hermevander, hermevander, hermevander), (x, y, z), deg)
+
+
+def hermefit(x, y, deg, rcond=None, full=False, w=None):
+    """
+    Least squares fit of Hermite series to data.
+
+    Return the coefficients of a HermiteE series of degree `deg` that is
+    the least squares fit to the data values `y` given at points `x`. If
+    `y` is 1-D the returned coefficients will also be 1-D. If `y` is 2-D
+    multiple fits are done, one for each column of `y`, and the resulting
+    coefficients are stored in the corresponding columns of a 2-D return.
+    The fitted polynomial(s) are in the form
+
+    .. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x),
+
+    where `n` is `deg`.
+
+    Parameters
+    ----------
+    x : array_like, shape (M,)
+        x-coordinates of the M sample points ``(x[i], y[i])``.
+    y : array_like, shape (M,) or (M, K)
+        y-coordinates of the sample points. Several data sets of sample
+        points sharing the same x-coordinates can be fitted at once by
+        passing in a 2D-array that contains one dataset per column.
+    deg : int or 1-D array_like
+        Degree(s) of the fitting polynomials.
If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. For NumPy versions >= 1.11.0 a list of integers specifying the + degrees of the terms to include may be used instead. + rcond : float, optional + Relative condition number of the fit. Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is len(x)*eps, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (`M`,), optional + Weights. If not None, the contribution of each point + ``(x[i],y[i])`` to the fit is weighted by ``w[i]``. Ideally the + weights are chosen so that the errors of the products ``w[i]*y[i]`` + all have the same variance. The default value is None. + + Returns + ------- + coef : ndarray, shape (M,) or (M, K) + Hermite coefficients ordered from low to high. If `y` was 2-D, + the coefficients for the data in column k of `y` are in column + `k`. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if `full` = True + + resid -- sum of squared residuals of the least squares fit + rank -- the numerical rank of the scaled Vandermonde matrix + sv -- singular values of the scaled Vandermonde matrix + rcond -- value of `rcond`. + + For more details, see `numpy.linalg.lstsq`. + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if `full` = False. The + warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', np.RankWarning) + + See Also + -------- + numpy.polynomial.chebyshev.chebfit + numpy.polynomial.legendre.legfit + numpy.polynomial.polynomial.polyfit + numpy.polynomial.hermite.hermfit + numpy.polynomial.laguerre.lagfit + hermeval : Evaluates a Hermite series. + hermevander : pseudo Vandermonde matrix of Hermite series. + hermeweight : HermiteE weight function. + numpy.linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the HermiteE series `p` that + minimizes the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where the :math:`w_j` are the weights. This problem is solved by + setting up the (typically) overdetermined matrix equation + + .. math:: V(x) * c = w * y, + + where `V` is the pseudo Vandermonde matrix of `x`, the elements of `c` + are the coefficients to be solved for, and the elements of `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected, then a `RankWarning` will be issued. This means that the + coefficient values may be poorly determined. Using a lower order fit + will usually get rid of the warning. The `rcond` parameter can also be + set to a value smaller than its default, but the resulting fit may be + spurious and have large contributions from roundoff error. + + Fits using HermiteE series are probably most useful when the data can + be approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the HermiteE + weight. 
+    In that case the weight ``sqrt(w(x[i]))`` should be used
+    together with data values ``y[i]/sqrt(w(x[i]))``. The weight function is
+    available as `hermeweight`.
+
+    References
+    ----------
+    .. [1] Wikipedia, "Curve fitting",
+           https://en.wikipedia.org/wiki/Curve_fitting
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite_e import hermefit, hermeval
+    >>> x = np.linspace(-10, 10)
+    >>> np.random.seed(123)
+    >>> err = np.random.randn(len(x))/10
+    >>> y = hermeval(x, [1, 2, 3]) + err
+    >>> hermefit(x, y, 2)
+    array([ 1.01690445,  1.99951418,  2.99948696]) # may vary
+
+    """
+    return pu._fit(hermevander, x, y, deg, rcond, full, w)
+
+
+def hermecompanion(c):
+    """
+    Return the scaled companion matrix of c.
+
+    The basis polynomials are scaled so that the companion matrix is
+    symmetric when `c` is an HermiteE basis polynomial. This provides
+    better eigenvalue estimates than the unscaled case and for basis
+    polynomials the eigenvalues are guaranteed to be real if
+    `numpy.linalg.eigvalsh` is used to obtain them.
+
+    Parameters
+    ----------
+    c : array_like
+        1-D array of HermiteE series coefficients ordered from low to high
+        degree.
+
+    Returns
+    -------
+    mat : ndarray
+        Scaled companion matrix of dimensions (deg, deg).
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    # c is a trimmed copy
+    [c] = pu.as_series([c])
+    if len(c) < 2:
+        raise ValueError('Series must have maximum degree of at least 1.')
+    if len(c) == 2:
+        return np.array([[-c[0]/c[1]]])
+
+    n = len(c) - 1
+    mat = np.zeros((n, n), dtype=c.dtype)
+    scl = np.hstack((1., 1./np.sqrt(np.arange(n - 1, 0, -1))))
+    scl = np.multiply.accumulate(scl)[::-1]
+    top = mat.reshape(-1)[1::n+1]
+    bot = mat.reshape(-1)[n::n+1]
+    top[...] = np.sqrt(np.arange(1, n))
+    bot[...] = top
+    mat[:, -1] -= scl*c[:-1]/c[-1]
+    return mat
+
+
+def hermeroots(c):
+    """
+    Compute the roots of a HermiteE series.
+
+    Return the roots (a.k.a. "zeros") of the polynomial
+
+    .. math:: p(x) = \\sum_i c[i] * He_i(x).
+
+    Parameters
+    ----------
+    c : 1-D array_like
+        1-D array of coefficients.
+
+    Returns
+    -------
+    out : ndarray
+        Array of the roots of the series. If all the roots are real,
+        then `out` is also real, otherwise it is complex.
+
+    See Also
+    --------
+    numpy.polynomial.polynomial.polyroots
+    numpy.polynomial.legendre.legroots
+    numpy.polynomial.laguerre.lagroots
+    numpy.polynomial.hermite.hermroots
+    numpy.polynomial.chebyshev.chebroots
+
+    Notes
+    -----
+    The root estimates are obtained as the eigenvalues of the companion
+    matrix. Roots far from the origin of the complex plane may have large
+    errors due to the numerical instability of the series for such
+    values. Roots with multiplicity greater than 1 will also show larger
+    errors as the value of the series near such points is relatively
+    insensitive to errors in the roots. Isolated roots near the origin can
+    be improved by a few iterations of Newton's method.
+
+    The HermiteE series basis polynomials aren't powers of `x` so the
+    results of this function may seem unintuitive.
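+
+    As a concrete illustration of the Newton refinement mentioned above,
+    a single step for a root estimate ``r`` of the series ``c`` can be
+    written with this module's own functions (a sketch only; ``r`` and
+    ``c`` are assumed to be defined already)::
+
+        from numpy.polynomial.hermite_e import hermeval, hermeder
+        # one Newton step: r <- r - p(r)/p'(r)
+        r = r - hermeval(r, c) / hermeval(r, hermeder(c))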
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite_e import hermeroots, hermefromroots
+    >>> coef = hermefromroots([-1, 0, 1])
+    >>> coef
+    array([0., 2., 0., 1.])
+    >>> hermeroots(coef)
+    array([-1.,  0.,  1.]) # may vary
+
+    """
+    # c is a trimmed copy
+    [c] = pu.as_series([c])
+    if len(c) <= 1:
+        return np.array([], dtype=c.dtype)
+    if len(c) == 2:
+        return np.array([-c[0]/c[1]])
+
+    # rotated companion matrix reduces error
+    m = hermecompanion(c)[::-1,::-1]
+    r = la.eigvals(m)
+    r.sort()
+    return r
+
+
+def _normed_hermite_e_n(x, n):
+    """
+    Evaluate a normalized HermiteE polynomial.
+
+    Compute the value of the normalized HermiteE polynomial of degree ``n``
+    at the points ``x``.
+
+    Parameters
+    ----------
+    x : ndarray of double
+        Points at which to evaluate the function
+    n : int
+        Degree of the normalized HermiteE function to be evaluated.
+
+    Returns
+    -------
+    values : ndarray
+        The computed values.
+
+    Notes
+    -----
+    .. versionadded:: 1.10.0
+
+    This function is needed for finding the Gauss points and integration
+    weights for high degrees. The values of the standard HermiteE functions
+    overflow when n >= 207.
+
+    """
+    if n == 0:
+        return np.full(x.shape, 1/np.sqrt(np.sqrt(2*np.pi)))
+
+    c0 = 0.
+    c1 = 1./np.sqrt(np.sqrt(2*np.pi))
+    nd = float(n)
+    for i in range(n - 1):
+        tmp = c0
+        c0 = -c1*np.sqrt((nd - 1.)/nd)
+        c1 = tmp + c1*x*np.sqrt(1./nd)
+        nd = nd - 1.0
+    return c0 + c1*x
+
+
+def hermegauss(deg):
+    """
+    Gauss-HermiteE quadrature.
+
+    Computes the sample points and weights for Gauss-HermiteE quadrature.
+    These sample points and weights will correctly integrate polynomials of
+    degree :math:`2*deg - 1` or less over the interval :math:`[-\\inf, \\inf]`
+    with the weight function :math:`f(x) = \\exp(-x^2/2)`.
+
+    Parameters
+    ----------
+    deg : int
+        Number of sample points and weights. It must be >= 1.
+
+    Returns
+    -------
+    x : ndarray
+        1-D ndarray containing the sample points.
+    w : ndarray
+        1-D ndarray containing the weights.
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    The results have only been tested up to degree 100, higher degrees may
+    be problematic. The weights are determined by using the fact that
+
+    .. math:: w_k = c / (He'_n(x_k) * He_{n-1}(x_k))
+
+    where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
+    is the k'th root of :math:`He_n`, and then scaling the results to get
+    the right value when integrating 1.
+
+    """
+    ideg = pu._deprecate_as_int(deg, "deg")
+    if ideg <= 0:
+        raise ValueError("deg must be a positive integer")
+
+    # first approximation of roots. We use the fact that the companion
+    # matrix is symmetric in this case in order to obtain better zeros.
+    c = np.array([0]*deg + [1])
+    m = hermecompanion(c)
+    x = la.eigvalsh(m)
+
+    # improve roots by one application of Newton
+    dy = _normed_hermite_e_n(x, ideg)
+    df = _normed_hermite_e_n(x, ideg - 1) * np.sqrt(ideg)
+    x -= dy/df
+
+    # compute the weights. We scale the factor to avoid possible numerical
+    # overflow.
+    fm = _normed_hermite_e_n(x, ideg - 1)
+    fm /= np.abs(fm).max()
+    w = 1/(fm * fm)
+
+    # for Hermite_e we can also symmetrize
+    w = (w + w[::-1])/2
+    x = (x - x[::-1])/2
+
+    # scale w to get the right value
+    w *= np.sqrt(2*np.pi) / w.sum()
+
+    return x, w
+
+
+def hermeweight(x):
+    """Weight function of the Hermite_e polynomials.
+
+    The weight function is :math:`\\exp(-x^2/2)` and the interval of
+    integration is :math:`[-\\inf, \\inf]`.
the HermiteE polynomials are + orthogonal, but not normalized, with respect to this weight function. + + Parameters + ---------- + x : array_like + Values at which the weight function will be computed. + + Returns + ------- + w : ndarray + The weight function at `x`. + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + w = np.exp(-.5*x**2) + return w + + +# +# HermiteE series class +# + +class HermiteE(ABCPolyBase): + """An HermiteE series class. + + The HermiteE class provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the + attributes and methods listed in the `ABCPolyBase` documentation. + + Parameters + ---------- + coef : array_like + HermiteE coefficients in order of increasing degree, i.e, + ``(1, 2, 3)`` gives ``1*He_0(x) + 2*He_1(X) + 3*He_2(x)``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is [-1, 1]. + window : (2,) array_like, optional + Window, see `domain` for its use. The default value is [-1, 1]. + + .. versionadded:: 1.6.0 + + """ + # Virtual Functions + _add = staticmethod(hermeadd) + _sub = staticmethod(hermesub) + _mul = staticmethod(hermemul) + _div = staticmethod(hermediv) + _pow = staticmethod(hermepow) + _val = staticmethod(hermeval) + _int = staticmethod(hermeint) + _der = staticmethod(hermeder) + _fit = staticmethod(hermefit) + _line = staticmethod(hermeline) + _roots = staticmethod(hermeroots) + _fromroots = staticmethod(hermefromroots) + + # Virtual properties + domain = np.array(hermedomain) + window = np.array(hermedomain) + basis_name = 'He' diff --git a/MLPY/Lib/site-packages/numpy/polynomial/hermite_e.pyi b/MLPY/Lib/site-packages/numpy/polynomial/hermite_e.pyi new file mode 100644 index 0000000000000000000000000000000000000000..1cac330495f2c8b6f684ffb6fde1a3e82c42a542 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/polynomial/hermite_e.pyi @@ -0,0 +1,46 @@ +from typing import Any, List + +from numpy import ndarray, dtype, int_ +from numpy.polynomial._polybase import ABCPolyBase +from numpy.polynomial.polyutils import trimcoef + +__all__: list[str] + +hermetrim = trimcoef + +def poly2herme(pol): ... +def herme2poly(c): ... + +hermedomain: ndarray[Any, dtype[int_]] +hermezero: ndarray[Any, dtype[int_]] +hermeone: ndarray[Any, dtype[int_]] +hermex: ndarray[Any, dtype[int_]] + +def hermeline(off, scl): ... +def hermefromroots(roots): ... +def hermeadd(c1, c2): ... +def hermesub(c1, c2): ... +def hermemulx(c): ... +def hermemul(c1, c2): ... +def hermediv(c1, c2): ... +def hermepow(c, pow, maxpower=...): ... +def hermeder(c, m=..., scl=..., axis=...): ... +def hermeint(c, m=..., k = ..., lbnd=..., scl=..., axis=...): ... +def hermeval(x, c, tensor=...): ... +def hermeval2d(x, y, c): ... +def hermegrid2d(x, y, c): ... +def hermeval3d(x, y, z, c): ... +def hermegrid3d(x, y, z, c): ... +def hermevander(x, deg): ... +def hermevander2d(x, y, deg): ... +def hermevander3d(x, y, z, deg): ... +def hermefit(x, y, deg, rcond=..., full=..., w=...): ... +def hermecompanion(c): ... +def hermeroots(c): ... +def hermegauss(deg): ... +def hermeweight(x): ... 
+ +class HermiteE(ABCPolyBase): + domain: Any + window: Any + basis_name: Any diff --git a/MLPY/Lib/site-packages/numpy/polynomial/laguerre.py b/MLPY/Lib/site-packages/numpy/polynomial/laguerre.py new file mode 100644 index 0000000000000000000000000000000000000000..3df1235fe4b8bce589d711929a2bef6864132e7e --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/polynomial/laguerre.py @@ -0,0 +1,1644 @@ +""" +================================================== +Laguerre Series (:mod:`numpy.polynomial.laguerre`) +================================================== + +This module provides a number of objects (mostly functions) useful for +dealing with Laguerre series, including a `Laguerre` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Classes +------- +.. autosummary:: + :toctree: generated/ + + Laguerre + +Constants +--------- +.. autosummary:: + :toctree: generated/ + + lagdomain + lagzero + lagone + lagx + +Arithmetic +---------- +.. autosummary:: + :toctree: generated/ + + lagadd + lagsub + lagmulx + lagmul + lagdiv + lagpow + lagval + lagval2d + lagval3d + laggrid2d + laggrid3d + +Calculus +-------- +.. autosummary:: + :toctree: generated/ + + lagder + lagint + +Misc Functions +-------------- +.. autosummary:: + :toctree: generated/ + + lagfromroots + lagroots + lagvander + lagvander2d + lagvander3d + laggauss + lagweight + lagcompanion + lagfit + lagtrim + lagline + lag2poly + poly2lag + +See also +-------- +`numpy.polynomial` + +""" +import numpy as np +import numpy.linalg as la +from numpy.core.multiarray import normalize_axis_index + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'lagzero', 'lagone', 'lagx', 'lagdomain', 'lagline', 'lagadd', + 'lagsub', 'lagmulx', 'lagmul', 'lagdiv', 'lagpow', 'lagval', 'lagder', + 'lagint', 'lag2poly', 'poly2lag', 'lagfromroots', 'lagvander', + 'lagfit', 'lagtrim', 'lagroots', 'Laguerre', 'lagval2d', 'lagval3d', + 'laggrid2d', 'laggrid3d', 'lagvander2d', 'lagvander3d', 'lagcompanion', + 'laggauss', 'lagweight'] + +lagtrim = pu.trimcoef + + +def poly2lag(pol): + """ + poly2lag(pol) + + Convert a polynomial to a Laguerre series. + + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Laguerre series, ordered + from lowest to highest degree. + + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Laguerre + series. + + See Also + -------- + lag2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy.polynomial.laguerre import poly2lag + >>> poly2lag(np.arange(4)) + array([ 23., -63., 58., -18.]) + + """ + [pol] = pu.as_series([pol]) + res = 0 + for p in pol[::-1]: + res = lagadd(lagmulx(res), p) + return res + + +def lag2poly(c): + """ + Convert a Laguerre series to a polynomial. + + Convert an array representing the coefficients of a Laguerre series, + ordered from lowest degree to highest, to an array of the coefficients + of the equivalent polynomial (relative to the "standard" basis) ordered + from lowest to highest degree. 
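+
+    For instance, since ``L_1(x) = 1 - x``, a pure ``L_1`` series converts
+    to the line ``1 - x``; a quick check (a small sketch, complementing the
+    Examples section below)::
+
+        >>> from numpy.polynomial.laguerre import lag2poly
+        >>> lag2poly([0, 1])
+        array([ 1., -1.])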
+
+    Parameters
+    ----------
+    c : array_like
+        1-D array containing the Laguerre series coefficients, ordered
+        from lowest order term to highest.
+
+    Returns
+    -------
+    pol : ndarray
+        1-D array containing the coefficients of the equivalent polynomial
+        (relative to the "standard" basis) ordered from lowest order term
+        to highest.
+
+    See Also
+    --------
+    poly2lag
+
+    Notes
+    -----
+    The easy way to do conversions between polynomial basis sets
+    is to use the convert method of a class instance.
+
+    Examples
+    --------
+    >>> from numpy.polynomial.laguerre import lag2poly
+    >>> lag2poly([ 23., -63.,  58., -18.])
+    array([0., 1., 2., 3.])
+
+    """
+    from .polynomial import polyadd, polysub, polymulx
+
+    [c] = pu.as_series([c])
+    n = len(c)
+    if n == 1:
+        return c
+    else:
+        c0 = c[-2]
+        c1 = c[-1]
+        # i is the current degree of c1
+        for i in range(n - 1, 1, -1):
+            tmp = c0
+            c0 = polysub(c[i - 2], (c1*(i - 1))/i)
+            c1 = polyadd(tmp, polysub((2*i - 1)*c1, polymulx(c1))/i)
+        return polyadd(c0, polysub(c1, polymulx(c1)))
+
+#
+# These are constant arrays of integer type so as to be compatible
+# with the widest range of other types, such as Decimal.
+#
+
+# Laguerre
+lagdomain = np.array([0, 1])
+
+# Laguerre coefficients representing zero.
+lagzero = np.array([0])
+
+# Laguerre coefficients representing one.
+lagone = np.array([1])
+
+# Laguerre coefficients representing the identity x.
+lagx = np.array([1, -1])
+
+
+def lagline(off, scl):
+    """
+    Laguerre series whose graph is a straight line.
+
+    Parameters
+    ----------
+    off, scl : scalars
+        The specified line is given by ``off + scl*x``.
+
+    Returns
+    -------
+    y : ndarray
+        This module's representation of the Laguerre series for
+        ``off + scl*x``.
+
+    See Also
+    --------
+    numpy.polynomial.polynomial.polyline
+    numpy.polynomial.chebyshev.chebline
+    numpy.polynomial.legendre.legline
+    numpy.polynomial.hermite.hermline
+    numpy.polynomial.hermite_e.hermeline
+
+    Examples
+    --------
+    >>> from numpy.polynomial.laguerre import lagline, lagval
+    >>> lagval(0,lagline(3, 2))
+    3.0
+    >>> lagval(1,lagline(3, 2))
+    5.0
+
+    """
+    if scl != 0:
+        return np.array([off + scl, -scl])
+    else:
+        return np.array([off])
+
+
+def lagfromroots(roots):
+    """
+    Generate a Laguerre series with given roots.
+
+    The function returns the coefficients of the polynomial
+
+    .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
+
+    in Laguerre form, where the `r_n` are the roots specified in `roots`.
+    If a zero has multiplicity n, then it must appear in `roots` n times.
+    For instance, if 2 is a root of multiplicity three and 3 is a root of
+    multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
+    roots can appear in any order.
+
+    If the returned coefficients are `c`, then
+
+    .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x)
+
+    The coefficient of the last term is not generally 1 for monic
+    polynomials in Laguerre form.
+
+    Parameters
+    ----------
+    roots : array_like
+        Sequence containing the roots.
+
+    Returns
+    -------
+    out : ndarray
+        1-D array of coefficients. If all roots are real then `out` is a
+        real array, if some of the roots are complex, then `out` is complex
+        even if all the coefficients in the result are real (see Examples
+        below).
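+
+        As noted above, the leading coefficient is not 1 even for a monic
+        polynomial: since ``x = L_0(x) - L_1(x)``, the monic line ``x - 2``
+        is represented as ``-1*L_0(x) - 1*L_1(x)``. A quick check with
+        ``lagline`` (a sketch of the expected output)::
+
+            >>> from numpy.polynomial.laguerre import lagline
+            >>> lagline(-2.0, 1.0)
+            array([-1., -1.])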
+ + See Also + -------- + numpy.polynomial.polynomial.polyfromroots + numpy.polynomial.legendre.legfromroots + numpy.polynomial.chebyshev.chebfromroots + numpy.polynomial.hermite.hermfromroots + numpy.polynomial.hermite_e.hermefromroots + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagfromroots, lagval + >>> coef = lagfromroots((-1, 0, 1)) + >>> lagval((-1, 0, 1), coef) + array([0., 0., 0.]) + >>> coef = lagfromroots((-1j, 1j)) + >>> lagval((-1j, 1j), coef) + array([0.+0.j, 0.+0.j]) + + """ + return pu._fromroots(lagline, lagmul, roots) + + +def lagadd(c1, c2): + """ + Add one Laguerre series to another. + + Returns the sum of two Laguerre series `c1` + `c2`. The arguments + are sequences of coefficients ordered from lowest order term to + highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the Laguerre series of their sum. + + See Also + -------- + lagsub, lagmulx, lagmul, lagdiv, lagpow + + Notes + ----- + Unlike multiplication, division, etc., the sum of two Laguerre series + is a Laguerre series (without having to "reproject" the result onto + the basis set) so addition, just like that of "standard" polynomials, + is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagadd + >>> lagadd([1, 2, 3], [1, 2, 3, 4]) + array([2., 4., 6., 4.]) + + + """ + return pu._add(c1, c2) + + +def lagsub(c1, c2): + """ + Subtract one Laguerre series from another. + + Returns the difference of two Laguerre series `c1` - `c2`. The + sequences of coefficients are from lowest order term to highest, i.e., + [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Laguerre series coefficients representing their difference. + + See Also + -------- + lagadd, lagmulx, lagmul, lagdiv, lagpow + + Notes + ----- + Unlike multiplication, division, etc., the difference of two Laguerre + series is a Laguerre series (without having to "reproject" the result + onto the basis set) so subtraction, just like that of "standard" + polynomials, is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagsub + >>> lagsub([1, 2, 3, 4], [1, 2, 3]) + array([0., 0., 0., 4.]) + + """ + return pu._sub(c1, c2) + + +def lagmulx(c): + """Multiply a Laguerre series by x. + + Multiply the Laguerre series `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + See Also + -------- + lagadd, lagsub, lagmul, lagdiv, lagpow + + Notes + ----- + The multiplication uses the recursion relationship for Laguerre + polynomials in the form + + .. 
math:: + + xP_i(x) = (-(i + 1)*P_{i + 1}(x) + (2i + 1)P_{i}(x) - iP_{i - 1}(x)) + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagmulx + >>> lagmulx([1, 2, 3]) + array([-1., -1., 11., -9.]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0] + prd[1] = -c[0] + for i in range(1, len(c)): + prd[i + 1] = -c[i]*(i + 1) + prd[i] += c[i]*(2*i + 1) + prd[i - 1] -= c[i]*i + return prd + + +def lagmul(c1, c2): + """ + Multiply one Laguerre series by another. + + Returns the product of two Laguerre series `c1` * `c2`. The arguments + are sequences of coefficients, from lowest order "term" to highest, + e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Laguerre series coefficients representing their product. + + See Also + -------- + lagadd, lagsub, lagmulx, lagdiv, lagpow + + Notes + ----- + In general, the (polynomial) product of two C-series results in terms + that are not in the Laguerre polynomial basis set. Thus, to express + the product as a Laguerre series, it is necessary to "reproject" the + product onto said basis set, which may produce "unintuitive" (but + correct) results; see Examples section below. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagmul + >>> lagmul([1, 2, 3], [0, 1, 2]) + array([ 8., -13., 38., -51., 36.]) + + """ + # s1, s2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + + if len(c1) > len(c2): + c = c2 + xs = c1 + else: + c = c1 + xs = c2 + + if len(c) == 1: + c0 = c[0]*xs + c1 = 0 + elif len(c) == 2: + c0 = c[0]*xs + c1 = c[1]*xs + else: + nd = len(c) + c0 = c[-2]*xs + c1 = c[-1]*xs + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = lagsub(c[-i]*xs, (c1*(nd - 1))/nd) + c1 = lagadd(tmp, lagsub((2*nd - 1)*c1, lagmulx(c1))/nd) + return lagadd(c0, lagsub(c1, lagmulx(c1))) + + +def lagdiv(c1, c2): + """ + Divide one Laguerre series by another. + + Returns the quotient-with-remainder of two Laguerre series + `c1` / `c2`. The arguments are sequences of coefficients from lowest + order "term" to highest, e.g., [1,2,3] represents the series + ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + [quo, rem] : ndarrays + Of Laguerre series coefficients representing the quotient and + remainder. + + See Also + -------- + lagadd, lagsub, lagmulx, lagmul, lagpow + + Notes + ----- + In general, the (polynomial) division of one Laguerre series by another + results in quotient and remainder terms that are not in the Laguerre + polynomial basis set. Thus, to express these results as a Laguerre + series, it is necessary to "reproject" the results onto the Laguerre + basis set, which may produce "unintuitive" (but correct) results; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagdiv + >>> lagdiv([ 8., -13., 38., -51., 36.], [0, 1, 2]) + (array([1., 2., 3.]), array([0.])) + >>> lagdiv([ 9., -12., 38., -51., 36.], [0, 1, 2]) + (array([1., 2., 3.]), array([1., 1.])) + + """ + return pu._div(lagmul, c1, c2) + + +def lagpow(c, pow, maxpower=16): + """Raise a Laguerre series to a power. 
+ + Returns the Laguerre series `c` raised to the power `pow`. The + argument `c` is a sequence of coefficients ordered from low to high. + i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` + + Parameters + ---------- + c : array_like + 1-D array of Laguerre series coefficients ordered from low to + high. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Laguerre series of power. + + See Also + -------- + lagadd, lagsub, lagmulx, lagmul, lagdiv + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagpow + >>> lagpow([1, 2, 3], 2) + array([ 14., -16., 56., -72., 54.]) + + """ + return pu._pow(lagmul, c, pow, maxpower) + + +def lagder(c, m=1, scl=1, axis=0): + """ + Differentiate a Laguerre series. + + Returns the Laguerre series coefficients `c` differentiated `m` times + along `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*L_0 + 2*L_1 + 3*L_2`` + while [[1,2],[1,2]] represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + + 2*L_0(x)*L_1(y) + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is + ``y``. + + Parameters + ---------- + c : array_like + Array of Laguerre series coefficients. If `c` is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + der : ndarray + Laguerre series of the derivative. + + See Also + -------- + lagint + + Notes + ----- + In general, the result of differentiating a Laguerre series does not + resemble the same operation on a power series. Thus the result of this + function may be "unintuitive," albeit correct; see Examples section + below. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagder + >>> lagder([ 1., 1., 1., -3.]) + array([1., 2., 3.]) + >>> lagder([ 1., 0., 0., -4., 3.], m=2) + array([1., 2., 3.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + + cnt = pu._deprecate_as_int(m, "the order of derivation") + iaxis = pu._deprecate_as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + c = c[:1]*0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 1, -1): + der[j - 1] = -c[j] + c[j - 1] += c[j] + der[0] = -c[1] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def lagint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Laguerre series. + + Returns the Laguerre series coefficients `c` integrated `m` times from + `lbnd` along `axis`. 
At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``L_0 + 2*L_1 + 3*L_2`` while [[1,2],[1,2]] + represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + 2*L_0(x)*L_1(y) + + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + + Parameters + ---------- + c : array_like + Array of Laguerre series coefficients. If `c` is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at + ``lbnd`` is the first value in the list, the value of the second + integral at ``lbnd`` is the second value, etc. If ``k == []`` (the + default), all constants are set to zero. If ``m == 1``, a single + scalar can be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + S : ndarray + Laguerre series coefficients of the integral. + + Raises + ------ + ValueError + If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + lagder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a` - perhaps not what one would have first thought. + + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagint + >>> lagint([1,2,3]) + array([ 1., 1., 1., -3.]) + >>> lagint([1,2,3], m=2) + array([ 1., 0., 0., -4., 3.]) + >>> lagint([1,2,3], k=1) + array([ 2., 1., 1., -3.]) + >>> lagint([1,2,3], lbnd=-1) + array([11.5, 1. , 1. , -3. ]) + >>> lagint([1,2], m=2, k=[1,2], lbnd=-1) + array([ 11.16666667, -5. , -3. , 2. 
]) # may vary + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if not np.iterable(k): + k = [k] + cnt = pu._deprecate_as_int(m, "the order of integration") + iaxis = pu._deprecate_as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + k = list(k) + [0]*(cnt - len(k)) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) + tmp[0] = c[0] + tmp[1] = -c[0] + for j in range(1, n): + tmp[j] += c[j] + tmp[j + 1] = -c[j] + tmp[0] += k[i] - lagval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def lagval(x, c, tensor=True): + """ + Evaluate a Laguerre series at points x. + + If `c` is of length `n + 1`, this function returns the value: + + .. math:: p(x) = c_0 * L_0(x) + c_1 * L_1(x) + ... + c_n * L_n(x) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + with themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + .. versionadded:: 1.7.0 + + Returns + ------- + values : ndarray, algebra_like + The shape of the return value is described above. + + See Also + -------- + lagval2d, laggrid2d, lagval3d, laggrid3d + + Notes + ----- + The evaluation uses Clenshaw recursion, aka synthetic division. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagval + >>> coef = [1,2,3] + >>> lagval(1, coef) + -0.5 + >>> lagval([[1,2],[3,4]], coef) + array([[-0.5, -4. ], + [-4.5, -2. 
]]) + + """ + c = np.array(c, ndmin=1, copy=False) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,)*x.ndim) + + if len(c) == 1: + c0 = c[0] + c1 = 0 + elif len(c) == 2: + c0 = c[0] + c1 = c[1] + else: + nd = len(c) + c0 = c[-2] + c1 = c[-1] + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = c[-i] - (c1*(nd - 1))/nd + c1 = tmp + (c1*((2*nd - 1) - x))/nd + return c0 + c1*(1 - x) + + +def lagval2d(x, y, c): + """ + Evaluate a 2-D Laguerre series at points (x, y). + + This function returns the values: + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y) + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` is a 1-D array a one is implicitly appended to its shape to make + it 2-D. The shape of the result will be c.shape[2:] + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points `(x, y)`, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in ``c[i,j]``. If `c` has + dimension greater than two the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points formed with + pairs of corresponding values from `x` and `y`. + + See Also + -------- + lagval, laggrid2d, lagval3d, laggrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + return pu._valnd(lagval, c, x, y) + + +def laggrid2d(x, y, c): + """ + Evaluate a 2-D Laguerre series on the Cartesian product of x and y. + + This function returns the values: + + .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * L_i(a) * L_j(b) + + where the points `(a, b)` consist of all pairs formed by taking + `a` from `x` and `b` from `y`. The resulting points form a grid with + `x` in the first dimension and `y` in the second. + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars. In either + case, either `x` and `y` or their elements must support multiplication + and addition both with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape + y.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points in the + Cartesian product of `x` and `y`. If `x` or `y` is a list or + tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j is contained in `c[i,j]`. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. 
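+
+    For example, evaluating on a two-point ``x`` grid and a three-point
+    ``y`` grid yields a result of shape ``(2, 3)``:
+
+    >>> from numpy.polynomial.laguerre import laggrid2d
+    >>> laggrid2d([0, 1], [0, 1, 2], [[1, 2], [3, 4]]).shape
+    (2, 3)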
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the two dimensional Laguerre series at points in the
+        Cartesian product of `x` and `y`.
+
+    See Also
+    --------
+    lagval, lagval2d, lagval3d, laggrid3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    return pu._gridnd(lagval, c, x, y)
+
+
+def lagval3d(x, y, z, c):
+    """
+    Evaluate a 3-D Laguerre series at points (x, y, z).
+
+    This function returns the values:
+
+    .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z)
+
+    The parameters `x`, `y`, and `z` are converted to arrays only if
+    they are tuples or lists, otherwise they are treated as scalars and
+    they must have the same shape after conversion. In either case, either
+    `x`, `y`, and `z` or their elements must support multiplication and
+    addition both with themselves and with the elements of `c`.
+
+    If `c` has fewer than 3 dimensions, ones are implicitly appended to its
+    shape to make it 3-D. The shape of the result will be c.shape[3:] +
+    x.shape.
+
+    Parameters
+    ----------
+    x, y, z : array_like, compatible object
+        The three dimensional series is evaluated at the points
+        `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
+        any of `x`, `y`, or `z` is a list or tuple, it is first converted
+        to an ndarray, otherwise it is left unchanged and if it isn't an
+        ndarray it is treated as a scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficient of the term of
+        multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
+        greater than 3 the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the multidimensional polynomial on points formed with
+        triples of corresponding values from `x`, `y`, and `z`.
+
+    See Also
+    --------
+    lagval, lagval2d, laggrid2d, laggrid3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    return pu._valnd(lagval, c, x, y, z)
+
+
+def laggrid3d(x, y, z, c):
+    """
+    Evaluate a 3-D Laguerre series on the Cartesian product of x, y, and z.
+
+    This function returns the values:
+
+    .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c)
+
+    where the points `(a, b, c)` consist of all triples formed by taking
+    `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
+    a grid with `x` in the first dimension, `y` in the second, and `z` in
+    the third.
+
+    The parameters `x`, `y`, and `z` are converted to arrays only if they
+    are tuples or lists, otherwise they are treated as scalars. In
+    either case, either `x`, `y`, and `z` or their elements must support
+    multiplication and addition both with themselves and with the elements
+    of `c`.
+
+    If `c` has fewer than three dimensions, ones are implicitly appended to
+    its shape to make it 3-D. The shape of the result will be c.shape[3:] +
+    x.shape + y.shape + z.shape.
+
+    Parameters
+    ----------
+    x, y, z : array_like, compatible objects
+        The three dimensional series is evaluated at the points in the
+        Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a
+        list or tuple, it is first converted to an ndarray, otherwise it is
+        left unchanged and, if it isn't an ndarray, it is treated as a
+        scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficient of the term of
+        multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
+        greater than three the remaining indices enumerate multiple sets of
+        coefficients.
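+
+    At the origin every Laguerre polynomial equals 1, so evaluating on the
+    one-point grid ``x = y = z = [0]`` simply sums the coefficients:
+
+    >>> import numpy as np
+    >>> from numpy.polynomial.laguerre import laggrid3d
+    >>> laggrid3d([0], [0], [0], np.ones((2, 2, 2)))
+    array([[[8.]]])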
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the three dimensional polynomial at points in the
+        Cartesian product of `x`, `y`, and `z`.
+
+    See Also
+    --------
+    lagval, lagval2d, laggrid2d, lagval3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    return pu._gridnd(lagval, c, x, y, z)
+
+
+def lagvander(x, deg):
+    """Pseudo-Vandermonde matrix of given degree.
+
+    Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
+    `x`. The pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., i] = L_i(x)
+
+    where `0 <= i <= deg`. The leading indices of `V` index the elements of
+    `x` and the last index is the degree of the Laguerre polynomial.
+
+    If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
+    array ``V = lagvander(x, n)``, then ``np.dot(V, c)`` and
+    ``lagval(x, c)`` are the same up to roundoff. This equivalence is
+    useful both for least squares fitting and for the evaluation of a large
+    number of Laguerre series of the same degree and sample points.
+
+    Parameters
+    ----------
+    x : array_like
+        Array of points. The dtype is converted to float64 or complex128
+        depending on whether any of the elements are complex. If `x` is
+        scalar it is converted to a 1-D array.
+    deg : int
+        Degree of the resulting matrix.
+
+    Returns
+    -------
+    vander : ndarray
+        The pseudo-Vandermonde matrix. The shape of the returned matrix is
+        ``x.shape + (deg + 1,)``, where the last index is the degree of the
+        corresponding Laguerre polynomial. The dtype will be the same as
+        the converted `x`.
+
+    Examples
+    --------
+    >>> from numpy.polynomial.laguerre import lagvander
+    >>> x = np.array([0, 1, 2])
+    >>> lagvander(x, 3)
+    array([[ 1.        ,  1.        ,  1.        ,  1.        ],
+           [ 1.        ,  0.        , -0.5       , -0.66666667],
+           [ 1.        , -1.        , -1.        , -0.33333333]])
+
+    """
+    ideg = pu._deprecate_as_int(deg, "deg")
+    if ideg < 0:
+        raise ValueError("deg must be non-negative")
+
+    x = np.array(x, copy=False, ndmin=1) + 0.0
+    dims = (ideg + 1,) + x.shape
+    dtyp = x.dtype
+    v = np.empty(dims, dtype=dtyp)
+    v[0] = x*0 + 1
+    if ideg > 0:
+        v[1] = 1 - x
+        for i in range(2, ideg + 1):
+            v[i] = (v[i-1]*(2*i - 1 - x) - v[i-2]*(i - 1))/i
+    return np.moveaxis(v, 0, -1)
+
+
+def lagvander2d(x, y, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points `(x, y)`. The pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (deg[1] + 1)*i + j] = L_i(x) * L_j(y),
+
+    where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
+    `V` index the points `(x, y)` and the last index encodes the degrees of
+    the Laguerre polynomials.
+
+    If ``V = lagvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
+    correspond to the elements of a 2-D coefficient array `c` of shape
+    (xdeg + 1, ydeg + 1) in the order
+
+    .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
+
+    and ``np.dot(V, c.flat)`` and ``lagval2d(x, y, c)`` will be the same
+    up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 2-D Laguerre
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes
+        will be converted to either float64 or complex128 depending on
+        whether any of the elements are complex. Scalars are converted to
+        1-D arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg].
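+        For example, ``deg = [1, 2]`` produces the six columns
+        ``L_0(x)*L_0(y), L_0(x)*L_1(y), L_0(x)*L_2(y), L_1(x)*L_0(y),
+        L_1(x)*L_1(y), L_1(x)*L_2(y)``, in that order.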
+
+    Returns
+    -------
+    vander2d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
+        as the converted `x` and `y`.
+
+    See Also
+    --------
+    lagvander, lagvander3d, lagval2d, lagval3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    return pu._vander_nd_flat((lagvander, lagvander), (x, y), deg)
+
+
+def lagvander3d(x, y, z, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
+    then the pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z),
+
+    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
+    indices of `V` index the points `(x, y, z)` and the last index encodes
+    the degrees of the Laguerre polynomials.
+
+    If ``V = lagvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
+    of `V` correspond to the elements of a 3-D coefficient array `c` of
+    shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
+
+    .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
+
+    and ``np.dot(V, c.flat)`` and ``lagval3d(x, y, z, c)`` will be the
+    same up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 3-D Laguerre
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y, z : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes will
+        be converted to either float64 or complex128 depending on whether
+        any of the elements are complex. Scalars are converted to 1-D
+        arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg, z_deg].
+
+    Returns
+    -------
+    vander3d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
+        be the same as the converted `x`, `y`, and `z`.
+
+    See Also
+    --------
+    lagvander, lagvander2d, lagval2d, lagval3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    return pu._vander_nd_flat((lagvander, lagvander, lagvander), (x, y, z), deg)
+
+
+def lagfit(x, y, deg, rcond=None, full=False, w=None):
+    """
+    Least squares fit of Laguerre series to data.
+
+    Return the coefficients of a Laguerre series of degree `deg` that is the
+    least squares fit to the data values `y` given at points `x`. If `y` is
+    1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
+    fits are done, one for each column of `y`, and the resulting
+    coefficients are stored in the corresponding columns of a 2-D return.
+    The fitted polynomial(s) are in the form
+
+    .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x),
+
+    where `n` is `deg`.
+
+    Parameters
+    ----------
+    x : array_like, shape (M,)
+        x-coordinates of the M sample points ``(x[i], y[i])``.
+    y : array_like, shape (M,) or (M, K)
+        y-coordinates of the sample points. Several data sets of sample
+        points sharing the same x-coordinates can be fitted at once by
+        passing in a 2D-array that contains one dataset per column.
+    deg : int or 1-D array_like
+        Degree(s) of the fitting polynomials. If `deg` is a single integer
+        all terms up to and including the `deg`'th term are included in the
+        fit. For NumPy versions >= 1.11.0 a list of integers specifying the
+        degrees of the terms to include may be used instead.
+    rcond : float, optional
+        Relative condition number of the fit. Singular values smaller than
+        this relative to the largest singular value will be ignored. The
+        default value is len(x)*eps, where eps is the relative precision of
+        the float type, about 2e-16 in most cases.
+    full : bool, optional
+        Switch determining nature of return value. When it is False (the
+        default) just the coefficients are returned, when True diagnostic
+        information from the singular value decomposition is also returned.
+    w : array_like, shape (`M`,), optional
+        Weights. If not None, the contribution of each point
+        ``(x[i],y[i])`` to the fit is weighted by ``w[i]``. Ideally the
+        weights are chosen so that the errors of the products ``w[i]*y[i]``
+        all have the same variance. The default value is None.
+
+    Returns
+    -------
+    coef : ndarray, shape (deg + 1,) or (deg + 1, K)
+        Laguerre coefficients ordered from low to high. If `y` was 2-D,
+        the coefficients for the data in column k of `y` are in column
+        `k`.
+
+    [residuals, rank, singular_values, rcond] : list
+        These values are only returned if `full` = True
+
+        resid -- sum of squared residuals of the least squares fit
+        rank -- the numerical rank of the scaled Vandermonde matrix
+        sv -- singular values of the scaled Vandermonde matrix
+        rcond -- value of `rcond`.
+
+        For more details, see `numpy.linalg.lstsq`.
+
+    Warns
+    -----
+    RankWarning
+        The rank of the coefficient matrix in the least-squares fit is
+        deficient. The warning is only raised if `full` = False. The
+        warnings can be turned off by
+
+        >>> import warnings
+        >>> warnings.simplefilter('ignore', np.RankWarning)
+
+    See Also
+    --------
+    numpy.polynomial.polynomial.polyfit
+    numpy.polynomial.legendre.legfit
+    numpy.polynomial.chebyshev.chebfit
+    numpy.polynomial.hermite.hermfit
+    numpy.polynomial.hermite_e.hermefit
+    lagval : Evaluates a Laguerre series.
+    lagvander : pseudo Vandermonde matrix of Laguerre series.
+    lagweight : Laguerre weight function.
+    numpy.linalg.lstsq : Computes a least-squares fit from the matrix.
+    scipy.interpolate.UnivariateSpline : Computes spline fits.
+
+    Notes
+    -----
+    The solution is the coefficients of the Laguerre series `p` that
+    minimizes the sum of the weighted squared errors
+
+    .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
+
+    where the :math:`w_j` are the weights. This problem is solved by
+    setting up the (typically) overdetermined matrix equation
+
+    .. math:: V(x) * c = w * y,
+
+    where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
+    coefficients to be solved for, `w` are the weights, and `y` are the
+    observed values. This equation is then solved using the singular value
+    decomposition of `V`.
+
+    If some of the singular values of `V` are so small that they are
+    neglected, then a `RankWarning` will be issued. This means that the
+    coefficient values may be poorly determined. Using a lower order fit
+    will usually get rid of the warning. The `rcond` parameter can also be
+    set to a value smaller than its default, but the resulting fit may be
+    spurious and have large contributions from roundoff error.
+
+    Fits using Laguerre series are probably most useful when the data can
+    be approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the Laguerre
+    weight. In that case the weight ``sqrt(w(x[i]))`` should be used
+    together with data values ``y[i]/sqrt(w(x[i]))``. The weight function is
+    available as `lagweight`.
+
+    References
+    ----------
+    .. [1] Wikipedia, "Curve fitting",
+           https://en.wikipedia.org/wiki/Curve_fitting
+
+    Examples
+    --------
+    >>> from numpy.polynomial.laguerre import lagfit, lagval
+    >>> x = np.linspace(0, 10)
+    >>> err = np.random.randn(len(x))/10
+    >>> y = lagval(x, [1, 2, 3]) + err
+    >>> lagfit(x, y, 2)
+    array([ 0.96971004, 2.00193749, 3.00288744]) # may vary
+
+    """
+    return pu._fit(lagvander, x, y, deg, rcond, full, w)
+
+
+def lagcompanion(c):
+    """
+    Return the companion matrix of c.
+
+    The usual companion matrix of the Laguerre polynomials is already
+    symmetric when `c` is a basis Laguerre polynomial, so no scaling is
+    applied.
+
+    Parameters
+    ----------
+    c : array_like
+        1-D array of Laguerre series coefficients ordered from low to high
+        degree.
+
+    Returns
+    -------
+    mat : ndarray
+        Companion matrix of dimensions (deg, deg).
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    # c is a trimmed copy
+    [c] = pu.as_series([c])
+    if len(c) < 2:
+        raise ValueError('Series must have maximum degree of at least 1.')
+    if len(c) == 2:
+        return np.array([[1 + c[0]/c[1]]])
+
+    n = len(c) - 1
+    mat = np.zeros((n, n), dtype=c.dtype)
+    top = mat.reshape(-1)[1::n+1]
+    mid = mat.reshape(-1)[0::n+1]
+    bot = mat.reshape(-1)[n::n+1]
+    top[...] = -np.arange(1, n)
+    mid[...] = 2.*np.arange(n) + 1.
+    bot[...] = top
+    mat[:, -1] += (c[:-1]/c[-1])*n
+    return mat
+
+
+def lagroots(c):
+    """
+    Compute the roots of a Laguerre series.
+
+    Return the roots (a.k.a. "zeros") of the polynomial
+
+    .. math:: p(x) = \\sum_i c[i] * L_i(x).
+
+    Parameters
+    ----------
+    c : 1-D array_like
+        1-D array of coefficients.
+
+    Returns
+    -------
+    out : ndarray
+        Array of the roots of the series. If all the roots are real,
+        then `out` is also real, otherwise it is complex.
+
+    See Also
+    --------
+    numpy.polynomial.polynomial.polyroots
+    numpy.polynomial.legendre.legroots
+    numpy.polynomial.chebyshev.chebroots
+    numpy.polynomial.hermite.hermroots
+    numpy.polynomial.hermite_e.hermeroots
+
+    Notes
+    -----
+    The root estimates are obtained as the eigenvalues of the companion
+    matrix. Roots far from the origin of the complex plane may have large
+    errors due to the numerical instability of the series for such
+    values. Roots with multiplicity greater than 1 will also show larger
+    errors as the value of the series near such points is relatively
+    insensitive to errors in the roots. Isolated roots near the origin can
+    be improved by a few iterations of Newton's method.
+
+    The Laguerre series basis polynomials aren't powers of `x` so the
+    results of this function may seem unintuitive.
+
+    Examples
+    --------
+    >>> from numpy.polynomial.laguerre import lagroots, lagfromroots
+    >>> coef = lagfromroots([0, 1, 2])
+    >>> coef
+    array([ 2., -8., 12., -6.])
+    >>> lagroots(coef)
+    array([-4.4408921e-16, 1.0000000e+00, 2.0000000e+00])
+
+    """
+    # c is a trimmed copy
+    [c] = pu.as_series([c])
+    if len(c) <= 1:
+        return np.array([], dtype=c.dtype)
+    if len(c) == 2:
+        return np.array([1 + c[0]/c[1]])
+
+    # rotated companion matrix reduces error
+    m = lagcompanion(c)[::-1,::-1]
+    r = la.eigvals(m)
+    r.sort()
+    return r
+
+
+def laggauss(deg):
+    """
+    Gauss-Laguerre quadrature.
+
+    Computes the sample points and weights for Gauss-Laguerre quadrature.
+    These sample points and weights will correctly integrate polynomials of
+    degree :math:`2*deg - 1` or less over the interval :math:`[0, \\infty]`
+    with the weight function :math:`f(x) = \\exp(-x)`.
+
+    Parameters
+    ----------
+    deg : int
+        Number of sample points and weights. It must be >= 1.
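+        For example, ``deg = 1`` places a single node at ``x = 1`` with
+        weight 1, which integrates every polynomial of degree one or less
+        exactly against the weight :math:`\\exp(-x)`.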
+
+    Returns
+    -------
+    x : ndarray
+        1-D ndarray containing the sample points.
+    y : ndarray
+        1-D ndarray containing the weights.
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    The results have only been tested up to degree 100; higher degrees may
+    be problematic. The weights are determined by using the fact that
+
+    .. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k))
+
+    where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
+    is the k'th root of :math:`L_n`, and then scaling the results to get
+    the right value when integrating 1.
+
+    """
+    ideg = pu._deprecate_as_int(deg, "deg")
+    if ideg <= 0:
+        raise ValueError("deg must be a positive integer")
+
+    # first approximation of roots. We use the fact that the companion
+    # matrix is symmetric in this case in order to obtain better zeros.
+    c = np.array([0]*deg + [1])
+    m = lagcompanion(c)
+    x = la.eigvalsh(m)
+
+    # improve roots by one application of Newton
+    dy = lagval(x, c)
+    df = lagval(x, lagder(c))
+    x -= dy/df
+
+    # compute the weights. We scale the factor to avoid possible numerical
+    # overflow.
+    fm = lagval(x, c[1:])
+    fm /= np.abs(fm).max()
+    df /= np.abs(df).max()
+    w = 1/(fm * df)
+
+    # scale w to get the right value, 1 in this case
+    w /= w.sum()
+
+    return x, w
+
+
+def lagweight(x):
+    """Weight function of the Laguerre polynomials.
+
+    The weight function is :math:`\\exp(-x)` and the interval of integration
+    is :math:`[0, \\infty]`. The Laguerre polynomials are orthogonal, but not
+    normalized, with respect to this weight function.
+
+    Parameters
+    ----------
+    x : array_like
+       Values at which the weight function will be computed.
+
+    Returns
+    -------
+    w : ndarray
+       The weight function at `x`.
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    w = np.exp(-x)
+    return w
+
+#
+# Laguerre series class
+#
+
+class Laguerre(ABCPolyBase):
+    """A Laguerre series class.
+
+    The Laguerre class provides the standard Python numerical methods
+    '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
+    attributes and methods listed in the `ABCPolyBase` documentation.
+
+    Parameters
+    ----------
+    coef : array_like
+        Laguerre coefficients in order of increasing degree, i.e.,
+        ``(1, 2, 3)`` gives ``1*L_0(x) + 2*L_1(x) + 3*L_2(x)``.
+    domain : (2,) array_like, optional
+        Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
+        to the interval ``[window[0], window[1]]`` by shifting and scaling.
+        The default value is [0, 1].
+    window : (2,) array_like, optional
+        Window, see `domain` for its use. The default value is [0, 1].
+
+    ..
versionadded:: 1.6.0 + + """ + # Virtual Functions + _add = staticmethod(lagadd) + _sub = staticmethod(lagsub) + _mul = staticmethod(lagmul) + _div = staticmethod(lagdiv) + _pow = staticmethod(lagpow) + _val = staticmethod(lagval) + _int = staticmethod(lagint) + _der = staticmethod(lagder) + _fit = staticmethod(lagfit) + _line = staticmethod(lagline) + _roots = staticmethod(lagroots) + _fromroots = staticmethod(lagfromroots) + + # Virtual properties + domain = np.array(lagdomain) + window = np.array(lagdomain) + basis_name = 'L' diff --git a/MLPY/Lib/site-packages/numpy/polynomial/laguerre.pyi b/MLPY/Lib/site-packages/numpy/polynomial/laguerre.pyi new file mode 100644 index 0000000000000000000000000000000000000000..3b3032df139afd7cbc29cfae4d2cb816dcb231f5 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/polynomial/laguerre.pyi @@ -0,0 +1,46 @@ +from typing import Any, List + +from numpy import ndarray, dtype, int_ +from numpy.polynomial._polybase import ABCPolyBase +from numpy.polynomial.polyutils import trimcoef + +__all__: list[str] + +lagtrim = trimcoef + +def poly2lag(pol): ... +def lag2poly(c): ... + +lagdomain: ndarray[Any, dtype[int_]] +lagzero: ndarray[Any, dtype[int_]] +lagone: ndarray[Any, dtype[int_]] +lagx: ndarray[Any, dtype[int_]] + +def lagline(off, scl): ... +def lagfromroots(roots): ... +def lagadd(c1, c2): ... +def lagsub(c1, c2): ... +def lagmulx(c): ... +def lagmul(c1, c2): ... +def lagdiv(c1, c2): ... +def lagpow(c, pow, maxpower=...): ... +def lagder(c, m=..., scl=..., axis=...): ... +def lagint(c, m=..., k = ..., lbnd=..., scl=..., axis=...): ... +def lagval(x, c, tensor=...): ... +def lagval2d(x, y, c): ... +def laggrid2d(x, y, c): ... +def lagval3d(x, y, z, c): ... +def laggrid3d(x, y, z, c): ... +def lagvander(x, deg): ... +def lagvander2d(x, y, deg): ... +def lagvander3d(x, y, z, deg): ... +def lagfit(x, y, deg, rcond=..., full=..., w=...): ... +def lagcompanion(c): ... +def lagroots(c): ... +def laggauss(deg): ... +def lagweight(x): ... + +class Laguerre(ABCPolyBase): + domain: Any + window: Any + basis_name: Any diff --git a/MLPY/Lib/site-packages/numpy/polynomial/legendre.py b/MLPY/Lib/site-packages/numpy/polynomial/legendre.py new file mode 100644 index 0000000000000000000000000000000000000000..0eda5c9de2861854146244c2c047a590f626f2cc --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/polynomial/legendre.py @@ -0,0 +1,1657 @@ +""" +================================================== +Legendre Series (:mod:`numpy.polynomial.legendre`) +================================================== + +This module provides a number of objects (mostly functions) useful for +dealing with Legendre series, including a `Legendre` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Classes +------- +.. autosummary:: + :toctree: generated/ + + Legendre + +Constants +--------- + +.. autosummary:: + :toctree: generated/ + + legdomain + legzero + legone + legx + +Arithmetic +---------- + +.. autosummary:: + :toctree: generated/ + + legadd + legsub + legmulx + legmul + legdiv + legpow + legval + legval2d + legval3d + leggrid2d + leggrid3d + +Calculus +-------- + +.. autosummary:: + :toctree: generated/ + + legder + legint + +Misc Functions +-------------- + +.. 
autosummary:: + :toctree: generated/ + + legfromroots + legroots + legvander + legvander2d + legvander3d + leggauss + legweight + legcompanion + legfit + legtrim + legline + leg2poly + poly2leg + +See also +-------- +numpy.polynomial + +""" +import numpy as np +import numpy.linalg as la +from numpy.core.multiarray import normalize_axis_index + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'legzero', 'legone', 'legx', 'legdomain', 'legline', 'legadd', + 'legsub', 'legmulx', 'legmul', 'legdiv', 'legpow', 'legval', 'legder', + 'legint', 'leg2poly', 'poly2leg', 'legfromroots', 'legvander', + 'legfit', 'legtrim', 'legroots', 'Legendre', 'legval2d', 'legval3d', + 'leggrid2d', 'leggrid3d', 'legvander2d', 'legvander3d', 'legcompanion', + 'leggauss', 'legweight'] + +legtrim = pu.trimcoef + + +def poly2leg(pol): + """ + Convert a polynomial to a Legendre series. + + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Legendre series, ordered + from lowest to highest degree. + + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Legendre + series. + + See Also + -------- + leg2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy import polynomial as P + >>> p = P.Polynomial(np.arange(4)) + >>> p + Polynomial([0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1]) + >>> c = P.Legendre(P.legendre.poly2leg(p.coef)) + >>> c + Legendre([ 1. , 3.25, 1. , 0.75], domain=[-1, 1], window=[-1, 1]) # may vary + + """ + [pol] = pu.as_series([pol]) + deg = len(pol) - 1 + res = 0 + for i in range(deg, -1, -1): + res = legadd(legmulx(res), pol[i]) + return res + + +def leg2poly(c): + """ + Convert a Legendre series to a polynomial. + + Convert an array representing the coefficients of a Legendre series, + ordered from lowest degree to highest, to an array of the coefficients + of the equivalent polynomial (relative to the "standard" basis) ordered + from lowest to highest degree. + + Parameters + ---------- + c : array_like + 1-D array containing the Legendre series coefficients, ordered + from lowest order term to highest. + + Returns + ------- + pol : ndarray + 1-D array containing the coefficients of the equivalent polynomial + (relative to the "standard" basis) ordered from lowest order term + to highest. + + See Also + -------- + poly2leg + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy import polynomial as P + >>> c = P.Legendre(range(4)) + >>> c + Legendre([0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1]) + >>> p = c.convert(kind=P.Polynomial) + >>> p + Polynomial([-1. , -3.5, 3. , 7.5], domain=[-1., 1.], window=[-1., 1.]) + >>> P.leg2poly(range(4)) + array([-1. , -3.5, 3. 
, 7.5]) + + + """ + from .polynomial import polyadd, polysub, polymulx + + [c] = pu.as_series([c]) + n = len(c) + if n < 3: + return c + else: + c0 = c[-2] + c1 = c[-1] + # i is the current degree of c1 + for i in range(n - 1, 1, -1): + tmp = c0 + c0 = polysub(c[i - 2], (c1*(i - 1))/i) + c1 = polyadd(tmp, (polymulx(c1)*(2*i - 1))/i) + return polyadd(c0, polymulx(c1)) + +# +# These are constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Legendre +legdomain = np.array([-1, 1]) + +# Legendre coefficients representing zero. +legzero = np.array([0]) + +# Legendre coefficients representing one. +legone = np.array([1]) + +# Legendre coefficients representing the identity x. +legx = np.array([0, 1]) + + +def legline(off, scl): + """ + Legendre series whose graph is a straight line. + + + + Parameters + ---------- + off, scl : scalars + The specified line is given by ``off + scl*x``. + + Returns + ------- + y : ndarray + This module's representation of the Legendre series for + ``off + scl*x``. + + See Also + -------- + numpy.polynomial.polynomial.polyline + numpy.polynomial.chebyshev.chebline + numpy.polynomial.laguerre.lagline + numpy.polynomial.hermite.hermline + numpy.polynomial.hermite_e.hermeline + + Examples + -------- + >>> import numpy.polynomial.legendre as L + >>> L.legline(3,2) + array([3, 2]) + >>> L.legval(-3, L.legline(3,2)) # should be -3 + -3.0 + + """ + if scl != 0: + return np.array([off, scl]) + else: + return np.array([off]) + + +def legfromroots(roots): + """ + Generate a Legendre series with given roots. + + The function returns the coefficients of the polynomial + + .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), + + in Legendre form, where the `r_n` are the roots specified in `roots`. + If a zero has multiplicity n, then it must appear in `roots` n times. + For instance, if 2 is a root of multiplicity three and 3 is a root of + multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The + roots can appear in any order. + + If the returned coefficients are `c`, then + + .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x) + + The coefficient of the last term is not generally 1 for monic + polynomials in Legendre form. + + Parameters + ---------- + roots : array_like + Sequence containing the roots. + + Returns + ------- + out : ndarray + 1-D array of coefficients. If all roots are real then `out` is a + real array, if some of the roots are complex, then `out` is complex + even if all the coefficients in the result are real (see Examples + below). + + See Also + -------- + numpy.polynomial.polynomial.polyfromroots + numpy.polynomial.chebyshev.chebfromroots + numpy.polynomial.laguerre.lagfromroots + numpy.polynomial.hermite.hermfromroots + numpy.polynomial.hermite_e.hermefromroots + + Examples + -------- + >>> import numpy.polynomial.legendre as L + >>> L.legfromroots((-1,0,1)) # x^3 - x relative to the standard basis + array([ 0. , -0.4, 0. , 0.4]) + >>> j = complex(0,1) + >>> L.legfromroots((-j,j)) # x^2 + 1 relative to the standard basis + array([ 1.33333333+0.j, 0.00000000+0.j, 0.66666667+0.j]) # may vary + + """ + return pu._fromroots(legline, legmul, roots) + + +def legadd(c1, c2): + """ + Add one Legendre series to another. + + Returns the sum of two Legendre series `c1` + `c2`. The arguments + are sequences of coefficients ordered from lowest order term to + highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. 
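+
+    Series of different lengths are padded with zeros at the high end, so
+    ``1*P_0 + 2*P_1`` plus ``1*P_0 + 1*P_1 + 1*P_2`` gives
+    ``2*P_0 + 3*P_1 + 1*P_2``:
+
+    >>> from numpy.polynomial import legendre as L
+    >>> L.legadd([1, 2], [1, 1, 1])
+    array([2., 3., 1.])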
+ + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Legendre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the Legendre series of their sum. + + See Also + -------- + legsub, legmulx, legmul, legdiv, legpow + + Notes + ----- + Unlike multiplication, division, etc., the sum of two Legendre series + is a Legendre series (without having to "reproject" the result onto + the basis set) so addition, just like that of "standard" polynomials, + is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> L.legadd(c1,c2) + array([4., 4., 4.]) + + """ + return pu._add(c1, c2) + + +def legsub(c1, c2): + """ + Subtract one Legendre series from another. + + Returns the difference of two Legendre series `c1` - `c2`. The + sequences of coefficients are from lowest order term to highest, i.e., + [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Legendre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Legendre series coefficients representing their difference. + + See Also + -------- + legadd, legmulx, legmul, legdiv, legpow + + Notes + ----- + Unlike multiplication, division, etc., the difference of two Legendre + series is a Legendre series (without having to "reproject" the result + onto the basis set) so subtraction, just like that of "standard" + polynomials, is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> L.legsub(c1,c2) + array([-2., 0., 2.]) + >>> L.legsub(c2,c1) # -C.legsub(c1,c2) + array([ 2., 0., -2.]) + + """ + return pu._sub(c1, c2) + + +def legmulx(c): + """Multiply a Legendre series by x. + + Multiply the Legendre series `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of Legendre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + See Also + -------- + legadd, legmul, legmul, legdiv, legpow + + Notes + ----- + The multiplication uses the recursion relationship for Legendre + polynomials in the form + + .. math:: + + xP_i(x) = ((i + 1)*P_{i + 1}(x) + i*P_{i - 1}(x))/(2i + 1) + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> L.legmulx([1,2,3]) + array([ 0.66666667, 2.2, 1.33333333, 1.8]) # may vary + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0]*0 + prd[1] = c[0] + for i in range(1, len(c)): + j = i + 1 + k = i - 1 + s = i + j + prd[j] = (c[i]*j)/s + prd[k] += (c[i]*i)/s + return prd + + +def legmul(c1, c2): + """ + Multiply one Legendre series by another. + + Returns the product of two Legendre series `c1` * `c2`. The arguments + are sequences of coefficients, from lowest order "term" to highest, + e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Legendre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Legendre series coefficients representing their product. 
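+
+    For example, the square of ``P_1``, i.e. ``x**2``, reprojects onto
+    ``P_0`` and ``P_2``:
+
+    >>> from numpy.polynomial import legendre as L
+    >>> L.legmul([0, 1], [0, 1])  # P_1 * P_1 == (P_0 + 2*P_2)/3
+    array([0.33333333, 0.        , 0.66666667])  # may vary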
+ + See Also + -------- + legadd, legsub, legmulx, legdiv, legpow + + Notes + ----- + In general, the (polynomial) product of two C-series results in terms + that are not in the Legendre polynomial basis set. Thus, to express + the product as a Legendre series, it is necessary to "reproject" the + product onto said basis set, which may produce "unintuitive" (but + correct) results; see Examples section below. + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c1 = (1,2,3) + >>> c2 = (3,2) + >>> L.legmul(c1,c2) # multiplication requires "reprojection" + array([ 4.33333333, 10.4 , 11.66666667, 3.6 ]) # may vary + + """ + # s1, s2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + + if len(c1) > len(c2): + c = c2 + xs = c1 + else: + c = c1 + xs = c2 + + if len(c) == 1: + c0 = c[0]*xs + c1 = 0 + elif len(c) == 2: + c0 = c[0]*xs + c1 = c[1]*xs + else: + nd = len(c) + c0 = c[-2]*xs + c1 = c[-1]*xs + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = legsub(c[-i]*xs, (c1*(nd - 1))/nd) + c1 = legadd(tmp, (legmulx(c1)*(2*nd - 1))/nd) + return legadd(c0, legmulx(c1)) + + +def legdiv(c1, c2): + """ + Divide one Legendre series by another. + + Returns the quotient-with-remainder of two Legendre series + `c1` / `c2`. The arguments are sequences of coefficients from lowest + order "term" to highest, e.g., [1,2,3] represents the series + ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Legendre series coefficients ordered from low to + high. + + Returns + ------- + quo, rem : ndarrays + Of Legendre series coefficients representing the quotient and + remainder. + + See Also + -------- + legadd, legsub, legmulx, legmul, legpow + + Notes + ----- + In general, the (polynomial) division of one Legendre series by another + results in quotient and remainder terms that are not in the Legendre + polynomial basis set. Thus, to express these results as a Legendre + series, it is necessary to "reproject" the results onto the Legendre + basis set, which may produce "unintuitive" (but correct) results; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> L.legdiv(c1,c2) # quotient "intuitive," remainder not + (array([3.]), array([-8., -4.])) + >>> c2 = (0,1,2,3) + >>> L.legdiv(c2,c1) # neither "intuitive" + (array([-0.07407407, 1.66666667]), array([-1.03703704, -2.51851852])) # may vary + + """ + return pu._div(legmul, c1, c2) + + +def legpow(c, pow, maxpower=16): + """Raise a Legendre series to a power. + + Returns the Legendre series `c` raised to the power `pow`. The + argument `c` is a sequence of coefficients ordered from low to high. + i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` + + Parameters + ---------- + c : array_like + 1-D array of Legendre series coefficients ordered from low to + high. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Legendre series of power. + + See Also + -------- + legadd, legsub, legmulx, legmul, legdiv + + """ + return pu._pow(legmul, c, pow, maxpower) + + +def legder(c, m=1, scl=1, axis=0): + """ + Differentiate a Legendre series. + + Returns the Legendre series coefficients `c` differentiated `m` times + along `axis`. 
At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*L_0 + 2*L_1 + 3*L_2`` + while [[1,2],[1,2]] represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + + 2*L_0(x)*L_1(y) + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is + ``y``. + + Parameters + ---------- + c : array_like + Array of Legendre series coefficients. If c is multidimensional the + different axis correspond to different variables with the degree in + each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + der : ndarray + Legendre series of the derivative. + + See Also + -------- + legint + + Notes + ----- + In general, the result of differentiating a Legendre series does not + resemble the same operation on a power series. Thus the result of this + function may be "unintuitive," albeit correct; see Examples section + below. + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c = (1,2,3,4) + >>> L.legder(c) + array([ 6., 9., 20.]) + >>> L.legder(c, 3) + array([60.]) + >>> L.legder(c, scl=-1) + array([ -6., -9., -20.]) + >>> L.legder(c, 2,-1) + array([ 9., 60.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + cnt = pu._deprecate_as_int(m, "the order of derivation") + iaxis = pu._deprecate_as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + c = c[:1]*0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 2, -1): + der[j - 1] = (2*j - 1)*c[j] + c[j - 2] += c[j] + if n > 1: + der[1] = 3*c[2] + der[0] = c[1] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Legendre series. + + Returns the Legendre series coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``L_0 + 2*L_1 + 3*L_2`` while [[1,2],[1,2]] + represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + 2*L_0(x)*L_1(y) + + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + Parameters + ---------- + c : array_like + Array of Legendre series coefficients. If c is multidimensional the + different axis correspond to different variables with the degree in + each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. 
(Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at + ``lbnd`` is the first value in the list, the value of the second + integral at ``lbnd`` is the second value, etc. If ``k == []`` (the + default), all constants are set to zero. If ``m == 1``, a single + scalar can be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + S : ndarray + Legendre series coefficient array of the integral. + + Raises + ------ + ValueError + If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + legder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a` - perhaps not what one would have first thought. + + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c = (1,2,3) + >>> L.legint(c) + array([ 0.33333333, 0.4 , 0.66666667, 0.6 ]) # may vary + >>> L.legint(c, 3) + array([ 1.66666667e-02, -1.78571429e-02, 4.76190476e-02, # may vary + -1.73472348e-18, 1.90476190e-02, 9.52380952e-03]) + >>> L.legint(c, k=3) + array([ 3.33333333, 0.4 , 0.66666667, 0.6 ]) # may vary + >>> L.legint(c, lbnd=-2) + array([ 7.33333333, 0.4 , 0.66666667, 0.6 ]) # may vary + >>> L.legint(c, scl=2) + array([ 0.66666667, 0.8 , 1.33333333, 1.2 ]) # may vary + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if not np.iterable(k): + k = [k] + cnt = pu._deprecate_as_int(m, "the order of integration") + iaxis = pu._deprecate_as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + k = list(k) + [0]*(cnt - len(k)) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) + tmp[0] = c[0]*0 + tmp[1] = c[0] + if n > 1: + tmp[2] = c[1]/3 + for j in range(2, n): + t = c[j]/(2*j + 1) + tmp[j + 1] = t + tmp[j - 1] -= t + tmp[0] += k[i] - legval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def legval(x, c, tensor=True): + """ + Evaluate a Legendre series at points x. + + If `c` is of length `n + 1`, this function returns the value: + + .. math:: p(x) = c_0 * L_0(x) + c_1 * L_1(x) + ... + c_n * L_n(x) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. 
In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + with themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + .. versionadded:: 1.7.0 + + Returns + ------- + values : ndarray, algebra_like + The shape of the return value is described above. + + See Also + -------- + legval2d, leggrid2d, legval3d, leggrid3d + + Notes + ----- + The evaluation uses Clenshaw recursion, aka synthetic division. + + """ + c = np.array(c, ndmin=1, copy=False) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,)*x.ndim) + + if len(c) == 1: + c0 = c[0] + c1 = 0 + elif len(c) == 2: + c0 = c[0] + c1 = c[1] + else: + nd = len(c) + c0 = c[-2] + c1 = c[-1] + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = c[-i] - (c1*(nd - 1))/nd + c1 = tmp + (c1*x*(2*nd - 1))/nd + return c0 + c1*x + + +def legval2d(x, y, c): + """ + Evaluate a 2-D Legendre series at points (x, y). + + This function returns the values: + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y) + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` is a 1-D array a one is implicitly appended to its shape to make + it 2-D. The shape of the result will be c.shape[2:] + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points `(x, y)`, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and if it isn't an ndarray it is treated as a scalar. 
+
+    c : array_like
+        Array of coefficients ordered so that the coefficient of the term
+        of multi-degree i,j is contained in ``c[i,j]``. If `c` has
+        dimension greater than two the remaining indices enumerate multiple
+        sets of coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the two dimensional Legendre series at points formed
+        from pairs of corresponding values from `x` and `y`.
+
+    See Also
+    --------
+    legval, leggrid2d, legval3d, leggrid3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    return pu._valnd(legval, c, x, y)
+
+
+def leggrid2d(x, y, c):
+    """
+    Evaluate a 2-D Legendre series on the Cartesian product of x and y.
+
+    This function returns the values:
+
+    .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * L_i(a) * L_j(b)
+
+    where the points `(a, b)` consist of all pairs formed by taking
+    `a` from `x` and `b` from `y`. The resulting points form a grid with
+    `x` in the first dimension and `y` in the second.
+
+    The parameters `x` and `y` are converted to arrays only if they are
+    tuples or lists, otherwise they are treated as scalars. In either
+    case, either `x` and `y` or their elements must support multiplication
+    and addition both with themselves and with the elements of `c`.
+
+    If `c` has fewer than two dimensions, ones are implicitly appended to
+    its shape to make it 2-D. The shape of the result will be c.shape[2:] +
+    x.shape + y.shape.
+
+    Parameters
+    ----------
+    x, y : array_like, compatible objects
+        The two dimensional series is evaluated at the points in the
+        Cartesian product of `x` and `y`. If `x` or `y` is a list or
+        tuple, it is first converted to an ndarray, otherwise it is left
+        unchanged and, if it isn't an ndarray, it is treated as a scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficient of the term of
+        multi-degree i,j is contained in `c[i,j]`. If `c` has dimension
+        greater than two the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the two dimensional Legendre series at points in the
+        Cartesian product of `x` and `y`.
+
+    See Also
+    --------
+    legval, legval2d, legval3d, leggrid3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    return pu._gridnd(legval, c, x, y)
+
+
+def legval3d(x, y, z, c):
+    """
+    Evaluate a 3-D Legendre series at points (x, y, z).
+
+    This function returns the values:
+
+    .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z)
+
+    The parameters `x`, `y`, and `z` are converted to arrays only if
+    they are tuples or lists, otherwise they are treated as scalars and
+    they must have the same shape after conversion. In either case, either
+    `x`, `y`, and `z` or their elements must support multiplication and
+    addition both with themselves and with the elements of `c`.
+
+    If `c` has fewer than 3 dimensions, ones are implicitly appended to its
+    shape to make it 3-D. The shape of the result will be c.shape[3:] +
+    x.shape.
+
+    Parameters
+    ----------
+    x, y, z : array_like, compatible object
+        The three dimensional series is evaluated at the points
+        `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
+        any of `x`, `y`, or `z` is a list or tuple, it is first converted
+        to an ndarray, otherwise it is left unchanged and if it isn't an
+        ndarray it is treated as a scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficient of the term of
+        multi-degree i,j,k is contained in ``c[i,j,k]``.
+        If `c` has dimension greater than 3 the remaining indices enumerate
+        multiple sets of coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the multidimensional polynomial on points formed with
+        triples of corresponding values from `x`, `y`, and `z`.
+
+    See Also
+    --------
+    legval, legval2d, leggrid2d, leggrid3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    return pu._valnd(legval, c, x, y, z)
+
+
+def leggrid3d(x, y, z, c):
+    """
+    Evaluate a 3-D Legendre series on the Cartesian product of x, y, and z.
+
+    This function returns the values:
+
+    .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c)
+
+    where the points `(a, b, c)` consist of all triples formed by taking
+    `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
+    a grid with `x` in the first dimension, `y` in the second, and `z` in
+    the third.
+
+    The parameters `x`, `y`, and `z` are converted to arrays only if they
+    are tuples or lists, otherwise they are treated as scalars. In
+    either case, either `x`, `y`, and `z` or their elements must support
+    multiplication and addition both with themselves and with the elements
+    of `c`.
+
+    If `c` has fewer than three dimensions, ones are implicitly appended to
+    its shape to make it 3-D. The shape of the result will be c.shape[3:] +
+    x.shape + y.shape + z.shape.
+
+    Parameters
+    ----------
+    x, y, z : array_like, compatible objects
+        The three dimensional series is evaluated at the points in the
+        Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a
+        list or tuple, it is first converted to an ndarray, otherwise it is
+        left unchanged and, if it isn't an ndarray, it is treated as a
+        scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficients for terms of
+        degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension
+        greater than three the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the three dimensional Legendre series at points in
+        the Cartesian product of `x`, `y`, and `z`.
+
+    See Also
+    --------
+    legval, legval2d, leggrid2d, legval3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    return pu._gridnd(legval, c, x, y, z)
+
+
+def legvander(x, deg):
+    """Pseudo-Vandermonde matrix of given degree.
+
+    Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
+    `x`. The pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., i] = L_i(x)
+
+    where `0 <= i <= deg`. The leading indices of `V` index the elements of
+    `x` and the last index is the degree of the Legendre polynomial.
+
+    If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
+    array ``V = legvander(x, n)``, then ``np.dot(V, c)`` and
+    ``legval(x, c)`` are the same up to roundoff. This equivalence is
+    useful both for least squares fitting and for the evaluation of a large
+    number of Legendre series of the same degree and sample points.
+
+    Parameters
+    ----------
+    x : array_like
+        Array of points. The dtype is converted to float64 or complex128
+        depending on whether any of the elements are complex. If `x` is
+        scalar it is converted to a 1-D array.
+    deg : int
+        Degree of the resulting matrix.
+
+    Returns
+    -------
+    vander : ndarray
+        The pseudo-Vandermonde matrix. The shape of the returned matrix is
+        ``x.shape + (deg + 1,)``, where the last index is the degree of the
+        corresponding Legendre polynomial. The dtype will be the same as
+        the converted `x`.
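+
+    Examples
+    --------
+    A small illustrative check: the columns below are ``L_0(x)``,
+    ``L_1(x)``, and ``L_2(x)`` evaluated at three sample points.
+
+    >>> from numpy.polynomial import legendre as L
+    >>> L.legvander([-1, 0, 1], 2)
+    array([[ 1. , -1. ,  1. ],  # may vary
+           [ 1. ,  0. , -0.5],
+           [ 1. ,  1. ,  1. ]])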
+
+    """
+    ideg = pu._deprecate_as_int(deg, "deg")
+    if ideg < 0:
+        raise ValueError("deg must be non-negative")
+
+    x = np.array(x, copy=False, ndmin=1) + 0.0
+    dims = (ideg + 1,) + x.shape
+    dtyp = x.dtype
+    v = np.empty(dims, dtype=dtyp)
+    # Use forward recursion to generate the entries. This is not as accurate
+    # as reverse recursion in this application but it is more efficient.
+    v[0] = x*0 + 1
+    if ideg > 0:
+        v[1] = x
+        for i in range(2, ideg + 1):
+            v[i] = (v[i-1]*x*(2*i - 1) - v[i-2]*(i - 1))/i
+    return np.moveaxis(v, 0, -1)
+
+
+def legvander2d(x, y, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points `(x, y)`. The pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (deg[1] + 1)*i + j] = L_i(x) * L_j(y),
+
+    where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
+    `V` index the points `(x, y)` and the last index encodes the degrees of
+    the Legendre polynomials.
+
+    If ``V = legvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
+    correspond to the elements of a 2-D coefficient array `c` of shape
+    (xdeg + 1, ydeg + 1) in the order
+
+    .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
+
+    and ``np.dot(V, c.flat)`` and ``legval2d(x, y, c)`` will be the same
+    up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 2-D Legendre
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes
+        will be converted to either float64 or complex128 depending on
+        whether any of the elements are complex. Scalars are converted to
+        1-D arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg].
+
+    Returns
+    -------
+    vander2d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
+        as the converted `x` and `y`.
+
+    See Also
+    --------
+    legvander, legvander3d, legval2d, legval3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    return pu._vander_nd_flat((legvander, legvander), (x, y), deg)
+
+
+def legvander3d(x, y, z, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
+    then the pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z),
+
+    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
+    indices of `V` index the points `(x, y, z)` and the last index encodes
+    the degrees of the Legendre polynomials.
+
+    If ``V = legvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
+    of `V` correspond to the elements of a 3-D coefficient array `c` of
+    shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
+
+    .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
+
+    and ``np.dot(V, c.flat)`` and ``legval3d(x, y, z, c)`` will be the
+    same up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 3-D Legendre
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y, z : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes will
+        be converted to either float64 or complex128 depending on whether
+        any of the elements are complex.
Scalars are converted to 1-D + arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg, z_deg]. + + Returns + ------- + vander3d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will + be the same as the converted `x`, `y`, and `z`. + + See Also + -------- + legvander, legvander3d, legval2d, legval3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + return pu._vander_nd_flat((legvander, legvander, legvander), (x, y, z), deg) + + +def legfit(x, y, deg, rcond=None, full=False, w=None): + """ + Least squares fit of Legendre series to data. + + Return the coefficients of a Legendre series of degree `deg` that is the + least squares fit to the data values `y` given at points `x`. If `y` is + 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple + fits are done, one for each column of `y`, and the resulting + coefficients are stored in the corresponding columns of a 2-D return. + The fitted polynomial(s) are in the form + + .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x), + + where `n` is `deg`. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) or (M, K) + y-coordinates of the sample points. Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. For NumPy versions >= 1.11.0 a list of integers specifying the + degrees of the terms to include may be used instead. + rcond : float, optional + Relative condition number of the fit. Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is len(x)*eps, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (`M`,), optional + Weights. If not None, the contribution of each point + ``(x[i],y[i])`` to the fit is weighted by ``w[i]``. Ideally the + weights are chosen so that the errors of the products ``w[i]*y[i]`` + all have the same variance. The default value is None. + + .. versionadded:: 1.5.0 + + Returns + ------- + coef : ndarray, shape (M,) or (M, K) + Legendre coefficients ordered from low to high. If `y` was + 2-D, the coefficients for the data in column k of `y` are in + column `k`. If `deg` is specified as a list, coefficients for + terms not included in the fit are set equal to zero in the + returned `coef`. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if `full` = True + + resid -- sum of squared residuals of the least squares fit + rank -- the numerical rank of the scaled Vandermonde matrix + sv -- singular values of the scaled Vandermonde matrix + rcond -- value of `rcond`. + + For more details, see `numpy.linalg.lstsq`. + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if `full` = False. 
The + warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', np.RankWarning) + + See Also + -------- + numpy.polynomial.polynomial.polyfit + numpy.polynomial.chebyshev.chebfit + numpy.polynomial.laguerre.lagfit + numpy.polynomial.hermite.hermfit + numpy.polynomial.hermite_e.hermefit + legval : Evaluates a Legendre series. + legvander : Vandermonde matrix of Legendre series. + legweight : Legendre weight function (= 1). + numpy.linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the Legendre series `p` that + minimizes the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where :math:`w_j` are the weights. This problem is solved by setting up + as the (typically) overdetermined matrix equation + + .. math:: V(x) * c = w * y, + + where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the + coefficients to be solved for, `w` are the weights, and `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected, then a `RankWarning` will be issued. This means that the + coefficient values may be poorly determined. Using a lower order fit + will usually get rid of the warning. The `rcond` parameter can also be + set to a value smaller than its default, but the resulting fit may be + spurious and have large contributions from roundoff error. + + Fits using Legendre series are usually better conditioned than fits + using power series, but much can depend on the distribution of the + sample points and the smoothness of the data. If the quality of the fit + is inadequate splines may be a good alternative. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + https://en.wikipedia.org/wiki/Curve_fitting + + Examples + -------- + + """ + return pu._fit(legvander, x, y, deg, rcond, full, w) + + +def legcompanion(c): + """Return the scaled companion matrix of c. + + The basis polynomials are scaled so that the companion matrix is + symmetric when `c` is an Legendre basis polynomial. This provides + better eigenvalue estimates than the unscaled case and for basis + polynomials the eigenvalues are guaranteed to be real if + `numpy.linalg.eigvalsh` is used to obtain them. + + Parameters + ---------- + c : array_like + 1-D array of Legendre series coefficients ordered from low to high + degree. + + Returns + ------- + mat : ndarray + Scaled companion matrix of dimensions (deg, deg). + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + raise ValueError('Series must have maximum degree of at least 1.') + if len(c) == 2: + return np.array([[-c[0]/c[1]]]) + + n = len(c) - 1 + mat = np.zeros((n, n), dtype=c.dtype) + scl = 1./np.sqrt(2*np.arange(n) + 1) + top = mat.reshape(-1)[1::n+1] + bot = mat.reshape(-1)[n::n+1] + top[...] = np.arange(1, n)*scl[:n-1]*scl[1:n] + bot[...] = top + mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*(n/(2*n - 1)) + return mat + + +def legroots(c): + """ + Compute the roots of a Legendre series. + + Return the roots (a.k.a. "zeros") of the polynomial + + .. math:: p(x) = \\sum_i c[i] * L_i(x). + + Parameters + ---------- + c : 1-D array_like + 1-D array of coefficients. + + Returns + ------- + out : ndarray + Array of the roots of the series. 
+        If all the roots are real, then `out` is also real, otherwise it
+        is complex.
+
+    See Also
+    --------
+    numpy.polynomial.polynomial.polyroots
+    numpy.polynomial.chebyshev.chebroots
+    numpy.polynomial.laguerre.lagroots
+    numpy.polynomial.hermite.hermroots
+    numpy.polynomial.hermite_e.hermeroots
+
+    Notes
+    -----
+    The root estimates are obtained as the eigenvalues of the companion
+    matrix. Roots far from the origin of the complex plane may have large
+    errors due to the numerical instability of the series for such values.
+    Roots with multiplicity greater than 1 will also show larger errors as
+    the value of the series near such points is relatively insensitive to
+    errors in the roots. Isolated roots near the origin can be improved by
+    a few iterations of Newton's method.
+
+    The Legendre series basis polynomials aren't powers of ``x`` so the
+    results of this function may seem unintuitive.
+
+    Examples
+    --------
+    >>> import numpy.polynomial.legendre as leg
+    >>> leg.legroots((1, 2, 3, 4)) # 4L_3 + 3L_2 + 2L_1 + 1L_0, all real roots
+    array([-0.85099543, -0.11407192,  0.51506735]) # may vary
+
+    """
+    # c is a trimmed copy
+    [c] = pu.as_series([c])
+    if len(c) < 2:
+        return np.array([], dtype=c.dtype)
+    if len(c) == 2:
+        return np.array([-c[0]/c[1]])
+
+    # rotated companion matrix reduces error
+    m = legcompanion(c)[::-1,::-1]
+    r = la.eigvals(m)
+    r.sort()
+    return r
+
+
+def leggauss(deg):
+    """
+    Gauss-Legendre quadrature.
+
+    Computes the sample points and weights for Gauss-Legendre quadrature.
+    These sample points and weights will correctly integrate polynomials of
+    degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with
+    the weight function :math:`f(x) = 1`.
+
+    Parameters
+    ----------
+    deg : int
+        Number of sample points and weights. It must be >= 1.
+
+    Returns
+    -------
+    x : ndarray
+        1-D ndarray containing the sample points.
+    y : ndarray
+        1-D ndarray containing the weights.
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    The results have only been tested up to degree 100; higher degrees may
+    be problematic. The weights are determined by using the fact that
+
+    .. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k))
+
+    where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
+    is the k'th root of :math:`L_n`, and then scaling the results to get
+    the right value when integrating 1.
+
+    """
+    ideg = pu._deprecate_as_int(deg, "deg")
+    if ideg <= 0:
+        raise ValueError("deg must be a positive integer")
+
+    # first approximation of roots. We use the fact that the companion
+    # matrix is symmetric in this case in order to obtain better zeros.
+    c = np.array([0]*deg + [1])
+    m = legcompanion(c)
+    x = la.eigvalsh(m)
+
+    # improve roots by one application of Newton
+    dy = legval(x, c)
+    df = legval(x, legder(c))
+    x -= dy/df
+
+    # compute the weights. We scale the factor to avoid possible numerical
+    # overflow.
+    fm = legval(x, c[1:])
+    fm /= np.abs(fm).max()
+    df /= np.abs(df).max()
+    w = 1/(fm * df)
+
+    # for Legendre we can also symmetrize
+    w = (w + w[::-1])/2
+    x = (x - x[::-1])/2
+
+    # scale w to get the right value
+    w *= 2. / w.sum()
+
+    return x, w
+
+
+def legweight(x):
+    """
+    Weight function of the Legendre polynomials.
+
+    The weight function is :math:`1` and the interval of integration is
+    :math:`[-1, 1]`. The Legendre polynomials are orthogonal, but not
+    normalized, with respect to this weight function.
+
+    Parameters
+    ----------
+    x : array_like
+        Values at which the weight function will be computed.
+ + Returns + ------- + w : ndarray + The weight function at `x`. + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + w = x*0.0 + 1.0 + return w + +# +# Legendre series class +# + +class Legendre(ABCPolyBase): + """A Legendre series class. + + The Legendre class provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the + attributes and methods listed in the `ABCPolyBase` documentation. + + Parameters + ---------- + coef : array_like + Legendre coefficients in order of increasing degree, i.e., + ``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is [-1, 1]. + window : (2,) array_like, optional + Window, see `domain` for its use. The default value is [-1, 1]. + + .. versionadded:: 1.6.0 + + """ + # Virtual Functions + _add = staticmethod(legadd) + _sub = staticmethod(legsub) + _mul = staticmethod(legmul) + _div = staticmethod(legdiv) + _pow = staticmethod(legpow) + _val = staticmethod(legval) + _int = staticmethod(legint) + _der = staticmethod(legder) + _fit = staticmethod(legfit) + _line = staticmethod(legline) + _roots = staticmethod(legroots) + _fromroots = staticmethod(legfromroots) + + # Virtual properties + domain = np.array(legdomain) + window = np.array(legdomain) + basis_name = 'P' diff --git a/MLPY/Lib/site-packages/numpy/polynomial/legendre.pyi b/MLPY/Lib/site-packages/numpy/polynomial/legendre.pyi new file mode 100644 index 0000000000000000000000000000000000000000..2ef5c3bb62a62e94587cf723c6d9ea18899e85a9 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/polynomial/legendre.pyi @@ -0,0 +1,46 @@ +from typing import Any, List + +from numpy import ndarray, dtype, int_ +from numpy.polynomial._polybase import ABCPolyBase +from numpy.polynomial.polyutils import trimcoef + +__all__: list[str] + +legtrim = trimcoef + +def poly2leg(pol): ... +def leg2poly(c): ... + +legdomain: ndarray[Any, dtype[int_]] +legzero: ndarray[Any, dtype[int_]] +legone: ndarray[Any, dtype[int_]] +legx: ndarray[Any, dtype[int_]] + +def legline(off, scl): ... +def legfromroots(roots): ... +def legadd(c1, c2): ... +def legsub(c1, c2): ... +def legmulx(c): ... +def legmul(c1, c2): ... +def legdiv(c1, c2): ... +def legpow(c, pow, maxpower=...): ... +def legder(c, m=..., scl=..., axis=...): ... +def legint(c, m=..., k = ..., lbnd=..., scl=..., axis=...): ... +def legval(x, c, tensor=...): ... +def legval2d(x, y, c): ... +def leggrid2d(x, y, c): ... +def legval3d(x, y, z, c): ... +def leggrid3d(x, y, z, c): ... +def legvander(x, deg): ... +def legvander2d(x, y, deg): ... +def legvander3d(x, y, z, deg): ... +def legfit(x, y, deg, rcond=..., full=..., w=...): ... +def legcompanion(c): ... +def legroots(c): ... +def leggauss(deg): ... +def legweight(x): ... 
+ +class Legendre(ABCPolyBase): + domain: Any + window: Any + basis_name: Any diff --git a/MLPY/Lib/site-packages/numpy/polynomial/polynomial.py b/MLPY/Lib/site-packages/numpy/polynomial/polynomial.py new file mode 100644 index 0000000000000000000000000000000000000000..4938e69250aad8a70576993d6da166f3b832a80e --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/polynomial/polynomial.py @@ -0,0 +1,1529 @@ +""" +================================================= +Power Series (:mod:`numpy.polynomial.polynomial`) +================================================= + +This module provides a number of objects (mostly functions) useful for +dealing with polynomials, including a `Polynomial` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with polynomial objects is in +the docstring for its "parent" sub-package, `numpy.polynomial`). + +Classes +------- +.. autosummary:: + :toctree: generated/ + + Polynomial + +Constants +--------- +.. autosummary:: + :toctree: generated/ + + polydomain + polyzero + polyone + polyx + +Arithmetic +---------- +.. autosummary:: + :toctree: generated/ + + polyadd + polysub + polymulx + polymul + polydiv + polypow + polyval + polyval2d + polyval3d + polygrid2d + polygrid3d + +Calculus +-------- +.. autosummary:: + :toctree: generated/ + + polyder + polyint + +Misc Functions +-------------- +.. autosummary:: + :toctree: generated/ + + polyfromroots + polyroots + polyvalfromroots + polyvander + polyvander2d + polyvander3d + polycompanion + polyfit + polytrim + polyline + +See Also +-------- +`numpy.polynomial` + +""" +__all__ = [ + 'polyzero', 'polyone', 'polyx', 'polydomain', 'polyline', 'polyadd', + 'polysub', 'polymulx', 'polymul', 'polydiv', 'polypow', 'polyval', + 'polyvalfromroots', 'polyder', 'polyint', 'polyfromroots', 'polyvander', + 'polyfit', 'polytrim', 'polyroots', 'Polynomial', 'polyval2d', 'polyval3d', + 'polygrid2d', 'polygrid3d', 'polyvander2d', 'polyvander3d'] + +import numpy as np +import numpy.linalg as la +from numpy.core.multiarray import normalize_axis_index + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +polytrim = pu.trimcoef + +# +# These are constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Polynomial default domain. +polydomain = np.array([-1, 1]) + +# Polynomial coefficients representing zero. +polyzero = np.array([0]) + +# Polynomial coefficients representing one. +polyone = np.array([1]) + +# Polynomial coefficients representing the identity x. +polyx = np.array([0, 1]) + +# +# Polynomial series functions +# + + +def polyline(off, scl): + """ + Returns an array representing a linear polynomial. + + Parameters + ---------- + off, scl : scalars + The "y-intercept" and "slope" of the line, respectively. + + Returns + ------- + y : ndarray + This module's representation of the linear polynomial ``off + + scl*x``. + + See Also + -------- + numpy.polynomial.chebyshev.chebline + numpy.polynomial.legendre.legline + numpy.polynomial.laguerre.lagline + numpy.polynomial.hermite.hermline + numpy.polynomial.hermite_e.hermeline + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> P.polyline(1,-1) + array([ 1, -1]) + >>> P.polyval(1, P.polyline(1,-1)) # should be 0 + 0.0 + + """ + if scl != 0: + return np.array([off, scl]) + else: + return np.array([off]) + + +def polyfromroots(roots): + """ + Generate a monic polynomial with given roots. 
+ + Return the coefficients of the polynomial + + .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), + + where the ``r_n`` are the roots specified in `roots`. If a zero has + multiplicity n, then it must appear in `roots` n times. For instance, + if 2 is a root of multiplicity three and 3 is a root of multiplicity 2, + then `roots` looks something like [2, 2, 2, 3, 3]. The roots can appear + in any order. + + If the returned coefficients are `c`, then + + .. math:: p(x) = c_0 + c_1 * x + ... + x^n + + The coefficient of the last term is 1 for monic polynomials in this + form. + + Parameters + ---------- + roots : array_like + Sequence containing the roots. + + Returns + ------- + out : ndarray + 1-D array of the polynomial's coefficients If all the roots are + real, then `out` is also real, otherwise it is complex. (see + Examples below). + + See Also + -------- + numpy.polynomial.chebyshev.chebfromroots + numpy.polynomial.legendre.legfromroots + numpy.polynomial.laguerre.lagfromroots + numpy.polynomial.hermite.hermfromroots + numpy.polynomial.hermite_e.hermefromroots + + Notes + ----- + The coefficients are determined by multiplying together linear factors + of the form ``(x - r_i)``, i.e. + + .. math:: p(x) = (x - r_0) (x - r_1) ... (x - r_n) + + where ``n == len(roots) - 1``; note that this implies that ``1`` is always + returned for :math:`a_n`. + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> P.polyfromroots((-1,0,1)) # x(x - 1)(x + 1) = x^3 - x + array([ 0., -1., 0., 1.]) + >>> j = complex(0,1) + >>> P.polyfromroots((-j,j)) # complex returned, though values are real + array([1.+0.j, 0.+0.j, 1.+0.j]) + + """ + return pu._fromroots(polyline, polymul, roots) + + +def polyadd(c1, c2): + """ + Add one polynomial to another. + + Returns the sum of two polynomials `c1` + `c2`. The arguments are + sequences of coefficients from lowest order term to highest, i.e., + [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of polynomial coefficients ordered from low to high. + + Returns + ------- + out : ndarray + The coefficient array representing their sum. + + See Also + -------- + polysub, polymulx, polymul, polydiv, polypow + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> sum = P.polyadd(c1,c2); sum + array([4., 4., 4.]) + >>> P.polyval(2, sum) # 4 + 4(2) + 4(2**2) + 28.0 + + """ + return pu._add(c1, c2) + + +def polysub(c1, c2): + """ + Subtract one polynomial from another. + + Returns the difference of two polynomials `c1` - `c2`. The arguments + are sequences of coefficients from lowest order term to highest, i.e., + [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of polynomial coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of coefficients representing their difference. + + See Also + -------- + polyadd, polymulx, polymul, polydiv, polypow + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> P.polysub(c1,c2) + array([-2., 0., 2.]) + >>> P.polysub(c2,c1) # -P.polysub(c1,c2) + array([ 2., 0., -2.]) + + """ + return pu._sub(c1, c2) + + +def polymulx(c): + """Multiply a polynomial by x. + + Multiply the polynomial `c` by x, where x is the independent + variable. 
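+    In terms of coefficients this is a simple shift: if `c` is
+    ``[c_0, c_1, ..., c_n]`` then the result is ``[0, c_0, c_1, ..., c_n]``;
+    for example, ``polymulx([1, 2, 3])`` returns ``array([0., 1., 2., 3.])``.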
+ + + Parameters + ---------- + c : array_like + 1-D array of polynomial coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + See Also + -------- + polyadd, polysub, polymul, polydiv, polypow + + Notes + ----- + + .. versionadded:: 1.5.0 + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0]*0 + prd[1:] = c + return prd + + +def polymul(c1, c2): + """ + Multiply one polynomial by another. + + Returns the product of two polynomials `c1` * `c2`. The arguments are + sequences of coefficients, from lowest order term to highest, e.g., + [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2.`` + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of coefficients representing a polynomial, relative to the + "standard" basis, and ordered from lowest order term to highest. + + Returns + ------- + out : ndarray + Of the coefficients of their product. + + See Also + -------- + polyadd, polysub, polymulx, polydiv, polypow + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> P.polymul(c1,c2) + array([ 3., 8., 14., 8., 3.]) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + ret = np.convolve(c1, c2) + return pu.trimseq(ret) + + +def polydiv(c1, c2): + """ + Divide one polynomial by another. + + Returns the quotient-with-remainder of two polynomials `c1` / `c2`. + The arguments are sequences of coefficients, from lowest order term + to highest, e.g., [1,2,3] represents ``1 + 2*x + 3*x**2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of polynomial coefficients ordered from low to high. + + Returns + ------- + [quo, rem] : ndarrays + Of coefficient series representing the quotient and remainder. + + See Also + -------- + polyadd, polysub, polymulx, polymul, polypow + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> P.polydiv(c1,c2) + (array([3.]), array([-8., -4.])) + >>> P.polydiv(c2,c1) + (array([ 0.33333333]), array([ 2.66666667, 1.33333333])) # may vary + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if c2[-1] == 0: + raise ZeroDivisionError() + + # note: this is more efficient than `pu._div(polymul, c1, c2)` + lc1 = len(c1) + lc2 = len(c2) + if lc1 < lc2: + return c1[:1]*0, c1 + elif lc2 == 1: + return c1/c2[-1], c1[:1]*0 + else: + dlen = lc1 - lc2 + scl = c2[-1] + c2 = c2[:-1]/scl + i = dlen + j = lc1 - 1 + while i >= 0: + c1[i:j] -= c2*c1[j] + i -= 1 + j -= 1 + return c1[j+1:]/scl, pu.trimseq(c1[:j+1]) + + +def polypow(c, pow, maxpower=None): + """Raise a polynomial to a power. + + Returns the polynomial `c` raised to the power `pow`. The argument + `c` is a sequence of coefficients ordered from low to high. i.e., + [1,2,3] is the series ``1 + 2*x + 3*x**2.`` + + Parameters + ---------- + c : array_like + 1-D array of array of series coefficients ordered from low to + high degree. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Power series of power. 
+ + See Also + -------- + polyadd, polysub, polymulx, polymul, polydiv + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> P.polypow([1,2,3], 2) + array([ 1., 4., 10., 12., 9.]) + + """ + # note: this is more efficient than `pu._pow(polymul, c1, c2)`, as it + # avoids calling `as_series` repeatedly + return pu._pow(np.convolve, c, pow, maxpower) + + +def polyder(c, m=1, scl=1, axis=0): + """ + Differentiate a polynomial. + + Returns the polynomial coefficients `c` differentiated `m` times along + `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The + argument `c` is an array of coefficients from low to high degree along + each axis, e.g., [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2`` + while [[1,2],[1,2]] represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is + ``x`` and axis=1 is ``y``. + + Parameters + ---------- + c : array_like + Array of polynomial coefficients. If c is multidimensional the + different axis correspond to different variables with the degree + in each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change + of variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + der : ndarray + Polynomial coefficients of the derivative. + + See Also + -------- + polyint + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c = (1,2,3,4) # 1 + 2x + 3x**2 + 4x**3 + >>> P.polyder(c) # (d/dx)(c) = 2 + 6x + 12x**2 + array([ 2., 6., 12.]) + >>> P.polyder(c,3) # (d**3/dx**3)(c) = 24 + array([24.]) + >>> P.polyder(c,scl=-1) # (d/d(-x))(c) = -2 - 6x - 12x**2 + array([ -2., -6., -12.]) + >>> P.polyder(c,2,-1) # (d**2/d(-x)**2)(c) = 6 + 24x + array([ 6., 24.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + # astype fails with NA + c = c + 0.0 + cdt = c.dtype + cnt = pu._deprecate_as_int(m, "the order of derivation") + iaxis = pu._deprecate_as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + c = c[:1]*0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=cdt) + for j in range(n, 0, -1): + der[j - 1] = j*c[j] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a polynomial. + + Returns the polynomial coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients, from low to high degree along each axis, e.g., [1,2,3] + represents the polynomial ``1 + 2*x + 3*x**2`` while [[1,2],[1,2]] + represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is ``x`` and axis=1 is + ``y``. 
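+    As a small worked check, integrating the constant series ``c = [1]``
+    once with the default ``k``, ``lbnd`` and ``scl`` gives
+    ``array([0., 1.])``, i.e. the antiderivative ``x``.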
+
+    Parameters
+    ----------
+    c : array_like
+        1-D array of polynomial coefficients, ordered from low to high.
+    m : int, optional
+        Order of integration, must be non-negative. (Default: 1)
+    k : {[], list, scalar}, optional
+        Integration constant(s). The value of the first integral at
+        ``lbnd`` is the first value in the list, the value of the second
+        integral at ``lbnd`` is the second value, etc. If ``k == []`` (the
+        default), all constants are set to zero. If ``m == 1``, a single
+        scalar can be given instead of a list.
+    lbnd : scalar, optional
+        The lower bound of the integral. (Default: 0)
+    scl : scalar, optional
+        Following each integration the result is *multiplied* by `scl`
+        before the integration constant is added. (Default: 1)
+    axis : int, optional
+        Axis over which the integral is taken. (Default: 0).
+
+        .. versionadded:: 1.7.0
+
+    Returns
+    -------
+    S : ndarray
+        Coefficient array of the integral.
+
+    Raises
+    ------
+    ValueError
+        If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or
+        ``np.ndim(scl) != 0``.
+
+    See Also
+    --------
+    polyder
+
+    Notes
+    -----
+    Note that the result of each integration is *multiplied* by `scl`. Why
+    is this important to note? Say one is making a linear change of
+    variable :math:`u = ax + b` in an integral relative to `x`. Then
+    :math:`dx = du/a`, so one will need to set `scl` equal to
+    :math:`1/a` - perhaps not what one would have first thought.
+
+    Examples
+    --------
+    >>> from numpy.polynomial import polynomial as P
+    >>> c = (1,2,3)
+    >>> P.polyint(c) # should return array([0, 1, 1, 1])
+    array([0., 1., 1., 1.])
+    >>> P.polyint(c,3) # should return array([0, 0, 0, 1/6, 1/12, 1/20])
+    array([ 0.        ,  0.        ,  0.        ,  0.16666667,  0.08333333, # may vary
+            0.05      ])
+    >>> P.polyint(c,k=3) # should return array([3, 1, 1, 1])
+    array([3., 1., 1., 1.])
+    >>> P.polyint(c,lbnd=-2) # should return array([6, 1, 1, 1])
+    array([6., 1., 1., 1.])
+    >>> P.polyint(c,scl=-2) # should return array([0, -2, -2, -2])
+    array([ 0., -2., -2., -2.])
+
+    """
+    c = np.array(c, ndmin=1, copy=True)
+    if c.dtype.char in '?bBhHiIlLqQpP':
+        # astype doesn't preserve mask attribute.
+        c = c + 0.0
+    cdt = c.dtype
+    if not np.iterable(k):
+        k = [k]
+    cnt = pu._deprecate_as_int(m, "the order of integration")
+    iaxis = pu._deprecate_as_int(axis, "the axis")
+    if cnt < 0:
+        raise ValueError("The order of integration must be non-negative")
+    if len(k) > cnt:
+        raise ValueError("Too many integration constants")
+    if np.ndim(lbnd) != 0:
+        raise ValueError("lbnd must be a scalar.")
+    if np.ndim(scl) != 0:
+        raise ValueError("scl must be a scalar.")
+    iaxis = normalize_axis_index(iaxis, c.ndim)
+
+    if cnt == 0:
+        return c
+
+    k = list(k) + [0]*(cnt - len(k))
+    c = np.moveaxis(c, iaxis, 0)
+    for i in range(cnt):
+        n = len(c)
+        c *= scl
+        if n == 1 and np.all(c[0] == 0):
+            c[0] += k[i]
+        else:
+            tmp = np.empty((n + 1,) + c.shape[1:], dtype=cdt)
+            tmp[0] = c[0]*0
+            tmp[1] = c[0]
+            for j in range(1, n):
+                tmp[j + 1] = c[j]/(j + 1)
+            tmp[0] += k[i] - polyval(lbnd, tmp)
+            c = tmp
+    c = np.moveaxis(c, 0, iaxis)
+    return c
+
+
+def polyval(x, c, tensor=True):
+    """
+    Evaluate a polynomial at points x.
+
+    If `c` is of length `n + 1`, this function returns the value
+
+    .. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n
+
+    The parameter `x` is converted to an array only if it is a tuple or a
+    list, otherwise it is treated as a scalar. In either case, either `x`
+    or its elements must support multiplication and addition both with
+    themselves and with the elements of `c`.
+ + If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + with themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + .. versionadded:: 1.7.0 + + Returns + ------- + values : ndarray, compatible object + The shape of the returned array is described above. + + See Also + -------- + polyval2d, polygrid2d, polyval3d, polygrid3d + + Notes + ----- + The evaluation uses Horner's method. + + Examples + -------- + >>> from numpy.polynomial.polynomial import polyval + >>> polyval(1, [1,2,3]) + 6.0 + >>> a = np.arange(4).reshape(2,2) + >>> a + array([[0, 1], + [2, 3]]) + >>> polyval(a, [1,2,3]) + array([[ 1., 6.], + [17., 34.]]) + >>> coef = np.arange(4).reshape(2,2) # multidimensional coefficients + >>> coef + array([[0, 1], + [2, 3]]) + >>> polyval([1,2], coef, tensor=True) + array([[2., 4.], + [4., 7.]]) + >>> polyval([1,2], coef, tensor=False) + array([2., 7.]) + + """ + c = np.array(c, ndmin=1, copy=False) + if c.dtype.char in '?bBhHiIlLqQpP': + # astype fails with NA + c = c + 0.0 + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,)*x.ndim) + + c0 = c[-1] + x*0 + for i in range(2, len(c) + 1): + c0 = c[-i] + c0*x + return c0 + + +def polyvalfromroots(x, r, tensor=True): + """ + Evaluate a polynomial specified by its roots at points x. + + If `r` is of length `N`, this function returns the value + + .. math:: p(x) = \\prod_{n=1}^{N} (x - r_n) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `r`. + + If `r` is a 1-D array, then `p(x)` will have the same shape as `x`. If `r` + is multidimensional, then the shape of the result depends on the value of + `tensor`. If `tensor is ``True`` the shape will be r.shape[1:] + x.shape; + that is, each polynomial is evaluated at every value of `x`. If `tensor` is + ``False``, the shape will be r.shape[1:]; that is, each polynomial is + evaluated only for the corresponding broadcast value of `x`. 
Note that + scalars have shape (,). + + .. versionadded:: 1.12 + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + with themselves and with the elements of `r`. + r : array_like + Array of roots. If `r` is multidimensional the first index is the + root index, while the remaining indices enumerate multiple + polynomials. For instance, in the two dimensional case the roots + of each polynomial may be thought of as stored in the columns of `r`. + tensor : boolean, optional + If True, the shape of the roots array is extended with ones on the + right, one for each dimension of `x`. Scalars have dimension 0 for this + action. The result is that every column of coefficients in `r` is + evaluated for every element of `x`. If False, `x` is broadcast over the + columns of `r` for the evaluation. This keyword is useful when `r` is + multidimensional. The default value is True. + + Returns + ------- + values : ndarray, compatible object + The shape of the returned array is described above. + + See Also + -------- + polyroots, polyfromroots, polyval + + Examples + -------- + >>> from numpy.polynomial.polynomial import polyvalfromroots + >>> polyvalfromroots(1, [1,2,3]) + 0.0 + >>> a = np.arange(4).reshape(2,2) + >>> a + array([[0, 1], + [2, 3]]) + >>> polyvalfromroots(a, [-1, 0, 1]) + array([[-0., 0.], + [ 6., 24.]]) + >>> r = np.arange(-2, 2).reshape(2,2) # multidimensional coefficients + >>> r # each column of r defines one polynomial + array([[-2, -1], + [ 0, 1]]) + >>> b = [-2, 1] + >>> polyvalfromroots(b, r, tensor=True) + array([[-0., 3.], + [ 3., 0.]]) + >>> polyvalfromroots(b, r, tensor=False) + array([-0., 0.]) + """ + r = np.array(r, ndmin=1, copy=False) + if r.dtype.char in '?bBhHiIlLqQpP': + r = r.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray): + if tensor: + r = r.reshape(r.shape + (1,)*x.ndim) + elif x.ndim >= r.ndim: + raise ValueError("x.ndim must be < r.ndim when tensor == False") + return np.prod(x - r, axis=0) + + +def polyval2d(x, y, c): + """ + Evaluate a 2-D polynomial at points (x, y). + + This function returns the value + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * x^i * y^j + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points `(x, y)`, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in `c[i,j]`. If `c` has + dimension greater than two the remaining indices enumerate multiple + sets of coefficients. 
+ + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points formed with + pairs of corresponding values from `x` and `y`. + + See Also + -------- + polyval, polygrid2d, polyval3d, polygrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + return pu._valnd(polyval, c, x, y) + + +def polygrid2d(x, y, c): + """ + Evaluate a 2-D polynomial on the Cartesian product of x and y. + + This function returns the values: + + .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * a^i * b^j + + where the points `(a, b)` consist of all pairs formed by taking + `a` from `x` and `b` from `y`. The resulting points form a grid with + `x` in the first dimension and `y` in the second. + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars. In either + case, either `x` and `y` or their elements must support multiplication + and addition both with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape + y.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points in the + Cartesian product of `x` and `y`. If `x` or `y` is a list or + tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j are contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points in the Cartesian + product of `x` and `y`. + + See Also + -------- + polyval, polyval2d, polyval3d, polygrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + return pu._gridnd(polyval, c, x, y) + + +def polyval3d(x, y, z, c): + """ + Evaluate a 3-D polynomial at points (x, y, z). + + This function returns the values: + + .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * x^i * y^j * z^k + + The parameters `x`, `y`, and `z` are converted to arrays only if + they are tuples or a lists, otherwise they are treated as a scalars and + they must have the same shape after conversion. In either case, either + `x`, `y`, and `z` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than 3 dimensions, ones are implicitly appended to its + shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape. + + Parameters + ---------- + x, y, z : array_like, compatible object + The three dimensional series is evaluated at the points + `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If + any of `x`, `y`, or `z` is a list or tuple, it is first converted + to an ndarray, otherwise it is left unchanged and if it isn't an + ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension + greater than 3 the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + triples of corresponding values from `x`, `y`, and `z`. 
+
+    See Also
+    --------
+    polyval, polyval2d, polygrid2d, polygrid3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    return pu._valnd(polyval, c, x, y, z)
+
+
+def polygrid3d(x, y, z, c):
+    """
+    Evaluate a 3-D polynomial on the Cartesian product of x, y and z.
+
+    This function returns the values:
+
+    .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * a^i * b^j * c^k
+
+    where the points `(a, b, c)` consist of all triples formed by taking
+    `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
+    a grid with `x` in the first dimension, `y` in the second, and `z` in
+    the third.
+
+    The parameters `x`, `y`, and `z` are converted to arrays only if they
+    are tuples or lists, otherwise they are treated as scalars. In
+    either case, either `x`, `y`, and `z` or their elements must support
+    multiplication and addition both with themselves and with the elements
+    of `c`.
+
+    If `c` has fewer than three dimensions, ones are implicitly appended to
+    its shape to make it 3-D. The shape of the result will be c.shape[3:] +
+    x.shape + y.shape + z.shape.
+
+    Parameters
+    ----------
+    x, y, z : array_like, compatible objects
+        The three dimensional series is evaluated at the points in the
+        Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a
+        list or tuple, it is first converted to an ndarray, otherwise it is
+        left unchanged and, if it isn't an ndarray, it is treated as a
+        scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficients for terms of
+        degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension
+        greater than three the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the three dimensional polynomial at points in the
+        Cartesian product of `x`, `y`, and `z`.
+
+    See Also
+    --------
+    polyval, polyval2d, polygrid2d, polyval3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    return pu._gridnd(polyval, c, x, y, z)
+
+
+def polyvander(x, deg):
+    """Vandermonde matrix of given degree.
+
+    Returns the Vandermonde matrix of degree `deg` and sample points
+    `x`. The Vandermonde matrix is defined by
+
+    .. math:: V[..., i] = x^i,
+
+    where `0 <= i <= deg`. The leading indices of `V` index the elements of
+    `x` and the last index is the power of `x`.
+
+    If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
+    matrix ``V = polyvander(x, n)``, then ``np.dot(V, c)`` and
+    ``polyval(x, c)`` are the same up to roundoff. This equivalence is
+    useful both for least squares fitting and for the evaluation of a large
+    number of polynomials of the same degree and sample points.
+
+    Parameters
+    ----------
+    x : array_like
+        Array of points. The dtype is converted to float64 or complex128
+        depending on whether any of the elements are complex. If `x` is
+        scalar it is converted to a 1-D array.
+    deg : int
+        Degree of the resulting matrix.
+
+    Returns
+    -------
+    vander : ndarray
+        The Vandermonde matrix. The shape of the returned matrix is
+        ``x.shape + (deg + 1,)``, where the last index is the power of `x`.
+        The dtype will be the same as the converted `x`.
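+
+    As a quick illustration of the equivalence noted above (the sample
+    points and coefficients are arbitrary):
+
+    >>> import numpy as np
+    >>> from numpy.polynomial import polynomial as P
+    >>> x = np.array([0.5, 2.0])
+    >>> c = np.array([1.0, 2.0, 3.0])  # 1 + 2*x + 3*x**2
+    >>> V = P.polyvander(x, 2)
+    >>> np.allclose(np.dot(V, c), P.polyval(x, c))
+    True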
+
+    See Also
+    --------
+    polyvander2d, polyvander3d
+
+    """
+    ideg = pu._deprecate_as_int(deg, "deg")
+    if ideg < 0:
+        raise ValueError("deg must be non-negative")
+
+    x = np.array(x, copy=False, ndmin=1) + 0.0
+    dims = (ideg + 1,) + x.shape
+    dtyp = x.dtype
+    v = np.empty(dims, dtype=dtyp)
+    v[0] = x*0 + 1
+    if ideg > 0:
+        v[1] = x
+        for i in range(2, ideg + 1):
+            v[i] = v[i-1]*x
+    return np.moveaxis(v, 0, -1)
+
+
+def polyvander2d(x, y, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points `(x, y)`. The pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (deg[1] + 1)*i + j] = x^i * y^j,
+
+    where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
+    `V` index the points `(x, y)` and the last index encodes the powers of
+    `x` and `y`.
+
+    If ``V = polyvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
+    correspond to the elements of a 2-D coefficient array `c` of shape
+    (xdeg + 1, ydeg + 1) in the order
+
+    .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
+
+    and ``np.dot(V, c.flat)`` and ``polyval2d(x, y, c)`` will be the same
+    up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 2-D polynomials
+    of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes
+        will be converted to either float64 or complex128 depending on
+        whether any of the elements are complex. Scalars are converted to
+        1-D arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg].
+
+    Returns
+    -------
+    vander2d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
+        as the converted `x` and `y`.
+
+    See Also
+    --------
+    polyvander, polyvander3d, polyval2d, polyval3d
+
+    """
+    return pu._vander_nd_flat((polyvander, polyvander), (x, y), deg)
+
+
+def polyvander3d(x, y, z, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
+    then the pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = x^i * y^j * z^k,
+
+    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
+    indices of `V` index the points `(x, y, z)` and the last index encodes
+    the powers of `x`, `y`, and `z`.
+
+    If ``V = polyvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
+    of `V` correspond to the elements of a 3-D coefficient array `c` of
+    shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
+
+    .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
+
+    and ``np.dot(V, c.flat)`` and ``polyval3d(x, y, z, c)`` will be the
+    same up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 3-D polynomials
+    of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y, z : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes will
+        be converted to either float64 or complex128 depending on whether
+        any of the elements are complex. Scalars are converted to 1-D
+        arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg, z_deg].
+
+    Returns
+    -------
+    vander3d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
+        be the same as the converted `x`, `y`, and `z`.
+
+    See Also
+    --------
+    polyvander, polyvander2d, polyval2d, polyval3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    return pu._vander_nd_flat((polyvander, polyvander, polyvander), (x, y, z), deg)
+
+
+def polyfit(x, y, deg, rcond=None, full=False, w=None):
+    """
+    Least-squares fit of a polynomial to data.
+
+    Return the coefficients of a polynomial of degree `deg` that is the
+    least squares fit to the data values `y` given at points `x`. If `y` is
+    1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
+    fits are done, one for each column of `y`, and the resulting
+    coefficients are stored in the corresponding columns of a 2-D return.
+    The fitted polynomial(s) are in the form
+
+    .. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n,
+
+    where `n` is `deg`.
+
+    Parameters
+    ----------
+    x : array_like, shape (`M`,)
+        x-coordinates of the `M` sample (data) points ``(x[i], y[i])``.
+    y : array_like, shape (`M`,) or (`M`, `K`)
+        y-coordinates of the sample points. Several sets of sample points
+        sharing the same x-coordinates can be (independently) fit with one
+        call to `polyfit` by passing in for `y` a 2-D array that contains
+        one data set per column.
+    deg : int or 1-D array_like
+        Degree(s) of the fitting polynomials. If `deg` is a single integer
+        all terms up to and including the `deg`'th term are included in the
+        fit. For NumPy versions >= 1.11.0 a list of integers specifying the
+        degrees of the terms to include may be used instead.
+    rcond : float, optional
+        Relative condition number of the fit. Singular values smaller
+        than `rcond`, relative to the largest singular value, will be
+        ignored. The default value is ``len(x)*eps``, where `eps` is the
+        relative precision of the platform's float type, about 2e-16 in
+        most cases.
+    full : bool, optional
+        Switch determining the nature of the return value. When ``False``
+        (the default) just the coefficients are returned; when ``True``,
+        diagnostic information from the singular value decomposition (used
+        to solve the fit's matrix equation) is also returned.
+    w : array_like, shape (`M`,), optional
+        Weights. If not None, the contribution of each point
+        ``(x[i],y[i])`` to the fit is weighted by ``w[i]``. Ideally the
+        weights are chosen so that the errors of the products ``w[i]*y[i]``
+        all have the same variance. The default value is None.
+
+        .. versionadded:: 1.5.0
+
+    Returns
+    -------
+    coef : ndarray, shape (`deg` + 1,) or (`deg` + 1, `K`)
+        Polynomial coefficients ordered from low to high. If `y` was 2-D,
+        the coefficients in column `k` of `coef` represent the polynomial
+        fit to the data in `y`'s `k`-th column.
+
+    [residuals, rank, singular_values, rcond] : list
+        These values are only returned if ``full == True``
+
+        residuals -- sum of squared residuals of the least squares fit
+        rank -- the numerical rank of the scaled Vandermonde matrix
+        singular_values -- singular values of the scaled Vandermonde matrix
+        rcond -- value of `rcond`.
+
+        For more details, see `numpy.linalg.lstsq`.
+
+    Raises
+    ------
+    RankWarning
+        Raised if the matrix in the least-squares fit is rank deficient.
+        The warning is only raised if ``full == False``.
+        The warnings can be turned off by:
+
+        >>> import warnings
+        >>> warnings.simplefilter('ignore', np.RankWarning)
+
+    See Also
+    --------
+    numpy.polynomial.chebyshev.chebfit
+    numpy.polynomial.legendre.legfit
+    numpy.polynomial.laguerre.lagfit
+    numpy.polynomial.hermite.hermfit
+    numpy.polynomial.hermite_e.hermefit
+    polyval : Evaluates a polynomial.
+    polyvander : Vandermonde matrix for powers.
+    numpy.linalg.lstsq : Computes a least-squares fit from the matrix.
+    scipy.interpolate.UnivariateSpline : Computes spline fits.
+
+    Notes
+    -----
+    The solution is the coefficients of the polynomial `p` that minimizes
+    the sum of the weighted squared errors
+
+    .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
+
+    where the :math:`w_j` are the weights. This problem is solved by
+    setting up the (typically) over-determined matrix equation:
+
+    .. math:: V(x) * c = w * y,
+
+    where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
+    coefficients to be solved for, `w` are the weights, and `y` are the
+    observed values. This equation is then solved using the singular value
+    decomposition of `V`.
+
+    If some of the singular values of `V` are so small that they are
+    neglected (and ``full == False``), a `RankWarning` will be raised.
+    This means that the coefficient values may be poorly determined.
+    Fitting to a lower order polynomial will usually get rid of the warning
+    (but may not be what you want, of course; if you have independent
+    reason(s) for choosing the degree which isn't working, you may have to:
+    a) reconsider those reasons, and/or b) reconsider the quality of your
+    data). The `rcond` parameter can also be set to a value smaller than
+    its default, but the resulting fit may be spurious and have large
+    contributions from roundoff error.
+
+    Polynomial fits using double precision tend to "fail" at about
+    (polynomial) degree 20. Fits using Chebyshev or Legendre series are
+    generally better conditioned, but much can still depend on the
+    distribution of the sample points and the smoothness of the data. If
+    the quality of the fit is inadequate, splines may be a good
+    alternative.
+
+    Examples
+    --------
+    >>> np.random.seed(123)
+    >>> from numpy.polynomial import polynomial as P
+    >>> x = np.linspace(-1,1,51)  # x "data": [-1, -0.96, ..., 0.96, 1]
+    >>> y = x**3 - x + np.random.randn(len(x))  # x^3 - x + N(0,1) "noise"
+    >>> c, stats = P.polyfit(x,y,3,full=True)
+    >>> c # c[0], c[2] should be approx. 0, c[1] approx. -1, c[3] approx. 1
+    array([ 0.01909725, -1.30598256, -0.00577963,  1.02644286]) # may vary
+    >>> stats # note the large SSR, explaining the rather poor results
+    [array([ 38.06116253]), 4, array([ 1.38446749,  1.32119158,  0.50443316, # may vary
+    0.28853036]), 1.1324274851176597e-14]
+
+    Same thing without the added noise
+
+    >>> y = x**3 - x
+    >>> c, stats = P.polyfit(x,y,3,full=True)
+    >>> c # c[0], c[2] should be "very close to 0", c[1] ~= -1, c[3] ~= 1
+    array([-6.36925336e-18, -1.00000000e+00, -4.08053781e-16,  1.00000000e+00])
+    >>> stats # note the minuscule SSR
+    [array([ 7.46346754e-31]), 4, array([ 1.38446749,  1.32119158, # may vary
+    0.50443316,  0.28853036]), 1.1324274851176597e-14]
+
+    """
+    return pu._fit(polyvander, x, y, deg, rcond, full, w)
+
+
+def polycompanion(c):
+    """
+    Return the companion matrix of c.
+
+    The companion matrix for power series cannot be made symmetric by
+    scaling the basis, so this function differs from those for the
+    orthogonal polynomials.
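+
+    For example (a small, arbitrary quadratic), the eigenvalues of the
+    companion matrix recover the roots of the series:
+
+    >>> import numpy as np
+    >>> from numpy.polynomial import polynomial as P
+    >>> c = P.polyfromroots((1, 2))  # coefficients of (x - 1)*(x - 2)
+    >>> np.allclose(sorted(np.linalg.eigvals(P.polycompanion(c))), [1., 2.])
+    True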
+
+    Parameters
+    ----------
+    c : array_like
+        1-D array of polynomial coefficients ordered from low to high
+        degree.
+
+    Returns
+    -------
+    mat : ndarray
+        Companion matrix of dimensions (deg, deg).
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    # c is a trimmed copy
+    [c] = pu.as_series([c])
+    if len(c) < 2:
+        raise ValueError('Series must have maximum degree of at least 1.')
+    if len(c) == 2:
+        return np.array([[-c[0]/c[1]]])
+
+    n = len(c) - 1
+    mat = np.zeros((n, n), dtype=c.dtype)
+    bot = mat.reshape(-1)[n::n+1]
+    bot[...] = 1
+    mat[:, -1] -= c[:-1]/c[-1]
+    return mat
+
+
+def polyroots(c):
+    """
+    Compute the roots of a polynomial.
+
+    Return the roots (a.k.a. "zeros") of the polynomial
+
+    .. math:: p(x) = \\sum_i c[i] * x^i.
+
+    Parameters
+    ----------
+    c : 1-D array_like
+        1-D array of polynomial coefficients.
+
+    Returns
+    -------
+    out : ndarray
+        Array of the roots of the polynomial. If all the roots are real,
+        then `out` is also real, otherwise it is complex.
+
+    See Also
+    --------
+    numpy.polynomial.chebyshev.chebroots
+    numpy.polynomial.legendre.legroots
+    numpy.polynomial.laguerre.lagroots
+    numpy.polynomial.hermite.hermroots
+    numpy.polynomial.hermite_e.hermeroots
+
+    Notes
+    -----
+    The root estimates are obtained as the eigenvalues of the companion
+    matrix. Roots far from the origin of the complex plane may have large
+    errors due to the numerical instability of the power series for such
+    values. Roots with multiplicity greater than 1 will also show larger
+    errors as the value of the series near such points is relatively
+    insensitive to errors in the roots. Isolated roots near the origin can
+    be improved by a few iterations of Newton's method.
+
+    Examples
+    --------
+    >>> import numpy.polynomial.polynomial as poly
+    >>> poly.polyroots(poly.polyfromroots((-1,0,1)))
+    array([-1.,  0.,  1.])
+    >>> poly.polyroots(poly.polyfromroots((-1,0,1))).dtype
+    dtype('float64')
+    >>> j = complex(0,1)
+    >>> poly.polyroots(poly.polyfromroots((-j,0,j)))
+    array([  0.00000000e+00+0.j,   0.00000000e+00+1.j,   2.77555756e-17-1.j]) # may vary
+
+    """
+    # c is a trimmed copy
+    [c] = pu.as_series([c])
+    if len(c) < 2:
+        return np.array([], dtype=c.dtype)
+    if len(c) == 2:
+        return np.array([-c[0]/c[1]])
+
+    # rotated companion matrix reduces error
+    m = polycompanion(c)[::-1,::-1]
+    r = la.eigvals(m)
+    r.sort()
+    return r
+
+
+#
+# polynomial class
+#
+
+class Polynomial(ABCPolyBase):
+    """A power series class.
+
+    The Polynomial class provides the standard Python numerical methods
+    '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
+    attributes and methods listed in the `ABCPolyBase` documentation.
+
+    Parameters
+    ----------
+    coef : array_like
+        Polynomial coefficients in order of increasing degree, i.e.,
+        ``(1, 2, 3)`` give ``1 + 2*x + 3*x**2``.
+    domain : (2,) array_like, optional
+        Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
+        to the interval ``[window[0], window[1]]`` by shifting and scaling.
+        The default value is [-1, 1].
+    window : (2,) array_like, optional
+        Window, see `domain` for its use. The default value is [-1, 1].
+
+        ..
versionadded:: 1.6.0 + + """ + # Virtual Functions + _add = staticmethod(polyadd) + _sub = staticmethod(polysub) + _mul = staticmethod(polymul) + _div = staticmethod(polydiv) + _pow = staticmethod(polypow) + _val = staticmethod(polyval) + _int = staticmethod(polyint) + _der = staticmethod(polyder) + _fit = staticmethod(polyfit) + _line = staticmethod(polyline) + _roots = staticmethod(polyroots) + _fromroots = staticmethod(polyfromroots) + + # Virtual properties + domain = np.array(polydomain) + window = np.array(polydomain) + basis_name = None + + @classmethod + def _str_term_unicode(cls, i, arg_str): + return f"·{arg_str}{i.translate(cls._superscript_mapping)}" + + @staticmethod + def _str_term_ascii(i, arg_str): + return f" {arg_str}**{i}" + + @staticmethod + def _repr_latex_term(i, arg_str, needs_parens): + if needs_parens: + arg_str = rf"\left({arg_str}\right)" + if i == 0: + return '1' + elif i == 1: + return arg_str + else: + return f"{arg_str}^{{{i}}}" diff --git a/MLPY/Lib/site-packages/numpy/polynomial/polynomial.pyi b/MLPY/Lib/site-packages/numpy/polynomial/polynomial.pyi new file mode 100644 index 0000000000000000000000000000000000000000..ee9a3cf6d3bc214093b652b49a35dca5881ea6be --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/polynomial/polynomial.pyi @@ -0,0 +1,41 @@ +from typing import Any, List + +from numpy import ndarray, dtype, int_ +from numpy.polynomial._polybase import ABCPolyBase +from numpy.polynomial.polyutils import trimcoef + +__all__: list[str] + +polytrim = trimcoef + +polydomain: ndarray[Any, dtype[int_]] +polyzero: ndarray[Any, dtype[int_]] +polyone: ndarray[Any, dtype[int_]] +polyx: ndarray[Any, dtype[int_]] + +def polyline(off, scl): ... +def polyfromroots(roots): ... +def polyadd(c1, c2): ... +def polysub(c1, c2): ... +def polymulx(c): ... +def polymul(c1, c2): ... +def polydiv(c1, c2): ... +def polypow(c, pow, maxpower=...): ... +def polyder(c, m=..., scl=..., axis=...): ... +def polyint(c, m=..., k=..., lbnd=..., scl=..., axis=...): ... +def polyval(x, c, tensor=...): ... +def polyvalfromroots(x, r, tensor=...): ... +def polyval2d(x, y, c): ... +def polygrid2d(x, y, c): ... +def polyval3d(x, y, z, c): ... +def polygrid3d(x, y, z, c): ... +def polyvander(x, deg): ... +def polyvander2d(x, y, deg): ... +def polyvander3d(x, y, z, deg): ... +def polyfit(x, y, deg, rcond=..., full=..., w=...): ... +def polyroots(c): ... + +class Polynomial(ABCPolyBase): + domain: Any + window: Any + basis_name: Any diff --git a/MLPY/Lib/site-packages/numpy/polynomial/polyutils.py b/MLPY/Lib/site-packages/numpy/polynomial/polyutils.py new file mode 100644 index 0000000000000000000000000000000000000000..66bb4c26a1d3a3f81482430211c4f81f09a011c0 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/polynomial/polyutils.py @@ -0,0 +1,750 @@ +""" +Utility classes and functions for the polynomial modules. + +This module provides: error and warning objects; a polynomial base class; +and some routines used in both the `polynomial` and `chebyshev` modules. + +Warning objects +--------------- + +.. autosummary:: + :toctree: generated/ + + RankWarning raised in least-squares fit for rank-deficient matrix. + +Functions +--------- + +.. autosummary:: + :toctree: generated/ + + as_series convert list of array_likes into 1-D arrays of common type. + trimseq remove trailing zeros. + trimcoef remove small trailing coefficients. + getdomain return the domain appropriate for a given set of abscissae. + mapdomain maps points between domains. + mapparms parameters of the linear map between domains. 
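+
+A quick, informal sketch of how the domain utilities fit together (the
+sample values below are arbitrary):
+
+>>> import numpy as np
+>>> from numpy.polynomial import polyutils as pu
+>>> x = np.array([0., 1., 4.])
+>>> dom = pu.getdomain(x)                # smallest interval containing x
+>>> off, scl = pu.mapparms(dom, [-1, 1])
+>>> np.allclose(pu.mapdomain(x, dom, [-1, 1]), off + scl*x)
+True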
+
+"""
+import operator
+import functools
+import warnings
+
+import numpy as np
+
+__all__ = [
+    'RankWarning', 'as_series', 'trimseq',
+    'trimcoef', 'getdomain', 'mapdomain', 'mapparms']
+
+#
+# Warnings and Exceptions
+#
+
+class RankWarning(UserWarning):
+    """Issued by the fit functions when the design matrix is rank deficient."""
+    pass
+
+#
+# Helper functions to convert inputs to 1-D arrays
+#
+def trimseq(seq):
+    """Remove trailing zeros from a sequence of series coefficients.
+
+    Parameters
+    ----------
+    seq : sequence
+        Sequence of Poly series coefficients. Empty sequences are
+        returned unchanged.
+
+    Returns
+    -------
+    series : sequence
+        Subsequence with trailing zeros removed. If the resulting sequence
+        would be empty, return the first element. The returned sequence may
+        or may not be a view.
+
+    Notes
+    -----
+    Do not lose the type info if the sequence contains unknown objects.
+
+    """
+    if len(seq) == 0:
+        return seq
+    else:
+        for i in range(len(seq) - 1, -1, -1):
+            if seq[i] != 0:
+                break
+        return seq[:i+1]
+
+
+def as_series(alist, trim=True):
+    """
+    Return argument as a list of 1-d arrays.
+
+    The returned list contains array(s) of dtype double, complex double, or
+    object. A 1-d argument of shape ``(N,)`` is parsed into ``N`` arrays of
+    size one; a 2-d argument of shape ``(M,N)`` is parsed into ``M`` arrays
+    of size ``N`` (i.e., is "parsed by row"); and a higher dimensional array
+    raises a ValueError if it is not first reshaped into either a 1-d or 2-d
+    array.
+
+    Parameters
+    ----------
+    alist : array_like
+        A 1- or 2-d array_like
+    trim : boolean, optional
+        When True, trailing zeros are removed from the inputs.
+        When False, the inputs are passed through intact.
+
+    Returns
+    -------
+    [a1, a2,...] : list of 1-D arrays
+        A copy of the input data as a list of 1-d arrays.
+
+    Raises
+    ------
+    ValueError
+        Raised when `as_series` cannot convert its input to 1-d arrays, or at
+        least one of the resulting arrays is empty.
+
+    Examples
+    --------
+    >>> from numpy.polynomial import polyutils as pu
+    >>> a = np.arange(4)
+    >>> pu.as_series(a)
+    [array([0.]), array([1.]), array([2.]), array([3.])]
+    >>> b = np.arange(6).reshape((2,3))
+    >>> pu.as_series(b)
+    [array([0., 1., 2.]), array([3., 4., 5.])]
+
+    >>> pu.as_series((1, np.arange(3), np.arange(2, dtype=np.float16)))
+    [array([1.]), array([0., 1., 2.]), array([0., 1.])]
+
+    >>> pu.as_series([2, [1.1, 0.]])
+    [array([2.]), array([1.1])]
+
+    >>> pu.as_series([2, [1.1, 0.]], trim=False)
+    [array([2.]), array([1.1, 0. ])]
+
+    """
+    arrays = [np.array(a, ndmin=1, copy=False) for a in alist]
+    if min([a.size for a in arrays]) == 0:
+        raise ValueError("Coefficient array is empty")
+    if any(a.ndim != 1 for a in arrays):
+        raise ValueError("Coefficient array is not 1-d")
+    if trim:
+        arrays = [trimseq(a) for a in arrays]
+
+    if any(a.dtype == np.dtype(object) for a in arrays):
+        ret = []
+        for a in arrays:
+            if a.dtype != np.dtype(object):
+                tmp = np.empty(len(a), dtype=np.dtype(object))
+                tmp[:] = a[:]
+                ret.append(tmp)
+            else:
+                ret.append(a.copy())
+    else:
+        try:
+            dtype = np.common_type(*arrays)
+        except Exception as e:
+            raise ValueError("Coefficient arrays have no common type") from e
+        ret = [np.array(a, copy=True, dtype=dtype) for a in arrays]
+    return ret
+
+
+def trimcoef(c, tol=0):
+    """
+    Remove "small" "trailing" coefficients from a polynomial.
+ + "Small" means "small in absolute value" and is controlled by the + parameter `tol`; "trailing" means highest order coefficient(s), e.g., in + ``[0, 1, 1, 0, 0]`` (which represents ``0 + x + x**2 + 0*x**3 + 0*x**4``) + both the 3-rd and 4-th order coefficients would be "trimmed." + + Parameters + ---------- + c : array_like + 1-d array of coefficients, ordered from lowest order to highest. + tol : number, optional + Trailing (i.e., highest order) elements with absolute value less + than or equal to `tol` (default value is zero) are removed. + + Returns + ------- + trimmed : ndarray + 1-d array with trailing zeros removed. If the resulting series + would be empty, a series containing a single zero is returned. + + Raises + ------ + ValueError + If `tol` < 0 + + See Also + -------- + trimseq + + Examples + -------- + >>> from numpy.polynomial import polyutils as pu + >>> pu.trimcoef((0,0,3,0,5,0,0)) + array([0., 0., 3., 0., 5.]) + >>> pu.trimcoef((0,0,1e-3,0,1e-5,0,0),1e-3) # item == tol is trimmed + array([0.]) + >>> i = complex(0,1) # works for complex + >>> pu.trimcoef((3e-4,1e-3*(1-i),5e-4,2e-5*(1+i)), 1e-3) + array([0.0003+0.j , 0.001 -0.001j]) + + """ + if tol < 0: + raise ValueError("tol must be non-negative") + + [c] = as_series([c]) + [ind] = np.nonzero(np.abs(c) > tol) + if len(ind) == 0: + return c[:1]*0 + else: + return c[:ind[-1] + 1].copy() + +def getdomain(x): + """ + Return a domain suitable for given abscissae. + + Find a domain suitable for a polynomial or Chebyshev series + defined at the values supplied. + + Parameters + ---------- + x : array_like + 1-d array of abscissae whose domain will be determined. + + Returns + ------- + domain : ndarray + 1-d array containing two values. If the inputs are complex, then + the two returned points are the lower left and upper right corners + of the smallest rectangle (aligned with the axes) in the complex + plane containing the points `x`. If the inputs are real, then the + two points are the ends of the smallest interval containing the + points `x`. + + See Also + -------- + mapparms, mapdomain + + Examples + -------- + >>> from numpy.polynomial import polyutils as pu + >>> points = np.arange(4)**2 - 5; points + array([-5, -4, -1, 4]) + >>> pu.getdomain(points) + array([-5., 4.]) + >>> c = np.exp(complex(0,1)*np.pi*np.arange(12)/6) # unit circle + >>> pu.getdomain(c) + array([-1.-1.j, 1.+1.j]) + + """ + [x] = as_series([x], trim=False) + if x.dtype.char in np.typecodes['Complex']: + rmin, rmax = x.real.min(), x.real.max() + imin, imax = x.imag.min(), x.imag.max() + return np.array((complex(rmin, imin), complex(rmax, imax))) + else: + return np.array((x.min(), x.max())) + +def mapparms(old, new): + """ + Linear map parameters between domains. + + Return the parameters of the linear map ``offset + scale*x`` that maps + `old` to `new` such that ``old[i] -> new[i]``, ``i = 0, 1``. + + Parameters + ---------- + old, new : array_like + Domains. Each domain must (successfully) convert to a 1-d array + containing precisely two values. + + Returns + ------- + offset, scale : scalars + The map ``L(x) = offset + scale*x`` maps the first domain to the + second. + + See Also + -------- + getdomain, mapdomain + + Notes + ----- + Also works for complex numbers, and thus can be used to calculate the + parameters required to map any line in the complex plane to any other + line therein. 
+ + Examples + -------- + >>> from numpy.polynomial import polyutils as pu + >>> pu.mapparms((-1,1),(-1,1)) + (0.0, 1.0) + >>> pu.mapparms((1,-1),(-1,1)) + (-0.0, -1.0) + >>> i = complex(0,1) + >>> pu.mapparms((-i,-1),(1,i)) + ((1+1j), (1-0j)) + + """ + oldlen = old[1] - old[0] + newlen = new[1] - new[0] + off = (old[1]*new[0] - old[0]*new[1])/oldlen + scl = newlen/oldlen + return off, scl + +def mapdomain(x, old, new): + """ + Apply linear map to input points. + + The linear map ``offset + scale*x`` that maps the domain `old` to + the domain `new` is applied to the points `x`. + + Parameters + ---------- + x : array_like + Points to be mapped. If `x` is a subtype of ndarray the subtype + will be preserved. + old, new : array_like + The two domains that determine the map. Each must (successfully) + convert to 1-d arrays containing precisely two values. + + Returns + ------- + x_out : ndarray + Array of points of the same shape as `x`, after application of the + linear map between the two domains. + + See Also + -------- + getdomain, mapparms + + Notes + ----- + Effectively, this implements: + + .. math :: + x\\_out = new[0] + m(x - old[0]) + + where + + .. math :: + m = \\frac{new[1]-new[0]}{old[1]-old[0]} + + Examples + -------- + >>> from numpy.polynomial import polyutils as pu + >>> old_domain = (-1,1) + >>> new_domain = (0,2*np.pi) + >>> x = np.linspace(-1,1,6); x + array([-1. , -0.6, -0.2, 0.2, 0.6, 1. ]) + >>> x_out = pu.mapdomain(x, old_domain, new_domain); x_out + array([ 0. , 1.25663706, 2.51327412, 3.76991118, 5.02654825, # may vary + 6.28318531]) + >>> x - pu.mapdomain(x_out, new_domain, old_domain) + array([0., 0., 0., 0., 0., 0.]) + + Also works for complex numbers (and thus can be used to map any line in + the complex plane to any other line therein). + + >>> i = complex(0,1) + >>> old = (-1 - i, 1 + i) + >>> new = (-1 + i, 1 - i) + >>> z = np.linspace(old[0], old[1], 6); z + array([-1. -1.j , -0.6-0.6j, -0.2-0.2j, 0.2+0.2j, 0.6+0.6j, 1. +1.j ]) + >>> new_z = pu.mapdomain(z, old, new); new_z + array([-1.0+1.j , -0.6+0.6j, -0.2+0.2j, 0.2-0.2j, 0.6-0.6j, 1.0-1.j ]) # may vary + + """ + x = np.asanyarray(x) + off, scl = mapparms(old, new) + return off + scl*x + + +def _nth_slice(i, ndim): + sl = [np.newaxis] * ndim + sl[i] = slice(None) + return tuple(sl) + + +def _vander_nd(vander_fs, points, degrees): + r""" + A generalization of the Vandermonde matrix for N dimensions + + The result is built by combining the results of 1d Vandermonde matrices, + + .. math:: + W[i_0, \ldots, i_M, j_0, \ldots, j_N] = \prod_{k=0}^N{V_k(x_k)[i_0, \ldots, i_M, j_k]} + + where + + .. math:: + N &= \texttt{len(points)} = \texttt{len(degrees)} = \texttt{len(vander\_fs)} \\ + M &= \texttt{points[k].ndim} \\ + V_k &= \texttt{vander\_fs[k]} \\ + x_k &= \texttt{points[k]} \\ + 0 \le j_k &\le \texttt{degrees[k]} + + Expanding the one-dimensional :math:`V_k` functions gives: + + .. math:: + W[i_0, \ldots, i_M, j_0, \ldots, j_N] = \prod_{k=0}^N{B_{k, j_k}(x_k[i_0, \ldots, i_M])} + + where :math:`B_{k,m}` is the m'th basis of the polynomial construction used along + dimension :math:`k`. For a regular polynomial, :math:`B_{k, m}(x) = P_m(x) = x^m`. + + Parameters + ---------- + vander_fs : Sequence[function(array_like, int) -> ndarray] + The 1d vander function to use for each axis, such as ``polyvander`` + points : Sequence[array_like] + Arrays of point coordinates, all of the same shape. The dtypes + will be converted to either float64 or complex128 depending on + whether any of the elements are complex. 
+        Scalars are converted to 1-D arrays.
+        This must be the same length as `vander_fs`.
+    degrees : Sequence[int]
+        The maximum degree (inclusive) to use for each axis.
+        This must be the same length as `vander_fs`.
+
+    Returns
+    -------
+    vander_nd : ndarray
+        An array of shape ``points[0].shape + tuple(d + 1 for d in degrees)``.
+    """
+    n_dims = len(vander_fs)
+    if n_dims != len(points):
+        raise ValueError(
+            f"Expected {n_dims} dimensions of sample points, got {len(points)}")
+    if n_dims != len(degrees):
+        raise ValueError(
+            f"Expected {n_dims} dimensions of degrees, got {len(degrees)}")
+    if n_dims == 0:
+        raise ValueError("Unable to guess a dtype or shape when no points are given")
+
+    # convert to the same shape and type
+    points = tuple(np.array(tuple(points), copy=False) + 0.0)
+
+    # produce the vandermonde matrix for each dimension, placing the last
+    # axis of each in an independent trailing axis of the output
+    vander_arrays = (
+        vander_fs[i](points[i], degrees[i])[(...,) + _nth_slice(i, n_dims)]
+        for i in range(n_dims)
+    )
+
+    # we checked this wasn't empty already, so no `initial` needed
+    return functools.reduce(operator.mul, vander_arrays)
+
+
+def _vander_nd_flat(vander_fs, points, degrees):
+    """
+    Like `_vander_nd`, but flattens the last ``len(degrees)`` axes into a single axis
+
+    Used to implement the public ``<type>vander<n>d`` functions.
+    """
+    v = _vander_nd(vander_fs, points, degrees)
+    return v.reshape(v.shape[:-len(degrees)] + (-1,))
+
+
+def _fromroots(line_f, mul_f, roots):
+    """
+    Helper function used to implement the ``<type>fromroots`` functions.
+
+    Parameters
+    ----------
+    line_f : function(float, float) -> ndarray
+        The ``<type>line`` function, such as ``polyline``
+    mul_f : function(array_like, array_like) -> ndarray
+        The ``<type>mul`` function, such as ``polymul``
+    roots
+        See the ``<type>fromroots`` functions for more detail
+    """
+    if len(roots) == 0:
+        return np.ones(1)
+    else:
+        [roots] = as_series([roots], trim=False)
+        roots.sort()
+        p = [line_f(-r, 1) for r in roots]
+        n = len(p)
+        while n > 1:
+            m, r = divmod(n, 2)
+            # multiply pairs of linear factors, halving the list each pass
+            tmp = [mul_f(p[i], p[i+m]) for i in range(m)]
+            if r:
+                tmp[0] = mul_f(tmp[0], p[-1])
+            p = tmp
+            n = m
+        return p[0]
+
+
+def _valnd(val_f, c, *args):
+    """
+    Helper function used to implement the ``<type>val<n>d`` functions.
+
+    Parameters
+    ----------
+    val_f : function(array_like, array_like, tensor: bool) -> array_like
+        The ``<type>val`` function, such as ``polyval``
+    c, args
+        See the ``<type>val<n>d`` functions for more detail
+    """
+    args = [np.asanyarray(a) for a in args]
+    shape0 = args[0].shape
+    if not all((a.shape == shape0 for a in args[1:])):
+        if len(args) == 3:
+            raise ValueError('x, y, z are incompatible')
+        elif len(args) == 2:
+            raise ValueError('x, y are incompatible')
+        else:
+            raise ValueError('ordinates are incompatible')
+    it = iter(args)
+    x0 = next(it)
+
+    # use tensor on only the first
+    c = val_f(x0, c)
+    for xi in it:
+        c = val_f(xi, c, tensor=False)
+    return c
+
+
+def _gridnd(val_f, c, *args):
+    """
+    Helper function used to implement the ``<type>grid<n>d`` functions.
+
+    Parameters
+    ----------
+    val_f : function(array_like, array_like, tensor: bool) -> array_like
+        The ``<type>val`` function, such as ``polyval``
+    c, args
+        See the ``<type>grid<n>d`` functions for more detail
+    """
+    for xi in args:
+        c = val_f(xi, c)
+    return c
+
+
+def _div(mul_f, c1, c2):
+    """
+    Helper function used to implement the ``<type>div`` functions.
+
+    Implementation uses repeated subtraction of c2 multiplied by the nth basis.
+    For some polynomial types, a more efficient approach may be possible.
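+
+    For context, the public division functions expose this behaviour as a
+    (quotient, remainder) pair of coefficient arrays; an arbitrary exact
+    division through ``polydiv``:
+
+    >>> import numpy as np
+    >>> from numpy.polynomial import polynomial as P
+    >>> quo, rem = P.polydiv([2, 3, 1], [1, 1])  # (x**2 + 3*x + 2)/(x + 1)
+    >>> bool(np.allclose(quo, [2, 1]) and np.allclose(rem, [0]))
+    True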
+
+    Parameters
+    ----------
+    mul_f : function(array_like, array_like) -> array_like
+        The ``<type>mul`` function, such as ``polymul``
+    c1, c2
+        See the ``<type>div`` functions for more detail
+    """
+    # c1, c2 are trimmed copies
+    [c1, c2] = as_series([c1, c2])
+    if c2[-1] == 0:
+        raise ZeroDivisionError()
+
+    lc1 = len(c1)
+    lc2 = len(c2)
+    if lc1 < lc2:
+        return c1[:1]*0, c1
+    elif lc2 == 1:
+        return c1/c2[-1], c1[:1]*0
+    else:
+        quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
+        rem = c1
+        for i in range(lc1 - lc2, -1, -1):
+            p = mul_f([0]*i + [1], c2)
+            q = rem[-1]/p[-1]
+            rem = rem[:-1] - q*p[:-1]
+            quo[i] = q
+        return quo, trimseq(rem)
+
+
+def _add(c1, c2):
+    """ Helper function used to implement the ``<type>add`` functions. """
+    # c1, c2 are trimmed copies
+    [c1, c2] = as_series([c1, c2])
+    if len(c1) > len(c2):
+        c1[:c2.size] += c2
+        ret = c1
+    else:
+        c2[:c1.size] += c1
+        ret = c2
+    return trimseq(ret)
+
+
+def _sub(c1, c2):
+    """ Helper function used to implement the ``<type>sub`` functions. """
+    # c1, c2 are trimmed copies
+    [c1, c2] = as_series([c1, c2])
+    if len(c1) > len(c2):
+        c1[:c2.size] -= c2
+        ret = c1
+    else:
+        c2 = -c2
+        c2[:c1.size] += c1
+        ret = c2
+    return trimseq(ret)
+
+
+def _fit(vander_f, x, y, deg, rcond=None, full=False, w=None):
+    """
+    Helper function used to implement the ``<type>fit`` functions.
+
+    Parameters
+    ----------
+    vander_f : function(array_like, int) -> ndarray
+        The 1d vander function, such as ``polyvander``
+    x, y, deg, rcond, full, w
+        See the ``<type>fit`` functions for more detail
+    """
+    x = np.asarray(x) + 0.0
+    y = np.asarray(y) + 0.0
+    deg = np.asarray(deg)
+
+    # check arguments.
+    if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0:
+        raise TypeError("deg must be an int or non-empty 1-D array of int")
+    if deg.min() < 0:
+        raise ValueError("expected deg >= 0")
+    if x.ndim != 1:
+        raise TypeError("expected 1D vector for x")
+    if x.size == 0:
+        raise TypeError("expected non-empty vector for x")
+    if y.ndim < 1 or y.ndim > 2:
+        raise TypeError("expected 1D or 2D array for y")
+    if len(x) != len(y):
+        raise TypeError("expected x and y to have same length")
+
+    if deg.ndim == 0:
+        lmax = deg
+        order = lmax + 1
+        van = vander_f(x, lmax)
+    else:
+        deg = np.sort(deg)
+        lmax = deg[-1]
+        order = len(deg)
+        van = vander_f(x, lmax)[:, deg]
+
+    # set up the least squares matrices in transposed form
+    lhs = van.T
+    rhs = y.T
+    if w is not None:
+        w = np.asarray(w) + 0.0
+        if w.ndim != 1:
+            raise TypeError("expected 1D vector for w")
+        if len(x) != len(w):
+            raise TypeError("expected x and w to have same length")
+        # apply weights. Don't use inplace operations as they
+        # can cause problems with NA.
+        lhs = lhs * w
+        rhs = rhs * w
+
+    # set rcond
+    if rcond is None:
+        rcond = len(x)*np.finfo(x.dtype).eps
+
+    # Determine the norms of the design matrix columns.
+    if issubclass(lhs.dtype.type, np.complexfloating):
+        scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
+    else:
+        scl = np.sqrt(np.square(lhs).sum(1))
+    scl[scl == 0] = 1
+
+    # Solve the least squares problem.
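+    # Note: the rows of ``lhs`` (the columns of ``lhs.T``) were scaled to
+    # unit Euclidean norm above, which improves the conditioning of the
+    # solve; the division by ``scl`` here is undone on the coefficients
+    # immediately afterwards, so the fitted values are unchanged.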
+    c, resids, rank, s = np.linalg.lstsq(lhs.T/scl, rhs.T, rcond)
+    c = (c.T/scl).T
+
+    # Expand c to include non-fitted coefficients which are set to zero
+    if deg.ndim > 0:
+        if c.ndim == 2:
+            cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype)
+        else:
+            cc = np.zeros(lmax+1, dtype=c.dtype)
+        cc[deg] = c
+        c = cc
+
+    # warn on rank reduction
+    if rank != order and not full:
+        msg = "The fit may be poorly conditioned"
+        warnings.warn(msg, RankWarning, stacklevel=2)
+
+    if full:
+        return c, [resids, rank, s, rcond]
+    else:
+        return c
+
+
+def _pow(mul_f, c, pow, maxpower):
+    """
+    Helper function used to implement the ``<type>pow`` functions.
+
+    Parameters
+    ----------
+    mul_f : function(array_like, array_like) -> ndarray
+        The ``<type>mul`` function, such as ``polymul``
+    c : array_like
+        1-D array of series coefficients
+    pow, maxpower
+        See the ``<type>pow`` functions for more detail
+    """
+    # c is a trimmed copy
+    [c] = as_series([c])
+    power = int(pow)
+    if power != pow or power < 0:
+        raise ValueError("Power must be a non-negative integer.")
+    elif maxpower is not None and power > maxpower:
+        raise ValueError("Power is too large")
+    elif power == 0:
+        return np.array([1], dtype=c.dtype)
+    elif power == 1:
+        return c
+    else:
+        # This can be made more efficient by using powers of two
+        # in the usual way.
+        prd = c
+        for i in range(2, power + 1):
+            prd = mul_f(prd, c)
+        return prd
+
+
+def _deprecate_as_int(x, desc):
+    """
+    Like `operator.index`, but emits a deprecation warning when passed a float
+
+    Parameters
+    ----------
+    x : int-like, or float with integral value
+        Value to interpret as an integer
+    desc : str
+        description to include in any error message
+
+    Raises
+    ------
+    TypeError : if x is a non-integral float or non-numeric
+    DeprecationWarning : if x is an integral float
+    """
+    try:
+        return operator.index(x)
+    except TypeError as e:
+        # Numpy 1.17.0, 2019-03-11
+        try:
+            ix = int(x)
+        except TypeError:
+            pass
+        else:
+            if ix == x:
+                warnings.warn(
+                    f"In future, this will raise TypeError, as {desc} will "
+                    "need to be an integer not just an integral float.",
+                    DeprecationWarning,
+                    stacklevel=3
+                )
+                return ix
+
+        raise TypeError(f"{desc} must be an integer") from e
diff --git a/MLPY/Lib/site-packages/numpy/polynomial/polyutils.pyi b/MLPY/Lib/site-packages/numpy/polynomial/polyutils.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..ce68d7a378ac47a66054c118fcd3239989e97b86
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/polynomial/polyutils.pyi
@@ -0,0 +1,12 @@
+from typing import List
+
+__all__: List[str]
+
+class RankWarning(UserWarning): ...
+
+def trimseq(seq): ...
+def as_series(alist, trim=...): ...
+def trimcoef(c, tol=...): ...
+def getdomain(x): ...
+def mapparms(old, new): ...
+def mapdomain(x, old, new): ...
diff --git a/MLPY/Lib/site-packages/numpy/polynomial/setup.py b/MLPY/Lib/site-packages/numpy/polynomial/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..72fecde1448f2f6a36147b3ff23d00edf75326cb --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/polynomial/setup.py @@ -0,0 +1,10 @@ +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('polynomial', parent_package, top_path) + config.add_subpackage('tests') + config.add_data_files('*.pyi') + return config + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(configuration=configuration) diff --git a/MLPY/Lib/site-packages/numpy/polynomial/tests/__init__.py b/MLPY/Lib/site-packages/numpy/polynomial/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/numpy/polynomial/tests/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/polynomial/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d942e9d777a2f21039fbbb79bd5928924a241d84 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/polynomial/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_chebyshev.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_chebyshev.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee28ede841ae5438297f96639dfa035a9be18f4f Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_chebyshev.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_classes.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_classes.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8f2590f39ceb7420ef8fff76cb04cadfc508a09 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_classes.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_hermite.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_hermite.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8da6d136c82e62ef0c42c35affbf845494421b2d Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_hermite.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_hermite_e.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_hermite_e.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fea18b86fa56a032885ec22c637aa2d7464f90da Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_hermite_e.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_laguerre.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_laguerre.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c300cf65bbc026a066278141dc8f68fe806ad77c Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_laguerre.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_legendre.cpython-39.pyc 
b/MLPY/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_legendre.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97f7778b020b0763314d1a30e06958e987b5d10c Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_legendre.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_polynomial.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_polynomial.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..57918adcf3267c8f37cb3964c710a2812284aab4 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_polynomial.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_polyutils.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_polyutils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dba901deb6df723f1bb4713284236cfec238dedd Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_polyutils.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_printing.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_printing.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01ebbf10374dfac0937ed04104b64d956e391939 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/polynomial/tests/__pycache__/test_printing.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/polynomial/tests/test_chebyshev.py b/MLPY/Lib/site-packages/numpy/polynomial/tests/test_chebyshev.py new file mode 100644 index 0000000000000000000000000000000000000000..a3f8b96f83360e101f61912832e4b2cda8b0a320 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/polynomial/tests/test_chebyshev.py @@ -0,0 +1,619 @@ +"""Tests for chebyshev module. 
+ +""" +from functools import reduce + +import numpy as np +import numpy.polynomial.chebyshev as cheb +from numpy.polynomial.polynomial import polyval +from numpy.testing import ( + assert_almost_equal, assert_raises, assert_equal, assert_, + ) + + +def trim(x): + return cheb.chebtrim(x, tol=1e-6) + +T0 = [1] +T1 = [0, 1] +T2 = [-1, 0, 2] +T3 = [0, -3, 0, 4] +T4 = [1, 0, -8, 0, 8] +T5 = [0, 5, 0, -20, 0, 16] +T6 = [-1, 0, 18, 0, -48, 0, 32] +T7 = [0, -7, 0, 56, 0, -112, 0, 64] +T8 = [1, 0, -32, 0, 160, 0, -256, 0, 128] +T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256] + +Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9] + + +class TestPrivate: + + def test__cseries_to_zseries(self): + for i in range(5): + inp = np.array([2] + [1]*i, np.double) + tgt = np.array([.5]*i + [2] + [.5]*i, np.double) + res = cheb._cseries_to_zseries(inp) + assert_equal(res, tgt) + + def test__zseries_to_cseries(self): + for i in range(5): + inp = np.array([.5]*i + [2] + [.5]*i, np.double) + tgt = np.array([2] + [1]*i, np.double) + res = cheb._zseries_to_cseries(inp) + assert_equal(res, tgt) + + +class TestConstants: + + def test_chebdomain(self): + assert_equal(cheb.chebdomain, [-1, 1]) + + def test_chebzero(self): + assert_equal(cheb.chebzero, [0]) + + def test_chebone(self): + assert_equal(cheb.chebone, [1]) + + def test_chebx(self): + assert_equal(cheb.chebx, [0, 1]) + + +class TestArithmetic: + + def test_chebadd(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = cheb.chebadd([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_chebsub(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = cheb.chebsub([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_chebmulx(self): + assert_equal(cheb.chebmulx([0]), [0]) + assert_equal(cheb.chebmulx([1]), [0, 1]) + for i in range(1, 5): + ser = [0]*i + [1] + tgt = [0]*(i - 1) + [.5, 0, .5] + assert_equal(cheb.chebmulx(ser), tgt) + + def test_chebmul(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(i + j + 1) + tgt[i + j] += .5 + tgt[abs(i - j)] += .5 + res = cheb.chebmul([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_chebdiv(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + ci = [0]*i + [1] + cj = [0]*j + [1] + tgt = cheb.chebadd(ci, cj) + quo, rem = cheb.chebdiv(tgt, ci) + res = cheb.chebadd(cheb.chebmul(quo, ci), rem) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_chebpow(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + c = np.arange(i + 1) + tgt = reduce(cheb.chebmul, [c]*j, np.array([1])) + res = cheb.chebpow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation: + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([2.5, 2., 1.5]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + y = polyval(x, [1., 2., 3.]) + + def test_chebval(self): + #check empty input + assert_equal(cheb.chebval([], [1]).size, 0) + + #check normal input) + x = np.linspace(-1, 1) + y = [polyval(x, c) for c in Tlist] + for i in range(10): + msg = f"At i={i}" + tgt = y[i] + res = cheb.chebval(x, [0]*i + [1]) + assert_almost_equal(res, tgt, err_msg=msg) + + 
#check that shape is preserved + for i in range(3): + dims = [2]*i + x = np.zeros(dims) + assert_equal(cheb.chebval(x, [1]).shape, dims) + assert_equal(cheb.chebval(x, [1, 0]).shape, dims) + assert_equal(cheb.chebval(x, [1, 0, 0]).shape, dims) + + def test_chebval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, cheb.chebval2d, x1, x2[:2], self.c2d) + + #test values + tgt = y1*y2 + res = cheb.chebval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = cheb.chebval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_chebval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, cheb.chebval3d, x1, x2, x3[:2], self.c3d) + + #test values + tgt = y1*y2*y3 + res = cheb.chebval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = cheb.chebval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_chebgrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j->ij', y1, y2) + res = cheb.chebgrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = cheb.chebgrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3)*2) + + def test_chebgrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = cheb.chebgrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = cheb.chebgrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)*3) + + +class TestIntegral: + + def test_chebint(self): + # check exceptions + assert_raises(TypeError, cheb.chebint, [0], .5) + assert_raises(ValueError, cheb.chebint, [0], -1) + assert_raises(ValueError, cheb.chebint, [0], 1, [0, 0]) + assert_raises(ValueError, cheb.chebint, [0], lbnd=[0]) + assert_raises(ValueError, cheb.chebint, [0], scl=[0]) + assert_raises(TypeError, cheb.chebint, [0], axis=.5) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0]*(i - 2) + [1] + res = cheb.chebint([0], m=i, k=k) + assert_almost_equal(res, [0, 1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [1/scl] + chebpol = cheb.poly2cheb(pol) + chebint = cheb.chebint(chebpol, m=1, k=[i]) + res = cheb.cheb2poly(chebint) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + chebpol = cheb.poly2cheb(pol) + chebint = cheb.chebint(chebpol, m=1, k=[i], lbnd=-1) + assert_almost_equal(cheb.chebval(-1, chebint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [2/scl] + chebpol = cheb.poly2cheb(pol) + chebint = cheb.chebint(chebpol, m=1, k=[i], scl=2) + res = cheb.cheb2poly(chebint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = cheb.chebint(tgt, m=1) + res = cheb.chebint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = cheb.chebint(tgt, m=1, k=[k]) + res = cheb.chebint(pol, m=j, 
k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = cheb.chebint(tgt, m=1, k=[k], lbnd=-1) + res = cheb.chebint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = cheb.chebint(tgt, m=1, k=[k], scl=2) + res = cheb.chebint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_chebint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([cheb.chebint(c) for c in c2d.T]).T + res = cheb.chebint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([cheb.chebint(c) for c in c2d]) + res = cheb.chebint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([cheb.chebint(c, k=3) for c in c2d]) + res = cheb.chebint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative: + + def test_chebder(self): + # check exceptions + assert_raises(TypeError, cheb.chebder, [0], .5) + assert_raises(ValueError, cheb.chebder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0]*i + [1] + res = cheb.chebder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = cheb.chebder(cheb.chebint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = cheb.chebder(cheb.chebint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_chebder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([cheb.chebder(c) for c in c2d.T]).T + res = cheb.chebder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([cheb.chebder(c) for c in c2d]) + res = cheb.chebder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class TestVander: + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + + def test_chebvander(self): + # check for 1d x + x = np.arange(3) + v = cheb.chebvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], cheb.chebval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = cheb.chebvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], cheb.chebval(x, coef)) + + def test_chebvander2d(self): + # also tests chebval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = cheb.chebvander2d(x1, x2, [1, 2]) + tgt = cheb.chebval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = cheb.chebvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_chebvander3d(self): + # also tests chebval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = cheb.chebvander3d(x1, x2, x3, [1, 2, 3]) + tgt = cheb.chebval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = cheb.chebvander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + +class 
TestFitting: + + def test_chebfit(self): + def f(x): + return x*(x - 1)*(x - 2) + + def f2(x): + return x**4 + x**2 + 1 + + # Test exceptions + assert_raises(ValueError, cheb.chebfit, [1], [1], -1) + assert_raises(TypeError, cheb.chebfit, [[1]], [1], 0) + assert_raises(TypeError, cheb.chebfit, [], [1], 0) + assert_raises(TypeError, cheb.chebfit, [1], [[[1]]], 0) + assert_raises(TypeError, cheb.chebfit, [1, 2], [1], 0) + assert_raises(TypeError, cheb.chebfit, [1], [1, 2], 0) + assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, cheb.chebfit, [1], [1], [-1,]) + assert_raises(ValueError, cheb.chebfit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, cheb.chebfit, [1], [1], []) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = cheb.chebfit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(cheb.chebval(x, coef3), y) + coef3 = cheb.chebfit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(cheb.chebval(x, coef3), y) + # + coef4 = cheb.chebfit(x, y, 4) + assert_equal(len(coef4), 5) + assert_almost_equal(cheb.chebval(x, coef4), y) + coef4 = cheb.chebfit(x, y, [0, 1, 2, 3, 4]) + assert_equal(len(coef4), 5) + assert_almost_equal(cheb.chebval(x, coef4), y) + # check things still work if deg is not in strict increasing + coef4 = cheb.chebfit(x, y, [2, 3, 4, 1, 0]) + assert_equal(len(coef4), 5) + assert_almost_equal(cheb.chebval(x, coef4), y) + # + coef2d = cheb.chebfit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + coef2d = cheb.chebfit(x, np.array([y, y]).T, [0, 1, 2, 3]) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + # test weighting + w = np.zeros_like(x) + yw = y.copy() + w[1::2] = 1 + y[0::2] = 0 + wcoef3 = cheb.chebfit(x, yw, 3, w=w) + assert_almost_equal(wcoef3, coef3) + wcoef3 = cheb.chebfit(x, yw, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef3, coef3) + # + wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + # test scaling with complex values x points whose square + # is zero when summed. + x = [1, 1j, -1, -1j] + assert_almost_equal(cheb.chebfit(x, x, 1), [0, 1]) + assert_almost_equal(cheb.chebfit(x, x, [0, 1]), [0, 1]) + # test fitting only even polynomials + x = np.linspace(-1, 1) + y = f2(x) + coef1 = cheb.chebfit(x, y, 4) + assert_almost_equal(cheb.chebval(x, coef1), y) + coef2 = cheb.chebfit(x, y, [0, 2, 4]) + assert_almost_equal(cheb.chebval(x, coef2), y) + assert_almost_equal(coef1, coef2) + + +class TestInterpolate: + + def f(self, x): + return x * (x - 1) * (x - 2) + + def test_raises(self): + assert_raises(ValueError, cheb.chebinterpolate, self.f, -1) + assert_raises(TypeError, cheb.chebinterpolate, self.f, 10.) 
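+        # deg must be a non-negative integer here: -1 trips the range
+        # check (ValueError) and the float 10. is rejected as a
+        # non-integral degree (TypeError).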
+ + def test_dimensions(self): + for deg in range(1, 5): + assert_(cheb.chebinterpolate(self.f, deg).shape == (deg + 1,)) + + def test_approximation(self): + + def powx(x, p): + return x**p + + x = np.linspace(-1, 1, 10) + for deg in range(0, 10): + for p in range(0, deg + 1): + c = cheb.chebinterpolate(powx, deg, (p,)) + assert_almost_equal(cheb.chebval(x, c), powx(x, p), decimal=12) + + +class TestCompanion: + + def test_raises(self): + assert_raises(ValueError, cheb.chebcompanion, []) + assert_raises(ValueError, cheb.chebcompanion, [1]) + + def test_dimensions(self): + for i in range(1, 5): + coef = [0]*i + [1] + assert_(cheb.chebcompanion(coef).shape == (i, i)) + + def test_linear_root(self): + assert_(cheb.chebcompanion([1, 2])[0, 0] == -.5) + + +class TestGauss: + + def test_100(self): + x, w = cheb.chebgauss(100) + + # test orthogonality. Note that the results need to be normalized, + # otherwise the huge values that can arise from fast growing + # functions like Laguerre can be very confusing. + v = cheb.chebvander(x, 99) + vv = np.dot(v.T * w, v) + vd = 1/np.sqrt(vv.diagonal()) + vv = vd[:, None] * vv * vd + assert_almost_equal(vv, np.eye(100)) + + # check that the integral of 1 is correct + tgt = np.pi + assert_almost_equal(w.sum(), tgt) + + +class TestMisc: + + def test_chebfromroots(self): + res = cheb.chebfromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) + tgt = [0]*i + [1] + res = cheb.chebfromroots(roots)*2**(i-1) + assert_almost_equal(trim(res), trim(tgt)) + + def test_chebroots(self): + assert_almost_equal(cheb.chebroots([1]), []) + assert_almost_equal(cheb.chebroots([1, 2]), [-.5]) + for i in range(2, 5): + tgt = np.linspace(-1, 1, i) + res = cheb.chebroots(cheb.chebfromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_chebtrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, cheb.chebtrim, coef, -1) + + # Test results + assert_equal(cheb.chebtrim(coef), coef[:-1]) + assert_equal(cheb.chebtrim(coef, 1), coef[:-3]) + assert_equal(cheb.chebtrim(coef, 2), [0]) + + def test_chebline(self): + assert_equal(cheb.chebline(3, 4), [3, 4]) + + def test_cheb2poly(self): + for i in range(10): + assert_almost_equal(cheb.cheb2poly([0]*i + [1]), Tlist[i]) + + def test_poly2cheb(self): + for i in range(10): + assert_almost_equal(cheb.poly2cheb(Tlist[i]), [0]*i + [1]) + + def test_weight(self): + x = np.linspace(-1, 1, 11)[1:-1] + tgt = 1./(np.sqrt(1 + x) * np.sqrt(1 - x)) + res = cheb.chebweight(x) + assert_almost_equal(res, tgt) + + def test_chebpts1(self): + #test exceptions + assert_raises(ValueError, cheb.chebpts1, 1.5) + assert_raises(ValueError, cheb.chebpts1, 0) + + #test points + tgt = [0] + assert_almost_equal(cheb.chebpts1(1), tgt) + tgt = [-0.70710678118654746, 0.70710678118654746] + assert_almost_equal(cheb.chebpts1(2), tgt) + tgt = [-0.86602540378443871, 0, 0.86602540378443871] + assert_almost_equal(cheb.chebpts1(3), tgt) + tgt = [-0.9238795325, -0.3826834323, 0.3826834323, 0.9238795325] + assert_almost_equal(cheb.chebpts1(4), tgt) + + def test_chebpts2(self): + #test exceptions + assert_raises(ValueError, cheb.chebpts2, 1.5) + assert_raises(ValueError, cheb.chebpts2, 1) + + #test points + tgt = [-1, 1] + assert_almost_equal(cheb.chebpts2(2), tgt) + tgt = [-1, 0, 1] + assert_almost_equal(cheb.chebpts2(3), tgt) + tgt = [-1, -0.5, .5, 1] + assert_almost_equal(cheb.chebpts2(4), tgt) + tgt = [-1.0, -0.707106781187, 0, 0.707106781187, 1.0] + 
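+# --- Editor's note: a standalone sketch (not part of the vendored file) of
+# the Gauss-Chebyshev rule that TestGauss above checks: it integrates
+# f(x)/sqrt(1 - x**2) over [-1, 1] exactly for polynomials of degree
+# <= 2*deg - 1, and the weights alone sum to pi.
+import numpy as np
+import numpy.polynomial.chebyshev as cheb
+
+x, w = cheb.chebgauss(3)
+assert np.isclose(w.sum(), np.pi)              # integral of the weight function
+assert np.isclose((w * x**2).sum(), np.pi/2)   # exact for x**2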
assert_almost_equal(cheb.chebpts2(5), tgt) diff --git a/MLPY/Lib/site-packages/numpy/polynomial/tests/test_classes.py b/MLPY/Lib/site-packages/numpy/polynomial/tests/test_classes.py new file mode 100644 index 0000000000000000000000000000000000000000..e182a0baba69d6988fcfc98209e2a2cae12cc35c --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/polynomial/tests/test_classes.py @@ -0,0 +1,600 @@ +"""Test inter-conversion of different polynomial classes. + +This tests the convert and cast methods of all the polynomial classes. + +""" +import operator as op +from numbers import Number + +import pytest +import numpy as np +from numpy.polynomial import ( + Polynomial, Legendre, Chebyshev, Laguerre, Hermite, HermiteE) +from numpy.testing import ( + assert_almost_equal, assert_raises, assert_equal, assert_, + ) +from numpy.polynomial.polyutils import RankWarning + +# +# fixtures +# + +classes = ( + Polynomial, Legendre, Chebyshev, Laguerre, + Hermite, HermiteE + ) +classids = tuple(cls.__name__ for cls in classes) + +@pytest.fixture(params=classes, ids=classids) +def Poly(request): + return request.param + +# +# helper functions +# +random = np.random.random + + +def assert_poly_almost_equal(p1, p2, msg=""): + try: + assert_(np.all(p1.domain == p2.domain)) + assert_(np.all(p1.window == p2.window)) + assert_almost_equal(p1.coef, p2.coef) + except AssertionError: + msg = f"Result: {p1}\nTarget: {p2}" + raise AssertionError(msg) + + +# +# Test conversion methods that depend on combinations of two classes. +# + +Poly1 = Poly +Poly2 = Poly + + +def test_conversion(Poly1, Poly2): + x = np.linspace(0, 1, 10) + coef = random((3,)) + + d1 = Poly1.domain + random((2,))*.25 + w1 = Poly1.window + random((2,))*.25 + p1 = Poly1(coef, domain=d1, window=w1) + + d2 = Poly2.domain + random((2,))*.25 + w2 = Poly2.window + random((2,))*.25 + p2 = p1.convert(kind=Poly2, domain=d2, window=w2) + + assert_almost_equal(p2.domain, d2) + assert_almost_equal(p2.window, w2) + assert_almost_equal(p2(x), p1(x)) + + +def test_cast(Poly1, Poly2): + x = np.linspace(0, 1, 10) + coef = random((3,)) + + d1 = Poly1.domain + random((2,))*.25 + w1 = Poly1.window + random((2,))*.25 + p1 = Poly1(coef, domain=d1, window=w1) + + d2 = Poly2.domain + random((2,))*.25 + w2 = Poly2.window + random((2,))*.25 + p2 = Poly2.cast(p1, domain=d2, window=w2) + + assert_almost_equal(p2.domain, d2) + assert_almost_equal(p2.window, w2) + assert_almost_equal(p2(x), p1(x)) + + +# +# test methods that depend on one class +# + + +def test_identity(Poly): + d = Poly.domain + random((2,))*.25 + w = Poly.window + random((2,))*.25 + x = np.linspace(d[0], d[1], 11) + p = Poly.identity(domain=d, window=w) + assert_equal(p.domain, d) + assert_equal(p.window, w) + assert_almost_equal(p(x), x) + + +def test_basis(Poly): + d = Poly.domain + random((2,))*.25 + w = Poly.window + random((2,))*.25 + p = Poly.basis(5, domain=d, window=w) + assert_equal(p.domain, d) + assert_equal(p.window, w) + assert_equal(p.coef, [0]*5 + [1]) + + +def test_fromroots(Poly): + # check that requested roots are zeros of a polynomial + # of correct degree, domain, and window. 
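+# --- Editor's note: a standalone sketch (not part of the vendored file) of
+# the convert/cast round trip these tests cover: both re-express a series in
+# another basis (and domain/window), so the two objects agree as functions
+# even though their coefficient arrays differ.
+import numpy as np
+from numpy.polynomial import Polynomial, Chebyshev
+
+p = Polynomial([1, 2, 3])          # 1 + 2*x + 3*x**2 in the power basis
+c = p.convert(kind=Chebyshev)      # same function, Chebyshev coefficients
+x = np.linspace(-1, 1, 7)
+assert np.allclose(c(x), p(x))
+assert np.allclose(Chebyshev.cast(p).coef, c.coef)   # classmethod spelling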
+ d = Poly.domain + random((2,))*.25 + w = Poly.window + random((2,))*.25 + r = random((5,)) + p1 = Poly.fromroots(r, domain=d, window=w) + assert_equal(p1.degree(), len(r)) + assert_equal(p1.domain, d) + assert_equal(p1.window, w) + assert_almost_equal(p1(r), 0) + + # check that polynomial is monic + pdom = Polynomial.domain + pwin = Polynomial.window + p2 = Polynomial.cast(p1, domain=pdom, window=pwin) + assert_almost_equal(p2.coef[-1], 1) + + +def test_bad_conditioned_fit(Poly): + + x = [0., 0., 1.] + y = [1., 2., 3.] + + # check RankWarning is raised + with pytest.warns(RankWarning) as record: + Poly.fit(x, y, 2) + assert record[0].message.args[0] == "The fit may be poorly conditioned" + + +def test_fit(Poly): + + def f(x): + return x*(x - 1)*(x - 2) + x = np.linspace(0, 3) + y = f(x) + + # check default value of domain and window + p = Poly.fit(x, y, 3) + assert_almost_equal(p.domain, [0, 3]) + assert_almost_equal(p(x), y) + assert_equal(p.degree(), 3) + + # check with given domains and window + d = Poly.domain + random((2,))*.25 + w = Poly.window + random((2,))*.25 + p = Poly.fit(x, y, 3, domain=d, window=w) + assert_almost_equal(p(x), y) + assert_almost_equal(p.domain, d) + assert_almost_equal(p.window, w) + p = Poly.fit(x, y, [0, 1, 2, 3], domain=d, window=w) + assert_almost_equal(p(x), y) + assert_almost_equal(p.domain, d) + assert_almost_equal(p.window, w) + + # check with class domain default + p = Poly.fit(x, y, 3, []) + assert_equal(p.domain, Poly.domain) + assert_equal(p.window, Poly.window) + p = Poly.fit(x, y, [0, 1, 2, 3], []) + assert_equal(p.domain, Poly.domain) + assert_equal(p.window, Poly.window) + + # check that fit accepts weights. + w = np.zeros_like(x) + z = y + random(y.shape)*.25 + w[::2] = 1 + p1 = Poly.fit(x[::2], z[::2], 3) + p2 = Poly.fit(x, z, 3, w=w) + p3 = Poly.fit(x, z, [0, 1, 2, 3], w=w) + assert_almost_equal(p1(x), p2(x)) + assert_almost_equal(p2(x), p3(x)) + + +def test_equal(Poly): + p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3]) + p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3]) + p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3]) + p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2]) + assert_(p1 == p1) + assert_(not p1 == p2) + assert_(not p1 == p3) + assert_(not p1 == p4) + + +def test_not_equal(Poly): + p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3]) + p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3]) + p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3]) + p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2]) + assert_(not p1 != p1) + assert_(p1 != p2) + assert_(p1 != p3) + assert_(p1 != p4) + + +def test_add(Poly): + # This checks commutation, not numerical correctness + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = p1 + p2 + assert_poly_almost_equal(p2 + p1, p3) + assert_poly_almost_equal(p1 + c2, p3) + assert_poly_almost_equal(c2 + p1, p3) + assert_poly_almost_equal(p1 + tuple(c2), p3) + assert_poly_almost_equal(tuple(c2) + p1, p3) + assert_poly_almost_equal(p1 + np.array(c2), p3) + assert_poly_almost_equal(np.array(c2) + p1, p3) + assert_raises(TypeError, op.add, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, op.add, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.add, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.add, p1, Polynomial([0])) + + +def test_sub(Poly): + # This checks commutation, not numerical correctness + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) 
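+# --- Editor's note: a standalone sketch (not part of the vendored file) of
+# the weighting convention test_fit above relies on: zero-weight samples drop
+# out of the least-squares problem, so a weighted fit over all points matches
+# an unweighted fit over the kept subset.
+import numpy as np
+from numpy.polynomial import Chebyshev
+
+x = np.linspace(0, 3, 50)
+y = x*(x - 1)*(x - 2) + 0.1*np.random.random(50)
+w = np.zeros_like(x)
+w[::2] = 1                               # keep only the even-index samples
+p1 = Chebyshev.fit(x[::2], y[::2], 3)
+p2 = Chebyshev.fit(x, y, 3, w=w)
+assert np.allclose(p1(x), p2(x))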
+ p3 = p1 - p2 + assert_poly_almost_equal(p2 - p1, -p3) + assert_poly_almost_equal(p1 - c2, p3) + assert_poly_almost_equal(c2 - p1, -p3) + assert_poly_almost_equal(p1 - tuple(c2), p3) + assert_poly_almost_equal(tuple(c2) - p1, -p3) + assert_poly_almost_equal(p1 - np.array(c2), p3) + assert_poly_almost_equal(np.array(c2) - p1, -p3) + assert_raises(TypeError, op.sub, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, op.sub, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.sub, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.sub, p1, Polynomial([0])) + + +def test_mul(Poly): + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = p1 * p2 + assert_poly_almost_equal(p2 * p1, p3) + assert_poly_almost_equal(p1 * c2, p3) + assert_poly_almost_equal(c2 * p1, p3) + assert_poly_almost_equal(p1 * tuple(c2), p3) + assert_poly_almost_equal(tuple(c2) * p1, p3) + assert_poly_almost_equal(p1 * np.array(c2), p3) + assert_poly_almost_equal(np.array(c2) * p1, p3) + assert_poly_almost_equal(p1 * 2, p1 * Poly([2])) + assert_poly_almost_equal(2 * p1, p1 * Poly([2])) + assert_raises(TypeError, op.mul, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, op.mul, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.mul, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.mul, p1, Polynomial([0])) + + +def test_floordiv(Poly): + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + c3 = list(random((2,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = Poly(c3) + p4 = p1 * p2 + p3 + c4 = list(p4.coef) + assert_poly_almost_equal(p4 // p2, p1) + assert_poly_almost_equal(p4 // c2, p1) + assert_poly_almost_equal(c4 // p2, p1) + assert_poly_almost_equal(p4 // tuple(c2), p1) + assert_poly_almost_equal(tuple(c4) // p2, p1) + assert_poly_almost_equal(p4 // np.array(c2), p1) + assert_poly_almost_equal(np.array(c4) // p2, p1) + assert_poly_almost_equal(2 // p2, Poly([0])) + assert_poly_almost_equal(p2 // 2, 0.5*p2) + assert_raises( + TypeError, op.floordiv, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises( + TypeError, op.floordiv, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.floordiv, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.floordiv, p1, Polynomial([0])) + + +def test_truediv(Poly): + # true division is valid only if the denominator is a Number and + # not a python bool. 
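+# --- Editor's note: a standalone sketch (not part of the vendored file) of
+# the rule stated in the comment above: true division accepts only a scalar
+# Number (and not a bool) on the right; series/series division is spelled
+# with //, %, or divmod instead.
+from numpy.polynomial import Polynomial
+
+p = Polynomial([2, 4, 6])
+assert p / 2 == Polynomial([1, 2, 3])   # scalar denominator is allowed
+try:
+    _ = 5 / p                           # scalar / series is not defined
+except TypeError:
+    pass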
+ p1 = Poly([1,2,3]) + p2 = p1 * 5 + + for stype in np.ScalarType: + if not issubclass(stype, Number) or issubclass(stype, bool): + continue + s = stype(5) + assert_poly_almost_equal(op.truediv(p2, s), p1) + assert_raises(TypeError, op.truediv, s, p2) + for stype in (int, float): + s = stype(5) + assert_poly_almost_equal(op.truediv(p2, s), p1) + assert_raises(TypeError, op.truediv, s, p2) + for stype in [complex]: + s = stype(5, 0) + assert_poly_almost_equal(op.truediv(p2, s), p1) + assert_raises(TypeError, op.truediv, s, p2) + for s in [tuple(), list(), dict(), bool(), np.array([1])]: + assert_raises(TypeError, op.truediv, p2, s) + assert_raises(TypeError, op.truediv, s, p2) + for ptype in classes: + assert_raises(TypeError, op.truediv, p2, ptype(1)) + + +def test_mod(Poly): + # This checks commutation, not numerical correctness + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + c3 = list(random((2,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = Poly(c3) + p4 = p1 * p2 + p3 + c4 = list(p4.coef) + assert_poly_almost_equal(p4 % p2, p3) + assert_poly_almost_equal(p4 % c2, p3) + assert_poly_almost_equal(c4 % p2, p3) + assert_poly_almost_equal(p4 % tuple(c2), p3) + assert_poly_almost_equal(tuple(c4) % p2, p3) + assert_poly_almost_equal(p4 % np.array(c2), p3) + assert_poly_almost_equal(np.array(c4) % p2, p3) + assert_poly_almost_equal(2 % p2, Poly([2])) + assert_poly_almost_equal(p2 % 2, Poly([0])) + assert_raises(TypeError, op.mod, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, op.mod, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.mod, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.mod, p1, Polynomial([0])) + + +def test_divmod(Poly): + # This checks commutation, not numerical correctness + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + c3 = list(random((2,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = Poly(c3) + p4 = p1 * p2 + p3 + c4 = list(p4.coef) + quo, rem = divmod(p4, p2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(p4, c2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(c4, p2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(p4, tuple(c2)) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(tuple(c4), p2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(p4, np.array(c2)) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(np.array(c4), p2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(p2, 2) + assert_poly_almost_equal(quo, 0.5*p2) + assert_poly_almost_equal(rem, Poly([0])) + quo, rem = divmod(2, p2) + assert_poly_almost_equal(quo, Poly([0])) + assert_poly_almost_equal(rem, Poly([2])) + assert_raises(TypeError, divmod, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, divmod, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, divmod, p1, Chebyshev([0])) + else: + assert_raises(TypeError, divmod, p1, Polynomial([0])) + + +def test_roots(Poly): + d = Poly.domain * 1.25 + .25 + w = Poly.window + tgt = np.linspace(d[0], d[1], 5) + res = np.sort(Poly.fromroots(tgt, domain=d, window=w).roots()) + assert_almost_equal(res, tgt) + # default domain and window + res = np.sort(Poly.fromroots(tgt).roots()) + assert_almost_equal(res, tgt) + + 
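+# --- Editor's note: a standalone sketch (not part of the vendored file)
+# tying the floordiv/mod/divmod tests above together: building
+# p4 = p1*p2 + p3 with deg(p3) < deg(p2) fixes the expected quotient and
+# remainder in advance.
+import numpy as np
+from numpy.polynomial import Chebyshev
+
+p1 = Chebyshev([1, 2, 3, 4])
+p2 = Chebyshev([1, 2, 3])
+p3 = Chebyshev([1, 2])
+quo, rem = divmod(p1*p2 + p3, p2)
+assert np.allclose(quo.coef, p1.coef)
+assert np.allclose(rem.coef, p3.coef)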
+def test_degree(Poly): + p = Poly.basis(5) + assert_equal(p.degree(), 5) + + +def test_copy(Poly): + p1 = Poly.basis(5) + p2 = p1.copy() + assert_(p1 == p2) + assert_(p1 is not p2) + assert_(p1.coef is not p2.coef) + assert_(p1.domain is not p2.domain) + assert_(p1.window is not p2.window) + + +def test_integ(Poly): + P = Polynomial + # Check defaults + p0 = Poly.cast(P([1*2, 2*3, 3*4])) + p1 = P.cast(p0.integ()) + p2 = P.cast(p0.integ(2)) + assert_poly_almost_equal(p1, P([0, 2, 3, 4])) + assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1])) + # Check with k + p0 = Poly.cast(P([1*2, 2*3, 3*4])) + p1 = P.cast(p0.integ(k=1)) + p2 = P.cast(p0.integ(2, k=[1, 1])) + assert_poly_almost_equal(p1, P([1, 2, 3, 4])) + assert_poly_almost_equal(p2, P([1, 1, 1, 1, 1])) + # Check with lbnd + p0 = Poly.cast(P([1*2, 2*3, 3*4])) + p1 = P.cast(p0.integ(lbnd=1)) + p2 = P.cast(p0.integ(2, lbnd=1)) + assert_poly_almost_equal(p1, P([-9, 2, 3, 4])) + assert_poly_almost_equal(p2, P([6, -9, 1, 1, 1])) + # Check scaling + d = 2*Poly.domain + p0 = Poly.cast(P([1*2, 2*3, 3*4]), domain=d) + p1 = P.cast(p0.integ()) + p2 = P.cast(p0.integ(2)) + assert_poly_almost_equal(p1, P([0, 2, 3, 4])) + assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1])) + + +def test_deriv(Poly): + # Check that the derivative is the inverse of integration. It is + # assumed that the integration has been checked elsewhere. + d = Poly.domain + random((2,))*.25 + w = Poly.window + random((2,))*.25 + p1 = Poly([1, 2, 3], domain=d, window=w) + p2 = p1.integ(2, k=[1, 2]) + p3 = p1.integ(1, k=[1]) + assert_almost_equal(p2.deriv(1).coef, p3.coef) + assert_almost_equal(p2.deriv(2).coef, p1.coef) + # default domain and window + p1 = Poly([1, 2, 3]) + p2 = p1.integ(2, k=[1, 2]) + p3 = p1.integ(1, k=[1]) + assert_almost_equal(p2.deriv(1).coef, p3.coef) + assert_almost_equal(p2.deriv(2).coef, p1.coef) + + +def test_linspace(Poly): + d = Poly.domain + random((2,))*.25 + w = Poly.window + random((2,))*.25 + p = Poly([1, 2, 3], domain=d, window=w) + # check default domain + xtgt = np.linspace(d[0], d[1], 20) + ytgt = p(xtgt) + xres, yres = p.linspace(20) + assert_almost_equal(xres, xtgt) + assert_almost_equal(yres, ytgt) + # check specified domain + xtgt = np.linspace(0, 2, 20) + ytgt = p(xtgt) + xres, yres = p.linspace(20, domain=[0, 2]) + assert_almost_equal(xres, xtgt) + assert_almost_equal(yres, ytgt) + + +def test_pow(Poly): + d = Poly.domain + random((2,))*.25 + w = Poly.window + random((2,))*.25 + tgt = Poly([1], domain=d, window=w) + tst = Poly([1, 2, 3], domain=d, window=w) + for i in range(5): + assert_poly_almost_equal(tst**i, tgt) + tgt = tgt * tst + # default domain and window + tgt = Poly([1]) + tst = Poly([1, 2, 3]) + for i in range(5): + assert_poly_almost_equal(tst**i, tgt) + tgt = tgt * tst + # check error for invalid powers + assert_raises(ValueError, op.pow, tgt, 1.5) + assert_raises(ValueError, op.pow, tgt, -1) + + +def test_call(Poly): + P = Polynomial + d = Poly.domain + x = np.linspace(d[0], d[1], 11) + + # Check defaults + p = Poly.cast(P([1, 2, 3])) + tgt = 1 + x*(2 + 3*x) + res = p(x) + assert_almost_equal(res, tgt) + + +def test_cutdeg(Poly): + p = Poly([1, 2, 3]) + assert_raises(ValueError, p.cutdeg, .5) + assert_raises(ValueError, p.cutdeg, -1) + assert_equal(len(p.cutdeg(3)), 3) + assert_equal(len(p.cutdeg(2)), 3) + assert_equal(len(p.cutdeg(1)), 2) + assert_equal(len(p.cutdeg(0)), 1) + + +def test_truncate(Poly): + p = Poly([1, 2, 3]) + assert_raises(ValueError, p.truncate, .5) + assert_raises(ValueError, p.truncate, 0) +
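+# --- Editor's note: a standalone sketch (not part of the vendored file) of
+# the distinction between the two methods tested here: truncate(size) keeps
+# the first `size` coefficients, while cutdeg(deg) drops terms of degree
+# greater than `deg`, so cutdeg(2) leaves a quadratic untouched.
+from numpy.polynomial import Polynomial
+
+p = Polynomial([1, 2, 3])
+assert len(p.truncate(2).coef) == 2   # quadratic term removed
+assert len(p.cutdeg(2).coef) == 3     # degree 2 already satisfies the cut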
assert_equal(len(p.truncate(4)), 3) + assert_equal(len(p.truncate(3)), 3) + assert_equal(len(p.truncate(2)), 2) + assert_equal(len(p.truncate(1)), 1) + + +def test_trim(Poly): + c = [1, 1e-6, 1e-12, 0] + p = Poly(c) + assert_equal(p.trim().coef, c[:3]) + assert_equal(p.trim(1e-10).coef, c[:2]) + assert_equal(p.trim(1e-5).coef, c[:1]) + + +def test_mapparms(Poly): + # check with defaults. Should be identity. + d = Poly.domain + w = Poly.window + p = Poly([1], domain=d, window=w) + assert_almost_equal([0, 1], p.mapparms()) + # + w = 2*d + 1 + p = Poly([1], domain=d, window=w) + assert_almost_equal([1, 2], p.mapparms()) + + +def test_ufunc_override(Poly): + p = Poly([1, 2, 3]) + x = np.ones(3) + assert_raises(TypeError, np.add, p, x) + assert_raises(TypeError, np.add, x, p) + + +# +# Test class method that only exists for some classes +# + + +class TestInterpolate: + + def f(self, x): + return x * (x - 1) * (x - 2) + + def test_raises(self): + assert_raises(ValueError, Chebyshev.interpolate, self.f, -1) + assert_raises(TypeError, Chebyshev.interpolate, self.f, 10.) + + def test_dimensions(self): + for deg in range(1, 5): + assert_(Chebyshev.interpolate(self.f, deg).degree() == deg) + + def test_approximation(self): + + def powx(x, p): + return x**p + + x = np.linspace(0, 2, 10) + for deg in range(0, 10): + for t in range(0, deg + 1): + p = Chebyshev.interpolate(powx, deg, domain=[0, 2], args=(t,)) + assert_almost_equal(p(x), powx(x, t), decimal=12) diff --git a/MLPY/Lib/site-packages/numpy/polynomial/tests/test_hermite.py b/MLPY/Lib/site-packages/numpy/polynomial/tests/test_hermite.py new file mode 100644 index 0000000000000000000000000000000000000000..c6cdc98a29b4b56a7d7d4a0b25f30fbf07221ab7 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/polynomial/tests/test_hermite.py @@ -0,0 +1,555 @@ +"""Tests for hermite module. 
+ +""" +from functools import reduce + +import numpy as np +import numpy.polynomial.hermite as herm +from numpy.polynomial.polynomial import polyval +from numpy.testing import ( + assert_almost_equal, assert_raises, assert_equal, assert_, + ) + +H0 = np.array([1]) +H1 = np.array([0, 2]) +H2 = np.array([-2, 0, 4]) +H3 = np.array([0, -12, 0, 8]) +H4 = np.array([12, 0, -48, 0, 16]) +H5 = np.array([0, 120, 0, -160, 0, 32]) +H6 = np.array([-120, 0, 720, 0, -480, 0, 64]) +H7 = np.array([0, -1680, 0, 3360, 0, -1344, 0, 128]) +H8 = np.array([1680, 0, -13440, 0, 13440, 0, -3584, 0, 256]) +H9 = np.array([0, 30240, 0, -80640, 0, 48384, 0, -9216, 0, 512]) + +Hlist = [H0, H1, H2, H3, H4, H5, H6, H7, H8, H9] + + +def trim(x): + return herm.hermtrim(x, tol=1e-6) + + +class TestConstants: + + def test_hermdomain(self): + assert_equal(herm.hermdomain, [-1, 1]) + + def test_hermzero(self): + assert_equal(herm.hermzero, [0]) + + def test_hermone(self): + assert_equal(herm.hermone, [1]) + + def test_hermx(self): + assert_equal(herm.hermx, [0, .5]) + + +class TestArithmetic: + x = np.linspace(-3, 3, 100) + + def test_hermadd(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = herm.hermadd([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermsub(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = herm.hermsub([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermmulx(self): + assert_equal(herm.hermmulx([0]), [0]) + assert_equal(herm.hermmulx([1]), [0, .5]) + for i in range(1, 5): + ser = [0]*i + [1] + tgt = [0]*(i - 1) + [i, 0, .5] + assert_equal(herm.hermmulx(ser), tgt) + + def test_hermmul(self): + # check values of result + for i in range(5): + pol1 = [0]*i + [1] + val1 = herm.hermval(self.x, pol1) + for j in range(5): + msg = f"At i={i}, j={j}" + pol2 = [0]*j + [1] + val2 = herm.hermval(self.x, pol2) + pol3 = herm.hermmul(pol1, pol2) + val3 = herm.hermval(self.x, pol3) + assert_(len(pol3) == i + j + 1, msg) + assert_almost_equal(val3, val1*val2, err_msg=msg) + + def test_hermdiv(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + ci = [0]*i + [1] + cj = [0]*j + [1] + tgt = herm.hermadd(ci, cj) + quo, rem = herm.hermdiv(tgt, ci) + res = herm.hermadd(herm.hermmul(quo, ci), rem) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermpow(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + c = np.arange(i + 1) + tgt = reduce(herm.hermmul, [c]*j, np.array([1])) + res = herm.hermpow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation: + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([2.5, 1., .75]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + y = polyval(x, [1., 2., 3.]) + + def test_hermval(self): + #check empty input + assert_equal(herm.hermval([], [1]).size, 0) + + #check normal input) + x = np.linspace(-1, 1) + y = [polyval(x, c) for c in Hlist] + for i in range(10): + msg = f"At i={i}" + tgt = y[i] + res = herm.hermval(x, [0]*i + [1]) + assert_almost_equal(res, tgt, err_msg=msg) + + #check that shape is preserved + for i in range(3): + dims = [2]*i + x = np.zeros(dims) + assert_equal(herm.hermval(x, [1]).shape, dims) + 
assert_equal(herm.hermval(x, [1, 0]).shape, dims) + assert_equal(herm.hermval(x, [1, 0, 0]).shape, dims) + + def test_hermval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, herm.hermval2d, x1, x2[:2], self.c2d) + + #test values + tgt = y1*y2 + res = herm.hermval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herm.hermval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_hermval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, herm.hermval3d, x1, x2, x3[:2], self.c3d) + + #test values + tgt = y1*y2*y3 + res = herm.hermval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herm.hermval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_hermgrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j->ij', y1, y2) + res = herm.hermgrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herm.hermgrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3)*2) + + def test_hermgrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = herm.hermgrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herm.hermgrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)*3) + + +class TestIntegral: + + def test_hermint(self): + # check exceptions + assert_raises(TypeError, herm.hermint, [0], .5) + assert_raises(ValueError, herm.hermint, [0], -1) + assert_raises(ValueError, herm.hermint, [0], 1, [0, 0]) + assert_raises(ValueError, herm.hermint, [0], lbnd=[0]) + assert_raises(ValueError, herm.hermint, [0], scl=[0]) + assert_raises(TypeError, herm.hermint, [0], axis=.5) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0]*(i - 2) + [1] + res = herm.hermint([0], m=i, k=k) + assert_almost_equal(res, [0, .5]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [1/scl] + hermpol = herm.poly2herm(pol) + hermint = herm.hermint(hermpol, m=1, k=[i]) + res = herm.herm2poly(hermint) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + hermpol = herm.poly2herm(pol) + hermint = herm.hermint(hermpol, m=1, k=[i], lbnd=-1) + assert_almost_equal(herm.hermval(-1, hermint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [2/scl] + hermpol = herm.poly2herm(pol) + hermint = herm.hermint(hermpol, m=1, k=[i], scl=2) + res = herm.herm2poly(hermint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herm.hermint(tgt, m=1) + res = herm.hermint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herm.hermint(tgt, m=1, k=[k]) + res = herm.hermint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in 
range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herm.hermint(tgt, m=1, k=[k], lbnd=-1) + res = herm.hermint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herm.hermint(tgt, m=1, k=[k], scl=2) + res = herm.hermint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([herm.hermint(c) for c in c2d.T]).T + res = herm.hermint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herm.hermint(c) for c in c2d]) + res = herm.hermint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herm.hermint(c, k=3) for c in c2d]) + res = herm.hermint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative: + + def test_hermder(self): + # check exceptions + assert_raises(TypeError, herm.hermder, [0], .5) + assert_raises(ValueError, herm.hermder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0]*i + [1] + res = herm.hermder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = herm.hermder(herm.hermint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = herm.hermder(herm.hermint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([herm.hermder(c) for c in c2d.T]).T + res = herm.hermder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herm.hermder(c) for c in c2d]) + res = herm.hermder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class TestVander: + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + + def test_hermvander(self): + # check for 1d x + x = np.arange(3) + v = herm.hermvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], herm.hermval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = herm.hermvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], herm.hermval(x, coef)) + + def test_hermvander2d(self): + # also tests hermval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = herm.hermvander2d(x1, x2, [1, 2]) + tgt = herm.hermval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = herm.hermvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_hermvander3d(self): + # also tests hermval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = herm.hermvander3d(x1, x2, x3, [1, 2, 3]) + tgt = herm.hermval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = herm.hermvander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + +class TestFitting: + + def test_hermfit(self): + def f(x): + return x*(x - 1)*(x - 2) + + def f2(x): + return x**4 + x**2 + 1 + + # Test exceptions + 
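+# --- Editor's note: a standalone sketch (not part of the vendored file) of
+# the two deg forms hermfit accepts below: an int fits all degrees 0..deg,
+# while a list restricts the fit to those basis terms (here the even ones),
+# with the omitted coefficients returned as zeros.
+import numpy as np
+import numpy.polynomial.hermite as herm
+
+x = np.linspace(-1, 1, 50)
+y = x**4 + x**2 + 1                    # an even function
+full = herm.hermfit(x, y, 4)           # degrees 0 through 4
+even = herm.hermfit(x, y, [0, 2, 4])   # even-degree terms only
+assert np.allclose(herm.hermval(x, even), y)
+assert np.allclose(full, even)         # odd terms come out ~0 anyway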
assert_raises(ValueError, herm.hermfit, [1], [1], -1) + assert_raises(TypeError, herm.hermfit, [[1]], [1], 0) + assert_raises(TypeError, herm.hermfit, [], [1], 0) + assert_raises(TypeError, herm.hermfit, [1], [[[1]]], 0) + assert_raises(TypeError, herm.hermfit, [1, 2], [1], 0) + assert_raises(TypeError, herm.hermfit, [1], [1, 2], 0) + assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, herm.hermfit, [1], [1], [-1,]) + assert_raises(ValueError, herm.hermfit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, herm.hermfit, [1], [1], []) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = herm.hermfit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(herm.hermval(x, coef3), y) + coef3 = herm.hermfit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(herm.hermval(x, coef3), y) + # + coef4 = herm.hermfit(x, y, 4) + assert_equal(len(coef4), 5) + assert_almost_equal(herm.hermval(x, coef4), y) + coef4 = herm.hermfit(x, y, [0, 1, 2, 3, 4]) + assert_equal(len(coef4), 5) + assert_almost_equal(herm.hermval(x, coef4), y) + # check things still work if deg is not in strictly increasing order + coef4 = herm.hermfit(x, y, [2, 3, 4, 1, 0]) + assert_equal(len(coef4), 5) + assert_almost_equal(herm.hermval(x, coef4), y) + # + coef2d = herm.hermfit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + coef2d = herm.hermfit(x, np.array([y, y]).T, [0, 1, 2, 3]) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + # test weighting + w = np.zeros_like(x) + yw = y.copy() + w[1::2] = 1 + y[0::2] = 0 + wcoef3 = herm.hermfit(x, yw, 3, w=w) + assert_almost_equal(wcoef3, coef3) + wcoef3 = herm.hermfit(x, yw, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef3, coef3) + # + wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + # test scaling with complex x values whose squares + # sum to zero. + x = [1, 1j, -1, -1j] + assert_almost_equal(herm.hermfit(x, x, 1), [0, .5]) + assert_almost_equal(herm.hermfit(x, x, [0, 1]), [0, .5]) + # test fitting only even Hermite polynomials + x = np.linspace(-1, 1) + y = f2(x) + coef1 = herm.hermfit(x, y, 4) + assert_almost_equal(herm.hermval(x, coef1), y) + coef2 = herm.hermfit(x, y, [0, 2, 4]) + assert_almost_equal(herm.hermval(x, coef2), y) + assert_almost_equal(coef1, coef2) + + +class TestCompanion: + + def test_raises(self): + assert_raises(ValueError, herm.hermcompanion, []) + assert_raises(ValueError, herm.hermcompanion, [1]) + + def test_dimensions(self): + for i in range(1, 5): + coef = [0]*i + [1] + assert_(herm.hermcompanion(coef).shape == (i, i)) + + def test_linear_root(self): + assert_(herm.hermcompanion([1, 2])[0, 0] == -.25) + + +class TestGauss: + + def test_100(self): + x, w = herm.hermgauss(100) + + # test orthogonality. Note that the results need to be normalized, + # otherwise the huge values that can arise from fast growing + # functions like Laguerre can be very confusing.
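+# --- Editor's note: a standalone sketch (not part of the vendored file) of
+# the Gauss-Hermite rule checked here: it integrates f(x)*exp(-x**2) over the
+# real line, so the weights alone sum to sqrt(pi) and a 2-point rule already
+# handles x**2 exactly.
+import numpy as np
+import numpy.polynomial.hermite as herm
+
+x, w = herm.hermgauss(2)
+assert np.isclose(w.sum(), np.sqrt(np.pi))
+assert np.isclose((w * x**2).sum(), np.sqrt(np.pi)/2)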
+ v = herm.hermvander(x, 99) + vv = np.dot(v.T * w, v) + vd = 1/np.sqrt(vv.diagonal()) + vv = vd[:, None] * vv * vd + assert_almost_equal(vv, np.eye(100)) + + # check that the integral of 1 is correct + tgt = np.sqrt(np.pi) + assert_almost_equal(w.sum(), tgt) + + +class TestMisc: + + def test_hermfromroots(self): + res = herm.hermfromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) + pol = herm.hermfromroots(roots) + res = herm.hermval(roots, pol) + tgt = 0 + assert_(len(pol) == i + 1) + assert_almost_equal(herm.herm2poly(pol)[-1], 1) + assert_almost_equal(res, tgt) + + def test_hermroots(self): + assert_almost_equal(herm.hermroots([1]), []) + assert_almost_equal(herm.hermroots([1, 1]), [-.5]) + for i in range(2, 5): + tgt = np.linspace(-1, 1, i) + res = herm.hermroots(herm.hermfromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermtrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, herm.hermtrim, coef, -1) + + # Test results + assert_equal(herm.hermtrim(coef), coef[:-1]) + assert_equal(herm.hermtrim(coef, 1), coef[:-3]) + assert_equal(herm.hermtrim(coef, 2), [0]) + + def test_hermline(self): + assert_equal(herm.hermline(3, 4), [3, 2]) + + def test_herm2poly(self): + for i in range(10): + assert_almost_equal(herm.herm2poly([0]*i + [1]), Hlist[i]) + + def test_poly2herm(self): + for i in range(10): + assert_almost_equal(herm.poly2herm(Hlist[i]), [0]*i + [1]) + + def test_weight(self): + x = np.linspace(-5, 5, 11) + tgt = np.exp(-x**2) + res = herm.hermweight(x) + assert_almost_equal(res, tgt) diff --git a/MLPY/Lib/site-packages/numpy/polynomial/tests/test_hermite_e.py b/MLPY/Lib/site-packages/numpy/polynomial/tests/test_hermite_e.py new file mode 100644 index 0000000000000000000000000000000000000000..5b92f011bbcccf9c71bc767954b2cfa136e7db3a --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/polynomial/tests/test_hermite_e.py @@ -0,0 +1,556 @@ +"""Tests for hermite_e module. 
+ +""" +from functools import reduce + +import numpy as np +import numpy.polynomial.hermite_e as herme +from numpy.polynomial.polynomial import polyval +from numpy.testing import ( + assert_almost_equal, assert_raises, assert_equal, assert_, + ) + +He0 = np.array([1]) +He1 = np.array([0, 1]) +He2 = np.array([-1, 0, 1]) +He3 = np.array([0, -3, 0, 1]) +He4 = np.array([3, 0, -6, 0, 1]) +He5 = np.array([0, 15, 0, -10, 0, 1]) +He6 = np.array([-15, 0, 45, 0, -15, 0, 1]) +He7 = np.array([0, -105, 0, 105, 0, -21, 0, 1]) +He8 = np.array([105, 0, -420, 0, 210, 0, -28, 0, 1]) +He9 = np.array([0, 945, 0, -1260, 0, 378, 0, -36, 0, 1]) + +Helist = [He0, He1, He2, He3, He4, He5, He6, He7, He8, He9] + + +def trim(x): + return herme.hermetrim(x, tol=1e-6) + + +class TestConstants: + + def test_hermedomain(self): + assert_equal(herme.hermedomain, [-1, 1]) + + def test_hermezero(self): + assert_equal(herme.hermezero, [0]) + + def test_hermeone(self): + assert_equal(herme.hermeone, [1]) + + def test_hermex(self): + assert_equal(herme.hermex, [0, 1]) + + +class TestArithmetic: + x = np.linspace(-3, 3, 100) + + def test_hermeadd(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = herme.hermeadd([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermesub(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = herme.hermesub([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermemulx(self): + assert_equal(herme.hermemulx([0]), [0]) + assert_equal(herme.hermemulx([1]), [0, 1]) + for i in range(1, 5): + ser = [0]*i + [1] + tgt = [0]*(i - 1) + [i, 0, 1] + assert_equal(herme.hermemulx(ser), tgt) + + def test_hermemul(self): + # check values of result + for i in range(5): + pol1 = [0]*i + [1] + val1 = herme.hermeval(self.x, pol1) + for j in range(5): + msg = f"At i={i}, j={j}" + pol2 = [0]*j + [1] + val2 = herme.hermeval(self.x, pol2) + pol3 = herme.hermemul(pol1, pol2) + val3 = herme.hermeval(self.x, pol3) + assert_(len(pol3) == i + j + 1, msg) + assert_almost_equal(val3, val1*val2, err_msg=msg) + + def test_hermediv(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + ci = [0]*i + [1] + cj = [0]*j + [1] + tgt = herme.hermeadd(ci, cj) + quo, rem = herme.hermediv(tgt, ci) + res = herme.hermeadd(herme.hermemul(quo, ci), rem) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermepow(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + c = np.arange(i + 1) + tgt = reduce(herme.hermemul, [c]*j, np.array([1])) + res = herme.hermepow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation: + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([4., 2., 3.]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + y = polyval(x, [1., 2., 3.]) + + def test_hermeval(self): + #check empty input + assert_equal(herme.hermeval([], [1]).size, 0) + + #check normal input) + x = np.linspace(-1, 1) + y = [polyval(x, c) for c in Helist] + for i in range(10): + msg = f"At i={i}" + tgt = y[i] + res = herme.hermeval(x, [0]*i + [1]) + assert_almost_equal(res, tgt, err_msg=msg) + + #check that shape is preserved + for i in range(3): + dims = [2]*i + x = np.zeros(dims) + assert_equal(herme.hermeval(x, 
[1]).shape, dims) + assert_equal(herme.hermeval(x, [1, 0]).shape, dims) + assert_equal(herme.hermeval(x, [1, 0, 0]).shape, dims) + + def test_hermeval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, herme.hermeval2d, x1, x2[:2], self.c2d) + + #test values + tgt = y1*y2 + res = herme.hermeval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herme.hermeval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_hermeval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, herme.hermeval3d, x1, x2, x3[:2], self.c3d) + + #test values + tgt = y1*y2*y3 + res = herme.hermeval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herme.hermeval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_hermegrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j->ij', y1, y2) + res = herme.hermegrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herme.hermegrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3)*2) + + def test_hermegrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = herme.hermegrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herme.hermegrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)*3) + + +class TestIntegral: + + def test_hermeint(self): + # check exceptions + assert_raises(TypeError, herme.hermeint, [0], .5) + assert_raises(ValueError, herme.hermeint, [0], -1) + assert_raises(ValueError, herme.hermeint, [0], 1, [0, 0]) + assert_raises(ValueError, herme.hermeint, [0], lbnd=[0]) + assert_raises(ValueError, herme.hermeint, [0], scl=[0]) + assert_raises(TypeError, herme.hermeint, [0], axis=.5) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0]*(i - 2) + [1] + res = herme.hermeint([0], m=i, k=k) + assert_almost_equal(res, [0, 1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [1/scl] + hermepol = herme.poly2herme(pol) + hermeint = herme.hermeint(hermepol, m=1, k=[i]) + res = herme.herme2poly(hermeint) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + hermepol = herme.poly2herme(pol) + hermeint = herme.hermeint(hermepol, m=1, k=[i], lbnd=-1) + assert_almost_equal(herme.hermeval(-1, hermeint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [2/scl] + hermepol = herme.poly2herme(pol) + hermeint = herme.hermeint(hermepol, m=1, k=[i], scl=2) + res = herme.herme2poly(hermeint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herme.hermeint(tgt, m=1) + res = herme.hermeint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herme.hermeint(tgt, m=1, k=[k]) + res = herme.hermeint(pol, m=j, k=list(range(j))) + 
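+# --- Editor's note: a standalone sketch (not part of the vendored file) of
+# how this module relates to the physicists' version: the probabilists'
+# polynomials satisfy He_n(x) = 2**(-n/2) * H_n(x/sqrt(2)),
+# e.g. He_2(x) = x**2 - 1.
+import numpy as np
+import numpy.polynomial.hermite as herm
+import numpy.polynomial.hermite_e as herme
+
+x = np.linspace(-2, 2, 9)
+he2 = herme.hermeval(x, [0, 0, 1])
+assert np.allclose(he2, 0.5 * herm.hermval(x/np.sqrt(2), [0, 0, 1]))
+assert np.allclose(he2, x**2 - 1)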
assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herme.hermeint(tgt, m=1, k=[k], lbnd=-1) + res = herme.hermeint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herme.hermeint(tgt, m=1, k=[k], scl=2) + res = herme.hermeint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermeint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([herme.hermeint(c) for c in c2d.T]).T + res = herme.hermeint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herme.hermeint(c) for c in c2d]) + res = herme.hermeint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herme.hermeint(c, k=3) for c in c2d]) + res = herme.hermeint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative: + + def test_hermeder(self): + # check exceptions + assert_raises(TypeError, herme.hermeder, [0], .5) + assert_raises(ValueError, herme.hermeder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0]*i + [1] + res = herme.hermeder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = herme.hermeder(herme.hermeint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = herme.hermeder( + herme.hermeint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermeder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([herme.hermeder(c) for c in c2d.T]).T + res = herme.hermeder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herme.hermeder(c) for c in c2d]) + res = herme.hermeder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class TestVander: + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + + def test_hermevander(self): + # check for 1d x + x = np.arange(3) + v = herme.hermevander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], herme.hermeval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = herme.hermevander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], herme.hermeval(x, coef)) + + def test_hermevander2d(self): + # also tests hermeval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = herme.hermevander2d(x1, x2, [1, 2]) + tgt = herme.hermeval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = herme.hermevander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_hermevander3d(self): + # also tests hermeval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = herme.hermevander3d(x1, x2, x3, [1, 2, 3]) + tgt = herme.hermeval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = herme.hermevander3d([x1], [x2], [x3], [1, 2, 3]) 
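+# --- Editor's note: a standalone sketch (not part of the vendored file) of
+# what the pseudo-Vandermonde matrix encodes: V[i, j] = He_j(x[i]), so series
+# evaluation is the product V @ c, and V doubles as the design matrix for a
+# least-squares fit.
+import numpy as np
+import numpy.polynomial.hermite_e as herme
+
+x = np.linspace(-1, 1, 6)
+c = np.array([1.0, 2.0, 3.0])
+V = herme.hermevander(x, 2)            # shape (6, 3)
+assert np.allclose(V @ c, herme.hermeval(x, c))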
+ assert_(van.shape == (1, 5, 24)) + + +class TestFitting: + + def test_hermefit(self): + def f(x): + return x*(x - 1)*(x - 2) + + def f2(x): + return x**4 + x**2 + 1 + + # Test exceptions + assert_raises(ValueError, herme.hermefit, [1], [1], -1) + assert_raises(TypeError, herme.hermefit, [[1]], [1], 0) + assert_raises(TypeError, herme.hermefit, [], [1], 0) + assert_raises(TypeError, herme.hermefit, [1], [[[1]]], 0) + assert_raises(TypeError, herme.hermefit, [1, 2], [1], 0) + assert_raises(TypeError, herme.hermefit, [1], [1, 2], 0) + assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, herme.hermefit, [1], [1], [-1,]) + assert_raises(ValueError, herme.hermefit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, herme.hermefit, [1], [1], []) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = herme.hermefit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(herme.hermeval(x, coef3), y) + coef3 = herme.hermefit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(herme.hermeval(x, coef3), y) + # + coef4 = herme.hermefit(x, y, 4) + assert_equal(len(coef4), 5) + assert_almost_equal(herme.hermeval(x, coef4), y) + coef4 = herme.hermefit(x, y, [0, 1, 2, 3, 4]) + assert_equal(len(coef4), 5) + assert_almost_equal(herme.hermeval(x, coef4), y) + # check things still work if deg is not in strictly increasing order + coef4 = herme.hermefit(x, y, [2, 3, 4, 1, 0]) + assert_equal(len(coef4), 5) + assert_almost_equal(herme.hermeval(x, coef4), y) + # + coef2d = herme.hermefit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + coef2d = herme.hermefit(x, np.array([y, y]).T, [0, 1, 2, 3]) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + # test weighting + w = np.zeros_like(x) + yw = y.copy() + w[1::2] = 1 + y[0::2] = 0 + wcoef3 = herme.hermefit(x, yw, 3, w=w) + assert_almost_equal(wcoef3, coef3) + wcoef3 = herme.hermefit(x, yw, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef3, coef3) + # + wcoef2d = herme.hermefit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + wcoef2d = herme.hermefit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + # test scaling with complex x values whose squares + # sum to zero. + x = [1, 1j, -1, -1j] + assert_almost_equal(herme.hermefit(x, x, 1), [0, 1]) + assert_almost_equal(herme.hermefit(x, x, [0, 1]), [0, 1]) + # test fitting only even HermiteE polynomials + x = np.linspace(-1, 1) + y = f2(x) + coef1 = herme.hermefit(x, y, 4) + assert_almost_equal(herme.hermeval(x, coef1), y) + coef2 = herme.hermefit(x, y, [0, 2, 4]) + assert_almost_equal(herme.hermeval(x, coef2), y) + assert_almost_equal(coef1, coef2) + + +class TestCompanion: + + def test_raises(self): + assert_raises(ValueError, herme.hermecompanion, []) + assert_raises(ValueError, herme.hermecompanion, [1]) + + def test_dimensions(self): + for i in range(1, 5): + coef = [0]*i + [1] + assert_(herme.hermecompanion(coef).shape == (i, i)) + + def test_linear_root(self): + assert_(herme.hermecompanion([1, 2])[0, 0] == -.5) + + +class TestGauss: + + def test_100(self): + x, w = herme.hermegauss(100) + + # test orthogonality. Note that the results need to be normalized, + # otherwise the huge values that can arise from fast growing + # functions like Laguerre can be very confusing.
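+# --- Editor's note: a standalone sketch (not part of the vendored file):
+# the Gauss-HermiteE rule integrates against exp(-x**2/2), the unnormalized
+# standard normal density, so its weights sum to sqrt(2*pi) and the rule
+# reproduces the unit second moment.
+import numpy as np
+import numpy.polynomial.hermite_e as herme
+
+x, w = herme.hermegauss(2)
+assert np.isclose(w.sum(), np.sqrt(2*np.pi))
+assert np.isclose((w * x**2).sum(), np.sqrt(2*np.pi))   # variance 1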
+ v = herme.hermevander(x, 99) + vv = np.dot(v.T * w, v) + vd = 1/np.sqrt(vv.diagonal()) + vv = vd[:, None] * vv * vd + assert_almost_equal(vv, np.eye(100)) + + # check that the integral of 1 is correct + tgt = np.sqrt(2*np.pi) + assert_almost_equal(w.sum(), tgt) + + +class TestMisc: + + def test_hermefromroots(self): + res = herme.hermefromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) + pol = herme.hermefromroots(roots) + res = herme.hermeval(roots, pol) + tgt = 0 + assert_(len(pol) == i + 1) + assert_almost_equal(herme.herme2poly(pol)[-1], 1) + assert_almost_equal(res, tgt) + + def test_hermeroots(self): + assert_almost_equal(herme.hermeroots([1]), []) + assert_almost_equal(herme.hermeroots([1, 1]), [-1]) + for i in range(2, 5): + tgt = np.linspace(-1, 1, i) + res = herme.hermeroots(herme.hermefromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermetrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, herme.hermetrim, coef, -1) + + # Test results + assert_equal(herme.hermetrim(coef), coef[:-1]) + assert_equal(herme.hermetrim(coef, 1), coef[:-3]) + assert_equal(herme.hermetrim(coef, 2), [0]) + + def test_hermeline(self): + assert_equal(herme.hermeline(3, 4), [3, 4]) + + def test_herme2poly(self): + for i in range(10): + assert_almost_equal(herme.herme2poly([0]*i + [1]), Helist[i]) + + def test_poly2herme(self): + for i in range(10): + assert_almost_equal(herme.poly2herme(Helist[i]), [0]*i + [1]) + + def test_weight(self): + x = np.linspace(-5, 5, 11) + tgt = np.exp(-.5*x**2) + res = herme.hermeweight(x) + assert_almost_equal(res, tgt) diff --git a/MLPY/Lib/site-packages/numpy/polynomial/tests/test_laguerre.py b/MLPY/Lib/site-packages/numpy/polynomial/tests/test_laguerre.py new file mode 100644 index 0000000000000000000000000000000000000000..b889f49a53b6888189bb3460294a7237b398d651 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/polynomial/tests/test_laguerre.py @@ -0,0 +1,537 @@ +"""Tests for laguerre module. 
+ +""" +from functools import reduce + +import numpy as np +import numpy.polynomial.laguerre as lag +from numpy.polynomial.polynomial import polyval +from numpy.testing import ( + assert_almost_equal, assert_raises, assert_equal, assert_, + ) + +L0 = np.array([1])/1 +L1 = np.array([1, -1])/1 +L2 = np.array([2, -4, 1])/2 +L3 = np.array([6, -18, 9, -1])/6 +L4 = np.array([24, -96, 72, -16, 1])/24 +L5 = np.array([120, -600, 600, -200, 25, -1])/120 +L6 = np.array([720, -4320, 5400, -2400, 450, -36, 1])/720 + +Llist = [L0, L1, L2, L3, L4, L5, L6] + + +def trim(x): + return lag.lagtrim(x, tol=1e-6) + + +class TestConstants: + + def test_lagdomain(self): + assert_equal(lag.lagdomain, [0, 1]) + + def test_lagzero(self): + assert_equal(lag.lagzero, [0]) + + def test_lagone(self): + assert_equal(lag.lagone, [1]) + + def test_lagx(self): + assert_equal(lag.lagx, [1, -1]) + + +class TestArithmetic: + x = np.linspace(-3, 3, 100) + + def test_lagadd(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = lag.lagadd([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_lagsub(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = lag.lagsub([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_lagmulx(self): + assert_equal(lag.lagmulx([0]), [0]) + assert_equal(lag.lagmulx([1]), [1, -1]) + for i in range(1, 5): + ser = [0]*i + [1] + tgt = [0]*(i - 1) + [-i, 2*i + 1, -(i + 1)] + assert_almost_equal(lag.lagmulx(ser), tgt) + + def test_lagmul(self): + # check values of result + for i in range(5): + pol1 = [0]*i + [1] + val1 = lag.lagval(self.x, pol1) + for j in range(5): + msg = f"At i={i}, j={j}" + pol2 = [0]*j + [1] + val2 = lag.lagval(self.x, pol2) + pol3 = lag.lagmul(pol1, pol2) + val3 = lag.lagval(self.x, pol3) + assert_(len(pol3) == i + j + 1, msg) + assert_almost_equal(val3, val1*val2, err_msg=msg) + + def test_lagdiv(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + ci = [0]*i + [1] + cj = [0]*j + [1] + tgt = lag.lagadd(ci, cj) + quo, rem = lag.lagdiv(tgt, ci) + res = lag.lagadd(lag.lagmul(quo, ci), rem) + assert_almost_equal(trim(res), trim(tgt), err_msg=msg) + + def test_lagpow(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + c = np.arange(i + 1) + tgt = reduce(lag.lagmul, [c]*j, np.array([1])) + res = lag.lagpow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation: + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([9., -14., 6.]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + y = polyval(x, [1., 2., 3.]) + + def test_lagval(self): + #check empty input + assert_equal(lag.lagval([], [1]).size, 0) + + #check normal input) + x = np.linspace(-1, 1) + y = [polyval(x, c) for c in Llist] + for i in range(7): + msg = f"At i={i}" + tgt = y[i] + res = lag.lagval(x, [0]*i + [1]) + assert_almost_equal(res, tgt, err_msg=msg) + + #check that shape is preserved + for i in range(3): + dims = [2]*i + x = np.zeros(dims) + assert_equal(lag.lagval(x, [1]).shape, dims) + assert_equal(lag.lagval(x, [1, 0]).shape, dims) + assert_equal(lag.lagval(x, [1, 0, 0]).shape, dims) + + def test_lagval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + 
assert_raises(ValueError, lag.lagval2d, x1, x2[:2], self.c2d) + + #test values + tgt = y1*y2 + res = lag.lagval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = lag.lagval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_lagval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, lag.lagval3d, x1, x2, x3[:2], self.c3d) + + #test values + tgt = y1*y2*y3 + res = lag.lagval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = lag.lagval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_laggrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j->ij', y1, y2) + res = lag.laggrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = lag.laggrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3)*2) + + def test_laggrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = lag.laggrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = lag.laggrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)*3) + + +class TestIntegral: + + def test_lagint(self): + # check exceptions + assert_raises(TypeError, lag.lagint, [0], .5) + assert_raises(ValueError, lag.lagint, [0], -1) + assert_raises(ValueError, lag.lagint, [0], 1, [0, 0]) + assert_raises(ValueError, lag.lagint, [0], lbnd=[0]) + assert_raises(ValueError, lag.lagint, [0], scl=[0]) + assert_raises(TypeError, lag.lagint, [0], axis=.5) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0]*(i - 2) + [1] + res = lag.lagint([0], m=i, k=k) + assert_almost_equal(res, [1, -1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [1/scl] + lagpol = lag.poly2lag(pol) + lagint = lag.lagint(lagpol, m=1, k=[i]) + res = lag.lag2poly(lagint) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + lagpol = lag.poly2lag(pol) + lagint = lag.lagint(lagpol, m=1, k=[i], lbnd=-1) + assert_almost_equal(lag.lagval(-1, lagint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [2/scl] + lagpol = lag.poly2lag(pol) + lagint = lag.lagint(lagpol, m=1, k=[i], scl=2) + res = lag.lag2poly(lagint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = lag.lagint(tgt, m=1) + res = lag.lagint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = lag.lagint(tgt, m=1, k=[k]) + res = lag.lagint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = lag.lagint(tgt, m=1, k=[k], lbnd=-1) + res = lag.lagint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in 
range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = lag.lagint(tgt, m=1, k=[k], scl=2) + res = lag.lagint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_lagint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([lag.lagint(c) for c in c2d.T]).T + res = lag.lagint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([lag.lagint(c) for c in c2d]) + res = lag.lagint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([lag.lagint(c, k=3) for c in c2d]) + res = lag.lagint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative: + + def test_lagder(self): + # check exceptions + assert_raises(TypeError, lag.lagder, [0], .5) + assert_raises(ValueError, lag.lagder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0]*i + [1] + res = lag.lagder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = lag.lagder(lag.lagint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = lag.lagder(lag.lagint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_lagder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([lag.lagder(c) for c in c2d.T]).T + res = lag.lagder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([lag.lagder(c) for c in c2d]) + res = lag.lagder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class TestVander: + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + + def test_lagvander(self): + # check for 1d x + x = np.arange(3) + v = lag.lagvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], lag.lagval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = lag.lagvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], lag.lagval(x, coef)) + + def test_lagvander2d(self): + # also tests lagval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = lag.lagvander2d(x1, x2, [1, 2]) + tgt = lag.lagval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = lag.lagvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_lagvander3d(self): + # also tests lagval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = lag.lagvander3d(x1, x2, x3, [1, 2, 3]) + tgt = lag.lagval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = lag.lagvander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + +class TestFitting: + + def test_lagfit(self): + def f(x): + return x*(x - 1)*(x - 2) + + # Test exceptions + assert_raises(ValueError, lag.lagfit, [1], [1], -1) + assert_raises(TypeError, lag.lagfit, [[1]], [1], 0) + assert_raises(TypeError, lag.lagfit, [], [1], 0) + assert_raises(TypeError, lag.lagfit, [1], [[[1]]], 0) + assert_raises(TypeError, lag.lagfit, [1, 2], [1], 0) + assert_raises(TypeError, lag.lagfit, [1], [1, 2], 0) + assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[[1]]) 
+        assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[1, 1])
+        assert_raises(ValueError, lag.lagfit, [1], [1], [-1,])
+        assert_raises(ValueError, lag.lagfit, [1], [1], [2, -1, 6])
+        assert_raises(TypeError, lag.lagfit, [1], [1], [])
+
+        # Test fit
+        x = np.linspace(0, 2)
+        y = f(x)
+        #
+        coef3 = lag.lagfit(x, y, 3)
+        assert_equal(len(coef3), 4)
+        assert_almost_equal(lag.lagval(x, coef3), y)
+        coef3 = lag.lagfit(x, y, [0, 1, 2, 3])
+        assert_equal(len(coef3), 4)
+        assert_almost_equal(lag.lagval(x, coef3), y)
+        #
+        coef4 = lag.lagfit(x, y, 4)
+        assert_equal(len(coef4), 5)
+        assert_almost_equal(lag.lagval(x, coef4), y)
+        coef4 = lag.lagfit(x, y, [0, 1, 2, 3, 4])
+        assert_equal(len(coef4), 5)
+        assert_almost_equal(lag.lagval(x, coef4), y)
+        #
+        coef2d = lag.lagfit(x, np.array([y, y]).T, 3)
+        assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
+        coef2d = lag.lagfit(x, np.array([y, y]).T, [0, 1, 2, 3])
+        assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
+        # test weighting
+        w = np.zeros_like(x)
+        yw = y.copy()
+        w[1::2] = 1
+        yw[0::2] = 0
+        wcoef3 = lag.lagfit(x, yw, 3, w=w)
+        assert_almost_equal(wcoef3, coef3)
+        wcoef3 = lag.lagfit(x, yw, [0, 1, 2, 3], w=w)
+        assert_almost_equal(wcoef3, coef3)
+        #
+        wcoef2d = lag.lagfit(x, np.array([yw, yw]).T, 3, w=w)
+        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
+        wcoef2d = lag.lagfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
+        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
+        # test scaling with complex x values whose squares sum to zero
+        x = [1, 1j, -1, -1j]
+        assert_almost_equal(lag.lagfit(x, x, 1), [1, -1])
+        assert_almost_equal(lag.lagfit(x, x, [0, 1]), [1, -1])
+
+
+class TestCompanion:
+
+    def test_raises(self):
+        assert_raises(ValueError, lag.lagcompanion, [])
+        assert_raises(ValueError, lag.lagcompanion, [1])
+
+    def test_dimensions(self):
+        for i in range(1, 5):
+            coef = [0]*i + [1]
+            assert_(lag.lagcompanion(coef).shape == (i, i))
+
+    def test_linear_root(self):
+        assert_(lag.lagcompanion([1, 2])[0, 0] == 1.5)
+
+
+class TestGauss:
+
+    def test_100(self):
+        x, w = lag.laggauss(100)
+
+        # test orthogonality. Note that the results need to be normalized,
+        # otherwise the huge values that can arise from fast growing
+        # functions like Laguerre can be very confusing.
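+        # With the Gauss-Laguerre nodes x and weights w, the matrix formed
+        # below approximates the Gram matrix of integrals
+        # exp(-x)*L_m(x)*L_n(x) over [0, inf), which is the identity for
+        # the Laguerre polynomials; rescaling to a unit diagonal absorbs
+        # the rounding error from the huge intermediate values.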
+ v = lag.lagvander(x, 99) + vv = np.dot(v.T * w, v) + vd = 1/np.sqrt(vv.diagonal()) + vv = vd[:, None] * vv * vd + assert_almost_equal(vv, np.eye(100)) + + # check that the integral of 1 is correct + tgt = 1.0 + assert_almost_equal(w.sum(), tgt) + + +class TestMisc: + + def test_lagfromroots(self): + res = lag.lagfromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) + pol = lag.lagfromroots(roots) + res = lag.lagval(roots, pol) + tgt = 0 + assert_(len(pol) == i + 1) + assert_almost_equal(lag.lag2poly(pol)[-1], 1) + assert_almost_equal(res, tgt) + + def test_lagroots(self): + assert_almost_equal(lag.lagroots([1]), []) + assert_almost_equal(lag.lagroots([0, 1]), [1]) + for i in range(2, 5): + tgt = np.linspace(0, 3, i) + res = lag.lagroots(lag.lagfromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_lagtrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, lag.lagtrim, coef, -1) + + # Test results + assert_equal(lag.lagtrim(coef), coef[:-1]) + assert_equal(lag.lagtrim(coef, 1), coef[:-3]) + assert_equal(lag.lagtrim(coef, 2), [0]) + + def test_lagline(self): + assert_equal(lag.lagline(3, 4), [7, -4]) + + def test_lag2poly(self): + for i in range(7): + assert_almost_equal(lag.lag2poly([0]*i + [1]), Llist[i]) + + def test_poly2lag(self): + for i in range(7): + assert_almost_equal(lag.poly2lag(Llist[i]), [0]*i + [1]) + + def test_weight(self): + x = np.linspace(0, 10, 11) + tgt = np.exp(-x) + res = lag.lagweight(x) + assert_almost_equal(res, tgt) diff --git a/MLPY/Lib/site-packages/numpy/polynomial/tests/test_legendre.py b/MLPY/Lib/site-packages/numpy/polynomial/tests/test_legendre.py new file mode 100644 index 0000000000000000000000000000000000000000..93de5bb4cf86488b15983889058750f7b8fa1d33 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/polynomial/tests/test_legendre.py @@ -0,0 +1,568 @@ +"""Tests for legendre module. 
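+
+The reference arrays L0 through L9 below hold the power-basis coefficients
+of the first Legendre polynomials, generated by Bonnet's recurrence
+(n + 1)*P_{n+1}(x) = (2*n + 1)*x*P_n(x) - n*P_{n-1}(x).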
+ +""" +from functools import reduce + +import numpy as np +import numpy.polynomial.legendre as leg +from numpy.polynomial.polynomial import polyval +from numpy.testing import ( + assert_almost_equal, assert_raises, assert_equal, assert_, + ) + +L0 = np.array([1]) +L1 = np.array([0, 1]) +L2 = np.array([-1, 0, 3])/2 +L3 = np.array([0, -3, 0, 5])/2 +L4 = np.array([3, 0, -30, 0, 35])/8 +L5 = np.array([0, 15, 0, -70, 0, 63])/8 +L6 = np.array([-5, 0, 105, 0, -315, 0, 231])/16 +L7 = np.array([0, -35, 0, 315, 0, -693, 0, 429])/16 +L8 = np.array([35, 0, -1260, 0, 6930, 0, -12012, 0, 6435])/128 +L9 = np.array([0, 315, 0, -4620, 0, 18018, 0, -25740, 0, 12155])/128 + +Llist = [L0, L1, L2, L3, L4, L5, L6, L7, L8, L9] + + +def trim(x): + return leg.legtrim(x, tol=1e-6) + + +class TestConstants: + + def test_legdomain(self): + assert_equal(leg.legdomain, [-1, 1]) + + def test_legzero(self): + assert_equal(leg.legzero, [0]) + + def test_legone(self): + assert_equal(leg.legone, [1]) + + def test_legx(self): + assert_equal(leg.legx, [0, 1]) + + +class TestArithmetic: + x = np.linspace(-1, 1, 100) + + def test_legadd(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = leg.legadd([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_legsub(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = leg.legsub([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_legmulx(self): + assert_equal(leg.legmulx([0]), [0]) + assert_equal(leg.legmulx([1]), [0, 1]) + for i in range(1, 5): + tmp = 2*i + 1 + ser = [0]*i + [1] + tgt = [0]*(i - 1) + [i/tmp, 0, (i + 1)/tmp] + assert_equal(leg.legmulx(ser), tgt) + + def test_legmul(self): + # check values of result + for i in range(5): + pol1 = [0]*i + [1] + val1 = leg.legval(self.x, pol1) + for j in range(5): + msg = f"At i={i}, j={j}" + pol2 = [0]*j + [1] + val2 = leg.legval(self.x, pol2) + pol3 = leg.legmul(pol1, pol2) + val3 = leg.legval(self.x, pol3) + assert_(len(pol3) == i + j + 1, msg) + assert_almost_equal(val3, val1*val2, err_msg=msg) + + def test_legdiv(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + ci = [0]*i + [1] + cj = [0]*j + [1] + tgt = leg.legadd(ci, cj) + quo, rem = leg.legdiv(tgt, ci) + res = leg.legadd(leg.legmul(quo, ci), rem) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_legpow(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + c = np.arange(i + 1) + tgt = reduce(leg.legmul, [c]*j, np.array([1])) + res = leg.legpow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation: + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([2., 2., 2.]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + y = polyval(x, [1., 2., 3.]) + + def test_legval(self): + #check empty input + assert_equal(leg.legval([], [1]).size, 0) + + #check normal input) + x = np.linspace(-1, 1) + y = [polyval(x, c) for c in Llist] + for i in range(10): + msg = f"At i={i}" + tgt = y[i] + res = leg.legval(x, [0]*i + [1]) + assert_almost_equal(res, tgt, err_msg=msg) + + #check that shape is preserved + for i in range(3): + dims = [2]*i + x = np.zeros(dims) + assert_equal(leg.legval(x, [1]).shape, dims) + assert_equal(leg.legval(x, [1, 0]).shape, 
dims) + assert_equal(leg.legval(x, [1, 0, 0]).shape, dims) + + def test_legval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, leg.legval2d, x1, x2[:2], self.c2d) + + #test values + tgt = y1*y2 + res = leg.legval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = leg.legval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_legval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, leg.legval3d, x1, x2, x3[:2], self.c3d) + + #test values + tgt = y1*y2*y3 + res = leg.legval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = leg.legval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_leggrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j->ij', y1, y2) + res = leg.leggrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = leg.leggrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3)*2) + + def test_leggrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = leg.leggrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = leg.leggrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)*3) + + +class TestIntegral: + + def test_legint(self): + # check exceptions + assert_raises(TypeError, leg.legint, [0], .5) + assert_raises(ValueError, leg.legint, [0], -1) + assert_raises(ValueError, leg.legint, [0], 1, [0, 0]) + assert_raises(ValueError, leg.legint, [0], lbnd=[0]) + assert_raises(ValueError, leg.legint, [0], scl=[0]) + assert_raises(TypeError, leg.legint, [0], axis=.5) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0]*(i - 2) + [1] + res = leg.legint([0], m=i, k=k) + assert_almost_equal(res, [0, 1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [1/scl] + legpol = leg.poly2leg(pol) + legint = leg.legint(legpol, m=1, k=[i]) + res = leg.leg2poly(legint) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + legpol = leg.poly2leg(pol) + legint = leg.legint(legpol, m=1, k=[i], lbnd=-1) + assert_almost_equal(leg.legval(-1, legint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [2/scl] + legpol = leg.poly2leg(pol) + legint = leg.legint(legpol, m=1, k=[i], scl=2) + res = leg.leg2poly(legint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = leg.legint(tgt, m=1) + res = leg.legint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = leg.legint(tgt, m=1, k=[k]) + res = leg.legint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = leg.legint(tgt, m=1, k=[k], lbnd=-1) + res = 
leg.legint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = leg.legint(tgt, m=1, k=[k], scl=2) + res = leg.legint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_legint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([leg.legint(c) for c in c2d.T]).T + res = leg.legint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([leg.legint(c) for c in c2d]) + res = leg.legint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([leg.legint(c, k=3) for c in c2d]) + res = leg.legint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + def test_legint_zerointord(self): + assert_equal(leg.legint((1, 2, 3), 0), (1, 2, 3)) + + +class TestDerivative: + + def test_legder(self): + # check exceptions + assert_raises(TypeError, leg.legder, [0], .5) + assert_raises(ValueError, leg.legder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0]*i + [1] + res = leg.legder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = leg.legder(leg.legint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = leg.legder(leg.legint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_legder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([leg.legder(c) for c in c2d.T]).T + res = leg.legder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([leg.legder(c) for c in c2d]) + res = leg.legder(c2d, axis=1) + assert_almost_equal(res, tgt) + + def test_legder_orderhigherthancoeff(self): + c = (1, 2, 3, 4) + assert_equal(leg.legder(c, 4), [0]) + +class TestVander: + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + + def test_legvander(self): + # check for 1d x + x = np.arange(3) + v = leg.legvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], leg.legval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = leg.legvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], leg.legval(x, coef)) + + def test_legvander2d(self): + # also tests polyval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = leg.legvander2d(x1, x2, [1, 2]) + tgt = leg.legval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = leg.legvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_legvander3d(self): + # also tests polyval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = leg.legvander3d(x1, x2, x3, [1, 2, 3]) + tgt = leg.legval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = leg.legvander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + def test_legvander_negdeg(self): + assert_raises(ValueError, leg.legvander, (1, 2, 3), -1) + + +class TestFitting: + + def test_legfit(self): + def 
f(x):
+            return x*(x - 1)*(x - 2)
+
+        def f2(x):
+            return x**4 + x**2 + 1
+
+        # Test exceptions
+        assert_raises(ValueError, leg.legfit, [1], [1], -1)
+        assert_raises(TypeError, leg.legfit, [[1]], [1], 0)
+        assert_raises(TypeError, leg.legfit, [], [1], 0)
+        assert_raises(TypeError, leg.legfit, [1], [[[1]]], 0)
+        assert_raises(TypeError, leg.legfit, [1, 2], [1], 0)
+        assert_raises(TypeError, leg.legfit, [1], [1, 2], 0)
+        assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[[1]])
+        assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[1, 1])
+        assert_raises(ValueError, leg.legfit, [1], [1], [-1,])
+        assert_raises(ValueError, leg.legfit, [1], [1], [2, -1, 6])
+        assert_raises(TypeError, leg.legfit, [1], [1], [])
+
+        # Test fit
+        x = np.linspace(0, 2)
+        y = f(x)
+        #
+        coef3 = leg.legfit(x, y, 3)
+        assert_equal(len(coef3), 4)
+        assert_almost_equal(leg.legval(x, coef3), y)
+        coef3 = leg.legfit(x, y, [0, 1, 2, 3])
+        assert_equal(len(coef3), 4)
+        assert_almost_equal(leg.legval(x, coef3), y)
+        #
+        coef4 = leg.legfit(x, y, 4)
+        assert_equal(len(coef4), 5)
+        assert_almost_equal(leg.legval(x, coef4), y)
+        coef4 = leg.legfit(x, y, [0, 1, 2, 3, 4])
+        assert_equal(len(coef4), 5)
+        assert_almost_equal(leg.legval(x, coef4), y)
+        # check things still work if deg is not in strictly increasing order
+        coef4 = leg.legfit(x, y, [2, 3, 4, 1, 0])
+        assert_equal(len(coef4), 5)
+        assert_almost_equal(leg.legval(x, coef4), y)
+        #
+        coef2d = leg.legfit(x, np.array([y, y]).T, 3)
+        assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
+        coef2d = leg.legfit(x, np.array([y, y]).T, [0, 1, 2, 3])
+        assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
+        # test weighting
+        w = np.zeros_like(x)
+        yw = y.copy()
+        w[1::2] = 1
+        yw[0::2] = 0
+        wcoef3 = leg.legfit(x, yw, 3, w=w)
+        assert_almost_equal(wcoef3, coef3)
+        wcoef3 = leg.legfit(x, yw, [0, 1, 2, 3], w=w)
+        assert_almost_equal(wcoef3, coef3)
+        #
+        wcoef2d = leg.legfit(x, np.array([yw, yw]).T, 3, w=w)
+        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
+        wcoef2d = leg.legfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
+        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
+        # test scaling with complex x values whose squares sum to zero
+        x = [1, 1j, -1, -1j]
+        assert_almost_equal(leg.legfit(x, x, 1), [0, 1])
+        assert_almost_equal(leg.legfit(x, x, [0, 1]), [0, 1])
+        # test fitting only even Legendre polynomials
+        x = np.linspace(-1, 1)
+        y = f2(x)
+        coef1 = leg.legfit(x, y, 4)
+        assert_almost_equal(leg.legval(x, coef1), y)
+        coef2 = leg.legfit(x, y, [0, 2, 4])
+        assert_almost_equal(leg.legval(x, coef2), y)
+        assert_almost_equal(coef1, coef2)
+
+
+class TestCompanion:
+
+    def test_raises(self):
+        assert_raises(ValueError, leg.legcompanion, [])
+        assert_raises(ValueError, leg.legcompanion, [1])
+
+    def test_dimensions(self):
+        for i in range(1, 5):
+            coef = [0]*i + [1]
+            assert_(leg.legcompanion(coef).shape == (i, i))
+
+    def test_linear_root(self):
+        assert_(leg.legcompanion([1, 2])[0, 0] == -.5)
+
+
+class TestGauss:
+
+    def test_100(self):
+        x, w = leg.leggauss(100)
+
+        # test orthogonality. Note that the results need to be normalized,
+        # otherwise the huge values that can arise from fast growing
+        # functions like Laguerre can be very confusing.
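+        # For Legendre the exact Gram matrix of integrals P_m(x)*P_n(x)
+        # over [-1, 1] is diagonal with entries 2/(2*n + 1); the rescaling
+        # by vd below removes that n-dependent factor before comparing
+        # against eye(100).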
+ v = leg.legvander(x, 99) + vv = np.dot(v.T * w, v) + vd = 1/np.sqrt(vv.diagonal()) + vv = vd[:, None] * vv * vd + assert_almost_equal(vv, np.eye(100)) + + # check that the integral of 1 is correct + tgt = 2.0 + assert_almost_equal(w.sum(), tgt) + + +class TestMisc: + + def test_legfromroots(self): + res = leg.legfromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) + pol = leg.legfromroots(roots) + res = leg.legval(roots, pol) + tgt = 0 + assert_(len(pol) == i + 1) + assert_almost_equal(leg.leg2poly(pol)[-1], 1) + assert_almost_equal(res, tgt) + + def test_legroots(self): + assert_almost_equal(leg.legroots([1]), []) + assert_almost_equal(leg.legroots([1, 2]), [-.5]) + for i in range(2, 5): + tgt = np.linspace(-1, 1, i) + res = leg.legroots(leg.legfromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_legtrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, leg.legtrim, coef, -1) + + # Test results + assert_equal(leg.legtrim(coef), coef[:-1]) + assert_equal(leg.legtrim(coef, 1), coef[:-3]) + assert_equal(leg.legtrim(coef, 2), [0]) + + def test_legline(self): + assert_equal(leg.legline(3, 4), [3, 4]) + + def test_legline_zeroscl(self): + assert_equal(leg.legline(3, 0), [3]) + + def test_leg2poly(self): + for i in range(10): + assert_almost_equal(leg.leg2poly([0]*i + [1]), Llist[i]) + + def test_poly2leg(self): + for i in range(10): + assert_almost_equal(leg.poly2leg(Llist[i]), [0]*i + [1]) + + def test_weight(self): + x = np.linspace(-1, 1, 11) + tgt = 1. + res = leg.legweight(x) + assert_almost_equal(res, tgt) diff --git a/MLPY/Lib/site-packages/numpy/polynomial/tests/test_polynomial.py b/MLPY/Lib/site-packages/numpy/polynomial/tests/test_polynomial.py new file mode 100644 index 0000000000000000000000000000000000000000..c2a47269f1f1b36980299f0864905b31902b664d --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/polynomial/tests/test_polynomial.py @@ -0,0 +1,600 @@ +"""Tests for polynomial module. 
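+
+The reference arrays T0 through T9 below hold the power-basis coefficients
+of the Chebyshev polynomials of the first kind; test_polyfromroots builds
+on the fact that T_n has leading coefficient 2**(n - 1) for n >= 1.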
+ +""" +from functools import reduce + +import numpy as np +import numpy.polynomial.polynomial as poly +from numpy.testing import ( + assert_almost_equal, assert_raises, assert_equal, assert_, + assert_warns, assert_array_equal, assert_raises_regex) + + +def trim(x): + return poly.polytrim(x, tol=1e-6) + +T0 = [1] +T1 = [0, 1] +T2 = [-1, 0, 2] +T3 = [0, -3, 0, 4] +T4 = [1, 0, -8, 0, 8] +T5 = [0, 5, 0, -20, 0, 16] +T6 = [-1, 0, 18, 0, -48, 0, 32] +T7 = [0, -7, 0, 56, 0, -112, 0, 64] +T8 = [1, 0, -32, 0, 160, 0, -256, 0, 128] +T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256] + +Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9] + + +class TestConstants: + + def test_polydomain(self): + assert_equal(poly.polydomain, [-1, 1]) + + def test_polyzero(self): + assert_equal(poly.polyzero, [0]) + + def test_polyone(self): + assert_equal(poly.polyone, [1]) + + def test_polyx(self): + assert_equal(poly.polyx, [0, 1]) + + +class TestArithmetic: + + def test_polyadd(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = poly.polyadd([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_polysub(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = poly.polysub([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_polymulx(self): + assert_equal(poly.polymulx([0]), [0]) + assert_equal(poly.polymulx([1]), [0, 1]) + for i in range(1, 5): + ser = [0]*i + [1] + tgt = [0]*(i + 1) + [1] + assert_equal(poly.polymulx(ser), tgt) + + def test_polymul(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(i + j + 1) + tgt[i + j] += 1 + res = poly.polymul([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_polydiv(self): + # check zero division + assert_raises(ZeroDivisionError, poly.polydiv, [1], [0]) + + # check scalar division + quo, rem = poly.polydiv([2], [2]) + assert_equal((quo, rem), (1, 0)) + quo, rem = poly.polydiv([2, 2], [2]) + assert_equal((quo, rem), ((1, 1), 0)) + + # check rest. 
+ for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + ci = [0]*i + [1, 2] + cj = [0]*j + [1, 2] + tgt = poly.polyadd(ci, cj) + quo, rem = poly.polydiv(tgt, ci) + res = poly.polyadd(poly.polymul(quo, ci), rem) + assert_equal(res, tgt, err_msg=msg) + + def test_polypow(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + c = np.arange(i + 1) + tgt = reduce(poly.polymul, [c]*j, np.array([1])) + res = poly.polypow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation: + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([1., 2., 3.]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + y = poly.polyval(x, [1., 2., 3.]) + + def test_polyval(self): + #check empty input + assert_equal(poly.polyval([], [1]).size, 0) + + #check normal input) + x = np.linspace(-1, 1) + y = [x**i for i in range(5)] + for i in range(5): + tgt = y[i] + res = poly.polyval(x, [0]*i + [1]) + assert_almost_equal(res, tgt) + tgt = x*(x**2 - 1) + res = poly.polyval(x, [0, -1, 0, 1]) + assert_almost_equal(res, tgt) + + #check that shape is preserved + for i in range(3): + dims = [2]*i + x = np.zeros(dims) + assert_equal(poly.polyval(x, [1]).shape, dims) + assert_equal(poly.polyval(x, [1, 0]).shape, dims) + assert_equal(poly.polyval(x, [1, 0, 0]).shape, dims) + + #check masked arrays are processed correctly + mask = [False, True, False] + mx = np.ma.array([1, 2, 3], mask=mask) + res = np.polyval([7, 5, 3], mx) + assert_array_equal(res.mask, mask) + + #check subtypes of ndarray are preserved + class C(np.ndarray): + pass + + cx = np.array([1, 2, 3]).view(C) + assert_equal(type(np.polyval([2, 3, 4], cx)), C) + + def test_polyvalfromroots(self): + # check exception for broadcasting x values over root array with + # too few dimensions + assert_raises(ValueError, poly.polyvalfromroots, + [1], [1], tensor=False) + + # check empty input + assert_equal(poly.polyvalfromroots([], [1]).size, 0) + assert_(poly.polyvalfromroots([], [1]).shape == (0,)) + + # check empty input + multidimensional roots + assert_equal(poly.polyvalfromroots([], [[1] * 5]).size, 0) + assert_(poly.polyvalfromroots([], [[1] * 5]).shape == (5, 0)) + + # check scalar input + assert_equal(poly.polyvalfromroots(1, 1), 0) + assert_(poly.polyvalfromroots(1, np.ones((3, 3))).shape == (3,)) + + # check normal input) + x = np.linspace(-1, 1) + y = [x**i for i in range(5)] + for i in range(1, 5): + tgt = y[i] + res = poly.polyvalfromroots(x, [0]*i) + assert_almost_equal(res, tgt) + tgt = x*(x - 1)*(x + 1) + res = poly.polyvalfromroots(x, [-1, 0, 1]) + assert_almost_equal(res, tgt) + + # check that shape is preserved + for i in range(3): + dims = [2]*i + x = np.zeros(dims) + assert_equal(poly.polyvalfromroots(x, [1]).shape, dims) + assert_equal(poly.polyvalfromroots(x, [1, 0]).shape, dims) + assert_equal(poly.polyvalfromroots(x, [1, 0, 0]).shape, dims) + + # check compatibility with factorization + ptest = [15, 2, -16, -2, 1] + r = poly.polyroots(ptest) + x = np.linspace(-1, 1) + assert_almost_equal(poly.polyval(x, ptest), + poly.polyvalfromroots(x, r)) + + # check multidimensional arrays of roots and values + # check tensor=False + rshape = (3, 5) + x = np.arange(-3, 2) + r = np.random.randint(-5, 5, size=rshape) + res = poly.polyvalfromroots(x, r, tensor=False) + tgt = np.empty(r.shape[1:]) + for ii in range(tgt.size): + tgt[ii] = poly.polyvalfromroots(x[ii], r[:, ii]) + assert_equal(res, tgt) + + 
# check tensor=True + x = np.vstack([x, 2*x]) + res = poly.polyvalfromroots(x, r, tensor=True) + tgt = np.empty(r.shape[1:] + x.shape) + for ii in range(r.shape[1]): + for jj in range(x.shape[0]): + tgt[ii, jj, :] = poly.polyvalfromroots(x[jj], r[:, ii]) + assert_equal(res, tgt) + + def test_polyval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises_regex(ValueError, 'incompatible', + poly.polyval2d, x1, x2[:2], self.c2d) + + #test values + tgt = y1*y2 + res = poly.polyval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = poly.polyval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_polyval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises_regex(ValueError, 'incompatible', + poly.polyval3d, x1, x2, x3[:2], self.c3d) + + #test values + tgt = y1*y2*y3 + res = poly.polyval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = poly.polyval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_polygrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j->ij', y1, y2) + res = poly.polygrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = poly.polygrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3)*2) + + def test_polygrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = poly.polygrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = poly.polygrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)*3) + + +class TestIntegral: + + def test_polyint(self): + # check exceptions + assert_raises(TypeError, poly.polyint, [0], .5) + assert_raises(ValueError, poly.polyint, [0], -1) + assert_raises(ValueError, poly.polyint, [0], 1, [0, 0]) + assert_raises(ValueError, poly.polyint, [0], lbnd=[0]) + assert_raises(ValueError, poly.polyint, [0], scl=[0]) + assert_raises(TypeError, poly.polyint, [0], axis=.5) + with assert_warns(DeprecationWarning): + poly.polyint([1, 1], 1.) 
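+        # (the integration order m must be an integer; a float such as 1.
+        # is deprecated and only warns, as checked just above)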
+ + # test integration of zero polynomial + for i in range(2, 5): + k = [0]*(i - 2) + [1] + res = poly.polyint([0], m=i, k=k) + assert_almost_equal(res, [0, 1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [1/scl] + res = poly.polyint(pol, m=1, k=[i]) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + res = poly.polyint(pol, m=1, k=[i], lbnd=-1) + assert_almost_equal(poly.polyval(-1, res), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [2/scl] + res = poly.polyint(pol, m=1, k=[i], scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = poly.polyint(tgt, m=1) + res = poly.polyint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = poly.polyint(tgt, m=1, k=[k]) + res = poly.polyint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = poly.polyint(tgt, m=1, k=[k], lbnd=-1) + res = poly.polyint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = poly.polyint(tgt, m=1, k=[k], scl=2) + res = poly.polyint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_polyint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([poly.polyint(c) for c in c2d.T]).T + res = poly.polyint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([poly.polyint(c) for c in c2d]) + res = poly.polyint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([poly.polyint(c, k=3) for c in c2d]) + res = poly.polyint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative: + + def test_polyder(self): + # check exceptions + assert_raises(TypeError, poly.polyder, [0], .5) + assert_raises(ValueError, poly.polyder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0]*i + [1] + res = poly.polyder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = poly.polyder(poly.polyint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = poly.polyder(poly.polyint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_polyder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([poly.polyder(c) for c in c2d.T]).T + res = poly.polyder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([poly.polyder(c) for c in c2d]) + res = poly.polyder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class 
TestVander: + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + + def test_polyvander(self): + # check for 1d x + x = np.arange(3) + v = poly.polyvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], poly.polyval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = poly.polyvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], poly.polyval(x, coef)) + + def test_polyvander2d(self): + # also tests polyval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = poly.polyvander2d(x1, x2, [1, 2]) + tgt = poly.polyval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = poly.polyvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_polyvander3d(self): + # also tests polyval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = poly.polyvander3d(x1, x2, x3, [1, 2, 3]) + tgt = poly.polyval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = poly.polyvander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + def test_polyvandernegdeg(self): + x = np.arange(3) + assert_raises(ValueError, poly.polyvander, x, -1) + + +class TestCompanion: + + def test_raises(self): + assert_raises(ValueError, poly.polycompanion, []) + assert_raises(ValueError, poly.polycompanion, [1]) + + def test_dimensions(self): + for i in range(1, 5): + coef = [0]*i + [1] + assert_(poly.polycompanion(coef).shape == (i, i)) + + def test_linear_root(self): + assert_(poly.polycompanion([1, 2])[0, 0] == -.5) + + +class TestMisc: + + def test_polyfromroots(self): + res = poly.polyfromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) + tgt = Tlist[i] + res = poly.polyfromroots(roots)*2**(i-1) + assert_almost_equal(trim(res), trim(tgt)) + + def test_polyroots(self): + assert_almost_equal(poly.polyroots([1]), []) + assert_almost_equal(poly.polyroots([1, 2]), [-.5]) + for i in range(2, 5): + tgt = np.linspace(-1, 1, i) + res = poly.polyroots(poly.polyfromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_polyfit(self): + def f(x): + return x*(x - 1)*(x - 2) + + def f2(x): + return x**4 + x**2 + 1 + + # Test exceptions + assert_raises(ValueError, poly.polyfit, [1], [1], -1) + assert_raises(TypeError, poly.polyfit, [[1]], [1], 0) + assert_raises(TypeError, poly.polyfit, [], [1], 0) + assert_raises(TypeError, poly.polyfit, [1], [[[1]]], 0) + assert_raises(TypeError, poly.polyfit, [1, 2], [1], 0) + assert_raises(TypeError, poly.polyfit, [1], [1, 2], 0) + assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, poly.polyfit, [1], [1], [-1,]) + assert_raises(ValueError, poly.polyfit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, poly.polyfit, [1], [1], []) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = poly.polyfit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(poly.polyval(x, coef3), y) + coef3 = poly.polyfit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(poly.polyval(x, coef3), y) + # + coef4 = poly.polyfit(x, y, 4) + assert_equal(len(coef4), 5) + assert_almost_equal(poly.polyval(x, coef4), y) + coef4 = 
poly.polyfit(x, y, [0, 1, 2, 3, 4])
+        assert_equal(len(coef4), 5)
+        assert_almost_equal(poly.polyval(x, coef4), y)
+        #
+        coef2d = poly.polyfit(x, np.array([y, y]).T, 3)
+        assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
+        coef2d = poly.polyfit(x, np.array([y, y]).T, [0, 1, 2, 3])
+        assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
+        # test weighting
+        w = np.zeros_like(x)
+        yw = y.copy()
+        w[1::2] = 1
+        yw[0::2] = 0
+        wcoef3 = poly.polyfit(x, yw, 3, w=w)
+        assert_almost_equal(wcoef3, coef3)
+        wcoef3 = poly.polyfit(x, yw, [0, 1, 2, 3], w=w)
+        assert_almost_equal(wcoef3, coef3)
+        #
+        wcoef2d = poly.polyfit(x, np.array([yw, yw]).T, 3, w=w)
+        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
+        wcoef2d = poly.polyfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
+        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
+        # test scaling with complex x values whose squares sum to zero
+        x = [1, 1j, -1, -1j]
+        assert_almost_equal(poly.polyfit(x, x, 1), [0, 1])
+        assert_almost_equal(poly.polyfit(x, x, [0, 1]), [0, 1])
+        # test fitting only even polynomials
+        x = np.linspace(-1, 1)
+        y = f2(x)
+        coef1 = poly.polyfit(x, y, 4)
+        assert_almost_equal(poly.polyval(x, coef1), y)
+        coef2 = poly.polyfit(x, y, [0, 2, 4])
+        assert_almost_equal(poly.polyval(x, coef2), y)
+        assert_almost_equal(coef1, coef2)
+
+    def test_polytrim(self):
+        coef = [2, -1, 1, 0]
+
+        # Test exceptions
+        assert_raises(ValueError, poly.polytrim, coef, -1)
+
+        # Test results
+        assert_equal(poly.polytrim(coef), coef[:-1])
+        assert_equal(poly.polytrim(coef, 1), coef[:-3])
+        assert_equal(poly.polytrim(coef, 2), [0])
+
+    def test_polyline(self):
+        assert_equal(poly.polyline(3, 4), [3, 4])
+
+    def test_polyline_zero(self):
+        assert_equal(poly.polyline(3, 0), [3])
diff --git a/MLPY/Lib/site-packages/numpy/polynomial/tests/test_polyutils.py b/MLPY/Lib/site-packages/numpy/polynomial/tests/test_polyutils.py
new file mode 100644
index 0000000000000000000000000000000000000000..183769c5eac96899b5d9a73f40ed22b38b51ef8d
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/polynomial/tests/test_polyutils.py
@@ -0,0 +1,121 @@
+"""Tests for polyutils module.
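+
+These exercise the shared helpers: coefficient trimming, coercion through
+as_series, and the affine domain maps (mapparms returns the offset and
+scale of the map carrying the old domain onto the new one).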
+ +""" +import numpy as np +import numpy.polynomial.polyutils as pu +from numpy.testing import ( + assert_almost_equal, assert_raises, assert_equal, assert_, + ) + + +class TestMisc: + + def test_trimseq(self): + for i in range(5): + tgt = [1] + res = pu.trimseq([1] + [0]*5) + assert_equal(res, tgt) + + def test_as_series(self): + # check exceptions + assert_raises(ValueError, pu.as_series, [[]]) + assert_raises(ValueError, pu.as_series, [[[1, 2]]]) + assert_raises(ValueError, pu.as_series, [[1], ['a']]) + # check common types + types = ['i', 'd', 'O'] + for i in range(len(types)): + for j in range(i): + ci = np.ones(1, types[i]) + cj = np.ones(1, types[j]) + [resi, resj] = pu.as_series([ci, cj]) + assert_(resi.dtype.char == resj.dtype.char) + assert_(resj.dtype.char == types[i]) + + def test_trimcoef(self): + coef = [2, -1, 1, 0] + # Test exceptions + assert_raises(ValueError, pu.trimcoef, coef, -1) + # Test results + assert_equal(pu.trimcoef(coef), coef[:-1]) + assert_equal(pu.trimcoef(coef, 1), coef[:-3]) + assert_equal(pu.trimcoef(coef, 2), [0]) + + def test_vander_nd_exception(self): + # n_dims != len(points) + assert_raises(ValueError, pu._vander_nd, (), (1, 2, 3), [90]) + # n_dims != len(degrees) + assert_raises(ValueError, pu._vander_nd, (), (), [90.65]) + # n_dims == 0 + assert_raises(ValueError, pu._vander_nd, (), (), []) + + def test_div_zerodiv(self): + # c2[-1] == 0 + assert_raises(ZeroDivisionError, pu._div, pu._div, (1, 2, 3), [0]) + + def test_pow_too_large(self): + # power > maxpower + assert_raises(ValueError, pu._pow, (), [1, 2, 3], 5, 4) + +class TestDomain: + + def test_getdomain(self): + # test for real values + x = [1, 10, 3, -1] + tgt = [-1, 10] + res = pu.getdomain(x) + assert_almost_equal(res, tgt) + + # test for complex values + x = [1 + 1j, 1 - 1j, 0, 2] + tgt = [-1j, 2 + 1j] + res = pu.getdomain(x) + assert_almost_equal(res, tgt) + + def test_mapdomain(self): + # test for real values + dom1 = [0, 4] + dom2 = [1, 3] + tgt = dom2 + res = pu.mapdomain(dom1, dom1, dom2) + assert_almost_equal(res, tgt) + + # test for complex values + dom1 = [0 - 1j, 2 + 1j] + dom2 = [-2, 2] + tgt = dom2 + x = dom1 + res = pu.mapdomain(x, dom1, dom2) + assert_almost_equal(res, tgt) + + # test for multidimensional arrays + dom1 = [0, 4] + dom2 = [1, 3] + tgt = np.array([dom2, dom2]) + x = np.array([dom1, dom1]) + res = pu.mapdomain(x, dom1, dom2) + assert_almost_equal(res, tgt) + + # test that subtypes are preserved. + class MyNDArray(np.ndarray): + pass + + dom1 = [0, 4] + dom2 = [1, 3] + x = np.array([dom1, dom1]).view(MyNDArray) + res = pu.mapdomain(x, dom1, dom2) + assert_(isinstance(res, MyNDArray)) + + def test_mapparms(self): + # test for real values + dom1 = [0, 4] + dom2 = [1, 3] + tgt = [1, .5] + res = pu. 
mapparms(dom1, dom2) + assert_almost_equal(res, tgt) + + # test for complex values + dom1 = [0 - 1j, 2 + 1j] + dom2 = [-2, 2] + tgt = [-1 + 1j, 1 - 1j] + res = pu.mapparms(dom1, dom2) + assert_almost_equal(res, tgt) diff --git a/MLPY/Lib/site-packages/numpy/polynomial/tests/test_printing.py b/MLPY/Lib/site-packages/numpy/polynomial/tests/test_printing.py new file mode 100644 index 0000000000000000000000000000000000000000..640e79fcb744dd18903577a7edb484233c74a02c --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/polynomial/tests/test_printing.py @@ -0,0 +1,390 @@ +import pytest +from numpy.core import array, arange, printoptions +import numpy.polynomial as poly +from numpy.testing import assert_equal, assert_ + +# For testing polynomial printing with object arrays +from fractions import Fraction +from decimal import Decimal + + +class TestStrUnicodeSuperSubscripts: + + @pytest.fixture(scope='class', autouse=True) + def use_unicode(self): + poly.set_default_printstyle('unicode') + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0·x¹ + 3.0·x²"), + ([-1, 0, 3, -1], "-1.0 + 0.0·x¹ + 3.0·x² - 1.0·x³"), + (arange(12), ("0.0 + 1.0·x¹ + 2.0·x² + 3.0·x³ + 4.0·x⁴ + 5.0·x⁵ + " + "6.0·x⁶ + 7.0·x⁷ +\n8.0·x⁸ + 9.0·x⁹ + 10.0·x¹⁰ + " + "11.0·x¹¹")), + )) + def test_polynomial_str(self, inp, tgt): + res = str(poly.Polynomial(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0·T₁(x) + 3.0·T₂(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0·T₁(x) + 3.0·T₂(x) - 1.0·T₃(x)"), + (arange(12), ("0.0 + 1.0·T₁(x) + 2.0·T₂(x) + 3.0·T₃(x) + 4.0·T₄(x) + " + "5.0·T₅(x) +\n6.0·T₆(x) + 7.0·T₇(x) + 8.0·T₈(x) + " + "9.0·T₉(x) + 10.0·T₁₀(x) + 11.0·T₁₁(x)")), + )) + def test_chebyshev_str(self, inp, tgt): + res = str(poly.Chebyshev(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0·P₁(x) + 3.0·P₂(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0·P₁(x) + 3.0·P₂(x) - 1.0·P₃(x)"), + (arange(12), ("0.0 + 1.0·P₁(x) + 2.0·P₂(x) + 3.0·P₃(x) + 4.0·P₄(x) + " + "5.0·P₅(x) +\n6.0·P₆(x) + 7.0·P₇(x) + 8.0·P₈(x) + " + "9.0·P₉(x) + 10.0·P₁₀(x) + 11.0·P₁₁(x)")), + )) + def test_legendre_str(self, inp, tgt): + res = str(poly.Legendre(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0·H₁(x) + 3.0·H₂(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0·H₁(x) + 3.0·H₂(x) - 1.0·H₃(x)"), + (arange(12), ("0.0 + 1.0·H₁(x) + 2.0·H₂(x) + 3.0·H₃(x) + 4.0·H₄(x) + " + "5.0·H₅(x) +\n6.0·H₆(x) + 7.0·H₇(x) + 8.0·H₈(x) + " + "9.0·H₉(x) + 10.0·H₁₀(x) + 11.0·H₁₁(x)")), + )) + def test_hermite_str(self, inp, tgt): + res = str(poly.Hermite(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0·He₁(x) + 3.0·He₂(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0·He₁(x) + 3.0·He₂(x) - 1.0·He₃(x)"), + (arange(12), ("0.0 + 1.0·He₁(x) + 2.0·He₂(x) + 3.0·He₃(x) + " + "4.0·He₄(x) + 5.0·He₅(x) +\n6.0·He₆(x) + 7.0·He₇(x) + " + "8.0·He₈(x) + 9.0·He₉(x) + 10.0·He₁₀(x) +\n" + "11.0·He₁₁(x)")), + )) + def test_hermiteE_str(self, inp, tgt): + res = str(poly.HermiteE(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0·L₁(x) + 3.0·L₂(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0·L₁(x) + 3.0·L₂(x) - 1.0·L₃(x)"), + (arange(12), ("0.0 + 1.0·L₁(x) + 2.0·L₂(x) + 3.0·L₃(x) + 4.0·L₄(x) + " + "5.0·L₅(x) +\n6.0·L₆(x) + 7.0·L₇(x) + 8.0·L₈(x) + " + "9.0·L₉(x) + 10.0·L₁₀(x) + 11.0·L₁₁(x)")), + )) + def test_laguerre_str(self, inp, tgt): + res = str(poly.Laguerre(inp)) + 
assert_equal(res, tgt) + + +class TestStrAscii: + + @pytest.fixture(scope='class', autouse=True) + def use_ascii(self): + poly.set_default_printstyle('ascii') + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0 x**1 + 3.0 x**2"), + ([-1, 0, 3, -1], "-1.0 + 0.0 x**1 + 3.0 x**2 - 1.0 x**3"), + (arange(12), ("0.0 + 1.0 x**1 + 2.0 x**2 + 3.0 x**3 + 4.0 x**4 + " + "5.0 x**5 + 6.0 x**6 +\n7.0 x**7 + 8.0 x**8 + " + "9.0 x**9 + 10.0 x**10 + 11.0 x**11")), + )) + def test_polynomial_str(self, inp, tgt): + res = str(poly.Polynomial(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0 T_1(x) + 3.0 T_2(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0 T_1(x) + 3.0 T_2(x) - 1.0 T_3(x)"), + (arange(12), ("0.0 + 1.0 T_1(x) + 2.0 T_2(x) + 3.0 T_3(x) + " + "4.0 T_4(x) + 5.0 T_5(x) +\n6.0 T_6(x) + 7.0 T_7(x) + " + "8.0 T_8(x) + 9.0 T_9(x) + 10.0 T_10(x) +\n" + "11.0 T_11(x)")), + )) + def test_chebyshev_str(self, inp, tgt): + res = str(poly.Chebyshev(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0 P_1(x) + 3.0 P_2(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0 P_1(x) + 3.0 P_2(x) - 1.0 P_3(x)"), + (arange(12), ("0.0 + 1.0 P_1(x) + 2.0 P_2(x) + 3.0 P_3(x) + " + "4.0 P_4(x) + 5.0 P_5(x) +\n6.0 P_6(x) + 7.0 P_7(x) + " + "8.0 P_8(x) + 9.0 P_9(x) + 10.0 P_10(x) +\n" + "11.0 P_11(x)")), + )) + def test_legendre_str(self, inp, tgt): + res = str(poly.Legendre(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0 H_1(x) + 3.0 H_2(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0 H_1(x) + 3.0 H_2(x) - 1.0 H_3(x)"), + (arange(12), ("0.0 + 1.0 H_1(x) + 2.0 H_2(x) + 3.0 H_3(x) + " + "4.0 H_4(x) + 5.0 H_5(x) +\n6.0 H_6(x) + 7.0 H_7(x) + " + "8.0 H_8(x) + 9.0 H_9(x) + 10.0 H_10(x) +\n" + "11.0 H_11(x)")), + )) + def test_hermite_str(self, inp, tgt): + res = str(poly.Hermite(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0 He_1(x) + 3.0 He_2(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0 He_1(x) + 3.0 He_2(x) - 1.0 He_3(x)"), + (arange(12), ("0.0 + 1.0 He_1(x) + 2.0 He_2(x) + 3.0 He_3(x) + " + "4.0 He_4(x) +\n5.0 He_5(x) + 6.0 He_6(x) + " + "7.0 He_7(x) + 8.0 He_8(x) + 9.0 He_9(x) +\n" + "10.0 He_10(x) + 11.0 He_11(x)")), + )) + def test_hermiteE_str(self, inp, tgt): + res = str(poly.HermiteE(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0 L_1(x) + 3.0 L_2(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0 L_1(x) + 3.0 L_2(x) - 1.0 L_3(x)"), + (arange(12), ("0.0 + 1.0 L_1(x) + 2.0 L_2(x) + 3.0 L_3(x) + " + "4.0 L_4(x) + 5.0 L_5(x) +\n6.0 L_6(x) + 7.0 L_7(x) + " + "8.0 L_8(x) + 9.0 L_9(x) + 10.0 L_10(x) +\n" + "11.0 L_11(x)")), + )) + def test_laguerre_str(self, inp, tgt): + res = str(poly.Laguerre(inp)) + assert_equal(res, tgt) + + +class TestLinebreaking: + + @pytest.fixture(scope='class', autouse=True) + def use_ascii(self): + poly.set_default_printstyle('ascii') + + def test_single_line_one_less(self): + # With 'ascii' style, len(str(p)) is default linewidth - 1 (i.e. 
74) + p = poly.Polynomial([123456789, 123456789, 123456789, 1234, 1]) + assert_equal(len(str(p)), 74) + assert_equal(str(p), ( + '123456789.0 + 123456789.0 x**1 + 123456789.0 x**2 + ' + '1234.0 x**3 + 1.0 x**4' + )) + + def test_num_chars_is_linewidth(self): + # len(str(p)) == default linewidth == 75 + p = poly.Polynomial([123456789, 123456789, 123456789, 1234, 10]) + assert_equal(len(str(p)), 75) + assert_equal(str(p), ( + '123456789.0 + 123456789.0 x**1 + 123456789.0 x**2 + ' + '1234.0 x**3 +\n10.0 x**4' + )) + + def test_first_linebreak_multiline_one_less_than_linewidth(self): + # Multiline str where len(first_line) + len(next_term) == lw - 1 == 74 + p = poly.Polynomial( + [123456789, 123456789, 123456789, 12, 1, 123456789] + ) + assert_equal(len(str(p).split('\n')[0]), 74) + assert_equal(str(p), ( + '123456789.0 + 123456789.0 x**1 + 123456789.0 x**2 + ' + '12.0 x**3 + 1.0 x**4 +\n123456789.0 x**5' + )) + + def test_first_linebreak_multiline_on_linewidth(self): + # First line is one character longer than previous test + p = poly.Polynomial( + [123456789, 123456789, 123456789, 123, 1, 123456789] + ) + assert_equal(str(p), ( + '123456789.0 + 123456789.0 x**1 + 123456789.0 x**2 + ' + '123.0 x**3 +\n1.0 x**4 + 123456789.0 x**5' + )) + + @pytest.mark.parametrize(('lw', 'tgt'), ( + (75, ('0.0 + 10.0 x**1 + 200.0 x**2 + 3000.0 x**3 + 40000.0 x**4 +\n' + '500000.0 x**5 + 600000.0 x**6 + 70000.0 x**7 + 8000.0 x**8 + ' + '900.0 x**9')), + (45, ('0.0 + 10.0 x**1 + 200.0 x**2 + 3000.0 x**3 +\n40000.0 x**4 + ' + '500000.0 x**5 +\n600000.0 x**6 + 70000.0 x**7 + 8000.0 x**8 +\n' + '900.0 x**9')), + (132, ('0.0 + 10.0 x**1 + 200.0 x**2 + 3000.0 x**3 + 40000.0 x**4 + ' + '500000.0 x**5 + 600000.0 x**6 + 70000.0 x**7 + 8000.0 x**8 + ' + '900.0 x**9')), + )) + def test_linewidth_printoption(self, lw, tgt): + p = poly.Polynomial( + [0, 10, 200, 3000, 40000, 500000, 600000, 70000, 8000, 900] + ) + with printoptions(linewidth=lw): + assert_equal(str(p), tgt) + for line in str(p).split('\n'): + assert_(len(line) < lw) + + +def test_set_default_printoptions(): + p = poly.Polynomial([1, 2, 3]) + c = poly.Chebyshev([1, 2, 3]) + poly.set_default_printstyle('ascii') + assert_equal(str(p), "1.0 + 2.0 x**1 + 3.0 x**2") + assert_equal(str(c), "1.0 + 2.0 T_1(x) + 3.0 T_2(x)") + poly.set_default_printstyle('unicode') + assert_equal(str(p), "1.0 + 2.0·x¹ + 3.0·x²") + assert_equal(str(c), "1.0 + 2.0·T₁(x) + 3.0·T₂(x)") + with pytest.raises(ValueError): + poly.set_default_printstyle('invalid_input') + + +def test_complex_coefficients(): + """Test both numpy and built-in complex.""" + coefs = [0+1j, 1+1j, -2+2j, 3+0j] + # numpy complex + p1 = poly.Polynomial(coefs) + # Python complex + p2 = poly.Polynomial(array(coefs, dtype=object)) + poly.set_default_printstyle('unicode') + assert_equal(str(p1), "1j + (1+1j)·x¹ - (2-2j)·x² + (3+0j)·x³") + assert_equal(str(p2), "1j + (1+1j)·x¹ + (-2+2j)·x² + (3+0j)·x³") + poly.set_default_printstyle('ascii') + assert_equal(str(p1), "1j + (1+1j) x**1 - (2-2j) x**2 + (3+0j) x**3") + assert_equal(str(p2), "1j + (1+1j) x**1 + (-2+2j) x**2 + (3+0j) x**3") + + +@pytest.mark.parametrize(('coefs', 'tgt'), ( + (array([Fraction(1, 2), Fraction(3, 4)], dtype=object), ( + "1/2 + 3/4·x¹" + )), + (array([1, 2, Fraction(5, 7)], dtype=object), ( + "1 + 2·x¹ + 5/7·x²" + )), + (array([Decimal('1.00'), Decimal('2.2'), 3], dtype=object), ( + "1.00 + 2.2·x¹ + 3·x²" + )), +)) +def test_numeric_object_coefficients(coefs, tgt): + p = poly.Polynomial(coefs) + poly.set_default_printstyle('unicode') + 
assert_equal(str(p), tgt) + + +@pytest.mark.parametrize(('coefs', 'tgt'), ( + (array([1, 2, 'f'], dtype=object), '1 + 2·x¹ + f·x²'), + (array([1, 2, [3, 4]], dtype=object), '1 + 2·x¹ + [3, 4]·x²'), +)) +def test_nonnumeric_object_coefficients(coefs, tgt): + """ + Test coef fallback for object arrays of non-numeric coefficients. + """ + p = poly.Polynomial(coefs) + poly.set_default_printstyle('unicode') + assert_equal(str(p), tgt) + + +class TestFormat: + def test_format_unicode(self): + poly.set_default_printstyle('ascii') + p = poly.Polynomial([1, 2, 0, -1]) + assert_equal(format(p, 'unicode'), "1.0 + 2.0·x¹ + 0.0·x² - 1.0·x³") + + def test_format_ascii(self): + poly.set_default_printstyle('unicode') + p = poly.Polynomial([1, 2, 0, -1]) + assert_equal( + format(p, 'ascii'), "1.0 + 2.0 x**1 + 0.0 x**2 - 1.0 x**3" + ) + + def test_empty_formatstr(self): + poly.set_default_printstyle('ascii') + p = poly.Polynomial([1, 2, 3]) + assert_equal(format(p), "1.0 + 2.0 x**1 + 3.0 x**2") + assert_equal(f"{p}", "1.0 + 2.0 x**1 + 3.0 x**2") + + def test_bad_formatstr(self): + p = poly.Polynomial([1, 2, 0, -1]) + with pytest.raises(ValueError): + format(p, '.2f') + + +class TestRepr: + def test_polynomial_str(self): + res = repr(poly.Polynomial([0, 1])) + tgt = 'Polynomial([0., 1.], domain=[-1, 1], window=[-1, 1])' + assert_equal(res, tgt) + + def test_chebyshev_str(self): + res = repr(poly.Chebyshev([0, 1])) + tgt = 'Chebyshev([0., 1.], domain=[-1, 1], window=[-1, 1])' + assert_equal(res, tgt) + + def test_legendre_repr(self): + res = repr(poly.Legendre([0, 1])) + tgt = 'Legendre([0., 1.], domain=[-1, 1], window=[-1, 1])' + assert_equal(res, tgt) + + def test_hermite_repr(self): + res = repr(poly.Hermite([0, 1])) + tgt = 'Hermite([0., 1.], domain=[-1, 1], window=[-1, 1])' + assert_equal(res, tgt) + + def test_hermiteE_repr(self): + res = repr(poly.HermiteE([0, 1])) + tgt = 'HermiteE([0., 1.], domain=[-1, 1], window=[-1, 1])' + assert_equal(res, tgt) + + def test_laguerre_repr(self): + res = repr(poly.Laguerre([0, 1])) + tgt = 'Laguerre([0., 1.], domain=[0, 1], window=[0, 1])' + assert_equal(res, tgt) + + +class TestLatexRepr: + """Test the latex repr used by Jupyter""" + + def as_latex(self, obj): + # right now we ignore the formatting of scalars in our tests, since + # it makes them too verbose. 
Ideally, the formatting of scalars will + # be fixed such that tests below continue to pass + obj._repr_latex_scalar = lambda x: str(x) + try: + return obj._repr_latex_() + finally: + del obj._repr_latex_scalar + + def test_simple_polynomial(self): + # default input + p = poly.Polynomial([1, 2, 3]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0 + 2.0\,x + 3.0\,x^{2}$') + + # translated input + p = poly.Polynomial([1, 2, 3], domain=[-2, 0]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0 + 2.0\,\left(1.0 + x\right) + 3.0\,\left(1.0 + x\right)^{2}$') + + # scaled input + p = poly.Polynomial([1, 2, 3], domain=[-0.5, 0.5]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0 + 2.0\,\left(2.0x\right) + 3.0\,\left(2.0x\right)^{2}$') + + # affine input + p = poly.Polynomial([1, 2, 3], domain=[-1, 0]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0 + 2.0\,\left(1.0 + 2.0x\right) + 3.0\,\left(1.0 + 2.0x\right)^{2}$') + + def test_basis_func(self): + p = poly.Chebyshev([1, 2, 3]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0\,{T}_{0}(x) + 2.0\,{T}_{1}(x) + 3.0\,{T}_{2}(x)$') + # affine input - check no surplus parens are added + p = poly.Chebyshev([1, 2, 3], domain=[-1, 0]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0\,{T}_{0}(1.0 + 2.0x) + 2.0\,{T}_{1}(1.0 + 2.0x) + 3.0\,{T}_{2}(1.0 + 2.0x)$') + + def test_multichar_basis_func(self): + p = poly.HermiteE([1, 2, 3]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0\,{He}_{0}(x) + 2.0\,{He}_{1}(x) + 3.0\,{He}_{2}(x)$') diff --git a/MLPY/Lib/site-packages/numpy/py.typed b/MLPY/Lib/site-packages/numpy/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/numpy/random/__init__.pxd b/MLPY/Lib/site-packages/numpy/random/__init__.pxd new file mode 100644 index 0000000000000000000000000000000000000000..e281fdbde3b5560fb2b0fa9dae85358e8c129d95 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/__init__.pxd @@ -0,0 +1,14 @@ +cimport numpy as np +from libc.stdint cimport uint32_t, uint64_t + +cdef extern from "numpy/random/bitgen.h": + struct bitgen: + void *state + uint64_t (*next_uint64)(void *st) nogil + uint32_t (*next_uint32)(void *st) nogil + double (*next_double)(void *st) nogil + uint64_t (*next_raw)(void *st) nogil + + ctypedef bitgen bitgen_t + +from numpy.random.bit_generator cimport BitGenerator, SeedSequence diff --git a/MLPY/Lib/site-packages/numpy/random/__init__.py b/MLPY/Lib/site-packages/numpy/random/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..45e1557632415d569b408f75357e6cec58cc11fa --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/__init__.py @@ -0,0 +1,215 @@ +""" +======================== +Random Number Generation +======================== + +Use ``default_rng()`` to create a `Generator` and call its methods. 
+ +=============== ========================================================= +Generator +--------------- --------------------------------------------------------- +Generator Class implementing all of the random number distributions +default_rng Default constructor for ``Generator`` +=============== ========================================================= + +============================================= === +BitGenerator Streams that work with Generator +--------------------------------------------- --- +MT19937 +PCG64 +PCG64DXSM +Philox +SFC64 +============================================= === + +============================================= === +Getting entropy to initialize a BitGenerator +--------------------------------------------- --- +SeedSequence +============================================= === + + +Legacy +------ + +For backwards compatibility with versions of numpy before 1.17, the +various aliases to the global `RandomState` methods are left alone and do not +use the new `Generator` API. + +==================== ========================================================= +Utility functions +-------------------- --------------------------------------------------------- +random Uniformly distributed floats over ``[0, 1)`` +bytes Uniformly distributed random bytes. +permutation Randomly permute a sequence / generate a random sequence. +shuffle Randomly permute a sequence in place. +choice Random sample from 1-D array. +==================== ========================================================= + +==================== ========================================================= +Compatibility +functions - removed +in the new API +-------------------- --------------------------------------------------------- +rand Uniformly distributed values. +randn Normally distributed values. +ranf Uniformly distributed floating point numbers. +random_integers Uniformly distributed integers in a given range. + (deprecated, use ``integers(..., closed=True)`` instead) +random_sample Uniformly distributed floats over ``[0, 1)``. +randint Uniformly distributed integers in a given range. +seed Seed the legacy random number generator. +==================== ========================================================= + +==================== ========================================================= +Univariate +distributions +-------------------- --------------------------------------------------------- +beta Beta distribution over ``[0, 1]``. +binomial Binomial distribution. +chisquare :math:`\\chi^2` distribution. +exponential Exponential distribution. +f F (Fisher-Snedecor) distribution. +gamma Gamma distribution. +geometric Geometric distribution. +gumbel Gumbel distribution. +hypergeometric Hypergeometric distribution. +laplace Laplace distribution. +logistic Logistic distribution. +lognormal Log-normal distribution. +logseries Logarithmic series distribution. +negative_binomial Negative binomial distribution. +noncentral_chisquare Non-central chi-square distribution. +noncentral_f Non-central F distribution. +normal Normal / Gaussian distribution. +pareto Pareto distribution. +poisson Poisson distribution. +power Power distribution. +rayleigh Rayleigh distribution. +triangular Triangular distribution. +uniform Uniform distribution. +vonmises Von Mises circular distribution. +wald Wald (inverse Gaussian) distribution. +weibull Weibull distribution. +zipf Zipf's distribution over ranked data.
+==================== ========================================================= + +==================== ========================================================== +Multivariate +distributions +-------------------- ---------------------------------------------------------- +dirichlet Multivariate generalization of Beta distribution. +multinomial Multivariate generalization of the binomial distribution. +multivariate_normal Multivariate generalization of the normal distribution. +==================== ========================================================== + +==================== ========================================================= +Standard +distributions +-------------------- --------------------------------------------------------- +standard_cauchy Standard Cauchy-Lorentz distribution. +standard_exponential Standard exponential distribution. +standard_gamma Standard Gamma distribution. +standard_normal Standard normal distribution. +standard_t Standard Student's t-distribution. +==================== ========================================================= + +==================== ========================================================= +Internal functions +-------------------- --------------------------------------------------------- +get_state Get tuple representing internal state of generator. +set_state Set state of generator. +==================== ========================================================= + + +""" +__all__ = [ + 'beta', + 'binomial', + 'bytes', + 'chisquare', + 'choice', + 'dirichlet', + 'exponential', + 'f', + 'gamma', + 'geometric', + 'get_state', + 'gumbel', + 'hypergeometric', + 'laplace', + 'logistic', + 'lognormal', + 'logseries', + 'multinomial', + 'multivariate_normal', + 'negative_binomial', + 'noncentral_chisquare', + 'noncentral_f', + 'normal', + 'pareto', + 'permutation', + 'poisson', + 'power', + 'rand', + 'randint', + 'randn', + 'random', + 'random_integers', + 'random_sample', + 'ranf', + 'rayleigh', + 'sample', + 'seed', + 'set_state', + 'shuffle', + 'standard_cauchy', + 'standard_exponential', + 'standard_gamma', + 'standard_normal', + 'standard_t', + 'triangular', + 'uniform', + 'vonmises', + 'wald', + 'weibull', + 'zipf', +] + +# add these for module-freeze analysis (like PyInstaller) +from . import _pickle +from . import _common +from . import _bounded_integers + +from ._generator import Generator, default_rng +from .bit_generator import SeedSequence, BitGenerator +from ._mt19937 import MT19937 +from ._pcg64 import PCG64, PCG64DXSM +from ._philox import Philox +from ._sfc64 import SFC64 +from .mtrand import * + +__all__ += ['Generator', 'RandomState', 'SeedSequence', 'MT19937', + 'Philox', 'PCG64', 'PCG64DXSM', 'SFC64', 'default_rng', + 'BitGenerator'] + + +def __RandomState_ctor(): + """Return a RandomState instance. + + This function exists solely to assist (un)pickling. + + Note that the state of the RandomState returned here is irrelevant, as this + function's entire purpose is to return a newly allocated RandomState whose + state can then be set by pickle. Consequently the RandomState returned by this + function is a freshly allocated instance seeded with 0.
+ + See https://github.com/numpy/numpy/issues/4763 for a detailed discussion + + """ + return RandomState(seed=0) + + +from numpy._pytesttester import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/MLPY/Lib/site-packages/numpy/random/__init__.pyi b/MLPY/Lib/site-packages/numpy/random/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..e83bb5aa2dd0300fa90c68b8f68908cd3902cdba --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/__init__.pyi @@ -0,0 +1,68 @@ +from typing import List + +from numpy.random._generator import Generator as Generator +from numpy.random._generator import default_rng as default_rng +from numpy.random._mt19937 import MT19937 as MT19937 +from numpy.random._pcg64 import ( + PCG64 as PCG64, + PCG64DXSM as PCG64DXSM, +) +from numpy.random._philox import Philox as Philox +from numpy.random._sfc64 import SFC64 as SFC64 +from numpy.random.bit_generator import BitGenerator as BitGenerator +from numpy.random.bit_generator import SeedSequence as SeedSequence +from numpy.random.mtrand import ( + RandomState as RandomState, + beta as beta, + binomial as binomial, + bytes as bytes, + chisquare as chisquare, + choice as choice, + dirichlet as dirichlet, + exponential as exponential, + f as f, + gamma as gamma, + geometric as geometric, + get_state as get_state, + gumbel as gumbel, + hypergeometric as hypergeometric, + laplace as laplace, + logistic as logistic, + lognormal as lognormal, + logseries as logseries, + multinomial as multinomial, + multivariate_normal as multivariate_normal, + negative_binomial as negative_binomial, + noncentral_chisquare as noncentral_chisquare, + noncentral_f as noncentral_f, + normal as normal, + pareto as pareto, + permutation as permutation, + poisson as poisson, + power as power, + rand as rand, + randint as randint, + randn as randn, + random as random, + random_integers as random_integers, + random_sample as random_sample, + ranf as ranf, + rayleigh as rayleigh, + sample as sample, + seed as seed, + set_state as set_state, + shuffle as shuffle, + standard_cauchy as standard_cauchy, + standard_exponential as standard_exponential, + standard_gamma as standard_gamma, + standard_normal as standard_normal, + standard_t as standard_t, + triangular as triangular, + uniform as uniform, + vonmises as vonmises, + wald as wald, + weibull as weibull, + zipf as zipf, +) + +__all__: List[str] diff --git a/MLPY/Lib/site-packages/numpy/random/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/random/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..944307582b2f9bcefddeba1c19e9ea530d8756ad Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/random/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/random/__pycache__/_pickle.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/random/__pycache__/_pickle.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b62dcbf95103476f14e5a6517f1ead7d015f979 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/random/__pycache__/_pickle.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/random/__pycache__/setup.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/random/__pycache__/setup.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ddfaa26baa01c85a5ed5dcf900bd4235da3bdaa Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/random/__pycache__/setup.cpython-39.pyc differ diff 
--git a/MLPY/Lib/site-packages/numpy/random/_bounded_integers.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/numpy/random/_bounded_integers.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..b5029d68ae53bcc8ff1cc35c43d07098cae5b39c Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/random/_bounded_integers.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/numpy/random/_bounded_integers.pxd b/MLPY/Lib/site-packages/numpy/random/_bounded_integers.pxd new file mode 100644 index 0000000000000000000000000000000000000000..da6b4f4811a4d04896295558ef2cf86f740dbe10 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/_bounded_integers.pxd @@ -0,0 +1,29 @@ +from libc.stdint cimport (uint8_t, uint16_t, uint32_t, uint64_t, + int8_t, int16_t, int32_t, int64_t, intptr_t) +import numpy as np +cimport numpy as np +ctypedef np.npy_bool bool_t + +from numpy.random cimport bitgen_t + +cdef inline uint64_t _gen_mask(uint64_t max_val) nogil: + """Mask generator for use in bounded random numbers""" + # Smallest bit mask >= max + cdef uint64_t mask = max_val + mask |= mask >> 1 + mask |= mask >> 2 + mask |= mask >> 4 + mask |= mask >> 8 + mask |= mask >> 16 + mask |= mask >> 32 + return mask + +cdef object _rand_uint64(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_uint32(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_uint16(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_uint8(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_bool(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_int64(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_int32(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_int16(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_int8(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) diff --git a/MLPY/Lib/site-packages/numpy/random/_common.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/numpy/random/_common.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..2cd0ff3568c3b8d08c02797ce2c86762b4931a34 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/random/_common.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/numpy/random/_common.pxd b/MLPY/Lib/site-packages/numpy/random/_common.pxd new file mode 100644 index 0000000000000000000000000000000000000000..1264cd29872a585b9cc70ef486e24bac66d8c220 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/_common.pxd @@ -0,0 +1,106 @@ +#cython: language_level=3 + +from libc.stdint cimport uint32_t, uint64_t, int32_t, int64_t + +import numpy as np +cimport numpy as np + +from numpy.random cimport bitgen_t + +cdef double POISSON_LAM_MAX +cdef double LEGACY_POISSON_LAM_MAX +cdef uint64_t MAXSIZE + +cdef enum ConstraintType: + CONS_NONE + CONS_NON_NEGATIVE + CONS_POSITIVE + CONS_POSITIVE_NOT_NAN + CONS_BOUNDED_0_1 + CONS_BOUNDED_0_1_NOTNAN + CONS_BOUNDED_GT_0_1 + CONS_GT_1 + CONS_GTE_1 + CONS_POISSON + LEGACY_CONS_POISSON + +ctypedef ConstraintType constraint_type + +cdef 
object benchmark(bitgen_t *bitgen, object lock, Py_ssize_t cnt, object method) +cdef object random_raw(bitgen_t *bitgen, object lock, object size, object output) +cdef object prepare_cffi(bitgen_t *bitgen) +cdef object prepare_ctypes(bitgen_t *bitgen) +cdef int check_constraint(double val, object name, constraint_type cons) except -1 +cdef int check_array_constraint(np.ndarray val, object name, constraint_type cons) except -1 + +cdef extern from "include/aligned_malloc.h": + cdef void *PyArray_realloc_aligned(void *p, size_t n) + cdef void *PyArray_malloc_aligned(size_t n) + cdef void *PyArray_calloc_aligned(size_t n, size_t s) + cdef void PyArray_free_aligned(void *p) + +ctypedef void (*random_double_fill)(bitgen_t *state, np.npy_intp count, double* out) nogil +ctypedef double (*random_double_0)(void *state) nogil +ctypedef double (*random_double_1)(void *state, double a) nogil +ctypedef double (*random_double_2)(void *state, double a, double b) nogil +ctypedef double (*random_double_3)(void *state, double a, double b, double c) nogil + +ctypedef double (*random_float_fill)(bitgen_t *state, np.npy_intp count, float* out) nogil +ctypedef float (*random_float_0)(bitgen_t *state) nogil +ctypedef float (*random_float_1)(bitgen_t *state, float a) nogil + +ctypedef int64_t (*random_uint_0)(void *state) nogil +ctypedef int64_t (*random_uint_d)(void *state, double a) nogil +ctypedef int64_t (*random_uint_dd)(void *state, double a, double b) nogil +ctypedef int64_t (*random_uint_di)(void *state, double a, uint64_t b) nogil +ctypedef int64_t (*random_uint_i)(void *state, int64_t a) nogil +ctypedef int64_t (*random_uint_iii)(void *state, int64_t a, int64_t b, int64_t c) nogil + +ctypedef uint32_t (*random_uint_0_32)(bitgen_t *state) nogil +ctypedef uint32_t (*random_uint_1_i_32)(bitgen_t *state, uint32_t a) nogil + +ctypedef int32_t (*random_int_2_i_32)(bitgen_t *state, int32_t a, int32_t b) nogil +ctypedef int64_t (*random_int_2_i)(bitgen_t *state, int64_t a, int64_t b) nogil + +cdef double kahan_sum(double *darr, np.npy_intp n) + +cdef inline double uint64_to_double(uint64_t rnd) nogil: + return (rnd >> 11) * (1.0 / 9007199254740992.0) + +cdef object double_fill(void *func, bitgen_t *state, object size, object lock, object out) + +cdef object float_fill(void *func, bitgen_t *state, object size, object lock, object out) + +cdef object float_fill_from_double(void *func, bitgen_t *state, object size, object lock, object out) + +cdef object wrap_int(object val, object bits) + +cdef np.ndarray int_to_array(object value, object name, object bits, object uint_size) + +cdef validate_output_shape(iter_shape, np.ndarray output) + +cdef object cont(void *func, void *state, object size, object lock, int narg, + object a, object a_name, constraint_type a_constraint, + object b, object b_name, constraint_type b_constraint, + object c, object c_name, constraint_type c_constraint, + object out) + +cdef object disc(void *func, void *state, object size, object lock, + int narg_double, int narg_int64, + object a, object a_name, constraint_type a_constraint, + object b, object b_name, constraint_type b_constraint, + object c, object c_name, constraint_type c_constraint) + +cdef object cont_f(void *func, bitgen_t *state, object size, object lock, + object a, object a_name, constraint_type a_constraint, + object out) + +cdef object cont_broadcast_3(void *func, void *state, object size, object lock, + np.ndarray a_arr, object a_name, constraint_type a_constraint, + np.ndarray b_arr, object b_name, constraint_type 
b_constraint, + np.ndarray c_arr, object c_name, constraint_type c_constraint) + +cdef object discrete_broadcast_iii(void *func, void *state, object size, object lock, + np.ndarray a_arr, object a_name, constraint_type a_constraint, + np.ndarray b_arr, object b_name, constraint_type b_constraint, + np.ndarray c_arr, object c_name, constraint_type c_constraint) diff --git a/MLPY/Lib/site-packages/numpy/random/_examples/cffi/__pycache__/extending.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/random/_examples/cffi/__pycache__/extending.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c2969223bbf66984dfeb990e1d148f698fdcf18 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/random/_examples/cffi/__pycache__/extending.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/random/_examples/cffi/__pycache__/parse.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/random/_examples/cffi/__pycache__/parse.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd53b890ebba33d9ce232327bc757b7ea2cb4b9b Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/random/_examples/cffi/__pycache__/parse.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/random/_examples/cffi/extending.py b/MLPY/Lib/site-packages/numpy/random/_examples/cffi/extending.py new file mode 100644 index 0000000000000000000000000000000000000000..b8aca712bbd43fae2801bcbbbbb15ede95c9a0f1 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/_examples/cffi/extending.py @@ -0,0 +1,40 @@ +""" +Use cffi to access any of the underlying C functions from distributions.h +""" +import os +import numpy as np +import cffi +from .parse import parse_distributions_h +ffi = cffi.FFI() + +inc_dir = os.path.join(np.get_include(), 'numpy') + +# Basic numpy types +ffi.cdef(''' + typedef intptr_t npy_intp; + typedef unsigned char npy_bool; + +''') + +parse_distributions_h(ffi, inc_dir) + +lib = ffi.dlopen(np.random._generator.__file__) + +# Compare the distributions.h random_standard_normal_fill to +# Generator.standard_random +bit_gen = np.random.PCG64() +rng = np.random.Generator(bit_gen) +state = bit_gen.state + +interface = rng.bit_generator.cffi +n = 100 +vals_cffi = ffi.new('double[%d]' % n) +lib.random_standard_normal_fill(interface.bit_generator, n, vals_cffi) + +# reset the state +bit_gen.state = state + +vals = rng.standard_normal(n) + +for i in range(n): + assert vals[i] == vals_cffi[i] diff --git a/MLPY/Lib/site-packages/numpy/random/_examples/cffi/parse.py b/MLPY/Lib/site-packages/numpy/random/_examples/cffi/parse.py new file mode 100644 index 0000000000000000000000000000000000000000..aab92cf9ffc1a202f5f2dbd3c96e7394ce084fe7 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/_examples/cffi/parse.py @@ -0,0 +1,55 @@ +import os + + +def parse_distributions_h(ffi, inc_dir): + """ + Parse distributions.h located in inc_dir for CFFI, filling in the ffi.cdef + + Read the function declarations without the "#define ..." macros that will + be filled in when loading the library. 
+ """ + + with open(os.path.join(inc_dir, 'random', 'bitgen.h')) as fid: + s = [] + for line in fid: + # massage the include file + if line.strip().startswith('#'): + continue + s.append(line) + ffi.cdef('\n'.join(s)) + + with open(os.path.join(inc_dir, 'random', 'distributions.h')) as fid: + s = [] + in_skip = 0 + ignoring = False + for line in fid: + # check for and remove extern "C" guards + if ignoring: + if line.strip().startswith('#endif'): + ignoring = False + continue + if line.strip().startswith('#ifdef __cplusplus'): + ignoring = True + + # massage the include file + if line.strip().startswith('#'): + continue + + # skip any inlined function definition + # which starts with 'static NPY_INLINE xxx(...) {' + # and ends with a closing '}' + if line.strip().startswith('static NPY_INLINE'): + in_skip += line.count('{') + continue + elif in_skip > 0: + in_skip += line.count('{') + in_skip -= line.count('}') + continue + + # replace defines with their value or remove them + line = line.replace('DECLDIR', '') + line = line.replace('NPY_INLINE', '') + line = line.replace('RAND_INT_TYPE', 'int64_t') + s.append(line) + ffi.cdef('\n'.join(s)) + diff --git a/MLPY/Lib/site-packages/numpy/random/_examples/cython/__pycache__/setup.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/random/_examples/cython/__pycache__/setup.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d764a73c7c6c354ddd293b7a1665909dfe00c1c6 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/random/_examples/cython/__pycache__/setup.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/random/_examples/cython/extending.pyx b/MLPY/Lib/site-packages/numpy/random/_examples/cython/extending.pyx new file mode 100644 index 0000000000000000000000000000000000000000..d8d2af41485c44beda760b1f59c136421d7ee0df --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/_examples/cython/extending.pyx @@ -0,0 +1,78 @@ +#!/usr/bin/env python3 +#cython: language_level=3 + +from libc.stdint cimport uint32_t +from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer + +import numpy as np +cimport numpy as np +cimport cython + +from numpy.random cimport bitgen_t +from numpy.random import PCG64 + +np.import_array() + + +@cython.boundscheck(False) +@cython.wraparound(False) +def uniform_mean(Py_ssize_t n): + cdef Py_ssize_t i + cdef bitgen_t *rng + cdef const char *capsule_name = "BitGenerator" + cdef double[::1] random_values + cdef np.ndarray randoms + + x = PCG64() + capsule = x.capsule + if not PyCapsule_IsValid(capsule, capsule_name): + raise ValueError("Invalid pointer to anon_func_state") + rng = PyCapsule_GetPointer(capsule, capsule_name) + random_values = np.empty(n) + # Best practice is to acquire the lock whenever generating random values. + # This prevents other threads from modifying the state. Acquiring the lock + # is only necessary if if the GIL is also released, as in this example. 
+ with x.lock, nogil: + for i in range(n): + random_values[i] = rng.next_double(rng.state) + randoms = np.asarray(random_values) + return randoms.mean() + + +# This function is declared nogil so it can be used without the GIL below +cdef uint32_t bounded_uint(uint32_t lb, uint32_t ub, bitgen_t *rng) nogil: + cdef uint32_t mask, delta, val + mask = delta = ub - lb + mask |= mask >> 1 + mask |= mask >> 2 + mask |= mask >> 4 + mask |= mask >> 8 + mask |= mask >> 16 + + val = rng.next_uint32(rng.state) & mask + while val > delta: + val = rng.next_uint32(rng.state) & mask + + return lb + val + + +@cython.boundscheck(False) +@cython.wraparound(False) +def bounded_uints(uint32_t lb, uint32_t ub, Py_ssize_t n): + cdef Py_ssize_t i + cdef bitgen_t *rng + cdef uint32_t[::1] out + cdef const char *capsule_name = "BitGenerator" + + x = PCG64() + out = np.empty(n, dtype=np.uint32) + capsule = x.capsule + + if not PyCapsule_IsValid(capsule, capsule_name): + raise ValueError("Invalid pointer to anon_func_state") + rng = PyCapsule_GetPointer(capsule, capsule_name) + + with x.lock, nogil: + for i in range(n): + out[i] = bounded_uint(lb, ub, rng) + return np.asarray(out) diff --git a/MLPY/Lib/site-packages/numpy/random/_examples/cython/extending_distributions.pyx b/MLPY/Lib/site-packages/numpy/random/_examples/cython/extending_distributions.pyx new file mode 100644 index 0000000000000000000000000000000000000000..dffbdb2b75c3686d65413e9feadba451a4289e3a --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/_examples/cython/extending_distributions.pyx @@ -0,0 +1,117 @@ +#!/usr/bin/env python3 +#cython: language_level=3 +""" +This file shows how to use a BitGenerator to create a distribution. +""" +import numpy as np +cimport numpy as np +cimport cython +from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer +from libc.stdint cimport uint16_t, uint64_t +from numpy.random cimport bitgen_t +from numpy.random import PCG64 +from numpy.random.c_distributions cimport ( + random_standard_uniform_fill, random_standard_uniform_fill_f) + + +@cython.boundscheck(False) +@cython.wraparound(False) +def uniforms(Py_ssize_t n): + """ + Create an array of `n` uniformly distributed doubles.
+ A 'real' distribution would want to process the values into + some non-uniform distribution. + """ + cdef Py_ssize_t i + cdef bitgen_t *rng + cdef const char *capsule_name = "BitGenerator" + cdef double[::1] random_values + + x = PCG64() + capsule = x.capsule + # Optional check that the capsule is from a BitGenerator + if not PyCapsule_IsValid(capsule, capsule_name): + raise ValueError("Invalid pointer to anon_func_state") + # Cast the pointer + rng = PyCapsule_GetPointer(capsule, capsule_name) + random_values = np.empty(n, dtype='float64') + with x.lock, nogil: + for i in range(n): + # Call the function + random_values[i] = rng.next_double(rng.state) + randoms = np.asarray(random_values) + + return randoms + +# cython example 2 +@cython.boundscheck(False) +@cython.wraparound(False) +def uint10_uniforms(Py_ssize_t n): + """Uniform 10 bit integers stored as 16-bit unsigned integers""" + cdef Py_ssize_t i + cdef bitgen_t *rng + cdef const char *capsule_name = "BitGenerator" + cdef uint16_t[::1] random_values + cdef int bits_remaining + cdef int width = 10 + cdef uint64_t buff, mask = 0x3FF + + x = PCG64() + capsule = x.capsule + if not PyCapsule_IsValid(capsule, capsule_name): + raise ValueError("Invalid pointer to anon_func_state") + rng = PyCapsule_GetPointer(capsule, capsule_name) + random_values = np.empty(n, dtype='uint16') + # Best practice is to release the GIL and acquire the lock + bits_remaining = 0 + with x.lock, nogil: + for i in range(n): + # Refill the 64-bit buffer when too few bits remain, then + # consume `width` bits per output value. + if bits_remaining < width: + buff = rng.next_uint64(rng.state) + bits_remaining = 64 + random_values[i] = buff & mask + buff >>= width + bits_remaining -= width + + randoms = np.asarray(random_values) + return randoms + +# cython example 3 +def uniforms_ex(bit_generator, Py_ssize_t n, dtype=np.float64): + """ + Create an array of `n` uniformly distributed doubles via a "fill" function. + + A 'real' distribution would want to process the values into + some non-uniform distribution. + + Parameters + ---------- + bit_generator: BitGenerator instance + n: int + Output vector length + dtype: {str, dtype}, optional + Desired dtype, either 'd' (or 'float64') or 'f' (or 'float32').
The + default dtype value is 'd' + """ + cdef Py_ssize_t i + cdef bitgen_t *rng + cdef const char *capsule_name = "BitGenerator" + cdef np.ndarray randoms + + capsule = bit_generator.capsule + # Optional check that the capsule is from a BitGenerator + if not PyCapsule_IsValid(capsule, capsule_name): + raise ValueError("Invalid pointer to anon_func_state") + # Cast the pointer + rng = PyCapsule_GetPointer(capsule, capsule_name) + + _dtype = np.dtype(dtype) + randoms = np.empty(n, dtype=_dtype) + if _dtype == np.float32: + with bit_generator.lock: + random_standard_uniform_fill_f(rng, n, np.PyArray_DATA(randoms)) + elif _dtype == np.float64: + with bit_generator.lock: + random_standard_uniform_fill(rng, n, np.PyArray_DATA(randoms)) + else: + raise TypeError('Unsupported dtype %r for random' % _dtype) + return randoms + diff --git a/MLPY/Lib/site-packages/numpy/random/_examples/cython/setup.py b/MLPY/Lib/site-packages/numpy/random/_examples/cython/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..a9848f7c0f5607f6b67866afea6c69c97c7e23e3 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/_examples/cython/setup.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python3 +""" +Build the Cython demonstrations of low-level access to NumPy random + +Usage: python setup.py build_ext -i +""" +from distutils.core import setup +from os.path import dirname, join, abspath + +import numpy as np +from Cython.Build import cythonize +from numpy.distutils.misc_util import get_info +from setuptools.extension import Extension + +path = dirname(__file__) +src_dir = join(dirname(path), '..', 'src') +defs = [('NPY_NO_DEPRECATED_API', 0)] +inc_path = np.get_include() +lib_path = [abspath(join(np.get_include(), '..', '..', 'random', 'lib'))] +lib_path += get_info('npymath')['library_dirs'] + +extending = Extension("extending", + sources=[join('.', 'extending.pyx')], + include_dirs=[ + np.get_include(), + join(path, '..', '..') + ], + define_macros=defs, + ) +distributions = Extension("extending_distributions", + sources=[join('.', 'extending_distributions.pyx')], + include_dirs=[inc_path], + library_dirs=lib_path, + libraries=['npyrandom', 'npymath'], + define_macros=defs, + ) + +extensions = [extending, distributions] + +setup( + ext_modules=cythonize(extensions) +) diff --git a/MLPY/Lib/site-packages/numpy/random/_examples/numba/__pycache__/extending.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/random/_examples/numba/__pycache__/extending.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e89cd35bc920a164b854643cbca04a6ec8a48ab Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/random/_examples/numba/__pycache__/extending.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/random/_examples/numba/__pycache__/extending_distributions.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/random/_examples/numba/__pycache__/extending_distributions.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b8d5766eedaef8be33afcf47b34d48d91628e07 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/random/_examples/numba/__pycache__/extending_distributions.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/random/_examples/numba/extending.py b/MLPY/Lib/site-packages/numpy/random/_examples/numba/extending.py new file mode 100644 index 0000000000000000000000000000000000000000..ef1a0024f6e7ac4c04078b58f393f12c476330e8 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/_examples/numba/extending.py @@ -0,0 +1,84 @@
+import numpy as np +import numba as nb + +from numpy.random import PCG64 +from timeit import timeit + +bit_gen = PCG64() +next_d = bit_gen.cffi.next_double +state_addr = bit_gen.cffi.state_address + +def normals(n, state): + out = np.empty(n) + for i in range((n + 1) // 2): + x1 = 2.0 * next_d(state) - 1.0 + x2 = 2.0 * next_d(state) - 1.0 + r2 = x1 * x1 + x2 * x2 + while r2 >= 1.0 or r2 == 0.0: + x1 = 2.0 * next_d(state) - 1.0 + x2 = 2.0 * next_d(state) - 1.0 + r2 = x1 * x1 + x2 * x2 + f = np.sqrt(-2.0 * np.log(r2) / r2) + out[2 * i] = f * x1 + if 2 * i + 1 < n: + out[2 * i + 1] = f * x2 + return out + +# Compile using Numba +normalsj = nb.jit(normals, nopython=True) +# Must use the state address, not the state object, with Numba +n = 10000 + +def numbacall(): + return normalsj(n, state_addr) + +rg = np.random.Generator(PCG64()) + +def numpycall(): + return rg.normal(size=n) + +# Check that the functions work +r1 = numbacall() +r2 = numpycall() +assert r1.shape == (n,) +assert r1.shape == r2.shape + +t1 = timeit(numbacall, number=1000) +print(f'{t1:.2f} secs for {n} PCG64 (Numba/PCG64) gaussian randoms') +t2 = timeit(numpycall, number=1000) +print(f'{t2:.2f} secs for {n} PCG64 (NumPy/PCG64) gaussian randoms') + +# example 2 + +next_u32 = bit_gen.ctypes.next_uint32 +ctypes_state = bit_gen.ctypes.state + +@nb.jit(nopython=True) +def bounded_uint(lb, ub, state): + mask = delta = ub - lb + mask |= mask >> 1 + mask |= mask >> 2 + mask |= mask >> 4 + mask |= mask >> 8 + mask |= mask >> 16 + + val = next_u32(state) & mask + while val > delta: + val = next_u32(state) & mask + + return lb + val + + +print(bounded_uint(323, 2394691, ctypes_state.value)) + + +@nb.jit(nopython=True) +def bounded_uints(lb, ub, n, state): + out = np.empty(n, dtype=np.uint32) + for i in range(n): + out[i] = bounded_uint(lb, ub, state) + return out + + +bounded_uints(323, 2394691, 10000000, ctypes_state.value) + + diff --git a/MLPY/Lib/site-packages/numpy/random/_examples/numba/extending_distributions.py b/MLPY/Lib/site-packages/numpy/random/_examples/numba/extending_distributions.py new file mode 100644 index 0000000000000000000000000000000000000000..f349d9bee772b86bdb614f626b295db19c7f490e --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/_examples/numba/extending_distributions.py @@ -0,0 +1,67 @@ +r""" +Building the required library in this example requires a source distribution +of NumPy or a clone of the NumPy git repository since distributions.c is not +included in binary distributions.
+ +On *nix, execute in numpy/random/src/distributions + +export PYTHON_VERSION=3.8 # Python version +export PYTHON_INCLUDE=#path to Python's include folder, usually \ + ${PYTHON_HOME}/include/python${PYTHON_VERSION}m +export NUMPY_INCLUDE=#path to numpy's include folder, usually \ + ${PYTHON_HOME}/lib/python${PYTHON_VERSION}/site-packages/numpy/core/include +gcc -shared -o libdistributions.so -fPIC distributions.c \ + -I${NUMPY_INCLUDE} -I${PYTHON_INCLUDE} +mv libdistributions.so ../../_examples/numba/ + +On Windows + +rem PYTHON_HOME and PYTHON_VERSION are setup dependent, this is an example +set PYTHON_HOME=c:\Anaconda +set PYTHON_VERSION=38 +cl.exe /LD .\distributions.c -DDLL_EXPORT \ + -I%PYTHON_HOME%\lib\site-packages\numpy\core\include \ + -I%PYTHON_HOME%\include %PYTHON_HOME%\libs\python%PYTHON_VERSION%.lib +move distributions.dll ../../_examples/numba/ +""" +import os + +import numba as nb +import numpy as np +from cffi import FFI + +from numpy.random import PCG64 + +ffi = FFI() +if os.path.exists('./distributions.dll'): + lib = ffi.dlopen('./distributions.dll') +elif os.path.exists('./libdistributions.so'): + lib = ffi.dlopen('./libdistributions.so') +else: + raise RuntimeError('Required DLL/so file was not found.') + +ffi.cdef(""" +double random_standard_normal(void *bitgen_state); +""") +x = PCG64() +xffi = x.cffi +bit_generator = xffi.bit_generator + +random_standard_normal = lib.random_standard_normal + + +def normals(n, bit_generator): + out = np.empty(n) + for i in range(n): + out[i] = random_standard_normal(bit_generator) + return out + + +normalsj = nb.jit(normals, nopython=True) + +# Numba requires a memory address for void * +# Can also get address from x.ctypes.bit_generator.value +bit_generator_address = int(ffi.cast('uintptr_t', bit_generator)) + +norm = normalsj(1000, bit_generator_address) +print(norm[:12]) diff --git a/MLPY/Lib/site-packages/numpy/random/_generator.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/numpy/random/_generator.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..b460066fd806a60361c2b3b99483a123a4ff3023 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/random/_generator.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/numpy/random/_generator.pyi b/MLPY/Lib/site-packages/numpy/random/_generator.pyi new file mode 100644 index 0000000000000000000000000000000000000000..d40206106f02411fcfd72012f215bc38aedc2b5f --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/_generator.pyi @@ -0,0 +1,651 @@ +import sys +from typing import Any, Callable, Dict, Optional, Tuple, Type, Union, overload, TypeVar + +from numpy import ( + bool_, + dtype, + float32, + float64, + int8, + int16, + int32, + int64, + int_, + ndarray, + uint, + uint8, + uint16, + uint32, + uint64, +) +from numpy.random import BitGenerator, SeedSequence +from numpy.typing import ( + ArrayLike, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _DoubleCodes, + _DTypeLikeBool, + _DTypeLikeInt, + _DTypeLikeUInt, + _Float32Codes, + _Float64Codes, + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _IntCodes, + _ShapeLike, + _SingleCodes, + _SupportsDType, + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, + _UIntCodes, +) + +if sys.version_info >= (3, 8): + from typing import Literal +else: + from typing_extensions import Literal + +_ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any]) + +_DTypeLikeFloat32 = Union[ + dtype[float32], + _SupportsDType[dtype[float32]], + Type[float32], + _Float32Codes, + _SingleCodes,
+] + +_DTypeLikeFloat64 = Union[ + dtype[float64], + _SupportsDType[dtype[float64]], + Type[float], + Type[float64], + _Float64Codes, + _DoubleCodes, +] + +class Generator: + def __init__(self, bit_generator: BitGenerator) -> None: ... + def __repr__(self) -> str: ... + def __str__(self) -> str: ... + def __getstate__(self) -> Dict[str, Any]: ... + def __setstate__(self, state: Dict[str, Any]) -> None: ... + def __reduce__(self) -> Tuple[Callable[[str], Generator], Tuple[str], Dict[str, Any]]: ... + @property + def bit_generator(self) -> BitGenerator: ... + def bytes(self, length: int) -> bytes: ... + @overload + def standard_normal( # type: ignore[misc] + self, + size: None = ..., + dtype: Union[_DTypeLikeFloat32, _DTypeLikeFloat64] = ..., + out: None = ..., + ) -> float: ... + @overload + def standard_normal( # type: ignore[misc] + self, + size: _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_normal( # type: ignore[misc] + self, + *, + out: ndarray[Any, dtype[float64]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_normal( # type: ignore[misc] + self, + size: _ShapeLike = ..., + dtype: _DTypeLikeFloat32 = ..., + out: Optional[ndarray[Any, dtype[float32]]] = ..., + ) -> ndarray[Any, dtype[float32]]: ... + @overload + def standard_normal( # type: ignore[misc] + self, + size: _ShapeLike = ..., + dtype: _DTypeLikeFloat64 = ..., + out: Optional[ndarray[Any, dtype[float64]]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def permutation(self, x: int, axis: int = ...) -> ndarray[Any, dtype[int64]]: ... + @overload + def permutation(self, x: ArrayLike, axis: int = ...) -> ndarray[Any, Any]: ... + @overload + def standard_exponential( # type: ignore[misc] + self, + size: None = ..., + dtype: Union[_DTypeLikeFloat32, _DTypeLikeFloat64] = ..., + method: Literal["zig", "inv"] = ..., + out: None = ..., + ) -> float: ... + @overload + def standard_exponential( + self, + size: _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_exponential( + self, + *, + out: ndarray[Any, dtype[float64]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_exponential( + self, + size: _ShapeLike = ..., + *, + method: Literal["zig", "inv"] = ..., + out: Optional[ndarray[Any, dtype[float64]]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_exponential( + self, + size: _ShapeLike = ..., + dtype: _DTypeLikeFloat32 = ..., + method: Literal["zig", "inv"] = ..., + out: Optional[ndarray[Any, dtype[float32]]] = ..., + ) -> ndarray[Any, dtype[float32]]: ... + @overload + def standard_exponential( + self, + size: _ShapeLike = ..., + dtype: _DTypeLikeFloat64 = ..., + method: Literal["zig", "inv"] = ..., + out: Optional[ndarray[Any, dtype[float64]]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def random( # type: ignore[misc] + self, + size: None = ..., + dtype: Union[_DTypeLikeFloat32, _DTypeLikeFloat64] = ..., + out: None = ..., + ) -> float: ... + @overload + def random( + self, + *, + out: ndarray[Any, dtype[float64]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def random( + self, + size: _ShapeLike = ..., + *, + out: Optional[ndarray[Any, dtype[float64]]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def random( + self, + size: _ShapeLike = ..., + dtype: _DTypeLikeFloat32 = ..., + out: Optional[ndarray[Any, dtype[float32]]] = ..., + ) -> ndarray[Any, dtype[float32]]: ... 
+ @overload + def random( + self, + size: _ShapeLike = ..., + dtype: _DTypeLikeFloat64 = ..., + out: Optional[ndarray[Any, dtype[float64]]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def beta(self, a: float, b: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def beta( + self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def exponential(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def exponential( + self, scale: _ArrayLikeFloat_co = ..., size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def integers( # type: ignore[misc] + self, + low: int, + high: Optional[int] = ..., + ) -> int: ... + @overload + def integers( # type: ignore[misc] + self, + low: int, + high: Optional[int] = ..., + size: None = ..., + dtype: _DTypeLikeBool = ..., + endpoint: bool = ..., + ) -> bool: ... + @overload + def integers( # type: ignore[misc] + self, + low: int, + high: Optional[int] = ..., + size: None = ..., + dtype: Union[_DTypeLikeInt, _DTypeLikeUInt] = ..., + endpoint: bool = ..., + ) -> int: ... + @overload + def integers( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + ) -> ndarray[Any, dtype[int64]]: ... + @overload + def integers( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + dtype: _DTypeLikeBool = ..., + endpoint: bool = ..., + ) -> ndarray[Any, dtype[bool_]]: ... + @overload + def integers( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + dtype: Union[dtype[int8], Type[int8], _Int8Codes, _SupportsDType[dtype[int8]]] = ..., + endpoint: bool = ..., + ) -> ndarray[Any, dtype[int8]]: ... + @overload + def integers( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + dtype: Union[dtype[int16], Type[int16], _Int16Codes, _SupportsDType[dtype[int16]]] = ..., + endpoint: bool = ..., + ) -> ndarray[Any, dtype[int16]]: ... + @overload + def integers( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + dtype: Union[dtype[int32], Type[int32], _Int32Codes, _SupportsDType[dtype[int32]]] = ..., + endpoint: bool = ..., + ) -> ndarray[Any, dtype[Union[int32]]]: ... + @overload + def integers( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + dtype: Optional[ + Union[dtype[int64], Type[int64], _Int64Codes, _SupportsDType[dtype[int64]]] + ] = ..., + endpoint: bool = ..., + ) -> ndarray[Any, dtype[int64]]: ... + @overload + def integers( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + dtype: Union[dtype[uint8], Type[uint8], _UInt8Codes, _SupportsDType[dtype[uint8]]] = ..., + endpoint: bool = ..., + ) -> ndarray[Any, dtype[uint8]]: ... 
+ @overload + def integers( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + dtype: Union[ + dtype[uint16], Type[uint16], _UInt16Codes, _SupportsDType[dtype[uint16]] + ] = ..., + endpoint: bool = ..., + ) -> ndarray[Any, dtype[Union[uint16]]]: ... + @overload + def integers( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + dtype: Union[ + dtype[uint32], Type[uint32], _UInt32Codes, _SupportsDType[dtype[uint32]] + ] = ..., + endpoint: bool = ..., + ) -> ndarray[Any, dtype[uint32]]: ... + @overload + def integers( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + dtype: Union[ + dtype[uint64], Type[uint64], _UInt64Codes, _SupportsDType[dtype[uint64]] + ] = ..., + endpoint: bool = ..., + ) -> ndarray[Any, dtype[uint64]]: ... + @overload + def integers( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + dtype: Union[ + dtype[int_], Type[int], Type[int_], _IntCodes, _SupportsDType[dtype[int_]] + ] = ..., + endpoint: bool = ..., + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def integers( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + dtype: Union[dtype[uint], Type[uint], _UIntCodes, _SupportsDType[dtype[uint]]] = ..., + endpoint: bool = ..., + ) -> ndarray[Any, dtype[uint]]: ... + # TODO: Use a TypeVar _T here to get away from Any output? Should be int->ndarray[Any,dtype[int64]], ArrayLike[_T] -> Union[_T, ndarray[Any,Any]] + @overload + def choice( + self, + a: int, + size: None = ..., + replace: bool = ..., + p: Optional[_ArrayLikeFloat_co] = ..., + axis: int = ..., + shuffle: bool = ..., + ) -> int: ... + @overload + def choice( + self, + a: int, + size: _ShapeLike = ..., + replace: bool = ..., + p: Optional[_ArrayLikeFloat_co] = ..., + axis: int = ..., + shuffle: bool = ..., + ) -> ndarray[Any, dtype[int64]]: ... + @overload + def choice( + self, + a: ArrayLike, + size: None = ..., + replace: bool = ..., + p: Optional[_ArrayLikeFloat_co] = ..., + axis: int = ..., + shuffle: bool = ..., + ) -> Any: ... + @overload + def choice( + self, + a: ArrayLike, + size: _ShapeLike = ..., + replace: bool = ..., + p: Optional[_ArrayLikeFloat_co] = ..., + axis: int = ..., + shuffle: bool = ..., + ) -> ndarray[Any, Any]: ... + @overload + def uniform(self, low: float = ..., high: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def uniform( + self, + low: _ArrayLikeFloat_co = ..., + high: _ArrayLikeFloat_co = ..., + size: Optional[_ShapeLike] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def normal(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def normal( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: Optional[_ShapeLike] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_gamma( # type: ignore[misc] + self, + shape: float, + size: None = ..., + dtype: Union[_DTypeLikeFloat32, _DTypeLikeFloat64] = ..., + out: None = ..., + ) -> float: ... + @overload + def standard_gamma( + self, + shape: _ArrayLikeFloat_co, + size: Optional[_ShapeLike] = ..., + ) -> ndarray[Any, dtype[float64]]: ... 
+ @overload + def standard_gamma( + self, + shape: _ArrayLikeFloat_co, + *, + out: ndarray[Any, dtype[float64]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_gamma( + self, + shape: _ArrayLikeFloat_co, + size: Optional[_ShapeLike] = ..., + dtype: _DTypeLikeFloat32 = ..., + out: Optional[ndarray[Any, dtype[float32]]] = ..., + ) -> ndarray[Any, dtype[float32]]: ... + @overload + def standard_gamma( + self, + shape: _ArrayLikeFloat_co, + size: Optional[_ShapeLike] = ..., + dtype: _DTypeLikeFloat64 = ..., + out: Optional[ndarray[Any, dtype[float64]]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def gamma(self, shape: float, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def gamma( + self, + shape: _ArrayLikeFloat_co, + scale: _ArrayLikeFloat_co = ..., + size: Optional[_ShapeLike] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def f(self, dfnum: float, dfden: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def f( + self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def noncentral_f(self, dfnum: float, dfden: float, nonc: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def noncentral_f( + self, + dfnum: _ArrayLikeFloat_co, + dfden: _ArrayLikeFloat_co, + nonc: _ArrayLikeFloat_co, + size: Optional[_ShapeLike] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def chisquare(self, df: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def chisquare( + self, df: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def noncentral_chisquare(self, df: float, nonc: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def noncentral_chisquare( + self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_t(self, df: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def standard_t( + self, df: _ArrayLikeFloat_co, size: None = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_t( + self, df: _ArrayLikeFloat_co, size: _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def vonmises(self, mu: float, kappa: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def vonmises( + self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def pareto(self, a: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def pareto( + self, a: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def weibull(self, a: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def weibull( + self, a: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def power(self, a: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def power( + self, a: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_cauchy(self, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def standard_cauchy(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ... 
+ @overload + def laplace(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def laplace( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: Optional[_ShapeLike] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def gumbel(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def gumbel( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: Optional[_ShapeLike] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def logistic(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def logistic( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: Optional[_ShapeLike] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def lognormal(self, mean: float = ..., sigma: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def lognormal( + self, + mean: _ArrayLikeFloat_co = ..., + sigma: _ArrayLikeFloat_co = ..., + size: Optional[_ShapeLike] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def rayleigh(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def rayleigh( + self, scale: _ArrayLikeFloat_co = ..., size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def wald(self, mean: float, scale: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def wald( + self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def triangular(self, left: float, mode: float, right: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def triangular( + self, + left: _ArrayLikeFloat_co, + mode: _ArrayLikeFloat_co, + right: _ArrayLikeFloat_co, + size: Optional[_ShapeLike] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def binomial(self, n: int, p: float, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def binomial( + self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[int64]]: ... + @overload + def negative_binomial(self, n: float, p: float, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def negative_binomial( + self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[int64]]: ... + @overload + def poisson(self, lam: float = ..., size: None = ...) -> int: ... # type: ignore[misc] + @overload + def poisson( + self, lam: _ArrayLikeFloat_co = ..., size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[int64]]: ... + @overload + def zipf(self, a: float, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def zipf( + self, a: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[int64]]: ... + @overload + def geometric(self, p: float, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def geometric( + self, p: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[int64]]: ... + @overload + def hypergeometric(self, ngood: int, nbad: int, nsample: int, size: None = ...) -> int: ... 
# type: ignore[misc] + @overload + def hypergeometric( + self, + ngood: _ArrayLikeInt_co, + nbad: _ArrayLikeInt_co, + nsample: _ArrayLikeInt_co, + size: Optional[_ShapeLike] = ..., + ) -> ndarray[Any, dtype[int64]]: ... + @overload + def logseries(self, p: float, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def logseries( + self, p: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[int64]]: ... + def multivariate_normal( + self, + mean: _ArrayLikeFloat_co, + cov: _ArrayLikeFloat_co, + size: Optional[_ShapeLike] = ..., + check_valid: Literal["warn", "raise", "ignore"] = ..., + tol: float = ..., + *, + method: Literal["svd", "eigh", "cholesky"] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + def multinomial( + self, n: _ArrayLikeInt_co, pvals: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[int64]]: ... + def multivariate_hypergeometric( + self, + colors: _ArrayLikeInt_co, + nsample: int, + size: Optional[_ShapeLike] = ..., + method: Literal["marginals", "count"] = ..., + ) -> ndarray[Any, dtype[int64]]: ... + def dirichlet( + self, alpha: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[float64]]: ... + def permuted( + self, x: ArrayLike, *, axis: Optional[int] = ..., out: Optional[ndarray[Any, Any]] = ... + ) -> ndarray[Any, Any]: ... + def shuffle(self, x: ArrayLike, axis: int = ...) -> None: ... + +def default_rng( + seed: Union[None, _ArrayLikeInt_co, SeedSequence, BitGenerator, Generator] = ... +) -> Generator: ... diff --git a/MLPY/Lib/site-packages/numpy/random/_mt19937.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/numpy/random/_mt19937.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..21014677ee596fe614b2a53f69ec9dd507b38b22 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/random/_mt19937.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/numpy/random/_mt19937.pyi b/MLPY/Lib/site-packages/numpy/random/_mt19937.pyi new file mode 100644 index 0000000000000000000000000000000000000000..8827789ca6e9db0a7fff6dbbfc7fb6661d2ecf11 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/_mt19937.pyi @@ -0,0 +1,28 @@ +import sys +from typing import Any, Union + +from numpy import dtype, ndarray, uint32 +from numpy.random.bit_generator import BitGenerator, SeedSequence +from numpy.typing import _ArrayLikeInt_co + +if sys.version_info >= (3, 8): + from typing import TypedDict +else: + from typing_extensions import TypedDict + +class _MT19937Internal(TypedDict): + key: ndarray[Any, dtype[uint32]] + pos: int + +class _MT19937State(TypedDict): + bit_generator: str + state: _MT19937Internal + +class MT19937(BitGenerator): + def __init__(self, seed: Union[None, _ArrayLikeInt_co, SeedSequence] = ...) -> None: ... + def _legacy_seeding(self, seed: _ArrayLikeInt_co) -> None: ... + def jumped(self, jumps: int = ...) -> MT19937: ... + @property + def state(self) -> _MT19937State: ... + @state.setter + def state(self, value: _MT19937State) -> None: ... 
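default_rng at the end of the Generator stub accepts anything seed-like, including an already-constructed BitGenerator, and the MT19937 stub spells out the state layout as a TypedDict (the uint32 key array plus a pos cursor). A short sketch of the save/restore round-trip those annotations describe:

    import numpy as np
    from numpy.random import MT19937, SeedSequence, default_rng

    bg = MT19937(SeedSequence(42))
    rng = default_rng(bg)        # wraps the existing BitGenerator

    snapshot = bg.state          # {'bit_generator': 'MT19937', 'state': {'key': ..., 'pos': ...}}
    a = rng.random(3)
    bg.state = snapshot          # restoring the dict replays the stream
    assert np.array_equal(a, rng.random(3))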
diff --git a/MLPY/Lib/site-packages/numpy/random/_pcg64.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/numpy/random/_pcg64.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..22ef21375a8f68ad733bf0b213fe9e2e867827d0 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/random/_pcg64.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/numpy/random/_pcg64.pyi b/MLPY/Lib/site-packages/numpy/random/_pcg64.pyi new file mode 100644 index 0000000000000000000000000000000000000000..d3845a00322a631c8ccec6329ecfa3aef1c46f53 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/_pcg64.pyi @@ -0,0 +1,48 @@ +import sys +from typing import Union + +from numpy.random.bit_generator import BitGenerator, SeedSequence +from numpy.typing import _ArrayLikeInt_co + +if sys.version_info >= (3, 8): + from typing import TypedDict +else: + from typing_extensions import TypedDict + +class _PCG64Internal(TypedDict): + state: int + inc: int + +class _PCG64State(TypedDict): + bit_generator: str + state: _PCG64Internal + has_uint32: int + uinteger: int + +class PCG64(BitGenerator): + def __init__(self, seed: Union[None, _ArrayLikeInt_co, SeedSequence] = ...) -> None: ... + def jumped(self, jumps: int = ...) -> PCG64: ... + @property + def state( + self, + ) -> _PCG64State: ... + @state.setter + def state( + self, + value: _PCG64State, + ) -> None: ... + def advance(self, delta: int) -> PCG64: ... + +class PCG64DXSM(BitGenerator): + def __init__(self, seed: Union[None, _ArrayLikeInt_co, SeedSequence] = ...) -> None: ... + def jumped(self, jumps: int = ...) -> PCG64DXSM: ... + @property + def state( + self, + ) -> _PCG64State: ... + @state.setter + def state( + self, + value: _PCG64State, + ) -> None: ... + def advance(self, delta: int) -> PCG64DXSM: ... diff --git a/MLPY/Lib/site-packages/numpy/random/_philox.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/numpy/random/_philox.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..5ca0b8612a829e2a84e1f7187a84ac18304a6c50 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/random/_philox.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/numpy/random/_philox.pyi b/MLPY/Lib/site-packages/numpy/random/_philox.pyi new file mode 100644 index 0000000000000000000000000000000000000000..a3581b1ee84d017cb01aee1f7e45403f1d5a6c25 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/_philox.pyi @@ -0,0 +1,42 @@ +import sys +from typing import Any, Union + +from numpy import dtype, ndarray, uint64 +from numpy.random.bit_generator import BitGenerator, SeedSequence +from numpy.typing import _ArrayLikeInt_co + +if sys.version_info >= (3, 8): + from typing import TypedDict +else: + from typing_extensions import TypedDict + +class _PhiloxInternal(TypedDict): + counter: ndarray[Any, dtype[uint64]] + key: ndarray[Any, dtype[uint64]] + +class _PhiloxState(TypedDict): + bit_generator: str + state: _PhiloxInternal + buffer: ndarray[Any, dtype[uint64]] + buffer_pos: int + has_uint32: int + uinteger: int + +class Philox(BitGenerator): + def __init__( + self, + seed: Union[None, _ArrayLikeInt_co, SeedSequence] = ..., + counter: Union[None, _ArrayLikeInt_co] = ..., + key: Union[None, _ArrayLikeInt_co] = ..., + ) -> None: ... + @property + def state( + self, + ) -> _PhiloxState: ... + @state.setter + def state( + self, + value: _PhiloxState, + ) -> None: ... + def jumped(self, jumps: int = ...) -> Philox: ... + def advance(self, delta: int) -> Philox: ... 
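PCG64, PCG64DXSM and Philox all type jumped as returning a fresh bit generator and advance as returning the (mutated) generator itself, which is the machinery behind the usual parallel-streams recipe. A sketch under those signatures:

    from numpy.random import PCG64, Generator

    root = PCG64(20210731)
    # jumped(i) returns a copy roughly i * 2**127 draws ahead; using i + 1
    # keeps every worker stream disjoint from `root` itself.
    workers = [Generator(root.jumped(i + 1)) for i in range(4)]

    # advance() moves a single stream by an arbitrary delta, in place.
    bg = PCG64(0)
    bg.advance(1_000_000)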
diff --git a/MLPY/Lib/site-packages/numpy/random/_pickle.py b/MLPY/Lib/site-packages/numpy/random/_pickle.py new file mode 100644 index 0000000000000000000000000000000000000000..04cdb3874e99a5c25d5ed6747ad6f717c5ee0910 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/_pickle.py @@ -0,0 +1,83 @@ +from .mtrand import RandomState +from ._philox import Philox +from ._pcg64 import PCG64, PCG64DXSM +from ._sfc64 import SFC64 + +from ._generator import Generator +from ._mt19937 import MT19937 + +BitGenerators = {'MT19937': MT19937, + 'PCG64': PCG64, + 'PCG64DXSM': PCG64DXSM, + 'Philox': Philox, + 'SFC64': SFC64, + } + + +def __generator_ctor(bit_generator_name='MT19937'): + """ + Pickling helper function that returns a Generator object + + Parameters + ---------- + bit_generator_name : str + String containing the core BitGenerator + + Returns + ------- + rg: Generator + Generator using the named core BitGenerator + """ + if bit_generator_name in BitGenerators: + bit_generator = BitGenerators[bit_generator_name] + else: + raise ValueError(str(bit_generator_name) + ' is not a known ' + 'BitGenerator module.') + + return Generator(bit_generator()) + + +def __bit_generator_ctor(bit_generator_name='MT19937'): + """ + Pickling helper function that returns a bit generator object + + Parameters + ---------- + bit_generator_name : str + String containing the name of the BitGenerator + + Returns + ------- + bit_generator: BitGenerator + BitGenerator instance + """ + if bit_generator_name in BitGenerators: + bit_generator = BitGenerators[bit_generator_name] + else: + raise ValueError(str(bit_generator_name) + ' is not a known ' + 'BitGenerator module.') + + return bit_generator() + + +def __randomstate_ctor(bit_generator_name='MT19937'): + """ + Pickling helper function that returns a legacy RandomState-like object + + Parameters + ---------- + bit_generator_name : str + String containing the core BitGenerator + + Returns + ------- + rs: RandomState + Legacy RandomState using the named core BitGenerator + """ + if bit_generator_name in BitGenerators: + bit_generator = BitGenerators[bit_generator_name] + else: + raise ValueError(str(bit_generator_name) + ' is not a known ' + 'BitGenerator module.') + + return RandomState(bit_generator()) diff --git a/MLPY/Lib/site-packages/numpy/random/_sfc64.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/numpy/random/_sfc64.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..967af1e18d9bf049aaabbec89349f0a5de35ec49 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/random/_sfc64.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/numpy/random/_sfc64.pyi b/MLPY/Lib/site-packages/numpy/random/_sfc64.pyi new file mode 100644 index 0000000000000000000000000000000000000000..3238fd6fc4a48a480d53b00681db15d9a46a73eb --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/_sfc64.pyi @@ -0,0 +1,34 @@ +import sys +from typing import Any, Union + +from numpy import dtype as dtype +from numpy import ndarray as ndarray +from numpy import uint64 +from numpy.random.bit_generator import BitGenerator, SeedSequence +from numpy.typing import _ArrayLikeInt_co + +if sys.version_info >= (3, 8): + from typing import TypedDict +else: + from typing_extensions import TypedDict + +class _SFC64Internal(TypedDict): + state: ndarray[Any, dtype[uint64]] + +class _SFC64State(TypedDict): + bit_generator: str + state: _SFC64Internal + has_uint32: int + uinteger: int + +class SFC64(BitGenerator): + def __init__(self, seed: Union[None, _ArrayLikeInt_co, 
SeedSequence] = ...) -> None: ... + @property + def state( + self, + ) -> _SFC64State: ... + @state.setter + def state( + self, + value: _SFC64State, + ) -> None: ... diff --git a/MLPY/Lib/site-packages/numpy/random/bit_generator.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/numpy/random/bit_generator.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..a9bf75fa64124c31cf58eb80fdc3a1b0f815c179 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/random/bit_generator.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/numpy/random/bit_generator.pxd b/MLPY/Lib/site-packages/numpy/random/bit_generator.pxd new file mode 100644 index 0000000000000000000000000000000000000000..0b1d793c6efc3a5ceb69fac328b606c460828886 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/bit_generator.pxd @@ -0,0 +1,35 @@ +cimport numpy as np +from libc.stdint cimport uint32_t, uint64_t + +cdef extern from "numpy/random/bitgen.h": + struct bitgen: + void *state + uint64_t (*next_uint64)(void *st) nogil + uint32_t (*next_uint32)(void *st) nogil + double (*next_double)(void *st) nogil + uint64_t (*next_raw)(void *st) nogil + + ctypedef bitgen bitgen_t + +cdef class BitGenerator(): + cdef readonly object _seed_seq + cdef readonly object lock + cdef bitgen_t _bitgen + cdef readonly object _ctypes + cdef readonly object _cffi + cdef readonly object capsule + + +cdef class SeedSequence(): + cdef readonly object entropy + cdef readonly tuple spawn_key + cdef readonly Py_ssize_t pool_size + cdef readonly object pool + cdef readonly uint32_t n_children_spawned + + cdef mix_entropy(self, np.ndarray[np.npy_uint32, ndim=1] mixer, + np.ndarray[np.npy_uint32, ndim=1] entropy_array) + cdef get_assembled_entropy(self) + +cdef class SeedlessSequence(): + pass diff --git a/MLPY/Lib/site-packages/numpy/random/bit_generator.pyi b/MLPY/Lib/site-packages/numpy/random/bit_generator.pyi new file mode 100644 index 0000000000000000000000000000000000000000..ab3d5dc83648a79b5a97686fad47467b82bb3eb8 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/bit_generator.pyi @@ -0,0 +1,121 @@ +import abc +import sys +from threading import Lock +from typing import ( + Any, + Callable, + Dict, + List, + Mapping, + NamedTuple, + Optional, + Sequence, + Tuple, + Type, + TypedDict, + TypeVar, + Union, + overload, +) + +from numpy import dtype, ndarray, uint32, uint64 +from numpy.typing import _ArrayLikeInt_co, _ShapeLike, _SupportsDType, _UInt32Codes, _UInt64Codes + +if sys.version_info >= (3, 8): + from typing import Literal +else: + from typing_extensions import Literal + +_T = TypeVar("_T") + +_DTypeLikeUint32 = Union[ + dtype[uint32], + _SupportsDType[dtype[uint32]], + Type[uint32], + _UInt32Codes, +] +_DTypeLikeUint64 = Union[ + dtype[uint64], + _SupportsDType[dtype[uint64]], + Type[uint64], + _UInt64Codes, +] + +class _SeedSeqState(TypedDict): + entropy: Union[None, int, Sequence[int]] + spawn_key: Tuple[int, ...] + pool_size: int + n_children_spawned: int + +class _Interface(NamedTuple): + state_address: Any + state: Any + next_uint64: Any + next_uint32: Any + next_double: Any + bit_generator: Any + +class ISeedSequence(abc.ABC): + @abc.abstractmethod + def generate_state( + self, n_words: int, dtype: Union[_DTypeLikeUint32, _DTypeLikeUint64] = ... + ) -> ndarray[Any, dtype[Union[uint32, uint64]]]: ... + +class ISpawnableSeedSequence(ISeedSequence): + @abc.abstractmethod + def spawn(self: _T, n_children: int) -> List[_T]: ... 
+ +class SeedlessSeedSequence(ISpawnableSeedSequence): + def generate_state( + self, n_words: int, dtype: Union[_DTypeLikeUint32, _DTypeLikeUint64] = ... + ) -> ndarray[Any, dtype[Union[uint32, uint64]]]: ... + def spawn(self: _T, n_children: int) -> List[_T]: ... + +class SeedSequence(ISpawnableSeedSequence): + entropy: Union[None, int, Sequence[int]] + spawn_key: Tuple[int, ...] + pool_size: int + n_children_spawned: int + pool: ndarray[Any, dtype[uint32]] + def __init__( + self, + entropy: Union[None, int, Sequence[int], _ArrayLikeInt_co] = ..., + *, + spawn_key: Sequence[int] = ..., + pool_size: int = ..., + n_children_spawned: int = ..., + ) -> None: ... + def __repr__(self) -> str: ... + @property + def state( + self, + ) -> _SeedSeqState: ... + def generate_state( + self, n_words: int, dtype: Union[_DTypeLikeUint32, _DTypeLikeUint64] = ... + ) -> ndarray[Any, dtype[Union[uint32, uint64]]]: ... + def spawn(self, n_children: int) -> List[SeedSequence]: ... + +class BitGenerator(abc.ABC): + lock: Lock + def __init__(self, seed: Union[None, _ArrayLikeInt_co, SeedSequence] = ...) -> None: ... + def __getstate__(self) -> Dict[str, Any]: ... + def __setstate__(self, state: Dict[str, Any]) -> None: ... + def __reduce__( + self, + ) -> Tuple[Callable[[str], BitGenerator], Tuple[str], Tuple[Dict[str, Any]]]: ... + @abc.abstractmethod + @property + def state(self) -> Mapping[str, Any]: ... + @state.setter + def state(self, value: Mapping[str, Any]) -> None: ... + @overload + def random_raw(self, size: None = ..., output: Literal[True] = ...) -> int: ... # type: ignore[misc] + @overload + def random_raw(self, size: _ShapeLike = ..., output: Literal[True] = ...) -> ndarray[Any, dtype[uint64]]: ... # type: ignore[misc] + @overload + def random_raw(self, size: Optional[_ShapeLike] = ..., output: Literal[False] = ...) -> None: ... # type: ignore[misc] + def _benchmark(self, cnt: int, method: str = ...) -> None: ... + @property + def ctypes(self) -> _Interface: ... + @property + def cffi(self) -> _Interface: ... 
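bit_generator.pyi is the hub the concrete stubs build on: ISeedSequence pins down generate_state, ISpawnableSeedSequence adds spawn, and BitGenerator couples the lock and the ctypes/cffi interfaces with random_raw. The spawning pattern these annotations encode, as a sketch (the 0xdeadbeaf entropy mirrors the test vectors further down):

    import numpy as np
    from numpy.random import PCG64, SeedSequence

    ss = SeedSequence(0xdeadbeaf)
    children = ss.spawn(3)              # List[SeedSequence], independent child streams
    bit_gens = [PCG64(child) for child in children]

    # generate_state is the one abstract method of ISeedSequence.
    words = ss.generate_state(4, dtype=np.uint64)

    # random_raw taps the next_raw slot directly; with output=False it
    # only advances the state and returns None.
    raw = bit_gens[0].random_raw(size=2)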
diff --git a/MLPY/Lib/site-packages/numpy/random/c_distributions.pxd b/MLPY/Lib/site-packages/numpy/random/c_distributions.pxd new file mode 100644 index 0000000000000000000000000000000000000000..cf6b34ab65b2b8ef32cbee0698447af63a1a2a90 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/c_distributions.pxd @@ -0,0 +1,114 @@ +#!python +#cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True, language_level=3 +from numpy cimport npy_intp + +from libc.stdint cimport (uint64_t, int32_t, int64_t) +from numpy.random cimport bitgen_t + +cdef extern from "numpy/random/distributions.h": + + struct s_binomial_t: + int has_binomial + double psave + int64_t nsave + double r + double q + double fm + int64_t m + double p1 + double xm + double xl + double xr + double c + double laml + double lamr + double p2 + double p3 + double p4 + + ctypedef s_binomial_t binomial_t + + double random_standard_uniform(bitgen_t *bitgen_state) nogil + void random_standard_uniform_fill(bitgen_t* bitgen_state, npy_intp cnt, double *out) nogil + double random_standard_exponential(bitgen_t *bitgen_state) nogil + double random_standard_exponential_f(bitgen_t *bitgen_state) nogil + void random_standard_exponential_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) nogil + void random_standard_exponential_fill_f(bitgen_t *bitgen_state, npy_intp cnt, double *out) nogil + void random_standard_exponential_inv_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) nogil + void random_standard_exponential_inv_fill_f(bitgen_t *bitgen_state, npy_intp cnt, double *out) nogil + double random_standard_normal(bitgen_t* bitgen_state) nogil + void random_standard_normal_fill(bitgen_t *bitgen_state, npy_intp count, double *out) nogil + void random_standard_normal_fill_f(bitgen_t *bitgen_state, npy_intp count, float *out) nogil + double random_standard_gamma(bitgen_t *bitgen_state, double shape) nogil + + float random_standard_uniform_f(bitgen_t *bitgen_state) nogil + void random_standard_uniform_fill_f(bitgen_t* bitgen_state, npy_intp cnt, float *out) nogil + float random_standard_normal_f(bitgen_t* bitgen_state) nogil + float random_standard_gamma_f(bitgen_t *bitgen_state, float shape) nogil + + int64_t random_positive_int64(bitgen_t *bitgen_state) nogil + int32_t random_positive_int32(bitgen_t *bitgen_state) nogil + int64_t random_positive_int(bitgen_t *bitgen_state) nogil + uint64_t random_uint(bitgen_t *bitgen_state) nogil + + double random_normal(bitgen_t *bitgen_state, double loc, double scale) nogil + + double random_gamma(bitgen_t *bitgen_state, double shape, double scale) nogil + float random_gamma_f(bitgen_t *bitgen_state, float shape, float scale) nogil + + double random_exponential(bitgen_t *bitgen_state, double scale) nogil + double random_uniform(bitgen_t *bitgen_state, double lower, double range) nogil + double random_beta(bitgen_t *bitgen_state, double a, double b) nogil + double random_chisquare(bitgen_t *bitgen_state, double df) nogil + double random_f(bitgen_t *bitgen_state, double dfnum, double dfden) nogil + double random_standard_cauchy(bitgen_t *bitgen_state) nogil + double random_pareto(bitgen_t *bitgen_state, double a) nogil + double random_weibull(bitgen_t *bitgen_state, double a) nogil + double random_power(bitgen_t *bitgen_state, double a) nogil + double random_laplace(bitgen_t *bitgen_state, double loc, double scale) nogil + double random_gumbel(bitgen_t *bitgen_state, double loc, double scale) nogil + double random_logistic(bitgen_t *bitgen_state, double loc, double scale) nogil + 
double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma) nogil + double random_rayleigh(bitgen_t *bitgen_state, double mode) nogil + double random_standard_t(bitgen_t *bitgen_state, double df) nogil + double random_noncentral_chisquare(bitgen_t *bitgen_state, double df, + double nonc) nogil + double random_noncentral_f(bitgen_t *bitgen_state, double dfnum, + double dfden, double nonc) nogil + double random_wald(bitgen_t *bitgen_state, double mean, double scale) nogil + double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa) nogil + double random_triangular(bitgen_t *bitgen_state, double left, double mode, + double right) nogil + + int64_t random_poisson(bitgen_t *bitgen_state, double lam) nogil + int64_t random_negative_binomial(bitgen_t *bitgen_state, double n, double p) nogil + int64_t random_binomial(bitgen_t *bitgen_state, double p, int64_t n, binomial_t *binomial) nogil + int64_t random_logseries(bitgen_t *bitgen_state, double p) nogil + int64_t random_geometric_search(bitgen_t *bitgen_state, double p) nogil + int64_t random_geometric_inversion(bitgen_t *bitgen_state, double p) nogil + int64_t random_geometric(bitgen_t *bitgen_state, double p) nogil + int64_t random_zipf(bitgen_t *bitgen_state, double a) nogil + int64_t random_hypergeometric(bitgen_t *bitgen_state, int64_t good, int64_t bad, + int64_t sample) nogil + + uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max) nogil + + # Generate random uint64 numbers in closed interval [off, off + rng]. + uint64_t random_bounded_uint64(bitgen_t *bitgen_state, + uint64_t off, uint64_t rng, + uint64_t mask, bint use_masked) nogil + + void random_multinomial(bitgen_t *bitgen_state, int64_t n, int64_t *mnix, + double *pix, npy_intp d, binomial_t *binomial) nogil + + int random_multivariate_hypergeometric_count(bitgen_t *bitgen_state, + int64_t total, + size_t num_colors, int64_t *colors, + int64_t nsample, + size_t num_variates, int64_t *variates) nogil + void random_multivariate_hypergeometric_marginals(bitgen_t *bitgen_state, + int64_t total, + size_t num_colors, int64_t *colors, + int64_t nsample, + size_t num_variates, int64_t *variates) nogil + diff --git a/MLPY/Lib/site-packages/numpy/random/lib/npyrandom.lib b/MLPY/Lib/site-packages/numpy/random/lib/npyrandom.lib new file mode 100644 index 0000000000000000000000000000000000000000..9a09e66611919f022536a7bad82955e3d35301c6 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/random/lib/npyrandom.lib differ diff --git a/MLPY/Lib/site-packages/numpy/random/mtrand.cp39-win_amd64.pyd b/MLPY/Lib/site-packages/numpy/random/mtrand.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..d0425e3ce0e7b248cb85c6b850396fbf2109d78a Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/random/mtrand.cp39-win_amd64.pyd differ diff --git a/MLPY/Lib/site-packages/numpy/random/mtrand.pyi b/MLPY/Lib/site-packages/numpy/random/mtrand.pyi new file mode 100644 index 0000000000000000000000000000000000000000..edc0ec8dd0cb76609d96841f757f8f8eec32115f --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/mtrand.pyi @@ -0,0 +1,579 @@ +import sys +from typing import Any, Callable, Dict, Optional, Tuple, Type, Union, overload + +from numpy import ( + bool_, + dtype, + float32, + float64, + int8, + int16, + int32, + int64, + int_, + ndarray, + uint, + uint8, + uint16, + uint32, + uint64, +) +from numpy.random.bit_generator import BitGenerator +from numpy.typing import ( + ArrayLike, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + 
_DoubleCodes, + _DTypeLikeBool, + _DTypeLikeInt, + _DTypeLikeUInt, + _Float32Codes, + _Float64Codes, + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _IntCodes, + _ShapeLike, + _SingleCodes, + _SupportsDType, + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, + _UIntCodes, +) + +if sys.version_info >= (3, 8): + from typing import Literal +else: + from typing_extensions import Literal + +_DTypeLikeFloat32 = Union[ + dtype[float32], + _SupportsDType[dtype[float32]], + Type[float32], + _Float32Codes, + _SingleCodes, +] + +_DTypeLikeFloat64 = Union[ + dtype[float64], + _SupportsDType[dtype[float64]], + Type[float], + Type[float64], + _Float64Codes, + _DoubleCodes, +] + +class RandomState: + _bit_generator: BitGenerator + def __init__(self, seed: Union[None, _ArrayLikeInt_co, BitGenerator] = ...) -> None: ... + def __repr__(self) -> str: ... + def __str__(self) -> str: ... + def __getstate__(self) -> Dict[str, Any]: ... + def __setstate__(self, state: Dict[str, Any]) -> None: ... + def __reduce__(self) -> Tuple[Callable[[str], RandomState], Tuple[str], Dict[str, Any]]: ... + def seed(self, seed: Optional[_ArrayLikeFloat_co] = ...) -> None: ... + @overload + def get_state(self, legacy: Literal[False] = ...) -> Dict[str, Any]: ... + @overload + def get_state( + self, legacy: Literal[True] = ... + ) -> Union[Dict[str, Any], Tuple[str, ndarray[Any, dtype[uint32]], int, int, float]]: ... + def set_state( + self, state: Union[Dict[str, Any], Tuple[str, ndarray[Any, dtype[uint32]], int, int, float]] + ) -> None: ... + @overload + def random_sample(self, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def random_sample(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ... + @overload + def random(self, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def random(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ... + @overload + def beta(self, a: float, b: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def beta( + self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def exponential(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def exponential( + self, scale: _ArrayLikeFloat_co = ..., size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_exponential(self, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def standard_exponential(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ... + @overload + def tomaxint(self, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def tomaxint(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[int_]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: Optional[int] = ..., + ) -> int: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: Optional[int] = ..., + size: None = ..., + dtype: _DTypeLikeBool = ..., + ) -> bool: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: Optional[int] = ..., + size: None = ..., + dtype: Union[_DTypeLikeInt, _DTypeLikeUInt] = ..., + ) -> int: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + ) -> ndarray[Any, dtype[int_]]: ... 
+ @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + dtype: _DTypeLikeBool = ..., + ) -> ndarray[Any, dtype[bool_]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + dtype: Union[dtype[int8], Type[int8], _Int8Codes, _SupportsDType[dtype[int8]]] = ..., + ) -> ndarray[Any, dtype[int8]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + dtype: Union[dtype[int16], Type[int16], _Int16Codes, _SupportsDType[dtype[int16]]] = ..., + ) -> ndarray[Any, dtype[int16]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + dtype: Union[dtype[int32], Type[int32], _Int32Codes, _SupportsDType[dtype[int32]]] = ..., + ) -> ndarray[Any, dtype[Union[int32]]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + dtype: Optional[ + Union[dtype[int64], Type[int64], _Int64Codes, _SupportsDType[dtype[int64]]] + ] = ..., + ) -> ndarray[Any, dtype[int64]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + dtype: Union[dtype[uint8], Type[uint8], _UInt8Codes, _SupportsDType[dtype[uint8]]] = ..., + ) -> ndarray[Any, dtype[uint8]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + dtype: Union[ + dtype[uint16], Type[uint16], _UInt16Codes, _SupportsDType[dtype[uint16]] + ] = ..., + ) -> ndarray[Any, dtype[Union[uint16]]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + dtype: Union[ + dtype[uint32], Type[uint32], _UInt32Codes, _SupportsDType[dtype[uint32]] + ] = ..., + ) -> ndarray[Any, dtype[uint32]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + dtype: Union[ + dtype[uint64], Type[uint64], _UInt64Codes, _SupportsDType[dtype[uint64]] + ] = ..., + ) -> ndarray[Any, dtype[uint64]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + dtype: Union[ + dtype[int_], Type[int], Type[int_], _IntCodes, _SupportsDType[dtype[int_]] + ] = ..., + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + dtype: Union[dtype[uint], Type[uint], _UIntCodes, _SupportsDType[dtype[uint]]] = ..., + ) -> ndarray[Any, dtype[uint]]: ... + def bytes(self, length: int) -> bytes: ... + @overload + def choice( + self, + a: int, + size: None = ..., + replace: bool = ..., + p: Optional[_ArrayLikeFloat_co] = ..., + ) -> int: ... + @overload + def choice( + self, + a: int, + size: _ShapeLike = ..., + replace: bool = ..., + p: Optional[_ArrayLikeFloat_co] = ..., + ) -> ndarray[Any, dtype[int_]]: ... 
+ @overload + def choice( + self, + a: ArrayLike, + size: None = ..., + replace: bool = ..., + p: Optional[_ArrayLikeFloat_co] = ..., + ) -> Any: ... + @overload + def choice( + self, + a: ArrayLike, + size: _ShapeLike = ..., + replace: bool = ..., + p: Optional[_ArrayLikeFloat_co] = ..., + ) -> ndarray[Any, Any]: ... + @overload + def uniform(self, low: float = ..., high: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def uniform( + self, + low: _ArrayLikeFloat_co = ..., + high: _ArrayLikeFloat_co = ..., + size: Optional[_ShapeLike] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def rand(self) -> float: ... + @overload + def rand(self, *args: int) -> ndarray[Any, dtype[float64]]: ... + @overload + def randn(self) -> float: ... + @overload + def randn(self, *args: int) -> ndarray[Any, dtype[float64]]: ... + @overload + def random_integers(self, low: int, high: Optional[int] = ..., size: None = ...) -> int: ... # type: ignore[misc] + @overload + def random_integers( + self, + low: _ArrayLikeInt_co, + high: Optional[_ArrayLikeInt_co] = ..., + size: Optional[_ShapeLike] = ..., + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def standard_normal(self, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def standard_normal( # type: ignore[misc] + self, size: _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def normal(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def normal( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: Optional[_ShapeLike] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_gamma( # type: ignore[misc] + self, + shape: float, + size: None = ..., + ) -> float: ... + @overload + def standard_gamma( + self, + shape: _ArrayLikeFloat_co, + size: Optional[_ShapeLike] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def gamma(self, shape: float, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def gamma( + self, + shape: _ArrayLikeFloat_co, + scale: _ArrayLikeFloat_co = ..., + size: Optional[_ShapeLike] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def f(self, dfnum: float, dfden: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def f( + self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def noncentral_f(self, dfnum: float, dfden: float, nonc: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def noncentral_f( + self, + dfnum: _ArrayLikeFloat_co, + dfden: _ArrayLikeFloat_co, + nonc: _ArrayLikeFloat_co, + size: Optional[_ShapeLike] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def chisquare(self, df: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def chisquare( + self, df: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def noncentral_chisquare(self, df: float, nonc: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def noncentral_chisquare( + self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_t(self, df: float, size: None = ...) -> float: ... 
# type: ignore[misc] + @overload + def standard_t( + self, df: _ArrayLikeFloat_co, size: None = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_t( + self, df: _ArrayLikeFloat_co, size: _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def vonmises(self, mu: float, kappa: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def vonmises( + self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def pareto(self, a: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def pareto( + self, a: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def weibull(self, a: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def weibull( + self, a: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def power(self, a: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def power( + self, a: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_cauchy(self, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def standard_cauchy(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ... + @overload + def laplace(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def laplace( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: Optional[_ShapeLike] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def gumbel(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def gumbel( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: Optional[_ShapeLike] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def logistic(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def logistic( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: Optional[_ShapeLike] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def lognormal(self, mean: float = ..., sigma: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def lognormal( + self, + mean: _ArrayLikeFloat_co = ..., + sigma: _ArrayLikeFloat_co = ..., + size: Optional[_ShapeLike] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def rayleigh(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def rayleigh( + self, scale: _ArrayLikeFloat_co = ..., size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def wald(self, mean: float, scale: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def wald( + self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def triangular(self, left: float, mode: float, right: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def triangular( + self, + left: _ArrayLikeFloat_co, + mode: _ArrayLikeFloat_co, + right: _ArrayLikeFloat_co, + size: Optional[_ShapeLike] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def binomial(self, n: int, p: float, size: None = ...) -> int: ... 
# type: ignore[misc] + @overload + def binomial( + self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def negative_binomial(self, n: float, p: float, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def negative_binomial( + self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def poisson(self, lam: float = ..., size: None = ...) -> int: ... # type: ignore[misc] + @overload + def poisson( + self, lam: _ArrayLikeFloat_co = ..., size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def zipf(self, a: float, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def zipf( + self, a: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def geometric(self, p: float, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def geometric( + self, p: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def hypergeometric(self, ngood: int, nbad: int, nsample: int, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def hypergeometric( + self, + ngood: _ArrayLikeInt_co, + nbad: _ArrayLikeInt_co, + nsample: _ArrayLikeInt_co, + size: Optional[_ShapeLike] = ..., + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def logseries(self, p: float, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def logseries( + self, p: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[int_]]: ... + def multivariate_normal( + self, + mean: _ArrayLikeFloat_co, + cov: _ArrayLikeFloat_co, + size: Optional[_ShapeLike] = ..., + check_valid: Literal["warn", "raise", "ignore"] = ..., + tol: float = ..., + ) -> ndarray[Any, dtype[float64]]: ... + def multinomial( + self, n: _ArrayLikeInt_co, pvals: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[int_]]: ... + def dirichlet( + self, alpha: _ArrayLikeFloat_co, size: Optional[_ShapeLike] = ... + ) -> ndarray[Any, dtype[float64]]: ... + def shuffle(self, x: ArrayLike) -> None: ... + @overload + def permutation(self, x: int) -> ndarray[Any, dtype[int_]]: ... + @overload + def permutation(self, x: ArrayLike) -> ndarray[Any, Any]: ... 
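Compared with the Generator stub, RandomState keeps the legacy quirks visible in the types: get_state can return either the modern dict or the historical 5-tuple, integer draws come back as int_ rather than int64, and randint's return dtype follows its dtype argument through a dozen overloads. A sketch of the legacy round-trip:

    import numpy as np

    rs = np.random.RandomState(1234)
    state = rs.get_state()       # ('MT19937', key, pos, has_gauss, cached_gaussian)
    vals = rs.randint(0, 256, size=8, dtype=np.uint8)   # ndarray of uint8 per the overloads

    rs.set_state(state)          # restoring replays the stream exactly
    assert np.array_equal(vals, rs.randint(0, 256, size=8, dtype=np.uint8))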
+ +_rand: RandomState + +beta = _rand.beta +binomial = _rand.binomial +bytes = _rand.bytes +chisquare = _rand.chisquare +choice = _rand.choice +dirichlet = _rand.dirichlet +exponential = _rand.exponential +f = _rand.f +gamma = _rand.gamma +get_state = _rand.get_state +geometric = _rand.geometric +gumbel = _rand.gumbel +hypergeometric = _rand.hypergeometric +laplace = _rand.laplace +logistic = _rand.logistic +lognormal = _rand.lognormal +logseries = _rand.logseries +multinomial = _rand.multinomial +multivariate_normal = _rand.multivariate_normal +negative_binomial = _rand.negative_binomial +noncentral_chisquare = _rand.noncentral_chisquare +noncentral_f = _rand.noncentral_f +normal = _rand.normal +pareto = _rand.pareto +permutation = _rand.permutation +poisson = _rand.poisson +power = _rand.power +rand = _rand.rand +randint = _rand.randint +randn = _rand.randn +random = _rand.random +random_integers = _rand.random_integers +random_sample = _rand.random_sample +rayleigh = _rand.rayleigh +seed = _rand.seed +set_state = _rand.set_state +shuffle = _rand.shuffle +standard_cauchy = _rand.standard_cauchy +standard_exponential = _rand.standard_exponential +standard_gamma = _rand.standard_gamma +standard_normal = _rand.standard_normal +standard_t = _rand.standard_t +triangular = _rand.triangular +uniform = _rand.uniform +vonmises = _rand.vonmises +wald = _rand.wald +weibull = _rand.weibull +zipf = _rand.zipf +# Two legacy that are trivial wrappers around random_sample +sample = _rand.random_sample +ranf = _rand.random_sample diff --git a/MLPY/Lib/site-packages/numpy/random/setup.py b/MLPY/Lib/site-packages/numpy/random/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..5bc34a8da82526a28d0a0c548343c08ac76b841c --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/setup.py @@ -0,0 +1,148 @@ +import os +import platform +import sys +from os.path import join + +from numpy.distutils.system_info import platform_bits + +is_msvc = (platform.platform().startswith('Windows') and + platform.python_compiler().startswith('MS')) + + +def configuration(parent_package='', top_path=None): + from numpy.distutils.misc_util import Configuration, get_mathlibs + config = Configuration('random', parent_package, top_path) + + def generate_libraries(ext, build_dir): + config_cmd = config.get_config_cmd() + libs = get_mathlibs() + if sys.platform == 'win32': + libs.extend(['Advapi32', 'Kernel32']) + ext.libraries.extend(libs) + return None + + # enable unix large file support on 32 bit systems + # (64 bit off_t, lseek -> lseek64 etc.) + if sys.platform[:3] == 'aix': + defs = [('_LARGE_FILES', None)] + else: + defs = [('_FILE_OFFSET_BITS', '64'), + ('_LARGEFILE_SOURCE', '1'), + ('_LARGEFILE64_SOURCE', '1')] + + defs.append(('NPY_NO_DEPRECATED_API', 0)) + config.add_subpackage('tests') + config.add_data_dir('tests/data') + config.add_data_dir('_examples') + + EXTRA_LINK_ARGS = [] + EXTRA_LIBRARIES = ['npyrandom'] + if os.name != 'nt': + # Math lib + EXTRA_LIBRARIES.append('m') + # Some bit generators exclude GCC inlining + EXTRA_COMPILE_ARGS = ['-U__GNUC_GNU_INLINE__'] + + if is_msvc and platform_bits == 32: + # 32-bit windows requires explicit sse2 option + EXTRA_COMPILE_ARGS += ['/arch:SSE2'] + elif not is_msvc: + # Some bit generators require c99 + EXTRA_COMPILE_ARGS += ['-std=c99'] + + # Use legacy integer variable sizes + LEGACY_DEFS = [('NP_RANDOM_LEGACY', '1')] + PCG64_DEFS = [] + # One can force emulated 128-bit arithmetic if one wants. 
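+ # (MSVC has no native __uint128_t, so those builds use the emulated path
+ # automatically; defining PCG_FORCE_EMULATED_128BIT_MATH only exercises
+ # that fallback on compilers that do have 128-bit support.)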
+ #PCG64_DEFS += [('PCG_FORCE_EMULATED_128BIT_MATH', '1')] + depends = ['__init__.pxd', 'c_distributions.pxd', 'bit_generator.pxd'] + + # npyrandom - a library like npymath + npyrandom_sources = [ + 'src/distributions/logfactorial.c', + 'src/distributions/distributions.c', + 'src/distributions/random_mvhg_count.c', + 'src/distributions/random_mvhg_marginals.c', + 'src/distributions/random_hypergeometric.c', + ] + config.add_installed_library('npyrandom', + sources=npyrandom_sources, + install_dir='lib', + build_info={ + 'include_dirs' : [], # empty list required for creating npyrandom.h + 'extra_compiler_args' : (['/GL-'] if is_msvc else []), + }) + + for gen in ['mt19937']: + # gen.pyx, src/gen/gen.c, src/gen/gen-jump.c + config.add_extension(f'_{gen}', + sources=[f'_{gen}.c', + f'src/{gen}/{gen}.c', + f'src/{gen}/{gen}-jump.c'], + include_dirs=['.', 'src', join('src', gen)], + libraries=EXTRA_LIBRARIES, + extra_compile_args=EXTRA_COMPILE_ARGS, + extra_link_args=EXTRA_LINK_ARGS, + depends=depends + [f'_{gen}.pyx'], + define_macros=defs, + ) + for gen in ['philox', 'pcg64', 'sfc64']: + # gen.pyx, src/gen/gen.c + _defs = defs + PCG64_DEFS if gen == 'pcg64' else defs + config.add_extension(f'_{gen}', + sources=[f'_{gen}.c', + f'src/{gen}/{gen}.c'], + include_dirs=['.', 'src', join('src', gen)], + libraries=EXTRA_LIBRARIES, + extra_compile_args=EXTRA_COMPILE_ARGS, + extra_link_args=EXTRA_LINK_ARGS, + depends=depends + [f'_{gen}.pyx', + 'bit_generator.pyx', 'bit_generator.pxd'], + define_macros=_defs, + ) + for gen in ['_common', 'bit_generator']: + # gen.pyx + config.add_extension(gen, + sources=[f'{gen}.c'], + libraries=EXTRA_LIBRARIES, + extra_compile_args=EXTRA_COMPILE_ARGS, + extra_link_args=EXTRA_LINK_ARGS, + include_dirs=['.', 'src'], + depends=depends + [f'{gen}.pyx', f'{gen}.pxd',], + define_macros=defs, + ) + config.add_data_files(f'{gen}.pxd') + for gen in ['_generator', '_bounded_integers']: + # gen.pyx, src/distributions/distributions.c + config.add_extension(gen, + sources=[f'{gen}.c'], + libraries=EXTRA_LIBRARIES + ['npymath'], + extra_compile_args=EXTRA_COMPILE_ARGS, + include_dirs=['.', 'src'], + extra_link_args=EXTRA_LINK_ARGS, + depends=depends + [f'{gen}.pyx'], + define_macros=defs, + ) + config.add_data_files('_bounded_integers.pxd') + mtrand_libs = ['m', 'npymath'] if os.name != 'nt' else ['npymath'] + config.add_extension('mtrand', + sources=['mtrand.c', + 'src/legacy/legacy-distributions.c', + 'src/distributions/distributions.c', + ], + include_dirs=['.', 'src', 'src/legacy'], + libraries=mtrand_libs, + extra_compile_args=EXTRA_COMPILE_ARGS, + extra_link_args=EXTRA_LINK_ARGS, + depends=depends + ['mtrand.pyx'], + define_macros=defs + LEGACY_DEFS, + ) + config.add_data_files(*depends) + config.add_data_files('*.pyi') + return config + + +if __name__ == '__main__': + from numpy.distutils.core import setup + + setup(configuration=configuration) diff --git a/MLPY/Lib/site-packages/numpy/random/tests/__init__.py b/MLPY/Lib/site-packages/numpy/random/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/numpy/random/tests/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/random/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..338f7d066e8e83cc3bf8b4b27e2d271702afe7e5 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/random/tests/__pycache__/__init__.cpython-39.pyc differ diff --git 
a/MLPY/Lib/site-packages/numpy/random/tests/__pycache__/test_direct.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/random/tests/__pycache__/test_direct.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7bb1c44caf9f01fa7f5b37b6b26d0e800c372fcc Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/random/tests/__pycache__/test_direct.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/random/tests/__pycache__/test_extending.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/random/tests/__pycache__/test_extending.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..22820749ab997ca828a6615870cef80bfe3a9b3d Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/random/tests/__pycache__/test_extending.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/random/tests/__pycache__/test_generator_mt19937.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/random/tests/__pycache__/test_generator_mt19937.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..25b28a899b8de445d39bfd203ce8c2ecbb700fa7 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/random/tests/__pycache__/test_generator_mt19937.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/random/tests/__pycache__/test_generator_mt19937_regressions.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/random/tests/__pycache__/test_generator_mt19937_regressions.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c040fb3e828a60de62c8d0e738f9eb6c3ec46895 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/random/tests/__pycache__/test_generator_mt19937_regressions.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/random/tests/__pycache__/test_random.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/random/tests/__pycache__/test_random.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf9f3203ef0712145861042599843afc27a95cba Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/random/tests/__pycache__/test_random.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/random/tests/__pycache__/test_randomstate.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/random/tests/__pycache__/test_randomstate.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3876beff5b604c4f9cbb101ed6f542c6238fc444 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/random/tests/__pycache__/test_randomstate.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/random/tests/__pycache__/test_randomstate_regression.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/random/tests/__pycache__/test_randomstate_regression.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a93a0bf456636f6aeff8d807627ac7ccc9d2779c Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/random/tests/__pycache__/test_randomstate_regression.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/random/tests/__pycache__/test_regression.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/random/tests/__pycache__/test_regression.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..41fb94d59cd1453f021c8ea74e13e59db22065ee Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/random/tests/__pycache__/test_regression.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/random/tests/__pycache__/test_seed_sequence.cpython-39.pyc 
b/MLPY/Lib/site-packages/numpy/random/tests/__pycache__/test_seed_sequence.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e825a9211bf1d78105b4cc0f2c5d030bd9f39e93 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/random/tests/__pycache__/test_seed_sequence.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/random/tests/__pycache__/test_smoke.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/random/tests/__pycache__/test_smoke.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8fbc6b7fc040836a36b61c0c486bb5eadcdf2bf0 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/random/tests/__pycache__/test_smoke.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/random/tests/data/__init__.py b/MLPY/Lib/site-packages/numpy/random/tests/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/numpy/random/tests/data/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/random/tests/data/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd3cb5d1689177783700efceb4ef84cd95b29811 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/random/tests/data/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/random/tests/data/mt19937-testset-1.csv b/MLPY/Lib/site-packages/numpy/random/tests/data/mt19937-testset-1.csv new file mode 100644 index 0000000000000000000000000000000000000000..c4668253fe99726511d4ce588fed703e9caddcea --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/tests/data/mt19937-testset-1.csv @@ -0,0 +1,1001 @@ +seed, 0xdeadbeaf +0, 0xc816921f +1, 0xb3623c6d +2, 0x5fa391bb +3, 0x40178d9 +4, 0x7dcc9811 +5, 0x548eb8e6 +6, 0x92ba3125 +7, 0x65fde68d +8, 0x2f81ec95 +9, 0xbd94f7a2 +10, 0xdc4d9bcc +11, 0xa672bf13 +12, 0xb41113e +13, 0xec7e0066 +14, 0x50239372 +15, 0xd9d66b1d +16, 0xab72a161 +17, 0xddc2e29f +18, 0x7ea29ab4 +19, 0x80d141ba +20, 0xb1c7edf1 +21, 0x44d29203 +22, 0xe224d98 +23, 0x5b3e9d26 +24, 0x14fd567c +25, 0x27d98c96 +26, 0x838779fc +27, 0x92a138a +28, 0x5d08965b +29, 0x531e0ad6 +30, 0x984ee8f4 +31, 0x1ed78539 +32, 0x32bd6d8d +33, 0xc37c8516 +34, 0x9aef5c6b +35, 0x3aacd139 +36, 0xd96ed154 +37, 0x489cd1ed +38, 0x2cba4b3b +39, 0x76c6ae72 +40, 0x2dae02b9 +41, 0x52ac5fd6 +42, 0xc2b5e265 +43, 0x630e6a28 +44, 0x3f560d5d +45, 0x9315bdf3 +46, 0xf1055aba +47, 0x840e42c6 +48, 0xf2099c6b +49, 0x15ff7696 +50, 0x7948d146 +51, 0x97342961 +52, 0x7a7a21c +53, 0xc66f4fb1 +54, 0x23c4103e +55, 0xd7321f98 +56, 0xeb7efb75 +57, 0xe02490b5 +58, 0x2aa02de +59, 0x8bee0bf7 +60, 0xfc2da059 +61, 0xae835034 +62, 0x678f2075 +63, 0x6d03094b +64, 0x56455e05 +65, 0x18b32373 +66, 0x8ff0356b +67, 0x1fe442fb +68, 0x3f1ab6c3 +69, 0xb6fd21b +70, 0xfc310eb2 +71, 0xb19e9a4d +72, 0x17ddee72 +73, 0xfd534251 +74, 0x9e500564 +75, 0x9013a036 +76, 0xcf08f118 +77, 0x6b6d5969 +78, 0x3ccf1977 +79, 0x7cc11497 +80, 0x651c6ac9 +81, 0x4d6b104b +82, 0x9a28314e +83, 0x14c237be +84, 0x9cfc8d52 +85, 0x2947fad5 +86, 0xd71eff49 +87, 0x5188730e +88, 0x4b894614 +89, 0xf4fa2a34 +90, 0x42f7cc69 +91, 0x4089c9e8 +92, 0xbf0bbfe4 +93, 0x3cea65c +94, 0xc6221207 +95, 0x1bb71a8f +96, 0x54843fe7 +97, 0xbc59de4c +98, 0x79c6ee64 +99, 0x14e57a26 +100, 0x68d88fe +101, 0x2b86ef64 +102, 0x8ffff3c1 +103, 0x5bdd573f +104, 0x85671813 +105, 0xefe32ca2 +106, 0x105ded1e +107, 0x90ca2769 +108, 0xb33963ac +109, 0x363fbbc3 +110, 0x3b3763ae +111, 0x1d50ab88 
+112, 0xc9ec01eb +113, 0xc8bbeada +114, 0x5d704692 +115, 0x5fd9e40 +116, 0xe61c125 +117, 0x2fe05792 +118, 0xda8afb72 +119, 0x4cbaa653 +120, 0xdd2243df +121, 0x896fd3f5 +122, 0x5bc23db +123, 0xa1c4e807 +124, 0x57d1a24d +125, 0x66503ddc +126, 0xcf7c0838 +127, 0x19e034fc +128, 0x66807450 +129, 0xfc219b3b +130, 0xe8a843e7 +131, 0x9ce61f08 +132, 0x92b950d6 +133, 0xce955ec4 +134, 0xda0d1f0d +135, 0x960c6250 +136, 0x39552432 +137, 0xde845e84 +138, 0xff3b4b11 +139, 0x5d918e6f +140, 0xbb930df2 +141, 0x7cfb0993 +142, 0x5400e1e9 +143, 0x3bfa0954 +144, 0x7e2605fb +145, 0x11941591 +146, 0x887e6994 +147, 0xdc8bed45 +148, 0x45b3fb50 +149, 0xfbdf8358 +150, 0x41507468 +151, 0x34c87166 +152, 0x17f64d77 +153, 0x3bbaf4f8 +154, 0x4f26f37e +155, 0x4a56ebf2 +156, 0x81100f1 +157, 0x96d94eae +158, 0xca88fda5 +159, 0x2eef3a60 +160, 0x952afbd3 +161, 0x2bec88c7 +162, 0x52335c4b +163, 0x8296db8e +164, 0x4da7d00a +165, 0xc00ac899 +166, 0xadff8c72 +167, 0xbecf26cf +168, 0x8835c83c +169, 0x1d13c804 +170, 0xaa940ddc +171, 0x68222cfe +172, 0x4569c0e1 +173, 0x29077976 +174, 0x32d4a5af +175, 0xd31fcdef +176, 0xdc60682b +177, 0x7c95c368 +178, 0x75a70213 +179, 0x43021751 +180, 0x5e52e0a6 +181, 0xf7e190b5 +182, 0xee3e4bb +183, 0x2fe3b150 +184, 0xcf419c07 +185, 0x478a4570 +186, 0xe5c3ea50 +187, 0x417f30a8 +188, 0xf0cfdaa0 +189, 0xd1f7f738 +190, 0x2c70fc23 +191, 0x54fc89f9 +192, 0x444dcf01 +193, 0xec2a002d +194, 0xef0c3a88 +195, 0xde21be9 +196, 0x88ab3296 +197, 0x3028897c +198, 0x264b200b +199, 0xd8ae0706 +200, 0x9eef901a +201, 0xbd1b96e0 +202, 0xea71366c +203, 0x1465b694 +204, 0x5a794650 +205, 0x83df52d4 +206, 0x8262413d +207, 0x5bc148c0 +208, 0xe0ecd80c +209, 0x40649571 +210, 0xb4d2ee5f +211, 0xedfd7d09 +212, 0xa082e25f +213, 0xc62992d1 +214, 0xbc7e65ee +215, 0x5499cf8a +216, 0xac28f775 +217, 0x649840fb +218, 0xd4c54805 +219, 0x1d166ba6 +220, 0xbeb1171f +221, 0x45b66703 +222, 0x78c03349 +223, 0x38d2a6ff +224, 0x935cae8b +225, 0x1d07dc3f +226, 0x6c1ed365 +227, 0x579fc585 +228, 0x1320c0ec +229, 0x632757eb +230, 0xd265a397 +231, 0x70e9b6c2 +232, 0xc81e322c +233, 0xa27153cf +234, 0x2118ba19 +235, 0x514ec400 +236, 0x2bd0ecd6 +237, 0xc3e7dae3 +238, 0xfa39355e +239, 0x48f23cc1 +240, 0xbcf75948 +241, 0x53ccc70c +242, 0x75346423 +243, 0x951181e0 +244, 0x348e90df +245, 0x14365d7f +246, 0xfbc95d7a +247, 0xdc98a9e6 +248, 0xed202df7 +249, 0xa59ec913 +250, 0x6b6e9ae2 +251, 0x1697f265 +252, 0x15d322d0 +253, 0xa2e7ee0a +254, 0x88860b7e +255, 0x455d8b9d +256, 0x2f5c59cb +257, 0xac49c9f1 +258, 0xa6a6a039 +259, 0xc057f56b +260, 0xf1ff1208 +261, 0x5eb8dc9d +262, 0xe6702509 +263, 0xe238b0ed +264, 0x5ae32e3d +265, 0xa88ebbdf +266, 0xef885ae7 +267, 0xafa6d49b +268, 0xc94499e0 +269, 0x1a196325 +270, 0x88938da3 +271, 0x14f4345 +272, 0xd8e33637 +273, 0xa3551bd5 +274, 0x73fe35c7 +275, 0x9561e94b +276, 0xd673bf68 +277, 0x16134872 +278, 0x68c42f9f +279, 0xdf7574c8 +280, 0x8809bab9 +281, 0x1432cf69 +282, 0xafb66bf1 +283, 0xc184aa7b +284, 0xedbf2007 +285, 0xbd420ce1 +286, 0x761033a0 +287, 0xff7e351f +288, 0xd6c3780e +289, 0x5844416f +290, 0xc6c0ee1c +291, 0xd2e147db +292, 0x92ac601a +293, 0x393e846b +294, 0x18196cca +295, 0x54a22be +296, 0x32bab1c4 +297, 0x60365183 +298, 0x64fa342 +299, 0xca24a493 +300, 0xd8cc8b83 +301, 0x3faf102b +302, 0x6e09bb58 +303, 0x812f0ea +304, 0x592c95d8 +305, 0xe45ea4c5 +306, 0x23aebf83 +307, 0xbd9691d4 +308, 0xf47b4baa +309, 0x4ac7b487 +310, 0xcce18803 +311, 0x3377556e +312, 0x3ff8e6b6 +313, 0x99d22063 +314, 0x23250bec +315, 0x4e1f9861 +316, 0x8554249b +317, 0x8635c2fc +318, 0xe8426e8a +319, 0x966c29d8 +320, 0x270b6082 +321, 
0x3180a8a1 +322, 0xe7e1668b +323, 0x7f868dc +324, 0xcf4c17cf +325, 0xe31de4d1 +326, 0xc8c8aff4 +327, 0xae8db704 +328, 0x3c928cc2 +329, 0xe12cd48 +330, 0xb33ecd04 +331, 0xb93d7cbe +332, 0x49c69d6a +333, 0x7d3bce64 +334, 0x86bc219 +335, 0x8408233b +336, 0x44dc7479 +337, 0xdf80d538 +338, 0xf3db02c3 +339, 0xbbbd31d7 +340, 0x121281f +341, 0x7521e9a3 +342, 0x8859675a +343, 0x75aa6502 +344, 0x430ed15b +345, 0xecf0a28d +346, 0x659774fd +347, 0xd58a2311 +348, 0x512389a9 +349, 0xff65e1ff +350, 0xb6ddf222 +351, 0xe3458895 +352, 0x8b13cd6e +353, 0xd4a22870 +354, 0xe604c50c +355, 0x27f54f26 +356, 0x8f7f422f +357, 0x9735b4cf +358, 0x414072b0 +359, 0x76a1c6d5 +360, 0xa2208c06 +361, 0x83cd0f61 +362, 0x6c4f7ead +363, 0x6553cf76 +364, 0xeffcf44 +365, 0x7f434a3f +366, 0x9dc364bd +367, 0x3cdf52ed +368, 0xad597594 +369, 0x9c3e211b +370, 0x6c04a33f +371, 0x885dafa6 +372, 0xbbdaca71 +373, 0x7ae5dd5c +374, 0x37675644 +375, 0x251853c6 +376, 0x130b086b +377, 0x143fa54b +378, 0x54cdc282 +379, 0x9faff5b3 +380, 0x502a5c8b +381, 0xd9524550 +382, 0xae221aa6 +383, 0x55cf759b +384, 0x24782da4 +385, 0xd715d815 +386, 0x250ea09a +387, 0x4e0744ac +388, 0x11e15814 +389, 0xabe5f9df +390, 0xc8146350 +391, 0xfba67d9b +392, 0x2b82e42f +393, 0xd4ea96fc +394, 0x5ffc179e +395, 0x1598bafe +396, 0x7fb6d662 +397, 0x1a12a0db +398, 0x450cee4a +399, 0x85f8e12 +400, 0xce71b594 +401, 0xd4bb1d19 +402, 0x968f379d +403, 0x54cc1d52 +404, 0x467e6066 +405, 0x7da5f9a9 +406, 0x70977034 +407, 0x49e65c4b +408, 0xd08570d1 +409, 0x7acdf60b +410, 0xdffa038b +411, 0x9ce14e4c +412, 0x107cbbf8 +413, 0xdd746ca0 +414, 0xc6370a46 +415, 0xe7f83312 +416, 0x373fa9ce +417, 0xd822a2c6 +418, 0x1d4efea6 +419, 0xc53dcadb +420, 0x9b4e898f +421, 0x71daa6bf +422, 0x7a0bc78b +423, 0xd7b86f50 +424, 0x1b8b3286 +425, 0xcf9425dd +426, 0xd5263220 +427, 0x4ea0b647 +428, 0xc767fe64 +429, 0xcfc5e67 +430, 0xcc6a2942 +431, 0xa51eff00 +432, 0x76092e1b +433, 0xf606e80f +434, 0x824b5e20 +435, 0xebb55e14 +436, 0x783d96a6 +437, 0x10696512 +438, 0x17ee510a +439, 0x3ab70a1f +440, 0xcce6b210 +441, 0x8f72f0fb +442, 0xf0610b41 +443, 0x83d01fb5 +444, 0x6b3de36 +445, 0xe4c2e84f +446, 0x9c43bb15 +447, 0xddf2905 +448, 0x7dd63556 +449, 0x3662ca09 +450, 0xfb81f35b +451, 0xc2c8a72a +452, 0x8e93c37 +453, 0xa93da2d4 +454, 0xa03af8f1 +455, 0x8d75159a +456, 0x15f010b0 +457, 0xa296ab06 +458, 0xe55962ba +459, 0xeae700a9 +460, 0xe388964a +461, 0x917f2bec +462, 0x1c203fea +463, 0x792a01ba +464, 0xa93a80ac +465, 0x9eb8a197 +466, 0x56c0bc73 +467, 0xb8f05799 +468, 0xf429a8c8 +469, 0xb92cee42 +470, 0xf8864ec +471, 0x62f2518a +472, 0x3a7bfa3e +473, 0x12e56e6d +474, 0xd7a18313 +475, 0x41fa3899 +476, 0xa09c4956 +477, 0xebcfd94a +478, 0xc485f90b +479, 0x4391ce40 +480, 0x742a3333 +481, 0xc932f9e5 +482, 0x75c6c263 +483, 0x80937f0 +484, 0xcf21833c +485, 0x16027520 +486, 0xd42e669f +487, 0xb0f01fb7 +488, 0xb35896f1 +489, 0x763737a9 +490, 0x1bb20209 +491, 0x3551f189 +492, 0x56bc2602 +493, 0xb6eacf4 +494, 0x42ec4d11 +495, 0x245cc68 +496, 0xc27ac43b +497, 0x9d903466 +498, 0xce3f0c05 +499, 0xb708c31c +500, 0xc0fd37eb +501, 0x95938b2c +502, 0xf20175a7 +503, 0x4a86ee9b +504, 0xbe039a58 +505, 0xd41cabe7 +506, 0x83bc99ba +507, 0x761d60e1 +508, 0x7737cc2e +509, 0x2b82fc4b +510, 0x375aa401 +511, 0xfe9597a0 +512, 0x5543806a +513, 0x44f31238 +514, 0x7df31538 +515, 0x74cfa770 +516, 0x8755d881 +517, 0x1fde665a +518, 0xda8bf315 +519, 0x973d8e95 +520, 0x72205228 +521, 0x8fe59717 +522, 0x7bb90b34 +523, 0xef6ed945 +524, 0x16fd4a38 +525, 0x5db44de1 +526, 0xf09f93b3 +527, 0xe84824cc +528, 0x945bb50e +529, 0xd0be4aa5 +530, 0x47c277c2 
+531, 0xd3800c28 +532, 0xac1c33ec +533, 0xd3dacce +534, 0x811c8387 +535, 0x6761b36 +536, 0x70d3882f +537, 0xd6e62e3a +538, 0xea25daa2 +539, 0xb07f39d1 +540, 0x391d89d7 +541, 0x84b6fb5e +542, 0x3dda3fca +543, 0x229e80a4 +544, 0x3d94a4b7 +545, 0x5d3d576a +546, 0xad7818a0 +547, 0xce23b03a +548, 0x7aa2079c +549, 0x9a6be555 +550, 0x83f3b34a +551, 0x1848f9d9 +552, 0xd8fefc1c +553, 0x48e6ce48 +554, 0x52e55750 +555, 0xf41a71cf +556, 0xba08e259 +557, 0xfaf06a15 +558, 0xeaaac0fb +559, 0x34f90098 +560, 0xb1dfffbb +561, 0x718daec2 +562, 0xab4dda21 +563, 0xd27cc1ee +564, 0x4aafbc4c +565, 0x356dfb4f +566, 0x83fcdfd6 +567, 0x8f0bcde0 +568, 0x4363f844 +569, 0xadc0f4d5 +570, 0x3bde994e +571, 0x3884d452 +572, 0x21876b4a +573, 0x9c985398 +574, 0xca55a226 +575, 0x3a88c583 +576, 0x916dc33c +577, 0x8f67d1d7 +578, 0x3b26a667 +579, 0xe4ddeb4b +580, 0x1a9d8c33 +581, 0x81c9b74f +582, 0x9ed1e9df +583, 0x6e61aecf +584, 0x95e95a5d +585, 0x68864ff5 +586, 0xb8fa5b9 +587, 0x72b1b3de +588, 0x5e18a86b +589, 0xd7f2337d +590, 0xd70e0925 +591, 0xb573a4c1 +592, 0xc77b3f8a +593, 0x389b20de +594, 0x16cf6afb +595, 0xa39bd275 +596, 0xf491cf01 +597, 0x6f88a802 +598, 0x8510af05 +599, 0xe7cd549a +600, 0x8603179a +601, 0xef43f191 +602, 0xf9b64c60 +603, 0xb00254a7 +604, 0xd7c06a2d +605, 0x17e9380b +606, 0x529e727b +607, 0xaaa8fe0a +608, 0xfb64ff4c +609, 0xcd75af26 +610, 0xfb717c87 +611, 0xa0789899 +612, 0x10391ec9 +613, 0x7e9b40b3 +614, 0x18536554 +615, 0x728c05f7 +616, 0x787dca98 +617, 0xad948d1 +618, 0x44c18def +619, 0x3303f2ec +620, 0xa15acb5 +621, 0xb58d38f4 +622, 0xfe041ef8 +623, 0xd151a956 +624, 0x7b9168e8 +625, 0x5ebeca06 +626, 0x90fe95df +627, 0xf76875aa +628, 0xb2e0d664 +629, 0x2e3253b7 +630, 0x68e34469 +631, 0x1f0c2d89 +632, 0x13a34ac2 +633, 0x5ffeb841 +634, 0xe381e91c +635, 0xb8549a92 +636, 0x3f35cf1 +637, 0xda0f9dcb +638, 0xdd9828a6 +639, 0xe1428f29 +640, 0xf4db80b5 +641, 0xdac30af5 +642, 0x1af1dd17 +643, 0x9a540254 +644, 0xcab68a38 +645, 0x33560361 +646, 0x2fbf3886 +647, 0xbc785923 +648, 0xe081cd10 +649, 0x8e473356 +650, 0xd102c357 +651, 0xeea4fe48 +652, 0x248d3453 +653, 0x1da79ac +654, 0x815a65ff +655, 0x27693e76 +656, 0xb7d5af40 +657, 0x6d245d30 +658, 0x9e06fa8f +659, 0xb0570dcb +660, 0x469f0005 +661, 0x3e0ca132 +662, 0xd89bbf3 +663, 0xd61ccd47 +664, 0x6383878 +665, 0x62b5956 +666, 0x4dc83675 +667, 0x93fd8492 +668, 0x5a0091f5 +669, 0xc9f9bc3 +670, 0xa26e7778 +671, 0xeabf2d01 +672, 0xe612dc06 +673, 0x85d89ff9 +674, 0xd1763179 +675, 0xcb88947b +676, 0x9e8757a5 +677, 0xe100e85c +678, 0x904166eb +679, 0x4996243d +680, 0x4038e1cb +681, 0x2be2c63d +682, 0x77017e81 +683, 0x3b1f556b +684, 0x1c785c77 +685, 0x6869b8bd +686, 0xe1217ed4 +687, 0x4012ab2f +688, 0xc06c0d8e +689, 0x2122eb68 +690, 0xad1783fd +691, 0x5f0c80e3 +692, 0x828f7efa +693, 0x29328399 +694, 0xeadf1087 +695, 0x85dc0037 +696, 0x9691ef26 +697, 0xc0947a53 +698, 0x2a178d2a +699, 0x2a2c7e8f +700, 0x90378380 +701, 0xaad8d326 +702, 0x9cf1c3c8 +703, 0x84eccd44 +704, 0x79e61808 +705, 0x8b3f454e +706, 0x209e6e1 +707, 0x51f88378 +708, 0xc210226f +709, 0xd982adb5 +710, 0x55d44a31 +711, 0x9817d443 +712, 0xa328c626 +713, 0x13455966 +714, 0xb8f681d3 +715, 0x2a3c713b +716, 0xc186959b +717, 0x814a74b0 +718, 0xed7bc90 +719, 0xa88d3d6d +720, 0x88a9f561 +721, 0x73aa1c0a +722, 0xdfeff404 +723, 0xec037e4b +724, 0xa5c209f0 +725, 0xb3a223b4 +726, 0x24ce3709 +727, 0x3184c790 +728, 0xa1398c62 +729, 0x2f92034e +730, 0xbb37a79a +731, 0x605287b4 +732, 0x8faa772c +733, 0x6ce56c1d +734, 0xc035fb4c +735, 0x7cf5b316 +736, 0x6502645 +737, 0xa283d810 +738, 0x778bc2f1 +739, 0xfdf99313 +740, 
0x1f513265 +741, 0xbd3837e2 +742, 0x9b84a9a +743, 0x2139ce91 +744, 0x61a8e890 +745, 0xf9ff12db +746, 0xb43d2ea7 +747, 0x88532e61 +748, 0x175a6655 +749, 0x7a6c4f72 +750, 0x6dafc1b7 +751, 0x449b1459 +752, 0x514f654f +753, 0x9a6731e2 +754, 0x8632da43 +755, 0xc81b0422 +756, 0x81fe9005 +757, 0x15b79618 +758, 0xb5fa629f +759, 0x987a474f +760, 0x1c74f54e +761, 0xf9743232 +762, 0xec4b55f +763, 0x87d761e5 +764, 0xd1ad78b7 +765, 0x453d9350 +766, 0xc7a7d85 +767, 0xb2576ff5 +768, 0xcdde49b7 +769, 0x8e1f763e +770, 0x1338583e +771, 0xfd65b9dc +772, 0x4f19c4f4 +773, 0x3a52d73d +774, 0xd3509c4c +775, 0xda24fe31 +776, 0xe2de56ba +777, 0x2db5e540 +778, 0x23172734 +779, 0x4db572f +780, 0xeb941718 +781, 0x84c2649a +782, 0x3b1e5b6a +783, 0x4c9c61b9 +784, 0x3bccd11 +785, 0xb4d7b78e +786, 0x48580ae5 +787, 0xd273ab68 +788, 0x25c11615 +789, 0x470b53f6 +790, 0x329c2068 +791, 0x1693721b +792, 0xf8c9aacf +793, 0x4c3d5693 +794, 0xd778284e +795, 0xae1cb24f +796, 0x3c11d1b3 +797, 0xddd2b0c0 +798, 0x90269fa7 +799, 0x5666e0a2 +800, 0xf9f195a4 +801, 0x61d78eb2 +802, 0xada5a7c0 +803, 0xaa272fbe +804, 0xba3bae2f +805, 0xd0b70fc2 +806, 0x529f32b +807, 0xda7a3e21 +808, 0x9a776a20 +809, 0xb21f9635 +810, 0xb3acc14e +811, 0xac55f56 +812, 0x29dccf41 +813, 0x32dabdb3 +814, 0xaa032f58 +815, 0xfa406af4 +816, 0xce3c415d +817, 0xb44fb4d9 +818, 0x32248d1c +819, 0x680c6440 +820, 0xae2337b +821, 0x294cb597 +822, 0x5bca48fe +823, 0xaef19f40 +824, 0xad60406 +825, 0x4781f090 +826, 0xfd691ffc +827, 0xb6568268 +828, 0xa56c72cb +829, 0xf8a9e0fc +830, 0x9af4fd02 +831, 0x2cd30932 +832, 0x776cefd7 +833, 0xe31f476e +834, 0x6d94a437 +835, 0xb3cab598 +836, 0xf582d13f +837, 0x3bf8759d +838, 0xc3777dc +839, 0x5e425ea8 +840, 0x1c7ff4ed +841, 0x1c2e97d1 +842, 0xc062d2b4 +843, 0x46dc80e0 +844, 0xbcdb47e6 +845, 0x32282fe0 +846, 0xaba89063 +847, 0x5e94e9bb +848, 0x3e667f78 +849, 0xea6eb21a +850, 0xe56e54e8 +851, 0xa0383510 +852, 0x6768fe2b +853, 0xb53ac3e0 +854, 0x779569a0 +855, 0xeca83c6a +856, 0x24db4d2d +857, 0x4585f696 +858, 0xf84748b2 +859, 0xf6a4dd5b +860, 0x31fb524d +861, 0x67ab39fe +862, 0x5882a899 +863, 0x9a05fcf6 +864, 0x712b5674 +865, 0xe8c6958f +866, 0x4b448bb3 +867, 0x530b9abf +868, 0xb491f491 +869, 0x98352c62 +870, 0x2d0a50e3 +871, 0xeb4384da +872, 0x36246f07 +873, 0xcbc5c1a +874, 0xae24031d +875, 0x44d11ed6 +876, 0xf07f1608 +877, 0xf296aadd +878, 0x3bcfe3be +879, 0x8fa1e7df +880, 0xfd317a6e +881, 0xe4975c44 +882, 0x15205892 +883, 0xa762d4df +884, 0xf1167365 +885, 0x6811cc00 +886, 0x8315f23 +887, 0xe045b4b1 +888, 0xa8496414 +889, 0xbed313ae +890, 0xcdae3ddb +891, 0xa9c22c9 +892, 0x275fab1a +893, 0xedd65fa +894, 0x4c188229 +895, 0x63a83e58 +896, 0x18aa9207 +897, 0xa41f2e78 +898, 0xd9f63653 +899, 0xbe2be73b +900, 0xa3364d39 +901, 0x896d5428 +902, 0xc737539e +903, 0x745a78c6 +904, 0xf0b2b042 +905, 0x510773b4 +906, 0x92ad8e37 +907, 0x27f2f8c4 +908, 0x23704cc8 +909, 0x3d95a77f +910, 0xf08587a4 +911, 0xbd696a25 +912, 0x948924f3 +913, 0x8cddb634 +914, 0xcd2a4910 +915, 0x8e0e300e +916, 0x83815a9b +917, 0x67383510 +918, 0x3c18f0d0 +919, 0xc7a7bccc +920, 0x7cc2d3a2 +921, 0x52eb2eeb +922, 0xe4a257e5 +923, 0xec76160e +924, 0x63f9ad68 +925, 0x36d0bbbf +926, 0x957bc4e4 +927, 0xc9ed90ff +928, 0x4cb6059d +929, 0x2f86eca1 +930, 0x3e3665a3 +931, 0x9b7eb6f4 +932, 0x492e7e18 +933, 0xa098aa51 +934, 0x7eb568b2 +935, 0x3fd639ba +936, 0x7bebcf1 +937, 0x99c844ad +938, 0x43cb5ec7 +939, 0x8dfbbef5 +940, 0x5be413ff +941, 0xd93b976d +942, 0xc1c7a86d +943, 0x1f0e93d0 +944, 0x498204a2 +945, 0xe8fe832a +946, 0x2236bd7 +947, 0x89953769 +948, 0x2acc3491 +949, 0x2c4f22c6 +950, 
0xd7996277 +951, 0x3bcdc349 +952, 0xfc286630 +953, 0x5f8909fd +954, 0x242677c0 +955, 0x4cb34104 +956, 0xa6ff8100 +957, 0x39ea47ec +958, 0x9bd54140 +959, 0x7502ffe8 +960, 0x7ebef8ae +961, 0x1ed8abe4 +962, 0xfaba8450 +963, 0xc197b65f +964, 0x19431455 +965, 0xe229c176 +966, 0xeb2967da +967, 0xe0c5dc05 +968, 0xa84e3227 +969, 0x10dd9e0f +970, 0xbdb70b02 +971, 0xce24808a +972, 0x423edab8 +973, 0x194caf71 +974, 0x144f150d +975, 0xf811c2d2 +976, 0xc224ee85 +977, 0x2b217a5b +978, 0xf78a5a79 +979, 0x6554a4b1 +980, 0x769582df +981, 0xf4b2cf93 +982, 0x89648483 +983, 0xb3283a3e +984, 0x82b895db +985, 0x79388ef0 +986, 0x54bc42a6 +987, 0xc4dd39d9 +988, 0x45b33b7d +989, 0x8703b2c1 +990, 0x1cc94806 +991, 0xe0f43e49 +992, 0xcaa7b6bc +993, 0x4f88e9af +994, 0x1477cce5 +995, 0x347dd115 +996, 0x36e335fa +997, 0xb93c9a31 +998, 0xaac3a175 +999, 0x68a19647 diff --git a/MLPY/Lib/site-packages/numpy/random/tests/data/mt19937-testset-2.csv b/MLPY/Lib/site-packages/numpy/random/tests/data/mt19937-testset-2.csv new file mode 100644 index 0000000000000000000000000000000000000000..453eae146484160d1b342bc0e0dcd6365793cbb3 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/tests/data/mt19937-testset-2.csv @@ -0,0 +1,1001 @@ +seed, 0x0 +0, 0x7ab4ea94 +1, 0x9b561119 +2, 0x4957d02e +3, 0x7dd3fdc2 +4, 0x5affe54 +5, 0x5a01741c +6, 0x8b9e8c1f +7, 0xda5bf11a +8, 0x509226 +9, 0x64e2ea17 +10, 0x82c6dab5 +11, 0xe4302515 +12, 0x8198b873 +13, 0xc3ec9a82 +14, 0x829dff28 +15, 0x5278e44f +16, 0x994a7d2c +17, 0xf1c89398 +18, 0xaf2fddec +19, 0x22abc6ee +20, 0x963dbd43 +21, 0xc29edffb +22, 0x41c1ce07 +23, 0x9c90034d +24, 0x1f17a796 +25, 0x3833caa8 +26, 0xb8795528 +27, 0xebc595a2 +28, 0xf8f5b5dd +29, 0xc2881f72 +30, 0x18e5d3f0 +31, 0x9b19ac7a +32, 0xb9992436 +33, 0xc00052b3 +34, 0xb63f4475 +35, 0x962642d9 +36, 0x63506c10 +37, 0x2be6b127 +38, 0x569bdbc6 +39, 0x7f185e01 +40, 0xebb55f53 +41, 0x1c30198c +42, 0x7c8d75c6 +43, 0xd3f2186b +44, 0xaca5b9b1 +45, 0xbc49ff45 +46, 0xc4a802af +47, 0x2cecd86f +48, 0x8e0da529 +49, 0x1f22b00e +50, 0x4559ea80 +51, 0x60f587d8 +52, 0x7c7460e9 +53, 0x67be0a4a +54, 0x987a0183 +55, 0x7bd30f1 +56, 0xab18c4ac +57, 0xffdbfb64 +58, 0x9ea917f9 +59, 0x1239dab7 +60, 0x38efabeb +61, 0x5da91888 +62, 0x8f49ed62 +63, 0x83f60b1e +64, 0x5950a3fc +65, 0xd8911104 +66, 0x19e8859e +67, 0x1a4d89ec +68, 0x968ca180 +69, 0x9e1b6da3 +70, 0x3d99c2c +71, 0x55f76289 +72, 0x8fa28b9e +73, 0x9fe01d33 +74, 0xdade4e38 +75, 0x1ea04290 +76, 0xa7263313 +77, 0xaafc762e +78, 0x460476d6 +79, 0x31226e12 +80, 0x451d3f05 +81, 0xd0d2764b +82, 0xd06e1ab3 +83, 0x1394e3f4 +84, 0x2fc04ea3 +85, 0x5b8401c +86, 0xebd6c929 +87, 0xe881687c +88, 0x94bdd66a +89, 0xabf85983 +90, 0x223ad12d +91, 0x2aaeeaa3 +92, 0x1f704934 +93, 0x2db2efb6 +94, 0xf49b8dfb +95, 0x5bdbbb9d +96, 0xba0cd0db +97, 0x4ec4674e +98, 0xad0129e +99, 0x7a66129b +100, 0x50d12c5e +101, 0x85b1d335 +102, 0x3efda58a +103, 0xecd886fb +104, 0x8ecadd3d +105, 0x60ebac0f +106, 0x5e10fe79 +107, 0xa84f7e5d +108, 0x43931288 +109, 0xfacf448 +110, 0x4ee01997 +111, 0xcdc0a651 +112, 0x33c87037 +113, 0x8b50fc03 +114, 0xf52aad34 +115, 0xda6cd856 +116, 0x7585bea0 +117, 0xe947c762 +118, 0x4ddff5d8 +119, 0xe0e79b3b +120, 0xb804cf09 +121, 0x84765c44 +122, 0x3ff666b4 +123, 0xe31621ad +124, 0x816f2236 +125, 0x228176bc +126, 0xfdc14904 +127, 0x635f5077 +128, 0x6981a817 +129, 0xfd9a0300 +130, 0xd3fa8a24 +131, 0xd67c1a77 +132, 0x903fe97a +133, 0xf7c4a4d5 +134, 0x109f2058 +135, 0x48ab87fe +136, 0xfd6f1928 +137, 0x707e9452 +138, 0xf327db9e +139, 0x7b80d76d +140, 0xfb6ba193 +141, 0x454a1ad0 +142, 0xe20b51e +143, 0xb774d085 
+144, 0x6b1ed574 +145, 0xb1e77de4 +146, 0xe2a83b37 +147, 0x33d3176f +148, 0x2f0ca0fc +149, 0x17f51e2 +150, 0x7c1fbf55 +151, 0xf09e9cd0 +152, 0xe3d9bacd +153, 0x4244db0a +154, 0x876c09fc +155, 0x9db4fc2f +156, 0xd3771d60 +157, 0x25fc6a75 +158, 0xb309915c +159, 0xc50ee027 +160, 0xaa5b7b38 +161, 0x4c650ded +162, 0x1acb2879 +163, 0x50db5887 +164, 0x90054847 +165, 0xfef23e5b +166, 0x2dd7b7d5 +167, 0x990b8c2e +168, 0x6001a601 +169, 0xb5d314c4 +170, 0xfbfb7bf9 +171, 0x1aba997d +172, 0x814e7304 +173, 0x989d956a +174, 0x86d5a29c +175, 0x70a9fa08 +176, 0xc4ccba87 +177, 0x7e9cb366 +178, 0xee18eb0a +179, 0x44f5be58 +180, 0x91d4af2d +181, 0x5ab6e593 +182, 0x9fd6bb4d +183, 0x85894ce +184, 0x728a2401 +185, 0xf006f6d4 +186, 0xd782741e +187, 0x842cd5bd +188, 0xfb5883aa +189, 0x7e5a471 +190, 0x83ff6965 +191, 0xc9675c6b +192, 0xb6ced3c7 +193, 0x3de6425b +194, 0x25e14db4 +195, 0x69ca3dec +196, 0x81342d13 +197, 0xd7cd8417 +198, 0x88d15e69 +199, 0xefba17c9 +200, 0x43d595e6 +201, 0x89d4cf25 +202, 0x7cae9b9b +203, 0x2242c621 +204, 0x27fc3598 +205, 0x467b1d84 +206, 0xe84d4622 +207, 0xa26bf980 +208, 0x80411010 +209, 0xe2c2bfea +210, 0xbc6ca25a +211, 0x3ddb592a +212, 0xdd46eb9e +213, 0xdfe8f657 +214, 0x2cedc974 +215, 0xf0dc546b +216, 0xd46be68f +217, 0x26d8a5aa +218, 0x76e96ba3 +219, 0x7d5b5353 +220, 0xf532237c +221, 0x6478b79 +222, 0x9b81a5e5 +223, 0x5fc68e5c +224, 0x68436e70 +225, 0x2a0043f9 +226, 0x108d523c +227, 0x7a4c32a3 +228, 0x9c84c742 +229, 0x6f813dae +230, 0xfcc5bbcc +231, 0x215b6f3a +232, 0x84cb321d +233, 0x7913a248 +234, 0xb1e6b585 +235, 0x49376b31 +236, 0x1dc896b0 +237, 0x347051ad +238, 0x5524c042 +239, 0xda0eef9d +240, 0xf2e73342 +241, 0xbeee2f9d +242, 0x7c702874 +243, 0x9eb3bd34 +244, 0x97b09700 +245, 0xcdbab1d4 +246, 0x4a2f6ed1 +247, 0x2047bda5 +248, 0x3ecc7005 +249, 0x8d0d5e67 +250, 0x40876fb5 +251, 0xb5fd2187 +252, 0xe915d8af +253, 0x9a2351c7 +254, 0xccc658ae +255, 0xebb1eddc +256, 0xc4a83671 +257, 0xffb2548f +258, 0xe4fe387a +259, 0x477aaab4 +260, 0x8475a4e4 +261, 0xf8823e46 +262, 0xe4130f71 +263, 0xbdb54482 +264, 0x98fe0462 +265, 0xf36b27b8 +266, 0xed7733da +267, 0x5f428afc +268, 0x43a3a21a +269, 0xf8370b55 +270, 0xfade1de1 +271, 0xd9a038ea +272, 0x3c69af23 +273, 0x24df7dd0 +274, 0xf66d9353 +275, 0x71d811be +276, 0xcc4d024b +277, 0xb8c30bf0 +278, 0x4198509d +279, 0x8b37ba36 +280, 0xa41ae29a +281, 0x8cf7799e +282, 0x5cd0136a +283, 0xa11324ef +284, 0x2f8b6d4b +285, 0x3657cf17 +286, 0x35b6873f +287, 0xee6e5bd7 +288, 0xbeeaa98 +289, 0x9ad3c581 +290, 0xe2376c3f +291, 0x738027cc +292, 0x536ac839 +293, 0xf066227 +294, 0x6c9cb0f9 +295, 0x84082ae6 +296, 0xab38ae9d +297, 0x493eade9 +298, 0xcb630b3a +299, 0x64d44250 +300, 0xe5efb557 +301, 0xea2424d9 +302, 0x11a690ba +303, 0x30a48ae4 +304, 0x58987e53 +305, 0x94ec6076 +306, 0x5d3308fa +307, 0xf1635ebb +308, 0x56a5ab90 +309, 0x2b2f2ee4 +310, 0x6f9e6483 +311, 0x8b93e327 +312, 0xa7ce140b +313, 0x4c8aa42 +314, 0x7657bb3f +315, 0xf250fd75 +316, 0x1edfcb0f +317, 0xdb42ace3 +318, 0xf8147e16 +319, 0xd1992bd +320, 0x64bb14d1 +321, 0x423e724d +322, 0x7b172f7c +323, 0x17171696 +324, 0x4acaf83b +325, 0x7a83527e +326, 0xfc980c60 +327, 0xc8b56bb +328, 0x2453f77f +329, 0x85ad1bf9 +330, 0x62a85dfe +331, 0x48238c4d +332, 0xbb3ec1eb +333, 0x4c1c039c +334, 0x1f37f571 +335, 0x98aecb63 +336, 0xc3b3ddd6 +337, 0xd22dad4 +338, 0xe49671a3 +339, 0xe3baf945 +340, 0xb9e21680 +341, 0xda562856 +342, 0xe8b88ce4 +343, 0x86f88de2 +344, 0x986faf76 +345, 0x6f0025c3 +346, 0x3fe21234 +347, 0xd8d3f729 +348, 0xc2d11c6f +349, 0xd4f9e8f +350, 0xf61a0aa +351, 0xc48bb313 +352, 0xe944e940 +353, 
0xf1801b2e +354, 0x253590be +355, 0x981f069d +356, 0x891454d8 +357, 0xa4f824ad +358, 0x6dd2cc48 +359, 0x3018827e +360, 0x3fb329e6 +361, 0x65276517 +362, 0x8d2c0dd2 +363, 0xc965b48e +364, 0x85d14d90 +365, 0x5a51623c +366, 0xa9573d6a +367, 0x82d00edf +368, 0x5ed7ce07 +369, 0x1d946abc +370, 0x24fa567b +371, 0x83ef5ecc +372, 0x9001724a +373, 0xc4fe48f3 +374, 0x1e07c25c +375, 0xf4d5e65e +376, 0xb734f6e9 +377, 0x327a2df8 +378, 0x766d59b7 +379, 0x625e6b61 +380, 0xe82f32d7 +381, 0x1566c638 +382, 0x2e815871 +383, 0x606514aa +384, 0x36b7386e +385, 0xcaa8ce08 +386, 0xb453fe9c +387, 0x48574e23 +388, 0x71f0da06 +389, 0xa8a79463 +390, 0x6b590210 +391, 0x86e989db +392, 0x42899f4f +393, 0x7a654ef9 +394, 0x4c4fe932 +395, 0x77b2fd10 +396, 0xb6b4565c +397, 0xa2e537a3 +398, 0xef5a3dca +399, 0x41235ea8 +400, 0x95c90541 +401, 0x50ad32c4 +402, 0xc1b8e0a4 +403, 0x498e9aab +404, 0xffc965f1 +405, 0x72633485 +406, 0x3a731aef +407, 0x7cfddd0b +408, 0xb04d4129 +409, 0x184fc28e +410, 0x424369b0 +411, 0xf9ae13a1 +412, 0xaf357c8d +413, 0x7a19228e +414, 0xb46de2a8 +415, 0xeff2ac76 +416, 0xa6c9357b +417, 0x614f19c1 +418, 0x8ee1a53f +419, 0xbe1257b1 +420, 0xf72651fe +421, 0xd347c298 +422, 0x96dd2f23 +423, 0x5bb1d63e +424, 0x32e10887 +425, 0x36a144da +426, 0x9d70e791 +427, 0x5e535a25 +428, 0x214253da +429, 0x2e43dd40 +430, 0xfc0413f4 +431, 0x1f5ea409 +432, 0x1754c126 +433, 0xcdbeebbe +434, 0x1fb44a14 +435, 0xaec7926 +436, 0xb9d9a1e +437, 0x9e4a6577 +438, 0x8b1f04c5 +439, 0x19854e8a +440, 0x531080cd +441, 0xc0cbd73 +442, 0x20399d77 +443, 0x7d8e9ed5 +444, 0x66177598 +445, 0x4d18a5c2 +446, 0xe08ebf58 +447, 0xb1f9c87b +448, 0x66bedb10 +449, 0x26670d21 +450, 0x7a7892da +451, 0x69b69d86 +452, 0xd04f1d1c +453, 0xaf469625 +454, 0x7946b813 +455, 0x1ee596bd +456, 0x7f365d85 +457, 0x795b662b +458, 0x194ad02d +459, 0x5a9649b5 +460, 0x6085e278 +461, 0x2cf54550 +462, 0x9c77ea0b +463, 0x3c6ff8b +464, 0x2141cd34 +465, 0xb90bc671 +466, 0x35037c4b +467, 0xd04c0d76 +468, 0xc75bff8 +469, 0x8f52003b +470, 0xfad3d031 +471, 0x667024bc +472, 0xcb04ea36 +473, 0x3e03d587 +474, 0x2644d3a0 +475, 0xa8fe99ba +476, 0x2b9a55fc +477, 0x45c4d44a +478, 0xd059881 +479, 0xe07fcd20 +480, 0x4e22046c +481, 0x7c2cbf81 +482, 0xbf7f23de +483, 0x69d924c3 +484, 0xe53cd01 +485, 0x3879017c +486, 0xa590e558 +487, 0x263bc076 +488, 0x245465b1 +489, 0x449212c6 +490, 0x249dcb29 +491, 0x703d42d7 +492, 0x140eb9ec +493, 0xc86c5741 +494, 0x7992aa5b +495, 0xb8b76a91 +496, 0x771dac3d +497, 0x4ecd81e3 +498, 0xe5ac30b3 +499, 0xf4d7a5a6 +500, 0xac24b97 +501, 0x63494d78 +502, 0x627ffa89 +503, 0xfa4f330 +504, 0x8098a1aa +505, 0xcc0c61dc +506, 0x34749fa0 +507, 0x7f217822 +508, 0x418d6f15 +509, 0xa4b6e51e +510, 0x1036de68 +511, 0x1436986e +512, 0x44df961d +513, 0x368e4651 +514, 0x6a9e5d8c +515, 0x27d1597e +516, 0xa1926c62 +517, 0x8d1f2b55 +518, 0x5797eb42 +519, 0xa90f9e81 +520, 0x57547b10 +521, 0xdbbcca8e +522, 0x9edd2d86 +523, 0xbb0a7527 +524, 0x7662380c +525, 0xe7c98590 +526, 0x950fbf3f +527, 0xdc2b76b3 +528, 0x8a945102 +529, 0x3f0a1a85 +530, 0xeb215834 +531, 0xc59f2802 +532, 0xe2a4610 +533, 0x8b5a8665 +534, 0x8b2d9933 +535, 0x40a4f0bc +536, 0xaab5bc67 +537, 0x1442a69e +538, 0xdf531193 +539, 0x698d3db4 +540, 0x2d40324e +541, 0x1a25feb2 +542, 0xe8cc898f +543, 0xf12e98f5 +544, 0xc03ad34c +545, 0xf62fceff +546, 0xdd827e1e +547, 0x7d8ccb3b +548, 0xab2d6bc1 +549, 0xc323a124 +550, 0x8184a19a +551, 0xc3c4e934 +552, 0x5487424d +553, 0xd6a81a44 +554, 0x90a8689d +555, 0xe69c4c67 +556, 0xbdae02dd +557, 0x72a18a79 +558, 0x2a88e907 +559, 0x31cf4b5d +560, 0xb157772f +561, 0x206ba601 +562, 0x18529232 
+563, 0x7dac90d8 +564, 0x3a5f8a09 +565, 0x9f4b64a3 +566, 0xae373af9 +567, 0x1d79447c +568, 0x2a23684b +569, 0x41fb7ba4 +570, 0x55e4bb9e +571, 0xd7619d3e +572, 0xc04e4dd8 +573, 0x8418d516 +574, 0x2b2ca585 +575, 0xfa8eedf +576, 0x5bafd977 +577, 0x31974fb0 +578, 0x9eb6697b +579, 0xc8be22f5 +580, 0x173b126a +581, 0x8809becf +582, 0x3e41efe1 +583, 0x3d6cbbb8 +584, 0x278c81d8 +585, 0xa6f08434 +586, 0xa0e6601d +587, 0x2fccd88d +588, 0x3cbc8beb +589, 0x5f65d864 +590, 0xa1ff8ddf +591, 0x609dcb7c +592, 0x4a4e1663 +593, 0xeae5531 +594, 0x962a7c85 +595, 0x1e110607 +596, 0x8c5db5d0 +597, 0xc7f2337e +598, 0xc94fcc9c +599, 0xe7f62629 +600, 0x6c9aa9f8 +601, 0x2e27fe0e +602, 0x4d0dae12 +603, 0x9eecf588 +604, 0x977ba3f2 +605, 0xed0a51af +606, 0x3f3ec633 +607, 0xc174b2ec +608, 0x590be8a9 +609, 0x4f630d18 +610, 0xf579e989 +611, 0xe2a55584 +612, 0xee11edcd +613, 0x150a4833 +614, 0xc0a0535c +615, 0xb5e00993 +616, 0xb6435700 +617, 0xa98dbff +618, 0x315716af +619, 0x94395776 +620, 0x6cbd48d9 +621, 0xab17f8fc +622, 0xa794ffb7 +623, 0x6b55e231 +624, 0x89ff5783 +625, 0x431dcb26 +626, 0x270f9bf8 +627, 0x2af1b8d0 +628, 0x881745ed +629, 0x17e1be4e +630, 0x132a0ec4 +631, 0x5712df17 +632, 0x2dfb3334 +633, 0xf5a35519 +634, 0xcafbdac6 +635, 0x73b6189d +636, 0x10107cac +637, 0x18c1045e +638, 0xbc19bbad +639, 0x8b4f05ac +640, 0x5830d038 +641, 0x468cd98a +642, 0x5b83a201 +643, 0xf0ccdd9c +644, 0xcb20c4bd +645, 0x1ff186c9 +646, 0xcdddb47f +647, 0x5c65ce6 +648, 0xb748c580 +649, 0x23b6f262 +650, 0xe2ba8e5c +651, 0x9a164a03 +652, 0x62d3322e +653, 0x918d8b43 +654, 0x45c8b49d +655, 0xce172c6e +656, 0x23febc6 +657, 0x84fdc5b7 +658, 0xe7d1fd82 +659, 0xf0ddf3a6 +660, 0x87050436 +661, 0x13d46375 +662, 0x5b191c78 +663, 0x2cbd99c0 +664, 0x7686c7f +665, 0xcff56c84 +666, 0x7f9b4486 +667, 0xefc997fe +668, 0x984d4588 +669, 0xfa44f36a +670, 0x7a5276c1 +671, 0xcfde6176 +672, 0xcacf7b1d +673, 0xcffae9a7 +674, 0xe98848d5 +675, 0xd4346001 +676, 0xa2196cac +677, 0x217f07dc +678, 0x42d5bef +679, 0x6f2e8838 +680, 0x4677a24 +681, 0x4ad9cd54 +682, 0x43df42af +683, 0x2dde417 +684, 0xaef5acb1 +685, 0xf377f4b3 +686, 0x7d870d40 +687, 0xe53df1c2 +688, 0xaeb5be50 +689, 0x7c92eac0 +690, 0x4f00838c +691, 0x91e05e84 +692, 0x23856c80 +693, 0xc4266fa6 +694, 0x912fddb +695, 0x34d42d22 +696, 0x6c02ffa +697, 0xe47d093 +698, 0x183c55b3 +699, 0xc161d142 +700, 0x3d43ff5f +701, 0xc944a36 +702, 0x27bb9fc6 +703, 0x75c91080 +704, 0x2460d0dc +705, 0xd2174558 +706, 0x68062dbf +707, 0x778e5c6e +708, 0xa4dc9a +709, 0x7a191e69 +710, 0xc084b2ba +711, 0xbb391d2 +712, 0x88849be +713, 0x69c02714 +714, 0x69d4a389 +715, 0x8f51854d +716, 0xaf10bb82 +717, 0x4d5d1c77 +718, 0x53b53109 +719, 0xa0a92aa0 +720, 0x83ecb757 +721, 0x5325752a +722, 0x114e466e +723, 0x4b3f2780 +724, 0xa7a6a39c +725, 0x5e723357 +726, 0xa6b8be9b +727, 0x157c32ff +728, 0x8b898012 +729, 0xd7ff2b1e +730, 0x69cd8444 +731, 0x6ad8030c +732, 0xa08a49ec +733, 0xfbc055d3 +734, 0xedf17e46 +735, 0xc9526200 +736, 0x3849b88a +737, 0x2746860b +738, 0xae13d0c1 +739, 0x4f15154f +740, 0xd65c3975 +741, 0x6a377278 +742, 0x54d501f7 +743, 0x81a054ea +744, 0x143592ba +745, 0x97714ad6 +746, 0x4f9926d9 +747, 0x4f7ac56d +748, 0xe87ca939 +749, 0x58b76f6f +750, 0x60901ad8 +751, 0x3e401bb6 +752, 0xa058468e +753, 0xc0bb14f6 +754, 0x2cb8f02a +755, 0x7c2cf756 +756, 0x34c31de5 +757, 0x9b243e83 +758, 0xa5c85ab4 +759, 0x2741e3b3 +760, 0x1249000e +761, 0x3fc4e72b +762, 0xa3e038a2 +763, 0x952dd92c +764, 0x2b821966 +765, 0xfa81b365 +766, 0x530919b9 +767, 0x4486d66f +768, 0xccf4f3c1 +769, 0xa8bddd1d +770, 0xcc295eb9 +771, 0xfccbe42f +772, 0x38bacd8d 
+773, 0x2261854f +774, 0x56068c62 +775, 0x9bdaeb8 +776, 0x555fa5b6 +777, 0x20fe615e +778, 0x49fb23d3 +779, 0xd093bad6 +780, 0x54919e86 +781, 0x7373eb24 +782, 0xfbaa7a98 +783, 0x5f62fb39 +784, 0xe03bc9ec +785, 0xa5074d41 +786, 0xa1cefb1 +787, 0x13912d74 +788, 0xf6421b8 +789, 0xfcb48812 +790, 0x8f1db50b +791, 0xc1654b87 +792, 0x948b43c2 +793, 0xf503ef77 +794, 0x117d891d +795, 0x5493ffa +796, 0x171313b1 +797, 0xa4b62e1e +798, 0x77454ea6 +799, 0xbea0aff0 +800, 0x13c36389 +801, 0xe3b60bac +802, 0xa176bed3 +803, 0x2863d428 +804, 0xe2314f46 +805, 0xa85cd3d4 +806, 0x7866e57 +807, 0x8f03f5bc +808, 0x239ae +809, 0x46f279fb +810, 0xcca00559 +811, 0xaa07a104 +812, 0x89123d08 +813, 0x2e6856ba +814, 0x43a9780d +815, 0x676cff25 +816, 0x6744b87d +817, 0xee260d4f +818, 0xb98d8b77 +819, 0x9b0ca455 +820, 0x659f6fe +821, 0x28d20d1c +822, 0x601f2657 +823, 0xdec3073e +824, 0x61263863 +825, 0x1a13435a +826, 0x27497d1e +827, 0x17a8458e +828, 0xdddc407d +829, 0x4bb2e8ac +830, 0x16b2aedb +831, 0x77ccd696 +832, 0x9d108fcd +833, 0x25ad233e +834, 0xaa9bc370 +835, 0xa873ab50 +836, 0xaf19c9d9 +837, 0x696e1e6b +838, 0x1fdc4bf4 +839, 0x4c2ebc81 +840, 0xde4929ed +841, 0xf4d0c10c +842, 0xb6595b76 +843, 0x75cbb1b3 +844, 0xbcb6de49 +845, 0xe23157fd +846, 0x5e596078 +847, 0xa69b0d29 +848, 0x2118a41 +849, 0x7088c16 +850, 0xc75e1e1 +851, 0x6a4af2d6 +852, 0xf19c6521 +853, 0xaff7b3b1 +854, 0x615295c7 +855, 0xbda3a8d7 +856, 0x5b5ca72e +857, 0xdad9d80f +858, 0xfa81c084 +859, 0xf4703fa +860, 0x3ca54540 +861, 0xa8961d51 +862, 0x53d1ecc2 +863, 0x808d83b6 +864, 0x68e8c48e +865, 0x89be2039 +866, 0x9088ea11 +867, 0xb8665d12 +868, 0x91272f9 +869, 0x53dddff2 +870, 0xb7a54ab +871, 0xd2b645ca +872, 0x99fb8590 +873, 0x5315c8e +874, 0x2a913806 +875, 0x7f15eb2b +876, 0xa7f1cc5d +877, 0xbb2ee836 +878, 0xd9fafd60 +879, 0x17448d6f +880, 0x999ec436 +881, 0x482ec606 +882, 0x9b403c0e +883, 0x569eb51b +884, 0xb275d1a6 +885, 0xadd29c31 +886, 0xb7ebdb15 +887, 0xdfef3662 +888, 0x51aba6db +889, 0x6d41946d +890, 0x77bf8896 +891, 0xcafa6fab +892, 0x976ab40f +893, 0x49a6d86b +894, 0x56639e55 +895, 0x9945b996 +896, 0x81459b50 +897, 0xbce97542 +898, 0xe397c9c9 +899, 0x247a5955 +900, 0xb72b1573 +901, 0x86306f86 +902, 0x34f65dc5 +903, 0x909360c0 +904, 0xf3f696ef +905, 0xcb9faae5 +906, 0x93daecd9 +907, 0xde1af7af +908, 0x43a1f2d +909, 0x6d75cde5 +910, 0x9e412b6 +911, 0x5673fed +912, 0x16bb511a +913, 0x35ef4cca +914, 0x4e615aca +915, 0x5cdaf47a +916, 0x26676047 +917, 0x8c199325 +918, 0x2adf0cb9 +919, 0x84f2e6fd +920, 0x5e627f64 +921, 0xb7cee354 +922, 0x542ab4a6 +923, 0xe59cd83b +924, 0x89cc3f10 +925, 0x92b0f5f +926, 0xc1328370 +927, 0x8208d9f7 +928, 0x68eb00cf +929, 0xfadd4ac4 +930, 0x2517784f +931, 0x4042b99 +932, 0x75ce0230 +933, 0x97c5a1b4 +934, 0x1a97f709 +935, 0x4c62781e +936, 0xf530a83 +937, 0x75776413 +938, 0x321c7240 +939, 0x6afe4e36 +940, 0xad00a2b4 +941, 0xbc05477d +942, 0xb0911e80 +943, 0x9935b87d +944, 0xd535eec5 +945, 0x149af45e +946, 0x786934b0 +947, 0xbc13cdac +948, 0x208bfa2e +949, 0xcf4b39cc +950, 0x6ac6c172 +951, 0xbfa9a37 +952, 0x42d28db6 +953, 0x2bf1ea63 +954, 0xbed6e677 +955, 0x50325d27 +956, 0xa79d3b8b +957, 0x52448bb1 +958, 0xefaad1bd +959, 0x833a2e54 +960, 0xd9de549a +961, 0x9f59672f +962, 0x9d5f5f16 +963, 0x1c914489 +964, 0xc08fa058 +965, 0xb188698b +966, 0xdc4672b5 +967, 0x594f720e +968, 0x56ed428f +969, 0x9b0898af +970, 0x8a64d3d5 +971, 0x773308d6 +972, 0x84d62098 +973, 0x46da7cf9 +974, 0x1114eae7 +975, 0xf9f2a092 +976, 0x5363a28 +977, 0xf2db7b3a +978, 0x102c71a9 +979, 0xe8e76aaf +980, 0x77a97b3b +981, 0x77b090d +982, 0x1099620e +983, 
0xa6daaae6 +984, 0x86ff4713 +985, 0xc0ef85b8 +986, 0xf621d409 +987, 0xfd1561e2 +988, 0x4bcc687d +989, 0x596f760 +990, 0x7c8819f9 +991, 0x8cb865b8 +992, 0xadea115a +993, 0x56609348 +994, 0xb321ac14 +995, 0x1bac7db2 +996, 0x5fe6ee2 +997, 0xe9bfe072 +998, 0x15549e74 +999, 0xad8c191b diff --git a/MLPY/Lib/site-packages/numpy/random/tests/data/pcg64-testset-1.csv b/MLPY/Lib/site-packages/numpy/random/tests/data/pcg64-testset-1.csv new file mode 100644 index 0000000000000000000000000000000000000000..00532a32e1c7e6c3ef80358ff9d90632d882be15 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/tests/data/pcg64-testset-1.csv @@ -0,0 +1,1001 @@ +seed, 0xdeadbeaf +0, 0x60d24054e17a0698 +1, 0xd5e79d89856e4f12 +2, 0xd254972fe64bd782 +3, 0xf1e3072a53c72571 +4, 0xd7c1d7393d4115c9 +5, 0x77b75928b763e1e2 +6, 0xee6dee05190f7909 +7, 0x15f7b1c51d7fa319 +8, 0x27e44105f26ac2d7 +9, 0xcc0d88b29e5b415 +10, 0xe07b1a90c685e361 +11, 0xd2e430240de95e38 +12, 0x3260bca9a24ca9da +13, 0x9b3cf2e92385adb7 +14, 0x30b5514548271976 +15, 0xa3a1fa16c124faf9 +16, 0xf53e17e918e45bb6 +17, 0x26f19faaeb833bfc +18, 0x95e1d605730cce1b +19, 0xa7b520c5c093c1aa +20, 0x4b68c010c9b106a3 +21, 0x25e19fe91df703f0 +22, 0x898364bb0bf593cb +23, 0x5bd6ab7dbaa125db +24, 0xd1fe47f25152045c +25, 0x3bb11919addf2409 +26, 0x26a8cb7b3f54af8 +27, 0xe6a27ee11200aa24 +28, 0x7cb585ab01e22000 +29, 0x78e60028676d2ef3 +30, 0x5c32535e5a899528 +31, 0x83e8b6f8c4a46fb3 +32, 0xe56ef7668a161246 +33, 0x36dcbc15aeb73055 +34, 0x5ea247f0bd188acb +35, 0x438b547b84601a80 +36, 0x8acda2a1273e9e3d +37, 0x2b05e30a4b40c24c +38, 0xfd87236bd13af032 +39, 0x471df211d8d985ef +40, 0x18e8a5609a793292 +41, 0x46f0951fab6dc4e3 +42, 0x6c199c4e700f6795 +43, 0xf04aa16bfb7d22cb +44, 0xd763d269fbaffc89 +45, 0x9991930cefbe5c2b +46, 0xb2a11b953f824c96 +47, 0x63fd9f52172c44b0 +48, 0x183bdad907b1d848 +49, 0xe17953cddb931c52 +50, 0x515cf16726ec205a +51, 0x88c327605150711a +52, 0xc7090dd79cbc8dc3 +53, 0xcb487cedeb00a350 +54, 0xc8abf254d87b657 +55, 0xd43cc4cbfb493d1a +56, 0x8705452e5d9ed1e +57, 0xcecd11446769cf43 +58, 0xde72156c8d65bc69 +59, 0x796a8f0f47d52ee8 +60, 0xb4c0da443917d6c3 +61, 0xe07ad7568a8e3dc3 +62, 0xc24a8da39ce6dc21 +63, 0x92b21ea80a8556eb +64, 0x572f21e531edf3af +65, 0x9b917ed56bbed198 +66, 0xe65fd8ddc5ab3d7d +67, 0xf55a80a8ec84fa18 +68, 0x18fc22e1a5227b61 +69, 0x72305dc7eeaa79d3 +70, 0x47ce58a36e7592cf +71, 0x14c6374340c0f7cc +72, 0x6f98273d4eb5a2c +73, 0x59a8702c46fe8f8a +74, 0xb67cbd8113cfe57f +75, 0xaa03c5db5f5b7690 +76, 0x3fb0f77ea4568013 +77, 0x756530990398b26e +78, 0x4c1952b2a3a6a343 +79, 0x1da15c5383074582 +80, 0xb405b21c81c274f7 +81, 0xbe664677a16788b +82, 0x9d2e37550bcee656 +83, 0x8b4589f0d9defe02 +84, 0x2935f018ee06a59 +85, 0x3834bf88be97ed11 +86, 0xa610d049cea79b6d +87, 0xd49ffc0d09a59ea9 +88, 0x4073365b76567adf +89, 0x499eefb9bb7513e2 +90, 0x74a743ee6b0138a9 +91, 0x3bf0880f2d947594 +92, 0x555d1c0498600a99 +93, 0x923b32a88ef2ffa4 +94, 0x7325411065fbedea +95, 0x9f4129ff8b79d300 +96, 0xab2b0a9b8a3785dc +97, 0x11734bdfba3a1713 +98, 0xc8333398841ba585 +99, 0xee2409cc234e6742 +100, 0xf6638e700872ecd2 +101, 0x10875300c13cd284 +102, 0x27a9bbed7c15b2d3 +103, 0x3c87f8fef31ce9bd +104, 0x92be263cd0914a95 +105, 0xa7b0f11bc742307e +106, 0x4a56f788cc1c1a3c +107, 0x4a130fa32257a48b +108, 0x5d4d9eda16e90286 +109, 0x7cc2af564844bedc +110, 0x2532867bfe7cda1a +111, 0xb1c504676611fd17 +112, 0xce8e86cfb4189aee +113, 0x99685898980d1970 +114, 0x8c3b67db23bcf1e +115, 0x73e14c93905b135f +116, 0xf0271b64ac2bd4d3 +117, 0xf4beba82f3ec1b2d +118, 0x1cdbf3ee9f210af +119, 0x2e938557c09c3ea6 +120, 
0x2d314ccfa6ffd81d +121, 0x31ad47079950ade4 +122, 0x342b27547b900872 +123, 0x171b0e20b9ef1a76 +124, 0xdf10ce6318b03654 +125, 0x1d625df4aa718897 +126, 0x8712715a9f6e02ec +127, 0xb4a072da725bca3b +128, 0x19d346cb7734bd42 +129, 0xfd4281d311cb2958 +130, 0x58274c9519fc8789 +131, 0x4cacf29d885fd544 +132, 0x784b14d1c2523b80 +133, 0x2d25242131bb2373 +134, 0xcd2a5e43a7d9abf9 +135, 0x15eda3806e650ecb +136, 0xdaac5e277d764d96 +137, 0xdc5a5dd59aaa94e0 +138, 0x40d00237a46d5999 +139, 0x6205dd35a692743f +140, 0xbbd8236740361f09 +141, 0x1625c9f4e7288bf9 +142, 0xb74f12df1479e3ce +143, 0xb2d72a51b43d7131 +144, 0xf006a324b3707c83 +145, 0x28e8ab4abe7655b8 +146, 0xfb480093ad7ab55 +147, 0x3f8abd0d6ff8d272 +148, 0xc81a94177ac26bb7 +149, 0x3cdc178307751b14 +150, 0x9de84cc2b10ba025 +151, 0x3f8ab5aefcd046e2 +152, 0x43bdb894e1ee83b2 +153, 0xe288a40f3f06ac9d +154, 0xdab62a7d04b4f30f +155, 0x49f4e20295e1a805 +156, 0x3643764805e0edef +157, 0x9449954618b6b +158, 0x6c87e0d4508e0ce0 +159, 0x3a334be688a9dd7b +160, 0xb35c39228776e499 +161, 0xc4118bfff938490e +162, 0x88cbde3dcbb034b2 +163, 0xf91b287793c417c3 +164, 0x42b15f731a59f5b3 +165, 0xffa27104bbe4814d +166, 0x1b6789d138beccde +167, 0x542c2c1440d0ceb9 +168, 0x367294504d18fa0d +169, 0xf918b60e804a1b58 +170, 0xd390964e33a9d0e3 +171, 0x23bb1be7c4030fe8 +172, 0x9731054d039a8afb +173, 0x1a6205026b9d139b +174, 0x2fa13b318254a07e +175, 0x69571de7d8520626 +176, 0x641a13d7c03332b7 +177, 0x76a6237818f7a441 +178, 0x4e77860d0c660d81 +179, 0x4441448a1c1cbdb2 +180, 0xccd7783a042046e5 +181, 0xf620d8e0805e3200 +182, 0x7de02971367fdd0c +183, 0x539c263c5914cab1 +184, 0x9c3b9ba1a87bbf08 +185, 0x6d95baa34cda215f +186, 0x2db3f83ace0bac5f +187, 0x7f5af1da2dc670a4 +188, 0xfcc098d16c891bfb +189, 0x81a33df1d7a5ab12 +190, 0x767b0f863c8e9882 +191, 0x7a92983830de483d +192, 0xfa7598c37a79ac25 +193, 0xb89b3ca42ce03053 +194, 0x457a542b8efed4f7 +195, 0x571b7737fd0eeda7 +196, 0xa0f59e524485c0a +197, 0x82dca766b7901efd +198, 0xa68243caf6a3bd5d +199, 0x1bac981c6c740e5e +200, 0xbcd51bedf9103e44 +201, 0x4e197efd3ae5a7bf +202, 0x523568efd782268b +203, 0x5ec4ef1191fef09 +204, 0xed751ed5e31c9ab +205, 0x44eac24de03e1b29 +206, 0x9237d57c011d3fb3 +207, 0xa8c6da0f7692f235 +208, 0x9f9eb6bc15d6cac7 +209, 0x34bb8e0c93427aad +210, 0x115febd738eaac4a +211, 0xa439991ed139d27a +212, 0x45c7c2633d8710a2 +213, 0x48b7475f3405a3ce +214, 0x80158497c77bd00b +215, 0x935c316a5b1657cb +216, 0x59c5d54440e9695e +217, 0x337c78c5b3d0ede2 +218, 0x8c46bb956b93790d +219, 0xbf1dd03e471d71c5 +220, 0x2d375e90a4bef583 +221, 0xd0365428331b3790 +222, 0xfcd3969ac827ecd4 +223, 0x392fb6c580498410 +224, 0x6d6db4ceab5ea6c0 +225, 0x9bf84f1972e24786 +226, 0x798dfd820959dcc5 +227, 0x2e425095e65e8bfb +228, 0x8c1aa11536b1c9c3 +229, 0xd28e2ef9b12f6f74 +230, 0x86583bc98c8f78d2 +231, 0x489877530e3f93e7 +232, 0xb1d9430631104a15 +233, 0x1814f6098e6263bd +234, 0x8e2658a4e0d4cd53 +235, 0x5afe20e2531cdb2a +236, 0x30d02f7c4755c9bf +237, 0xe1e217cda16ed2d2 +238, 0xccb4913a42e3b791 +239, 0xfff21363ac183226 +240, 0xe788690bbda147a7 +241, 0x76905cf5917bfc6a +242, 0x2a8fa58f7916f52c +243, 0xf903c0cc0357815a +244, 0x15d20f243a4998d2 +245, 0x5b7decee5a86ea44 +246, 0x114f7fc421211185 +247, 0x328eb21715764c50 +248, 0xaffaa3f45c0678fd +249, 0x2579e6ef50378393 +250, 0x7610ab7743c19795 +251, 0xf9923d2bd101b197 +252, 0x57e42e7a62ba7e53 +253, 0x9f1dc217b4f02901 +254, 0x88a9ebd86509b234 +255, 0x867fc926aecc8591 +256, 0xaf22c1bfef04c718 +257, 0x39f701f0313f4288 +258, 0x6171ad397e6faab2 +259, 0x239bb5b9abdec4fc +260, 0xd9a591e25dd01c6e +261, 0x826dc4a75b628e49 +262, 
0xf112b152c408f47 +263, 0x6843a06110f86c0 +264, 0x965e56a7185c1332 +265, 0x8d84492edbc71710 +266, 0xeee8ec111cfd1319 +267, 0xf2858e94ad98e458 +268, 0xbc9589fdf5f3a97e +269, 0xaf0ceef3bc375130 +270, 0x48f4aaf13fa75c1e +271, 0x111e9db47bee758f +272, 0xea3171df130164ba +273, 0x2a7bbe30bf827ab6 +274, 0xc516c3fdbf758c35 +275, 0xec55097754b04be5 +276, 0x374a997d52b6d3e6 +277, 0x487df5456085ffbc +278, 0x528883b84df8eafe +279, 0x805f77ab5ba26f86 +280, 0x8eb81477dc04f213 +281, 0x471ea08ec6794d72 +282, 0x69d3667ecc4d2176 +283, 0x98b7b6e295548a66 +284, 0x3877713c173f8f2 +285, 0xa00542570d0e8de3 +286, 0xf534b1bfa4033e50 +287, 0x7e1fedeac8bf6b26 +288, 0x8043f37c89628af4 +289, 0x1dd7039ec295e86d +290, 0xce9c05b763a40cc4 +291, 0x246926481e61028f +292, 0xb7cb0f1babf5893b +293, 0xefe6b777f37fc63e +294, 0xebbcabb4cb35cdcb +295, 0x39fa63cd711eeea9 +296, 0xad5d3ba7aaf30c8d +297, 0x8e9e78fe46021990 +298, 0xc7eaef6e7d5a3c62 +299, 0xefccdd5495d3f386 +300, 0x2179557ee8cfc76a +301, 0x88a77f621f0885ce +302, 0xafda62674543d90c +303, 0xb8e6fbe2e13e56c0 +304, 0x8bfbbe26a14f9b1a +305, 0x1404f59f5851f8c3 +306, 0x1140c53a0489566d +307, 0x3edf2d138b5c3f1d +308, 0x75d6bb275d817dc +309, 0x8e660ae27107664e +310, 0x7a8021038ee303e1 +311, 0x2042ef5eefa9079f +312, 0xe3e7b90bbf6d457a +313, 0xf3f819d2bb9405b +314, 0x522e42155cae0c10 +315, 0xf5bfbb975b40e233 +316, 0x2cf82b614dd95cfa +317, 0x183ef4a96bc40e55 +318, 0x9f6e351c5ba4e752 +319, 0x37c1110683c90846 +320, 0x1d89b7a996d8a977 +321, 0x18a444f77c7cb4d9 +322, 0xd0a8a971b78dc893 +323, 0x860232fb9e6543f1 +324, 0x60b6097f51002555 +325, 0xca1e5214123e3894 +326, 0xe03fe695c95f99bb +327, 0x2c7c6779d5f03622 +328, 0xafeeee42f63055d1 +329, 0x670dde905515936a +330, 0x9a922f42b59fb094 +331, 0xddb5ff49af5a651a +332, 0xe61b04c9e58ebbf8 +333, 0x4e459dcf272e7fc4 +334, 0xd549e92c16adceeb +335, 0x7a17dba1299d4a9c +336, 0x825d756109f2b585 +337, 0xba142e61a9cb203e +338, 0xc2a19f00e9c04a30 +339, 0x2d0f8140d23d0652 +340, 0x8b866d4d4d6caaf4 +341, 0x4f11d90dd91f8217 +342, 0xf6efc37373b9e0d +343, 0x248493d6cd6a4736 +344, 0xd12b6ae74a951a3e +345, 0x56e34722070b70a7 +346, 0x22d3f201cc9fa0eb +347, 0xbfdcc320008291b7 +348, 0x1a7a6922e9204fbd +349, 0x831421e0c4945ae4 +350, 0x66316feddddf0e11 +351, 0xa8c86a1517456554 +352, 0x14a9049ad989e335 +353, 0x837022259f141ecd +354, 0xcb71793a06c261f7 +355, 0x4aeefc07ebe09a79 +356, 0x8982f15aa3b6594b +357, 0x67bccfa7ed9b0d5b +358, 0xb377463b523e9dec +359, 0x53d3d594870fecb7 +360, 0xa5274b1caec5a60a +361, 0xd6316d0cb643db39 +362, 0xabc1a9b536de88ce +363, 0xed2fdb1383d2a077 +364, 0x12319c6feb97221b +365, 0x7e0f6cd40ef47403 +366, 0x86135c84fe26dbf8 +367, 0xc96622d3fbbee19b +368, 0xe3989d8d8511573f +369, 0x42cc365554d1fdc7 +370, 0x4c1a1eb8bbce8b4f +371, 0xfc4e30e7ef2034c1 +372, 0xc490444317a91e76 +373, 0x7ccdf469ff5dc81c +374, 0xf5a0da4110cc09d7 +375, 0x505227baf34c0fb5 +376, 0xbe58737e8a35cc88 +377, 0xd449bee91b3e8c41 +378, 0x3e590e23299d0e6 +379, 0x291a7d9e0a64caf7 +380, 0xdc6fafbdfebd2293 +381, 0x8223f1e259fe8a65 +382, 0x6186fbc9efd9e3df +383, 0xfda39b07e4007ffb +384, 0xfc19aea98574dc02 +385, 0xd0e10d354fcacd8c +386, 0xc9619916544a55a5 +387, 0xd454d50a8c8558cd +388, 0xcd94a246712d91e +389, 0x76a771f5d1231cce +390, 0xdd20cb2b7b370ee5 +391, 0xa6f4f50feca57c49 +392, 0x78c8fb431f17ab9c +393, 0x1b692b79a59b43cc +394, 0x4c45045d287da7e6 +395, 0x522132e18bf43928 +396, 0x25c458983138b41c +397, 0x2a1fb426ef229796 +398, 0x74dc324c74e5dd3d +399, 0x6df75e3eb6eb5374 +400, 0xb63f2f4f9ca25b61 +401, 0xac72286112ee54d6 +402, 0x5a966f3d0a6863c4 +403, 0x8d7046bc64a46fc2 +404, 
0xa7b740fd6e3087eb +405, 0xcdbcbe0340cfcdf5 +406, 0xcb632613bf312b65 +407, 0xa91b3f2c2aac238b +408, 0xa06deb3f5ae555a3 +409, 0x29d72e1f8db69 +410, 0x2d004bae09728ea6 +411, 0xc6eee5dce0736cc1 +412, 0xa7493145500ff60f +413, 0xc4d68c4aa18ab93c +414, 0x8210c29e79d48d7f +415, 0xd0999d7889ecbef6 +416, 0x6e3bd61e66e93566 +417, 0xe6cc13d47d7d7b1f +418, 0x3d6f181f42e03979 +419, 0xbed4e14fd867604a +420, 0xbe511c84067bd86d +421, 0x49a876d89e697d38 +422, 0xc04c3dde8f889c98 +423, 0xaf293eeab0f53e3f +424, 0x9f6291dd65732cd6 +425, 0xd7811ac01de78c01 +426, 0xe385cf0261d50ec2 +427, 0x5a64134b3542bbf +428, 0xf9d1302bc6f13a68 +429, 0x5d2aabbea37d8c31 +430, 0xd9842e99a5192970 +431, 0x713eadc4cd30e837 +432, 0xb7b002fc72abb413 +433, 0x276cfeea526af1cf +434, 0x8519fe79b633a0ce +435, 0x2f0e87363705a3e2 +436, 0x9adbac0be3c371e7 +437, 0xf3f44ba899a6173c +438, 0x782d6c29618fde2b +439, 0x7f61062acec408f +440, 0x6e79cd836359258f +441, 0x5c8e9b138df5785a +442, 0xa54359c9f39a9a84 +443, 0xeec3f033135084b0 +444, 0x883ee717787a535c +445, 0x9a2422b513a73b00 +446, 0x2dd4beddcdd64a58 +447, 0x90c8a13202239c7b +448, 0x85b352ab759646d9 +449, 0x139f5cb2e46c53aa +450, 0xe1d3ba6c721c66d1 +451, 0xaa66e0edc4b60a98 +452, 0x3521275c75be29b6 +453, 0x490a5190b3edfa5d +454, 0xd2abcdd2ccb2f14e +455, 0x9d9be8bef4a5857d +456, 0xde19676f13ef7755 +457, 0xdac2fee2e42615f3 +458, 0xf4239801cb02f2ab +459, 0xaa8bf923ed91875c +460, 0x61d18a1940e4c7c0 +461, 0x1eb6aa3d5f077a6d +462, 0xee7374c063bf29d8 +463, 0x2f0a59e34d76268d +464, 0xc92e80e17d1eb3e9 +465, 0xafd05b3ec3d2ca72 +466, 0x28a61ad8d6c497b8 +467, 0xa7094d6834ad7d47 +468, 0x57d80ea9eccbb4f +469, 0xb047e0fee6cdaf16 +470, 0x44f41b5eb48c00bb +471, 0xd6dc8e1eb9c8c9ba +472, 0x47adfd2c638c7849 +473, 0x365d63db7d526c68 +474, 0xc21cda439016135d +475, 0x14d10c3f0f98863c +476, 0xa93e56f74e037602 +477, 0x3b4e9c8915bdc9 +478, 0xb46f5ae155e54aa2 +479, 0x8e470d21ce1943e1 +480, 0x60b96301b5ba2e8d +481, 0x1b473a41d381f9ff +482, 0xabcf5a8e3269e73f +483, 0xd410f6e94fb21fa1 +484, 0x65d1a47eebf87e5e +485, 0x48eaa201c61cb843 +486, 0x212c1abc2499bfc5 +487, 0x4255ad8377d2d8d +488, 0x44caeef472010612 +489, 0xffae764524f572f2 +490, 0x78d374d20c9ee550 +491, 0x6e003206c0511cee +492, 0x7998a159145bfb82 +493, 0x921239650bda1d4d +494, 0xae05025509bcfdc5 +495, 0xc6430c980be407b4 +496, 0x78524f1744b153f1 +497, 0x84089e6f468181fe +498, 0x8d0d21d7dfb6c254 +499, 0x90bad90502a33603 +500, 0x3072a403cbd16315 +501, 0xdfadddf3f1c040c2 +502, 0x22f0b0639d9ff975 +503, 0xb49e48a4cad0765b +504, 0x95a0a04f8239709d +505, 0x56e147a24a4c481f +506, 0xacf16ef61dea4c7e +507, 0x424040afd2700de6 +508, 0xc67e8096a3c717a9 +509, 0x39f164181dd0a399 +510, 0x2449cedc1d62198c +511, 0x7a53df11a1f1a61c +512, 0x5596f1d4a3badae3 +513, 0x38ed4c822072b3d0 +514, 0xf07ef346b3fd730a +515, 0xfd349c35c3ed51fd +516, 0x2f15c9c7890f8f32 +517, 0x3b470df52b173c29 +518, 0xd31bfc8981281af7 +519, 0xbbcc9bdf561215bb +520, 0x5782fffea326574f +521, 0xb0ebdcfcc5e03290 +522, 0x7fd89d93d2b3fbef +523, 0x280ea1865d9ba2 +524, 0xe726959845b2c100 +525, 0xd0361f032cd7dbb1 +526, 0x3c65ec2028b81a22 +527, 0x5221e9b2188920bf +528, 0xeb5ab27c4125ec20 +529, 0x80a32dd48b54f0a4 +530, 0x369b5ced1012bebb +531, 0x582d35d76530bc6f +532, 0x7b50dc9b48e1e37d +533, 0x37fdfe8bbacf8dad +534, 0x7a0cb7e6e93840ea +535, 0xa1132c870be0b2ce +536, 0x9d8ac2c68267cd1a +537, 0x470969b647fa7df4 +538, 0xabcb7d8adf7e2d24 +539, 0xacdebec9bdf9eb1c +540, 0xe30f4cbf7eb6a59 +541, 0x746673836c4df41d +542, 0x75120a6b647bb326 +543, 0x2f4eab556c3f6878 +544, 0xd84651ab05405b7a +545, 0x9e695808b9622284 +546, 
0xc93b71e56aa6e1a5 +547, 0x2be7f3be4a7b7050 +548, 0x6497e910b6733241 +549, 0xcf7050dfd08076fc +550, 0x4e3cc156eca183f7 +551, 0xf801a33d9326c265 +552, 0x6aa293c8a47d40e6 +553, 0x28c429755faa6230 +554, 0x82b818651f54e7bb +555, 0xa84d726d7acdbead +556, 0x5cfa535d5774965d +557, 0x4a34b7b1cb48d53 +558, 0x86a7b5bce426de84 +559, 0xfcd2307cecdb7318 +560, 0x16dbaaa71181a038 +561, 0x88e7e8cd261c2547 +562, 0x3c09ba6d1d5ea913 +563, 0x5dd3d643734ee5b6 +564, 0x326d725fe8cbb33 +565, 0x7bcca9ca2da8e784 +566, 0x482dcf6b11d7f9a4 +567, 0x1291b605b4cd3e04 +568, 0x6988181b50e2f4a8 +569, 0x649e3c37131fc292 +570, 0x4eeb67b9e21eba54 +571, 0xc051d39073dec45f +572, 0xc99c52e110270d67 +573, 0xcb813d5d77868add +574, 0x423a5f13573e7ac0 +575, 0x231ac4cc4fe73616 +576, 0x4c22b888a6e600ea +577, 0x8059a6dc7c9e25c6 +578, 0x49f498a5b8ad22de +579, 0xf1e812cc6d1826c8 +580, 0xbbaf60abe8b11e00 +581, 0x1d31d7f4d8be9a6a +582, 0xfeadce70a9a10c14 +583, 0xb47c635bc136996a +584, 0xd88e694c8da030cb +585, 0xc41bbe132aff1364 +586, 0x34249ab18a4b0800 +587, 0xf14b5c825aa736cc +588, 0x2710be6b08df78e +589, 0x2ab56bcc9bf9e740 +590, 0x9b7f6e591b5f648 +591, 0xfb665c3772f34135 +592, 0x628a0a5d2db5d8d5 +593, 0xb3e3f251e61b5259 +594, 0x82310ae33faf1b23 +595, 0x24af8723a65cbd0b +596, 0x671c93282fc4ad97 +597, 0x6cabeaac77270cad +598, 0xef4643fe38b02b7f +599, 0x7b011549d1ac6653 +600, 0xe2af87b9fccfe89 +601, 0x36b71ad67197ac8a +602, 0xdbba55d06f2fd93b +603, 0xf571dbd764b7f7e5 +604, 0x38ea402501cdbd45 +605, 0xb8ab5b5b1bab2913 +606, 0xfab973c4d45f32bd +607, 0x9364f1717c2636b9 +608, 0xfad00f4d983e00fe +609, 0xc90c532a11aef75a +610, 0x64a6eda96e44783c +611, 0x35891f2eb84520be +612, 0x28d216080caed43 +613, 0x129629cc5bd206f6 +614, 0x22c3d39822cbb4b3 +615, 0xf1efbf4cce1eaa2b +616, 0x7070cba12524ed08 +617, 0xa7ed0be9deabf20d +618, 0x8ddb4cd6b454f76b +619, 0xb82814b1db37b63 +620, 0x418e83b36de01876 +621, 0x9a538c7f39c6413 +622, 0xee0cd7abf8a2ecb9 +623, 0xa9222b07e95590f3 +624, 0x6296a415d68341e6 +625, 0x981e0a5a8f811929 +626, 0x4bb372d3b0de283d +627, 0xa9805b5971866e16 +628, 0xaf3b5f5183497657 +629, 0x2152b0fd23c3d9f +630, 0xb730c325b7173180 +631, 0x1e3439d231608c19 +632, 0x1c5ba6031379823c +633, 0x87f5d12d6d365cbc +634, 0xd3bc7f29614bc594 +635, 0x63102214bb391268 +636, 0x482bbd5bba648a44 +637, 0x6a23604690759dc4 +638, 0x4091d41408d3a39e +639, 0x7cd017f922101b15 +640, 0x7ce9004ac5f9231 +641, 0x978bc3d8ec7f7fdf +642, 0x5bd0c4d780580c11 +643, 0x4313c068bb040153 +644, 0x3ab7dab7bc38bf80 +645, 0x3aaf9c187728deea +646, 0x6633a4ce8efb88d9 +647, 0x7263b089878f00fc +648, 0xd0d767e96fe00eb8 +649, 0x184a7c0c01908028 +650, 0x1ebdf41e6f76e186 +651, 0xeb740ee1d0402083 +652, 0xfccf4974edb1c339 +653, 0x16e2707aa28306d +654, 0x1684f0bdb018c3a5 +655, 0x887b6b67b88aa862 +656, 0x923d7810a2bea33a +657, 0x56b3560babef5d6b +658, 0xb39a14614c54b8c6 +659, 0x33e4dc545a509fc8 +660, 0x26e21f84142da9b +661, 0xdd07598125756855 +662, 0x572d49a071d7ae0a +663, 0xba3c7e3baea28760 +664, 0x7ecdb2d714db4b61 +665, 0x1c62b4920e1b2fe2 +666, 0x71bfafb70092834a +667, 0xd710a4228f60d56a +668, 0xeb16277d4ce4e95b +669, 0x968168c90b16d3a1 +670, 0xac3439dfe8ad0062 +671, 0x5a8226f9dd5876ad +672, 0xb843affe917291b0 +673, 0xd76d1e67051f8259 +674, 0xb73a6638cce8ccde +675, 0xa0e6afd3c7295f9 +676, 0xff8857b4bbb5f4c6 +677, 0x99becf78938f0426 +678, 0xfcd17edc1e70f004 +679, 0x6223b8b23f2f50 +680, 0xca875f3e84587b4c +681, 0x7d1e81e589f87fb9 +682, 0x9eb621586aa826fc +683, 0xf46fb9ef5b9c2086 +684, 0x2882c9b7092725f3 +685, 0x5493f099bbedcd02 +686, 0x90c1ec979ffa811d +687, 0x963f765025bcc53 +688, 0x56194e3ec3d9d4e9 
+689, 0x7ec4720954cac1f0 +690, 0xfab3145171af7f90 +691, 0x52a0b4e41a13b593 +692, 0x740e2d4d5909d126 +693, 0x98f5339c09c94a28 +694, 0x1700e462fe8dec76 +695, 0x3dbffc2aa4695ac3 +696, 0x5763edacabdfe2a1 +697, 0x7b5b623ce49ef21d +698, 0x30addc66f49860df +699, 0xcc7511a6c31bceda +700, 0x1b25b61ca75db43b +701, 0x416bc4c298e59046 +702, 0x4cd11fe2d74e4649 +703, 0xb54458a9229fc978 +704, 0x8c21a27882b6ca35 +705, 0x57887c8b5e01639b +706, 0xf4e893da996680bb +707, 0x8d601297702c9c0d +708, 0x2a27904a30aa53af +709, 0x497800f6917ea8d0 +710, 0xe96db3340ada9c00 +711, 0xcc23166f14c010ee +712, 0x782690d78fa65ec9 +713, 0xf3e00d74a0878eda +714, 0xa7cbb683decca0a3 +715, 0xdd2e038e683a94aa +716, 0xe2096ff8da896ca5 +717, 0xf7c83400afdabe11 +718, 0x395b8c6f6a4086a4 +719, 0x4a164ec05bee71d4 +720, 0xe87aa5d1ca0462fe +721, 0x8dbc5aed6dff9ceb +722, 0x12120d1e9552707b +723, 0x877dca6889b3e6cd +724, 0xbd65605c01e900fb +725, 0xbd6b82c4157c3115 +726, 0x8b60282732caf78a +727, 0x279fcf5e5de9e57f +728, 0x34b34ebfb6a37eae +729, 0xd258cc1a14e03b7b +730, 0x9a528ba3db4a13fb +731, 0xffa0aea59d057746 +732, 0x27fa7f456cd37c4e +733, 0xe1117a57a6fdce63 +734, 0xdc8fc903970a1551 +735, 0x492dd104f30faf29 +736, 0x110def0959e5652b +737, 0x7f8d1997636fdd15 +738, 0xfb77b05e538a9b59 +739, 0x2e41fa35b4b01fc6 +740, 0xbc35ae69a3374085 +741, 0x192c2a681c2d9b4b +742, 0x12566b8866c189d6 +743, 0x9d88ea785c5185c8 +744, 0x30a621ad5f983c4 +745, 0x8b875efe1206f587 +746, 0x224d25c3af6e3423 +747, 0x7503e976a1ac7bcc +748, 0x3c98aa869e823859 +749, 0x3d8835304b646892 +750, 0xf6353330ff970bc2 +751, 0x8a673f5e2edb8acb +752, 0xf2fdcc53493838b9 +753, 0x85ddcd526236af16 +754, 0x60afb99814c676c5 +755, 0x32a1c2749e281ca8 +756, 0x2367a92ae3bee9ca +757, 0x219fe082703743cc +758, 0x34d8b74dc85182a9 +759, 0xdd04164c72db23f +760, 0xe293ac28fe2671a9 +761, 0x9ca7d169cbda6f45 +762, 0x705c47972b4240ed +763, 0xc10eda9eeb536209 +764, 0xc36ddacd0c94e85d +765, 0x8eb592c27e8cd0d2 +766, 0x3e815991c76e7cc4 +767, 0xac9cfce31acf7580 +768, 0xbf7a4cb31c7aee94 +769, 0x663077444aceecf6 +770, 0xe7f614ff386eb568 +771, 0x79d7a229c66912c0 +772, 0x161ed4311f63e1f3 +773, 0x308a5faeb9982ede +774, 0x7b38ddb9b7efd10 +775, 0x1e103a2589b27ecf +776, 0x67b02baf4259f27e +777, 0x868921c115ea2eee +778, 0x959791912200f71e +779, 0x4dd55f36dec10557 +780, 0xe3464d90080cb99d +781, 0xfb2d4f6accce652f +782, 0x109900a9257d77ba +783, 0x3c4bda8e2c83684c +784, 0xc9ae040fb7f868c6 +785, 0x78098ffe994f4905 +786, 0x7a94c33eca77f0b4 +787, 0xbe6a2a95e9b5c0e8 +788, 0x797d39cf963f4837 +789, 0x8d2e249e4425d06d +790, 0x6ae2c30cd5da06f4 +791, 0x904489de762b179f +792, 0x84713e2dfb591e3b +793, 0x6405a40da3f6f51b +794, 0x976b560d663a2df1 +795, 0xed1c544784ba1e22 +796, 0xca658e995ed9344c +797, 0x2b1c6b8e4db49025 +798, 0x52b1513da528bad +799, 0x3c63406d256d9968 +800, 0x63a31ca3d423f85e +801, 0xb05a81f55789a720 +802, 0xd04412992c476c8e +803, 0x828ec2f77a150a3d +804, 0xee50926671bb60c6 +805, 0x5aa70f93e2df61b4 +806, 0x94d60fa2e8655858 +807, 0x3f5e5b770703cc7d +808, 0xc62dfb2688ca7784 +809, 0xaaf02e1e8ba89fe4 +810, 0x4ab74e0d8c047405 +811, 0x31ee04fbac6fcead +812, 0x1203b78b8228f5af +813, 0x412a70836f9aa71a +814, 0xab51cf98c03f1819 +815, 0x783a3ce9ce137f65 +816, 0x8897085b0a072cf2 +817, 0x685dd9bde8798cb +818, 0x9a1fac7b1705e2c1 +819, 0xf3e9ff98de48e9cb +820, 0x5c2d3eb1a1fbe917 +821, 0x3bda718b6b54d82e +822, 0x29f2dd18f22f0821 +823, 0xb992da1572ac3597 +824, 0xacb69e7aa14b34f7 +825, 0xcd36e3ad14f088d1 +826, 0x6aaacc96a1ec55e8 +827, 0xf8ac593f154fe68f +828, 0x18fc9cbff012339f +829, 0x2f3368ccbbb99899 +830, 0x7cec7d17f37031f7 +831, 
0x96e86bfaadcb8fc2 +832, 0x74f9e7ee3d42a752 +833, 0xbd52f6c7d9b0733 +834, 0xa48e6d96bb6ce1c9 +835, 0xaefa058254b82133 +836, 0xb7a19edfd0929107 +837, 0x6160ce9125b26e26 +838, 0x6537dbbde1d2aed +839, 0xc567f9a6bec52dde +840, 0xca29fd3f22443342 +841, 0x7732aa6db6a1c476 +842, 0x8f5a4d7df6b11b3 +843, 0x76649262aa7e31e1 +844, 0x60a13eb125fbc829 +845, 0xc81e4d123dd21ac1 +846, 0x643cbb09bb72f86b +847, 0xf971a98fb25555a6 +848, 0xffa2774c66692d56 +849, 0xcb33c16c50b13ea9 +850, 0xfabf388dffda0e9b +851, 0x55d41ec12ca24b9f +852, 0x91cf693a3467e807 +853, 0x6be2c00b2c31d6dd +854, 0xc5cf513b5251ae28 +855, 0xffc4384212403dec +856, 0x45d4e1865255a69d +857, 0xfb1dcf956972086a +858, 0xcae946a55c4c55b8 +859, 0x7351ac7720e385c1 +860, 0x19aa8ffd86240254 +861, 0x8f515ae78f4040da +862, 0x1e1ed2058de50fce +863, 0x22d006dcdb374243 +864, 0x6e0f0ede7c95b441 +865, 0x70e8aa81b53b4d25 +866, 0x998f309ea41e3814 +867, 0x89ed6598fb66f390 +868, 0xb5997dc3278060df +869, 0xb2a021eac4f7e046 +870, 0x3705b60aa2fd0768 +871, 0xfc415079ab9200e +872, 0xf2871ac4cf45ecc9 +873, 0x24bf758d2246175f +874, 0xac503dd6f8141b3 +875, 0x4e879d12d9f03b3 +876, 0x82034af8cf93b644 +877, 0x59899dd7e478a6c7 +878, 0xae90addb6eb11507 +879, 0x1524ddf76730cdef +880, 0x6fd4afd5456b1c9d +881, 0xcddb9221ea001cbc +882, 0x64ff400bbf2e8604 +883, 0x6dda10549b06ed9b +884, 0xed2c85104c261527 +885, 0xc7e09217d29929a8 +886, 0x56284df611a428b1 +887, 0x1a7608289c0a61 +888, 0x7cb63db15166ff66 +889, 0xc6013c76fcdcdc72 +890, 0x8e5dd566c7a5a676 +891, 0x5a8e8565f40d133b +892, 0xe465973455848c44 +893, 0xf92eecbfe0f3c2c0 +894, 0x7d64155d4dcc5cac +895, 0xf17595706f988dad +896, 0xd590a001a6a19c5c +897, 0x82a164475758db3d +898, 0x6b144993ea1bbe32 +899, 0x22a81a7a6e453779 +900, 0x8e8c298df1a68a73 +901, 0x78056afd6d936b4c +902, 0xaaceef0325faaf62 +903, 0xe78bb7699f82266f +904, 0x523a2d283c5a5166 +905, 0x7076d87088f6c6db +906, 0x6087dd54cff5aeb2 +907, 0x7ef82e62cb851680 +908, 0x4e8bcc8ed84d03d8 +909, 0xd12fa0361df3cfd3 +910, 0xefb89c79f8127297 +911, 0xa9af4e2fbce0b1f8 +912, 0x462136685b70331e +913, 0xe9e74c93da699b77 +914, 0x9ec69215fb11d0c3 +915, 0xc10f229939e3e111 +916, 0x3f67fa79e41d2374 +917, 0xd5e7c1a9a7185162 +918, 0xa1dcce9ec91492fe +919, 0xd4e61f0727b5d21b +920, 0xdf6cdce46551800a +921, 0xa3f256ce906982d3 +922, 0x209742a6b9ffc27 +923, 0x4006c96958526a57 +924, 0x9606aebc75a1967e +925, 0x91b9f42fb64189df +926, 0xb27119defcb938bc +927, 0x128cc7a84ba05597 +928, 0x6c3df613c62d0d30 +929, 0x3adf69d48b629ec7 +930, 0xda42ee493837b128 +931, 0xb8e770480e760bb5 +932, 0x9feb55d57c99c626 +933, 0x29812d80afdae3ed +934, 0xae4222a64276a8c7 +935, 0xe3897212a5b4ed53 +936, 0x98bedfd13886e669 +937, 0xca858675d7fc0d0e +938, 0x28a359f665354234 +939, 0xfac2ccabe4128b35 +940, 0x61373cc5d11ca180 +941, 0x7007605a4512a87a +942, 0xe71f8eade7b30b3d +943, 0x3a9e77f9b99bd04d +944, 0x70d3e42488098866 +945, 0xd30fc159c7cd4d99 +946, 0xe4d3f6600d2e2d6f +947, 0x1088324dfa955c25 +948, 0x516437acd4764623 +949, 0x38a31abe50d0aa03 +950, 0x72e1054e9dc02ba +951, 0xe6971dd664d1a2e2 +952, 0xf6698cb095d3b702 +953, 0xad995a5a8c19bd92 +954, 0x34e53c6936f656e6 +955, 0x10de240bc07c757a +956, 0x3e3b9a6861c2bd1c +957, 0x9c0b0b97d3712ec9 +958, 0xabf1505a75043aed +959, 0xbdf93d3de3274179 +960, 0x28fa5904d3f62c28 +961, 0xc3b97b39ef6c5133 +962, 0xf2b2219225b8679d +963, 0x8be4ec0f930c0aaa +964, 0x47de5a56aa590643 +965, 0xb6f871b304129856 +966, 0x80a61c06233ab0f9 +967, 0x3ce6c3af8101b055 +968, 0x85b911708274e7d1 +969, 0x4cab65d093a488b7 +970, 0xaabc4b10661fe28e +971, 0x35b16dea64474a68 +972, 0x1d6eb5b093361223 +973, 
0xc39107b92f0fe1fb +974, 0x1d09e048073c4841 +975, 0xc6a02f43aca8cb2f +976, 0xaf6613dbc7da909c +977, 0x5ac2a40c230aa756 +978, 0x33afb5e7c01c39a5 +979, 0xc7b0b20ea8b7d0ef +980, 0xdf7306c8ccb1bbea +981, 0x9710efc0c188b2a0 +982, 0xd6303eadb72c873e +983, 0xa38ca609b118f35a +984, 0x8390613065c6e535 +985, 0xdf9a0106757e431f +986, 0x8bcf77039788e143 +987, 0x6026806a986b378e +988, 0x482ff3b1394cb1dc +989, 0x2a27d0ccac9ede9c +990, 0x53c77f26e271b3ab +991, 0x1ba004cf276cf3f +992, 0xc135b0517dc81f7c +993, 0x5d137838db75e442 +994, 0x3fe505f93d1dbdd7 +995, 0x351654ae7d598294 +996, 0x173f8d182af9d84d +997, 0xf97dfcd164fe11c5 +998, 0xcda423e5ad43b290 +999, 0xa5cb380b8de10d10 diff --git a/MLPY/Lib/site-packages/numpy/random/tests/data/pcg64-testset-2.csv b/MLPY/Lib/site-packages/numpy/random/tests/data/pcg64-testset-2.csv new file mode 100644 index 0000000000000000000000000000000000000000..84717817db771ff7058b16aba58b0899a79acb99 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/tests/data/pcg64-testset-2.csv @@ -0,0 +1,1001 @@ +seed, 0x0 +0, 0xa30febcfd9c2825f +1, 0x4510bdf882d9d721 +2, 0xa7d3da94ecde8b8 +3, 0x43b27b61342f01d +4, 0xd0327a782cde513b +5, 0xe9aa5979a6401c4e +6, 0x9b4c7b7180edb27f +7, 0xbac0495ff8829a45 +8, 0x8b2b01e7a1dc7fbf +9, 0xef60e8078f56bfed +10, 0xd0dbc74d4700374c +11, 0xb37868abbe90b0 +12, 0xdb7ed8bf64e6f5f0 +13, 0x89910738de7951f +14, 0xbacab307c3cfd379 +15, 0x2cf7c449d8b927a6 +16, 0xdcf94b3a16db7f0e +17, 0x8a9d33d905a8792e +18, 0x4cb9eb2014951238 +19, 0x6c353acf7b26d6f1 +20, 0x73ff53d673aa30c +21, 0x1fd10760015eca68 +22, 0xabae0aa9021eeba8 +23, 0xa5ae363a868ee2bb +24, 0x9d89e0f041de6631 +25, 0x6238b133c3991a65 +26, 0xff49267d75fef51a +27, 0xfb180656ce13c53f +28, 0xaf7fadf36128712d +29, 0xa6847fc6f339c63e +30, 0xb03e0b80d71ea5bc +31, 0x63905abcb43969af +32, 0x2295af3ee00a3bba +33, 0xb8b375b994330415 +34, 0x867d9ef1d8716a3b +35, 0x4f6c02f5601b4e18 +36, 0x7c5fb4c16c470d18 +37, 0xe3b57986b804b343 +38, 0xef1d79d212aca692 +39, 0x5b98774c8806209c +40, 0x924fc76bac38a5d1 +41, 0x5266084c412ddeed +42, 0x98240bf9b831d6a3 +43, 0x5681599e81219442 +44, 0x6441248fc2ba92bc +45, 0xe3e9051a540349ea +46, 0x3a2700034390baa3 +47, 0x9f893155b6d402bc +48, 0x158207910c6d8aef +49, 0xd5282ab7608c2cbc +50, 0xc97f4651669dee4f +51, 0x3d4750d95103ed60 +52, 0xe0614542caac1f04 +53, 0xefe5092144cfc6c +54, 0x560bc486abd7e9ae +55, 0x2678b71392daa4b8 +56, 0x734970d3dc2ba416 +57, 0xcbdbe849e51e4aaf +58, 0x3b0b5e28b491556c +59, 0xd51449ac45abd88 +60, 0x6790b59991f1b7ab +61, 0x32d1c039ff2415bc +62, 0x173b9772f24f72e0 +63, 0x9490a9ca9f883b1b +64, 0x4c775989e6214222 +65, 0xac07db37e6ee6114 +66, 0x331371b2e3f10aee +67, 0xf12e5326c21c28e4 +68, 0x5d77dc280c70d614 +69, 0x1b01bd17a2f281ec +70, 0xa10d3b5882938487 +71, 0xed5a0033c394ae8f +72, 0x70bc8ea568ea44b4 +73, 0xf4600ae77965e730 +74, 0x7ff92c0b321ce233 +75, 0x6cdbc87d0cc1d670 +76, 0x9ec64f0cf2000eb1 +77, 0xfebea50259800f68 +78, 0xf2edf9019a8fd343 +79, 0x75c584ac042e5468 +80, 0xc1fa8481d5bf9a1d +81, 0x7f57180168514ac2 +82, 0x878100716b94f81e +83, 0xc929406e3af17fd2 +84, 0x6a26e2c013e4bf4d +85, 0xbc071d8848280955 +86, 0xb60d75abbfd1bdac +87, 0xee9b76afeca9fa69 +88, 0x1d6c399d2f452810 +89, 0xbaa0bc1621e25c83 +90, 0xed6ba792f8671ba5 +91, 0xf7ca02c2ab11d8d7 +92, 0x3c3cadadf0b21e3 +93, 0xdd1784571e864e9c +94, 0xfb2f992015157509 +95, 0xf50bb9f0d3ced743 +96, 0x261565f75c3e185f +97, 0xf8fe33b284513e60 +98, 0xe3d2d10b5e024664 +99, 0xd28717566242cf35 +100, 0x7ae07d133ac5b789 +101, 0x3b7ccaaa53ac338e +102, 0xcd480bace4871650 +103, 0xec6c78f923c080e9 +104, 0x44211d0ff8919d59 
+105, 0x89f79af76d2a45fe +106, 0x71583fd8a837548b +107, 0xee57269c261511f5 +108, 0xa5ee8f3b128c5d1 +109, 0xbb64c20ed0765a17 +110, 0x9d4790ab2eeaf7e4 +111, 0x742f3db806d9e98 +112, 0xb81ec97aed6a0d1b +113, 0x41808b34f6a8a23 +114, 0xc20913af175dfd4d +115, 0x834427db263b22bb +116, 0xedd9c632e611828a +117, 0x10eac8524496f571 +118, 0xd76091b97eb00ab7 +119, 0x111298ae9fe95666 +120, 0x5824b2e2a6719c43 +121, 0x6e280ec539e934ed +122, 0xf74fd832df90083e +123, 0x8fee6d0f241c2e97 +124, 0x4244f331c2f19c3c +125, 0x3dde75a845cce97f +126, 0xe35bb8e635a9915b +127, 0x39d2943037f7932e +128, 0x1fe2d134201d0970 +129, 0x49d00b63c749b804 +130, 0x960c2942cd4e4e04 +131, 0x8dd8e009dbc0435f +132, 0xcf493495c3a055cd +133, 0x8f7b5a1c0f9fe9cd +134, 0x49d5f90374641a25 +135, 0x69b3932073d3524c +136, 0xd170603e7de84ee2 +137, 0xa062ba3ed3539948 +138, 0xf5861cc5b5d56c82 +139, 0x5e914998a30c7e76 +140, 0x8d77f2ad1503c0f1 +141, 0x980b6a9e3b4181fb +142, 0xd9299cd50694c084 +143, 0x253dc0f8f1cec4c5 +144, 0x68110fb9d1b3e695 +145, 0xe8f3120d0aabc461 +146, 0xb066e7df0dfb042 +147, 0xd29ce0f797e6b60b +148, 0x6a569bb7ca33bd42 +149, 0xd46e08b2dc2385f8 +150, 0x28c61d11d055767 +151, 0x5d73aa3d1a2bb725 +152, 0x1421191e1c14829a +153, 0xa711bfb6423df35e +154, 0x461af97a86308006 +155, 0xb3e1018ff3519367 +156, 0xf19cf866a268ef2b +157, 0x207715eac9199d1d +158, 0xdd621c410975b78c +159, 0xf390aea68683610 +160, 0x617a2d107a0047d9 +161, 0x6e05ac416e5bebf0 +162, 0x7d253e70506c1bed +163, 0xf9f96f4a7dd53810 +164, 0xc693b29cb1573f73 +165, 0x4f1146b0020ea544 +166, 0x45140608fbd40579 +167, 0xdcf57219828ce6be +168, 0xe19d58cca37b5b32 +169, 0x82bda95b2a161235 +170, 0x5823c3d8a2b6c9ba +171, 0xfeb2e74092fdf89a +172, 0x50e1ad1abc8f869d +173, 0x2ec63d0c105eb8da +174, 0xe14e1c4845a3264a +175, 0xcff53670455eb6aa +176, 0xaafaccd24619fa3e +177, 0xf55a988486e2422a +178, 0xecfba16a90ff4d04 +179, 0xbf8d36c2f644757a +180, 0xdc56ed75a0dd6249 +181, 0x3f45023eff17c3bb +182, 0x2428bbfe90023fab +183, 0xab892c611adcb70c +184, 0xb6f13d8c0c2b9d74 +185, 0x2ac3fb11d224f2a8 +186, 0x65433dcfae2d9351 +187, 0xe906859ae4b45f82 +188, 0x8fb7f5f093d76a3b +189, 0x940dd290b5e88d1a +190, 0x31b27d21bef116e7 +191, 0x86a964e2c83b5296 +192, 0x85ffd17bc079a9e8 +193, 0x16c47c724e7ab7f1 +194, 0xfb6098a9867e7d7f +195, 0x9246fb69092c6cb2 +196, 0x1a4033572760f32 +197, 0xc5cc568a8b273b84 +198, 0xfa6f9f2fbdd44abc +199, 0x9701b8e087718ba3 +200, 0x51d6a7dcf73f8f3a +201, 0x30008172cc6a972d +202, 0xac2ab49a5ca6ac81 +203, 0x31f28ef79461e54c +204, 0x93e35a8da8cc6132 +205, 0x9a2c58beeba3d5b9 +206, 0xf6615c1de266ac39 +207, 0x127ff9f8166b766b +208, 0x7ffe380e80a69556 +209, 0xbe7d2c228e1542f7 +210, 0x2d5ebb4e50ba1746 +211, 0x63585761ae1bf684 +212, 0x1019eb5cee022fea +213, 0xb9d3540ab58da30d +214, 0x1677f4cb45620eb9 +215, 0x6524baee51783822 +216, 0xdf9f2ddcfabb0adc +217, 0x78e8acc43b287935 +218, 0xe9a1974e999222b5 +219, 0xc41324ec2291e780 +220, 0xea52abc9ecdcbc9f +221, 0x209d7bcd46ec6b04 +222, 0x12d504c09803db2e +223, 0x1200e6bf21475d81 +224, 0xde6d3c2b35fd2cfc +225, 0xa2526900ac33bd3c +226, 0x7f1f5290fc432bc5 +227, 0x29ddfb380a3d69c8 +228, 0xac79cb6942a2909d +229, 0x516996685b67a92a +230, 0xb5fc39041cb828bb +231, 0x75d9d8ca0644a276 +232, 0x81e98b76be92a3e9 +233, 0xca27888fafe12179 +234, 0x17be2ae039925765 +235, 0x9429846c0e6d0342 +236, 0x327dfd50439815e9 +237, 0xcee20cd7bc254aeb +238, 0x7d250389f453f29e +239, 0xfd1b232a85c95569 +240, 0x2ed55fac80f3e9e9 +241, 0xf6886c20417a1be7 +242, 0xcd08e61f0b0fdfde +243, 0x7b33e34da5c27bff +244, 0xd043c4b7d5603dd5 +245, 0x9a544e4c70a3b686 +246, 0xa7b60398c381f771 +247, 
0xe9e7a3487c4bd4f2 +248, 0x10b58fdfe1ff112c +249, 0xd5c1c9748c0f4ceb +250, 0x61be9d09159d54ff +251, 0x5356f51e8239f510 +252, 0xfe7889d9b202ecef +253, 0xc7fc19ca5d263d5d +254, 0x7c4c07e61dfd9f69 +255, 0x6c315fe5015f300a +256, 0xe0a5bc00039747b4 +257, 0x16397fdcf829ee80 +258, 0xb55aee80d16a5169 +259, 0xca0609944d007eea +260, 0xcc982249f65a02ce +261, 0x528161feb149c148 +262, 0xcbf08ba49b41c006 +263, 0x39af1ff0b6f14138 +264, 0x5cc036be69799aec +265, 0x6adde125b1db21c5 +266, 0x8a99d83d6b613b67 +267, 0x1cd43fca9451f74c +268, 0x682dbb26ecc96365 +269, 0x13b4be2ceb43e3 +270, 0xbe8fbc3b6f4f581e +271, 0xda148a2f4bda5719 +272, 0x239106ca3319f393 +273, 0xb42b4dde641f0dd5 +274, 0xd233cfdf4cb0af74 +275, 0xfb5919d905589afc +276, 0xd802a8860c10b66a +277, 0x6c923e1d00e7b5bc +278, 0xfacce1134f383b89 +279, 0xf9570abda7a6d553 +280, 0x80f0f9796a208f18 +281, 0xc0e1df5280951c57 +282, 0xe9f143f08257bbe0 +283, 0x79e4c6463123d588 +284, 0xdd2118583f2b1684 +285, 0xb399ff5f2329fa18 +286, 0x4b3e9ebae96f813c +287, 0xc484dbf247787384 +288, 0x921865eb97603f2c +289, 0x18063c68e257d300 +290, 0x643181f345e7fc26 +291, 0x12e0b0e8eadf9fa7 +292, 0x79e613fe73dfa354 +293, 0x6db4c59203b7217a +294, 0x6c7a0e9ba6139eaf +295, 0x9617c7ac4e3f6d97 +296, 0x1f68a7b4fb1b4b75 +297, 0xef0b7ab24944f466 +298, 0xaf1dee1f4be1bc89 +299, 0xd2e355c959f5fd8d +300, 0xe594c3fb95d96efc +301, 0x9554766ca3342906 +302, 0xa4bbdc77d12842c +303, 0xb62400211ee489a8 +304, 0x91abadaaa3bbe67c +305, 0xd371eeb91deb42bb +306, 0x883bab35cbd2b6e5 +307, 0xd030c3d9411a9041 +308, 0xff3c110a858ff000 +309, 0x59bdf5ca47d0bde7 +310, 0x2bc80fa3cdba1853 +311, 0x6444ccb652662cb8 +312, 0xc0c7e256b9e90339 +313, 0x70714ea9c9d72302 +314, 0x96a0142f9d897d27 +315, 0x209a9097c5a91ef7 +316, 0xb9e33afc5171e009 +317, 0x47b37af433a58d40 +318, 0x30cc4ffbfa831d26 +319, 0xdcea4a85ff815466 +320, 0x907d5bd027f2e5cc +321, 0x7c081f6852e04a4b +322, 0xe61950749c1d502b +323, 0x1604e937ee69834a +324, 0xb2372d952dd25309 +325, 0x53f6a5b834c72577 +326, 0x2ce7a74395e0b694 +327, 0xacbf9ab4fe91f225 +328, 0x5ce1e63d3a2bb90f +329, 0x54740da3a5ed139b +330, 0xf194ddb39f29880b +331, 0x3305374f5d8ec08b +332, 0x831dd0164927ff4a +333, 0x625baa78e4458cf +334, 0x29d27dc0a4a71152 +335, 0xe227bae9a1401034 +336, 0xca0c209831846b2b +337, 0x8e8cc54b08b5a411 +338, 0x38f2b4acaac27db6 +339, 0x8ec88baac814e86b +340, 0x31c08e46b007bde +341, 0xb686c02722794c09 +342, 0xb77cf8fc682e3907 +343, 0xa56334e7f606f4b2 +344, 0x9c80b127bddd5f4f +345, 0x12df14834cd858bf +346, 0x3f14762a9cf5fb9f +347, 0x930a70941ef5779e +348, 0x64e96c849c30c080 +349, 0xfdf53bfba1300484 +350, 0xec7a9363c21bc616 +351, 0x26e9fd6a115ecb47 +352, 0x9707a84b5bc77fbb +353, 0xb23b2737b20d5903 +354, 0x22f4825ae80f6501 +355, 0x500644b12be6a01b +356, 0xb746645b2af082db +357, 0xe6af051f697892f8 +358, 0x577c724248a1cfc6 +359, 0x3d2b6a434c84eed3 +360, 0xd260f5efd7328314 +361, 0x95c16cc84bb3f55c +362, 0x7a01b2e4e0e80ca7 +363, 0x41930c3ce70a0935 +364, 0x1299bccf39d4e110 +365, 0x494883ba1a8a87f +366, 0x9478ecfe2d918e60 +367, 0x30ec9a5670cda8af +368, 0xf9bc877e833e2b99 +369, 0x1b83a0acfbb4a8db +370, 0x73bc1740c0d18880 +371, 0x65086ca9773cb3e1 +372, 0x3b78c3ccd63cff2e +373, 0xbfae748795acfb31 +374, 0xa4c9d5d56a15ba20 +375, 0xb9cb41721e52b71e +376, 0x1532f15d4dc47748 +377, 0x5a4d647a4b9ee632 +378, 0x8513c7c5a50898d9 +379, 0x6d3d98ccd5461b2e +380, 0xa65e99be2fe98d6 +381, 0x31abc8855334a0e5 +382, 0xf1ed22a661dca5b8 +383, 0x299e2b63229e03be +384, 0xda201a06687bce48 +385, 0xd27794b302142c55 +386, 0x642bd3e1c7898a9d +387, 0x777f1ff00afa1a87 +388, 0xd2f1c84fb3877baa +389, 
0xae417583289191fd +390, 0xd641f1d88e0e2d55 +391, 0xc1f1d98fb5d18ebf +392, 0xb0f72aecdadce97b +393, 0xe9b8abc764f6018a +394, 0xd2a37cff8e890594 +395, 0x2dd70d631a528771 +396, 0xbf8ba0478c18e336 +397, 0x1630bf47f372ce0a +398, 0x6d04ea20dc3f46b8 +399, 0x6591881bf34337f2 +400, 0x33c149c7eb5b4103 +401, 0xf01a8c9857c86748 +402, 0x184348cdfc16d215 +403, 0x141168b253d2ed7 +404, 0x52aaf012ef50a6f1 +405, 0xfda1722387e16f4c +406, 0x43c30f57d6c038fa +407, 0xd4a8611f5f96d214 +408, 0x2c512ce17e987f2c +409, 0x961ce450f0fa2822 +410, 0xf55a506ec6cea9cd +411, 0xb76d694d9c7f5ef6 +412, 0xfb029216dbd8e988 +413, 0x93162501896a0081 +414, 0xfbbbd2c5ab300f5c +415, 0xd648b6da7387d491 +416, 0xc73b4697471d9d98 +417, 0xe37412bf1c93ee76 +418, 0xa1a96d96570e6637 +419, 0x5b3ab4f82428f65c +420, 0x873d849b188aa36f +421, 0x39fbee0ffc9fa9ff +422, 0xc70d21b744d677fe +423, 0x2b8a43c23043d209 +424, 0x93c33eaa37370d16 +425, 0x8930ac1880f2b0ef +426, 0xac01d27707036af0 +427, 0xc2af3fee504343a0 +428, 0x1c1dae2ad5535d97 +429, 0x9ffc21804b76a480 +430, 0x69f903412cc13563 +431, 0x9d3c4e2759a0c47d +432, 0xb1a8f894be6302b9 +433, 0x95e1fd7951479506 +434, 0xbb9e6c03cd4ae8e3 +435, 0x85206010c9b737cf +436, 0x767e813694d6238c +437, 0x4969af329ccbb30a +438, 0x3aa9af1075aaea5c +439, 0xb1ff519e8118a993 +440, 0xb21a23a3c91180fe +441, 0x320b24582ca3fd88 +442, 0xf8ca56415fb4e453 +443, 0xabd0899c07205e77 +444, 0x87fdc7a44b4ad50f +445, 0xd75744911641a278 +446, 0x7c8c9a65df6fcb95 +447, 0x79d785e3c7a5b695 +448, 0x421e4565ba1f592f +449, 0x27f87eb2517835cf +450, 0xb62cc4297441c83e +451, 0xd817a80ac815ca6d +452, 0xad84388130df2aa8 +453, 0x5e6b1640452d6ac8 +454, 0x936285e15edce2a3 +455, 0x903bccc4969768e8 +456, 0xefc2cb7b109d3140 +457, 0x633e9dfdda2d903a +458, 0x2a2f3225925678a1 +459, 0xe07eac91a27f8547 +460, 0xe50ced40eda78cb3 +461, 0xc5b22500e1c7441 +462, 0x32becf61bca3aa72 +463, 0xa2e37c4b30671344 +464, 0xc9f1c1910f45d544 +465, 0x9b50333b2dcdf730 +466, 0x310bfd53a1684b94 +467, 0x1e1dc21e66ac6455 +468, 0x81876c2bfb1ed5a1 +469, 0xd0c54a3e25eadc7b +470, 0x3791b6fbbd5c7ba0 +471, 0x133be57356c599fc +472, 0x8d1148eb8e83fdea +473, 0x311aedba0d8b42cc +474, 0x1142ae52745f94bb +475, 0xc5f4ab2fbde8c4a3 +476, 0xd23be827b5b24f6d +477, 0x65f95194cd122715 +478, 0x4b48969d73125922 +479, 0x46f165052b8ff988 +480, 0x5c689f94b9275ff4 +481, 0x93b03823ff2d536b +482, 0x871f3775aa4e3523 +483, 0x5af829f7cc0f66a5 +484, 0xa32e05739cbeac8c +485, 0xacff1856ddace0fe +486, 0x8eeb5e7f991a5322 +487, 0x6325c2720e0dbdea +488, 0x9fb817bc4fdf5200 +489, 0x9786f0d850e43d78 +490, 0x571f76dd7f9fb77a +491, 0x4d9e94e181cbc63f +492, 0x8bb632d3376c547a +493, 0x9cc26d9efd1c88b9 +494, 0x9c5d49579df52b0b +495, 0x6201abf7e1cda07b +496, 0x90d68f0c6c884963 +497, 0xfc5b66188ef7f561 +498, 0x6d9303cf2e0e0f95 +499, 0xd7cfcff535f5ed07 +500, 0x14d1a1228daa4ac6 +501, 0xe00ef5762f66ae50 +502, 0xf113a79471582978 +503, 0x430985281785dc7a +504, 0x31914108c206ed5 +505, 0x7ba6707b6419971c +506, 0x2ec63b033ce112e5 +507, 0xf8bcd36ced3b41e3 +508, 0xe5cf908c8010414b +509, 0xf5ee224b7c703e30 +510, 0x9a9733af0b12338b +511, 0x83e18cc00ace34f8 +512, 0xd52cff39e23008b8 +513, 0xa700578136b9c0c5 +514, 0x3fa179d32ac51f99 +515, 0xef2d5eab6d4ad380 +516, 0x709024a5abd032df +517, 0xc607c7ee349ede87 +518, 0x803d784e9731eb5f +519, 0x2ef06f4ba769282d +520, 0x4bc1dca1e9f07eb9 +521, 0x930c958a7a72f94d +522, 0x249bc8db2cc7a3bf +523, 0x3845305798f9a5d +524, 0x6f137eca9ab6f948 +525, 0xc31f5a963d31bd67 +526, 0x9d39693d5383626f +527, 0x52fb41c335a8b98e +528, 0xb79d1a29a06006ec +529, 0x7c0926a7a3eda2cc +530, 0xffdf5214406fd53e +531, 
0xc6aa02a7e94282b9 +532, 0xd4a4431b4aa301ee +533, 0x4271cc0f9420d3ab +534, 0x26fccd7cc7fc2485 +535, 0x330594bb945b8d5a +536, 0x6ea8eaad12e5cb8c +537, 0x831c3467726bede3 +538, 0x31d1eb10017eaa61 +539, 0xc7aa75e41508f5cb +540, 0xde51810f0cadd0b5 +541, 0x50e5b3e73692f80b +542, 0x82107ec55636e188 +543, 0x9828ef175d843ab4 +544, 0xb8edc6a860dd421e +545, 0x25c0c138fd537ac3 +546, 0x47e72a771e8eb563 +547, 0xbb0f8c5333f4a2cc +548, 0x91750d2fb9b2d479 +549, 0xe662d8f6fe38df36 +550, 0x72a6d879fb5619f0 +551, 0x6817c7878dcbf077 +552, 0x4e7741cb484661e8 +553, 0x3b3b3ba0be5711bf +554, 0xa6989f5d25868765 +555, 0x43c276398997e4e0 +556, 0xdcbe16a94da28870 +557, 0x454936980a699c99 +558, 0xac614bfa8f0266c6 +559, 0x9174841392e213d5 +560, 0xa0e2acffc5fc9d1f +561, 0xe53a08a7a0e6521a +562, 0x2b845cf7c24172e0 +563, 0x265a4fc5f7adec0d +564, 0x1f34fbe5f1e49420 +565, 0x139181f6fb647f20 +566, 0x88c35d46e2fcd05e +567, 0x2a6d5b55903c0459 +568, 0xcea28eb621ad7bf1 +569, 0x5c9cdc13e7aaa30 +570, 0x5fe63e14746e7103 +571, 0x7923e53d73835db9 +572, 0x376e661210bf1b06 +573, 0x5b1cab85450efdd5 +574, 0x3908dc096c70b452 +575, 0x4825e303cd1f396f +576, 0xed476bfd702957c3 +577, 0x6acc013aff5db743 +578, 0x62c80b776343d488 +579, 0x9c75edcd5b012697 +580, 0xaa053362a3b9770a +581, 0xa907e236c7c07e94 +582, 0x15b2c380451692c0 +583, 0x94f79142697bd61f +584, 0xbc657d31ea98d44f +585, 0xcbaa5e52517a1f5e +586, 0x96aa2e44a7c4a03f +587, 0x216d3c66db2b515d +588, 0x157001807e3ca88a +589, 0x52b3a596bdd3859a +590, 0xed747e7fc5e3adac +591, 0x78fd765ddb2c448d +592, 0xe53dc7299ed8614e +593, 0x75ad41fb1d7a790a +594, 0xc14f6b944b0e6cb1 +595, 0x7c314b69fce3df1c +596, 0xb56d82eb740d7abc +597, 0x5132a93c41251fdb +598, 0xe3ce35bd2a82f958 +599, 0x440571a981c722f2 +600, 0x194cdfd9f186bc9 +601, 0xb89e522a5db00939 +602, 0xad35f339f68df3c8 +603, 0xa82ab18420322293 +604, 0xaffa6df9b72b27c4 +605, 0x9615694d23beaa2c +606, 0x1d82ebe563abad91 +607, 0xab50ef65fbd94385 +608, 0x1b070dbd70a9a14 +609, 0x2ececa796abbadf0 +610, 0x6bbeafe9e81ab2a2 +611, 0x60dcd0d2a9b76914 +612, 0x1e748039ef05c33f +613, 0x6d4d17f2213ccdff +614, 0x9fa56132957bc987 +615, 0x60a17185de2428eb +616, 0xb56038ddf306479c +617, 0x3b1db5df92d06d8b +618, 0x24d1bba8bdedf580 +619, 0xbfb7e6740ebaa4d9 +620, 0xab31c4473e46f61d +621, 0x6deb3cdd8fd5869f +622, 0x23032e47746d72d6 +623, 0xa9e72d734e10f2e8 +624, 0xbffd199b6157bc23 +625, 0x29f8254df273fb62 +626, 0xb076142130ee55ec +627, 0x5b0b08374126c309 +628, 0xea4536aae979521f +629, 0xc064e7abec91a174 +630, 0x46133ef80c59d935 +631, 0xf0227e2da1b14160 +632, 0x675a76641e1af5a +633, 0x2f50a069b33d198c +634, 0x3ded5a65e1d657eb +635, 0xbb6999b020694f6b +636, 0x86b2f2b33487aed7 +637, 0x76e14e85f8bfb4cf +638, 0x38f7f1e44bd4e0db +639, 0xc1a7d41b7e80d4ae +640, 0x1dfaaf80bbceb42e +641, 0x3f51c11497720c2b +642, 0xce6da1415ddb8b80 +643, 0x7377d8bcd359b5f3 +644, 0xe077208f3f810aca +645, 0x9a06a8a2dacbffce +646, 0xca1f99156b09b735 +647, 0x2ff9a93064d91451 +648, 0x50f3ea93f351a7ef +649, 0x606fceccb07054de +650, 0x7e83d6d2f8f6685d +651, 0x78f3995291c5d407 +652, 0xd28d2460e22d0228 +653, 0x2c5636f68a0054dd +654, 0xd9fafb1c56c8f6cb +655, 0xe39889b5f9d74464 +656, 0x1355372bf5db2cc1 +657, 0x26768426b9ac323 +658, 0x4af1dbdc1111fd89 +659, 0x66973587943b927f +660, 0xf86f5f50684dfb1d +661, 0x1247d574ff79b534 +662, 0xc8039f3259210fe2 +663, 0x79b573235c92a9f5 +664, 0x213f642d8450e2f0 +665, 0x5db7706973376566 +666, 0x6182c12e69b373d7 +667, 0x3e5ac47300aec07f +668, 0x4b5b6c57b1574376 +669, 0x6b7fcceefd56b17c +670, 0xf656c3455cb9d4b8 +671, 0x7577e2e13329721f +672, 0xf33c0c53ce956e8d +673, 
0x7d0f328ee356174 +674, 0x10ec9a168088686e +675, 0x71ef1776d062dfa +676, 0xaa7b590a488a6bc4 +677, 0x38612b6dd8049a1c +678, 0x939045e36874f731 +679, 0xcb9d1d74c56d5ac9 +680, 0x54f1c1c8fef1d8ff +681, 0x3ee4b85c8c7e939e +682, 0xb9b4608e019f352c +683, 0x79d4701275d12e6a +684, 0x2632a2d9835c7f19 +685, 0x1662cd9fba293692 +686, 0xbcb70265115ee944 +687, 0xdc43fb9761468604 +688, 0xe3eec4e7d3871352 +689, 0x829531753226989d +690, 0x2748cc67f540e074 +691, 0x39c4af25d607837d +692, 0x741a243f4cb5df99 +693, 0xda1353287e18b49a +694, 0xa6735689d751ea74 +695, 0x46326d587340ce0b +696, 0xc18531df4550012b +697, 0x6f7901e05dd4b818 +698, 0xfb966afc4c001d63 +699, 0x6dc10fca67a9cfdb +700, 0xd6527ffadf0feaae +701, 0x3b900172045e25d +702, 0xb7dd594cdded6a46 +703, 0x6602aee7ec1599fc +704, 0x7fbf12f23747546a +705, 0x32e63f662bd2de0d +706, 0xedf47770b67ed641 +707, 0x331bef83481c5c2a +708, 0x8fc4256fdf05158c +709, 0x98eba48dabccf5e0 +710, 0xdbc2f2cdb7b1c154 +711, 0x7777755616517ad3 +712, 0xd473c147d2628ac1 +713, 0x861e15d1d760b5a7 +714, 0xf4d25926405ecb07 +715, 0xb7739c69effff86e +716, 0xe97fbafa6f96830c +717, 0xf13e8a334e8bede1 +718, 0xcd60010cba4ee4f9 +719, 0x1f537ac2b82e6008 +720, 0x1fda8d781a89140a +721, 0x9dc204f3f4a463f0 +722, 0x456dcd18eb56a1ab +723, 0x629957bc87bd16a1 +724, 0x2c8000ddb8c75253 +725, 0xc31dae9ec8449284 +726, 0xdac05c8baa2b691a +727, 0x21ff7be9ffa3e7ac +728, 0x844f4b5ed4ee08d0 +729, 0x651f913fd636c994 +730, 0xca3e71a2110b2d49 +731, 0x7709bc42253ed09d +732, 0xbb164d45b6569d43 +733, 0x90ec2f040c20a112 +734, 0xfa6e77e9166f5be4 +735, 0x6b6d12c1842d587d +736, 0xfcd7ff8466e25e2a +737, 0x6a5a2ed8bd971297 +738, 0x2ec35f6bba5adcbc +739, 0xc83676e16651249a +740, 0x458f6064cefe10ba +741, 0x90d54d527e6cd028 +742, 0xa5613e88db27c388 +743, 0x331e0c7d85aa1abc +744, 0x8cee4977e210358 +745, 0xfcae379aa6cbff8e +746, 0xd1407afc97a57e86 +747, 0x1fab25c864f094ae +748, 0xd914864a63004552 +749, 0x4214d226a20f1384 +750, 0x3f4e0d80c488b715 +751, 0xc5ca2f654024b7c8 +752, 0xc1e27a124e7c821c +753, 0xd890a915ffc7918c +754, 0x22fba040ce51a9f8 +755, 0xbf61cebd8891617a +756, 0x7846609ee228e319 +757, 0x536d1854375509b8 +758, 0xbbfb45fc6e666f50 +759, 0xd85b4c0527f9d7d6 +760, 0x528cc9c7fa2a84c8 +761, 0x27a1baece647f2cb +762, 0xfddf0cb92fe09dc3 +763, 0xeb5008fe965d8d96 +764, 0x4a3307937eb2e5c8 +765, 0xd07d74c240c6c363 +766, 0x16f62290179d1bbf +767, 0xe99c9bcc9cb1ece7 +768, 0xc64f9be03c8a93be +769, 0x32659effaf666c1f +770, 0x4bb228cfb30b6672 +771, 0x98764870842068a5 +772, 0x5b12ef2d2cd8bdcc +773, 0xbc79d1c1b41f28b8 +774, 0x97a517cf3279fc9a +775, 0x34ffd46c1d4d6025 +776, 0x9c302307ee25c8f0 +777, 0x399604eed1f18a8 +778, 0x1c9b813c2043142a +779, 0x2944ea5e55267fe9 +780, 0x5a8a9f5e728ea667 +781, 0x30c8440adb804a0 +782, 0xee0e6b627099a937 +783, 0x3d50757ada3c52da +784, 0x4548916b32c813ab +785, 0x602a186fe5bf109b +786, 0xf0d440a2227ba304 +787, 0x5a10d4e0ca9ea32b +788, 0x6e5eb90da13ba64c +789, 0x4c6af8fd04241ab2 +790, 0xf9eb31d26e093006 +791, 0x5d674878839fe3ea +792, 0x1562b55b2484e47c +793, 0xa87188c099c1cb61 +794, 0xb7736b8aa02a3392 +795, 0x5f4b301125abb20f +796, 0x361d566984637f44 +797, 0x68c4b3feac8bd0c3 +798, 0x7066c634dd2503c1 +799, 0xfecbf7c9441eb6ea +800, 0xdbc26ae0fc81436b +801, 0x9ef3e2b48252e7a4 +802, 0x31a49b4c339b37c7 +803, 0xb01b2a83cf346cf4 +804, 0xc24dc2347f82fbe3 +805, 0x134cad272dcd410f +806, 0x61260742823ba59c +807, 0x53ac4c193a97c730 +808, 0x9207c9833af34b52 +809, 0xa72e7ee77078d1f5 +810, 0x2e6f6e1b05936885 +811, 0x783b99ce5dbf9464 +812, 0xfdfeb6f0d027bb44 +813, 0x40eeb27096f92b0 +814, 0x5ef96ff5d4a4521f +815, 
0x5595806ae873718a +816, 0x67d449eecf4ca1c3 +817, 0xde837ab611364f3f +818, 0x7034c24d2b139be9 +819, 0xe21166603e0a9c86 +820, 0x935694435c1f0d51 +821, 0x6cb3bec90c126088 +822, 0x4096ef662b7a9f89 +823, 0xd2d85b8d238d8c15 +824, 0xa4ea533ce3ec59b2 +825, 0x3654729d80a2db29 +826, 0x214c4cc3906d29d4 +827, 0x201c447e7588e373 +828, 0xe8b8f0ae25f683eb +829, 0x6744aaf5754e38af +830, 0xd1ffb10d6f27a061 +831, 0xe536733a7b3a6c30 +832, 0x39f0f66e47cbf2c9 +833, 0x856a9593526fde2 +834, 0x2e2a817a0098ea4b +835, 0xc5e1eeb551a0e3d3 +836, 0x3f21e2f5e2d50b2 +837, 0x906af56c66dd9f8c +838, 0x30f6dbd70329fac8 +839, 0xc443dfddf3c01a60 +840, 0x7ab85d9aa9675470 +841, 0x8c9080bd39717bfc +842, 0x4b1ccdb3c3597f6f +843, 0x74e2542d70ab5d67 +844, 0xbb3d236aad00f74 +845, 0xcf3cadf9a2804774 +846, 0xe851d9750e42bd07 +847, 0xc0ad82029b1c371f +848, 0x7ee119eb552d6c07 +849, 0xd8024049bd1d784a +850, 0xfa67a899760363 +851, 0xaa7c2f438b178197 +852, 0xc473674a47ffe064 +853, 0x539fbe3fc674c270 +854, 0xdb48484748a76f3b +855, 0xc73b2b092060d +856, 0xa1d2a15345016f5d +857, 0x4d0fe8599f9bba47 +858, 0xa0edc275e6f8f1d1 +859, 0x40590a8655bc8d72 +860, 0x35b4223161f05f75 +861, 0xa04c0c0f616752dc +862, 0x7f371ed2ca45432d +863, 0x2ff1a08f75ac6438 +864, 0xe2dc5c3682282f48 +865, 0xe1e4179fa98d9013 +866, 0x8cb083d6843a73d5 +867, 0xb4c2b5921b706854 +868, 0x738e14c0e7352445 +869, 0xcd2b646f91afd8c7 +870, 0xd5779a5b57a264fd +871, 0xc39ff855586c7d07 +872, 0x3e3f0098c631a859 +873, 0x644e02fae032110 +874, 0xa8834613c0a45278 +875, 0x69482f2c08e10657 +876, 0xe4ee475bdb87e69a +877, 0xdc1ef7b25c0d0019 +878, 0x88a3fa2be18d8744 +879, 0x60a02e0b21c5bec7 +880, 0xb6867b88aa19bc1a +881, 0xb599409affcf10eb +882, 0xaeaa1778a5e59daa +883, 0xd7a91a52c16663e3 +884, 0x93cb269affe07b1c +885, 0x841b6ced3a4ba815 +886, 0x84541768e1540a5c +887, 0xe3943c84f83b3020 +888, 0x5de366fbd7b45258 +889, 0xd787cc3bde91a661 +890, 0x814071446edecb57 +891, 0x15d8c602a1141514 +892, 0x72f07bc8002d1d0d +893, 0x4a8bd8dc9a1f0f3e +894, 0x8723796ae0f20d35 +895, 0xda7283c2051f73b2 +896, 0x2df0cc247f90bd3b +897, 0x79a8522b968f990a +898, 0x951ede190c8b9d02 +899, 0xc512f1a5b14b018a +900, 0xf0e3ddc03b9a4259 +901, 0x8cf4a35ad312e15f +902, 0xebef28926b11094b +903, 0x5628ba687325921c +904, 0xc3aa75e57edc49c3 +905, 0xc38382fa98e762ba +906, 0x8d209e896285848e +907, 0x2c7d6adf592b4a3e +908, 0x62de48e36f8338f3 +909, 0x4a752741e00de30e +910, 0xf7855b70f1f6ec2b +911, 0xa505fa4428199e43 +912, 0xe8b6b423b826bbac +913, 0x4bd1206cf8786d05 +914, 0x6dcf040391fe3bf4 +915, 0x913f500f87e1bba3 +916, 0x5acf775aa180a5d5 +917, 0x74dd28d9432ce739 +918, 0x996c2ff2f0dc2495 +919, 0x73dbfe6c56effe4 +920, 0x56fddd25196f5e40 +921, 0xe87810158f5b7 +922, 0x7b8795e996383f1f +923, 0x9ba5ee7c777c4c82 +924, 0x17ce3908d270fe1c +925, 0x3df9e613c1aedfae +926, 0xcdd26871b32fc8e1 +927, 0xd71cb13afc633979 +928, 0x63427c8ea9b1c79e +929, 0xd070f7664d3b405d +930, 0x46f2a9e32d9fb769 +931, 0xb4c3822a45e9fe9b +932, 0x8ba30b97fe6f5ec7 +933, 0x70aa554ee2fc11f9 +934, 0xa80c99dbe0cfcfaf +935, 0x36d9250cb2d68ed +936, 0x2995e4b9e1cd1db4 +937, 0x4b3803ba57fc570f +938, 0xae3959e7d740eaa5 +939, 0xb4cbd6662adbae08 +940, 0xae46576446e8dbc4 +941, 0xc4828e008a9a8a54 +942, 0x145d7db8e6554b2f +943, 0x1b1b8916a730c371 +944, 0xdaf84b2bebe31963 +945, 0x5b59b80ef23a2403 +946, 0x9180c7e89cab6fd3 +947, 0x80e58f5411babf34 +948, 0xa06cf55185b9b005 +949, 0x13b2c798424173ad +950, 0xc510f8e706311d49 +951, 0x1f974b83b6046d3a +952, 0xae6e8e85e822d1c3 +953, 0x66f2c8dc3274a31a +954, 0x7e04dbcbf65bd377 +955, 0xabf41ede01ec20a4 +956, 0x5efa0948f6bbb2ea +957, 0xbc91c99d8592255 
+958, 0xf6d6917911d86d75 +959, 0x85ce273d54e9097a +960, 0xbdfd30f2420fff92 +961, 0x8802f02f610b537c +962, 0xd1d70037ed543229 +963, 0x908aaf97f9693a46 +964, 0x1f6cfeaa0834d53a +965, 0xa453fd1648ce04d2 +966, 0x2c38bb85ebc64af9 +967, 0xd2daff551c90c4f8 +968, 0xae5a0d949797d784 +969, 0xf0974c8552ac9593 +970, 0xa10b70499f65c693 +971, 0x39a449ebd594ddff +972, 0x8ea090f2b17b9b49 +973, 0xc592de318090fd83 +974, 0xb63e4fbc467b6912 +975, 0x57a0c1c5ce0e4dcc +976, 0xa7c517cf3d436b35 +977, 0xef6dcb0f3fad038b +978, 0xaf4fb60315b91287 +979, 0x5e0776f67304f331 +980, 0xe927753b8e6f7932 +981, 0xd3df2dd92559e304 +982, 0xdaed52aa6af44413 +983, 0x1b59f4dac1e181f8 +984, 0x4a73c2293877ef39 +985, 0xca45d0d015fe44de +986, 0x4659c8b7853735a8 +987, 0x12de6466bdf8adeb +988, 0xaeea857a09bfec15 +989, 0xcc9cf4b3c0b88a23 +990, 0xa44ae52396a5e1bf +991, 0x5847a724305d137f +992, 0x8f4d4de223956182 +993, 0x58254dfada867a8 +994, 0x900a98222c2f339e +995, 0xdb575260935d51d5 +996, 0x13fb4bfbbc0d7b53 +997, 0x62213850186bb92b +998, 0x2a34823312c00388 +999, 0x6148329042f743b0 diff --git a/MLPY/Lib/site-packages/numpy/random/tests/data/pcg64dxsm-testset-1.csv b/MLPY/Lib/site-packages/numpy/random/tests/data/pcg64dxsm-testset-1.csv new file mode 100644 index 0000000000000000000000000000000000000000..c4a091210175e916a041b1cedf144577172d56bd --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/tests/data/pcg64dxsm-testset-1.csv @@ -0,0 +1,1001 @@ +seed, 0xdeadbeaf +0, 0xdf1ddcf1e22521fe +1, 0xc71b2f9c706cf151 +2, 0x6922a8cc24ad96b2 +3, 0x82738c549beccc30 +4, 0x5e8415cdb1f17580 +5, 0x64c54ad0c09cb43 +6, 0x361a17a607dce278 +7, 0x4346f6afb7acad68 +8, 0x6e9f14d4f6398d6b +9, 0xf818d4343f8ed822 +10, 0x6327647daf508ed6 +11, 0xe1d1dbe5496a262a +12, 0xfc081e619076b2e0 +13, 0x37126563a956ab1 +14, 0x8bb46e155db16b9 +15, 0x56449f006c9f3fb4 +16, 0x34a9273550941803 +17, 0x5b4df62660f99462 +18, 0xb8665cad532e3018 +19, 0x72fc3e5f7f84216a +20, 0x71d3c47f6fd59939 +21, 0xfd4218afa1de463b +22, 0xc84054c78e0a9a71 +23, 0xae59034726be61a8 +24, 0xa6a5f21de983654d +25, 0x3b633acf572009da +26, 0x6a0884f347ab54c8 +27, 0x7a907ebe9adcab50 +28, 0xbe779be53d7b8d4a +29, 0xf5976e8c69b9dcd1 +30, 0x1d8302f114699e11 +31, 0x7d37e43042c038a0 +32, 0x2cc1d4edc2a40f35 +33, 0x83e3347bb2d581f1 +34, 0x253f8698651a844d +35, 0x4312dea0dd4e32f6 +36, 0x10f106439964ea3a +37, 0x810eb374844868cc +38, 0x366342a54b1978cc +39, 0x9fb39b13aaddfb5e +40, 0xdb91fd0d9482bed7 +41, 0x89f6ea4ca9c68204 +42, 0x146b31ccca461792 +43, 0x203fd9724deb2486 +44, 0x58a84f23748e25cb +45, 0x2f20eb6aeb94e88 +46, 0x14d3581460e473c +47, 0xad5bd0d25f37d047 +48, 0x1cf88fa16de258b2 +49, 0x3bcab6485b7a341 +50, 0xb2433b37f227d90c +51, 0x2cffd7e0a8360cc8 +52, 0x5d2eeff7c9ebc847 +53, 0x6fd7c7ae23f9f64b +54, 0x381650b2d00f175d +55, 0x9d93edcedc873cae +56, 0x56e369a033d4cb49 +57, 0x7547997116a3bac +58, 0x11debaa897fd4665 +59, 0xdf799d2b73bd6fb8 +60, 0x3747d299c66624d +61, 0xac9346701afd0cfa +62, 0xac90e150fa13c7bf +63, 0x85c56ad2248c2871 +64, 0xdea66bf35c45f195 +65, 0x59cf910ea079fb74 +66, 0x2f841bb782274586 +67, 0x9814df4384d92bd9 +68, 0x15bc70824be09925 +69, 0x16d4d0524c0503a3 +70, 0xf04ea249135c0cc7 +71, 0xa707ab509b7e3032 +72, 0x465459efa869e372 +73, 0x64cbf70a783fab67 +74, 0x36b3541a14ca8ed7 +75, 0x9a4dfae8f4c596bf +76, 0x11d9a04224281be3 +77, 0xe09bbe6d5e98ec32 +78, 0xa6c60d908973aa0d +79, 0x7c524c57dd5915c8 +80, 0xa810c170b27f1fdc +81, 0xce5d409819621583 +82, 0xfe2ee3d5332a3525 +83, 0x162fb7c8b32045eb +84, 0x4a3327156b0b2d83 +85, 0x808d0282f971064 +86, 0x2e6f04cf5ed27e60 +87, 0xaf6800699cca67a9 +88, 
0xc7590aae7244c3bf +89, 0x7824345f4713f5f9 +90, 0x8f713505f8fd059b +91, 0x3d5b5b9bb6b1e80e +92, 0x8674f45e5dc40d79 +93, 0xcb1e36846aa14773 +94, 0xe0ae45b2b9b778c1 +95, 0xd7254ce931eefcfb +96, 0xef34e15e4f55ac0a +97, 0xf17cc0ba15a99bc4 +98, 0x77bb0f7ffe7b31f1 +99, 0x6ee86438d2e71d38 +100, 0x584890f86829a455 +101, 0x7baf0d8d30ba70fe +102, 0xb1ac8f326b8403ae +103, 0xcc1963435c874ba7 +104, 0x9c483b953d1334ce +105, 0xc0924bcbf3e10941 +106, 0x21bcc581558717b1 +107, 0x2c5ad1623f8d292b +108, 0xa8ea110f6124557e +109, 0x15f24a6c5c4c591 +110, 0x40fe0d9cd7629126 +111, 0xcfe8f2b3b081484d +112, 0x891383f4b4cac284 +113, 0x76f2fcdef7fa845 +114, 0x4edd12133aed0584 +115, 0xd53c06d12308873d +116, 0xf7f22882c17f86bf +117, 0xfbaa4aad72f35e10 +118, 0x627610da2e3c0cc3 +119, 0x582b16a143634d9a +120, 0x9b4a7f69ed38f4a0 +121, 0x2df694974d1e1cbe +122, 0xe5be6eaafed5d4b +123, 0xc48e2a288ad6605e +124, 0xbcb088149ce27c2b +125, 0x3cb6a7fb06ceecbe +126, 0x516735fff3b9e3ac +127, 0x5cbafc551ee5008d +128, 0xee27d1ab855c5fd5 +129, 0xc99fb341f6baf846 +130, 0x7ad8891b92058e6d +131, 0xf50310d03c1ac6c7 +132, 0x947e281d998cbd3e +133, 0x1d4d94a93824fe80 +134, 0x5568b77289e7ee73 +135, 0x7d82d1b2b41e3c8b +136, 0x1af462c7abc787b +137, 0xcfd8dfe80bfae1ef +138, 0xd314caeb723a63ea +139, 0x1c63ddcfc1145429 +140, 0x3801b7cc6cbf2437 +141, 0xc327d5b9fdafddd3 +142, 0xe140278430ca3c78 +143, 0x4d0345a685cb6ef8 +144, 0x47640dc86e261ff9 +145, 0xab817f158523ebf4 +146, 0x37c51e35fbe65a6b +147, 0xab090f475d30a178 +148, 0x4d3ec225bf599fc1 +149, 0xefd517b0041679b1 +150, 0x20ad50bca4da32c5 +151, 0x75e1f7cd07fad86d +152, 0x348cf781ee655f4b +153, 0x9375f0e5ffc2d2ec +154, 0x7689082fd5f7279c +155, 0x633e56f763561e77 +156, 0x9d1752d70861f9fd +157, 0xa3c994b4e70b0b0f +158, 0xabf7276a58701b88 +159, 0xbfa18d1a0540d000 +160, 0xc6a28a2475646d26 +161, 0x7cdf108583f65085 +162, 0x82dcefb9f32104be +163, 0xc6baadd0adc6b446 +164, 0x7a63cff01075b1b4 +165, 0x67ac62e575c89919 +166, 0x96fa4320a0942035 +167, 0xc4658859385b325f +168, 0xde22c17ff47808f6 +169, 0xbb952c4d89e2f2ec +170, 0x638251fbc55bdc37 +171, 0x38918b307a03b3ea +172, 0xccb60f2cedbb570b +173, 0x3c06f4086a28f012 +174, 0x4e8d238388986e33 +175, 0x1760b7793514a143 +176, 0xa3f924efe49ee7d6 +177, 0xaf6be2dbaebc0bdf +178, 0x6782682090dffe09 +179, 0xb63a4d90d848e8ef +180, 0x5f649c7eaf4c54c5 +181, 0xbe57582426a085ba +182, 0xb5dd825aa52fb76d +183, 0x74cb4e6ca4039617 +184, 0x382e578bf0a49588 +185, 0xc043e8ea6e1dcdae +186, 0xf902addd5c04fa7c +187, 0xf3337994612528db +188, 0x4e8fd48d6d15b4e6 +189, 0x7190a509927c07ab +190, 0x864c2dee5b7108ae +191, 0xbb9972ddc196f467 +192, 0x1ea02ab3ca10a448 +193, 0xe50a8ffde35ddef9 +194, 0x7bd2f59a67183541 +195, 0x5a940b30d8fcd27a +196, 0x82b4cea62623d4d3 +197, 0x6fbda76d4afef445 +198, 0x8b1f6880f418328e +199, 0x8b69a025c72c54b7 +200, 0xb71e0f3986a3835f +201, 0xa4a7ddb8b9816825 +202, 0x945dcda28228b1d8 +203, 0xb471abf2f8044d72 +204, 0xf07d4af64742b1ba +205, 0xfca5190bc4dd6a2a +206, 0xd681497262e11bc5 +207, 0xbe95d5f00c577028 +208, 0x56313439fd8bde19 +209, 0x3f3d9ac9b5ee6522 +210, 0x7b8d457dd2b49bbe +211, 0xe76b5747885d214b +212, 0xa8a695b3deb493ea +213, 0x5292446548c95d71 +214, 0xbf5cdf0d436412df +215, 0x7936abaed779d28d +216, 0x659c6e8073b3a06d +217, 0x86c9ff28f5543b71 +218, 0x6faa748445a99146 +219, 0xdcc1e6ab57904fd7 +220, 0x770bd61233addc5f +221, 0x16963e041e46d94f +222, 0x158e6cb2934157ac +223, 0xb65088a8fd246441 +224, 0x2b12ced6ce8a68c3 +225, 0x59a18d02cd6082b3 +226, 0x4ddbc318cb5488ee +227, 0x3d4cf520b3ed20a1 +228, 0x7028b3a92e2b292d +229, 0xf141da264a250e4d +230, 0x9788d53e86041c37 
+231, 0x1bb91238a7c97dbf +232, 0x81953d0ddb634309 +233, 0xfa39ccfe14d2d46 +234, 0xf7c7861c9b7e8399 +235, 0x18d27ca50d9dc249 +236, 0x258dfdf38510d0d9 +237, 0x9e72d8af910ea76f +238, 0x4f8ef24b96de50ad +239, 0xb9d9c12297e03dc9 +240, 0x91994e41b4a1929c +241, 0x8defa79b2ccc83b9 +242, 0x948566748706dac5 +243, 0x7b0454946e70e4cf +244, 0x340b7cb298c70ed7 +245, 0x6602005330cebd95 +246, 0xf71cb803aa61f722 +247, 0x4683fb07fc70ae8a +248, 0xc6db9f0c4de3ed88 +249, 0x3e8dfae2a593cef9 +250, 0x615f7c38e3862b33 +251, 0x676c7996550d857 +252, 0xc6d520d54a5c266a +253, 0x202b1e8eef14aa2e +254, 0xa3a84891a27a582 +255, 0x84dbee451658d47f +256, 0x254c7cd97e777e3a +257, 0xf50b6e977f0eba50 +258, 0x2898b1d3062a4798 +259, 0x4096f7cbbb019773 +260, 0x9fb8e75548062c50 +261, 0x4647071e5ca318ec +262, 0x2b4750bdb3b3b01 +263, 0x88ac41cc69a39786 +264, 0x705e25476ef46fa3 +265, 0xc0c1db19884a48a6 +266, 0x1364c0afdbb465e5 +267, 0x58e98534701272a6 +268, 0x746a5ea9701517c0 +269, 0x523a70bc6b300b67 +270, 0x9b1c098eda8564ad +271, 0xfbaeb28d3637067f +272, 0xddd9a13551fdba65 +273, 0x56461a670559e832 +274, 0xab4fd79be85570ad +275, 0xd4b691ecaff8ca55 +276, 0x11a4495939e7f004 +277, 0x40d069d19477eb47 +278, 0xe790783d285cd81e +279, 0xde8218b16d935bc7 +280, 0x2635e8c65cd4182d +281, 0xeae402623e3454 +282, 0x9f99c833184e0279 +283, 0x3d0f79a0d52d84e7 +284, 0xc1f8edb10c625b90 +285, 0x9b4546363d1f0489 +286, 0x98d86d0b1212a282 +287, 0x386b53863161200d +288, 0xbe1165c7fe48a135 +289, 0xb9658b04dbbfdc8c +290, 0xcea14eddfe84d71a +291, 0x55d03298be74abe7 +292, 0x5be3b50d961ffd7e +293, 0xc76b1045dc4b78e1 +294, 0x7830e3ff3f6c3d4c +295, 0xb617adb36ca3729 +296, 0x4a51bdb194f14aa9 +297, 0x246024e54e6b682a +298, 0x33d42fc9c6d33083 +299, 0xadccba149f31e1d +300, 0x5183e66b9002f8b +301, 0x70eb2416404d51b7 +302, 0x26c25eb225535351 +303, 0xbc2d5b0d23076561 +304, 0x5823019ddead1da +305, 0x85cfa109fca69f62 +306, 0x26017933e7e1efd9 +307, 0x3ec7be9a32212753 +308, 0x697e8a0697cd6f60 +309, 0x44735f6cca03920f +310, 0x8cc655eb94ee212e +311, 0x8b8b74eba84929a0 +312, 0x7708ccedd0c98c80 +313, 0x1b6f21f19777cbe1 +314, 0x363e564bd5fadedb +315, 0x5921543a641591fe +316, 0xc390786d68ea8a1b +317, 0x9b293138dc033fca +318, 0x45447ca8dc843345 +319, 0xee6ef6755bc49c5e +320, 0x70a3a1f5163c3be5 +321, 0xf05e25448b6343b0 +322, 0x4739f4f8717b7e69 +323, 0xb006141975bf957 +324, 0x31874a91b707f452 +325, 0x3a07f2c90bae2869 +326, 0xb73dae5499a55c5e +327, 0x489070893bb51575 +328, 0x7129acf423940575 +329, 0x38c41f4b90130972 +330, 0xc5260ca65f5a84a1 +331, 0x6e76194f39563932 +332, 0x62ca1f9ca3de3ca6 +333, 0xb4a97874e640853f +334, 0x38ed0f71e311cc02 +335, 0xde183b81099e8f47 +336, 0x9bb8bf8e6694346 +337, 0xd15497b6bf81e0f2 +338, 0xaaae52536c00111 +339, 0x4e4e60d1435aaafd +340, 0x5a15512e5d6ea721 +341, 0xff0f1ffabfc6664f +342, 0xba3ffcedc5f97fec +343, 0xef87f391c0c6bfb6 +344, 0x4a888c5d31eb0f98 +345, 0x559a3fbfd7946e95 +346, 0xe45b44a0db5a9bad +347, 0x9457898964190af1 +348, 0xd9357dfaab76cd9e +349, 0xa60e907178d965a1 +350, 0x76b2dc3032dc2f4a +351, 0x13549b9c2802120 +352, 0x8656b965a66a1800 +353, 0x16802e6e22456a23 +354, 0x23b62edc60efaa9 +355, 0x6832a366e1e4ea3b +356, 0x46b1b41093ff2b1e +357, 0x55c857128143f219 +358, 0x7fc35ddf5e138200 +359, 0x790abe78be67467e +360, 0xa4446fc08babd466 +361, 0xc23d70327999b855 +362, 0x2e019d1597148196 +363, 0xfefd98e560403ab8 +364, 0xbe5f0a33da330d58 +365, 0x3078a4e9d43ca395 +366, 0x511bfedd6f12f2b3 +367, 0x8bc138e335be987c +368, 0x24640f803465716d +369, 0xf6530b04d0bd618f +370, 0x9b7833e5aa782716 +371, 0x778cd35aea5841b1 +372, 0xecea3c458cefbc60 +373, 
0x5107ae83fc527f46 +374, 0x278ad83d44bd2d1a +375, 0x7014a382295aeb16 +376, 0xf326dd762048743f +377, 0x858633d56279e553 +378, 0x76408154085f01bc +379, 0x3e77d3364d02e746 +380, 0x2f26cea26cadd50b +381, 0x6d6846a4ecb84273 +382, 0x4847e96f2df5f76 +383, 0x5a8610f46e13ff61 +384, 0x4e7a7cac403e10dd +385, 0x754bdf2e20c7bc90 +386, 0x8bdd80e6c51bd0be +387, 0x61c655fae2b4bc52 +388, 0x60873ef48e3d2f03 +389, 0x9d7d8d3698a0b4a4 +390, 0xdf48e9c355cd5d4b +391, 0x69ecf03e20be99ac +392, 0xc1a0c5a339bd1815 +393, 0x2e3263a6a3adccb +394, 0x23557459719adbdc +395, 0xd1b709a3b330e5a +396, 0xade5ab00a5d88b9d +397, 0x69a6bd644120cfad +398, 0x40187ecceee92342 +399, 0x1c41964ba1ac78da +400, 0x9ac5c51cbecabe67 +401, 0xbdc075781cf36d55 +402, 0xeaf5a32246ded56 +403, 0xcda0b67e39c0fb71 +404, 0x4839ee456ef7cc95 +405, 0xf17092fdd41d5658 +406, 0x2b5d422e60ae3253 +407, 0x3effe71102008551 +408, 0x20a47108e83934b7 +409, 0xd02da65fe768a88f +410, 0xeb046bd56afa4026 +411, 0x70c0509c08e0fbe0 +412, 0x1d35c38d4f8bac6c +413, 0x9aa8eb6466f392e0 +414, 0x587bd4a430740f30 +415, 0x82978fe4bad4195 +416, 0xdc4ebc4c0feb50ab +417, 0xd3b7164d0240c06f +418, 0x6e2ad6e5a5003a63 +419, 0xa24b430e2ee6b59c +420, 0x2905f49fd5073094 +421, 0x5f209e4de03aa941 +422, 0x57b7da3e0bedb1dc +423, 0x5e054018875b01f5 +424, 0xb2f2da6145658db3 +425, 0xbd9c94a69a8eb651 +426, 0x9c5f9a07cd6ac749 +427, 0x2296c4af4d529c38 +428, 0x522ed800fafdefab +429, 0xe2a447ced0c66791 +430, 0x937f10d45e455fef +431, 0xc882987d9e29a24 +432, 0x4610bfd6a247ee1a +433, 0x562ba3e50870059 +434, 0x59d8d58793602189 +435, 0xfe9a606e3e34abe +436, 0x6825f7932a5e9282 +437, 0xe77f7061bab476ad +438, 0xbf42001da340ace3 +439, 0x9c3e9230f5e47960 +440, 0x2c0f700d96d5ad58 +441, 0x330048b7cd18f1f9 +442, 0xffc08785eca5cca9 +443, 0xb5879046915f07a5 +444, 0xef51fe26f83c988e +445, 0xfa4c2968e7881a9a +446, 0xc0a9744455a4aad +447, 0xbd2ad686d6313928 +448, 0x6b9f0984c127682a +449, 0xc9aaa00a5da59ed8 +450, 0x762a0c4b98980dbf +451, 0x52d1a2393d3ca2d1 +452, 0x1e9308f2861db15c +453, 0xe7b3c74fe4b4a844 +454, 0x485e15704a7fc594 +455, 0x9e7f67ea44c221f6 +456, 0xbab9ad47fde916e0 +457, 0x50e383912b7fc1f4 +458, 0xaad63db8abcef62d +459, 0xc2f0c5699f47f013 +460, 0xee15b36ada826812 +461, 0x2a1b1cf1e1777142 +462, 0x8adb03ede79e937d +463, 0xf14105ef65643bf3 +464, 0x752bbaefc374a3c7 +465, 0xa4980a08a5a21d23 +466, 0x418a1c05194b2db7 +467, 0xdd6ff32efe1c3cd6 +468, 0x272473ed1f0d3aa2 +469, 0x1e7fdebadabe6c06 +470, 0xd1baa90c17b3842f +471, 0xd3d3a778e9c8404a +472, 0x781ae7fda49fa1a0 +473, 0x61c44fdbdacc672d +474, 0x6d447d0a1404f257 +475, 0x9303e8bdfbfb894d +476, 0x3b3482cdec016244 +477, 0xb149bf245d062e7b +478, 0x96f8d54b14cf992d +479, 0x4741549a01f8c3d0 +480, 0x48270811b2992af +481, 0x7b58f175cd25d147 +482, 0x8f19a840b56f4be9 +483, 0x84a77f43c0951a93 +484, 0x34e1a69381f0c374 +485, 0xb158383c9b4040f +486, 0x372f1abc7cf3a9fa +487, 0x5439819a84571763 +488, 0xabf8515e9084e2fa +489, 0xb02312b9387ff99 +490, 0x238a85bb47a68b12 +491, 0x2068cb83857c49bb +492, 0xc6170e743083664c +493, 0x745cf8470bcb8467 +494, 0xe3a759a301670300 +495, 0x292c7686ad3e67da +496, 0x359efedaff192a45 +497, 0x511f2c31a2d8c475 +498, 0x97fd041bf21c20b3 +499, 0x25ef1fe841b7b3f6 +500, 0xbb71739e656f262d +501, 0x2729b0e989b6b7b8 +502, 0xd2142702ec7dbabf +503, 0x7008decd2488ee3f +504, 0x69daa95e303298d7 +505, 0xc35eca4efb8baa5a +506, 0xf3f16d261cec3b6c +507, 0x22371c1d75396bd3 +508, 0x7aefa08eccae857e +509, 0x255b493c5e3c2a2f +510, 0x779474a077d34241 +511, 0x5199c42686bea241 +512, 0x16c83931e293b8d3 +513, 0xa57fe8db8c0302c7 +514, 0xd7ace619e5312eb1 +515, 
0x8740f013306d217c +516, 0xb6a1ad5e29f4d453 +517, 0x31abf7c964688597 +518, 0xbc3d791daed71e7 +519, 0x31ee4ca67b7056ed +520, 0x1ab5416bfe290ea3 +521, 0x93db416f6d3b843a +522, 0xed83bbe5b1dd2fed +523, 0xece38271470d9b6d +524, 0x3a620f42663cd8ae +525, 0x50c87e02acafee5d +526, 0xcabeb8bedbc6dab5 +527, 0x2880a6d09970c729 +528, 0x4aba5dd3bfc81bc +529, 0xaba54edf41080cec +530, 0xb86bb916fc85a169 +531, 0x4c41de87bc79d8ca +532, 0xcce2a202622945fe +533, 0x513f086fad94c107 +534, 0x18b3960c11f8cc96 +535, 0x2f0d1cfd1896e236 +536, 0x1702ae3880d79b15 +537, 0x88923749029ae81 +538, 0x84810d4bdec668eb +539, 0xf85b0a123f4fc68d +540, 0x93efd68974b6e4d1 +541, 0x5d16d6d993a071c9 +542, 0x94436858f94ca43b +543, 0xb3dbb9ed0cb180b6 +544, 0x6447030a010b8c99 +545, 0xd7224897c62925d8 +546, 0xb0c13c1d50605d3a +547, 0xdff02c7cb9d45f30 +548, 0xe8103179f983570d +549, 0xbc552037d6d0a24e +550, 0x775e500b01486b0d +551, 0x2050ac632c694dd6 +552, 0x218910387c4d7ae7 +553, 0xf83e8b68ff885d5d +554, 0xe3374ec25fca51a3 +555, 0xfa750ffa3a60f3af +556, 0x29ee40ba6df5592e +557, 0x70e21a68f48260d2 +558, 0x3805ca72cd40886e +559, 0x2f23e73f8eabf062 +560, 0x2296f80cdf6531ae +561, 0x903099ed968db43a +562, 0xf044445cf9f2929f +563, 0xcd47fdc2de1b7a1 +564, 0xaab1cbd4f849da99 +565, 0x5fc990688da01acb +566, 0xa9cee52ea7dab392 +567, 0xecefc3a4349283a8 +568, 0xdd6b572972e3fafc +569, 0xc1f0b1a2ffb155da +570, 0xc30d53fc17bd25c8 +571, 0x8afa89c77834db28 +572, 0x5569a596fb32896c +573, 0x36f207fc8df3e3d4 +574, 0x57c2bd58517d81db +575, 0xb524693e73d0061c +576, 0xb69f6eb233f5c48b +577, 0x4f0fb23cab8dc695 +578, 0x492c1ad0a48df8df +579, 0xf6dcc348ec8dec1f +580, 0xa4d8708d6eb2e262 +581, 0x4c2072c2c9766ff1 +582, 0xa9bf27c4304875f0 +583, 0xfc8fb8066d4f9ae2 +584, 0x188095f6235fec3c +585, 0x1d8227a2938c2864 +586, 0x89ea50c599010378 +587, 0xcac86df0a7c6d56d +588, 0x47a8c5df84c7d78 +589, 0xe607ae24ea228bfa +590, 0x36624a7996efe104 +591, 0x5d72881c1227d810 +592, 0x78694a6750374c8 +593, 0x7b9a217d4ab5ff45 +594, 0xd53e5d6f7504becc +595, 0x197a72d3f4889a0e +596, 0xfdc70c4755a8df36 +597, 0xd0fda83748c77f74 +598, 0x7ddc919ac9d6dcc9 +599, 0x785c810a6a2dc08b +600, 0xba4be83e7e36896c +601, 0x379d6fe80cf2bffe +602, 0x74cae2dabc429206 +603, 0x1efac32d5d34c917 +604, 0x3cb64e2f98d36e70 +605, 0xc0a7c3cdc3c60aa7 +606, 0x699dfadd38790ebe +607, 0x4861e61b3ecfbeac +608, 0x531744826c345baa +609, 0x5ec26427ad450cba +610, 0xf2c1741479abdcae +611, 0xe9328a78b2595458 +612, 0x30cd1bdf087acd7f +613, 0x7491ced4e009adbe +614, 0xdcd942df1e2e7023 +615, 0xfe63f01689fee35 +616, 0x80282dfe5eaedc42 +617, 0x6ecdea86495f8427 +618, 0xe0adfdd5e9ed31c3 +619, 0xf32bd2a7418127e +620, 0x8aabba078db6ee2 +621, 0xa8a8e60499145aca +622, 0xf76b086ac4e8a0f2 +623, 0x6e55b3c452ff27f8 +624, 0xe18fa7cd025a71bf +625, 0xeed7b685fde0fa25 +626, 0xba9b6c95867fa721 +627, 0x4c2603bc69de2df2 +628, 0xaac87eee1b58cd66 +629, 0x3c9af6656e01282c +630, 0x2dfa05ce8ff476b6 +631, 0xeae9143fcf92f23d +632, 0x3f0699f631be3bc8 +633, 0xa0f5f79f2492bd67 +634, 0x59c47722388131ed +635, 0x5f6e9d2941cef1de +636, 0xe9ad915c09788b7b +637, 0x92c6d37e4f9482f5 +638, 0x57d301b7fdadd911 +639, 0x7e952d23d2a8443 +640, 0xbb2fa5e0704b3871 +641, 0xe5642199be36e2d5 +642, 0x5020b60d54358291 +643, 0xa0b6317ec3f60343 +644, 0xb57b08b99540bc5c +645, 0x21f1890adc997a88 +646, 0xfcf824200dd9da2d +647, 0x8146293d83d425d1 +648, 0xdadfbf5fbb99d420 +649, 0x1eb9bbc5e6482b7d +650, 0xd40ff44f1bbd0f1c +651, 0xa9f948ba2d08afa5 +652, 0x638cc07c5301e601 +653, 0x1f984baa606e14e8 +654, 0x44e153671081f398 +655, 0xb17882eeb1d77a5d +656, 0x5fd8dbee995f14c +657, 
0xff3533e87f81b7fe +658, 0x2f44124293c49795 +659, 0x3bf6b51e9360248 +660, 0x72d615edf1436371 +661, 0x8fc5cf4a38adab9d +662, 0xfa517e9022078374 +663, 0xf356733f3e26f4d8 +664, 0x20ea099cdc6aad40 +665, 0xe15b977deb37637d +666, 0xcc85601b89dae88d +667, 0x5768c62f8dd4905c +668, 0xa43cc632b4e56ea +669, 0xc4240cf980e82458 +670, 0xb194e8ffb4b3eeb6 +671, 0xee753cf2219c5fa1 +672, 0xfe2500192181d44d +673, 0x2d03d7d6493dd821 +674, 0xff0e787bb98e7f9b +675, 0xa05cf8d3bd810ce7 +676, 0x718d5d6dcbbdcd65 +677, 0x8d0b5343a06931c +678, 0xae3a00a932e7eaf9 +679, 0x7ed3d8f18f983e18 +680, 0x3bb778ee466dc143 +681, 0x711c685c4e9062c0 +682, 0x104c3af5d7ac9834 +683, 0x17bdbb671fb5d5cf +684, 0xabf26caead4d2292 +685, 0xa45f02866467c005 +686, 0xf3769a32dc945d2d +687, 0xe78d0007f6aabb66 +688, 0x34b60be4acbd8d4b +689, 0x58c0b04b69359084 +690, 0x3a8bb354c212b1 +691, 0x6b82a8f3d70058d5 +692, 0x405bdef80a276a4a +693, 0xe20ca40ee9195cad +694, 0xf5dd96ba2446fefd +695, 0xc1e180c55fe55e3c +696, 0xa329caf6daa952b3 +697, 0xb4809dd0c84a6b0a +698, 0xd27f82661070cee7 +699, 0xa7121f15ee2b0d8a +700, 0x4bdaea70d6b34583 +701, 0xe821dc2f310f7a49 +702, 0x4c00a5a68e76f647 +703, 0x331065b064a2d5ea +704, 0xac0c2ce3dc04fa37 +705, 0x56b32b37b8229008 +706, 0xe757cdb51534fcfa +707, 0xd3ff183576b2fad7 +708, 0x179e1f4190f197a7 +709, 0xf874c626a7c9aae5 +710, 0xd58514ffc37c80e4 +711, 0xc65de31d33fa7fd3 +712, 0x6f6637052025769b +713, 0xca1c6bdadb519cc0 +714, 0xd1f3534cde37828a +715, 0xc858c339eee4830a +716, 0x2371eacc215e02f4 +717, 0x84e5022db85bbbe9 +718, 0x5f71c50bba48610e +719, 0xe420192dad9c323f +720, 0x2889342721fca003 +721, 0x83e64f63334f501d +722, 0xac2617172953f2c +723, 0xfa1f78d8433938ff +724, 0x5578382760051462 +725, 0x375d7a2e3b90af16 +726, 0xb93ff44e6c07552d +727, 0xded1d5ad811e818c +728, 0x7cf256b3b29e3a8c +729, 0x78d581b8e7bf95e8 +730, 0x5b69192f2caa6ad3 +731, 0xa9e25855a52de3ce +732, 0x69d8e8fc45cc188d +733, 0x5dd012c139ad347d +734, 0xfcb01c07b77db606 +735, 0x56253e36ab3d1cce +736, 0x1181edbb3ea2192 +737, 0x325bef47ff19a08d +738, 0xd3e231ceb27e5f7 +739, 0x8e819dd2de7956d2 +740, 0x34a9689fe6f84a51 +741, 0x3e4eeb719a9c2927 +742, 0x5c3b3440581d0aaf +743, 0x57caf51897d7c920 +744, 0xec6a458130464b40 +745, 0xe98f044e0da40e9b +746, 0xbe38662020eeb8e7 +747, 0x7b8c407c632724ae +748, 0x16c7cfa97b33a544 +749, 0xd23359e2e978ae5a +750, 0x4fdba458250933dd +751, 0x3c9e0713cfe616ba +752, 0x6f0df87b13163b42 +753, 0xc460902cb852cc97 +754, 0x289df8fefd6b0bce +755, 0x4ac2a2a1c3fb8029 +756, 0x2fc3e24d8b68eef7 +757, 0x34564386a59aab9a +758, 0x31047391ebd67ce4 +759, 0x6c23d070a0564d41 +760, 0xba6387b2b72545f7 +761, 0xcdcf1008058387af +762, 0xc9308fa98db05192 +763, 0xdbdbb5abd01a9d84 +764, 0x937088275c7804ab +765, 0x6f6accfefe34ee81 +766, 0x5c33c74c49cfdb2c +767, 0x5e1a771edfb92bd3 +768, 0x6e89b009069ecae7 +769, 0x34d64e17ec0e8968 +770, 0x841203d0cde0c330 +771, 0x7642cc9d7eb9e9cb +772, 0xca01d2e8c128b97e +773, 0x5b8390617b3304ab +774, 0x52ec4ed10de1eb2d +775, 0xb90f288b9616f237 +776, 0x5bd43cd49617b2e2 +777, 0x1a53e21d25230596 +778, 0x36ccd15207a21cd6 +779, 0xc8263d780618fd3c +780, 0x6eb520598c6ce1cb +781, 0x493c99a3b341564f +782, 0xab999e9c5aa8764f +783, 0xab2fa4ceaba84b +784, 0xbbd2f17e5cb2331b +785, 0xc8b4d377c0cc4e81 +786, 0x31f71a6e165c4b1e +787, 0xd1011e55fb3addaa +788, 0x5f7ec34728dfa59 +789, 0x2aef59e60a84eb0f +790, 0x5dde6f09aec9ad5f +791, 0x968c6cdbc0ef0438 +792, 0x1957133afa15b13a +793, 0xbaf28f27573a64c2 +794, 0xc6f6ddd543ebf862 +795, 0xdd7534315ec9ae1e +796, 0xd2b80cd2758dd3b +797, 0xa38c3da00cc81538 +798, 0x15c95b82d3f9b0f9 +799, 
0x6704930287ce2571 +800, 0x9c40cc2f6f4ecb0c +801, 0xc8de91f50b22e94e +802, 0x39272e8fddbfdf0a +803, 0x879e0aa810a117d +804, 0xa312fff4e9e5f3bd +805, 0x10dd747f2835dfec +806, 0xeb8466db7171cdae +807, 0xaa808d87b9ad040a +808, 0xab4d2229a329243a +809, 0x7c622f70d46f789c +810, 0x5d41cef5965b2a8e +811, 0xce97ec4702410d99 +812, 0x5beba2812c91211b +813, 0xf134b46c93a3fec7 +814, 0x76401d5630127226 +815, 0xc55fc9d9eacd4ec1 +816, 0xaec8cefaa12f813f +817, 0x2f845dcfd7b00722 +818, 0x3380ab4c20885921 +819, 0xdb68ad2597691b74 +820, 0x8a7e4951455f563f +821, 0x2372d007ed761c53 +822, 0xcab691907714c4f1 +823, 0x16bc31d6f3abec1a +824, 0x7dff639fbcf1824 +825, 0x6666985fbcff543d +826, 0xb618948e3d8e6d0c +827, 0x77b87837c794e068 +828, 0xcd48288d54fcb5a8 +829, 0x47a773ed6ae30dc3 +830, 0xba85ae44e203c942 +831, 0xa7a7b21791a25b2d +832, 0x4029dd92e63f19e0 +833, 0xc2ad66ab85e7d5aa +834, 0xa0f237c96fdab0db +835, 0xffefb0ab1ca18ed +836, 0x90cb4500785fd7d5 +837, 0xa7dd3120f4876435 +838, 0x53f7872624694300 +839, 0xea111326ff0040d9 +840, 0x5f83cb4cce40c83b +841, 0x918e04936c3b504d +842, 0x87a8db4c0e15e87c +843, 0x7cff39da6a0dedd0 +844, 0x36f7de2037f85381 +845, 0xd1d8d94022a1e9a7 +846, 0x2c9930127dc33ec9 +847, 0x6cb4719dcd0101c6 +848, 0xc01868cde76935f7 +849, 0x6b86f2ec1ab50143 +850, 0x68af607d8d94ae61 +851, 0xe216c5b95feedf34 +852, 0x4b866bd91efe2e4b +853, 0x4bff79df08f92c99 +854, 0x6ff664ea806acfd1 +855, 0x7fce0b3f9ece39bc +856, 0x29bc90b59cb3db97 +857, 0x833c4b419198607d +858, 0xf3573e36ca4d4768 +859, 0x50d71c0a3c2a3fa8 +860, 0xd754591aea2017e7 +861, 0x3f9126f1ee1ebf3 +862, 0xe775d7f4b1e43de8 +863, 0xe93d51628c263060 +864, 0x83e77f6fb32d6d82 +865, 0x43dd7eef823408e4 +866, 0x1c843c2c90180662 +867, 0xe924dafb9a16066b +868, 0x6af3ee96e7b7fbd9 +869, 0x94d5c4f37befcd1f +870, 0x40ffb04bedef4236 +871, 0x71c17bbc20e553e +872, 0x101f7a0a6208729f +873, 0x5ca34570cf923548 +874, 0x8e3139db2e96e814 +875, 0x3ab96d96263d048d +876, 0x97f3c0bbc6755c3c +877, 0x31fc72daedaef3dc +878, 0x71f8d7855d10789b +879, 0xce6dc97b4662333b +880, 0xfddc2aabd342bc61 +881, 0xefbd4007ff8c7d2e +882, 0xf72cd6c689ef8758 +883, 0x932c8b0c0e755137 +884, 0x94cc4dedd58ff69 +885, 0xde4dfd6890535979 +886, 0xdb00dcd2dcb4a50a +887, 0xb0466240b4548107 +888, 0x9cb9264c7b90d1a3 +889, 0x357e378e9be5766b +890, 0x6e0316ef03367bbf +891, 0x201ea18839544ca +892, 0x803ff3406be5f338 +893, 0xf9d5e82fd4144bb2 +894, 0x1b6b88ca701e9f47 +895, 0xd1fe5ab8e1f89cc0 +896, 0x14171fe176c4bece +897, 0x887948bdef78beaa +898, 0x80449ddc3eb9b977 +899, 0x5f4e1f900fb4bcf3 +900, 0xbe30f8701909f8e2 +901, 0xd1f2a2fb5503306d +902, 0x6b1c77238dc23803 +903, 0x102156a6c9860f66 +904, 0x4cd446e099edf4c1 +905, 0xc79ac6cbc911f33b +906, 0x3ee096ffe3384f1c +907, 0xb58f83b18a306dc7 +908, 0x9f76582141de56b2 +909, 0x9ddfa85e02c13866 +910, 0x4d9a19d4ce90a543 +911, 0xbf81ab39fd17d376 +912, 0x5327e5054c6a74f1 +913, 0xd5062dd31db1a9b7 +914, 0x645853735527edc +915, 0x485393967f91af08 +916, 0xeff9667dcf77ca68 +917, 0xd012313f5fbec464 +918, 0xbeae35bdfae55144 +919, 0x302c41ebac8444a0 +920, 0x9ccdb6c2fe58fba8 +921, 0x567753af68ed23f8 +922, 0xff90f790e43efec3 +923, 0x970cc756fb799696 +924, 0xe59239d1c44915 +925, 0x4d2d189fb3941f05 +926, 0x96f23085db165a9c +927, 0xa1202dec7a37b1a5 +928, 0xc0c1ee74bcd7dc1a +929, 0x9edcf2048b30333a +930, 0xd848588ba7e865fb +931, 0x8d9f0897317cab40 +932, 0x67b96f15e25924fb +933, 0xefc8d8536619ee42 +934, 0xf3f621d22bdde0c2 +935, 0x68610a0de862ae32 +936, 0xa22ca5142de24cbd +937, 0x8815452f4e6b4801 +938, 0x4e9c1b607b2750e5 +939, 0x19b3c09ba6fc9b25 +940, 0x9b2543c8836780ac +941, 
0xe702b8f950e56431 +942, 0xb357cc329cac3917 +943, 0x387bf86a17a31e08 +944, 0x9940b983d331b163 +945, 0xf5d89d7fe9095e18 +946, 0x4362682329e5c4d1 +947, 0xd2132573f6ae7b42 +948, 0xc0a5849e23a61606 +949, 0xdadbddf47265bc02 +950, 0x1b96f00339a705f7 +951, 0x94e6642329288913 +952, 0x825ab3f10e6d330b +953, 0x1a1c31ac9d883ea0 +954, 0xb49076b7155c6f47 +955, 0x920cf3085dfe3ccb +956, 0x9743407c9f28e825 +957, 0x6ce8a28622402719 +958, 0xce2fe67e06baf8a6 +959, 0x3a16b34784ecf5e6 +960, 0x140467cc1d162a0c +961, 0x32d4772692ab625 +962, 0xa4f4b28562f43336 +963, 0x885b4335457bd84a +964, 0x499d3ed26c87ad8a +965, 0xc7328bcedb9a545e +966, 0xc6dd76a6cbf5d2b2 +967, 0xba9c22be404ee1aa +968, 0x70e6aee45f23521d +969, 0x61e03a798593c177 +970, 0x171671f809c68213 +971, 0x28d54872fc1d914c +972, 0x43c2fcd9bd098b53 +973, 0x172ad4c4a98b9d37 +974, 0x330860c9460f2516 +975, 0x49547f472df984f4 +976, 0x873b2436d3f0e114 +977, 0x6f99accf4ea050b6 +978, 0x5968ac874ed51613 +979, 0x4939d70d29a3c611 +980, 0x11f381ed28738d3d +981, 0xa97430d36ab3a869 +982, 0xe6fa880801129e22 +983, 0xf84decbd8f48c913 +984, 0x4425c0ed1e9a82a5 +985, 0x7a1f9485e9929d5a +986, 0xc7c51f155dfce1c6 +987, 0x9619a39501d74f2b +988, 0x7c7035955dbf4c1b +989, 0xc61ee569cf57c2c9 +990, 0x3eaf7c5b0df734e1 +991, 0xe71cb4064d1ede05 +992, 0x356e3cec80e418b2 +993, 0xca04306243a15be6 +994, 0x941cf3881fa18896 +995, 0x30dbb0e819d644e0 +996, 0xaae22c0bef02859a +997, 0x7bd30917bbaa8a94 +998, 0x2672547bc8d7d329 +999, 0x4955c92aaa231578 diff --git a/MLPY/Lib/site-packages/numpy/random/tests/data/pcg64dxsm-testset-2.csv b/MLPY/Lib/site-packages/numpy/random/tests/data/pcg64dxsm-testset-2.csv new file mode 100644 index 0000000000000000000000000000000000000000..dc94eca86e909d967b8d266c49fba512b843dbf5 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/tests/data/pcg64dxsm-testset-2.csv @@ -0,0 +1,1001 @@ +seed, 0x0 +0, 0xd97e4a147f788a70 +1, 0x8dfa7bce56e3a253 +2, 0x13556ed9f53d3c10 +3, 0x55dbf1c241341e98 +4, 0xa2cd98f722eb0e0a +5, 0x83dfc407203ade8 +6, 0xeaa083df518f030d +7, 0x44968c87e432852b +8, 0x573107b9cb8d9ecc +9, 0x9eedd1da50b9daca +10, 0xb33a6735ca451e3c +11, 0x72830d2b39677262 +12, 0x9da8c512fd0207e8 +13, 0x1fc5c91954a2672b +14, 0xd33479437116e08 +15, 0x9ccdd9390cee46f3 +16, 0x1fd39bb01acd9e76 +17, 0xedc1869a42ff7fe5 +18, 0xbd68ca0b42a6e7e9 +19, 0x620b67df09621b1f +20, 0xfa11d51bd6950221 +21, 0xc8c45b36e7d28d08 +22, 0xe9c91272fbaad777 +23, 0x2dc87a143f220e90 +24, 0x6376a7c82361f49d +25, 0x552c5e434232fe75 +26, 0x468f7f872ac195bc +27, 0x32bed6858125cf89 +28, 0xe4f06111494d09d3 +29, 0xa5c166ffea248b80 +30, 0x4e26605b97064a3f +31, 0xceafd9f6fc5569d +32, 0xb772f2f9eed9e106 +33, 0x672c65e6a93534e2 +34, 0xcdc5e1a28d1bd6a0 +35, 0x1ed9c96daeebd3e3 +36, 0x4d189dcfc0c93c3f +37, 0x50df5a95c62f4b43 +38, 0xcccf4949fa65bbb8 +39, 0x19b8073d53cdc984 +40, 0x6fb40bba35483703 +41, 0xb02de4aef86b515a +42, 0x4d90c63655350310 +43, 0xea44e4089825b16c +44, 0x8d676958b1f9da2b +45, 0x6d313940917ae195 +46, 0x1b1d35a4c1dd19f4 +47, 0x117720f8397337ef +48, 0xcc073cf3ac11eeaa +49, 0x8331ec58a9ff8acb +50, 0xf3dc2a308b6b866f +51, 0x7eba1202663382b6 +52, 0x8269839debeb4e5a +53, 0x87fd3dc0f9181a8e +54, 0xabe62ddd3c925f03 +55, 0x7f56f146944fe8d4 +56, 0xc535972150852068 +57, 0x60b252d453bd3a68 +58, 0x4251f0134634490a +59, 0x338950da210dfeb2 +60, 0xcadfe932971c9471 +61, 0xfb7049457fab470e +62, 0x9bfb8145a4459dff +63, 0x4a89dda3898f9d8a +64, 0x88cc560151483929 +65, 0x277dc820f4b6796e +66, 0x3524bd07ea0afb88 +67, 0x92eb6ffb2bf14311 +68, 0xf6559be0783f3fe9 +69, 0xf0844f9af54af00d +70, 0xdd5e0b59adcef8a +71, 
0x4ff7e4f2ab18554c +72, 0x3fa22c8a02634587 +73, 0x1db8e1a9442fe300 +74, 0x40cf15953ad3d3e7 +75, 0x92af15fe1a9f6f0a +76, 0xab4a0e466fb0cfd +77, 0x944f1555a06cca82 +78, 0x10cf48412f1f6066 +79, 0x7f51f9a455f9e8e1 +80, 0x47ee93530f024c7e +81, 0x36cf2f0413e0f6f2 +82, 0xa315e23731969407 +83, 0xd8e2796327cf5f87 +84, 0xa86072696a555c34 +85, 0xee3f0b8804feaab7 +86, 0x41e80dc858f8360b +87, 0x31ec2e9b78f5b29 +88, 0xd397fb9b8561344c +89, 0x28081e724e649b74 +90, 0x5c135fc3fc672348 +91, 0x9a276ca70ce9caa0 +92, 0x9216da059229050a +93, 0xcf7d375ed68007b0 +94, 0xa68ad1963724a770 +95, 0xd4350de8d3b6787c +96, 0xee7d2c2cc275b6d2 +97, 0x71645ec738749735 +98, 0x45abdf8c68d33dbb +99, 0xe71cadb692c705ea +100, 0x60af6f061fd90622 +101, 0x1eabe2072632c99d +102, 0x947dda995a402cb6 +103, 0xbb19f49a3454f3b +104, 0xe6e43e907407758c +105, 0xfe2b67016bd6873a +106, 0x7fdb4dd8ab30a722 +107, 0x39d3265b0ff1a45b +108, 0xed24c0e4fce8d0c2 +109, 0xf6e074f86faf669d +110, 0x9142040df8dc2a79 +111, 0x9682ab16bc939a9c +112, 0x6a4e80c378d971c8 +113, 0x31309c2c7fc2d3d6 +114, 0xb7237ec682993339 +115, 0x6a30c06bb83dccd9 +116, 0x21c8e9b6d8e7c382 +117, 0x258a24ae6f086a19 +118, 0xb76edb5be7df5c35 +119, 0x3c11d7d5c16e7175 +120, 0xbdfc34c31eff66e1 +121, 0x8af66e44be8bf3a2 +122, 0x3053292e193dec28 +123, 0xd0cc44545b454995 +124, 0x408ac01a9289d56 +125, 0x4e02d34318ec2e85 +126, 0x9413ff3777c6eb6b +127, 0xa3a301f8e37eb3df +128, 0x14e6306bd8d8f9f9 +129, 0xd3ea06ce16c4a653 +130, 0x170abe5429122982 +131, 0x7f9e6fddc6cacb85 +132, 0xa41b93e10a10a4c8 +133, 0x239216f9d5b6d0b5 +134, 0x985fcb6cb4190d98 +135, 0xb45e3e7c68f480c6 +136, 0xc1b2fc2e0446211c +137, 0x4596adb28858c498 +138, 0x2dd706f3458ddc75 +139, 0x29c988c86f75464 +140, 0xac33a65aa679a60 +141, 0xa28fef762d39d938 +142, 0x541e6fa48647f53 +143, 0x27838d56b2649735 +144, 0x8e143d318a796212 +145, 0xaea6097745f586b8 +146, 0x636143330f8ee2e6 +147, 0xc2d05fd8b945b172 +148, 0x6e355f9eb4353055 +149, 0xeb64ca42e8bf282e +150, 0xe8202dfd9da0fe5 +151, 0x7305689c9d790cba +152, 0xf122f8b1bef32970 +153, 0x9562887e38c32ba5 +154, 0xf9cd9be121b738d +155, 0x6238e0c398307913 +156, 0x5f2e79bb07c30f47 +157, 0x8ce8e45c465006e +158, 0x39281fe1e99e2441 +159, 0xafb10c2ca2874fea +160, 0x6e52f91633f83cf +161, 0x8ff12c1ac73c4494 +162, 0xe48608a09365af59 +163, 0xefd9bbc7e76e6a33 +164, 0xbe16a39d5c38ec92 +165, 0x6a6ffbcaf5a2330f +166, 0xdd5d6ac7d998d43d +167, 0x207bf978226d4f11 +168, 0xf8eec56bd2a0f62e +169, 0xa5bccf05dce0d975 +170, 0x93cf3ec1afe457a6 +171, 0x38651466d201f736 +172, 0x3ad21473985c9184 +173, 0xc6407a3bd38c92a6 +174, 0xb1ec42c7afa90a25 +175, 0xbdeca984df8b7dd3 +176, 0xb6926b1d00aa6c55 +177, 0x86141d0022352d49 +178, 0x169316256135ee09 +179, 0xffb1c7767af02a5c +180, 0x502af38ad19f5c91 +181, 0xfbf6cbc080086658 +182, 0x33cf9b219edae501 +183, 0x46e69bebd77b8862 +184, 0xf11e0cc91125d041 +185, 0xb4cd1649f85e078f +186, 0xb49be408db4e952 +187, 0xb0b8db46140cce3c +188, 0xba647f2174012be7 +189, 0x4f0a09e406970ac9 +190, 0xf868c7aec9890a5c +191, 0xde4c8fa7498ea090 +192, 0x872ceb197978c1d4 +193, 0x1eb5cd9c3269b258 +194, 0x3ea189f91724f014 +195, 0x41379656f7746f2c +196, 0x7bd18493aca60e51 +197, 0x5380c23b0cbbf15e +198, 0x920b72835f88246b +199, 0x24d7f734a4548b8e +200, 0x9944edb57e5aa145 +201, 0x4628e136ebb8afe1 +202, 0xb4ee6a776356e2a7 +203, 0x481cbe9744ccf7d7 +204, 0x7e8d67e8b0b995d9 +205, 0xeeacde100af7b47e +206, 0x103da08f2487dab7 +207, 0x6b9890a91d831459 +208, 0xd0c5beae37b572c7 +209, 0xfdccc371ee73fcc +210, 0x65438f0a367a2003 +211, 0x5d23b2c818a7e943 +212, 0x9a8ed45ac04b58b3 +213, 0xdaf3c3f1695dce10 +214, 0x5960eec706fa2bc0 
+215, 0x98ca652facb80d40 +216, 0x72970ae5e2194143 +217, 0x18c6374d878c5c94 +218, 0x20fa51f997381900 +219, 0x3af253dba26d6e1d +220, 0x1b23d65db15c7f78 +221, 0x9f53ae976259b0e3 +222, 0x9a6addb28dc92d49 +223, 0x1e085c4accd0a7d7 +224, 0xe9d3f4cc9bad6ce5 +225, 0xe018fad78b5b1059 +226, 0x5ef7682232b4b95 +227, 0xb2242aa649f5de80 +228, 0x8f3e6d8dd99b9e4e +229, 0xb9be6cc22949d62a +230, 0xecbdc7beaa5ff1fe +231, 0xd388db43a855bdf0 +232, 0xd71ee3238852568d +233, 0x85ab3056304c04b5 +234, 0x2ed7ae7ad3cfc3cb +235, 0x781d1b03d40b6c48 +236, 0x7d3c740886657e6d +237, 0x982cfa6828daa6b0 +238, 0x278579599c529464 +239, 0x773adecfae9f0e08 +240, 0x63a243ea4b85c5d7 +241, 0x59940074fc3709e1 +242, 0xc914a2eed58a6363 +243, 0x2602b04274dd724c +244, 0xdf636eb7636c2c42 +245, 0x891a334d0d26c547 +246, 0xde8cd586d499e22d +247, 0x3ea1aa4d9b7035b6 +248, 0xd085cff6f9501523 +249, 0xe82a872f374959e +250, 0x55cb495bbd42cc53 +251, 0x5f42b3226e56ca97 +252, 0xea463f6f203493a3 +253, 0xeef3718e57731737 +254, 0x1bd4f9d62b7f9f3c +255, 0x19284f5e74817511 +256, 0xaf6e842c7450ca87 +257, 0x1d27d2b08a6b3600 +258, 0xfb4b912b396a52e3 +259, 0x30804d4c5c710121 +260, 0x4907e82564e36338 +261, 0x6441cf3b2900ddb7 +262, 0xd76de6f51988dc66 +263, 0x4f298ef96fd5e6d2 +264, 0x65432960c009f83d +265, 0x65ebed07e1d2e3df +266, 0xf83ee8078febca20 +267, 0x7bb18e9d74fc5b29 +268, 0x597b5fbc2261d91 +269, 0xea4f8ed0732b15b2 +270, 0xba2267f74f458268 +271, 0x3f304acabd746bbb +272, 0x7bd187af85659a82 +273, 0x88e20dbdb7a08ea3 +274, 0x2a2dc948c772fcb4 +275, 0x87784fec2993c867 +276, 0x89163933cd362d4e +277, 0xfd7b24f04302f957 +278, 0x9bdd544405dfb153 +279, 0xddee0fac58ffc611 +280, 0xa8e8993417e71ec1 +281, 0x55e0ab46ff7757af +282, 0x53e7645f08d3d7df +283, 0xbf78e563bc656ba2 +284, 0x1d162253b45ee2de +285, 0x15e2bfefedf29eb4 +286, 0x4e2a4584aa394702 +287, 0xa89fb12b01525897 +288, 0x825bd98f0544e4df +289, 0xfc6c50da6750700 +290, 0xc24aaabde7d28423 +291, 0x79d6f4660fcb19e5 +292, 0xee7d4fb40c8d659f +293, 0x70bc281b462e811d +294, 0x23ed4dc9636519a7 +295, 0xcb7c3f5a5711b935 +296, 0xe73090e0508c5d9d +297, 0xb25a331f375952a6 +298, 0xa64c86e0c04740f6 +299, 0xb8f3ffc8d56ac124 +300, 0x2479266fc5ee6b15 +301, 0x8d5792d27f5ffbcb +302, 0xb064298be946cd52 +303, 0xf0934a98912ffe26 +304, 0xbe805682c6634d98 +305, 0xe0e6e2c010012b4f +306, 0x58c47d475f75976 +307, 0x358c9a6e646b2b4a +308, 0x7e7c4ffca5b17ba7 +309, 0x43585c8c9a24a04c +310, 0x5154ddbcd68d5c2c +311, 0x4a2b062d3742a5e +312, 0xca5691191da2b946 +313, 0x696a542109457466 +314, 0x9eb5d658a5022ba5 +315, 0x8158cf6b599ab8dc +316, 0x1b95391eaa4af4a6 +317, 0x9953e79bd0fc3107 +318, 0x8639690086748123 +319, 0x2d35781c287c6842 +320, 0x393ef0001cd7bc8f +321, 0xe3a61be8c5f2c22a +322, 0x5e4ff21b847cc29b +323, 0x4c9c9389a370eb84 +324, 0xd43a25a8fc3635fa +325, 0xf6790e4a85385508 +326, 0x37edf0c81cb95e1d +327, 0x52db00d6e6e79af8 +328, 0x3b202bceeb7f096 +329, 0x2a164a1c776136bb +330, 0x73e03ee3fd80fd1b +331, 0xd2c58c0746b8d858 +332, 0x2ed2cb0038153d22 +333, 0x98996d0fc8ceeacc +334, 0xa4ed0589936b37f +335, 0x5f61cf41a6d2c172 +336, 0xa6d4afb538c110d7 +337, 0xe85834541baadf1a +338, 0x4c8967107fd49212 +339, 0x49bafb762ab1a8c1 +340, 0x45d540e2a834bf17 +341, 0x1c0ec8b4ed671dac +342, 0x3d503ce2c83fe883 +343, 0x437bfffd95f42022 +344, 0xc82d1e3d5c2bc8d2 +345, 0x7a0a9cbfcb0d3f24 +346, 0xc0a4f00251b7a3be +347, 0xb5be24e74bb6a1c6 +348, 0xa3104b94b57545b1 +349, 0x86de7d0c4b97b361 +350, 0x879c1483f26538a6 +351, 0xd74c87557f6accfb +352, 0x2f9be40dbf0fe8a1 +353, 0x445a93398f608d89 +354, 0x7b3cb8a7211d7fdc +355, 0xe86cc51290d031e7 +356, 0x33ef3594052ad79f +357, 
0xc61911d241dbb590 +358, 0x37cccb0c0e3de461 +359, 0xb75259124080b48b +360, 0xd81e8961beb4abe5 +361, 0xf4542deb84a754e +362, 0x6ea036d00385f02e +363, 0xa7b60b0ac3b88681 +364, 0x108a6c36ca30baf5 +365, 0x4a2adc5bbfe2bf07 +366, 0x4079501f892a5342 +367, 0x55e113963c5448f0 +368, 0x8019ff4903b37242 +369, 0x109c6dcdb7ec6618 +370, 0x1239ac50944da450 +371, 0xe1399c7f94c651c1 +372, 0x5a6bbbae388d365a +373, 0x4d72be57b8810929 +374, 0x3f067df24384e1fb +375, 0x4f8b9e0f7f6c7be +376, 0x202492c342a3b08 +377, 0x250753192af93a3 +378, 0xfba1159d9de2cb8e +379, 0xba964497ab05505c +380, 0x1329ec5d8a709dca +381, 0x32927cacb6cd22bb +382, 0x6b4d7db904187d56 +383, 0xe76adccf8e841e02 +384, 0x8c4bf4b6a788202 +385, 0x3013a3b409831651 +386, 0x7427d125c475412f +387, 0x84dcc4bb2bf43202 +388, 0x117526f1101372a5 +389, 0xfe95d64b8984bd72 +390, 0x524e129934cc55c1 +391, 0xc3db4b0418c36d30 +392, 0xe1cb2047e9c19f7a +393, 0xea43d6c8d8982795 +394, 0xe80ac8a37df89ed +395, 0xfecc2104329ed306 +396, 0xa5c38aac9c1d51ea +397, 0x3abe5d1c01e4fe17 +398, 0x717a805d97fcc7ac +399, 0x94441f8207a1fb78 +400, 0x22d7869c5f002607 +401, 0x349e899f28c3a1b9 +402, 0x5639950cdea92b75 +403, 0x7e08450497c375b +404, 0x94bf898b475d211d +405, 0x75c761a402375104 +406, 0x1930920ec9d2a1e7 +407, 0xb774ba1bc6f6e4e2 +408, 0xf715602412e5d900 +409, 0x87bb995f4a13f0ba +410, 0xa3c787868dfa9c8d +411, 0xa17fd42a5a4f0987 +412, 0x4a9f7d435242b86 +413, 0x240364aff88f8aef +414, 0xe7cd4cf4bf39f144 +415, 0xd030f313ca4c2692 +416, 0xc46696f4e03ec1e9 +417, 0x22c60f1ec21060b3 +418, 0x16c88058fd68986f +419, 0x69ca448e8e6bde3f +420, 0x3466c2cdec218abd +421, 0x837ac4d05e6b117d +422, 0x911210e154690191 +423, 0x9ece851d6fa358b7 +424, 0x42f79cb0c45e7897 +425, 0xbf7583babd7c499b +426, 0x2059fe8031c6e0b9 +427, 0xabbec8fc00f7e51d +428, 0x88809d86a3a256e1 +429, 0xd36056df829fdcb5 +430, 0x515632b6cb914c64 +431, 0xba76d06c2558874 +432, 0x632c54ca4214d253 +433, 0xadec487adf2cb215 +434, 0x521e663e1940513d +435, 0xb1b638b548806694 +436, 0xbe2d5bfbe57d2c72 +437, 0x8b89e7719db02f7 +438, 0x90ba5281c1d56e63 +439, 0x899e1b92fceea102 +440, 0xf90d918e15182fa6 +441, 0x94a489ce96c948c4 +442, 0xad34db453517fcd4 +443, 0xc5264eb2de15930f +444, 0x101b4e6603a21cee +445, 0xef9b6258d6e85fff +446, 0x6075c7d6c048bd7a +447, 0x6f03232c64e438aa +448, 0x18c983d7105ee469 +449, 0x3ffc23f5c1375879 +450, 0xbc1b4a00afb1f9f +451, 0x5afa6b2bb8c6b46e +452, 0xe7fce4af2f2c152a +453, 0x5b00ab5c4b3982c7 +454, 0x2d4b0c9c0eb4bd0c +455, 0x61d926270642f1f2 +456, 0x7219c485c23a2377 +457, 0x7e471c752fecd895 +458, 0x23c4d30a4d17ba1f +459, 0x65cb277fe565ca22 +460, 0xcbb56ed9c701363b +461, 0xfd04ab3a6eba8282 +462, 0x19c9e5c8bab38500 +463, 0xea4c15227676b65b +464, 0x20f3412606c8da6f +465, 0xb06782d3bf61a239 +466, 0xf96e02d5276a9a31 +467, 0x835d256b42aa52a6 +468, 0x25b09151747f39c1 +469, 0x64507386e1103eda +470, 0x51cbc05716ef88e4 +471, 0x998cd9b7989e81cc +472, 0x9d7115416bec28d1 +473, 0xc992ca39de97906b +474, 0xd571e6f7ca598214 +475, 0xafc7fb6ccd9abbf8 +476, 0x88ef456febff7bf4 +477, 0xdbe87ccc55b157d2 +478, 0xaab95e405f8a4f6d +479, 0xad586a385e74af4f +480, 0x23cd15225c8485aa +481, 0x370940bf47900ac7 +482, 0xefd6afda1a4b0ead +483, 0x9cb1a4c90993dd7a +484, 0xff7893e8b2f70b11 +485, 0xb09e1807c0638e8e +486, 0xb10915dcb4978f74 +487, 0x88212ab0051a85eb +488, 0x7af41b76e1ec793f +489, 0x2e5c486406d3fefd +490, 0xebe54eff67f513cc +491, 0xab6c90d0876a79b8 +492, 0x224df82f93fe9089 +493, 0xc51c1ce053dc9cd2 +494, 0x5ef35a4d8a633ee7 +495, 0x4aca033459c2585f +496, 0xd066932c6eefb23d +497, 0x5309768aab9a7591 +498, 0xa2a3e33823df37f9 +499, 
0xcec77ff6a359ee9 +500, 0x784dc62d999d3483 +501, 0x84e789fb8acc985d +502, 0xd590237e86aa60f +503, 0x737e2ffe1c8ad600 +504, 0xc019c3a39a99eab8 +505, 0x6a39e9836964c516 +506, 0xe0fe43129535d9da +507, 0xdfc5f603d639d4de +508, 0x7b9a7d048a9c03b6 +509, 0xbb5aa520faa27fdd +510, 0x2a09b4200f398fa2 +511, 0x38cc88107904064e +512, 0xa9a90d0b2d92bb25 +513, 0x9419762f87e987e3 +514, 0x1a52c525153dedcd +515, 0xc26d9973dd65ae99 +516, 0x8e89bd9d0dc6e6a1 +517, 0x2f30868dc01bfb53 +518, 0x20f09d99b46501c4 +519, 0x78b468a563b8f1e9 +520, 0xcccf34b0b6c380c7 +521, 0xf554e7dc815297e6 +522, 0x332a585cfb4a50ef +523, 0xa9fb64a2b6da41d7 +524, 0xdcd2a5a337391ce0 +525, 0x8a9bd3e324c6463d +526, 0x9f4487d725503bdd +527, 0xf72282d82f1d0ff +528, 0x308f4160abb72d42 +529, 0x648de1db3a601b08 +530, 0x36cab5192e7ebd39 +531, 0x7975fbe4ab6a1c66 +532, 0xd515b4d72243864e +533, 0x43a568f8b915e895 +534, 0x15fa9f2057bdb91d +535, 0x7a43858ef7a222dc +536, 0x17b4a9175ac074fe +537, 0xa932c833b8d0f8f8 +538, 0x1d2db93a9a587678 +539, 0x98abd1d146124d27 +540, 0xf0ab0431671740aa +541, 0xa9d182467540ad33 +542, 0x41c8a6cfc331b7fc +543, 0xa52c6bd0fcd1d228 +544, 0x2773c29a34dc6fa3 +545, 0x3098230746fc1f37 +546, 0xd63311bb4f23fabe +547, 0x6712bf530cd2faec +548, 0x342e8f342e42c4dd +549, 0xfbd83331851cdcad +550, 0xe903be1361bbc34d +551, 0xd94372e5077e3ef9 +552, 0x95aaa234f194bd8 +553, 0x20c0c8fb11e27538 +554, 0xfaf47dc90462b30b +555, 0x8ddc6d144147682a +556, 0xf626833fd926af55 +557, 0x5df93c34290d1793 +558, 0xb06a903e6e9fca5e +559, 0x10c792dc851d77ca +560, 0xd9b1b817b18e56cb +561, 0x3a81730c408eb408 +562, 0x65052c04a8d4b63c +563, 0x3328546598e33742 +564, 0xeca44a13f62d156d +565, 0x69f83d1d86b20170 +566, 0x937764200412027d +567, 0xc57eb1b58df0f191 +568, 0xa1c7d67dce81bc41 +569, 0x8e709c59a6a579ce +570, 0x776a2f5155d46c70 +571, 0xd92906fbbc373aa5 +572, 0xe97ad478a2a98bf6 +573, 0xc296c8819ac815f +574, 0x613ede67ba70e93e +575, 0xe145222498f99cde +576, 0xafcdfa7a3c1cf9bf +577, 0x1c89252176db670d +578, 0xad245eda5c0865ff +579, 0x249463d3053eb917 +580, 0xc9be16d337517c0b +581, 0xefcc82bf67b8f731 +582, 0x1e01577d029e0d00 +583, 0xad9c24b2a4f3d418 +584, 0xed2cceb510db4d0f +585, 0xbddadcdb92400c70 +586, 0x67d6b0476ef82186 +587, 0xbc7662ff7bf19f73 +588, 0x9d94452a729e6e92 +589, 0x6b278d8594f55428 +590, 0x6c4b31cceb1b2109 +591, 0xccc6c3a726701e9 +592, 0x6bc28ece07df8925 +593, 0xc0422b7bf150ccc4 +594, 0xab7158f044e73479 +595, 0xdf3347546d9ed83f +596, 0x3b3235a02c70dff4 +597, 0x2551c49c14ea8d77 +598, 0xee2f7f5bb3cc228e +599, 0x39b87bfe8c882d39 +600, 0x7dd420fad380b51c +601, 0xffe64976af093f96 +602, 0x4a4f48dc6e7eaa5f +603, 0x85f2514d32fdc8cc +604, 0x1ab1215fd7f94801 +605, 0x4cd1200fc795b774 +606, 0xcf8af463a38942ee +607, 0x319caa7ce3022721 +608, 0x8cd9798a76d1aea4 +609, 0x2bd3933ac7afd34e +610, 0x85d4c323403cf811 +611, 0xd7b956d3064efa30 +612, 0x67a078dbf1f13068 +613, 0x665fa6c83e87c290 +614, 0x9333ac2416d2469b +615, 0xdfb1fd21a0094977 +616, 0xa1962a6e2c25f8ff +617, 0x1f3b10a7ed5287cf +618, 0x70641efb3d362713 +619, 0xe527a2cf85d00918 +620, 0x9741e45d3f9890a3 +621, 0x6cb74b5d4d36db4b +622, 0xf24734d622bd2209 +623, 0xadd6d94f78e9d378 +624, 0xc3bbdb59225cca7f +625, 0x5ad36614275b30cd +626, 0x495568dd74eea434 +627, 0xf35de47e0ffe1f2d +628, 0xefa209dca719ab18 +629, 0x844ddcaeb5b99ae8 +630, 0x37449670a1dc7b19 +631, 0x5a4612c166f845c1 +632, 0xe70f7782f2087947 +633, 0x98d484deac365721 +634, 0x705302198cf52457 +635, 0x7135ae0f5b77df41 +636, 0x342ac6e44a9b6fc3 +637, 0x2713fd2a59af5826 +638, 0x6e1a3f90f84efa75 +639, 0x9fb3b4dd446ca040 +640, 0x530044ae91e6bd49 +641, 
0xe984c4183974dc3e +642, 0x40c1fa961997d066 +643, 0xb7868250d8c21559 +644, 0x8bc929fa085fd1de +645, 0x7bdb63288dc8733e +646, 0xac4faad24326a468 +647, 0x1c6e799833aea0b1 +648, 0xcc8a749e94f20f36 +649, 0x4e7abfd0443547c5 +650, 0xb661c73bb8caa358 +651, 0x4a800f5728ff2351 +652, 0x8c15e15189b9f7ed +653, 0xab367846b811362c +654, 0x4ba7508f0851ca2a +655, 0xe9af891acbafc356 +656, 0xbdebe183989601f8 +657, 0x4c665ea496afc061 +658, 0x3ca1d14a5f2ed7c +659, 0xfbdff10a1027dd21 +660, 0xdfd28f77c8cff968 +661, 0xc4fbaadf8a3e9c77 +662, 0xdac7e448b218c589 +663, 0xb26390b5befd19e2 +664, 0xd2ef14916c66dba9 +665, 0xfab600284b0ff86b +666, 0xf04a1c229b58dabb +667, 0xc21c45637e452476 +668, 0xd1435966f75e0791 +669, 0xc1f28522eda4a2d0 +670, 0x52332ae8f1222185 +671, 0x81c6c0790c0bf47e +672, 0xfebd215e7d8ffb86 +673, 0x68c5dce55dbe962b +674, 0x231d09cb0d2531d1 +675, 0x3218fba199dbbc6b +676, 0x8f23c535f8ea0bf6 +677, 0x6c228963e1df8bd9 +678, 0x9843c7722ed153e3 +679, 0xd032d99e419bddec +680, 0xe2dca88aa7814cab +681, 0x4d53fb8c6a59cdc2 +682, 0x8fb3abc46157b68b +683, 0xa3e733087e09b8e +684, 0x6bdc1aee029d6b96 +685, 0x4089667a8906d65b +686, 0x8f3026a52d39dd03 +687, 0x6d2e0ccb567bae84 +688, 0x74bad450199e464 +689, 0xf114fb68a8f300d5 +690, 0xc7a5cc7b374c7d10 +691, 0xf0e93da639b279d1 +692, 0xb9943841ad493166 +693, 0x77a69290455a3664 +694, 0x41530da2ebea054b +695, 0xe8f9fab03ea24abf +696, 0xaa931f0c9f55a57a +697, 0xb4d68a75d56f97ae +698, 0x3d58ff898b6ba297 +699, 0x49d81e08faf5a3f5 +700, 0xfc5207b9f3697f3b +701, 0xa25911abb3cf19b7 +702, 0x6b8908eb67c3a41 +703, 0xd63ef402e2e3fa33 +704, 0x728e75d3f33b14c5 +705, 0x248cb1b8bc6f379a +706, 0x3aa3d6d2b8c72996 +707, 0x49cc50bd2d3d2860 +708, 0xb4e1387647c72075 +709, 0x435a1630a4a81ed3 +710, 0xa5ea13005d2460cf +711, 0xc7a613df37d159ec +712, 0x95721ccc218b857e +713, 0xd4b70d8c86b124d3 +714, 0x2b82bcc4b612d494 +715, 0xaf13062885276050 +716, 0xcbd8fcf571a33d9c +717, 0x3f7f67ca1125fc15 +718, 0xddf4bb45aac81b4c +719, 0x23606da62de9c040 +720, 0xa3a172375666b636 +721, 0x292f87387a6c6c3c +722, 0xd1d10d00c5496fe1 +723, 0x86b0411ce8a25550 +724, 0x38e0487872e33976 +725, 0x363e49f88ddfd42c +726, 0x45bdf1e9f6b66b0a +727, 0x8a6fff3de394f9b5 +728, 0x8502158bb03f6209 +729, 0x22e24d16dba42907 +730, 0x3fe3ba427cc2b779 +731, 0x77144793f66b3d7e +732, 0xcf8912ccb29b8af9 +733, 0xdc856caff2abd670 +734, 0xe6d3ae0b0d9d4c8b +735, 0xb8f5d40e454c539f +736, 0x79ca953114fbc6b7 +737, 0x478d6f4bbfa38837 +738, 0x9babae1a3ffdc340 +739, 0x40edd56802bae613 +740, 0x97a56c2dcccf0641 +741, 0xafc250257f027f8e +742, 0x8da41ef1edf69125 +743, 0x6574b0280ff9d309 +744, 0x197c776151b8f820 +745, 0x6b03e077c9dac3b6 +746, 0x24a40ebbc5c341c5 +747, 0x50e585169a6a1c4b +748, 0x37783a5a6a3e4e02 +749, 0xb3de81ee6fbad647 +750, 0xf4f292f57ca4591e +751, 0x6214e9e7d44d30a +752, 0x5920190c56d21c12 +753, 0x9ac163419b5e0c9b +754, 0xfc2328761ae8ed93 +755, 0xc68f945b545508c6 +756, 0x687c49a17ce0a5e2 +757, 0x276d8f53d30d4ab4 +758, 0x8201804970343ce1 +759, 0x1b5d323cc2e7fb7e +760, 0x6f351ef04fd904b +761, 0x6c793a7d455d5198 +762, 0x46f5d108430ae91f +763, 0xac16a15b2a0cf77f +764, 0xa0d479d9e4122b9d +765, 0x3afd94604307f19 +766, 0x2573ed6d39d38dbf +767, 0xa58e14ba60b4294b +768, 0xe69c1aed5840d156 +769, 0x4cf6fda7f04855c2 +770, 0x2fb65a56ef5f22da +771, 0xf95819434d5dc220 +772, 0x29c65133623dafba +773, 0x8e997bd018467523 +774, 0xfd08ba9d498461a7 +775, 0xdd52243bc78a5592 +776, 0x39c30108f6db88b3 +777, 0x38af8e1894f259b9 +778, 0x97eedf3b4ae5f6de +779, 0x757825add80c5ece +780, 0xf0fdd90ac14edb14 +781, 0xbbb19d4cc8cac6d4 +782, 0x9a82234edfae05e3 +783, 
0x704401c61d1edf1c +784, 0x8b0eb481fb3a1fb2 +785, 0xef6f36e7cc06c002 +786, 0x7a208b17e04b8cd7 +787, 0xf20e33d498838fe9 +788, 0xc2bdb22117058326 +789, 0x6ec31939eb4ca543 +790, 0x6f1654838f507a21 +791, 0xc65ab81a955d2b93 +792, 0x40b1420fdd9531b8 +793, 0xe31f221cab9f4f40 +794, 0x798cdd414c1deb7a +795, 0x9c84e9c7d41cd983 +796, 0x63d6b1ae3b60b7fa +797, 0xb42bfdd1a2f78ffa +798, 0x37e431eaccaaa8e9 +799, 0x7508142a0f73eac9 +800, 0x91662a023df5893a +801, 0x59782070e2fe3031 +802, 0xb2acd589a8ce7961 +803, 0xa224743fa877b292 +804, 0xaa5362aa27e6ed9e +805, 0xa394a4e520c0c1c7 +806, 0xe49b16d2018ffb6f +807, 0xb8074b9f2f1e762b +808, 0xcf5f86143d5c23a7 +809, 0xfd838785db987087 +810, 0x31b1889df389aff8 +811, 0x30aaca876a4383b +812, 0x1731bb71c4c38d4f +813, 0x9a83a65395e05458 +814, 0x99cd0c8d67c8f4fc +815, 0xfbd9fdc849b761a5 +816, 0x82c04834fc466889 +817, 0xdeef9d6e715e8c97 +818, 0x549c281c16da6078 +819, 0x2d70661254ad599d +820, 0x57995793a72acac +821, 0xf1727005116183ba +822, 0xa22bb38945285de3 +823, 0x4f2d687fe45131ff +824, 0x5666c87ddbbc981f +825, 0xbcb4b2d4e7a517d0 +826, 0x5e794dd2e20b785d +827, 0x449ad020149e093c +828, 0x7704ee0412d106f5 +829, 0x83cbdf257b072ac1 +830, 0xae5c4fc9f638b0da +831, 0x7b9e5a64e372ed47 +832, 0x7eddbbb22c2cdf57 +833, 0x3f19ebfa155b08e +834, 0x91d991154dfd7177 +835, 0x611ae74b952d387f +836, 0x3fdf7a335bda36ee +837, 0xdf182433fc7a7c05 +838, 0x62c78598d1f8db0a +839, 0xc3750c69d2c5c1f0 +840, 0xf1318024709efdee +841, 0xaa3fd360d224dc29 +842, 0x62af53b2f307c19 +843, 0xdf527683c58120c2 +844, 0x3281deecc496f93d +845, 0x4f704ad31527ef08 +846, 0x127a14a5e07cfdfc +847, 0x90d0b1f549255c92 +848, 0xbc3406b212c5e1fc +849, 0x4e89f39379dba91d +850, 0x1290ef43c4998e6e +851, 0xecfeb1a1cb1c6e1b +852, 0x2067e90403003bf1 +853, 0x38ae04be30bdbeba +854, 0x8a3537f298baedda +855, 0xd07f3b825cdb2936 +856, 0xea020b5aebae8b45 +857, 0xfcd614ab031132b0 +858, 0x5fb682a4ff2268f5 +859, 0xd1c4662ce65596f4 +860, 0x7026b8270dd0b8dc +861, 0x8101ec4b4beae45a +862, 0xa0e9dc87940610a6 +863, 0x83ec33679d83165b +864, 0x981847ca82e86d41 +865, 0xda84c188a304a0b7 +866, 0x3c37529c5a5bbbb8 +867, 0x34a8491ce3e19a5a +868, 0xd36ad716a2fa6cb8 +869, 0xfd1d1d6a5189a15c +870, 0x9716eb47851e8d8d +871, 0x7dfb13ea3b15c5aa +872, 0xbdf6e707f45113a5 +873, 0xb8118261b04bd097 +874, 0x6191f9895881bec6 +875, 0x7aac257ae11acf9b +876, 0x35a491e1537ff120 +877, 0xe078943432efa71c +878, 0xb3338485dd3dc2b9 +879, 0x456060975d2bb3b5 +880, 0xaddc4c451bdfc44c +881, 0x18bfa7beacf96430 +882, 0x8802ebcaf0f67498 +883, 0xad922a5a825bd780 +884, 0x9fb4587d748f4efa +885, 0xdb2a445136cd5e7 +886, 0xb98b3676ea8e96ac +887, 0xb02d8d244d784878 +888, 0xa1a8442b18860abb +889, 0x6a3029ba1361e5d1 +890, 0xf426d5fac161eb1 +891, 0xfa5ac2b87acecb23 +892, 0xaa659896e50535df +893, 0xf40dd7a3d3c5c8ed +894, 0x3f8367abecb705bc +895, 0x2d60e7525873358f +896, 0xc4a9d3948a0c3937 +897, 0x5ecc04fef6003909 +898, 0x7a865004918cba2 +899, 0x47ae110a678ec10b +900, 0xa0f02f629d91aa67 +901, 0x4848b99e7fac9347 +902, 0xaa858346d63b80ac +903, 0xeb5bf42ee161eeef +904, 0x4d35d723d3c6ba37 +905, 0xdf22ca6ca93b64a7 +906, 0x9d198520f97b25b1 +907, 0x3068415350778efe +908, 0xf3709f2e8793c2fe +909, 0xd1517bac8dd9f16f +910, 0xfb99bccaa15861dc +911, 0xa9ad607d796a2521 +912, 0x55d3793d36bd22e4 +913, 0xf99270d891ff7401 +914, 0x401750a5c4aa8238 +915, 0xd84b3003e6f28309 +916, 0x8a23798b5fa7c98b +917, 0xadd58bbc8f43e399 +918, 0xbd8c741ada62c6a8 +919, 0xbdc6937bc55b49fa +920, 0x4aefa82201b8502 +921, 0x17adf29a717b303 +922, 0xa6ed2197be168f6c +923, 0x1ba47543f4359a95 +924, 0xe34299949ac01ae9 +925, 
0x711c76cffc9b62f3 +926, 0xbac259895508a4b7 +927, 0x3c8b3b3626b0d900 +928, 0x1a8d23fbe2ae71bf +929, 0xca984fa3b5a5c3a1 +930, 0xb1986ab7521a9c93 +931, 0xd6b5b2c8d47a75b5 +932, 0xc7f1c4a88afb4957 +933, 0xdeb58033a3acd6cc +934, 0xabe49ddfe1167e67 +935, 0x8d559c10205c06e3 +936, 0xea07a1a7de67a651 +937, 0xcbef60db15b6fef8 +938, 0xbfca142cff280e7 +939, 0x362693eba0732221 +940, 0x7463237e134db103 +941, 0x45574ddb5035e17a +942, 0xfc65e0cb9b94a1aa +943, 0x3154c55f1d86b36d +944, 0x2d93a96dd6ab2d8b +945, 0xbe3bc1d1f2542a25 +946, 0xdd4b541f7385bdaa +947, 0x3b56b919d914e3f8 +948, 0x82fd51468a21895f +949, 0x8988cf120731b916 +950, 0xa06a61db5fb93e32 +951, 0x6ed66c1b36f68623 +952, 0x875ae844d2f01c59 +953, 0x17ccd7ac912e5925 +954, 0x12fe2a66b8e40cb1 +955, 0xf843e5e3923ad791 +956, 0xa17560f2fd4ef48 +957, 0x27a2968191a8ee07 +958, 0xa9aab4d22ff44a3c +959, 0x63cd0dcc3bb083ae +960, 0x7a30b48c6160bf85 +961, 0x956160fb572503b3 +962, 0xc47f6b7546640257 +963, 0xaf4b625f7f49153 +964, 0x2f5c86a790e0c7e8 +965, 0xb52e0610ae07f0b8 +966, 0x38a589292c3d849e +967, 0xc3e9ef655d30b4ef +968, 0xb5695f765cda998a +969, 0xde5d5e692a028e91 +970, 0x839476721555f72e +971, 0x48b20679b17d9ebf +972, 0xe3d4c6b2c26fb0df +973, 0xce5a9834f0b4e71f +974, 0x533abb253d5d420e +975, 0x9eac5ad9aed34627 +976, 0xc0f2a01ab3c90dbb +977, 0x6528eda93f6a066c +978, 0xc16a1b625e467ade +979, 0x1a4a320fb5e8b098 +980, 0x8819cccd8b4ab32f +981, 0x42daa88531fd0bfd +982, 0xcf732226409be17c +983, 0xfddcdb25ccbf378c +984, 0x9b15b603bf589fc1 +985, 0x2436066b95d366fe +986, 0x8d42eff2e9cbda90 +987, 0x694b2fc8a4e8303c +988, 0x8e207f98aaea3ccd +989, 0x4730d7a620f822d9 +990, 0x468dc9ca30fe2fd4 +991, 0x74b36d8a1c0f031b +992, 0x3c1aac1c488c1a94 +993, 0x19d0101042444585 +994, 0x8ec50c56d0c8adf4 +995, 0x721ec629e4d66394 +996, 0x3ca5ad93abeac4a4 +997, 0xaaebc76e71592623 +998, 0x969cc319e3ed6058 +999, 0xc0a277e3b2bfc3de diff --git a/MLPY/Lib/site-packages/numpy/random/tests/data/philox-testset-1.csv b/MLPY/Lib/site-packages/numpy/random/tests/data/philox-testset-1.csv new file mode 100644 index 0000000000000000000000000000000000000000..d27a17c4abff9d2cec2b82075b09f9f068dc6f1e --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/tests/data/philox-testset-1.csv @@ -0,0 +1,1001 @@ +seed, 0xdeadbeaf +0, 0xedc95200e2bd66a5 +1, 0x581d4e43b7682352 +2, 0x4be7278f5e373eab +3, 0xee47f17991a9e7ea +4, 0x38a7d2ae422f2e2c +5, 0xe2a6730a3b4a8a15 +6, 0x1588b7a841486442 +7, 0x13ad777246700504 +8, 0x14d157e0f5e18204 +9, 0xd87c22a7ee8c13f1 +10, 0x30cc389ce3542ba1 +11, 0xb8a53348955bb2e9 +12, 0xc08802e3c454f74f +13, 0xb444f627671a5780 +14, 0x4b6dd42b29cbf567 +15, 0x6109c7dc0bc5f7d5 +16, 0x85c954715d6b5b1e +17, 0x646178d3d9a3a5d5 +18, 0xebbde42b1cd83465 +19, 0x3d015102f6bc9c1a +20, 0x720fe2ec3798d5fd +21, 0x93120961289ceb2e +22, 0xc9207e960a56fae2 +23, 0xa7f042f31d991b98 +24, 0x5fac117415fae74b +25, 0xd0a970ba8dddc287 +26, 0x84b4e7e51b43106 +27, 0x6ad02bf525ea265f +28, 0xcdc7e5992b36ef8f +29, 0x44d4985209261d60 +30, 0x628c02d50f4b902e +31, 0xc7b1914922d1e76d +32, 0xfde99ff895cba51d +33, 0x175a0be050fa985f +34, 0x47297d3699e03228 +35, 0xccf1e9aeaa3339cd +36, 0x9fdd18ebeeaf15b1 +37, 0x7c94c9ab68747011 +38, 0x612d8ef22c1fa80f +39, 0x13f52b860de89ab5 +40, 0x81f264b8c139c43b +41, 0x8d017ba4ef1e85ba +42, 0x6d0556f46219951e +43, 0x8ee7b85663cf67b6 +44, 0x2432fc707645fe67 +45, 0xaf814046051e5941 +46, 0x4d432a83739ac76f +47, 0x59e5060d0983ccdd +48, 0xdd20e828b83d9b53 +49, 0x1b891800d7385f4c +50, 0x10e86a026c52ff5e +51, 0xb932f11723f7b90c +52, 0xb2413d0a1f3582d0 +53, 0xe7cd4edda65fc6b5 +54, 
0x6d3808848d56593b +55, 0x192a727c3c7f47d9 +56, 0x9659d8aea5db8c16 +57, 0x4242c79fe2c77c16 +58, 0x605f90c913827cea +59, 0x53e153c8bfc2138a +60, 0xed2158fbdef5910e +61, 0xae9e6e29d4cb5060 +62, 0x7dd51afaad3b11ce +63, 0x2b9ba533d01a5453 +64, 0x7e0e9cf2b6c72c8 +65, 0x1cc8b3c7747ed147 +66, 0x9b102651e2e11b48 +67, 0x30b0b53cbaac33ea +68, 0x70c28aec39b99b85 +69, 0x5f1417ff536fdb75 +70, 0x3a1d91abd53acf58 +71, 0xba116a1772168259 +72, 0xf5369bc9bd284151 +73, 0x67bf11373bf183ca +74, 0xef0b2d44dbd33dc7 +75, 0xbfd567ee1a2953ed +76, 0x7d373f2579b5e5c6 +77, 0x756eeae7bcdd99be +78, 0x75f16eb9faa56f3b +79, 0x96d55ded2b54b9a5 +80, 0x94495191db692c24 +81, 0x32358bdd56bab38c +82, 0x3f6b64078576579 +83, 0x7177e7948bc064c9 +84, 0x2cbf23f09ba9bc91 +85, 0x9b97cc31c26645f5 +86, 0x5af2d239ff9028b1 +87, 0x316fa920e0332abe +88, 0x46535b7d1cae10a0 +89, 0x21f0a6869298022c +90, 0xf395c623b12deb14 +91, 0x8573995180675aa7 +92, 0xc3076509f4dc42d5 +93, 0x15e11e49760c6066 +94, 0xe8a6d311e67a021d +95, 0x7482f389c883339b +96, 0xda6f881573cba403 +97, 0xb110ffb847e42f07 +98, 0x2c3393140605ccf9 +99, 0xba1c8ba37d8bdc33 +100, 0x59adf43db7a86fe0 +101, 0xb4fcbf6aa585ca85 +102, 0xd794a93c18033fa6 +103, 0x6e839c01985f9d4 +104, 0x64065bf28222b2c7 +105, 0x6a6359b293fa0640 +106, 0x5ff610969e383e44 +107, 0xa8172c263f05c7f7 +108, 0x62a0172e8bd75d07 +109, 0x7be66e3c453b65ac +110, 0x6a3b8d5a14014292 +111, 0xa2583e6087450020 +112, 0xd5d3ecc480c627d2 +113, 0xa24e83f1eec8a27c +114, 0xa23febd2a99ee75a +115, 0x9a5fbf91c7310366 +116, 0x5b63156932e039b +117, 0x942af3c569908505 +118, 0x89a850f71ab6a912 +119, 0xfeadc803ac132fe9 +120, 0x67bf60e758250f3 +121, 0x533c25103466a697 +122, 0xb7deede3482f9769 +123, 0x325e043b53bba915 +124, 0x9e8d9e7fde132006 +125, 0x6bacc6860bbc436e +126, 0xb3ea0534c42b1c53 +127, 0xb2389334db583172 +128, 0xa74b1bfbf5242ee4 +129, 0x53a487e2dc51d15c +130, 0xe5a3b538d2c7a82e +131, 0x7b6c70bb0c4cadaf +132, 0xae20791b2081df1 +133, 0xc685c12e3c61d32c +134, 0x60110e6b0286e882 +135, 0x49682119c774045c +136, 0x53dc11a3bbd072e +137, 0xbdc87c6e732d9c2d +138, 0xcc4620861ebac8fd +139, 0x7e9c3558759350cc +140, 0x157408dee34891ba +141, 0x9bcad1855b80651b +142, 0xd81b29141d636908 +143, 0x1ed041a9f319c69d +144, 0x805b2f541208b490 +145, 0x484ef3bba2eb7c66 +146, 0xb6b5e37d50a99691 +147, 0xabc26a7d9e97e85f +148, 0xcba2a3cce0417c2f +149, 0xa030dfffd701993c +150, 0x2bf2dc50582ebf33 +151, 0xd9df13dd3eb9993e +152, 0x31ca28b757232ae5 +153, 0x614562a0ccf37263 +154, 0x44d635b01725afbb +155, 0x5ae230bc9ca9cd +156, 0xb23a124eb98705c6 +157, 0x6395675444981b11 +158, 0xd97314c34119f9ca +159, 0x9de61048327dd980 +160, 0x16bac6bded819707 +161, 0xcea3700e3e84b8c7 +162, 0xaa96955e2ee9c408 +163, 0x95361dcc93b5bc99 +164, 0x306921aed3713287 +165, 0x4df87f3130cd302a +166, 0x37c451daeb6a4af5 +167, 0x8dbbe35f911d5cc1 +168, 0x518157ce61cb10f9 +169, 0x669f577aebc7b35b +170, 0x4b0a5824a8786040 +171, 0x519bc3528de379f5 +172, 0x6128012516b54e02 +173, 0x98e4f165e5e6a6dd +174, 0x6404d03618a9b882 +175, 0x15b6aeb3d9cd8dc5 +176, 0x87ed2c1bae83c35b +177, 0x8377fc0252d41278 +178, 0x843f89d257a9ba02 +179, 0xcdda696ea95d0180 +180, 0xcfc4b23a50a89def +181, 0xf37fd270d5e29902 +182, 0xafe14418f76b7efa +183, 0xf984b81577076842 +184, 0xe8c60649ccb5458d +185, 0x3b7be8e50f8ff27b +186, 0xaa7506f25cef1464 +187, 0x5e513da59f106688 +188, 0x3c585e1f21a90d91 +189, 0x1df0e2075af292a +190, 0x29fdd36d4f72795f +191, 0xb162fe6c24cb4741 +192, 0x45073a8c02bd12c4 +193, 0xcbaaa395c2106f34 +194, 0x5db3c4c6011bc21c +195, 0x1b02aac4f752e377 +196, 0xa2dfb583eb7bec5 +197, 0xfe1d728805d34bb1 +198, 
0xf647fb78bb4601ec +199, 0xd17be06f0d1f51ef +200, 0x39ec97c26e3d18a0 +201, 0xb7117c6037e142c8 +202, 0xe3a6ce6e6c71a028 +203, 0xe70a265e5db90bb2 +204, 0x24da4480530def1e +205, 0xfd82b28ce11d9a90 +206, 0x5bf61ead55074a1d +207, 0xbe9899c61dec480d +208, 0xae7d66d21e51ec9e +209, 0x384ee62c26a08419 +210, 0x6648dccb7c2f4abf +211, 0xc72aa0c2c708bdc9 +212, 0x205c5946b2b5ba71 +213, 0xd4d8d0b01890a812 +214, 0x56f185493625378d +215, 0x92f8072c81d39bd0 +216, 0xa60b3ceecb3e4979 +217, 0xfcf41d88b63b5896 +218, 0xf5a49aa845c14003 +219, 0xffcc7e99eee1e705 +220, 0xdd98312a7a43b32d +221, 0xa6339bd7730b004 +222, 0xdac7874ba7e30386 +223, 0xadf6f0b0d321c8 +224, 0x126a173ae4ffa39f +225, 0x5c854b137385c1e7 +226, 0x8173d471b1e69c00 +227, 0x23fa34de43581e27 +228, 0x343b373aef4507b1 +229, 0xa482d262b4ea919c +230, 0xf7fbef1b6f7fbba +231, 0xd8ce559487976613 +232, 0xbf3c8dd1e6ebc654 +233, 0xda41ed375451e988 +234, 0xf54906371fd4b9b3 +235, 0x5b6bb41231a04230 +236, 0x866d816482b29c17 +237, 0x11315b96941f27dc +238, 0xff95c79205c47d50 +239, 0x19c4fff96fbdac98 +240, 0xbfb1ae6e4131d0f4 +241, 0x9d20923f3cdb82c9 +242, 0x282175507c865dff +243, 0xdfd5e58a40fe29be +244, 0xedbd906ff40c8e4f +245, 0x11b04fc82614ccb3 +246, 0xeceb8afda76ae49f +247, 0xa4856913847c2cdf +248, 0x6f1425f15a627f2a +249, 0xdf144ffedf60349e +250, 0x392d7ecfd77cc65f +251, 0x72b8e2531049b2c6 +252, 0x5a7eb2bdb0ec9529 +253, 0xdcfd4306443e78c1 +254, 0x89ad67ed86cd7583 +255, 0x276b06c0779a6c8f +256, 0xb2dbb723196a0ac3 +257, 0x66c86a3b65906016 +258, 0x938348768a730b47 +259, 0x5f5282de938d1a96 +260, 0xa4d4588c4b473b1f +261, 0x8daed5962be4796f +262, 0x9dde8d796985a56e +263, 0x46be06dbd9ed9543 +264, 0xdf98286ceb9c5955 +265, 0xa1da1f52d7a7ca2b +266, 0x5a7f1449f24bbd62 +267, 0x3aedc4e324e525fd +268, 0xced62464cd0154e1 +269, 0x148fc035e7d88ce3 +270, 0x82f8878948f40d4c +271, 0x4c04d9cdd6135c17 +272, 0xdf046948d86b3b93 +273, 0x2f0dec84f403fe40 +274, 0xa61954fb71e63c0d +275, 0x616d8496f00382e8 +276, 0x162c622472746e27 +277, 0x43bcfe48731d2ceb +278, 0xff22432f9ff16d85 +279, 0xc033ed32bb0ad5a4 +280, 0x5d3717cc91c0ce09 +281, 0x7a39a4852d251075 +282, 0x61cd73d71d6e6a6 +283, 0xe37e2ea4783ab1a5 +284, 0x60e1882162579ea8 +285, 0x9258ec33f1a88e00 +286, 0x24b32acf029f0407 +287, 0x1410fc9aea6d3fac +288, 0x6054cf2a3c71d8f7 +289, 0x82f7605157a66183 +290, 0x3b34c1c0dff9eac5 +291, 0xfebe01b6d5c61819 +292, 0x7372187c68b777f2 +293, 0xc6923812cda479f0 +294, 0x386613be41b45156 +295, 0x92cfebe8cc4014b +296, 0x8e13c4595849828b +297, 0x90e47390d412291f +298, 0x6b21a1d93d285138 +299, 0xbf5b1f5922f04b12 +300, 0x21e65d1643b3cb69 +301, 0xf7683b131948ac3c +302, 0xe5d99fc926196ed2 +303, 0x7b138debbec90116 +304, 0x8a2650a75c2c2a5c +305, 0x20689a768f9b347b +306, 0xdfa2900cfb72dc6e +307, 0x98959c3855611cc2 +308, 0x5fdb71b89596cc7c +309, 0x1c14ac5c49568c7b +310, 0x958c4293016091fe +311, 0x7484522eb0087243 +312, 0xc4018dfb34fc190f +313, 0xca638567e9888860 +314, 0x102cd4805f0c0e89 +315, 0xcc3bc438e04548f8 +316, 0xb808944bb56ea5be +317, 0xffd4778dbf945c57 +318, 0xfe42617784c0233b +319, 0x3eccbfeae9b42d3c +320, 0xd9f1b585fd0bfa60 +321, 0x5c063d1b2705d5dd +322, 0x8e8bec3519941b64 +323, 0x9e94c36cbec2a42 +324, 0x1cd19f5b64ffd3ad +325, 0x9632e3aebfc68e66 +326, 0x98960c2d9da4ae45 +327, 0xb76994b1f2bbfc1f +328, 0xca184a737d3971cc +329, 0x964d31b07183adfb +330, 0xe9e0ff351cd276d4 +331, 0xb5747c860b05bbe4 +332, 0x5549ddc3bd3862e2 +333, 0x495496677b27873b +334, 0x53910baa26e3ea18 +335, 0xaa07a07ad0a688d3 +336, 0xbb43bd1f09ecdb1e +337, 0xe2ebc105699dd84 +338, 0x6e815a2729584035 +339, 0x2caab1713b17948a +340, 
0x43d39d209fa41c90 +341, 0xfe3e71089d5d1c3a +342, 0xa778646c32f81177 +343, 0x8d42bfb86e6e92d5 +344, 0x175571f70b4fcfbe +345, 0x2a66a6fe10dc3b5b +346, 0xd9545e85235ca709 +347, 0x5642781c77ced48a +348, 0x24facc40b72ccd09 +349, 0xa800fbacce33f6f8 +350, 0x675f58a0ff19fba +351, 0x35aedf57bb5cde1b +352, 0xe5535a6b63f6d068 +353, 0x84dffd0102aaa85d +354, 0x621faad65467aaa7 +355, 0x596ad85b556b112f +356, 0x837545fff8894c7a +357, 0x3d9a4ae1356bc6a6 +358, 0xcd8b7153205d4ad0 +359, 0x98afdd40f1ed09a6 +360, 0xa38b2dc55a5cf87f +361, 0x484aecce2b6838bc +362, 0x6af05c26bdab18d9 +363, 0xf418b7399dcf2e4b +364, 0x1cfa38789b0d2445 +365, 0xfbed23c34166ee67 +366, 0x38e6820039e4912a +367, 0x1fe94911e963591e +368, 0x1291c79aee29ad70 +369, 0x65eccfc89506f963 +370, 0x7d14de3b2f55b1f6 +371, 0x82eb79c36cd2a739 +372, 0x41ffe3b75ea0def5 +373, 0x9eba9156470a51d9 +374, 0xd17c00b981db37d1 +375, 0xf688769a75601aa7 +376, 0xbcf738e9e03d571e +377, 0x14712e56df8f919b +378, 0xab14e227d156e310 +379, 0xf53d193e993e351e +380, 0x857fae46bd312141 +381, 0xc2dd71e41b639966 +382, 0x74f8b987a3d00ad1 +383, 0x5bce8526dc527981 +384, 0x94910926c172a379 +385, 0x503c45557688a9d5 +386, 0x244d03834e05807f +387, 0x6e014cbab9c7a31f +388, 0xae544c638530facf +389, 0x9b853aaaf9cbc22d +390, 0xfb42ab7024d060ed +391, 0x74cc3fba0dfd7ff2 +392, 0x24ec9e8f62144ad5 +393, 0x72f082954307bbe7 +394, 0x36feda21bbf67577 +395, 0x3222191611b832f1 +396, 0xd0584e81bcac8b0b +397, 0xdce8d793ef75e771 +398, 0x978824c6c2578fc +399, 0x6e8f77503b3c2ee4 +400, 0xc85d2d86fecf5d03 +401, 0x3d35b4a5d4d723c4 +402, 0xd3987dfd4727fff3 +403, 0xd3cde63fb6a31add +404, 0xf6699e86165bdaeb +405, 0x9d60ba158ec364c4 +406, 0x920c3c18b346bfc9 +407, 0x770fd1fdfbc236ca +408, 0x45998cfc5fc12ddd +409, 0xd74a3454e888834b +410, 0xbf2aa68081a4a28f +411, 0xea41b26a6f1da1b3 +412, 0x5560a2d24b9d5903 +413, 0xe3791f652a228d8b +414, 0x365116d3b5a8520c +415, 0xb1b2bd46528f8969 +416, 0xfcfe14943ef16ae7 +417, 0xf4d43425e8a535dc +418, 0xe6cf10a78782a7e0 +419, 0x9c7ac0de46556e3e +420, 0xc667ae0856eed9ef +421, 0x47dbb532e16f9c7e +422, 0xdf4785a5d89ee82e +423, 0xbd014925ce79dbcf +424, 0xea0d663fb58fa5be +425, 0x51af07d5cc3821fb +426, 0x27a1bdcdc4159a9d +427, 0x520c986c59b1e140 +428, 0x50b73fd9bacd5b39 +429, 0xae5240641f51e4f3 +430, 0x71faecc164ed9681 +431, 0xda95aa35529a7ee +432, 0xe25ba29b853c1c6d +433, 0x9871a925cda53735 +434, 0xde481ad8540e114d +435, 0xa2997f540e8abca0 +436, 0xc9683c5035e28185 +437, 0x1082471b57182bac +438, 0xbd3ecf0f0b788988 +439, 0xf479760776fbb342 +440, 0x3730929200d91f44 +441, 0xc1762d79ae72809c +442, 0xfaa0a4c7b1686cb3 +443, 0xd581e6d55afdafcd +444, 0x6cf57bdfba2dcf6d +445, 0xdef79d9fe6a5bcef +446, 0x13ed376e18132bd3 +447, 0xbe67efd72defa2a +448, 0x5acc176c468966ea +449, 0x8b35b626af139187 +450, 0x446de3fac0d973ac +451, 0xe1d49e06dc890317 +452, 0x817bc3fd21fc09b7 +453, 0xb71c3958a13d5579 +454, 0x8746e010f73d7148 +455, 0x1b61c06009922e83 +456, 0xba17e62e6b092316 +457, 0x1375fa23c4db8290 +458, 0x3f071230f51245a6 +459, 0x51c99a086a61cd13 +460, 0x5f0f2ae78589e1fd +461, 0x604834e114bbbc27 +462, 0x5eb2a7a34814e9a9 +463, 0x77a6907f386bf11e +464, 0x99525de2bd407eeb +465, 0xb818348c57b3b98f +466, 0x25f5f9e702fbe78d +467, 0x8f66669e6f884473 +468, 0x1e47d46e2af4f919 +469, 0xf6a19df846476833 +470, 0xff00c67bcd06621f +471, 0xe3dfe069795d72d8 +472, 0x8affc88b2fea4d73 +473, 0x66df747e5f827168 +474, 0xf368ec338d898a0e +475, 0x9e1f1a739c5984a2 +476, 0x46a1c90e1ca32cbc +477, 0xc261bc305ed8d762 +478, 0x754d7949f7da9e72 +479, 0x4c8fbbb14ef47b17 +480, 0xccbdc67a3848d80d +481, 0x3c25e6f58bae751d +482, 
0x7078b163b936d9b6 +483, 0x440e27463c134ecf +484, 0x6c83ee39f324db0f +485, 0x27cf901b22aea535 +486, 0x57262dec79a3f366 +487, 0x91db09f1dbb524fb +488, 0xd7436eefba865df2 +489, 0x16c86b0a275a3f43 +490, 0x689493e6681deaa9 +491, 0x7e1dc536c1a9ac42 +492, 0x1145beac3ac7f5cc +493, 0x3d05e211a104b2b0 +494, 0x4f9e77ced3c52f44 +495, 0x53de1369354add72 +496, 0x1fb60f835f47cdeb +497, 0x6ab36f089e40c106 +498, 0xaabffcb0d3d04c7 +499, 0xaa399686d921bd25 +500, 0x2bf8dd8b6d6fa7f0 +501, 0x1ddbf4e124329613 +502, 0x466a740241466a72 +503, 0x98d7381eb68a761 +504, 0x817691510bc4857a +505, 0x8837622c0171fe33 +506, 0xcba078873179ee16 +507, 0x13adad1ab7b75af4 +508, 0x3bac3f502428840c +509, 0xbeb3cce138de9a91 +510, 0x30ef556e40b5f0b4 +511, 0x19c22abdf3bbb108 +512, 0x977e66ea4ddc7cf +513, 0x9f4a505f223d3bf3 +514, 0x6bc3f42ac79ec87b +515, 0x31e77712158d6c23 +516, 0x6d8de4295a28af0d +517, 0xee1807dbda72adb7 +518, 0xda54140179cd038f +519, 0x715aa5cdac38e062 +520, 0x5a7e55e99a22fa16 +521, 0xf190c36aa8edbe4f +522, 0xccadd93a82c1d044 +523, 0x7070e6d5012c3f15 +524, 0x50a83341a26c1ba5 +525, 0x11bca7cc634142e5 +526, 0x623a0d27867d8b04 +527, 0x75c18acff54fbf6e +528, 0x455ae7d933497a6f +529, 0xf624cf27d030c3d3 +530, 0x7a852716f8758bac +531, 0xe7a497ac1fa2b5b4 +532, 0xf84f097498f57562 +533, 0xc4bb392f87f65943 +534, 0x618e79a5d499fbfb +535, 0xb3c0b61d82b48b8 +536, 0x4750a10815c78ea7 +537, 0x9cf09cca3ddece69 +538, 0x2a69f1c94cc901a2 +539, 0x347a0e446e1ce86d +540, 0xb06f3a5a5ab37bb1 +541, 0x8035bd0713d591db +542, 0x539c9637042c3a1f +543, 0xd7ba4dc6b273cbd7 +544, 0x12f3f99933444f85 +545, 0x4a9517b9783fb9a4 +546, 0x6422b2ea95093bc5 +547, 0x3a5ecff0f996c2a6 +548, 0x31de504efc76a723 +549, 0x7ccb7c5233c21a9f +550, 0xc687d9e6ce4186e8 +551, 0x6e40769d6940376a +552, 0xf51207314f1f7528 +553, 0x67ee3acb190865e3 +554, 0xe08d586270588761 +555, 0xe387fa489af1a75c +556, 0x73414a52d29d8375 +557, 0x671a38191cf2a357 +558, 0xe00fb25b1aa54008 +559, 0x11a0610e22cf549b +560, 0xc90cc865d57c75be +561, 0x90d0863cc15f2b79 +562, 0x8b3e60d32ebcb856 +563, 0xb28cc55af621e04a +564, 0xcf60bd3cb2a5ab1d +565, 0x212cb5d421948f86 +566, 0xee297b96e0a3363f +567, 0x4e9392ff998760d1 +568, 0x61940c8d0105ba3e +569, 0x14ebcbae72a59a16 +570, 0xdf0f39a3d10c02af +571, 0xfc047b2b3c1c549d +572, 0x91718b5b98e3b286 +573, 0x9ea9539b1547d326 +574, 0x7a5a624a89a165e6 +575, 0x145b37dcaa8c4166 +576, 0x63814bbb90e5616c +577, 0xc4bc3ca6c38bb739 +578, 0x853c3a61ddc6626c +579, 0xa7ce8481c433829a +580, 0x8aff426941cc07b +581, 0x2dc3347ca68d8b95 +582, 0xce69f44f349e9917 +583, 0x2fa5cb8aca009b11 +584, 0xf26bb012115d9aca +585, 0xafa01c2f2d27235a +586, 0xabcba21f1b40305e +587, 0xfec20c896c0c1128 +588, 0xc5f7a71ebacadfa0 +589, 0xc8479ad14bab4eef +590, 0xad86ec9a3e7d3dc +591, 0xbbecd65292b915c5 +592, 0xb1f9e28149e67446 +593, 0x708d081c03dad352 +594, 0xaa8a84dbd1de916c +595, 0x9aa3efb29ba9480b +596, 0xd3c63969ff11443e +597, 0x1e9e9ac861315919 +598, 0x4fe227f91e66b41d +599, 0xefc0212d43d253ab +600, 0x98341437727c42d1 +601, 0x5ea85c0fe9008adc +602, 0x7891b15faa808613 +603, 0x32db2d63989aacfd +604, 0xc92f7f28e88fd7bc +605, 0x3513545eb6549475 +606, 0x49abe0082906fbf8 +607, 0xcee1e1a6551e729c +608, 0x38556672b592a28e +609, 0xc3e61409c4ec2d45 +610, 0x96c67ce2995a0fd4 +611, 0x9b9b0cada870293 +612, 0x82d6dd5dada48037 +613, 0xeea4f415299f1706 +614, 0x371107895f152ab3 +615, 0x2f6686159f4396bb +616, 0x61005a2ff3680089 +617, 0x9d2f2cafb595e6b6 +618, 0x4a812a920f011672 +619, 0x317554d3a77385d7 +620, 0x24c01086727eb74b +621, 0xa15ff76d618a3a9e +622, 0x2121bfd983859940 +623, 0x384d11577eea8114 +624, 
0xab0f4299f3c44d88 +625, 0x136fd4b07cfa14d9 +626, 0x665fe45cbfaa972a +627, 0x76c5a23398a314e9 +628, 0x5507036357ccda98 +629, 0xd9b8c5ac9dce632b +630, 0x366bc71781da6e27 +631, 0xdd2b2ba1d6be6d15 +632, 0xf33ed0d50ea6f1a6 +633, 0xf05a9b1900174c18 +634, 0x3947e1419e2787cf +635, 0x6c742b1e029637d0 +636, 0x32aba12196a0d2e8 +637, 0x1b94aab2e82e7df +638, 0x68b617db19229d6 +639, 0x6c88a95ac0a33f98 +640, 0xdc9b95fd60c2d23e +641, 0x999e6971d3afc8b3 +642, 0x7071fc6ad8b60129 +643, 0x41a8184ef62485f6 +644, 0xb68e0605c7d5e713 +645, 0x272b961a1d1bbee +646, 0x23f04e76446187b0 +647, 0x999a7a8f6d33f260 +648, 0xdbd6318df4f168d +649, 0x8f5e74c84c40711e +650, 0x8ccc6b04393a19d6 +651, 0xadcd24b782dd8d3d +652, 0x1a966b4f80ef9499 +653, 0xcb6d4f9ff5a280f0 +654, 0x8095ff2b8484018a +655, 0xbfd3389611b8e771 +656, 0x278eb670b7d12d51 +657, 0x31df54ca8d65c20f +658, 0x121c7fb38af6985e +659, 0x84fb94f38fe1d0a +660, 0x15ae8af1a6d48f02 +661, 0x8d51e4a62cba1a28 +662, 0x58e6b6b3ae0f9e42 +663, 0x9365a0a85669cc99 +664, 0xe56e92f65a2106df +665, 0x68fa299c66b428fc +666, 0x55e51bb0b0a832c6 +667, 0x48b565293f9bc494 +668, 0x73d8132b1cbabb57 +669, 0x9178ac3926c36cbc +670, 0xe2f22c7b28ea5e0f +671, 0x6af45322a99afb12 +672, 0x59072fcb486a46f4 +673, 0x166b717b08d3d8e +674, 0xd4e627a2dfacc4ab +675, 0x33dad6f2921dedaa +676, 0x4b13b806834a6704 +677, 0xe5f7971b398ed54d +678, 0x20bfae65e3e6899b +679, 0x881dab45d2b4fc98 +680, 0x6f248126b5b885be +681, 0x7aeb39e986f9deee +682, 0xf819f9574b8c3a03 +683, 0xff3d93ed6bd9781a +684, 0x3a31e2e24a2f6385 +685, 0x7888a88f8944a5e +686, 0x4faee12f5de95537 +687, 0x7f3e4efccdb2ed67 +688, 0x91e0f2fc12593af5 +689, 0xb5be8a4b886a40d3 +690, 0x998e8288ac3a9b1b +691, 0x85c48fc8b1349e7b +692, 0xf03af25222d8fae5 +693, 0x45467e805b242c2e +694, 0xa2350db793dbebdc +695, 0xfebe5b61d2174553 +696, 0xa9a331f02c54ad0b +697, 0xe94e49a0f905aef3 +698, 0xe54b4c812b55e3da +699, 0xdc454114c6bc0278 +700, 0x99c7765ab476baa2 +701, 0xccd9590e47fdff7c +702, 0xfa2bcae7afd6cb71 +703, 0x2c1bf1a433a6f0f7 +704, 0x53882c62ff0aab28 +705, 0x80ac900f844dacc +706, 0x27ba8eb5c4a44d54 +707, 0x78f3dfb072a46004 +708, 0x34e00e6ec629edce +709, 0x5b88d19b552d1fbd +710, 0xe4df375dc79df432 +711, 0x37446312ff79c3b4 +712, 0xb72256900a95fa6d +713, 0x89f3171fbdff0bfc +714, 0xd37885b048687eba +715, 0xbb033213b283b60e +716, 0xcf10b523ee769030 +717, 0xbf8070b6cfd7bafb +718, 0xb7194da81fd1763b +719, 0xbfc303de88e68d24 +720, 0xb949c7a5aea8a072 +721, 0x844216e7bae90455 +722, 0xf1e7f20840049a33 +723, 0x96e3263ad0cae794 +724, 0x10772d51f6e9ba49 +725, 0xcea24fccae9d23b3 +726, 0xefd378add9dde040 +727, 0xba0c7c5275805976 +728, 0x2e2a04608f64fa8c +729, 0xafb42ec43aa0fa7 +730, 0x30444b84241ac465 +731, 0x19ef384bac4493ab +732, 0xfd1ac615d3ba5ab9 +733, 0x6cc781ba38643aff +734, 0x30ff27ebed875cfd +735, 0xee1a261aca97ae62 +736, 0xc5a92715202bc940 +737, 0x9e6ec76f93c657ff +738, 0x9b9fd55f55191ca5 +739, 0x654b13af008d8f03 +740, 0x1b7f030d9bd0719f +741, 0x6d622e277550cb7f +742, 0x3f8ee6b8830d0538 +743, 0x475462bcd0de190f +744, 0x21380e8a513bdbcd +745, 0x629bf3771b1bd7a4 +746, 0x3b5fd0b62c353709 +747, 0xf95634006ec3867e +748, 0x1be8bb584a6653c2 +749, 0x2e2d3cfa85320ce8 +750, 0x5b904b692252d11d +751, 0x4bfd76631d527990 +752, 0xc019571ca2bec4a0 +753, 0xf2eb730cea4cd751 +754, 0xd4571d709530191a +755, 0x3b5bd947061f5a7d +756, 0x56e2322cd2d1d1c0 +757, 0xa8830a5f62019f83 +758, 0x901d130c1b873cf3 +759, 0xb5dd29b363c61299 +760, 0xbb710bec3a17b26d +761, 0xc0c464daca0f2328 +762, 0x4dc8055df02650f5 +763, 0x3d3cd9bbe8b957af +764, 0xdb79612c2635b828 +765, 0xe25b3a8ad8fa3040 +766, 
0xd5875c563cbf236b +767, 0x46861c1c3849c9bc +768, 0xf84bf1a2814dff43 +769, 0x6d8103902e0ad5e6 +770, 0x99f51c9be8af79e5 +771, 0xb0bfa8540ff94a96 +772, 0xaf45109a4e06f7d0 +773, 0x281df3e55aea9bfc +774, 0x6a1155ca8aa40e60 +775, 0x754d32c5de1f5da +776, 0xce1eafb1c6ca916f +777, 0xc4f2185fa8577bd1 +778, 0x4a188e9bdb5501d9 +779, 0xbb14107e99bd5550 +780, 0xf0381d8425ec2962 +781, 0x213dbfffc16ec4f6 +782, 0x7a999c5a28ea65bc +783, 0x23758c2aba7709ff +784, 0xea7e4bb205e93b44 +785, 0x9c5a31e53911c658 +786, 0x7f04d0bbdc689ddc +787, 0xe3ed89ab8d78dcb3 +788, 0x73c38bfb43986210 +789, 0x740c7d787eb8e158 +790, 0x5284fafdfb3fb9ec +791, 0x2e91a58ac1fb1409 +792, 0xb94a600bf0a09af3 +793, 0x533ea4dbe07d81dd +794, 0x48c3f1a736b3c5fd +795, 0x56ae3499fa8720ce +796, 0x526f2def663ca818 +797, 0x2f085759c65665c4 +798, 0xf715f042c69e0db4 +799, 0x110889c399231e60 +800, 0x64584a244866f3a0 +801, 0xf02ec101a39405d3 +802, 0xe73cd5e9a7f17283 +803, 0xfea64869e7028234 +804, 0x97559974ad877891 +805, 0xc8695aba1dc9f2e5 +806, 0x7b62b76ffc2264ec +807, 0xf5e1df172ec5ccd +808, 0xafaeb68765e443bd +809, 0xd3870eb2e8337623 +810, 0x4f944d684138fb39 +811, 0x6977c575038916ad +812, 0x8ada1a225df95a56 +813, 0xe4044c6c58d15e54 +814, 0x4e5121366681cf2 +815, 0xcf8640b079357b0d +816, 0xcd5b157d44106fa3 +817, 0x9d7a5481279e25a1 +818, 0xe10e9db41fb4b34f +819, 0x1052607be1eadff9 +820, 0x3403d67232fe2265 +821, 0xac9358f498c34afc +822, 0x820172da0dc39c9 +823, 0xe186e91a3b826b6a +824, 0x1a838e2a40284445 +825, 0x1870b617ebd7bce6 +826, 0xcb7cba4424be1ed7 +827, 0x6a2e56e40fdf9041 +828, 0xace93bbe108f97ee +829, 0xfeb9bc74ac41ca08 +830, 0x8cb2d05b0f6a1f51 +831, 0x73792309f3fac0a9 +832, 0x2507343d431308ca +833, 0xd0ea1197be615412 +834, 0xb1870812f1d2fa94 +835, 0x6d067b6935dcd23e +836, 0xaf161014e5492c31 +837, 0xd4be0dce97064be4 +838, 0xf8edfe3fc75c20f1 +839, 0x894751dc442d2d9c +840, 0xb4a95f6a6663456c +841, 0x74e93162e2d805db +842, 0x784bc5f3a7a2f645 +843, 0xd234d7c5b0582ea9 +844, 0x491f28d0ab6cb97c +845, 0xa79419e5cf4336c3 +846, 0x66b00141978c849 +847, 0xa7ddbd64698d563f +848, 0xefc33a4a5d97d4b2 +849, 0x95075514a65aebdc +850, 0x40eca5b3e28cd25e +851, 0x90ec7d00e9c9e35d +852, 0x63e84104d5af417a +853, 0xdaca0ea32df5744 +854, 0x7ed54f2587795881 +855, 0x5a73931760af4ee0 +856, 0x857d1a185a3081ec +857, 0x6eac2aabe67fb463 +858, 0xd1f86155d8bfc55f +859, 0x6d56398f3e7877ef +860, 0x7642f61dfc62bc17 +861, 0x1d76b12843246ffa +862, 0xde7817809b8a31d0 +863, 0xbcca9cd091198f9d +864, 0xf71ca566dddcdfd4 +865, 0xea4386ee8b61d082 +866, 0xe351729d6010bac4 +867, 0xfd685d8a49910dd6 +868, 0xa7a20ea6c686bd3 +869, 0x1cdaf82f4dbd5536 +870, 0xa3da1d1e77dda3e0 +871, 0x4f723b3818ff8b2a +872, 0x1290669eca152469 +873, 0xb54158b52d30651b +874, 0xc06b74f2c7f0fee +875, 0x7d5840bcbf702379 +876, 0x19fa4c1254a82ed +877, 0xcf5ce090ad0b38ea +878, 0xd4edd6ac9437e16d +879, 0xc6ebf25eb623b426 +880, 0xd2b6dbdf00d8fea2 +881, 0x949cf98391cc59e1 +882, 0x380a0c7d0356f7b3 +883, 0x8ffefe32465473bf +884, 0x637b6542d27c861e +885, 0x347d12ffc664ecd9 +886, 0xea66e3a0c75a6b37 +887, 0xc3aff6f34fb537a1 +888, 0x67bdf3579959bf49 +889, 0xa17a348e3a74b723 +890, 0x93c9ef26ddadd569 +891, 0x483909059a5ac0b2 +892, 0x26ec9074b56d5a0d +893, 0x6216000d9a48403a +894, 0x79b43909eab1ec05 +895, 0xe4a8e8d03649e0de +896, 0x1435d666f3ccdc08 +897, 0xb9e22ba902650a0e +898, 0x44dffcccc68b41f8 +899, 0x23e60dcc7a559a17 +900, 0x6fd1735eacd81266 +901, 0xf6bda0745ea20c8e +902, 0x85efcaefe271e07c +903, 0x9be996ee931cef42 +904, 0xe78b41c158611d64 +905, 0xd6201df605839830 +906, 0x702e8e47d2769fd3 +907, 0xb8dcf70e18cf14c +908, 
0xac2690bab1bf5c17 +909, 0x92b166b71205d696 +910, 0xb0e73c795fc6df28 +911, 0x4bf2322c8b6b6f0d +912, 0xa842fbe67918cea0 +913, 0xb01a8675d9294e54 +914, 0xfbe3c94f03ca5af2 +915, 0x51a5c089600c441f +916, 0x60f0fd7512d85ded +917, 0xef3113d3bc2cadb0 +918, 0xe1ea128ade300d60 +919, 0xde413b7f8d92d746 +920, 0xfc32c6d43f47c5d8 +921, 0x69d551d8c2b54c68 +922, 0xb9bc68c175777943 +923, 0xb9c79c687f0dae90 +924, 0xd799421ef883c06e +925, 0xbff553ca95a29a3e +926, 0xfc9ffac46bd0aca1 +927, 0x4f6c3a30c80c3e5a +928, 0x8b7245bc6dc4a0a +929, 0xaf4e191a4575ff60 +930, 0x41218c4a76b90f0b +931, 0x986052aa51b8e89b +932, 0x284b464ed5622f9 +933, 0xba6bded912626b40 +934, 0x43cad3ed7443cb5c +935, 0x21641fa95725f328 +936, 0x6d99d6d09d755822 +937, 0x8246dfa2d4838492 +938, 0xd2ee70b9056f4726 +939, 0x87db515a786fbb8b +940, 0x7c63e4c1d7786e7d +941, 0xd1a9d548f10b3e88 +942, 0xa00856475f3b74c9 +943, 0x7f1964ce67148bf4 +944, 0x446650ec71e6018c +945, 0xb1805ca07d1b6345 +946, 0x869c0a1625b7271b +947, 0x79d6da06ce2ecfe2 +948, 0xec7b3cafc5e3c85f +949, 0x1745ce21e39f2c3d +950, 0xd9a0a7af6ee97825 +951, 0x680e0e52a6e11d5c +952, 0xd86b3f344ff7f4cd +953, 0xab56af117c840b9c +954, 0x5c5404c7e333a10e +955, 0x4f1eb462f35d990d +956, 0xf857605a5644458e +957, 0x3bb87cdf09262f86 +958, 0xd57295baf6da64b +959, 0xb5993f48472f2894 +960, 0x7d1a501608c060b2 +961, 0x45fabe2d0e54adf0 +962, 0xbb41c3806afb4efe +963, 0xbfbc506049424c8 +964, 0xb7dd6b67f2203344 +965, 0x389ce52eff883b81 +966, 0xe259c55c0cf6d000 +967, 0x70fb3e3824f7d213 +968, 0x9f36d5599ed55f4b +969, 0xd14cf6f12f83c4f7 +970, 0x570a09d56aaa0b66 +971, 0x8accafd527f4598 +972, 0xa42d64c62175adfd +973, 0xddb9c6a87b6e1558 +974, 0xd80b6c69fa1cde2a +975, 0x44ebaac10082207b +976, 0xf99be8889552fa1a +977, 0x38253cd4b38b5dc5 +978, 0x85356c8b02675791 +979, 0xbf91677b2ecdcf55 +980, 0x2316cb85e93f366e +981, 0x9abf35954db6b053 +982, 0xf49f7425e086b45a +983, 0x8f5b625e074afde2 +984, 0xe0d614559791b080 +985, 0xbf7b866afab2a525 +986, 0xde89d7e1641a6412 +987, 0x1d10687d8ae5b86f +988, 0x1f034caa0e904cbd +989, 0x2086357aec8a7a2c +990, 0x22dc476b80c56e1e +991, 0xbef5a73cc0e3a493 +992, 0xddfa3829b26ed797 +993, 0x8917a87ec3d4dc78 +994, 0xfeabe390628c365e +995, 0x581b0c4f6fb2d642 +996, 0x1ef8c590adbf5b9a +997, 0x4d8e13aac0cce879 +998, 0xfe38f71e5977fad0 +999, 0x1f83a32d4adfd2ed diff --git a/MLPY/Lib/site-packages/numpy/random/tests/data/philox-testset-2.csv b/MLPY/Lib/site-packages/numpy/random/tests/data/philox-testset-2.csv new file mode 100644 index 0000000000000000000000000000000000000000..e8807a5856e681296ae1a3dcee904b5f5e93186e --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/tests/data/philox-testset-2.csv @@ -0,0 +1,1001 @@ +seed, 0x0 +0, 0x399e5b222b82fa9 +1, 0x41fd08c1f00f3bc5 +2, 0x78b8824162ee4d04 +3, 0x176747919e02739d +4, 0xfaa88f002a8d3596 +5, 0x418eb6f592e6c227 +6, 0xef83020b8344dd45 +7, 0x30a74a1a6eaa064b +8, 0x93d43bf97a490c3 +9, 0xe4ba28b442194cc +10, 0xc829083a168a8656 +11, 0x73f45d50f8e22849 +12, 0xf912db57352824cc +13, 0xf524216927b12ada +14, 0x22b7697473b1dfda +15, 0x311e2a936414b39f +16, 0xb905abfdcc425be6 +17, 0x4b14630d031eac9c +18, 0x1cf0c4ae01222bc8 +19, 0xa6c33efc6e82ef3 +20, 0x43b3576937ba0948 +21, 0x1e483d17cdde108a +22, 0x6722784cac11ac88 +23, 0xee87569a48fc45d7 +24, 0xb821dcbe74d18661 +25, 0xa5d1876ef3da1a81 +26, 0xe4121c2af72a483 +27, 0x2d747e355a52cf43 +28, 0x609059957bd03725 +29, 0xc3327244b49e16c5 +30, 0xb5ae6cb000dde769 +31, 0x774315003209017 +32, 0xa2013397ba8db605 +33, 0x73b228945dbcd957 +34, 0x801af7190375d3c0 +35, 0xae6dca29f24c9c67 +36, 0xd1cc0bcb1ca26249 +37, 
0x1defa62a5bd853be +38, 0x67c2f5557fa89462 +39, 0xf1729b58122fab02 +40, 0xb67eb71949ec6c42 +41, 0x5456366ec1f8f7d7 +42, 0x44492b32eb7966f5 +43, 0xa801804159f175f1 +44, 0x5a416f23cac70d84 +45, 0x186f55293302303d +46, 0x7339d5d7b6a43639 +47, 0xfc6df38d6a566121 +48, 0xed2fe018f150b39e +49, 0x508e0b04a781fa1b +50, 0x8bee9d50f32eaf50 +51, 0x9870015d37e63cc +52, 0x93c6b12309c14f2d +53, 0xb571cf798abe93ff +54, 0x85c35a297a88ae6e +55, 0x9b1b79afe497a2ae +56, 0x1ca02e5b95d96b8d +57, 0x5bb695a666c0a94a +58, 0x4e3caf9bbab0b208 +59, 0x44a44be1a89f2dc1 +60, 0x4ff37c33445758d1 +61, 0xd0e02875322f35da +62, 0xfd449a91fb92646b +63, 0xbe0b49096b95db4d +64, 0xffa3647cad13ef5d +65, 0x75c127a61acd10c8 +66, 0xd65f697756f5f98e +67, 0x3ced84be93d94434 +68, 0x4da3095c2fc46d68 +69, 0x67564e2a771ee9ac +70, 0x36944775180644a9 +71, 0xf458db1c177cdb60 +72, 0x5b58406dcd034c8 +73, 0x793301a3fdab2a73 +74, 0x1c2a1a16d6db6128 +75, 0xc2dacd4ddddbe56c +76, 0x2e7d15be2301a111 +77, 0xd4f4a6341b3bcd18 +78, 0x3622996bbe6a9e3b +79, 0xaf29aa9a7d6d47da +80, 0x6d7dbb74a4cd68ae +81, 0xc260a17e0f39f841 +82, 0xdee0170f2af66f0d +83, 0xf84ae780d7b5a06e +84, 0x8326247b73f43c3a +85, 0xd44eef44b4f98b84 +86, 0x3d10aee62ec895e3 +87, 0x4f23fef01bf703b3 +88, 0xf8e50aa57d888df6 +89, 0x7da67411e3bef261 +90, 0x1d00f2769b2f96d7 +91, 0x7ef9a15b7444b84e +92, 0xcfa16436cc2b7e21 +93, 0x29ab8cfac00460ff +94, 0x23613de8608b0e70 +95, 0xb1aa0980625798a8 +96, 0xb9256fd29db7df99 +97, 0xdacf311bf3e7fa18 +98, 0xa013c8f9fada20d8 +99, 0xaf5fd4fe8230fe3e +100, 0xd3d59ca55102bc5c +101, 0x9d08e2aa5242767f +102, 0x40278fe131e83b53 +103, 0x56397d03c7c14c98 +104, 0xe874b77b119359b3 +105, 0x926a1ba4304ab19f +106, 0x1e115d5aa695a91d +107, 0xc6a459df441f2fe3 +108, 0x2ca842bc1b0b3c6a +109, 0x24c804cf8e5eed16 +110, 0x7ca00fc4a4c3ebd3 +111, 0x546af7cecc4a4ba6 +112, 0x8faae1fa18fd6e3 +113, 0x40420b0089641a6a +114, 0x88175a35d9abcb83 +115, 0xf7d746d1b8b1357c +116, 0x7dae771a651be970 +117, 0x2f6485247ee4df84 +118, 0x6883702fab2d8ec5 +119, 0xeb7eea829a67f9a6 +120, 0x60d5880b485562ed +121, 0x7d4ca3d7e41a4e7e +122, 0xbb7fef961ab8de18 +123, 0x3b92452fb810c164 +124, 0x5f4b4755348b338 +125, 0xca45a715a7539806 +126, 0xc33efd9da5399dd +127, 0x593d665a51d4aedd +128, 0x75d6b8636563036b +129, 0x7b57caa55e262082 +130, 0x4ede7427969e0dd5 +131, 0xc3f19b6f78ea00b +132, 0xeea7bab9be2181ea +133, 0x652c45fe9c420c04 +134, 0x14ba9e3d175670ee +135, 0xd2ad156ba6490474 +136, 0x4d65ae41065f614 +137, 0x6ff911c8afa28eb1 +138, 0xedc2b33588f3cb68 +139, 0x437c8bc324666a2f +140, 0x828cee25457a3f0 +141, 0x530c986091f31b9b +142, 0x2f34671e8326ade7 +143, 0x4f686a8f4d77f6da +144, 0xa4c1987083498895 +145, 0xbce5a88b672b0fb1 +146, 0x8476115a9e6a00cc +147, 0x16de18a55dd2c238 +148, 0xdf38cf4c416232bc +149, 0x2cb837924e7559f3 +150, 0xfad4727484e982ed +151, 0x32a55d4b7801e4f +152, 0x8b9ef96804bd10a5 +153, 0xa1fd422c9b5cf2a9 +154, 0xf46ddb122eb7e442 +155, 0x6e3842547afa3b33 +156, 0x863dee1c34afe5c4 +157, 0x6a43a1935b6db171 +158, 0x1060a5c2f8145821 +159, 0xf783ec9ed34c4607 +160, 0x1da4a86bf5f8c0b0 +161, 0x4c7714041ba12af8 +162, 0x580da7010be2f192 +163, 0xad682fe795a7ea7a +164, 0x6687b6cb88a9ed2c +165, 0x3c8d4b175517cd18 +166, 0xe9247c3a524a6b6b +167, 0x337ca9cfaa02658 +168, 0xed95399481c6feec +169, 0x58726a088e606062 +170, 0xfe7588a5b4ee342a +171, 0xee434c7ed146fdee +172, 0xe2ade8b60fdc4ba5 +173, 0xd57e4c155de4eaab +174, 0xdefeae12de1137cb +175, 0xb7a276a241316ac1 +176, 0xeb838b1b1df4ca15 +177, 0x6f78965edea32f6f +178, 0x18bebd264d7a5d53 +179, 0x3641c691d77005ec +180, 0xbe70ed7efea8c24c +181, 0x33047fa8d03ca560 +182, 
0x3bed0d2221ff0f87 +183, 0x23083a6ffbcf38a2 +184, 0xc23eb827073d3fa5 +185, 0xc873bb3415e9fb9b +186, 0xa4645179e54147fe +187, 0x2c72fb443f66e207 +188, 0x98084915dd89d8f4 +189, 0x88baa2de12c99037 +190, 0x85c74ab238cb795f +191, 0xe122186469ea3a26 +192, 0x4c3bba99b3249292 +193, 0x85d6845d9a015234 +194, 0x147ddd69c13e6a31 +195, 0x255f4d678c9a570b +196, 0x2d7c0c410bf962b4 +197, 0x58eb7649e0aa16ca +198, 0x9d240bf662fe0783 +199, 0x5f74f6fa32d293cc +200, 0x4928e52f0f79d9b9 +201, 0xe61c2b87146b706d +202, 0xcfcd90d100cf5431 +203, 0xf15ea8138e6aa178 +204, 0x6ab8287024f9a819 +205, 0xed8942593db74e01 +206, 0xefc00e4ec2ae36dd +207, 0xc21429fb9387f334 +208, 0xf9a3389e285a9bce +209, 0xacdee8c43aae49b3 +210, 0xefc382f02ad55c25 +211, 0x1153b50e8d406b72 +212, 0xb00d39ebcc2f89d8 +213, 0xde62f0b9831c8850 +214, 0xc076994662eef6c7 +215, 0x66f08f4752f1e3ef +216, 0x283b90619796249a +217, 0x4e4869bc4227499e +218, 0xb45ad78a49efd7ed +219, 0xffe19aa77abf5f4b +220, 0xfce11a0daf913aef +221, 0x7e4e64450d5cdceb +222, 0xe9621997cfd62762 +223, 0x4d2c9e156868081 +224, 0x4e2d96eb7cc9a08 +225, 0xda74849bba6e3bd3 +226, 0x6f4621da935e7fde +227, 0xb94b914aa0497259 +228, 0xd50d03e8b8db1563 +229, 0x1a45c1ce5dca422e +230, 0xc8d30d33276f843f +231, 0xb57245774e4176b4 +232, 0x8d36342c05abbbb1 +233, 0x3591ad893ecf9e78 +234, 0x62f4717239ee0ac8 +235, 0x9b71148a1a1d4200 +236, 0x65f8e0f56dd94463 +237, 0x453b1fcfd4fac8c2 +238, 0x4c25e48e54a55865 +239, 0xa866baa05112ace2 +240, 0x7741d3c69c6e79c5 +241, 0x7deb375e8f4f7a8a +242, 0xc242087ede42abd8 +243, 0x2fa9d1d488750c4b +244, 0xe8940137a935d3d3 +245, 0x1dab4918ca24b2f2 +246, 0xe2368c782168fe3e +247, 0x6e8b2d1d73695909 +248, 0x70455ebea268b33e +249, 0x656a919202e28da1 +250, 0x5a5a8935647da999 +251, 0x428c6f77e118c13c +252, 0xa87aee2b675bb083 +253, 0x3873a6412b239969 +254, 0x5f72c1e91cb8a2ee +255, 0xa25af80a1beb5679 +256, 0x1af65d27c7b4abc3 +257, 0x133437060670e067 +258, 0xb1990fa39a97d32e +259, 0x724adc89ae10ed17 +260, 0x3f682a3f2363a240 +261, 0x29198f8dbd343499 +262, 0xdfaeeaa42bc51105 +263, 0x5baff3901b9480c2 +264, 0x3f760a67043e77f5 +265, 0x610fa7aa355a43ba +266, 0x394856ac09c4f7a7 +267, 0x1d9229d058aee82e +268, 0x19c674804c41aeec +269, 0x74cf12372012f4aa +270, 0xa5d89b353fa2f6ca +271, 0x697e4f672ac363dd +272, 0xde6f55ba73df5af9 +273, 0x679cf537510bd68f +274, 0x3dc916114ae9ef7e +275, 0xd7e31a66ec2ee7ba +276, 0xc21bebb968728495 +277, 0xc5e0781414e2adfd +278, 0x71147b5412ddd4bd +279, 0x3b864b410625cca9 +280, 0x433d67c0036cdc6 +281, 0x48083afa0ae20b1b +282, 0x2d80beecd64ac4e8 +283, 0x2a753c27c3a3ee3e +284, 0xb2c5e6afd1fe051a +285, 0xea677930cd66c46b +286, 0x4c3960932f92810a +287, 0xf1b367a9e527eaba +288, 0xb7d92a8a9a69a98e +289, 0x9f9ad3210bd6b453 +290, 0x817f2889db2dcbd8 +291, 0x4270a665ac15813c +292, 0x90b85353bd2be4dd +293, 0x10c0460f7b2d68d +294, 0x11cef32b94f947f5 +295, 0x3cf29ed8e7d477e8 +296, 0x793aaa9bd50599ef +297, 0xbac15d1190014aad +298, 0x987944ae80b5cb13 +299, 0x460aa51f8d57c484 +300, 0xc77df0385f97c2d3 +301, 0x92e743b7293a3822 +302, 0xbc3458bcfbcbb8c0 +303, 0xe277bcf3d04b4ed7 +304, 0xa537ae5cf1c9a31c +305, 0x95eb00d30bd8cfb2 +306, 0x6376361c24e4f2dd +307, 0x374477fe87b9ea8e +308, 0x8210f1a9a039902e +309, 0xe7628f7031321f68 +310, 0x8b8e9c0888fc1d3d +311, 0x306be461fdc9e0ed +312, 0x510009372f9b56f5 +313, 0xa6e6fa486b7a027a +314, 0x9d3f002025203b5a +315, 0x7a46e0e81ecbef86 +316, 0x41e280c611d04df0 +317, 0xedcec10418a99e8a +318, 0x5c27b6327e0b9dbd +319, 0xa81ed2035b509f07 +320, 0x3581e855983a4cc4 +321, 0x4744594b25e9809d +322, 0xc737ac7c27fbd0ed +323, 0x1b523a307045433a +324, 
0x8b4ce9171076f1d9 +325, 0x2db02d817cd5eec0 +326, 0x24a1f1229af50288 +327, 0x5550c0dcf583ff16 +328, 0x3587baaa122ec422 +329, 0xf9d3dc894229e510 +330, 0xf3100430d5cf8e87 +331, 0xc31af79862f8e2fb +332, 0xd20582063b9f3537 +333, 0xac5e90ac95fcc7ad +334, 0x107c4c704d5109d4 +335, 0xebc8628906dbfd70 +336, 0x215242776da8c531 +337, 0xa98002f1dcf08b51 +338, 0xbc3bdc07f3b09718 +339, 0x238677062495b512 +340, 0x53b4796f2a3c49e8 +341, 0x6424286467e22f0e +342, 0x14d0952a11a71bac +343, 0x2f97098149b82514 +344, 0x3777f2fdc425ad2 +345, 0xa32f2382938876d4 +346, 0xda8a39a021f20ae3 +347, 0x364361ef0a6ac32c +348, 0x4413eede008ff05a +349, 0x8dda8ace851aa327 +350, 0x4303cabbdcecd1ee +351, 0x2e69f06d74aa549f +352, 0x4797079cd4d9275c +353, 0xc7b1890917e98307 +354, 0x34031b0e822a4b4c +355, 0xfc79f76b566303ea +356, 0x77014adbe255a930 +357, 0xab6c43dd162f3be5 +358, 0xa430041f3463f6b9 +359, 0x5c191a32ada3f84a +360, 0xe8674a0781645a31 +361, 0x3a11cb667b8d0916 +362, 0xaedc73e80c39fd8a +363, 0xfde12c1b42328765 +364, 0x97abb7dcccdc1a0b +365, 0x52475c14d2167bc8 +366, 0x540e8811196d5aff +367, 0xa867e4ccdb2b4b77 +368, 0x2be04af61e5bcfb9 +369, 0x81b645102bfc5dfd +370, 0x96a52c9a66c6450f +371, 0x632ec2d136889234 +372, 0x4ed530c0b36a6c25 +373, 0x6f4851225546b75 +374, 0x2c065d6ba46a1144 +375, 0xf8a3613ff416551d +376, 0xb5f0fd60e9c971a9 +377, 0x339011a03bb4be65 +378, 0x9439f72b6995ded6 +379, 0xc1b03f3ef3b2292d +380, 0xad12fd221daab3ae +381, 0xf615b770f2cf996f +382, 0x269d0fdcb764172 +383, 0x67837025e8039256 +384, 0x6402831fc823fafa +385, 0x22854146a4abb964 +386, 0x7b5ad9b5a1bad7a8 +387, 0x67170e7beb6ac935 +388, 0xfc2d1e8e24adfaaa +389, 0x7ded4395345ff40d +390, 0x418981760a80dd07 +391, 0xc03bef38022c1d2 +392, 0x3a11850b26eade29 +393, 0xaa56d02c7175c5f4 +394, 0xd83b7917b9bfbff5 +395, 0x3c1df2f8fa6fced3 +396, 0xf3d6e2999c0bb760 +397, 0xc66d683a59a950e3 +398, 0x8e3972a9d73ffabf +399, 0x97720a0443edffd9 +400, 0xa85f5d2fe198444a +401, 0xfc5f0458e1b0de5e +402, 0xe3973f03df632b87 +403, 0xe151073c84c594b3 +404, 0x68eb4e22e7ff8ecf +405, 0x274f36eaed7cae27 +406, 0x3b87b1eb60896b13 +407, 0xbe0b2f831442d70a +408, 0x2782ed7a48a1b328 +409, 0xb3619d890310f704 +410, 0xb03926b11b55921a +411, 0xdb46fc44aa6a0ce4 +412, 0x4b063e2ef2e9453a +413, 0xe1584f1aeec60fb5 +414, 0x7092bd6a879c5a49 +415, 0xb84e1e7c7d52b0e6 +416, 0x29d09ca48db64dfb +417, 0x8f6c4a402066e905 +418, 0x77390795eabc36b +419, 0xcc2dc2e4141cc69f +420, 0x2727f83beb9e3c7c +421, 0x1b29868619331de0 +422, 0xd38c571e192c246f +423, 0x535327479fe37b6f +424, 0xaff9ce5758617eb3 +425, 0x5658539e9288a4e4 +426, 0x8df91d87126c4c6d +427, 0xe931cf8fdba6e255 +428, 0x815dfdf25fbee9e8 +429, 0x5c61f4c7cba91697 +430, 0xdd5f5512fe2313a1 +431, 0x499dd918a92a53cd +432, 0xa7e969d007c97dfd +433, 0xb8d39c6fc81ac0bb +434, 0x1d646983def5746c +435, 0x44d4b3b17432a60c +436, 0x65664232a14db1e3 +437, 0xda8fae6433e7500b +438, 0xbe51b94ff2a3fe94 +439, 0xe9b1bd9a9098ef9f +440, 0xfe47d54176297ef5 +441, 0xb8ab99bc03bb7135 +442, 0xcfad97f608565b38 +443, 0xf05da71f6760d9c1 +444, 0xef8da40a7c70e7b +445, 0xe0465d58dbd5d138 +446, 0xb54a2d70eb1a938 +447, 0xfdd50c905958f2d8 +448, 0x3c41933c90a57d43 +449, 0x678f6d894c6ad0bb +450, 0x403e8f4582274e8 +451, 0x5cbbe975668df6b0 +452, 0x297e6520a7902f03 +453, 0x8f6dded33cd1efd7 +454, 0x8e903c97be8d783b +455, 0x10bd015577e30f77 +456, 0x3fcd69d1c36eab0c +457, 0xb45989f3ca198d3 +458, 0x507655ce02b491a9 +459, 0xa92cf99bb78602ce +460, 0xebfb82055fbc2f0f +461, 0x3334256279289b7a +462, 0xc19d2a0f740ee0ac +463, 0x8bb070dea3934905 +464, 0xa4ab57d3a8d1b3eb +465, 0xfee1b09bcacf7ff4 +466, 
0xccc7fb41ceec41fa +467, 0xd4da49094eb5a74d +468, 0xed5c693770af02ed +469, 0x369dabc9bbfaa8e4 +470, 0x7eab9f360d054199 +471, 0xe36dbebf5ee94076 +472, 0xd30840e499b23d7 +473, 0x8678e6cb545015ff +474, 0x3a47932ca0b336e +475, 0xeb7c742b6e93d6fe +476, 0x1404ea51fe5a62a9 +477, 0xa72cd49db978e288 +478, 0xfd7bada020173dcf +479, 0xc9e74fc7abe50054 +480, 0x93197847bb66808d +481, 0x25fd5f053dce5698 +482, 0xe198a9b18cc21f4 +483, 0x5cc27b1689452d5d +484, 0x8b3657af955a98dc +485, 0xc17f7584f54aa1c0 +486, 0xe821b088246b1427 +487, 0x32b5a9f6b45b6fa0 +488, 0x2aef7c315c2bae0c +489, 0xe1af8129846b705a +490, 0x4123b4c091b34614 +491, 0x6999d61ec341c073 +492, 0x14b9a8fcf86831ea +493, 0xfd4cff6548f46c9f +494, 0x350c3b7e6cc8d7d6 +495, 0x202a5047fecafcd5 +496, 0xa82509fe496bb57d +497, 0x835e4b2608b575fe +498, 0xf3abe3da919f54ec +499, 0x8705a21e2c9b8796 +500, 0xfd02d1427005c314 +501, 0xa38458faa637f49b +502, 0x61622f2360e7622a +503, 0xe89335a773c2963b +504, 0x481264b659b0e0d0 +505, 0x1e82ae94ebf62f15 +506, 0x8ea7812de49209d4 +507, 0xff963d764680584 +508, 0x418a68bef717f4af +509, 0x581f0e7621a8ab91 +510, 0x840337e9a0ec4150 +511, 0x951ef61b344be505 +512, 0xc8b1b899feb61ec2 +513, 0x8b78ca13c56f6ed9 +514, 0x3d2fd793715a946f +515, 0xf1c04fabcd0f4084 +516, 0x92b602614a9a9fcc +517, 0x7991bd7a94a65be7 +518, 0x5dead10b06cad2d7 +519, 0xda7719b33f722f06 +520, 0x9d87a722b7bff71e +521, 0xb038e479071409e9 +522, 0xf4e8bbec48054775 +523, 0x4fec2cd7a28a88ea +524, 0x839e28526aad3e56 +525, 0xd37ec57852a98bf0 +526, 0xdef2cbbe00f3a02d +527, 0x1aecfe01a9e4d801 +528, 0x59018d3c8beaf067 +529, 0x892753e6ac8bf3cd +530, 0xefdd3437023d2d1c +531, 0x447bfbd148c8cb88 +532, 0x282380221bd442b8 +533, 0xfce8658d1347384a +534, 0x60b211a7ec6bfa8 +535, 0xd21729cfcc692974 +536, 0x162087ecd5038a47 +537, 0x2b17000c4bce39d2 +538, 0x3a1f75ff6adcdce0 +539, 0x721a411d312f1a2c +540, 0x9c13b6133f66934d +541, 0xaa975d14978980e5 +542, 0x9403dbd4754203fa +543, 0x588c15762fdd643 +544, 0xdd1290f8d0ada73a +545, 0xd9b77380936103f4 +546, 0xb2e2047a356eb829 +547, 0x7019e5e7f76f7a47 +548, 0x3c29a461f62b001d +549, 0xa07dc6cfab59c116 +550, 0x9b97e278433f8eb +551, 0x6affc714e7236588 +552, 0x36170aeb32911a73 +553, 0x4a665104d364a789 +554, 0x4be01464ec276c9c +555, 0x71bb10271a8b4ecf +556, 0xbf62e1d068bc018 +557, 0xc9ada5db2cbbb413 +558, 0x2bded75e726650e5 +559, 0x33d5a7af2f34385d +560, 0x8179c46661d85657 +561, 0x324ebcfd29267359 +562, 0xac4c9311dc9f9110 +563, 0xc14bb6a52f9f9c0 +564, 0xc430abe15e7fb9db +565, 0xf1cce5c14df91c38 +566, 0x651e3efa2c0750d3 +567, 0x38a33604a8be5c75 +568, 0x7aaf77fe7ff56a49 +569, 0xc0d1cc56bbf27706 +570, 0x887aa47324e156c6 +571, 0x12547c004b085e8d +572, 0xd86a8d6fbbbfd011 +573, 0x57c860188c92d7b4 +574, 0xcd5d3843d361b8ca +575, 0x8f586ef05a9cb3ef +576, 0x174456e1ba6267d5 +577, 0xf5dc302c62fe583c +578, 0xa349442fabcdb71 +579, 0xe5123c1a8b6fd08e +580, 0x80681552aa318593 +581, 0xb295396deaef1e31 +582, 0xabb626e0b900e32b +583, 0xf024db8d3f19c15e +584, 0x1d04bb9548e2fb6c +585, 0xd8ed2b2214936c2b +586, 0x618ca1e430a52bc9 +587, 0xccbca44a6088136b +588, 0xd0481855c8b9ccbe +589, 0x3c92a2fade28bdf7 +590, 0x855e9fefc38c0816 +591, 0x1269bbfe55a7b27c +592, 0x1d6c853d83726d43 +593, 0xc8655511cc7fcafc +594, 0x301503eb125a9b0e +595, 0xb3108e4532016b11 +596, 0xbb7ab6245da9cb3d +597, 0x18004c49116d85eb +598, 0x3480849c20f61129 +599, 0xe28f45157463937b +600, 0x8e85e61060f2ce1 +601, 0x1673da4ec589ba5e +602, 0x74b9a6bd1b194712 +603, 0xed39e147fa8b7601 +604, 0x28ce54019102ca77 +605, 0x42e0347f6d7a2f30 +606, 0xb6a908d1c4814731 +607, 0x16c3435e4e9a126d +608, 
0x8880190514c1ad54 +609, 0xfffd86229a6f773c +610, 0x4f2420cdb0aa1a93 +611, 0xf8e1acb4120fc1fa +612, 0x63a8c553ab36a2f2 +613, 0x86b88cf3c0a6a190 +614, 0x44d8b2801622c792 +615, 0xf6eae14e93082ff1 +616, 0xd9ed4f5d1b8fac61 +617, 0x1808ce17f4e1f70 +618, 0x446e83ea336f262f +619, 0xc7c802b04c0917b7 +620, 0x626f45fd64968b73 +621, 0x9ffa540edc9b2c5c +622, 0xa96a1e219e486af8 +623, 0x2bb8963884e887a1 +624, 0xba7f68a5d029e3c4 +625, 0xefc45f44392d9ca0 +626, 0x98d77762503c5eab +627, 0xd89bcf62f2da627c +628, 0xa3cab8347f833151 +629, 0xa095b7595907d5c7 +630, 0x3b3041274286181 +631, 0xb518db8919eb71fa +632, 0x187036c14fdc9a36 +633, 0xd06e28301e696f5d +634, 0xdbc71184e0c56492 +635, 0xfe51e9cae6125bfd +636, 0x3b12d17cd014df24 +637, 0x3b95e4e2c986ac1a +638, 0x29c1cce59fb2dea2 +639, 0x58c05793182a49d6 +640, 0xc016477e330d8c00 +641, 0x79ef335133ada5d +642, 0x168e2cad941203f3 +643, 0xf99d0f219d702ef0 +644, 0x655628068f8f135b +645, 0xdcdea51910ae3f92 +646, 0x8e4505039c567892 +647, 0x91a9ec7e947c89ae +648, 0x8717172530f93949 +649, 0x1c80aba9a440171a +650, 0x9c8f83f6ebe7441e +651, 0x6c05e1efea4aa7f9 +652, 0x10af696b777c01b +653, 0x5892e9d9a92fc309 +654, 0xd2ba7da71e709432 +655, 0x46378c7c3269a466 +656, 0x942c63dfe18e772c +657, 0x6245cf02ef2476f +658, 0x6f265b2759ea2aea +659, 0x5aa757f17d17f4a6 +660, 0x1ad6a3c44fa09be6 +661, 0xe861af14e7015fb8 +662, 0x86be2e7db388c77 +663, 0x5c7bba32b519e9a0 +664, 0x3feb314850c4437b +665, 0x97955add60cfb45b +666, 0xfdb536230a540bdc +667, 0xdac9d7bf6e58512e +668, 0x4894c00e474e8120 +669, 0xa1918a37739da366 +670, 0xa8097f2096532807 +671, 0x592afe50e6c5e643 +672, 0xd69050ee6dcb33dc +673, 0xa6956b262dd3c561 +674, 0x1a55c815555e63f7 +675, 0x2ec7fd37516de2bb +676, 0x8ec251d9c70e76ba +677, 0x9b76e4abafd2689 +678, 0x9ce3f5c751a57df1 +679, 0x915c4818bf287bc7 +680, 0x2293a0d1fe07c735 +681, 0x7627dcd5d5a66d3d +682, 0xb5e4f92cc49c7138 +683, 0x6fc51298731d268c +684, 0xd19800aa95441f87 +685, 0x14f70f31162fa115 +686, 0x41a3da3752936f59 +687, 0xbec0652be95652ee +688, 0x7aa4bdb1020a290f +689, 0x4382d0d9bee899ef +690, 0xe6d988ae4277d6ff +691, 0xe618088ccb2a32d1 +692, 0x411669dfaa899e90 +693, 0x234e2bf4ba76d9f +694, 0xe109fe4cb7828687 +695, 0x1fb96b5022b0b360 +696, 0x6b24ad76c061a716 +697, 0x7e1781d4d7ecee15 +698, 0xf20c2dbe82ba38ba +699, 0xeda8e8ae1d943655 +700, 0xa58d196e2a77eaec +701, 0x44564765a5995a0b +702, 0x11902fe871ecae21 +703, 0x2ea60279900e675d +704, 0x38427227c18a9a96 +705, 0xe0af01490a1b1b48 +706, 0x826f91997e057824 +707, 0x1e57308e6e50451 +708, 0xb42d469bbbfdc350 +709, 0xb9734cff1109c49b +710, 0x98967559bb9d364f +711, 0xd6be360041907c12 +712, 0xa86a1279122a1e21 +713, 0x26f99a8527bfc698 +714, 0xfa8b85758f28f5d6 +715, 0xe3057429940806ae +716, 0x4bee2d7e84f93b2b +717, 0x948350a76ea506f4 +718, 0xa139154488045e74 +719, 0x8893579ba5e78085 +720, 0x5f21c215c6a9e397 +721, 0x456134f3a59641dc +722, 0x92c0273f8e97a9c6 +723, 0xd2936c9c3f0c6936 +724, 0xcfa4221e752c4735 +725, 0x28cd5a7457355dca +726, 0xecdfdde23d90999f +727, 0x60631b2d494d032b +728, 0xf67289df269a827f +729, 0xcbe8011ef0f5b7ef +730, 0x20eea973c70a84f5 +731, 0xbe1fd200398557ce +732, 0xd2279ee030191bba +733, 0xf2bd4291dedaf819 +734, 0xfc6d167dbe8c402 +735, 0x39ac298da5d0044b +736, 0xceac026f5f561ce +737, 0x10a5b0bdd8ad60e6 +738, 0xdeb3c626df6d4bcb +739, 0x3c128962e77ff6ca +740, 0xc786262e9c67a0e5 +741, 0x4332855b3febcdc0 +742, 0x7bda9724d1c0e020 +743, 0x6a8c93399bc4df22 +744, 0xa9b20100ac707396 +745, 0xa11a3458502c4eb5 +746, 0xb185461c60478941 +747, 0x13131d56195b7ff6 +748, 0x8d55875ddbd4aa1c +749, 0xc09b67425f469aa5 +750, 
0x39e33786cc7594c4 +751, 0x75e96db8e4b08b93 +752, 0xda01cd12a3275d1e +753, 0x2c49e7822344fab5 +754, 0x9bd5f10612514ca7 +755, 0x1c801a5c828e7332 +756, 0x29797d3f4f6c7b4c +757, 0xac992715e21e4e53 +758, 0xe40e89ee887ddb37 +759, 0x15189a2b265a783b +760, 0xa854159a52af5c5 +761, 0xb9d8a5a81c12bead +762, 0x3240cdc9d59e2a58 +763, 0x1d0b872234cf8e23 +764, 0xc01224cf6ce12cff +765, 0x2601e9f3905c8663 +766, 0xd4ecf9890168d6b4 +767, 0xa45db796d89bfdd5 +768, 0x9f389406dad64ab4 +769, 0xa5a851adce43ffe3 +770, 0xd0962c41c26e5aa9 +771, 0x8a671679e48510a4 +772, 0xc196dc0924a6bfeb +773, 0x3ead661043b549cb +774, 0x51af4ca737d405ac +775, 0xf4425b5c62275fb6 +776, 0x71e69d1f818c10f5 +777, 0xacaf4af2d3c70162 +778, 0x2e1f1d4fd7524244 +779, 0xe54fdd8f388890e8 +780, 0xfda0d33e84eb2b83 +781, 0x53965c5e392b81da +782, 0x5c92288267263097 +783, 0xcac1b431c878c66c +784, 0x36c0e1cf417241c6 +785, 0x5cc4d9cd1a36bf2c +786, 0x32e4257bb5d3e470 +787, 0x4aecff904adb44fb +788, 0x4d91a8e0d1d60cac +789, 0xa3b478388385b038 +790, 0x48d955f24eba70be +791, 0x310e4deb07f24f68 +792, 0x8853e73b1f30a5a +793, 0x278aee45c2a65c5 +794, 0xf6932eedbd62fb0b +795, 0xafb95958c82fafad +796, 0x78e807c18616c16c +797, 0xd7abadda7488ed9f +798, 0x2dd72e2572aa2ae6 +799, 0x6ec3791982c2be09 +800, 0x6865bb314fac478f +801, 0xa14dc0ce09000d1a +802, 0xb8081ad134da10f2 +803, 0xc4ac1534aa825ef5 +804, 0xd83aeb48ae2d538f +805, 0x38052027e3074be4 +806, 0xa9833e06ef136582 +807, 0x4f02d790ec9fd78 +808, 0xec2f60bc711c5bdc +809, 0x9253b0d12268e561 +810, 0xa8ac607fdd62c206 +811, 0x895e28ebc920289f +812, 0xe2fd42b154243ac7 +813, 0xc69cac2f776eee19 +814, 0xf4d4ac11db56d0dc +815, 0xa8d37049b9f39833 +816, 0x75abbf8a196c337c +817, 0xb115bb76750d27b8 +818, 0x39426d187839154 +819, 0xd488423e7f38bf83 +820, 0xbb92e0c76ecb6a62 +821, 0x3055a018ce39f4e3 +822, 0xc93fe0e907729bfb +823, 0x65985d17c5863340 +824, 0x2088ae081b2028e1 +825, 0x6e628de873314057 +826, 0x864377cccf573f0e +827, 0xae03f4c9aa63d132 +828, 0xb1db766d6404c66d +829, 0xdce5a22414a374b +830, 0x622155b777819997 +831, 0x69fe96e620371f3c +832, 0xa9c67dbc326d94fc +833, 0x932a84ae5dd43bab +834, 0xe2301a20f6c48c3f +835, 0x795d2e79c6477300 +836, 0xd8e3e631289521e7 +837, 0xae2684979002dfd6 +838, 0xc9c2392377550f89 +839, 0xa1b0c99d508ef7ec +840, 0x593aef3c5a5272ec +841, 0xe32e511a4b7162cd +842, 0xab3b81655f5a2857 +843, 0x1b535e1a0aaf053e +844, 0x5b33f56c1b6a07e2 +845, 0x782dc8cfcac4ef36 +846, 0xb3d4f256eecfd202 +847, 0xf73a6598f58c4f7e +848, 0xd5722189524870ae +849, 0x707878de6b995fc0 +850, 0xc3eb6ba73e3d7e8a +851, 0xca75c017655b75a7 +852, 0x1b29369ea3541e5f +853, 0x352e98858bdb58a3 +854, 0x1e4412d184b6b27d +855, 0x2d375ba0304b2d17 +856, 0x56c30fce69a5d08e +857, 0x6b8c2b0c06584bda +858, 0xde4dfff228c8c91f +859, 0xb7c9edd574e6287f +860, 0xf6078281c9fca2b2 +861, 0xb9b9a51de02a2f1e +862, 0xa411bef31c0103b0 +863, 0xc5facd8fc5e1d7a3 +864, 0x54e631c05ddf7359 +865, 0x815b42b3fd06c474 +866, 0xc9ac07566fda18ec +867, 0xd84ea62957bd8e15 +868, 0x5575f74b5cfd8803 +869, 0x5779a8d460c2e304 +870, 0xfd6e87e264a85587 +871, 0xa1d674daa320b26d +872, 0x2c3c3ec64b35afc4 +873, 0x393a274ff03e6935 +874, 0x1f40ecbac52c50ea +875, 0xc3de64fa324ffc0c +876, 0x56ae828b7f9deb04 +877, 0xe7c1a77b5c1f2cb3 +878, 0xa4c4aab19ea921cc +879, 0xec164c238825822c +880, 0xa6a3304770c03b03 +881, 0x3a63641d5b1e8123 +882, 0x42677be3a54617ef +883, 0xa2680423e3a200c0 +884, 0x8b17cf75f3f37277 +885, 0xe7ce65a49242be3d +886, 0x7f85934271323e4b +887, 0xcfb0f431f79a4fab +888, 0x392e4041a8505b65 +889, 0xd3e5daf0d8b25ea6 +890, 0x9447eff675d80f53 +891, 0xea27a9d53cfaeea8 +892, 
0xe3f2335945a83ba +893, 0x8875a43ce216413b +894, 0xe49941f9eabce33e +895, 0x9357c1296683a5b1 +896, 0xf0f16439e81ee701 +897, 0x3181515295ffd79a +898, 0x9d7150fffd169ed8 +899, 0x2d6a1d281e255a72 +900, 0x81bf1286fb3a92b6 +901, 0x566d3079b499e279 +902, 0xc7939ca8f047341 +903, 0xb1f8050e7c2d59f6 +904, 0x605701045e7be192 +905, 0x51b73360e8e31a1c +906, 0x9f4ad54483ba9fe0 +907, 0xd3085b8fcf69d1c8 +908, 0xc3e7475026dc5f0b +909, 0x5800f8554b157354 +910, 0x37dfdf858cfcd963 +911, 0x3a1fce05ce385072 +912, 0xf495c062645c20c3 +913, 0xdcbeec2c3492c773 +914, 0xc38f427589d1d0b4 +915, 0x681ead60216a8184 +916, 0x4bd569c40cc88c41 +917, 0x49b0d442e130b7a2 +918, 0xee349156b7d1fa3f +919, 0x2bde2d2db055135b +920, 0xc6a460d2fbcb2378 +921, 0xd0f170494ff3dbb +922, 0xb294422492528a23 +923, 0xfc95873c854e7b86 +924, 0x6c9c3ad1797bb19c +925, 0xe0c06f2aab65062d +926, 0x58e32ce0f11e3a81 +927, 0xa745fcd729ff5036 +928, 0x599b249b2fc2cdb2 +929, 0x78f23b5b0dd5b082 +930, 0x6de3e957f549ecfc +931, 0x9d0712fa6d878756 +932, 0x9076e8554e4a413a +933, 0xf3185818c0294de8 +934, 0x5de7cdf4b455b9b6 +935, 0xb15f6908ed703f7d +936, 0x98c654dfedc6818 +937, 0x120502ab0e93ae42 +938, 0x67966a98a58dc120 +939, 0x1caa0fc628989482 +940, 0xd8b2c3cd480a8625 +941, 0x85c70071b3aed671 +942, 0xff385f8473714662 +943, 0xe2868e4bf3773b63 +944, 0x96cf8019b279298e +945, 0x8511cc930bd74800 +946, 0x5312e48fdd55f5ab +947, 0xfcdae564b52df78d +948, 0x9eee48373e652176 +949, 0x953788f6bcbc56b0 +950, 0xd1a3855dbd2f6b37 +951, 0x3ad32acf77f4d1e9 +952, 0x917c7be81b003e30 +953, 0x9ce817da1e2e9dfb +954, 0x2968983db162d44d +955, 0x1e005decef5828ad +956, 0xc38fe59d1aa4f3d5 +957, 0xf357f1710dc02f1d +958, 0x2613912a4c83ec67 +959, 0x832a11470b9a17cb +960, 0x5e85508a611f0dad +961, 0x2781131677f59d56 +962, 0xa82358d7d4b0237f +963, 0xfbf8b3cc030c3af6 +964, 0x68b2f68ac8a55adb +965, 0x3b6fcf353add0ada +966, 0xd1956049bcd15bd5 +967, 0x95b76f31c7f98b6d +968, 0x814b6690df971a84 +969, 0xdcf7959cddd819e4 +970, 0xcf8c72c5d804fc88 +971, 0x56883769c8945a22 +972, 0x1f034652f658cf46 +973, 0x41df1324cda235a1 +974, 0xeccd32524504a054 +975, 0x974e0910a04ec02c +976, 0x72104507b821f6db +977, 0x791f8d089f273044 +978, 0xe0f79a4f567f73c3 +979, 0x52fe5bea3997f024 +980, 0x5f8b9b446494f78 +981, 0xfd9f511947059190 +982, 0x3aea9dac6063bce3 +983, 0xbfdae4dfc24aee60 +984, 0xa82cdbbf0a280318 +985, 0xf460aae18d70aa9d +986, 0x997367cb204a57c4 +987, 0x616e21ab95ba05ef +988, 0x9bfc93bec116769f +989, 0x2b2ee27c37a3fa5b +990, 0xb25c6ed54006ee38 +991, 0xab04d4a5c69e69a5 +992, 0x6d2f6b45f2d8438f +993, 0x4ad2f32afc82f092 +994, 0x513d718908f709c0 +995, 0x5272aadc4fffca51 +996, 0xeb3f87e66156ef5d +997, 0xf8a3d5a46a86ba85 +998, 0xdb4548a86f27abfd +999, 0x57c05f47ff62380d diff --git a/MLPY/Lib/site-packages/numpy/random/tests/data/sfc64-testset-1.csv b/MLPY/Lib/site-packages/numpy/random/tests/data/sfc64-testset-1.csv new file mode 100644 index 0000000000000000000000000000000000000000..c7768b244c59154786aa0f29c974c59a0440a9f5 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/tests/data/sfc64-testset-1.csv @@ -0,0 +1,1001 @@ +seed, 0xdeadbeaf +0, 0xa475f55fbb6bc638 +1, 0xb2d594b6c29d971c +2, 0x275bc4ece4484fb1 +3, 0x569be72d9b3492fb +4, 0x89a5bb9b206a670c +5, 0xd951bfa06afdc3f9 +6, 0x7ee2e1029d52a265 +7, 0x12ef1d4de0cb4d4c +8, 0x41658ba8f0ef0280 +9, 0x5b650c82e4fe09c5 +10, 0x638a9f3e30ec4e94 +11, 0x147487fb2ba9233e +12, 0x89ef035603d2d1fb +13, 0xe66ca57a190e6cbe +14, 0x330f673740dd61fc +15, 0xc71d3dce2f8bb34e +16, 0x3c07c39ff150b185 +17, 0x5df952b6cae8f099 +18, 0x9f09f2b1f0ceac80 +19, 0x19598eee2d0c4c67 +20, 
0x64e06483702e0ebd +21, 0xda04d1fdb545f7fa +22, 0xf2cf53b61a0c4f9b +23, 0xf0bb724ce196f66e +24, 0x71cefde55d9cf0f +25, 0x6323f62824a20048 +26, 0x1e93604680f14b4e +27, 0xd9d8fad1d4654025 +28, 0xf4ee25af2e76ca08 +29, 0x6af3325896befa98 +30, 0xad9e43abf5e04053 +31, 0xbf930e318ce09de3 +32, 0x61f9583b4f9ffe76 +33, 0x9b69d0b3d5ec8958 +34, 0xa608f250f9b2ca41 +35, 0x6fdba7073dc2bb5d +36, 0xa9d57601efea6d26 +37, 0xc24a88a994954105 +38, 0xc728b1f78d88fe5b +39, 0x88da88c2b083b3b2 +40, 0xa9e27f7303c76cfd +41, 0xc4c24608c29176eb +42, 0x5420b58466b972fd +43, 0xd2018a661b6756c8 +44, 0x7caed83d9573fc7 +45, 0x562a3d81b849a06a +46, 0x16588af120c21f2c +47, 0x658109a7e0eb4837 +48, 0x877aabb14d3822e1 +49, 0x95704c342c3745fe +50, 0xeeb8a0dc81603616 +51, 0x431bf94889290419 +52, 0xe4a9410ab92a5863 +53, 0xbc6be64ea60f12ba +54, 0x328a2da920015063 +55, 0x40f6b3bf8271ae07 +56, 0x4068ff00a0e854f8 +57, 0x1b287572ca13fa78 +58, 0xa11624a600490b99 +59, 0x4a04ef29eb7150fa +60, 0xcc9469ab5ffb739 +61, 0x99a6a9f8d95e782 +62, 0x8e90356573e7a070 +63, 0xa740b8fb415c81c4 +64, 0x47eccef67447f3da +65, 0x2c720afe3a62a49b +66, 0xe2a747f0a43eacf4 +67, 0xba063a87ab165576 +68, 0xbc1c78ed27feb5a3 +69, 0x285a19fa3974f9d +70, 0x489c61e704f5f0e3 +71, 0xf5ab04f6b03f238b +72, 0x7e25f88138a110dd +73, 0xc3d1cef3d7c1f1d1 +74, 0xc3de6ec64d0d8e00 +75, 0x73682a15b6cc5088 +76, 0x6fecbeb319163dc5 +77, 0x7e100d5defe570a1 +78, 0xad2af9af076dce57 +79, 0x3c65100e23cd3a9a +80, 0x4b442cc6cfe521bb +81, 0xe89dc50f8ab1ef75 +82, 0x8b3c6fdc2496566 +83, 0xdfc50042bc2c308c +84, 0xe39c5f158b33d2b2 +85, 0x92f6adefdfeb0ac +86, 0xdf5808a949c85b3e +87, 0x437384021c9dace9 +88, 0xa7b5ed0d3d67d8f +89, 0xe1408f8b21da3c34 +90, 0xa1bba125c1e80522 +91, 0x7611dc4710385264 +92, 0xb00a46ea84082917 +93, 0x51bf8002ffa87cef +94, 0x9bb81013e9810adc +95, 0xd28f6600013541cd +96, 0xc2ca3b1fa7791c1f +97, 0x47f9ad58f099c82c +98, 0x4d1bb9458469caf9 +99, 0xca0b165b2844257 +100, 0xc3b2e667d075dc66 +101, 0xde22f71136a3dbb1 +102, 0x23b4e3b6f219e4c3 +103, 0x327e0db4c9782f66 +104, 0x9365506a6c7a1807 +105, 0x3e868382dedd3be7 +106, 0xff04fa6534bcaa99 +107, 0x96621a8862995305 +108, 0x81bf39cb5f8e1df7 +109, 0x79b684bb8c37af7a +110, 0xae3bc073c3cde33c +111, 0x7805674112c899ac +112, 0xd95a27995abb20f2 +113, 0x71a503c57b105c40 +114, 0x5ff00d6a73ec8acc +115, 0x12f96391d91e47c2 +116, 0xd55ca097b3bd4947 +117, 0x794d79d20468b04 +118, 0x35d814efb0d7a07d +119, 0xfa9ac9bd0aae76d3 +120, 0xa77b8a3711e175cd +121, 0xe6694fbf421f9489 +122, 0xd8f1756525a1a0aa +123, 0xe38dfa8426277433 +124, 0x16b640c269bbcd44 +125, 0x2a7a5a67ca24cfeb +126, 0x669039c28d5344b4 +127, 0x2a445ee81fd596bb +128, 0x600df94cf25607e0 +129, 0x9358561a7579abff +130, 0xee1d52ea179fc274 +131, 0x21a8b325e89d31be +132, 0x36fc0917486eec0a +133, 0x3d99f40717a6be9f +134, 0x39ac140051ca55ff +135, 0xcef7447c26711575 +136, 0xf22666870eff441d +137, 0x4a53c6134e1c7268 +138, 0xd26de518ad6bdb1b +139, 0x1a736bf75b8b0e55 +140, 0xef1523f4e6bd0219 +141, 0xb287b32fd615ad92 +142, 0x2583d6af5e841dd5 +143, 0x4b9294aae7ca670c +144, 0xf5aa4a84174f3ca9 +145, 0x886300f9e0dc6376 +146, 0x3611401e475ef130 +147, 0x69b56432b367e1ac +148, 0x30c330e9ab36b7c4 +149, 0x1e0e73079a85b8d5 +150, 0x40fdfc7a5bfaecf +151, 0xd7760f3e8e75a085 +152, 0x1cc1891f7f625313 +153, 0xeece1fe6165b4272 +154, 0xe61111b0c166a3c1 +155, 0x2f1201563312f185 +156, 0xfd10e8ecdd2a57cb +157, 0x51cdc8c9dd3a89bf +158, 0xed13cc93938b5496 +159, 0x843816129750526b +160, 0xd09995cd6819ada +161, 0x4601e778d40607df +162, 0xef9df06bd66c2ea0 +163, 0xae0bdecd3db65d69 +164, 0xbb921a3c65a4ae9a +165, 0xd66698ce8e9361be 
+166, 0xacdc91647b6068f4 +167, 0xe505ef68f2a5c1c0 +168, 0xd6e62fd27c6ab137 +169, 0x6a2ba2c6a4641d86 +170, 0x9c89143715c3b81 +171, 0xe408c4e00362601a +172, 0x986155cbf5d4bd9d +173, 0xb9e6831728c893a7 +174, 0xb985497c3bf88d8c +175, 0xd0d729214b727bec +176, 0x4e557f75fece38a +177, 0x6572067fdfd623ca +178, 0x178d49bb4d5cd794 +179, 0xe6baf59f60445d82 +180, 0x5607d53518e3a8d2 +181, 0xba7931adb6ebbd61 +182, 0xe853576172611329 +183, 0xe945daff96000c44 +184, 0x565b9ba3d952a176 +185, 0xcdb54d4f88c584c8 +186, 0x482a7499bee9b5e5 +187, 0x76560dd0affe825b +188, 0x2a56221faa5ca22c +189, 0x7729be5b361f5a25 +190, 0xd6f2195795764876 +191, 0x59ef7f8f423f18c5 +192, 0x7ebefed6d02adde1 +193, 0xcfec7265329c73e5 +194, 0x4fd8606a5e59881c +195, 0x95860982ae370b73 +196, 0xdecfa33b1f902acc +197, 0xf9b8a57400b7c0a6 +198, 0xd20b822672ec857b +199, 0x4eb81084096c7364 +200, 0xe535c29a44d9b6ad +201, 0xdef8b48ebacb2e29 +202, 0x1063bc2b8ba0e915 +203, 0xe4e837fb53d76d02 +204, 0x4df935db53579fb8 +205, 0xa30a0c8053869a89 +206, 0xe891ee58a388a7b5 +207, 0x17931a0c64b8a985 +208, 0xaf2d350b494ce1b3 +209, 0x2ab9345ffbcfed82 +210, 0x7de3fe628a2592f0 +211, 0x85cf54fab8b7e79d +212, 0x42d221520edab71b +213, 0x17b695b3af36c233 +214, 0xa4ffe50fe53eb485 +215, 0x1102d242db800e4d +216, 0xc8dc01f0233b3b6 +217, 0x984a030321053d36 +218, 0x27fa8dc7b7112c0e +219, 0xba634dd8294e177f +220, 0xe67ce34b36332eb +221, 0x8f1351e1894fb41a +222, 0xb522a3048761fd30 +223, 0xc350ad9bc6729edc +224, 0xe0ed105bd3c805e1 +225, 0xa14043d2b0825aa7 +226, 0xee7779ce7fc11fdf +227, 0xc0fa8ba23a60ab25 +228, 0xb596d1ce259afbad +229, 0xaa9b8445537fdf62 +230, 0x770ab2c700762e13 +231, 0xe812f1183e40cc1 +232, 0x44bc898e57aefbbd +233, 0xdd8a871df785c996 +234, 0x88836a5e371eb36b +235, 0xb6081c9152623f27 +236, 0x895acbcd6528ca96 +237, 0xfb67e33ddfbed435 +238, 0xaf7af47d323ce26 +239, 0xe354a510c3c39b2d +240, 0x5cacdedda0672ba3 +241, 0xa440d9a2c6c22b09 +242, 0x6395099f48d64304 +243, 0xc11cf04c75f655b5 +244, 0x1c4e054d144ddb30 +245, 0x3e0c2db89d336636 +246, 0x127ecf18a5b0b9a7 +247, 0x3b50551a88ea7a73 +248, 0xbd27003e47f1f684 +249, 0xf32d657782baac9b +250, 0x727f5cabf020bc9 +251, 0x39c1c1c226197dc7 +252, 0x5552c87b35deeb69 +253, 0x64d54067b5ce493f +254, 0x3494b091fe28dda0 +255, 0xdf0278bc85ee2965 +256, 0xdef16fec25efbd66 +257, 0xe2be09f578c4ce28 +258, 0xd27a9271979d3019 +259, 0x427f6fcd71845e3 +260, 0x26b52c5f81ec142b +261, 0x98267efc3986ad46 +262, 0x7bf4165ddb7e4374 +263, 0xd05f7996d7941010 +264, 0x3b3991de97b45f14 +265, 0x9068217fb4f27a30 +266, 0xd8fe295160afc7f3 +267, 0x8a159fab4c3bc06f +268, 0x57855506d19080b6 +269, 0x7636df6b3f2367a4 +270, 0x2844ee3abd1d5ec9 +271, 0xe5788de061f51c16 +272, 0x69e78cc9132a164 +273, 0xacd53cde6d8cd421 +274, 0xb23f3100068e91da +275, 0x4140070a47f53891 +276, 0xe4a422225a96e53a +277, 0xb82a8925a272a2ac +278, 0x7c2f9573590fe3b7 +279, 0xbaf80764db170575 +280, 0x955abffa54358368 +281, 0x355ce7460614a869 +282, 0x3700ede779a4afbf +283, 0x10a6ec01d92d68cd +284, 0x3308f5a0a4c0afef +285, 0x97b892d7601136c9 +286, 0x4955c3b941b8552e +287, 0xca85aa67e941961d +288, 0xb1859ae5db28e9d2 +289, 0x305d072ac1521fbd +290, 0xed52a868996085bb +291, 0x723bfa6a76358852 +292, 0x78d946ecd97c5fb3 +293, 0x39205b30a8e23e79 +294, 0xb927e3d086baadbe +295, 0xa18d6946136e1ff5 +296, 0xdab6f0b51c1eb5ff +297, 0xf0a640bf7a1af60c +298, 0xf0e81db09004d0d4 +299, 0xfe76cebdbe5a4dde +300, 0x2dafe9cc3decc376 +301, 0x4c871fdf1af34205 +302, 0xe79617d0c8fa893b +303, 0xee658aaad3a141f7 +304, 0xfd91aa74863e19f1 +305, 0x841b8f55c103cc22 +306, 0x22766ed65444ad5d +307, 0x56d03d1beca6c17a +308, 
0x5fd4c112c92036ae +309, 0x75466ae58a5616dc +310, 0xfbf98b1081e802a9 +311, 0xdc325e957bf6d8f5 +312, 0xb08da7015ebd19b7 +313, 0xf25a9c0944f0c073 +314, 0xf4625bafa0ced718 +315, 0x4349c9e093a9e692 +316, 0x75a9ccd4dd8935cb +317, 0x7e6cf9e539361e91 +318, 0x20fdd22fb6edd475 +319, 0x5973021b57c2311f +320, 0x75392403667edc15 +321, 0xed9b2156ea70d9f1 +322, 0xf40c114db50b64a0 +323, 0xe26bb2c9eef20c62 +324, 0x409c1e3037869f03 +325, 0xcdfd71fdda3b7f91 +326, 0xa0dfae46816777d6 +327, 0xde060a8f61a8deb8 +328, 0x890e082a8b0ca4fc +329, 0xb9f2958eddf2d0db +330, 0xd17c148020d20e30 +331, 0xffdc9cc176fe7201 +332, 0xffb83d925b764c1 +333, 0x817ea639e313da8d +334, 0xa4dd335dd891ca91 +335, 0x1342d25a5e81f488 +336, 0xfa7eb9c3cf466b03 +337, 0xfe0a423d44b185d0 +338, 0x101cfd430ab96049 +339, 0x7b5d3eda9c4504b +340, 0xe20ccc006e0193f1 +341, 0xf54ccddedebc5df0 +342, 0xc0edd142bd58f1db +343, 0x3831f40d378d2430 +344, 0x80132353f0a88289 +345, 0x688f23c419d03ef8 +346, 0x4c6837e697884066 +347, 0x699387bb2e9a3a8f +348, 0x8996f860342448d8 +349, 0xb0f80dff99bfa5cc +350, 0x3e927a7f9ea12c8e +351, 0xd7e498d1e5f9dff3 +352, 0x78ecb97bb3f864cc +353, 0x3c4ffd069a014d38 +354, 0xf8d5073a1e09b4d4 +355, 0x8717e854f9faef23 +356, 0xfbcc5478d8d0ad7 +357, 0xd3cd8b233ca274ff +358, 0x8bd8f11f79beb265 +359, 0xf64498a832d8fd0e +360, 0xb01bba75112131ec +361, 0x55572445a7869781 +362, 0x7b56622f18cb3d7a +363, 0x7f192c9e075bdb83 +364, 0xd9a112f836b83ff3 +365, 0x68673b37269653dc +366, 0xe46a9433fb6a0879 +367, 0x127d756ca4779001 +368, 0xc1378e8b1e8eab94 +369, 0x1006edb0f51d078c +370, 0xc6dd53961232d926 +371, 0x9a4aeef44038256d +372, 0xd357f4fa652d4f5f +373, 0x59f3d2cc3378598 +374, 0xe76e6207a824a7fc +375, 0x5fc5e33712ceffef +376, 0x77d24aeb0ccb1adc +377, 0x5be4b2826805659e +378, 0x257c69d787e64634 +379, 0x58dd52ca6bc727b1 +380, 0x3ab997767235ea33 +381, 0x986a2a7a966fad14 +382, 0xc900f8b27761dcc4 +383, 0x44991bdb13795700 +384, 0xe5c145a4fe733b2 +385, 0x56f041b56bffe0d3 +386, 0x5779c4fef8067996 +387, 0xa0fe8748e829532d +388, 0x840c1277d78d9dd4 +389, 0x37ebcb315432acbc +390, 0xf4bc8738433ba3be +391, 0x8b122993f2e10062 +392, 0xe1fe8481f2681ed5 +393, 0x8e23f1630d9f494a +394, 0xda24661a01b7d0b3 +395, 0x7a02942a179cee36 +396, 0xf1e08a3c09b71ac +397, 0x3dec2cc7ee0bd8fd +398, 0x1f3e480113d805d4 +399, 0xc061b973ad4e3f2c +400, 0x6bea750f17a66836 +401, 0xbc2add72eac84c25 +402, 0xcff058d3f97934ca +403, 0x54ccc30987778ec2 +404, 0x93449ec1e1469558 +405, 0xe2ff369eb0c6836 +406, 0x41c2df2d63bf8e55 +407, 0xf9302629b6c71be2 +408, 0xdd30376b8e5ab29a +409, 0x12db9e04f911d754 +410, 0x8d03d6cd359f1b97 +411, 0xe15956511abf1cee +412, 0x9b68e10e2c2fd940 +413, 0x2e28de6491c1ce53 +414, 0x52b329b72d0c109d +415, 0xc2c0b115f9da2a60 +416, 0x6ca084105271bbff +417, 0x49b92b8676058c1e +418, 0x767fc92a70f7e5a3 +419, 0x87ba4ed4b65a6aa0 +420, 0xf70b052e0a3975e9 +421, 0x3e925c3306db9eec +422, 0x43253f1d96ac9513 +423, 0xe3e04f1a1ea454c4 +424, 0x763e3f4cc81ba0c8 +425, 0x2a2721ac69265705 +426, 0xdf3b0ac6416ea214 +427, 0xa6a6b57450f3e000 +428, 0xc3d3b1ac7dbfe6ac +429, 0xb66e5e6f7d2e4ec0 +430, 0x43c65296f98f0f04 +431, 0xdb0f6e3ff974d842 +432, 0x3d6b48e02ebb203b +433, 0xd74674ebf09d8f27 +434, 0xbe65243c58fc1200 +435, 0x55eb210a68d42625 +436, 0x87badab097dbe883 +437, 0xada3fda85a53824f +438, 0xef2791e8f48cd37a +439, 0x3fe7fceb927a641a +440, 0xd3bffd3ff031ac78 +441, 0xb94efe03da4d18fb +442, 0x162a0ad8da65ea68 +443, 0x300f234ef5b7e4a6 +444, 0xa2a8b4c77024e4fb +445, 0x5950f095ddd7b109 +446, 0xded66dd2b1bb02ba +447, 0x8ec24b7fa509bcb6 +448, 0x9bede53d924bdad6 +449, 0xa9c3f46423be1930 +450, 
0x6dfc90597f8de8b4 +451, 0xb7419ebc65b434f0 +452, 0xa6596949238f58b9 +453, 0x966cbade640829b8 +454, 0x58c74877bdcbf65e +455, 0xaa103b8f89b0c453 +456, 0x219f0a86e41179a4 +457, 0x90f534fc06ddc57f +458, 0x8db7cdd644f1affa +459, 0x38f91de0167127ac +460, 0xdcd2a65e4df43daa +461, 0x3e04f34a7e01f834 +462, 0x5b237eea68007768 +463, 0x7ff4d2b015921768 +464, 0xf786b286549d3d51 +465, 0xaefa053fc2c3884c +466, 0x8e6a8ff381515d36 +467, 0x35b94f3d0a1fce3c +468, 0x165266d19e9abb64 +469, 0x1deb5caa5f9d8076 +470, 0x13ab91290c7cfe9d +471, 0x3651ca9856be3e05 +472, 0xe7b705f6e9cccc19 +473, 0xd6e7f79668c127ed +474, 0xa9faf37154896f92 +475, 0x89fbf190603e0ab1 +476, 0xb34d155a86f942d0 +477, 0xb2d4400a78bfdd76 +478, 0x7c0946aca8cfb3f0 +479, 0x7492771591c9d0e8 +480, 0xd084d95c5ca2eb28 +481, 0xb18d12bd3a6023e +482, 0xea217ed7b864d80b +483, 0xe52f69a755dd5c6f +484, 0x127133993d81c4aa +485, 0xe07188fcf1670bfb +486, 0x178fbfe668e4661d +487, 0x1c9ee14bb0cda154 +488, 0x8d043b96b6668f98 +489, 0xbc858986ec96ca2b +490, 0x7660f779d528b6b7 +491, 0xd448c6a1f74ae1d3 +492, 0x178e122cfc2a6862 +493, 0x236f000abaf2d23b +494, 0x171b27f3f0921915 +495, 0x4c3ff07652f50a70 +496, 0x18663e5e7d3a66ca +497, 0xb38c97946c750cc9 +498, 0xc5031aae6f78f909 +499, 0x4d1514e2925e95c1 +500, 0x4c2184a741dabfbb +501, 0xfd410364edf77182 +502, 0xc228157f863ee873 +503, 0x9856fdc735cc09fc +504, 0x660496cd1e41d60e +505, 0x2edf1d7e01954c32 +506, 0xd32e94639bdd98cf +507, 0x8e153f48709a77d +508, 0x89357f332d2d6561 +509, 0x1840d512c97085e6 +510, 0x2f18d035c9e26a85 +511, 0x77b88b1448b26d5b +512, 0xc1ca6ef4cdae0799 +513, 0xcc203f9e4508165f +514, 0xeaf762fbc9e0cbbe +515, 0xc070c687f3c4a290 +516, 0xd49ed321068d5c15 +517, 0x84a55eec17ee64ee +518, 0x4d8ee685298a8871 +519, 0x9ff5f17d7e029793 +520, 0x791d7d0d62e46302 +521, 0xab218b9114e22bc6 +522, 0x4902b7ab3f7119a7 +523, 0x694930f2e29b049e +524, 0x1a3c90650848999f +525, 0x79f1b9d8499c932b +526, 0xfacb6d3d55e3c92f +527, 0x8fd8b4f25a5da9f5 +528, 0xd037dcc3a7e62ae7 +529, 0xfecf57300d8f84f4 +530, 0x32079b1e1dc12d48 +531, 0xe5f8f1e62b288f54 +532, 0x97feba3a9c108894 +533, 0xd279a51e1899a9a0 +534, 0xd68eea8e8e363fa8 +535, 0x7394cf2deeca9386 +536, 0x5f70b0c80f1dbf10 +537, 0x8d646916ed40462 +538, 0xd253bb1c8a12bbb6 +539, 0x38f399a821fbd73e +540, 0x947523a26333ac90 +541, 0xb52e90affbc52a37 +542, 0xcf899cd964654da4 +543, 0xdf66ae9cca8d99e7 +544, 0x6051478e57c21b6a +545, 0xffa7dc975af3c1da +546, 0x195c7bff2d1a8f5 +547, 0x64f12b6575cf984d +548, 0x536034cb842cf9e1 +549, 0x180f247ce5bbfad +550, 0x8ced45081b134867 +551, 0x532bbfdf426710f3 +552, 0x4747933e74c4f54d +553, 0x197a890dc4793401 +554, 0x76c7cc2bd42fae2 +555, 0xdabfd67f69675dd0 +556, 0x85c690a68cdb3197 +557, 0xe482cec89ce8f92 +558, 0x20bc9fb7797011b1 +559, 0x76dc85a2185782ad +560, 0x3df37c164422117a +561, 0x99211f5d231e0ab0 +562, 0xef7fd794a0a91f4 +563, 0x419577151915f5fe +564, 0x3ce14a0a7135dae3 +565, 0x389b57598a075d6a +566, 0x8cc2a9d51b5af9aa +567, 0xe80a9beffbd13f13 +568, 0x65e96b22ea8a54d8 +569, 0x79f38c4164138ede +570, 0xd1955846cba03d81 +571, 0x60359fe58e4f26d6 +572, 0x4ea724f585f8d13e +573, 0x316dfdbadc801a3c +574, 0x20aa29b7c6dd66fe +575, 0x65eaf83a6a008caa +576, 0x407000aff1b9e8cb +577, 0xb4d49bfb2b268c40 +578, 0xd4e6fe8a7a0f14a9 +579, 0xe34afef924e8f58e +580, 0xe377b0c891844824 +581, 0x29c2e20c112d30c8 +582, 0x906aad1fe0c18a95 +583, 0x308385f0efbb6474 +584, 0xf23900481bf70445 +585, 0xfdfe3ade7f937a55 +586, 0xf37aae71c33c4f97 +587, 0x1c81e3775a8bed85 +588, 0x7eb5013882ce35ea +589, 0x37a1c1692495818d +590, 0x3f90ae118622a0ba +591, 0x58e4fe6fea29b037 +592, 
0xd10ff1d269808825 +593, 0xbce30edb60c21bba +594, 0x123732329afd6fee +595, 0x429b4059f797d840 +596, 0x421166568a8c4be1 +597, 0x88f895c424c1bd7f +598, 0x2adaf7a7b9f781cb +599, 0xa425644b26cb698 +600, 0x8cc44d2486cc5743 +601, 0xdb9f357a33abf6ba +602, 0x1a57c4ea77a4d70c +603, 0x1dea29be75239e44 +604, 0x463141a137121a06 +605, 0x8fecfbbe0b8a9517 +606, 0x92c83984b3566123 +607, 0x3b1c69180ed28665 +608, 0x14a6073425ea8717 +609, 0x71f4c2b3283238d7 +610, 0xb3d491e3152f19f +611, 0x3a0ba3a11ebac5d2 +612, 0xddb4d1dd4c0f54ac +613, 0xdb8f36fe02414035 +614, 0x1cf5df5031b1902c +615, 0x23a20ed12ef95870 +616, 0xf113e573b2dedcbb +617, 0x308e2395cde0a9fa +618, 0xd377a22581c3a7da +619, 0xe0ced97a947a66fb +620, 0xe44f4de9cd754b00 +621, 0x2344943337d9d1bf +622, 0x4b5ae5e2ea6e749c +623, 0x9b8d2e3ef41d1c01 +624, 0x59a5a53ebbd24c6b +625, 0x4f7611bf9e8a06fb +626, 0xea38c7b61361cd06 +627, 0xf125a2bfdd2c0c7 +628, 0x2df8dcb5926b9ebb +629, 0x233e18720cc56988 +630, 0x974c61379b4aa95e +631, 0xc7fe24c1c868910b +632, 0x818fd1affc82a842 +633, 0xcee92a952a26d38e +634, 0x8962f575ebcbf43 +635, 0x7770687e3678c460 +636, 0xdfb1db4ed1298117 +637, 0xb9db54cb03d434d3 +638, 0x34aebbf2244257ad +639, 0xd836db0cb210c490 +640, 0x935daed7138957cd +641, 0x3cd914b14e7948fd +642, 0xd0472e9ed0a0f7f0 +643, 0xa9df33dca697f75e +644, 0x15e9ea259398721a +645, 0x23eeba0f970abd60 +646, 0x2217fdf8bbe99a12 +647, 0x5ea490a95717b198 +648, 0xf4e2bfc28280b639 +649, 0x9d19916072d6f05c +650, 0x5e0387cab1734c6a +651, 0x93c2c8ac26e5f01e +652, 0xb0d934354d957eb1 +653, 0xee5099a1eef3188c +654, 0x8be0abca8edc1115 +655, 0x989a60845dbf5aa3 +656, 0x181c7ed964eee892 +657, 0x49838ea07481288d +658, 0x17dbc75d66116b2e +659, 0xa4cafb7a87c0117e +660, 0xab2d0ae44cdc2e6e +661, 0xdf802f2457e7da6 +662, 0x4b966c4b9187e124 +663, 0x62de9db6f4811e1a +664, 0x1e20485968bc62 +665, 0xe9ac288265caca94 +666, 0xc5c694d349aa8c1a +667, 0x3d67f2083d9bdf10 +668, 0x9a2468e503085486 +669, 0x9d6acd3dc152d1a3 +670, 0xca951e2aeee8df77 +671, 0x2707371af9cdd7b0 +672, 0x2347ae6a4eb5ecbd +673, 0x16abe5582cb426f +674, 0x523af4ff980bbccb +675, 0xb07a0f043e3694aa +676, 0x14d7c3da81b2de7 +677, 0xf471f1b8ac22305b +678, 0xdb087ffff9e18520 +679, 0x1a352db3574359e8 +680, 0x48d5431502cc7476 +681, 0x7c9b7e7003dfd1bf +682, 0x4f43a48aae987169 +683, 0x9a5d3eb66dedb3e9 +684, 0xa7b331af76a9f817 +685, 0xba440154b118ab2d +686, 0x64d22344ce24c9c6 +687, 0xa22377bd52bd043 +688, 0x9dfa1bb18ca6c5f7 +689, 0xdccf44a92f644c8b +690, 0xf623d0a49fd18145 +691, 0x556d5c37978e28b3 +692, 0xad96e32ce9d2bb8b +693, 0x2e479c120be52798 +694, 0x7501cf871af7b2f7 +695, 0xd02536a5d026a5b8 +696, 0x4b37ff53e76ab5a4 +697, 0xdb3a4039caaeab13 +698, 0x6cbd65e3b700c7be +699, 0x7367abd98761a147 +700, 0xf4f9ba216a35aa77 +701, 0xf88ca25ce921eb86 +702, 0xb211de082ec2cbf2 +703, 0xdd94aa46ec57e12e +704, 0xa967d74ad8210240 +705, 0xdaa1fada8cfa887 +706, 0x85901d081c4488ee +707, 0xcf67f79a699ef06 +708, 0x7f2f1f0de921ee14 +709, 0x28bc61e9d3f2328b +710, 0x3332f2963faf18e5 +711, 0x4167ac71fcf43a6 +712, 0x843c1746b0160b74 +713, 0xd9be80070c578a5e +714, 0xbd7250c9af1473e7 +715, 0x43f78afaa3647899 +716, 0x91c6b5dd715a75a5 +717, 0x29cc66c8a07bfef3 +718, 0x3f5c667311dc22be +719, 0x4f49cd47958260cd +720, 0xbef8be43d920b64e +721, 0x7a892a5f13061d8b +722, 0x9532f40125c819b1 +723, 0x924fca3045f8a564 +724, 0x9b2c6442453b0c20 +725, 0x7e21009085b8e793 +726, 0x9b98c17e17af59d2 +727, 0xba61acb73e3ae89a +728, 0xb9d61a710555c138 +729, 0xc2a425d80978974b +730, 0xa275e13592da7d67 +731, 0xe962103202d9ad0f +732, 0xbdf8367a4d6f33fd +733, 0xe59beb2f8648bdc8 +734, 
0xb4c387d8fbc4ac1c +735, 0x5e3f276b63054b75 +736, 0xf27e616aa54d8464 +737, 0x3f271661d1cd7426 +738, 0x43a69dbee7502c78 +739, 0x8066fcea6df059a1 +740, 0x3c10f19409bdc993 +741, 0x6ba6f43fb21f23e0 +742, 0x9e182d70a5bccf09 +743, 0x1520783d2a63a199 +744, 0xba1dcc0c70b9cace +745, 0x1009e1e9b1032d8 +746, 0xf632f6a95fb0315 +747, 0x48e711c7114cbfff +748, 0xef281dcec67debf7 +749, 0x33789894d6abf59b +750, 0x6c8e541fffbe7f9c +751, 0x85417f13b08e0a88 +752, 0x9a581e36d589608f +753, 0x461dca50b1befd35 +754, 0x5a3231680dde6462 +755, 0xcc57acf729780b97 +756, 0x50301efef62e1054 +757, 0x675d042cd4f6bbc9 +758, 0x1652fdd3794384c9 +759, 0x1c93bbeeb763cd4d +760, 0x44b7240c4b105242 +761, 0x4c6af2a1b606ccfb +762, 0x18fc43ece2ec1a40 +763, 0x859a5511aeae8acb +764, 0x2f56826f1996ad2f +765, 0xa8e95ce8bb363bdf +766, 0xf4da396054e50e4b +767, 0x5493865e9895883c +768, 0x768e4c8b332ac0e3 +769, 0x32195d2aa583fca5 +770, 0xf2f353f21266bc15 +771, 0x43cddf1d021307d +772, 0x6031e3aa30300e4a +773, 0x4f1298469ac6088f +774, 0x4b4d450bafac574e +775, 0x23e1cf9c0582a22b +776, 0x2e9036980db49cd0 +777, 0xe4e228b113c411b2 +778, 0x8bddcdb82b51706 +779, 0xd2a7ea8288593629 +780, 0x67fe90e98fdda61 +781, 0x7b63494dba95717b +782, 0x105625904510d782 +783, 0xdf4aa2242454e50a +784, 0x32541d6cd7d6c7e3 +785, 0x5661fb432591cf3b +786, 0xce920a5ed047bce7 +787, 0xed4178a3c96eea8f +788, 0xe378cd996e39863b +789, 0x169e1fdc8e2b05e1 +790, 0xaee1812ef7149a96 +791, 0x648571c7453d12c5 +792, 0xb7b6bc9328573c43 +793, 0xe7fb969078e270d7 +794, 0xdfc2b1b8985f6e6f +795, 0x862b6527ee39a1aa +796, 0x1ee329aea91d7882 +797, 0x20d25324f2fe704 +798, 0xbfcc47401fc3bbfd +799, 0x1515cdc8d48b2904 +800, 0xbd6eefe86284261c +801, 0x9b1f28e3b35f22ee +802, 0x842a29d35e5aecda +803, 0xf2346109ad370765 +804, 0x24d68add5a71afd9 +805, 0x4a691421613d91e2 +806, 0x60e3058b3c244051 +807, 0x79194905cdaa5de8 +808, 0xe0e2df35c01e8987 +809, 0xe29b78beffbb5e4a +810, 0xcdcdbc020218c19e +811, 0x5ae0af8c16feae43 +812, 0x8109292feeaf14fa +813, 0x34113f7508dfa521 +814, 0xc062ac163f56730a +815, 0xf1660e66ec6d4c4c +816, 0x5966c55f60151c80 +817, 0x3865ae8ec934b17 +818, 0x472a7314afb055ec +819, 0x7a24277309a44a44 +820, 0x556e02dd35d38baa +821, 0x9849611a1bc96ec1 +822, 0xd176f5d5a8eb0843 +823, 0x44db12ec60510030 +824, 0x272e3a06a0030078 +825, 0x7c4764dbefc075ea +826, 0x910712f3735c1183 +827, 0xd49a2da74ae7aff6 +828, 0xcf9b3e6e8f776d71 +829, 0x27789fe3ec481a02 +830, 0x86659f82c6b5912b +831, 0xe044b3dbf339158c +832, 0x99d81f6bb62a37b0 +833, 0x5f5830c246fada9a +834, 0xe68abab1eeb432cb +835, 0x49c5c5ace04e104 +836, 0x1ac3871b3fc6771b +837, 0x773b39f32d070652 +838, 0x9c4138c2ae58b1f3 +839, 0xac41c63d7452ac60 +840, 0x9248826b245359e1 +841, 0x99bba1c7a64f1670 +842, 0xe0dc99ff4ebb92f2 +843, 0x113638652740f87c +844, 0xebf51e94da88cfc +845, 0x5441c344b81b2585 +846, 0xe1e69e0bc2de652a +847, 0xe9ab6d64ae42ed1e +848, 0x879af8730e305f31 +849, 0x36b9ad912c7e00d6 +850, 0x83ef5e9fca853886 +851, 0xda54d48bb20ea974 +852, 0x32c6d93aefa92aa2 +853, 0x4e887b2c3391847d +854, 0x50966e815f42b1b8 +855, 0x53411ac087832837 +856, 0x46f64fef79df4f29 +857, 0xb34aae3924cd272c +858, 0xf5ad455869a0adbe +859, 0x8351ded7144edac8 +860, 0xeb558af089677494 +861, 0x36ed71d69293a8d6 +862, 0x659f90bf5431b254 +863, 0x53349102b7519949 +864, 0x3db83e20b1713610 +865, 0x6d63f96090556254 +866, 0x4cc0467e8f45c645 +867, 0xb8840c4bd5cd4091 +868, 0xbd381463cc93d584 +869, 0x203410d878c2066d +870, 0x2ebea06213cf71c8 +871, 0x598e8fb75e3fceb4 +872, 0xdcca41ceba0fce02 +873, 0x61bf69212b56aae5 +874, 0x97eed7f70c9114fa +875, 0xf46f37a8b7a063f9 +876, 
0x66c8f4ffe5bd6efa +877, 0xe43fd6efda2d4e32 +878, 0x12d6c799e5ad01de +879, 0x9ac83e7f8b709360 +880, 0xbbb7bb3c1957513d +881, 0x7f87c08d4b3796b0 +882, 0x9a7d1d74b6aa4a5c +883, 0xa4314530ff741b6f +884, 0x99a80c6b6f15fca8 +885, 0xd2fec81d6d5fc3ce +886, 0x15a98be1cc40cea +887, 0x98693eb7719366f3 +888, 0x36ccdc2a9e9d4de8 +889, 0x3c8208f63d77df25 +890, 0xca2e376e2343df6 +891, 0xcc9b17cbb54420c6 +892, 0x8724c44a64d7dcb8 +893, 0x9d00c6949ff33869 +894, 0xf4f8e584d2699372 +895, 0x88f4748cdd5a2d53 +896, 0xe215072a1205bc6d +897, 0x190934fe6d740442 +898, 0x7fac5c0ab2af106d +899, 0x1b86633a0bd84fa1 +900, 0x1293e54318492dfb +901, 0x433324fd390f34b9 +902, 0x4c5eb2c67a44643b +903, 0x59a6e281c388b0dd +904, 0xe78e03f9c44623b7 +905, 0x91307a93c768fc3d +906, 0xde8867b004d8e3ff +907, 0xdf52c3f57b7c5862 +908, 0x993f3e1d10358a92 +909, 0x9ccb10bc3e18662d +910, 0x45093ce48a114c73 +911, 0xd59d05979d26330a +912, 0x417c0e03300119a9 +913, 0x1c336500f90cde81 +914, 0x1c8ccd29ead9b85b +915, 0xb76baf3e55d4d950 +916, 0x133ad6196c75fd7e +917, 0x34200b0cde7ed560 +918, 0x9c7c3dacb213c8d9 +919, 0xd97563c4fd9bf1b6 +920, 0x5d910e871835b6cb +921, 0x7d46c4733a16bdf9 +922, 0xe41d73194ddc87b2 +923, 0x7d3d8a0855a465a9 +924, 0x70c2a8b5d3f90c0f +925, 0x9e7565ca5dccfe12 +926, 0x2c0acb4577aa51b1 +927, 0x3d2cd211145b79c7 +928, 0x15a7b17aa6da7732 +929, 0xab44a3730c27d780 +930, 0xf008bd6c802bde3a +931, 0x82ed86ddf3619f77 +932, 0xaabe982ab15c49f9 +933, 0x9bcad8fa6d8e58a4 +934, 0x8f39ed8243718aa1 +935, 0xe9489340e03e3cb6 +936, 0xc722314f5eefb8d0 +937, 0x870e8869a436df59 +938, 0x4dae75b8087a8204 +939, 0xe1d790f6ec6e425b +940, 0xafd39ea1b1d0ed09 +941, 0xdf2c99e464ddf08f +942, 0x74936d859ab9644d +943, 0x3871302164250e73 +944, 0x764b68921e911886 +945, 0x2a1d024b26bb9d66 +946, 0x797fba43918e75b4 +947, 0x62ec6d24ccca335b +948, 0xf4bd8b951762b520 +949, 0x9d450dede9119397 +950, 0x5393a26d10f8c124 +951, 0x6b74769392896b57 +952, 0x7f61dbcc0e328581 +953, 0x64e1df3884d0d94 +954, 0xba77dcdf23738c37 +955, 0xf8e288bc0a177475 +956, 0x4a8abfd1702ecb7d +957, 0x53f22886694736a7 +958, 0x8fc982597ced3e3 +959, 0x1bc46090f820fff7 +960, 0x8bd31f965d02229f +961, 0x65cd0cb29996ee53 +962, 0x702e0f4fcf8c2e9f +963, 0x293b77bff307a9a0 +964, 0x125a986b8b305788 +965, 0x416b0eea428ebf3c +966, 0xeac85421ab0e8469 +967, 0x7f5496095019aa68 +968, 0x1a96d7afbc708e0 +969, 0xb91262e6766e01e1 +970, 0xd0a549cc4ccc6954 +971, 0x75a9a073f50c8a0d +972, 0xae275d2c1c6cd23c +973, 0xcf159b5ec5d28fd4 +974, 0x75d0838ce9b92b +975, 0xd4eddcee6dc4677f +976, 0x6a0a8ad5df6b75b8 +977, 0x6f3fd0ef0f13ecc4 +978, 0xb75a5826c1a8f8a8 +979, 0xd47098bbc7943766 +980, 0x3d4ddd62d5f23dd1 +981, 0x760a904e4583841c +982, 0x2afeb5022b4cf1f +983, 0x66d5f653729f0a13 +984, 0x9a6a5ab62980d30f +985, 0xc332f5643bbf8d5b +986, 0x848fb702e4056a90 +987, 0xa057beaf3f9e8c5f +988, 0x6cc603e4560a6c6a +989, 0xec761811a7b23211 +990, 0xb14aa4090a82aaa5 +991, 0xe29d9d028a5b2dbb +992, 0x5564e53738d68f97 +993, 0xfabca36542eaaf3b +994, 0xb9912fcb782020a2 +995, 0xe865e01b349284fd +996, 0x540b5ff11c5f9274 +997, 0x3463f64e1e7451dc +998, 0xe15d3e2f33b735f8 +999, 0xf5433336eadef6e diff --git a/MLPY/Lib/site-packages/numpy/random/tests/data/sfc64-testset-2.csv b/MLPY/Lib/site-packages/numpy/random/tests/data/sfc64-testset-2.csv new file mode 100644 index 0000000000000000000000000000000000000000..55712be6efc0a92c53311eaefb0b0f4fa336a455 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/tests/data/sfc64-testset-2.csv @@ -0,0 +1,1001 @@ +seed, 0x0 +0, 0x91959e5fb96a6332 +1, 0x3c1dd8a25a7e9f21 +2, 0x657bdffc99798d9e +3, 0x1a04de320b19e022 
+4, 0x65b92af0e5f3c61c +5, 0x9c84070ce8f743c0 +6, 0xbb10e573693cdb25 +7, 0xd65ea9e76b37fb6b +8, 0x503efd0e76c8ae66 +9, 0xd711dcd04c26d0f +10, 0x12f53f435814ac8c +11, 0xb392cd402cfc82bd +12, 0x461764550e06c889 +13, 0x716a48b3514e6979 +14, 0xdd0a322213c18ad7 +15, 0x6673a8ca0a05c4d7 +16, 0x2992ef333437f844 +17, 0xc4aaf7e8240b2aad +18, 0x6ab0a1af1f41474f +19, 0xb0bae400c226941d +20, 0xe5f80c2eeeab48c6 +21, 0x3832c6a93a4024bf +22, 0x280bd824fabe8368 +23, 0x66b626228321e5ff +24, 0xe0bdfba5325a307e +25, 0x3a5f65c6ef254e05 +26, 0x99ea12503cb02f94 +27, 0x5d01fd2db77d420b +28, 0x6959bf5f36b2368d +29, 0xd856e30c62b5f5be +30, 0xe33233e1d8140e66 +31, 0xb78be619d415fa8d +32, 0x4f943bb2cc63d3b +33, 0x9b1460b290952d81 +34, 0x19205d794826740e +35, 0x64617bd9d7a6a1ff +36, 0x30442124b55ea76a +37, 0xebbbc3b29d0333fc +38, 0x39235a0fe359751c +39, 0xf9629768891121aa +40, 0x32052f53f366e05a +41, 0x60cc5b412c925bc8 +42, 0xf8b7ecda1c0e5a9 +43, 0x195f036e170a2568 +44, 0xfe06d0381a9ca782 +45, 0x919d89e8b88eebbf +46, 0xa47fb30148cf0d43 +47, 0x5c983e99d5f9fd56 +48, 0xe7492cdb6a1d42cd +49, 0xf9cfe5c865b0cfd8 +50, 0x35b653367bbc3b99 +51, 0xb1d92f6f4d4e440b +52, 0x737e1d5bd87ed9c0 +53, 0x7a880ca1498f8e17 +54, 0x687dae8494f9a3f7 +55, 0x6bae1989f441d5d7 +56, 0x71ad3fa5a9195c2e +57, 0x16b3969779f5d03 +58, 0xd1bce2ac973f15b3 +59, 0xa114b1ee2ce0dcdd +60, 0x270d75c11eb1b8d5 +61, 0xc48ffa087c0a7bc +62, 0xaaf9dc48cda9848d +63, 0x8111cf10ef6e584d +64, 0x6736df6af40ee6f4 +65, 0x1a1a111682fbf98d +66, 0xeb217658e1cb3b5d +67, 0xcaf58a8b79de9dec +68, 0x25d0ffd63c88d7a1 +69, 0x4c498cd871b7f176 +70, 0x4069a6156eb0cf3c +71, 0xdf012f12edcdd867 +72, 0x7734c0ac8edb1689 +73, 0xed6960ac53dbc245 +74, 0x305e20da8868c661 +75, 0x5f0c7a3719956f95 +76, 0x66842bbe3b28895 +77, 0xb608bc9a31eac410 +78, 0xfcb17d5529503abd +79, 0x829ae5cbc29b92ee +80, 0x17f2f0027bc24f3a +81, 0x435926c33d8f44cc +82, 0x3ab899327098dbec +83, 0xaf78573b27f8ead8 +84, 0xa8b334fabcf8dc60 +85, 0xcdf3b366a6a303db +86, 0x8da9379dd62b34c8 +87, 0xb0ba511955f264a7 +88, 0x9d72e21a644f961d +89, 0xfac28382e2e7e710 +90, 0xd457065f048410aa +91, 0x1cae57d952563969 +92, 0x5a160a6223253e03 +93, 0x2c45df736d73c8bd +94, 0x7f651ebc6ad9cec5 +95, 0x77a6be96c7d2e7e7 +96, 0x1721fb1dbfd6546a +97, 0xf73f433ecff3c997 +98, 0xed1e80f680965bfe +99, 0x6705ad67a3003b30 +100, 0xac21134efcadb9f7 +101, 0x4d2ba0a91d456ac +102, 0x59da7b59434eb52b +103, 0x26c1d070fd414b5f +104, 0xed7079ddfce83d9a +105, 0x9277d21f88e0fb7a +106, 0xfae16b9a8d53d282 +107, 0xb08a0e2e405fdf7d +108, 0x2ea20df44229d6ec +109, 0x80e4634cd3612825 +110, 0xbe62e8aeba8f8a1a +111, 0x4981209769c190fb +112, 0xcec96ef14c7e1f65 +113, 0x73fe4457b47e7b53 +114, 0x1d66300677315c31 +115, 0xe26821290498c4cc +116, 0xf6110248fd8fb1c5 +117, 0x30fd7fe32dbd8be3 +118, 0x534ec9b910a2bd72 +119, 0x8f9bfe878bbf7382 +120, 0x4f4eb5295c0c2193 +121, 0xdeb22f03a913be9e +122, 0x40f716f8e2a8886c +123, 0xc65007d0e386cdb1 +124, 0x9bdd26d92b143a14 +125, 0xf644b0b77ea44625 +126, 0x75f5a53f6b01993a +127, 0xfe803e347bf41010 +128, 0x594bff5fa17bc360 +129, 0x3551edfb450373c7 +130, 0x898f9dad433615db +131, 0x923d2406daa26d49 +132, 0x99e07faccbc33426 +133, 0x7389f9ff4470f807 +134, 0xdc2a25957c6df90b +135, 0x33c6d8965ef3053f +136, 0x51a8f07e838f1ab +137, 0x91c5db369380274f +138, 0xc37de65ac56b207e +139, 0xfcc6d2375dde7f14 +140, 0xa4e6418bff505958 +141, 0x4b8b9f78e46953c4 +142, 0x255ab2e0f93cf278 +143, 0xdf650717af3d96ef +144, 0x2caa21cba3aae2b2 +145, 0xce7e46c6f393daa4 +146, 0x1d5b3573f9997ac7 +147, 0x5280c556e850847d +148, 0x32edc31bef920ad7 +149, 0xefaa6b0b08cf2c6 +150, 
0x5151c99d97b111c5 +151, 0x35ccf4bf53d17590 +152, 0xa210d7bd8697b385 +153, 0xa9419f95738fbe61 +154, 0xdeccf93a1a4fdc90 +155, 0xd0ea3365b18e7a05 +156, 0x84122df6dcd31b9a +157, 0x33040a2125cea5f5 +158, 0xfe18306a862f6d86 +159, 0xdb97c8392e5c4457 +160, 0xc3e0fa735e80e422 +161, 0x7d106ff36467a0c1 +162, 0xb9825eecc720a76d +163, 0x7fefc6f771647081 +164, 0xf5df3f5b3977bf13 +165, 0x18fb22736d36f1e0 +166, 0xadc4637b4953abfc +167, 0x174e66d3e17974bd +168, 0xf1614c51df4db5db +169, 0x6664ecde5717b293 +170, 0xd5bc5b6839265c26 +171, 0xf6ca9ce1af3f1832 +172, 0xca696789a9d506ea +173, 0x7399c246c8f9d53 +174, 0xadf49049626417e2 +175, 0xbcd84af37d09ab91 +176, 0xbb41c177f3a3fa45 +177, 0x592becc814d55302 +178, 0xa88b4e65f6cfe5f7 +179, 0xa0a55e34ff879426 +180, 0x3c2ea6aa725b42b7 +181, 0x65ac4a407b1f9521 +182, 0xde63d53f7e88b556 +183, 0x18bc76696d015f40 +184, 0xd1363f2cd4c116a8 +185, 0x2fe859be19a48e4a +186, 0x83d6099b1415e656 +187, 0x43f2cbc1a4ee6410 +188, 0xb2eca3d3421c533d +189, 0xc52b98ea3f031f5d +190, 0xfe57eb01da07e9d1 +191, 0xf9377883537a6031 +192, 0x364030c05dac7add +193, 0x6815cb06b35d4404 +194, 0xceae2d4ce31894be +195, 0xc602bcdf6062bf6a +196, 0xc8e4bd8dcc6062e3 +197, 0x9c29e87b92a1a791 +198, 0x41e626b871ca9651 +199, 0x325c3d1fb8efbcd8 +200, 0x7dbbacf8e3419fb3 +201, 0x3602e72516bb7319 +202, 0x537a008ebd94d24b +203, 0xda7714fc9d4d161d +204, 0x1c8c73700e1b621b +205, 0x2749b80937d6c939 +206, 0x76ee6abac5b14d33 +207, 0xf18d1e92cb6a8b5c +208, 0x6ce9579d9291c721 +209, 0x60523c745a40e58 +210, 0x637f837fcc901757 +211, 0x2ff71b19661dc5b3 +212, 0x393ab586326ad16f +213, 0xa0970ea30fe742b7 +214, 0x570222d7f27fe5ae +215, 0x3b5806d43fd38629 +216, 0x129a0ad7420180c5 +217, 0x1c4726355778d52c +218, 0x7c1459cf77656499 +219, 0xfe038a0932132069 +220, 0x4c4cc317a937483a +221, 0xa333d24067e926ba +222, 0x401d9b6ab37f6ef2 +223, 0x87ad0e491ebe4a2a +224, 0xfc02f312e72d121d +225, 0xfde715b3b99767b2 +226, 0xd111c342ba521c92 +227, 0x83b221b10879c617 +228, 0x6a1bf5c01fdf4277 +229, 0x166bfc0c3f5892ee +230, 0x4608d556d7c57856 +231, 0x8d786857c95ece49 +232, 0x2d357445a1aca4ac +233, 0x79620dae28ecd796 +234, 0x90e715dc0f2201c4 +235, 0x173b68b4c9f4b665 +236, 0x4e14d040ebac4eef +237, 0xbd25960b4b892e +238, 0x911a199db6f1989d +239, 0xfe822d7c601fd2e0 +240, 0x9b4c1d58d8223a69 +241, 0x907c1891283843b0 +242, 0xf4868bf54061c4b2 +243, 0x17f8cd1fc24efd85 +244, 0xd44253f9af14c3aa +245, 0x16d0da0cb911d43c +246, 0x3c6a46615828e79a +247, 0x498591c1138e11a5 +248, 0xcc0f26336d0d6141 +249, 0x4d3ebc873212309a +250, 0x16bad7792d5c2c6a +251, 0x474215a80b2bbd11 +252, 0x7159848abd8492fc +253, 0x359341c50973685f +254, 0x27512ee7bf784a4a +255, 0x45228ea080f70447 +256, 0x880cab616500d50e +257, 0x12fae93f9830d56e +258, 0x6744ee64348d9acd +259, 0x484dada28cd2a828 +260, 0x98491d0729e41863 +261, 0x2f15aac43c2863b0 +262, 0x5727a34d77a1da0f +263, 0xa435cebef6a62eed +264, 0xd211697d57b053b0 +265, 0x65aa757b68bd557 +266, 0xe3a1b7a2d8a3e06a +267, 0x2adf64e67252a7a9 +268, 0xadadcb75cadee276 +269, 0x7934bc57ac8d97bf +270, 0xccff0d0f412e0606 +271, 0x101a82aa3e8f3db9 +272, 0xb0f2498094b4575c +273, 0xba2561d9ef26ed8a +274, 0xfbcd1268fc3febe1 +275, 0x9aa10bb19eb152e0 +276, 0xf496217a601a6d72 +277, 0xe4be1e4f2fa91363 +278, 0x473a602bf3dd68eb +279, 0xfe8ed2a48c26f4b5 +280, 0x20e94b1a00159476 +281, 0x93e1cb1c6af86ec7 +282, 0x4fcba3898f7442ba +283, 0x5150c3a3d94891df +284, 0x91cfce6c85b033ea +285, 0x625e8a832a806491 +286, 0x28c97ba72e3ec0b2 +287, 0x8e172de217c71ea1 +288, 0x926b80216c732639 +289, 0x28b19431a649ae3d +290, 0x57c039a6e95a3795 +291, 0xfbc354182fe52718 +292, 
0x819dfd7c7d534cef +293, 0xabb4093a619ed44f +294, 0xe785b7ac6f656745 +295, 0xb647b4588b2f942f +296, 0x64cf870a14c72d27 +297, 0x6d4a4a2a0ba9b37e +298, 0x78bfb0427d7ce6b0 +299, 0x8dcc72b8bfc79ac6 +300, 0x1c14d915d5e76c99 +301, 0xaf48ddea6f096d79 +302, 0x51b39b67aa130d8 +303, 0x1aeeb39d4def06de +304, 0xd678092ffedfdd27 +305, 0x8f54787f325111d3 +306, 0xf2ca2e827beaa6bc +307, 0x339d134099e98545 +308, 0x1f6a8a7b33942e43 +309, 0x952c8065dbef669a +310, 0xe066aeb6690147f7 +311, 0xed25aa92cf58ebb6 +312, 0x7601edce215ef521 +313, 0xed1c5b396abd9434 +314, 0x4fd1e407535de9d5 +315, 0xccc8315a0d4d1441 +316, 0x85753e250bb86976 +317, 0xf232e469378761c3 +318, 0x81d691b8e9aef3c6 +319, 0x224a2f9cab0ad0e +320, 0x978f3d3e50007f4e +321, 0xd3713e6a6c0cbe60 +322, 0xcce8f1eadd41f80d +323, 0x34bda028a97d469 +324, 0x90e242fdf0f59183 +325, 0x4d749754fbc5f092 +326, 0x4399f5b7851cc87b +327, 0xcb921a5f25f6c5d7 +328, 0x120bf5d0162101 +329, 0x1304cc2aa352735a +330, 0xf7236c5d0d5d417b +331, 0xc31b320fc1654306 +332, 0xb468c6b23f3fb4e7 +333, 0xb5985b5bfaca4166 +334, 0x898285a1cd2f8375 +335, 0xa13493da372aa7c9 +336, 0x15c80c09c12634e7 +337, 0x9b765c5cc9d438bd +338, 0xee7da816a9201dcb +339, 0x92e269f73b5a248e +340, 0xa8086c5de81400ce +341, 0xe0053901853d42be +342, 0x821df32c012f433e +343, 0x17a6d69ca37387c7 +344, 0x2b10044bfba3501f +345, 0x8dfd262afc2e8515 +346, 0xd68c2c7b60226371 +347, 0xe81ac114e4416774 +348, 0x5896d60061ebc471 +349, 0xa996e3147811dbd1 +350, 0xa819c7b80ecb3661 +351, 0x982ad71b38afbc01 +352, 0xab152b65aa17b7fe +353, 0x4582bc282ef187ef +354, 0xab5a17fe8d9bc669 +355, 0x83664fa9cb0284b7 +356, 0x234c4b0091968f52 +357, 0x8ab5f51805688d37 +358, 0xe9e11186e0c53eda +359, 0x10df37ef1de2eccf +360, 0x780f1b0d52db968f +361, 0x50bd4ff292872cd5 +362, 0x51e681c265f5ad0 +363, 0x842c49660a527566 +364, 0x6e56ee026e9eda87 +365, 0x4cf39e40d8c80393 +366, 0x13e466df371f7e1f +367, 0xf2ce1799f38e028e +368, 0x833c8db7adc6ff0e +369, 0xc6e189abc2ec98f +370, 0xafebb3721283fec5 +371, 0xb49bc1eb5cc17bdc +372, 0xf1d02e818f5e4488 +373, 0xe5e9d5b41a1dd815 +374, 0xce8aca6573b1bfe5 +375, 0x9b0a5d70e268b1d5 +376, 0xf3c0503a8358f4de +377, 0x2681605dd755669d +378, 0xea265ca7601efc70 +379, 0xa93747f0a159439f +380, 0x62a86ede78a23e50 +381, 0xac8a18935c3d063c +382, 0x729c0a298f5059f5 +383, 0xbbf195e5b54399f4 +384, 0x38aa9d551f968900 +385, 0x3b3e700c58778caa +386, 0x68e6e33c4443957a +387, 0x7c56fc13eb269815 +388, 0xaf7daca39711804a +389, 0x50fde6d10f9544b3 +390, 0xf3d37159f6f6c03d +391, 0x82d298f5c1a71685 +392, 0x478661ac54c5002c +393, 0x6053768e1a324ae0 +394, 0xde8fb4a7e56707ea +395, 0xaa2809301faa8cf4 +396, 0x690a8d49fedd0722 +397, 0xe17c481b9c217de9 +398, 0x60d1d8a2b57288e3 +399, 0x149adfaadc6b0886 +400, 0xa3c18b6eb79cd5fa +401, 0x5774e3a091af5f58 +402, 0x2acca57ff30e5712 +403, 0x94454d67367c4b0c +404, 0x581b2985ac2df5ca +405, 0x71618e50744f3e70 +406, 0x270a7f3bd9a94ae6 +407, 0x3ef81af9bb36cd7b +408, 0x8a4a2592875254aa +409, 0x704ac6086fbb414a +410, 0xda774d5d3f57414d +411, 0xe20d3358b918ae9e +412, 0x934a6b9f7b91e247 +413, 0xf91649cde87ec42c +414, 0x248cec5f9b6ced30 +415, 0x56791809fd8d64ba +416, 0xf502b2765c1395f +417, 0x6b04ec973d75aa7f +418, 0xb0339f2794bb26f +419, 0x4c524636efbaea49 +420, 0x6bbf3876e9738748 +421, 0xf686524e754e9e24 +422, 0x8dafa05a42d19cd3 +423, 0xc5f069ab2434008e +424, 0x4fd64cc713cba76 +425, 0xdbf93450c881ed5f +426, 0x492e278ebabb59a2 +427, 0x993fddfde4542642 +428, 0xecde68a72c8d4e52 +429, 0xe0760b3074c311fd +430, 0x68dc0e7e06528707 +431, 0x52b50edf49c0fdc7 +432, 0xb2bd4185c138f412 +433, 0x431496d7e1d86f3 +434, 
0xa4e605b037e26c44 +435, 0x58236ae1f0aca2b5 +436, 0x26c72c420fc314d8 +437, 0x20134e982ab99a2b +438, 0x544b59b8b211374b +439, 0x1301c42f3a14d993 +440, 0x52a6ea740f763b0f +441, 0xf209d70c2bebf119 +442, 0xac66a4ebc2aa1be +443, 0x683713ed35878788 +444, 0x2b5578acec06b80c +445, 0x86428efa11c45b36 +446, 0xb49010adb17d291e +447, 0x73b686bd8664b6be +448, 0x6d28ebf57b6884cc +449, 0x9712091230ff58d9 +450, 0xc9c91f74c38b286 +451, 0x776310ac41dc008e +452, 0x2f3739df0bf6a88e +453, 0x5792dc62b94db675 +454, 0x5715910d024b06af +455, 0xeb1dd745458da08 +456, 0xfce7b07ccfa851a7 +457, 0xc305f1e983ac368 +458, 0x485aa9519ac00bb0 +459, 0xa5354f6589fb0ea0 +460, 0x32fee02dfdbf4454 +461, 0x4d1ddc304bbefaaa +462, 0x789a270a1737e57e +463, 0x9f3072f4b1ed8156 +464, 0x4de3c00e89058120 +465, 0xb00a02529e0a86fa +466, 0x539f6f0edd845d9a +467, 0x85e578fe15a8c001 +468, 0xa12c8e1a72cce7d8 +469, 0xc6908abbc2b1828 +470, 0xcf70090774cbb38c +471, 0x3b636a6977b45d4a +472, 0xf0a731b220680b57 +473, 0x18973929f51443a8 +474, 0xe93e1fbe7eadabe +475, 0x8233730f0a6dfa02 +476, 0x66e50b6919b0ab74 +477, 0xb1aba87c97fd08a2 +478, 0xd4dffc1fbc117ad6 +479, 0x6f7fa65724b96e6a +480, 0x4bd5800dee92e0fa +481, 0xe18a959db6256da +482, 0xe53a291bc66df487 +483, 0xb7ec306a08651806 +484, 0x1847a6b80d2821e1 +485, 0xda50391283b14d39 +486, 0xacc4d3cd7cceb97a +487, 0x57f70185165b7bc6 +488, 0x302b6d597c3aaba7 +489, 0xa47f32d037eab51e +490, 0xe1509b4408abc559 +491, 0x4f30a1d7c2934157 +492, 0x2ad03e6c60b650b2 +493, 0x334d9c337b0a9064 +494, 0xc7f442821e7aac12 +495, 0xbcdeb09298694cdd +496, 0xe42402389f8f0fb4 +497, 0xe5de56af539df727 +498, 0x7017f9b2101ee240 +499, 0x1ee5e68d5b10001d +500, 0x436229051836387a +501, 0xcd532d6d6ec38fb7 +502, 0x30a66606fdf38272 +503, 0xfdaa2ab9cf798496 +504, 0x4277b4adec70e7df +505, 0x72cfc30256e0eaef +506, 0x3c3359fd9bd34917 +507, 0xb7aa89598856efb0 +508, 0xf72226f8bf299ef5 +509, 0x258c499275a4356f +510, 0x999a56bfc7f20d76 +511, 0x2b3e7432e20c18b +512, 0x2d1251332f760cb5 +513, 0x7420e0eea62157c5 +514, 0xe85c895aa27cec3d +515, 0x27a0545c7020d57c +516, 0xc68638a65b4fff0d +517, 0xfda473983a4ea747 +518, 0xd19fe65fb4c06062 +519, 0x6b1374e050ee15e4 +520, 0x80065ecd49bc4bef +521, 0x4ee655954bc838de +522, 0xe8fb777504a72299 +523, 0x86b652ea70f4bdde +524, 0xcdc9e0fbde7e4f33 +525, 0x352c0a50cd3ac56 +526, 0x4b8605d368be75dc +527, 0x1ac9ea8129efbc37 +528, 0x470325faa99f39c5 +529, 0x25dd7ef9adccf7a1 +530, 0x5ae2c7a03e965816 +531, 0xf733d2df59dacc7d +532, 0xa05bbf0a8a1a7a70 +533, 0xe8aa3f102846ef5f +534, 0xc9b85ec49ae71789 +535, 0xb904c14ed1cb1936 +536, 0x5ae618230b5f0444 +537, 0x97987fe47b5d7467 +538, 0xabb3aca8865ca761 +539, 0x38bfdf29d4508228 +540, 0x353654f408353330 +541, 0xeb7e92930ae4ef0d +542, 0xec50f1a7ca526b96 +543, 0xd5e2dc08b5697544 +544, 0x24c7fd69d5ec32df +545, 0x6f7e1095568b8620 +546, 0x6ed9c16ca13b3c8 +547, 0xe676ef460002130f +548, 0xa3a01a3992c4b430 +549, 0xe2130406c3b1f202 +550, 0xa8f7263e2aedcd20 +551, 0xc45d71ef2e35f507 +552, 0x37155594021da7ba +553, 0x22dc94f19de73159 +554, 0x7969fc6bffc5443f +555, 0x97def7e44faa6bfe +556, 0x8b940f5e8931d71f +557, 0xd95b1dd3f1a3fdd5 +558, 0x1c83bfdca615701a +559, 0xb7fcb56279ceca6b +560, 0xd84f8950f20dcd0 +561, 0xb03343698de3cbe0 +562, 0xf64565d448d71f71 +563, 0xda52b4676e0ae662 +564, 0xda39c2c05b4ffb91 +565, 0xb35e2560421f6a85 +566, 0x1a7b108d48ac3646 +567, 0xc4e264dc390d79ed +568, 0xa10727dfd9813256 +569, 0x40d23154e720e4f7 +570, 0xd9fa7cd7e313e119 +571, 0xcbf29107859e6013 +572, 0xc357338553d940b7 +573, 0x2641b7ab0bdfcbaa +574, 0xd12f2b6060533ae7 +575, 0xd0435aa626411c56 +576, 
0x44af4a488a9cec72 +577, 0xb934232ea8fa5696 +578, 0x760a8b12072b572d +579, 0xfab18f9942cfa9b3 +580, 0x5676834c1fe84d16 +581, 0x9c54e4fddb353236 +582, 0xab49edfc9551f293 +583, 0x567f1fb45a871d +584, 0x32a967c873998834 +585, 0x99240aad380ef8d1 +586, 0x7f66cbd432859a64 +587, 0x4cdc8a4658166822 +588, 0x984e3984a5766492 +589, 0xa3b2d0a3d64d3d94 +590, 0x177f667172f2affc +591, 0xb1a90607a73a303f +592, 0xe600b6c36427f878 +593, 0xf758f9834cb7f466 +594, 0x8ee9fce4a3f36449 +595, 0xcb8f11533e7da347 +596, 0xe7cf647794dabd7c +597, 0xc9d92cfe6110806 +598, 0xea1335fa9145a1ec +599, 0xbc6c29821d094552 +600, 0x37b9d6a858cc8bc3 +601, 0xf24e4c694929893e +602, 0x55d025ce2d7d0004 +603, 0xccdc69acccf4267b +604, 0xc491c04340c222eb +605, 0xba50f75ecec9befb +606, 0x1ec7bd85b8fe3bb9 +607, 0xe4de66498c59ae8a +608, 0x38aa9e912712c889 +609, 0xcee0e43c5cc31566 +610, 0x72b69aa708fc7ed +611, 0xdff70b7f6fa96679 +612, 0xd6d71d82112aadc3 +613, 0x365177892cb78531 +614, 0xa54852b39de4f72c +615, 0x11dd5832bf16dd59 +616, 0x248a0f3369c97097 +617, 0xa14cec0260e26792 +618, 0x3517616ff142bed1 +619, 0x9b693ad39dab7636 +620, 0x739dff825e994434 +621, 0x67711e7356098c9 +622, 0xa81f8515d2fdf458 +623, 0xdac2908113fe568e +624, 0xe99944ebc6e2806a +625, 0x671728ca5b030975 +626, 0xfdad20edb2b4a789 +627, 0xedc6e466bd0369d2 +628, 0x88b5d469821f7e1b +629, 0x2eabf94049a522a5 +630, 0x247794b7a2f5a8e3 +631, 0x278942bdbe02c649 +632, 0xbe5a9a9196ab99c1 +633, 0x75955060866da1b5 +634, 0xdedcfa149273c0b5 +635, 0xdbeb7a57758f3867 +636, 0x7b9053347a2c8d5a +637, 0xa059b3f2eed338a5 +638, 0x59401a46ded3b79f +639, 0x38044ba56a6d19fb +640, 0x72c7221b4e77e779 +641, 0x526df3491a3a34da +642, 0xc3b31184ba16c0c2 +643, 0xd94c7144488624af +644, 0xcf966ee4dc373f91 +645, 0x62049e65dd416266 +646, 0x7c2adccb925bf8f +647, 0xd5fa5c22ed4ef8e1 +648, 0xd00134ebd11f2cd1 +649, 0xfbdf81767bed3634 +650, 0x62e8cc8ff66b6e26 +651, 0x3a72d6bcd4f2dcf7 +652, 0xf1cd45b1b46a86ed +653, 0x1271f98e0938bb9a +654, 0x82e6927e83dc31fa +655, 0x7b9b0e0acb67b92d +656, 0x6df503e397b2e701 +657, 0x93888f6fb561e0c3 +658, 0x393fb6069a40291 +659, 0x967a7d894cc0754d +660, 0x6e298996ad866333 +661, 0x5ff3cf5559d6ab46 +662, 0xd0d70508c40349f5 +663, 0xc64c66c0dd426b33 +664, 0x8fea340ee35c64dd +665, 0xf9cd381eb3060005 +666, 0xfcc37c2799fc0b11 +667, 0x6a37c91d65b489fa +668, 0x57231000fa0a0c9d +669, 0x55f6e292c6703f9a +670, 0xd0508ffbfa55a7a6 +671, 0x885db543276bdac8 +672, 0xc26dbe6a26b0e704 +673, 0x21f884874ebd709e +674, 0x711f0b6c8f732220 +675, 0x354d0a361eaee195 +676, 0x721344d8d30b006a +677, 0xa0e090a0d3a56f07 +678, 0x16b3d5d823a4952b +679, 0x59d7874bc9eae7b6 +680, 0x9bbb32710076455f +681, 0xd4fb22242ffabafd +682, 0xe1d4ac6770be1d89 +683, 0xb259cedebc73dc8a +684, 0x35faaa3b4246ab69 +685, 0x5d26addefdaee89 +686, 0x8e7ec350da0f3545 +687, 0xd0f316eed9f8fc79 +688, 0x98b2a52c9bf291b2 +689, 0xe4d294a8aca6a314 +690, 0x25bd554e6aa7673c +691, 0xcfde5dcba5be2a6c +692, 0xb5e01fb48d2d2107 +693, 0xe1caf28948028536 +694, 0xd434aa0a26f3ee9b +695, 0xd17723381641b8f6 +696, 0xfe73bd1f3f3768a2 +697, 0x1cc6b1abd08d67e9 +698, 0x247e328371a28de0 +699, 0x502e7942e5a9104a +700, 0x6a030fd242eb4502 +701, 0xa2ffe02744014ce8 +702, 0x59290763b18fe04e +703, 0xcf14241564271436 +704, 0xb0fb73c3c1503aff +705, 0x94e27c622f82137a +706, 0x747a5b406ac3e1f0 +707, 0x9a914e96a732031d +708, 0x59f68c6c8f078835 +709, 0x809d012c73eb4724 +710, 0x5b3c3b73e1b37d74 +711, 0xdde60ef3ba49cdf7 +712, 0x87a14e1f9c761986 +713, 0x4109b960604522af +714, 0x122d0e1ed0eb6bb9 +715, 0xadc0d29e80bfe33 +716, 0xa25b1b44f5fc8e4e +717, 0xbab85d8a9b793f20 +718, 
0x825f4cbced0e7d1e +719, 0x2d6ae8807acb37ea +720, 0x8234420adce2e39 +721, 0x4a8ad4da6b804807 +722, 0x1e19f9bc215e5245 +723, 0x1d6f4848a916dd5e +724, 0x9ac40dfcdc2d39cc +725, 0x9f3524e3086155ec +726, 0x861fffc43124b2ef +727, 0xe640e3b756396372 +728, 0x41cb0f0c5e149669 +729, 0xe0bd37e1192e4205 +730, 0x62917d3858f4ce47 +731, 0xa36e7eb4d855820a +732, 0x204b90255a3bf724 +733, 0x66ee83a0175535bc +734, 0x2c14ce7c6b0c1423 +735, 0x85d9495fa514f70d +736, 0x5a4fe45ead874dbc +737, 0xe72248dcb8cfc863 +738, 0xfc21ff2932ed98cd +739, 0xcbba1edd735b5cad +740, 0x91ddc32809679bf5 +741, 0x192cdf2c7631ea1f +742, 0xbbc451ddf2ea286f +743, 0xad9e80cae2397a64 +744, 0x6918f0119b95d0e5 +745, 0xa40379017a27d70a +746, 0x1aaeddb600e61e1 +747, 0x15afd93cbd7adda9 +748, 0x156719bc2b757ff4 +749, 0x13d9a59e2b2df49d +750, 0x9a490986eaddf0a +751, 0xef9a350f0b3eb6b4 +752, 0x5de7f6295ba4fa4d +753, 0x7f37fd087c3fdb49 +754, 0xa9fe3749d6f3f209 +755, 0x50912ac036d9bfb +756, 0x982cb4d726a441f8 +757, 0x8ca8d8af59b872d0 +758, 0x7f8adfb0ceeade8a +759, 0xdad390ec742be44 +760, 0xa637944d0045be5b +761, 0x3569a3b3af807061 +762, 0x9599da8eae14511d +763, 0xc333e8d19589b01a +764, 0xfb9b524a20b571e1 +765, 0xbd9dc8b37ce5c3e1 +766, 0x142333005fa389ac +767, 0x1368bc37cd5bcce1 +768, 0x16094907ad6ecf73 +769, 0xb32c90dbba4c1130 +770, 0x82761d97c1747dd0 +771, 0x599f9f267ae3444d +772, 0x79ad3382994852e1 +773, 0x2511f06d9ef06e54 +774, 0xb35e6ab7d5bbddae +775, 0xfca9fa83a2988732 +776, 0x7d4350f0394ac3ba +777, 0xa52a9527bb176ea3 +778, 0xb49fa0ceb2aa8353 +779, 0x1f62e504d1468cc0 +780, 0xe1a77bfccce6efc3 +781, 0x776cdff4dc0d6797 +782, 0x56612e39b652c1f2 +783, 0x5f096a29294eda04 +784, 0x7978abc3aabd8b23 +785, 0x79dd875e0485b979 +786, 0x8a98aa4d5735d778 +787, 0xcca43940f69d2388 +788, 0xb2d4b156f144f93a +789, 0xbd528a676e9a862 +790, 0x2a394939c8e7ec5e +791, 0xb1da900c6efe4abc +792, 0x9869af479de4c034 +793, 0x78dbdfb88ac7c1db +794, 0x18cb169143088041 +795, 0xe69e5461c51a3e13 +796, 0x5389fa16ea98183c +797, 0xed7c80d1be1ea520 +798, 0x87246fc359758ced +799, 0xab323eba95fae4ed +800, 0xbc4c0dde7f8a1828 +801, 0xdb739f7955610b1a +802, 0xecd8c68c3434cc +803, 0x138c2eb88c477f44 +804, 0x28a65f96727aae41 +805, 0xdee879f2cf5629d +806, 0x684f0c90ef20070f +807, 0xa24a819ef5621800 +808, 0x8d0054f870e4fdcb +809, 0x99e8c6e695b600b +810, 0x50b705245891f7c3 +811, 0xc02eed3a6e58e51a +812, 0x443d64e95443606c +813, 0xca24959cfbd2d120 +814, 0xe072609ea48815bc +815, 0xbcc715026590315b +816, 0x3e76df24d7aa5938 +817, 0xd8ff04940d9b79ae +818, 0x54474ce790059bcd +819, 0x278390dd6aa70e81 +820, 0xf4df619fe35414e4 +821, 0x757d71270264e615 +822, 0x1e8a373699c11b23 +823, 0xef68c82046e67dd6 +824, 0xe280006599972620 +825, 0x234e095183b0f4d6 +826, 0xe3b7560ed9839749 +827, 0xcd5ec4086572332e +828, 0xc41c0d4aaa279108 +829, 0x4b9cd6126bc16a6d +830, 0x4a7252734f3e3dd0 +831, 0xb3132df156cc103a +832, 0xf9e4abbf7b64464a +833, 0xf936df27fb3c47b7 +834, 0x9142960873f6d71a +835, 0x4ba6aa3235cdb10d +836, 0x3237a2e765ba7766 +837, 0xd62f0b94c8e99e54 +838, 0x26b682f90a3ae41b +839, 0x40ad5e82072b6f81 +840, 0xd0198101f5484000 +841, 0xe4fac60ba11c332 +842, 0x472d0b0a95ef9d38 +843, 0x8512557aec5a3d8f +844, 0xef83169d3efd4de9 +845, 0x53fe89283e7a7676 +846, 0x2f50933053d69fc4 +847, 0x76f5e4362e2e53a2 +848, 0x8676fdccce28874a +849, 0x2737764c1fb1f821 +850, 0x4a6f70afc066ab55 +851, 0x27f8e151e310fca4 +852, 0xd606960ccbe85161 +853, 0xcce51d7ddd270a32 +854, 0xb4235999794875c2 +855, 0x580084e358e884 +856, 0x2159d5e6dc8586d7 +857, 0x87bd54d8599b3ba4 +858, 0x3e9ade6a2181664 +859, 0x5e6e140406d97623 +860, 0x511545d5aa0080a2 
+861, 0xf49d78ed219aac57 +862, 0xbece1f9c90b8ea87 +863, 0x1c741cac36a2c514 +864, 0x7453c141047db967 +865, 0xd751832a5037eba2 +866, 0x71370a3f30ada1f7 +867, 0x7c01cf2dcb408631 +868, 0x1052a4fbdccc0fa1 +869, 0x13d525c9df3fb6c +870, 0xa3aa8dbfee760c55 +871, 0xc0288d200f5155cf +872, 0x79f4bcd12af567c3 +873, 0x8160d163bb548755 +874, 0x5cf2995fb69fd2df +875, 0xcc98ed01396639df +876, 0xad95f1d9cfc8256e +877, 0xa3df27d9fbdbfb9d +878, 0x83e5f5dda4d52929 +879, 0x9adc05043009f55b +880, 0xdfe8329dfde1c001 +881, 0x9980ccdd5298e6a2 +882, 0x636a7bd134f6ef56 +883, 0xef5ff780c4be6ba4 +884, 0x290d71dc77a56d16 +885, 0x6d65db9ff58de1e6 +886, 0x944b063b3805a696 +887, 0xce468ca2cce33008 +888, 0x5ba1ccb840f80f48 +889, 0x28ddce36fc9ad268 +890, 0x4f77ef254d507a21 +891, 0xce9b4057fadf3ab +892, 0xb518bc68298730e6 +893, 0xd2eb5b8e2ec665b0 +894, 0xe1583303a4f87344 +895, 0x9d5a0df4fbe1bed5 +896, 0x2ba9bc03ec8cfd07 +897, 0x479ed880a96ca669 +898, 0xcedf96338324771a +899, 0x312f4fc2da41ffaa +900, 0xa0eb9cf23b5e1ed8 +901, 0xf8f88f975dc3f539 +902, 0x4a37e185d0e96e0f +903, 0xf829654a5c0b46f9 +904, 0x3909cca7a7f8c7fb +905, 0x4c2e1d66ceb45105 +906, 0xaffaa19e1db8af87 +907, 0x9ec498246bd18c76 +908, 0x21d51558edc089da +909, 0xe8984112cd1b1561 +910, 0x7de1d2cf54b0c0e1 +911, 0xa06729aed50bfb9d +912, 0xcf19f733e5db19e1 +913, 0x70edf2624ab777cd +914, 0x46685becad10e078 +915, 0x825e0f6add46785 +916, 0x66d4af3b15f70de4 +917, 0xc676614b0666b21 +918, 0x282a916c864f5cb7 +919, 0x2707283a3f512167 +920, 0x37ff3afda7461623 +921, 0xc767eb1205e4ca86 +922, 0x46b359aecc4ea25b +923, 0x67fbbb797a16dbb1 +924, 0x64fd4ba57122290e +925, 0x8acc2a8ae59d8fac +926, 0x64a49298599acc67 +927, 0xedf00de67177ce30 +928, 0x1ea9d8d7e76d2d2c +929, 0x363fcac323f70eb2 +930, 0x19e6e3ec8a9712eb +931, 0xca541e96b0961f09 +932, 0x4d8fd34c2822ec46 +933, 0x2fdd56a50b32f705 +934, 0xaac2fcf251e3fd3 +935, 0xb0c600299e57045c +936, 0xd951ec589e909e38 +937, 0x4dc8414390cae508 +938, 0x537ef9d5e2321344 +939, 0xa57bc21fd31aa2dc +940, 0xa3a60df564183750 +941, 0xbe69a5ce2e369fb6 +942, 0x7744601f4c053ec8 +943, 0x3838452af42f2612 +944, 0xd4f0dad7115a54e9 +945, 0x629cf68d8009a624 +946, 0x2211c8fa34cb98cb +947, 0x8040b19e2213db83 +948, 0xb2a86d3ba2384fd +949, 0x4b85cec4f93f0dab +950, 0xc8d212d21ea6845d +951, 0x5b271a03a4fe2be0 +952, 0xff4f671319ad8434 +953, 0x8e615a919d5afa96 +954, 0xea7f47c53161160a +955, 0x33273930b13c6efc +956, 0x98eedda27fb59c3c +957, 0x188dc5e92e939677 +958, 0x9dbd0fa0911430f1 +959, 0x5b3dcf3fa75dfd2b +960, 0x3f03846febdb275d +961, 0x20cc24faea9e9cf6 +962, 0x854f3ac66199ff5d +963, 0x31169ac99d341e6f +964, 0xa85daed3c0bc1bbe +965, 0x64633711e71ba5dd +966, 0x530e79978dc73334 +967, 0x636f2ee6e20aef13 +968, 0xf6220f8b6d9a58fb +969, 0x425db8fa32141a7b +970, 0xac7c210f4b02be95 +971, 0x5fe8cfbe197a7754 +972, 0xfff7d40c79420ea +973, 0x5f8bab9ef4697b77 +974, 0xaf6fe54e45b23fe8 +975, 0xce79456ccc70bbce +976, 0x645ef680f48f1c00 +977, 0xa4dfac46e2028595 +978, 0x6bece4c41effc5df +979, 0xd316df886442641f +980, 0xa4f6ff994edd2a6 +981, 0x30281ae3cc49abe4 +982, 0x39acb7b663dea974 +983, 0x5e8829b01a7c06fb +984, 0x87bdb08cf027f13e +985, 0xdfa5ede784e802f6 +986, 0x46d03d55711c38cc +987, 0xa55a961fc9788306 +988, 0xbf09ded495a2e57a +989, 0xcd601b29a639cc16 +990, 0x2193ce026bfd1085 +991, 0x25ba27f3f225be13 +992, 0x6f685be82f64f2fe +993, 0xec8454108229c450 +994, 0x6e79d8d205447a44 +995, 0x9ed7b6a96b9ccd68 +996, 0xae7134b3b7f8ee37 +997, 0x66963de0e5ebcc02 +998, 0x29c8dcd0d17c423f +999, 0xfb8482c827eb90bc diff --git a/MLPY/Lib/site-packages/numpy/random/tests/test_direct.py 
b/MLPY/Lib/site-packages/numpy/random/tests/test_direct.py new file mode 100644 index 0000000000000000000000000000000000000000..f827898cf7ca435e1893bd975d3726e52ebffe0a --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/tests/test_direct.py @@ -0,0 +1,453 @@
+import os
+from os.path import join
+import sys
+
+import numpy as np
+from numpy.testing import (assert_equal, assert_allclose, assert_array_equal,
+                           assert_raises)
+import pytest
+
+from numpy.random import (
+    Generator, MT19937, PCG64, PCG64DXSM, Philox, RandomState, SeedSequence,
+    SFC64, default_rng
+)
+from numpy.random._common import interface
+
+try:
+    import cffi  # noqa: F401
+
+    MISSING_CFFI = False
+except ImportError:
+    MISSING_CFFI = True
+
+try:
+    import ctypes  # noqa: F401
+
+    MISSING_CTYPES = False
+except ImportError:
+    MISSING_CTYPES = True
+
+if sys.flags.optimize > 1:
+    # no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1
+    # cffi cannot succeed
+    MISSING_CFFI = True
+
+
+pwd = os.path.dirname(os.path.abspath(__file__))
+
+
+def assert_state_equal(actual, target):
+    for key in actual:
+        if isinstance(actual[key], dict):
+            assert_state_equal(actual[key], target[key])
+        elif isinstance(actual[key], np.ndarray):
+            assert_array_equal(actual[key], target[key])
+        else:
+            assert actual[key] == target[key]
+
+
+def uniform32_from_uint64(x):
+    x = np.uint64(x)
+    upper = np.array(x >> np.uint64(32), dtype=np.uint32)
+    lower = np.uint64(0xffffffff)
+    lower = np.array(x & lower, dtype=np.uint32)
+    joined = np.column_stack([lower, upper]).ravel()
+    out = (joined >> np.uint32(9)) * (1.0 / 2 ** 23)
+    return out.astype(np.float32)
+
+
+def uniform32_from_uint53(x):
+    x = np.uint64(x) >> np.uint64(16)
+    x = np.uint32(x & np.uint64(0xffffffff))
+    out = (x >> np.uint32(9)) * (1.0 / 2 ** 23)
+    return out.astype(np.float32)
+
+
+def uniform32_from_uint32(x):
+    return (x >> np.uint32(9)) * (1.0 / 2 ** 23)
+
+
+def uniform32_from_uint(x, bits):
+    if bits == 64:
+        return uniform32_from_uint64(x)
+    elif bits == 53:
+        return uniform32_from_uint53(x)
+    elif bits == 32:
+        return uniform32_from_uint32(x)
+    else:
+        raise NotImplementedError
+
+
+def uniform_from_uint(x, bits):
+    if bits in (64, 63, 53):
+        return uniform_from_uint64(x)
+    elif bits == 32:
+        return uniform_from_uint32(x)
+
+
+def uniform_from_uint64(x):
+    return (x >> np.uint64(11)) * (1.0 / 9007199254740992.0)
+
+
+def uniform_from_uint32(x):
+    out = np.empty(len(x) // 2)
+    for i in range(0, len(x), 2):
+        a = x[i] >> 5
+        b = x[i + 1] >> 6
+        out[i // 2] = (a * 67108864.0 + b) / 9007199254740992.0
+    return out
+
+
+def uniform_from_dsfmt(x):
+    return x.view(np.double) - 1.0
+
+
+def gauss_from_uint(x, n, bits):
+    if bits in (64, 63):
+        doubles = uniform_from_uint64(x)
+    elif bits == 32:
+        doubles = uniform_from_uint32(x)
+    else:  # bits == 'dsfmt'
+        doubles = uniform_from_dsfmt(x)
+    gauss = []
+    loc = 0
+    x1 = x2 = 0.0
+    # Marsaglia polar method: draw (x1, x2) uniformly in [-1, 1)^2, reject
+    # pairs outside the unit circle, then transform to normal deviates.
+    while len(gauss) < n:
+        r2 = 2
+        while r2 >= 1.0 or r2 == 0.0:
+            x1 = 2.0 * doubles[loc] - 1.0
+            x2 = 2.0 * doubles[loc + 1] - 1.0
+            r2 = x1 * x1 + x2 * x2
+            loc += 2
+
+        f = np.sqrt(-2.0 * np.log(r2) / r2)
+        gauss.append(f * x2)
+        gauss.append(f * x1)
+
+    return gauss[:n]
+
+def test_seedsequence():
+    from numpy.random.bit_generator import (ISeedSequence,
+                                            ISpawnableSeedSequence,
+                                            SeedlessSeedSequence)
+
+    s1 = SeedSequence(range(10), spawn_key=(1, 2), pool_size=6)
+    s1.spawn(10)
+    s2 = SeedSequence(**s1.state)
+    assert_equal(s1.state, s2.state)
+    assert_equal(s1.n_children_spawned, s2.n_children_spawned)
+
+    # The interfaces cannot
be instantiated themselves. + assert_raises(TypeError, ISeedSequence) + assert_raises(TypeError, ISpawnableSeedSequence) + dummy = SeedlessSeedSequence() + assert_raises(NotImplementedError, dummy.generate_state, 10) + assert len(dummy.spawn(10)) == 10 + + +class Base: + dtype = np.uint64 + data2 = data1 = {} + + @classmethod + def setup_class(cls): + cls.bit_generator = PCG64 + cls.bits = 64 + cls.dtype = np.uint64 + cls.seed_error_type = TypeError + cls.invalid_init_types = [] + cls.invalid_init_values = [] + + @classmethod + def _read_csv(cls, filename): + with open(filename) as csv: + seed = csv.readline() + seed = seed.split(',') + seed = [int(s.strip(), 0) for s in seed[1:]] + data = [] + for line in csv: + data.append(int(line.split(',')[-1].strip(), 0)) + return {'seed': seed, 'data': np.array(data, dtype=cls.dtype)} + + def test_raw(self): + bit_generator = self.bit_generator(*self.data1['seed']) + uints = bit_generator.random_raw(1000) + assert_equal(uints, self.data1['data']) + + bit_generator = self.bit_generator(*self.data1['seed']) + uints = bit_generator.random_raw() + assert_equal(uints, self.data1['data'][0]) + + bit_generator = self.bit_generator(*self.data2['seed']) + uints = bit_generator.random_raw(1000) + assert_equal(uints, self.data2['data']) + + def test_random_raw(self): + bit_generator = self.bit_generator(*self.data1['seed']) + uints = bit_generator.random_raw(output=False) + assert uints is None + uints = bit_generator.random_raw(1000, output=False) + assert uints is None + + def test_gauss_inv(self): + n = 25 + rs = RandomState(self.bit_generator(*self.data1['seed'])) + gauss = rs.standard_normal(n) + assert_allclose(gauss, + gauss_from_uint(self.data1['data'], n, self.bits)) + + rs = RandomState(self.bit_generator(*self.data2['seed'])) + gauss = rs.standard_normal(25) + assert_allclose(gauss, + gauss_from_uint(self.data2['data'], n, self.bits)) + + def test_uniform_double(self): + rs = Generator(self.bit_generator(*self.data1['seed'])) + vals = uniform_from_uint(self.data1['data'], self.bits) + uniforms = rs.random(len(vals)) + assert_allclose(uniforms, vals) + assert_equal(uniforms.dtype, np.float64) + + rs = Generator(self.bit_generator(*self.data2['seed'])) + vals = uniform_from_uint(self.data2['data'], self.bits) + uniforms = rs.random(len(vals)) + assert_allclose(uniforms, vals) + assert_equal(uniforms.dtype, np.float64) + + def test_uniform_float(self): + rs = Generator(self.bit_generator(*self.data1['seed'])) + vals = uniform32_from_uint(self.data1['data'], self.bits) + uniforms = rs.random(len(vals), dtype=np.float32) + assert_allclose(uniforms, vals) + assert_equal(uniforms.dtype, np.float32) + + rs = Generator(self.bit_generator(*self.data2['seed'])) + vals = uniform32_from_uint(self.data2['data'], self.bits) + uniforms = rs.random(len(vals), dtype=np.float32) + assert_allclose(uniforms, vals) + assert_equal(uniforms.dtype, np.float32) + + def test_repr(self): + rs = Generator(self.bit_generator(*self.data1['seed'])) + assert 'Generator' in repr(rs) + assert f'{id(rs):#x}'.upper().replace('X', 'x') in repr(rs) + + def test_str(self): + rs = Generator(self.bit_generator(*self.data1['seed'])) + assert 'Generator' in str(rs) + assert str(self.bit_generator.__name__) in str(rs) + assert f'{id(rs):#x}'.upper().replace('X', 'x') not in str(rs) + + def test_pickle(self): + import pickle + + bit_generator = self.bit_generator(*self.data1['seed']) + state = bit_generator.state + bitgen_pkl = pickle.dumps(bit_generator) + reloaded = pickle.loads(bitgen_pkl) + 
reloaded_state = reloaded.state + assert_array_equal(Generator(bit_generator).standard_normal(1000), + Generator(reloaded).standard_normal(1000)) + assert bit_generator is not reloaded + assert_state_equal(reloaded_state, state) + + ss = SeedSequence(100) + aa = pickle.loads(pickle.dumps(ss)) + assert_equal(ss.state, aa.state) + + def test_invalid_state_type(self): + bit_generator = self.bit_generator(*self.data1['seed']) + with pytest.raises(TypeError): + bit_generator.state = {'1'} + + def test_invalid_state_value(self): + bit_generator = self.bit_generator(*self.data1['seed']) + state = bit_generator.state + state['bit_generator'] = 'otherBitGenerator' + with pytest.raises(ValueError): + bit_generator.state = state + + def test_invalid_init_type(self): + bit_generator = self.bit_generator + for st in self.invalid_init_types: + with pytest.raises(TypeError): + bit_generator(*st) + + def test_invalid_init_values(self): + bit_generator = self.bit_generator + for st in self.invalid_init_values: + with pytest.raises((ValueError, OverflowError)): + bit_generator(*st) + + def test_benchmark(self): + bit_generator = self.bit_generator(*self.data1['seed']) + bit_generator._benchmark(1) + bit_generator._benchmark(1, 'double') + with pytest.raises(ValueError): + bit_generator._benchmark(1, 'int32') + + @pytest.mark.skipif(MISSING_CFFI, reason='cffi not available') + def test_cffi(self): + bit_generator = self.bit_generator(*self.data1['seed']) + cffi_interface = bit_generator.cffi + assert isinstance(cffi_interface, interface) + other_cffi_interface = bit_generator.cffi + assert other_cffi_interface is cffi_interface + + @pytest.mark.skipif(MISSING_CTYPES, reason='ctypes not available') + def test_ctypes(self): + bit_generator = self.bit_generator(*self.data1['seed']) + ctypes_interface = bit_generator.ctypes + assert isinstance(ctypes_interface, interface) + other_ctypes_interface = bit_generator.ctypes + assert other_ctypes_interface is ctypes_interface + + def test_getstate(self): + bit_generator = self.bit_generator(*self.data1['seed']) + state = bit_generator.state + alt_state = bit_generator.__getstate__() + assert_state_equal(state, alt_state) + + +class TestPhilox(Base): + @classmethod + def setup_class(cls): + cls.bit_generator = Philox + cls.bits = 64 + cls.dtype = np.uint64 + cls.data1 = cls._read_csv( + join(pwd, './data/philox-testset-1.csv')) + cls.data2 = cls._read_csv( + join(pwd, './data/philox-testset-2.csv')) + cls.seed_error_type = TypeError + cls.invalid_init_types = [] + cls.invalid_init_values = [(1, None, 1), (-1,), (None, None, 2 ** 257 + 1)] + + def test_set_key(self): + bit_generator = self.bit_generator(*self.data1['seed']) + state = bit_generator.state + keyed = self.bit_generator(counter=state['state']['counter'], + key=state['state']['key']) + assert_state_equal(bit_generator.state, keyed.state) + + +class TestPCG64(Base): + @classmethod + def setup_class(cls): + cls.bit_generator = PCG64 + cls.bits = 64 + cls.dtype = np.uint64 + cls.data1 = cls._read_csv(join(pwd, './data/pcg64-testset-1.csv')) + cls.data2 = cls._read_csv(join(pwd, './data/pcg64-testset-2.csv')) + cls.seed_error_type = (ValueError, TypeError) + cls.invalid_init_types = [(3.2,), ([None],), (1, None)] + cls.invalid_init_values = [(-1,)] + + def test_advance_symmetry(self): + rs = Generator(self.bit_generator(*self.data1['seed'])) + state = rs.bit_generator.state + step = -0x9e3779b97f4a7c150000000000000000 + rs.bit_generator.advance(step) + val_neg = rs.integers(10) + rs.bit_generator.state = state + 
rs.bit_generator.advance(2**128 + step) + val_pos = rs.integers(10) + rs.bit_generator.state = state + rs.bit_generator.advance(10 * 2**128 + step) + val_big = rs.integers(10) + assert val_neg == val_pos + assert val_big == val_pos + + +class TestPCG64DXSM(Base): + @classmethod + def setup_class(cls): + cls.bit_generator = PCG64DXSM + cls.bits = 64 + cls.dtype = np.uint64 + cls.data1 = cls._read_csv(join(pwd, './data/pcg64dxsm-testset-1.csv')) + cls.data2 = cls._read_csv(join(pwd, './data/pcg64dxsm-testset-2.csv')) + cls.seed_error_type = (ValueError, TypeError) + cls.invalid_init_types = [(3.2,), ([None],), (1, None)] + cls.invalid_init_values = [(-1,)] + + def test_advance_symmetry(self): + rs = Generator(self.bit_generator(*self.data1['seed'])) + state = rs.bit_generator.state + step = -0x9e3779b97f4a7c150000000000000000 + rs.bit_generator.advance(step) + val_neg = rs.integers(10) + rs.bit_generator.state = state + rs.bit_generator.advance(2**128 + step) + val_pos = rs.integers(10) + rs.bit_generator.state = state + rs.bit_generator.advance(10 * 2**128 + step) + val_big = rs.integers(10) + assert val_neg == val_pos + assert val_big == val_pos + + +class TestMT19937(Base): + @classmethod + def setup_class(cls): + cls.bit_generator = MT19937 + cls.bits = 32 + cls.dtype = np.uint32 + cls.data1 = cls._read_csv(join(pwd, './data/mt19937-testset-1.csv')) + cls.data2 = cls._read_csv(join(pwd, './data/mt19937-testset-2.csv')) + cls.seed_error_type = ValueError + cls.invalid_init_types = [] + cls.invalid_init_values = [(-1,)] + + def test_seed_float_array(self): + assert_raises(TypeError, self.bit_generator, np.array([np.pi])) + assert_raises(TypeError, self.bit_generator, np.array([-np.pi])) + assert_raises(TypeError, self.bit_generator, np.array([np.pi, -np.pi])) + assert_raises(TypeError, self.bit_generator, np.array([0, np.pi])) + assert_raises(TypeError, self.bit_generator, [np.pi]) + assert_raises(TypeError, self.bit_generator, [0, np.pi]) + + def test_state_tuple(self): + rs = Generator(self.bit_generator(*self.data1['seed'])) + bit_generator = rs.bit_generator + state = bit_generator.state + desired = rs.integers(2 ** 16) + tup = (state['bit_generator'], state['state']['key'], + state['state']['pos']) + bit_generator.state = tup + actual = rs.integers(2 ** 16) + assert_equal(actual, desired) + tup = tup + (0, 0.0) + bit_generator.state = tup + actual = rs.integers(2 ** 16) + assert_equal(actual, desired) + + +class TestSFC64(Base): + @classmethod + def setup_class(cls): + cls.bit_generator = SFC64 + cls.bits = 64 + cls.dtype = np.uint64 + cls.data1 = cls._read_csv( + join(pwd, './data/sfc64-testset-1.csv')) + cls.data2 = cls._read_csv( + join(pwd, './data/sfc64-testset-2.csv')) + cls.seed_error_type = (ValueError, TypeError) + cls.invalid_init_types = [(3.2,), ([None],), (1, None)] + cls.invalid_init_values = [(-1,)] + + +class TestDefaultRNG: + def test_seed(self): + for args in [(), (None,), (1234,), ([1234, 5678],)]: + rg = default_rng(*args) + assert isinstance(rg.bit_generator, PCG64) + + def test_passthrough(self): + bg = Philox() + rg = default_rng(bg) + assert rg.bit_generator is bg + rg2 = default_rng(rg) + assert rg2 is rg + assert rg2.bit_generator is bg diff --git a/MLPY/Lib/site-packages/numpy/random/tests/test_extending.py b/MLPY/Lib/site-packages/numpy/random/tests/test_extending.py new file mode 100644 index 0000000000000000000000000000000000000000..19f015e8d2599625046e24bb4ed880ac6ee972c6 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/tests/test_extending.py @@ 
-0,0 +1,95 @@ +import os +import pytest +import shutil +import subprocess +import sys +import warnings +import numpy as np + +try: + import cffi +except ImportError: + cffi = None + +if sys.flags.optimize > 1: + # no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1 + # cffi cannot succeed + cffi = None + +try: + with warnings.catch_warnings(record=True) as w: + # numba issue gh-4733 + warnings.filterwarnings('always', '', DeprecationWarning) + import numba +except ImportError: + numba = None + +try: + import cython + from Cython.Compiler.Version import version as cython_version +except ImportError: + cython = None +else: + from distutils.version import LooseVersion + # Cython 0.29.21 is required for Python 3.9 and there are + # other fixes in the 0.29 series that are needed even for earlier + # Python versions. + # Note: keep in sync with the one in pyproject.toml + required_version = LooseVersion('0.29.21') + if LooseVersion(cython_version) < required_version: + # too old or wrong cython, skip the test + cython = None + +@pytest.mark.skipif(cython is None, reason="requires cython") +@pytest.mark.slow +def test_cython(tmp_path): + srcdir = os.path.join(os.path.dirname(__file__), '..') + shutil.copytree(srcdir, tmp_path / 'random') + # build the examples and "install" them into a temporary directory + build_dir = tmp_path / 'random' / '_examples' / 'cython' + subprocess.check_call([sys.executable, 'setup.py', 'build', 'install', + '--prefix', str(tmp_path / 'installdir'), + '--single-version-externally-managed', + '--record', str(tmp_path/ 'tmp_install_log.txt'), + ], + cwd=str(build_dir), + ) + # gh-16162: make sure numpy's __init__.pxd was used for cython + # not really part of this test, but it is a convenient place to check + with open(build_dir / 'extending.c') as fid: + txt_to_find = 'NumPy API declarations from "numpy/__init__.pxd"' + for i, line in enumerate(fid): + if txt_to_find in line: + break + else: + assert False, ("Could not find '{}' in C file, " + "wrong pxd used".format(txt_to_find)) + # get the path to the so's + so1 = so2 = None + with open(tmp_path /'tmp_install_log.txt') as fid: + for line in fid: + if 'extending.' 
in line: + so1 = line.strip() + if 'extending_distributions' in line: + so2 = line.strip() + assert so1 is not None + assert so2 is not None + # import the so's without adding the directory to sys.path + from importlib.machinery import ExtensionFileLoader + extending = ExtensionFileLoader('extending', so1).load_module() + extending_distributions = ExtensionFileLoader('extending_distributions', so2).load_module() + + # actually test the cython c-extension + from numpy.random import PCG64 + values = extending_distributions.uniforms_ex(PCG64(0), 10, 'd') + assert values.shape == (10,) + assert values.dtype == np.float64 + +@pytest.mark.skipif(numba is None or cffi is None, + reason="requires numba and cffi") +def test_numba(): + from numpy.random._examples.numba import extending # noqa: F401 + +@pytest.mark.skipif(cffi is None, reason="requires cffi") +def test_cffi(): + from numpy.random._examples.cffi import extending # noqa: F401 diff --git a/MLPY/Lib/site-packages/numpy/random/tests/test_generator_mt19937.py b/MLPY/Lib/site-packages/numpy/random/tests/test_generator_mt19937.py new file mode 100644 index 0000000000000000000000000000000000000000..ebc90e08210f521ea69fd88e01c7098277147135 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/tests/test_generator_mt19937.py @@ -0,0 +1,2606 @@ +import sys +import hashlib + +import pytest + +import numpy as np +from numpy.linalg import LinAlgError +from numpy.testing import ( + assert_, assert_raises, assert_equal, assert_allclose, + assert_warns, assert_no_warnings, assert_array_equal, + assert_array_almost_equal, suppress_warnings) + +from numpy.random import Generator, MT19937, SeedSequence, RandomState + +random = Generator(MT19937()) + +JUMP_TEST_DATA = [ + { + "seed": 0, + "steps": 10, + "initial": {"key_sha256": "bb1636883c2707b51c5b7fc26c6927af4430f2e0785a8c7bc886337f919f9edf", "pos": 9}, + "jumped": {"key_sha256": "ff682ac12bb140f2d72fba8d3506cf4e46817a0db27aae1683867629031d8d55", "pos": 598}, + }, + { + "seed":384908324, + "steps":312, + "initial": {"key_sha256": "16b791a1e04886ccbbb4d448d6ff791267dc458ae599475d08d5cced29d11614", "pos": 311}, + "jumped": {"key_sha256": "a0110a2cf23b56be0feaed8f787a7fc84bef0cb5623003d75b26bdfa1c18002c", "pos": 276}, + }, + { + "seed": [839438204, 980239840, 859048019, 821], + "steps": 511, + "initial": {"key_sha256": "d306cf01314d51bd37892d874308200951a35265ede54d200f1e065004c3e9ea", "pos": 510}, + "jumped": {"key_sha256": "0e00ab449f01a5195a83b4aee0dfbc2ce8d46466a640b92e33977d2e42f777f8", "pos": 475}, + }, +] + +@pytest.fixture(scope='module', params=[True, False]) +def endpoint(request): + return request.param + + +class TestSeed: + def test_scalar(self): + s = Generator(MT19937(0)) + assert_equal(s.integers(1000), 479) + s = Generator(MT19937(4294967295)) + assert_equal(s.integers(1000), 324) + + def test_array(self): + s = Generator(MT19937(range(10))) + assert_equal(s.integers(1000), 465) + s = Generator(MT19937(np.arange(10))) + assert_equal(s.integers(1000), 465) + s = Generator(MT19937([0])) + assert_equal(s.integers(1000), 479) + s = Generator(MT19937([4294967295])) + assert_equal(s.integers(1000), 324) + + def test_seedsequence(self): + s = MT19937(SeedSequence(0)) + assert_equal(s.random_raw(1), 2058676884) + + def test_invalid_scalar(self): + # seed must be an unsigned 32 bit integer + assert_raises(TypeError, MT19937, -0.5) + assert_raises(ValueError, MT19937, -1) + + def test_invalid_array(self): + # seed must be an unsigned integer + assert_raises(TypeError, MT19937, [-0.5]) + 
assert_raises(ValueError, MT19937, [-1]) + assert_raises(ValueError, MT19937, [1, -2, 4294967296]) + + def test_noninstantized_bitgen(self): + assert_raises(ValueError, Generator, MT19937) + + +class TestBinomial: + def test_n_zero(self): + # Tests the corner case of n == 0 for the binomial distribution. + # binomial(0, p) should be zero for any p in [0, 1]. + # This test addresses issue #3480. + zeros = np.zeros(2, dtype='int') + for p in [0, .5, 1]: + assert_(random.binomial(0, p) == 0) + assert_array_equal(random.binomial(zeros, p), zeros) + + def test_p_is_nan(self): + # Issue #4571. + assert_raises(ValueError, random.binomial, 1, np.nan) + + +class TestMultinomial: + def test_basic(self): + random.multinomial(100, [0.2, 0.8]) + + def test_zero_probability(self): + random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0]) + + def test_int_negative_interval(self): + assert_(-5 <= random.integers(-5, -1) < -1) + x = random.integers(-5, -1, 5) + assert_(np.all(-5 <= x)) + assert_(np.all(x < -1)) + + def test_size(self): + # gh-3173 + p = [0.5, 0.5] + assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2)) + assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2)) + assert_equal(random.multinomial(1, p, np.array((2, 2))).shape, + (2, 2, 2)) + + assert_raises(TypeError, random.multinomial, 1, p, + float(1)) + + def test_invalid_prob(self): + assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2]) + assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9]) + + def test_invalid_n(self): + assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2]) + assert_raises(ValueError, random.multinomial, [-1] * 10, [0.8, 0.2]) + + def test_p_non_contiguous(self): + p = np.arange(15.) + p /= np.sum(p[1::3]) + pvals = p[1::3] + random = Generator(MT19937(1432985819)) + non_contig = random.multinomial(100, pvals=pvals) + random = Generator(MT19937(1432985819)) + contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals)) + assert_array_equal(non_contig, contig) + + def test_multidimensional_pvals(self): + assert_raises(ValueError, random.multinomial, 10, [[0, 1]]) + assert_raises(ValueError, random.multinomial, 10, [[0], [1]]) + assert_raises(ValueError, random.multinomial, 10, [[[0], [1]], [[1], [0]]]) + assert_raises(ValueError, random.multinomial, 10, np.array([[0, 1], [1, 0]])) + + def test_multinomial_pvals_float32(self): + x = np.array([9.9e-01, 9.9e-01, 1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09, + 1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09], dtype=np.float32) + pvals = x / x.sum() + random = Generator(MT19937(1432985819)) + match = r"[\w\s]*pvals array is cast to 64-bit floating" + with pytest.raises(ValueError, match=match): + random.multinomial(1, pvals) + +class TestMultivariateHypergeometric: + + def setup(self): + self.seed = 8675309 + + def test_argument_validation(self): + # Error cases... 
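+ # (Editor's illustration, not upstream code.) For contrast with the
+ # error cases below, a valid call draws one count per color, summing
+ # to nsample:
+ # >>> g = Generator(MT19937(8675309))
+ # >>> g.multivariate_hypergeometric([3, 4, 5], 6) # doctest: +SKIP
+ # array([...]) # three nonnegative counts that sum to 6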
+ + # `colors` must be a 1-d sequence + assert_raises(ValueError, random.multivariate_hypergeometric, + 10, 4) + + # Negative nsample + assert_raises(ValueError, random.multivariate_hypergeometric, + [2, 3, 4], -1) + + # Negative color + assert_raises(ValueError, random.multivariate_hypergeometric, + [-1, 2, 3], 2) + + # nsample exceeds sum(colors) + assert_raises(ValueError, random.multivariate_hypergeometric, + [2, 3, 4], 10) + + # nsample exceeds sum(colors) (edge case of empty colors) + assert_raises(ValueError, random.multivariate_hypergeometric, + [], 1) + + # Validation errors associated with very large values in colors. + assert_raises(ValueError, random.multivariate_hypergeometric, + [999999999, 101], 5, 1, 'marginals') + + int64_info = np.iinfo(np.int64) + max_int64 = int64_info.max + max_int64_index = max_int64 // int64_info.dtype.itemsize + assert_raises(ValueError, random.multivariate_hypergeometric, + [max_int64_index - 100, 101], 5, 1, 'count') + + @pytest.mark.parametrize('method', ['count', 'marginals']) + def test_edge_cases(self, method): + # Set the seed, but in fact, all the results in this test are + # deterministic, so we don't really need this. + random = Generator(MT19937(self.seed)) + + x = random.multivariate_hypergeometric([0, 0, 0], 0, method=method) + assert_array_equal(x, [0, 0, 0]) + + x = random.multivariate_hypergeometric([], 0, method=method) + assert_array_equal(x, []) + + x = random.multivariate_hypergeometric([], 0, size=1, method=method) + assert_array_equal(x, np.empty((1, 0), dtype=np.int64)) + + x = random.multivariate_hypergeometric([1, 2, 3], 0, method=method) + assert_array_equal(x, [0, 0, 0]) + + x = random.multivariate_hypergeometric([9, 0, 0], 3, method=method) + assert_array_equal(x, [3, 0, 0]) + + colors = [1, 1, 0, 1, 1] + x = random.multivariate_hypergeometric(colors, sum(colors), + method=method) + assert_array_equal(x, colors) + + x = random.multivariate_hypergeometric([3, 4, 5], 12, size=3, + method=method) + assert_array_equal(x, [[3, 4, 5]]*3) + + # Cases for nsample: + # nsample < 10 + # 10 <= nsample < colors.sum()/2 + # colors.sum()/2 < nsample < colors.sum() - 10 + # colors.sum() - 10 < nsample < colors.sum() + @pytest.mark.parametrize('nsample', [8, 25, 45, 55]) + @pytest.mark.parametrize('method', ['count', 'marginals']) + @pytest.mark.parametrize('size', [5, (2, 3), 150000]) + def test_typical_cases(self, nsample, method, size): + random = Generator(MT19937(self.seed)) + + colors = np.array([10, 5, 20, 25]) + sample = random.multivariate_hypergeometric(colors, nsample, size, + method=method) + if isinstance(size, int): + expected_shape = (size,) + colors.shape + else: + expected_shape = size + colors.shape + assert_equal(sample.shape, expected_shape) + assert_((sample >= 0).all()) + assert_((sample <= colors).all()) + assert_array_equal(sample.sum(axis=-1), + np.full(size, fill_value=nsample, dtype=int)) + if isinstance(size, int) and size >= 100000: + # This sample is large enough to compare its mean to + # the expected values. 
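+ # (Editor's note) the expected value used below is
+ # nsample * colors / colors.sum(); e.g. for colors=[10, 5, 20, 25]
+ # and nsample=25 that is 25 * [10, 5, 20, 25] / 60
+ # ~= [4.17, 2.08, 8.33, 10.42].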
+ assert_allclose(sample.mean(axis=0), + nsample * colors / colors.sum(), + rtol=1e-3, atol=0.005) + + def test_repeatability1(self): + random = Generator(MT19937(self.seed)) + sample = random.multivariate_hypergeometric([3, 4, 5], 5, size=5, + method='count') + expected = np.array([[2, 1, 2], + [2, 1, 2], + [1, 1, 3], + [2, 0, 3], + [2, 1, 2]]) + assert_array_equal(sample, expected) + + def test_repeatability2(self): + random = Generator(MT19937(self.seed)) + sample = random.multivariate_hypergeometric([20, 30, 50], 50, + size=5, + method='marginals') + expected = np.array([[ 9, 17, 24], + [ 7, 13, 30], + [ 9, 15, 26], + [ 9, 17, 24], + [12, 14, 24]]) + assert_array_equal(sample, expected) + + def test_repeatability3(self): + random = Generator(MT19937(self.seed)) + sample = random.multivariate_hypergeometric([20, 30, 50], 12, + size=5, + method='marginals') + expected = np.array([[2, 3, 7], + [5, 3, 4], + [2, 5, 5], + [5, 3, 4], + [1, 5, 6]]) + assert_array_equal(sample, expected) + + +class TestSetState: + def setup(self): + self.seed = 1234567890 + self.rg = Generator(MT19937(self.seed)) + self.bit_generator = self.rg.bit_generator + self.state = self.bit_generator.state + self.legacy_state = (self.state['bit_generator'], + self.state['state']['key'], + self.state['state']['pos']) + + def test_gaussian_reset(self): + # Make sure the cached every-other-Gaussian is reset. + old = self.rg.standard_normal(size=3) + self.bit_generator.state = self.state + new = self.rg.standard_normal(size=3) + assert_(np.all(old == new)) + + def test_gaussian_reset_in_media_res(self): + # When the state is saved with a cached Gaussian, make sure the + # cached Gaussian is restored. + + self.rg.standard_normal() + state = self.bit_generator.state + old = self.rg.standard_normal(size=3) + self.bit_generator.state = state + new = self.rg.standard_normal(size=3) + assert_(np.all(old == new)) + + def test_negative_binomial(self): + # Ensure that the negative binomial results take floating point + # arguments without truncation. 
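+ # (Editor's note) if the float arguments were silently truncated,
+ # n=0.5 would become n=0, which negative_binomial rejects, so the
+ # bare call below succeeding is itself the regression check.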
+ self.rg.negative_binomial(0.5, 0.5) + + +class TestIntegers: + rfunc = random.integers + + # valid integer/boolean types + itype = [bool, np.int8, np.uint8, np.int16, np.uint16, + np.int32, np.uint32, np.int64, np.uint64] + + def test_unsupported_type(self, endpoint): + assert_raises(TypeError, self.rfunc, 1, endpoint=endpoint, dtype=float) + + def test_bounds_checking(self, endpoint): + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, ubnd, lbnd, + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, 1, 0, endpoint=endpoint, + dtype=dt) + + assert_raises(ValueError, self.rfunc, [lbnd - 1], ubnd, + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, [lbnd], [ubnd + 1], + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, [ubnd], [lbnd], + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, 1, [0], + endpoint=endpoint, dtype=dt) + + def test_bounds_checking_array(self, endpoint): + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + (not endpoint) + + assert_raises(ValueError, self.rfunc, [lbnd - 1] * 2, [ubnd] * 2, + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, [lbnd] * 2, + [ubnd + 1] * 2, endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, ubnd, [lbnd] * 2, + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, [1] * 2, 0, + endpoint=endpoint, dtype=dt) + + def test_rng_zero_and_extremes(self, endpoint): + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + is_open = not endpoint + + tgt = ubnd - 1 + assert_equal(self.rfunc(tgt, tgt + is_open, size=1000, + endpoint=endpoint, dtype=dt), tgt) + assert_equal(self.rfunc([tgt], tgt + is_open, size=1000, + endpoint=endpoint, dtype=dt), tgt) + + tgt = lbnd + assert_equal(self.rfunc(tgt, tgt + is_open, size=1000, + endpoint=endpoint, dtype=dt), tgt) + assert_equal(self.rfunc(tgt, [tgt + is_open], size=1000, + endpoint=endpoint, dtype=dt), tgt) + + tgt = (lbnd + ubnd) // 2 + assert_equal(self.rfunc(tgt, tgt + is_open, size=1000, + endpoint=endpoint, dtype=dt), tgt) + assert_equal(self.rfunc([tgt], [tgt + is_open], + size=1000, endpoint=endpoint, dtype=dt), + tgt) + + def test_rng_zero_and_extremes_array(self, endpoint): + size = 1000 + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + + tgt = ubnd - 1 + assert_equal(self.rfunc([tgt], [tgt + 1], + size=size, dtype=dt), tgt) + assert_equal(self.rfunc( + [tgt] * size, [tgt + 1] * size, dtype=dt), tgt) + assert_equal(self.rfunc( + [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt) + + tgt = lbnd + assert_equal(self.rfunc([tgt], [tgt + 1], + size=size, dtype=dt), tgt) + assert_equal(self.rfunc( + [tgt] * size, [tgt + 1] * size, dtype=dt), tgt) + assert_equal(self.rfunc( + [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt) + + tgt = (lbnd + ubnd) // 2 + assert_equal(self.rfunc([tgt], [tgt + 1], + size=size, dtype=dt), tgt) + assert_equal(self.rfunc( + [tgt] * 
size, [tgt + 1] * size, dtype=dt), tgt) + assert_equal(self.rfunc( + [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt) + + def test_full_range(self, endpoint): + # Test for ticket #1690 + + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + + try: + self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt) + except Exception as e: + raise AssertionError("No error should have been raised, " + "but one was with the following " + "message:\n\n%s" % str(e)) + + def test_full_range_array(self, endpoint): + # Test for ticket #1690 + + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + + try: + self.rfunc([lbnd] * 2, [ubnd], endpoint=endpoint, dtype=dt) + except Exception as e: + raise AssertionError("No error should have been raised, " + "but one was with the following " + "message:\n\n%s" % str(e)) + + def test_in_bounds_fuzz(self, endpoint): + # Don't use fixed seed + random = Generator(MT19937()) + + for dt in self.itype[1:]: + for ubnd in [4, 8, 16]: + vals = self.rfunc(2, ubnd - endpoint, size=2 ** 16, + endpoint=endpoint, dtype=dt) + assert_(vals.max() < ubnd) + assert_(vals.min() >= 2) + + vals = self.rfunc(0, 2 - endpoint, size=2 ** 16, endpoint=endpoint, + dtype=bool) + assert_(vals.max() < 2) + assert_(vals.min() >= 0) + + def test_scalar_array_equiv(self, endpoint): + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + + size = 1000 + random = Generator(MT19937(1234)) + scalar = random.integers(lbnd, ubnd, size=size, endpoint=endpoint, + dtype=dt) + + random = Generator(MT19937(1234)) + scalar_array = random.integers([lbnd], [ubnd], size=size, + endpoint=endpoint, dtype=dt) + + random = Generator(MT19937(1234)) + array = random.integers([lbnd] * size, [ubnd] * + size, size=size, endpoint=endpoint, dtype=dt) + assert_array_equal(scalar, scalar_array) + assert_array_equal(scalar, array) + + def test_repeatability(self, endpoint): + # We use a sha256 hash of generated sequences of 1000 samples + # in the range [0, 6) for all but bool, where the range + # is [0, 2). Hashes are for little endian numbers. 
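+ # (Editor's illustration, not upstream code) each digest is SHA-256
+ # over the raw little-endian bytes of the sampled array, e.g.:
+ # >>> hashlib.sha256(np.arange(4, dtype='<u4')).hexdigest() # doctest: +SKIP
+ # which is why big-endian hosts call byteswap() before hashing below.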
+ tgt = {'bool': '053594a9b82d656f967c54869bc6970aa0358cf94ad469c81478459c6a90eee3', + 'int16': '54de9072b6ee9ff7f20b58329556a46a447a8a29d67db51201bf88baa6e4e5d4', + 'int32': 'd3a0d5efb04542b25ac712e50d21f39ac30f312a5052e9bbb1ad3baa791ac84b', + 'int64': '14e224389ac4580bfbdccb5697d6190b496f91227cf67df60989de3d546389b1', + 'int8': '0e203226ff3fbbd1580f15da4621e5f7164d0d8d6b51696dd42d004ece2cbec1', + 'uint16': '54de9072b6ee9ff7f20b58329556a46a447a8a29d67db51201bf88baa6e4e5d4', + 'uint32': 'd3a0d5efb04542b25ac712e50d21f39ac30f312a5052e9bbb1ad3baa791ac84b', + 'uint64': '14e224389ac4580bfbdccb5697d6190b496f91227cf67df60989de3d546389b1', + 'uint8': '0e203226ff3fbbd1580f15da4621e5f7164d0d8d6b51696dd42d004ece2cbec1'} + + for dt in self.itype[1:]: + random = Generator(MT19937(1234)) + + # view as little endian for hash + if sys.byteorder == 'little': + val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint, + dtype=dt) + else: + val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint, + dtype=dt).byteswap() + + res = hashlib.sha256(val).hexdigest() + assert_(tgt[np.dtype(dt).name] == res) + + # bools do not depend on endianness + random = Generator(MT19937(1234)) + val = random.integers(0, 2 - endpoint, size=1000, endpoint=endpoint, + dtype=bool).view(np.int8) + res = hashlib.sha256(val).hexdigest() + assert_(tgt[np.dtype(bool).name] == res) + + def test_repeatability_broadcasting(self, endpoint): + for dt in self.itype: + lbnd = 0 if dt in (bool, np.bool_) else np.iinfo(dt).min + ubnd = 2 if dt in (bool, np.bool_) else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + + # view as little endian for hash + random = Generator(MT19937(1234)) + val = random.integers(lbnd, ubnd, size=1000, endpoint=endpoint, + dtype=dt) + + random = Generator(MT19937(1234)) + val_bc = random.integers([lbnd] * 1000, ubnd, endpoint=endpoint, + dtype=dt) + + assert_array_equal(val, val_bc) + + random = Generator(MT19937(1234)) + val_bc = random.integers([lbnd] * 1000, [ubnd] * 1000, + endpoint=endpoint, dtype=dt) + + assert_array_equal(val, val_bc) + + @pytest.mark.parametrize( + 'bound, expected', + [(2**32 - 1, np.array([517043486, 1364798665, 1733884389, 1353720612, + 3769704066, 1170797179, 4108474671])), + (2**32, np.array([517043487, 1364798666, 1733884390, 1353720613, + 3769704067, 1170797180, 4108474672])), + (2**32 + 1, np.array([517043487, 1733884390, 3769704068, 4108474673, + 1831631863, 1215661561, 3869512430]))] + ) + def test_repeatability_32bit_boundary(self, bound, expected): + for size in [None, len(expected)]: + random = Generator(MT19937(1234)) + x = random.integers(bound, size=size) + assert_equal(x, expected if size is not None else expected[0]) + + def test_repeatability_32bit_boundary_broadcasting(self): + desired = np.array([[[1622936284, 3620788691, 1659384060], + [1417365545, 760222891, 1909653332], + [3788118662, 660249498, 4092002593]], + [[3625610153, 2979601262, 3844162757], + [ 685800658, 120261497, 2694012896], + [1207779440, 1586594375, 3854335050]], + [[3004074748, 2310761796, 3012642217], + [2067714190, 2786677879, 1363865881], + [ 791663441, 1867303284, 2169727960]], + [[1939603804, 1250951100, 298950036], + [1040128489, 3791912209, 3317053765], + [3155528714, 61360675, 2305155588]], + [[ 817688762, 1335621943, 3288952434], + [1770890872, 1102951817, 1957607470], + [3099996017, 798043451, 48334215]]]) + for size in [None, (5, 3, 3)]: + random = Generator(MT19937(12345)) + x = random.integers([[-1], [0], [1]], + [2**32 - 1, 2**32, 2**32 + 1], + 
size=size) + assert_array_equal(x, desired if size is not None else desired[0]) + + def test_int64_uint64_broadcast_exceptions(self, endpoint): + configs = {np.uint64: ((0, 2**65), (-1, 2**62), (10, 9), (0, 0)), + np.int64: ((0, 2**64), (-(2**64), 2**62), (10, 9), (0, 0), + (-2**63-1, -2**63-1))} + for dtype in configs: + for config in configs[dtype]: + low, high = config + high = high - endpoint + low_a = np.array([[low]*10]) + high_a = np.array([high] * 10) + assert_raises(ValueError, random.integers, low, high, + endpoint=endpoint, dtype=dtype) + assert_raises(ValueError, random.integers, low_a, high, + endpoint=endpoint, dtype=dtype) + assert_raises(ValueError, random.integers, low, high_a, + endpoint=endpoint, dtype=dtype) + assert_raises(ValueError, random.integers, low_a, high_a, + endpoint=endpoint, dtype=dtype) + + low_o = np.array([[low]*10], dtype=object) + high_o = np.array([high] * 10, dtype=object) + assert_raises(ValueError, random.integers, low_o, high, + endpoint=endpoint, dtype=dtype) + assert_raises(ValueError, random.integers, low, high_o, + endpoint=endpoint, dtype=dtype) + assert_raises(ValueError, random.integers, low_o, high_o, + endpoint=endpoint, dtype=dtype) + + def test_int64_uint64_corner_case(self, endpoint): + # When stored in NumPy arrays, `lbnd` is cast + # as np.int64, and `ubnd` is cast as np.uint64. + # Checking whether `lbnd` >= `ubnd` used to be + # done solely via direct comparison, which is incorrect + # because when NumPy tries to compare both numbers, + # it casts both to np.float64 because there is + # no integer superset of np.int64 and np.uint64. However, + # `ubnd` is too large to be represented in np.float64, + # causing it to be rounded down to np.iinfo(np.int64).max, + # leading to a ValueError because `lbnd` now equals + # the new `ubnd`. + + dt = np.int64 + tgt = np.iinfo(np.int64).max + lbnd = np.int64(np.iinfo(np.int64).max) + ubnd = np.uint64(np.iinfo(np.int64).max + 1 - endpoint) + + # None of these function calls should + # generate a ValueError now.
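+ # (Editor's illustration) the promotion hazard in one expression:
+ # >>> np.int64(2**63 - 1) < np.uint64(2**63) # False!
+ # both operands are promoted to float64, where 2**63 - 1 rounds up
+ # to 2**63, so a naive `lbnd >= ubnd` check fires spuriously.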
+ actual = random.integers(lbnd, ubnd, endpoint=endpoint, dtype=dt) + assert_equal(actual, tgt) + + def test_respect_dtype_singleton(self, endpoint): + # See gh-7203 + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + dt = np.bool_ if dt is bool else dt + + sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt) + assert_equal(sample.dtype, dt) + + for dt in (bool, int, np.compat.long): + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + + # gh-7284: Ensure that we get Python data types + sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt) + assert not hasattr(sample, 'dtype') + assert_equal(type(sample), dt) + + def test_respect_dtype_array(self, endpoint): + # See gh-7203 + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + dt = np.bool_ if dt is bool else dt + + sample = self.rfunc([lbnd], [ubnd], endpoint=endpoint, dtype=dt) + assert_equal(sample.dtype, dt) + sample = self.rfunc([lbnd] * 2, [ubnd] * 2, endpoint=endpoint, + dtype=dt) + assert_equal(sample.dtype, dt) + + def test_zero_size(self, endpoint): + # See gh-7203 + for dt in self.itype: + sample = self.rfunc(0, 0, (3, 0, 4), endpoint=endpoint, dtype=dt) + assert sample.shape == (3, 0, 4) + assert sample.dtype == dt + assert self.rfunc(0, -10, 0, endpoint=endpoint, + dtype=dt).shape == (0,) + assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape, + (3, 0, 4)) + assert_equal(random.integers(0, -10, size=0).shape, (0,)) + assert_equal(random.integers(10, 10, size=0).shape, (0,)) + + def test_error_byteorder(self): + other_byteord_dt = 'i4' + with pytest.raises(ValueError): + random.integers(0, 200, size=10, dtype=other_byteord_dt) + + # chi2max is the maximum acceptable chi-squared value. + @pytest.mark.slow + @pytest.mark.parametrize('sample_size,high,dtype,chi2max', + [(5000000, 5, np.int8, 125.0), # p-value ~4.6e-25 + (5000000, 7, np.uint8, 150.0), # p-value ~7.7e-30 + (10000000, 2500, np.int16, 3300.0), # p-value ~3.0e-25 + (50000000, 5000, np.uint16, 6500.0), # p-value ~3.5e-25 + ]) + def test_integers_small_dtype_chisquared(self, sample_size, high, + dtype, chi2max): + # Regression test for gh-14774. + samples = random.integers(high, size=sample_size, dtype=dtype) + + values, counts = np.unique(samples, return_counts=True) + expected = sample_size / high + chi2 = ((counts - expected)**2 / expected).sum() + assert chi2 < chi2max + + +class TestRandomDist: + # Make sure the random distribution returns the correct value for a + # given seed + + def setup(self): + self.seed = 1234567890 + + def test_integers(self): + random = Generator(MT19937(self.seed)) + actual = random.integers(-99, 99, size=(3, 2)) + desired = np.array([[-80, -56], [41, 37], [-83, -16]]) + assert_array_equal(actual, desired) + + def test_integers_masked(self): + # Test masked rejection sampling algorithm to generate array of + # uint32 in an interval. 
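+ # (Editor's note) "masked rejection" draws raw bits, ANDs them with
+ # the smallest 2**k - 1 mask covering the interval width, and
+ # redraws when the masked value is out of range; for [0, 99) the
+ # mask is 127 and masked values 99..127 are rejected.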
+ random = Generator(MT19937(self.seed)) + actual = random.integers(0, 99, size=(3, 2), dtype=np.uint32) + desired = np.array([[9, 21], [70, 68], [8, 41]], dtype=np.uint32) + assert_array_equal(actual, desired) + + def test_integers_closed(self): + random = Generator(MT19937(self.seed)) + actual = random.integers(-99, 99, size=(3, 2), endpoint=True) + desired = np.array([[-80, -56], [ 41, 38], [-83, -15]]) + assert_array_equal(actual, desired) + + def test_integers_max_int(self): + # Tests whether integers with endpoint=True can generate the + # maximum allowed Python int that can be converted + # into a C long. Previous implementations of this + # method have thrown an OverflowError when attempting + # to generate this integer. + actual = random.integers(np.iinfo('l').max, np.iinfo('l').max, + endpoint=True) + + desired = np.iinfo('l').max + assert_equal(actual, desired) + + def test_random(self): + random = Generator(MT19937(self.seed)) + actual = random.random((3, 2)) + desired = np.array([[0.096999199829214, 0.707517457682192], + [0.084364834598269, 0.767731206553125], + [0.665069021359413, 0.715487190596693]]) + assert_array_almost_equal(actual, desired, decimal=15) + + random = Generator(MT19937(self.seed)) + actual = random.random() + assert_array_almost_equal(actual, desired[0, 0], decimal=15) + + def test_random_float(self): + random = Generator(MT19937(self.seed)) + actual = random.random((3, 2)) + desired = np.array([[0.0969992 , 0.70751746], + [0.08436483, 0.76773121], + [0.66506902, 0.71548719]]) + assert_array_almost_equal(actual, desired, decimal=7) + + def test_random_float_scalar(self): + random = Generator(MT19937(self.seed)) + actual = random.random(dtype=np.float32) + desired = 0.0969992 + assert_array_almost_equal(actual, desired, decimal=7) + + def test_random_unsupported_type(self): + assert_raises(TypeError, random.random, dtype='int32') + + def test_choice_uniform_replace(self): + random = Generator(MT19937(self.seed)) + actual = random.choice(4, 4) + desired = np.array([0, 0, 2, 2], dtype=np.int64) + assert_array_equal(actual, desired) + + def test_choice_nonuniform_replace(self): + random = Generator(MT19937(self.seed)) + actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1]) + desired = np.array([0, 1, 0, 1], dtype=np.int64) + assert_array_equal(actual, desired) + + def test_choice_uniform_noreplace(self): + random = Generator(MT19937(self.seed)) + actual = random.choice(4, 3, replace=False) + desired = np.array([2, 0, 3], dtype=np.int64) + assert_array_equal(actual, desired) + actual = random.choice(4, 4, replace=False, shuffle=False) + desired = np.arange(4, dtype=np.int64) + assert_array_equal(actual, desired) + + def test_choice_nonuniform_noreplace(self): + random = Generator(MT19937(self.seed)) + actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1]) + desired = np.array([0, 2, 3], dtype=np.int64) + assert_array_equal(actual, desired) + + def test_choice_noninteger(self): + random = Generator(MT19937(self.seed)) + actual = random.choice(['a', 'b', 'c', 'd'], 4) + desired = np.array(['a', 'a', 'c', 'c']) + assert_array_equal(actual, desired) + + def test_choice_multidimensional_default_axis(self): + random = Generator(MT19937(self.seed)) + actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 3) + desired = np.array([[0, 1], [0, 1], [4, 5]]) + assert_array_equal(actual, desired) + + def test_choice_multidimensional_custom_axis(self): + random = Generator(MT19937(self.seed)) + actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 1, axis=1) +
desired = np.array([[0], [2], [4], [6]]) + assert_array_equal(actual, desired) + + def test_choice_exceptions(self): + sample = random.choice + assert_raises(ValueError, sample, -1, 3) + assert_raises(ValueError, sample, 3., 3) + assert_raises(ValueError, sample, [], 3) + assert_raises(ValueError, sample, [1, 2, 3, 4], 3, + p=[[0.25, 0.25], [0.25, 0.25]]) + assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2]) + assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1]) + assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4]) + assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False) + # gh-13087 + assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False) + assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False) + assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False) + assert_raises(ValueError, sample, [1, 2, 3], 2, + replace=False, p=[1, 0, 0]) + + def test_choice_return_shape(self): + p = [0.1, 0.9] + # Check scalar + assert_(np.isscalar(random.choice(2, replace=True))) + assert_(np.isscalar(random.choice(2, replace=False))) + assert_(np.isscalar(random.choice(2, replace=True, p=p))) + assert_(np.isscalar(random.choice(2, replace=False, p=p))) + assert_(np.isscalar(random.choice([1, 2], replace=True))) + assert_(random.choice([None], replace=True) is None) + a = np.array([1, 2]) + arr = np.empty(1, dtype=object) + arr[0] = a + assert_(random.choice(arr, replace=True) is a) + + # Check 0-d array + s = tuple() + assert_(not np.isscalar(random.choice(2, s, replace=True))) + assert_(not np.isscalar(random.choice(2, s, replace=False))) + assert_(not np.isscalar(random.choice(2, s, replace=True, p=p))) + assert_(not np.isscalar(random.choice(2, s, replace=False, p=p))) + assert_(not np.isscalar(random.choice([1, 2], s, replace=True))) + assert_(random.choice([None], s, replace=True).ndim == 0) + a = np.array([1, 2]) + arr = np.empty(1, dtype=object) + arr[0] = a + assert_(random.choice(arr, s, replace=True).item() is a) + + # Check multi dimensional array + s = (2, 3) + p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2] + assert_equal(random.choice(6, s, replace=True).shape, s) + assert_equal(random.choice(6, s, replace=False).shape, s) + assert_equal(random.choice(6, s, replace=True, p=p).shape, s) + assert_equal(random.choice(6, s, replace=False, p=p).shape, s) + assert_equal(random.choice(np.arange(6), s, replace=True).shape, s) + + # Check zero-size + assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape, (3, 0, 4)) + assert_equal(random.integers(0, -10, size=0).shape, (0,)) + assert_equal(random.integers(10, 10, size=0).shape, (0,)) + assert_equal(random.choice(0, size=0).shape, (0,)) + assert_equal(random.choice([], size=(0,)).shape, (0,)) + assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape, + (3, 0, 4)) + assert_raises(ValueError, random.choice, [], 10) + + def test_choice_nan_probabilities(self): + a = np.array([42, 1, 2]) + p = [None, None, None] + assert_raises(ValueError, random.choice, a, p=p) + + def test_choice_p_non_contiguous(self): + p = np.ones(10) / 5 + p[1::2] = 3.0 + random = Generator(MT19937(self.seed)) + non_contig = random.choice(5, 3, p=p[::2]) + random = Generator(MT19937(self.seed)) + contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2])) + assert_array_equal(non_contig, contig) + + def test_choice_return_type(self): + # gh 9867 + p = np.ones(4) / 4. 
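+ # (Editor's note) all four code paths below (with/without p,
+ # with/without replacement) should yield np.int64 indices, e.g.:
+ # >>> Generator(MT19937(0)).choice(4, 2).dtype # dtype('int64')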
+ actual = random.choice(4, 2) + assert actual.dtype == np.int64 + actual = random.choice(4, 2, replace=False) + assert actual.dtype == np.int64 + actual = random.choice(4, 2, p=p) + assert actual.dtype == np.int64 + actual = random.choice(4, 2, p=p, replace=False) + assert actual.dtype == np.int64 + + def test_choice_large_sample(self): + choice_hash = '4266599d12bfcfb815213303432341c06b4349f5455890446578877bb322e222' + random = Generator(MT19937(self.seed)) + actual = random.choice(10000, 5000, replace=False) + if sys.byteorder != 'little': + actual = actual.byteswap() + res = hashlib.sha256(actual.view(np.int8)).hexdigest() + assert_(choice_hash == res) + + def test_bytes(self): + random = Generator(MT19937(self.seed)) + actual = random.bytes(10) + desired = b'\x86\xf0\xd4\x18\xe1\x81\t8%\xdd' + assert_equal(actual, desired) + + def test_shuffle(self): + # Test lists, arrays (of various dtypes), and multidimensional versions + # of both, c-contiguous or not: + for conv in [lambda x: np.array([]), + lambda x: x, + lambda x: np.asarray(x).astype(np.int8), + lambda x: np.asarray(x).astype(np.float32), + lambda x: np.asarray(x).astype(np.complex64), + lambda x: np.asarray(x).astype(object), + lambda x: [(i, i) for i in x], + lambda x: np.asarray([[i, i] for i in x]), + lambda x: np.vstack([x, x]).T, + # gh-11442 + lambda x: (np.asarray([(i, i) for i in x], + [("a", int), ("b", int)]) + .view(np.recarray)), + # gh-4270 + lambda x: np.asarray([(i, i) for i in x], + [("a", object, (1,)), + ("b", np.int32, (1,))])]: + random = Generator(MT19937(self.seed)) + alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) + random.shuffle(alist) + actual = alist + desired = conv([4, 1, 9, 8, 0, 5, 3, 6, 2, 7]) + assert_array_equal(actual, desired) + + def test_shuffle_custom_axis(self): + random = Generator(MT19937(self.seed)) + actual = np.arange(16).reshape((4, 4)) + random.shuffle(actual, axis=1) + desired = np.array([[ 0, 3, 1, 2], + [ 4, 7, 5, 6], + [ 8, 11, 9, 10], + [12, 15, 13, 14]]) + assert_array_equal(actual, desired) + random = Generator(MT19937(self.seed)) + actual = np.arange(16).reshape((4, 4)) + random.shuffle(actual, axis=-1) + assert_array_equal(actual, desired) + + def test_shuffle_custom_axis_empty(self): + random = Generator(MT19937(self.seed)) + desired = np.array([]).reshape((0, 6)) + for axis in (0, 1): + actual = np.array([]).reshape((0, 6)) + random.shuffle(actual, axis=axis) + assert_array_equal(actual, desired) + + def test_shuffle_axis_nonsquare(self): + y1 = np.arange(20).reshape(2, 10) + y2 = y1.copy() + random = Generator(MT19937(self.seed)) + random.shuffle(y1, axis=1) + random = Generator(MT19937(self.seed)) + random.shuffle(y2.T) + assert_array_equal(y1, y2) + + def test_shuffle_masked(self): + # gh-3263 + a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1) + b = np.ma.masked_values(np.arange(20) % 3 - 1, -1) + a_orig = a.copy() + b_orig = b.copy() + for i in range(50): + random.shuffle(a) + assert_equal( + sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask])) + random.shuffle(b) + assert_equal( + sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask])) + + def test_shuffle_exceptions(self): + random = Generator(MT19937(self.seed)) + arr = np.arange(10) + assert_raises(np.AxisError, random.shuffle, arr, 1) + arr = np.arange(9).reshape((3, 3)) + assert_raises(np.AxisError, random.shuffle, arr, 3) + assert_raises(TypeError, random.shuffle, arr, slice(1, 2, None)) + arr = [[1, 2, 3], [4, 5, 6]] + assert_raises(NotImplementedError, random.shuffle, arr, 1) + + 
arr = np.array(3) + assert_raises(TypeError, random.shuffle, arr) + arr = np.ones((3, 2)) + assert_raises(np.AxisError, random.shuffle, arr, 2) + + def test_permutation(self): + random = Generator(MT19937(self.seed)) + alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0] + actual = random.permutation(alist) + desired = [4, 1, 9, 8, 0, 5, 3, 6, 2, 7] + assert_array_equal(actual, desired) + + random = Generator(MT19937(self.seed)) + arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T + actual = random.permutation(arr_2d) + assert_array_equal(actual, np.atleast_2d(desired).T) + + bad_x_str = "abcd" + assert_raises(np.AxisError, random.permutation, bad_x_str) + + bad_x_float = 1.2 + assert_raises(np.AxisError, random.permutation, bad_x_float) + + random = Generator(MT19937(self.seed)) + integer_val = 10 + desired = [3, 0, 8, 7, 9, 4, 2, 5, 1, 6] + + actual = random.permutation(integer_val) + assert_array_equal(actual, desired) + + def test_permutation_custom_axis(self): + a = np.arange(16).reshape((4, 4)) + desired = np.array([[ 0, 3, 1, 2], + [ 4, 7, 5, 6], + [ 8, 11, 9, 10], + [12, 15, 13, 14]]) + random = Generator(MT19937(self.seed)) + actual = random.permutation(a, axis=1) + assert_array_equal(actual, desired) + random = Generator(MT19937(self.seed)) + actual = random.permutation(a, axis=-1) + assert_array_equal(actual, desired) + + def test_permutation_exceptions(self): + random = Generator(MT19937(self.seed)) + arr = np.arange(10) + assert_raises(np.AxisError, random.permutation, arr, 1) + arr = np.arange(9).reshape((3, 3)) + assert_raises(np.AxisError, random.permutation, arr, 3) + assert_raises(TypeError, random.permutation, arr, slice(1, 2, None)) + + @pytest.mark.parametrize("dtype", [int, object]) + @pytest.mark.parametrize("axis, expected", + [(None, np.array([[3, 7, 0, 9, 10, 11], + [8, 4, 2, 5, 1, 6]])), + (0, np.array([[6, 1, 2, 9, 10, 11], + [0, 7, 8, 3, 4, 5]])), + (1, np.array([[ 5, 3, 4, 0, 2, 1], + [11, 9, 10, 6, 8, 7]]))]) + def test_permuted(self, dtype, axis, expected): + random = Generator(MT19937(self.seed)) + x = np.arange(12).reshape(2, 6).astype(dtype) + random.permuted(x, axis=axis, out=x) + assert_array_equal(x, expected) + + random = Generator(MT19937(self.seed)) + x = np.arange(12).reshape(2, 6).astype(dtype) + y = random.permuted(x, axis=axis) + assert y.dtype == dtype + assert_array_equal(y, expected) + + def test_permuted_with_strides(self): + random = Generator(MT19937(self.seed)) + x0 = np.arange(22).reshape(2, 11) + x1 = x0.copy() + x = x0[:, ::3] + y = random.permuted(x, axis=1, out=x) + expected = np.array([[0, 9, 3, 6], + [14, 20, 11, 17]]) + assert_array_equal(y, expected) + x1[:, ::3] = expected + # Verify that the original x0 was modified in-place as expected. 
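+ # (Editor's note) x = x0[:, ::3] above is a strided view of columns
+ # 0, 3, 6 and 9, so permuting with out=x wrote through the view into
+ # x0; copying `expected` into the same columns of the pristine x1
+ # lets the assertion below confirm exactly that.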
+ assert_array_equal(x1, x0) + + def test_permuted_empty(self): + y = random.permuted([]) + assert_array_equal(y, []) + + @pytest.mark.parametrize('outshape', [(2, 3), 5]) + def test_permuted_out_with_wrong_shape(self, outshape): + a = np.array([1, 2, 3]) + out = np.zeros(outshape, dtype=a.dtype) + with pytest.raises(ValueError, match='same shape'): + random.permuted(a, out=out) + + def test_permuted_out_with_wrong_type(self): + out = np.zeros((3, 5), dtype=np.int32) + x = np.ones((3, 5)) + with pytest.raises(TypeError, match='Cannot cast'): + random.permuted(x, axis=1, out=out) + + def test_beta(self): + random = Generator(MT19937(self.seed)) + actual = random.beta(.1, .9, size=(3, 2)) + desired = np.array( + [[1.083029353267698e-10, 2.449965303168024e-11], + [2.397085162969853e-02, 3.590779671820755e-08], + [2.830254190078299e-04, 1.744709918330393e-01]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_binomial(self): + random = Generator(MT19937(self.seed)) + actual = random.binomial(100.123, .456, size=(3, 2)) + desired = np.array([[42, 41], + [42, 48], + [44, 50]]) + assert_array_equal(actual, desired) + + random = Generator(MT19937(self.seed)) + actual = random.binomial(100.123, .456) + desired = 42 + assert_array_equal(actual, desired) + + def test_chisquare(self): + random = Generator(MT19937(self.seed)) + actual = random.chisquare(50, size=(3, 2)) + desired = np.array([[32.9850547060149, 39.0219480493301], + [56.2006134779419, 57.3474165711485], + [55.4243733880198, 55.4209797925213]]) + assert_array_almost_equal(actual, desired, decimal=13) + + def test_dirichlet(self): + random = Generator(MT19937(self.seed)) + alpha = np.array([51.72840233779265162, 39.74494232180943953]) + actual = random.dirichlet(alpha, size=(3, 2)) + desired = np.array([[[0.5439892869558927, 0.45601071304410745], + [0.5588917345860708, 0.4411082654139292 ]], + [[0.5632074165063435, 0.43679258349365657], + [0.54862581112627, 0.45137418887373015]], + [[0.49961831357047226, 0.5003816864295278 ], + [0.52374806183482, 0.47625193816517997]]]) + assert_array_almost_equal(actual, desired, decimal=15) + bad_alpha = np.array([5.4e-01, -1.0e-16]) + assert_raises(ValueError, random.dirichlet, bad_alpha) + + random = Generator(MT19937(self.seed)) + alpha = np.array([51.72840233779265162, 39.74494232180943953]) + actual = random.dirichlet(alpha) + assert_array_almost_equal(actual, desired[0, 0], decimal=15) + + def test_dirichlet_size(self): + # gh-3173 + p = np.array([51.72840233779265162, 39.74494232180943953]) + assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2)) + assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2)) + assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2)) + + assert_raises(TypeError, random.dirichlet, p, float(1)) + + def test_dirichlet_bad_alpha(self): + # gh-2089 + alpha = np.array([5.4e-01, -1.0e-16]) + assert_raises(ValueError, random.dirichlet, alpha) + + # gh-15876 + assert_raises(ValueError, random.dirichlet, [[5, 1]]) + assert_raises(ValueError, random.dirichlet, [[5], [1]]) + assert_raises(ValueError, random.dirichlet, [[[5], [1]], [[1], [5]]]) + assert_raises(ValueError, random.dirichlet, np.array([[5, 1], [1, 5]])) + + def test_dirichlet_alpha_non_contiguous(self): + a = np.array([51.72840233779265162, -1.0, 39.74494232180943953]) + alpha = a[::2] + 
random = Generator(MT19937(self.seed)) + non_contig = random.dirichlet(alpha, size=(3, 2)) + random = Generator(MT19937(self.seed)) + contig = random.dirichlet(np.ascontiguousarray(alpha), + size=(3, 2)) + assert_array_almost_equal(non_contig, contig) + + def test_dirichlet_small_alpha(self): + eps = 1.0e-9 # 1.0e-10 -> runtime x 10; 1e-11 -> runtime x 200, etc. + alpha = eps * np.array([1., 1.0e-3]) + random = Generator(MT19937(self.seed)) + actual = random.dirichlet(alpha, size=(3, 2)) + expected = np.array([ + [[1., 0.], + [1., 0.]], + [[1., 0.], + [1., 0.]], + [[1., 0.], + [1., 0.]] + ]) + assert_array_almost_equal(actual, expected, decimal=15) + + @pytest.mark.slow + def test_dirichlet_moderately_small_alpha(self): + # Use alpha.max() < 0.1 to trigger stick breaking code path + alpha = np.array([0.02, 0.04, 0.03]) + exact_mean = alpha / alpha.sum() + random = Generator(MT19937(self.seed)) + sample = random.dirichlet(alpha, size=20000000) + sample_mean = sample.mean(axis=0) + assert_allclose(sample_mean, exact_mean, rtol=1e-3) + + def test_exponential(self): + random = Generator(MT19937(self.seed)) + actual = random.exponential(1.1234, size=(3, 2)) + desired = np.array([[0.098845481066258, 1.560752510746964], + [0.075730916041636, 1.769098974710777], + [1.488602544592235, 2.49684815275751 ]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_exponential_0(self): + assert_equal(random.exponential(scale=0), 0) + assert_raises(ValueError, random.exponential, scale=-0.) + + def test_f(self): + random = Generator(MT19937(self.seed)) + actual = random.f(12, 77, size=(3, 2)) + desired = np.array([[0.461720027077085, 1.100441958872451], + [1.100337455217484, 0.91421736740018 ], + [0.500811891303113, 0.826802454552058]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_gamma(self): + random = Generator(MT19937(self.seed)) + actual = random.gamma(5, 3, size=(3, 2)) + desired = np.array([[ 5.03850858902096, 7.9228656732049 ], + [18.73983605132985, 19.57961681699238], + [18.17897755150825, 18.17653912505234]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_gamma_0(self): + assert_equal(random.gamma(shape=0, scale=0), 0) + assert_raises(ValueError, random.gamma, shape=-0., scale=-0.) + + def test_geometric(self): + random = Generator(MT19937(self.seed)) + actual = random.geometric(.123456789, size=(3, 2)) + desired = np.array([[1, 11], + [1, 12], + [11, 17]]) + assert_array_equal(actual, desired) + + def test_geometric_exceptions(self): + assert_raises(ValueError, random.geometric, 1.1) + assert_raises(ValueError, random.geometric, [1.1] * 10) + assert_raises(ValueError, random.geometric, -0.1) + assert_raises(ValueError, random.geometric, [-0.1] * 10) + with np.errstate(invalid='ignore'): + assert_raises(ValueError, random.geometric, np.nan) + assert_raises(ValueError, random.geometric, [np.nan] * 10) + + def test_gumbel(self): + random = Generator(MT19937(self.seed)) + actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[ 4.688397515056245, -0.289514845417841], + [ 4.981176042584683, -0.633224272589149], + [-0.055915275687488, -0.333962478257953]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_gumbel_0(self): + assert_equal(random.gumbel(scale=0), 0) + assert_raises(ValueError, random.gumbel, scale=-0.) 
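+ # (Editor's note, not upstream code) test_gumbel_0 above and its *_0
+ # siblings share one pattern: scale=0 is a legal degenerate
+ # distribution (every draw equals the location parameter), while
+ # negative zero is rejected so a sign error cannot slip through:
+ # >>> Generator(MT19937(0)).gumbel(scale=0.0) # 0.0
+ # >>> Generator(MT19937(0)).gumbel(scale=-0.0) # ValueError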
+ + def test_hypergeometric(self): + random = Generator(MT19937(self.seed)) + actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2)) + desired = np.array([[ 9, 9], + [ 9, 9], + [10, 9]]) + assert_array_equal(actual, desired) + + # Test nbad = 0 + actual = random.hypergeometric(5, 0, 3, size=4) + desired = np.array([3, 3, 3, 3]) + assert_array_equal(actual, desired) + + actual = random.hypergeometric(15, 0, 12, size=4) + desired = np.array([12, 12, 12, 12]) + assert_array_equal(actual, desired) + + # Test ngood = 0 + actual = random.hypergeometric(0, 5, 3, size=4) + desired = np.array([0, 0, 0, 0]) + assert_array_equal(actual, desired) + + actual = random.hypergeometric(0, 15, 12, size=4) + desired = np.array([0, 0, 0, 0]) + assert_array_equal(actual, desired) + + def test_laplace(self): + random = Generator(MT19937(self.seed)) + actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[-3.156353949272393, 1.195863024830054], + [-3.435458081645966, 1.656882398925444], + [ 0.924824032467446, 1.251116432209336]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_laplace_0(self): + assert_equal(random.laplace(scale=0), 0) + assert_raises(ValueError, random.laplace, scale=-0.) + + def test_logistic(self): + random = Generator(MT19937(self.seed)) + actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[-4.338584631510999, 1.890171436749954], + [-4.64547787337966 , 2.514545562919217], + [ 1.495389489198666, 1.967827627577474]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_lognormal(self): + random = Generator(MT19937(self.seed)) + actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2)) + desired = np.array([[ 0.0268252166335, 13.9534486483053], + [ 0.1204014788936, 2.2422077497792], + [ 4.2484199496128, 12.0093343977523]]) + assert_array_almost_equal(actual, desired, decimal=13) + + def test_lognormal_0(self): + assert_equal(random.lognormal(sigma=0), 1) + assert_raises(ValueError, random.lognormal, sigma=-0.) + + def test_logseries(self): + random = Generator(MT19937(self.seed)) + actual = random.logseries(p=.923456789, size=(3, 2)) + desired = np.array([[14, 17], + [3, 18], + [5, 1]]) + assert_array_equal(actual, desired) + + def test_logseries_exceptions(self): + with np.errstate(invalid='ignore'): + assert_raises(ValueError, random.logseries, np.nan) + assert_raises(ValueError, random.logseries, [np.nan] * 10) + + def test_multinomial(self): + random = Generator(MT19937(self.seed)) + actual = random.multinomial(20, [1 / 6.] 
* 6, size=(3, 2)) + desired = np.array([[[1, 5, 1, 6, 4, 3], + [4, 2, 6, 2, 4, 2]], + [[5, 3, 2, 6, 3, 1], + [4, 4, 0, 2, 3, 7]], + [[6, 3, 1, 5, 3, 2], + [5, 5, 3, 1, 2, 4]]]) + assert_array_equal(actual, desired) + + @pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"]) + def test_multivariate_normal(self, method): + random = Generator(MT19937(self.seed)) + mean = (.123456789, 10) + cov = [[1, 0], [0, 1]] + size = (3, 2) + actual = random.multivariate_normal(mean, cov, size, method=method) + desired = np.array([[[-1.747478062846581, 11.25613495182354 ], + [-0.9967333370066214, 10.342002097029821 ]], + [[ 0.7850019631242964, 11.181113712443013 ], + [ 0.8901349653255224, 8.873825399642492 ]], + [[ 0.7130260107430003, 9.551628690083056 ], + [ 0.7127098726541128, 11.991709234143173 ]]]) + + assert_array_almost_equal(actual, desired, decimal=15) + + # Check the default size; this used to raise a deprecation warning + actual = random.multivariate_normal(mean, cov, method=method) + desired = np.array([0.233278563284287, 9.424140804347195]) + assert_array_almost_equal(actual, desired, decimal=15) + # Check that non-symmetric covariance input raises an exception + # when check_valid='raise', using the default svd method. + mean = [0, 0] + cov = [[1, 2], [1, 2]] + assert_raises(ValueError, random.multivariate_normal, mean, cov, + check_valid='raise') + + # Check that non-positive-semidefinite covariance warns with + # RuntimeWarning + cov = [[1, 2], [2, 1]] + assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov) + assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov, + method='eigh') + assert_raises(LinAlgError, random.multivariate_normal, mean, cov, + method='cholesky') + + # and that it doesn't warn when check_valid='ignore' + assert_no_warnings(random.multivariate_normal, mean, cov, + check_valid='ignore') + + # and that it raises a ValueError when check_valid='raise' + assert_raises(ValueError, random.multivariate_normal, mean, cov, + check_valid='raise') + assert_raises(ValueError, random.multivariate_normal, mean, cov, + check_valid='raise', method='eigh') + + # check degenerate samples from singular covariance matrix + cov = [[1, 1], [1, 1]] + if method in ('svd', 'eigh'): + samples = random.multivariate_normal(mean, cov, size=(3, 2), + method=method) + assert_array_almost_equal(samples[..., 0], samples[..., 1], + decimal=6) + else: + assert_raises(LinAlgError, random.multivariate_normal, mean, cov, + method='cholesky') + + cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32) + with suppress_warnings() as sup: + random.multivariate_normal(mean, cov, method=method) + w = sup.record(RuntimeWarning) + assert len(w) == 0 + + mu = np.zeros(2) + cov = np.eye(2) + assert_raises(ValueError, random.multivariate_normal, mean, cov, + check_valid='other') + assert_raises(ValueError, random.multivariate_normal, + np.zeros((2, 1, 1)), cov) + assert_raises(ValueError, random.multivariate_normal, + mu, np.empty((3, 2))) + assert_raises(ValueError, random.multivariate_normal, + mu, np.eye(3)) + + @pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"]) + def test_multivariate_normal_basic_stats(self, method): + random = Generator(MT19937(self.seed)) + n_s = 1000 + mean = np.array([1, 2]) + cov = np.array([[2, 1], [1, 2]]) + s = random.multivariate_normal(mean, cov, size=(n_s,), method=method) + s_center = s - mean + cov_emp = (s_center.T @ s_center) / (n_s - 1) + # these are pretty loose and are only designed to detect major errors + assert
np.all(np.abs(s_center.mean(-2)) < 0.1) + assert np.all(np.abs(cov_emp - cov) < 0.2) + + def test_negative_binomial(self): + random = Generator(MT19937(self.seed)) + actual = random.negative_binomial(n=100, p=.12345, size=(3, 2)) + desired = np.array([[543, 727], + [775, 760], + [600, 674]]) + assert_array_equal(actual, desired) + + def test_negative_binomial_exceptions(self): + with np.errstate(invalid='ignore'): + assert_raises(ValueError, random.negative_binomial, 100, np.nan) + assert_raises(ValueError, random.negative_binomial, 100, + [np.nan] * 10) + + def test_negative_binomial_p0_exception(self): + # Verify that p=0 raises an exception. + with assert_raises(ValueError): + x = random.negative_binomial(1, 0) + + def test_noncentral_chisquare(self): + random = Generator(MT19937(self.seed)) + actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2)) + desired = np.array([[ 1.70561552362133, 15.97378184942111], + [13.71483425173724, 20.17859633310629], + [11.3615477156643 , 3.67891108738029]]) + assert_array_almost_equal(actual, desired, decimal=14) + + actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2)) + desired = np.array([[9.41427665607629e-04, 1.70473157518850e-04], + [1.14554372041263e+00, 1.38187755933435e-03], + [1.90659181905387e+00, 1.21772577941822e+00]]) + assert_array_almost_equal(actual, desired, decimal=14) + + random = Generator(MT19937(self.seed)) + actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2)) + desired = np.array([[0.82947954590419, 1.80139670767078], + [6.58720057417794, 7.00491463609814], + [6.31101879073157, 6.30982307753005]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_noncentral_f(self): + random = Generator(MT19937(self.seed)) + actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1, + size=(3, 2)) + desired = np.array([[0.060310671139 , 0.23866058175939], + [0.86860246709073, 0.2668510459738 ], + [0.23375780078364, 1.88922102885943]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_noncentral_f_nan(self): + random = Generator(MT19937(self.seed)) + actual = random.noncentral_f(dfnum=5, dfden=2, nonc=np.nan) + assert np.isnan(actual) + + def test_normal(self): + random = Generator(MT19937(self.seed)) + actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[-3.618412914693162, 2.635726692647081], + [-2.116923463013243, 0.807460983059643], + [ 1.446547137248593, 2.485684213886024]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_normal_0(self): + assert_equal(random.normal(scale=0), 0) + assert_raises(ValueError, random.normal, scale=-0.) + + def test_pareto(self): + random = Generator(MT19937(self.seed)) + actual = random.pareto(a=.123456789, size=(3, 2)) + desired = np.array([[1.0394926776069018e+00, 7.7142534343505773e+04], + [7.2640150889064703e-01, 3.4650454783825594e+05], + [4.5852344481994740e+04, 6.5851383009539105e+07]]) + # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this + # matrix differs by 24 nulps. 
Discussion: + # https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html + # Consensus is that this is probably some gcc quirk that affects + # rounding but not in any important way, so we just use a looser + # tolerance on this test: + np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30) + + def test_poisson(self): + random = Generator(MT19937(self.seed)) + actual = random.poisson(lam=.123456789, size=(3, 2)) + desired = np.array([[0, 0], + [0, 0], + [0, 0]]) + assert_array_equal(actual, desired) + + def test_poisson_exceptions(self): + lambig = np.iinfo('int64').max + lamneg = -1 + assert_raises(ValueError, random.poisson, lamneg) + assert_raises(ValueError, random.poisson, [lamneg] * 10) + assert_raises(ValueError, random.poisson, lambig) + assert_raises(ValueError, random.poisson, [lambig] * 10) + with np.errstate(invalid='ignore'): + assert_raises(ValueError, random.poisson, np.nan) + assert_raises(ValueError, random.poisson, [np.nan] * 10) + + def test_power(self): + random = Generator(MT19937(self.seed)) + actual = random.power(a=.123456789, size=(3, 2)) + desired = np.array([[1.977857368842754e-09, 9.806792196620341e-02], + [2.482442984543471e-10, 1.527108843266079e-01], + [8.188283434244285e-02, 3.950547209346948e-01]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_rayleigh(self): + random = Generator(MT19937(self.seed)) + actual = random.rayleigh(scale=10, size=(3, 2)) + desired = np.array([[4.19494429102666, 16.66920198906598], + [3.67184544902662, 17.74695521962917], + [16.27935397855501, 21.08355560691792]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_rayleigh_0(self): + assert_equal(random.rayleigh(scale=0), 0) + assert_raises(ValueError, random.rayleigh, scale=-0.) 
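+    # [Editor's sketch, not part of upstream numpy: the reproducibility
+    # pattern used throughout this class — rebuilding the Generator from
+    # the same MT19937 seed replays an identical stream, which is what
+    # keeps the hard-coded `desired` arrays above stable. Underscore-
+    # prefixed so pytest does not collect it.]
+    def _reproducibility_sketch(self):
+        first = Generator(MT19937(self.seed)).rayleigh(scale=10, size=(3, 2))
+        second = Generator(MT19937(self.seed)).rayleigh(scale=10, size=(3, 2))
+        assert_array_equal(first, second)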
+
+    def test_standard_cauchy(self):
+        random = Generator(MT19937(self.seed))
+        actual = random.standard_cauchy(size=(3, 2))
+        desired = np.array([[-1.489437778266206, -3.275389641569784],
+                            [ 0.560102864910406, -0.680780916282552],
+                            [-1.314912905226277,  0.295852965660225]])
+        assert_array_almost_equal(actual, desired, decimal=15)
+
+    def test_standard_exponential(self):
+        random = Generator(MT19937(self.seed))
+        actual = random.standard_exponential(size=(3, 2), method='inv')
+        desired = np.array([[0.102031839440643, 1.229350298474972],
+                            [0.088137284693098, 1.459859985522667],
+                            [1.093830802293668, 1.256977002164613]])
+        assert_array_almost_equal(actual, desired, decimal=15)
+
+    def test_standard_exponential_type_error(self):
+        assert_raises(TypeError, random.standard_exponential, dtype=np.int32)
+
+    def test_standard_gamma(self):
+        random = Generator(MT19937(self.seed))
+        actual = random.standard_gamma(shape=3, size=(3, 2))
+        desired = np.array([[0.62970724056362, 1.22379851271008],
+                            [3.899412530884 , 4.12479964250139],
+                            [3.74994102464584, 3.74929307690815]])
+        assert_array_almost_equal(actual, desired, decimal=14)
+
+    def test_standard_gamma_scalar_float(self):
+        random = Generator(MT19937(self.seed))
+        actual = random.standard_gamma(3, dtype=np.float32)
+        desired = 2.9242148399353027
+        assert_array_almost_equal(actual, desired, decimal=6)
+
+    def test_standard_gamma_float(self):
+        random = Generator(MT19937(self.seed))
+        actual = random.standard_gamma(shape=3, size=(3, 2))
+        desired = np.array([[0.62971, 1.2238 ],
+                            [3.89941, 4.1248 ],
+                            [3.74994, 3.74929]])
+        assert_array_almost_equal(actual, desired, decimal=5)
+
+    def test_standard_gamma_float_out(self):
+        actual = np.zeros((3, 2), dtype=np.float32)
+        random = Generator(MT19937(self.seed))
+        random.standard_gamma(10.0, out=actual, dtype=np.float32)
+        desired = np.array([[10.14987,  7.87012],
+                            [ 9.46284, 12.56832],
+                            [13.82495,  7.81533]], dtype=np.float32)
+        assert_array_almost_equal(actual, desired, decimal=5)
+
+        random = Generator(MT19937(self.seed))
+        random.standard_gamma(10.0, out=actual, size=(3, 2), dtype=np.float32)
+        assert_array_almost_equal(actual, desired, decimal=5)
+
+    def test_standard_gamma_unknown_type(self):
+        assert_raises(TypeError, random.standard_gamma, 1.,
+                      dtype='int32')
+
+    def test_out_size_mismatch(self):
+        out = np.zeros(10)
+        assert_raises(ValueError, random.standard_gamma, 10.0, size=20,
+                      out=out)
+        assert_raises(ValueError, random.standard_gamma, 10.0, size=(10, 1),
+                      out=out)
+
+    def test_standard_gamma_0(self):
+        assert_equal(random.standard_gamma(shape=0), 0)
+        assert_raises(ValueError, random.standard_gamma, shape=-0.)
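+    # [Editor's sketch, not part of upstream numpy: the `out=` path
+    # exercised above fills a preallocated array in place and, per the
+    # out= convention, returns that same array, so callers can avoid an
+    # extra allocation. Underscore-prefixed so pytest skips it.]
+    def _standard_gamma_out_sketch(self):
+        buf = np.empty((3, 2), dtype=np.float32)
+        ret = Generator(MT19937(self.seed)).standard_gamma(
+            10.0, out=buf, dtype=np.float32)
+        assert ret is buf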
+ + def test_standard_normal(self): + random = Generator(MT19937(self.seed)) + actual = random.standard_normal(size=(3, 2)) + desired = np.array([[-1.870934851846581, 1.25613495182354 ], + [-1.120190126006621, 0.342002097029821], + [ 0.661545174124296, 1.181113712443012]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_normal_unsupported_type(self): + assert_raises(TypeError, random.standard_normal, dtype=np.int32) + + def test_standard_t(self): + random = Generator(MT19937(self.seed)) + actual = random.standard_t(df=10, size=(3, 2)) + desired = np.array([[-1.484666193042647, 0.30597891831161 ], + [ 1.056684299648085, -0.407312602088507], + [ 0.130704414281157, -2.038053410490321]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_triangular(self): + random = Generator(MT19937(self.seed)) + actual = random.triangular(left=5.12, mode=10.23, right=20.34, + size=(3, 2)) + desired = np.array([[ 7.86664070590917, 13.6313848513185 ], + [ 7.68152445215983, 14.36169131136546], + [13.16105603911429, 13.72341621856971]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_uniform(self): + random = Generator(MT19937(self.seed)) + actual = random.uniform(low=1.23, high=10.54, size=(3, 2)) + desired = np.array([[2.13306255040998 , 7.816987531021207], + [2.015436610109887, 8.377577533009589], + [7.421792588856135, 7.891185744455209]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_uniform_range_bounds(self): + fmin = np.finfo('float').min + fmax = np.finfo('float').max + + func = random.uniform + assert_raises(OverflowError, func, -np.inf, 0) + assert_raises(OverflowError, func, 0, np.inf) + assert_raises(OverflowError, func, fmin, fmax) + assert_raises(OverflowError, func, [-np.inf], [0]) + assert_raises(OverflowError, func, [0], [np.inf]) + + # (fmax / 1e17) - fmin is within range, so this should not throw + # account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX > + # DBL_MAX by increasing fmin a bit + random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17) + + def test_uniform_zero_range(self): + func = random.uniform + result = func(1.5, 1.5) + assert_allclose(result, 1.5) + result = func([0.0, np.pi], [0.0, np.pi]) + assert_allclose(result, [0.0, np.pi]) + result = func([[2145.12], [2145.12]], [2145.12, 2145.12]) + assert_allclose(result, 2145.12 + np.zeros((2, 2))) + + def test_uniform_neg_range(self): + func = random.uniform + assert_raises(ValueError, func, 2, 1) + assert_raises(ValueError, func, [1, 2], [1, 1]) + assert_raises(ValueError, func, [[0, 1],[2, 3]], 2) + + def test_scalar_exception_propagation(self): + # Tests that exceptions are correctly propagated in distributions + # when called with objects that throw exceptions when converted to + # scalars. 
+        #
+        # Regression test for gh-8865
+
+        class ThrowingFloat(np.ndarray):
+            def __float__(self):
+                raise TypeError
+
+        throwing_float = np.array(1.0).view(ThrowingFloat)
+        assert_raises(TypeError, random.uniform, throwing_float,
+                      throwing_float)
+
+        class ThrowingInteger(np.ndarray):
+            def __int__(self):
+                raise TypeError
+
+        throwing_int = np.array(1).view(ThrowingInteger)
+        assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1)
+
+    def test_vonmises(self):
+        random = Generator(MT19937(self.seed))
+        actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
+        desired = np.array([[ 1.107972248690106, 2.841536476232361],
+                            [ 1.832602376042457, 1.945511926976032],
+                            [-0.260147475776542, 2.058047492231698]])
+        assert_array_almost_equal(actual, desired, decimal=15)
+
+    def test_vonmises_small(self):
+        # check for an infinite loop, gh-4720
+        random = Generator(MT19937(self.seed))
+        r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
+        assert_(np.isfinite(r).all())
+
+    def test_vonmises_nan(self):
+        random = Generator(MT19937(self.seed))
+        r = random.vonmises(mu=0., kappa=np.nan)
+        assert_(np.isnan(r))
+
+    @pytest.mark.parametrize("kappa", [1e4, 1e15])
+    def test_vonmises_large_kappa(self, kappa):
+        random = Generator(MT19937(self.seed))
+        rs = RandomState(random.bit_generator)
+        state = random.bit_generator.state
+
+        random_state_vals = rs.vonmises(0, kappa, size=10)
+        random.bit_generator.state = state
+        gen_vals = random.vonmises(0, kappa, size=10)
+        if kappa < 1e6:
+            assert_allclose(random_state_vals, gen_vals)
+        else:
+            assert np.all(random_state_vals != gen_vals)
+
+    @pytest.mark.parametrize("mu", [-7., -np.pi, -3.1, np.pi, 3.2])
+    @pytest.mark.parametrize("kappa", [1e-9, 1e-6, 1, 1e3, 1e15])
+    def test_vonmises_large_kappa_range(self, mu, kappa):
+        r = random.vonmises(mu, kappa, 50)
+        assert_(np.all(r > -np.pi) and np.all(r <= np.pi))
+
+    def test_wald(self):
+        random = Generator(MT19937(self.seed))
+        actual = random.wald(mean=1.23, scale=1.54, size=(3, 2))
+        desired = np.array([[0.26871721804551, 3.2233942732115 ],
+                            [2.20328374987066, 2.40958405189353],
+                            [2.07093587449261, 0.73073890064369]])
+        assert_array_almost_equal(actual, desired, decimal=14)
+
+    def test_weibull(self):
+        random = Generator(MT19937(self.seed))
+        actual = random.weibull(a=1.23, size=(3, 2))
+        desired = np.array([[0.138613914769468, 1.306463419753191],
+                            [0.111623365934763, 1.446570494646721],
+                            [1.257145775276011, 1.914247725027957]])
+        assert_array_almost_equal(actual, desired, decimal=15)
+
+    def test_weibull_0(self):
+        random = Generator(MT19937(self.seed))
+        assert_equal(random.weibull(a=0, size=12), np.zeros(12))
+        assert_raises(ValueError, random.weibull, a=-0.)
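+    # [Editor's sketch, not part of upstream numpy: why `a=-0.` gets its
+    # own assertion above — negative zero compares equal to zero, so a
+    # plain `a < 0` check would not reject it, yet the parameter
+    # validation still raises. Underscore-prefixed so pytest skips it.]
+    def _negative_zero_sketch(self):
+        assert -0.0 == 0.0
+        assert_raises(ValueError, random.weibull, a=-0.)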
+ + def test_zipf(self): + random = Generator(MT19937(self.seed)) + actual = random.zipf(a=1.23, size=(3, 2)) + desired = np.array([[ 1, 1], + [ 10, 867], + [354, 2]]) + assert_array_equal(actual, desired) + + +class TestBroadcast: + # tests that functions that broadcast behave + # correctly when presented with non-scalar arguments + def setup(self): + self.seed = 123456789 + + + def test_uniform(self): + random = Generator(MT19937(self.seed)) + low = [0] + high = [1] + uniform = random.uniform + desired = np.array([0.16693771389729, 0.19635129550675, 0.75563050964095]) + + random = Generator(MT19937(self.seed)) + actual = random.uniform(low * 3, high) + assert_array_almost_equal(actual, desired, decimal=14) + + random = Generator(MT19937(self.seed)) + actual = random.uniform(low, high * 3) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_normal(self): + loc = [0] + scale = [1] + bad_scale = [-1] + random = Generator(MT19937(self.seed)) + desired = np.array([-0.38736406738527, 0.79594375042255, 0.0197076236097]) + + random = Generator(MT19937(self.seed)) + actual = random.normal(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.normal, loc * 3, bad_scale) + + random = Generator(MT19937(self.seed)) + normal = random.normal + actual = normal(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, normal, loc, bad_scale * 3) + + def test_beta(self): + a = [1] + b = [2] + bad_a = [-1] + bad_b = [-2] + desired = np.array([0.18719338682602, 0.73234824491364, 0.17928615186455]) + + random = Generator(MT19937(self.seed)) + beta = random.beta + actual = beta(a * 3, b) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, beta, bad_a * 3, b) + assert_raises(ValueError, beta, a * 3, bad_b) + + random = Generator(MT19937(self.seed)) + actual = random.beta(a, b * 3) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_exponential(self): + scale = [1] + bad_scale = [-1] + desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629]) + + random = Generator(MT19937(self.seed)) + actual = random.exponential(scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.exponential, bad_scale * 3) + + def test_standard_gamma(self): + shape = [1] + bad_shape = [-1] + desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629]) + + random = Generator(MT19937(self.seed)) + std_gamma = random.standard_gamma + actual = std_gamma(shape * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, std_gamma, bad_shape * 3) + + def test_gamma(self): + shape = [1] + scale = [2] + bad_shape = [-1] + bad_scale = [-2] + desired = np.array([1.34491986425611, 0.42760990636187, 1.4355697857258]) + + random = Generator(MT19937(self.seed)) + gamma = random.gamma + actual = gamma(shape * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gamma, bad_shape * 3, scale) + assert_raises(ValueError, gamma, shape * 3, bad_scale) + + random = Generator(MT19937(self.seed)) + gamma = random.gamma + actual = gamma(shape, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gamma, bad_shape, scale * 3) + assert_raises(ValueError, gamma, shape, bad_scale * 3) + + def test_f(self): + dfnum = [1] + dfden = [2] + bad_dfnum = [-1] + bad_dfden = [-2] + desired = np.array([0.07765056244107, 
7.72951397913186, 0.05786093891763]) + + random = Generator(MT19937(self.seed)) + f = random.f + actual = f(dfnum * 3, dfden) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, f, bad_dfnum * 3, dfden) + assert_raises(ValueError, f, dfnum * 3, bad_dfden) + + random = Generator(MT19937(self.seed)) + f = random.f + actual = f(dfnum, dfden * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, f, bad_dfnum, dfden * 3) + assert_raises(ValueError, f, dfnum, bad_dfden * 3) + + def test_noncentral_f(self): + dfnum = [2] + dfden = [3] + nonc = [4] + bad_dfnum = [0] + bad_dfden = [-1] + bad_nonc = [-2] + desired = np.array([2.02434240411421, 12.91838601070124, 1.24395160354629]) + + random = Generator(MT19937(self.seed)) + nonc_f = random.noncentral_f + actual = nonc_f(dfnum * 3, dfden, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3))) + + assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc) + assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc) + assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc) + + random = Generator(MT19937(self.seed)) + nonc_f = random.noncentral_f + actual = nonc_f(dfnum, dfden * 3, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc) + assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc) + assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc) + + random = Generator(MT19937(self.seed)) + nonc_f = random.noncentral_f + actual = nonc_f(dfnum, dfden, nonc * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3) + assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3) + assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3) + + def test_noncentral_f_small_df(self): + random = Generator(MT19937(self.seed)) + desired = np.array([0.04714867120827, 0.1239390327694]) + actual = random.noncentral_f(0.9, 0.9, 2, size=2) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_chisquare(self): + df = [1] + bad_df = [-1] + desired = np.array([0.05573640064251, 1.47220224353539, 2.9469379318589]) + + random = Generator(MT19937(self.seed)) + actual = random.chisquare(df * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.chisquare, bad_df * 3) + + def test_noncentral_chisquare(self): + df = [1] + nonc = [2] + bad_df = [-1] + bad_nonc = [-2] + desired = np.array([0.07710766249436, 5.27829115110304, 0.630732147399]) + + random = Generator(MT19937(self.seed)) + nonc_chi = random.noncentral_chisquare + actual = nonc_chi(df * 3, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_chi, bad_df * 3, nonc) + assert_raises(ValueError, nonc_chi, df * 3, bad_nonc) + + random = Generator(MT19937(self.seed)) + nonc_chi = random.noncentral_chisquare + actual = nonc_chi(df, nonc * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_chi, bad_df, nonc * 3) + assert_raises(ValueError, nonc_chi, df, bad_nonc * 3) + + def test_standard_t(self): + df = [1] + bad_df = [-1] + desired = np.array([-1.39498829447098, -1.23058658835223, 0.17207021065983]) + + random = Generator(MT19937(self.seed)) + actual = random.standard_t(df * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.standard_t, 
bad_df * 3) + + def test_vonmises(self): + mu = [2] + kappa = [1] + bad_kappa = [-1] + desired = np.array([2.25935584988528, 2.23326261461399, -2.84152146503326]) + + random = Generator(MT19937(self.seed)) + actual = random.vonmises(mu * 3, kappa) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.vonmises, mu * 3, bad_kappa) + + random = Generator(MT19937(self.seed)) + actual = random.vonmises(mu, kappa * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.vonmises, mu, bad_kappa * 3) + + def test_pareto(self): + a = [1] + bad_a = [-1] + desired = np.array([0.95905052946317, 0.2383810889437 , 1.04988745750013]) + + random = Generator(MT19937(self.seed)) + actual = random.pareto(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.pareto, bad_a * 3) + + def test_weibull(self): + a = [1] + bad_a = [-1] + desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629]) + + random = Generator(MT19937(self.seed)) + actual = random.weibull(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.weibull, bad_a * 3) + + def test_power(self): + a = [1] + bad_a = [-1] + desired = np.array([0.48954864361052, 0.19249412888486, 0.51216834058807]) + + random = Generator(MT19937(self.seed)) + actual = random.power(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.power, bad_a * 3) + + def test_laplace(self): + loc = [0] + scale = [1] + bad_scale = [-1] + desired = np.array([-1.09698732625119, -0.93470271947368, 0.71592671378202]) + + random = Generator(MT19937(self.seed)) + laplace = random.laplace + actual = laplace(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, laplace, loc * 3, bad_scale) + + random = Generator(MT19937(self.seed)) + laplace = random.laplace + actual = laplace(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, laplace, loc, bad_scale * 3) + + def test_gumbel(self): + loc = [0] + scale = [1] + bad_scale = [-1] + desired = np.array([1.70020068231762, 1.52054354273631, -0.34293267607081]) + + random = Generator(MT19937(self.seed)) + gumbel = random.gumbel + actual = gumbel(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gumbel, loc * 3, bad_scale) + + random = Generator(MT19937(self.seed)) + gumbel = random.gumbel + actual = gumbel(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gumbel, loc, bad_scale * 3) + + def test_logistic(self): + loc = [0] + scale = [1] + bad_scale = [-1] + desired = np.array([-1.607487640433, -1.40925686003678, 1.12887112820397]) + + random = Generator(MT19937(self.seed)) + actual = random.logistic(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.logistic, loc * 3, bad_scale) + + random = Generator(MT19937(self.seed)) + actual = random.logistic(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.logistic, loc, bad_scale * 3) + assert_equal(random.logistic(1.0, 0.0), 1.0) + + def test_lognormal(self): + mean = [0] + sigma = [1] + bad_sigma = [-1] + desired = np.array([0.67884390500697, 2.21653186290321, 1.01990310084276]) + + random = Generator(MT19937(self.seed)) + lognormal = random.lognormal + actual = 
lognormal(mean * 3, sigma) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, lognormal, mean * 3, bad_sigma) + + random = Generator(MT19937(self.seed)) + actual = random.lognormal(mean, sigma * 3) + assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3) + + def test_rayleigh(self): + scale = [1] + bad_scale = [-1] + desired = np.array( + [1.1597068009872629, + 0.6539188836253857, + 1.1981526554349398] + ) + + random = Generator(MT19937(self.seed)) + actual = random.rayleigh(scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.rayleigh, bad_scale * 3) + + def test_wald(self): + mean = [0.5] + scale = [1] + bad_mean = [0] + bad_scale = [-2] + desired = np.array([0.38052407392905, 0.50701641508592, 0.484935249864]) + + random = Generator(MT19937(self.seed)) + actual = random.wald(mean * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.wald, bad_mean * 3, scale) + assert_raises(ValueError, random.wald, mean * 3, bad_scale) + + random = Generator(MT19937(self.seed)) + actual = random.wald(mean, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.wald, bad_mean, scale * 3) + assert_raises(ValueError, random.wald, mean, bad_scale * 3) + + def test_triangular(self): + left = [1] + right = [3] + mode = [2] + bad_left_one = [3] + bad_mode_one = [4] + bad_left_two, bad_mode_two = right * 2 + desired = np.array([1.57781954604754, 1.62665986867957, 2.30090130831326]) + + random = Generator(MT19937(self.seed)) + triangular = random.triangular + actual = triangular(left * 3, mode, right) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one * 3, mode, right) + assert_raises(ValueError, triangular, left * 3, bad_mode_one, right) + assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, + right) + + random = Generator(MT19937(self.seed)) + triangular = random.triangular + actual = triangular(left, mode * 3, right) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one, mode * 3, right) + assert_raises(ValueError, triangular, left, bad_mode_one * 3, right) + assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, + right) + + random = Generator(MT19937(self.seed)) + triangular = random.triangular + actual = triangular(left, mode, right * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one, mode, right * 3) + assert_raises(ValueError, triangular, left, bad_mode_one, right * 3) + assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, + right * 3) + + assert_raises(ValueError, triangular, 10., 0., 20.) + assert_raises(ValueError, triangular, 10., 25., 20.) + assert_raises(ValueError, triangular, 10., 10., 10.) 
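+    # [Editor's sketch, not part of upstream numpy: the broadcasting
+    # identity these tests rely on — with a fresh seed, a length-3
+    # argument against a length-1 argument draws the same values no
+    # matter which side is expanded. Underscore-prefixed so pytest
+    # skips it.]
+    def _broadcast_sketch(self):
+        left = Generator(MT19937(self.seed)).uniform([0] * 3, [1])
+        right = Generator(MT19937(self.seed)).uniform([0], [1] * 3)
+        assert_array_equal(left, right)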
+ + def test_binomial(self): + n = [1] + p = [0.5] + bad_n = [-1] + bad_p_one = [-1] + bad_p_two = [1.5] + desired = np.array([0, 0, 1]) + + random = Generator(MT19937(self.seed)) + binom = random.binomial + actual = binom(n * 3, p) + assert_array_equal(actual, desired) + assert_raises(ValueError, binom, bad_n * 3, p) + assert_raises(ValueError, binom, n * 3, bad_p_one) + assert_raises(ValueError, binom, n * 3, bad_p_two) + + random = Generator(MT19937(self.seed)) + actual = random.binomial(n, p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, binom, bad_n, p * 3) + assert_raises(ValueError, binom, n, bad_p_one * 3) + assert_raises(ValueError, binom, n, bad_p_two * 3) + + def test_negative_binomial(self): + n = [1] + p = [0.5] + bad_n = [-1] + bad_p_one = [-1] + bad_p_two = [1.5] + desired = np.array([0, 2, 1], dtype=np.int64) + + random = Generator(MT19937(self.seed)) + neg_binom = random.negative_binomial + actual = neg_binom(n * 3, p) + assert_array_equal(actual, desired) + assert_raises(ValueError, neg_binom, bad_n * 3, p) + assert_raises(ValueError, neg_binom, n * 3, bad_p_one) + assert_raises(ValueError, neg_binom, n * 3, bad_p_two) + + random = Generator(MT19937(self.seed)) + neg_binom = random.negative_binomial + actual = neg_binom(n, p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, neg_binom, bad_n, p * 3) + assert_raises(ValueError, neg_binom, n, bad_p_one * 3) + assert_raises(ValueError, neg_binom, n, bad_p_two * 3) + + def test_poisson(self): + + lam = [1] + bad_lam_one = [-1] + desired = np.array([0, 0, 3]) + + random = Generator(MT19937(self.seed)) + max_lam = random._poisson_lam_max + bad_lam_two = [max_lam * 2] + poisson = random.poisson + actual = poisson(lam * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, poisson, bad_lam_one * 3) + assert_raises(ValueError, poisson, bad_lam_two * 3) + + def test_zipf(self): + a = [2] + bad_a = [0] + desired = np.array([1, 8, 1]) + + random = Generator(MT19937(self.seed)) + zipf = random.zipf + actual = zipf(a * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, zipf, bad_a * 3) + with np.errstate(invalid='ignore'): + assert_raises(ValueError, zipf, np.nan) + assert_raises(ValueError, zipf, [0, 0, np.nan]) + + def test_geometric(self): + p = [0.5] + bad_p_one = [-1] + bad_p_two = [1.5] + desired = np.array([1, 1, 3]) + + random = Generator(MT19937(self.seed)) + geometric = random.geometric + actual = geometric(p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, geometric, bad_p_one * 3) + assert_raises(ValueError, geometric, bad_p_two * 3) + + def test_hypergeometric(self): + ngood = [1] + nbad = [2] + nsample = [2] + bad_ngood = [-1] + bad_nbad = [-2] + bad_nsample_one = [-1] + bad_nsample_two = [4] + desired = np.array([0, 0, 1]) + + random = Generator(MT19937(self.seed)) + actual = random.hypergeometric(ngood * 3, nbad, nsample) + assert_array_equal(actual, desired) + assert_raises(ValueError, random.hypergeometric, bad_ngood * 3, nbad, nsample) + assert_raises(ValueError, random.hypergeometric, ngood * 3, bad_nbad, nsample) + assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_one) + assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_two) + + random = Generator(MT19937(self.seed)) + actual = random.hypergeometric(ngood, nbad * 3, nsample) + assert_array_equal(actual, desired) + assert_raises(ValueError, random.hypergeometric, bad_ngood, nbad * 3, nsample) + 
assert_raises(ValueError, random.hypergeometric, ngood, bad_nbad * 3, nsample) + assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_one) + assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_two) + + random = Generator(MT19937(self.seed)) + hypergeom = random.hypergeometric + actual = hypergeom(ngood, nbad, nsample * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3) + assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3) + assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3) + assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3) + + assert_raises(ValueError, hypergeom, -1, 10, 20) + assert_raises(ValueError, hypergeom, 10, -1, 20) + assert_raises(ValueError, hypergeom, 10, 10, -1) + assert_raises(ValueError, hypergeom, 10, 10, 25) + + # ValueError for arguments that are too big. + assert_raises(ValueError, hypergeom, 2**30, 10, 20) + assert_raises(ValueError, hypergeom, 999, 2**31, 50) + assert_raises(ValueError, hypergeom, 999, [2**29, 2**30], 1000) + + def test_logseries(self): + p = [0.5] + bad_p_one = [2] + bad_p_two = [-1] + desired = np.array([1, 1, 1]) + + random = Generator(MT19937(self.seed)) + logseries = random.logseries + actual = logseries(p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, logseries, bad_p_one * 3) + assert_raises(ValueError, logseries, bad_p_two * 3) + + def test_multinomial(self): + random = Generator(MT19937(self.seed)) + actual = random.multinomial([5, 20], [1 / 6.] * 6, size=(3, 2)) + desired = np.array([[[0, 0, 2, 1, 2, 0], + [2, 3, 6, 4, 2, 3]], + [[1, 0, 1, 0, 2, 1], + [7, 2, 2, 1, 4, 4]], + [[0, 2, 0, 1, 2, 0], + [3, 2, 3, 3, 4, 5]]], dtype=np.int64) + assert_array_equal(actual, desired) + + random = Generator(MT19937(self.seed)) + actual = random.multinomial([5, 20], [1 / 6.] * 6) + desired = np.array([[0, 0, 2, 1, 2, 0], + [2, 3, 6, 4, 2, 3]], dtype=np.int64) + assert_array_equal(actual, desired) + + +class TestThread: + # make sure each state produces the same sequence even in threads + def setup(self): + self.seeds = range(4) + + def check_function(self, function, sz): + from threading import Thread + + out1 = np.empty((len(self.seeds),) + sz) + out2 = np.empty((len(self.seeds),) + sz) + + # threaded generation + t = [Thread(target=function, args=(Generator(MT19937(s)), o)) + for s, o in zip(self.seeds, out1)] + [x.start() for x in t] + [x.join() for x in t] + + # the same serial + for s, o in zip(self.seeds, out2): + function(Generator(MT19937(s)), o) + + # these platforms change x87 fpu precision mode in threads + if np.intp().dtype.itemsize == 4 and sys.platform == "win32": + assert_array_almost_equal(out1, out2) + else: + assert_array_equal(out1, out2) + + def test_normal(self): + def gen_random(state, out): + out[...] = state.normal(size=10000) + + self.check_function(gen_random, sz=(10000,)) + + def test_exp(self): + def gen_random(state, out): + out[...] = state.exponential(scale=np.ones((100, 1000))) + + self.check_function(gen_random, sz=(100, 1000)) + + def test_multinomial(self): + def gen_random(state, out): + out[...] = state.multinomial(10, [1 / 6.] 
* 6, size=10000)
+
+        self.check_function(gen_random, sz=(10000, 6))
+
+
+# See Issue #4263
+class TestSingleEltArrayInput:
+    def setup(self):
+        self.argOne = np.array([2])
+        self.argTwo = np.array([3])
+        self.argThree = np.array([4])
+        self.tgtShape = (1,)
+
+    def test_one_arg_funcs(self):
+        funcs = (random.exponential, random.standard_gamma,
+                 random.chisquare, random.standard_t,
+                 random.pareto, random.weibull,
+                 random.power, random.rayleigh,
+                 random.poisson, random.zipf,
+                 random.geometric, random.logseries)
+
+        probfuncs = (random.geometric, random.logseries)
+
+        for func in funcs:
+            if func in probfuncs:  # p < 1.0
+                out = func(np.array([0.5]))
+
+            else:
+                out = func(self.argOne)
+
+            assert_equal(out.shape, self.tgtShape)
+
+    def test_two_arg_funcs(self):
+        funcs = (random.uniform, random.normal,
+                 random.beta, random.gamma,
+                 random.f, random.noncentral_chisquare,
+                 random.vonmises, random.laplace,
+                 random.gumbel, random.logistic,
+                 random.lognormal, random.wald,
+                 random.binomial, random.negative_binomial)
+
+        probfuncs = (random.binomial, random.negative_binomial)
+
+        for func in funcs:
+            if func in probfuncs:  # p <= 1
+                argTwo = np.array([0.5])
+
+            else:
+                argTwo = self.argTwo
+
+            out = func(self.argOne, argTwo)
+            assert_equal(out.shape, self.tgtShape)
+
+            out = func(self.argOne[0], argTwo)
+            assert_equal(out.shape, self.tgtShape)
+
+            out = func(self.argOne, argTwo[0])
+            assert_equal(out.shape, self.tgtShape)
+
+    def test_integers(self, endpoint):
+        itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
+                 np.int32, np.uint32, np.int64, np.uint64]
+        func = random.integers
+        high = np.array([1])
+        low = np.array([0])
+
+        for dt in itype:
+            out = func(low, high, endpoint=endpoint, dtype=dt)
+            assert_equal(out.shape, self.tgtShape)
+
+            out = func(low[0], high, endpoint=endpoint, dtype=dt)
+            assert_equal(out.shape, self.tgtShape)
+
+            out = func(low, high[0], endpoint=endpoint, dtype=dt)
+            assert_equal(out.shape, self.tgtShape)
+
+    def test_three_arg_funcs(self):
+        funcs = [random.noncentral_f, random.triangular,
+                 random.hypergeometric]
+
+        for func in funcs:
+            out = func(self.argOne, self.argTwo, self.argThree)
+            assert_equal(out.shape, self.tgtShape)
+
+            out = func(self.argOne[0], self.argTwo, self.argThree)
+            assert_equal(out.shape, self.tgtShape)
+
+            out = func(self.argOne, self.argTwo[0], self.argThree)
+            assert_equal(out.shape, self.tgtShape)
+
+
+@pytest.mark.parametrize("config", JUMP_TEST_DATA)
+def test_jumped(config):
+    # Each config contains the initial seed, a number of raw steps,
+    # the sha256 hashes of the initial and the final states' keys, and
+    # the positions of the initial and the final state.
+    # These were produced using the original C implementation.
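+    # [Editor's sketch, not from upstream numpy: jumped() returns a new
+    # MT19937 whose state has been advanced as if 2**128 draws had been
+    # made, e.g.
+    #     leaped = MT19937(1234).jumped()
+    # giving an independent, non-overlapping stream; the hash checks
+    # below pin that transformation to reference values.]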
+ seed = config["seed"] + steps = config["steps"] + + mt19937 = MT19937(seed) + # Burn step + mt19937.random_raw(steps) + key = mt19937.state["state"]["key"] + if sys.byteorder == 'big': + key = key.byteswap() + sha256 = hashlib.sha256(key) + assert mt19937.state["state"]["pos"] == config["initial"]["pos"] + assert sha256.hexdigest() == config["initial"]["key_sha256"] + + jumped = mt19937.jumped() + key = jumped.state["state"]["key"] + if sys.byteorder == 'big': + key = key.byteswap() + sha256 = hashlib.sha256(key) + assert jumped.state["state"]["pos"] == config["jumped"]["pos"] + assert sha256.hexdigest() == config["jumped"]["key_sha256"] + + +def test_broadcast_size_error(): + mu = np.ones(3) + sigma = np.ones((4, 3)) + size = (10, 4, 2) + assert random.normal(mu, sigma, size=(5, 4, 3)).shape == (5, 4, 3) + with pytest.raises(ValueError): + random.normal(mu, sigma, size=size) + with pytest.raises(ValueError): + random.normal(mu, sigma, size=(1, 3)) + with pytest.raises(ValueError): + random.normal(mu, sigma, size=(4, 1, 1)) + # 1 arg + shape = np.ones((4, 3)) + with pytest.raises(ValueError): + random.standard_gamma(shape, size=size) + with pytest.raises(ValueError): + random.standard_gamma(shape, size=(3,)) + with pytest.raises(ValueError): + random.standard_gamma(shape, size=3) + # Check out + out = np.empty(size) + with pytest.raises(ValueError): + random.standard_gamma(shape, out=out) + + # 2 arg + with pytest.raises(ValueError): + random.binomial(1, [0.3, 0.7], size=(2, 1)) + with pytest.raises(ValueError): + random.binomial([1, 2], 0.3, size=(2, 1)) + with pytest.raises(ValueError): + random.binomial([1, 2], [0.3, 0.7], size=(2, 1)) + with pytest.raises(ValueError): + random.multinomial([2, 2], [.3, .7], size=(2, 1)) + + # 3 arg + a = random.chisquare(5, size=3) + b = random.chisquare(5, size=(4, 3)) + c = random.chisquare(5, size=(5, 4, 3)) + assert random.noncentral_f(a, b, c).shape == (5, 4, 3) + with pytest.raises(ValueError, match=r"Output size \(6, 5, 1, 1\) is"): + random.noncentral_f(a, b, c, size=(6, 5, 1, 1)) + + +def test_broadcast_size_scalar(): + mu = np.ones(3) + sigma = np.ones(3) + random.normal(mu, sigma, size=3) + with pytest.raises(ValueError): + random.normal(mu, sigma, size=2) + + +def test_ragged_shuffle(): + # GH 18142 + seq = [[], [], 1] + gen = Generator(MT19937(0)) + assert_no_warnings(gen.shuffle, seq) + assert seq == [1, [], []] + + +@pytest.mark.parametrize("high", [-2, [-2]]) +@pytest.mark.parametrize("endpoint", [True, False]) +def test_single_arg_integer_exception(high, endpoint): + # GH 14333 + gen = Generator(MT19937(0)) + msg = 'high < 0' if endpoint else 'high <= 0' + with pytest.raises(ValueError, match=msg): + gen.integers(high, endpoint=endpoint) + msg = 'low > high' if endpoint else 'low >= high' + with pytest.raises(ValueError, match=msg): + gen.integers(-1, high, endpoint=endpoint) + with pytest.raises(ValueError, match=msg): + gen.integers([-1], high, endpoint=endpoint) + + +@pytest.mark.parametrize("dtype", ["f4", "f8"]) +def test_c_contig_req_out(dtype): + # GH 18704 + out = np.empty((2, 3), order="F", dtype=dtype) + shape = [1, 2, 3] + with pytest.raises(ValueError, match="Supplied output array"): + random.standard_gamma(shape, out=out, dtype=dtype) + with pytest.raises(ValueError, match="Supplied output array"): + random.standard_gamma(shape, out=out, size=out.shape, dtype=dtype) + + +@pytest.mark.parametrize("dtype", ["f4", "f8"]) +@pytest.mark.parametrize("order", ["F", "C"]) +@pytest.mark.parametrize("dist", [random.standard_normal, 
random.random]) +def test_contig_req_out(dist, order, dtype): + # GH 18704 + out = np.empty((2, 3), dtype=dtype, order=order) + variates = dist(out=out, dtype=dtype) + assert variates is out + variates = dist(out=out, dtype=dtype, size=out.shape) + assert variates is out diff --git a/MLPY/Lib/site-packages/numpy/random/tests/test_generator_mt19937_regressions.py b/MLPY/Lib/site-packages/numpy/random/tests/test_generator_mt19937_regressions.py new file mode 100644 index 0000000000000000000000000000000000000000..ed1d923043cf63945dfc557ba61940dcbd20f03a --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/tests/test_generator_mt19937_regressions.py @@ -0,0 +1,150 @@ +from numpy.testing import (assert_, assert_array_equal) +import numpy as np +import pytest +from numpy.random import Generator, MT19937, RandomState + +mt19937 = Generator(MT19937()) + + +class TestRegression: + + def test_vonmises_range(self): + # Make sure generated random variables are in [-pi, pi]. + # Regression test for ticket #986. + for mu in np.linspace(-7., 7., 5): + r = mt19937.vonmises(mu, 1, 50) + assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) + + def test_hypergeometric_range(self): + # Test for ticket #921 + assert_(np.all(mt19937.hypergeometric(3, 18, 11, size=10) < 4)) + assert_(np.all(mt19937.hypergeometric(18, 3, 11, size=10) > 0)) + + # Test for ticket #5623 + args = (2**20 - 2, 2**20 - 2, 2**20 - 2) # Check for 32-bit systems + assert_(mt19937.hypergeometric(*args) > 0) + + def test_logseries_convergence(self): + # Test for ticket #923 + N = 1000 + mt19937 = Generator(MT19937(0)) + rvsn = mt19937.logseries(0.8, size=N) + # these two frequency counts should be close to theoretical + # numbers with this large sample + # theoretical large N result is 0.49706795 + freq = np.sum(rvsn == 1) / float(N) + msg = f'Frequency was {freq:f}, should be > 0.45' + assert_(freq > 0.45, msg) + # theoretical large N result is 0.19882718 + freq = np.sum(rvsn == 2) / float(N) + msg = f'Frequency was {freq:f}, should be < 0.23' + assert_(freq < 0.23, msg) + + def test_shuffle_mixed_dimension(self): + # Test for trac ticket #2074 + for t in [[1, 2, 3, None], + [(1, 1), (2, 2), (3, 3), None], + [1, (2, 2), (3, 3), None], + [(1, 1), 2, 3, None]]: + mt19937 = Generator(MT19937(12345)) + shuffled = np.array(t, dtype=object) + mt19937.shuffle(shuffled) + expected = np.array([t[2], t[0], t[3], t[1]], dtype=object) + assert_array_equal(np.array(shuffled, dtype=object), expected) + + def test_call_within_randomstate(self): + # Check that custom BitGenerator does not call into global state + res = np.array([1, 8, 0, 1, 5, 3, 3, 8, 1, 4]) + for i in range(3): + mt19937 = Generator(MT19937(i)) + m = Generator(MT19937(4321)) + # If m.state is not honored, the result will change + assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res) + + def test_multivariate_normal_size_types(self): + # Test for multivariate_normal issue with 'size' argument. + # Check that the multivariate_normal size argument can be a + # numpy integer. 
+ mt19937.multivariate_normal([0], [[0]], size=1) + mt19937.multivariate_normal([0], [[0]], size=np.int_(1)) + mt19937.multivariate_normal([0], [[0]], size=np.int64(1)) + + def test_beta_small_parameters(self): + # Test that beta with small a and b parameters does not produce + # NaNs due to roundoff errors causing 0 / 0, gh-5851 + mt19937 = Generator(MT19937(1234567890)) + x = mt19937.beta(0.0001, 0.0001, size=100) + assert_(not np.any(np.isnan(x)), 'Nans in mt19937.beta') + + def test_choice_sum_of_probs_tolerance(self): + # The sum of probs should be 1.0 with some tolerance. + # For low precision dtypes the tolerance was too tight. + # See numpy github issue 6123. + mt19937 = Generator(MT19937(1234)) + a = [1, 2, 3] + counts = [4, 4, 2] + for dt in np.float16, np.float32, np.float64: + probs = np.array(counts, dtype=dt) / sum(counts) + c = mt19937.choice(a, p=probs) + assert_(c in a) + with pytest.raises(ValueError): + mt19937.choice(a, p=probs*0.9) + + def test_shuffle_of_array_of_different_length_strings(self): + # Test that permuting an array of different length strings + # will not cause a segfault on garbage collection + # Tests gh-7710 + mt19937 = Generator(MT19937(1234)) + + a = np.array(['a', 'a' * 1000]) + + for _ in range(100): + mt19937.shuffle(a) + + # Force Garbage Collection - should not segfault. + import gc + gc.collect() + + def test_shuffle_of_array_of_objects(self): + # Test that permuting an array of objects will not cause + # a segfault on garbage collection. + # See gh-7719 + mt19937 = Generator(MT19937(1234)) + a = np.array([np.arange(1), np.arange(4)], dtype=object) + + for _ in range(1000): + mt19937.shuffle(a) + + # Force Garbage Collection - should not segfault. + import gc + gc.collect() + + def test_permutation_subclass(self): + class N(np.ndarray): + pass + + mt19937 = Generator(MT19937(1)) + orig = np.arange(3).view(N) + perm = mt19937.permutation(orig) + assert_array_equal(perm, np.array([2, 0, 1])) + assert_array_equal(orig, np.arange(3).view(N)) + + class M: + a = np.arange(5) + + def __array__(self): + return self.a + + mt19937 = Generator(MT19937(1)) + m = M() + perm = mt19937.permutation(m) + assert_array_equal(perm, np.array([4, 1, 3, 0, 2])) + assert_array_equal(m.__array__(), np.arange(5)) + + def test_gamma_0(self): + assert mt19937.standard_gamma(0.0) == 0.0 + assert_array_equal(mt19937.standard_gamma([0.0]), 0.0) + + actual = mt19937.standard_gamma([0.0], dtype='float') + expected = np.array([0.], dtype=np.float32) + assert_array_equal(actual, expected) diff --git a/MLPY/Lib/site-packages/numpy/random/tests/test_random.py b/MLPY/Lib/site-packages/numpy/random/tests/test_random.py new file mode 100644 index 0000000000000000000000000000000000000000..f1abad8f20b2ae2388db37150f4ac55c6225db32 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/tests/test_random.py @@ -0,0 +1,1739 @@ +import warnings + +import pytest + +import numpy as np +from numpy.testing import ( + assert_, assert_raises, assert_equal, assert_warns, + assert_no_warnings, assert_array_equal, assert_array_almost_equal, + suppress_warnings + ) +from numpy import random +import sys + + +class TestSeed: + def test_scalar(self): + s = np.random.RandomState(0) + assert_equal(s.randint(1000), 684) + s = np.random.RandomState(4294967295) + assert_equal(s.randint(1000), 419) + + def test_array(self): + s = np.random.RandomState(range(10)) + assert_equal(s.randint(1000), 468) + s = np.random.RandomState(np.arange(10)) + assert_equal(s.randint(1000), 468) + s = np.random.RandomState([0]) 
+ assert_equal(s.randint(1000), 973) + s = np.random.RandomState([4294967295]) + assert_equal(s.randint(1000), 265) + + def test_invalid_scalar(self): + # seed must be an unsigned 32 bit integer + assert_raises(TypeError, np.random.RandomState, -0.5) + assert_raises(ValueError, np.random.RandomState, -1) + + def test_invalid_array(self): + # seed must be an unsigned 32 bit integer + assert_raises(TypeError, np.random.RandomState, [-0.5]) + assert_raises(ValueError, np.random.RandomState, [-1]) + assert_raises(ValueError, np.random.RandomState, [4294967296]) + assert_raises(ValueError, np.random.RandomState, [1, 2, 4294967296]) + assert_raises(ValueError, np.random.RandomState, [1, -2, 4294967296]) + + def test_invalid_array_shape(self): + # gh-9832 + assert_raises(ValueError, np.random.RandomState, + np.array([], dtype=np.int64)) + assert_raises(ValueError, np.random.RandomState, [[1, 2, 3]]) + assert_raises(ValueError, np.random.RandomState, [[1, 2, 3], + [4, 5, 6]]) + + +class TestBinomial: + def test_n_zero(self): + # Tests the corner case of n == 0 for the binomial distribution. + # binomial(0, p) should be zero for any p in [0, 1]. + # This test addresses issue #3480. + zeros = np.zeros(2, dtype='int') + for p in [0, .5, 1]: + assert_(random.binomial(0, p) == 0) + assert_array_equal(random.binomial(zeros, p), zeros) + + def test_p_is_nan(self): + # Issue #4571. + assert_raises(ValueError, random.binomial, 1, np.nan) + + +class TestMultinomial: + def test_basic(self): + random.multinomial(100, [0.2, 0.8]) + + def test_zero_probability(self): + random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0]) + + def test_int_negative_interval(self): + assert_(-5 <= random.randint(-5, -1) < -1) + x = random.randint(-5, -1, 5) + assert_(np.all(-5 <= x)) + assert_(np.all(x < -1)) + + def test_size(self): + # gh-3173 + p = [0.5, 0.5] + assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.multinomial(1, p, [2, 2]).shape, (2, 2, 2)) + assert_equal(np.random.multinomial(1, p, (2, 2)).shape, (2, 2, 2)) + assert_equal(np.random.multinomial(1, p, np.array((2, 2))).shape, + (2, 2, 2)) + + assert_raises(TypeError, np.random.multinomial, 1, p, + float(1)) + + def test_multidimensional_pvals(self): + assert_raises(ValueError, np.random.multinomial, 10, [[0, 1]]) + assert_raises(ValueError, np.random.multinomial, 10, [[0], [1]]) + assert_raises(ValueError, np.random.multinomial, 10, [[[0], [1]], [[1], [0]]]) + assert_raises(ValueError, np.random.multinomial, 10, np.array([[0, 1], [1, 0]])) + + +class TestSetState: + def setup(self): + self.seed = 1234567890 + self.prng = random.RandomState(self.seed) + self.state = self.prng.get_state() + + def test_basic(self): + old = self.prng.tomaxint(16) + self.prng.set_state(self.state) + new = self.prng.tomaxint(16) + assert_(np.all(old == new)) + + def test_gaussian_reset(self): + # Make sure the cached every-other-Gaussian is reset. + old = self.prng.standard_normal(size=3) + self.prng.set_state(self.state) + new = self.prng.standard_normal(size=3) + assert_(np.all(old == new)) + + def test_gaussian_reset_in_media_res(self): + # When the state is saved with a cached Gaussian, make sure the + # cached Gaussian is restored. 
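+        # [Editor's note, not from upstream numpy: the legacy Gaussian
+        # path generates normals in pairs and caches the unused value —
+        # the "every-other-Gaussian" mentioned above — so get_state() /
+        # set_state() must round-trip that cached value too, which is
+        # what this test checks.]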
+ + self.prng.standard_normal() + state = self.prng.get_state() + old = self.prng.standard_normal(size=3) + self.prng.set_state(state) + new = self.prng.standard_normal(size=3) + assert_(np.all(old == new)) + + def test_backwards_compatibility(self): + # Make sure we can accept old state tuples that do not have the + # cached Gaussian value. + old_state = self.state[:-2] + x1 = self.prng.standard_normal(size=16) + self.prng.set_state(old_state) + x2 = self.prng.standard_normal(size=16) + self.prng.set_state(self.state) + x3 = self.prng.standard_normal(size=16) + assert_(np.all(x1 == x2)) + assert_(np.all(x1 == x3)) + + def test_negative_binomial(self): + # Ensure that the negative binomial results take floating point + # arguments without truncation. + self.prng.negative_binomial(0.5, 0.5) + + +class TestRandint: + + rfunc = np.random.randint + + # valid integer/boolean types + itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16, + np.int32, np.uint32, np.int64, np.uint64] + + def test_unsupported_type(self): + assert_raises(TypeError, self.rfunc, 1, dtype=float) + + def test_bounds_checking(self): + for dt in self.itype: + lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min + ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1 + assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt) + assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt) + assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt) + assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt) + + def test_rng_zero_and_extremes(self): + for dt in self.itype: + lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min + ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1 + + tgt = ubnd - 1 + assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + + tgt = lbnd + assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + + tgt = (lbnd + ubnd)//2 + assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + + def test_full_range(self): + # Test for ticket #1690 + + for dt in self.itype: + lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min + ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1 + + try: + self.rfunc(lbnd, ubnd, dtype=dt) + except Exception as e: + raise AssertionError("No error should have been raised, " + "but one was with the following " + "message:\n\n%s" % str(e)) + + def test_in_bounds_fuzz(self): + # Don't use fixed seed + np.random.seed() + + for dt in self.itype[1:]: + for ubnd in [4, 8, 16]: + vals = self.rfunc(2, ubnd, size=2**16, dtype=dt) + assert_(vals.max() < ubnd) + assert_(vals.min() >= 2) + + vals = self.rfunc(0, 2, size=2**16, dtype=np.bool_) + + assert_(vals.max() < 2) + assert_(vals.min() >= 0) + + def test_repeatability(self): + import hashlib + # We use a sha256 hash of generated sequences of 1000 samples + # in the range [0, 6) for all but bool, where the range + # is [0, 2). Hashes are for little endian numbers. 
+        tgt = {'bool': '509aea74d792fb931784c4b0135392c65aec64beee12b0cc167548a2c3d31e71',
+               'int16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4',
+               'int32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f',
+               'int64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e',
+               'int8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404',
+               'uint16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4',
+               'uint32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f',
+               'uint64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e',
+               'uint8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404'}
+
+        for dt in self.itype[1:]:
+            np.random.seed(1234)
+
+            # view as little endian for hash
+            if sys.byteorder == 'little':
+                val = self.rfunc(0, 6, size=1000, dtype=dt)
+            else:
+                val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap()
+
+            res = hashlib.sha256(val.view(np.int8)).hexdigest()
+            assert_(tgt[np.dtype(dt).name] == res)
+
+        # bools do not depend on endianness
+        np.random.seed(1234)
+        val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8)
+        res = hashlib.sha256(val).hexdigest()
+        assert_(tgt[np.dtype(bool).name] == res)
+
+    def test_int64_uint64_corner_case(self):
+        # When stored in Numpy arrays, `lbnd` is cast
+        # as np.int64, and `ubnd` is cast as np.uint64.
+        # Checking whether `lbnd` >= `ubnd` used to be
+        # done solely via direct comparison, which is incorrect
+        # because when Numpy tries to compare both numbers,
+        # it casts both to np.float64 because there is
+        # no integer superset of np.int64 and np.uint64. However,
+        # `ubnd` is too large to be represented in np.float64,
+        # causing it to be rounded down to np.iinfo(np.int64).max,
+        # leading to a ValueError because `lbnd` now equals
+        # the new `ubnd`.
+
+        dt = np.int64
+        tgt = np.iinfo(np.int64).max
+        lbnd = np.int64(np.iinfo(np.int64).max)
+        ubnd = np.uint64(np.iinfo(np.int64).max + 1)
+
+        # None of these function calls should
+        # generate a ValueError now.
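+        # [Editor's illustration, not from upstream numpy: the float64
+        # collision described above — 2**63 - 1 has no exact float64
+        # representation and rounds to 2**63, so the two bounds compare
+        # equal once both are cast to float.]
+        assert np.float64(2**63 - 1) == np.float64(2**63)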
+ actual = np.random.randint(lbnd, ubnd, dtype=dt) + assert_equal(actual, tgt) + + def test_respect_dtype_singleton(self): + # See gh-7203 + for dt in self.itype: + lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min + ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1 + + sample = self.rfunc(lbnd, ubnd, dtype=dt) + assert_equal(sample.dtype, np.dtype(dt)) + + for dt in (bool, int, np.compat.long): + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + + # gh-7284: Ensure that we get Python data types + sample = self.rfunc(lbnd, ubnd, dtype=dt) + assert_(not hasattr(sample, 'dtype')) + assert_equal(type(sample), dt) + + +class TestRandomDist: + # Make sure the random distribution returns the correct value for a + # given seed + + def setup(self): + self.seed = 1234567890 + + def test_rand(self): + np.random.seed(self.seed) + actual = np.random.rand(3, 2) + desired = np.array([[0.61879477158567997, 0.59162362775974664], + [0.88868358904449662, 0.89165480011560816], + [0.4575674820298663, 0.7781880808593471]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_randn(self): + np.random.seed(self.seed) + actual = np.random.randn(3, 2) + desired = np.array([[1.34016345771863121, 1.73759122771936081], + [1.498988344300628, -0.2286433324536169], + [2.031033998682787, 2.17032494605655257]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_randint(self): + np.random.seed(self.seed) + actual = np.random.randint(-99, 99, size=(3, 2)) + desired = np.array([[31, 3], + [-52, 41], + [-48, -66]]) + assert_array_equal(actual, desired) + + def test_random_integers(self): + np.random.seed(self.seed) + with suppress_warnings() as sup: + w = sup.record(DeprecationWarning) + actual = np.random.random_integers(-99, 99, size=(3, 2)) + assert_(len(w) == 1) + desired = np.array([[31, 3], + [-52, 41], + [-48, -66]]) + assert_array_equal(actual, desired) + + def test_random_integers_max_int(self): + # Tests whether random_integers can generate the + # maximum allowed Python int that can be converted + # into a C long. Previous implementations of this + # method have thrown an OverflowError when attempting + # to generate this integer. 
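+        # [Editor's note, not from upstream numpy: np.iinfo('l') describes
+        # the platform C long, so the bound exercised below is 2**31 - 1 on
+        # Windows and 32-bit platforms and 2**63 - 1 on most 64-bit Unix
+        # systems.]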
+ with suppress_warnings() as sup: + w = sup.record(DeprecationWarning) + actual = np.random.random_integers(np.iinfo('l').max, + np.iinfo('l').max) + assert_(len(w) == 1) + + desired = np.iinfo('l').max + assert_equal(actual, desired) + + def test_random_integers_deprecated(self): + with warnings.catch_warnings(): + warnings.simplefilter("error", DeprecationWarning) + + # DeprecationWarning raised with high == None + assert_raises(DeprecationWarning, + np.random.random_integers, + np.iinfo('l').max) + + # DeprecationWarning raised with high != None + assert_raises(DeprecationWarning, + np.random.random_integers, + np.iinfo('l').max, np.iinfo('l').max) + + def test_random(self): + np.random.seed(self.seed) + actual = np.random.random((3, 2)) + desired = np.array([[0.61879477158567997, 0.59162362775974664], + [0.88868358904449662, 0.89165480011560816], + [0.4575674820298663, 0.7781880808593471]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_choice_uniform_replace(self): + np.random.seed(self.seed) + actual = np.random.choice(4, 4) + desired = np.array([2, 3, 2, 3]) + assert_array_equal(actual, desired) + + def test_choice_nonuniform_replace(self): + np.random.seed(self.seed) + actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1]) + desired = np.array([1, 1, 2, 2]) + assert_array_equal(actual, desired) + + def test_choice_uniform_noreplace(self): + np.random.seed(self.seed) + actual = np.random.choice(4, 3, replace=False) + desired = np.array([0, 1, 3]) + assert_array_equal(actual, desired) + + def test_choice_nonuniform_noreplace(self): + np.random.seed(self.seed) + actual = np.random.choice(4, 3, replace=False, + p=[0.1, 0.3, 0.5, 0.1]) + desired = np.array([2, 3, 1]) + assert_array_equal(actual, desired) + + def test_choice_noninteger(self): + np.random.seed(self.seed) + actual = np.random.choice(['a', 'b', 'c', 'd'], 4) + desired = np.array(['c', 'd', 'c', 'd']) + assert_array_equal(actual, desired) + + def test_choice_exceptions(self): + sample = np.random.choice + assert_raises(ValueError, sample, -1, 3) + assert_raises(ValueError, sample, 3., 3) + assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3) + assert_raises(ValueError, sample, [], 3) + assert_raises(ValueError, sample, [1, 2, 3, 4], 3, + p=[[0.25, 0.25], [0.25, 0.25]]) + assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2]) + assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1]) + assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4]) + assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False) + # gh-13087 + assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False) + assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False) + assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False) + assert_raises(ValueError, sample, [1, 2, 3], 2, + replace=False, p=[1, 0, 0]) + + def test_choice_return_shape(self): + p = [0.1, 0.9] + # Check scalar + assert_(np.isscalar(np.random.choice(2, replace=True))) + assert_(np.isscalar(np.random.choice(2, replace=False))) + assert_(np.isscalar(np.random.choice(2, replace=True, p=p))) + assert_(np.isscalar(np.random.choice(2, replace=False, p=p))) + assert_(np.isscalar(np.random.choice([1, 2], replace=True))) + assert_(np.random.choice([None], replace=True) is None) + a = np.array([1, 2]) + arr = np.empty(1, dtype=object) + arr[0] = a + assert_(np.random.choice(arr, replace=True) is a) + + # Check 0-d array + s = tuple() + assert_(not np.isscalar(np.random.choice(2, s, replace=True))) + assert_(not 
np.isscalar(np.random.choice(2, s, replace=False)))
+        assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p)))
+        assert_(not np.isscalar(np.random.choice(2, s, replace=False, p=p)))
+        assert_(not np.isscalar(np.random.choice([1, 2], s, replace=True)))
+        assert_(np.random.choice([None], s, replace=True).ndim == 0)
+        a = np.array([1, 2])
+        arr = np.empty(1, dtype=object)
+        arr[0] = a
+        assert_(np.random.choice(arr, s, replace=True).item() is a)
+
+        # Check multi-dimensional array
+        s = (2, 3)
+        p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
+        assert_equal(np.random.choice(6, s, replace=True).shape, s)
+        assert_equal(np.random.choice(6, s, replace=False).shape, s)
+        assert_equal(np.random.choice(6, s, replace=True, p=p).shape, s)
+        assert_equal(np.random.choice(6, s, replace=False, p=p).shape, s)
+        assert_equal(np.random.choice(np.arange(6), s, replace=True).shape, s)
+
+        # Check zero-size
+        assert_equal(np.random.randint(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
+        assert_equal(np.random.randint(0, -10, size=0).shape, (0,))
+        assert_equal(np.random.randint(10, 10, size=0).shape, (0,))
+        assert_equal(np.random.choice(0, size=0).shape, (0,))
+        assert_equal(np.random.choice([], size=(0,)).shape, (0,))
+        assert_equal(np.random.choice(['a', 'b'], size=(3, 0, 4)).shape,
+                     (3, 0, 4))
+        assert_raises(ValueError, np.random.choice, [], 10)
+
+    def test_choice_nan_probabilities(self):
+        a = np.array([42, 1, 2])
+        p = [None, None, None]
+        assert_raises(ValueError, np.random.choice, a, p=p)
+
+    def test_bytes(self):
+        np.random.seed(self.seed)
+        actual = np.random.bytes(10)
+        desired = b'\x82Ui\x9e\xff\x97+Wf\xa5'
+        assert_equal(actual, desired)
+
+    def test_shuffle(self):
+        # Test lists, arrays (of various dtypes), and multidimensional
+        # versions of both, C-contiguous or not:
+        for conv in [lambda x: np.array([]),
+                     lambda x: x,
+                     lambda x: np.asarray(x).astype(np.int8),
+                     lambda x: np.asarray(x).astype(np.float32),
+                     lambda x: np.asarray(x).astype(np.complex64),
+                     lambda x: np.asarray(x).astype(object),
+                     lambda x: [(i, i) for i in x],
+                     lambda x: np.asarray([[i, i] for i in x]),
+                     lambda x: np.vstack([x, x]).T,
+                     # gh-11442
+                     lambda x: (np.asarray([(i, i) for i in x],
+                                           [("a", int), ("b", int)])
+                                .view(np.recarray)),
+                     # gh-4270
+                     lambda x: np.asarray([(i, i) for i in x],
+                                          [("a", object), ("b", np.int32)])]:
+            np.random.seed(self.seed)
+            alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
+            np.random.shuffle(alist)
+            actual = alist
+            desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
+            assert_array_equal(actual, desired)
+
+    def test_shuffle_masked(self):
+        # gh-3263
+        a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
+        b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
+        a_orig = a.copy()
+        b_orig = b.copy()
+        for i in range(50):
+            np.random.shuffle(a)
+            assert_equal(
+                sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
+            np.random.shuffle(b)
+            assert_equal(
+                sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
+
+    @pytest.mark.parametrize("random",
+            [np.random, np.random.RandomState(), np.random.default_rng()])
+    def test_shuffle_untyped_warning(self, random):
+        # Create a dict that works like a sequence but isn't one
+        values = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6}
+        with pytest.warns(UserWarning,
+                          match="you are shuffling a 'dict' object") as rec:
+            random.shuffle(values)
+        assert "test_random" in rec[0].filename
+
+    @pytest.mark.parametrize("random",
+            [np.random, np.random.RandomState(), np.random.default_rng()])
+    @pytest.mark.parametrize("use_array_like", [True,
False]) + def test_shuffle_no_object_unpacking(self, random, use_array_like): + class MyArr(np.ndarray): + pass + + items = [ + None, np.array([3]), np.float64(3), np.array(10), np.float64(7) + ] + arr = np.array(items, dtype=object) + item_ids = {id(i) for i in items} + if use_array_like: + arr = arr.view(MyArr) + + # The array was created fine, and did not modify any objects: + assert all(id(i) in item_ids for i in arr) + + if use_array_like and not isinstance(random, np.random.Generator): + # The old API gives incorrect results, but warns about it. + with pytest.warns(UserWarning, + match="Shuffling a one dimensional array.*"): + random.shuffle(arr) + else: + random.shuffle(arr) + assert all(id(i) in item_ids for i in arr) + + def test_shuffle_memoryview(self): + # gh-18273 + # allow graceful handling of memoryviews + # (treat the same as arrays) + np.random.seed(self.seed) + a = np.arange(5).data + np.random.shuffle(a) + assert_equal(np.asarray(a), [0, 1, 4, 3, 2]) + rng = np.random.RandomState(self.seed) + rng.shuffle(a) + assert_equal(np.asarray(a), [0, 1, 2, 3, 4]) + rng = np.random.default_rng(self.seed) + rng.shuffle(a) + assert_equal(np.asarray(a), [4, 1, 0, 3, 2]) + + def test_beta(self): + np.random.seed(self.seed) + actual = np.random.beta(.1, .9, size=(3, 2)) + desired = np.array( + [[1.45341850513746058e-02, 5.31297615662868145e-04], + [1.85366619058432324e-06, 4.19214516800110563e-03], + [1.58405155108498093e-04, 1.26252891949397652e-04]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_binomial(self): + np.random.seed(self.seed) + actual = np.random.binomial(100, .456, size=(3, 2)) + desired = np.array([[37, 43], + [42, 48], + [46, 45]]) + assert_array_equal(actual, desired) + + def test_chisquare(self): + np.random.seed(self.seed) + actual = np.random.chisquare(50, size=(3, 2)) + desired = np.array([[63.87858175501090585, 68.68407748911370447], + [65.77116116901505904, 47.09686762438974483], + [72.3828403199695174, 74.18408615260374006]]) + assert_array_almost_equal(actual, desired, decimal=13) + + def test_dirichlet(self): + np.random.seed(self.seed) + alpha = np.array([51.72840233779265162, 39.74494232180943953]) + actual = np.random.mtrand.dirichlet(alpha, size=(3, 2)) + desired = np.array([[[0.54539444573611562, 0.45460555426388438], + [0.62345816822039413, 0.37654183177960598]], + [[0.55206000085785778, 0.44793999914214233], + [0.58964023305154301, 0.41035976694845688]], + [[0.59266909280647828, 0.40733090719352177], + [0.56974431743975207, 0.43025568256024799]]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_dirichlet_size(self): + # gh-3173 + p = np.array([51.72840233779265162, 39.74494232180943953]) + assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.dirichlet(p, [2, 2]).shape, (2, 2, 2)) + assert_equal(np.random.dirichlet(p, (2, 2)).shape, (2, 2, 2)) + assert_equal(np.random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2)) + + assert_raises(TypeError, np.random.dirichlet, p, float(1)) + + def test_dirichlet_bad_alpha(self): + # gh-2089 + alpha = np.array([5.4e-01, -1.0e-16]) + assert_raises(ValueError, np.random.mtrand.dirichlet, alpha) + + # gh-15876 + assert_raises(ValueError, random.dirichlet, [[5, 1]]) + assert_raises(ValueError, random.dirichlet, [[5], [1]]) + assert_raises(ValueError, random.dirichlet, [[[5], [1]], [[1], [5]]]) + 
assert_raises(ValueError, random.dirichlet, np.array([[5, 1], [1, 5]])) + + def test_exponential(self): + np.random.seed(self.seed) + actual = np.random.exponential(1.1234, size=(3, 2)) + desired = np.array([[1.08342649775011624, 1.00607889924557314], + [2.46628830085216721, 2.49668106809923884], + [0.68717433461363442, 1.69175666993575979]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_exponential_0(self): + assert_equal(np.random.exponential(scale=0), 0) + assert_raises(ValueError, np.random.exponential, scale=-0.) + + def test_f(self): + np.random.seed(self.seed) + actual = np.random.f(12, 77, size=(3, 2)) + desired = np.array([[1.21975394418575878, 1.75135759791559775], + [1.44803115017146489, 1.22108959480396262], + [1.02176975757740629, 1.34431827623300415]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_gamma(self): + np.random.seed(self.seed) + actual = np.random.gamma(5, 3, size=(3, 2)) + desired = np.array([[24.60509188649287182, 28.54993563207210627], + [26.13476110204064184, 12.56988482927716078], + [31.71863275789960568, 33.30143302795922011]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_gamma_0(self): + assert_equal(np.random.gamma(shape=0, scale=0), 0) + assert_raises(ValueError, np.random.gamma, shape=-0., scale=-0.) + + def test_geometric(self): + np.random.seed(self.seed) + actual = np.random.geometric(.123456789, size=(3, 2)) + desired = np.array([[8, 7], + [17, 17], + [5, 12]]) + assert_array_equal(actual, desired) + + def test_gumbel(self): + np.random.seed(self.seed) + actual = np.random.gumbel(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[0.19591898743416816, 0.34405539668096674], + [-1.4492522252274278, -1.47374816298446865], + [1.10651090478803416, -0.69535848626236174]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_gumbel_0(self): + assert_equal(np.random.gumbel(scale=0), 0) + assert_raises(ValueError, np.random.gumbel, scale=-0.) + + def test_hypergeometric(self): + np.random.seed(self.seed) + actual = np.random.hypergeometric(10, 5, 14, size=(3, 2)) + desired = np.array([[10, 10], + [10, 10], + [9, 9]]) + assert_array_equal(actual, desired) + + # Test nbad = 0 + actual = np.random.hypergeometric(5, 0, 3, size=4) + desired = np.array([3, 3, 3, 3]) + assert_array_equal(actual, desired) + + actual = np.random.hypergeometric(15, 0, 12, size=4) + desired = np.array([12, 12, 12, 12]) + assert_array_equal(actual, desired) + + # Test ngood = 0 + actual = np.random.hypergeometric(0, 5, 3, size=4) + desired = np.array([0, 0, 0, 0]) + assert_array_equal(actual, desired) + + actual = np.random.hypergeometric(0, 15, 12, size=4) + desired = np.array([0, 0, 0, 0]) + assert_array_equal(actual, desired) + + def test_laplace(self): + np.random.seed(self.seed) + actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[0.66599721112760157, 0.52829452552221945], + [3.12791959514407125, 3.18202813572992005], + [-0.05391065675859356, 1.74901336242837324]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_laplace_0(self): + assert_equal(np.random.laplace(scale=0), 0) + assert_raises(ValueError, np.random.laplace, scale=-0.) 
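+    # [Editor's note -- illustrative sketch, not part of the upstream
+    # suite.] The *_0 tests above (exponential, gamma, gumbel, laplace)
+    # all assert the same contract for scale-parameterized distributions:
+    # scale=0 must deterministically return the degenerate value, while
+    # negative zero must raise -- which implies the parameter check looks
+    # at the sign bit, since -0. == 0 compares equal. A hypothetical
+    # helper (unused by the tests above) capturing that pattern:
+    def _check_degenerate_scale(self, dist, degenerate):
+        # scale=0 collapses the distribution to a single point...
+        assert_equal(dist(scale=0), degenerate)
+        # ...while -0. is rejected as a (signed) negative scale.
+        assert_raises(ValueError, dist, scale=-0.)
+    # e.g. self._check_degenerate_scale(np.random.exponential, 0) or
+    # self._check_degenerate_scale(np.random.rayleigh, 0).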
+
+    def test_logistic(self):
+        np.random.seed(self.seed)
+        actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
+        desired = np.array([[1.09232835305011444, 0.8648196662399954],
+                            [4.27818590694950185, 4.33897006346929714],
+                            [-0.21682183359214885, 2.63373365386060332]])
+        assert_array_almost_equal(actual, desired, decimal=15)
+
+    def test_lognormal(self):
+        np.random.seed(self.seed)
+        actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
+        desired = np.array([[16.50698631688883822, 36.54846706092654784],
+                            [22.67886599981281748, 0.71617561058995771],
+                            [65.72798501792723869, 86.84341601437161273]])
+        assert_array_almost_equal(actual, desired, decimal=13)
+
+    def test_lognormal_0(self):
+        assert_equal(np.random.lognormal(sigma=0), 1)
+        assert_raises(ValueError, np.random.lognormal, sigma=-0.)
+
+    def test_logseries(self):
+        np.random.seed(self.seed)
+        actual = np.random.logseries(p=.923456789, size=(3, 2))
+        desired = np.array([[2, 2],
+                            [6, 17],
+                            [3, 6]])
+        assert_array_equal(actual, desired)
+
+    def test_multinomial(self):
+        np.random.seed(self.seed)
+        actual = np.random.multinomial(20, [1/6.]*6, size=(3, 2))
+        desired = np.array([[[4, 3, 5, 4, 2, 2],
+                             [5, 2, 8, 2, 2, 1]],
+                            [[3, 4, 3, 6, 0, 4],
+                             [2, 1, 4, 3, 6, 4]],
+                            [[4, 4, 2, 5, 2, 3],
+                             [4, 3, 4, 2, 3, 4]]])
+        assert_array_equal(actual, desired)
+
+    def test_multivariate_normal(self):
+        np.random.seed(self.seed)
+        mean = (.123456789, 10)
+        cov = [[1, 0], [0, 1]]
+        size = (3, 2)
+        actual = np.random.multivariate_normal(mean, cov, size)
+        desired = np.array([[[1.463620246718631, 11.73759122771936],
+                             [1.622445133300628, 9.771356667546383]],
+                            [[2.154490787682787, 12.170324946056553],
+                             [1.719909438201865, 9.230548443648306]],
+                            [[0.689515026297799, 9.880729819607714],
+                             [-0.023054015651998, 9.201096623542879]]])
+
+        assert_array_almost_equal(actual, desired, decimal=15)
+
+        # Check for default size; this used to raise a deprecation warning
+        actual = np.random.multivariate_normal(mean, cov)
+        desired = np.array([0.895289569463708, 9.17180864067987])
+        assert_array_almost_equal(actual, desired, decimal=15)
+
+        # Check that a non-positive-semidefinite covariance warns with
+        # RuntimeWarning
+        mean = [0, 0]
+        cov = [[1, 2], [2, 1]]
+        assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov)
+
+        # and that it doesn't warn with RuntimeWarning check_valid='ignore'
+        assert_no_warnings(np.random.multivariate_normal, mean, cov,
+                           check_valid='ignore')
+
+        # and that it raises a ValueError with check_valid='raise'
+        assert_raises(ValueError, np.random.multivariate_normal, mean, cov,
+                      check_valid='raise')
+
+        cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
+        with suppress_warnings() as sup:
+            np.random.multivariate_normal(mean, cov)
+            w = sup.record(RuntimeWarning)
+            assert len(w) == 0
+
+    def test_negative_binomial(self):
+        np.random.seed(self.seed)
+        actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2))
+        desired = np.array([[848, 841],
+                            [892, 611],
+                            [779, 647]])
+        assert_array_equal(actual, desired)
+
+    def test_noncentral_chisquare(self):
+        np.random.seed(self.seed)
+        actual = np.random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
+        desired = np.array([[23.91905354498517511, 13.35324692733826346],
+                            [31.22452661329736401, 16.60047399466177254],
+                            [5.03461598262724586, 17.94973089023519464]])
+        assert_array_almost_equal(actual, desired, decimal=14)
+
+        actual = np.random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
+        desired = np.array([[1.47145377828516666,
0.15052899268012659], + [0.00943803056963588, 1.02647251615666169], + [0.332334982684171, 0.15451287602753125]]) + assert_array_almost_equal(actual, desired, decimal=14) + + np.random.seed(self.seed) + actual = np.random.noncentral_chisquare(df=5, nonc=0, size=(3, 2)) + desired = np.array([[9.597154162763948, 11.725484450296079], + [10.413711048138335, 3.694475922923986], + [13.484222138963087, 14.377255424602957]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_noncentral_f(self): + np.random.seed(self.seed) + actual = np.random.noncentral_f(dfnum=5, dfden=2, nonc=1, + size=(3, 2)) + desired = np.array([[1.40598099674926669, 0.34207973179285761], + [3.57715069265772545, 7.92632662577829805], + [0.43741599463544162, 1.1774208752428319]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_normal(self): + np.random.seed(self.seed) + actual = np.random.normal(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[2.80378370443726244, 3.59863924443872163], + [3.121433477601256, -0.33382987590723379], + [4.18552478636557357, 4.46410668111310471]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_normal_0(self): + assert_equal(np.random.normal(scale=0), 0) + assert_raises(ValueError, np.random.normal, scale=-0.) + + def test_pareto(self): + np.random.seed(self.seed) + actual = np.random.pareto(a=.123456789, size=(3, 2)) + desired = np.array( + [[2.46852460439034849e+03, 1.41286880810518346e+03], + [5.28287797029485181e+07, 6.57720981047328785e+07], + [1.40840323350391515e+02, 1.98390255135251704e+05]]) + # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this + # matrix differs by 24 nulps. Discussion: + # https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html + # Consensus is that this is probably some gcc quirk that affects + # rounding but not in any important way, so we just use a looser + # tolerance on this test: + np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30) + + def test_poisson(self): + np.random.seed(self.seed) + actual = np.random.poisson(lam=.123456789, size=(3, 2)) + desired = np.array([[0, 0], + [1, 0], + [0, 0]]) + assert_array_equal(actual, desired) + + def test_poisson_exceptions(self): + lambig = np.iinfo('l').max + lamneg = -1 + assert_raises(ValueError, np.random.poisson, lamneg) + assert_raises(ValueError, np.random.poisson, [lamneg]*10) + assert_raises(ValueError, np.random.poisson, lambig) + assert_raises(ValueError, np.random.poisson, [lambig]*10) + + def test_power(self): + np.random.seed(self.seed) + actual = np.random.power(a=.123456789, size=(3, 2)) + desired = np.array([[0.02048932883240791, 0.01424192241128213], + [0.38446073748535298, 0.39499689943484395], + [0.00177699707563439, 0.13115505880863756]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_rayleigh(self): + np.random.seed(self.seed) + actual = np.random.rayleigh(scale=10, size=(3, 2)) + desired = np.array([[13.8882496494248393, 13.383318339044731], + [20.95413364294492098, 21.08285015800712614], + [11.06066537006854311, 17.35468505778271009]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_rayleigh_0(self): + assert_equal(np.random.rayleigh(scale=0), 0) + assert_raises(ValueError, np.random.rayleigh, scale=-0.) 
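+    # [Editor's note -- illustrative sketch, not part of the upstream
+    # suite.] Every distribution test in this class follows one "golden
+    # value" recipe: re-seed the global legacy stream, draw a fixed
+    # shape, and compare elementwise against hard-coded references. The
+    # decimal= tolerance (13-15 above) varies per distribution to absorb
+    # platform libm rounding differences. Condensed into a hypothetical
+    # helper, the recipe looks like this:
+    def _check_golden(self, dist, desired, decimal=15, **params):
+        np.random.seed(self.seed)
+        actual = dist(size=desired.shape, **params)
+        assert_array_almost_equal(actual, desired, decimal=decimal)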
+ + def test_standard_cauchy(self): + np.random.seed(self.seed) + actual = np.random.standard_cauchy(size=(3, 2)) + desired = np.array([[0.77127660196445336, -6.55601161955910605], + [0.93582023391158309, -2.07479293013759447], + [-4.74601644297011926, 0.18338989290760804]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_exponential(self): + np.random.seed(self.seed) + actual = np.random.standard_exponential(size=(3, 2)) + desired = np.array([[0.96441739162374596, 0.89556604882105506], + [2.1953785836319808, 2.22243285392490542], + [0.6116915921431676, 1.50592546727413201]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_gamma(self): + np.random.seed(self.seed) + actual = np.random.standard_gamma(shape=3, size=(3, 2)) + desired = np.array([[5.50841531318455058, 6.62953470301903103], + [5.93988484943779227, 2.31044849402133989], + [7.54838614231317084, 8.012756093271868]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_standard_gamma_0(self): + assert_equal(np.random.standard_gamma(shape=0), 0) + assert_raises(ValueError, np.random.standard_gamma, shape=-0.) + + def test_standard_normal(self): + np.random.seed(self.seed) + actual = np.random.standard_normal(size=(3, 2)) + desired = np.array([[1.34016345771863121, 1.73759122771936081], + [1.498988344300628, -0.2286433324536169], + [2.031033998682787, 2.17032494605655257]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_t(self): + np.random.seed(self.seed) + actual = np.random.standard_t(df=10, size=(3, 2)) + desired = np.array([[0.97140611862659965, -0.08830486548450577], + [1.36311143689505321, -0.55317463909867071], + [-0.18473749069684214, 0.61181537341755321]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_triangular(self): + np.random.seed(self.seed) + actual = np.random.triangular(left=5.12, mode=10.23, right=20.34, + size=(3, 2)) + desired = np.array([[12.68117178949215784, 12.4129206149193152], + [16.20131377335158263, 16.25692138747600524], + [11.20400690911820263, 14.4978144835829923]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_uniform(self): + np.random.seed(self.seed) + actual = np.random.uniform(low=1.23, high=10.54, size=(3, 2)) + desired = np.array([[6.99097932346268003, 6.73801597444323974], + [9.50364421400426274, 9.53130618907631089], + [5.48995325769805476, 8.47493103280052118]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_uniform_range_bounds(self): + fmin = np.finfo('float').min + fmax = np.finfo('float').max + + func = np.random.uniform + assert_raises(OverflowError, func, -np.inf, 0) + assert_raises(OverflowError, func, 0, np.inf) + assert_raises(OverflowError, func, fmin, fmax) + assert_raises(OverflowError, func, [-np.inf], [0]) + assert_raises(OverflowError, func, [0], [np.inf]) + + # (fmax / 1e17) - fmin is within range, so this should not throw + # account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX > + # DBL_MAX by increasing fmin a bit + np.random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17) + + def test_scalar_exception_propagation(self): + # Tests that exceptions are correctly propagated in distributions + # when called with objects that throw exceptions when converted to + # scalars. 
+ # + # Regression test for gh: 8865 + + class ThrowingFloat(np.ndarray): + def __float__(self): + raise TypeError + + throwing_float = np.array(1.0).view(ThrowingFloat) + assert_raises(TypeError, np.random.uniform, throwing_float, + throwing_float) + + class ThrowingInteger(np.ndarray): + def __int__(self): + raise TypeError + + __index__ = __int__ + + throwing_int = np.array(1).view(ThrowingInteger) + assert_raises(TypeError, np.random.hypergeometric, throwing_int, 1, 1) + + def test_vonmises(self): + np.random.seed(self.seed) + actual = np.random.vonmises(mu=1.23, kappa=1.54, size=(3, 2)) + desired = np.array([[2.28567572673902042, 2.89163838442285037], + [0.38198375564286025, 2.57638023113890746], + [1.19153771588353052, 1.83509849681825354]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_vonmises_small(self): + # check infinite loop, gh-4720 + np.random.seed(self.seed) + r = np.random.vonmises(mu=0., kappa=1.1e-8, size=10**6) + np.testing.assert_(np.isfinite(r).all()) + + def test_wald(self): + np.random.seed(self.seed) + actual = np.random.wald(mean=1.23, scale=1.54, size=(3, 2)) + desired = np.array([[3.82935265715889983, 5.13125249184285526], + [0.35045403618358717, 1.50832396872003538], + [0.24124319895843183, 0.22031101461955038]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_weibull(self): + np.random.seed(self.seed) + actual = np.random.weibull(a=1.23, size=(3, 2)) + desired = np.array([[0.97097342648766727, 0.91422896443565516], + [1.89517770034962929, 1.91414357960479564], + [0.67057783752390987, 1.39494046635066793]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_weibull_0(self): + np.random.seed(self.seed) + assert_equal(np.random.weibull(a=0, size=12), np.zeros(12)) + assert_raises(ValueError, np.random.weibull, a=-0.) 
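+    # [Editor's note -- illustrative sketch, not upstream code.] The
+    # scalar-exception test above builds 0-d arrays whose conversion
+    # hooks raise, so any sampler that extracts a C double (__float__)
+    # or C long (__int__/__index__) from its arguments must propagate
+    # the TypeError rather than swallow it. The same probe can be built
+    # for any hook name (hypothetical helper):
+    @staticmethod
+    def _throwing_scalar(value, hook):
+        def _raise(self):
+            raise TypeError
+        # Dynamically derive an ndarray subclass whose `hook` raises.
+        throwing = type('Throwing', (np.ndarray,), {hook: _raise})
+        return np.asarray(value).view(throwing)
+    # e.g. _throwing_scalar(1.0, '__float__') behaves like the
+    # ThrowingFloat class defined in the test above.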
+ + def test_zipf(self): + np.random.seed(self.seed) + actual = np.random.zipf(a=1.23, size=(3, 2)) + desired = np.array([[66, 29], + [1, 1], + [3, 13]]) + assert_array_equal(actual, desired) + + +class TestBroadcast: + # tests that functions that broadcast behave + # correctly when presented with non-scalar arguments + def setup(self): + self.seed = 123456789 + + def setSeed(self): + np.random.seed(self.seed) + + # TODO: Include test for randint once it can broadcast + # Can steal the test written in PR #6938 + + def test_uniform(self): + low = [0] + high = [1] + uniform = np.random.uniform + desired = np.array([0.53283302478975902, + 0.53413660089041659, + 0.50955303552646702]) + + self.setSeed() + actual = uniform(low * 3, high) + assert_array_almost_equal(actual, desired, decimal=14) + + self.setSeed() + actual = uniform(low, high * 3) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_normal(self): + loc = [0] + scale = [1] + bad_scale = [-1] + normal = np.random.normal + desired = np.array([2.2129019979039612, + 2.1283977976520019, + 1.8417114045748335]) + + self.setSeed() + actual = normal(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, normal, loc * 3, bad_scale) + + self.setSeed() + actual = normal(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, normal, loc, bad_scale * 3) + + def test_beta(self): + a = [1] + b = [2] + bad_a = [-1] + bad_b = [-2] + beta = np.random.beta + desired = np.array([0.19843558305989056, + 0.075230336409423643, + 0.24976865978980844]) + + self.setSeed() + actual = beta(a * 3, b) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, beta, bad_a * 3, b) + assert_raises(ValueError, beta, a * 3, bad_b) + + self.setSeed() + actual = beta(a, b * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, beta, bad_a, b * 3) + assert_raises(ValueError, beta, a, bad_b * 3) + + def test_exponential(self): + scale = [1] + bad_scale = [-1] + exponential = np.random.exponential + desired = np.array([0.76106853658845242, + 0.76386282278691653, + 0.71243813125891797]) + + self.setSeed() + actual = exponential(scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, exponential, bad_scale * 3) + + def test_standard_gamma(self): + shape = [1] + bad_shape = [-1] + std_gamma = np.random.standard_gamma + desired = np.array([0.76106853658845242, + 0.76386282278691653, + 0.71243813125891797]) + + self.setSeed() + actual = std_gamma(shape * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, std_gamma, bad_shape * 3) + + def test_gamma(self): + shape = [1] + scale = [2] + bad_shape = [-1] + bad_scale = [-2] + gamma = np.random.gamma + desired = np.array([1.5221370731769048, + 1.5277256455738331, + 1.4248762625178359]) + + self.setSeed() + actual = gamma(shape * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gamma, bad_shape * 3, scale) + assert_raises(ValueError, gamma, shape * 3, bad_scale) + + self.setSeed() + actual = gamma(shape, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gamma, bad_shape, scale * 3) + assert_raises(ValueError, gamma, shape, bad_scale * 3) + + def test_f(self): + dfnum = [1] + dfden = [2] + bad_dfnum = [-1] + bad_dfden = [-2] + f = np.random.f + desired = np.array([0.80038951638264799, + 
0.86768719635363512, + 2.7251095168386801]) + + self.setSeed() + actual = f(dfnum * 3, dfden) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, f, bad_dfnum * 3, dfden) + assert_raises(ValueError, f, dfnum * 3, bad_dfden) + + self.setSeed() + actual = f(dfnum, dfden * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, f, bad_dfnum, dfden * 3) + assert_raises(ValueError, f, dfnum, bad_dfden * 3) + + def test_noncentral_f(self): + dfnum = [2] + dfden = [3] + nonc = [4] + bad_dfnum = [0] + bad_dfden = [-1] + bad_nonc = [-2] + nonc_f = np.random.noncentral_f + desired = np.array([9.1393943263705211, + 13.025456344595602, + 8.8018098359100545]) + + self.setSeed() + actual = nonc_f(dfnum * 3, dfden, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc) + assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc) + assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc) + + self.setSeed() + actual = nonc_f(dfnum, dfden * 3, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc) + assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc) + assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc) + + self.setSeed() + actual = nonc_f(dfnum, dfden, nonc * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3) + assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3) + assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3) + + def test_noncentral_f_small_df(self): + self.setSeed() + desired = np.array([6.869638627492048, 0.785880199263955]) + actual = np.random.noncentral_f(0.9, 0.9, 2, size=2) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_chisquare(self): + df = [1] + bad_df = [-1] + chisquare = np.random.chisquare + desired = np.array([0.57022801133088286, + 0.51947702108840776, + 0.1320969254923558]) + + self.setSeed() + actual = chisquare(df * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, chisquare, bad_df * 3) + + def test_noncentral_chisquare(self): + df = [1] + nonc = [2] + bad_df = [-1] + bad_nonc = [-2] + nonc_chi = np.random.noncentral_chisquare + desired = np.array([9.0015599467913763, + 4.5804135049718742, + 6.0872302432834564]) + + self.setSeed() + actual = nonc_chi(df * 3, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_chi, bad_df * 3, nonc) + assert_raises(ValueError, nonc_chi, df * 3, bad_nonc) + + self.setSeed() + actual = nonc_chi(df, nonc * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_chi, bad_df, nonc * 3) + assert_raises(ValueError, nonc_chi, df, bad_nonc * 3) + + def test_standard_t(self): + df = [1] + bad_df = [-1] + t = np.random.standard_t + desired = np.array([3.0702872575217643, + 5.8560725167361607, + 1.0274791436474273]) + + self.setSeed() + actual = t(df * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, t, bad_df * 3) + + def test_vonmises(self): + mu = [2] + kappa = [1] + bad_kappa = [-1] + vonmises = np.random.vonmises + desired = np.array([2.9883443664201312, + -2.7064099483995943, + -1.8672476700665914]) + + self.setSeed() + actual = vonmises(mu * 3, kappa) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, vonmises, mu * 3, 
bad_kappa) + + self.setSeed() + actual = vonmises(mu, kappa * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, vonmises, mu, bad_kappa * 3) + + def test_pareto(self): + a = [1] + bad_a = [-1] + pareto = np.random.pareto + desired = np.array([1.1405622680198362, + 1.1465519762044529, + 1.0389564467453547]) + + self.setSeed() + actual = pareto(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, pareto, bad_a * 3) + + def test_weibull(self): + a = [1] + bad_a = [-1] + weibull = np.random.weibull + desired = np.array([0.76106853658845242, + 0.76386282278691653, + 0.71243813125891797]) + + self.setSeed() + actual = weibull(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, weibull, bad_a * 3) + + def test_power(self): + a = [1] + bad_a = [-1] + power = np.random.power + desired = np.array([0.53283302478975902, + 0.53413660089041659, + 0.50955303552646702]) + + self.setSeed() + actual = power(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, power, bad_a * 3) + + def test_laplace(self): + loc = [0] + scale = [1] + bad_scale = [-1] + laplace = np.random.laplace + desired = np.array([0.067921356028507157, + 0.070715642226971326, + 0.019290950698972624]) + + self.setSeed() + actual = laplace(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, laplace, loc * 3, bad_scale) + + self.setSeed() + actual = laplace(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, laplace, loc, bad_scale * 3) + + def test_gumbel(self): + loc = [0] + scale = [1] + bad_scale = [-1] + gumbel = np.random.gumbel + desired = np.array([0.2730318639556768, + 0.26936705726291116, + 0.33906220393037939]) + + self.setSeed() + actual = gumbel(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gumbel, loc * 3, bad_scale) + + self.setSeed() + actual = gumbel(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gumbel, loc, bad_scale * 3) + + def test_logistic(self): + loc = [0] + scale = [1] + bad_scale = [-1] + logistic = np.random.logistic + desired = np.array([0.13152135837586171, + 0.13675915696285773, + 0.038216792802833396]) + + self.setSeed() + actual = logistic(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, logistic, loc * 3, bad_scale) + + self.setSeed() + actual = logistic(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, logistic, loc, bad_scale * 3) + + def test_lognormal(self): + mean = [0] + sigma = [1] + bad_sigma = [-1] + lognormal = np.random.lognormal + desired = np.array([9.1422086044848427, + 8.4013952870126261, + 6.3073234116578671]) + + self.setSeed() + actual = lognormal(mean * 3, sigma) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, lognormal, mean * 3, bad_sigma) + + self.setSeed() + actual = lognormal(mean, sigma * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, lognormal, mean, bad_sigma * 3) + + def test_rayleigh(self): + scale = [1] + bad_scale = [-1] + rayleigh = np.random.rayleigh + desired = np.array([1.2337491937897689, + 1.2360119924878694, + 1.1936818095781789]) + + self.setSeed() + actual = rayleigh(scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + 
assert_raises(ValueError, rayleigh, bad_scale * 3) + + def test_wald(self): + mean = [0.5] + scale = [1] + bad_mean = [0] + bad_scale = [-2] + wald = np.random.wald + desired = np.array([0.11873681120271318, + 0.12450084820795027, + 0.9096122728408238]) + + self.setSeed() + actual = wald(mean * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, wald, bad_mean * 3, scale) + assert_raises(ValueError, wald, mean * 3, bad_scale) + + self.setSeed() + actual = wald(mean, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, wald, bad_mean, scale * 3) + assert_raises(ValueError, wald, mean, bad_scale * 3) + assert_raises(ValueError, wald, 0.0, 1) + assert_raises(ValueError, wald, 0.5, 0.0) + + def test_triangular(self): + left = [1] + right = [3] + mode = [2] + bad_left_one = [3] + bad_mode_one = [4] + bad_left_two, bad_mode_two = right * 2 + triangular = np.random.triangular + desired = np.array([2.03339048710429, + 2.0347400359389356, + 2.0095991069536208]) + + self.setSeed() + actual = triangular(left * 3, mode, right) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one * 3, mode, right) + assert_raises(ValueError, triangular, left * 3, bad_mode_one, right) + assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, + right) + + self.setSeed() + actual = triangular(left, mode * 3, right) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one, mode * 3, right) + assert_raises(ValueError, triangular, left, bad_mode_one * 3, right) + assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, + right) + + self.setSeed() + actual = triangular(left, mode, right * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one, mode, right * 3) + assert_raises(ValueError, triangular, left, bad_mode_one, right * 3) + assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, + right * 3) + + def test_binomial(self): + n = [1] + p = [0.5] + bad_n = [-1] + bad_p_one = [-1] + bad_p_two = [1.5] + binom = np.random.binomial + desired = np.array([1, 1, 1]) + + self.setSeed() + actual = binom(n * 3, p) + assert_array_equal(actual, desired) + assert_raises(ValueError, binom, bad_n * 3, p) + assert_raises(ValueError, binom, n * 3, bad_p_one) + assert_raises(ValueError, binom, n * 3, bad_p_two) + + self.setSeed() + actual = binom(n, p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, binom, bad_n, p * 3) + assert_raises(ValueError, binom, n, bad_p_one * 3) + assert_raises(ValueError, binom, n, bad_p_two * 3) + + def test_negative_binomial(self): + n = [1] + p = [0.5] + bad_n = [-1] + bad_p_one = [-1] + bad_p_two = [1.5] + neg_binom = np.random.negative_binomial + desired = np.array([1, 0, 1]) + + self.setSeed() + actual = neg_binom(n * 3, p) + assert_array_equal(actual, desired) + assert_raises(ValueError, neg_binom, bad_n * 3, p) + assert_raises(ValueError, neg_binom, n * 3, bad_p_one) + assert_raises(ValueError, neg_binom, n * 3, bad_p_two) + + self.setSeed() + actual = neg_binom(n, p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, neg_binom, bad_n, p * 3) + assert_raises(ValueError, neg_binom, n, bad_p_one * 3) + assert_raises(ValueError, neg_binom, n, bad_p_two * 3) + + def test_poisson(self): + max_lam = np.random.RandomState()._poisson_lam_max + + lam = [1] + bad_lam_one = [-1] + 
bad_lam_two = [max_lam * 2] + poisson = np.random.poisson + desired = np.array([1, 1, 0]) + + self.setSeed() + actual = poisson(lam * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, poisson, bad_lam_one * 3) + assert_raises(ValueError, poisson, bad_lam_two * 3) + + def test_zipf(self): + a = [2] + bad_a = [0] + zipf = np.random.zipf + desired = np.array([2, 2, 1]) + + self.setSeed() + actual = zipf(a * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, zipf, bad_a * 3) + with np.errstate(invalid='ignore'): + assert_raises(ValueError, zipf, np.nan) + assert_raises(ValueError, zipf, [0, 0, np.nan]) + + def test_geometric(self): + p = [0.5] + bad_p_one = [-1] + bad_p_two = [1.5] + geom = np.random.geometric + desired = np.array([2, 2, 2]) + + self.setSeed() + actual = geom(p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, geom, bad_p_one * 3) + assert_raises(ValueError, geom, bad_p_two * 3) + + def test_hypergeometric(self): + ngood = [1] + nbad = [2] + nsample = [2] + bad_ngood = [-1] + bad_nbad = [-2] + bad_nsample_one = [0] + bad_nsample_two = [4] + hypergeom = np.random.hypergeometric + desired = np.array([1, 1, 1]) + + self.setSeed() + actual = hypergeom(ngood * 3, nbad, nsample) + assert_array_equal(actual, desired) + assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample) + assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample) + assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one) + assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two) + + self.setSeed() + actual = hypergeom(ngood, nbad * 3, nsample) + assert_array_equal(actual, desired) + assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample) + assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample) + assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one) + assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two) + + self.setSeed() + actual = hypergeom(ngood, nbad, nsample * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3) + assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3) + assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3) + assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3) + + def test_logseries(self): + p = [0.5] + bad_p_one = [2] + bad_p_two = [-1] + logseries = np.random.logseries + desired = np.array([1, 1, 1]) + + self.setSeed() + actual = logseries(p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, logseries, bad_p_one * 3) + assert_raises(ValueError, logseries, bad_p_two * 3) + + +class TestThread: + # make sure each state produces the same sequence even in threads + def setup(self): + self.seeds = range(4) + + def check_function(self, function, sz): + from threading import Thread + + out1 = np.empty((len(self.seeds),) + sz) + out2 = np.empty((len(self.seeds),) + sz) + + # threaded generation + t = [Thread(target=function, args=(np.random.RandomState(s), o)) + for s, o in zip(self.seeds, out1)] + [x.start() for x in t] + [x.join() for x in t] + + # the same serial + for s, o in zip(self.seeds, out2): + function(np.random.RandomState(s), o) + + # these platforms change x87 fpu precision mode in threads + if np.intp().dtype.itemsize == 4 and sys.platform == "win32": + assert_array_almost_equal(out1, out2) + else: + assert_array_equal(out1, out2) + + def test_normal(self): + def gen_random(state, out): + out[...] 
= state.normal(size=10000) + self.check_function(gen_random, sz=(10000,)) + + def test_exp(self): + def gen_random(state, out): + out[...] = state.exponential(scale=np.ones((100, 1000))) + self.check_function(gen_random, sz=(100, 1000)) + + def test_multinomial(self): + def gen_random(state, out): + out[...] = state.multinomial(10, [1/6.]*6, size=10000) + self.check_function(gen_random, sz=(10000, 6)) + + +# See Issue #4263 +class TestSingleEltArrayInput: + def setup(self): + self.argOne = np.array([2]) + self.argTwo = np.array([3]) + self.argThree = np.array([4]) + self.tgtShape = (1,) + + def test_one_arg_funcs(self): + funcs = (np.random.exponential, np.random.standard_gamma, + np.random.chisquare, np.random.standard_t, + np.random.pareto, np.random.weibull, + np.random.power, np.random.rayleigh, + np.random.poisson, np.random.zipf, + np.random.geometric, np.random.logseries) + + probfuncs = (np.random.geometric, np.random.logseries) + + for func in funcs: + if func in probfuncs: # p < 1.0 + out = func(np.array([0.5])) + + else: + out = func(self.argOne) + + assert_equal(out.shape, self.tgtShape) + + def test_two_arg_funcs(self): + funcs = (np.random.uniform, np.random.normal, + np.random.beta, np.random.gamma, + np.random.f, np.random.noncentral_chisquare, + np.random.vonmises, np.random.laplace, + np.random.gumbel, np.random.logistic, + np.random.lognormal, np.random.wald, + np.random.binomial, np.random.negative_binomial) + + probfuncs = (np.random.binomial, np.random.negative_binomial) + + for func in funcs: + if func in probfuncs: # p <= 1 + argTwo = np.array([0.5]) + + else: + argTwo = self.argTwo + + out = func(self.argOne, argTwo) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne[0], argTwo) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne, argTwo[0]) + assert_equal(out.shape, self.tgtShape) + +# TODO: Uncomment once randint can broadcast arguments +# def test_randint(self): +# itype = [bool, np.int8, np.uint8, np.int16, np.uint16, +# np.int32, np.uint32, np.int64, np.uint64] +# func = np.random.randint +# high = np.array([1]) +# low = np.array([0]) +# +# for dt in itype: +# out = func(low, high, dtype=dt) +# self.assert_equal(out.shape, self.tgtShape) +# +# out = func(low[0], high, dtype=dt) +# self.assert_equal(out.shape, self.tgtShape) +# +# out = func(low, high[0], dtype=dt) +# self.assert_equal(out.shape, self.tgtShape) + + def test_three_arg_funcs(self): + funcs = [np.random.noncentral_f, np.random.triangular, + np.random.hypergeometric] + + for func in funcs: + out = func(self.argOne, self.argTwo, self.argThree) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne[0], self.argTwo, self.argThree) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne, self.argTwo[0], self.argThree) + assert_equal(out.shape, self.tgtShape) diff --git a/MLPY/Lib/site-packages/numpy/random/tests/test_randomstate.py b/MLPY/Lib/site-packages/numpy/random/tests/test_randomstate.py new file mode 100644 index 0000000000000000000000000000000000000000..adb69b659ed66b4b53f5f0d86b1672e76c9176a6 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/tests/test_randomstate.py @@ -0,0 +1,2022 @@ +import hashlib +import pickle +import sys +import warnings + +import numpy as np +import pytest +from numpy.testing import ( + assert_, assert_raises, assert_equal, assert_warns, + assert_no_warnings, assert_array_equal, assert_array_almost_equal, + suppress_warnings + ) + +from numpy.random import MT19937, PCG64 +from numpy import random + 
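+# [Editor's note -- illustrative sketch, not part of the upstream file.]
+# The INT_FUNC_HASHES tables below pin each integer distribution's output
+# stream with a sha256 digest of the raw little-endian sample bytes
+# (big-endian platforms byteswap before hashing, as the repeatability
+# tests in this file do). A hypothetical helper showing how such a
+# reference digest would be derived:
+def _reference_digest(name, args, size=100, seed=12345):
+    random.seed(seed)
+    sample = getattr(random, name)(*args, size=size).astype(np.int64)
+    if sys.byteorder != 'little':
+        sample = sample.byteswap()  # normalize to little-endian bytes
+    return hashlib.sha256(sample.tobytes()).hexdigest()
+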
+INT_FUNCS = {'binomial': (100.0, 0.6), + 'geometric': (.5,), + 'hypergeometric': (20, 20, 10), + 'logseries': (.5,), + 'multinomial': (20, np.ones(6) / 6.0), + 'negative_binomial': (100, .5), + 'poisson': (10.0,), + 'zipf': (2,), + } + +if np.iinfo(int).max < 2**32: + # Windows and some 32-bit platforms, e.g., ARM + INT_FUNC_HASHES = {'binomial': '2fbead005fc63942decb5326d36a1f32fe2c9d32c904ee61e46866b88447c263', + 'logseries': '23ead5dcde35d4cfd4ef2c105e4c3d43304b45dc1b1444b7823b9ee4fa144ebb', + 'geometric': '0d764db64f5c3bad48c8c33551c13b4d07a1e7b470f77629bef6c985cac76fcf', + 'hypergeometric': '7b59bf2f1691626c5815cdcd9a49e1dd68697251d4521575219e4d2a1b8b2c67', + 'multinomial': 'd754fa5b92943a38ec07630de92362dd2e02c43577fc147417dc5b9db94ccdd3', + 'negative_binomial': '8eb216f7cb2a63cf55605422845caaff002fddc64a7dc8b2d45acd477a49e824', + 'poisson': '70c891d76104013ebd6f6bcf30d403a9074b886ff62e4e6b8eb605bf1a4673b7', + 'zipf': '01f074f97517cd5d21747148ac6ca4074dde7fcb7acbaec0a936606fecacd93f', + } +else: + INT_FUNC_HASHES = {'binomial': '8626dd9d052cb608e93d8868de0a7b347258b199493871a1dc56e2a26cacb112', + 'geometric': '8edd53d272e49c4fc8fbbe6c7d08d563d62e482921f3131d0a0e068af30f0db9', + 'hypergeometric': '83496cc4281c77b786c9b7ad88b74d42e01603a55c60577ebab81c3ba8d45657', + 'logseries': '65878a38747c176bc00e930ebafebb69d4e1e16cd3a704e264ea8f5e24f548db', + 'multinomial': '7a984ae6dca26fd25374479e118b22f55db0aedccd5a0f2584ceada33db98605', + 'negative_binomial': 'd636d968e6a24ae92ab52fe11c46ac45b0897e98714426764e820a7d77602a61', + 'poisson': '956552176f77e7c9cb20d0118fc9cf690be488d790ed4b4c4747b965e61b0bb4', + 'zipf': 'f84ba7feffda41e606e20b28dfc0f1ea9964a74574513d4a4cbc98433a8bfa45', + } + + +@pytest.fixture(scope='module', params=INT_FUNCS) +def int_func(request): + return (request.param, INT_FUNCS[request.param], + INT_FUNC_HASHES[request.param]) + + +def assert_mt19937_state_equal(a, b): + assert_equal(a['bit_generator'], b['bit_generator']) + assert_array_equal(a['state']['key'], b['state']['key']) + assert_array_equal(a['state']['pos'], b['state']['pos']) + assert_equal(a['has_gauss'], b['has_gauss']) + assert_equal(a['gauss'], b['gauss']) + + +class TestSeed: + def test_scalar(self): + s = random.RandomState(0) + assert_equal(s.randint(1000), 684) + s = random.RandomState(4294967295) + assert_equal(s.randint(1000), 419) + + def test_array(self): + s = random.RandomState(range(10)) + assert_equal(s.randint(1000), 468) + s = random.RandomState(np.arange(10)) + assert_equal(s.randint(1000), 468) + s = random.RandomState([0]) + assert_equal(s.randint(1000), 973) + s = random.RandomState([4294967295]) + assert_equal(s.randint(1000), 265) + + def test_invalid_scalar(self): + # seed must be an unsigned 32 bit integer + assert_raises(TypeError, random.RandomState, -0.5) + assert_raises(ValueError, random.RandomState, -1) + + def test_invalid_array(self): + # seed must be an unsigned 32 bit integer + assert_raises(TypeError, random.RandomState, [-0.5]) + assert_raises(ValueError, random.RandomState, [-1]) + assert_raises(ValueError, random.RandomState, [4294967296]) + assert_raises(ValueError, random.RandomState, [1, 2, 4294967296]) + assert_raises(ValueError, random.RandomState, [1, -2, 4294967296]) + + def test_invalid_array_shape(self): + # gh-9832 + assert_raises(ValueError, random.RandomState, np.array([], + dtype=np.int64)) + assert_raises(ValueError, random.RandomState, [[1, 2, 3]]) + assert_raises(ValueError, random.RandomState, [[1, 2, 3], + [4, 5, 6]]) + + def test_cannot_seed(self): + 
rs = random.RandomState(PCG64(0)) + with assert_raises(TypeError): + rs.seed(1234) + + def test_invalid_initialization(self): + assert_raises(ValueError, random.RandomState, MT19937) + + +class TestBinomial: + def test_n_zero(self): + # Tests the corner case of n == 0 for the binomial distribution. + # binomial(0, p) should be zero for any p in [0, 1]. + # This test addresses issue #3480. + zeros = np.zeros(2, dtype='int') + for p in [0, .5, 1]: + assert_(random.binomial(0, p) == 0) + assert_array_equal(random.binomial(zeros, p), zeros) + + def test_p_is_nan(self): + # Issue #4571. + assert_raises(ValueError, random.binomial, 1, np.nan) + + +class TestMultinomial: + def test_basic(self): + random.multinomial(100, [0.2, 0.8]) + + def test_zero_probability(self): + random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0]) + + def test_int_negative_interval(self): + assert_(-5 <= random.randint(-5, -1) < -1) + x = random.randint(-5, -1, 5) + assert_(np.all(-5 <= x)) + assert_(np.all(x < -1)) + + def test_size(self): + # gh-3173 + p = [0.5, 0.5] + assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2)) + assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2)) + assert_equal(random.multinomial(1, p, np.array((2, 2))).shape, + (2, 2, 2)) + + assert_raises(TypeError, random.multinomial, 1, p, + float(1)) + + def test_invalid_prob(self): + assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2]) + assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9]) + + def test_invalid_n(self): + assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2]) + + def test_p_non_contiguous(self): + p = np.arange(15.) + p /= np.sum(p[1::3]) + pvals = p[1::3] + random.seed(1432985819) + non_contig = random.multinomial(100, pvals=pvals) + random.seed(1432985819) + contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals)) + assert_array_equal(non_contig, contig) + + def test_multinomial_pvals_float32(self): + x = np.array([9.9e-01, 9.9e-01, 1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09, + 1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09], dtype=np.float32) + pvals = x / x.sum() + match = r"[\w\s]*pvals array is cast to 64-bit floating" + with pytest.raises(ValueError, match=match): + random.multinomial(1, pvals) + + +class TestSetState: + def setup(self): + self.seed = 1234567890 + self.random_state = random.RandomState(self.seed) + self.state = self.random_state.get_state() + + def test_basic(self): + old = self.random_state.tomaxint(16) + self.random_state.set_state(self.state) + new = self.random_state.tomaxint(16) + assert_(np.all(old == new)) + + def test_gaussian_reset(self): + # Make sure the cached every-other-Gaussian is reset. + old = self.random_state.standard_normal(size=3) + self.random_state.set_state(self.state) + new = self.random_state.standard_normal(size=3) + assert_(np.all(old == new)) + + def test_gaussian_reset_in_media_res(self): + # When the state is saved with a cached Gaussian, make sure the + # cached Gaussian is restored. 
+ + self.random_state.standard_normal() + state = self.random_state.get_state() + old = self.random_state.standard_normal(size=3) + self.random_state.set_state(state) + new = self.random_state.standard_normal(size=3) + assert_(np.all(old == new)) + + def test_backwards_compatibility(self): + # Make sure we can accept old state tuples that do not have the + # cached Gaussian value. + old_state = self.state[:-2] + x1 = self.random_state.standard_normal(size=16) + self.random_state.set_state(old_state) + x2 = self.random_state.standard_normal(size=16) + self.random_state.set_state(self.state) + x3 = self.random_state.standard_normal(size=16) + assert_(np.all(x1 == x2)) + assert_(np.all(x1 == x3)) + + def test_negative_binomial(self): + # Ensure that the negative binomial results take floating point + # arguments without truncation. + self.random_state.negative_binomial(0.5, 0.5) + + def test_get_state_warning(self): + rs = random.RandomState(PCG64()) + with suppress_warnings() as sup: + w = sup.record(RuntimeWarning) + state = rs.get_state() + assert_(len(w) == 1) + assert isinstance(state, dict) + assert state['bit_generator'] == 'PCG64' + + def test_invalid_legacy_state_setting(self): + state = self.random_state.get_state() + new_state = ('Unknown', ) + state[1:] + assert_raises(ValueError, self.random_state.set_state, new_state) + assert_raises(TypeError, self.random_state.set_state, + np.array(new_state, dtype=object)) + state = self.random_state.get_state(legacy=False) + del state['bit_generator'] + assert_raises(ValueError, self.random_state.set_state, state) + + def test_pickle(self): + self.random_state.seed(0) + self.random_state.random_sample(100) + self.random_state.standard_normal() + pickled = self.random_state.get_state(legacy=False) + assert_equal(pickled['has_gauss'], 1) + rs_unpick = pickle.loads(pickle.dumps(self.random_state)) + unpickled = rs_unpick.get_state(legacy=False) + assert_mt19937_state_equal(pickled, unpickled) + + def test_state_setting(self): + attr_state = self.random_state.__getstate__() + self.random_state.standard_normal() + self.random_state.__setstate__(attr_state) + state = self.random_state.get_state(legacy=False) + assert_mt19937_state_equal(attr_state, state) + + def test_repr(self): + assert repr(self.random_state).startswith('RandomState(MT19937)') + + +class TestRandint: + + rfunc = random.randint + + # valid integer/boolean types + itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16, + np.int32, np.uint32, np.int64, np.uint64] + + def test_unsupported_type(self): + assert_raises(TypeError, self.rfunc, 1, dtype=float) + + def test_bounds_checking(self): + for dt in self.itype: + lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min + ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1 + assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt) + assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt) + assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt) + assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt) + + def test_rng_zero_and_extremes(self): + for dt in self.itype: + lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min + ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1 + + tgt = ubnd - 1 + assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + + tgt = lbnd + assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + + tgt = (lbnd + ubnd)//2 + assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + + def test_full_range(self): + # Test for ticket #1690 + + for dt in self.itype: + lbnd = 
0 if dt is np.bool_ else np.iinfo(dt).min + ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1 + + try: + self.rfunc(lbnd, ubnd, dtype=dt) + except Exception as e: + raise AssertionError("No error should have been raised, " + "but one was with the following " + "message:\n\n%s" % str(e)) + + def test_in_bounds_fuzz(self): + # Don't use fixed seed + random.seed() + + for dt in self.itype[1:]: + for ubnd in [4, 8, 16]: + vals = self.rfunc(2, ubnd, size=2**16, dtype=dt) + assert_(vals.max() < ubnd) + assert_(vals.min() >= 2) + + vals = self.rfunc(0, 2, size=2**16, dtype=np.bool_) + + assert_(vals.max() < 2) + assert_(vals.min() >= 0) + + def test_repeatability(self): + # We use a sha256 hash of generated sequences of 1000 samples + # in the range [0, 6) for all but bool, where the range + # is [0, 2). Hashes are for little endian numbers. + tgt = {'bool': '509aea74d792fb931784c4b0135392c65aec64beee12b0cc167548a2c3d31e71', + 'int16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4', + 'int32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f', + 'int64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e', + 'int8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404', + 'uint16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4', + 'uint32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f', + 'uint64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e', + 'uint8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404'} + + for dt in self.itype[1:]: + random.seed(1234) + + # view as little endian for hash + if sys.byteorder == 'little': + val = self.rfunc(0, 6, size=1000, dtype=dt) + else: + val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap() + + res = hashlib.sha256(val.view(np.int8)).hexdigest() + assert_(tgt[np.dtype(dt).name] == res) + + # bools do not depend on endianness + random.seed(1234) + val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8) + res = hashlib.sha256(val).hexdigest() + assert_(tgt[np.dtype(bool).name] == res) + + @pytest.mark.skipif(np.iinfo('l').max < 2**32, + reason='Cannot test with 32-bit C long') + def test_repeatability_32bit_boundary_broadcasting(self): + desired = np.array([[[3992670689, 2438360420, 2557845020], + [4107320065, 4142558326, 3216529513], + [1605979228, 2807061240, 665605495]], + [[3211410639, 4128781000, 457175120], + [1712592594, 1282922662, 3081439808], + [3997822960, 2008322436, 1563495165]], + [[1398375547, 4269260146, 115316740], + [3414372578, 3437564012, 2112038651], + [3572980305, 2260248732, 3908238631]], + [[2561372503, 223155946, 3127879445], + [ 441282060, 3514786552, 2148440361], + [1629275283, 3479737011, 3003195987]], + [[ 412181688, 940383289, 3047321305], + [2978368172, 764731833, 2282559898], + [ 105711276, 720447391, 3596512484]]]) + for size in [None, (5, 3, 3)]: + random.seed(12345) + x = self.rfunc([[-1], [0], [1]], [2**32 - 1, 2**32, 2**32 + 1], + size=size) + assert_array_equal(x, desired if size is not None else desired[0]) + + def test_int64_uint64_corner_case(self): + # When stored in Numpy arrays, `lbnd` is casted + # as np.int64, and `ubnd` is casted as np.uint64. + # Checking whether `lbnd` >= `ubnd` used to be + # done solely via direct comparison, which is incorrect + # because when Numpy tries to compare both numbers, + # it casts both to np.float64 because there is + # no integer superset of np.int64 and np.uint64. 
+ # `ubnd` is too large to be represented in np.float64,
+ # causing it to be rounded down to np.iinfo(np.int64).max,
+ # leading to a ValueError because `lbnd` now equals
+ # the new `ubnd`.
+
+ dt = np.int64
+ tgt = np.iinfo(np.int64).max
+ lbnd = np.int64(np.iinfo(np.int64).max)
+ ubnd = np.uint64(np.iinfo(np.int64).max + 1)
+
+ # None of these function calls should
+ # generate a ValueError now.
+ actual = random.randint(lbnd, ubnd, dtype=dt)
+ assert_equal(actual, tgt)
+
+ def test_respect_dtype_singleton(self):
+ # See gh-7203
+ for dt in self.itype:
+ lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
+ ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
+
+ sample = self.rfunc(lbnd, ubnd, dtype=dt)
+ assert_equal(sample.dtype, np.dtype(dt))
+
+ for dt in (bool, int, np.compat.long):
+ lbnd = 0 if dt is bool else np.iinfo(dt).min
+ ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
+
+ # gh-7284: Ensure that we get Python data types
+ sample = self.rfunc(lbnd, ubnd, dtype=dt)
+ assert_(not hasattr(sample, 'dtype'))
+ assert_equal(type(sample), dt)
+
+
+class TestRandomDist:
+ # Make sure the random distribution returns the correct value for a
+ # given seed
+
+ def setup(self):
+ self.seed = 1234567890
+
+ def test_rand(self):
+ random.seed(self.seed)
+ actual = random.rand(3, 2)
+ desired = np.array([[0.61879477158567997, 0.59162362775974664],
+ [0.88868358904449662, 0.89165480011560816],
+ [0.4575674820298663, 0.7781880808593471]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_rand_singleton(self):
+ random.seed(self.seed)
+ actual = random.rand()
+ desired = 0.61879477158567997
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_randn(self):
+ random.seed(self.seed)
+ actual = random.randn(3, 2)
+ desired = np.array([[1.34016345771863121, 1.73759122771936081],
+ [1.498988344300628, -0.2286433324536169],
+ [2.031033998682787, 2.17032494605655257]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ random.seed(self.seed)
+ actual = random.randn()
+ assert_array_almost_equal(actual, desired[0, 0], decimal=15)
+
+ def test_randint(self):
+ random.seed(self.seed)
+ actual = random.randint(-99, 99, size=(3, 2))
+ desired = np.array([[31, 3],
+ [-52, 41],
+ [-48, -66]])
+ assert_array_equal(actual, desired)
+
+ def test_random_integers(self):
+ random.seed(self.seed)
+ with suppress_warnings() as sup:
+ w = sup.record(DeprecationWarning)
+ actual = random.random_integers(-99, 99, size=(3, 2))
+ assert_(len(w) == 1)
+ desired = np.array([[31, 3],
+ [-52, 41],
+ [-48, -66]])
+ assert_array_equal(actual, desired)
+
+ random.seed(self.seed)
+ with suppress_warnings() as sup:
+ w = sup.record(DeprecationWarning)
+ actual = random.random_integers(198, size=(3, 2))
+ assert_(len(w) == 1)
+ assert_array_equal(actual, desired + 100)
+
+ def test_tomaxint(self):
+ random.seed(self.seed)
+ rs = random.RandomState(self.seed)
+ actual = rs.tomaxint(size=(3, 2))
+ if np.iinfo(int).max == 2147483647:
+ desired = np.array([[1328851649, 731237375],
+ [1270502067, 320041495],
+ [1908433478, 499156889]], dtype=np.int64)
+ else:
+ desired = np.array([[5707374374421908479, 5456764827585442327],
+ [8196659375100692377, 8224063923314595285],
+ [4220315081820346526, 7177518203184491332]],
+ dtype=np.int64)
+
+ assert_equal(actual, desired)
+
+ rs.seed(self.seed)
+ actual = rs.tomaxint()
+ assert_equal(actual, desired[0, 0])
+
+ def test_random_integers_max_int(self):
+ # Tests whether random_integers can generate the
+ # maximum allowed Python int that can
be converted + # into a C long. Previous implementations of this + # method have thrown an OverflowError when attempting + # to generate this integer. + with suppress_warnings() as sup: + w = sup.record(DeprecationWarning) + actual = random.random_integers(np.iinfo('l').max, + np.iinfo('l').max) + assert_(len(w) == 1) + + desired = np.iinfo('l').max + assert_equal(actual, desired) + with suppress_warnings() as sup: + w = sup.record(DeprecationWarning) + typer = np.dtype('l').type + actual = random.random_integers(typer(np.iinfo('l').max), + typer(np.iinfo('l').max)) + assert_(len(w) == 1) + assert_equal(actual, desired) + + def test_random_integers_deprecated(self): + with warnings.catch_warnings(): + warnings.simplefilter("error", DeprecationWarning) + + # DeprecationWarning raised with high == None + assert_raises(DeprecationWarning, + random.random_integers, + np.iinfo('l').max) + + # DeprecationWarning raised with high != None + assert_raises(DeprecationWarning, + random.random_integers, + np.iinfo('l').max, np.iinfo('l').max) + + def test_random_sample(self): + random.seed(self.seed) + actual = random.random_sample((3, 2)) + desired = np.array([[0.61879477158567997, 0.59162362775974664], + [0.88868358904449662, 0.89165480011560816], + [0.4575674820298663, 0.7781880808593471]]) + assert_array_almost_equal(actual, desired, decimal=15) + + random.seed(self.seed) + actual = random.random_sample() + assert_array_almost_equal(actual, desired[0, 0], decimal=15) + + def test_choice_uniform_replace(self): + random.seed(self.seed) + actual = random.choice(4, 4) + desired = np.array([2, 3, 2, 3]) + assert_array_equal(actual, desired) + + def test_choice_nonuniform_replace(self): + random.seed(self.seed) + actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1]) + desired = np.array([1, 1, 2, 2]) + assert_array_equal(actual, desired) + + def test_choice_uniform_noreplace(self): + random.seed(self.seed) + actual = random.choice(4, 3, replace=False) + desired = np.array([0, 1, 3]) + assert_array_equal(actual, desired) + + def test_choice_nonuniform_noreplace(self): + random.seed(self.seed) + actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1]) + desired = np.array([2, 3, 1]) + assert_array_equal(actual, desired) + + def test_choice_noninteger(self): + random.seed(self.seed) + actual = random.choice(['a', 'b', 'c', 'd'], 4) + desired = np.array(['c', 'd', 'c', 'd']) + assert_array_equal(actual, desired) + + def test_choice_exceptions(self): + sample = random.choice + assert_raises(ValueError, sample, -1, 3) + assert_raises(ValueError, sample, 3., 3) + assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3) + assert_raises(ValueError, sample, [], 3) + assert_raises(ValueError, sample, [1, 2, 3, 4], 3, + p=[[0.25, 0.25], [0.25, 0.25]]) + assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2]) + assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1]) + assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4]) + assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False) + # gh-13087 + assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False) + assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False) + assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False) + assert_raises(ValueError, sample, [1, 2, 3], 2, + replace=False, p=[1, 0, 0]) + + def test_choice_return_shape(self): + p = [0.1, 0.9] + # Check scalar + assert_(np.isscalar(random.choice(2, replace=True))) + assert_(np.isscalar(random.choice(2, replace=False))) + 
assert_(np.isscalar(random.choice(2, replace=True, p=p))) + assert_(np.isscalar(random.choice(2, replace=False, p=p))) + assert_(np.isscalar(random.choice([1, 2], replace=True))) + assert_(random.choice([None], replace=True) is None) + a = np.array([1, 2]) + arr = np.empty(1, dtype=object) + arr[0] = a + assert_(random.choice(arr, replace=True) is a) + + # Check 0-d array + s = tuple() + assert_(not np.isscalar(random.choice(2, s, replace=True))) + assert_(not np.isscalar(random.choice(2, s, replace=False))) + assert_(not np.isscalar(random.choice(2, s, replace=True, p=p))) + assert_(not np.isscalar(random.choice(2, s, replace=False, p=p))) + assert_(not np.isscalar(random.choice([1, 2], s, replace=True))) + assert_(random.choice([None], s, replace=True).ndim == 0) + a = np.array([1, 2]) + arr = np.empty(1, dtype=object) + arr[0] = a + assert_(random.choice(arr, s, replace=True).item() is a) + + # Check multi dimensional array + s = (2, 3) + p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2] + assert_equal(random.choice(6, s, replace=True).shape, s) + assert_equal(random.choice(6, s, replace=False).shape, s) + assert_equal(random.choice(6, s, replace=True, p=p).shape, s) + assert_equal(random.choice(6, s, replace=False, p=p).shape, s) + assert_equal(random.choice(np.arange(6), s, replace=True).shape, s) + + # Check zero-size + assert_equal(random.randint(0, 0, size=(3, 0, 4)).shape, (3, 0, 4)) + assert_equal(random.randint(0, -10, size=0).shape, (0,)) + assert_equal(random.randint(10, 10, size=0).shape, (0,)) + assert_equal(random.choice(0, size=0).shape, (0,)) + assert_equal(random.choice([], size=(0,)).shape, (0,)) + assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape, + (3, 0, 4)) + assert_raises(ValueError, random.choice, [], 10) + + def test_choice_nan_probabilities(self): + a = np.array([42, 1, 2]) + p = [None, None, None] + assert_raises(ValueError, random.choice, a, p=p) + + def test_choice_p_non_contiguous(self): + p = np.ones(10) / 5 + p[1::2] = 3.0 + random.seed(self.seed) + non_contig = random.choice(5, 3, p=p[::2]) + random.seed(self.seed) + contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2])) + assert_array_equal(non_contig, contig) + + def test_bytes(self): + random.seed(self.seed) + actual = random.bytes(10) + desired = b'\x82Ui\x9e\xff\x97+Wf\xa5' + assert_equal(actual, desired) + + def test_shuffle(self): + # Test lists, arrays (of various dtypes), and multidimensional versions + # of both, c-contiguous or not: + for conv in [lambda x: np.array([]), + lambda x: x, + lambda x: np.asarray(x).astype(np.int8), + lambda x: np.asarray(x).astype(np.float32), + lambda x: np.asarray(x).astype(np.complex64), + lambda x: np.asarray(x).astype(object), + lambda x: [(i, i) for i in x], + lambda x: np.asarray([[i, i] for i in x]), + lambda x: np.vstack([x, x]).T, + # gh-11442 + lambda x: (np.asarray([(i, i) for i in x], + [("a", int), ("b", int)]) + .view(np.recarray)), + # gh-4270 + lambda x: np.asarray([(i, i) for i in x], + [("a", object, (1,)), + ("b", np.int32, (1,))])]: + random.seed(self.seed) + alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) + random.shuffle(alist) + actual = alist + desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3]) + assert_array_equal(actual, desired) + + def test_shuffle_masked(self): + # gh-3263 + a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1) + b = np.ma.masked_values(np.arange(20) % 3 - 1, -1) + a_orig = a.copy() + b_orig = b.copy() + for i in range(50): + random.shuffle(a) + assert_equal( + sorted(a.data[~a.mask]), 
sorted(a_orig.data[~a_orig.mask])) + random.shuffle(b) + assert_equal( + sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask])) + + def test_shuffle_invalid_objects(self): + x = np.array(3) + assert_raises(TypeError, random.shuffle, x) + + def test_permutation(self): + random.seed(self.seed) + alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0] + actual = random.permutation(alist) + desired = [0, 1, 9, 6, 2, 4, 5, 8, 7, 3] + assert_array_equal(actual, desired) + + random.seed(self.seed) + arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T + actual = random.permutation(arr_2d) + assert_array_equal(actual, np.atleast_2d(desired).T) + + random.seed(self.seed) + bad_x_str = "abcd" + assert_raises(IndexError, random.permutation, bad_x_str) + + random.seed(self.seed) + bad_x_float = 1.2 + assert_raises(IndexError, random.permutation, bad_x_float) + + integer_val = 10 + desired = [9, 0, 8, 5, 1, 3, 4, 7, 6, 2] + + random.seed(self.seed) + actual = random.permutation(integer_val) + assert_array_equal(actual, desired) + + def test_beta(self): + random.seed(self.seed) + actual = random.beta(.1, .9, size=(3, 2)) + desired = np.array( + [[1.45341850513746058e-02, 5.31297615662868145e-04], + [1.85366619058432324e-06, 4.19214516800110563e-03], + [1.58405155108498093e-04, 1.26252891949397652e-04]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_binomial(self): + random.seed(self.seed) + actual = random.binomial(100.123, .456, size=(3, 2)) + desired = np.array([[37, 43], + [42, 48], + [46, 45]]) + assert_array_equal(actual, desired) + + random.seed(self.seed) + actual = random.binomial(100.123, .456) + desired = 37 + assert_array_equal(actual, desired) + + def test_chisquare(self): + random.seed(self.seed) + actual = random.chisquare(50, size=(3, 2)) + desired = np.array([[63.87858175501090585, 68.68407748911370447], + [65.77116116901505904, 47.09686762438974483], + [72.3828403199695174, 74.18408615260374006]]) + assert_array_almost_equal(actual, desired, decimal=13) + + def test_dirichlet(self): + random.seed(self.seed) + alpha = np.array([51.72840233779265162, 39.74494232180943953]) + actual = random.dirichlet(alpha, size=(3, 2)) + desired = np.array([[[0.54539444573611562, 0.45460555426388438], + [0.62345816822039413, 0.37654183177960598]], + [[0.55206000085785778, 0.44793999914214233], + [0.58964023305154301, 0.41035976694845688]], + [[0.59266909280647828, 0.40733090719352177], + [0.56974431743975207, 0.43025568256024799]]]) + assert_array_almost_equal(actual, desired, decimal=15) + bad_alpha = np.array([5.4e-01, -1.0e-16]) + assert_raises(ValueError, random.dirichlet, bad_alpha) + + random.seed(self.seed) + alpha = np.array([51.72840233779265162, 39.74494232180943953]) + actual = random.dirichlet(alpha) + assert_array_almost_equal(actual, desired[0, 0], decimal=15) + + def test_dirichlet_size(self): + # gh-3173 + p = np.array([51.72840233779265162, 39.74494232180943953]) + assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2)) + assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2)) + assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2)) + + assert_raises(TypeError, random.dirichlet, p, float(1)) + + def test_dirichlet_bad_alpha(self): + # gh-2089 + alpha = np.array([5.4e-01, -1.0e-16]) + assert_raises(ValueError, random.dirichlet, alpha) + + def 
test_dirichlet_alpha_non_contiguous(self): + a = np.array([51.72840233779265162, -1.0, 39.74494232180943953]) + alpha = a[::2] + random.seed(self.seed) + non_contig = random.dirichlet(alpha, size=(3, 2)) + random.seed(self.seed) + contig = random.dirichlet(np.ascontiguousarray(alpha), + size=(3, 2)) + assert_array_almost_equal(non_contig, contig) + + def test_exponential(self): + random.seed(self.seed) + actual = random.exponential(1.1234, size=(3, 2)) + desired = np.array([[1.08342649775011624, 1.00607889924557314], + [2.46628830085216721, 2.49668106809923884], + [0.68717433461363442, 1.69175666993575979]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_exponential_0(self): + assert_equal(random.exponential(scale=0), 0) + assert_raises(ValueError, random.exponential, scale=-0.) + + def test_f(self): + random.seed(self.seed) + actual = random.f(12, 77, size=(3, 2)) + desired = np.array([[1.21975394418575878, 1.75135759791559775], + [1.44803115017146489, 1.22108959480396262], + [1.02176975757740629, 1.34431827623300415]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_gamma(self): + random.seed(self.seed) + actual = random.gamma(5, 3, size=(3, 2)) + desired = np.array([[24.60509188649287182, 28.54993563207210627], + [26.13476110204064184, 12.56988482927716078], + [31.71863275789960568, 33.30143302795922011]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_gamma_0(self): + assert_equal(random.gamma(shape=0, scale=0), 0) + assert_raises(ValueError, random.gamma, shape=-0., scale=-0.) + + def test_geometric(self): + random.seed(self.seed) + actual = random.geometric(.123456789, size=(3, 2)) + desired = np.array([[8, 7], + [17, 17], + [5, 12]]) + assert_array_equal(actual, desired) + + def test_geometric_exceptions(self): + assert_raises(ValueError, random.geometric, 1.1) + assert_raises(ValueError, random.geometric, [1.1] * 10) + assert_raises(ValueError, random.geometric, -0.1) + assert_raises(ValueError, random.geometric, [-0.1] * 10) + with suppress_warnings() as sup: + sup.record(RuntimeWarning) + assert_raises(ValueError, random.geometric, np.nan) + assert_raises(ValueError, random.geometric, [np.nan] * 10) + + def test_gumbel(self): + random.seed(self.seed) + actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[0.19591898743416816, 0.34405539668096674], + [-1.4492522252274278, -1.47374816298446865], + [1.10651090478803416, -0.69535848626236174]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_gumbel_0(self): + assert_equal(random.gumbel(scale=0), 0) + assert_raises(ValueError, random.gumbel, scale=-0.) 
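+
+ # The *_0 tests in this class all pin down the same edge-case
+ # contract: a zero scale (or shape) parameter collapses the
+ # distribution to a deterministic value, while the signed negative
+ # zero -0. is rejected. A minimal sketch of that contract, with
+ # illustrative values that are not part of the reference data:
+ #
+ # assert random.gumbel(loc=5.0, scale=0) == 5.0 # point mass at loc
+ # assert_raises(ValueError, random.gumbel, loc=5.0, scale=-0.)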
+ + def test_hypergeometric(self): + random.seed(self.seed) + actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2)) + desired = np.array([[10, 10], + [10, 10], + [9, 9]]) + assert_array_equal(actual, desired) + + # Test nbad = 0 + actual = random.hypergeometric(5, 0, 3, size=4) + desired = np.array([3, 3, 3, 3]) + assert_array_equal(actual, desired) + + actual = random.hypergeometric(15, 0, 12, size=4) + desired = np.array([12, 12, 12, 12]) + assert_array_equal(actual, desired) + + # Test ngood = 0 + actual = random.hypergeometric(0, 5, 3, size=4) + desired = np.array([0, 0, 0, 0]) + assert_array_equal(actual, desired) + + actual = random.hypergeometric(0, 15, 12, size=4) + desired = np.array([0, 0, 0, 0]) + assert_array_equal(actual, desired) + + def test_laplace(self): + random.seed(self.seed) + actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[0.66599721112760157, 0.52829452552221945], + [3.12791959514407125, 3.18202813572992005], + [-0.05391065675859356, 1.74901336242837324]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_laplace_0(self): + assert_equal(random.laplace(scale=0), 0) + assert_raises(ValueError, random.laplace, scale=-0.) + + def test_logistic(self): + random.seed(self.seed) + actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[1.09232835305011444, 0.8648196662399954], + [4.27818590694950185, 4.33897006346929714], + [-0.21682183359214885, 2.63373365386060332]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_lognormal(self): + random.seed(self.seed) + actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2)) + desired = np.array([[16.50698631688883822, 36.54846706092654784], + [22.67886599981281748, 0.71617561058995771], + [65.72798501792723869, 86.84341601437161273]]) + assert_array_almost_equal(actual, desired, decimal=13) + + def test_lognormal_0(self): + assert_equal(random.lognormal(sigma=0), 1) + assert_raises(ValueError, random.lognormal, sigma=-0.) + + def test_logseries(self): + random.seed(self.seed) + actual = random.logseries(p=.923456789, size=(3, 2)) + desired = np.array([[2, 2], + [6, 17], + [3, 6]]) + assert_array_equal(actual, desired) + + def test_logseries_exceptions(self): + with suppress_warnings() as sup: + sup.record(RuntimeWarning) + assert_raises(ValueError, random.logseries, np.nan) + assert_raises(ValueError, random.logseries, [np.nan] * 10) + + def test_multinomial(self): + random.seed(self.seed) + actual = random.multinomial(20, [1 / 6.] 
* 6, size=(3, 2))
+ desired = np.array([[[4, 3, 5, 4, 2, 2],
+ [5, 2, 8, 2, 2, 1]],
+ [[3, 4, 3, 6, 0, 4],
+ [2, 1, 4, 3, 6, 4]],
+ [[4, 4, 2, 5, 2, 3],
+ [4, 3, 4, 2, 3, 4]]])
+ assert_array_equal(actual, desired)
+
+ def test_multivariate_normal(self):
+ random.seed(self.seed)
+ mean = (.123456789, 10)
+ cov = [[1, 0], [0, 1]]
+ size = (3, 2)
+ actual = random.multivariate_normal(mean, cov, size)
+ desired = np.array([[[1.463620246718631, 11.73759122771936],
+ [1.622445133300628, 9.771356667546383]],
+ [[2.154490787682787, 12.170324946056553],
+ [1.719909438201865, 9.230548443648306]],
+ [[0.689515026297799, 9.880729819607714],
+ [-0.023054015651998, 9.201096623542879]]])
+
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ # Check for default size, was raising deprecation warning
+ actual = random.multivariate_normal(mean, cov)
+ desired = np.array([0.895289569463708, 9.17180864067987])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ # Check that a non-positive-semidefinite covariance warns with
+ # RuntimeWarning
+ mean = [0, 0]
+ cov = [[1, 2], [2, 1]]
+ assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
+
+ # and that it doesn't warn when check_valid='ignore'
+ assert_no_warnings(random.multivariate_normal, mean, cov,
+ check_valid='ignore')
+
+ # and that it raises a ValueError when check_valid='raise'
+ assert_raises(ValueError, random.multivariate_normal, mean, cov,
+ check_valid='raise')
+
+ cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
+ with suppress_warnings() as sup:
+ random.multivariate_normal(mean, cov)
+ w = sup.record(RuntimeWarning)
+ assert len(w) == 0
+
+ mu = np.zeros(2)
+ cov = np.eye(2)
+ assert_raises(ValueError, random.multivariate_normal, mean, cov,
+ check_valid='other')
+ assert_raises(ValueError, random.multivariate_normal,
+ np.zeros((2, 1, 1)), cov)
+ assert_raises(ValueError, random.multivariate_normal,
+ mu, np.empty((3, 2)))
+ assert_raises(ValueError, random.multivariate_normal,
+ mu, np.eye(3))
+
+ def test_negative_binomial(self):
+ random.seed(self.seed)
+ actual = random.negative_binomial(n=100, p=.12345, size=(3, 2))
+ desired = np.array([[848, 841],
+ [892, 611],
+ [779, 647]])
+ assert_array_equal(actual, desired)
+
+ def test_negative_binomial_exceptions(self):
+ with suppress_warnings() as sup:
+ sup.record(RuntimeWarning)
+ assert_raises(ValueError, random.negative_binomial, 100, np.nan)
+ assert_raises(ValueError, random.negative_binomial, 100,
+ [np.nan] * 10)
+
+ def test_noncentral_chisquare(self):
+ random.seed(self.seed)
+ actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
+ desired = np.array([[23.91905354498517511, 13.35324692733826346],
+ [31.22452661329736401, 16.60047399466177254],
+ [5.03461598262724586, 17.94973089023519464]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
+ desired = np.array([[1.47145377828516666, 0.15052899268012659],
+ [0.00943803056963588, 1.02647251615666169],
+ [0.332334982684171, 0.15451287602753125]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ random.seed(self.seed)
+ actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
+ desired = np.array([[9.597154162763948, 11.725484450296079],
+ [10.413711048138335, 3.694475922923986],
+ [13.484222138963087, 14.377255424602957]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_noncentral_f(self):
+ random.seed(self.seed)
+ actual = random.noncentral_f(dfnum=5,
dfden=2, nonc=1, + size=(3, 2)) + desired = np.array([[1.40598099674926669, 0.34207973179285761], + [3.57715069265772545, 7.92632662577829805], + [0.43741599463544162, 1.1774208752428319]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_noncentral_f_nan(self): + random.seed(self.seed) + actual = random.noncentral_f(dfnum=5, dfden=2, nonc=np.nan) + assert np.isnan(actual) + + def test_normal(self): + random.seed(self.seed) + actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[2.80378370443726244, 3.59863924443872163], + [3.121433477601256, -0.33382987590723379], + [4.18552478636557357, 4.46410668111310471]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_normal_0(self): + assert_equal(random.normal(scale=0), 0) + assert_raises(ValueError, random.normal, scale=-0.) + + def test_pareto(self): + random.seed(self.seed) + actual = random.pareto(a=.123456789, size=(3, 2)) + desired = np.array( + [[2.46852460439034849e+03, 1.41286880810518346e+03], + [5.28287797029485181e+07, 6.57720981047328785e+07], + [1.40840323350391515e+02, 1.98390255135251704e+05]]) + # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this + # matrix differs by 24 nulps. Discussion: + # https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html + # Consensus is that this is probably some gcc quirk that affects + # rounding but not in any important way, so we just use a looser + # tolerance on this test: + np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30) + + def test_poisson(self): + random.seed(self.seed) + actual = random.poisson(lam=.123456789, size=(3, 2)) + desired = np.array([[0, 0], + [1, 0], + [0, 0]]) + assert_array_equal(actual, desired) + + def test_poisson_exceptions(self): + lambig = np.iinfo('l').max + lamneg = -1 + assert_raises(ValueError, random.poisson, lamneg) + assert_raises(ValueError, random.poisson, [lamneg] * 10) + assert_raises(ValueError, random.poisson, lambig) + assert_raises(ValueError, random.poisson, [lambig] * 10) + with suppress_warnings() as sup: + sup.record(RuntimeWarning) + assert_raises(ValueError, random.poisson, np.nan) + assert_raises(ValueError, random.poisson, [np.nan] * 10) + + def test_power(self): + random.seed(self.seed) + actual = random.power(a=.123456789, size=(3, 2)) + desired = np.array([[0.02048932883240791, 0.01424192241128213], + [0.38446073748535298, 0.39499689943484395], + [0.00177699707563439, 0.13115505880863756]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_rayleigh(self): + random.seed(self.seed) + actual = random.rayleigh(scale=10, size=(3, 2)) + desired = np.array([[13.8882496494248393, 13.383318339044731], + [20.95413364294492098, 21.08285015800712614], + [11.06066537006854311, 17.35468505778271009]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_rayleigh_0(self): + assert_equal(random.rayleigh(scale=0), 0) + assert_raises(ValueError, random.rayleigh, scale=-0.) 
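+
+ # A note on the nulp comparison used in test_pareto above: one nulp
+ # is one unit in the last place of the float, so the tolerance scales
+ # with the magnitude of the values instead of using a fixed decimal
+ # count. A small sketch (values are illustrative only):
+ #
+ # a = np.float64(1.0)
+ # b = np.nextafter(a, 2.0) # exactly 1 ulp above a
+ # np.testing.assert_array_almost_equal_nulp(a, b, nulp=2) # passes
+ # # a and a + 1e-12 differ by roughly 4500 ulps, so nulp=2 raises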
+
+ def test_standard_cauchy(self):
+ random.seed(self.seed)
+ actual = random.standard_cauchy(size=(3, 2))
+ desired = np.array([[0.77127660196445336, -6.55601161955910605],
+ [0.93582023391158309, -2.07479293013759447],
+ [-4.74601644297011926, 0.18338989290760804]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_standard_exponential(self):
+ random.seed(self.seed)
+ actual = random.standard_exponential(size=(3, 2))
+ desired = np.array([[0.96441739162374596, 0.89556604882105506],
+ [2.1953785836319808, 2.22243285392490542],
+ [0.6116915921431676, 1.50592546727413201]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_standard_gamma(self):
+ random.seed(self.seed)
+ actual = random.standard_gamma(shape=3, size=(3, 2))
+ desired = np.array([[5.50841531318455058, 6.62953470301903103],
+ [5.93988484943779227, 2.31044849402133989],
+ [7.54838614231317084, 8.012756093271868]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_standard_gamma_0(self):
+ assert_equal(random.standard_gamma(shape=0), 0)
+ assert_raises(ValueError, random.standard_gamma, shape=-0.)
+
+ def test_standard_normal(self):
+ random.seed(self.seed)
+ actual = random.standard_normal(size=(3, 2))
+ desired = np.array([[1.34016345771863121, 1.73759122771936081],
+ [1.498988344300628, -0.2286433324536169],
+ [2.031033998682787, 2.17032494605655257]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_randn_singleton(self):
+ random.seed(self.seed)
+ actual = random.randn()
+ desired = np.array(1.34016345771863121)
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_standard_t(self):
+ random.seed(self.seed)
+ actual = random.standard_t(df=10, size=(3, 2))
+ desired = np.array([[0.97140611862659965, -0.08830486548450577],
+ [1.36311143689505321, -0.55317463909867071],
+ [-0.18473749069684214, 0.61181537341755321]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_triangular(self):
+ random.seed(self.seed)
+ actual = random.triangular(left=5.12, mode=10.23, right=20.34,
+ size=(3, 2))
+ desired = np.array([[12.68117178949215784, 12.4129206149193152],
+ [16.20131377335158263, 16.25692138747600524],
+ [11.20400690911820263, 14.4978144835829923]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_uniform(self):
+ random.seed(self.seed)
+ actual = random.uniform(low=1.23, high=10.54, size=(3, 2))
+ desired = np.array([[6.99097932346268003, 6.73801597444323974],
+ [9.50364421400426274, 9.53130618907631089],
+ [5.48995325769805476, 8.47493103280052118]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_uniform_range_bounds(self):
+ fmin = np.finfo('float').min
+ fmax = np.finfo('float').max
+
+ func = random.uniform
+ assert_raises(OverflowError, func, -np.inf, 0)
+ assert_raises(OverflowError, func, 0, np.inf)
+ assert_raises(OverflowError, func, fmin, fmax)
+ assert_raises(OverflowError, func, [-np.inf], [0])
+ assert_raises(OverflowError, func, [0], [np.inf])
+
+ # (fmax / 1e17) - fmin is within range, so this should not throw.
+ # On i386 with x87 extended precision, DBL_MAX / 1e17 + DBL_MAX can
+ # exceed DBL_MAX, so increase fmin a bit to account for that.
+ random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
+
+ def test_scalar_exception_propagation(self):
+ # Tests that exceptions are correctly propagated in distributions
+ # when called with objects that throw exceptions when converted to
+ # scalars.
+ # + # Regression test for gh: 8865 + + class ThrowingFloat(np.ndarray): + def __float__(self): + raise TypeError + + throwing_float = np.array(1.0).view(ThrowingFloat) + assert_raises(TypeError, random.uniform, throwing_float, + throwing_float) + + class ThrowingInteger(np.ndarray): + def __int__(self): + raise TypeError + + throwing_int = np.array(1).view(ThrowingInteger) + assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1) + + def test_vonmises(self): + random.seed(self.seed) + actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2)) + desired = np.array([[2.28567572673902042, 2.89163838442285037], + [0.38198375564286025, 2.57638023113890746], + [1.19153771588353052, 1.83509849681825354]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_vonmises_small(self): + # check infinite loop, gh-4720 + random.seed(self.seed) + r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6) + assert_(np.isfinite(r).all()) + + def test_vonmises_large(self): + # guard against changes in RandomState when Generator is fixed + random.seed(self.seed) + actual = random.vonmises(mu=0., kappa=1e7, size=3) + desired = np.array([4.634253748521111e-04, + 3.558873596114509e-04, + -2.337119622577433e-04]) + assert_array_almost_equal(actual, desired, decimal=8) + + def test_vonmises_nan(self): + random.seed(self.seed) + r = random.vonmises(mu=0., kappa=np.nan) + assert_(np.isnan(r)) + + def test_wald(self): + random.seed(self.seed) + actual = random.wald(mean=1.23, scale=1.54, size=(3, 2)) + desired = np.array([[3.82935265715889983, 5.13125249184285526], + [0.35045403618358717, 1.50832396872003538], + [0.24124319895843183, 0.22031101461955038]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_weibull(self): + random.seed(self.seed) + actual = random.weibull(a=1.23, size=(3, 2)) + desired = np.array([[0.97097342648766727, 0.91422896443565516], + [1.89517770034962929, 1.91414357960479564], + [0.67057783752390987, 1.39494046635066793]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_weibull_0(self): + random.seed(self.seed) + assert_equal(random.weibull(a=0, size=12), np.zeros(12)) + assert_raises(ValueError, random.weibull, a=-0.) 
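+
+ # The three vonmises variants above bracket the usable kappa range:
+ # kappa ~ 1e-8 must not hang (the gh-4720 infinite loop), kappa = 1e7
+ # must stay numerically stable, and kappa = nan must propagate to a
+ # nan sample rather than raising. A sketch of the same bracketing
+ # (illustrative, not reference data):
+ #
+ # assert np.isfinite(random.vonmises(0., 1.1e-8, size=1000)).all()
+ # assert abs(random.vonmises(0., 1e7)) < 0.01 # ~1/sqrt(kappa) spread
+ # assert np.isnan(random.vonmises(0., np.nan))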
+ + def test_zipf(self): + random.seed(self.seed) + actual = random.zipf(a=1.23, size=(3, 2)) + desired = np.array([[66, 29], + [1, 1], + [3, 13]]) + assert_array_equal(actual, desired) + + +class TestBroadcast: + # tests that functions that broadcast behave + # correctly when presented with non-scalar arguments + def setup(self): + self.seed = 123456789 + + def set_seed(self): + random.seed(self.seed) + + def test_uniform(self): + low = [0] + high = [1] + uniform = random.uniform + desired = np.array([0.53283302478975902, + 0.53413660089041659, + 0.50955303552646702]) + + self.set_seed() + actual = uniform(low * 3, high) + assert_array_almost_equal(actual, desired, decimal=14) + + self.set_seed() + actual = uniform(low, high * 3) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_normal(self): + loc = [0] + scale = [1] + bad_scale = [-1] + normal = random.normal + desired = np.array([2.2129019979039612, + 2.1283977976520019, + 1.8417114045748335]) + + self.set_seed() + actual = normal(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, normal, loc * 3, bad_scale) + + self.set_seed() + actual = normal(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, normal, loc, bad_scale * 3) + + def test_beta(self): + a = [1] + b = [2] + bad_a = [-1] + bad_b = [-2] + beta = random.beta + desired = np.array([0.19843558305989056, + 0.075230336409423643, + 0.24976865978980844]) + + self.set_seed() + actual = beta(a * 3, b) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, beta, bad_a * 3, b) + assert_raises(ValueError, beta, a * 3, bad_b) + + self.set_seed() + actual = beta(a, b * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, beta, bad_a, b * 3) + assert_raises(ValueError, beta, a, bad_b * 3) + + def test_exponential(self): + scale = [1] + bad_scale = [-1] + exponential = random.exponential + desired = np.array([0.76106853658845242, + 0.76386282278691653, + 0.71243813125891797]) + + self.set_seed() + actual = exponential(scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, exponential, bad_scale * 3) + + def test_standard_gamma(self): + shape = [1] + bad_shape = [-1] + std_gamma = random.standard_gamma + desired = np.array([0.76106853658845242, + 0.76386282278691653, + 0.71243813125891797]) + + self.set_seed() + actual = std_gamma(shape * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, std_gamma, bad_shape * 3) + + def test_gamma(self): + shape = [1] + scale = [2] + bad_shape = [-1] + bad_scale = [-2] + gamma = random.gamma + desired = np.array([1.5221370731769048, + 1.5277256455738331, + 1.4248762625178359]) + + self.set_seed() + actual = gamma(shape * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gamma, bad_shape * 3, scale) + assert_raises(ValueError, gamma, shape * 3, bad_scale) + + self.set_seed() + actual = gamma(shape, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gamma, bad_shape, scale * 3) + assert_raises(ValueError, gamma, shape, bad_scale * 3) + + def test_f(self): + dfnum = [1] + dfden = [2] + bad_dfnum = [-1] + bad_dfden = [-2] + f = random.f + desired = np.array([0.80038951638264799, + 0.86768719635363512, + 2.7251095168386801]) + + self.set_seed() + actual = f(dfnum * 3, dfden) + assert_array_almost_equal(actual, 
desired, decimal=14) + assert_raises(ValueError, f, bad_dfnum * 3, dfden) + assert_raises(ValueError, f, dfnum * 3, bad_dfden) + + self.set_seed() + actual = f(dfnum, dfden * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, f, bad_dfnum, dfden * 3) + assert_raises(ValueError, f, dfnum, bad_dfden * 3) + + def test_noncentral_f(self): + dfnum = [2] + dfden = [3] + nonc = [4] + bad_dfnum = [0] + bad_dfden = [-1] + bad_nonc = [-2] + nonc_f = random.noncentral_f + desired = np.array([9.1393943263705211, + 13.025456344595602, + 8.8018098359100545]) + + self.set_seed() + actual = nonc_f(dfnum * 3, dfden, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3))) + + assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc) + assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc) + assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc) + + self.set_seed() + actual = nonc_f(dfnum, dfden * 3, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc) + assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc) + assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc) + + self.set_seed() + actual = nonc_f(dfnum, dfden, nonc * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3) + assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3) + assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3) + + def test_noncentral_f_small_df(self): + self.set_seed() + desired = np.array([6.869638627492048, 0.785880199263955]) + actual = random.noncentral_f(0.9, 0.9, 2, size=2) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_chisquare(self): + df = [1] + bad_df = [-1] + chisquare = random.chisquare + desired = np.array([0.57022801133088286, + 0.51947702108840776, + 0.1320969254923558]) + + self.set_seed() + actual = chisquare(df * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, chisquare, bad_df * 3) + + def test_noncentral_chisquare(self): + df = [1] + nonc = [2] + bad_df = [-1] + bad_nonc = [-2] + nonc_chi = random.noncentral_chisquare + desired = np.array([9.0015599467913763, + 4.5804135049718742, + 6.0872302432834564]) + + self.set_seed() + actual = nonc_chi(df * 3, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_chi, bad_df * 3, nonc) + assert_raises(ValueError, nonc_chi, df * 3, bad_nonc) + + self.set_seed() + actual = nonc_chi(df, nonc * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_chi, bad_df, nonc * 3) + assert_raises(ValueError, nonc_chi, df, bad_nonc * 3) + + def test_standard_t(self): + df = [1] + bad_df = [-1] + t = random.standard_t + desired = np.array([3.0702872575217643, + 5.8560725167361607, + 1.0274791436474273]) + + self.set_seed() + actual = t(df * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, t, bad_df * 3) + assert_raises(ValueError, random.standard_t, bad_df * 3) + + def test_vonmises(self): + mu = [2] + kappa = [1] + bad_kappa = [-1] + vonmises = random.vonmises + desired = np.array([2.9883443664201312, + -2.7064099483995943, + -1.8672476700665914]) + + self.set_seed() + actual = vonmises(mu * 3, kappa) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, vonmises, mu * 3, bad_kappa) + + 
self.set_seed() + actual = vonmises(mu, kappa * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, vonmises, mu, bad_kappa * 3) + + def test_pareto(self): + a = [1] + bad_a = [-1] + pareto = random.pareto + desired = np.array([1.1405622680198362, + 1.1465519762044529, + 1.0389564467453547]) + + self.set_seed() + actual = pareto(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, pareto, bad_a * 3) + assert_raises(ValueError, random.pareto, bad_a * 3) + + def test_weibull(self): + a = [1] + bad_a = [-1] + weibull = random.weibull + desired = np.array([0.76106853658845242, + 0.76386282278691653, + 0.71243813125891797]) + + self.set_seed() + actual = weibull(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, weibull, bad_a * 3) + assert_raises(ValueError, random.weibull, bad_a * 3) + + def test_power(self): + a = [1] + bad_a = [-1] + power = random.power + desired = np.array([0.53283302478975902, + 0.53413660089041659, + 0.50955303552646702]) + + self.set_seed() + actual = power(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, power, bad_a * 3) + assert_raises(ValueError, random.power, bad_a * 3) + + def test_laplace(self): + loc = [0] + scale = [1] + bad_scale = [-1] + laplace = random.laplace + desired = np.array([0.067921356028507157, + 0.070715642226971326, + 0.019290950698972624]) + + self.set_seed() + actual = laplace(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, laplace, loc * 3, bad_scale) + + self.set_seed() + actual = laplace(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, laplace, loc, bad_scale * 3) + + def test_gumbel(self): + loc = [0] + scale = [1] + bad_scale = [-1] + gumbel = random.gumbel + desired = np.array([0.2730318639556768, + 0.26936705726291116, + 0.33906220393037939]) + + self.set_seed() + actual = gumbel(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gumbel, loc * 3, bad_scale) + + self.set_seed() + actual = gumbel(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gumbel, loc, bad_scale * 3) + + def test_logistic(self): + loc = [0] + scale = [1] + bad_scale = [-1] + logistic = random.logistic + desired = np.array([0.13152135837586171, + 0.13675915696285773, + 0.038216792802833396]) + + self.set_seed() + actual = logistic(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, logistic, loc * 3, bad_scale) + + self.set_seed() + actual = logistic(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, logistic, loc, bad_scale * 3) + assert_equal(random.logistic(1.0, 0.0), 1.0) + + def test_lognormal(self): + mean = [0] + sigma = [1] + bad_sigma = [-1] + lognormal = random.lognormal + desired = np.array([9.1422086044848427, + 8.4013952870126261, + 6.3073234116578671]) + + self.set_seed() + actual = lognormal(mean * 3, sigma) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, lognormal, mean * 3, bad_sigma) + assert_raises(ValueError, random.lognormal, mean * 3, bad_sigma) + + self.set_seed() + actual = lognormal(mean, sigma * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, lognormal, mean, bad_sigma * 3) + assert_raises(ValueError, 
random.lognormal, mean, bad_sigma * 3) + + def test_rayleigh(self): + scale = [1] + bad_scale = [-1] + rayleigh = random.rayleigh + desired = np.array([1.2337491937897689, + 1.2360119924878694, + 1.1936818095781789]) + + self.set_seed() + actual = rayleigh(scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rayleigh, bad_scale * 3) + + def test_wald(self): + mean = [0.5] + scale = [1] + bad_mean = [0] + bad_scale = [-2] + wald = random.wald + desired = np.array([0.11873681120271318, + 0.12450084820795027, + 0.9096122728408238]) + + self.set_seed() + actual = wald(mean * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, wald, bad_mean * 3, scale) + assert_raises(ValueError, wald, mean * 3, bad_scale) + assert_raises(ValueError, random.wald, bad_mean * 3, scale) + assert_raises(ValueError, random.wald, mean * 3, bad_scale) + + self.set_seed() + actual = wald(mean, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, wald, bad_mean, scale * 3) + assert_raises(ValueError, wald, mean, bad_scale * 3) + assert_raises(ValueError, wald, 0.0, 1) + assert_raises(ValueError, wald, 0.5, 0.0) + + def test_triangular(self): + left = [1] + right = [3] + mode = [2] + bad_left_one = [3] + bad_mode_one = [4] + bad_left_two, bad_mode_two = right * 2 + triangular = random.triangular + desired = np.array([2.03339048710429, + 2.0347400359389356, + 2.0095991069536208]) + + self.set_seed() + actual = triangular(left * 3, mode, right) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one * 3, mode, right) + assert_raises(ValueError, triangular, left * 3, bad_mode_one, right) + assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, + right) + + self.set_seed() + actual = triangular(left, mode * 3, right) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one, mode * 3, right) + assert_raises(ValueError, triangular, left, bad_mode_one * 3, right) + assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, + right) + + self.set_seed() + actual = triangular(left, mode, right * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one, mode, right * 3) + assert_raises(ValueError, triangular, left, bad_mode_one, right * 3) + assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, + right * 3) + + assert_raises(ValueError, triangular, 10., 0., 20.) + assert_raises(ValueError, triangular, 10., 25., 20.) + assert_raises(ValueError, triangular, 10., 10., 10.) 
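+
+ # Every method in TestBroadcast follows the same recipe: each
+ # parameter starts as a length-1 list and `param * 3` stretches one
+ # of them, so the others must broadcast against it and the same three
+ # variates must come out no matter which argument carries the length.
+ # A minimal sketch of the shape contract (illustrative only):
+ #
+ # random.seed(123456789)
+ # assert random.normal([0, 0, 0], [1]).shape == (3,)
+ # random.seed(123456789)
+ # assert random.normal([0], [1, 1, 1]).shape == (3,)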
+ + def test_binomial(self): + n = [1] + p = [0.5] + bad_n = [-1] + bad_p_one = [-1] + bad_p_two = [1.5] + binom = random.binomial + desired = np.array([1, 1, 1]) + + self.set_seed() + actual = binom(n * 3, p) + assert_array_equal(actual, desired) + assert_raises(ValueError, binom, bad_n * 3, p) + assert_raises(ValueError, binom, n * 3, bad_p_one) + assert_raises(ValueError, binom, n * 3, bad_p_two) + + self.set_seed() + actual = binom(n, p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, binom, bad_n, p * 3) + assert_raises(ValueError, binom, n, bad_p_one * 3) + assert_raises(ValueError, binom, n, bad_p_two * 3) + + def test_negative_binomial(self): + n = [1] + p = [0.5] + bad_n = [-1] + bad_p_one = [-1] + bad_p_two = [1.5] + neg_binom = random.negative_binomial + desired = np.array([1, 0, 1]) + + self.set_seed() + actual = neg_binom(n * 3, p) + assert_array_equal(actual, desired) + assert_raises(ValueError, neg_binom, bad_n * 3, p) + assert_raises(ValueError, neg_binom, n * 3, bad_p_one) + assert_raises(ValueError, neg_binom, n * 3, bad_p_two) + + self.set_seed() + actual = neg_binom(n, p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, neg_binom, bad_n, p * 3) + assert_raises(ValueError, neg_binom, n, bad_p_one * 3) + assert_raises(ValueError, neg_binom, n, bad_p_two * 3) + + def test_poisson(self): + max_lam = random.RandomState()._poisson_lam_max + + lam = [1] + bad_lam_one = [-1] + bad_lam_two = [max_lam * 2] + poisson = random.poisson + desired = np.array([1, 1, 0]) + + self.set_seed() + actual = poisson(lam * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, poisson, bad_lam_one * 3) + assert_raises(ValueError, poisson, bad_lam_two * 3) + + def test_zipf(self): + a = [2] + bad_a = [0] + zipf = random.zipf + desired = np.array([2, 2, 1]) + + self.set_seed() + actual = zipf(a * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, zipf, bad_a * 3) + with np.errstate(invalid='ignore'): + assert_raises(ValueError, zipf, np.nan) + assert_raises(ValueError, zipf, [0, 0, np.nan]) + + def test_geometric(self): + p = [0.5] + bad_p_one = [-1] + bad_p_two = [1.5] + geom = random.geometric + desired = np.array([2, 2, 2]) + + self.set_seed() + actual = geom(p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, geom, bad_p_one * 3) + assert_raises(ValueError, geom, bad_p_two * 3) + + def test_hypergeometric(self): + ngood = [1] + nbad = [2] + nsample = [2] + bad_ngood = [-1] + bad_nbad = [-2] + bad_nsample_one = [0] + bad_nsample_two = [4] + hypergeom = random.hypergeometric + desired = np.array([1, 1, 1]) + + self.set_seed() + actual = hypergeom(ngood * 3, nbad, nsample) + assert_array_equal(actual, desired) + assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample) + assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample) + assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one) + assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two) + + self.set_seed() + actual = hypergeom(ngood, nbad * 3, nsample) + assert_array_equal(actual, desired) + assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample) + assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample) + assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one) + assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two) + + self.set_seed() + actual = hypergeom(ngood, nbad, nsample * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, 
hypergeom, bad_ngood, nbad, nsample * 3) + assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3) + assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3) + assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3) + + assert_raises(ValueError, hypergeom, -1, 10, 20) + assert_raises(ValueError, hypergeom, 10, -1, 20) + assert_raises(ValueError, hypergeom, 10, 10, 0) + assert_raises(ValueError, hypergeom, 10, 10, 25) + + def test_logseries(self): + p = [0.5] + bad_p_one = [2] + bad_p_two = [-1] + logseries = random.logseries + desired = np.array([1, 1, 1]) + + self.set_seed() + actual = logseries(p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, logseries, bad_p_one * 3) + assert_raises(ValueError, logseries, bad_p_two * 3) + + +class TestThread: + # make sure each state produces the same sequence even in threads + def setup(self): + self.seeds = range(4) + + def check_function(self, function, sz): + from threading import Thread + + out1 = np.empty((len(self.seeds),) + sz) + out2 = np.empty((len(self.seeds),) + sz) + + # threaded generation + t = [Thread(target=function, args=(random.RandomState(s), o)) + for s, o in zip(self.seeds, out1)] + [x.start() for x in t] + [x.join() for x in t] + + # the same serial + for s, o in zip(self.seeds, out2): + function(random.RandomState(s), o) + + # these platforms change x87 fpu precision mode in threads + if np.intp().dtype.itemsize == 4 and sys.platform == "win32": + assert_array_almost_equal(out1, out2) + else: + assert_array_equal(out1, out2) + + def test_normal(self): + def gen_random(state, out): + out[...] = state.normal(size=10000) + + self.check_function(gen_random, sz=(10000,)) + + def test_exp(self): + def gen_random(state, out): + out[...] = state.exponential(scale=np.ones((100, 1000))) + + self.check_function(gen_random, sz=(100, 1000)) + + def test_multinomial(self): + def gen_random(state, out): + out[...] = state.multinomial(10, [1 / 6.] 
* 6, size=10000) + + self.check_function(gen_random, sz=(10000, 6)) + + +# See Issue #4263 +class TestSingleEltArrayInput: + def setup(self): + self.argOne = np.array([2]) + self.argTwo = np.array([3]) + self.argThree = np.array([4]) + self.tgtShape = (1,) + + def test_one_arg_funcs(self): + funcs = (random.exponential, random.standard_gamma, + random.chisquare, random.standard_t, + random.pareto, random.weibull, + random.power, random.rayleigh, + random.poisson, random.zipf, + random.geometric, random.logseries) + + probfuncs = (random.geometric, random.logseries) + + for func in funcs: + if func in probfuncs: # p < 1.0 + out = func(np.array([0.5])) + + else: + out = func(self.argOne) + + assert_equal(out.shape, self.tgtShape) + + def test_two_arg_funcs(self): + funcs = (random.uniform, random.normal, + random.beta, random.gamma, + random.f, random.noncentral_chisquare, + random.vonmises, random.laplace, + random.gumbel, random.logistic, + random.lognormal, random.wald, + random.binomial, random.negative_binomial) + + probfuncs = (random.binomial, random.negative_binomial) + + for func in funcs: + if func in probfuncs: # p <= 1 + argTwo = np.array([0.5]) + + else: + argTwo = self.argTwo + + out = func(self.argOne, argTwo) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne[0], argTwo) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne, argTwo[0]) + assert_equal(out.shape, self.tgtShape) + + def test_three_arg_funcs(self): + funcs = [random.noncentral_f, random.triangular, + random.hypergeometric] + + for func in funcs: + out = func(self.argOne, self.argTwo, self.argThree) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne[0], self.argTwo, self.argThree) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne, self.argTwo[0], self.argThree) + assert_equal(out.shape, self.tgtShape) + + +# Ensure returned array dtype is correct for platform +def test_integer_dtype(int_func): + random.seed(123456789) + fname, args, sha256 = int_func + f = getattr(random, fname) + actual = f(*args, size=2) + assert_(actual.dtype == np.dtype('l')) + + +def test_integer_repeat(int_func): + random.seed(123456789) + fname, args, sha256 = int_func + f = getattr(random, fname) + val = f(*args, size=1000000) + if sys.byteorder != 'little': + val = val.byteswap() + res = hashlib.sha256(val.view(np.int8)).hexdigest() + assert_(res == sha256) + + +def test_broadcast_size_error(): + # GH-16833 + with pytest.raises(ValueError): + random.binomial(1, [0.3, 0.7], size=(2, 1)) + with pytest.raises(ValueError): + random.binomial([1, 2], 0.3, size=(2, 1)) + with pytest.raises(ValueError): + random.binomial([1, 2], [0.3, 0.7], size=(2, 1)) diff --git a/MLPY/Lib/site-packages/numpy/random/tests/test_randomstate_regression.py b/MLPY/Lib/site-packages/numpy/random/tests/test_randomstate_regression.py new file mode 100644 index 0000000000000000000000000000000000000000..88ba20d64b30b7c9c6911315d07d9777fab39b9e --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/tests/test_randomstate_regression.py @@ -0,0 +1,203 @@ +import sys + +import pytest + +from numpy.testing import ( + assert_, assert_array_equal, assert_raises, + ) +import numpy as np + +from numpy import random + + +class TestRegression: + + def test_VonMises_range(self): + # Make sure generated random variables are in [-pi, pi]. + # Regression test for ticket #986. 
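+ # vonmises samples are angles on the circle, so every draw must fall
+ # in the half-open interval (-pi, pi]; sweeping mu across several
+ # periods checks that the wrap-around holds for any location.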
+ for mu in np.linspace(-7., 7., 5): + r = random.vonmises(mu, 1, 50) + assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) + + def test_hypergeometric_range(self): + # Test for ticket #921 + assert_(np.all(random.hypergeometric(3, 18, 11, size=10) < 4)) + assert_(np.all(random.hypergeometric(18, 3, 11, size=10) > 0)) + + # Test for ticket #5623 + args = [ + (2**20 - 2, 2**20 - 2, 2**20 - 2), # Check for 32-bit systems + ] + is_64bits = sys.maxsize > 2**32 + if is_64bits and sys.platform != 'win32': + # Check for 64-bit systems + args.append((2**40 - 2, 2**40 - 2, 2**40 - 2)) + for arg in args: + assert_(random.hypergeometric(*arg) > 0) + + def test_logseries_convergence(self): + # Test for ticket #923 + N = 1000 + random.seed(0) + rvsn = random.logseries(0.8, size=N) + # these two frequency counts should be close to theoretical + # numbers with this large sample + # theoretical large N result is 0.49706795 + freq = np.sum(rvsn == 1) / float(N) + msg = f'Frequency was {freq:f}, should be > 0.45' + assert_(freq > 0.45, msg) + # theoretical large N result is 0.19882718 + freq = np.sum(rvsn == 2) / float(N) + msg = f'Frequency was {freq:f}, should be < 0.23' + assert_(freq < 0.23, msg) + + def test_shuffle_mixed_dimension(self): + # Test for trac ticket #2074 + for t in [[1, 2, 3, None], + [(1, 1), (2, 2), (3, 3), None], + [1, (2, 2), (3, 3), None], + [(1, 1), 2, 3, None]]: + random.seed(12345) + shuffled = list(t) + random.shuffle(shuffled) + expected = np.array([t[0], t[3], t[1], t[2]], dtype=object) + assert_array_equal(np.array(shuffled, dtype=object), expected) + + def test_call_within_randomstate(self): + # Check that custom RandomState does not call into global state + m = random.RandomState() + res = np.array([0, 8, 7, 2, 1, 9, 4, 7, 0, 3]) + for i in range(3): + random.seed(i) + m.seed(4321) + # If m.state is not honored, the result will change + assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res) + + def test_multivariate_normal_size_types(self): + # Test for multivariate_normal issue with 'size' argument. + # Check that the multivariate_normal size argument can be a + # numpy integer. + random.multivariate_normal([0], [[0]], size=1) + random.multivariate_normal([0], [[0]], size=np.int_(1)) + random.multivariate_normal([0], [[0]], size=np.int64(1)) + + def test_beta_small_parameters(self): + # Test that beta with small a and b parameters does not produce + # NaNs due to roundoff errors causing 0 / 0, gh-5851 + random.seed(1234567890) + x = random.beta(0.0001, 0.0001, size=100) + assert_(not np.any(np.isnan(x)), 'Nans in random.beta') + + def test_choice_sum_of_probs_tolerance(self): + # The sum of probs should be 1.0 with some tolerance. + # For low precision dtypes the tolerance was too tight. + # See numpy github issue 6123. + random.seed(1234) + a = [1, 2, 3] + counts = [4, 4, 2] + for dt in np.float16, np.float32, np.float64: + probs = np.array(counts, dtype=dt) / sum(counts) + c = random.choice(a, p=probs) + assert_(c in a) + assert_raises(ValueError, random.choice, a, p=probs*0.9) + + def test_shuffle_of_array_of_different_length_strings(self): + # Test that permuting an array of different length strings + # will not cause a segfault on garbage collection + # Tests gh-7710 + random.seed(1234) + + a = np.array(['a', 'a' * 1000]) + + for _ in range(100): + random.shuffle(a) + + # Force Garbage Collection - should not segfault. 
+ import gc + gc.collect() + + def test_shuffle_of_array_of_objects(self): + # Test that permuting an array of objects will not cause + # a segfault on garbage collection. + # See gh-7719 + random.seed(1234) + a = np.array([np.arange(1), np.arange(4)], dtype=object) + + for _ in range(1000): + random.shuffle(a) + + # Force Garbage Collection - should not segfault. + import gc + gc.collect() + + def test_permutation_subclass(self): + class N(np.ndarray): + pass + + random.seed(1) + orig = np.arange(3).view(N) + perm = random.permutation(orig) + assert_array_equal(perm, np.array([0, 2, 1])) + assert_array_equal(orig, np.arange(3).view(N)) + + class M: + a = np.arange(5) + + def __array__(self): + return self.a + + random.seed(1) + m = M() + perm = random.permutation(m) + assert_array_equal(perm, np.array([2, 1, 4, 0, 3])) + assert_array_equal(m.__array__(), np.arange(5)) + + def test_warns_byteorder(self): + # GH 13159 + other_byteord_dt = '<i4' if sys.byteorder == 'big' else '>i4' + with pytest.deprecated_call(match='non-native byteorder is not'): + random.randint(0, 200, size=10, dtype=other_byteord_dt) + + def test_named_argument_initialization(self): + # GH 13669 + rs1 = np.random.RandomState(123456789) + rs2 = np.random.RandomState(seed=123456789) + assert rs1.randint(0, 100) == rs2.randint(0, 100) + + def test_choice_return_dtype(self): + # GH 9867 + c = np.random.choice(10, p=[.1]*10, size=2) + assert c.dtype == np.dtype(int) + c = np.random.choice(10, p=[.1]*10, replace=False, size=2) + assert c.dtype == np.dtype(int) + c = np.random.choice(10, size=2) + assert c.dtype == np.dtype(int) + c = np.random.choice(10, replace=False, size=2) + assert c.dtype == np.dtype(int) + + @pytest.mark.skipif(np.iinfo('l').max < 2**32, + reason='Cannot test with 32-bit C long') + def test_randint_117(self): + # GH 14189 + random.seed(0) + expected = np.array([2357136044, 2546248239, 3071714933, 3626093760, + 2588848963, 3684848379, 2340255427, 3638918503, + 1819583497, 2678185683], dtype='int64') + actual = random.randint(2**32, size=10) + assert_array_equal(actual, expected) + + def test_p_zero_stream(self): + # Regression test for gh-14522. Ensure that future versions + # generate the same variates as version 1.16. + np.random.seed(12345) + assert_array_equal(random.binomial(1, [0, 0.25, 0.5, 0.75, 1]), + [0, 0, 0, 1, 1]) + + def test_n_zero_stream(self): + # Regression test for gh-14522. Ensure that future versions + # generate the same variates as version 1.16. + np.random.seed(8675309) + expected = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [3, 4, 2, 3, 3, 1, 5, 3, 1, 3]]) + assert_array_equal(random.binomial([[0], [10]], 0.25, size=(2, 10)), + expected) diff --git a/MLPY/Lib/site-packages/numpy/random/tests/test_regression.py b/MLPY/Lib/site-packages/numpy/random/tests/test_regression.py new file mode 100644 index 0000000000000000000000000000000000000000..acff4f843b132d8509947f3747db1374ed2f70a0 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/tests/test_regression.py @@ -0,0 +1,149 @@ +import sys +from numpy.testing import ( + assert_, assert_array_equal, assert_raises, + ) +from numpy import random +import numpy as np + + +class TestRegression: + + def test_VonMises_range(self): + # Make sure generated random variables are in [-pi, pi]. + # Regression test for ticket #986.
+ for mu in np.linspace(-7., 7., 5): + r = random.mtrand.vonmises(mu, 1, 50) + assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) + + def test_hypergeometric_range(self): + # Test for ticket #921 + assert_(np.all(np.random.hypergeometric(3, 18, 11, size=10) < 4)) + assert_(np.all(np.random.hypergeometric(18, 3, 11, size=10) > 0)) + + # Test for ticket #5623 + args = [ + (2**20 - 2, 2**20 - 2, 2**20 - 2), # Check for 32-bit systems + ] + is_64bits = sys.maxsize > 2**32 + if is_64bits and sys.platform != 'win32': + # Check for 64-bit systems + args.append((2**40 - 2, 2**40 - 2, 2**40 - 2)) + for arg in args: + assert_(np.random.hypergeometric(*arg) > 0) + + def test_logseries_convergence(self): + # Test for ticket #923 + N = 1000 + np.random.seed(0) + rvsn = np.random.logseries(0.8, size=N) + # these two frequency counts should be close to theoretical + # numbers with this large sample + # theoretical large N result is 0.49706795 + freq = np.sum(rvsn == 1) / float(N) + msg = f'Frequency was {freq:f}, should be > 0.45' + assert_(freq > 0.45, msg) + # theoretical large N result is 0.19882718 + freq = np.sum(rvsn == 2) / float(N) + msg = f'Frequency was {freq:f}, should be < 0.23' + assert_(freq < 0.23, msg) + + def test_shuffle_mixed_dimension(self): + # Test for trac ticket #2074 + for t in [[1, 2, 3, None], + [(1, 1), (2, 2), (3, 3), None], + [1, (2, 2), (3, 3), None], + [(1, 1), 2, 3, None]]: + np.random.seed(12345) + shuffled = list(t) + random.shuffle(shuffled) + expected = np.array([t[0], t[3], t[1], t[2]], dtype=object) + assert_array_equal(np.array(shuffled, dtype=object), expected) + + def test_call_within_randomstate(self): + # Check that custom RandomState does not call into global state + m = np.random.RandomState() + res = np.array([0, 8, 7, 2, 1, 9, 4, 7, 0, 3]) + for i in range(3): + np.random.seed(i) + m.seed(4321) + # If m.state is not honored, the result will change + assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res) + + def test_multivariate_normal_size_types(self): + # Test for multivariate_normal issue with 'size' argument. + # Check that the multivariate_normal size argument can be a + # numpy integer. + np.random.multivariate_normal([0], [[0]], size=1) + np.random.multivariate_normal([0], [[0]], size=np.int_(1)) + np.random.multivariate_normal([0], [[0]], size=np.int64(1)) + + def test_beta_small_parameters(self): + # Test that beta with small a and b parameters does not produce + # NaNs due to roundoff errors causing 0 / 0, gh-5851 + np.random.seed(1234567890) + x = np.random.beta(0.0001, 0.0001, size=100) + assert_(not np.any(np.isnan(x)), 'Nans in np.random.beta') + + def test_choice_sum_of_probs_tolerance(self): + # The sum of probs should be 1.0 with some tolerance. + # For low precision dtypes the tolerance was too tight. + # See numpy github issue 6123. + np.random.seed(1234) + a = [1, 2, 3] + counts = [4, 4, 2] + for dt in np.float16, np.float32, np.float64: + probs = np.array(counts, dtype=dt) / sum(counts) + c = np.random.choice(a, p=probs) + assert_(c in a) + assert_raises(ValueError, np.random.choice, a, p=probs*0.9) + + def test_shuffle_of_array_of_different_length_strings(self): + # Test that permuting an array of different length strings + # will not cause a segfault on garbage collection + # Tests gh-7710 + np.random.seed(1234) + + a = np.array(['a', 'a' * 1000]) + + for _ in range(100): + np.random.shuffle(a) + + # Force Garbage Collection - should not segfault. 
+ import gc + gc.collect() + + def test_shuffle_of_array_of_objects(self): + # Test that permuting an array of objects will not cause + # a segfault on garbage collection. + # See gh-7719 + np.random.seed(1234) + a = np.array([np.arange(1), np.arange(4)], dtype=object) + + for _ in range(1000): + np.random.shuffle(a) + + # Force Garbage Collection - should not segfault. + import gc + gc.collect() + + def test_permutation_subclass(self): + class N(np.ndarray): + pass + + np.random.seed(1) + orig = np.arange(3).view(N) + perm = np.random.permutation(orig) + assert_array_equal(perm, np.array([0, 2, 1])) + assert_array_equal(orig, np.arange(3).view(N)) + + class M: + a = np.arange(5) + + def __array__(self): + return self.a + + np.random.seed(1) + m = M() + perm = np.random.permutation(m) + assert_array_equal(perm, np.array([2, 1, 4, 0, 3])) + assert_array_equal(m.__array__(), np.arange(5)) diff --git a/MLPY/Lib/site-packages/numpy/random/tests/test_seed_sequence.py b/MLPY/Lib/site-packages/numpy/random/tests/test_seed_sequence.py new file mode 100644 index 0000000000000000000000000000000000000000..304d06bef9ec3d3ab9c918c515c1a4b8f2835750 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/tests/test_seed_sequence.py @@ -0,0 +1,80 @@ +import numpy as np +from numpy.testing import assert_array_equal, assert_array_compare + +from numpy.random import SeedSequence + + +def test_reference_data(): + """ Check that SeedSequence generates data the same as the C++ reference. + + https://gist.github.com/imneme/540829265469e673d045 + """ + inputs = [ + [3735928559, 195939070, 229505742, 305419896], + [3668361503, 4165561550, 1661411377, 3634257570], + [164546577, 4166754639, 1765190214, 1303880213], + [446610472, 3941463886, 522937693, 1882353782], + [1864922766, 1719732118, 3882010307, 1776744564], + [4141682960, 3310988675, 553637289, 902896340], + [1134851934, 2352871630, 3699409824, 2648159817], + [1240956131, 3107113773, 1283198141, 1924506131], + [2669565031, 579818610, 3042504477, 2774880435], + [2766103236, 2883057919, 4029656435, 862374500], + ] + outputs = [ + [3914649087, 576849849, 3593928901, 2229911004], + [2240804226, 3691353228, 1365957195, 2654016646], + [3562296087, 3191708229, 1147942216, 3726991905], + [1403443605, 3591372999, 1291086759, 441919183], + [1086200464, 2191331643, 560336446, 3658716651], + [3249937430, 2346751812, 847844327, 2996632307], + [2584285912, 4034195531, 3523502488, 169742686], + [959045797, 3875435559, 1886309314, 359682705], + [3978441347, 432478529, 3223635119, 138903045], + [296367413, 4262059219, 13109864, 3283683422], + ] + outputs64 = [ + [2477551240072187391, 9577394838764454085], + [15854241394484835714, 11398914698975566411], + [13708282465491374871, 16007308345579681096], + [15424829579845884309, 1898028439751125927], + [9411697742461147792, 15714068361935982142], + [10079222287618677782, 12870437757549876199], + [17326737873898640088, 729039288628699544], + [16644868984619524261, 1544825456798124994], + [1857481142255628931, 596584038813451439], + [18305404959516669237, 14103312907920476776], + ] + for seed, expected, expected64 in zip(inputs, outputs, outputs64): + expected = np.array(expected, dtype=np.uint32) + ss = SeedSequence(seed) + state = ss.generate_state(len(expected)) + assert_array_equal(state, expected) + state64 = ss.generate_state(len(expected64), dtype=np.uint64) + assert_array_equal(state64, expected64) + + +def test_zero_padding(): + """ Ensure that the implicit zero-padding does not cause problems. 
+ """ + # Ensure that large integers are inserted in little-endian fashion to avoid + # trailing 0s. + ss0 = SeedSequence(42) + ss1 = SeedSequence(42 << 32) + assert_array_compare( + np.not_equal, + ss0.generate_state(4), + ss1.generate_state(4)) + + # Ensure backwards compatibility with the original 0.17 release for small + # integers and no spawn key. + expected42 = np.array([3444837047, 2669555309, 2046530742, 3581440988], + dtype=np.uint32) + assert_array_equal(SeedSequence(42).generate_state(4), expected42) + + # Regression test for gh-16539 to ensure that the implicit 0s don't + # conflict with spawn keys. + assert_array_compare( + np.not_equal, + SeedSequence(42, spawn_key=(0,)).generate_state(4), + expected42) diff --git a/MLPY/Lib/site-packages/numpy/random/tests/test_smoke.py b/MLPY/Lib/site-packages/numpy/random/tests/test_smoke.py new file mode 100644 index 0000000000000000000000000000000000000000..d79ef7761fb7c7c0a8db5fbdb2ec6ecf4aabd136 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/random/tests/test_smoke.py @@ -0,0 +1,818 @@ +import pickle +from functools import partial + +import numpy as np +import pytest +from numpy.testing import assert_equal, assert_, assert_array_equal +from numpy.random import (Generator, MT19937, PCG64, PCG64DXSM, Philox, SFC64) + +@pytest.fixture(scope='module', + params=(np.bool_, np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64)) +def dtype(request): + return request.param + + +def params_0(f): + val = f() + assert_(np.isscalar(val)) + val = f(10) + assert_(val.shape == (10,)) + val = f((10, 10)) + assert_(val.shape == (10, 10)) + val = f((10, 10, 10)) + assert_(val.shape == (10, 10, 10)) + val = f(size=(5, 5)) + assert_(val.shape == (5, 5)) + + +def params_1(f, bounded=False): + a = 5.0 + b = np.arange(2.0, 12.0) + c = np.arange(2.0, 102.0).reshape((10, 10)) + d = np.arange(2.0, 1002.0).reshape((10, 10, 10)) + e = np.array([2.0, 3.0]) + g = np.arange(2.0, 12.0).reshape((1, 10, 1)) + if bounded: + a = 0.5 + b = b / (1.5 * b.max()) + c = c / (1.5 * c.max()) + d = d / (1.5 * d.max()) + e = e / (1.5 * e.max()) + g = g / (1.5 * g.max()) + + # Scalar + f(a) + # Scalar - size + f(a, size=(10, 10)) + # 1d + f(b) + # 2d + f(c) + # 3d + f(d) + # 1d size + f(b, size=10) + # 2d - size - broadcast + f(e, size=(10, 2)) + # 3d - size + f(g, size=(10, 10, 10)) + + +def comp_state(state1, state2): + identical = True + if isinstance(state1, dict): + for key in state1: + identical &= comp_state(state1[key], state2[key]) + elif type(state1) != type(state2): + identical &= type(state1) == type(state2) + else: + if (isinstance(state1, (list, tuple, np.ndarray)) and isinstance( + state2, (list, tuple, np.ndarray))): + for s1, s2 in zip(state1, state2): + identical &= comp_state(s1, s2) + else: + identical &= state1 == state2 + return identical + + +def warmup(rg, n=None): + if n is None: + n = 11 + np.random.randint(0, 20) + rg.standard_normal(n) + rg.standard_normal(n) + rg.standard_normal(n, dtype=np.float32) + rg.standard_normal(n, dtype=np.float32) + rg.integers(0, 2 ** 24, n, dtype=np.uint64) + rg.integers(0, 2 ** 48, n, dtype=np.uint64) + rg.standard_gamma(11.0, n) + rg.standard_gamma(11.0, n, dtype=np.float32) + rg.random(n, dtype=np.float64) + rg.random(n, dtype=np.float32) + + +class RNG: + @classmethod + def setup_class(cls): + # Overridden in test classes. 
Place holder to silence IDE noise + cls.bit_generator = PCG64 + cls.advance = None + cls.seed = [12345] + cls.rg = Generator(cls.bit_generator(*cls.seed)) + cls.initial_state = cls.rg.bit_generator.state + cls.seed_vector_bits = 64 + cls._extra_setup() + + @classmethod + def _extra_setup(cls): + cls.vec_1d = np.arange(2.0, 102.0) + cls.vec_2d = np.arange(2.0, 102.0)[None, :] + cls.mat = np.arange(2.0, 102.0, 0.01).reshape((100, 100)) + cls.seed_error = TypeError + + def _reset_state(self): + self.rg.bit_generator.state = self.initial_state + + def test_init(self): + rg = Generator(self.bit_generator()) + state = rg.bit_generator.state + rg.standard_normal(1) + rg.standard_normal(1) + rg.bit_generator.state = state + new_state = rg.bit_generator.state + assert_(comp_state(state, new_state)) + + def test_advance(self): + state = self.rg.bit_generator.state + if hasattr(self.rg.bit_generator, 'advance'): + self.rg.bit_generator.advance(self.advance) + assert_(not comp_state(state, self.rg.bit_generator.state)) + else: + bitgen_name = self.rg.bit_generator.__class__.__name__ + pytest.skip(f'Advance is not supported by {bitgen_name}') + + def test_jump(self): + state = self.rg.bit_generator.state + if hasattr(self.rg.bit_generator, 'jumped'): + bit_gen2 = self.rg.bit_generator.jumped() + jumped_state = bit_gen2.state + assert_(not comp_state(state, jumped_state)) + self.rg.random(2 * 3 * 5 * 7 * 11 * 13 * 17) + self.rg.bit_generator.state = state + bit_gen3 = self.rg.bit_generator.jumped() + rejumped_state = bit_gen3.state + assert_(comp_state(jumped_state, rejumped_state)) + else: + bitgen_name = self.rg.bit_generator.__class__.__name__ + if bitgen_name not in ('SFC64',): + raise AttributeError(f'no "jumped" in {bitgen_name}') + pytest.skip(f'Jump is not supported by {bitgen_name}') + + def test_uniform(self): + r = self.rg.uniform(-1.0, 0.0, size=10) + assert_(len(r) == 10) + assert_((r > -1).all()) + assert_((r <= 0).all()) + + def test_uniform_array(self): + r = self.rg.uniform(np.array([-1.0] * 10), 0.0, size=10) + assert_(len(r) == 10) + assert_((r > -1).all()) + assert_((r <= 0).all()) + r = self.rg.uniform(np.array([-1.0] * 10), + np.array([0.0] * 10), size=10) + assert_(len(r) == 10) + assert_((r > -1).all()) + assert_((r <= 0).all()) + r = self.rg.uniform(-1.0, np.array([0.0] * 10), size=10) + assert_(len(r) == 10) + assert_((r > -1).all()) + assert_((r <= 0).all()) + + def test_random(self): + assert_(len(self.rg.random(10)) == 10) + params_0(self.rg.random) + + def test_standard_normal_zig(self): + assert_(len(self.rg.standard_normal(10)) == 10) + + def test_standard_normal(self): + assert_(len(self.rg.standard_normal(10)) == 10) + params_0(self.rg.standard_normal) + + def test_standard_gamma(self): + assert_(len(self.rg.standard_gamma(10, 10)) == 10) + assert_(len(self.rg.standard_gamma(np.array([10] * 10), 10)) == 10) + params_1(self.rg.standard_gamma) + + def test_standard_exponential(self): + assert_(len(self.rg.standard_exponential(10)) == 10) + params_0(self.rg.standard_exponential) + + def test_standard_exponential_float(self): + randoms = self.rg.standard_exponential(10, dtype='float32') + assert_(len(randoms) == 10) + assert randoms.dtype == np.float32 + params_0(partial(self.rg.standard_exponential, dtype='float32')) + + def test_standard_exponential_float_log(self): + randoms = self.rg.standard_exponential(10, dtype='float32', + method='inv') + assert_(len(randoms) == 10) + assert randoms.dtype == np.float32 + params_0(partial(self.rg.standard_exponential, 
dtype='float32', + method='inv')) + + def test_standard_cauchy(self): + assert_(len(self.rg.standard_cauchy(10)) == 10) + params_0(self.rg.standard_cauchy) + + def test_standard_t(self): + assert_(len(self.rg.standard_t(10, 10)) == 10) + params_1(self.rg.standard_t) + + def test_binomial(self): + assert_(self.rg.binomial(10, .5) >= 0) + assert_(self.rg.binomial(1000, .5) >= 0) + + def test_reset_state(self): + state = self.rg.bit_generator.state + int_1 = self.rg.integers(2**31) + self.rg.bit_generator.state = state + int_2 = self.rg.integers(2**31) + assert_(int_1 == int_2) + + def test_entropy_init(self): + rg = Generator(self.bit_generator()) + rg2 = Generator(self.bit_generator()) + assert_(not comp_state(rg.bit_generator.state, + rg2.bit_generator.state)) + + def test_seed(self): + rg = Generator(self.bit_generator(*self.seed)) + rg2 = Generator(self.bit_generator(*self.seed)) + rg.random() + rg2.random() + assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) + + def test_reset_state_gauss(self): + rg = Generator(self.bit_generator(*self.seed)) + rg.standard_normal() + state = rg.bit_generator.state + n1 = rg.standard_normal(size=10) + rg2 = Generator(self.bit_generator()) + rg2.bit_generator.state = state + n2 = rg2.standard_normal(size=10) + assert_array_equal(n1, n2) + + def test_reset_state_uint32(self): + rg = Generator(self.bit_generator(*self.seed)) + rg.integers(0, 2 ** 24, 120, dtype=np.uint32) + state = rg.bit_generator.state + n1 = rg.integers(0, 2 ** 24, 10, dtype=np.uint32) + rg2 = Generator(self.bit_generator()) + rg2.bit_generator.state = state + n2 = rg2.integers(0, 2 ** 24, 10, dtype=np.uint32) + assert_array_equal(n1, n2) + + def test_reset_state_float(self): + rg = Generator(self.bit_generator(*self.seed)) + rg.random(dtype='float32') + state = rg.bit_generator.state + n1 = rg.random(size=10, dtype='float32') + rg2 = Generator(self.bit_generator()) + rg2.bit_generator.state = state + n2 = rg2.random(size=10, dtype='float32') + assert_((n1 == n2).all()) + + def test_shuffle(self): + original = np.arange(200, 0, -1) + permuted = self.rg.permutation(original) + assert_((original != permuted).any()) + + def test_permutation(self): + original = np.arange(200, 0, -1) + permuted = self.rg.permutation(original) + assert_((original != permuted).any()) + + def test_beta(self): + vals = self.rg.beta(2.0, 2.0, 10) + assert_(len(vals) == 10) + vals = self.rg.beta(np.array([2.0] * 10), 2.0) + assert_(len(vals) == 10) + vals = self.rg.beta(2.0, np.array([2.0] * 10)) + assert_(len(vals) == 10) + vals = self.rg.beta(np.array([2.0] * 10), np.array([2.0] * 10)) + assert_(len(vals) == 10) + vals = self.rg.beta(np.array([2.0] * 10), np.array([[2.0]] * 10)) + assert_(vals.shape == (10, 10)) + + def test_bytes(self): + vals = self.rg.bytes(10) + assert_(len(vals) == 10) + + def test_chisquare(self): + vals = self.rg.chisquare(2.0, 10) + assert_(len(vals) == 10) + params_1(self.rg.chisquare) + + def test_exponential(self): + vals = self.rg.exponential(2.0, 10) + assert_(len(vals) == 10) + params_1(self.rg.exponential) + + def test_f(self): + vals = self.rg.f(3, 1000, 10) + assert_(len(vals) == 10) + + def test_gamma(self): + vals = self.rg.gamma(3, 2, 10) + assert_(len(vals) == 10) + + def test_geometric(self): + vals = self.rg.geometric(0.5, 10) + assert_(len(vals) == 10) + params_1(self.rg.exponential, bounded=True) + + def test_gumbel(self): + vals = self.rg.gumbel(2.0, 2.0, 10) + assert_(len(vals) == 10) + + def test_laplace(self): + vals = self.rg.laplace(2.0, 2.0, 10) 
+ assert_(len(vals) == 10) + + def test_logistic(self): + vals = self.rg.logistic(2.0, 2.0, 10) + assert_(len(vals) == 10) + + def test_logseries(self): + vals = self.rg.logseries(0.5, 10) + assert_(len(vals) == 10) + + def test_negative_binomial(self): + vals = self.rg.negative_binomial(10, 0.2, 10) + assert_(len(vals) == 10) + + def test_noncentral_chisquare(self): + vals = self.rg.noncentral_chisquare(10, 2, 10) + assert_(len(vals) == 10) + + def test_noncentral_f(self): + vals = self.rg.noncentral_f(3, 1000, 2, 10) + assert_(len(vals) == 10) + vals = self.rg.noncentral_f(np.array([3] * 10), 1000, 2) + assert_(len(vals) == 10) + vals = self.rg.noncentral_f(3, np.array([1000] * 10), 2) + assert_(len(vals) == 10) + vals = self.rg.noncentral_f(3, 1000, np.array([2] * 10)) + assert_(len(vals) == 10) + + def test_normal(self): + vals = self.rg.normal(10, 0.2, 10) + assert_(len(vals) == 10) + + def test_pareto(self): + vals = self.rg.pareto(3.0, 10) + assert_(len(vals) == 10) + + def test_poisson(self): + vals = self.rg.poisson(10, 10) + assert_(len(vals) == 10) + vals = self.rg.poisson(np.array([10] * 10)) + assert_(len(vals) == 10) + params_1(self.rg.poisson) + + def test_power(self): + vals = self.rg.power(0.2, 10) + assert_(len(vals) == 10) + + def test_integers(self): + vals = self.rg.integers(10, 20, 10) + assert_(len(vals) == 10) + + def test_rayleigh(self): + vals = self.rg.rayleigh(0.2, 10) + assert_(len(vals) == 10) + params_1(self.rg.rayleigh, bounded=True) + + def test_vonmises(self): + vals = self.rg.vonmises(10, 0.2, 10) + assert_(len(vals) == 10) + + def test_wald(self): + vals = self.rg.wald(1.0, 1.0, 10) + assert_(len(vals) == 10) + + def test_weibull(self): + vals = self.rg.weibull(1.0, 10) + assert_(len(vals) == 10) + + def test_zipf(self): + vals = self.rg.zipf(10, 10) + assert_(len(vals) == 10) + vals = self.rg.zipf(self.vec_1d) + assert_(len(vals) == 100) + vals = self.rg.zipf(self.vec_2d) + assert_(vals.shape == (1, 100)) + vals = self.rg.zipf(self.mat) + assert_(vals.shape == (100, 100)) + + def test_hypergeometric(self): + vals = self.rg.hypergeometric(25, 25, 20) + assert_(np.isscalar(vals)) + vals = self.rg.hypergeometric(np.array([25] * 10), 25, 20) + assert_(vals.shape == (10,)) + + def test_triangular(self): + vals = self.rg.triangular(-5, 0, 5) + assert_(np.isscalar(vals)) + vals = self.rg.triangular(-5, np.array([0] * 10), 5) + assert_(vals.shape == (10,)) + + def test_multivariate_normal(self): + mean = [0, 0] + cov = [[1, 0], [0, 100]] # diagonal covariance + x = self.rg.multivariate_normal(mean, cov, 5000) + assert_(x.shape == (5000, 2)) + x_zig = self.rg.multivariate_normal(mean, cov, 5000) + assert_(x_zig.shape == (5000, 2)) + x_inv = self.rg.multivariate_normal(mean, cov, 5000) + assert_(x_inv.shape == (5000, 2)) + assert_((x_zig != x_inv).any()) + + def test_multinomial(self): + vals = self.rg.multinomial(100, [1.0 / 3, 2.0 / 3]) + assert_(vals.shape == (2,)) + vals = self.rg.multinomial(100, [1.0 / 3, 2.0 / 3], size=10) + assert_(vals.shape == (10, 2)) + + def test_dirichlet(self): + s = self.rg.dirichlet((10, 5, 3), 20) + assert_(s.shape == (20, 3)) + + def test_pickle(self): + pick = pickle.dumps(self.rg) + unpick = pickle.loads(pick) + assert_((type(self.rg) == type(unpick))) + assert_(comp_state(self.rg.bit_generator.state, + unpick.bit_generator.state)) + + pick = pickle.dumps(self.rg) + unpick = pickle.loads(pick) + assert_((type(self.rg) == type(unpick))) + assert_(comp_state(self.rg.bit_generator.state, + unpick.bit_generator.state)) + + def
test_seed_array(self): + if self.seed_vector_bits is None: + bitgen_name = self.bit_generator.__name__ + pytest.skip(f'Vector seeding is not supported by {bitgen_name}') + + if self.seed_vector_bits == 32: + dtype = np.uint32 + else: + dtype = np.uint64 + seed = np.array([1], dtype=dtype) + bg = self.bit_generator(seed) + state1 = bg.state + bg = self.bit_generator(1) + state2 = bg.state + assert_(comp_state(state1, state2)) + + seed = np.arange(4, dtype=dtype) + bg = self.bit_generator(seed) + state1 = bg.state + bg = self.bit_generator(seed[0]) + state2 = bg.state + assert_(not comp_state(state1, state2)) + + seed = np.arange(1500, dtype=dtype) + bg = self.bit_generator(seed) + state1 = bg.state + bg = self.bit_generator(seed[0]) + state2 = bg.state + assert_(not comp_state(state1, state2)) + + seed = 2 ** np.mod(np.arange(1500, dtype=dtype), + self.seed_vector_bits - 1) + 1 + bg = self.bit_generator(seed) + state1 = bg.state + bg = self.bit_generator(seed[0]) + state2 = bg.state + assert_(not comp_state(state1, state2)) + + def test_uniform_float(self): + rg = Generator(self.bit_generator(12345)) + warmup(rg) + state = rg.bit_generator.state + r1 = rg.random(11, dtype=np.float32) + rg2 = Generator(self.bit_generator()) + warmup(rg2) + rg2.bit_generator.state = state + r2 = rg2.random(11, dtype=np.float32) + assert_array_equal(r1, r2) + assert_equal(r1.dtype, np.float32) + assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) + + def test_gamma_floats(self): + rg = Generator(self.bit_generator()) + warmup(rg) + state = rg.bit_generator.state + r1 = rg.standard_gamma(4.0, 11, dtype=np.float32) + rg2 = Generator(self.bit_generator()) + warmup(rg2) + rg2.bit_generator.state = state + r2 = rg2.standard_gamma(4.0, 11, dtype=np.float32) + assert_array_equal(r1, r2) + assert_equal(r1.dtype, np.float32) + assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) + + def test_normal_floats(self): + rg = Generator(self.bit_generator()) + warmup(rg) + state = rg.bit_generator.state + r1 = rg.standard_normal(11, dtype=np.float32) + rg2 = Generator(self.bit_generator()) + warmup(rg2) + rg2.bit_generator.state = state + r2 = rg2.standard_normal(11, dtype=np.float32) + assert_array_equal(r1, r2) + assert_equal(r1.dtype, np.float32) + assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) + + def test_normal_zig_floats(self): + rg = Generator(self.bit_generator()) + warmup(rg) + state = rg.bit_generator.state + r1 = rg.standard_normal(11, dtype=np.float32) + rg2 = Generator(self.bit_generator()) + warmup(rg2) + rg2.bit_generator.state = state + r2 = rg2.standard_normal(11, dtype=np.float32) + assert_array_equal(r1, r2) + assert_equal(r1.dtype, np.float32) + assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) + + def test_output_fill(self): + rg = self.rg + state = rg.bit_generator.state + size = (31, 7, 97) + existing = np.empty(size) + rg.bit_generator.state = state + rg.standard_normal(out=existing) + rg.bit_generator.state = state + direct = rg.standard_normal(size=size) + assert_equal(direct, existing) + + sized = np.empty(size) + rg.bit_generator.state = state + rg.standard_normal(out=sized, size=sized.shape) + + existing = np.empty(size, dtype=np.float32) + rg.bit_generator.state = state + rg.standard_normal(out=existing, dtype=np.float32) + rg.bit_generator.state = state + direct = rg.standard_normal(size=size, dtype=np.float32) + assert_equal(direct, existing) + + def test_output_filling_uniform(self): + rg = self.rg + state = 
rg.bit_generator.state + size = (31, 7, 97) + existing = np.empty(size) + rg.bit_generator.state = state + rg.random(out=existing) + rg.bit_generator.state = state + direct = rg.random(size=size) + assert_equal(direct, existing) + + existing = np.empty(size, dtype=np.float32) + rg.bit_generator.state = state + rg.random(out=existing, dtype=np.float32) + rg.bit_generator.state = state + direct = rg.random(size=size, dtype=np.float32) + assert_equal(direct, existing) + + def test_output_filling_exponential(self): + rg = self.rg + state = rg.bit_generator.state + size = (31, 7, 97) + existing = np.empty(size) + rg.bit_generator.state = state + rg.standard_exponential(out=existing) + rg.bit_generator.state = state + direct = rg.standard_exponential(size=size) + assert_equal(direct, existing) + + existing = np.empty(size, dtype=np.float32) + rg.bit_generator.state = state + rg.standard_exponential(out=existing, dtype=np.float32) + rg.bit_generator.state = state + direct = rg.standard_exponential(size=size, dtype=np.float32) + assert_equal(direct, existing) + + def test_output_filling_gamma(self): + rg = self.rg + state = rg.bit_generator.state + size = (31, 7, 97) + existing = np.zeros(size) + rg.bit_generator.state = state + rg.standard_gamma(1.0, out=existing) + rg.bit_generator.state = state + direct = rg.standard_gamma(1.0, size=size) + assert_equal(direct, existing) + + existing = np.zeros(size, dtype=np.float32) + rg.bit_generator.state = state + rg.standard_gamma(1.0, out=existing, dtype=np.float32) + rg.bit_generator.state = state + direct = rg.standard_gamma(1.0, size=size, dtype=np.float32) + assert_equal(direct, existing) + + def test_output_filling_gamma_broadcast(self): + rg = self.rg + state = rg.bit_generator.state + size = (31, 7, 97) + mu = np.arange(97.0) + 1.0 + existing = np.zeros(size) + rg.bit_generator.state = state + rg.standard_gamma(mu, out=existing) + rg.bit_generator.state = state + direct = rg.standard_gamma(mu, size=size) + assert_equal(direct, existing) + + existing = np.zeros(size, dtype=np.float32) + rg.bit_generator.state = state + rg.standard_gamma(mu, out=existing, dtype=np.float32) + rg.bit_generator.state = state + direct = rg.standard_gamma(mu, size=size, dtype=np.float32) + assert_equal(direct, existing) + + def test_output_fill_error(self): + rg = self.rg + size = (31, 7, 97) + existing = np.empty(size) + with pytest.raises(TypeError): + rg.standard_normal(out=existing, dtype=np.float32) + with pytest.raises(ValueError): + rg.standard_normal(out=existing[::3]) + existing = np.empty(size, dtype=np.float32) + with pytest.raises(TypeError): + rg.standard_normal(out=existing, dtype=np.float64) + + existing = np.zeros(size, dtype=np.float32) + with pytest.raises(TypeError): + rg.standard_gamma(1.0, out=existing, dtype=np.float64) + with pytest.raises(ValueError): + rg.standard_gamma(1.0, out=existing[::3], dtype=np.float32) + existing = np.zeros(size, dtype=np.float64) + with pytest.raises(TypeError): + rg.standard_gamma(1.0, out=existing, dtype=np.float32) + with pytest.raises(ValueError): + rg.standard_gamma(1.0, out=existing[::3]) + + def test_integers_broadcast(self, dtype): + if dtype == np.bool_: + upper = 2 + lower = 0 + else: + info = np.iinfo(dtype) + upper = int(info.max) + 1 + lower = info.min + self._reset_state() + a = self.rg.integers(lower, [upper] * 10, dtype=dtype) + self._reset_state() + b = self.rg.integers([lower] * 10, upper, dtype=dtype) + assert_equal(a, b) + self._reset_state() + c = self.rg.integers(lower, upper, size=10, dtype=dtype) 
+ assert_equal(a, c) + self._reset_state() + d = self.rg.integers(np.array( + [lower] * 10), np.array([upper], dtype=object), size=10, + dtype=dtype) + assert_equal(a, d) + self._reset_state() + e = self.rg.integers( + np.array([lower] * 10), np.array([upper] * 10), size=10, + dtype=dtype) + assert_equal(a, e) + + self._reset_state() + a = self.rg.integers(0, upper, size=10, dtype=dtype) + self._reset_state() + b = self.rg.integers([upper] * 10, dtype=dtype) + assert_equal(a, b) + + def test_integers_numpy(self, dtype): + high = np.array([1]) + low = np.array([0]) + + out = self.rg.integers(low, high, dtype=dtype) + assert out.shape == (1,) + + out = self.rg.integers(low[0], high, dtype=dtype) + assert out.shape == (1,) + + out = self.rg.integers(low, high[0], dtype=dtype) + assert out.shape == (1,) + + def test_integers_broadcast_errors(self, dtype): + if dtype == np.bool_: + upper = 2 + lower = 0 + else: + info = np.iinfo(dtype) + upper = int(info.max) + 1 + lower = info.min + with pytest.raises(ValueError): + self.rg.integers(lower, [upper + 1] * 10, dtype=dtype) + with pytest.raises(ValueError): + self.rg.integers(lower - 1, [upper] * 10, dtype=dtype) + with pytest.raises(ValueError): + self.rg.integers([lower - 1], [upper] * 10, dtype=dtype) + with pytest.raises(ValueError): + self.rg.integers([0], [0], dtype=dtype) + + +class TestMT19937(RNG): + @classmethod + def setup_class(cls): + cls.bit_generator = MT19937 + cls.advance = None + cls.seed = [2 ** 21 + 2 ** 16 + 2 ** 5 + 1] + cls.rg = Generator(cls.bit_generator(*cls.seed)) + cls.initial_state = cls.rg.bit_generator.state + cls.seed_vector_bits = 32 + cls._extra_setup() + cls.seed_error = ValueError + + def test_numpy_state(self): + nprg = np.random.RandomState() + nprg.standard_normal(99) + state = nprg.get_state() + self.rg.bit_generator.state = state + state2 = self.rg.bit_generator.state + assert_((state[1] == state2['state']['key']).all()) + assert_((state[2] == state2['state']['pos'])) + + +class TestPhilox(RNG): + @classmethod + def setup_class(cls): + cls.bit_generator = Philox + cls.advance = 2**63 + 2**31 + 2**15 + 1 + cls.seed = [12345] + cls.rg = Generator(cls.bit_generator(*cls.seed)) + cls.initial_state = cls.rg.bit_generator.state + cls.seed_vector_bits = 64 + cls._extra_setup() + + +class TestSFC64(RNG): + @classmethod + def setup_class(cls): + cls.bit_generator = SFC64 + cls.advance = None + cls.seed = [12345] + cls.rg = Generator(cls.bit_generator(*cls.seed)) + cls.initial_state = cls.rg.bit_generator.state + cls.seed_vector_bits = 192 + cls._extra_setup() + + +class TestPCG64(RNG): + @classmethod + def setup_class(cls): + cls.bit_generator = PCG64 + cls.advance = 2**63 + 2**31 + 2**15 + 1 + cls.seed = [12345] + cls.rg = Generator(cls.bit_generator(*cls.seed)) + cls.initial_state = cls.rg.bit_generator.state + cls.seed_vector_bits = 64 + cls._extra_setup() + + +class TestPCG64DXSM(RNG): + @classmethod + def setup_class(cls): + cls.bit_generator = PCG64DXSM + cls.advance = 2**63 + 2**31 + 2**15 + 1 + cls.seed = [12345] + cls.rg = Generator(cls.bit_generator(*cls.seed)) + cls.initial_state = cls.rg.bit_generator.state + cls.seed_vector_bits = 64 + cls._extra_setup() + + +class TestDefaultRNG(RNG): + @classmethod + def setup_class(cls): + # This will duplicate some tests that directly instantiate a fresh + # Generator(), but that's okay. 
+ cls.bit_generator = PCG64 + cls.advance = 2**63 + 2**31 + 2**15 + 1 + cls.seed = [12345] + cls.rg = np.random.default_rng(*cls.seed) + cls.initial_state = cls.rg.bit_generator.state + cls.seed_vector_bits = 64 + cls._extra_setup() + + def test_default_is_pcg64(self): + # In order to change the default BitGenerator, we'll go through + # a deprecation cycle to move to a different function. + assert_(isinstance(self.rg.bit_generator, PCG64)) + + def test_seed(self): + np.random.default_rng() + np.random.default_rng(None) + np.random.default_rng(12345) + np.random.default_rng(0) + np.random.default_rng(43660444402423911716352051725018508569) + np.random.default_rng([43660444402423911716352051725018508569, + 279705150948142787361475340226491943209]) + with pytest.raises(ValueError): + np.random.default_rng(-1) + with pytest.raises(ValueError): + np.random.default_rng([12345, -1]) diff --git a/MLPY/Lib/site-packages/numpy/rec.pyi b/MLPY/Lib/site-packages/numpy/rec.pyi new file mode 100644 index 0000000000000000000000000000000000000000..adb2e49a629e68ebce66d056892d8126c1200bbc --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/rec.pyi @@ -0,0 +1,65 @@ +from typing import List + +from numpy import ( + format_parser as format_parser, + record as record, + recarray as recarray, +) + +__all__: List[str] + +def fromarrays( + arrayList, + dtype=..., + shape=..., + formats=..., + names=..., + titles=..., + aligned=..., + byteorder=..., +): ... +def fromrecords( + recList, + dtype=..., + shape=..., + formats=..., + names=..., + titles=..., + aligned=..., + byteorder=..., +): ... +def fromstring( + datastring, + dtype=..., + shape=..., + offset=..., + formats=..., + names=..., + titles=..., + aligned=..., + byteorder=..., +): ... +def fromfile( + fd, + dtype=..., + shape=..., + offset=..., + formats=..., + names=..., + titles=..., + aligned=..., + byteorder=..., +): ... +def array( + obj, + dtype=..., + shape=..., + offset=..., + strides=..., + formats=..., + names=..., + titles=..., + aligned=..., + byteorder=..., + copy=..., +): ... 
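The `rec.pyi` stub above only records constructor signatures. For orientation, a minimal sketch of the two most common entry points, `np.rec.fromarrays` and `np.rec.fromrecords`; the field names, formats, and data below are invented for illustration and are not taken from the stub::

    import numpy as np

    # fromarrays() builds a record array from parallel columns; the
    # names/formats keywords correspond to the stubbed parameters above.
    ra = np.rec.fromarrays(
        [np.array([1, 2]), np.array([3.0, 4.0])],
        names='idx,val', formats='i8,f8')
    assert ra.idx[1] == 2 and ra.val[0] == 3.0   # fields become attributes

    # fromrecords() takes row tuples instead of columns and infers formats.
    rb = np.rec.fromrecords([(1, 3.0), (2, 4.0)], names='idx,val')
    assert rb[0].val == 3.0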
diff --git a/MLPY/Lib/site-packages/numpy/setup.py b/MLPY/Lib/site-packages/numpy/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..9dab8b64df53ef07077bedacf4816b540cb84e21 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/setup.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python3 + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('numpy', parent_package, top_path) + + config.add_subpackage('compat') + config.add_subpackage('core') + config.add_subpackage('distutils') + config.add_subpackage('doc') + config.add_subpackage('f2py') + config.add_subpackage('fft') + config.add_subpackage('lib') + config.add_subpackage('linalg') + config.add_subpackage('ma') + config.add_subpackage('matrixlib') + config.add_subpackage('polynomial') + config.add_subpackage('random') + config.add_subpackage('testing') + config.add_subpackage('typing') + config.add_data_dir('doc') + config.add_data_files('py.typed') + config.add_data_files('*.pyi') + config.add_subpackage('tests') + config.make_config_py() # installs __config__.py + return config + +if __name__ == '__main__': + print('This is the wrong setup.py file to run') diff --git a/MLPY/Lib/site-packages/numpy/testing/__init__.py b/MLPY/Lib/site-packages/numpy/testing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c272fe1437b46bc7b866bc21e073faf42f446e54 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/testing/__init__.py @@ -0,0 +1,20 @@ +"""Common test support for all numpy test scripts. + +This single module should provide all the common functionality for numpy tests +in a single location, so that test scripts can just import it and work right +away. + +""" +from unittest import TestCase + +from ._private.utils import * +from ._private import decorators as dec +from ._private.nosetester import ( + run_module_suite, NoseTester as Tester + ) + +__all__ = _private.utils.__all__ + ['TestCase', 'run_module_suite'] + +from numpy._pytesttester import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/MLPY/Lib/site-packages/numpy/testing/__init__.pyi b/MLPY/Lib/site-packages/numpy/testing/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..f122756229be60363161e17394f0ff9adf90fd36 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/testing/__init__.pyi @@ -0,0 +1,113 @@ +import sys +import warnings +from typing import Any, List, ClassVar, Tuple, Set + +if sys.version_info >= (3, 8): + from typing import Final +else: + from typing_extensions import Final + +from unittest import ( + TestCase as TestCase, +) + +from unittest.case import ( + SkipTest as SkipTest, +) + +__all__: List[str] + +def run_module_suite(file_to_run=..., argv=...): ... + +class KnownFailureException(Exception): ... +class IgnoreException(Exception): ... + +class clear_and_catch_warnings(warnings.catch_warnings): + class_modules: ClassVar[Tuple[str, ...]] + modules: Set[str] + def __init__(self, record=..., modules=...): ... + def __enter__(self): ... + def __exit__(self, *exc_info): ... + +class suppress_warnings: + log: List[warnings.WarningMessage] + def __init__(self, forwarding_rule=...): ... + def filter(self, category=..., message=..., module=...): ... + def record(self, category=..., message=..., module=...): ... + def __enter__(self): ... + def __exit__(self, *exc_info): ... + def __call__(self, func): ... 
+ +verbose: int +IS_PYPY: Final[bool] +HAS_REFCOUNT: Final[bool] +HAS_LAPACK64: Final[bool] + +def assert_(val, msg=...): ... +def memusage(processName=..., instance=...): ... +def jiffies(_proc_pid_stat=..., _load_time=...): ... +def build_err_msg( + arrays, + err_msg, + header=..., + verbose=..., + names=..., + precision=..., +): ... +def assert_equal(actual, desired, err_msg=..., verbose=...): ... +def print_assert_equal(test_string, actual, desired): ... +def assert_almost_equal( + actual, + desired, + decimal=..., + err_msg=..., + verbose=..., +): ... +def assert_approx_equal( + actual, + desired, + significant=..., + err_msg=..., + verbose=..., +): ... +def assert_array_compare( + comparison, + x, + y, + err_msg=..., + verbose=..., + header=..., + precision=..., + equal_nan=..., + equal_inf=..., +): ... +def assert_array_equal(x, y, err_msg=..., verbose=...): ... +def assert_array_almost_equal(x, y, decimal=..., err_msg=..., verbose=...): ... +def assert_array_less(x, y, err_msg=..., verbose=...): ... +def runstring(astr, dict): ... +def assert_string_equal(actual, desired): ... +def rundocs(filename=..., raise_on_error=...): ... +def raises(*args): ... +def assert_raises(*args, **kwargs): ... +def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs): ... +def decorate_methods(cls, decorator, testmatch=...): ... +def measure(code_str, times=..., label=...): ... +def assert_allclose( + actual, + desired, + rtol=..., + atol=..., + equal_nan=..., + err_msg=..., + verbose=..., +): ... +def assert_array_almost_equal_nulp(x, y, nulp=...): ... +def assert_array_max_ulp(a, b, maxulp=..., dtype=...): ... +def assert_warns(warning_class, *args, **kwargs): ... +def assert_no_warnings(*args, **kwargs): ... +def tempdir(*args, **kwargs): ... +def temppath(*args, **kwargs): ... +def assert_no_gc_cycles(*args, **kwargs): ... +def break_cycles(): ... +def _assert_valid_refcount(op): ... +def _gen_alignment_data(dtype=..., type=..., max_size=...): ... 
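The stub above enumerates the public `numpy.testing` assertion and warning helpers. For orientation, a short usage sketch of the most commonly used ones; the tolerance and the warning-filter message are illustrative values, not defaults from the stub::

    import warnings

    import numpy as np
    from numpy.testing import (
        assert_allclose, assert_array_equal, suppress_warnings,
        )

    # Exact element-wise comparison with a readable failure message.
    assert_array_equal(np.arange(3), np.array([0, 1, 2]))

    # Tolerant floating-point comparison.
    assert_allclose(np.float32(0.1) * 3, 0.3, rtol=1e-6)

    # Selectively silence warnings by category and message pattern.
    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning, message='legacy')
        warnings.warn('legacy API', DeprecationWarning)  # filtered out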
diff --git a/MLPY/Lib/site-packages/numpy/testing/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/testing/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ac0202e3127f2294221178362b315d7204f2280 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/testing/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/testing/__pycache__/print_coercion_tables.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/testing/__pycache__/print_coercion_tables.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..584c020ad74f70dda722d87566f56249fa8dd005 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/testing/__pycache__/print_coercion_tables.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/testing/__pycache__/setup.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/testing/__pycache__/setup.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3d244bcb50d2c452ed4f2bb83fe13d6d8482d74a Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/testing/__pycache__/setup.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/testing/__pycache__/utils.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/testing/__pycache__/utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a246a54f543ff0f0fac09d3493cabaea776231a Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/testing/__pycache__/utils.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/testing/_private/__init__.py b/MLPY/Lib/site-packages/numpy/testing/_private/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/numpy/testing/_private/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/testing/_private/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7de780892800898bc5de9490ea5219f2408db4f8 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/testing/_private/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/testing/_private/__pycache__/decorators.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/testing/_private/__pycache__/decorators.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f63945016596d06a7d7030a28bdd21d947ec2b74 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/testing/_private/__pycache__/decorators.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/testing/_private/__pycache__/noseclasses.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/testing/_private/__pycache__/noseclasses.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e2c544899f6abd57180ca749c5c2515c0673da6 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/testing/_private/__pycache__/noseclasses.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/testing/_private/__pycache__/nosetester.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/testing/_private/__pycache__/nosetester.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16657f36ba66f45a6d8cd62ef0f4c9287b799af0 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/testing/_private/__pycache__/nosetester.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/testing/_private/__pycache__/parameterized.cpython-39.pyc 
b/MLPY/Lib/site-packages/numpy/testing/_private/__pycache__/parameterized.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a2db2a42fa5b5b5a84e369dcb4ce1cd9177f776 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/testing/_private/__pycache__/parameterized.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/testing/_private/__pycache__/utils.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/testing/_private/__pycache__/utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee4c8b081b94c295d8d15d7e1351403f8a80eaf1 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/testing/_private/__pycache__/utils.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/testing/_private/decorators.py b/MLPY/Lib/site-packages/numpy/testing/_private/decorators.py new file mode 100644 index 0000000000000000000000000000000000000000..bf8db2f46bdd3203af89c594c2808cd60454eea1 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/testing/_private/decorators.py @@ -0,0 +1,331 @@ +""" +Decorators for labeling and modifying behavior of test objects. + +Decorators that merely return a modified version of the original +function object are straightforward. Decorators that return a new +function object need to use +:: + + nose.tools.make_decorator(original_function)(decorator) + +in returning the decorator, in order to preserve meta-data such as +function name, setup and teardown functions and so on - see +``nose.tools`` for more information. + +""" +import collections.abc +import warnings + +from .utils import SkipTest, assert_warns, HAS_REFCOUNT + +__all__ = ['slow', 'setastest', 'skipif', 'knownfailureif', 'deprecated', + 'parametrize', '_needs_refcount',] + + +def slow(t): + """ + .. deprecated:: 1.21 + This decorator is retained for compatibility with the nose testing framework, which is being phased out. + Please use the nose2 or pytest frameworks instead. + + Label a test as 'slow'. + + The exact definition of a slow test is obviously both subjective and + hardware-dependent, but in general any individual test that requires more + than a second or two should be labeled as slow (the whole suite consists of + thousands of tests, so even a second is significant). + + Parameters + ---------- + t : callable + The test to label as slow. + + Returns + ------- + t : callable + The decorated test `t`. + + Examples + -------- + The `numpy.testing` module includes ``import decorators as dec``. + A test can be decorated as slow like this:: + + from numpy.testing import * + + @dec.slow + def test_big(self): + print('Big, slow test') + + """ + # Numpy 1.21, 2020-12-20 + warnings.warn('the np.testing.dec decorators are included for nose support, and are ' + 'deprecated since NumPy v1.21. Use the nose2 or pytest frameworks instead.', DeprecationWarning, stacklevel=2) + + t.slow = True + return t + +def setastest(tf=True): + """ + .. deprecated:: 1.21 + This decorator is retained for compatibility with the nose testing framework, which is being phased out. + Please use the nose2 or pytest frameworks instead. + + Signals to nose that this function is or is not a test. + + Parameters + ---------- + tf : bool + If True, specifies that the decorated callable is a test. + If False, specifies that the decorated callable is not a test. + Default is True. + + Notes + ----- + This decorator can't use the nose namespace, because it can be + called from a non-test module. See also ``istest`` and ``nottest`` in + ``nose.tools``. 
+ + Examples + -------- + `setastest` can be used in the following way:: + + from numpy.testing import dec + + @dec.setastest(False) + def func_with_test_in_name(arg1, arg2): + pass + + """ + # Numpy 1.21, 2020-12-20 + warnings.warn('the np.testing.dec decorators are included for nose support, and are ' + 'deprecated since NumPy v1.21. Use the nose2 or pytest frameworks instead.', DeprecationWarning, stacklevel=2) + def set_test(t): + t.__test__ = tf + return t + return set_test + +def skipif(skip_condition, msg=None): + """ + .. deprecated:: 1.21 + This decorator is retained for compatibility with the nose testing framework, which is being phased out. + Please use the nose2 or pytest frameworks instead. + + Make function raise SkipTest exception if a given condition is true. + + If the condition is a callable, it is used at runtime to dynamically + make the decision. This is useful for tests that may require costly + imports, to delay the cost until the test suite is actually executed. + + Parameters + ---------- + skip_condition : bool or callable + Flag to determine whether to skip the decorated test. + msg : str, optional + Message to give on raising a SkipTest exception. Default is None. + + Returns + ------- + decorator : function + Decorator which, when applied to a function, causes SkipTest + to be raised when `skip_condition` is True, and the function + to be called normally otherwise. + + Notes + ----- + The decorator itself is decorated with the ``nose.tools.make_decorator`` + function in order to transmit function name, and various other metadata. + + """ + + def skip_decorator(f): + # Local import to avoid a hard nose dependency and only incur the + # import time overhead at actual test-time. + import nose + + # Numpy 1.21, 2020-12-20 + warnings.warn('the np.testing.dec decorators are included for nose support, and are ' + 'deprecated since NumPy v1.21. Use the nose2 or pytest frameworks instead.', DeprecationWarning, stacklevel=2) + + # Allow for both boolean or callable skip conditions. + if isinstance(skip_condition, collections.abc.Callable): + skip_val = lambda: skip_condition() + else: + skip_val = lambda: skip_condition + + def get_msg(func,msg=None): + """Skip message with information about function being skipped.""" + if msg is None: + out = 'Test skipped due to test condition' + else: + out = msg + + return f'Skipping test: {func.__name__}: {out}' + + # We need to define *two* skippers because Python doesn't allow both + # return with value and yield inside the same function. + def skipper_func(*args, **kwargs): + """Skipper for normal test functions.""" + if skip_val(): + raise SkipTest(get_msg(f, msg)) + else: + return f(*args, **kwargs) + + def skipper_gen(*args, **kwargs): + """Skipper for test generators.""" + if skip_val(): + raise SkipTest(get_msg(f, msg)) + else: + yield from f(*args, **kwargs) + + # Choose the right skipper to use when building the actual decorator. + if nose.util.isgenerator(f): + skipper = skipper_gen + else: + skipper = skipper_func + + return nose.tools.make_decorator(f)(skipper) + + return skip_decorator + + +def knownfailureif(fail_condition, msg=None): + """ + .. deprecated:: 1.21 + This decorator is retained for compatibility with the nose testing framework, which is being phased out. + Please use the nose2 or pytest frameworks instead. + + Make function raise KnownFailureException exception if given condition is true. + + If the condition is a callable, it is used at runtime to dynamically + make the decision. 
This is useful for tests that may require costly + imports, to delay the cost until the test suite is actually executed. + + Parameters + ---------- + fail_condition : bool or callable + Flag to determine whether to mark the decorated test as a known + failure (if True) or not (if False). + msg : str, optional + Message to give on raising a KnownFailureException exception. + Default is None. + + Returns + ------- + decorator : function + Decorator, which, when applied to a function, causes + KnownFailureException to be raised when `fail_condition` is True, + and the function to be called normally otherwise. + + Notes + ----- + The decorator itself is decorated with the ``nose.tools.make_decorator`` + function in order to transmit function name, and various other metadata. + + """ + # Numpy 1.21, 2020-12-20 + warnings.warn('the np.testing.dec decorators are included for nose support, and are ' + 'deprecated since NumPy v1.21. Use the nose2 or pytest frameworks instead.', DeprecationWarning, stacklevel=2) + + if msg is None: + msg = 'Test skipped due to known failure' + + # Allow for both boolean or callable known failure conditions. + if isinstance(fail_condition, collections.abc.Callable): + fail_val = lambda: fail_condition() + else: + fail_val = lambda: fail_condition + + def knownfail_decorator(f): + # Local import to avoid a hard nose dependency and only incur the + # import time overhead at actual test-time. + import nose + from .noseclasses import KnownFailureException + + def knownfailer(*args, **kwargs): + if fail_val(): + raise KnownFailureException(msg) + else: + return f(*args, **kwargs) + return nose.tools.make_decorator(f)(knownfailer) + + return knownfail_decorator + +def deprecated(conditional=True): + """ + .. deprecated:: 1.21 + This decorator is retained for compatibility with the nose testing framework, which is being phased out. + Please use the nose2 or pytest frameworks instead. + + Filter deprecation warnings while running the test suite. + + This decorator can be used to filter DeprecationWarning's, to avoid + printing them during the test suite run, while checking that the test + actually raises a DeprecationWarning. + + Parameters + ---------- + conditional : bool or callable, optional + Flag to determine whether to mark test as deprecated or not. If the + condition is a callable, it is used at runtime to dynamically make the + decision. Default is True. + + Returns + ------- + decorator : function + The `deprecated` decorator itself. + + Notes + ----- + .. versionadded:: 1.4.0 + + """ + def deprecate_decorator(f): + # Local import to avoid a hard nose dependency and only incur the + # import time overhead at actual test-time. + import nose + + # Numpy 1.21, 2020-12-20 + warnings.warn('the np.testing.dec decorators are included for nose support, and are ' + 'deprecated since NumPy v1.21. Use the nose2 or pytest frameworks instead.', DeprecationWarning, stacklevel=2) + + def _deprecated_imp(*args, **kwargs): + # Poor man's replacement for the with statement + with assert_warns(DeprecationWarning): + f(*args, **kwargs) + + if isinstance(conditional, collections.abc.Callable): + cond = conditional() + else: + cond = conditional + if cond: + return nose.tools.make_decorator(f)(_deprecated_imp) + else: + return f + return deprecate_decorator + + +def parametrize(vars, input): + """ + .. deprecated:: 1.21 + This decorator is retained for compatibility with the nose testing framework, which is being phased out. + Please use the nose2 or pytest frameworks instead. 
+ + Pytest compatibility class. This implements the simplest level of + pytest.mark.parametrize for use in nose as an aid in making the transition + to pytest. It achieves that by adding a dummy var parameter and ignoring + the doc_func parameter of the base class. It does not support variable + substitution by name, nor does it support nesting or classes. See the + pytest documentation for usage. + + .. versionadded:: 1.14.0 + + """ + from .parameterized import parameterized + + # Numpy 1.21, 2020-12-20 + warnings.warn('the np.testing.dec decorators are included for nose support, and are ' + 'deprecated since NumPy v1.21. Use the nose2 or pytest frameworks instead.', DeprecationWarning, stacklevel=2) + + return parameterized(input) + +_needs_refcount = skipif(not HAS_REFCOUNT, "python has no sys.getrefcount") diff --git a/MLPY/Lib/site-packages/numpy/testing/_private/noseclasses.py b/MLPY/Lib/site-packages/numpy/testing/_private/noseclasses.py new file mode 100644 index 0000000000000000000000000000000000000000..1fea7d467e31e55f8468e68ba6bc8fdb1bf18588 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/testing/_private/noseclasses.py @@ -0,0 +1,364 @@ +# These classes implement a doctest runner plugin for nose, a "known failure" +# error class, and a customized TestProgram for NumPy. + +# Because this module imports nose directly, it should not +# be used except by nosetester.py to avoid a general NumPy +# dependency on nose. +import os +import sys +import doctest +import inspect + +import numpy +import nose +from nose.plugins import doctests as npd +from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin +from nose.plugins.base import Plugin +from nose.util import src +from .nosetester import get_package_name +from .utils import KnownFailureException, KnownFailureTest + + +# Some of the classes in this module begin with 'Numpy' to clearly distinguish +# them from the plethora of very similar names from nose/unittest/doctest + +#----------------------------------------------------------------------------- +# Modified version of the one in the stdlib, that fixes a python bug (doctests +# not found in extension modules, https://bugs.python.org/issue3158) +class NumpyDocTestFinder(doctest.DocTestFinder): + + def _from_module(self, module, object): + """ + Return true if the given object is defined in the given + module. + """ + if module is None: + return True + elif inspect.isfunction(object): + return module.__dict__ is object.__globals__ + elif inspect.isbuiltin(object): + return module.__name__ == object.__module__ + elif inspect.isclass(object): + return module.__name__ == object.__module__ + elif inspect.ismethod(object): + # This one may be a bug in cython that fails to correctly set the + # __module__ attribute of methods, but since the same error is easy + # to make by extension code writers, having this safety in place + # isn't such a bad idea + return module.__name__ == object.__self__.__class__.__module__ + elif inspect.getmodule(object) is not None: + return module is inspect.getmodule(object) + elif hasattr(object, '__module__'): + return module.__name__ == object.__module__ + elif isinstance(object, property): + return True # [XX] no way not be sure. + else: + raise ValueError("object must be a class or function") + + def _find(self, tests, obj, name, module, source_lines, globs, seen): + """ + Find tests for the given object and any contained objects, and + add them to `tests`. 
+ """ + + doctest.DocTestFinder._find(self, tests, obj, name, module, + source_lines, globs, seen) + + # Below we re-run pieces of the above method with manual modifications, + # because the original code is buggy and fails to correctly identify + # doctests in extension modules. + + # Local shorthands + from inspect import ( + isroutine, isclass, ismodule, isfunction, ismethod + ) + + # Look for tests in a module's contained objects. + if ismodule(obj) and self._recurse: + for valname, val in obj.__dict__.items(): + valname1 = f'{name}.{valname}' + if ( (isroutine(val) or isclass(val)) + and self._from_module(module, val)): + + self._find(tests, val, valname1, module, source_lines, + globs, seen) + + # Look for tests in a class's contained objects. + if isclass(obj) and self._recurse: + for valname, val in obj.__dict__.items(): + # Special handling for staticmethod/classmethod. + if isinstance(val, staticmethod): + val = getattr(obj, valname) + if isinstance(val, classmethod): + val = getattr(obj, valname).__func__ + + # Recurse to methods, properties, and nested classes. + if ((isfunction(val) or isclass(val) or + ismethod(val) or isinstance(val, property)) and + self._from_module(module, val)): + valname = f'{name}.{valname}' + self._find(tests, val, valname, module, source_lines, + globs, seen) + + +# second-chance checker; if the default comparison doesn't +# pass, then see if the expected output string contains flags that +# tell us to ignore the output +class NumpyOutputChecker(doctest.OutputChecker): + def check_output(self, want, got, optionflags): + ret = doctest.OutputChecker.check_output(self, want, got, + optionflags) + if not ret: + if "#random" in want: + return True + + # it would be useful to normalize endianness so that + # bigendian machines don't fail all the tests (and there are + # actually some bigendian examples in the doctests). Let's try + # making them all little endian + got = got.replace("'>", "'<") + want = want.replace("'>", "'<") + + # try to normalize out 32 and 64 bit default int sizes + for sz in [4, 8]: + got = got.replace("'>> np.testing.nosetester.get_package_name('nonsense') + 'numpy' + + """ + + fullpath = filepath[:] + pkg_name = [] + while 'site-packages' in filepath or 'dist-packages' in filepath: + filepath, p2 = os.path.split(filepath) + if p2 in ('site-packages', 'dist-packages'): + break + pkg_name.append(p2) + + # if package name determination failed, just default to numpy/scipy + if not pkg_name: + if 'scipy' in fullpath: + return 'scipy' + else: + return 'numpy' + + # otherwise, reverse to get correct order and return + pkg_name.reverse() + + # don't include the outer egg directory + if pkg_name[0].endswith('.egg'): + pkg_name.pop(0) + + return '.'.join(pkg_name) + + +def run_module_suite(file_to_run=None, argv=None): + """ + Run a test module. + + Equivalent to calling ``$ nosetests `` from + the command line + + Parameters + ---------- + file_to_run : str, optional + Path to test module, or None. + By default, run the module from which this function is called. + argv : list of strings + Arguments to be passed to the nose test runner. ``argv[0]`` is + ignored. All command line arguments accepted by ``nosetests`` + will work. If it is the default value None, sys.argv is used. + + .. versionadded:: 1.9.0 + + Examples + -------- + Adding the following:: + + if __name__ == "__main__" : + run_module_suite(argv=sys.argv) + + at the end of a test module will run the tests when that module is + called in the python interpreter. 
+ + Alternatively, calling:: + + >>> run_module_suite(file_to_run="numpy/tests/test_matlib.py") # doctest: +SKIP + + from an interpreter will run all the test routine in 'test_matlib.py'. + """ + if file_to_run is None: + f = sys._getframe(1) + file_to_run = f.f_locals.get('__file__', None) + if file_to_run is None: + raise AssertionError + + if argv is None: + argv = sys.argv + [file_to_run] + else: + argv = argv + [file_to_run] + + nose = import_nose() + from .noseclasses import KnownFailurePlugin + nose.run(argv=argv, addplugins=[KnownFailurePlugin()]) + + +class NoseTester: + """ + Nose test runner. + + This class is made available as numpy.testing.Tester, and a test function + is typically added to a package's __init__.py like so:: + + from numpy.testing import Tester + test = Tester().test + + Calling this test function finds and runs all tests associated with the + package and all its sub-packages. + + Attributes + ---------- + package_path : str + Full path to the package to test. + package_name : str + Name of the package to test. + + Parameters + ---------- + package : module, str or None, optional + The package to test. If a string, this should be the full path to + the package. If None (default), `package` is set to the module from + which `NoseTester` is initialized. + raise_warnings : None, str or sequence of warnings, optional + This specifies which warnings to configure as 'raise' instead + of being shown once during the test execution. Valid strings are: + + - "develop" : equals ``(Warning,)`` + - "release" : equals ``()``, don't raise on any warnings. + + Default is "release". + depth : int, optional + If `package` is None, then this can be used to initialize from the + module of the caller of (the caller of (...)) the code that + initializes `NoseTester`. Default of 0 means the module of the + immediate caller; higher values are useful for utility routines that + want to initialize `NoseTester` objects on behalf of other code. + + """ + def __init__(self, package=None, raise_warnings="release", depth=0, + check_fpu_mode=False): + # Back-compat: 'None' used to mean either "release" or "develop" + # depending on whether this was a release or develop version of + # numpy. Those semantics were fine for testing numpy, but not so + # helpful for downstream projects like scipy that use + # numpy.testing. (They want to set this based on whether *they* are a + # release or develop version, not whether numpy is.) So we continue to + # accept 'None' for back-compat, but it's now just an alias for the + # default "release". + if raise_warnings is None: + raise_warnings = "release" + + package_name = None + if package is None: + f = sys._getframe(1 + depth) + package_path = f.f_locals.get('__file__', None) + if package_path is None: + raise AssertionError + package_path = os.path.dirname(package_path) + package_name = f.f_locals.get('__name__', None) + elif isinstance(package, type(os)): + package_path = os.path.dirname(package.__file__) + package_name = getattr(package, '__name__', None) + else: + package_path = str(package) + + self.package_path = package_path + + # Find the package name under test; this name is used to limit coverage + # reporting (if enabled). + if package_name is None: + package_name = get_package_name(package_path) + self.package_name = package_name + + # Set to "release" in constructor in maintenance branches. 
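        # (Editor's comment) The mapping itself is spelled out in test():
        #     "develop" -> raise on every Warning during the run,
        #     "release" -> raise on none.
        # A sequence of warning categories may also be passed through as-is.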
+ self.raise_warnings = raise_warnings + + # Whether to check for FPU mode changes + self.check_fpu_mode = check_fpu_mode + + def _test_argv(self, label, verbose, extra_argv): + ''' Generate argv for nosetest command + + Parameters + ---------- + label : {'fast', 'full', '', attribute identifier}, optional + see ``test`` docstring + verbose : int, optional + Verbosity value for test outputs, in the range 1-10. Default is 1. + extra_argv : list, optional + List with any extra arguments to pass to nosetests. + + Returns + ------- + argv : list + command line arguments that will be passed to nose + ''' + argv = [__file__, self.package_path, '-s'] + if label and label != 'full': + if not isinstance(label, str): + raise TypeError('Selection label should be a string') + if label == 'fast': + label = 'not slow' + argv += ['-A', label] + argv += ['--verbosity', str(verbose)] + + # When installing with setuptools, and also in some other cases, the + # test_*.py files end up marked +x executable. Nose, by default, does + # not run files marked with +x as they might be scripts. However, in + # our case nose only looks for test_*.py files under the package + # directory, which should be safe. + argv += ['--exe'] + + if extra_argv: + argv += extra_argv + return argv + + def _show_system_info(self): + nose = import_nose() + + import numpy + print(f'NumPy version {numpy.__version__}') + relaxed_strides = numpy.ones((10, 1), order="C").flags.f_contiguous + print("NumPy relaxed strides checking option:", relaxed_strides) + npdir = os.path.dirname(numpy.__file__) + print(f'NumPy is installed in {npdir}') + + if 'scipy' in self.package_name: + import scipy + print(f'SciPy version {scipy.__version__}') + spdir = os.path.dirname(scipy.__file__) + print(f'SciPy is installed in {spdir}') + + pyversion = sys.version.replace('\n', '') + print(f'Python version {pyversion}') + print("nose version %d.%d.%d" % nose.__versioninfo__) + + def _get_custom_doctester(self): + """ Return instantiated plugin for doctests + + Allows subclassing of this class to override doctester + + A return value of None means use the nose builtin doctest plugin + """ + from .noseclasses import NumpyDoctest + return NumpyDoctest() + + def prepare_test_args(self, label='fast', verbose=1, extra_argv=None, + doctests=False, coverage=False, timer=False): + """ + Run tests for module using nose. + + This method does the heavy lifting for the `test` method. It takes all + the same arguments, for details see `test`. 
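        (Editor's sketch of the argv that ``_test_argv`` assembles for the
        default call, assuming a ``NoseTester`` instance ``t``::

            argv, plugins = t.prepare_test_args(label='fast')
            # argv is roughly:
            # [<this file>, t.package_path, '-s', '-A', 'not slow',
            #  '--verbosity', '1', '--exe']

        Coverage, timer and doctest options append further flags, as shown
        in the body below.)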
+ + See Also + -------- + test + + """ + # fail with nice error message if nose is not present + import_nose() + # compile argv + argv = self._test_argv(label, verbose, extra_argv) + # our way of doing coverage + if coverage: + argv += [f'--cover-package={self.package_name}', '--with-coverage', + '--cover-tests', '--cover-erase'] + + if timer: + if timer is True: + argv += ['--with-timer'] + elif isinstance(timer, int): + argv += ['--with-timer', '--timer-top-n', str(timer)] + + # construct list of plugins + import nose.plugins.builtin + from nose.plugins import EntryPointPluginManager + from .noseclasses import (KnownFailurePlugin, Unplugger, + FPUModeCheckPlugin) + plugins = [KnownFailurePlugin()] + plugins += [p() for p in nose.plugins.builtin.plugins] + if self.check_fpu_mode: + plugins += [FPUModeCheckPlugin()] + argv += ["--with-fpumodecheckplugin"] + try: + # External plugins (like nose-timer) + entrypoint_manager = EntryPointPluginManager() + entrypoint_manager.loadPlugins() + plugins += [p for p in entrypoint_manager.plugins] + except ImportError: + # Relies on pkg_resources, not a hard dependency + pass + + # add doctesting if required + doctest_argv = '--with-doctest' in argv + if doctests == False and doctest_argv: + doctests = True + plug = self._get_custom_doctester() + if plug is None: + # use standard doctesting + if doctests and not doctest_argv: + argv += ['--with-doctest'] + else: # custom doctesting + if doctest_argv: # in fact the unplugger would take care of this + argv.remove('--with-doctest') + plugins += [Unplugger('doctest'), plug] + if doctests: + argv += ['--with-' + plug.name] + return argv, plugins + + def test(self, label='fast', verbose=1, extra_argv=None, + doctests=False, coverage=False, raise_warnings=None, + timer=False): + """ + Run tests for module using nose. + + Parameters + ---------- + label : {'fast', 'full', '', attribute identifier}, optional + Identifies the tests to run. This can be a string to pass to + the nosetests executable with the '-A' option, or one of several + special values. Special values are: + + * 'fast' - the default - which corresponds to the ``nosetests -A`` + option of 'not slow'. + * 'full' - fast (as above) and slow tests as in the + 'no -A' option to nosetests - this is the same as ''. + * None or '' - run all tests. + * attribute_identifier - string passed directly to nosetests as '-A'. + + verbose : int, optional + Verbosity value for test outputs, in the range 1-10. Default is 1. + extra_argv : list, optional + List with any extra arguments to pass to nosetests. + doctests : bool, optional + If True, run doctests in module. Default is False. + coverage : bool, optional + If True, report coverage of NumPy code. Default is False. + (This requires the + `coverage module `_). + raise_warnings : None, str or sequence of warnings, optional + This specifies which warnings to configure as 'raise' instead + of being shown once during the test execution. Valid strings are: + + * "develop" : equals ``(Warning,)`` + * "release" : equals ``()``, do not raise on any warnings. + timer : bool or int, optional + Timing of individual tests with ``nose-timer`` (which needs to be + installed). If True, time tests and report on all of them. + If an integer (say ``N``), report timing results for ``N`` slowest + tests. + + Returns + ------- + result : object + Returns the result of running the tests as a + ``nose.result.TextTestResult`` object. + + Notes + ----- + Each NumPy module exposes `test` in its namespace to run all tests for it. 
+ For example, to run all tests for numpy.lib: + + >>> np.lib.test() #doctest: +SKIP + + Examples + -------- + >>> result = np.lib.test() #doctest: +SKIP + Running unit tests for numpy.lib + ... + Ran 976 tests in 3.933s + + OK + + >>> result.errors #doctest: +SKIP + [] + >>> result.knownfail #doctest: +SKIP + [] + """ + + # cap verbosity at 3 because nose becomes *very* verbose beyond that + verbose = min(verbose, 3) + + from . import utils + utils.verbose = verbose + + argv, plugins = self.prepare_test_args( + label, verbose, extra_argv, doctests, coverage, timer) + + if doctests: + print(f'Running unit tests and doctests for {self.package_name}') + else: + print(f'Running unit tests for {self.package_name}') + + self._show_system_info() + + # reset doctest state on every run + import doctest + doctest.master = None + + if raise_warnings is None: + raise_warnings = self.raise_warnings + + _warn_opts = dict(develop=(Warning,), + release=()) + if isinstance(raise_warnings, str): + raise_warnings = _warn_opts[raise_warnings] + + with suppress_warnings("location") as sup: + # Reset the warning filters to the default state, + # so that running the tests is more repeatable. + warnings.resetwarnings() + # Set all warnings to 'warn', this is because the default 'once' + # has the bad property of possibly shadowing later warnings. + warnings.filterwarnings('always') + # Force the requested warnings to raise + for warningtype in raise_warnings: + warnings.filterwarnings('error', category=warningtype) + # Filter out annoying import messages. + sup.filter(message='Not importing directory') + sup.filter(message="numpy.dtype size changed") + sup.filter(message="numpy.ufunc size changed") + sup.filter(category=np.ModuleDeprecationWarning) + # Filter out boolean '-' deprecation messages. This allows + # older versions of scipy to test without a flood of messages. + sup.filter(message=".*boolean negative.*") + sup.filter(message=".*boolean subtract.*") + # Filter out distutils cpu warnings (could be localized to + # distutils tests). ASV has problems with top level import, + # so fetch module for suppression here. + with warnings.catch_warnings(): + warnings.simplefilter("always") + from ...distutils import cpuinfo + sup.filter(category=UserWarning, module=cpuinfo) + # Filter out some deprecation warnings inside nose 1.3.7 when run + # on python 3.5b2. See + # https://github.com/nose-devs/nose/issues/929 + # Note: it is hard to filter based on module for sup (lineno could + # be implemented). + warnings.filterwarnings("ignore", message=".*getargspec.*", + category=DeprecationWarning, + module=r"nose\.") + + from .noseclasses import NumpyTestProgram + + t = NumpyTestProgram(argv=argv, exit=False, plugins=plugins) + + return t.result + + def bench(self, label='fast', verbose=1, extra_argv=None): + """ + Run benchmarks for module using nose. + + Parameters + ---------- + label : {'fast', 'full', '', attribute identifier}, optional + Identifies the benchmarks to run. This can be a string to pass to + the nosetests executable with the '-A' option, or one of several + special values. Special values are: + + * 'fast' - the default - which corresponds to the ``nosetests -A`` + option of 'not slow'. + * 'full' - fast (as above) and slow benchmarks as in the + 'no -A' option to nosetests - this is the same as ''. + * None or '' - run all tests. + * attribute_identifier - string passed directly to nosetests as '-A'. + + verbose : int, optional + Verbosity value for benchmark outputs, in the range 1-10. 
Default is 1. + extra_argv : list, optional + List with any extra arguments to pass to nosetests. + + Returns + ------- + success : bool + Returns True if running the benchmarks works, False if an error + occurred. + + Notes + ----- + Benchmarks are like tests, but have names starting with "bench" instead + of "test", and can be found under the "benchmarks" sub-directory of the + module. + + Each NumPy module exposes `bench` in its namespace to run all benchmarks + for it. + + Examples + -------- + >>> success = np.lib.bench() #doctest: +SKIP + Running benchmarks for numpy.lib + ... + using 562341 items: + unique: + 0.11 + unique1d: + 0.11 + ratio: 1.0 + nUnique: 56230 == 56230 + ... + OK + + >>> success #doctest: +SKIP + True + + """ + + print(f'Running benchmarks for {self.package_name}') + self._show_system_info() + + argv = self._test_argv(label, verbose, extra_argv) + argv += ['--match', r'(?:^|[\\b_\\.%s-])[Bb]ench' % os.sep] + + # import nose or make informative error + nose = import_nose() + + # get plugin to disable doctests + from .noseclasses import Unplugger + add_plugins = [Unplugger('doctest')] + + return nose.run(argv=argv, addplugins=add_plugins) + + +def _numpy_tester(): + if hasattr(np, "__version__") and ".dev0" in np.__version__: + mode = "develop" + else: + mode = "release" + return NoseTester(raise_warnings=mode, depth=1, + check_fpu_mode=True) diff --git a/MLPY/Lib/site-packages/numpy/testing/_private/parameterized.py b/MLPY/Lib/site-packages/numpy/testing/_private/parameterized.py new file mode 100644 index 0000000000000000000000000000000000000000..57413fe553fdd6a6dcaf0b041e9aff68e80eae5a --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/testing/_private/parameterized.py @@ -0,0 +1,432 @@ +""" +tl;dr: all code code is licensed under simplified BSD, unless stated otherwise. + +Unless stated otherwise in the source files, all code is copyright 2010 David +Wolever . All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +EVENT SHALL OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +The views and conclusions contained in the software and documentation are those +of the authors and should not be interpreted as representing official policies, +either expressed or implied, of David Wolever. 
+ +""" +import re +import inspect +import warnings +from functools import wraps +from types import MethodType +from collections import namedtuple + +from unittest import TestCase + +_param = namedtuple("param", "args kwargs") + +class param(_param): + """ Represents a single parameter to a test case. + + For example:: + + >>> p = param("foo", bar=16) + >>> p + param("foo", bar=16) + >>> p.args + ('foo', ) + >>> p.kwargs + {'bar': 16} + + Intended to be used as an argument to ``@parameterized``:: + + @parameterized([ + param("foo", bar=16), + ]) + def test_stuff(foo, bar=16): + pass + """ + + def __new__(cls, *args , **kwargs): + return _param.__new__(cls, args, kwargs) + + @classmethod + def explicit(cls, args=None, kwargs=None): + """ Creates a ``param`` by explicitly specifying ``args`` and + ``kwargs``:: + + >>> param.explicit([1,2,3]) + param(*(1, 2, 3)) + >>> param.explicit(kwargs={"foo": 42}) + param(*(), **{"foo": "42"}) + """ + args = args or () + kwargs = kwargs or {} + return cls(*args, **kwargs) + + @classmethod + def from_decorator(cls, args): + """ Returns an instance of ``param()`` for ``@parameterized`` argument + ``args``:: + + >>> param.from_decorator((42, )) + param(args=(42, ), kwargs={}) + >>> param.from_decorator("foo") + param(args=("foo", ), kwargs={}) + """ + if isinstance(args, param): + return args + elif isinstance(args, (str,)): + args = (args, ) + try: + return cls(*args) + except TypeError as e: + if "after * must be" not in str(e): + raise + raise TypeError( + "Parameters must be tuples, but %r is not (hint: use '(%r, )')" + %(args, args), + ) + + def __repr__(self): + return "param(*%r, **%r)" %self + + +def parameterized_argument_value_pairs(func, p): + """Return tuples of parameterized arguments and their values. + + This is useful if you are writing your own doc_func + function and need to know the values for each parameter name:: + + >>> def func(a, foo=None, bar=42, **kwargs): pass + >>> p = param(1, foo=7, extra=99) + >>> parameterized_argument_value_pairs(func, p) + [("a", 1), ("foo", 7), ("bar", 42), ("**kwargs", {"extra": 99})] + + If the function's first argument is named ``self`` then it will be + ignored:: + + >>> def func(self, a): pass + >>> p = param(1) + >>> parameterized_argument_value_pairs(func, p) + [("a", 1)] + + Additionally, empty ``*args`` or ``**kwargs`` will be ignored:: + + >>> def func(foo, *args): pass + >>> p = param(1) + >>> parameterized_argument_value_pairs(func, p) + [("foo", 1)] + >>> p = param(1, 16) + >>> parameterized_argument_value_pairs(func, p) + [("foo", 1), ("*args", (16, ))] + """ + argspec = inspect.getargspec(func) + arg_offset = 1 if argspec.args[:1] == ["self"] else 0 + + named_args = argspec.args[arg_offset:] + + result = list(zip(named_args, p.args)) + named_args = argspec.args[len(result) + arg_offset:] + varargs = p.args[len(result):] + + result.extend([ + (name, p.kwargs.get(name, default)) + for (name, default) + in zip(named_args, argspec.defaults or []) + ]) + + seen_arg_names = {n for (n, _) in result} + keywords = dict(sorted([ + (name, p.kwargs[name]) + for name in p.kwargs + if name not in seen_arg_names + ])) + + if varargs: + result.append(("*%s" %(argspec.varargs, ), tuple(varargs))) + + if keywords: + result.append(("**%s" %(argspec.keywords, ), keywords)) + + return result + +def short_repr(x, n=64): + """ A shortened repr of ``x`` which is guaranteed to be ``unicode``:: + + >>> short_repr("foo") + u"foo" + >>> short_repr("123456789", n=4) + u"12...89" + """ + + x_repr = repr(x) + if 
isinstance(x_repr, bytes): + try: + x_repr = str(x_repr, "utf-8") + except UnicodeDecodeError: + x_repr = str(x_repr, "latin1") + if len(x_repr) > n: + x_repr = x_repr[:n//2] + "..." + x_repr[len(x_repr) - n//2:] + return x_repr + +def default_doc_func(func, num, p): + if func.__doc__ is None: + return None + + all_args_with_values = parameterized_argument_value_pairs(func, p) + + # Assumes that the function passed is a bound method. + descs = [f'{n}={short_repr(v)}' for n, v in all_args_with_values] + + # The documentation might be a multiline string, so split it + # and just work with the first string, ignoring the period + # at the end if there is one. + first, nl, rest = func.__doc__.lstrip().partition("\n") + suffix = "" + if first.endswith("."): + suffix = "." + first = first[:-1] + args = "%s[with %s]" %(len(first) and " " or "", ", ".join(descs)) + return "".join([first.rstrip(), args, suffix, nl, rest]) + +def default_name_func(func, num, p): + base_name = func.__name__ + name_suffix = "_%s" %(num, ) + if len(p.args) > 0 and isinstance(p.args[0], (str,)): + name_suffix += "_" + parameterized.to_safe_name(p.args[0]) + return base_name + name_suffix + + +# force nose for numpy purposes. +_test_runner_override = 'nose' +_test_runner_guess = False +_test_runners = set(["unittest", "unittest2", "nose", "nose2", "pytest"]) +_test_runner_aliases = { + "_pytest": "pytest", +} + +def set_test_runner(name): + global _test_runner_override + if name not in _test_runners: + raise TypeError( + "Invalid test runner: %r (must be one of: %s)" + %(name, ", ".join(_test_runners)), + ) + _test_runner_override = name + +def detect_runner(): + """ Guess which test runner we're using by traversing the stack and looking + for the first matching module. This *should* be reasonably safe, as + it's done during test discovery where the test runner should be the + stack frame immediately outside. 
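        (Editor's note: in this vendored copy the guess is short-circuited --
        ``_test_runner_override`` is hard-wired to ``'nose'`` a few lines up,
        and ``set_test_runner`` can never reset it to None, so the stack walk
        below is effectively dead code here.)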
""" + if _test_runner_override is not None: + return _test_runner_override + global _test_runner_guess + if _test_runner_guess is False: + stack = inspect.stack() + for record in reversed(stack): + frame = record[0] + module = frame.f_globals.get("__name__").partition(".")[0] + if module in _test_runner_aliases: + module = _test_runner_aliases[module] + if module in _test_runners: + _test_runner_guess = module + break + else: + _test_runner_guess = None + return _test_runner_guess + +class parameterized: + """ Parameterize a test case:: + + class TestInt: + @parameterized([ + ("A", 10), + ("F", 15), + param("10", 42, base=42) + ]) + def test_int(self, input, expected, base=16): + actual = int(input, base=base) + assert_equal(actual, expected) + + @parameterized([ + (2, 3, 5) + (3, 5, 8), + ]) + def test_add(a, b, expected): + assert_equal(a + b, expected) + """ + + def __init__(self, input, doc_func=None): + self.get_input = self.input_as_callable(input) + self.doc_func = doc_func or default_doc_func + + def __call__(self, test_func): + self.assert_not_in_testcase_subclass() + + @wraps(test_func) + def wrapper(test_self=None): + test_cls = test_self and type(test_self) + + original_doc = wrapper.__doc__ + for num, args in enumerate(wrapper.parameterized_input): + p = param.from_decorator(args) + unbound_func, nose_tuple = self.param_as_nose_tuple(test_self, test_func, num, p) + try: + wrapper.__doc__ = nose_tuple[0].__doc__ + # Nose uses `getattr(instance, test_func.__name__)` to get + # a method bound to the test instance (as opposed to a + # method bound to the instance of the class created when + # tests were being enumerated). Set a value here to make + # sure nose can get the correct test method. + if test_self is not None: + setattr(test_cls, test_func.__name__, unbound_func) + yield nose_tuple + finally: + if test_self is not None: + delattr(test_cls, test_func.__name__) + wrapper.__doc__ = original_doc + wrapper.parameterized_input = self.get_input() + wrapper.parameterized_func = test_func + test_func.__name__ = "_parameterized_original_%s" %(test_func.__name__, ) + return wrapper + + def param_as_nose_tuple(self, test_self, func, num, p): + nose_func = wraps(func)(lambda *args: func(*args[:-1], **args[-1])) + nose_func.__doc__ = self.doc_func(func, num, p) + # Track the unbound function because we need to setattr the unbound + # function onto the class for nose to work (see comments above), and + # Python 3 doesn't let us pull the function out of a bound method. + unbound_func = nose_func + if test_self is not None: + nose_func = MethodType(nose_func, test_self) + return unbound_func, (nose_func, ) + p.args + (p.kwargs or {}, ) + + def assert_not_in_testcase_subclass(self): + parent_classes = self._terrible_magic_get_defining_classes() + if any(issubclass(cls, TestCase) for cls in parent_classes): + raise Exception("Warning: '@parameterized' tests won't work " + "inside subclasses of 'TestCase' - use " + "'@parameterized.expand' instead.") + + def _terrible_magic_get_defining_classes(self): + """ Returns the list of parent classes of the class currently being defined. + Will likely only work if called from the ``parameterized`` decorator. 
+ This function is entirely @brandon_rhodes's fault, as he suggested + the implementation: http://stackoverflow.com/a/8793684/71522 + """ + stack = inspect.stack() + if len(stack) <= 4: + return [] + frame = stack[4] + code_context = frame[4] and frame[4][0].strip() + if not (code_context and code_context.startswith("class ")): + return [] + _, _, parents = code_context.partition("(") + parents, _, _ = parents.partition(")") + return eval("[" + parents + "]", frame[0].f_globals, frame[0].f_locals) + + @classmethod + def input_as_callable(cls, input): + if callable(input): + return lambda: cls.check_input_values(input()) + input_values = cls.check_input_values(input) + return lambda: input_values + + @classmethod + def check_input_values(cls, input_values): + # Explicitly convert non-list inputs to a list so that: + # 1. A helpful exception will be raised if they aren't iterable, and + # 2. Generators are unwrapped exactly once (otherwise `nosetests + # --processes=n` has issues; see: + # https://github.com/wolever/nose-parameterized/pull/31) + if not isinstance(input_values, list): + input_values = list(input_values) + return [ param.from_decorator(p) for p in input_values ] + + @classmethod + def expand(cls, input, name_func=None, doc_func=None, **legacy): + """ A "brute force" method of parameterizing test cases. Creates new + test cases and injects them into the namespace that the wrapped + function is being defined in. Useful for parameterizing tests in + subclasses of 'UnitTest', where Nose test generators don't work. + + >>> @parameterized.expand([("foo", 1, 2)]) + ... def test_add1(name, input, expected): + ... actual = add1(input) + ... assert_equal(actual, expected) + ... + >>> locals() + ... 'test_add1_foo_0': ... + >>> + """ + + if "testcase_func_name" in legacy: + warnings.warn("testcase_func_name= is deprecated; use name_func=", + DeprecationWarning, stacklevel=2) + if not name_func: + name_func = legacy["testcase_func_name"] + + if "testcase_func_doc" in legacy: + warnings.warn("testcase_func_doc= is deprecated; use doc_func=", + DeprecationWarning, stacklevel=2) + if not doc_func: + doc_func = legacy["testcase_func_doc"] + + doc_func = doc_func or default_doc_func + name_func = name_func or default_name_func + + def parameterized_expand_wrapper(f, instance=None): + stack = inspect.stack() + frame = stack[1] + frame_locals = frame[0].f_locals + + parameters = cls.input_as_callable(input)() + for num, p in enumerate(parameters): + name = name_func(f, num, p) + frame_locals[name] = cls.param_as_standalone_func(p, f, name) + frame_locals[name].__doc__ = doc_func(f, num, p) + + f.__test__ = False + return parameterized_expand_wrapper + + @classmethod + def param_as_standalone_func(cls, p, func, name): + @wraps(func) + def standalone_func(*a): + return func(*(a + p.args), **p.kwargs) + standalone_func.__name__ = name + + # place_as is used by py.test to determine what source file should be + # used for this test. + standalone_func.place_as = func + + # Remove __wrapped__ because py.test will try to look at __wrapped__ + # to determine which parameters should be used with this test case, + # and obviously we don't need it to do any parameterization. 
+ try: + del standalone_func.__wrapped__ + except AttributeError: + pass + return standalone_func + + @classmethod + def to_safe_name(cls, s): + return str(re.sub("[^a-zA-Z0-9_]+", "_", s)) diff --git a/MLPY/Lib/site-packages/numpy/testing/_private/utils.py b/MLPY/Lib/site-packages/numpy/testing/_private/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a4bfd48b1d5f600cf5562b7f1205b6c27c5abd17 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/testing/_private/utils.py @@ -0,0 +1,2521 @@ +""" +Utility function to facilitate testing. + +""" +import os +import sys +import platform +import re +import gc +import operator +import warnings +from functools import partial, wraps +import shutil +import contextlib +from tempfile import mkdtemp, mkstemp +from unittest.case import SkipTest +from warnings import WarningMessage +import pprint + +import numpy as np +from numpy.core import( + intp, float32, empty, arange, array_repr, ndarray, isnat, array) +import numpy.linalg.lapack_lite + +from io import StringIO + +__all__ = [ + 'assert_equal', 'assert_almost_equal', 'assert_approx_equal', + 'assert_array_equal', 'assert_array_less', 'assert_string_equal', + 'assert_array_almost_equal', 'assert_raises', 'build_err_msg', + 'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal', + 'raises', 'rundocs', 'runstring', 'verbose', 'measure', + 'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex', + 'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings', + 'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings', + 'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY', + 'HAS_REFCOUNT', 'suppress_warnings', 'assert_array_compare', + '_assert_valid_refcount', '_gen_alignment_data', 'assert_no_gc_cycles', + 'break_cycles', 'HAS_LAPACK64' + ] + + +class KnownFailureException(Exception): + '''Raise this exception to mark a test as a known failing test.''' + pass + + +KnownFailureTest = KnownFailureException # backwards compat +verbose = 0 + +IS_PYPY = platform.python_implementation() == 'PyPy' +HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None +HAS_LAPACK64 = numpy.linalg.lapack_lite._ilp64 + + +def import_nose(): + """ Import nose only when needed. + """ + nose_is_good = True + minimum_nose_version = (1, 0, 0) + try: + import nose + except ImportError: + nose_is_good = False + else: + if nose.__versioninfo__ < minimum_nose_version: + nose_is_good = False + + if not nose_is_good: + msg = ('Need nose >= %d.%d.%d for tests - see ' + 'https://nose.readthedocs.io' % + minimum_nose_version) + raise ImportError(msg) + + return nose + + +def assert_(val, msg=''): + """ + Assert that works in release mode. + Accepts callable msg to allow deferring evaluation until failure. + + The Python built-in ``assert`` does not work when executing code in + optimized mode (the ``-O`` flag) - no byte-code is generated for it. + + For documentation on usage, refer to the Python documentation. + + """ + __tracebackhide__ = True # Hide traceback for py.test + if not val: + try: + smsg = msg() + except TypeError: + smsg = msg + raise AssertionError(smsg) + + +def gisnan(x): + """like isnan, but always raise an error if type not supported instead of + returning a TypeError object. + + Notes + ----- + isnan and other ufunc sometimes return a NotImplementedType object instead + of raising any exception. This function is a wrapper to make sure an + exception is always raised. 
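    (Editor's aside on ``assert_`` above: because ``msg`` may be a callable,
    an expensive failure message is only built when the assertion actually
    fails -- a sketch, where ``a`` is a hypothetical array::

        assert_(a.flags.c_contiguous,
                msg=lambda: f"not contiguous: {a!r}")

    Any zero-argument callable returning a string works.)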
+ + This should be removed once this problem is solved at the Ufunc level.""" + from numpy.core import isnan + st = isnan(x) + if isinstance(st, type(NotImplemented)): + raise TypeError("isnan not supported for this type") + return st + + +def gisfinite(x): + """like isfinite, but always raise an error if type not supported instead + of returning a TypeError object. + + Notes + ----- + isfinite and other ufunc sometimes return a NotImplementedType object + instead of raising any exception. This function is a wrapper to make sure + an exception is always raised. + + This should be removed once this problem is solved at the Ufunc level.""" + from numpy.core import isfinite, errstate + with errstate(invalid='ignore'): + st = isfinite(x) + if isinstance(st, type(NotImplemented)): + raise TypeError("isfinite not supported for this type") + return st + + +def gisinf(x): + """like isinf, but always raise an error if type not supported instead of + returning a TypeError object. + + Notes + ----- + isinf and other ufunc sometimes return a NotImplementedType object instead + of raising any exception. This function is a wrapper to make sure an + exception is always raised. + + This should be removed once this problem is solved at the Ufunc level.""" + from numpy.core import isinf, errstate + with errstate(invalid='ignore'): + st = isinf(x) + if isinstance(st, type(NotImplemented)): + raise TypeError("isinf not supported for this type") + return st + + +if os.name == 'nt': + # Code "stolen" from enthought/debug/memusage.py + def GetPerformanceAttributes(object, counter, instance=None, + inum=-1, format=None, machine=None): + # NOTE: Many counters require 2 samples to give accurate results, + # including "% Processor Time" (as by definition, at any instant, a + # thread's CPU usage is either 0 or 100). To read counters like this, + # you should copy this function, but keep the counter open, and call + # CollectQueryData() each time you need to know. + # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp (dead link) + # My older explanation for this was that the "AddCounter" process + # forced the CPU to 100%, but the above makes more sense :) + import win32pdh + if format is None: + format = win32pdh.PDH_FMT_LONG + path = win32pdh.MakeCounterPath( (machine, object, instance, None, + inum, counter)) + hq = win32pdh.OpenQuery() + try: + hc = win32pdh.AddCounter(hq, path) + try: + win32pdh.CollectQueryData(hq) + type, val = win32pdh.GetFormattedCounterValue(hc, format) + return val + finally: + win32pdh.RemoveCounter(hc) + finally: + win32pdh.CloseQuery(hq) + + def memusage(processName="python", instance=0): + # from win32pdhutil, part of the win32all package + import win32pdh + return GetPerformanceAttributes("Process", "Virtual Bytes", + processName, instance, + win32pdh.PDH_FMT_LONG, None) +elif sys.platform[:5] == 'linux': + + def memusage(_proc_pid_stat=f'/proc/{os.getpid()}/stat'): + """ + Return virtual memory size in bytes of the running python. + + """ + try: + with open(_proc_pid_stat, 'r') as f: + l = f.readline().split(' ') + return int(l[22]) + except Exception: + return +else: + def memusage(): + """ + Return memory usage of running python. [Not implemented] + + """ + raise NotImplementedError + + +if sys.platform[:5] == 'linux': + def jiffies(_proc_pid_stat=f'/proc/{os.getpid()}/stat', _load_time=[]): + """ + Return number of jiffies elapsed. + + Return number of jiffies (1/100ths of a second) that this + process has been scheduled in user mode. See man 5 proc. 
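    (Editor's note: concretely, the Linux branch below parses field 14,
    ``utime``, out of ``/proc/<pid>/stat``; on other platforms the fallback
    approximates jiffies from wall-clock time, which over-counts while the
    process is suspended or sleeping.)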
+ + """ + import time + if not _load_time: + _load_time.append(time.time()) + try: + with open(_proc_pid_stat, 'r') as f: + l = f.readline().split(' ') + return int(l[13]) + except Exception: + return int(100*(time.time()-_load_time[0])) +else: + # os.getpid is not in all platforms available. + # Using time is safe but inaccurate, especially when process + # was suspended or sleeping. + def jiffies(_load_time=[]): + """ + Return number of jiffies elapsed. + + Return number of jiffies (1/100ths of a second) that this + process has been scheduled in user mode. See man 5 proc. + + """ + import time + if not _load_time: + _load_time.append(time.time()) + return int(100*(time.time()-_load_time[0])) + + +def build_err_msg(arrays, err_msg, header='Items are not equal:', + verbose=True, names=('ACTUAL', 'DESIRED'), precision=8): + msg = ['\n' + header] + if err_msg: + if err_msg.find('\n') == -1 and len(err_msg) < 79-len(header): + msg = [msg[0] + ' ' + err_msg] + else: + msg.append(err_msg) + if verbose: + for i, a in enumerate(arrays): + + if isinstance(a, ndarray): + # precision argument is only needed if the objects are ndarrays + r_func = partial(array_repr, precision=precision) + else: + r_func = repr + + try: + r = r_func(a) + except Exception as exc: + r = f'[repr failed for <{type(a).__name__}>: {exc}]' + if r.count('\n') > 3: + r = '\n'.join(r.splitlines()[:3]) + r += '...' + msg.append(f' {names[i]}: {r}') + return '\n'.join(msg) + + +def assert_equal(actual, desired, err_msg='', verbose=True): + """ + Raises an AssertionError if two objects are not equal. + + Given two objects (scalars, lists, tuples, dictionaries or numpy arrays), + check that all elements of these objects are equal. An exception is raised + at the first conflicting values. + + When one of `actual` and `desired` is a scalar and the other is array_like, + the function checks that each element of the array_like object is equal to + the scalar. + + This function handles NaN comparisons as if NaN was a "normal" number. + That is, AssertionError is not raised if both objects have NaNs in the same + positions. This is in contrast to the IEEE standard on NaNs, which says + that NaN compared to anything must return False. + + Parameters + ---------- + actual : array_like + The object to check. + desired : array_like + The expected object. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired are not equal. + + Examples + -------- + >>> np.testing.assert_equal([4,5], [4,6]) + Traceback (most recent call last): + ... + AssertionError: + Items are not equal: + item=1 + ACTUAL: 5 + DESIRED: 6 + + The following comparison does not raise an exception. There are NaNs + in the inputs, but they are in the same positions. 
+ + >>> np.testing.assert_equal(np.array([1.0, 2.0, np.nan]), [1, 2, np.nan]) + + """ + __tracebackhide__ = True # Hide traceback for py.test + if isinstance(desired, dict): + if not isinstance(actual, dict): + raise AssertionError(repr(type(actual))) + assert_equal(len(actual), len(desired), err_msg, verbose) + for k, i in desired.items(): + if k not in actual: + raise AssertionError(repr(k)) + assert_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}', + verbose) + return + if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): + assert_equal(len(actual), len(desired), err_msg, verbose) + for k in range(len(desired)): + assert_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}', + verbose) + return + from numpy.core import ndarray, isscalar, signbit + from numpy.lib import iscomplexobj, real, imag + if isinstance(actual, ndarray) or isinstance(desired, ndarray): + return assert_array_equal(actual, desired, err_msg, verbose) + msg = build_err_msg([actual, desired], err_msg, verbose=verbose) + + # Handle complex numbers: separate into real/imag to handle + # nan/inf/negative zero correctly + # XXX: catch ValueError for subclasses of ndarray where iscomplex fail + try: + usecomplex = iscomplexobj(actual) or iscomplexobj(desired) + except (ValueError, TypeError): + usecomplex = False + + if usecomplex: + if iscomplexobj(actual): + actualr = real(actual) + actuali = imag(actual) + else: + actualr = actual + actuali = 0 + if iscomplexobj(desired): + desiredr = real(desired) + desiredi = imag(desired) + else: + desiredr = desired + desiredi = 0 + try: + assert_equal(actualr, desiredr) + assert_equal(actuali, desiredi) + except AssertionError: + raise AssertionError(msg) + + # isscalar test to check cases such as [np.nan] != np.nan + if isscalar(desired) != isscalar(actual): + raise AssertionError(msg) + + try: + isdesnat = isnat(desired) + isactnat = isnat(actual) + dtypes_match = (np.asarray(desired).dtype.type == + np.asarray(actual).dtype.type) + if isdesnat and isactnat: + # If both are NaT (and have the same dtype -- datetime or + # timedelta) they are considered equal. + if dtypes_match: + return + else: + raise AssertionError(msg) + + except (TypeError, ValueError, NotImplementedError): + pass + + # Inf/nan/negative zero handling + try: + isdesnan = gisnan(desired) + isactnan = gisnan(actual) + if isdesnan and isactnan: + return # both nan, so equal + + # handle signed zero specially for floats + array_actual = np.asarray(actual) + array_desired = np.asarray(desired) + if (array_actual.dtype.char in 'Mm' or + array_desired.dtype.char in 'Mm'): + # version 1.18 + # until this version, gisnan failed for datetime64 and timedelta64. + # Now it succeeds but comparison to scalar with a different type + # emits a DeprecationWarning. 
+ # Avoid that by skipping the next check + raise NotImplementedError('cannot compare to a scalar ' + 'with a different type') + + if desired == 0 and actual == 0: + if not signbit(desired) == signbit(actual): + raise AssertionError(msg) + + except (TypeError, ValueError, NotImplementedError): + pass + + try: + # Explicitly use __eq__ for comparison, gh-2552 + if not (desired == actual): + raise AssertionError(msg) + + except (DeprecationWarning, FutureWarning) as e: + # this handles the case when the two types are not even comparable + if 'elementwise == comparison' in e.args[0]: + raise AssertionError(msg) + else: + raise + + +def print_assert_equal(test_string, actual, desired): + """ + Test if two objects are equal, and print an error message if test fails. + + The test is performed with ``actual == desired``. + + Parameters + ---------- + test_string : str + The message supplied to AssertionError. + actual : object + The object to test for equality against `desired`. + desired : object + The expected result. + + Examples + -------- + >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1]) + >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 2]) + Traceback (most recent call last): + ... + AssertionError: Test XYZ of func xyz failed + ACTUAL: + [0, 1] + DESIRED: + [0, 2] + + """ + __tracebackhide__ = True # Hide traceback for py.test + import pprint + + if not (actual == desired): + msg = StringIO() + msg.write(test_string) + msg.write(' failed\nACTUAL: \n') + pprint.pprint(actual, msg) + msg.write('DESIRED: \n') + pprint.pprint(desired, msg) + raise AssertionError(msg.getvalue()) + + +def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True): + """ + Raises an AssertionError if two items are not equal up to desired + precision. + + .. note:: It is recommended to use one of `assert_allclose`, + `assert_array_almost_equal_nulp` or `assert_array_max_ulp` + instead of this function for more consistent floating point + comparisons. + + The test verifies that the elements of `actual` and `desired` satisfy. + + ``abs(desired-actual) < 1.5 * 10**(-decimal)`` + + That is a looser test than originally documented, but agrees with what the + actual implementation in `assert_array_almost_equal` did up to rounding + vagaries. An exception is raised at conflicting values. For ndarrays this + delegates to assert_array_almost_equal + + Parameters + ---------- + actual : array_like + The object to check. + desired : array_like + The expected object. + decimal : int, optional + Desired precision, default is 7. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired are not equal up to specified precision. + + See Also + -------- + assert_allclose: Compare two array_like objects for equality with desired + relative and/or absolute precision. + assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal + + Examples + -------- + >>> from numpy.testing import assert_almost_equal + >>> assert_almost_equal(2.3333333333333, 2.33333334) + >>> assert_almost_equal(2.3333333333333, 2.33333334, decimal=10) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not almost equal to 10 decimals + ACTUAL: 2.3333333333333 + DESIRED: 2.33333334 + + >>> assert_almost_equal(np.array([1.0,2.3333333333333]), + ... 
np.array([1.0,2.33333334]), decimal=9) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not almost equal to 9 decimals + + Mismatched elements: 1 / 2 (50%) + Max absolute difference: 6.66669964e-09 + Max relative difference: 2.85715698e-09 + x: array([1. , 2.333333333]) + y: array([1. , 2.33333334]) + + """ + __tracebackhide__ = True # Hide traceback for py.test + from numpy.core import ndarray + from numpy.lib import iscomplexobj, real, imag + + # Handle complex numbers: separate into real/imag to handle + # nan/inf/negative zero correctly + # XXX: catch ValueError for subclasses of ndarray where iscomplex fail + try: + usecomplex = iscomplexobj(actual) or iscomplexobj(desired) + except ValueError: + usecomplex = False + + def _build_err_msg(): + header = ('Arrays are not almost equal to %d decimals' % decimal) + return build_err_msg([actual, desired], err_msg, verbose=verbose, + header=header) + + if usecomplex: + if iscomplexobj(actual): + actualr = real(actual) + actuali = imag(actual) + else: + actualr = actual + actuali = 0 + if iscomplexobj(desired): + desiredr = real(desired) + desiredi = imag(desired) + else: + desiredr = desired + desiredi = 0 + try: + assert_almost_equal(actualr, desiredr, decimal=decimal) + assert_almost_equal(actuali, desiredi, decimal=decimal) + except AssertionError: + raise AssertionError(_build_err_msg()) + + if isinstance(actual, (ndarray, tuple, list)) \ + or isinstance(desired, (ndarray, tuple, list)): + return assert_array_almost_equal(actual, desired, decimal, err_msg) + try: + # If one of desired/actual is not finite, handle it specially here: + # check that both are nan if any is a nan, and test for equality + # otherwise + if not (gisfinite(desired) and gisfinite(actual)): + if gisnan(desired) or gisnan(actual): + if not (gisnan(desired) and gisnan(actual)): + raise AssertionError(_build_err_msg()) + else: + if not desired == actual: + raise AssertionError(_build_err_msg()) + return + except (NotImplementedError, TypeError): + pass + if abs(desired - actual) >= 1.5 * 10.0**(-decimal): + raise AssertionError(_build_err_msg()) + + +def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True): + """ + Raises an AssertionError if two items are not equal up to significant + digits. + + .. note:: It is recommended to use one of `assert_allclose`, + `assert_array_almost_equal_nulp` or `assert_array_max_ulp` + instead of this function for more consistent floating point + comparisons. + + Given two numbers, check that they are approximately equal. + Approximately equal is defined as the number of significant digits + that agree. + + Parameters + ---------- + actual : scalar + The object to check. + desired : scalar + The expected object. + significant : int, optional + Desired precision, default is 7. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired are not equal up to specified precision. + + See Also + -------- + assert_allclose: Compare two array_like objects for equality with desired + relative and/or absolute precision. + assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal + + Examples + -------- + >>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20) + >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20, + ... 
significant=8) + >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20, + ... significant=8) + Traceback (most recent call last): + ... + AssertionError: + Items are not equal to 8 significant digits: + ACTUAL: 1.234567e-21 + DESIRED: 1.2345672e-21 + + the evaluated condition that raises the exception is + + >>> abs(0.12345670e-20/1e-21 - 0.12345672e-20/1e-21) >= 10**-(8-1) + True + + """ + __tracebackhide__ = True # Hide traceback for py.test + import numpy as np + + (actual, desired) = map(float, (actual, desired)) + if desired == actual: + return + # Normalized the numbers to be in range (-10.0,10.0) + # scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual)))))) + with np.errstate(invalid='ignore'): + scale = 0.5*(np.abs(desired) + np.abs(actual)) + scale = np.power(10, np.floor(np.log10(scale))) + try: + sc_desired = desired/scale + except ZeroDivisionError: + sc_desired = 0.0 + try: + sc_actual = actual/scale + except ZeroDivisionError: + sc_actual = 0.0 + msg = build_err_msg( + [actual, desired], err_msg, + header='Items are not equal to %d significant digits:' % significant, + verbose=verbose) + try: + # If one of desired/actual is not finite, handle it specially here: + # check that both are nan if any is a nan, and test for equality + # otherwise + if not (gisfinite(desired) and gisfinite(actual)): + if gisnan(desired) or gisnan(actual): + if not (gisnan(desired) and gisnan(actual)): + raise AssertionError(msg) + else: + if not desired == actual: + raise AssertionError(msg) + return + except (TypeError, NotImplementedError): + pass + if np.abs(sc_desired - sc_actual) >= np.power(10., -(significant-1)): + raise AssertionError(msg) + + +def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='', + precision=6, equal_nan=True, equal_inf=True): + __tracebackhide__ = True # Hide traceback for py.test + from numpy.core import array, array2string, isnan, inf, bool_, errstate, all, max, object_ + + x = np.asanyarray(x) + y = np.asanyarray(y) + + # original array for output formatting + ox, oy = x, y + + def isnumber(x): + return x.dtype.char in '?bhilqpBHILQPefdgFDG' + + def istime(x): + return x.dtype.char in "Mm" + + def func_assert_same_pos(x, y, func=isnan, hasval='nan'): + """Handling nan/inf. + + Combine results of running func on x and y, checking that they are True + at the same locations. + + """ + __tracebackhide__ = True # Hide traceback for py.test + + x_id = func(x) + y_id = func(y) + # We include work-arounds here to handle three types of slightly + # pathological ndarray subclasses: + # (1) all() on `masked` array scalars can return masked arrays, so we + # use != True + # (2) __eq__ on some ndarray subclasses returns Python booleans + # instead of element-wise comparisons, so we cast to bool_() and + # use isinstance(..., bool) checks + # (3) subclasses with bare-bones __array_function__ implementations may + # not implement np.all(), so favor using the .all() method + # We are not committed to supporting such subclasses, but it's nice to + # support them if possible. + if bool_(x_id == y_id).all() != True: + msg = build_err_msg([x, y], + err_msg + '\nx and y %s location mismatch:' + % (hasval), verbose=verbose, header=header, + names=('x', 'y'), precision=precision) + raise AssertionError(msg) + # If there is a scalar, then here we know the array has the same + # flag as it everywhere, so we should return the scalar flag. 
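        # (Editor's comment) Both masks were just verified to agree
        # element-wise, so when neither side is a scalar returning y_id
        # alone is sufficient; the caller strips the flagged positions
        # with x[~flagged], y[~flagged] before the real comparison.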
+ if isinstance(x_id, bool) or x_id.ndim == 0: + return bool_(x_id) + elif isinstance(y_id, bool) or y_id.ndim == 0: + return bool_(y_id) + else: + return y_id + + try: + cond = (x.shape == () or y.shape == ()) or x.shape == y.shape + if not cond: + msg = build_err_msg([x, y], + err_msg + + f'\n(shapes {x.shape}, {y.shape} mismatch)', + verbose=verbose, header=header, + names=('x', 'y'), precision=precision) + raise AssertionError(msg) + + flagged = bool_(False) + if isnumber(x) and isnumber(y): + if equal_nan: + flagged = func_assert_same_pos(x, y, func=isnan, hasval='nan') + + if equal_inf: + flagged |= func_assert_same_pos(x, y, + func=lambda xy: xy == +inf, + hasval='+inf') + flagged |= func_assert_same_pos(x, y, + func=lambda xy: xy == -inf, + hasval='-inf') + + elif istime(x) and istime(y): + # If one is datetime64 and the other timedelta64 there is no point + if equal_nan and x.dtype.type == y.dtype.type: + flagged = func_assert_same_pos(x, y, func=isnat, hasval="NaT") + + if flagged.ndim > 0: + x, y = x[~flagged], y[~flagged] + # Only do the comparison if actual values are left + if x.size == 0: + return + elif flagged: + # no sense doing comparison if everything is flagged. + return + + val = comparison(x, y) + + if isinstance(val, bool): + cond = val + reduced = array([val]) + else: + reduced = val.ravel() + cond = reduced.all() + + # The below comparison is a hack to ensure that fully masked + # results, for which val.ravel().all() returns np.ma.masked, + # do not trigger a failure (np.ma.masked != True evaluates as + # np.ma.masked, which is falsy). + if cond != True: + n_mismatch = reduced.size - reduced.sum(dtype=intp) + n_elements = flagged.size if flagged.ndim != 0 else reduced.size + percent_mismatch = 100 * n_mismatch / n_elements + remarks = [ + 'Mismatched elements: {} / {} ({:.3g}%)'.format( + n_mismatch, n_elements, percent_mismatch)] + + with errstate(invalid='ignore', divide='ignore'): + # ignore errors for non-numeric types + with contextlib.suppress(TypeError): + error = abs(x - y) + max_abs_error = max(error) + if getattr(error, 'dtype', object_) == object_: + remarks.append('Max absolute difference: ' + + str(max_abs_error)) + else: + remarks.append('Max absolute difference: ' + + array2string(max_abs_error)) + + # note: this definition of relative error matches that one + # used by assert_allclose (found in np.isclose) + # Filter values where the divisor would be zero + nonzero = bool_(y != 0) + if all(~nonzero): + max_rel_error = array(inf) + else: + max_rel_error = max(error[nonzero] / abs(y[nonzero])) + if getattr(error, 'dtype', object_) == object_: + remarks.append('Max relative difference: ' + + str(max_rel_error)) + else: + remarks.append('Max relative difference: ' + + array2string(max_rel_error)) + + err_msg += '\n' + '\n'.join(remarks) + msg = build_err_msg([ox, oy], err_msg, + verbose=verbose, header=header, + names=('x', 'y'), precision=precision) + raise AssertionError(msg) + except ValueError: + import traceback + efmt = traceback.format_exc() + header = f'error during assertion:\n\n{efmt}\n\n{header}' + + msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header, + names=('x', 'y'), precision=precision) + raise ValueError(msg) + + +def assert_array_equal(x, y, err_msg='', verbose=True): + """ + Raises an AssertionError if two array_like objects are not equal. + + Given two array_like objects, check that the shape is equal and all + elements of these objects are equal (but see the Notes for the special + handling of a scalar). 
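    (Editor's note on the reporting in ``assert_array_compare`` above: the
    "Max relative difference" is measured against ``y`` only, matching the
    definition used by ``np.isclose`` -- a sketch::

        error = abs(x - y)
        nonzero = (y != 0)
        max_rel = (error[nonzero] / abs(y[nonzero])).max()

    so swapping the argument order can change the reported relative error.)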
An exception is raised at shape mismatch or + conflicting values. In contrast to the standard usage in numpy, NaNs + are compared like numbers, no assertion is raised if both objects have + NaNs in the same positions. + + The usual caution for verifying equality with floating point numbers is + advised. + + Parameters + ---------- + x : array_like + The actual object to check. + y : array_like + The desired, expected object. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired objects are not equal. + + See Also + -------- + assert_allclose: Compare two array_like objects for equality with desired + relative and/or absolute precision. + assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal + + Notes + ----- + When one of `x` and `y` is a scalar and the other is array_like, the + function checks that each element of the array_like object is equal to + the scalar. + + Examples + -------- + The first assert does not raise an exception: + + >>> np.testing.assert_array_equal([1.0,2.33333,np.nan], + ... [np.exp(0),2.33333, np.nan]) + + Assert fails with numerical imprecision with floats: + + >>> np.testing.assert_array_equal([1.0,np.pi,np.nan], + ... [1, np.sqrt(np.pi)**2, np.nan]) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not equal + + Mismatched elements: 1 / 3 (33.3%) + Max absolute difference: 4.4408921e-16 + Max relative difference: 1.41357986e-16 + x: array([1. , 3.141593, nan]) + y: array([1. , 3.141593, nan]) + + Use `assert_allclose` or one of the nulp (number of floating point values) + functions for these cases instead: + + >>> np.testing.assert_allclose([1.0,np.pi,np.nan], + ... [1, np.sqrt(np.pi)**2, np.nan], + ... rtol=1e-10, atol=0) + + As mentioned in the Notes section, `assert_array_equal` has special + handling for scalars. Here the test checks that each value in `x` is 3: + + >>> x = np.full((2, 5), fill_value=3) + >>> np.testing.assert_array_equal(x, 3) + + """ + __tracebackhide__ = True # Hide traceback for py.test + assert_array_compare(operator.__eq__, x, y, err_msg=err_msg, + verbose=verbose, header='Arrays are not equal') + + +def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True): + """ + Raises an AssertionError if two objects are not equal up to desired + precision. + + .. note:: It is recommended to use one of `assert_allclose`, + `assert_array_almost_equal_nulp` or `assert_array_max_ulp` + instead of this function for more consistent floating point + comparisons. + + The test verifies identical shapes and that the elements of ``actual`` and + ``desired`` satisfy. + + ``abs(desired-actual) < 1.5 * 10**(-decimal)`` + + That is a looser test than originally documented, but agrees with what the + actual implementation did up to rounding vagaries. An exception is raised + at shape mismatch or conflicting values. In contrast to the standard usage + in numpy, NaNs are compared like numbers, no assertion is raised if both + objects have NaNs in the same positions. + + Parameters + ---------- + x : array_like + The actual object to check. + y : array_like + The desired, expected object. + decimal : int, optional + Desired precision, default is 6. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. 
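A quick numeric check of the criterion quoted above, ``abs(desired-actual) < 1.5 * 10**(-decimal)``: at ``decimal=6`` the threshold is 1.5e-6, so a difference of 1.4e-6 passes while 1.6e-6 fails.

    import numpy as np

    np.testing.assert_array_almost_equal([1.0], [1.0 + 1.4e-6], decimal=6)  # passes
    try:
        np.testing.assert_array_almost_equal([1.0], [1.0 + 1.6e-6], decimal=6)
    except AssertionError:
        pass  # expected: 1.6e-6 is not strictly below the 1.5e-6 threshold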
+ + Raises + ------ + AssertionError + If actual and desired are not equal up to specified precision. + + See Also + -------- + assert_allclose: Compare two array_like objects for equality with desired + relative and/or absolute precision. + assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal + + Examples + -------- + the first assert does not raise an exception + + >>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan], + ... [1.0,2.333,np.nan]) + + >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan], + ... [1.0,2.33339,np.nan], decimal=5) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not almost equal to 5 decimals + + Mismatched elements: 1 / 3 (33.3%) + Max absolute difference: 6.e-05 + Max relative difference: 2.57136612e-05 + x: array([1. , 2.33333, nan]) + y: array([1. , 2.33339, nan]) + + >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan], + ... [1.0,2.33333, 5], decimal=5) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not almost equal to 5 decimals + + x and y nan location mismatch: + x: array([1. , 2.33333, nan]) + y: array([1. , 2.33333, 5. ]) + + """ + __tracebackhide__ = True # Hide traceback for py.test + from numpy.core import number, float_, result_type, array + from numpy.core.numerictypes import issubdtype + from numpy.core.fromnumeric import any as npany + + def compare(x, y): + try: + if npany(gisinf(x)) or npany( gisinf(y)): + xinfid = gisinf(x) + yinfid = gisinf(y) + if not (xinfid == yinfid).all(): + return False + # if one item, x and y is +- inf + if x.size == y.size == 1: + return x == y + x = x[~xinfid] + y = y[~yinfid] + except (TypeError, NotImplementedError): + pass + + # make sure y is an inexact type to avoid abs(MIN_INT); will cause + # casting of x later. + dtype = result_type(y, 1.) + y = np.asanyarray(y, dtype) + z = abs(x - y) + + if not issubdtype(z.dtype, number): + z = z.astype(float_) # handle object arrays + + return z < 1.5 * 10.0**(-decimal) + + assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose, + header=('Arrays are not almost equal to %d decimals' % decimal), + precision=decimal) + + +def assert_array_less(x, y, err_msg='', verbose=True): + """ + Raises an AssertionError if two array_like objects are not ordered by less + than. + + Given two array_like objects, check that the shape is equal and all + elements of the first object are strictly smaller than those of the + second object. An exception is raised at shape mismatch or incorrectly + ordered values. Shape mismatch does not raise if an object has zero + dimension. In contrast to the standard usage in numpy, NaNs are + compared, no assertion is raised if both objects have NaNs in the same + positions. + + + + Parameters + ---------- + x : array_like + The smaller object to check. + y : array_like + The larger object to compare. + err_msg : string + The error message to be printed in case of failure. + verbose : bool + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired objects are not equal. + + See Also + -------- + assert_array_equal: tests objects for equality + assert_array_almost_equal: test objects for equality up to precision + + + + Examples + -------- + >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1.1, 2.0, np.nan]) + >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1, 2.0, np.nan]) + Traceback (most recent call last): + ... 
+ AssertionError: + Arrays are not less-ordered + + Mismatched elements: 1 / 3 (33.3%) + Max absolute difference: 1. + Max relative difference: 0.5 + x: array([ 1., 1., nan]) + y: array([ 1., 2., nan]) + + >>> np.testing.assert_array_less([1.0, 4.0], 3) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not less-ordered + + Mismatched elements: 1 / 2 (50%) + Max absolute difference: 2. + Max relative difference: 0.66666667 + x: array([1., 4.]) + y: array(3) + + >>> np.testing.assert_array_less([1.0, 2.0, 3.0], [4]) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not less-ordered + + (shapes (3,), (1,) mismatch) + x: array([1., 2., 3.]) + y: array([4]) + + """ + __tracebackhide__ = True # Hide traceback for py.test + assert_array_compare(operator.__lt__, x, y, err_msg=err_msg, + verbose=verbose, + header='Arrays are not less-ordered', + equal_inf=False) + + +def runstring(astr, dict): + exec(astr, dict) + + +def assert_string_equal(actual, desired): + """ + Test if two strings are equal. + + If the given strings are equal, `assert_string_equal` does nothing. + If they are not equal, an AssertionError is raised, and the diff + between the strings is shown. + + Parameters + ---------- + actual : str + The string to test for equality against the expected string. + desired : str + The expected string. + + Examples + -------- + >>> np.testing.assert_string_equal('abc', 'abc') + >>> np.testing.assert_string_equal('abc', 'abcd') + Traceback (most recent call last): + File "", line 1, in + ... + AssertionError: Differences in strings: + - abc+ abcd? + + + """ + # delay import of difflib to reduce startup time + __tracebackhide__ = True # Hide traceback for py.test + import difflib + + if not isinstance(actual, str): + raise AssertionError(repr(type(actual))) + if not isinstance(desired, str): + raise AssertionError(repr(type(desired))) + if desired == actual: + return + + diff = list(difflib.Differ().compare(actual.splitlines(True), + desired.splitlines(True))) + diff_list = [] + while diff: + d1 = diff.pop(0) + if d1.startswith(' '): + continue + if d1.startswith('- '): + l = [d1] + d2 = diff.pop(0) + if d2.startswith('? '): + l.append(d2) + d2 = diff.pop(0) + if not d2.startswith('+ '): + raise AssertionError(repr(d2)) + l.append(d2) + if diff: + d3 = diff.pop(0) + if d3.startswith('? '): + l.append(d3) + else: + diff.insert(0, d3) + if d2[2:] == d1[2:]: + continue + diff_list.extend(l) + continue + raise AssertionError(repr(d1)) + if not diff_list: + return + msg = f"Differences in strings:\n{''.join(diff_list).rstrip()}" + if actual != desired: + raise AssertionError(msg) + + +def rundocs(filename=None, raise_on_error=True): + """ + Run doctests found in the given file. + + By default `rundocs` raises an AssertionError on failure. + + Parameters + ---------- + filename : str + The path to the file for which the doctests are run. + raise_on_error : bool + Whether to raise an AssertionError when a doctest fails. Default is + True. + + Notes + ----- + The doctests can be run by the user/developer by adding the ``doctests`` + argument to the ``test()`` call. 
For example, to run all tests (including + doctests) for `numpy.lib`: + + >>> np.lib.test(doctests=True) # doctest: +SKIP + """ + from numpy.compat import npy_load_module + import doctest + if filename is None: + f = sys._getframe(1) + filename = f.f_globals['__file__'] + name = os.path.splitext(os.path.basename(filename))[0] + m = npy_load_module(name, filename) + + tests = doctest.DocTestFinder().find(m) + runner = doctest.DocTestRunner(verbose=False) + + msg = [] + if raise_on_error: + out = lambda s: msg.append(s) + else: + out = None + + for test in tests: + runner.run(test, out=out) + + if runner.failures > 0 and raise_on_error: + raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg)) + + +def raises(*args): + """Decorator to check for raised exceptions. + + The decorated test function must raise one of the passed exceptions to + pass. If you want to test many assertions about exceptions in a single + test, you may want to use `assert_raises` instead. + + .. warning:: + This decorator is nose specific, do not use it if you are using a + different test framework. + + Parameters + ---------- + args : exceptions + The test passes if any of the passed exceptions is raised. + + Raises + ------ + AssertionError + + Examples + -------- + + Usage:: + + @raises(TypeError, ValueError) + def test_raises_type_error(): + raise TypeError("This test passes") + + @raises(Exception) + def test_that_fails_by_passing(): + pass + + """ + nose = import_nose() + return nose.tools.raises(*args) + +# +# assert_raises and assert_raises_regex are taken from unittest. +# +import unittest + + +class _Dummy(unittest.TestCase): + def nop(self): + pass + +_d = _Dummy('nop') + +def assert_raises(*args, **kwargs): + """ + assert_raises(exception_class, callable, *args, **kwargs) + assert_raises(exception_class) + + Fail unless an exception of class exception_class is thrown + by callable when invoked with arguments args and keyword + arguments kwargs. If a different type of exception is + thrown, it will not be caught, and the test case will be + deemed to have suffered an error, exactly as for an + unexpected exception. + + Alternatively, `assert_raises` can be used as a context manager: + + >>> from numpy.testing import assert_raises + >>> with assert_raises(ZeroDivisionError): + ... 1 / 0 + + is equivalent to + + >>> def div(x, y): + ... return x / y + >>> assert_raises(ZeroDivisionError, div, 1, 0) + + """ + __tracebackhide__ = True # Hide traceback for py.test + return _d.assertRaises(*args,**kwargs) + + +def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs): + """ + assert_raises_regex(exception_class, expected_regexp, callable, *args, + **kwargs) + assert_raises_regex(exception_class, expected_regexp) + + Fail unless an exception of class exception_class and with message that + matches expected_regexp is thrown by callable when invoked with arguments + args and keyword arguments kwargs. + + Alternatively, can be used as a context manager like `assert_raises`. + + Name of this function adheres to Python 3.2+ reference, but should work in + all versions down to 2.6. + + Notes + ----- + .. versionadded:: 1.9.0 + + """ + __tracebackhide__ = True # Hide traceback for py.test + return _d.assertRaisesRegex(exception_class, expected_regexp, *args, **kwargs) + + +def decorate_methods(cls, decorator, testmatch=None): + """ + Apply a decorator to all methods in a class matching a regular expression. 
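A minimal usage sketch for `assert_raises_regex` as defined above; the pattern is applied to the stringified exception with the ``re.search`` semantics inherited from `unittest`, and the broadcast error below is ordinary NumPy behaviour:

    import numpy as np
    from numpy.testing import assert_raises_regex

    with assert_raises_regex(ValueError, 'operands could not be broadcast'):
        np.ones((2, 3)) + np.ones((4, 5))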
+ + The given decorator is applied to all public methods of `cls` that are + matched by the regular expression `testmatch` + (``testmatch.search(methodname)``). Methods that are private, i.e. start + with an underscore, are ignored. + + Parameters + ---------- + cls : class + Class whose methods to decorate. + decorator : function + Decorator to apply to methods + testmatch : compiled regexp or str, optional + The regular expression. Default value is None, in which case the + nose default (``re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)``) + is used. + If `testmatch` is a string, it is compiled to a regular expression + first. + + """ + if testmatch is None: + testmatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep) + else: + testmatch = re.compile(testmatch) + cls_attr = cls.__dict__ + + # delayed import to reduce startup time + from inspect import isfunction + + methods = [_m for _m in cls_attr.values() if isfunction(_m)] + for function in methods: + try: + if hasattr(function, 'compat_func_name'): + funcname = function.compat_func_name + else: + funcname = function.__name__ + except AttributeError: + # not a function + continue + if testmatch.search(funcname) and not funcname.startswith('_'): + setattr(cls, funcname, decorator(function)) + return + + +def measure(code_str, times=1, label=None): + """ + Return elapsed time for executing code in the namespace of the caller. + + The supplied code string is compiled with the Python builtin ``compile``. + The precision of the timing is 10 milli-seconds. If the code will execute + fast on this timescale, it can be executed many times to get reasonable + timing accuracy. + + Parameters + ---------- + code_str : str + The code to be timed. + times : int, optional + The number of times the code is executed. Default is 1. The code is + only compiled once. + label : str, optional + A label to identify `code_str` with. This is passed into ``compile`` + as the second argument (for run-time error messages). + + Returns + ------- + elapsed : float + Total elapsed time in seconds for executing `code_str` `times` times. + + Examples + -------- + >>> times = 10 + >>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)', times=times) + >>> print("Time for a single execution : ", etime / times, "s") # doctest: +SKIP + Time for a single execution : 0.005 s + + """ + frame = sys._getframe(1) + locs, globs = frame.f_locals, frame.f_globals + + code = compile(code_str, f'Test name: {label} ', 'exec') + i = 0 + elapsed = jiffies() + while i < times: + i += 1 + exec(code, globs, locs) + elapsed = jiffies() - elapsed + return 0.01*elapsed + + +def _assert_valid_refcount(op): + """ + Check that ufuncs don't mishandle refcount of object `1`. + Used in a few regression tests. + """ + if not HAS_REFCOUNT: + return True + + import gc + import numpy as np + + b = np.arange(100*100).reshape(100, 100) + c = b + i = 1 + + gc.disable() + try: + rc = sys.getrefcount(i) + for j in range(15): + d = op(b, c) + assert_(sys.getrefcount(i) >= rc) + finally: + gc.enable() + del d # for pyflakes + + +def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True, + err_msg='', verbose=True): + """ + Raises an AssertionError if two objects are not equal up to desired + tolerance. + + The test is equivalent to ``allclose(actual, desired, rtol, atol)`` (note + that ``allclose`` has different default values). It compares the difference + between `actual` and `desired` to ``atol + rtol * abs(desired)``. + + .. 
versionadded:: 1.5.0 + + Parameters + ---------- + actual : array_like + Array obtained. + desired : array_like + Array desired. + rtol : float, optional + Relative tolerance. + atol : float, optional + Absolute tolerance. + equal_nan : bool, optional. + If True, NaNs will compare equal. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired are not equal up to specified precision. + + See Also + -------- + assert_array_almost_equal_nulp, assert_array_max_ulp + + Examples + -------- + >>> x = [1e-5, 1e-3, 1e-1] + >>> y = np.arccos(np.cos(x)) + >>> np.testing.assert_allclose(x, y, rtol=1e-5, atol=0) + + """ + __tracebackhide__ = True # Hide traceback for py.test + import numpy as np + + def compare(x, y): + return np.core.numeric.isclose(x, y, rtol=rtol, atol=atol, + equal_nan=equal_nan) + + actual, desired = np.asanyarray(actual), np.asanyarray(desired) + header = f'Not equal to tolerance rtol={rtol:g}, atol={atol:g}' + assert_array_compare(compare, actual, desired, err_msg=str(err_msg), + verbose=verbose, header=header, equal_nan=equal_nan) + + +def assert_array_almost_equal_nulp(x, y, nulp=1): + """ + Compare two arrays relatively to their spacing. + + This is a relatively robust method to compare two arrays whose amplitude + is variable. + + Parameters + ---------- + x, y : array_like + Input arrays. + nulp : int, optional + The maximum number of unit in the last place for tolerance (see Notes). + Default is 1. + + Returns + ------- + None + + Raises + ------ + AssertionError + If the spacing between `x` and `y` for one or more elements is larger + than `nulp`. + + See Also + -------- + assert_array_max_ulp : Check that all items of arrays differ in at most + N Units in the Last Place. + spacing : Return the distance between x and the nearest adjacent number. + + Notes + ----- + An assertion is raised if the following condition is not met:: + + abs(x - y) <= nulps * spacing(maximum(abs(x), abs(y))) + + Examples + -------- + >>> x = np.array([1., 1e-10, 1e-20]) + >>> eps = np.finfo(x.dtype).eps + >>> np.testing.assert_array_almost_equal_nulp(x, x*eps/2 + x) + + >>> np.testing.assert_array_almost_equal_nulp(x, x*eps + x) + Traceback (most recent call last): + ... + AssertionError: X and Y are not equal to 1 ULP (max is 2) + + """ + __tracebackhide__ = True # Hide traceback for py.test + import numpy as np + ax = np.abs(x) + ay = np.abs(y) + ref = nulp * np.spacing(np.where(ax > ay, ax, ay)) + if not np.all(np.abs(x-y) <= ref): + if np.iscomplexobj(x) or np.iscomplexobj(y): + msg = "X and Y are not equal to %d ULP" % nulp + else: + max_nulp = np.max(nulp_diff(x, y)) + msg = "X and Y are not equal to %d ULP (max is %g)" % (nulp, max_nulp) + raise AssertionError(msg) + + +def assert_array_max_ulp(a, b, maxulp=1, dtype=None): + """ + Check that all items of arrays differ in at most N Units in the Last Place. + + Parameters + ---------- + a, b : array_like + Input arrays to be compared. + maxulp : int, optional + The maximum number of units in the last place that elements of `a` and + `b` can differ. Default is 1. + dtype : dtype, optional + Data-type to convert `a` and `b` to if given. Default is None. + + Returns + ------- + ret : ndarray + Array containing number of representable floating point numbers between + items in `a` and `b`. 
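To make the ULP criterion concrete: one unit in the last place at 1.0 in float64 is ``np.spacing(1.0)`` (about 2.2e-16), so a value exactly one spacing away satisfies the default ``maxulp=1``:

    import numpy as np

    x = np.array([1.0])
    np.testing.assert_array_max_ulp(x, x + np.spacing(x), maxulp=1)  # passes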
+ + Raises + ------ + AssertionError + If one or more elements differ by more than `maxulp`. + + Notes + ----- + For computing the ULP difference, this API does not differentiate between + various representations of NAN (ULP difference between 0x7fc00000 and 0xffc00000 + is zero). + + See Also + -------- + assert_array_almost_equal_nulp : Compare two arrays relatively to their + spacing. + + Examples + -------- + >>> a = np.linspace(0., 1., 100) + >>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a))) + + """ + __tracebackhide__ = True # Hide traceback for py.test + import numpy as np + ret = nulp_diff(a, b, dtype) + if not np.all(ret <= maxulp): + raise AssertionError("Arrays are not almost equal up to %g " + "ULP (max difference is %g ULP)" % + (maxulp, np.max(ret))) + return ret + + +def nulp_diff(x, y, dtype=None): + """For each item in x and y, return the number of representable floating + points between them. + + Parameters + ---------- + x : array_like + first input array + y : array_like + second input array + dtype : dtype, optional + Data-type to convert `x` and `y` to if given. Default is None. + + Returns + ------- + nulp : array_like + number of representable floating point numbers between each item in x + and y. + + Notes + ----- + For computing the ULP difference, this API does not differentiate between + various representations of NAN (ULP difference between 0x7fc00000 and 0xffc00000 + is zero). + + Examples + -------- + # By definition, epsilon is the smallest number such as 1 + eps != 1, so + # there should be exactly one ULP between 1 and 1 + eps + >>> nulp_diff(1, 1 + np.finfo(x.dtype).eps) + 1.0 + """ + import numpy as np + if dtype: + x = np.asarray(x, dtype=dtype) + y = np.asarray(y, dtype=dtype) + else: + x = np.asarray(x) + y = np.asarray(y) + + t = np.common_type(x, y) + if np.iscomplexobj(x) or np.iscomplexobj(y): + raise NotImplementedError("_nulp not implemented for complex array") + + x = np.array([x], dtype=t) + y = np.array([y], dtype=t) + + x[np.isnan(x)] = np.nan + y[np.isnan(y)] = np.nan + + if not x.shape == y.shape: + raise ValueError("x and y do not have the same shape: %s - %s" % + (x.shape, y.shape)) + + def _diff(rx, ry, vdt): + diff = np.asarray(rx-ry, dtype=vdt) + return np.abs(diff) + + rx = integer_repr(x) + ry = integer_repr(y) + return _diff(rx, ry, t) + + +def _integer_repr(x, vdt, comp): + # Reinterpret binary representation of the float as sign-magnitude: + # take into account two-complement representation + # See also + # https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/ + rx = x.view(vdt) + if not (rx.size == 1): + rx[rx < 0] = comp - rx[rx < 0] + else: + if rx < 0: + rx = comp - rx + + return rx + + +def integer_repr(x): + """Return the signed-magnitude interpretation of the binary representation + of x.""" + import numpy as np + if x.dtype == np.float16: + return _integer_repr(x, np.int16, np.int16(-2**15)) + elif x.dtype == np.float32: + return _integer_repr(x, np.int32, np.int32(-2**31)) + elif x.dtype == np.float64: + return _integer_repr(x, np.int64, np.int64(-2**63)) + else: + raise ValueError(f'Unsupported dtype {x.dtype}') + + +@contextlib.contextmanager +def _assert_warns_context(warning_class, name=None): + __tracebackhide__ = True # Hide traceback for py.test + with suppress_warnings() as sup: + l = sup.record(warning_class) + yield + if not len(l) > 0: + name_str = f' when calling {name}' if name is not None else '' + raise AssertionError("No warning raised" + name_str) + 
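`integer_repr` above maps floats onto a signed-magnitude integer scale on which adjacent representable floats differ by exactly 1; `nulp_diff` is essentially a subtraction on that scale. A minimal sketch, assuming the private import path of this vendored layout:

    import numpy as np
    from numpy.testing._private.utils import integer_repr

    a = np.array([1.0])
    b = a + np.spacing(1.0)                    # next representable float64
    print(integer_repr(b) - integer_repr(a))   # -> [1], i.e. one ULP apart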
+ +def assert_warns(warning_class, *args, **kwargs): + """ + Fail unless the given callable throws the specified warning. + + A warning of class warning_class should be thrown by the callable when + invoked with arguments args and keyword arguments kwargs. + If a different type of warning is thrown, it will not be caught. + + If called with all arguments other than the warning class omitted, may be + used as a context manager: + + with assert_warns(SomeWarning): + do_something() + + The ability to be used as a context manager is new in NumPy v1.11.0. + + .. versionadded:: 1.4.0 + + Parameters + ---------- + warning_class : class + The class defining the warning that `func` is expected to throw. + func : callable, optional + Callable to test + *args : Arguments + Arguments for `func`. + **kwargs : Kwargs + Keyword arguments for `func`. + + Returns + ------- + The value returned by `func`. + + Examples + -------- + >>> import warnings + >>> def deprecated_func(num): + ... warnings.warn("Please upgrade", DeprecationWarning) + ... return num*num + >>> with np.testing.assert_warns(DeprecationWarning): + ... assert deprecated_func(4) == 16 + >>> # or passing a func + >>> ret = np.testing.assert_warns(DeprecationWarning, deprecated_func, 4) + >>> assert ret == 16 + """ + if not args: + return _assert_warns_context(warning_class) + + func = args[0] + args = args[1:] + with _assert_warns_context(warning_class, name=func.__name__): + return func(*args, **kwargs) + + +@contextlib.contextmanager +def _assert_no_warnings_context(name=None): + __tracebackhide__ = True # Hide traceback for py.test + with warnings.catch_warnings(record=True) as l: + warnings.simplefilter('always') + yield + if len(l) > 0: + name_str = f' when calling {name}' if name is not None else '' + raise AssertionError(f'Got warnings{name_str}: {l}') + + +def assert_no_warnings(*args, **kwargs): + """ + Fail if the given callable produces any warnings. + + If called with all arguments omitted, may be used as a context manager: + + with assert_no_warnings(): + do_something() + + The ability to be used as a context manager is new in NumPy v1.11.0. + + .. versionadded:: 1.7.0 + + Parameters + ---------- + func : callable + The callable to test. + \\*args : Arguments + Arguments passed to `func`. + \\*\\*kwargs : Kwargs + Keyword arguments passed to `func`. + + Returns + ------- + The value returned by `func`. 
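Both calling conventions of `assert_no_warnings`, mirroring the `assert_warns` examples above:

    import numpy as np

    with np.testing.assert_no_warnings():        # context-manager form
        np.sqrt(np.float64(4.0))

    ret = np.testing.assert_no_warnings(np.sqrt, np.float64(4.0))  # callable form
    assert ret == 2.0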
+ + """ + if not args: + return _assert_no_warnings_context() + + func = args[0] + args = args[1:] + with _assert_no_warnings_context(name=func.__name__): + return func(*args, **kwargs) + + +def _gen_alignment_data(dtype=float32, type='binary', max_size=24): + """ + generator producing data with different alignment and offsets + to test simd vectorization + + Parameters + ---------- + dtype : dtype + data type to produce + type : string + 'unary': create data for unary operations, creates one input + and output array + 'binary': create data for unary operations, creates two input + and output array + max_size : integer + maximum size of data to produce + + Returns + ------- + if type is 'unary' yields one output, one input array and a message + containing information on the data + if type is 'binary' yields one output array, two input array and a message + containing information on the data + + """ + ufmt = 'unary offset=(%d, %d), size=%d, dtype=%r, %s' + bfmt = 'binary offset=(%d, %d, %d), size=%d, dtype=%r, %s' + for o in range(3): + for s in range(o + 2, max(o + 3, max_size)): + if type == 'unary': + inp = lambda: arange(s, dtype=dtype)[o:] + out = empty((s,), dtype=dtype)[o:] + yield out, inp(), ufmt % (o, o, s, dtype, 'out of place') + d = inp() + yield d, d, ufmt % (o, o, s, dtype, 'in place') + yield out[1:], inp()[:-1], ufmt % \ + (o + 1, o, s - 1, dtype, 'out of place') + yield out[:-1], inp()[1:], ufmt % \ + (o, o + 1, s - 1, dtype, 'out of place') + yield inp()[:-1], inp()[1:], ufmt % \ + (o, o + 1, s - 1, dtype, 'aliased') + yield inp()[1:], inp()[:-1], ufmt % \ + (o + 1, o, s - 1, dtype, 'aliased') + if type == 'binary': + inp1 = lambda: arange(s, dtype=dtype)[o:] + inp2 = lambda: arange(s, dtype=dtype)[o:] + out = empty((s,), dtype=dtype)[o:] + yield out, inp1(), inp2(), bfmt % \ + (o, o, o, s, dtype, 'out of place') + d = inp1() + yield d, d, inp2(), bfmt % \ + (o, o, o, s, dtype, 'in place1') + d = inp2() + yield d, inp1(), d, bfmt % \ + (o, o, o, s, dtype, 'in place2') + yield out[1:], inp1()[:-1], inp2()[:-1], bfmt % \ + (o + 1, o, o, s - 1, dtype, 'out of place') + yield out[:-1], inp1()[1:], inp2()[:-1], bfmt % \ + (o, o + 1, o, s - 1, dtype, 'out of place') + yield out[:-1], inp1()[:-1], inp2()[1:], bfmt % \ + (o, o, o + 1, s - 1, dtype, 'out of place') + yield inp1()[1:], inp1()[:-1], inp2()[:-1], bfmt % \ + (o + 1, o, o, s - 1, dtype, 'aliased') + yield inp1()[:-1], inp1()[1:], inp2()[:-1], bfmt % \ + (o, o + 1, o, s - 1, dtype, 'aliased') + yield inp1()[:-1], inp1()[:-1], inp2()[1:], bfmt % \ + (o, o, o + 1, s - 1, dtype, 'aliased') + + +class IgnoreException(Exception): + "Ignoring this exception due to disabled feature" + pass + + +@contextlib.contextmanager +def tempdir(*args, **kwargs): + """Context manager to provide a temporary test folder. + + All arguments are passed as this to the underlying tempfile.mkdtemp + function. + + """ + tmpdir = mkdtemp(*args, **kwargs) + try: + yield tmpdir + finally: + shutil.rmtree(tmpdir) + + +@contextlib.contextmanager +def temppath(*args, **kwargs): + """Context manager for temporary files. + + Context manager that returns the path to a closed temporary file. Its + parameters are the same as for tempfile.mkstemp and are passed directly + to that function. The underlying file is removed when the context is + exited, so it should be closed at that time. + + Windows does not allow a temporary file to be opened if it is already + open, so the underlying file must be closed after opening before it + can be opened again. 
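`temppath` yields the path of an already-closed temporary file and removes it on exit, which is exactly the close-then-reopen dance the Windows note above calls for:

    import os
    import numpy as np
    from numpy.testing import temppath

    with temppath(suffix='.npy') as path:
        np.save(path, np.arange(3))   # reopen the closed file for writing
        assert os.path.exists(path)
    assert not os.path.exists(path)   # removed when the context exits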
+ + """ + fd, path = mkstemp(*args, **kwargs) + os.close(fd) + try: + yield path + finally: + os.remove(path) + + +class clear_and_catch_warnings(warnings.catch_warnings): + """ Context manager that resets warning registry for catching warnings + + Warnings can be slippery, because, whenever a warning is triggered, Python + adds a ``__warningregistry__`` member to the *calling* module. This makes + it impossible to retrigger the warning in this module, whatever you put in + the warnings filters. This context manager accepts a sequence of `modules` + as a keyword argument to its constructor and: + + * stores and removes any ``__warningregistry__`` entries in given `modules` + on entry; + * resets ``__warningregistry__`` to its previous state on exit. + + This makes it possible to trigger any warning afresh inside the context + manager without disturbing the state of warnings outside. + + For compatibility with Python 3.0, please consider all arguments to be + keyword-only. + + Parameters + ---------- + record : bool, optional + Specifies whether warnings should be captured by a custom + implementation of ``warnings.showwarning()`` and be appended to a list + returned by the context manager. Otherwise None is returned by the + context manager. The objects appended to the list are arguments whose + attributes mirror the arguments to ``showwarning()``. + modules : sequence, optional + Sequence of modules for which to reset warnings registry on entry and + restore on exit. To work correctly, all 'ignore' filters should + filter by one of these modules. + + Examples + -------- + >>> import warnings + >>> with np.testing.clear_and_catch_warnings( + ... modules=[np.core.fromnumeric]): + ... warnings.simplefilter('always') + ... warnings.filterwarnings('ignore', module='np.core.fromnumeric') + ... # do something that raises a warning but ignore those in + ... # np.core.fromnumeric + """ + class_modules = () + + def __init__(self, record=False, modules=()): + self.modules = set(modules).union(self.class_modules) + self._warnreg_copies = {} + super().__init__(record=record) + + def __enter__(self): + for mod in self.modules: + if hasattr(mod, '__warningregistry__'): + mod_reg = mod.__warningregistry__ + self._warnreg_copies[mod] = mod_reg.copy() + mod_reg.clear() + return super().__enter__() + + def __exit__(self, *exc_info): + super().__exit__(*exc_info) + for mod in self.modules: + if hasattr(mod, '__warningregistry__'): + mod.__warningregistry__.clear() + if mod in self._warnreg_copies: + mod.__warningregistry__.update(self._warnreg_copies[mod]) + + +class suppress_warnings: + """ + Context manager and decorator doing much the same as + ``warnings.catch_warnings``. + + However, it also provides a filter mechanism to work around + https://bugs.python.org/issue4180. + + This bug causes Python before 3.4 to not reliably show warnings again + after they have been ignored once (even within catch_warnings). It + means that no "ignore" filter can be used easily, since following + tests might need to see the warning. Additionally it allows easier + specificity for testing warnings and can be nested. + + Parameters + ---------- + forwarding_rule : str, optional + One of "always", "once", "module", or "location". Analogous to + the usual warnings module filter mode, it is useful to reduce + noise mostly on the outmost level. Unsuppressed and unrecorded + warnings will be forwarded based on this rule. Defaults to "always". 
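A sketch of the forwarding rules being described: with ``forwarding_rule="once"``, an unsuppressed warning escapes to the surrounding handler a single time even if raised repeatedly (the warning texts here are placeholders):

    import warnings
    import numpy as np

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        with np.testing.suppress_warnings(forwarding_rule="once") as sup:
            sup.filter(DeprecationWarning)   # fully suppress this category
            for _ in range(3):
                warnings.warn("escapes once", UserWarning)
    assert len(caught) == 1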
+        "location" is equivalent to the warnings "default", match by exact
+        location the warning originated from.
+
+    Notes
+    -----
+    Filters added inside the context manager will be discarded again
+    when leaving it. Upon entering, all filters defined outside a
+    context will be applied automatically.
+
+    When a recording filter is added, matching warnings are stored in the
+    ``log`` attribute as well as in the list returned by ``record``.
+
+    If filters are added and the ``module`` keyword is given, the
+    warning registry of this module will additionally be cleared when
+    applying it, entering the context, or exiting it. This could cause
+    warnings to appear a second time after leaving the context if they
+    were configured to be printed once (default) and were already
+    printed before the context was entered.
+
+    Nesting this context manager will work as expected when the
+    forwarding rule is "always" (default). Unfiltered and unrecorded
+    warnings will be passed out and be matched by the outer level.
+    On the outmost level they will be printed (or caught by another
+    warnings context). The forwarding rule argument can modify this
+    behaviour.
+
+    Like ``catch_warnings`` this context manager is not threadsafe.
+
+    Examples
+    --------
+
+    With a context manager::
+
+        with np.testing.suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, "Some text")
+            sup.filter(module=np.ma.core)
+            log = sup.record(FutureWarning, "Does this occur?")
+            command_giving_warnings()
+            # The FutureWarning was given once, the filtered warnings were
+            # ignored. All other warnings abide outside settings (may be
+            # printed/error)
+            assert_(len(log) == 1)
+            assert_(len(sup.log) == 1)  # also stored in log attribute
+
+    Or as a decorator::
+
+        sup = np.testing.suppress_warnings()
+        sup.filter(module=np.ma.core)  # module must match exactly
+        @sup
+        def some_function():
+            # do something which causes a warning in np.ma.core
+            pass
+    """
+    def __init__(self, forwarding_rule="always"):
+        self._entered = False
+
+        # Suppressions are either instance or defined inside one with block:
+        self._suppressions = []
+
+        if forwarding_rule not in {"always", "module", "once", "location"}:
+            raise ValueError("unsupported forwarding rule.")
+        self._forwarding_rule = forwarding_rule
+
+    def _clear_registries(self):
+        if hasattr(warnings, "_filters_mutated"):
+            # clearing the registry should not be necessary on new pythons,
+            # instead the filters should be mutated.
+            warnings._filters_mutated()
+            return
+        # Simply clear the registry, this should normally be harmless,
+        # note that on new pythons it would be invalidated anyway.
+ for module in self._tmp_modules: + if hasattr(module, "__warningregistry__"): + module.__warningregistry__.clear() + + def _filter(self, category=Warning, message="", module=None, record=False): + if record: + record = [] # The log where to store warnings + else: + record = None + if self._entered: + if module is None: + warnings.filterwarnings( + "always", category=category, message=message) + else: + module_regex = module.__name__.replace('.', r'\.') + '$' + warnings.filterwarnings( + "always", category=category, message=message, + module=module_regex) + self._tmp_modules.add(module) + self._clear_registries() + + self._tmp_suppressions.append( + (category, message, re.compile(message, re.I), module, record)) + else: + self._suppressions.append( + (category, message, re.compile(message, re.I), module, record)) + + return record + + def filter(self, category=Warning, message="", module=None): + """ + Add a new suppressing filter or apply it if the state is entered. + + Parameters + ---------- + category : class, optional + Warning class to filter + message : string, optional + Regular expression matching the warning message. + module : module, optional + Module to filter for. Note that the module (and its file) + must match exactly and cannot be a submodule. This may make + it unreliable for external modules. + + Notes + ----- + When added within a context, filters are only added inside + the context and will be forgotten when the context is exited. + """ + self._filter(category=category, message=message, module=module, + record=False) + + def record(self, category=Warning, message="", module=None): + """ + Append a new recording filter or apply it if the state is entered. + + All warnings matching will be appended to the ``log`` attribute. + + Parameters + ---------- + category : class, optional + Warning class to filter + message : string, optional + Regular expression matching the warning message. + module : module, optional + Module to filter for. Note that the module (and its file) + must match exactly and cannot be a submodule. This may make + it unreliable for external modules. + + Returns + ------- + log : list + A list which will be filled with all matched warnings. + + Notes + ----- + When added within a context, filters are only added inside + the context and will be forgotten when the context is exited. 
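``record`` returns a live list that fills with matching warnings as they are raised inside the context, and the same messages are collected on ``sup.log``:

    import warnings
    import numpy as np

    with np.testing.suppress_warnings() as sup:
        log = sup.record(UserWarning, "matched")
        warnings.warn("matched message", UserWarning)
    assert len(log) == 1
    assert len(sup.log) == 1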
+ """ + return self._filter(category=category, message=message, module=module, + record=True) + + def __enter__(self): + if self._entered: + raise RuntimeError("cannot enter suppress_warnings twice.") + + self._orig_show = warnings.showwarning + self._filters = warnings.filters + warnings.filters = self._filters[:] + + self._entered = True + self._tmp_suppressions = [] + self._tmp_modules = set() + self._forwarded = set() + + self.log = [] # reset global log (no need to keep same list) + + for cat, mess, _, mod, log in self._suppressions: + if log is not None: + del log[:] # clear the log + if mod is None: + warnings.filterwarnings( + "always", category=cat, message=mess) + else: + module_regex = mod.__name__.replace('.', r'\.') + '$' + warnings.filterwarnings( + "always", category=cat, message=mess, + module=module_regex) + self._tmp_modules.add(mod) + warnings.showwarning = self._showwarning + self._clear_registries() + + return self + + def __exit__(self, *exc_info): + warnings.showwarning = self._orig_show + warnings.filters = self._filters + self._clear_registries() + self._entered = False + del self._orig_show + del self._filters + + def _showwarning(self, message, category, filename, lineno, + *args, use_warnmsg=None, **kwargs): + for cat, _, pattern, mod, rec in ( + self._suppressions + self._tmp_suppressions)[::-1]: + if (issubclass(category, cat) and + pattern.match(message.args[0]) is not None): + if mod is None: + # Message and category match, either recorded or ignored + if rec is not None: + msg = WarningMessage(message, category, filename, + lineno, **kwargs) + self.log.append(msg) + rec.append(msg) + return + # Use startswith, because warnings strips the c or o from + # .pyc/.pyo files. + elif mod.__file__.startswith(filename): + # The message and module (filename) match + if rec is not None: + msg = WarningMessage(message, category, filename, + lineno, **kwargs) + self.log.append(msg) + rec.append(msg) + return + + # There is no filter in place, so pass to the outside handler + # unless we should only pass it once + if self._forwarding_rule == "always": + if use_warnmsg is None: + self._orig_show(message, category, filename, lineno, + *args, **kwargs) + else: + self._orig_showmsg(use_warnmsg) + return + + if self._forwarding_rule == "once": + signature = (message.args, category) + elif self._forwarding_rule == "module": + signature = (message.args, category, filename) + elif self._forwarding_rule == "location": + signature = (message.args, category, filename, lineno) + + if signature in self._forwarded: + return + self._forwarded.add(signature) + if use_warnmsg is None: + self._orig_show(message, category, filename, lineno, *args, + **kwargs) + else: + self._orig_showmsg(use_warnmsg) + + def __call__(self, func): + """ + Function decorator to apply certain suppressions to a whole + function. 
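The decorator form implemented by ``__call__``; ``noisy_helper`` below is a hypothetical function used only for illustration:

    import warnings
    import numpy as np

    sup = np.testing.suppress_warnings()
    sup.filter(UserWarning, "noisy")   # applied for each decorated call

    @sup
    def noisy_helper():
        warnings.warn("noisy helper", UserWarning)

    noisy_helper()   # the matching warning is silently discarded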
+ """ + @wraps(func) + def new_func(*args, **kwargs): + with self: + return func(*args, **kwargs) + + return new_func + + +@contextlib.contextmanager +def _assert_no_gc_cycles_context(name=None): + __tracebackhide__ = True # Hide traceback for py.test + + # not meaningful to test if there is no refcounting + if not HAS_REFCOUNT: + yield + return + + assert_(gc.isenabled()) + gc.disable() + gc_debug = gc.get_debug() + try: + for i in range(100): + if gc.collect() == 0: + break + else: + raise RuntimeError( + "Unable to fully collect garbage - perhaps a __del__ method " + "is creating more reference cycles?") + + gc.set_debug(gc.DEBUG_SAVEALL) + yield + # gc.collect returns the number of unreachable objects in cycles that + # were found -- we are checking that no cycles were created in the context + n_objects_in_cycles = gc.collect() + objects_in_cycles = gc.garbage[:] + finally: + del gc.garbage[:] + gc.set_debug(gc_debug) + gc.enable() + + if n_objects_in_cycles: + name_str = f' when calling {name}' if name is not None else '' + raise AssertionError( + "Reference cycles were found{}: {} objects were collected, " + "of which {} are shown below:{}" + .format( + name_str, + n_objects_in_cycles, + len(objects_in_cycles), + ''.join( + "\n {} object with id={}:\n {}".format( + type(o).__name__, + id(o), + pprint.pformat(o).replace('\n', '\n ') + ) for o in objects_in_cycles + ) + ) + ) + + +def assert_no_gc_cycles(*args, **kwargs): + """ + Fail if the given callable produces any reference cycles. + + If called with all arguments omitted, may be used as a context manager: + + with assert_no_gc_cycles(): + do_something() + + .. versionadded:: 1.15.0 + + Parameters + ---------- + func : callable + The callable to test. + \\*args : Arguments + Arguments passed to `func`. + \\*\\*kwargs : Kwargs + Keyword arguments passed to `func`. + + Returns + ------- + Nothing. The result is deliberately discarded to ensure that all cycles + are found. + + """ + if not args: + return _assert_no_gc_cycles_context() + + func = args[0] + args = args[1:] + with _assert_no_gc_cycles_context(name=func.__name__): + func(*args, **kwargs) + +def break_cycles(): + """ + Break reference cycles by calling gc.collect + Objects can call other objects' methods (for instance, another object's + __del__) inside their own __del__. On PyPy, the interpreter only runs + between calls to gc.collect, so multiple calls are needed to completely + release all cycles. + """ + + gc.collect() + if IS_PYPY: + # interpreter runs now, to call deleted objects' __del__ methods + gc.collect() + # two more, just to make sure + gc.collect() + gc.collect() + + +def requires_memory(free_bytes): + """Decorator to skip a test if not enough memory is available""" + import pytest + + def decorator(func): + @wraps(func) + def wrapper(*a, **kw): + msg = check_free_memory(free_bytes) + if msg is not None: + pytest.skip(msg) + + try: + return func(*a, **kw) + except MemoryError: + # Probably ran out of memory regardless: don't regard as failure + pytest.xfail("MemoryError raised") + + return wrapper + + return decorator + + +def check_free_memory(free_bytes): + """ + Check whether `free_bytes` amount of memory is currently free. 
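The ``NPY_AVAILABLE_MEM`` strings mentioned below are parsed by ``_parse_size``, which accepts both decimal and binary suffixes; a quick check, again via the private import path of this vendored layout:

    from numpy.testing._private.utils import _parse_size

    assert _parse_size('12 GB') == 12 * 1000**3
    assert _parse_size('12 GiB') == 12 * 1024**3
    assert _parse_size('0.5 mb') == 500 * 1000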
+ Returns: None if enough memory available, otherwise error message + """ + env_var = 'NPY_AVAILABLE_MEM' + env_value = os.environ.get(env_var) + if env_value is not None: + try: + mem_free = _parse_size(env_value) + except ValueError as exc: + raise ValueError(f'Invalid environment variable {env_var}: {exc}') + + msg = (f'{free_bytes/1e9} GB memory required, but environment variable ' + f'NPY_AVAILABLE_MEM={env_value} set') + else: + mem_free = _get_mem_available() + + if mem_free is None: + msg = ("Could not determine available memory; set NPY_AVAILABLE_MEM " + "environment variable (e.g. NPY_AVAILABLE_MEM=16GB) to run " + "the test.") + mem_free = -1 + else: + msg = f'{free_bytes/1e9} GB memory required, but {mem_free/1e9} GB available' + + return msg if mem_free < free_bytes else None + + +def _parse_size(size_str): + """Convert memory size strings ('12 GB' etc.) to float""" + suffixes = {'': 1, 'b': 1, + 'k': 1000, 'm': 1000**2, 'g': 1000**3, 't': 1000**4, + 'kb': 1000, 'mb': 1000**2, 'gb': 1000**3, 'tb': 1000**4, + 'kib': 1024, 'mib': 1024**2, 'gib': 1024**3, 'tib': 1024**4} + + size_re = re.compile(r'^\s*(\d+|\d+\.\d+)\s*({0})\s*$'.format( + '|'.join(suffixes.keys())), re.I) + + m = size_re.match(size_str.lower()) + if not m or m.group(2) not in suffixes: + raise ValueError(f'value {size_str!r} not a valid size') + return int(float(m.group(1)) * suffixes[m.group(2)]) + + +def _get_mem_available(): + """Return available memory in bytes, or None if unknown.""" + try: + import psutil + return psutil.virtual_memory().available + except (ImportError, AttributeError): + pass + + if sys.platform.startswith('linux'): + info = {} + with open('/proc/meminfo', 'r') as f: + for line in f: + p = line.split() + info[p[0].strip(':').lower()] = int(p[1]) * 1024 + + if 'memavailable' in info: + # Linux >= 3.14 + return info['memavailable'] + else: + return info['memfree'] + info['cached'] + + return None + + +def _no_tracing(func): + """ + Decorator to temporarily turn off tracing for the duration of a test. + Needed in tests that check refcounting, otherwise the tracing itself + influences the refcounts + """ + if not hasattr(sys, 'gettrace'): + return func + else: + @wraps(func) + def wrapper(*args, **kwargs): + original_trace = sys.gettrace() + try: + sys.settrace(None) + return func(*args, **kwargs) + finally: + sys.settrace(original_trace) + return wrapper + diff --git a/MLPY/Lib/site-packages/numpy/testing/print_coercion_tables.py b/MLPY/Lib/site-packages/numpy/testing/print_coercion_tables.py new file mode 100644 index 0000000000000000000000000000000000000000..a7946c200c7540b5055565817bd26e00843431f8 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/testing/print_coercion_tables.py @@ -0,0 +1,199 @@ +#!/usr/bin/env python3 +"""Prints type-coercion tables for the built-in NumPy types + +""" +import numpy as np +from collections import namedtuple + +# Generic object that can be added, but doesn't do anything else +class GenericObject: + def __init__(self, v): + self.v = v + + def __add__(self, other): + return self + + def __radd__(self, other): + return self + + dtype = np.dtype('O') + +def print_cancast_table(ntypes): + print('X', end=' ') + for char in ntypes: + print(char, end=' ') + print() + for row in ntypes: + print(row, end=' ') + for col in ntypes: + if np.can_cast(row, col, "equiv"): + cast = "#" + elif np.can_cast(row, col, "safe"): + cast = "=" + elif np.can_cast(row, col, "same_kind"): + cast = "~" + elif np.can_cast(row, col, "unsafe"): + cast = "." 
+ else: + cast = " " + print(cast, end=' ') + print() + +def print_coercion_table(ntypes, inputfirstvalue, inputsecondvalue, firstarray, use_promote_types=False): + print('+', end=' ') + for char in ntypes: + print(char, end=' ') + print() + for row in ntypes: + if row == 'O': + rowtype = GenericObject + else: + rowtype = np.obj2sctype(row) + + print(row, end=' ') + for col in ntypes: + if col == 'O': + coltype = GenericObject + else: + coltype = np.obj2sctype(col) + try: + if firstarray: + rowvalue = np.array([rowtype(inputfirstvalue)], dtype=rowtype) + else: + rowvalue = rowtype(inputfirstvalue) + colvalue = coltype(inputsecondvalue) + if use_promote_types: + char = np.promote_types(rowvalue.dtype, colvalue.dtype).char + else: + value = np.add(rowvalue, colvalue) + if isinstance(value, np.ndarray): + char = value.dtype.char + else: + char = np.dtype(type(value)).char + except ValueError: + char = '!' + except OverflowError: + char = '@' + except TypeError: + char = '#' + print(char, end=' ') + print() + + +def print_new_cast_table(*, can_cast=True, legacy=False, flags=False): + """Prints new casts, the values given are default "can-cast" values, not + actual ones. + """ + from numpy.core._multiarray_tests import get_all_cast_information + + cast_table = { + 0 : "#", # No cast (classify as equivalent here) + 1 : "#", # equivalent casting + 2 : "=", # safe casting + 3 : "~", # same-kind casting + 4 : ".", # unsafe casting + } + flags_table = { + 0 : "▗", 7: "█", + 1: "▚", 2: "▐", 4: "▄", + 3: "▜", 5: "▙", + 6: "▟", + } + + cast_info = namedtuple("cast_info", ["can_cast", "legacy", "flags"]) + no_cast_info = cast_info(" ", " ", " ") + + casts = get_all_cast_information() + table = {} + dtypes = set() + for cast in casts: + dtypes.add(cast["from"]) + dtypes.add(cast["to"]) + + if cast["from"] not in table: + table[cast["from"]] = {} + to_dict = table[cast["from"]] + + can_cast = cast_table[cast["casting"]] + legacy = "L" if cast["legacy"] else "." + flags = 0 + if cast["requires_pyapi"]: + flags |= 1 + if cast["supports_unaligned"]: + flags |= 2 + if cast["no_floatingpoint_errors"]: + flags |= 4 + + flags = flags_table[flags] + to_dict[cast["to"]] = cast_info(can_cast=can_cast, legacy=legacy, flags=flags) + + # The np.dtype(x.type) is a bit strange, because dtype classes do + # not expose much yet. + types = np.typecodes["All"] + def sorter(x): + # This is a bit weird hack, to get a table as close as possible to + # the one printing all typecodes (but expecting user-dtypes). + dtype = np.dtype(x.type) + try: + indx = types.index(dtype.char) + except ValueError: + indx = np.inf + return (indx, dtype.char) + + dtypes = sorted(dtypes, key=sorter) + + def print_table(field="can_cast"): + print('X', end=' ') + for dt in dtypes: + print(np.dtype(dt.type).char, end=' ') + print() + for from_dt in dtypes: + print(np.dtype(from_dt.type).char, end=' ') + row = table.get(from_dt, {}) + for to_dt in dtypes: + print(getattr(row.get(to_dt, no_cast_info), field), end=' ') + print() + + if can_cast: + # Print the actual table: + print() + print("Casting: # is equivalent, = is safe, ~ is same-kind, and . is unsafe") + print() + print_table("can_cast") + + if legacy: + print() + print("L denotes a legacy cast . 
a non-legacy one.") + print() + print_table("legacy") + + if flags: + print() + print(f"{flags_table[0]}: no flags, {flags_table[1]}: PyAPI, " + f"{flags_table[2]}: supports unaligned, {flags_table[4]}: no-float-errors") + print() + print_table("flags") + + +if __name__ == '__main__': + print("can cast") + print_cancast_table(np.typecodes['All']) + print() + print("In these tables, ValueError is '!', OverflowError is '@', TypeError is '#'") + print() + print("scalar + scalar") + print_coercion_table(np.typecodes['All'], 0, 0, False) + print() + print("scalar + neg scalar") + print_coercion_table(np.typecodes['All'], 0, -1, False) + print() + print("array + scalar") + print_coercion_table(np.typecodes['All'], 0, 0, True) + print() + print("array + neg scalar") + print_coercion_table(np.typecodes['All'], 0, -1, True) + print() + print("promote_types") + print_coercion_table(np.typecodes['All'], 0, 0, False, True) + print("New casting type promotion:") + print_new_cast_table(can_cast=True, legacy=True, flags=True) diff --git a/MLPY/Lib/site-packages/numpy/testing/setup.py b/MLPY/Lib/site-packages/numpy/testing/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..a5d7c23eaca31eb6279e5046e5462f92cd0f1993 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/testing/setup.py @@ -0,0 +1,20 @@ +#!/usr/bin/env python3 + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('testing', parent_package, top_path) + + config.add_subpackage('_private') + config.add_subpackage('tests') + config.add_data_files('*.pyi') + return config + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(maintainer="NumPy Developers", + maintainer_email="numpy-dev@numpy.org", + description="NumPy test module", + url="https://www.numpy.org", + license="NumPy License (BSD Style)", + configuration=configuration, + ) diff --git a/MLPY/Lib/site-packages/numpy/testing/tests/__init__.py b/MLPY/Lib/site-packages/numpy/testing/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/numpy/testing/tests/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/testing/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6acb5d87977b3960311dc5cf25675aea10b88f2 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/testing/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/testing/tests/__pycache__/test_doctesting.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/testing/tests/__pycache__/test_doctesting.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c1be840648e708a18b4c181872d201b04df1fc50 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/testing/tests/__pycache__/test_doctesting.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/testing/tests/__pycache__/test_utils.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/testing/tests/__pycache__/test_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3000624f6ca23fc39ef4db0ee883c85d8be56184 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/testing/tests/__pycache__/test_utils.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/testing/tests/test_doctesting.py b/MLPY/Lib/site-packages/numpy/testing/tests/test_doctesting.py new file mode 100644 
index 0000000000000000000000000000000000000000..2c70527ef2b1998ec11384694c7ffe7b7abcc51b --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/testing/tests/test_doctesting.py @@ -0,0 +1,57 @@ +""" Doctests for NumPy-specific nose/doctest modifications + +""" +#FIXME: None of these tests is run, because 'check' is not a recognized +# testing prefix. + +# try the #random directive on the output line +def check_random_directive(): + ''' + >>> 2+2 + #random: may vary on your system + ''' + +# check the implicit "import numpy as np" +def check_implicit_np(): + ''' + >>> np.array([1,2,3]) + array([1, 2, 3]) + ''' + +# there's some extraneous whitespace around the correct responses +def check_whitespace_enabled(): + ''' + # whitespace after the 3 + >>> 1+2 + 3 + + # whitespace before the 7 + >>> 3+4 + 7 + ''' + +def check_empty_output(): + """ Check that no output does not cause an error. + + This is related to nose bug 445; the numpy plugin changed the + doctest-result-variable default and therefore hit this bug: + http://code.google.com/p/python-nose/issues/detail?id=445 + + >>> a = 10 + """ + +def check_skip(): + """ Check skip directive + + The test below should not run + + >>> 1/0 #doctest: +SKIP + """ + + +if __name__ == '__main__': + # Run tests outside numpy test rig + import nose + from numpy.testing.noseclasses import NumpyDoctest + argv = ['', __file__, '--with-numpydoctest'] + nose.core.TestProgram(argv=argv, addplugins=[NumpyDoctest()]) diff --git a/MLPY/Lib/site-packages/numpy/testing/tests/test_utils.py b/MLPY/Lib/site-packages/numpy/testing/tests/test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3e8f15971cec9774d184832f7dc10db9b90ae7f9 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/testing/tests/test_utils.py @@ -0,0 +1,1614 @@ +import warnings +import sys +import os +import itertools +import pytest +import weakref + +import numpy as np +from numpy.testing import ( + assert_equal, assert_array_equal, assert_almost_equal, + assert_array_almost_equal, assert_array_less, build_err_msg, raises, + assert_raises, assert_warns, assert_no_warnings, assert_allclose, + assert_approx_equal, assert_array_almost_equal_nulp, assert_array_max_ulp, + clear_and_catch_warnings, suppress_warnings, assert_string_equal, assert_, + tempdir, temppath, assert_no_gc_cycles, HAS_REFCOUNT + ) +from numpy.core.overrides import ARRAY_FUNCTION_ENABLED + + +class _GenericTest: + + def _test_equal(self, a, b): + self._assert_func(a, b) + + def _test_not_equal(self, a, b): + with assert_raises(AssertionError): + self._assert_func(a, b) + + def test_array_rank1_eq(self): + """Test two equal array of rank 1 are found equal.""" + a = np.array([1, 2]) + b = np.array([1, 2]) + + self._test_equal(a, b) + + def test_array_rank1_noteq(self): + """Test two different array of rank 1 are found not equal.""" + a = np.array([1, 2]) + b = np.array([2, 2]) + + self._test_not_equal(a, b) + + def test_array_rank2_eq(self): + """Test two equal array of rank 2 are found equal.""" + a = np.array([[1, 2], [3, 4]]) + b = np.array([[1, 2], [3, 4]]) + + self._test_equal(a, b) + + def test_array_diffshape(self): + """Test two arrays with different shapes are found not equal.""" + a = np.array([1, 2]) + b = np.array([[1, 2], [1, 2]]) + + self._test_not_equal(a, b) + + def test_objarray(self): + """Test object arrays.""" + a = np.array([1, 1], dtype=object) + self._test_equal(a, 1) + + def test_array_likes(self): + self._test_equal([1, 2, 3], (1, 2, 3)) + + +class TestArrayEqual(_GenericTest): + + def 
setup(self): + self._assert_func = assert_array_equal + + def test_generic_rank1(self): + """Test rank 1 array for all dtypes.""" + def foo(t): + a = np.empty(2, t) + a.fill(1) + b = a.copy() + c = a.copy() + c.fill(0) + self._test_equal(a, b) + self._test_not_equal(c, b) + + # Test numeric types and object + for t in '?bhilqpBHILQPfdgFDG': + foo(t) + + # Test strings + for t in ['S1', 'U1']: + foo(t) + + def test_0_ndim_array(self): + x = np.array(473963742225900817127911193656584771) + y = np.array(18535119325151578301457182298393896) + assert_raises(AssertionError, self._assert_func, x, y) + + y = x + self._assert_func(x, y) + + x = np.array(43) + y = np.array(10) + assert_raises(AssertionError, self._assert_func, x, y) + + y = x + self._assert_func(x, y) + + def test_generic_rank3(self): + """Test rank 3 array for all dtypes.""" + def foo(t): + a = np.empty((4, 2, 3), t) + a.fill(1) + b = a.copy() + c = a.copy() + c.fill(0) + self._test_equal(a, b) + self._test_not_equal(c, b) + + # Test numeric types and object + for t in '?bhilqpBHILQPfdgFDG': + foo(t) + + # Test strings + for t in ['S1', 'U1']: + foo(t) + + def test_nan_array(self): + """Test arrays with nan values in them.""" + a = np.array([1, 2, np.nan]) + b = np.array([1, 2, np.nan]) + + self._test_equal(a, b) + + c = np.array([1, 2, 3]) + self._test_not_equal(c, b) + + def test_string_arrays(self): + """Test two arrays with different shapes are found not equal.""" + a = np.array(['floupi', 'floupa']) + b = np.array(['floupi', 'floupa']) + + self._test_equal(a, b) + + c = np.array(['floupipi', 'floupa']) + + self._test_not_equal(c, b) + + def test_recarrays(self): + """Test record arrays.""" + a = np.empty(2, [('floupi', float), ('floupa', float)]) + a['floupi'] = [1, 2] + a['floupa'] = [1, 2] + b = a.copy() + + self._test_equal(a, b) + + c = np.empty(2, [('floupipi', float), ('floupa', float)]) + c['floupipi'] = a['floupi'].copy() + c['floupa'] = a['floupa'].copy() + + with suppress_warnings() as sup: + l = sup.record(FutureWarning, message="elementwise == ") + self._test_not_equal(c, b) + assert_equal(len(l), 1) + + def test_masked_nan_inf(self): + # Regression test for gh-11121 + a = np.ma.MaskedArray([3., 4., 6.5], mask=[False, True, False]) + b = np.array([3., np.nan, 6.5]) + self._test_equal(a, b) + self._test_equal(b, a) + a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, False, False]) + b = np.array([np.inf, 4., 6.5]) + self._test_equal(a, b) + self._test_equal(b, a) + + def test_subclass_that_overrides_eq(self): + # While we cannot guarantee testing functions will always work for + # subclasses, the tests should ideally rely only on subclasses having + # comparison operators, not on them being able to store booleans + # (which, e.g., astropy Quantity cannot usefully do). See gh-8452. 
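+ # The sketch that follows returns a plain Python bool from __eq__/__ne__,
+ # the minimal comparison contract the assertion helpers must cope with: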
+ class MyArray(np.ndarray): + def __eq__(self, other): + return bool(np.equal(self, other).all()) + + def __ne__(self, other): + return not self == other + + a = np.array([1., 2.]).view(MyArray) + b = np.array([2., 3.]).view(MyArray) + assert_(type(a == a), bool) + assert_(a == a) + assert_(a != b) + self._test_equal(a, a) + self._test_not_equal(a, b) + self._test_not_equal(b, a) + + @pytest.mark.skipif( + not ARRAY_FUNCTION_ENABLED, reason='requires __array_function__') + def test_subclass_that_does_not_implement_npall(self): + class MyArray(np.ndarray): + def __array_function__(self, *args, **kwargs): + return NotImplemented + + a = np.array([1., 2.]).view(MyArray) + b = np.array([2., 3.]).view(MyArray) + with assert_raises(TypeError): + np.all(a) + self._test_equal(a, a) + self._test_not_equal(a, b) + self._test_not_equal(b, a) + + +class TestBuildErrorMessage: + + def test_build_err_msg_defaults(self): + x = np.array([1.00001, 2.00002, 3.00003]) + y = np.array([1.00002, 2.00003, 3.00004]) + err_msg = 'There is a mismatch' + + a = build_err_msg([x, y], err_msg) + b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array([' + '1.00001, 2.00002, 3.00003])\n DESIRED: array([1.00002, ' + '2.00003, 3.00004])') + assert_equal(a, b) + + def test_build_err_msg_no_verbose(self): + x = np.array([1.00001, 2.00002, 3.00003]) + y = np.array([1.00002, 2.00003, 3.00004]) + err_msg = 'There is a mismatch' + + a = build_err_msg([x, y], err_msg, verbose=False) + b = '\nItems are not equal: There is a mismatch' + assert_equal(a, b) + + def test_build_err_msg_custom_names(self): + x = np.array([1.00001, 2.00002, 3.00003]) + y = np.array([1.00002, 2.00003, 3.00004]) + err_msg = 'There is a mismatch' + + a = build_err_msg([x, y], err_msg, names=('FOO', 'BAR')) + b = ('\nItems are not equal: There is a mismatch\n FOO: array([' + '1.00001, 2.00002, 3.00003])\n BAR: array([1.00002, 2.00003, ' + '3.00004])') + assert_equal(a, b) + + def test_build_err_msg_custom_precision(self): + x = np.array([1.000000001, 2.00002, 3.00003]) + y = np.array([1.000000002, 2.00003, 3.00004]) + err_msg = 'There is a mismatch' + + a = build_err_msg([x, y], err_msg, precision=10) + b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array([' + '1.000000001, 2.00002 , 3.00003 ])\n DESIRED: array([' + '1.000000002, 2.00003 , 3.00004 ])') + assert_equal(a, b) + + +class TestEqual(TestArrayEqual): + + def setup(self): + self._assert_func = assert_equal + + def test_nan_items(self): + self._assert_func(np.nan, np.nan) + self._assert_func([np.nan], [np.nan]) + self._test_not_equal(np.nan, [np.nan]) + self._test_not_equal(np.nan, 1) + + def test_inf_items(self): + self._assert_func(np.inf, np.inf) + self._assert_func([np.inf], [np.inf]) + self._test_not_equal(np.inf, [np.inf]) + + def test_datetime(self): + self._test_equal( + np.datetime64("2017-01-01", "s"), + np.datetime64("2017-01-01", "s") + ) + self._test_equal( + np.datetime64("2017-01-01", "s"), + np.datetime64("2017-01-01", "m") + ) + + # gh-10081 + self._test_not_equal( + np.datetime64("2017-01-01", "s"), + np.datetime64("2017-01-02", "s") + ) + self._test_not_equal( + np.datetime64("2017-01-01", "s"), + np.datetime64("2017-01-02", "m") + ) + + def test_nat_items(self): + # not a datetime + nadt_no_unit = np.datetime64("NaT") + nadt_s = np.datetime64("NaT", "s") + nadt_d = np.datetime64("NaT", "ns") + # not a timedelta + natd_no_unit = np.timedelta64("NaT") + natd_s = np.timedelta64("NaT", "s") + natd_d = np.timedelta64("NaT", "ns") + + dts = [nadt_no_unit, nadt_s, 
nadt_d] + tds = [natd_no_unit, natd_s, natd_d] + for a, b in itertools.product(dts, dts): + self._assert_func(a, b) + self._assert_func([a], [b]) + self._test_not_equal([a], b) + + for a, b in itertools.product(tds, tds): + self._assert_func(a, b) + self._assert_func([a], [b]) + self._test_not_equal([a], b) + + for a, b in itertools.product(tds, dts): + self._test_not_equal(a, b) + self._test_not_equal(a, [b]) + self._test_not_equal([a], [b]) + self._test_not_equal([a], np.datetime64("2017-01-01", "s")) + self._test_not_equal([b], np.datetime64("2017-01-01", "s")) + self._test_not_equal([a], np.timedelta64(123, "s")) + self._test_not_equal([b], np.timedelta64(123, "s")) + + def test_non_numeric(self): + self._assert_func('ab', 'ab') + self._test_not_equal('ab', 'abb') + + def test_complex_item(self): + self._assert_func(complex(1, 2), complex(1, 2)) + self._assert_func(complex(1, np.nan), complex(1, np.nan)) + self._test_not_equal(complex(1, np.nan), complex(1, 2)) + self._test_not_equal(complex(np.nan, 1), complex(1, np.nan)) + self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2)) + + def test_negative_zero(self): + self._test_not_equal(np.PZERO, np.NZERO) + + def test_complex(self): + x = np.array([complex(1, 2), complex(1, np.nan)]) + y = np.array([complex(1, 2), complex(1, 2)]) + self._assert_func(x, x) + self._test_not_equal(x, y) + + def test_object(self): + #gh-12942 + import datetime + a = np.array([datetime.datetime(2000, 1, 1), + datetime.datetime(2000, 1, 2)]) + self._test_not_equal(a, a[::-1]) + + +class TestArrayAlmostEqual(_GenericTest): + + def setup(self): + self._assert_func = assert_array_almost_equal + + def test_closeness(self): + # Note that in the course of time we ended up with + # `abs(x - y) < 1.5 * 10**(-decimal)` + # instead of the previously documented + # `abs(x - y) < 0.5 * 10**(-decimal)` + # so this check serves to preserve the wrongness. + + # test scalars + self._assert_func(1.499999, 0.0, decimal=0) + assert_raises(AssertionError, + lambda: self._assert_func(1.5, 0.0, decimal=0)) + + # test arrays + self._assert_func([1.499999], [0.0], decimal=0) + assert_raises(AssertionError, + lambda: self._assert_func([1.5], [0.0], decimal=0)) + + def test_simple(self): + x = np.array([1234.2222]) + y = np.array([1234.2223]) + + self._assert_func(x, y, decimal=3) + self._assert_func(x, y, decimal=4) + assert_raises(AssertionError, + lambda: self._assert_func(x, y, decimal=5)) + + def test_nan(self): + anan = np.array([np.nan]) + aone = np.array([1]) + ainf = np.array([np.inf]) + self._assert_func(anan, anan) + assert_raises(AssertionError, + lambda: self._assert_func(anan, aone)) + assert_raises(AssertionError, + lambda: self._assert_func(anan, ainf)) + assert_raises(AssertionError, + lambda: self._assert_func(ainf, anan)) + + def test_inf(self): + a = np.array([[1., 2.], [3., 4.]]) + b = a.copy() + a[0, 0] = np.inf + assert_raises(AssertionError, + lambda: self._assert_func(a, b)) + b[0, 0] = -np.inf + assert_raises(AssertionError, + lambda: self._assert_func(a, b)) + + def test_subclass(self): + a = np.array([[1., 2.], [3., 4.]]) + b = np.ma.masked_array([[1., 2.], [0., 4.]], + [[False, False], [True, False]]) + self._assert_func(a, b) + self._assert_func(b, a) + self._assert_func(b, b) + + # Test fully masked as well (see gh-11123). 
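+ # A fully masked scalar or array leaves no unmasked entries to compare,
+ # so equality is expected to hold against arbitrary values, and in both
+ # argument orders: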
+ a = np.ma.MaskedArray(3.5, mask=True) + b = np.array([3., 4., 6.5]) + self._test_equal(a, b) + self._test_equal(b, a) + a = np.ma.masked + b = np.array([3., 4., 6.5]) + self._test_equal(a, b) + self._test_equal(b, a) + a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True]) + b = np.array([1., 2., 3.]) + self._test_equal(a, b) + self._test_equal(b, a) + a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True]) + b = np.array(1.) + self._test_equal(a, b) + self._test_equal(b, a) + + def test_subclass_that_cannot_be_bool(self): + # While we cannot guarantee testing functions will always work for + # subclasses, the tests should ideally rely only on subclasses having + # comparison operators, not on them being able to store booleans + # (which, e.g., astropy Quantity cannot usefully do). See gh-8452. + class MyArray(np.ndarray): + def __eq__(self, other): + return super().__eq__(other).view(np.ndarray) + + def __lt__(self, other): + return super().__lt__(other).view(np.ndarray) + + def all(self, *args, **kwargs): + raise NotImplementedError + + a = np.array([1., 2.]).view(MyArray) + self._assert_func(a, a) + + +class TestAlmostEqual(_GenericTest): + + def setup(self): + self._assert_func = assert_almost_equal + + def test_closeness(self): + # Note that in the course of time we ended up with + # `abs(x - y) < 1.5 * 10**(-decimal)` + # instead of the previously documented + # `abs(x - y) < 0.5 * 10**(-decimal)` + # so this check serves to preserve the wrongness. + + # test scalars + self._assert_func(1.499999, 0.0, decimal=0) + assert_raises(AssertionError, + lambda: self._assert_func(1.5, 0.0, decimal=0)) + + # test arrays + self._assert_func([1.499999], [0.0], decimal=0) + assert_raises(AssertionError, + lambda: self._assert_func([1.5], [0.0], decimal=0)) + + def test_nan_item(self): + self._assert_func(np.nan, np.nan) + assert_raises(AssertionError, + lambda: self._assert_func(np.nan, 1)) + assert_raises(AssertionError, + lambda: self._assert_func(np.nan, np.inf)) + assert_raises(AssertionError, + lambda: self._assert_func(np.inf, np.nan)) + + def test_inf_item(self): + self._assert_func(np.inf, np.inf) + self._assert_func(-np.inf, -np.inf) + assert_raises(AssertionError, + lambda: self._assert_func(np.inf, 1)) + assert_raises(AssertionError, + lambda: self._assert_func(-np.inf, np.inf)) + + def test_simple_item(self): + self._test_not_equal(1, 2) + + def test_complex_item(self): + self._assert_func(complex(1, 2), complex(1, 2)) + self._assert_func(complex(1, np.nan), complex(1, np.nan)) + self._assert_func(complex(np.inf, np.nan), complex(np.inf, np.nan)) + self._test_not_equal(complex(1, np.nan), complex(1, 2)) + self._test_not_equal(complex(np.nan, 1), complex(1, np.nan)) + self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2)) + + def test_complex(self): + x = np.array([complex(1, 2), complex(1, np.nan)]) + z = np.array([complex(1, 2), complex(np.nan, 1)]) + y = np.array([complex(1, 2), complex(1, 2)]) + self._assert_func(x, x) + self._test_not_equal(x, y) + self._test_not_equal(x, z) + + def test_error_message(self): + """Check the message is formatted correctly for the decimal value. 
+ Also check the message when input includes inf or nan (gh12200)""" + x = np.array([1.00000000001, 2.00000000002, 3.00003]) + y = np.array([1.00000000002, 2.00000000003, 3.00004]) + + # Test with a different amount of decimal digits + with pytest.raises(AssertionError) as exc_info: + self._assert_func(x, y, decimal=12) + msgs = str(exc_info.value).split('\n') + assert_equal(msgs[3], 'Mismatched elements: 3 / 3 (100%)') + assert_equal(msgs[4], 'Max absolute difference: 1.e-05') + assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06') + assert_equal( + msgs[6], + ' x: array([1.00000000001, 2.00000000002, 3.00003 ])') + assert_equal( + msgs[7], + ' y: array([1.00000000002, 2.00000000003, 3.00004 ])') + + # With the default value of decimal digits, only the 3rd element + # differs. Note that we only check for the formatting of the arrays + # themselves. + with pytest.raises(AssertionError) as exc_info: + self._assert_func(x, y) + msgs = str(exc_info.value).split('\n') + assert_equal(msgs[3], 'Mismatched elements: 1 / 3 (33.3%)') + assert_equal(msgs[4], 'Max absolute difference: 1.e-05') + assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06') + assert_equal(msgs[6], ' x: array([1. , 2. , 3.00003])') + assert_equal(msgs[7], ' y: array([1. , 2. , 3.00004])') + + # Check the error message when input includes inf + x = np.array([np.inf, 0]) + y = np.array([np.inf, 1]) + with pytest.raises(AssertionError) as exc_info: + self._assert_func(x, y) + msgs = str(exc_info.value).split('\n') + assert_equal(msgs[3], 'Mismatched elements: 1 / 2 (50%)') + assert_equal(msgs[4], 'Max absolute difference: 1.') + assert_equal(msgs[5], 'Max relative difference: 1.') + assert_equal(msgs[6], ' x: array([inf, 0.])') + assert_equal(msgs[7], ' y: array([inf, 1.])') + + # Check the error message when dividing by zero + x = np.array([1, 2]) + y = np.array([0, 0]) + with pytest.raises(AssertionError) as exc_info: + self._assert_func(x, y) + msgs = str(exc_info.value).split('\n') + assert_equal(msgs[3], 'Mismatched elements: 2 / 2 (100%)') + assert_equal(msgs[4], 'Max absolute difference: 2') + assert_equal(msgs[5], 'Max relative difference: inf') + + def test_error_message_2(self): + """Check the message is formatted correctly when either x or y is a scalar.""" + x = 2 + y = np.ones(20) + with pytest.raises(AssertionError) as exc_info: + self._assert_func(x, y) + msgs = str(exc_info.value).split('\n') + assert_equal(msgs[3], 'Mismatched elements: 20 / 20 (100%)') + assert_equal(msgs[4], 'Max absolute difference: 1.') + assert_equal(msgs[5], 'Max relative difference: 1.') + + y = 2 + x = np.ones(20) + with pytest.raises(AssertionError) as exc_info: + self._assert_func(x, y) + msgs = str(exc_info.value).split('\n') + assert_equal(msgs[3], 'Mismatched elements: 20 / 20 (100%)') + assert_equal(msgs[4], 'Max absolute difference: 1.') + assert_equal(msgs[5], 'Max relative difference: 0.5') + + def test_subclass_that_cannot_be_bool(self): + # While we cannot guarantee testing functions will always work for + # subclasses, the tests should ideally rely only on subclasses having + # comparison operators, not on them being able to store booleans + # (which, e.g., astropy Quantity cannot usefully do). See gh-8452. 
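+ # Here the comparison results stay plain ndarrays and .all() raises, so
+ # the helper has to perform the reduction itself rather than rely on the
+ # subclass collapsing to a boolean: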
+ class MyArray(np.ndarray): + def __eq__(self, other): + return super().__eq__(other).view(np.ndarray) + + def __lt__(self, other): + return super().__lt__(other).view(np.ndarray) + + def all(self, *args, **kwargs): + raise NotImplementedError + + a = np.array([1., 2.]).view(MyArray) + self._assert_func(a, a) + + +class TestApproxEqual: + + def setup(self): + self._assert_func = assert_approx_equal + + def test_simple_0d_arrays(self): + x = np.array(1234.22) + y = np.array(1234.23) + + self._assert_func(x, y, significant=5) + self._assert_func(x, y, significant=6) + assert_raises(AssertionError, + lambda: self._assert_func(x, y, significant=7)) + + def test_simple_items(self): + x = 1234.22 + y = 1234.23 + + self._assert_func(x, y, significant=4) + self._assert_func(x, y, significant=5) + self._assert_func(x, y, significant=6) + assert_raises(AssertionError, + lambda: self._assert_func(x, y, significant=7)) + + def test_nan_array(self): + anan = np.array(np.nan) + aone = np.array(1) + ainf = np.array(np.inf) + self._assert_func(anan, anan) + assert_raises(AssertionError, lambda: self._assert_func(anan, aone)) + assert_raises(AssertionError, lambda: self._assert_func(anan, ainf)) + assert_raises(AssertionError, lambda: self._assert_func(ainf, anan)) + + def test_nan_items(self): + anan = np.array(np.nan) + aone = np.array(1) + ainf = np.array(np.inf) + self._assert_func(anan, anan) + assert_raises(AssertionError, lambda: self._assert_func(anan, aone)) + assert_raises(AssertionError, lambda: self._assert_func(anan, ainf)) + assert_raises(AssertionError, lambda: self._assert_func(ainf, anan)) + + +class TestArrayAssertLess: + + def setup(self): + self._assert_func = assert_array_less + + def test_simple_arrays(self): + x = np.array([1.1, 2.2]) + y = np.array([1.2, 2.3]) + + self._assert_func(x, y) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + y = np.array([1.0, 2.3]) + + assert_raises(AssertionError, lambda: self._assert_func(x, y)) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + def test_rank2(self): + x = np.array([[1.1, 2.2], [3.3, 4.4]]) + y = np.array([[1.2, 2.3], [3.4, 4.5]]) + + self._assert_func(x, y) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + y = np.array([[1.0, 2.3], [3.4, 4.5]]) + + assert_raises(AssertionError, lambda: self._assert_func(x, y)) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + def test_rank3(self): + x = np.ones(shape=(2, 2, 2)) + y = np.ones(shape=(2, 2, 2))+1 + + self._assert_func(x, y) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + y[0, 0, 0] = 0 + + assert_raises(AssertionError, lambda: self._assert_func(x, y)) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + def test_simple_items(self): + x = 1.1 + y = 2.2 + + self._assert_func(x, y) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + y = np.array([2.2, 3.3]) + + self._assert_func(x, y) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + y = np.array([1.0, 3.3]) + + assert_raises(AssertionError, lambda: self._assert_func(x, y)) + + def test_nan_noncompare(self): + anan = np.array(np.nan) + aone = np.array(1) + ainf = np.array(np.inf) + self._assert_func(anan, anan) + assert_raises(AssertionError, lambda: self._assert_func(aone, anan)) + assert_raises(AssertionError, lambda: self._assert_func(anan, aone)) + assert_raises(AssertionError, lambda: self._assert_func(anan, ainf)) + assert_raises(AssertionError, lambda: self._assert_func(ainf, anan)) + + 
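# Note the asymmetry above: nan-vs-nan passes, while nan against any
+ # finite or infinite value fails in either argument order.
+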
def test_nan_noncompare_array(self): + x = np.array([1.1, 2.2, 3.3]) + anan = np.array(np.nan) + + assert_raises(AssertionError, lambda: self._assert_func(x, anan)) + assert_raises(AssertionError, lambda: self._assert_func(anan, x)) + + x = np.array([1.1, 2.2, np.nan]) + + assert_raises(AssertionError, lambda: self._assert_func(x, anan)) + assert_raises(AssertionError, lambda: self._assert_func(anan, x)) + + y = np.array([1.0, 2.0, np.nan]) + + self._assert_func(y, x) + assert_raises(AssertionError, lambda: self._assert_func(x, y)) + + def test_inf_compare(self): + aone = np.array(1) + ainf = np.array(np.inf) + + self._assert_func(aone, ainf) + self._assert_func(-ainf, aone) + self._assert_func(-ainf, ainf) + assert_raises(AssertionError, lambda: self._assert_func(ainf, aone)) + assert_raises(AssertionError, lambda: self._assert_func(aone, -ainf)) + assert_raises(AssertionError, lambda: self._assert_func(ainf, ainf)) + assert_raises(AssertionError, lambda: self._assert_func(ainf, -ainf)) + assert_raises(AssertionError, lambda: self._assert_func(-ainf, -ainf)) + + def test_inf_compare_array(self): + x = np.array([1.1, 2.2, np.inf]) + ainf = np.array(np.inf) + + assert_raises(AssertionError, lambda: self._assert_func(x, ainf)) + assert_raises(AssertionError, lambda: self._assert_func(ainf, x)) + assert_raises(AssertionError, lambda: self._assert_func(x, -ainf)) + assert_raises(AssertionError, lambda: self._assert_func(-x, -ainf)) + assert_raises(AssertionError, lambda: self._assert_func(-ainf, -x)) + self._assert_func(-ainf, x) + + +@pytest.mark.skip(reason="The raises decorator depends on Nose") +class TestRaises: + + def setup(self): + class MyException(Exception): + pass + + self.e = MyException + + def raises_exception(self, e): + raise e + + def does_not_raise_exception(self): + pass + + def test_correct_catch(self): + raises(self.e)(self.raises_exception)(self.e) # raises? + + def test_wrong_exception(self): + try: + raises(self.e)(self.raises_exception)(RuntimeError) # raises? + except RuntimeError: + return + else: + raise AssertionError("should have caught RuntimeError") + + def test_catch_no_raise(self): + try: + raises(self.e)(self.does_not_raise_exception)() # raises? 
+ except AssertionError: + return + else: + raise AssertionError("should have raised an AssertionError") + + +class TestWarns: + + def test_warn(self): + def f(): + warnings.warn("yo") + return 3 + + before_filters = sys.modules['warnings'].filters[:] + assert_equal(assert_warns(UserWarning, f), 3) + after_filters = sys.modules['warnings'].filters + + assert_raises(AssertionError, assert_no_warnings, f) + assert_equal(assert_no_warnings(lambda x: x, 1), 1) + + # Check that the warnings state is unchanged + assert_equal(before_filters, after_filters, + "assert_warns does not preserver warnings state") + + def test_context_manager(self): + + before_filters = sys.modules['warnings'].filters[:] + with assert_warns(UserWarning): + warnings.warn("yo") + after_filters = sys.modules['warnings'].filters + + def no_warnings(): + with assert_no_warnings(): + warnings.warn("yo") + + assert_raises(AssertionError, no_warnings) + assert_equal(before_filters, after_filters, + "assert_warns does not preserver warnings state") + + def test_warn_wrong_warning(self): + def f(): + warnings.warn("yo", DeprecationWarning) + + failed = False + with warnings.catch_warnings(): + warnings.simplefilter("error", DeprecationWarning) + try: + # Should raise a DeprecationWarning + assert_warns(UserWarning, f) + failed = True + except DeprecationWarning: + pass + + if failed: + raise AssertionError("wrong warning caught by assert_warn") + + +class TestAssertAllclose: + + def test_simple(self): + x = 1e-3 + y = 1e-9 + + assert_allclose(x, y, atol=1) + assert_raises(AssertionError, assert_allclose, x, y) + + a = np.array([x, y, x, y]) + b = np.array([x, y, x, x]) + + assert_allclose(a, b, atol=1) + assert_raises(AssertionError, assert_allclose, a, b) + + b[-1] = y * (1 + 1e-8) + assert_allclose(a, b) + assert_raises(AssertionError, assert_allclose, a, b, rtol=1e-9) + + assert_allclose(6, 10, rtol=0.5) + assert_raises(AssertionError, assert_allclose, 10, 6, rtol=0.5) + + def test_min_int(self): + a = np.array([np.iinfo(np.int_).min], dtype=np.int_) + # Should not raise: + assert_allclose(a, a) + + def test_report_fail_percentage(self): + a = np.array([1, 1, 1, 1]) + b = np.array([1, 1, 1, 2]) + + with pytest.raises(AssertionError) as exc_info: + assert_allclose(a, b) + msg = str(exc_info.value) + assert_('Mismatched elements: 1 / 4 (25%)\n' + 'Max absolute difference: 1\n' + 'Max relative difference: 0.5' in msg) + + def test_equal_nan(self): + a = np.array([np.nan]) + b = np.array([np.nan]) + # Should not raise: + assert_allclose(a, b, equal_nan=True) + + def test_not_equal_nan(self): + a = np.array([np.nan]) + b = np.array([np.nan]) + assert_raises(AssertionError, assert_allclose, a, b, equal_nan=False) + + def test_equal_nan_default(self): + # Make sure equal_nan default behavior remains unchanged. (All + # of these functions use assert_array_compare under the hood.) + # None of these should raise. 
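+ # i.e. all four helpers exercised below treat matching nans as equal by
+ # default; e.g. assert_allclose([np.nan], [np.nan]) passes without
+ # passing equal_nan=True explicitly.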
+ a = np.array([np.nan]) + b = np.array([np.nan]) + assert_array_equal(a, b) + assert_array_almost_equal(a, b) + assert_array_less(a, b) + assert_allclose(a, b) + + def test_report_max_relative_error(self): + a = np.array([0, 1]) + b = np.array([0, 2]) + + with pytest.raises(AssertionError) as exc_info: + assert_allclose(a, b) + msg = str(exc_info.value) + assert_('Max relative difference: 0.5' in msg) + + def test_timedelta(self): + # see gh-18286 + a = np.array([[1, 2, 3, "NaT"]], dtype="m8[ns]") + assert_allclose(a, a) + + +class TestArrayAlmostEqualNulp: + + def test_float64_pass(self): + # The number of units of least precision + # In this case, use a few places above the lowest level (ie nulp=1) + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float64) + x = 10**x + x = np.r_[-x, x] + + # Addition + eps = np.finfo(x.dtype).eps + y = x + x*eps*nulp/2. + assert_array_almost_equal_nulp(x, y, nulp) + + # Subtraction + epsneg = np.finfo(x.dtype).epsneg + y = x - x*epsneg*nulp/2. + assert_array_almost_equal_nulp(x, y, nulp) + + def test_float64_fail(self): + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float64) + x = 10**x + x = np.r_[-x, x] + + eps = np.finfo(x.dtype).eps + y = x + x*eps*nulp*2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + x, y, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x*epsneg*nulp*2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + x, y, nulp) + + def test_float64_ignore_nan(self): + # Ignore ULP differences between various NAN's + # Note that MIPS may reverse quiet and signaling nans + # so we use the builtin version as a base. + offset = np.uint64(0xffffffff) + nan1_i64 = np.array(np.nan, dtype=np.float64).view(np.uint64) + nan2_i64 = nan1_i64 ^ offset # nan payload on MIPS is all ones. + nan1_f64 = nan1_i64.view(np.float64) + nan2_f64 = nan2_i64.view(np.float64) + assert_array_max_ulp(nan1_f64, nan2_f64, 0) + + def test_float32_pass(self): + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float32) + x = 10**x + x = np.r_[-x, x] + + eps = np.finfo(x.dtype).eps + y = x + x*eps*nulp/2. + assert_array_almost_equal_nulp(x, y, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x*epsneg*nulp/2. + assert_array_almost_equal_nulp(x, y, nulp) + + def test_float32_fail(self): + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float32) + x = 10**x + x = np.r_[-x, x] + + eps = np.finfo(x.dtype).eps + y = x + x*eps*nulp*2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + x, y, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x*epsneg*nulp*2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + x, y, nulp) + + def test_float32_ignore_nan(self): + # Ignore ULP differences between various NAN's + # Note that MIPS may reverse quiet and signaling nans + # so we use the builtin version as a base. + offset = np.uint32(0xffff) + nan1_i32 = np.array(np.nan, dtype=np.float32).view(np.uint32) + nan2_i32 = nan1_i32 ^ offset # nan payload on MIPS is all ones. + nan1_f32 = nan1_i32.view(np.float32) + nan2_f32 = nan2_i32.view(np.float32) + assert_array_max_ulp(nan1_f32, nan2_f32, 0) + + def test_float16_pass(self): + nulp = 5 + x = np.linspace(-4, 4, 10, dtype=np.float16) + x = 10**x + x = np.r_[-x, x] + + eps = np.finfo(x.dtype).eps + y = x + x*eps*nulp/2. + assert_array_almost_equal_nulp(x, y, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x*epsneg*nulp/2. 
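+ # epsneg is the spacing immediately below 1.0, so this shifts each value
+ # down by roughly nulp/2 units in the last place, well within tolerance.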
+ assert_array_almost_equal_nulp(x, y, nulp) + + def test_float16_fail(self): + nulp = 5 + x = np.linspace(-4, 4, 10, dtype=np.float16) + x = 10**x + x = np.r_[-x, x] + + eps = np.finfo(x.dtype).eps + y = x + x*eps*nulp*2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + x, y, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x*epsneg*nulp*2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + x, y, nulp) + + def test_float16_ignore_nan(self): + # Ignore ULP differences between various NAN's + # Note that MIPS may reverse quiet and signaling nans + # so we use the builtin version as a base. + offset = np.uint16(0xff) + nan1_i16 = np.array(np.nan, dtype=np.float16).view(np.uint16) + nan2_i16 = nan1_i16 ^ offset # nan payload on MIPS is all ones. + nan1_f16 = nan1_i16.view(np.float16) + nan2_f16 = nan2_i16.view(np.float16) + assert_array_max_ulp(nan1_f16, nan2_f16, 0) + + def test_complex128_pass(self): + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float64) + x = 10**x + x = np.r_[-x, x] + xi = x + x*1j + + eps = np.finfo(x.dtype).eps + y = x + x*eps*nulp/2. + assert_array_almost_equal_nulp(xi, x + y*1j, nulp) + assert_array_almost_equal_nulp(xi, y + x*1j, nulp) + # The test condition needs to be at least a factor of sqrt(2) smaller + # because the real and imaginary parts both change + y = x + x*eps*nulp/4. + assert_array_almost_equal_nulp(xi, y + y*1j, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x*epsneg*nulp/2. + assert_array_almost_equal_nulp(xi, x + y*1j, nulp) + assert_array_almost_equal_nulp(xi, y + x*1j, nulp) + y = x - x*epsneg*nulp/4. + assert_array_almost_equal_nulp(xi, y + y*1j, nulp) + + def test_complex128_fail(self): + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float64) + x = 10**x + x = np.r_[-x, x] + xi = x + x*1j + + eps = np.finfo(x.dtype).eps + y = x + x*eps*nulp*2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, x + y*1j, nulp) + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, y + x*1j, nulp) + # The test condition needs to be at least a factor of sqrt(2) smaller + # because the real and imaginary parts both change + y = x + x*eps*nulp + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, y + y*1j, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x*epsneg*nulp*2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, x + y*1j, nulp) + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, y + x*1j, nulp) + y = x - x*epsneg*nulp + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, y + y*1j, nulp) + + def test_complex64_pass(self): + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float32) + x = 10**x + x = np.r_[-x, x] + xi = x + x*1j + + eps = np.finfo(x.dtype).eps + y = x + x*eps*nulp/2. + assert_array_almost_equal_nulp(xi, x + y*1j, nulp) + assert_array_almost_equal_nulp(xi, y + x*1j, nulp) + y = x + x*eps*nulp/4. + assert_array_almost_equal_nulp(xi, y + y*1j, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x*epsneg*nulp/2. + assert_array_almost_equal_nulp(xi, x + y*1j, nulp) + assert_array_almost_equal_nulp(xi, y + x*1j, nulp) + y = x - x*epsneg*nulp/4. + assert_array_almost_equal_nulp(xi, y + y*1j, nulp) + + def test_complex64_fail(self): + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float32) + x = 10**x + x = np.r_[-x, x] + xi = x + x*1j + + eps = np.finfo(x.dtype).eps + y = x + x*eps*nulp*2. 
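+ # A 2*nulp perturbation is twice the allowed spacing, so changing either
+ # the real or the imaginary component alone must already raise.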
+ assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, x + y*1j, nulp) + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, y + x*1j, nulp) + y = x + x*eps*nulp + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, y + y*1j, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x*epsneg*nulp*2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, x + y*1j, nulp) + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, y + x*1j, nulp) + y = x - x*epsneg*nulp + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, y + y*1j, nulp) + + +class TestULP: + + def test_equal(self): + x = np.random.randn(10) + assert_array_max_ulp(x, x, maxulp=0) + + def test_single(self): + # Generate 1 + small deviation, check that adding eps gives a few UNL + x = np.ones(10).astype(np.float32) + x += 0.01 * np.random.randn(10).astype(np.float32) + eps = np.finfo(np.float32).eps + assert_array_max_ulp(x, x+eps, maxulp=20) + + def test_double(self): + # Generate 1 + small deviation, check that adding eps gives a few UNL + x = np.ones(10).astype(np.float64) + x += 0.01 * np.random.randn(10).astype(np.float64) + eps = np.finfo(np.float64).eps + assert_array_max_ulp(x, x+eps, maxulp=200) + + def test_inf(self): + for dt in [np.float32, np.float64]: + inf = np.array([np.inf]).astype(dt) + big = np.array([np.finfo(dt).max]) + assert_array_max_ulp(inf, big, maxulp=200) + + def test_nan(self): + # Test that nan is 'far' from small, tiny, inf, max and min + for dt in [np.float32, np.float64]: + if dt == np.float32: + maxulp = 1e6 + else: + maxulp = 1e12 + inf = np.array([np.inf]).astype(dt) + nan = np.array([np.nan]).astype(dt) + big = np.array([np.finfo(dt).max]) + tiny = np.array([np.finfo(dt).tiny]) + zero = np.array([np.PZERO]).astype(dt) + nzero = np.array([np.NZERO]).astype(dt) + assert_raises(AssertionError, + lambda: assert_array_max_ulp(nan, inf, + maxulp=maxulp)) + assert_raises(AssertionError, + lambda: assert_array_max_ulp(nan, big, + maxulp=maxulp)) + assert_raises(AssertionError, + lambda: assert_array_max_ulp(nan, tiny, + maxulp=maxulp)) + assert_raises(AssertionError, + lambda: assert_array_max_ulp(nan, zero, + maxulp=maxulp)) + assert_raises(AssertionError, + lambda: assert_array_max_ulp(nan, nzero, + maxulp=maxulp)) + + +class TestStringEqual: + def test_simple(self): + assert_string_equal("hello", "hello") + assert_string_equal("hello\nmultiline", "hello\nmultiline") + + with pytest.raises(AssertionError) as exc_info: + assert_string_equal("foo\nbar", "hello\nbar") + msg = str(exc_info.value) + assert_equal(msg, "Differences in strings:\n- foo\n+ hello") + + assert_raises(AssertionError, + lambda: assert_string_equal("foo", "hello")) + + def test_regex(self): + assert_string_equal("a+*b", "a+*b") + + assert_raises(AssertionError, + lambda: assert_string_equal("aaa", "a+b")) + + +def assert_warn_len_equal(mod, n_in_context, py34=None, py37=None): + try: + mod_warns = mod.__warningregistry__ + except AttributeError: + # the lack of a __warningregistry__ + # attribute means that no warning has + # occurred; this can be triggered in + # a parallel test scenario, while in + # a serial test scenario an initial + # warning (and therefore the attribute) + # are always created first + mod_warns = {} + + num_warns = len(mod_warns) + # Python 3.4 appears to clear any pre-existing warnings of the same type, + # when raising warnings inside a catch_warnings block. 
So, there is a + # warning generated by the tests within the context manager, but no + # previous warnings. + if 'version' in mod_warns: + # Python 3 adds a 'version' entry to the registry, + # do not count it. + num_warns -= 1 + + # Behavior of warnings is Python version dependent. Adjust the + # expected result to compensate. In particular, Python 3.7 does + # not make an entry for ignored warnings. + if sys.version_info[:2] >= (3, 7): + if py37 is not None: + n_in_context = py37 + else: + if py34 is not None: + n_in_context = py34 + assert_equal(num_warns, n_in_context) + +def test_warn_len_equal_call_scenarios(): + # assert_warn_len_equal is called under + # varying circumstances depending on serial + # vs. parallel test scenarios; this test + # simply aims to probe both code paths and + # check that no assertion is uncaught + + # parallel scenario -- no warning issued yet + class mod: + pass + + mod_inst = mod() + + assert_warn_len_equal(mod=mod_inst, + n_in_context=0) + + # serial test scenario -- the __warningregistry__ + # attribute should be present + class mod: + def __init__(self): + self.__warningregistry__ = {'warning1':1, + 'warning2':2} + + mod_inst = mod() + assert_warn_len_equal(mod=mod_inst, + n_in_context=2) + + +def _get_fresh_mod(): + # Get this module, with warning registry empty + my_mod = sys.modules[__name__] + try: + my_mod.__warningregistry__.clear() + except AttributeError: + # will not have a __warningregistry__ unless warning has been + # raised in the module at some point + pass + return my_mod + + +def test_clear_and_catch_warnings(): + # Initial state of module, no warnings + my_mod = _get_fresh_mod() + assert_equal(getattr(my_mod, '__warningregistry__', {}), {}) + with clear_and_catch_warnings(modules=[my_mod]): + warnings.simplefilter('ignore') + warnings.warn('Some warning') + assert_equal(my_mod.__warningregistry__, {}) + # Without specified modules, don't clear warnings during context + # Python 3.7 catch_warnings doesn't make an entry for 'ignore'. + with clear_and_catch_warnings(): + warnings.simplefilter('ignore') + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 1, py37=0) + # Confirm that specifying module keeps old warning, does not add new + with clear_and_catch_warnings(modules=[my_mod]): + warnings.simplefilter('ignore') + warnings.warn('Another warning') + assert_warn_len_equal(my_mod, 1, py37=0) + # Another warning, no module spec does add to warnings dict, except on + # Python 3.4 (see comments in `assert_warn_len_equal`) + # Python 3.7 catch_warnings doesn't make an entry for 'ignore'. + with clear_and_catch_warnings(): + warnings.simplefilter('ignore') + warnings.warn('Another warning') + assert_warn_len_equal(my_mod, 2, py34=1, py37=0) + + +def test_suppress_warnings_module(): + # Initial state of module, no warnings + my_mod = _get_fresh_mod() + assert_equal(getattr(my_mod, '__warningregistry__', {}), {}) + + def warn_other_module(): + # Apply along axis is implemented in python; stacklevel=2 means + # we end up inside its module, not ours. + def warn(arr): + warnings.warn("Some warning 2", stacklevel=2) + return arr + np.apply_along_axis(warn, 0, [0]) + + # Test module based warning suppression: + assert_warn_len_equal(my_mod, 0) + with suppress_warnings() as sup: + sup.record(UserWarning) + # suppress warning from other module (may have .pyc ending), + # if apply_along_axis is moved, had to be changed. 
+ sup.filter(module=np.lib.shape_base) + warnings.warn("Some warning") + warn_other_module() + # Check that the suppression did test the file correctly (this module + # got filtered) + assert_equal(len(sup.log), 1) + assert_equal(sup.log[0].message.args[0], "Some warning") + assert_warn_len_equal(my_mod, 0, py37=0) + sup = suppress_warnings() + # Will have to be changed if apply_along_axis is moved: + sup.filter(module=my_mod) + with sup: + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 0) + # And test repeat works: + sup.filter(module=my_mod) + with sup: + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 0) + + # Without specified modules, don't clear warnings during context + # Python 3.7 does not add ignored warnings. + with suppress_warnings(): + warnings.simplefilter('ignore') + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 1, py37=0) + +def test_suppress_warnings_type(): + # Initial state of module, no warnings + my_mod = _get_fresh_mod() + assert_equal(getattr(my_mod, '__warningregistry__', {}), {}) + + # Test module based warning suppression: + with suppress_warnings() as sup: + sup.filter(UserWarning) + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 0) + sup = suppress_warnings() + sup.filter(UserWarning) + with sup: + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 0) + # And test repeat works: + sup.filter(module=my_mod) + with sup: + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 0) + + # Without specified modules, don't clear warnings during context + # Python 3.7 does not add ignored warnings. + with suppress_warnings(): + warnings.simplefilter('ignore') + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 1, py37=0) + + +def test_suppress_warnings_decorate_no_record(): + sup = suppress_warnings() + sup.filter(UserWarning) + + @sup + def warn(category): + warnings.warn('Some warning', category) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + warn(UserWarning) # should be supppressed + warn(RuntimeWarning) + assert_equal(len(w), 1) + + +def test_suppress_warnings_record(): + sup = suppress_warnings() + log1 = sup.record() + + with sup: + log2 = sup.record(message='Some other warning 2') + sup.filter(message='Some warning') + warnings.warn('Some warning') + warnings.warn('Some other warning') + warnings.warn('Some other warning 2') + + assert_equal(len(sup.log), 2) + assert_equal(len(log1), 1) + assert_equal(len(log2),1) + assert_equal(log2[0].message.args[0], 'Some other warning 2') + + # Do it again, with the same context to see if some warnings survived: + with sup: + log2 = sup.record(message='Some other warning 2') + sup.filter(message='Some warning') + warnings.warn('Some warning') + warnings.warn('Some other warning') + warnings.warn('Some other warning 2') + + assert_equal(len(sup.log), 2) + assert_equal(len(log1), 1) + assert_equal(len(log2), 1) + assert_equal(log2[0].message.args[0], 'Some other warning 2') + + # Test nested: + with suppress_warnings() as sup: + sup.record() + with suppress_warnings() as sup2: + sup2.record(message='Some warning') + warnings.warn('Some warning') + warnings.warn('Some other warning') + assert_equal(len(sup2.log), 1) + assert_equal(len(sup.log), 1) + + +def test_suppress_warnings_forwarding(): + def warn_other_module(): + # Apply along axis is implemented in python; stacklevel=2 means + # we end up inside its module, not ours. 
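+ # Concretely: the warning gets attributed to numpy.lib.shape_base, which
+ # is exactly what the module-based filters in these tests key on.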
+ def warn(arr): + warnings.warn("Some warning", stacklevel=2) + return arr + np.apply_along_axis(warn, 0, [0]) + + with suppress_warnings() as sup: + sup.record() + with suppress_warnings("always"): + for i in range(2): + warnings.warn("Some warning") + + assert_equal(len(sup.log), 2) + + with suppress_warnings() as sup: + sup.record() + with suppress_warnings("location"): + for i in range(2): + warnings.warn("Some warning") + warnings.warn("Some warning") + + assert_equal(len(sup.log), 2) + + with suppress_warnings() as sup: + sup.record() + with suppress_warnings("module"): + for i in range(2): + warnings.warn("Some warning") + warnings.warn("Some warning") + warn_other_module() + + assert_equal(len(sup.log), 2) + + with suppress_warnings() as sup: + sup.record() + with suppress_warnings("once"): + for i in range(2): + warnings.warn("Some warning") + warnings.warn("Some other warning") + warn_other_module() + + assert_equal(len(sup.log), 2) + + +def test_tempdir(): + with tempdir() as tdir: + fpath = os.path.join(tdir, 'tmp') + with open(fpath, 'w'): + pass + assert_(not os.path.isdir(tdir)) + + raised = False + try: + with tempdir() as tdir: + raise ValueError() + except ValueError: + raised = True + assert_(raised) + assert_(not os.path.isdir(tdir)) + + +def test_temppath(): + with temppath() as fpath: + with open(fpath, 'w'): + pass + assert_(not os.path.isfile(fpath)) + + raised = False + try: + with temppath() as fpath: + raise ValueError() + except ValueError: + raised = True + assert_(raised) + assert_(not os.path.isfile(fpath)) + + +class my_cacw(clear_and_catch_warnings): + + class_modules = (sys.modules[__name__],) + + +def test_clear_and_catch_warnings_inherit(): + # Test can subclass and add default modules + my_mod = _get_fresh_mod() + with my_cacw(): + warnings.simplefilter('ignore') + warnings.warn('Some warning') + assert_equal(my_mod.__warningregistry__, {}) + + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +class TestAssertNoGcCycles: + """ Test assert_no_gc_cycles """ + def test_passes(self): + def no_cycle(): + b = [] + b.append([]) + return b + + with assert_no_gc_cycles(): + no_cycle() + + assert_no_gc_cycles(no_cycle) + + def test_asserts(self): + def make_cycle(): + a = [] + a.append(a) + a.append(a) + return a + + with assert_raises(AssertionError): + with assert_no_gc_cycles(): + make_cycle() + + with assert_raises(AssertionError): + assert_no_gc_cycles(make_cycle) + + @pytest.mark.slow + def test_fails(self): + """ + Test that in cases where the garbage cannot be collected, we raise an + error, instead of hanging forever trying to clear it. + """ + + class ReferenceCycleInDel: + """ + An object that not only contains a reference cycle, but creates new + cycles whenever it's garbage-collected and its __del__ runs + """ + make_cycle = True + + def __init__(self): + self.cycle = self + + def __del__(self): + # break the current cycle so that `self` can be freed + self.cycle = None + + if ReferenceCycleInDel.make_cycle: + # but create a new one so that the garbage collector has more + # work to do. + ReferenceCycleInDel() + + try: + w = weakref.ref(ReferenceCycleInDel()) + try: + with assert_raises(RuntimeError): + # this will be unable to get a baseline empty garbage + assert_no_gc_cycles(lambda: None) + except AssertionError: + # the above test is only necessary if the GC actually tried to free + # our object anyway, which python 2.7 does not. 
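+ # The weakref is the probe here: if w() still resolves, the collector
+ # never freed the instance, so __del__ (and the cycle creation) never ran.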
+ if w() is not None: + pytest.skip("GC does not call __del__ on cyclic objects") + raise + + finally: + # make sure that we stop creating reference cycles + ReferenceCycleInDel.make_cycle = False diff --git a/MLPY/Lib/site-packages/numpy/testing/utils.py b/MLPY/Lib/site-packages/numpy/testing/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..372ba01f852f93e58940405253624925c4950321 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/testing/utils.py @@ -0,0 +1,28 @@ +""" +Back compatibility utils module. It will import the appropriate +set of tools + +""" +import warnings + +# 2018-04-04, numpy 1.15.0 ImportWarning +# 2019-09-18, numpy 1.18.0 DeprecationWarning (changed) +warnings.warn("Importing from numpy.testing.utils is deprecated " + "since 1.15.0, import from numpy.testing instead.", + DeprecationWarning, stacklevel=2) + +from ._private.utils import * + +__all__ = [ + 'assert_equal', 'assert_almost_equal', 'assert_approx_equal', + 'assert_array_equal', 'assert_array_less', 'assert_string_equal', + 'assert_array_almost_equal', 'assert_raises', 'build_err_msg', + 'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal', + 'raises', 'rundocs', 'runstring', 'verbose', 'measure', + 'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex', + 'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings', + 'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings', + 'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY', + 'HAS_REFCOUNT', 'suppress_warnings', 'assert_array_compare', + '_assert_valid_refcount', '_gen_alignment_data', 'assert_no_gc_cycles' + ] diff --git a/MLPY/Lib/site-packages/numpy/tests/__init__.py b/MLPY/Lib/site-packages/numpy/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/numpy/tests/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..232905cc9a182c7fe2c004ca965b868d6b0ed872 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/tests/__pycache__/test_ctypeslib.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/tests/__pycache__/test_ctypeslib.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..529ab6c24fc28a3321cbb7a5db33687260964bfd Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/tests/__pycache__/test_ctypeslib.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/tests/__pycache__/test_matlib.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/tests/__pycache__/test_matlib.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e97412689585a2e3e83746858cde6a148b266792 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/tests/__pycache__/test_matlib.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/tests/__pycache__/test_numpy_version.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/tests/__pycache__/test_numpy_version.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1645a6c8dab53472c152e2a051c486af29b08c4a Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/tests/__pycache__/test_numpy_version.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/tests/__pycache__/test_public_api.cpython-39.pyc 
b/MLPY/Lib/site-packages/numpy/tests/__pycache__/test_public_api.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1858929aa177fa029039312cf1cc80b88b53793 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/tests/__pycache__/test_public_api.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/tests/__pycache__/test_reloading.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/tests/__pycache__/test_reloading.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..80906f0b0825d9faf104c86e35afe90d40ce36d3 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/tests/__pycache__/test_reloading.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/tests/__pycache__/test_scripts.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/tests/__pycache__/test_scripts.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eddd631efa655b3838fb1ad314588123e8f16ca0 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/tests/__pycache__/test_scripts.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/tests/__pycache__/test_warnings.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/tests/__pycache__/test_warnings.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..619723495e61c03b902e030d2ec9ab8de634887c Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/tests/__pycache__/test_warnings.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/tests/test_ctypeslib.py b/MLPY/Lib/site-packages/numpy/tests/test_ctypeslib.py new file mode 100644 index 0000000000000000000000000000000000000000..6c0488d391a62375437bd2383b307543ce8ae7e0 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/tests/test_ctypeslib.py @@ -0,0 +1,365 @@ +import sys +import pytest +import weakref + +import numpy as np +from numpy.ctypeslib import ndpointer, load_library, as_array +from numpy.distutils.misc_util import get_shared_lib_extension +from numpy.testing import assert_, assert_array_equal, assert_raises, assert_equal + +try: + import ctypes +except ImportError: + ctypes = None +else: + cdll = None + test_cdll = None + if hasattr(sys, 'gettotalrefcount'): + try: + cdll = load_library('_multiarray_umath_d', np.core._multiarray_umath.__file__) + except OSError: + pass + try: + test_cdll = load_library('_multiarray_tests', np.core._multiarray_tests.__file__) + except OSError: + pass + if cdll is None: + cdll = load_library('_multiarray_umath', np.core._multiarray_umath.__file__) + if test_cdll is None: + test_cdll = load_library('_multiarray_tests', np.core._multiarray_tests.__file__) + + c_forward_pointer = test_cdll.forward_pointer + + +@pytest.mark.skipif(ctypes is None, + reason="ctypes not available in this python") +@pytest.mark.skipif(sys.platform == 'cygwin', + reason="Known to fail on cygwin") +class TestLoadLibrary: + def test_basic(self): + try: + # Should succeed + load_library('_multiarray_umath', np.core._multiarray_umath.__file__) + except ImportError as e: + msg = ("ctypes is not available on this python: skipping the test" + " (import error was: %s)" % str(e)) + print(msg) + + def test_basic2(self): + # Regression for #801: load_library with a full library name + # (including extension) does not work. 
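+ # get_shared_lib_extension(is_python_ext=True) returns the platform's
+ # extension-module suffix (e.g. '.pyd' on Windows, '.so' elsewhere).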
+ try: + try: + so = get_shared_lib_extension(is_python_ext=True) + # Should succeed + load_library('_multiarray_umath%s' % so, np.core._multiarray_umath.__file__) + except ImportError: + print("No distutils available, skipping test.") + except ImportError as e: + msg = ("ctypes is not available on this python: skipping the test" + " (import error was: %s)" % str(e)) + print(msg) + + +class TestNdpointer: + def test_dtype(self): + dt = np.intc + p = ndpointer(dtype=dt) + assert_(p.from_param(np.array([1], dt))) + dt = '<i4' + p = ndpointer(dtype=dt) + assert_(p.from_param(np.array([1], dt))) + dt = np.dtype('>i4') + p = ndpointer(dtype=dt) + p.from_param(np.array([1], dt)) + assert_raises(TypeError, p.from_param, + np.array([1], dt.newbyteorder('swap'))) + dtnames = ['x', 'y'] + dtformats = [np.intc, np.float64] + dtdescr = {'names': dtnames, 'formats': dtformats} + dt = np.dtype(dtdescr) + p = ndpointer(dtype=dt) + assert_(p.from_param(np.zeros((10,), dt))) + samedt = np.dtype(dtdescr) + p = ndpointer(dtype=samedt) + assert_(p.from_param(np.zeros((10,), dt))) + dt2 = np.dtype(dtdescr, align=True) + if dt.itemsize != dt2.itemsize: + assert_raises(TypeError, p.from_param, np.zeros((10,), dt2)) + else: + assert_(p.from_param(np.zeros((10,), dt2))) + + def test_ndim(self): + p = ndpointer(ndim=0) + assert_(p.from_param(np.array(1))) + assert_raises(TypeError, p.from_param, np.array([1])) + p = ndpointer(ndim=1) + assert_raises(TypeError, p.from_param, np.array(1)) + assert_(p.from_param(np.array([1]))) + p = ndpointer(ndim=2) + assert_(p.from_param(np.array([[1]]))) + + def test_shape(self): + p = ndpointer(shape=(1, 2)) + assert_(p.from_param(np.array([[1, 2]]))) + assert_raises(TypeError, p.from_param, np.array([[1], [2]])) + p = ndpointer(shape=()) + assert_(p.from_param(np.array(1))) + + def test_flags(self): + x = np.array([[1, 2], [3, 4]], order='F') + p = ndpointer(flags='FORTRAN') + assert_(p.from_param(x)) + p = ndpointer(flags='CONTIGUOUS') + assert_raises(TypeError, p.from_param, x) + p = ndpointer(flags=x.flags.num) + assert_(p.from_param(x)) + assert_raises(TypeError, p.from_param, np.array([[1, 2], [3, 4]])) + + def test_cache(self): + assert_(ndpointer(dtype=np.float64) is ndpointer(dtype=np.float64)) + + # shapes are normalized + assert_(ndpointer(shape=2) is ndpointer(shape=(2,))) + + # 1.12 <= v < 1.16 had a bug that made these fail + assert_(ndpointer(shape=2) is not ndpointer(ndim=2)) + assert_(ndpointer(ndim=2) is not ndpointer(shape=2)) + +@pytest.mark.skipif(ctypes is None, + reason="ctypes not available on this python installation") +class TestNdpointerCFunc: + def test_arguments(self): + """ Test that arguments are coerced from arrays """ + c_forward_pointer.restype = ctypes.c_void_p + c_forward_pointer.argtypes = (ndpointer(ndim=2),) + + c_forward_pointer(np.zeros((2, 3))) + # too many dimensions + assert_raises( + ctypes.ArgumentError, c_forward_pointer, np.zeros((2, 3, 4))) + + @pytest.mark.parametrize( + 'dt', [ + float, + np.dtype(dict( + formats=[ + +class TestAsCtypesType: + def test_scalar(self): + dt = np.dtype('<u2') + ct = np.ctypeslib.as_ctypes_type(dt) + assert_equal(ct, ctypes.c_uint16.__ctype_le__) + + dt = np.dtype('>u2') + ct = np.ctypeslib.as_ctypes_type(dt) + assert_equal(ct, ctypes.c_uint16.__ctype_be__) + + dt = np.dtype('u2') + ct = np.ctypeslib.as_ctypes_type(dt) + assert_equal(ct, ctypes.c_uint16) + + def test_subarray(self): + dt = np.dtype((np.int32, (2, 3))) + ct = np.ctypeslib.as_ctypes_type(dt) + assert_equal(ct, 2 * (3 * ctypes.c_int32)) + + def test_structure(self): + dt = np.dtype([ + ('a', np.uint16), + ('b', np.uint32), + ]) + + ct = np.ctypeslib.as_ctypes_type(dt) + assert_(issubclass(ct, ctypes.Structure)) + assert_equal(ctypes.sizeof(ct), dt.itemsize) + assert_equal(ct._fields_, [ + ('a', 
ctypes.c_uint16), + ('b', ctypes.c_uint32), + ]) + + def test_structure_aligned(self): + dt = np.dtype([ + ('a', np.uint16), + ('b', np.uint32), + ], align=True) + + ct = np.ctypeslib.as_ctypes_type(dt) + assert_(issubclass(ct, ctypes.Structure)) + assert_equal(ctypes.sizeof(ct), dt.itemsize) + assert_equal(ct._fields_, [ + ('a', ctypes.c_uint16), + ('', ctypes.c_char * 2), # padding + ('b', ctypes.c_uint32), + ]) + + def test_union(self): + dt = np.dtype(dict( + names=['a', 'b'], + offsets=[0, 0], + formats=[np.uint16, np.uint32] + )) + + ct = np.ctypeslib.as_ctypes_type(dt) + assert_(issubclass(ct, ctypes.Union)) + assert_equal(ctypes.sizeof(ct), dt.itemsize) + assert_equal(ct._fields_, [ + ('a', ctypes.c_uint16), + ('b', ctypes.c_uint32), + ]) + + def test_padded_union(self): + dt = np.dtype(dict( + names=['a', 'b'], + offsets=[0, 0], + formats=[np.uint16, np.uint32], + itemsize=5, + )) + + ct = np.ctypeslib.as_ctypes_type(dt) + assert_(issubclass(ct, ctypes.Union)) + assert_equal(ctypes.sizeof(ct), dt.itemsize) + assert_equal(ct._fields_, [ + ('a', ctypes.c_uint16), + ('b', ctypes.c_uint32), + ('', ctypes.c_char * 5), # padding + ]) + + def test_overlapping(self): + dt = np.dtype(dict( + names=['a', 'b'], + offsets=[0, 2], + formats=[np.uint32, np.uint32] + )) + assert_raises(NotImplementedError, np.ctypeslib.as_ctypes_type, dt) diff --git a/MLPY/Lib/site-packages/numpy/tests/test_matlib.py b/MLPY/Lib/site-packages/numpy/tests/test_matlib.py new file mode 100644 index 0000000000000000000000000000000000000000..5062179dadf50925747b0755faf0c6cc97a55632 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/tests/test_matlib.py @@ -0,0 +1,58 @@ +import numpy as np +import numpy.matlib +from numpy.testing import assert_array_equal, assert_ + +def test_empty(): + x = numpy.matlib.empty((2,)) + assert_(isinstance(x, np.matrix)) + assert_(x.shape, (1, 2)) + +def test_ones(): + assert_array_equal(numpy.matlib.ones((2, 3)), + np.matrix([[ 1., 1., 1.], + [ 1., 1., 1.]])) + + assert_array_equal(numpy.matlib.ones(2), np.matrix([[ 1., 1.]])) + +def test_zeros(): + assert_array_equal(numpy.matlib.zeros((2, 3)), + np.matrix([[ 0., 0., 0.], + [ 0., 0., 0.]])) + + assert_array_equal(numpy.matlib.zeros(2), np.matrix([[ 0., 0.]])) + +def test_identity(): + x = numpy.matlib.identity(2, dtype=int) + assert_array_equal(x, np.matrix([[1, 0], [0, 1]])) + +def test_eye(): + xc = numpy.matlib.eye(3, k=1, dtype=int) + assert_array_equal(xc, np.matrix([[ 0, 1, 0], + [ 0, 0, 1], + [ 0, 0, 0]])) + assert xc.flags.c_contiguous + assert not xc.flags.f_contiguous + + xf = numpy.matlib.eye(3, 4, dtype=int, order='F') + assert_array_equal(xf, np.matrix([[ 1, 0, 0, 0], + [ 0, 1, 0, 0], + [ 0, 0, 1, 0]])) + assert not xf.flags.c_contiguous + assert xf.flags.f_contiguous + +def test_rand(): + x = numpy.matlib.rand(3) + # check matrix type, array would have shape (3,) + assert_(x.ndim == 2) + +def test_randn(): + x = np.matlib.randn(3) + # check matrix type, array would have shape (3,) + assert_(x.ndim == 2) + +def test_repmat(): + a1 = np.arange(4) + x = numpy.matlib.repmat(a1, 2, 2) + y = np.array([[0, 1, 2, 3, 0, 1, 2, 3], + [0, 1, 2, 3, 0, 1, 2, 3]]) + assert_array_equal(x, y) diff --git a/MLPY/Lib/site-packages/numpy/tests/test_numpy_version.py b/MLPY/Lib/site-packages/numpy/tests/test_numpy_version.py new file mode 100644 index 0000000000000000000000000000000000000000..029e36470589987032fff7d9e1559015837514de --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/tests/test_numpy_version.py @@ -0,0 +1,44 @@ +""" +Check the numpy 
version is valid. + +Note that a development version is marked by the presence of 'dev0' or '+' +in the version string; all else is treated as a release. The version string +itself is set from the output of ``git describe``, which relies on tags. + +Examples +-------- + +Valid Development: 1.22.0.dev0 1.22.0.dev0+5-g7999db4df2 1.22.0+5-g7999db4df2 +Valid Release: 1.21.0.rc1, 1.21.0.b1, 1.21.0 +Invalid: 1.22.0.dev, 1.22.0.dev0-5-g7999db4dfB, 1.21.0.d1, 1.21.a + +Note that a release is determined by the version string, which in turn +is controlled by the result of the ``git describe`` command. +""" +import re + +import numpy as np +from numpy.testing import assert_ + + +def test_valid_numpy_version(): + # Verify that the numpy version is a valid one (no .post suffix or other + # nonsense). See gh-6431 for an issue caused by an invalid version. + version_pattern = r"^[0-9]+\.[0-9]+\.[0-9]+(a[0-9]|b[0-9]|rc[0-9]|)" + dev_suffix = r"(\.dev0|)(\+[0-9]*\.g[0-9a-f]+|)" + if np.version.release: + res = re.match(version_pattern + '$', np.__version__) + else: + res = re.match(version_pattern + dev_suffix + '$', np.__version__) + + assert_(res is not None, np.__version__) + + +def test_short_version(): + # Check numpy.short_version actually exists + if np.version.release: + assert_(np.__version__ == np.version.short_version, + "short_version mismatch in release version") + else: + assert_(np.__version__.split("+")[0] == np.version.short_version, + "short_version mismatch in development version") diff --git a/MLPY/Lib/site-packages/numpy/tests/test_public_api.py b/MLPY/Lib/site-packages/numpy/tests/test_public_api.py new file mode 100644 index 0000000000000000000000000000000000000000..3ee96a6e8ede3b1be64d4c7cce3edd26abbc56dd --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/tests/test_public_api.py @@ -0,0 +1,475 @@ +import sys +import subprocess +import pkgutil +import types +import importlib +import warnings + +import numpy as np +import numpy +import pytest + +try: + import ctypes +except ImportError: + ctypes = None + + +def check_dir(module, module_name=None): + """Returns a mapping of all objects with the wrong __module__ attribute.""" + if module_name is None: + module_name = module.__name__ + results = {} + for name in dir(module): + item = getattr(module, name) + if (hasattr(item, '__module__') and hasattr(item, '__name__') + and item.__module__ != module_name): + results[name] = item.__module__ + '.'
+ item.__name__ + return results + + +def test_numpy_namespace(): + # None of these objects are publicly documented to be part of the main + # NumPy namespace (some are useful though, others need to be cleaned up) + undocumented = { + 'Tester': 'numpy.testing._private.nosetester.NoseTester', + '_add_newdoc_ufunc': 'numpy.core._multiarray_umath._add_newdoc_ufunc', + 'add_docstring': 'numpy.core._multiarray_umath.add_docstring', + 'add_newdoc': 'numpy.core.function_base.add_newdoc', + 'add_newdoc_ufunc': 'numpy.core._multiarray_umath._add_newdoc_ufunc', + 'byte_bounds': 'numpy.lib.utils.byte_bounds', + 'compare_chararrays': 'numpy.core._multiarray_umath.compare_chararrays', + 'deprecate': 'numpy.lib.utils.deprecate', + 'deprecate_with_doc': 'numpy.lib.utils.deprecate_with_doc', + 'disp': 'numpy.lib.function_base.disp', + 'fastCopyAndTranspose': 'numpy.core._multiarray_umath._fastCopyAndTranspose', + 'get_array_wrap': 'numpy.lib.shape_base.get_array_wrap', + 'get_include': 'numpy.lib.utils.get_include', + 'mafromtxt': 'numpy.lib.npyio.mafromtxt', + 'ndfromtxt': 'numpy.lib.npyio.ndfromtxt', + 'recfromcsv': 'numpy.lib.npyio.recfromcsv', + 'recfromtxt': 'numpy.lib.npyio.recfromtxt', + 'safe_eval': 'numpy.lib.utils.safe_eval', + 'set_string_function': 'numpy.core.arrayprint.set_string_function', + 'show_config': 'numpy.__config__.show', + 'who': 'numpy.lib.utils.who', + } + if sys.version_info < (3, 7): + # These built-in types are re-exported by numpy. + builtins = { + 'bool': 'builtins.bool', + 'complex': 'builtins.complex', + 'float': 'builtins.float', + 'int': 'builtins.int', + 'long': 'builtins.int', + 'object': 'builtins.object', + 'str': 'builtins.str', + 'unicode': 'builtins.str', + } + allowlist = dict(undocumented, **builtins) + else: + # after 3.7, we override dir to not show these members + allowlist = undocumented + bad_results = check_dir(np) + # pytest gives better error messages with the builtin assert than with + # assert_equal + assert bad_results == allowlist + + +@pytest.mark.parametrize('name', ['testing', 'Tester']) +def test_import_lazy_import(name): + """Make sure we can actually use the modules we lazy load. + + While not exported as part of the public API, it was accessible. With the + use of __getattr__ and __dir__, this isn't always true: it can happen that + an infinite recursion occurs. + + This is the only way I found that would force the failure to appear in the + badly implemented code. + + We also test for the presence of the lazily imported modules in dir. + + """ + exe = (sys.executable, '-c', "import numpy; numpy."
+ name) + result = subprocess.check_output(exe) + assert not result + + # Make sure they are still in the __dir__ + assert name in dir(np) + + +def test_dir_testing(): + """Assert that output of dir has only one "testing/tester" + attribute without duplicate""" + assert len(dir(np)) == len(set(dir(np))) + + +def test_numpy_linalg(): + bad_results = check_dir(np.linalg) + assert bad_results == {} + + +def test_numpy_fft(): + bad_results = check_dir(np.fft) + assert bad_results == {} + + +@pytest.mark.skipif(ctypes is None, + reason="ctypes not available in this python") +def test_NPY_NO_EXPORT(): + cdll = ctypes.CDLL(np.core._multiarray_tests.__file__) + # Make sure an arbitrary NPY_NO_EXPORT function is actually hidden + f = getattr(cdll, 'test_not_exported', None) + assert f is None, ("'test_not_exported' is mistakenly exported, " + "NPY_NO_EXPORT does not work") + + +# Historically NumPy has not used leading underscores for private submodules +# much. This has resulted in lots of things that look like public modules +# (i.e. things that can be imported as `import numpy.somesubmodule.somefile`), +# but were never intended to be public. The PUBLIC_MODULES list contains +# modules that are either public because they were meant to be, or because they +# contain public functions/objects that aren't present in any other namespace +# for whatever reason and therefore should be treated as public. +# +# The PRIVATE_BUT_PRESENT_MODULES list contains modules that look public (lack +# of underscores) but should not be used. For many of those modules the +# current status is fine. For others it may make sense to work on making them +# private, to clean up our public API and avoid confusion. +PUBLIC_MODULES = ['numpy.' + s for s in [ + "ctypeslib", + "distutils", + "distutils.cpuinfo", + "distutils.exec_command", + "distutils.misc_util", + "distutils.log", + "distutils.system_info", + "doc", + "doc.constants", + "doc.ufuncs", + "f2py", + "fft", + "lib", + "lib.format", # was this meant to be public? + "lib.mixins", + "lib.recfunctions", + "lib.scimath", + "lib.stride_tricks", + "linalg", + "ma", + "ma.extras", + "ma.mrecords", + "matlib", + "polynomial", + "polynomial.chebyshev", + "polynomial.hermite", + "polynomial.hermite_e", + "polynomial.laguerre", + "polynomial.legendre", + "polynomial.polynomial", + "random", + "testing", + "typing", + "typing.mypy_plugin", + "version", +]] + + +PUBLIC_ALIASED_MODULES = [ + "numpy.char", + "numpy.emath", + "numpy.rec", +] + + +PRIVATE_BUT_PRESENT_MODULES = ['numpy.' 
+ s for s in [ + "compat", + "compat.py3k", + "conftest", + "core", + "core.arrayprint", + "core.defchararray", + "core.einsumfunc", + "core.fromnumeric", + "core.function_base", + "core.getlimits", + "core.machar", + "core.memmap", + "core.multiarray", + "core.numeric", + "core.numerictypes", + "core.overrides", + "core.records", + "core.shape_base", + "core.umath", + "core.umath_tests", + "distutils.ccompiler", + 'distutils.ccompiler_opt', + "distutils.command", + "distutils.command.autodist", + "distutils.command.bdist_rpm", + "distutils.command.build", + "distutils.command.build_clib", + "distutils.command.build_ext", + "distutils.command.build_py", + "distutils.command.build_scripts", + "distutils.command.build_src", + "distutils.command.config", + "distutils.command.config_compiler", + "distutils.command.develop", + "distutils.command.egg_info", + "distutils.command.install", + "distutils.command.install_clib", + "distutils.command.install_data", + "distutils.command.install_headers", + "distutils.command.sdist", + "distutils.conv_template", + "distutils.core", + "distutils.extension", + "distutils.fcompiler", + "distutils.fcompiler.absoft", + "distutils.fcompiler.compaq", + "distutils.fcompiler.environment", + "distutils.fcompiler.g95", + "distutils.fcompiler.gnu", + "distutils.fcompiler.hpux", + "distutils.fcompiler.ibm", + "distutils.fcompiler.intel", + "distutils.fcompiler.lahey", + "distutils.fcompiler.mips", + "distutils.fcompiler.nag", + "distutils.fcompiler.none", + "distutils.fcompiler.pathf95", + "distutils.fcompiler.pg", + "distutils.fcompiler.nv", + "distutils.fcompiler.sun", + "distutils.fcompiler.vast", + "distutils.fcompiler.fujitsu", + "distutils.from_template", + "distutils.intelccompiler", + "distutils.lib2def", + "distutils.line_endings", + "distutils.mingw32ccompiler", + "distutils.msvccompiler", + "distutils.npy_pkg_config", + "distutils.numpy_distribution", + "distutils.pathccompiler", + "distutils.unixccompiler", + "dual", + "f2py.auxfuncs", + "f2py.capi_maps", + "f2py.cb_rules", + "f2py.cfuncs", + "f2py.common_rules", + "f2py.crackfortran", + "f2py.diagnose", + "f2py.f2py2e", + "f2py.f2py_testing", + "f2py.f90mod_rules", + "f2py.func2subr", + "f2py.rules", + "f2py.use_rules", + "fft.helper", + "lib.arraypad", + "lib.arraysetops", + "lib.arrayterator", + "lib.function_base", + "lib.histograms", + "lib.index_tricks", + "lib.nanfunctions", + "lib.npyio", + "lib.polynomial", + "lib.shape_base", + "lib.twodim_base", + "lib.type_check", + "lib.ufunclike", + "lib.user_array", # note: not in np.lib, but probably should just be deleted + "lib.utils", + "linalg.lapack_lite", + "linalg.linalg", + "ma.bench", + "ma.core", + "ma.testutils", + "ma.timer_comparison", + "matrixlib", + "matrixlib.defmatrix", + "polynomial.polyutils", + "random.mtrand", + "random.bit_generator", + "testing.print_coercion_tables", + "testing.utils", +]] + + +def is_unexpected(name): + """Check if this needs to be considered.""" + if '._' in name or '.tests' in name or '.setup' in name: + return False + + if name in PUBLIC_MODULES: + return False + + if name in PUBLIC_ALIASED_MODULES: + return False + + if name in PRIVATE_BUT_PRESENT_MODULES: + return False + + return True + + +# These are present in a directory with an __init__.py but cannot be imported +# code_generators/ isn't installed, but present for an inplace build +SKIP_LIST = [ + "numpy.core.code_generators", + "numpy.core.code_generators.genapi", + "numpy.core.code_generators.generate_umath", + 
"numpy.core.code_generators.ufunc_docstrings", + "numpy.core.code_generators.generate_numpy_api", + "numpy.core.code_generators.generate_ufunc_api", + "numpy.core.code_generators.numpy_api", + "numpy.core.cversions", + "numpy.core.generate_numpy_api", + "numpy.distutils.msvc9compiler", +] + + +def test_all_modules_are_expected(): + """ + Test that we don't add anything that looks like a new public module by + accident. Check is based on filenames. + """ + + modnames = [] + for _, modname, ispkg in pkgutil.walk_packages(path=np.__path__, + prefix=np.__name__ + '.', + onerror=None): + if is_unexpected(modname) and modname not in SKIP_LIST: + # We have a name that is new. If that's on purpose, add it to + # PUBLIC_MODULES. We don't expect to have to add anything to + # PRIVATE_BUT_PRESENT_MODULES. Use an underscore in the name! + modnames.append(modname) + + if modnames: + raise AssertionError(f'Found unexpected modules: {modnames}') + + +# Stuff that clearly shouldn't be in the API and is detected by the next test +# below +SKIP_LIST_2 = [ + 'numpy.math', + 'numpy.distutils.log.sys', + 'numpy.doc.constants.re', + 'numpy.doc.constants.textwrap', + 'numpy.lib.emath', + 'numpy.lib.math', + 'numpy.matlib.char', + 'numpy.matlib.rec', + 'numpy.matlib.emath', + 'numpy.matlib.math', + 'numpy.matlib.linalg', + 'numpy.matlib.fft', + 'numpy.matlib.random', + 'numpy.matlib.ctypeslib', + 'numpy.matlib.ma', +] + + +def test_all_modules_are_expected_2(): + """ + Method checking all objects. The pkgutil-based method in + `test_all_modules_are_expected` does not catch imports into a namespace, + only filenames. So this test is more thorough, and checks this like: + + import .lib.scimath as emath + + To check if something in a module is (effectively) public, one can check if + there's anything in that namespace that's a public function/object but is + not exposed in a higher-level namespace. For example for a `numpy.lib` + submodule:: + + mod = np.lib.mixins + for obj in mod.__all__: + if obj in np.__all__: + continue + elif obj in np.lib.__all__: + continue + + else: + print(obj) + + """ + + def find_unexpected_members(mod_name): + members = [] + module = importlib.import_module(mod_name) + if hasattr(module, '__all__'): + objnames = module.__all__ + else: + objnames = dir(module) + + for objname in objnames: + if not objname.startswith('_'): + fullobjname = mod_name + '.' + objname + if isinstance(getattr(module, objname), types.ModuleType): + if is_unexpected(fullobjname): + if fullobjname not in SKIP_LIST_2: + members.append(fullobjname) + + return members + + unexpected_members = find_unexpected_members("numpy") + for modname in PUBLIC_MODULES: + unexpected_members.extend(find_unexpected_members(modname)) + + if unexpected_members: + raise AssertionError("Found unexpected object(s) that look like " + "modules: {}".format(unexpected_members)) + + +def test_api_importable(): + """ + Check that all submodules listed higher up in this file can be imported + + Note that if a PRIVATE_BUT_PRESENT_MODULES entry goes missing, it may + simply need to be removed from the list (deprecation may or may not be + needed - apply common sense). 
+ """ + def check_importable(module_name): + try: + importlib.import_module(module_name) + except (ImportError, AttributeError): + return False + + return True + + module_names = [] + for module_name in PUBLIC_MODULES: + if not check_importable(module_name): + module_names.append(module_name) + + if module_names: + raise AssertionError("Modules in the public API that cannot be " + "imported: {}".format(module_names)) + + for module_name in PUBLIC_ALIASED_MODULES: + try: + eval(module_name) + except AttributeError: + module_names.append(module_name) + + if module_names: + raise AssertionError("Modules in the public API that were not " + "found: {}".format(module_names)) + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', category=DeprecationWarning) + warnings.filterwarnings('always', category=ImportWarning) + for module_name in PRIVATE_BUT_PRESENT_MODULES: + if not check_importable(module_name): + module_names.append(module_name) + + if module_names: + raise AssertionError("Modules that are not really public but looked " + "public and can not be imported: " + "{}".format(module_names)) diff --git a/MLPY/Lib/site-packages/numpy/tests/test_reloading.py b/MLPY/Lib/site-packages/numpy/tests/test_reloading.py new file mode 100644 index 0000000000000000000000000000000000000000..e795a2cadb4e3e980cb60463fe3e5c66b89ac5e1 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/tests/test_reloading.py @@ -0,0 +1,61 @@ +from numpy.testing import assert_raises, assert_warns, assert_, assert_equal +from numpy.compat import pickle + +import sys +import subprocess +import textwrap +from importlib import reload + + +def test_numpy_reloading(): + # gh-7844. Also check that relevant globals retain their identity. + import numpy as np + import numpy._globals + + _NoValue = np._NoValue + VisibleDeprecationWarning = np.VisibleDeprecationWarning + ModuleDeprecationWarning = np.ModuleDeprecationWarning + + with assert_warns(UserWarning): + reload(np) + assert_(_NoValue is np._NoValue) + assert_(ModuleDeprecationWarning is np.ModuleDeprecationWarning) + assert_(VisibleDeprecationWarning is np.VisibleDeprecationWarning) + + assert_raises(RuntimeError, reload, numpy._globals) + with assert_warns(UserWarning): + reload(np) + assert_(_NoValue is np._NoValue) + assert_(ModuleDeprecationWarning is np.ModuleDeprecationWarning) + assert_(VisibleDeprecationWarning is np.VisibleDeprecationWarning) + +def test_novalue(): + import numpy as np + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + assert_equal(repr(np._NoValue), '') + assert_(pickle.loads(pickle.dumps(np._NoValue, + protocol=proto)) is np._NoValue) + + +def test_full_reimport(): + """At the time of writing this, it is *not* truly supported, but + apparently enough users rely on it, for it to be an annoying change + when it started failing previously. + """ + # Test within a new process, to ensure that we do not mess with the + # global state during the test run (could lead to cryptic test failures). + # This is generally unsafe, especially, since we also reload the C-modules. 
+ code = textwrap.dedent(r""" + import sys + from pytest import warns + import numpy as np + + for k in list(sys.modules.keys()): + if "numpy" in k: + del sys.modules[k] + + with warns(UserWarning): + import numpy as np + """) + p = subprocess.run([sys.executable, '-c', code]) + diff --git a/MLPY/Lib/site-packages/numpy/tests/test_scripts.py b/MLPY/Lib/site-packages/numpy/tests/test_scripts.py new file mode 100644 index 0000000000000000000000000000000000000000..3d4605d33fe8350c3c85c1fb04eb8096ce116d7f --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/tests/test_scripts.py @@ -0,0 +1,46 @@ +""" Test scripts + +Test that we can run executable scripts that have been installed with numpy. +""" +import sys +import os +import pytest +from os.path import join as pathjoin, isfile, dirname +import subprocess + +import numpy as np +from numpy.testing import assert_equal + +is_inplace = isfile(pathjoin(dirname(np.__file__), '..', 'setup.py')) + + +def find_f2py_commands(): + if sys.platform == 'win32': + exe_dir = dirname(sys.executable) + if exe_dir.endswith('Scripts'): # virtualenv + return [os.path.join(exe_dir, 'f2py')] + else: + return [os.path.join(exe_dir, "Scripts", 'f2py')] + else: + # Three scripts are installed in Unix-like systems: + # 'f2py', 'f2py{major}', and 'f2py{major.minor}'. For example, + # if installed with python3.7 the scripts would be named + # 'f2py', 'f2py3', and 'f2py3.7'. + version = sys.version_info + major = str(version.major) + minor = str(version.minor) + return ['f2py', 'f2py' + major, 'f2py' + major + '.' + minor] + + +@pytest.mark.skipif(is_inplace, reason="Cannot test f2py command inplace") +@pytest.mark.xfail(reason="Test is unreliable") +@pytest.mark.parametrize('f2py_cmd', find_f2py_commands()) +def test_f2py(f2py_cmd): + # test that we can run f2py script + stdout = subprocess.check_output([f2py_cmd, '-v']) + assert_equal(stdout.strip(), np.__version__.encode('ascii')) + + +def test_pep338(): + stdout = subprocess.check_output([sys.executable, '-mnumpy.f2py', '-v']) + assert_equal(stdout.strip(), np.__version__.encode('ascii')) diff --git a/MLPY/Lib/site-packages/numpy/tests/test_warnings.py b/MLPY/Lib/site-packages/numpy/tests/test_warnings.py new file mode 100644 index 0000000000000000000000000000000000000000..cef464cae198459bc37c56e8a1c1554916e6a42a --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/tests/test_warnings.py @@ -0,0 +1,74 @@ +""" +Tests which scan for certain occurrences in the code, they may not find +all of these occurrences but should catch almost all. 
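+ +For example (an illustrative snippet, not taken from the scanned code base), +both of the following calls would be flagged - the first because it ignores +warnings wholesale, the second because it omits an explicit ``stacklevel``:: + + warnings.filterwarnings("ignore", category=DeprecationWarning) + warnings.warn("spam is deprecated", DeprecationWarning)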
+""" +import pytest + +from pathlib import Path +import ast +import tokenize +import numpy + +class ParseCall(ast.NodeVisitor): + def __init__(self): + self.ls = [] + + def visit_Attribute(self, node): + ast.NodeVisitor.generic_visit(self, node) + self.ls.append(node.attr) + + def visit_Name(self, node): + self.ls.append(node.id) + + +class FindFuncs(ast.NodeVisitor): + def __init__(self, filename): + super().__init__() + self.__filename = filename + + def visit_Call(self, node): + p = ParseCall() + p.visit(node.func) + ast.NodeVisitor.generic_visit(self, node) + + if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings': + if node.args[0].s == "ignore": + raise AssertionError( + "warnings should have an appropriate stacklevel; found in " + "{} on line {}".format(self.__filename, node.lineno)) + + if p.ls[-1] == 'warn' and ( + len(p.ls) == 1 or p.ls[-2] == 'warnings'): + + if "testing/tests/test_warnings.py" == self.__filename: + # This file + return + + # See if stacklevel exists: + if len(node.args) == 3: + return + args = {kw.arg for kw in node.keywords} + if "stacklevel" in args: + return + raise AssertionError( + "warnings should have an appropriate stacklevel; found in " + "{} on line {}".format(self.__filename, node.lineno)) + + +@pytest.mark.slow +def test_warning_calls(): + # combined "ignore" and stacklevel error + base = Path(numpy.__file__).parent + + for path in base.rglob("*.py"): + if base / "testing" in path.parents: + continue + if path == base / "__init__.py": + continue + if path == base / "random" / "__init__.py": + continue + # use tokenize to auto-detect encoding on systems where no + # default encoding is defined (e.g. LANG='C') + with tokenize.open(str(path)) as file: + tree = ast.parse(file.read()) + FindFuncs(path).visit(tree) diff --git a/MLPY/Lib/site-packages/numpy/typing/__init__.py b/MLPY/Lib/site-packages/numpy/typing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..48fcac6f2e1b6850bb22c99f41f35aa12ae4520e --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/__init__.py @@ -0,0 +1,384 @@ +""" +============================ +Typing (:mod:`numpy.typing`) +============================ + +.. warning:: + + Some of the types in this module rely on features only present in + the standard library in Python 3.8 and greater. If you want to use + these types in earlier versions of Python, you should install the + typing-extensions_ package. + +Large parts of the NumPy API have PEP-484-style type annotations. In +addition a number of type aliases are available to users, most prominently +the two below: + +- `ArrayLike`: objects that can be converted to arrays +- `DTypeLike`: objects that can be converted to dtypes + +.. _typing-extensions: https://pypi.org/project/typing-extensions/ + +Mypy plugin +----------- + +A mypy_ plugin is distributed in `numpy.typing` for managing a number of +platform-specific annotations. Its function can be split into to parts: + +* Assigning the (platform-dependent) precisions of certain `~numpy.number` subclasses, + including the likes of `~numpy.int_`, `~numpy.intp` and `~numpy.longlong`. + See the documentation on :ref:`scalar types ` for a + comprehensive overview of the affected classes. without the plugin the precision + of all relevant classes will be inferred as `~typing.Any`. +* Removing all extended-precision `~numpy.number` subclasses that are unavailable + for the platform in question. Most notable this includes the likes of + `~numpy.float128` and `~numpy.complex256`. 
Without the plugin *all* + extended-precision types will, as far as mypy is concerned, be available + to all platforms. + +To enable the plugin, one must add it to their mypy `configuration file`_: + +.. code-block:: ini + + [mypy] + plugins = numpy.typing.mypy_plugin + +.. _mypy: http://mypy-lang.org/ +.. _configuration file: https://mypy.readthedocs.io/en/stable/config_file.html + +Differences from the runtime NumPy API +-------------------------------------- + +NumPy is very flexible. Trying to describe the full range of +possibilities statically would result in types that are not very +helpful. For that reason, the typed NumPy API is often stricter than +the runtime NumPy API. This section describes some notable +differences. + +ArrayLike +~~~~~~~~~ + +The `ArrayLike` type tries to avoid creating object arrays. For +example, + +.. code-block:: python + + >>> np.array(x**2 for x in range(10)) + array(<generator object <genexpr> at ...>, dtype=object) + +is valid NumPy code which will create a 0-dimensional object +array. Type checkers will, however, complain about the above example +when using the NumPy types. If you really intended to do the above, then +you can either use a ``# type: ignore`` comment: + +.. code-block:: python + + >>> np.array(x**2 for x in range(10)) # type: ignore + +or explicitly type the array like object as `~typing.Any`: + +.. code-block:: python + + >>> from typing import Any + >>> array_like: Any = (x**2 for x in range(10)) + >>> np.array(array_like) + array(<generator object <genexpr> at ...>, dtype=object) + +ndarray +~~~~~~~ + +It's possible to mutate the dtype of an array at runtime. For example, +the following code is valid: + +.. code-block:: python + + >>> x = np.array([1, 2]) + >>> x.dtype = np.bool_ + +This sort of mutation is not allowed by the types. Users who want to +write statically typed code should instead use the `numpy.ndarray.view` +method to create a view of the array with a different dtype. + +DTypeLike +~~~~~~~~~ + +The `DTypeLike` type tries to avoid creation of dtype objects from a +dictionary of fields, as in the example below: + +.. code-block:: python + + >>> x = np.dtype({"field1": (float, 1), "field2": (int, 3)}) + +Although this is valid NumPy code, the type checker will complain about it, +since its usage is discouraged. +Please see :ref:`Data type objects <arrays.dtypes>` + +Number precision +~~~~~~~~~~~~~~~~ + +The precision of `numpy.number` subclasses is treated as a covariant generic +parameter (see :class:`~NBitBase`), simplifying the annotating of processes +involving precision-based casting. + +.. code-block:: python + + >>> from typing import TypeVar + >>> import numpy as np + >>> import numpy.typing as npt + + >>> T = TypeVar("T", bound=npt.NBitBase) + >>> def func(a: "np.floating[T]", b: "np.floating[T]") -> "np.floating[T]": + ... ... + +Consequently, the likes of `~numpy.float16`, `~numpy.float32` and +`~numpy.float64` are still sub-types of `~numpy.floating`, but, contrary to +runtime, they're not necessarily considered as sub-classes. + +Timedelta64 +~~~~~~~~~~~ + +The `~numpy.timedelta64` class is not considered a subclass of `~numpy.signedinteger`, +the former only inheriting from `~numpy.generic` during static type checking. + +0D arrays +~~~~~~~~~ + +During runtime numpy aggressively casts any passed 0D arrays into their +corresponding `~numpy.generic` instance. Until the introduction of shape +typing (see :pep:`646`) it is unfortunately not possible to make the +necessary distinction between 0D and >0D arrays.
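+ +For example (an illustrative snippet), an operation on a 0D array already +returns a scalar at runtime: + +.. code-block:: python + + >>> import numpy as np + >>> out = np.array(1.0) * 2 # statically an ndarray, np.float64 at runtime + >>> type(out) + <class 'numpy.float64'>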
While thus not strictly +correct, all operations that can potentially perform a 0D-array -> scalar +cast are currently annotated as exclusively returning an `ndarray`. + +If it is known in advance that an operation *will* perform a +0D-array -> scalar cast, then one can consider manually remedying the +situation with either `typing.cast` or a ``# type: ignore`` comment. + +API +--- + +""" +# NOTE: The API section will be appended with additional entries +# further down in this file + +from typing import TYPE_CHECKING, List, Any + +if TYPE_CHECKING: + import sys + if sys.version_info >= (3, 8): + from typing import final + else: + from typing_extensions import final +else: + def final(f): return f + +if not TYPE_CHECKING: + __all__ = ["ArrayLike", "DTypeLike", "NBitBase", "NDArray"] +else: + # Ensure that all objects within this module are accessible while + # static type checking. This includes private ones, as we need them + # for internal use. + # + # Declare to mypy that `__all__` is a list of strings without assigning + # an explicit value + __all__: List[str] + + +@final # Disallow the creation of arbitrary `NBitBase` subclasses +class NBitBase: + """ + An object representing `numpy.number` precision during static type checking. + + Used exclusively for the purpose of static type checking, `NBitBase` + represents the base of a hierarchical set of subclasses. + Each subsequent subclass is herein used for representing a lower level + of precision, *e.g.* ``64Bit > 32Bit > 16Bit``. + + Examples + -------- + Below is a typical usage example: `NBitBase` is herein used for annotating a + function that takes a float and integer of arbitrary precision as arguments + and returns a new float of whichever precision is largest + (*e.g.* ``np.float16 + np.int64 -> np.float64``). + + .. code-block:: python + + >>> from __future__ import annotations + >>> from typing import TypeVar, Union, TYPE_CHECKING + >>> import numpy as np + >>> import numpy.typing as npt + + >>> T1 = TypeVar("T1", bound=npt.NBitBase) + >>> T2 = TypeVar("T2", bound=npt.NBitBase) + + >>> def add(a: np.floating[T1], b: np.integer[T2]) -> np.floating[Union[T1, T2]]: + ... return a + b + + >>> a = np.float16() + >>> b = np.int64() + >>> out = add(a, b) + + >>> if TYPE_CHECKING: + ... reveal_locals() + ... # note: Revealed local types are: + ... # note: a: numpy.floating[numpy.typing._16Bit*] + ... # note: b: numpy.signedinteger[numpy.typing._64Bit*] + ... # note: out: numpy.floating[numpy.typing._64Bit*] + + """ + + def __init_subclass__(cls) -> None: + allowed_names = { + "NBitBase", "_256Bit", "_128Bit", "_96Bit", "_80Bit", + "_64Bit", "_32Bit", "_16Bit", "_8Bit", + } + if cls.__name__ not in allowed_names: + raise TypeError('cannot inherit from final class "NBitBase"') + super().__init_subclass__() + + +# Silence errors about subclassing a `@final`-decorated class +class _256Bit(NBitBase): ... # type: ignore[misc] +class _128Bit(_256Bit): ... # type: ignore[misc] +class _96Bit(_128Bit): ... # type: ignore[misc] +class _80Bit(_96Bit): ... # type: ignore[misc] +class _64Bit(_80Bit): ... # type: ignore[misc] +class _32Bit(_64Bit): ... # type: ignore[misc] +class _16Bit(_32Bit): ... # type: ignore[misc] +class _8Bit(_16Bit): ...
# type: ignore[misc] + +from ._nbit import ( + _NBitByte, + _NBitShort, + _NBitIntC, + _NBitIntP, + _NBitInt, + _NBitLongLong, + _NBitHalf, + _NBitSingle, + _NBitDouble, + _NBitLongDouble, +) +from ._char_codes import ( + _BoolCodes, + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _Float16Codes, + _Float32Codes, + _Float64Codes, + _Complex64Codes, + _Complex128Codes, + _ByteCodes, + _ShortCodes, + _IntCCodes, + _IntPCodes, + _IntCodes, + _LongLongCodes, + _UByteCodes, + _UShortCodes, + _UIntCCodes, + _UIntPCodes, + _UIntCodes, + _ULongLongCodes, + _HalfCodes, + _SingleCodes, + _DoubleCodes, + _LongDoubleCodes, + _CSingleCodes, + _CDoubleCodes, + _CLongDoubleCodes, + _DT64Codes, + _TD64Codes, + _StrCodes, + _BytesCodes, + _VoidCodes, + _ObjectCodes, +) +from ._scalars import ( + _CharLike_co, + _BoolLike_co, + _UIntLike_co, + _IntLike_co, + _FloatLike_co, + _ComplexLike_co, + _TD64Like_co, + _NumberLike_co, + _ScalarLike_co, + _VoidLike_co, +) +from ._shape import _Shape, _ShapeLike +from ._dtype_like import ( + DTypeLike as DTypeLike, + _SupportsDType, + _VoidDTypeLike, + _DTypeLikeBool, + _DTypeLikeUInt, + _DTypeLikeInt, + _DTypeLikeFloat, + _DTypeLikeComplex, + _DTypeLikeTD64, + _DTypeLikeDT64, + _DTypeLikeObject, + _DTypeLikeVoid, + _DTypeLikeStr, + _DTypeLikeBytes, + _DTypeLikeComplex_co, +) +from ._array_like import ( + ArrayLike as ArrayLike, + _ArrayLike, + _NestedSequence, + _RecursiveSequence, + _SupportsArray, + _ArrayLikeInt, + _ArrayLikeBool_co, + _ArrayLikeUInt_co, + _ArrayLikeInt_co, + _ArrayLikeFloat_co, + _ArrayLikeComplex_co, + _ArrayLikeNumber_co, + _ArrayLikeTD64_co, + _ArrayLikeDT64_co, + _ArrayLikeObject_co, + _ArrayLikeVoid_co, + _ArrayLikeStr_co, + _ArrayLikeBytes_co, +) +from ._generic_alias import ( + NDArray as NDArray, + _GenericAlias, +) + +if TYPE_CHECKING: + from ._ufunc import ( + _UFunc_Nin1_Nout1, + _UFunc_Nin2_Nout1, + _UFunc_Nin1_Nout2, + _UFunc_Nin2_Nout2, + _GUFunc_Nin2_Nout1, + ) +else: + _UFunc_Nin1_Nout1 = Any + _UFunc_Nin2_Nout1 = Any + _UFunc_Nin1_Nout2 = Any + _UFunc_Nin2_Nout2 = Any + _GUFunc_Nin2_Nout1 = Any + +# Clean up the namespace +del TYPE_CHECKING, final, List, Any + +if __doc__ is not None: + from ._add_docstring import _docstrings + __doc__ += _docstrings + __doc__ += '\n.. 
autoclass:: numpy.typing.NBitBase\n' + del _docstrings + +from numpy._pytesttester import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/MLPY/Lib/site-packages/numpy/typing/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef38a8ebbbac3ff17d70aa2e90ce542446ae4a00 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/__pycache__/_add_docstring.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/__pycache__/_add_docstring.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..026fb586270e27542f5f2a539bcc52b4c86c54df Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/__pycache__/_add_docstring.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/__pycache__/_array_like.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/__pycache__/_array_like.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..112933e39b7378f6477221bed53b17a7659a449a Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/__pycache__/_array_like.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/__pycache__/_callable.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/__pycache__/_callable.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f846913f8cf5a306bcedd1abb32bf6ad24c62ed3 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/__pycache__/_callable.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/__pycache__/_char_codes.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/__pycache__/_char_codes.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ae7bdacf060fac1759b7c4f628444a72e9b81c8 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/__pycache__/_char_codes.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/__pycache__/_dtype_like.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/__pycache__/_dtype_like.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf05e4e789ce785cbf3dbd6bef7b9793d0aeba89 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/__pycache__/_dtype_like.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/__pycache__/_extended_precision.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/__pycache__/_extended_precision.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e62ebee289dffda7ba32a017302cd34da4b7e98 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/__pycache__/_extended_precision.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/__pycache__/_generic_alias.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/__pycache__/_generic_alias.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b04bbb4a8de4a92387ed3cdd0dd019f64784072 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/__pycache__/_generic_alias.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/__pycache__/_nbit.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/__pycache__/_nbit.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..00eb5b964ed364c7d91e7b424d3fe59961ca712e Binary files /dev/null 
and b/MLPY/Lib/site-packages/numpy/typing/__pycache__/_nbit.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/__pycache__/_scalars.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/__pycache__/_scalars.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0318ed7c2777992ce4f91bb3c487de0258c276fc Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/__pycache__/_scalars.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/__pycache__/_shape.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/__pycache__/_shape.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c7a43337e632bbd4a34dd40b44faa34e7e4463c Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/__pycache__/_shape.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/__pycache__/mypy_plugin.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/__pycache__/mypy_plugin.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..78ea1ea66fbcaa79d44f3da289b7164f38bcaa06 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/__pycache__/mypy_plugin.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/__pycache__/setup.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/__pycache__/setup.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a6feb23debae88150d731441acbb44aee188bb7 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/__pycache__/setup.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/_add_docstring.py b/MLPY/Lib/site-packages/numpy/typing/_add_docstring.py new file mode 100644 index 0000000000000000000000000000000000000000..aaac97c234cfb79a4fce2c5fad9ade4182d908a9 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/_add_docstring.py @@ -0,0 +1,143 @@ +"""A module for creating docstrings for sphinx ``data`` domains.""" + +import re +import textwrap + +from ._generic_alias import NDArray + +_docstrings_list = [] + + +def add_newdoc(name: str, value: str, doc: str) -> None: + """Append ``_docstrings_list`` with a docstring for `name`. + + Parameters + ---------- + name : str + The name of the object. + value : str + A string-representation of the object. + doc : str + The docstring of the object. + + """ + _docstrings_list.append((name, value, doc)) + + +def _parse_docstrings() -> str: + """Convert all docstrings in ``_docstrings_list`` into a single + sphinx-legible text block. + + """ + type_list_ret = [] + for name, value, doc in _docstrings_list: + s = textwrap.dedent(doc).replace("\n", "\n ") + + # Replace sections by rubrics + lines = s.split("\n") + new_lines = [] + indent = "" + for line in lines: + m = re.match(r'^(\s+)[-=]+\s*$', line) + if m and new_lines: + prev = textwrap.dedent(new_lines.pop()) + if prev == "Examples": + indent = "" + new_lines.append(f'{m.group(1)}.. rubric:: {prev}') + else: + indent = 4 * " " + new_lines.append(f'{m.group(1)}.. admonition:: {prev}') + new_lines.append("") + else: + new_lines.append(f"{indent}{line}") + s = "\n".join(new_lines) + + # Done. + type_list_ret.append(f""".. data:: {name}\n :value: {value}\n {s}""") + return "\n".join(type_list_ret) + + +add_newdoc('ArrayLike', 'typing.Union[...]', + """ + A `~typing.Union` representing objects that can be coerced into an `~numpy.ndarray`. + + Among others this includes the likes of: + + * Scalars. + * (Nested) sequences. 
+ * Objects implementing the `~class.__array__` protocol. + + See Also + -------- + :term:`array_like`: + Any scalar or sequence that can be interpreted as an ndarray. + + Examples + -------- + .. code-block:: python + + >>> import numpy as np + >>> import numpy.typing as npt + + >>> def as_array(a: npt.ArrayLike) -> np.ndarray: + ... return np.array(a) + + """) + +add_newdoc('DTypeLike', 'typing.Union[...]', + """ + A `~typing.Union` representing objects that can be coerced into a `~numpy.dtype`. + + Among others this includes the likes of: + + * :class:`type` objects. + * Character codes or the names of :class:`type` objects. + * Objects with the ``.dtype`` attribute. + + See Also + -------- + :ref:`Specifying and constructing data types <arrays.dtypes.constructing>` + A comprehensive overview of all objects that can be coerced into data types. + + Examples + -------- + .. code-block:: python + + >>> import numpy as np + >>> import numpy.typing as npt + + >>> def as_dtype(d: npt.DTypeLike) -> np.dtype: + ... return np.dtype(d) + + """) + +add_newdoc('NDArray', repr(NDArray), + """ + A :term:`generic <generic type>` version of + `np.ndarray[Any, np.dtype[+ScalarType]] <numpy.ndarray>`. + + Can be used during runtime for typing arrays with a given dtype + and unspecified shape. + + Examples + -------- + .. code-block:: python + + >>> import numpy as np + >>> import numpy.typing as npt + + >>> print(npt.NDArray) + numpy.ndarray[typing.Any, numpy.dtype[+ScalarType]] + + >>> print(npt.NDArray[np.float64]) + numpy.ndarray[typing.Any, numpy.dtype[numpy.float64]] + + >>> NDArrayInt = npt.NDArray[np.int_] + >>> a: NDArrayInt = np.arange(10) + + >>> def func(a: npt.ArrayLike) -> npt.NDArray[Any]: + ... return np.array(a) + + """) + +_docstrings = _parse_docstrings() diff --git a/MLPY/Lib/site-packages/numpy/typing/_array_like.py b/MLPY/Lib/site-packages/numpy/typing/_array_like.py new file mode 100644 index 0000000000000000000000000000000000000000..f2513c6e172b9b8490da96aa41027336c415cb5e --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/_array_like.py @@ -0,0 +1,140 @@ +from __future__ import annotations + +import sys +from typing import ( + Any, + overload, + Sequence, + TYPE_CHECKING, + Union, + TypeVar, + Generic, +) + +from numpy import ( + ndarray, + dtype, + generic, + bool_, + unsignedinteger, + integer, + floating, + complexfloating, + number, + timedelta64, + datetime64, + object_, + void, + str_, + bytes_, +) +from ._dtype_like import DTypeLike + +if sys.version_info >= (3, 8): + from typing import Protocol + HAVE_PROTOCOL = True +else: + try: + from typing_extensions import Protocol + except ImportError: + HAVE_PROTOCOL = False + else: + HAVE_PROTOCOL = True + +_T = TypeVar("_T") +_ScalarType = TypeVar("_ScalarType", bound=generic) +_DType = TypeVar("_DType", bound="dtype[Any]") +_DType_co = TypeVar("_DType_co", covariant=True, bound="dtype[Any]") + +if TYPE_CHECKING or HAVE_PROTOCOL: + # The `_SupportsArray` protocol only cares about the default dtype + # (i.e. `dtype=None` or no `dtype` parameter at all) of the to-be returned + # array. + # Concrete implementations of the protocol are responsible for adding + # any and all remaining overloads + class _SupportsArray(Protocol[_DType_co]): + def __array__(self) -> ndarray[Any, _DType_co]: ...
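+# Fallback for when `typing.Protocol` is unavailable (Python < 3.8 without +# typing-extensions installed): a plain `Generic` placeholder that keeps +# subscripting such as `_SupportsArray["dtype"]` working at runtime.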
+else: + class _SupportsArray(Generic[_DType_co]): + pass + +# TODO: Wait for support for recursive types +_NestedSequence = Union[ + _T, + Sequence[_T], + Sequence[Sequence[_T]], + Sequence[Sequence[Sequence[_T]]], + Sequence[Sequence[Sequence[Sequence[_T]]]], +] +_RecursiveSequence = Sequence[Sequence[Sequence[Sequence[Sequence[Any]]]]] + +# A union representing array-like objects; consists of two typevars: +# One representing types that can be parametrized w.r.t. `np.dtype` +# and another one for the rest +_ArrayLike = Union[ + _NestedSequence[_SupportsArray[_DType]], + _NestedSequence[_T], +] + +# TODO: support buffer protocols once +# +# https://bugs.python.org/issue27501 +# +# is resolved. See also the mypy issue: +# +# https://github.com/python/typing/issues/593 +ArrayLike = Union[ + _RecursiveSequence, + _ArrayLike[ + dtype, + Union[bool, int, float, complex, str, bytes] + ], +] + +# `ArrayLike_co`: array-like objects that can be coerced into `X` +# given the casting rules `same_kind` +_ArrayLikeBool_co = _ArrayLike[ + "dtype[bool_]", + bool, +] +_ArrayLikeUInt_co = _ArrayLike[ + "dtype[Union[bool_, unsignedinteger[Any]]]", + bool, +] +_ArrayLikeInt_co = _ArrayLike[ + "dtype[Union[bool_, integer[Any]]]", + Union[bool, int], +] +_ArrayLikeFloat_co = _ArrayLike[ + "dtype[Union[bool_, integer[Any], floating[Any]]]", + Union[bool, int, float], +] +_ArrayLikeComplex_co = _ArrayLike[ + "dtype[Union[bool_, integer[Any], floating[Any], complexfloating[Any, Any]]]", + Union[bool, int, float, complex], +] +_ArrayLikeNumber_co = _ArrayLike[ + "dtype[Union[bool_, number[Any]]]", + Union[bool, int, float, complex], +] +_ArrayLikeTD64_co = _ArrayLike[ + "dtype[Union[bool_, integer[Any], timedelta64]]", + Union[bool, int], +] +_ArrayLikeDT64_co = _NestedSequence[_SupportsArray["dtype[datetime64]"]] +_ArrayLikeObject_co = _NestedSequence[_SupportsArray["dtype[object_]"]] + +_ArrayLikeVoid_co = _NestedSequence[_SupportsArray["dtype[void]"]] +_ArrayLikeStr_co = _ArrayLike[ + "dtype[str_]", + str, +] +_ArrayLikeBytes_co = _ArrayLike[ + "dtype[bytes_]", + bytes, +] + +_ArrayLikeInt = _ArrayLike[ + "dtype[integer[Any]]", + int, +] diff --git a/MLPY/Lib/site-packages/numpy/typing/_callable.py b/MLPY/Lib/site-packages/numpy/typing/_callable.py new file mode 100644 index 0000000000000000000000000000000000000000..50a9b66882d7237cd28d6f30785a6fc713f82fbe --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/_callable.py @@ -0,0 +1,364 @@ +""" +A module with various ``typing.Protocol`` subclasses that implement +the ``__call__`` magic method. + +See the `Mypy documentation`_ on protocols for more details. + +.. _`Mypy documentation`: https://mypy.readthedocs.io/en/stable/protocols.html#callback-protocols + +""" + +from __future__ import annotations + +import sys +from typing import ( + Union, + TypeVar, + overload, + Any, + Tuple, + NoReturn, + TYPE_CHECKING, +) + +from numpy import ( + ndarray, + dtype, + generic, + bool_, + timedelta64, + number, + integer, + unsignedinteger, + signedinteger, + int8, + int_, + floating, + float64, + complexfloating, + complex128, +) +from ._nbit import _NBitInt, _NBitDouble +from ._scalars import ( + _BoolLike_co, + _IntLike_co, + _FloatLike_co, + _ComplexLike_co, + _NumberLike_co, +) +from . 
import NBitBase +from ._array_like import ArrayLike +from ._generic_alias import NDArray + +if sys.version_info >= (3, 8): + from typing import Protocol + HAVE_PROTOCOL = True +else: + try: + from typing_extensions import Protocol + except ImportError: + HAVE_PROTOCOL = False + else: + HAVE_PROTOCOL = True + +if TYPE_CHECKING or HAVE_PROTOCOL: + _T1 = TypeVar("_T1") + _T2 = TypeVar("_T2") + _2Tuple = Tuple[_T1, _T1] + + _NBit1 = TypeVar("_NBit1", bound=NBitBase) + _NBit2 = TypeVar("_NBit2", bound=NBitBase) + + _IntType = TypeVar("_IntType", bound=integer) + _FloatType = TypeVar("_FloatType", bound=floating) + _NumberType = TypeVar("_NumberType", bound=number) + _NumberType_co = TypeVar("_NumberType_co", covariant=True, bound=number) + _GenericType_co = TypeVar("_GenericType_co", covariant=True, bound=generic) + + class _BoolOp(Protocol[_GenericType_co]): + @overload + def __call__(self, __other: _BoolLike_co) -> _GenericType_co: ... + @overload # platform dependent + def __call__(self, __other: int) -> int_: ... + @overload + def __call__(self, __other: float) -> float64: ... + @overload + def __call__(self, __other: complex) -> complex128: ... + @overload + def __call__(self, __other: _NumberType) -> _NumberType: ... + + class _BoolBitOp(Protocol[_GenericType_co]): + @overload + def __call__(self, __other: _BoolLike_co) -> _GenericType_co: ... + @overload # platform dependent + def __call__(self, __other: int) -> int_: ... + @overload + def __call__(self, __other: _IntType) -> _IntType: ... + + class _BoolSub(Protocol): + # Note that `__other: bool_` is absent here + @overload + def __call__(self, __other: bool) -> NoReturn: ... + @overload # platform dependent + def __call__(self, __other: int) -> int_: ... + @overload + def __call__(self, __other: float) -> float64: ... + @overload + def __call__(self, __other: complex) -> complex128: ... + @overload + def __call__(self, __other: _NumberType) -> _NumberType: ... + + class _BoolTrueDiv(Protocol): + @overload + def __call__(self, __other: Union[float, _IntLike_co]) -> float64: ... + @overload + def __call__(self, __other: complex) -> complex128: ... + @overload + def __call__(self, __other: _NumberType) -> _NumberType: ... + + class _BoolMod(Protocol): + @overload + def __call__(self, __other: _BoolLike_co) -> int8: ... + @overload # platform dependent + def __call__(self, __other: int) -> int_: ... + @overload + def __call__(self, __other: float) -> float64: ... + @overload + def __call__(self, __other: _IntType) -> _IntType: ... + @overload + def __call__(self, __other: _FloatType) -> _FloatType: ... + + class _BoolDivMod(Protocol): + @overload + def __call__(self, __other: _BoolLike_co) -> _2Tuple[int8]: ... + @overload # platform dependent + def __call__(self, __other: int) -> _2Tuple[int_]: ... + @overload + def __call__(self, __other: float) -> _2Tuple[floating[Union[_NBit1, _NBitDouble]]]: ... + @overload + def __call__(self, __other: _IntType) -> _2Tuple[_IntType]: ... + @overload + def __call__(self, __other: _FloatType) -> _2Tuple[_FloatType]: ... + + class _TD64Div(Protocol[_NumberType_co]): + @overload + def __call__(self, __other: timedelta64) -> _NumberType_co: ... + @overload + def __call__(self, __other: _BoolLike_co) -> NoReturn: ... + @overload + def __call__(self, __other: _FloatLike_co) -> timedelta64: ... + + class _IntTrueDiv(Protocol[_NBit1]): + @overload + def __call__(self, __other: bool) -> floating[_NBit1]: ... + @overload + def __call__(self, __other: int) -> floating[Union[_NBit1, _NBitInt]]: ... 
+ @overload + def __call__(self, __other: float) -> floating[Union[_NBit1, _NBitDouble]]: ... + @overload + def __call__( + self, __other: complex + ) -> complexfloating[Union[_NBit1, _NBitDouble], Union[_NBit1, _NBitDouble]]: ... + @overload + def __call__(self, __other: integer[_NBit2]) -> floating[Union[_NBit1, _NBit2]]: ... + + class _UnsignedIntOp(Protocol[_NBit1]): + # NOTE: `uint64 + signedinteger -> float64` + @overload + def __call__(self, __other: bool) -> unsignedinteger[_NBit1]: ... + @overload + def __call__( + self, __other: Union[int, signedinteger[Any]] + ) -> Any: ... + @overload + def __call__(self, __other: float) -> floating[Union[_NBit1, _NBitDouble]]: ... + @overload + def __call__( + self, __other: complex + ) -> complexfloating[Union[_NBit1, _NBitDouble], Union[_NBit1, _NBitDouble]]: ... + @overload + def __call__( + self, __other: unsignedinteger[_NBit2] + ) -> unsignedinteger[Union[_NBit1, _NBit2]]: ... + + class _UnsignedIntBitOp(Protocol[_NBit1]): + @overload + def __call__(self, __other: bool) -> unsignedinteger[_NBit1]: ... + @overload + def __call__(self, __other: int) -> signedinteger[Any]: ... + @overload + def __call__(self, __other: signedinteger[Any]) -> signedinteger[Any]: ... + @overload + def __call__( + self, __other: unsignedinteger[_NBit2] + ) -> unsignedinteger[Union[_NBit1, _NBit2]]: ... + + class _UnsignedIntMod(Protocol[_NBit1]): + @overload + def __call__(self, __other: bool) -> unsignedinteger[_NBit1]: ... + @overload + def __call__( + self, __other: Union[int, signedinteger[Any]] + ) -> Any: ... + @overload + def __call__(self, __other: float) -> floating[Union[_NBit1, _NBitDouble]]: ... + @overload + def __call__( + self, __other: unsignedinteger[_NBit2] + ) -> unsignedinteger[Union[_NBit1, _NBit2]]: ... + + class _UnsignedIntDivMod(Protocol[_NBit1]): + @overload + def __call__(self, __other: bool) -> _2Tuple[signedinteger[_NBit1]]: ... + @overload + def __call__( + self, __other: Union[int, signedinteger[Any]] + ) -> _2Tuple[Any]: ... + @overload + def __call__(self, __other: float) -> _2Tuple[floating[Union[_NBit1, _NBitDouble]]]: ... + @overload + def __call__( + self, __other: unsignedinteger[_NBit2] + ) -> _2Tuple[unsignedinteger[Union[_NBit1, _NBit2]]]: ... + + class _SignedIntOp(Protocol[_NBit1]): + @overload + def __call__(self, __other: bool) -> signedinteger[_NBit1]: ... + @overload + def __call__(self, __other: int) -> signedinteger[Union[_NBit1, _NBitInt]]: ... + @overload + def __call__(self, __other: float) -> floating[Union[_NBit1, _NBitDouble]]: ... + @overload + def __call__( + self, __other: complex + ) -> complexfloating[Union[_NBit1, _NBitDouble], Union[_NBit1, _NBitDouble]]: ... + @overload + def __call__( + self, __other: signedinteger[_NBit2] + ) -> signedinteger[Union[_NBit1, _NBit2]]: ... + + class _SignedIntBitOp(Protocol[_NBit1]): + @overload + def __call__(self, __other: bool) -> signedinteger[_NBit1]: ... + @overload + def __call__(self, __other: int) -> signedinteger[Union[_NBit1, _NBitInt]]: ... + @overload + def __call__( + self, __other: signedinteger[_NBit2] + ) -> signedinteger[Union[_NBit1, _NBit2]]: ... + + class _SignedIntMod(Protocol[_NBit1]): + @overload + def __call__(self, __other: bool) -> signedinteger[_NBit1]: ... + @overload + def __call__(self, __other: int) -> signedinteger[Union[_NBit1, _NBitInt]]: ... + @overload + def __call__(self, __other: float) -> floating[Union[_NBit1, _NBitDouble]]: ... 
+ @overload + def __call__( + self, __other: signedinteger[_NBit2] + ) -> signedinteger[Union[_NBit1, _NBit2]]: ... + + class _SignedIntDivMod(Protocol[_NBit1]): + @overload + def __call__(self, __other: bool) -> _2Tuple[signedinteger[_NBit1]]: ... + @overload + def __call__(self, __other: int) -> _2Tuple[signedinteger[Union[_NBit1, _NBitInt]]]: ... + @overload + def __call__(self, __other: float) -> _2Tuple[floating[Union[_NBit1, _NBitDouble]]]: ... + @overload + def __call__( + self, __other: signedinteger[_NBit2] + ) -> _2Tuple[signedinteger[Union[_NBit1, _NBit2]]]: ... + + class _FloatOp(Protocol[_NBit1]): + @overload + def __call__(self, __other: bool) -> floating[_NBit1]: ... + @overload + def __call__(self, __other: int) -> floating[Union[_NBit1, _NBitInt]]: ... + @overload + def __call__(self, __other: float) -> floating[Union[_NBit1, _NBitDouble]]: ... + @overload + def __call__( + self, __other: complex + ) -> complexfloating[Union[_NBit1, _NBitDouble], Union[_NBit1, _NBitDouble]]: ... + @overload + def __call__( + self, __other: Union[integer[_NBit2], floating[_NBit2]] + ) -> floating[Union[_NBit1, _NBit2]]: ... + + class _FloatMod(Protocol[_NBit1]): + @overload + def __call__(self, __other: bool) -> floating[_NBit1]: ... + @overload + def __call__(self, __other: int) -> floating[Union[_NBit1, _NBitInt]]: ... + @overload + def __call__(self, __other: float) -> floating[Union[_NBit1, _NBitDouble]]: ... + @overload + def __call__( + self, __other: Union[integer[_NBit2], floating[_NBit2]] + ) -> floating[Union[_NBit1, _NBit2]]: ... + + class _FloatDivMod(Protocol[_NBit1]): + @overload + def __call__(self, __other: bool) -> _2Tuple[floating[_NBit1]]: ... + @overload + def __call__(self, __other: int) -> _2Tuple[floating[Union[_NBit1, _NBitInt]]]: ... + @overload + def __call__(self, __other: float) -> _2Tuple[floating[Union[_NBit1, _NBitDouble]]]: ... + @overload + def __call__( + self, __other: Union[integer[_NBit2], floating[_NBit2]] + ) -> _2Tuple[floating[Union[_NBit1, _NBit2]]]: ... + + class _ComplexOp(Protocol[_NBit1]): + @overload + def __call__(self, __other: bool) -> complexfloating[_NBit1, _NBit1]: ... + @overload + def __call__(self, __other: int) -> complexfloating[Union[_NBit1, _NBitInt], Union[_NBit1, _NBitInt]]: ... + @overload + def __call__( + self, __other: Union[float, complex] + ) -> complexfloating[Union[_NBit1, _NBitDouble], Union[_NBit1, _NBitDouble]]: ... + @overload + def __call__( + self, + __other: Union[ + integer[_NBit2], + floating[_NBit2], + complexfloating[_NBit2, _NBit2], + ] + ) -> complexfloating[Union[_NBit1, _NBit2], Union[_NBit1, _NBit2]]: ... + + class _NumberOp(Protocol): + def __call__(self, __other: _NumberLike_co) -> Any: ... + + class _ComparisonOp(Protocol[_T1, _T2]): + @overload + def __call__(self, __other: _T1) -> bool_: ... + @overload + def __call__(self, __other: _T2) -> NDArray[bool_]: ... 
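+# NOTE: when `typing.Protocol` could not be imported, every callback protocol +# above is replaced wholesale by `Any`; annotations then lose precision but +# the module remains importable.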
+ +else: + _BoolOp = Any + _BoolBitOp = Any + _BoolSub = Any + _BoolTrueDiv = Any + _BoolMod = Any + _BoolDivMod = Any + _TD64Div = Any + _IntTrueDiv = Any + _UnsignedIntOp = Any + _UnsignedIntBitOp = Any + _UnsignedIntMod = Any + _UnsignedIntDivMod = Any + _SignedIntOp = Any + _SignedIntBitOp = Any + _SignedIntMod = Any + _SignedIntDivMod = Any + _FloatOp = Any + _FloatMod = Any + _FloatDivMod = Any + _ComplexOp = Any + _NumberOp = Any + _ComparisonOp = Any diff --git a/MLPY/Lib/site-packages/numpy/typing/_char_codes.py b/MLPY/Lib/site-packages/numpy/typing/_char_codes.py new file mode 100644 index 0000000000000000000000000000000000000000..1529f01af4ba9b07cd10f0d6a4c9080ac918edcd --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/_char_codes.py @@ -0,0 +1,175 @@ +import sys +from typing import Any, TYPE_CHECKING + +if sys.version_info >= (3, 8): + from typing import Literal + HAVE_LITERAL = True +else: + try: + from typing_extensions import Literal + except ImportError: + HAVE_LITERAL = False + else: + HAVE_LITERAL = True + +if TYPE_CHECKING or HAVE_LITERAL: + _BoolCodes = Literal["?", "=?", "<?", ">?", "bool", "bool_", "bool8"] + + _UInt8Codes = Literal["uint8", "u1", "=u1", "<u1", ">u1"] + _UInt16Codes = Literal["uint16", "u2", "=u2", "<u2", ">u2"] + _UInt32Codes = Literal["uint32", "u4", "=u4", "<u4", ">u4"] + _UInt64Codes = Literal["uint64", "u8", "=u8", "<u8", ">u8"] + + _Int8Codes = Literal["int8", "i1", "=i1", "<i1", ">i1"] + _Int16Codes = Literal["int16", "i2", "=i2", "<i2", ">i2"] + _Int32Codes = Literal["int32", "i4", "=i4", "<i4", ">i4"] + _Int64Codes = Literal["int64", "i8", "=i8", "<i8", ">i8"] + + _Float16Codes = Literal["float16", "f2", "=f2", "<f2", ">f2"] + _Float32Codes = Literal["float32", "f4", "=f4", "<f4", ">f4"] + _Float64Codes = Literal["float64", "f8", "=f8", "<f8", ">f8"] + + _Complex64Codes = Literal["complex64", "c8", "=c8", "<c8", ">c8"] + _Complex128Codes = Literal["complex128", "c16", "=c16", "<c16", ">c16"] + + _ByteCodes = Literal["byte", "b", "=b", "<b", ">b"] + _ShortCodes = Literal["short", "h", "=h", "<h", ">h"] + _IntCCodes = Literal["intc", "i", "=i", "<i", ">i"] + _IntPCodes = Literal["intp", "int0", "p", "=p", "<p", ">p"] + _IntCodes = Literal["long", "int", "int_", "l", "=l", "<l", ">l"] + _LongLongCodes = Literal["longlong", "q", "=q", "<q", ">q"] + + _UByteCodes = Literal["ubyte", "B", "=B", "<B", ">B"] + _UShortCodes = Literal["ushort", "H", "=H", "<H", ">H"] + _UIntCCodes = Literal["uintc", "I", "=I", "<I", ">I"] + _UIntPCodes = Literal["uintp", "uint0", "P", "=P", "<P", ">P"] + _UIntCodes = Literal["uint", "L", "=L", "<L", ">L"] + _ULongLongCodes = Literal["ulonglong", "Q", "=Q", "<Q", ">Q"] + + _HalfCodes = Literal["half", "e", "=e", "<e", ">e"] + _SingleCodes = Literal["single", "f", "=f", "<f", ">f"] + _DoubleCodes = Literal["double", "float", "float_", "d", "=d", "<d", ">d"] + _LongDoubleCodes = Literal["longdouble", "longfloat", "g", "=g", "<g", ">g"] + + _CSingleCodes = Literal["csingle", "singlecomplex", "F", "=F", "<F", ">F"] + _CDoubleCodes = Literal["cdouble", "complex", "complex_", "cfloat", "D", "=D", "<D", ">D"] + _CLongDoubleCodes = Literal["clongdouble", "clongfloat", "longcomplex", "G", "=G", "<G", ">G"] + + _StrCodes = Literal["str", "str_", "str0", "unicode", "unicode_", "U", "=U", "<U", ">U"] + _BytesCodes = Literal["bytes", "bytes_", "bytes0", "S", "=S", "<S", ">S"] + _VoidCodes = Literal["void", "void0", "V", "=V", "<V", ">V"] + _ObjectCodes = Literal["object", "object_", "O", "=O", "<O", ">O"] + + _DT64Codes = Literal[ + "datetime64", "=datetime64", "<datetime64", ">datetime64", + "datetime64[Y]", "=datetime64[Y]", "<datetime64[Y]", ">datetime64[Y]", + "datetime64[M]", "=datetime64[M]", "<datetime64[M]", ">datetime64[M]", + "datetime64[W]", "=datetime64[W]", "<datetime64[W]", ">datetime64[W]", + "datetime64[D]", "=datetime64[D]", "<datetime64[D]", ">datetime64[D]", +
"datetime64[h]", "=datetime64[h]", "datetime64[h]", + "datetime64[m]", "=datetime64[m]", "datetime64[m]", + "datetime64[s]", "=datetime64[s]", "datetime64[s]", + "datetime64[ms]", "=datetime64[ms]", "datetime64[ms]", + "datetime64[us]", "=datetime64[us]", "datetime64[us]", + "datetime64[ns]", "=datetime64[ns]", "datetime64[ns]", + "datetime64[ps]", "=datetime64[ps]", "datetime64[ps]", + "datetime64[fs]", "=datetime64[fs]", "datetime64[fs]", + "datetime64[as]", "=datetime64[as]", "datetime64[as]", + "M", "=M", "M", + "M8", "=M8", "M8", + "M8[Y]", "=M8[Y]", "M8[Y]", + "M8[M]", "=M8[M]", "M8[M]", + "M8[W]", "=M8[W]", "M8[W]", + "M8[D]", "=M8[D]", "M8[D]", + "M8[h]", "=M8[h]", "M8[h]", + "M8[m]", "=M8[m]", "M8[m]", + "M8[s]", "=M8[s]", "M8[s]", + "M8[ms]", "=M8[ms]", "M8[ms]", + "M8[us]", "=M8[us]", "M8[us]", + "M8[ns]", "=M8[ns]", "M8[ns]", + "M8[ps]", "=M8[ps]", "M8[ps]", + "M8[fs]", "=M8[fs]", "M8[fs]", + "M8[as]", "=M8[as]", "M8[as]", + ] + _TD64Codes = Literal[ + "timedelta64", "=timedelta64", "timedelta64", + "timedelta64[Y]", "=timedelta64[Y]", "timedelta64[Y]", + "timedelta64[M]", "=timedelta64[M]", "timedelta64[M]", + "timedelta64[W]", "=timedelta64[W]", "timedelta64[W]", + "timedelta64[D]", "=timedelta64[D]", "timedelta64[D]", + "timedelta64[h]", "=timedelta64[h]", "timedelta64[h]", + "timedelta64[m]", "=timedelta64[m]", "timedelta64[m]", + "timedelta64[s]", "=timedelta64[s]", "timedelta64[s]", + "timedelta64[ms]", "=timedelta64[ms]", "timedelta64[ms]", + "timedelta64[us]", "=timedelta64[us]", "timedelta64[us]", + "timedelta64[ns]", "=timedelta64[ns]", "timedelta64[ns]", + "timedelta64[ps]", "=timedelta64[ps]", "timedelta64[ps]", + "timedelta64[fs]", "=timedelta64[fs]", "timedelta64[fs]", + "timedelta64[as]", "=timedelta64[as]", "timedelta64[as]", + "m", "=m", "m", + "m8", "=m8", "m8", + "m8[Y]", "=m8[Y]", "m8[Y]", + "m8[M]", "=m8[M]", "m8[M]", + "m8[W]", "=m8[W]", "m8[W]", + "m8[D]", "=m8[D]", "m8[D]", + "m8[h]", "=m8[h]", "m8[h]", + "m8[m]", "=m8[m]", "m8[m]", + "m8[s]", "=m8[s]", "m8[s]", + "m8[ms]", "=m8[ms]", "m8[ms]", + "m8[us]", "=m8[us]", "m8[us]", + "m8[ns]", "=m8[ns]", "m8[ns]", + "m8[ps]", "=m8[ps]", "m8[ps]", + "m8[fs]", "=m8[fs]", "m8[fs]", + "m8[as]", "=m8[as]", "m8[as]", + ] + +else: + _BoolCodes = Any + + _UInt8Codes = Any + _UInt16Codes = Any + _UInt32Codes = Any + _UInt64Codes = Any + + _Int8Codes = Any + _Int16Codes = Any + _Int32Codes = Any + _Int64Codes = Any + + _Float16Codes = Any + _Float32Codes = Any + _Float64Codes = Any + + _Complex64Codes = Any + _Complex128Codes = Any + + _ByteCodes = Any + _ShortCodes = Any + _IntCCodes = Any + _IntPCodes = Any + _IntCodes = Any + _LongLongCodes = Any + + _UByteCodes = Any + _UShortCodes = Any + _UIntCCodes = Any + _UIntPCodes = Any + _UIntCodes = Any + _ULongLongCodes = Any + + _HalfCodes = Any + _SingleCodes = Any + _DoubleCodes = Any + _LongDoubleCodes = Any + + _CSingleCodes = Any + _CDoubleCodes = Any + _CLongDoubleCodes = Any + + _StrCodes = Any + _BytesCodes = Any + _VoidCodes = Any + _ObjectCodes = Any + + _DT64Codes = Any + _TD64Codes = Any diff --git a/MLPY/Lib/site-packages/numpy/typing/_dtype_like.py b/MLPY/Lib/site-packages/numpy/typing/_dtype_like.py new file mode 100644 index 0000000000000000000000000000000000000000..e4e355649f9ce6698a9a1d002dd4f3379cfa6af1 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/_dtype_like.py @@ -0,0 +1,251 @@ +import sys +from typing import ( + Any, + List, + Sequence, + Tuple, + Union, + Type, + TypeVar, + Generic, + TYPE_CHECKING, +) + +import numpy as np +from ._shape 
+
+if sys.version_info >= (3, 8):
+    from typing import Protocol, TypedDict
+    HAVE_PROTOCOL = True
+else:
+    try:
+        from typing_extensions import Protocol, TypedDict
+    except ImportError:
+        HAVE_PROTOCOL = False
+    else:
+        HAVE_PROTOCOL = True
+
+from ._char_codes import (
+    _BoolCodes,
+    _UInt8Codes,
+    _UInt16Codes,
+    _UInt32Codes,
+    _UInt64Codes,
+    _Int8Codes,
+    _Int16Codes,
+    _Int32Codes,
+    _Int64Codes,
+    _Float16Codes,
+    _Float32Codes,
+    _Float64Codes,
+    _Complex64Codes,
+    _Complex128Codes,
+    _ByteCodes,
+    _ShortCodes,
+    _IntCCodes,
+    _IntPCodes,
+    _IntCodes,
+    _LongLongCodes,
+    _UByteCodes,
+    _UShortCodes,
+    _UIntCCodes,
+    _UIntPCodes,
+    _UIntCodes,
+    _ULongLongCodes,
+    _HalfCodes,
+    _SingleCodes,
+    _DoubleCodes,
+    _LongDoubleCodes,
+    _CSingleCodes,
+    _CDoubleCodes,
+    _CLongDoubleCodes,
+    _DT64Codes,
+    _TD64Codes,
+    _StrCodes,
+    _BytesCodes,
+    _VoidCodes,
+    _ObjectCodes,
+)
+
+_DType_co = TypeVar("_DType_co", covariant=True, bound=np.dtype)
+
+_DTypeLikeNested = Any  # TODO: wait for support for recursive types
+
+if TYPE_CHECKING or HAVE_PROTOCOL:
+    # Mandatory keys
+    class _DTypeDictBase(TypedDict):
+        names: Sequence[str]
+        formats: Sequence[_DTypeLikeNested]
+
+    # Mandatory + optional keys
+    class _DTypeDict(_DTypeDictBase, total=False):
+        offsets: Sequence[int]
+        titles: Sequence[Any]  # Only `str` elements are usable as indexing aliases, but all objects are legal
+        itemsize: int
+        aligned: bool
+
+    # A protocol for anything with the dtype attribute
+    class _SupportsDType(Protocol[_DType_co]):
+        @property
+        def dtype(self) -> _DType_co: ...
+
+else:
+    _DTypeDict = Any
+
+    class _SupportsDType(Generic[_DType_co]):
+        pass
+
+
+# Would create a dtype[np.void]
+_VoidDTypeLike = Union[
+    # (flexible_dtype, itemsize)
+    Tuple[_DTypeLikeNested, int],
+    # (fixed_dtype, shape)
+    Tuple[_DTypeLikeNested, _ShapeLike],
+    # [(field_name, field_dtype, field_shape), ...]
+    #
+    # The type here is quite broad because NumPy accepts quite a wide
+    # range of inputs inside the list; see the tests for some
+    # examples.
+    List[Any],
+    # {'names': ..., 'formats': ..., 'offsets': ..., 'titles': ...,
+    #  'itemsize': ...}
+    _DTypeDict,
+    # (base_dtype, new_dtype)
+    Tuple[_DTypeLikeNested, _DTypeLikeNested],
+]
+
+# Anything that can be coerced into numpy.dtype.
+# Reference: https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html
+DTypeLike = Union[
+    np.dtype,
+    # default data type (float64)
+    None,
+    # array-scalar types and generic types
+    type,  # TODO: enumerate these when we add type hints for numpy scalars
+    # anything with a dtype attribute
+    _SupportsDType[np.dtype],
+    # character codes, type strings or comma-separated fields, e.g., 'float64'
+    str,
+    _VoidDTypeLike,
+]
+
+# NOTE: while it is possible to provide the dtype as a dict of
+# dtype-like objects (e.g. `{'field1': ..., 'field2': ..., ...}`),
+# this syntax is officially discouraged and
+# therefore not included in the Union defining `DTypeLike`.
+#
+# See https://github.com/numpy/numpy/issues/16891 for more details.
+
+# Aliases for commonly used dtype-like objects.
+# Note that the precision of `np.number` subclasses is ignored herein.
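# A short runtime sketch, separate from the vendored file, exercising the
# `DTypeLike` arms defined above; `np.dtype` accepts each of them:
import numpy as np

assert np.dtype(None) == np.float64                       # default data type
assert np.dtype(np.int32) == np.dtype("i4")               # scalar type / char code
assert np.dtype((np.int32, (2,))).shape == (2,)           # (fixed_dtype, shape) tuple
assert np.dtype({"names": ["a"], "formats": ["f8"]}).names == ("a",)  # dict form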
+_DTypeLikeBool = Union[
+    Type[bool],
+    Type[np.bool_],
+    "np.dtype[np.bool_]",
+    "_SupportsDType[np.dtype[np.bool_]]",
+    _BoolCodes,
+]
+_DTypeLikeUInt = Union[
+    Type[np.unsignedinteger],
+    "np.dtype[np.unsignedinteger]",
+    "_SupportsDType[np.dtype[np.unsignedinteger]]",
+    _UInt8Codes,
+    _UInt16Codes,
+    _UInt32Codes,
+    _UInt64Codes,
+    _UByteCodes,
+    _UShortCodes,
+    _UIntCCodes,
+    _UIntPCodes,
+    _UIntCodes,
+    _ULongLongCodes,
+]
+_DTypeLikeInt = Union[
+    Type[int],
+    Type[np.signedinteger],
+    "np.dtype[np.signedinteger]",
+    "_SupportsDType[np.dtype[np.signedinteger]]",
+    _Int8Codes,
+    _Int16Codes,
+    _Int32Codes,
+    _Int64Codes,
+    _ByteCodes,
+    _ShortCodes,
+    _IntCCodes,
+    _IntPCodes,
+    _IntCodes,
+    _LongLongCodes,
+]
+_DTypeLikeFloat = Union[
+    Type[float],
+    Type[np.floating],
+    "np.dtype[np.floating]",
+    "_SupportsDType[np.dtype[np.floating]]",
+    _Float16Codes,
+    _Float32Codes,
+    _Float64Codes,
+    _HalfCodes,
+    _SingleCodes,
+    _DoubleCodes,
+    _LongDoubleCodes,
+]
+_DTypeLikeComplex = Union[
+    Type[complex],
+    Type[np.complexfloating],
+    "np.dtype[np.complexfloating]",
+    "_SupportsDType[np.dtype[np.complexfloating]]",
+    _Complex64Codes,
+    _Complex128Codes,
+    _CSingleCodes,
+    _CDoubleCodes,
+    _CLongDoubleCodes,
+]
+_DTypeLikeTD64 = Union[
+    Type[np.timedelta64],
+    "np.dtype[np.timedelta64]",
+    "_SupportsDType[np.dtype[np.timedelta64]]",
+    _TD64Codes,
+]
+_DTypeLikeDT64 = Union[
+    Type[np.datetime64],
+    "np.dtype[np.datetime64]",
+    "_SupportsDType[np.dtype[np.datetime64]]",
+    _DT64Codes,
+]
+_DTypeLikeStr = Union[
+    Type[str],
+    Type[np.str_],
+    "np.dtype[np.str_]",
+    "_SupportsDType[np.dtype[np.str_]]",
+    _StrCodes,
+]
+_DTypeLikeBytes = Union[
+    Type[bytes],
+    Type[np.bytes_],
+    "np.dtype[np.bytes_]",
+    "_SupportsDType[np.dtype[np.bytes_]]",
+    _BytesCodes,
+]
+_DTypeLikeVoid = Union[
+    Type[np.void],
+    "np.dtype[np.void]",
+    "_SupportsDType[np.dtype[np.void]]",
+    _VoidCodes,
+    _VoidDTypeLike,
+]
+_DTypeLikeObject = Union[
+    type,
+    "np.dtype[np.object_]",
+    "_SupportsDType[np.dtype[np.object_]]",
+    _ObjectCodes,
+]
+
+_DTypeLikeComplex_co = Union[
+    _DTypeLikeBool,
+    _DTypeLikeUInt,
+    _DTypeLikeInt,
+    _DTypeLikeFloat,
+    _DTypeLikeComplex,
+]
diff --git a/MLPY/Lib/site-packages/numpy/typing/_extended_precision.py b/MLPY/Lib/site-packages/numpy/typing/_extended_precision.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae6873b204cfaa09a7e41125f5ccb155266ec335
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/typing/_extended_precision.py
@@ -0,0 +1,42 @@
+"""A module with platform-specific extended precision `numpy.number` subclasses.
+
+The subclasses are defined here (instead of ``__init__.pyi``) such
+that they can be imported conditionally via numpy's mypy plugin.
+"""
+
+from typing import TYPE_CHECKING, Any
+
+import numpy as np
+from . import (
+    _80Bit,
+    _96Bit,
+    _128Bit,
+    _256Bit,
+)
+
+if TYPE_CHECKING:
+    uint128 = np.unsignedinteger[_128Bit]
+    uint256 = np.unsignedinteger[_256Bit]
+    int128 = np.signedinteger[_128Bit]
+    int256 = np.signedinteger[_256Bit]
+    float80 = np.floating[_80Bit]
+    float96 = np.floating[_96Bit]
+    float128 = np.floating[_128Bit]
+    float256 = np.floating[_256Bit]
+    complex160 = np.complexfloating[_80Bit, _80Bit]
+    complex192 = np.complexfloating[_96Bit, _96Bit]
+    complex256 = np.complexfloating[_128Bit, _128Bit]
+    complex512 = np.complexfloating[_256Bit, _256Bit]
+else:
+    uint128 = Any
+    uint256 = Any
+    int128 = Any
+    int256 = Any
+    float80 = Any
+    float96 = Any
+    float128 = Any
+    float256 = Any
+    complex160 = Any
+    complex192 = Any
+    complex256 = Any
+    complex512 = Any
diff --git a/MLPY/Lib/site-packages/numpy/typing/_generic_alias.py b/MLPY/Lib/site-packages/numpy/typing/_generic_alias.py
new file mode 100644
index 0000000000000000000000000000000000000000..48930a88929a19f7cd8eeb6d408112be6880858d
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/typing/_generic_alias.py
@@ -0,0 +1,208 @@
+from __future__ import annotations
+
+import sys
+import types
+from typing import (
+    Any,
+    ClassVar,
+    FrozenSet,
+    Generator,
+    Iterable,
+    Iterator,
+    List,
+    NoReturn,
+    Tuple,
+    Type,
+    TypeVar,
+    TYPE_CHECKING,
+)
+
+import numpy as np
+
+__all__ = ["_GenericAlias", "NDArray"]
+
+_T = TypeVar("_T", bound="_GenericAlias")
+
+
+def _to_str(obj: object) -> str:
+    """Helper function for `_GenericAlias.__repr__`."""
+    if obj is Ellipsis:
+        return '...'
+    elif isinstance(obj, type) and not isinstance(obj, _GENERIC_ALIAS_TYPE):
+        if obj.__module__ == 'builtins':
+            return obj.__qualname__
+        else:
+            return f'{obj.__module__}.{obj.__qualname__}'
+    else:
+        return repr(obj)
+
+
+def _parse_parameters(args: Iterable[Any]) -> Generator[TypeVar, None, None]:
+    """Search for all typevars and typevar-containing objects in `args`.
+
+    Helper function for `_GenericAlias.__init__`.
+
+    """
+    for i in args:
+        if hasattr(i, "__parameters__"):
+            yield from i.__parameters__
+        elif isinstance(i, TypeVar):
+            yield i
+
+
+def _reconstruct_alias(alias: _T, parameters: Iterator[TypeVar]) -> _T:
+    """Recursively replace all typevars with those from `parameters`.
+
+    Helper function for `_GenericAlias.__getitem__`.
+
+    """
+    args = []
+    for i in alias.__args__:
+        if isinstance(i, TypeVar):
+            value: Any = next(parameters)
+        elif isinstance(i, _GenericAlias):
+            value = _reconstruct_alias(i, parameters)
+        elif hasattr(i, "__parameters__"):
+            prm_tup = tuple(next(parameters) for _ in i.__parameters__)
+            value = i[prm_tup]
+        else:
+            value = i
+        args.append(value)
+
+    cls = type(alias)
+    return cls(alias.__origin__, tuple(args))
+
+
+class _GenericAlias:
+    """A Python-based backport of the `types.GenericAlias` class.
+
+    E.g. for ``t = list[int]``, ``t.__origin__`` is ``list`` and
+    ``t.__args__`` is ``(int,)``.
+
+    See Also
+    --------
+    :pep:`585`
+        The PEP responsible for introducing `types.GenericAlias`.
+ + """ + + __slots__ = ("__weakref__", "_origin", "_args", "_parameters", "_hash") + + @property + def __origin__(self) -> type: + return super().__getattribute__("_origin") + + @property + def __args__(self) -> Tuple[Any, ...]: + return super().__getattribute__("_args") + + @property + def __parameters__(self) -> Tuple[TypeVar, ...]: + """Type variables in the ``GenericAlias``.""" + return super().__getattribute__("_parameters") + + def __init__(self, origin: type, args: Any) -> None: + self._origin = origin + self._args = args if isinstance(args, tuple) else (args,) + self._parameters = tuple(_parse_parameters(args)) + + @property + def __call__(self) -> type: + return self.__origin__ + + def __reduce__(self: _T) -> Tuple[Type[_T], Tuple[type, Tuple[Any, ...]]]: + cls = type(self) + return cls, (self.__origin__, self.__args__) + + def __mro_entries__(self, bases: Iterable[object]) -> Tuple[type]: + return (self.__origin__,) + + def __dir__(self) -> List[str]: + """Implement ``dir(self)``.""" + cls = type(self) + dir_origin = set(dir(self.__origin__)) + return sorted(cls._ATTR_EXCEPTIONS | dir_origin) + + def __hash__(self) -> int: + """Return ``hash(self)``.""" + # Attempt to use the cached hash + try: + return super().__getattribute__("_hash") + except AttributeError: + self._hash: int = hash(self.__origin__) ^ hash(self.__args__) + return super().__getattribute__("_hash") + + def __instancecheck__(self, obj: object) -> NoReturn: + """Check if an `obj` is an instance.""" + raise TypeError("isinstance() argument 2 cannot be a " + "parameterized generic") + + def __subclasscheck__(self, cls: type) -> NoReturn: + """Check if a `cls` is a subclass.""" + raise TypeError("issubclass() argument 2 cannot be a " + "parameterized generic") + + def __repr__(self) -> str: + """Return ``repr(self)``.""" + args = ", ".join(_to_str(i) for i in self.__args__) + origin = _to_str(self.__origin__) + return f"{origin}[{args}]" + + def __getitem__(self: _T, key: Any) -> _T: + """Return ``self[key]``.""" + key_tup = key if isinstance(key, tuple) else (key,) + + if len(self.__parameters__) == 0: + raise TypeError(f"There are no type variables left in {self}") + elif len(key_tup) > len(self.__parameters__): + raise TypeError(f"Too many arguments for {self}") + elif len(key_tup) < len(self.__parameters__): + raise TypeError(f"Too few arguments for {self}") + + key_iter = iter(key_tup) + return _reconstruct_alias(self, key_iter) + + def __eq__(self, value: object) -> bool: + """Return ``self == value``.""" + if not isinstance(value, _GENERIC_ALIAS_TYPE): + return NotImplemented + return ( + self.__origin__ == value.__origin__ and + self.__args__ == value.__args__ + ) + + _ATTR_EXCEPTIONS: ClassVar[FrozenSet[str]] = frozenset({ + "__origin__", + "__args__", + "__parameters__", + "__mro_entries__", + "__reduce__", + "__reduce_ex__", + }) + + def __getattribute__(self, name: str) -> Any: + """Return ``getattr(self, name)``.""" + # Pull the attribute from `__origin__` unless its + # name is in `_ATTR_EXCEPTIONS` + cls = type(self) + if name in cls._ATTR_EXCEPTIONS: + return super().__getattribute__(name) + return getattr(self.__origin__, name) + + +# See `_GenericAlias.__eq__` +if sys.version_info >= (3, 9): + _GENERIC_ALIAS_TYPE = (_GenericAlias, types.GenericAlias) +else: + _GENERIC_ALIAS_TYPE = (_GenericAlias,) + +ScalarType = TypeVar("ScalarType", bound=np.generic, covariant=True) + +if TYPE_CHECKING: + NDArray = np.ndarray[Any, np.dtype[ScalarType]] +elif sys.version_info >= (3, 9): + _DType = 
types.GenericAlias(np.dtype, (ScalarType,))
+    NDArray = types.GenericAlias(np.ndarray, (Any, _DType))
+else:
+    _DType = _GenericAlias(np.dtype, (ScalarType,))
+    NDArray = _GenericAlias(np.ndarray, (Any, _DType))
diff --git a/MLPY/Lib/site-packages/numpy/typing/_nbit.py b/MLPY/Lib/site-packages/numpy/typing/_nbit.py
new file mode 100644
index 0000000000000000000000000000000000000000..53be8c03eb297b1aff62cfd581580094230d1a5d
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/typing/_nbit.py
@@ -0,0 +1,16 @@
+"""A module with the precisions of platform-specific `~numpy.number`s."""
+
+from typing import Any
+
+# To-be replaced with a `npt.NBitBase` subclass by numpy's mypy plugin
+_NBitByte = Any
+_NBitShort = Any
+_NBitIntC = Any
+_NBitIntP = Any
+_NBitInt = Any
+_NBitLongLong = Any
+
+_NBitHalf = Any
+_NBitSingle = Any
+_NBitDouble = Any
+_NBitLongDouble = Any
diff --git a/MLPY/Lib/site-packages/numpy/typing/_scalars.py b/MLPY/Lib/site-packages/numpy/typing/_scalars.py
new file mode 100644
index 0000000000000000000000000000000000000000..7919d8d8866ff5783ef0dfa2a24d9db9680df123
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/typing/_scalars.py
@@ -0,0 +1,30 @@
+from typing import Union, Tuple, Any
+
+import numpy as np
+
+# NOTE: `_StrLike_co` and `_BytesLike_co` are pointless, as `np.str_` and
+# `np.bytes_` are already subclasses of their builtin counterpart
+
+_CharLike_co = Union[str, bytes]
+
+# The 6 `<X>Like_co` type-aliases below represent all scalars that can be
+# coerced into `<X>` (with the casting rule `same_kind`)
+_BoolLike_co = Union[bool, np.bool_]
+_UIntLike_co = Union[_BoolLike_co, np.unsignedinteger]
+_IntLike_co = Union[_BoolLike_co, int, np.integer]
+_FloatLike_co = Union[_IntLike_co, float, np.floating]
+_ComplexLike_co = Union[_FloatLike_co, complex, np.complexfloating]
+_TD64Like_co = Union[_IntLike_co, np.timedelta64]
+
+_NumberLike_co = Union[int, float, complex, np.number, np.bool_]
+_ScalarLike_co = Union[
+    int,
+    float,
+    complex,
+    str,
+    bytes,
+    np.generic,
+]
+
+# `_VoidLike_co` is technically not a scalar, but it's close enough
+_VoidLike_co = Union[Tuple[Any, ...], np.void]
diff --git a/MLPY/Lib/site-packages/numpy/typing/_shape.py b/MLPY/Lib/site-packages/numpy/typing/_shape.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e354691cd8124126c5b183365b2f1d545f28c04
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/typing/_shape.py
@@ -0,0 +1,15 @@
+import sys
+from typing import Sequence, Tuple, Union, Any
+
+if sys.version_info >= (3, 8):
+    from typing import SupportsIndex
+else:
+    try:
+        from typing_extensions import SupportsIndex
+    except ImportError:
+        SupportsIndex = Any
+
+_Shape = Tuple[int, ...]
+
+# Anything that can be coerced to a shape tuple
+_ShapeLike = Union[SupportsIndex, Sequence[SupportsIndex]]
diff --git a/MLPY/Lib/site-packages/numpy/typing/_ufunc.pyi b/MLPY/Lib/site-packages/numpy/typing/_ufunc.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..c6fd4c7bd28c3a5e8cce133a74bf2c0699a493ab
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/typing/_ufunc.pyi
@@ -0,0 +1,405 @@
+"""A module with private type-check-only `numpy.ufunc` subclasses.
+
+The signatures of the ufuncs are too varied to reasonably type
+with a single class. So instead, `ufunc` has been expanded into
+four private subclasses, one for each combination of
+`~ufunc.nin` and `~ufunc.nout`.
+ +""" + +from typing import ( + Any, + Generic, + List, + Optional, + overload, + Tuple, + TypeVar, + Union, +) + +from numpy import ufunc, _Casting, _OrderKACF +from numpy.typing import NDArray + +from ._shape import _ShapeLike +from ._scalars import _ScalarLike_co +from ._array_like import ArrayLike, _ArrayLikeBool_co, _ArrayLikeInt_co +from ._dtype_like import DTypeLike + +from typing_extensions import Literal, SupportsIndex + +_T = TypeVar("_T") +_2Tuple = Tuple[_T, _T] +_3Tuple = Tuple[_T, _T, _T] +_4Tuple = Tuple[_T, _T, _T, _T] + +_NTypes = TypeVar("_NTypes", bound=int) +_IDType = TypeVar("_IDType", bound=Any) +_NameType = TypeVar("_NameType", bound=str) + +# NOTE: In reality `extobj` should be a length of list 3 containing an +# int, an int, and a callable, but there's no way to properly express +# non-homogenous lists. +# Use `Any` over `Union` to avoid issues related to lists invariance. + +# NOTE: `reduce`, `accumulate`, `reduceat` and `outer` raise a ValueError for +# ufuncs that don't accept two input arguments and return one output argument. +# In such cases the respective methods are simply typed as `None`. + +# NOTE: Similarly, `at` won't be defined for ufuncs that return +# multiple outputs; in such cases `at` is typed as `None` + +# NOTE: If 2 output types are returned then `out` must be a +# 2-tuple of arrays. Otherwise `None` or a plain array are also acceptable + +class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): + @property + def __name__(self) -> _NameType: ... + @property + def ntypes(self) -> _NTypes: ... + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[1]: ... + @property + def nout(self) -> Literal[1]: ... + @property + def nargs(self) -> Literal[2]: ... + @property + def signature(self) -> None: ... + @property + def reduce(self) -> None: ... + @property + def accumulate(self) -> None: ... + @property + def reduceat(self) -> None: ... + @property + def outer(self) -> None: ... + + @overload + def __call__( + self, + __x1: _ScalarLike_co, + out: None = ..., + *, + where: Optional[_ArrayLikeBool_co] = ..., + casting: _Casting = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: Union[str, _2Tuple[Optional[str]]] = ..., + extobj: List[Any] = ..., + ) -> Any: ... + @overload + def __call__( + self, + __x1: ArrayLike, + out: Union[None, NDArray[Any], Tuple[NDArray[Any]]] = ..., + *, + where: Optional[_ArrayLikeBool_co] = ..., + casting: _Casting = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: Union[str, _2Tuple[Optional[str]]] = ..., + extobj: List[Any] = ..., + ) -> NDArray[Any]: ... + + def at( + self, + __a: NDArray[Any], + __indices: _ArrayLikeInt_co, + ) -> None: ... + +class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): + @property + def __name__(self) -> _NameType: ... + @property + def ntypes(self) -> _NTypes: ... + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[2]: ... + @property + def nout(self) -> Literal[1]: ... + @property + def nargs(self) -> Literal[3]: ... + @property + def signature(self) -> None: ... + + @overload + def __call__( + self, + __x1: _ScalarLike_co, + __x2: _ScalarLike_co, + out: None = ..., + *, + where: Optional[_ArrayLikeBool_co] = ..., + casting: _Casting = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: Union[str, _3Tuple[Optional[str]]] = ..., + extobj: List[Any] = ..., + ) -> Any: ... 
+ @overload + def __call__( + self, + __x1: ArrayLike, + __x2: ArrayLike, + out: Union[None, NDArray[Any], Tuple[NDArray[Any]]] = ..., + *, + where: Optional[_ArrayLikeBool_co] = ..., + casting: _Casting = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: Union[str, _3Tuple[Optional[str]]] = ..., + extobj: List[Any] = ..., + ) -> NDArray[Any]: ... + + def at( + self, + __a: NDArray[Any], + __indices: _ArrayLikeInt_co, + __b: ArrayLike, + ) -> None: ... + + def reduce( + self, + array: ArrayLike, + axis: Optional[_ShapeLike] = ..., + dtype: DTypeLike = ..., + out: Optional[NDArray[Any]] = ..., + keepdims: bool = ..., + initial: Any = ..., + where: _ArrayLikeBool_co = ..., + ) -> Any: ... + + def accumulate( + self, + array: ArrayLike, + axis: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: Optional[NDArray[Any]] = ..., + ) -> NDArray[Any]: ... + + def reduceat( + self, + array: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: Optional[NDArray[Any]] = ..., + ) -> NDArray[Any]: ... + + # Expand `**kwargs` into explicit keyword-only arguments + @overload + def outer( + self, + __A: _ScalarLike_co, + __B: _ScalarLike_co, + *, + out: None = ..., + where: Optional[_ArrayLikeBool_co] = ..., + casting: _Casting = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: Union[str, _3Tuple[Optional[str]]] = ..., + extobj: List[Any] = ..., + ) -> Any: ... + @overload + def outer( # type: ignore[misc] + self, + __A: ArrayLike, + __B: ArrayLike, + *, + out: Union[None, NDArray[Any], Tuple[NDArray[Any]]] = ..., + where: Optional[_ArrayLikeBool_co] = ..., + casting: _Casting = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: Union[str, _3Tuple[Optional[str]]] = ..., + extobj: List[Any] = ..., + ) -> NDArray[Any]: ... + +class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): + @property + def __name__(self) -> _NameType: ... + @property + def ntypes(self) -> _NTypes: ... + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[1]: ... + @property + def nout(self) -> Literal[2]: ... + @property + def nargs(self) -> Literal[3]: ... + @property + def signature(self) -> None: ... + @property + def at(self) -> None: ... + @property + def reduce(self) -> None: ... + @property + def accumulate(self) -> None: ... + @property + def reduceat(self) -> None: ... + @property + def outer(self) -> None: ... + + @overload + def __call__( + self, + __x1: _ScalarLike_co, + __out1: None = ..., + __out2: None = ..., + *, + where: Optional[_ArrayLikeBool_co] = ..., + casting: _Casting = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: Union[str, _3Tuple[Optional[str]]] = ..., + extobj: List[Any] = ..., + ) -> _2Tuple[Any]: ... + @overload + def __call__( + self, + __x1: ArrayLike, + __out1: Optional[NDArray[Any]] = ..., + __out2: Optional[NDArray[Any]] = ..., + *, + out: _2Tuple[NDArray[Any]] = ..., + where: Optional[_ArrayLikeBool_co] = ..., + casting: _Casting = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: Union[str, _3Tuple[Optional[str]]] = ..., + extobj: List[Any] = ..., + ) -> _2Tuple[NDArray[Any]]: ... + +class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): + @property + def __name__(self) -> _NameType: ... + @property + def ntypes(self) -> _NTypes: ... 
+    @property
+    def identity(self) -> _IDType: ...
+    @property
+    def nin(self) -> Literal[2]: ...
+    @property
+    def nout(self) -> Literal[2]: ...
+    @property
+    def nargs(self) -> Literal[4]: ...
+    @property
+    def signature(self) -> None: ...
+    @property
+    def at(self) -> None: ...
+    @property
+    def reduce(self) -> None: ...
+    @property
+    def accumulate(self) -> None: ...
+    @property
+    def reduceat(self) -> None: ...
+    @property
+    def outer(self) -> None: ...
+
+    @overload
+    def __call__(
+        self,
+        __x1: _ScalarLike_co,
+        __x2: _ScalarLike_co,
+        __out1: None = ...,
+        __out2: None = ...,
+        *,
+        where: Optional[_ArrayLikeBool_co] = ...,
+        casting: _Casting = ...,
+        order: _OrderKACF = ...,
+        dtype: DTypeLike = ...,
+        subok: bool = ...,
+        signature: Union[str, _4Tuple[Optional[str]]] = ...,
+        extobj: List[Any] = ...,
+    ) -> _2Tuple[Any]: ...
+    @overload
+    def __call__(
+        self,
+        __x1: ArrayLike,
+        __x2: ArrayLike,
+        __out1: Optional[NDArray[Any]] = ...,
+        __out2: Optional[NDArray[Any]] = ...,
+        *,
+        out: _2Tuple[NDArray[Any]] = ...,
+        where: Optional[_ArrayLikeBool_co] = ...,
+        casting: _Casting = ...,
+        order: _OrderKACF = ...,
+        dtype: DTypeLike = ...,
+        subok: bool = ...,
+        signature: Union[str, _4Tuple[Optional[str]]] = ...,
+        extobj: List[Any] = ...,
+    ) -> _2Tuple[NDArray[Any]]: ...
+
+class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]):
+    @property
+    def __name__(self) -> _NameType: ...
+    @property
+    def ntypes(self) -> _NTypes: ...
+    @property
+    def identity(self) -> _IDType: ...
+    @property
+    def nin(self) -> Literal[2]: ...
+    @property
+    def nout(self) -> Literal[1]: ...
+    @property
+    def nargs(self) -> Literal[3]: ...
+
+    # NOTE: In practice the only gufunc in the main namespace is `matmul`,
+    # so we can use its signature here
+    @property
+    def signature(self) -> Literal["(n?,k),(k,m?)->(n?,m?)"]: ...
+    @property
+    def reduce(self) -> None: ...
+    @property
+    def accumulate(self) -> None: ...
+    @property
+    def reduceat(self) -> None: ...
+    @property
+    def outer(self) -> None: ...
+    @property
+    def at(self) -> None: ...
+
+    # Scalar for 1D array-likes; ndarray otherwise
+    @overload
+    def __call__(
+        self,
+        __x1: ArrayLike,
+        __x2: ArrayLike,
+        out: None = ...,
+        *,
+        casting: _Casting = ...,
+        order: _OrderKACF = ...,
+        dtype: DTypeLike = ...,
+        subok: bool = ...,
+        signature: Union[str, _3Tuple[Optional[str]]] = ...,
+        extobj: List[Any] = ...,
+        axes: List[_2Tuple[SupportsIndex]] = ...,
+    ) -> Any: ...
+    @overload
+    def __call__(
+        self,
+        __x1: ArrayLike,
+        __x2: ArrayLike,
+        out: Union[NDArray[Any], Tuple[NDArray[Any]]],
+        *,
+        casting: _Casting = ...,
+        order: _OrderKACF = ...,
+        dtype: DTypeLike = ...,
+        subok: bool = ...,
+        signature: Union[str, _3Tuple[Optional[str]]] = ...,
+        extobj: List[Any] = ...,
+        axes: List[_2Tuple[SupportsIndex]] = ...,
+    ) -> NDArray[Any]: ...
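# A runtime sketch, separate from the stub file, of the four nin/nout
# combinations the subclasses above model, and of why `reduce` is typed
# as `None` outside the 2-in/1-out case:
import numpy as np

assert (np.sin.nin, np.sin.nout) == (1, 1)              # _UFunc_Nin1_Nout1
assert (np.add.nin, np.add.nout) == (2, 1)              # _UFunc_Nin2_Nout1
assert (np.frexp.nin, np.frexp.nout) == (1, 2)          # _UFunc_Nin1_Nout2
assert (np.divmod.nin, np.divmod.nout) == (2, 2)        # _UFunc_Nin2_Nout2
assert np.matmul.signature == "(n?,k),(k,m?)->(n?,m?)"  # _GUFunc_Nin2_Nout1
assert np.add.reduce([1, 2, 3]) == 6                    # defined: binary ufunc
try:
    np.sin.reduce([1.0])                                # 1-in/1-out: rejected
except ValueError:
    pass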
diff --git a/MLPY/Lib/site-packages/numpy/typing/mypy_plugin.py b/MLPY/Lib/site-packages/numpy/typing/mypy_plugin.py
new file mode 100644
index 0000000000000000000000000000000000000000..bd6979360b2767502c1429c54b07b1c600a384ca
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/typing/mypy_plugin.py
@@ -0,0 +1,131 @@
+"""A module containing `numpy`-specific plugins for mypy."""
+
+from __future__ import annotations
+
+import typing as t
+
+import numpy as np
+
+try:
+    import mypy.types
+    from mypy.types import Type
+    from mypy.plugin import Plugin, AnalyzeTypeContext
+    from mypy.nodes import MypyFile, ImportFrom, Statement
+    from mypy.build import PRI_MED
+
+    _HookFunc = t.Callable[[AnalyzeTypeContext], Type]
+    MYPY_EX: t.Optional[ModuleNotFoundError] = None
+except ModuleNotFoundError as ex:
+    MYPY_EX = ex
+
+__all__: t.List[str] = []
+
+
+def _get_precision_dict() -> t.Dict[str, str]:
+    names = [
+        ("_NBitByte", np.byte),
+        ("_NBitShort", np.short),
+        ("_NBitIntC", np.intc),
+        ("_NBitIntP", np.intp),
+        ("_NBitInt", np.int_),
+        ("_NBitLongLong", np.longlong),
+
+        ("_NBitHalf", np.half),
+        ("_NBitSingle", np.single),
+        ("_NBitDouble", np.double),
+        ("_NBitLongDouble", np.longdouble),
+    ]
+    ret = {}
+    for name, typ in names:
+        n: int = 8 * typ().dtype.itemsize
+        ret[f'numpy.typing._nbit.{name}'] = f"numpy._{n}Bit"
+    return ret
+
+
+def _get_extended_precision_list() -> t.List[str]:
+    extended_types = [np.ulonglong, np.longlong, np.longdouble, np.clongdouble]
+    extended_names = {
+        "uint128",
+        "uint256",
+        "int128",
+        "int256",
+        "float80",
+        "float96",
+        "float128",
+        "float256",
+        "complex160",
+        "complex192",
+        "complex256",
+        "complex512",
+    }
+    return [i.__name__ for i in extended_types if i.__name__ in extended_names]
+
+
+#: A dictionary mapping type-aliases in `numpy.typing._nbit` to
+#: concrete `numpy.typing.NBitBase` subclasses.
+_PRECISION_DICT: t.Final = _get_precision_dict()
+
+#: A list with the names of all extended precision `np.number` subclasses.
+_EXTENDED_PRECISION_LIST: t.Final = _get_extended_precision_list()
+
+
+def _hook(ctx: AnalyzeTypeContext) -> Type:
+    """Replace a type-alias with a concrete ``NBitBase`` subclass."""
+    typ, _, api = ctx
+    name = typ.name.split(".")[-1]
+    name_new = _PRECISION_DICT[f"numpy.typing._nbit.{name}"]
+    return api.named_type(name_new)
+
+
+if t.TYPE_CHECKING or MYPY_EX is None:
+    def _index(iterable: t.Iterable[Statement], id: str) -> int:
+        """Identify the first ``ImportFrom`` instance with the specified `id`."""
+        for i, value in enumerate(iterable):
+            if getattr(value, "id", None) == id:
+                return i
+        else:
+            raise ValueError("Failed to identify a `ImportFrom` instance "
+                             f"with the following id: {id!r}")
+
+    class _NumpyPlugin(Plugin):
+        """A plugin for assigning platform-specific `numpy.number` precisions."""
+
+        def get_type_analyze_hook(self, fullname: str) -> t.Optional[_HookFunc]:
+            """Set the precision of platform-specific `numpy.number` subclasses.
+
+            For example: `numpy.int_`, `numpy.longlong` and `numpy.longdouble`.
+            """
+            if fullname in _PRECISION_DICT:
+                return _hook
+            return None
+
+        def get_additional_deps(self, file: MypyFile) -> t.List[t.Tuple[int, str, int]]:
+            """Import platform-specific extended-precision `numpy.number` subclasses.
+
+            For example: `numpy.float96`, `numpy.float128` and `numpy.complex256`.
+ """ + ret = [(PRI_MED, file.fullname, -1)] + if file.fullname == "numpy": + # Import ONLY the extended precision types available to the + # platform in question + imports = ImportFrom( + "numpy.typing._extended_precision", 0, + names=[(v, v) for v in _EXTENDED_PRECISION_LIST], + ) + imports.is_top_level = True + + # Replace the much broader extended-precision import + # (defined in `numpy/__init__.pyi`) with a more specific one + for lst in [file.defs, file.imports]: # type: t.List[Statement] + i = _index(lst, "numpy.typing._extended_precision") + lst[i] = imports + return ret + + def plugin(version: str) -> t.Type[_NumpyPlugin]: + """An entry-point for mypy.""" + return _NumpyPlugin + +else: + def plugin(version: str) -> t.Type[_NumpyPlugin]: + """An entry-point for mypy.""" + raise MYPY_EX diff --git a/MLPY/Lib/site-packages/numpy/typing/setup.py b/MLPY/Lib/site-packages/numpy/typing/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..d8c0ef50ad149471f91bd297f315f13e7fc30657 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/setup.py @@ -0,0 +1,12 @@ +def configuration(parent_package='', top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('typing', parent_package, top_path) + config.add_subpackage('tests') + config.add_data_dir('tests/data') + config.add_data_files('*.pyi') + return config + + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(configuration=configuration) diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/__init__.py b/MLPY/Lib/site-packages/numpy/typing/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b3f3af40e57e872c554b9b56d3c403db07ce1630 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/__pycache__/test_generic_alias.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/__pycache__/test_generic_alias.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3f185049e4963e61a54e0686077dd1795036dd78 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/__pycache__/test_generic_alias.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/__pycache__/test_isfile.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/__pycache__/test_isfile.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cadcb87f1c9432657f84ff41709c51b83bdb0a9f Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/__pycache__/test_isfile.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/__pycache__/test_runtime.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/__pycache__/test_runtime.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..66245cf1f14b246a58a51ce7fce79eed0786e1ae Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/__pycache__/test_runtime.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/__pycache__/test_typing.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/__pycache__/test_typing.cpython-39.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..269e50ab3d7c5eec9816c068588d8be028089e40 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/__pycache__/test_typing.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/__pycache__/test_typing_extensions.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/__pycache__/test_typing_extensions.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6df2cc03459352e8ea72ef9671e902d2fa7489f5 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/__pycache__/test_typing_extensions.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/arithmetic.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/arithmetic.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..efb2fdc11264f8c37c1a0a190a4e32070b0bde90 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/arithmetic.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/array_constructors.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/array_constructors.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a27ca1b8035e1e78bcb5169c3d90fc8f115e219 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/array_constructors.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/array_like.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/array_like.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc39cc50d8c15769096272661701b6b059f15c9c Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/array_like.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/arrayprint.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/arrayprint.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..819b10c86aaeeb87952889c169b43a572f4a7ea2 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/arrayprint.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/arrayterator.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/arrayterator.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8187af9e3a1a5f3c4985c5d0199c4b79fae442a1 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/arrayterator.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/bitwise_ops.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/bitwise_ops.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5d3af4459c90486dbed56a40e3334a56fe43bc4f Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/bitwise_ops.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/comparisons.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/comparisons.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..4618fc5351956cc14fa3bed9c48af797e270b082 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/comparisons.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/constants.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/constants.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5238889a3bdf7ff1be5ad98cf61ef99c0c5fb885 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/constants.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/datasource.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/datasource.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cbc95b2857197ea52e0cd2c73feb855de7c4aaaf Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/datasource.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/dtype.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/dtype.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cdf8b6bd8c1971e5461f292e8973fb33a2f0690a Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/dtype.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/einsumfunc.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/einsumfunc.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..38719b4741c751ccd61fa32a90e33ef2576d3e0f Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/einsumfunc.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/flatiter.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/flatiter.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..76be8979460677d722ae1ef1555d6e7b0427f3db Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/flatiter.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/fromnumeric.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/fromnumeric.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..091b5f819e61c160a14a24fe4b89114107964f9b Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/fromnumeric.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/index_tricks.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/index_tricks.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b1f8099ea4873ef7f1f716a0865278ce16abde25 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/index_tricks.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/lib_utils.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/lib_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2eae2725a1de3f3382db7535dd95c780966726e Binary files /dev/null and 
b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/lib_utils.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/lib_version.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/lib_version.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..48d2ded0e6c1067f0a48a5af00194db31b797203 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/lib_version.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/modules.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/modules.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b5c311587fc044f470e7bc2e7afb83936fb4c0fc Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/modules.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/ndarray.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/ndarray.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..07226d29a85bcd826dbbf1e9e1fd4cff7ed036d2 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/ndarray.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/ndarray_misc.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/ndarray_misc.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab9bfb45a48778c8c5d977a17a56035c3dc2a772 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/ndarray_misc.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/numerictypes.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/numerictypes.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b286f7344c8acdbc25a73a8d8da39889db8467b Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/numerictypes.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/random.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/random.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..184131505a455f57a5f2a203e4b0c3280bc6c4e1 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/random.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/scalars.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/scalars.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..56238ecc2268b1287d317561ac925cf9bee648eb Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/scalars.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/ufunc_config.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/ufunc_config.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..82bfeafede06173015c8c170a0ab28ccbb19d647 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/ufunc_config.cpython-39.pyc differ diff --git 
a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/ufunclike.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/ufunclike.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..735f9d8b67767136c4c04074666c688a0c62f439 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/ufunclike.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/ufuncs.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/ufuncs.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..af15fa3511621dee0ae40a0f4e327d6303dcb26f Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/ufuncs.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/warnings_and_errors.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/warnings_and_errors.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5c1e068252749d7877413439831854d526d14c90 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/__pycache__/warnings_and_errors.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/arithmetic.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/arithmetic.py new file mode 100644 index 0000000000000000000000000000000000000000..01eca3e176bcc4e2c4f6260fadf86064ef7d2553 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/arithmetic.py @@ -0,0 +1,120 @@ +from typing import List, Any +import numpy as np + +b_ = np.bool_() +dt = np.datetime64(0, "D") +td = np.timedelta64(0, "D") + +AR_b: np.ndarray[Any, np.dtype[np.bool_]] +AR_u: np.ndarray[Any, np.dtype[np.uint32]] +AR_i: np.ndarray[Any, np.dtype[np.int64]] +AR_f: np.ndarray[Any, np.dtype[np.float64]] +AR_c: np.ndarray[Any, np.dtype[np.complex128]] +AR_m: np.ndarray[Any, np.dtype[np.timedelta64]] +AR_M: np.ndarray[Any, np.dtype[np.datetime64]] + +ANY: Any + +AR_LIKE_b: List[bool] +AR_LIKE_u: List[np.uint32] +AR_LIKE_i: List[int] +AR_LIKE_f: List[float] +AR_LIKE_c: List[complex] +AR_LIKE_m: List[np.timedelta64] +AR_LIKE_M: List[np.datetime64] + +# Array subtraction + +# NOTE: mypys `NoReturn` errors are, unfortunately, not that great +_1 = AR_b - AR_LIKE_b # E: Need type annotation +_2 = AR_LIKE_b - AR_b # E: Need type annotation + +AR_f - AR_LIKE_m # E: Unsupported operand types +AR_f - AR_LIKE_M # E: Unsupported operand types +AR_c - AR_LIKE_m # E: Unsupported operand types +AR_c - AR_LIKE_M # E: Unsupported operand types + +AR_m - AR_LIKE_f # E: Unsupported operand types +AR_M - AR_LIKE_f # E: Unsupported operand types +AR_m - AR_LIKE_c # E: Unsupported operand types +AR_M - AR_LIKE_c # E: Unsupported operand types + +AR_m - AR_LIKE_M # E: Unsupported operand types +AR_LIKE_m - AR_M # E: Unsupported operand types + +# array floor division + +AR_M // AR_LIKE_b # E: Unsupported operand types +AR_M // AR_LIKE_u # E: Unsupported operand types +AR_M // AR_LIKE_i # E: Unsupported operand types +AR_M // AR_LIKE_f # E: Unsupported operand types +AR_M // AR_LIKE_c # E: Unsupported operand types +AR_M // AR_LIKE_m # E: Unsupported operand types +AR_M // AR_LIKE_M # E: Unsupported operand types + +AR_b // AR_LIKE_M # E: Unsupported operand types +AR_u // AR_LIKE_M # E: Unsupported operand types +AR_i // AR_LIKE_M # E: Unsupported operand types +AR_f // AR_LIKE_M # E: 
Unsupported operand types +AR_c // AR_LIKE_M # E: Unsupported operand types +AR_m // AR_LIKE_M # E: Unsupported operand types +AR_M // AR_LIKE_M # E: Unsupported operand types + +_3 = AR_m // AR_LIKE_b # E: Need type annotation +AR_m // AR_LIKE_c # E: Unsupported operand types + +AR_b // AR_LIKE_m # E: Unsupported operand types +AR_u // AR_LIKE_m # E: Unsupported operand types +AR_i // AR_LIKE_m # E: Unsupported operand types +AR_f // AR_LIKE_m # E: Unsupported operand types +AR_c // AR_LIKE_m # E: Unsupported operand types + +# Array multiplication + +AR_b *= AR_LIKE_u # E: incompatible type +AR_b *= AR_LIKE_i # E: incompatible type +AR_b *= AR_LIKE_f # E: incompatible type +AR_b *= AR_LIKE_c # E: incompatible type +AR_b *= AR_LIKE_m # E: incompatible type + +AR_u *= AR_LIKE_i # E: incompatible type +AR_u *= AR_LIKE_f # E: incompatible type +AR_u *= AR_LIKE_c # E: incompatible type +AR_u *= AR_LIKE_m # E: incompatible type + +AR_i *= AR_LIKE_f # E: incompatible type +AR_i *= AR_LIKE_c # E: incompatible type +AR_i *= AR_LIKE_m # E: incompatible type + +AR_f *= AR_LIKE_c # E: incompatible type +AR_f *= AR_LIKE_m # E: incompatible type + +# Array power + +AR_b **= AR_LIKE_b # E: incompatible type +AR_b **= AR_LIKE_u # E: incompatible type +AR_b **= AR_LIKE_i # E: incompatible type +AR_b **= AR_LIKE_f # E: incompatible type +AR_b **= AR_LIKE_c # E: incompatible type + +AR_u **= AR_LIKE_i # E: incompatible type +AR_u **= AR_LIKE_f # E: incompatible type +AR_u **= AR_LIKE_c # E: incompatible type + +AR_i **= AR_LIKE_f # E: incompatible type +AR_i **= AR_LIKE_c # E: incompatible type + +AR_f **= AR_LIKE_c # E: incompatible type + +# Scalars + +b_ - b_ # E: No overload variant + +dt + dt # E: Unsupported operand types +td - dt # E: Unsupported operand types +td % 1 # E: Unsupported operand types +td / dt # E: No overload +td % dt # E: Unsupported operand types + +-b_ # E: Unsupported operand type ++b_ # E: Unsupported operand type diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/array_constructors.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/array_constructors.py new file mode 100644 index 0000000000000000000000000000000000000000..45748b34c3a11023f14792156d2c3e746af7348f --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/array_constructors.py @@ -0,0 +1,31 @@ +import numpy as np + +a: np.ndarray +generator = (i for i in range(10)) + +np.require(a, requirements=1) # E: No overload variant +np.require(a, requirements="TEST") # E: incompatible type + +np.zeros("test") # E: incompatible type +np.zeros() # E: Missing positional argument + +np.ones("test") # E: incompatible type +np.ones() # E: Missing positional argument + +np.array(0, float, True) # E: Too many positional + +np.linspace(None, 'bob') # E: No overload variant +np.linspace(0, 2, num=10.0) # E: No overload variant +np.linspace(0, 2, endpoint='True') # E: No overload variant +np.linspace(0, 2, retstep=b'False') # E: No overload variant +np.linspace(0, 2, dtype=0) # E: No overload variant +np.linspace(0, 2, axis=None) # E: No overload variant + +np.logspace(None, 'bob') # E: Argument 1 +np.logspace(0, 2, base=None) # E: Argument "base" + +np.geomspace(None, 'bob') # E: Argument 1 + +np.stack(generator) # E: No overload variant +np.hstack({1, 2}) # E: incompatible type +np.vstack(1) # E: incompatible type diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/array_like.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/array_like.py new file mode 100644 index 
0000000000000000000000000000000000000000..ea20922580fe8093f6b91907a04ae5220453a485 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/array_like.py @@ -0,0 +1,16 @@ +import numpy as np +from numpy.typing import ArrayLike + + +class A: + pass + + +x1: ArrayLike = (i for i in range(10)) # E: Incompatible types in assignment +x2: ArrayLike = A() # E: Incompatible types in assignment +x3: ArrayLike = {1: "foo", 2: "bar"} # E: Incompatible types in assignment + +scalar = np.int64(1) +scalar.__array__(dtype=np.float64) # E: No overload variant +array = np.array([1]) +array.__array__(dtype=np.float64) # E: No overload variant diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/arrayprint.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/arrayprint.py new file mode 100644 index 0000000000000000000000000000000000000000..f1b3f9586bd10e571095f982053b98cdc4bb19fd --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/arrayprint.py @@ -0,0 +1,13 @@ +from typing import Callable, Any +import numpy as np + +AR: np.ndarray +func1: Callable[[Any], str] +func2: Callable[[np.integer[Any]], str] + +np.array2string(AR, style=None) # E: Unexpected keyword argument +np.array2string(AR, legacy="1.14") # E: incompatible type +np.array2string(AR, sign="*") # E: incompatible type +np.array2string(AR, floatmode="default") # E: incompatible type +np.array2string(AR, formatter={"A": func1}) # E: incompatible type +np.array2string(AR, formatter={"float": func2}) # E: Incompatible types diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/arrayterator.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/arrayterator.py new file mode 100644 index 0000000000000000000000000000000000000000..c75610487c2030d6ff05780d71c6499ebd737351 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/arrayterator.py @@ -0,0 +1,14 @@ +from typing import Any +import numpy as np + +AR_i8: np.ndarray[Any, np.dtype[np.int64]] +ar_iter = np.lib.Arrayterator(AR_i8) + +np.lib.Arrayterator(np.int64()) # E: incompatible type +ar_iter.shape = (10, 5) # E: is read-only +ar_iter[None] # E: Invalid index type +ar_iter[None, 1] # E: Invalid index type +ar_iter[np.intp()] # E: Invalid index type +ar_iter[np.intp(), ...] 
# E: Invalid index type +ar_iter[AR_i8] # E: Invalid index type +ar_iter[AR_i8, :] # E: Invalid index type diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/bitwise_ops.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/bitwise_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..2326b1346ee4fde9cb05e7353cd4979292f3ff45 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/bitwise_ops.py @@ -0,0 +1,20 @@ +import numpy as np + +i8 = np.int64() +i4 = np.int32() +u8 = np.uint64() +b_ = np.bool_() +i = int() + +f8 = np.float64() + +b_ >> f8 # E: No overload variant +i8 << f8 # E: No overload variant +i | f8 # E: Unsupported operand types +i8 ^ f8 # E: No overload variant +u8 & f8 # E: No overload variant +~f8 # E: Unsupported operand type + +# mypys' error message for `NoReturn` is unfortunately pretty bad +# TODO: Reenable this once we add support for numerical precision for `number`s +# a = u8 | 0 # E: Need type annotation diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/comparisons.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/comparisons.py new file mode 100644 index 0000000000000000000000000000000000000000..19df510d614bef4bcb7aa3df4a86302c5901a631 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/comparisons.py @@ -0,0 +1,28 @@ +from typing import Any +import numpy as np + +AR_i: np.ndarray[Any, np.dtype[np.int64]] +AR_f: np.ndarray[Any, np.dtype[np.float64]] +AR_c: np.ndarray[Any, np.dtype[np.complex128]] +AR_m: np.ndarray[Any, np.dtype[np.timedelta64]] +AR_M: np.ndarray[Any, np.dtype[np.datetime64]] + +AR_f > AR_m # E: Unsupported operand types +AR_c > AR_m # E: Unsupported operand types + +AR_m > AR_f # E: Unsupported operand types +AR_m > AR_c # E: Unsupported operand types + +AR_i > AR_M # E: Unsupported operand types +AR_f > AR_M # E: Unsupported operand types +AR_m > AR_M # E: Unsupported operand types + +AR_M > AR_i # E: Unsupported operand types +AR_M > AR_f # E: Unsupported operand types +AR_M > AR_m # E: Unsupported operand types + +# Unfortunately `NoReturn` errors are not the most descriptive +_1 = AR_i > str() # E: Need type annotation +_2 = AR_i > bytes() # E: Need type annotation +_3 = str() > AR_M # E: Need type annotation +_4 = bytes() > AR_M # E: Need type annotation diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/constants.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..7ae9ddbf6751128d66aea84a8a21308a499f520c --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/constants.py @@ -0,0 +1,6 @@ +import numpy as np + +np.Inf = np.Inf # E: Cannot assign to final +np.ALLOW_THREADS = np.ALLOW_THREADS # E: Cannot assign to final +np.little_endian = np.little_endian # E: Cannot assign to final +np.UFUNC_PYVALS_NAME = np.UFUNC_PYVALS_NAME # E: Cannot assign to final diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/datasource.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/datasource.py new file mode 100644 index 0000000000000000000000000000000000000000..b74f2a4a74090ecd5db981f0f8052fb5379e118a --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/datasource.py @@ -0,0 +1,15 @@ +from pathlib import Path +import numpy as np + +path: Path +d1: np.DataSource + +d1.abspath(path) # E: incompatible type +d1.abspath(b"...") # E: incompatible type + +d1.exists(path) # E: incompatible type +d1.exists(b"...") # E: 
incompatible type + +d1.open(path, "r") # E: incompatible type +d1.open(b"...", encoding="utf8") # E: incompatible type +d1.open(None, newline="/n") # E: incompatible type diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/dtype.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/dtype.py new file mode 100644 index 0000000000000000000000000000000000000000..5fbe545d4b85df8736fb46baf78f04b023107785 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/dtype.py @@ -0,0 +1,20 @@ +import numpy as np + + +class Test1: + not_dtype = np.dtype(float) + + +class Test2: + dtype = float + + +np.dtype(Test1()) # E: No overload variant of "dtype" matches +np.dtype(Test2()) # E: incompatible type + +np.dtype( # E: No overload variant of "dtype" matches + { + "field1": (float, 1), + "field2": (int, 3), + } +) diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/einsumfunc.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/einsumfunc.py new file mode 100644 index 0000000000000000000000000000000000000000..69a096e31bb370cd9f050ff16a18a0e9c314bddf --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/einsumfunc.py @@ -0,0 +1,15 @@ +from typing import List, Any +import numpy as np + +AR_i: np.ndarray[Any, np.dtype[np.int64]] +AR_f: np.ndarray[Any, np.dtype[np.float64]] +AR_m: np.ndarray[Any, np.dtype[np.timedelta64]] +AR_O: np.ndarray[Any, np.dtype[np.object_]] +AR_U: np.ndarray[Any, np.dtype[np.str_]] + +np.einsum("i,i->i", AR_i, AR_m) # E: incompatible type +np.einsum("i,i->i", AR_O, AR_O) # E: incompatible type +np.einsum("i,i->i", AR_f, AR_f, dtype=np.int32) # E: incompatible type +np.einsum("i,i->i", AR_i, AR_i, dtype=np.timedelta64, casting="unsafe") # E: No overload variant +np.einsum("i,i->i", AR_i, AR_i, out=AR_U) # E: Value of type variable "_ArrayType" of "einsum" cannot be +np.einsum("i,i->i", AR_i, AR_i, out=AR_U, casting="unsafe") # E: No overload variant diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/flatiter.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/flatiter.py new file mode 100644 index 0000000000000000000000000000000000000000..d47dea9526510cbe54db28aba24f3b3046fd1c6d --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/flatiter.py @@ -0,0 +1,25 @@ +from typing import Any + +import numpy as np +from numpy.typing import _SupportsArray + + +class Index: + def __index__(self) -> int: + ... 
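+# NOTE (editor's aside, not part of the upstream test): `Index` above
+# implements only the `__index__` protocol. Such objects are valid
+# indices for a plain `ndarray`, so the rejections asserted further
+# down are specific to the `flatiter` stubs. A minimal sketch:
+#
+#     arr = np.arange(4)
+#     arr[Index()]       # fine: `ndarray.__getitem__` accepts `__index__`
+#     arr.flat[Index()]  # flagged: the `flatiter` stubs reject it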
+ + +a: "np.flatiter[np.ndarray]" +supports_array: _SupportsArray + +a.base = Any # E: Property "base" defined in "flatiter" is read-only +a.coords = Any # E: Property "coords" defined in "flatiter" is read-only +a.index = Any # E: Property "index" defined in "flatiter" is read-only +a.copy(order='C') # E: Unexpected keyword argument + +# NOTE: Contrary to `ndarray.__getitem__` its counterpart in `flatiter` +# does not accept objects with the `__array__` or `__index__` protocols; +# boolean indexing is just plain broken (gh-17175) +a[np.bool_()] # E: No overload variant of "__getitem__" +a[Index()] # E: No overload variant of "__getitem__" +a[supports_array] # E: No overload variant of "__getitem__" diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/fromnumeric.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/fromnumeric.py new file mode 100644 index 0000000000000000000000000000000000000000..cbdd62a8f5ac4766292fe0a73e176db588173859 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/fromnumeric.py @@ -0,0 +1,154 @@ +"""Tests for :mod:`numpy.core.fromnumeric`.""" + +import numpy as np + +A = np.array(True, ndmin=2, dtype=bool) +A.setflags(write=False) + +a = np.bool_(True) + +np.take(a, None) # E: incompatible type +np.take(a, axis=1.0) # E: incompatible type +np.take(A, out=1) # E: incompatible type +np.take(A, mode="bob") # E: incompatible type + +np.reshape(a, None) # E: Argument 2 to "reshape" has incompatible type +np.reshape(A, 1, order="bob") # E: Argument "order" to "reshape" has incompatible type + +np.choose(a, None) # E: incompatible type +np.choose(a, out=1.0) # E: incompatible type +np.choose(A, mode="bob") # E: incompatible type + +np.repeat(a, None) # E: Argument 2 to "repeat" has incompatible type +np.repeat(A, 1, axis=1.0) # E: Argument "axis" to "repeat" has incompatible type + +np.swapaxes(A, None, 1) # E: Argument 2 to "swapaxes" has incompatible type +np.swapaxes(A, 1, [0]) # E: Argument 3 to "swapaxes" has incompatible type + +np.transpose(A, axes=1.0) # E: Argument "axes" to "transpose" has incompatible type + +np.partition(a, None) # E: Argument 2 to "partition" has incompatible type +np.partition( + a, 0, axis="bob" # E: Argument "axis" to "partition" has incompatible type +) +np.partition( + A, 0, kind="bob" # E: Argument "kind" to "partition" has incompatible type +) +np.partition( + A, 0, order=range(5) # E: Argument "order" to "partition" has incompatible type +) + +np.argpartition( + a, None # E: incompatible type +) +np.argpartition( + a, 0, axis="bob" # E: incompatible type +) +np.argpartition( + A, 0, kind="bob" # E: incompatible type +) +np.argpartition( + A, 0, order=range(5) # E: Argument "order" to "argpartition" has incompatible type +) + +np.sort(A, axis="bob") # E: Argument "axis" to "sort" has incompatible type +np.sort(A, kind="bob") # E: Argument "kind" to "sort" has incompatible type +np.sort(A, order=range(5)) # E: Argument "order" to "sort" has incompatible type + +np.argsort(A, axis="bob") # E: Argument "axis" to "argsort" has incompatible type +np.argsort(A, kind="bob") # E: Argument "kind" to "argsort" has incompatible type +np.argsort(A, order=range(5)) # E: Argument "order" to "argsort" has incompatible type + +np.argmax(A, axis="bob") # E: No overload variant of "argmax" matches argument type +np.argmax(A, kind="bob") # E: No overload variant of "argmax" matches argument type + +np.argmin(A, axis="bob") # E: No overload variant of "argmin" matches argument type +np.argmin(A, kind="bob") # E: No 
overload variant of "argmin" matches argument type + +np.searchsorted( # E: No overload variant of "searchsorted" matches argument type + A[0], 0, side="bob" +) +np.searchsorted( # E: No overload variant of "searchsorted" matches argument type + A[0], 0, sorter=1.0 +) + +np.resize(A, 1.0) # E: Argument 2 to "resize" has incompatible type + +np.squeeze(A, 1.0) # E: No overload variant of "squeeze" matches argument type + +np.diagonal(A, offset=None) # E: Argument "offset" to "diagonal" has incompatible type +np.diagonal(A, axis1="bob") # E: Argument "axis1" to "diagonal" has incompatible type +np.diagonal(A, axis2=[]) # E: Argument "axis2" to "diagonal" has incompatible type + +np.trace(A, offset=None) # E: Argument "offset" to "trace" has incompatible type +np.trace(A, axis1="bob") # E: Argument "axis1" to "trace" has incompatible type +np.trace(A, axis2=[]) # E: Argument "axis2" to "trace" has incompatible type + +np.ravel(a, order="bob") # E: Argument "order" to "ravel" has incompatible type + +np.compress( + [True], A, axis=1.0 # E: Argument "axis" to "compress" has incompatible type +) + +np.clip(a, 1, 2, out=1) # E: No overload variant of "clip" matches argument type +np.clip(1, None, None) # E: No overload variant of "clip" matches argument type + +np.sum(a, axis=1.0) # E: incompatible type +np.sum(a, keepdims=1.0) # E: incompatible type +np.sum(a, initial=[1]) # E: incompatible type + +np.all(a, axis=1.0) # E: No overload variant +np.all(a, keepdims=1.0) # E: No overload variant +np.all(a, out=1.0) # E: No overload variant + +np.any(a, axis=1.0) # E: No overload variant +np.any(a, keepdims=1.0) # E: No overload variant +np.any(a, out=1.0) # E: No overload variant + +np.cumsum(a, axis=1.0) # E: incompatible type +np.cumsum(a, dtype=1.0) # E: incompatible type +np.cumsum(a, out=1.0) # E: incompatible type + +np.ptp(a, axis=1.0) # E: incompatible type +np.ptp(a, keepdims=1.0) # E: incompatible type +np.ptp(a, out=1.0) # E: incompatible type + +np.amax(a, axis=1.0) # E: incompatible type +np.amax(a, keepdims=1.0) # E: incompatible type +np.amax(a, out=1.0) # E: incompatible type +np.amax(a, initial=[1.0]) # E: incompatible type +np.amax(a, where=[1.0]) # E: incompatible type + +np.amin(a, axis=1.0) # E: incompatible type +np.amin(a, keepdims=1.0) # E: incompatible type +np.amin(a, out=1.0) # E: incompatible type +np.amin(a, initial=[1.0]) # E: incompatible type +np.amin(a, where=[1.0]) # E: incompatible type + +np.prod(a, axis=1.0) # E: incompatible type +np.prod(a, out=False) # E: incompatible type +np.prod(a, keepdims=1.0) # E: incompatible type +np.prod(a, initial=int) # E: incompatible type +np.prod(a, where=1.0) # E: incompatible type + +np.cumprod(a, axis=1.0) # E: Argument "axis" to "cumprod" has incompatible type +np.cumprod(a, out=False) # E: Argument "out" to "cumprod" has incompatible type + +np.size(a, axis=1.0) # E: Argument "axis" to "size" has incompatible type + +np.around(a, decimals=1.0) # E: incompatible type +np.around(a, out=type) # E: incompatible type + +np.mean(a, axis=1.0) # E: incompatible type +np.mean(a, out=False) # E: incompatible type +np.mean(a, keepdims=1.0) # E: incompatible type + +np.std(a, axis=1.0) # E: incompatible type +np.std(a, out=False) # E: incompatible type +np.std(a, ddof='test') # E: incompatible type +np.std(a, keepdims=1.0) # E: incompatible type + +np.var(a, axis=1.0) # E: incompatible type +np.var(a, out=False) # E: incompatible type +np.var(a, ddof='test') # E: incompatible type +np.var(a, keepdims=1.0) # E: incompatible type diff 
--git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/index_tricks.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/index_tricks.py new file mode 100644 index 0000000000000000000000000000000000000000..331bc34f106ebe09e45cd65d0dd3bb696495ca6e --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/index_tricks.py @@ -0,0 +1,14 @@ +from typing import List +import numpy as np + +AR_LIKE_i: List[int] +AR_LIKE_f: List[float] + +np.unravel_index(AR_LIKE_f, (1, 2, 3)) # E: incompatible type +np.ravel_multi_index(AR_LIKE_i, (1, 2, 3), mode="bob") # E: No overload variant +np.mgrid[1] # E: Invalid index type +np.mgrid[...] # E: Invalid index type +np.ogrid[1] # E: Invalid index type +np.ogrid[...] # E: Invalid index type +np.fill_diagonal(AR_LIKE_f, 2) # E: incompatible type +np.diag_indices(1.0) # E: incompatible type diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/lib_utils.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/lib_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..36310423be6af15c779c77fa2518bdaa23d92971 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/lib_utils.py @@ -0,0 +1,13 @@ +import numpy as np + +np.deprecate(1) # E: No overload variant + +np.deprecate_with_doc(1) # E: incompatible type + +np.byte_bounds(1) # E: incompatible type + +np.who(1) # E: incompatible type + +np.lookfor(None) # E: incompatible type + +np.safe_eval(None) # E: incompatible type diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/lib_version.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/lib_version.py new file mode 100644 index 0000000000000000000000000000000000000000..8c91db7b4d198866bd4ecb5d9e9064101feb74ad --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/lib_version.py @@ -0,0 +1,6 @@ +from numpy.lib import NumpyVersion + +version: NumpyVersion + +NumpyVersion(b"1.8.0") # E: incompatible type +version >= b"1.8.0" # E: Unsupported operand types diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/modules.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/modules.py new file mode 100644 index 0000000000000000000000000000000000000000..c541b93b2efb23282e1a29589348f951cfa860e6 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/modules.py @@ -0,0 +1,19 @@ +import numpy as np + +np.testing.bob # E: Module has no attribute +np.bob # E: Module has no attribute + +# Stdlib modules in the namespace by accident +np.warnings # E: Module has no attribute +np.sys # E: Module has no attribute +np.os # E: Module has no attribute +np.math # E: Module has no attribute + +# Public sub-modules that are not imported to their parent module by default; +# e.g. 
one must first execute `import numpy.lib.recfunctions` +np.lib.recfunctions # E: Module has no attribute +np.ma.mrecords # E: Module has no attribute + +np.__NUMPY_SETUP__ # E: Module has no attribute +np.__deprecated_attrs__ # E: Module has no attribute +np.__expired_functions__ # E: Module has no attribute diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/ndarray.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/ndarray.py new file mode 100644 index 0000000000000000000000000000000000000000..ae5f495cc8dd965c542fdce7830904b231ea7ce1 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/ndarray.py @@ -0,0 +1,11 @@ +import numpy as np + +# Ban setting dtype since mutating the type of the array in place +# makes having ndarray be generic over dtype impossible. Generally +# users should use `ndarray.view` in this situation anyway. See +# +# https://github.com/numpy/numpy-stubs/issues/7 +# +# for more context. +float_array = np.array([1.0]) +float_array.dtype = np.bool_ # E: Property "dtype" defined in "ndarray" is read-only diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/ndarray_misc.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/ndarray_misc.py new file mode 100644 index 0000000000000000000000000000000000000000..9f1d35c717047f663e5288099a161fc8b3ade31b --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/ndarray_misc.py @@ -0,0 +1,37 @@ +""" +Tests for miscellaneous (non-magic) ``np.ndarray``/``np.generic`` methods. + +More extensive tests are performed for the methods' +function-based counterpart in `../fromnumeric.py`. + +""" + +from typing import Any +import numpy as np + +f8: np.float64 +AR_f8: np.ndarray[Any, np.dtype[np.float64]] +AR_M: np.ndarray[Any, np.dtype[np.datetime64]] +AR_b: np.ndarray[Any, np.dtype[np.bool_]] + +ctypes_obj = AR_f8.ctypes + +reveal_type(ctypes_obj.get_data()) # E: has no attribute +reveal_type(ctypes_obj.get_shape()) # E: has no attribute +reveal_type(ctypes_obj.get_strides()) # E: has no attribute +reveal_type(ctypes_obj.get_as_parameter()) # E: has no attribute + +f8.argpartition(0) # E: has no attribute +f8.diagonal() # E: has no attribute +f8.dot(1) # E: has no attribute +f8.nonzero() # E: has no attribute +f8.partition(0) # E: has no attribute +f8.put(0, 2) # E: has no attribute +f8.setfield(2, np.float64) # E: has no attribute +f8.sort() # E: has no attribute +f8.trace() # E: has no attribute + +AR_M.__int__() # E: Invalid self argument +AR_M.__float__() # E: Invalid self argument +AR_M.__complex__() # E: Invalid self argument +AR_b.__index__() # E: Invalid self argument diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/numerictypes.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/numerictypes.py new file mode 100644 index 0000000000000000000000000000000000000000..fb2058691cb76f9bf1b051e83d3299359a200c6c --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/numerictypes.py @@ -0,0 +1,13 @@ +import numpy as np + +# Technically this works, but probably shouldn't.
See +# +# https://github.com/numpy/numpy/issues/16366 +# +np.maximum_sctype(1) # E: incompatible type "int" + +np.issubsctype(1, np.int64) # E: incompatible type "int" + +np.issubdtype(1, np.int64) # E: incompatible type "int" + +np.find_common_type(np.int64, np.int64) # E: incompatible type "Type[signedinteger[Any]]" diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/random.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/random.py new file mode 100644 index 0000000000000000000000000000000000000000..b430379b37db2e623ff8b277915fcd0862500614 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/random.py @@ -0,0 +1,61 @@ +import numpy as np +from typing import Any, List + +SEED_FLOAT: float = 457.3 +SEED_ARR_FLOAT: np.ndarray[Any, np.dtype[np.float64]] = np.array([1.0, 2, 3, 4]) +SEED_ARRLIKE_FLOAT: List[float] = [1.0, 2.0, 3.0, 4.0] +SEED_SEED_SEQ: np.random.SeedSequence = np.random.SeedSequence(0) +SEED_STR: str = "String seeding not allowed" +# default rng +np.random.default_rng(SEED_FLOAT) # E: incompatible type +np.random.default_rng(SEED_ARR_FLOAT) # E: incompatible type +np.random.default_rng(SEED_ARRLIKE_FLOAT) # E: incompatible type +np.random.default_rng(SEED_STR) # E: incompatible type + +# Seed Sequence +np.random.SeedSequence(SEED_FLOAT) # E: incompatible type +np.random.SeedSequence(SEED_ARR_FLOAT) # E: incompatible type +np.random.SeedSequence(SEED_ARRLIKE_FLOAT) # E: incompatible type +np.random.SeedSequence(SEED_SEED_SEQ) # E: incompatible type +np.random.SeedSequence(SEED_STR) # E: incompatible type + +seed_seq: np.random.bit_generator.SeedSequence = np.random.SeedSequence() +seed_seq.spawn(11.5) # E: incompatible type +seed_seq.generate_state(3.14) # E: incompatible type +seed_seq.generate_state(3, np.uint8) # E: incompatible type +seed_seq.generate_state(3, "uint8") # E: incompatible type +seed_seq.generate_state(3, "u1") # E: incompatible type +seed_seq.generate_state(3, np.uint16) # E: incompatible type +seed_seq.generate_state(3, "uint16") # E: incompatible type +seed_seq.generate_state(3, "u2") # E: incompatible type +seed_seq.generate_state(3, np.int32) # E: incompatible type +seed_seq.generate_state(3, "int32") # E: incompatible type +seed_seq.generate_state(3, "i4") # E: incompatible type + +# Bit Generators +np.random.MT19937(SEED_FLOAT) # E: incompatible type +np.random.MT19937(SEED_ARR_FLOAT) # E: incompatible type +np.random.MT19937(SEED_ARRLIKE_FLOAT) # E: incompatible type +np.random.MT19937(SEED_STR) # E: incompatible type + +np.random.PCG64(SEED_FLOAT) # E: incompatible type +np.random.PCG64(SEED_ARR_FLOAT) # E: incompatible type +np.random.PCG64(SEED_ARRLIKE_FLOAT) # E: incompatible type +np.random.PCG64(SEED_STR) # E: incompatible type + +np.random.Philox(SEED_FLOAT) # E: incompatible type +np.random.Philox(SEED_ARR_FLOAT) # E: incompatible type +np.random.Philox(SEED_ARRLIKE_FLOAT) # E: incompatible type +np.random.Philox(SEED_STR) # E: incompatible type + +np.random.SFC64(SEED_FLOAT) # E: incompatible type +np.random.SFC64(SEED_ARR_FLOAT) # E: incompatible type +np.random.SFC64(SEED_ARRLIKE_FLOAT) # E: incompatible type +np.random.SFC64(SEED_STR) # E: incompatible type + +# Generator +np.random.Generator(None) # E: incompatible type +np.random.Generator(12333283902830213) # E: incompatible type +np.random.Generator("OxFEEDF00D") # E: incompatible type +np.random.Generator([123, 234]) # E: incompatible type +np.random.Generator(np.array([123, 234], dtype="u4")) # E: incompatible type diff --git 
a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/scalars.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/scalars.py new file mode 100644 index 0000000000000000000000000000000000000000..a98b310c803818546c3bed7798003ce0430531b6 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/scalars.py @@ -0,0 +1,94 @@ +import sys +import numpy as np + +f2: np.float16 +f8: np.float64 +c8: np.complex64 + +# Construction + +np.float32(3j) # E: incompatible type + +# Technically the following examples are valid NumPy code. But they +# are not considered a best practice, and people who wish to use the +# stubs should instead do +# +# np.array([1.0, 0.0, 0.0], dtype=np.float32) +# np.array([], dtype=np.complex64) +# +# See e.g. the discussion on the mailing list +# +# https://mail.python.org/pipermail/numpy-discussion/2020-April/080566.html +# +# and the issue +# +# https://github.com/numpy/numpy-stubs/issues/41 +# +# for more context. +np.float32([1.0, 0.0, 0.0]) # E: incompatible type +np.complex64([]) # E: incompatible type + +np.complex64(1, 2) # E: Too many arguments +# TODO: protocols (can't check for non-existent protocols w/ __getattr__) + +np.datetime64(0) # E: non-matching overload + +class A: + def __float__(self): + return 1.0 + + +np.int8(A()) # E: incompatible type +np.int16(A()) # E: incompatible type +np.int32(A()) # E: incompatible type +np.int64(A()) # E: incompatible type +np.uint8(A()) # E: incompatible type +np.uint16(A()) # E: incompatible type +np.uint32(A()) # E: incompatible type +np.uint64(A()) # E: incompatible type + +np.void("test") # E: incompatible type + +np.generic(1) # E: Cannot instantiate abstract class +np.number(1) # E: Cannot instantiate abstract class +np.integer(1) # E: Cannot instantiate abstract class +np.inexact(1) # E: Cannot instantiate abstract class +np.character("test") # E: Cannot instantiate abstract class +np.flexible(b"test") # E: Cannot instantiate abstract class + +np.float64(value=0.0) # E: Unexpected keyword argument +np.int64(value=0) # E: Unexpected keyword argument +np.uint64(value=0) # E: Unexpected keyword argument +np.complex128(value=0.0j) # E: Unexpected keyword argument +np.str_(value='bob') # E: No overload variant +np.bytes_(value=b'test') # E: No overload variant +np.void(value=b'test') # E: Unexpected keyword argument +np.bool_(value=True) # E: Unexpected keyword argument +np.datetime64(value="2019") # E: No overload variant +np.timedelta64(value=0) # E: Unexpected keyword argument + +np.bytes_(b"hello", encoding='utf-8') # E: No overload variant +np.str_("hello", encoding='utf-8') # E: No overload variant + +complex(np.bytes_("1")) # E: No overload variant + +f8.item(1) # E: incompatible type +f8.item((0, 1)) # E: incompatible type +f8.squeeze(axis=1) # E: incompatible type +f8.squeeze(axis=(0, 1)) # E: incompatible type +f8.transpose(1) # E: incompatible type + +def func(a: np.float32) -> None: ... 
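+# NOTE (editor's aside, not part of the upstream test): the stubs treat
+# each concrete precision as a distinct type, so neither the narrower
+# `np.float16` nor the wider `np.float64` satisfies an `np.float32`
+# parameter, as the two checks below assert. Only an exact match
+# type-checks:
+#
+#     func(np.float32(1.0))  # OK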
+ +func(f2) # E: incompatible type +func(f8) # E: incompatible type + +round(c8) # E: No overload variant + +c8.__getnewargs__() # E: Invalid self argument +f2.__getnewargs__() # E: Invalid self argument +f2.is_integer() # E: Invalid self argument +f2.hex() # E: Invalid self argument +np.float16.fromhex("0x0.0p+0") # E: Invalid self argument +f2.__trunc__() # E: Invalid self argument +f2.__getformat__("float") # E: Invalid self argument diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/ufunc_config.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/ufunc_config.py new file mode 100644 index 0000000000000000000000000000000000000000..899fab01eb11982d55548cd4548322ebd55f60b7 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/ufunc_config.py @@ -0,0 +1,21 @@ +"""Typing tests for `numpy.core._ufunc_config`.""" + +import numpy as np + +def func1(a: str, b: int, c: float) -> None: ... +def func2(a: str, *, b: int) -> None: ... + +class Write1: + def write1(self, a: str) -> None: ... + +class Write2: + def write(self, a: str, b: str) -> None: ... + +class Write3: + def write(self, *, a: str) -> None: ... + +np.seterrcall(func1) # E: Argument 1 to "seterrcall" has incompatible type +np.seterrcall(func2) # E: Argument 1 to "seterrcall" has incompatible type +np.seterrcall(Write1()) # E: Argument 1 to "seterrcall" has incompatible type +np.seterrcall(Write2()) # E: Argument 1 to "seterrcall" has incompatible type +np.seterrcall(Write3()) # E: Argument 1 to "seterrcall" has incompatible type diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/ufunclike.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/ufunclike.py new file mode 100644 index 0000000000000000000000000000000000000000..cc28a909f5303a061499d222292dd39da3a0cdd2 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/ufunclike.py @@ -0,0 +1,21 @@ +from typing import List, Any +import numpy as np + +AR_c: np.ndarray[Any, np.dtype[np.complex128]] +AR_m: np.ndarray[Any, np.dtype[np.timedelta64]] +AR_M: np.ndarray[Any, np.dtype[np.datetime64]] +AR_O: np.ndarray[Any, np.dtype[np.object_]] + +np.fix(AR_c) # E: incompatible type +np.fix(AR_m) # E: incompatible type +np.fix(AR_M) # E: incompatible type + +np.isposinf(AR_c) # E: incompatible type +np.isposinf(AR_m) # E: incompatible type +np.isposinf(AR_M) # E: incompatible type +np.isposinf(AR_O) # E: incompatible type + +np.isneginf(AR_c) # E: incompatible type +np.isneginf(AR_m) # E: incompatible type +np.isneginf(AR_M) # E: incompatible type +np.isneginf(AR_O) # E: incompatible type diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/ufuncs.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/ufuncs.py new file mode 100644 index 0000000000000000000000000000000000000000..9da5166abdcb76fef6408c68bc4abffc8a47db23 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/ufuncs.py @@ -0,0 +1,41 @@ +import numpy as np +import numpy.typing as npt + +AR_f8: npt.NDArray[np.float64] + +np.sin.nin + "foo" # E: Unsupported operand types +np.sin(1, foo="bar") # E: No overload variant + +np.abs(None) # E: No overload variant + +np.add(1, 1, 1) # E: No overload variant +np.add(1, 1, axis=0) # E: No overload variant + +np.matmul(AR_f8, AR_f8, where=True) # E: No overload variant + +np.frexp(AR_f8, out=None) # E: No overload variant +np.frexp(AR_f8, out=AR_f8) # E: No overload variant + +np.absolute.outer() # E: "None" not callable +np.frexp.outer() # E: "None" not callable +np.divmod.outer() # E: "None" 
not callable +np.matmul.outer() # E: "None" not callable + +np.absolute.reduceat() # E: "None" not callable +np.frexp.reduceat() # E: "None" not callable +np.divmod.reduceat() # E: "None" not callable +np.matmul.reduceat() # E: "None" not callable + +np.absolute.reduce() # E: "None" not callable +np.frexp.reduce() # E: "None" not callable +np.divmod.reduce() # E: "None" not callable +np.matmul.reduce() # E: "None" not callable + +np.absolute.accumulate() # E: "None" not callable +np.frexp.accumulate() # E: "None" not callable +np.divmod.accumulate() # E: "None" not callable +np.matmul.accumulate() # E: "None" not callable + +np.frexp.at() # E: "None" not callable +np.divmod.at() # E: "None" not callable +np.matmul.at() # E: "None" not callable diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/warnings_and_errors.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/warnings_and_errors.py new file mode 100644 index 0000000000000000000000000000000000000000..7bba06fb9548df67b8f1feaa0d772a57fbda5a4a --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/fail/warnings_and_errors.py @@ -0,0 +1,7 @@ +import numpy as np + +np.AxisError(1.0) # E: Argument 1 to "AxisError" has incompatible type +np.AxisError(1, ndim=2.0) # E: Argument "ndim" to "AxisError" has incompatible type +np.AxisError( + 2, msg_prefix=404 # E: Argument "msg_prefix" to "AxisError" has incompatible type +) diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/misc/__pycache__/extended_precision.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/misc/__pycache__/extended_precision.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92d966997907db9f3175da4b13bb3c449dbefedb Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/misc/__pycache__/extended_precision.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/misc/extended_precision.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/misc/extended_precision.py new file mode 100644 index 0000000000000000000000000000000000000000..7189809e99f795014f982c61162cc2284f30cc44 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/misc/extended_precision.py @@ -0,0 +1,17 @@ +import numpy as np + +reveal_type(np.uint128()) +reveal_type(np.uint256()) + +reveal_type(np.int128()) +reveal_type(np.int256()) + +reveal_type(np.float80()) +reveal_type(np.float96()) +reveal_type(np.float128()) +reveal_type(np.float256()) + +reveal_type(np.complex160()) +reveal_type(np.complex192()) +reveal_type(np.complex256()) +reveal_type(np.complex512()) diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/mypy.ini b/MLPY/Lib/site-packages/numpy/typing/tests/data/mypy.ini new file mode 100644 index 0000000000000000000000000000000000000000..d89596b0051e5fce3096c77b2b41668ffb452709 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/mypy.ini @@ -0,0 +1,9 @@ +[mypy] +plugins = numpy.typing.mypy_plugin +show_absolute_path = True + +[mypy-numpy] +ignore_errors = True + +[mypy-numpy.*] +ignore_errors = True diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/arithmetic.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/arithmetic.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8507c39de0f8bab9c1b2a489893a68f5e6fdfc73 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/arithmetic.cpython-39.pyc differ diff --git 
a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/array_constructors.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/array_constructors.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ed1d78189b17d0e6de4978eae18412d41827169 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/array_constructors.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/array_like.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/array_like.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d36a7e13291a30f47913811394052066b0799204 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/array_like.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/arrayprint.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/arrayprint.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cee7880d406864ad15ef395e191233e3651af7c5 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/arrayprint.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/arrayterator.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/arrayterator.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bacc93bb85b05f280c9493781181ad073d2f4bb6 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/arrayterator.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/bitwise_ops.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/bitwise_ops.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..295ab0136207c9dae632c40310ca91eb06a4202d Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/bitwise_ops.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/comparisons.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/comparisons.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee7d2ad7e0bc0135550d5e85febaba6d7e231931 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/comparisons.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/dtype.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/dtype.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d79b104479823db68528b97da9974400030f41da Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/dtype.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/einsumfunc.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/einsumfunc.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c52c3221bed4f4607b0f6393c7aacb963fe982c Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/einsumfunc.cpython-39.pyc differ diff --git 
a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/flatiter.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/flatiter.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0042ae6d13621f7d470299e3fdf642e927be126d Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/flatiter.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/fromnumeric.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/fromnumeric.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9803d68cb82951249126916ce9ef12e53aa3ec81 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/fromnumeric.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/index_tricks.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/index_tricks.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b3295c780137160a331e0c5b68ee14eb625edb27 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/index_tricks.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/lib_utils.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/lib_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d7911f89bc8698c47f45d49ec80d17634a291f76 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/lib_utils.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/lib_version.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/lib_version.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12cbdfb3b21abd7bfa97131c6a838e2a77a38e2a Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/lib_version.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/literal.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/literal.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff02784e1f533579fd452866c3d7a593c74245a6 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/literal.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/mod.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/mod.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..62498cd167513c5d72100a7cde5606ac7c99c1b8 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/mod.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/modules.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/modules.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4d791833e65d2a28337a95cd9845490468845b88 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/modules.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/multiarray.cpython-39.pyc 
b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/multiarray.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c86a325648752ee71c3b2fa2f36e738fe2304185 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/multiarray.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/ndarray_conversion.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/ndarray_conversion.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3dcca2bc988701d97c332b2f2c4ac1ec9b678500 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/ndarray_conversion.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/ndarray_misc.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/ndarray_misc.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f64ae47003ab540fa5e8e7a102f8c2b1b5c479a8 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/ndarray_misc.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/ndarray_shape_manipulation.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/ndarray_shape_manipulation.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f70ce80e0140526647acb50ffa22aa45e7ebfda Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/ndarray_shape_manipulation.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/numeric.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/numeric.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d9716b86c6fe8a629e3cbe084c178d1c2bbaae6b Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/numeric.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/numerictypes.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/numerictypes.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa18d792e3aee63be222ab88bb41f29c98d39d41 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/numerictypes.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/random.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/random.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e8e08dea131b82c4bcc6e6f77364d6e999d8d08 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/random.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/scalars.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/scalars.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5e0f1f6d18d7a6c81994ee6496198681c7ca963 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/scalars.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/simple.cpython-39.pyc 
b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/simple.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f226c70a01e01cb9db259b6df04a67ce399ce4ef Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/simple.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/simple_py3.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/simple_py3.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9af2ce11418c656e7ece96061cba8915cbc144a Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/simple_py3.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/ufunc_config.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/ufunc_config.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..332b5e8bdfe2d229f25e764eafb76a700932ee22 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/ufunc_config.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/ufunclike.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/ufunclike.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc3289d035997e36392f96d696e2521c6b4ecb55 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/ufunclike.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/ufuncs.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/ufuncs.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..609e39d68f3c5e0ba83e4ef762d6c940049babbb Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/ufuncs.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/warnings_and_errors.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/warnings_and_errors.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..147fca55cb765725148858e6cf631b583ad7cc9c Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/__pycache__/warnings_and_errors.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/arithmetic.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/arithmetic.py new file mode 100644 index 0000000000000000000000000000000000000000..d82558adf508c830da7fbdb918b12918aa28a9ac --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/arithmetic.py @@ -0,0 +1,610 @@ +from __future__ import annotations + +from typing import Any +import numpy as np + +c16 = np.complex128(1) +f8 = np.float64(1) +i8 = np.int64(1) +u8 = np.uint64(1) + +c8 = np.complex64(1) +f4 = np.float32(1) +i4 = np.int32(1) +u4 = np.uint32(1) + +dt = np.datetime64(1, "D") +td = np.timedelta64(1, "D") + +b_ = np.bool_(1) + +b = bool(1) +c = complex(1) +f = float(1) +i = int(1) + + +class Object: + def __array__(self) -> np.ndarray[Any, np.dtype[np.object_]]: + ret = np.empty((), dtype=object) + ret[()] = self + return ret + + def __sub__(self, value: Any) -> Object: + return self + + def __rsub__(self, value: Any) -> Object: + return self + + def __floordiv__(self, value: Any) -> Object: + 
return self + + def __rfloordiv__(self, value: Any) -> Object: + return self + + def __mul__(self, value: Any) -> Object: + return self + + def __rmul__(self, value: Any) -> Object: + return self + + def __pow__(self, value: Any) -> Object: + return self + + def __rpow__(self, value: Any) -> Object: + return self + + +AR_b: np.ndarray[Any, np.dtype[np.bool_]] = np.array([True]) +AR_u: np.ndarray[Any, np.dtype[np.uint32]] = np.array([1], dtype=np.uint32) +AR_i: np.ndarray[Any, np.dtype[np.int64]] = np.array([1]) +AR_f: np.ndarray[Any, np.dtype[np.float64]] = np.array([1.0]) +AR_c: np.ndarray[Any, np.dtype[np.complex128]] = np.array([1j]) +AR_m: np.ndarray[Any, np.dtype[np.timedelta64]] = np.array([np.timedelta64(1, "D")]) +AR_M: np.ndarray[Any, np.dtype[np.datetime64]] = np.array([np.datetime64(1, "D")]) +AR_O: np.ndarray[Any, np.dtype[np.object_]] = np.array([Object()]) + +AR_LIKE_b = [True] +AR_LIKE_u = [np.uint32(1)] +AR_LIKE_i = [1] +AR_LIKE_f = [1.0] +AR_LIKE_c = [1j] +AR_LIKE_m = [np.timedelta64(1, "D")] +AR_LIKE_M = [np.datetime64(1, "D")] +AR_LIKE_O = [Object()] + +# Array subtractions + +AR_b - AR_LIKE_u +AR_b - AR_LIKE_i +AR_b - AR_LIKE_f +AR_b - AR_LIKE_c +AR_b - AR_LIKE_m +AR_b - AR_LIKE_O + +AR_LIKE_u - AR_b +AR_LIKE_i - AR_b +AR_LIKE_f - AR_b +AR_LIKE_c - AR_b +AR_LIKE_m - AR_b +AR_LIKE_M - AR_b +AR_LIKE_O - AR_b + +AR_u - AR_LIKE_b +AR_u - AR_LIKE_u +AR_u - AR_LIKE_i +AR_u - AR_LIKE_f +AR_u - AR_LIKE_c +AR_u - AR_LIKE_m +AR_u - AR_LIKE_O + +AR_LIKE_b - AR_u +AR_LIKE_u - AR_u +AR_LIKE_i - AR_u +AR_LIKE_f - AR_u +AR_LIKE_c - AR_u +AR_LIKE_m - AR_u +AR_LIKE_M - AR_u +AR_LIKE_O - AR_u + +AR_i - AR_LIKE_b +AR_i - AR_LIKE_u +AR_i - AR_LIKE_i +AR_i - AR_LIKE_f +AR_i - AR_LIKE_c +AR_i - AR_LIKE_m +AR_i - AR_LIKE_O + +AR_LIKE_b - AR_i +AR_LIKE_u - AR_i +AR_LIKE_i - AR_i +AR_LIKE_f - AR_i +AR_LIKE_c - AR_i +AR_LIKE_m - AR_i +AR_LIKE_M - AR_i +AR_LIKE_O - AR_i + +AR_f - AR_LIKE_b +AR_f - AR_LIKE_u +AR_f - AR_LIKE_i +AR_f - AR_LIKE_f +AR_f - AR_LIKE_c +AR_f - AR_LIKE_O + +AR_LIKE_b - AR_f +AR_LIKE_u - AR_f +AR_LIKE_i - AR_f +AR_LIKE_f - AR_f +AR_LIKE_c - AR_f +AR_LIKE_O - AR_f + +AR_c - AR_LIKE_b +AR_c - AR_LIKE_u +AR_c - AR_LIKE_i +AR_c - AR_LIKE_f +AR_c - AR_LIKE_c +AR_c - AR_LIKE_O + +AR_LIKE_b - AR_c +AR_LIKE_u - AR_c +AR_LIKE_i - AR_c +AR_LIKE_f - AR_c +AR_LIKE_c - AR_c +AR_LIKE_O - AR_c + +AR_m - AR_LIKE_b +AR_m - AR_LIKE_u +AR_m - AR_LIKE_i +AR_m - AR_LIKE_m + +AR_LIKE_b - AR_m +AR_LIKE_u - AR_m +AR_LIKE_i - AR_m +AR_LIKE_m - AR_m +AR_LIKE_M - AR_m + +AR_M - AR_LIKE_b +AR_M - AR_LIKE_u +AR_M - AR_LIKE_i +AR_M - AR_LIKE_m +AR_M - AR_LIKE_M + +AR_LIKE_M - AR_M + +AR_O - AR_LIKE_b +AR_O - AR_LIKE_u +AR_O - AR_LIKE_i +AR_O - AR_LIKE_f +AR_O - AR_LIKE_c +AR_O - AR_LIKE_O + +AR_LIKE_b - AR_O +AR_LIKE_u - AR_O +AR_LIKE_i - AR_O +AR_LIKE_f - AR_O +AR_LIKE_c - AR_O +AR_LIKE_O - AR_O + +# Array floor division + +AR_b // AR_LIKE_b +AR_b // AR_LIKE_u +AR_b // AR_LIKE_i +AR_b // AR_LIKE_f +AR_b // AR_LIKE_c +AR_b // AR_LIKE_O + +AR_LIKE_b // AR_b +AR_LIKE_u // AR_b +AR_LIKE_i // AR_b +AR_LIKE_f // AR_b +AR_LIKE_c // AR_b +AR_LIKE_O // AR_b + +AR_u // AR_LIKE_b +AR_u // AR_LIKE_u +AR_u // AR_LIKE_i +AR_u // AR_LIKE_f +AR_u // AR_LIKE_c +AR_u // AR_LIKE_O + +AR_LIKE_b // AR_u +AR_LIKE_u // AR_u +AR_LIKE_i // AR_u +AR_LIKE_f // AR_u +AR_LIKE_c // AR_u +AR_LIKE_m // AR_u +AR_LIKE_O // AR_u + +AR_i // AR_LIKE_b +AR_i // AR_LIKE_u +AR_i // AR_LIKE_i +AR_i // AR_LIKE_f +AR_i // AR_LIKE_c +AR_i // AR_LIKE_O + +AR_LIKE_b // AR_i +AR_LIKE_u // AR_i +AR_LIKE_i // AR_i +AR_LIKE_f // AR_i +AR_LIKE_c // 
AR_i +AR_LIKE_m // AR_i +AR_LIKE_O // AR_i + +AR_f // AR_LIKE_b +AR_f // AR_LIKE_u +AR_f // AR_LIKE_i +AR_f // AR_LIKE_f +AR_f // AR_LIKE_c +AR_f // AR_LIKE_O + +AR_LIKE_b // AR_f +AR_LIKE_u // AR_f +AR_LIKE_i // AR_f +AR_LIKE_f // AR_f +AR_LIKE_c // AR_f +AR_LIKE_m // AR_f +AR_LIKE_O // AR_f + +AR_c // AR_LIKE_b +AR_c // AR_LIKE_u +AR_c // AR_LIKE_i +AR_c // AR_LIKE_f +AR_c // AR_LIKE_c + +AR_LIKE_b // AR_c +AR_LIKE_u // AR_c +AR_LIKE_i // AR_c +AR_LIKE_f // AR_c +AR_LIKE_c // AR_c +AR_LIKE_O // AR_c + +AR_m // AR_LIKE_u +AR_m // AR_LIKE_i +AR_m // AR_LIKE_f +AR_m // AR_LIKE_m + +AR_LIKE_m // AR_m + +AR_O // AR_LIKE_b +AR_O // AR_LIKE_u +AR_O // AR_LIKE_i +AR_O // AR_LIKE_f +AR_O // AR_LIKE_c +AR_O // AR_LIKE_O + +AR_LIKE_b // AR_O +AR_LIKE_u // AR_O +AR_LIKE_i // AR_O +AR_LIKE_f // AR_O +AR_LIKE_O // AR_O + +# Inplace multiplication + +AR_b *= AR_LIKE_b + +AR_u *= AR_LIKE_b +AR_u *= AR_LIKE_u + +AR_i *= AR_LIKE_b +AR_i *= AR_LIKE_u +AR_i *= AR_LIKE_i + +AR_f *= AR_LIKE_b +AR_f *= AR_LIKE_u +AR_f *= AR_LIKE_i +AR_f *= AR_LIKE_f + +AR_c *= AR_LIKE_b +AR_c *= AR_LIKE_u +AR_c *= AR_LIKE_i +AR_c *= AR_LIKE_f +AR_c *= AR_LIKE_c + +AR_m *= AR_LIKE_b +AR_m *= AR_LIKE_u +AR_m *= AR_LIKE_i +AR_m *= AR_LIKE_f + +AR_O *= AR_LIKE_b +AR_O *= AR_LIKE_u +AR_O *= AR_LIKE_i +AR_O *= AR_LIKE_f +AR_O *= AR_LIKE_c +AR_O *= AR_LIKE_O + +# Inplace power + +AR_u **= AR_LIKE_b +AR_u **= AR_LIKE_u + +AR_i **= AR_LIKE_b +AR_i **= AR_LIKE_u +AR_i **= AR_LIKE_i + +AR_f **= AR_LIKE_b +AR_f **= AR_LIKE_u +AR_f **= AR_LIKE_i +AR_f **= AR_LIKE_f + +AR_c **= AR_LIKE_b +AR_c **= AR_LIKE_u +AR_c **= AR_LIKE_i +AR_c **= AR_LIKE_f +AR_c **= AR_LIKE_c + +AR_O **= AR_LIKE_b +AR_O **= AR_LIKE_u +AR_O **= AR_LIKE_i +AR_O **= AR_LIKE_f +AR_O **= AR_LIKE_c +AR_O **= AR_LIKE_O + +# unary ops + +-c16 +-c8 +-f8 +-f4 +-i8 +-i4 +-u8 +-u4 +-td +-AR_f + ++c16 ++c8 ++f8 ++f4 ++i8 ++i4 ++u8 ++u4 ++td ++AR_f + +abs(c16) +abs(c8) +abs(f8) +abs(f4) +abs(i8) +abs(i4) +abs(u8) +abs(u4) +abs(td) +abs(b_) +abs(AR_f) + +# Time structures + +dt + td +dt + i +dt + i4 +dt + i8 +dt - dt +dt - i +dt - i4 +dt - i8 + +td + td +td + i +td + i4 +td + i8 +td - td +td - i +td - i4 +td - i8 +td / f +td / f4 +td / f8 +td / td +td // td +td % td + + +# boolean + +b_ / b +b_ / b_ +b_ / i +b_ / i8 +b_ / i4 +b_ / u8 +b_ / u4 +b_ / f +b_ / f8 +b_ / f4 +b_ / c +b_ / c16 +b_ / c8 + +b / b_ +b_ / b_ +i / b_ +i8 / b_ +i4 / b_ +u8 / b_ +u4 / b_ +f / b_ +f8 / b_ +f4 / b_ +c / b_ +c16 / b_ +c8 / b_ + +# Complex + +c16 + c16 +c16 + f8 +c16 + i8 +c16 + c8 +c16 + f4 +c16 + i4 +c16 + b_ +c16 + b +c16 + c +c16 + f +c16 + i +c16 + AR_f + +c16 + c16 +f8 + c16 +i8 + c16 +c8 + c16 +f4 + c16 +i4 + c16 +b_ + c16 +b + c16 +c + c16 +f + c16 +i + c16 +AR_f + c16 + +c8 + c16 +c8 + f8 +c8 + i8 +c8 + c8 +c8 + f4 +c8 + i4 +c8 + b_ +c8 + b +c8 + c +c8 + f +c8 + i +c8 + AR_f + +c16 + c8 +f8 + c8 +i8 + c8 +c8 + c8 +f4 + c8 +i4 + c8 +b_ + c8 +b + c8 +c + c8 +f + c8 +i + c8 +AR_f + c8 + +# Float + +f8 + f8 +f8 + i8 +f8 + f4 +f8 + i4 +f8 + b_ +f8 + b +f8 + c +f8 + f +f8 + i +f8 + AR_f + +f8 + f8 +i8 + f8 +f4 + f8 +i4 + f8 +b_ + f8 +b + f8 +c + f8 +f + f8 +i + f8 +AR_f + f8 + +f4 + f8 +f4 + i8 +f4 + f4 +f4 + i4 +f4 + b_ +f4 + b +f4 + c +f4 + f +f4 + i +f4 + AR_f + +f8 + f4 +i8 + f4 +f4 + f4 +i4 + f4 +b_ + f4 +b + f4 +c + f4 +f + f4 +i + f4 +AR_f + f4 + +# Int + +i8 + i8 +i8 + u8 +i8 + i4 +i8 + u4 +i8 + b_ +i8 + b +i8 + c +i8 + f +i8 + i +i8 + AR_f + +u8 + u8 +u8 + i4 +u8 + u4 +u8 + b_ +u8 + b +u8 + c +u8 + f +u8 + i +u8 + AR_f + +i8 + i8 +u8 + i8 +i4 + i8 +u4 + i8 +b_ + i8 +b + i8 +c + i8 +f + i8 
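+# NOTE (editor's aside, not part of the upstream test): both operand
+# orders are exercised above and below because the stubs resolve
+# `__add__` and `__radd__` separately. At runtime the usual promotion
+# rules decide the result type, e.g.:
+#
+#     np.int64(1) + np.int32(1)   # -> np.int64
+#     np.int64(1) + np.uint64(1)  # -> np.float64 (no common integer type)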
+i + i8 +AR_f + i8 + +u8 + u8 +i4 + u8 +u4 + u8 +b_ + u8 +b + u8 +c + u8 +f + u8 +i + u8 +AR_f + u8 + +i4 + i8 +i4 + i4 +i4 + i +i4 + b_ +i4 + b +i4 + AR_f + +u4 + i8 +u4 + i4 +u4 + u8 +u4 + u4 +u4 + i +u4 + b_ +u4 + b +u4 + AR_f + +i8 + i4 +i4 + i4 +i + i4 +b_ + i4 +b + i4 +AR_f + i4 + +i8 + u4 +i4 + u4 +u8 + u4 +u4 + u4 +b_ + u4 +b + u4 +i + u4 +AR_f + u4 diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/array_constructors.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/array_constructors.py new file mode 100644 index 0000000000000000000000000000000000000000..99b9269930119dbf933353203b0e1e816d0c3f8a --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/array_constructors.py @@ -0,0 +1,138 @@ +import sys +from typing import List, Any +import numpy as np + + +class Index: + def __index__(self) -> int: + return 0 + + +class SubClass(np.ndarray): + pass + + +def func(i: int, j: int, **kwargs: Any) -> SubClass: + return B + + +i8 = np.int64(1) + +A = np.array([1]) +B = A.view(SubClass).copy() +B_stack = np.array([[1], [1]]).view(SubClass) +C = [1] + +if sys.version_info >= (3, 8): + np.ndarray(Index()) + np.ndarray([Index()]) + +np.array(1, dtype=float) +np.array(1, copy=False) +np.array(1, order='F') +np.array(1, order=None) +np.array(1, subok=True) +np.array(1, ndmin=3) +np.array(1, str, copy=True, order='C', subok=False, ndmin=2) + +np.asarray(A) +np.asarray(B) +np.asarray(C) + +np.asanyarray(A) +np.asanyarray(B) +np.asanyarray(B, dtype=int) +np.asanyarray(C) + +np.ascontiguousarray(A) +np.ascontiguousarray(B) +np.ascontiguousarray(C) + +np.asfortranarray(A) +np.asfortranarray(B) +np.asfortranarray(C) + +np.require(A) +np.require(B) +np.require(B, dtype=int) +np.require(B, requirements=None) +np.require(B, requirements="E") +np.require(B, requirements=["ENSUREARRAY"]) +np.require(B, requirements={"F", "E"}) +np.require(B, requirements=["C", "OWNDATA"]) +np.require(B, requirements="W") +np.require(B, requirements="A") +np.require(C) + +np.linspace(0, 2) +np.linspace(0.5, [0, 1, 2]) +np.linspace([0, 1, 2], 3) +np.linspace(0j, 2) +np.linspace(0, 2, num=10) +np.linspace(0, 2, endpoint=True) +np.linspace(0, 2, retstep=True) +np.linspace(0j, 2j, retstep=True) +np.linspace(0, 2, dtype=bool) +np.linspace([0, 1], [2, 3], axis=Index()) + +np.logspace(0, 2, base=2) +np.logspace(0, 2, base=2) +np.logspace(0, 2, base=[1j, 2j], num=2) + +np.geomspace(1, 2) + +np.zeros_like(A) +np.zeros_like(C) +np.zeros_like(B) +np.zeros_like(B, dtype=np.int64) + +np.ones_like(A) +np.ones_like(C) +np.ones_like(B) +np.ones_like(B, dtype=np.int64) + +np.empty_like(A) +np.empty_like(C) +np.empty_like(B) +np.empty_like(B, dtype=np.int64) + +np.full_like(A, i8) +np.full_like(C, i8) +np.full_like(B, i8) +np.full_like(B, i8, dtype=np.int64) + +np.ones(1) +np.ones([1, 1, 1]) + +np.full(1, i8) +np.full([1, 1, 1], i8) + +np.indices([1, 2, 3]) +np.indices([1, 2, 3], sparse=True) + +np.fromfunction(func, (3, 5)) + +np.identity(10) + +np.atleast_1d(C) +np.atleast_1d(A) +np.atleast_1d(C, C) +np.atleast_1d(C, A) +np.atleast_1d(A, A) + +np.atleast_2d(C) + +np.atleast_3d(C) + +np.vstack([C, C]) +np.vstack([C, A]) +np.vstack([A, A]) + +np.hstack([C, C]) + +np.stack([C, C]) +np.stack([C, C], axis=0) +np.stack([C, C], out=B_stack) + +np.block([[C, C], [C, C]]) +np.block(A) diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/array_like.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/array_like.py new file mode 100644 index 
0000000000000000000000000000000000000000..d9bc84728888db23167f66514d6445907de2d244 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/array_like.py @@ -0,0 +1,39 @@ +from typing import Any, List, Optional + +import numpy as np +from numpy.typing import ArrayLike, _SupportsArray + +x1: ArrayLike = True +x2: ArrayLike = 5 +x3: ArrayLike = 1.0 +x4: ArrayLike = 1 + 1j +x5: ArrayLike = np.int8(1) +x6: ArrayLike = np.float64(1) +x7: ArrayLike = np.complex128(1) +x8: ArrayLike = np.array([1, 2, 3]) +x9: ArrayLike = [1, 2, 3] +x10: ArrayLike = (1, 2, 3) +x11: ArrayLike = "foo" +x12: ArrayLike = memoryview(b'foo') + + +class A: + def __array__(self, dtype: Optional[np.dtype] = None) -> np.ndarray: + return np.array([1, 2, 3]) + + +x13: ArrayLike = A() + +scalar: _SupportsArray = np.int64(1) +scalar.__array__() +array: _SupportsArray = np.array(1) +array.__array__() + +a: _SupportsArray = A() +a.__array__() +a.__array__() + +# Escape hatch for when you mean to make something like an object +# array. +object_array_scalar: Any = (i for i in range(10)) +np.array(object_array_scalar) diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/arrayprint.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/arrayprint.py new file mode 100644 index 0000000000000000000000000000000000000000..1eacc04dd992463b9d3808991a0a2d1c36b2b526 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/arrayprint.py @@ -0,0 +1,37 @@ +import numpy as np + +AR = np.arange(10) +AR.setflags(write=False) + +with np.printoptions(): + np.set_printoptions( + precision=1, + threshold=2, + edgeitems=3, + linewidth=4, + suppress=False, + nanstr="Bob", + infstr="Bill", + formatter={}, + sign="+", + floatmode="unique", + ) + np.get_printoptions() + str(AR) + + np.array2string( + AR, + max_line_width=5, + precision=2, + suppress_small=True, + separator=";", + prefix="test", + threshold=5, + floatmode="fixed", + suffix="?", + legacy="1.13", + ) + np.format_float_scientific(1, precision=5) + np.format_float_positional(1, trim="k") + np.array_repr(AR) + np.array_str(AR) diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/arrayterator.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/arrayterator.py new file mode 100644 index 0000000000000000000000000000000000000000..86640b267d4c5fde30b9eda481537af083306dcd --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/arrayterator.py @@ -0,0 +1,27 @@ + +from __future__ import annotations + +from typing import Any +import numpy as np + +AR_i8: np.ndarray[Any, np.dtype[np.int_]] = np.arange(10) +ar_iter = np.lib.Arrayterator(AR_i8) + +ar_iter.var +ar_iter.buf_size +ar_iter.start +ar_iter.stop +ar_iter.step +ar_iter.shape +ar_iter.flat + +ar_iter.__array__() + +for i in ar_iter: + pass + +ar_iter[0] +ar_iter[...] 
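+# NOTE (editor's aside, not part of the upstream test): `Arrayterator`
+# wraps an array for buffered iteration, and indexing it returns another
+# `Arrayterator` view rather than an ndarray. A small sketch:
+#
+#     big = np.lib.Arrayterator(np.arange(100).reshape(10, 10), buf_size=10)
+#     for block in big:  # each `block` is an ndarray of at most `buf_size` elements
+#         block.sum()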
+ar_iter[:] +ar_iter[0, 0, 0] +ar_iter[..., 0, :] diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/bitwise_ops.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/bitwise_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..e32207485adc855073a3944f97e7da646dd86c05 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/bitwise_ops.py @@ -0,0 +1,131 @@ +import numpy as np + +i8 = np.int64(1) +u8 = np.uint64(1) + +i4 = np.int32(1) +u4 = np.uint32(1) + +b_ = np.bool_(1) + +b = bool(1) +i = int(1) + +AR = np.array([0, 1, 2], dtype=np.int32) +AR.setflags(write=False) + + +i8 << i8 +i8 >> i8 +i8 | i8 +i8 ^ i8 +i8 & i8 + +i8 << AR +i8 >> AR +i8 | AR +i8 ^ AR +i8 & AR + +i4 << i4 +i4 >> i4 +i4 | i4 +i4 ^ i4 +i4 & i4 + +i8 << i4 +i8 >> i4 +i8 | i4 +i8 ^ i4 +i8 & i4 + +i8 << i +i8 >> i +i8 | i +i8 ^ i +i8 & i + +i8 << b_ +i8 >> b_ +i8 | b_ +i8 ^ b_ +i8 & b_ + +i8 << b +i8 >> b +i8 | b +i8 ^ b +i8 & b + +u8 << u8 +u8 >> u8 +u8 | u8 +u8 ^ u8 +u8 & u8 + +u8 << AR +u8 >> AR +u8 | AR +u8 ^ AR +u8 & AR + +u4 << u4 +u4 >> u4 +u4 | u4 +u4 ^ u4 +u4 & u4 + +u4 << i4 +u4 >> i4 +u4 | i4 +u4 ^ i4 +u4 & i4 + +u4 << i +u4 >> i +u4 | i +u4 ^ i +u4 & i + +u8 << b_ +u8 >> b_ +u8 | b_ +u8 ^ b_ +u8 & b_ + +u8 << b +u8 >> b +u8 | b +u8 ^ b +u8 & b + +b_ << b_ +b_ >> b_ +b_ | b_ +b_ ^ b_ +b_ & b_ + +b_ << AR +b_ >> AR +b_ | AR +b_ ^ AR +b_ & AR + +b_ << b +b_ >> b +b_ | b +b_ ^ b +b_ & b + +b_ << i +b_ >> i +b_ | i +b_ ^ i +b_ & i + +~i8 +~i4 +~u8 +~u4 +~b_ +~AR diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/comparisons.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/comparisons.py new file mode 100644 index 0000000000000000000000000000000000000000..a4b47b5ab3a229387b2e5717f4f09b3b5b734f31 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/comparisons.py @@ -0,0 +1,301 @@ +from __future__ import annotations + +from typing import Any +import numpy as np + +c16 = np.complex128() +f8 = np.float64() +i8 = np.int64() +u8 = np.uint64() + +c8 = np.complex64() +f4 = np.float32() +i4 = np.int32() +u4 = np.uint32() + +dt = np.datetime64(0, "D") +td = np.timedelta64(0, "D") + +b_ = np.bool_() + +b = bool() +c = complex() +f = float() +i = int() + +SEQ = (0, 1, 2, 3, 4) + +AR_b: np.ndarray[Any, np.dtype[np.bool_]] = np.array([True]) +AR_u: np.ndarray[Any, np.dtype[np.uint32]] = np.array([1], dtype=np.uint32) +AR_i: np.ndarray[Any, np.dtype[np.int_]] = np.array([1]) +AR_f: np.ndarray[Any, np.dtype[np.float_]] = np.array([1.0]) +AR_c: np.ndarray[Any, np.dtype[np.complex_]] = np.array([1.0j]) +AR_m: np.ndarray[Any, np.dtype[np.timedelta64]] = np.array([np.timedelta64("1")]) +AR_M: np.ndarray[Any, np.dtype[np.datetime64]] = np.array([np.datetime64("1")]) +AR_O: np.ndarray[Any, np.dtype[np.object_]] = np.array([1], dtype=object) + +# Arrays + +AR_b > AR_b +AR_b > AR_u +AR_b > AR_i +AR_b > AR_f +AR_b > AR_c + +AR_u > AR_b +AR_u > AR_u +AR_u > AR_i +AR_u > AR_f +AR_u > AR_c + +AR_i > AR_b +AR_i > AR_u +AR_i > AR_i +AR_i > AR_f +AR_i > AR_c + +AR_f > AR_b +AR_f > AR_u +AR_f > AR_i +AR_f > AR_f +AR_f > AR_c + +AR_c > AR_b +AR_c > AR_u +AR_c > AR_i +AR_c > AR_f +AR_c > AR_c + +AR_m > AR_b +AR_m > AR_u +AR_m > AR_i +AR_b > AR_m +AR_u > AR_m +AR_i > AR_m + +AR_M > AR_M + +AR_O > AR_O +1 > AR_O +AR_O > 1 + +# Time structures + +dt > dt + +td > td +td > i +td > i4 +td > i8 +td > AR_i +td > SEQ + +# boolean + +b_ > b +b_ > b_ +b_ > i +b_ > i8 +b_ > i4 +b_ > u8 +b_ > u4 +b_ > f +b_ > f8 +b_ > f4 +b_ > c +b_ > c16 +b_ > c8 +b_ > AR_i +b_ > SEQ 
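+# NOTE (editor's aside, not part of the upstream test): comparing a
+# scalar against a sequence or array broadcasts elementwise and yields
+# a boolean array rather than a single bool, e.g.:
+#
+#     np.bool_(True) > (0, 1, 2, 3, 4)  # -> array([ True, False, False, False, False])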
+ +# Complex + +c16 > c16 +c16 > f8 +c16 > i8 +c16 > c8 +c16 > f4 +c16 > i4 +c16 > b_ +c16 > b +c16 > c +c16 > f +c16 > i +c16 > AR_i +c16 > SEQ + +c16 > c16 +f8 > c16 +i8 > c16 +c8 > c16 +f4 > c16 +i4 > c16 +b_ > c16 +b > c16 +c > c16 +f > c16 +i > c16 +AR_i > c16 +SEQ > c16 + +c8 > c16 +c8 > f8 +c8 > i8 +c8 > c8 +c8 > f4 +c8 > i4 +c8 > b_ +c8 > b +c8 > c +c8 > f +c8 > i +c8 > AR_i +c8 > SEQ + +c16 > c8 +f8 > c8 +i8 > c8 +c8 > c8 +f4 > c8 +i4 > c8 +b_ > c8 +b > c8 +c > c8 +f > c8 +i > c8 +AR_i > c8 +SEQ > c8 + +# Float + +f8 > f8 +f8 > i8 +f8 > f4 +f8 > i4 +f8 > b_ +f8 > b +f8 > c +f8 > f +f8 > i +f8 > AR_i +f8 > SEQ + +f8 > f8 +i8 > f8 +f4 > f8 +i4 > f8 +b_ > f8 +b > f8 +c > f8 +f > f8 +i > f8 +AR_i > f8 +SEQ > f8 + +f4 > f8 +f4 > i8 +f4 > f4 +f4 > i4 +f4 > b_ +f4 > b +f4 > c +f4 > f +f4 > i +f4 > AR_i +f4 > SEQ + +f8 > f4 +i8 > f4 +f4 > f4 +i4 > f4 +b_ > f4 +b > f4 +c > f4 +f > f4 +i > f4 +AR_i > f4 +SEQ > f4 + +# Int + +i8 > i8 +i8 > u8 +i8 > i4 +i8 > u4 +i8 > b_ +i8 > b +i8 > c +i8 > f +i8 > i +i8 > AR_i +i8 > SEQ + +u8 > u8 +u8 > i4 +u8 > u4 +u8 > b_ +u8 > b +u8 > c +u8 > f +u8 > i +u8 > AR_i +u8 > SEQ + +i8 > i8 +u8 > i8 +i4 > i8 +u4 > i8 +b_ > i8 +b > i8 +c > i8 +f > i8 +i > i8 +AR_i > i8 +SEQ > i8 + +u8 > u8 +i4 > u8 +u4 > u8 +b_ > u8 +b > u8 +c > u8 +f > u8 +i > u8 +AR_i > u8 +SEQ > u8 + +i4 > i8 +i4 > i4 +i4 > i +i4 > b_ +i4 > b +i4 > AR_i +i4 > SEQ + +u4 > i8 +u4 > i4 +u4 > u8 +u4 > u4 +u4 > i +u4 > b_ +u4 > b +u4 > AR_i +u4 > SEQ + +i8 > i4 +i4 > i4 +i > i4 +b_ > i4 +b > i4 +AR_i > i4 +SEQ > i4 + +i8 > u4 +i4 > u4 +u8 > u4 +u4 > u4 +b_ > u4 +b > u4 +i > u4 +AR_i > u4 +SEQ > u4 diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/dtype.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/dtype.py new file mode 100644 index 0000000000000000000000000000000000000000..5c713836c217948f96e13d67ed1689c7691c384a --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/dtype.py @@ -0,0 +1,57 @@ +import numpy as np + +dtype_obj = np.dtype(np.str_) +void_dtype_obj = np.dtype([("f0", np.float64), ("f1", np.float32)]) + +np.dtype(dtype=np.int64) +np.dtype(int) +np.dtype("int") +np.dtype(None) + +np.dtype((int, 2)) +np.dtype((int, (1,))) + +np.dtype({"names": ["a", "b"], "formats": [int, float]}) +np.dtype({"names": ["a"], "formats": [int], "titles": [object]}) +np.dtype({"names": ["a"], "formats": [int], "titles": [object()]}) + +np.dtype([("name", np.unicode_, 16), ("grades", np.float64, (2,)), ("age", "int32")]) + +np.dtype( + { + "names": ["a", "b"], + "formats": [int, float], + "itemsize": 9, + "aligned": False, + "titles": ["x", "y"], + "offsets": [0, 1], + } +) + +np.dtype((np.float_, float)) + + +class Test: + dtype = np.dtype(float) + + +np.dtype(Test()) + +# Methods and attributes +dtype_obj.base +dtype_obj.subdtype +dtype_obj.newbyteorder() +dtype_obj.type +dtype_obj.name +dtype_obj.names + +dtype_obj * 0 +dtype_obj * 2 + +0 * dtype_obj +2 * dtype_obj + +void_dtype_obj["f0"] +void_dtype_obj[0] +void_dtype_obj[["f0", "f1"]] +void_dtype_obj[["f0"]] diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/einsumfunc.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/einsumfunc.py new file mode 100644 index 0000000000000000000000000000000000000000..6e933a3158d744f71d5d69a52c804fbf60a35f37 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/einsumfunc.py @@ -0,0 +1,36 @@ +from __future__ import annotations + +from typing import List, Any + +import numpy as np + +AR_LIKE_b = [True, True, True] +AR_LIKE_u = [np.uint32(1), 
np.uint32(2), np.uint32(3)] +AR_LIKE_i = [1, 2, 3] +AR_LIKE_f = [1.0, 2.0, 3.0] +AR_LIKE_c = [1j, 2j, 3j] +AR_LIKE_U = ["1", "2", "3"] + +OUT_f: np.ndarray[Any, np.dtype[np.float64]] = np.empty(3, dtype=np.float64) +OUT_c: np.ndarray[Any, np.dtype[np.complex128]] = np.empty(3, dtype=np.complex128) + +np.einsum("i,i->i", AR_LIKE_b, AR_LIKE_b) +np.einsum("i,i->i", AR_LIKE_u, AR_LIKE_u) +np.einsum("i,i->i", AR_LIKE_i, AR_LIKE_i) +np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f) +np.einsum("i,i->i", AR_LIKE_c, AR_LIKE_c) +np.einsum("i,i->i", AR_LIKE_b, AR_LIKE_i) +np.einsum("i,i,i,i->i", AR_LIKE_b, AR_LIKE_u, AR_LIKE_i, AR_LIKE_c) + +np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f, dtype="c16") +np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=bool, casting="unsafe") +np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f, out=OUT_c) +np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=int, casting="unsafe", out=OUT_f) + +np.einsum_path("i,i->i", AR_LIKE_b, AR_LIKE_b) +np.einsum_path("i,i->i", AR_LIKE_u, AR_LIKE_u) +np.einsum_path("i,i->i", AR_LIKE_i, AR_LIKE_i) +np.einsum_path("i,i->i", AR_LIKE_f, AR_LIKE_f) +np.einsum_path("i,i->i", AR_LIKE_c, AR_LIKE_c) +np.einsum_path("i,i->i", AR_LIKE_b, AR_LIKE_i) +np.einsum_path("i,i,i,i->i", AR_LIKE_b, AR_LIKE_u, AR_LIKE_i, AR_LIKE_c) diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/flatiter.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/flatiter.py new file mode 100644 index 0000000000000000000000000000000000000000..4b34e27a1e77401dca3a79c71c4d8f29c1e36be6 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/flatiter.py @@ -0,0 +1,16 @@ +import numpy as np + +a = np.empty((2, 2)).flat + +a.base +a.copy() +a.coords +a.index +iter(a) +next(a) +a[0] +a[[0, 1, 2]] +a[...] +a[:] +a.__array__() +a.__array__(np.dtype(np.float64)) diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/fromnumeric.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/fromnumeric.py new file mode 100644 index 0000000000000000000000000000000000000000..df59f4730e20867d4ca7c619bfd2bad305c5e7b7 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/fromnumeric.py @@ -0,0 +1,260 @@ +"""Tests for :mod:`numpy.core.fromnumeric`.""" + +import numpy as np + +A = np.array(True, ndmin=2, dtype=bool) +B = np.array(1.0, ndmin=2, dtype=np.float32) +A.setflags(write=False) +B.setflags(write=False) + +a = np.bool_(True) +b = np.float32(1.0) +c = 1.0 +d = np.array(1.0, dtype=np.float32) # writeable + +np.take(a, 0) +np.take(b, 0) +np.take(c, 0) +np.take(A, 0) +np.take(B, 0) +np.take(A, [0]) +np.take(B, [0]) + +np.reshape(a, 1) +np.reshape(b, 1) +np.reshape(c, 1) +np.reshape(A, 1) +np.reshape(B, 1) + +np.choose(a, [True, True]) +np.choose(A, [1.0, 1.0]) + +np.repeat(a, 1) +np.repeat(b, 1) +np.repeat(c, 1) +np.repeat(A, 1) +np.repeat(B, 1) + +np.swapaxes(A, 0, 0) +np.swapaxes(B, 0, 0) + +np.transpose(a) +np.transpose(b) +np.transpose(c) +np.transpose(A) +np.transpose(B) + +np.partition(a, 0, axis=None) +np.partition(b, 0, axis=None) +np.partition(c, 0, axis=None) +np.partition(A, 0) +np.partition(B, 0) + +np.argpartition(a, 0) +np.argpartition(b, 0) +np.argpartition(c, 0) +np.argpartition(A, 0) +np.argpartition(B, 0) + +np.sort(A, 0) +np.sort(B, 0) + +np.argsort(A, 0) +np.argsort(B, 0) + +np.argmax(A) +np.argmax(B) +np.argmax(A, axis=0) +np.argmax(B, axis=0) + +np.argmin(A) +np.argmin(B) +np.argmin(A, axis=0) +np.argmin(B, axis=0) + +np.searchsorted(A[0], 0) +np.searchsorted(B[0], 0) +np.searchsorted(A[0], [0]) +np.searchsorted(B[0], [0]) + +np.resize(a, 
(5, 5)) +np.resize(b, (5, 5)) +np.resize(c, (5, 5)) +np.resize(A, (5, 5)) +np.resize(B, (5, 5)) + +np.squeeze(a) +np.squeeze(b) +np.squeeze(c) +np.squeeze(A) +np.squeeze(B) + +np.diagonal(A) +np.diagonal(B) + +np.trace(A) +np.trace(B) + +np.ravel(a) +np.ravel(b) +np.ravel(c) +np.ravel(A) +np.ravel(B) + +np.nonzero(A) +np.nonzero(B) + +np.shape(a) +np.shape(b) +np.shape(c) +np.shape(A) +np.shape(B) + +np.compress([True], a) +np.compress([True], b) +np.compress([True], c) +np.compress([True], A) +np.compress([True], B) + +np.clip(a, 0, 1.0) +np.clip(b, -1, 1) +np.clip(a, 0, None) +np.clip(b, None, 1) +np.clip(c, 0, 1) +np.clip(A, 0, 1) +np.clip(B, 0, 1) +np.clip(B, [0, 1], [1, 2]) + +np.sum(a) +np.sum(b) +np.sum(c) +np.sum(A) +np.sum(B) +np.sum(A, axis=0) +np.sum(B, axis=0) + +np.all(a) +np.all(b) +np.all(c) +np.all(A) +np.all(B) +np.all(A, axis=0) +np.all(B, axis=0) +np.all(A, keepdims=True) +np.all(B, keepdims=True) + +np.any(a) +np.any(b) +np.any(c) +np.any(A) +np.any(B) +np.any(A, axis=0) +np.any(B, axis=0) +np.any(A, keepdims=True) +np.any(B, keepdims=True) + +np.cumsum(a) +np.cumsum(b) +np.cumsum(c) +np.cumsum(A) +np.cumsum(B) + +np.ptp(b) +np.ptp(c) +np.ptp(B) +np.ptp(B, axis=0) +np.ptp(B, keepdims=True) + +np.amax(a) +np.amax(b) +np.amax(c) +np.amax(A) +np.amax(B) +np.amax(A, axis=0) +np.amax(B, axis=0) +np.amax(A, keepdims=True) +np.amax(B, keepdims=True) + +np.amin(a) +np.amin(b) +np.amin(c) +np.amin(A) +np.amin(B) +np.amin(A, axis=0) +np.amin(B, axis=0) +np.amin(A, keepdims=True) +np.amin(B, keepdims=True) + +np.prod(a) +np.prod(b) +np.prod(c) +np.prod(A) +np.prod(B) +np.prod(a, dtype=None) +np.prod(A, dtype=None) +np.prod(A, axis=0) +np.prod(B, axis=0) +np.prod(A, keepdims=True) +np.prod(B, keepdims=True) +np.prod(b, out=d) +np.prod(B, out=d) + +np.cumprod(a) +np.cumprod(b) +np.cumprod(c) +np.cumprod(A) +np.cumprod(B) + +np.ndim(a) +np.ndim(b) +np.ndim(c) +np.ndim(A) +np.ndim(B) + +np.size(a) +np.size(b) +np.size(c) +np.size(A) +np.size(B) + +np.around(a) +np.around(b) +np.around(c) +np.around(A) +np.around(B) + +np.mean(a) +np.mean(b) +np.mean(c) +np.mean(A) +np.mean(B) +np.mean(A, axis=0) +np.mean(B, axis=0) +np.mean(A, keepdims=True) +np.mean(B, keepdims=True) +np.mean(b, out=d) +np.mean(B, out=d) + +np.std(a) +np.std(b) +np.std(c) +np.std(A) +np.std(B) +np.std(A, axis=0) +np.std(B, axis=0) +np.std(A, keepdims=True) +np.std(B, keepdims=True) +np.std(b, out=d) +np.std(B, out=d) + +np.var(a) +np.var(b) +np.var(c) +np.var(A) +np.var(B) +np.var(A, axis=0) +np.var(B, axis=0) +np.var(A, keepdims=True) +np.var(B, keepdims=True) +np.var(b, out=d) +np.var(B, out=d) diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/index_tricks.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/index_tricks.py new file mode 100644 index 0000000000000000000000000000000000000000..e257fb7e6ebe65dff0a40750e25f259518cd8a4d --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/index_tricks.py @@ -0,0 +1,64 @@ +from __future__ import annotations +from typing import Any +import numpy as np + +AR_LIKE_b = [[True, True], [True, True]] +AR_LIKE_i = [[1, 2], [3, 4]] +AR_LIKE_f = [[1.0, 2.0], [3.0, 4.0]] +AR_LIKE_U = [["1", "2"], ["3", "4"]] + +AR_i8: np.ndarray[Any, np.dtype[np.int64]] = np.array(AR_LIKE_i, dtype=np.int64) + +np.ndenumerate(AR_i8) +np.ndenumerate(AR_LIKE_f) +np.ndenumerate(AR_LIKE_U) + +np.ndenumerate(AR_i8).iter +np.ndenumerate(AR_LIKE_f).iter +np.ndenumerate(AR_LIKE_U).iter + +next(np.ndenumerate(AR_i8)) +next(np.ndenumerate(AR_LIKE_f)) 
+next(np.ndenumerate(AR_LIKE_U)) + +iter(np.ndenumerate(AR_i8)) +iter(np.ndenumerate(AR_LIKE_f)) +iter(np.ndenumerate(AR_LIKE_U)) + +iter(np.ndindex(1, 2, 3)) +next(np.ndindex(1, 2, 3)) + +np.unravel_index([22, 41, 37], (7, 6)) +np.unravel_index([31, 41, 13], (7, 6), order='F') +np.unravel_index(1621, (6, 7, 8, 9)) + +np.ravel_multi_index(AR_LIKE_i, (7, 6)) +np.ravel_multi_index(AR_LIKE_i, (7, 6), order='F') +np.ravel_multi_index(AR_LIKE_i, (4, 6), mode='clip') +np.ravel_multi_index(AR_LIKE_i, (4, 4), mode=('clip', 'wrap')) +np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9)) + +np.mgrid[1:1:2] +np.mgrid[1:1:2, None:10] + +np.ogrid[1:1:2] +np.ogrid[1:1:2, None:10] + +np.index_exp[0:1] +np.index_exp[0:1, None:3] +np.index_exp[0, 0:1, ..., [0, 1, 3]] + +np.s_[0:1] +np.s_[0:1, None:3] +np.s_[0, 0:1, ..., [0, 1, 3]] + +np.ix_(AR_LIKE_b[0]) +np.ix_(AR_LIKE_i[0], AR_LIKE_f[0]) +np.ix_(AR_i8[0]) + +np.fill_diagonal(AR_i8, 5) + +np.diag_indices(4) +np.diag_indices(2, 3) + +np.diag_indices_from(AR_i8) diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/lib_utils.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/lib_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..579b2f2e9ddf732ad91e86e12d3df43f4b1d7faa --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/lib_utils.py @@ -0,0 +1,26 @@ +from __future__ import annotations + +from io import StringIO +from typing import Any + +import numpy as np + +FILE = StringIO() +AR: np.ndarray[Any, np.dtype[np.float64]] = np.arange(10).astype(np.float64) + +def func(a: int) -> bool: ... + +np.deprecate(func) +np.deprecate() + +np.deprecate_with_doc("test") +np.deprecate_with_doc(None) + +np.byte_bounds(AR) +np.byte_bounds(np.float64()) + +np.info(1, output=FILE) + +np.source(np.interp, output=FILE) + +np.lookfor("binary representation", output=FILE) diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/lib_version.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/lib_version.py new file mode 100644 index 0000000000000000000000000000000000000000..ab55916c556fd55fead90913e45f209405bc9ee9 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/lib_version.py @@ -0,0 +1,18 @@ +from numpy.lib import NumpyVersion + +version = NumpyVersion("1.8.0") + +version.vstring +version.version +version.major +version.minor +version.bugfix +version.pre_release +version.is_devversion + +version == version +version != version +version < "1.8.0" +version <= version +version > version +version >= "1.8.0" diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/literal.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/literal.py new file mode 100644 index 0000000000000000000000000000000000000000..1595f0574210b01dbb55f23dc67e1118bfc9f001 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/literal.py @@ -0,0 +1,45 @@ +from functools import partial +from typing import Callable, List, Tuple + +import pytest # type: ignore +import numpy as np + +AR = np.array(0) +AR.setflags(write=False) + +KACF = frozenset({None, "K", "A", "C", "F"}) +ACF = frozenset({None, "A", "C", "F"}) +CF = frozenset({None, "C", "F"}) + +order_list: List[Tuple[frozenset, Callable]] = [ + (KACF, partial(np.ndarray, 1)), + (KACF, AR.tobytes), + (KACF, partial(AR.astype, int)), + (KACF, AR.copy), + (ACF, partial(AR.reshape, 1)), + (KACF, AR.flatten), + (KACF, AR.ravel), + (KACF, partial(np.array, 1)), + (CF, partial(np.zeros, 1)), + (CF, partial(np.ones, 1)), + (CF, partial(np.empty, 1)), + 
(CF, partial(np.full, 1, 1)), + (KACF, partial(np.zeros_like, AR)), + (KACF, partial(np.ones_like, AR)), + (KACF, partial(np.empty_like, AR)), + (KACF, partial(np.full_like, AR, 1)), + (KACF, partial(np.add, 1, 1)), # i.e. np.ufunc.__call__ + (ACF, partial(np.reshape, AR, 1)), + (KACF, partial(np.ravel, AR)), + (KACF, partial(np.asarray, 1)), + (KACF, partial(np.asanyarray, 1)), +] + +for order_set, func in order_list: + for order in order_set: + func(order=order) + + invalid_orders = KACF - order_set + for order in invalid_orders: + with pytest.raises(ValueError): + func(order=order) diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/mod.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/mod.py new file mode 100644 index 0000000000000000000000000000000000000000..a80dc89145deb737fe0ef2833967a1045bee685a --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/mod.py @@ -0,0 +1,149 @@ +import numpy as np + +f8 = np.float64(1) +i8 = np.int64(1) +u8 = np.uint64(1) + +f4 = np.float32(1) +i4 = np.int32(1) +u4 = np.uint32(1) + +td = np.timedelta64(1, "D") +b_ = np.bool_(1) + +b = bool(1) +f = float(1) +i = int(1) + +AR = np.array([1], dtype=np.bool_) +AR.setflags(write=False) + +AR2 = np.array([1], dtype=np.timedelta64) +AR2.setflags(write=False) + +# Time structures + +td % td +td % AR2 +AR2 % td + +divmod(td, td) +divmod(td, AR2) +divmod(AR2, td) + +# Bool + +b_ % b +b_ % i +b_ % f +b_ % b_ +b_ % i8 +b_ % u8 +b_ % f8 +b_ % AR + +divmod(b_, b) +divmod(b_, i) +divmod(b_, f) +divmod(b_, b_) +divmod(b_, i8) +divmod(b_, u8) +divmod(b_, f8) +divmod(b_, AR) + +b % b_ +i % b_ +f % b_ +b_ % b_ +i8 % b_ +u8 % b_ +f8 % b_ +AR % b_ + +divmod(b, b_) +divmod(i, b_) +divmod(f, b_) +divmod(b_, b_) +divmod(i8, b_) +divmod(u8, b_) +divmod(f8, b_) +divmod(AR, b_) + +# int + +i8 % b +i8 % i +i8 % f +i8 % i8 +i8 % f8 +i4 % i8 +i4 % f8 +i4 % i4 +i4 % f4 +i8 % AR + +divmod(i8, b) +divmod(i8, i) +divmod(i8, f) +divmod(i8, i8) +divmod(i8, f8) +divmod(i8, i4) +divmod(i8, f4) +divmod(i4, i4) +divmod(i4, f4) +divmod(i8, AR) + +b % i8 +i % i8 +f % i8 +i8 % i8 +f8 % i8 +i8 % i4 +f8 % i4 +i4 % i4 +f4 % i4 +AR % i8 + +divmod(b, i8) +divmod(i, i8) +divmod(f, i8) +divmod(i8, i8) +divmod(f8, i8) +divmod(i4, i8) +divmod(f4, i8) +divmod(i4, i4) +divmod(f4, i4) +divmod(AR, i8) + +# float + +f8 % b +f8 % i +f8 % f +i8 % f4 +f4 % f4 +f8 % AR + +divmod(f8, b) +divmod(f8, i) +divmod(f8, f) +divmod(f8, f8) +divmod(f8, f4) +divmod(f4, f4) +divmod(f8, AR) + +b % f8 +i % f8 +f % f8 +f8 % f8 +f8 % f8 +f4 % f4 +AR % f8 + +divmod(b, f8) +divmod(i, f8) +divmod(f, f8) +divmod(f8, f8) +divmod(f4, f8) +divmod(f4, f4) +divmod(AR, f8) diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/modules.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/modules.py new file mode 100644 index 0000000000000000000000000000000000000000..0f0ecdbe553b330e0f9ab45e0a45cbaf82242400 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/modules.py @@ -0,0 +1,43 @@ +import numpy as np +from numpy import f2py + +np.char +np.ctypeslib +np.emath +np.fft +np.lib +np.linalg +np.ma +np.matrixlib +np.polynomial +np.random +np.rec +np.testing +np.version + +np.lib.format +np.lib.mixins +np.lib.scimath +np.lib.stride_tricks +np.ma.extras +np.polynomial.chebyshev +np.polynomial.hermite +np.polynomial.hermite_e +np.polynomial.laguerre +np.polynomial.legendre +np.polynomial.polynomial + +np.__path__ +np.__version__ +np.__git_version__ + +np.__all__ +np.char.__all__ +np.ctypeslib.__all__ +np.emath.__all__ 
+np.lib.__all__ +np.ma.__all__ +np.random.__all__ +np.rec.__all__ +np.testing.__all__ +f2py.__all__ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/multiarray.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/multiarray.py new file mode 100644 index 0000000000000000000000000000000000000000..932ed5fe06a89c85544a29fd3b3d3c8d98b9bd1f --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/multiarray.py @@ -0,0 +1,37 @@ +from __future__ import annotations + +from typing import Any +import numpy as np + +AR_f8: np.ndarray[Any, np.dtype[np.float64]] = np.array([1.0]) +AR_i8: np.ndarray[Any, np.dtype[np.int_]] = np.array([1]) + +b_f8 = np.broadcast(AR_f8) +b_i8_f8_f8 = np.broadcast(AR_i8, AR_f8, AR_f8) + +next(b_f8) +next(b_i8_f8_f8) + +b_f8.reset() +b_i8_f8_f8.reset() + +b_f8.index +b_i8_f8_f8.index + +b_f8.iters +b_i8_f8_f8.iters + +b_f8.nd +b_i8_f8_f8.nd + +b_f8.ndim +b_i8_f8_f8.ndim + +b_f8.numiter +b_i8_f8_f8.numiter + +b_f8.shape +b_i8_f8_f8.shape + +b_f8.size +b_i8_f8_f8.size diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/ndarray_conversion.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/ndarray_conversion.py new file mode 100644 index 0000000000000000000000000000000000000000..81b50e9a8983869b71184fb2e0ca9d224929d2d6 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/ndarray_conversion.py @@ -0,0 +1,94 @@ +import os +import tempfile + +import numpy as np + +nd = np.array([[1, 2], [3, 4]]) +scalar_array = np.array(1) + +# item +scalar_array.item() +nd.item(1) +nd.item(0, 1) +nd.item((0, 1)) + +# tolist is pretty simple + +# itemset +scalar_array.itemset(3) +nd.itemset(3, 0) +nd.itemset((0, 0), 3) + +# tobytes +nd.tobytes() +nd.tobytes("C") +nd.tobytes(None) + +# tofile +if os.name != "nt": + with tempfile.NamedTemporaryFile(suffix=".txt") as tmp: + nd.tofile(tmp.name) + nd.tofile(tmp.name, "") + nd.tofile(tmp.name, sep="") + + nd.tofile(tmp.name, "", "%s") + nd.tofile(tmp.name, format="%s") + + nd.tofile(tmp) + +# dump is pretty simple +# dumps is pretty simple + +# astype +nd.astype("float") +nd.astype(float) + +nd.astype(float, "K") +nd.astype(float, order="K") + +nd.astype(float, "K", "unsafe") +nd.astype(float, casting="unsafe") + +nd.astype(float, "K", "unsafe", True) +nd.astype(float, subok=True) + +nd.astype(float, "K", "unsafe", True, True) +nd.astype(float, copy=True) + +# byteswap +nd.byteswap() +nd.byteswap(True) + +# copy +nd.copy() +nd.copy("C") + +# view +nd.view() +nd.view(np.int64) +nd.view(dtype=np.int64) +nd.view(np.int64, np.matrix) +nd.view(type=np.matrix) + +# getfield +complex_array = np.array([[1 + 1j, 0], [0, 1 - 1j]], dtype=np.complex128) + +complex_array.getfield("float") +complex_array.getfield(float) + +complex_array.getfield("float", 8) +complex_array.getfield(float, offset=8) + +# setflags +nd.setflags() + +nd.setflags(True) +nd.setflags(write=True) + +nd.setflags(True, True) +nd.setflags(write=True, align=True) + +nd.setflags(True, True, False) +nd.setflags(write=True, align=True, uic=False) + +# fill is pretty simple diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/ndarray_misc.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/ndarray_misc.py new file mode 100644 index 0000000000000000000000000000000000000000..337c1ccb182bc668c7d7f1081d695b8e6ddd618f --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/ndarray_misc.py @@ -0,0 +1,185 @@ +""" +Tests for miscellaneous (non-magic) ``np.ndarray``/``np.generic`` methods. 
+ +More extensive tests are performed for the methods' +function-based counterpart in `../from_numeric.py`. + +""" + +from __future__ import annotations + +import operator +from typing import cast, Any + +import numpy as np + +class SubClass(np.ndarray): ... + +i4 = np.int32(1) +A: np.ndarray[Any, np.dtype[np.int32]] = np.array([[1]], dtype=np.int32) +B0 = np.empty((), dtype=np.int32).view(SubClass) +B1 = np.empty((1,), dtype=np.int32).view(SubClass) +B2 = np.empty((1, 1), dtype=np.int32).view(SubClass) +C: np.ndarray[Any, np.dtype[np.int32]] = np.array([0, 1, 2], dtype=np.int32) +D = np.empty(3).view(SubClass) + +i4.all() +A.all() +A.all(axis=0) +A.all(keepdims=True) +A.all(out=B0) + +i4.any() +A.any() +A.any(axis=0) +A.any(keepdims=True) +A.any(out=B0) + +i4.argmax() +A.argmax() +A.argmax(axis=0) +A.argmax(out=B0) + +i4.argmin() +A.argmin() +A.argmin(axis=0) +A.argmin(out=B0) + +i4.argsort() +A.argsort() + +i4.choose([()]) +_choices = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]], dtype=np.int32) +C.choose(_choices) +C.choose(_choices, out=D) + +i4.clip(1) +A.clip(1) +A.clip(None, 1) +A.clip(1, out=B2) +A.clip(None, 1, out=B2) + +i4.compress([1]) +A.compress([1]) +A.compress([1], out=B1) + +i4.conj() +A.conj() +B0.conj() + +i4.conjugate() +A.conjugate() +B0.conjugate() + +i4.cumprod() +A.cumprod() +A.cumprod(out=B1) + +i4.cumsum() +A.cumsum() +A.cumsum(out=B1) + +i4.max() +A.max() +A.max(axis=0) +A.max(keepdims=True) +A.max(out=B0) + +i4.mean() +A.mean() +A.mean(axis=0) +A.mean(keepdims=True) +A.mean(out=B0) + +i4.min() +A.min() +A.min(axis=0) +A.min(keepdims=True) +A.min(out=B0) + +i4.newbyteorder() +A.newbyteorder() +B0.newbyteorder('|') + +i4.prod() +A.prod() +A.prod(axis=0) +A.prod(keepdims=True) +A.prod(out=B0) + +i4.ptp() +A.ptp() +A.ptp(axis=0) +A.ptp(keepdims=True) +A.astype(int).ptp(out=B0) + +i4.round() +A.round() +A.round(out=B2) + +i4.repeat(1) +A.repeat(1) +B0.repeat(1) + +i4.std() +A.std() +A.std(axis=0) +A.std(keepdims=True) +A.std(out=B0.astype(np.float64)) + +i4.sum() +A.sum() +A.sum(axis=0) +A.sum(keepdims=True) +A.sum(out=B0) + +i4.take(0) +A.take(0) +A.take([0]) +A.take(0, out=B0) +A.take([0], out=B1) + +i4.var() +A.var() +A.var(axis=0) +A.var(keepdims=True) +A.var(out=B0) + +A.argpartition([0]) + +A.diagonal() + +A.dot(1) +A.dot(1, out=B0) + +A.nonzero() + +C.searchsorted(1) + +A.trace() +A.trace(out=B0) + +void = cast(np.void, np.array(1, dtype=[("f", np.float64)]).take(0)) +void.setfield(10, np.float64) + +A.item(0) +C.item(0) + +A.ravel() +C.ravel() + +A.flatten() +C.flatten() + +A.reshape(1) +C.reshape(3) + +int(np.array(1.0, dtype=np.float64)) +int(np.array("1", dtype=np.str_)) + +float(np.array(1.0, dtype=np.float64)) +float(np.array("1", dtype=np.str_)) + +complex(np.array(1.0, dtype=np.float64)) + +operator.index(np.array(1, dtype=np.int64)) diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/ndarray_shape_manipulation.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/ndarray_shape_manipulation.py new file mode 100644 index 0000000000000000000000000000000000000000..179d043db725bce8f3b6948c83cd4e08552dd537 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/ndarray_shape_manipulation.py @@ -0,0 +1,47 @@ +import numpy as np + +nd1 = np.array([[1, 2], [3, 4]]) + +# reshape +nd1.reshape(4) +nd1.reshape(2, 2) +nd1.reshape((2, 2)) + +nd1.reshape((2, 2), order="C") +nd1.reshape(4, order="C") + +# resize +nd1.resize() +nd1.resize(4) +nd1.resize(2, 2) +nd1.resize((2, 2)) + +nd1.resize((2, 2), refcheck=True) +nd1.resize(4, 
refcheck=True) + +nd2 = np.array([[1, 2], [3, 4]]) + +# transpose +nd2.transpose() +nd2.transpose(1, 0) +nd2.transpose((1, 0)) + +# swapaxes +nd2.swapaxes(0, 1) + +# flatten +nd2.flatten() +nd2.flatten("C") + +# ravel +nd2.ravel() +nd2.ravel("C") + +# squeeze +nd2.squeeze() + +nd3 = np.array([[1, 2]]) +nd3.squeeze(0) + +nd4 = np.array([[[1, 2]]]) +nd4.squeeze((0, 1)) diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/numeric.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/numeric.py new file mode 100644 index 0000000000000000000000000000000000000000..920ba12b3337e3bd0f21b48a2479213a600ff905 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/numeric.py @@ -0,0 +1,89 @@ +""" +Tests for :mod:`numpy.core.numeric`. + +Does not include tests which fall under ``array_constructors``. + +""" + +from typing import List +import numpy as np + +class SubClass(np.ndarray): + ... + +i8 = np.int64(1) + +A = np.arange(27).reshape(3, 3, 3) +B: List[List[List[int]]] = A.tolist() +C = np.empty((27, 27)).view(SubClass) + +np.count_nonzero(i8) +np.count_nonzero(A) +np.count_nonzero(B) +np.count_nonzero(A, keepdims=True) +np.count_nonzero(A, axis=0) + +np.isfortran(i8) +np.isfortran(A) + +np.argwhere(i8) +np.argwhere(A) + +np.flatnonzero(i8) +np.flatnonzero(A) + +np.correlate(B[0][0], A.ravel(), mode="valid") +np.correlate(A.ravel(), A.ravel(), mode="same") + +np.convolve(B[0][0], A.ravel(), mode="valid") +np.convolve(A.ravel(), A.ravel(), mode="same") + +np.outer(i8, A) +np.outer(B, A) +np.outer(A, A) +np.outer(A, A, out=C) + +np.tensordot(B, A) +np.tensordot(A, A) +np.tensordot(A, A, axes=0) +np.tensordot(A, A, axes=(0, 1)) + +np.isscalar(i8) +np.isscalar(A) +np.isscalar(B) + +np.roll(A, 1) +np.roll(A, (1, 2)) +np.roll(B, 1) + +np.rollaxis(A, 0, 1) + +np.moveaxis(A, 0, 1) +np.moveaxis(A, (0, 1), (1, 2)) + +np.cross(B, A) +np.cross(A, A) + +np.indices([0, 1, 2]) +np.indices([0, 1, 2], sparse=False) +np.indices([0, 1, 2], sparse=True) + +np.binary_repr(1) + +np.base_repr(1) + +np.allclose(i8, A) +np.allclose(B, A) +np.allclose(A, A) + +np.isclose(i8, A) +np.isclose(B, A) +np.isclose(A, A) + +np.array_equal(i8, A) +np.array_equal(B, A) +np.array_equal(A, A) + +np.array_equiv(i8, A) +np.array_equiv(B, A) +np.array_equiv(A, A) diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/numerictypes.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/numerictypes.py new file mode 100644 index 0000000000000000000000000000000000000000..f2b6e0cd45d70ca6490cac9ff35b0e1fd3a00912 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/numerictypes.py @@ -0,0 +1,47 @@ +import numpy as np + +np.maximum_sctype("S8") +np.maximum_sctype(object) + +np.issctype(object) +np.issctype("S8") + +np.obj2sctype(list) +np.obj2sctype(list, default=None) +np.obj2sctype(list, default=np.string_) + +np.issubclass_(np.int32, int) +np.issubclass_(np.float64, float) +np.issubclass_(np.float64, (int, float)) + +np.issubsctype("int64", int) +np.issubsctype(np.array([1]), np.array([1])) + +np.issubdtype("S1", np.string_) +np.issubdtype(np.float64, np.float32) + +np.sctype2char("S1") +np.sctype2char(list) + +np.find_common_type([], [np.int64, np.float32, complex]) +np.find_common_type((), (np.int64, np.float32, complex)) +np.find_common_type([np.int64, np.float32], []) +np.find_common_type([np.float32], [np.int64, np.float64]) + +np.cast[int] +np.cast["i8"] +np.cast[np.int64] + +np.nbytes[int] +np.nbytes["i8"] +np.nbytes[np.int64] + +np.ScalarType +np.ScalarType[0] 
+np.ScalarType[4] +np.ScalarType[9] +np.ScalarType[11] + +np.typecodes["Character"] +np.typecodes["Complex"] +np.typecodes["All"] diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/random.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/random.py new file mode 100644 index 0000000000000000000000000000000000000000..dfe845b5cd002f1df77684cd2fedb8b18937f8d3 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/random.py @@ -0,0 +1,1497 @@ +from __future__ import annotations + +from typing import Any, List, Dict + +import numpy as np + +SEED_NONE = None +SEED_INT = 4579435749574957634658964293569 +SEED_ARR: np.ndarray[Any, np.dtype[np.int64]] = np.array([1, 2, 3, 4], dtype=np.int64) +SEED_ARRLIKE: List[int] = [1, 2, 3, 4] +SEED_SEED_SEQ: np.random.SeedSequence = np.random.SeedSequence(0) +SEED_MT19937: np.random.MT19937 = np.random.MT19937(0) +SEED_PCG64: np.random.PCG64 = np.random.PCG64(0) +SEED_PHILOX: np.random.Philox = np.random.Philox(0) +SEED_SFC64: np.random.SFC64 = np.random.SFC64(0) + +# default rng +np.random.default_rng() +np.random.default_rng(SEED_NONE) +np.random.default_rng(SEED_INT) +np.random.default_rng(SEED_ARR) +np.random.default_rng(SEED_ARRLIKE) +np.random.default_rng(SEED_SEED_SEQ) +np.random.default_rng(SEED_MT19937) +np.random.default_rng(SEED_PCG64) +np.random.default_rng(SEED_PHILOX) +np.random.default_rng(SEED_SFC64) + +# Seed Sequence +np.random.SeedSequence(SEED_NONE) +np.random.SeedSequence(SEED_INT) +np.random.SeedSequence(SEED_ARR) +np.random.SeedSequence(SEED_ARRLIKE) + +# Bit Generators +np.random.MT19937(SEED_NONE) +np.random.MT19937(SEED_INT) +np.random.MT19937(SEED_ARR) +np.random.MT19937(SEED_ARRLIKE) +np.random.MT19937(SEED_SEED_SEQ) + +np.random.PCG64(SEED_NONE) +np.random.PCG64(SEED_INT) +np.random.PCG64(SEED_ARR) +np.random.PCG64(SEED_ARRLIKE) +np.random.PCG64(SEED_SEED_SEQ) + +np.random.Philox(SEED_NONE) +np.random.Philox(SEED_INT) +np.random.Philox(SEED_ARR) +np.random.Philox(SEED_ARRLIKE) +np.random.Philox(SEED_SEED_SEQ) + +np.random.SFC64(SEED_NONE) +np.random.SFC64(SEED_INT) +np.random.SFC64(SEED_ARR) +np.random.SFC64(SEED_ARRLIKE) +np.random.SFC64(SEED_SEED_SEQ) + +seed_seq: np.random.bit_generator.SeedSequence = np.random.SeedSequence(SEED_NONE) +seed_seq.spawn(10) +seed_seq.generate_state(3) +seed_seq.generate_state(3, "u4") +seed_seq.generate_state(3, "uint32") +seed_seq.generate_state(3, "u8") +seed_seq.generate_state(3, "uint64") +seed_seq.generate_state(3, np.uint32) +seed_seq.generate_state(3, np.uint64) + + +def_gen: np.random.Generator = np.random.default_rng() + +D_arr_0p1: np.ndarray[Any, np.dtype[np.float64]] = np.array([0.1]) +D_arr_0p5: np.ndarray[Any, np.dtype[np.float64]] = np.array([0.5]) +D_arr_0p9: np.ndarray[Any, np.dtype[np.float64]] = np.array([0.9]) +D_arr_1p5: np.ndarray[Any, np.dtype[np.float64]] = np.array([1.5]) +I_arr_10: np.ndarray[Any, np.dtype[np.int_]] = np.array([10], dtype=np.int_) +I_arr_20: np.ndarray[Any, np.dtype[np.int_]] = np.array([20], dtype=np.int_) +D_arr_like_0p1: List[float] = [0.1] +D_arr_like_0p5: List[float] = [0.5] +D_arr_like_0p9: List[float] = [0.9] +D_arr_like_1p5: List[float] = [1.5] +I_arr_like_10: List[int] = [10] +I_arr_like_20: List[int] = [20] +D_2D_like: List[List[float]] = [[1, 2], [2, 3], [3, 4], [4, 5.1]] +D_2D: np.ndarray[Any, np.dtype[np.float64]] = np.array(D_2D_like) + +S_out: np.ndarray[Any, np.dtype[np.float32]] = np.empty(1, dtype=np.float32) +D_out: np.ndarray[Any, np.dtype[np.float64]] = np.empty(1) + +def_gen.standard_normal() 
+def_gen.standard_normal(dtype=np.float32) +def_gen.standard_normal(dtype="float32") +def_gen.standard_normal(dtype="double") +def_gen.standard_normal(dtype=np.float64) +def_gen.standard_normal(size=None) +def_gen.standard_normal(size=1) +def_gen.standard_normal(size=1, dtype=np.float32) +def_gen.standard_normal(size=1, dtype="f4") +def_gen.standard_normal(size=1, dtype="float32", out=S_out) +def_gen.standard_normal(dtype=np.float32, out=S_out) +def_gen.standard_normal(size=1, dtype=np.float64) +def_gen.standard_normal(size=1, dtype="float64") +def_gen.standard_normal(size=1, dtype="f8") +def_gen.standard_normal(out=D_out) +def_gen.standard_normal(size=1, dtype="float64") +def_gen.standard_normal(size=1, dtype="float64", out=D_out) + +def_gen.random() +def_gen.random(dtype=np.float32) +def_gen.random(dtype="float32") +def_gen.random(dtype="double") +def_gen.random(dtype=np.float64) +def_gen.random(size=None) +def_gen.random(size=1) +def_gen.random(size=1, dtype=np.float32) +def_gen.random(size=1, dtype="f4") +def_gen.random(size=1, dtype="float32", out=S_out) +def_gen.random(dtype=np.float32, out=S_out) +def_gen.random(size=1, dtype=np.float64) +def_gen.random(size=1, dtype="float64") +def_gen.random(size=1, dtype="f8") +def_gen.random(out=D_out) +def_gen.random(size=1, dtype="float64") +def_gen.random(size=1, dtype="float64", out=D_out) + +def_gen.standard_cauchy() +def_gen.standard_cauchy(size=None) +def_gen.standard_cauchy(size=1) + +def_gen.standard_exponential() +def_gen.standard_exponential(method="inv") +def_gen.standard_exponential(dtype=np.float32) +def_gen.standard_exponential(dtype="float32") +def_gen.standard_exponential(dtype="double") +def_gen.standard_exponential(dtype=np.float64) +def_gen.standard_exponential(size=None) +def_gen.standard_exponential(size=None, method="inv") +def_gen.standard_exponential(size=1, method="inv") +def_gen.standard_exponential(size=1, dtype=np.float32) +def_gen.standard_exponential(size=1, dtype="f4", method="inv") +def_gen.standard_exponential(size=1, dtype="float32", out=S_out) +def_gen.standard_exponential(dtype=np.float32, out=S_out) +def_gen.standard_exponential(size=1, dtype=np.float64, method="inv") +def_gen.standard_exponential(size=1, dtype="float64") +def_gen.standard_exponential(size=1, dtype="f8") +def_gen.standard_exponential(out=D_out) +def_gen.standard_exponential(size=1, dtype="float64") +def_gen.standard_exponential(size=1, dtype="float64", out=D_out) + +def_gen.zipf(1.5) +def_gen.zipf(1.5, size=None) +def_gen.zipf(1.5, size=1) +def_gen.zipf(D_arr_1p5) +def_gen.zipf(D_arr_1p5, size=1) +def_gen.zipf(D_arr_like_1p5) +def_gen.zipf(D_arr_like_1p5, size=1) + +def_gen.weibull(0.5) +def_gen.weibull(0.5, size=None) +def_gen.weibull(0.5, size=1) +def_gen.weibull(D_arr_0p5) +def_gen.weibull(D_arr_0p5, size=1) +def_gen.weibull(D_arr_like_0p5) +def_gen.weibull(D_arr_like_0p5, size=1) + +def_gen.standard_t(0.5) +def_gen.standard_t(0.5, size=None) +def_gen.standard_t(0.5, size=1) +def_gen.standard_t(D_arr_0p5) +def_gen.standard_t(D_arr_0p5, size=1) +def_gen.standard_t(D_arr_like_0p5) +def_gen.standard_t(D_arr_like_0p5, size=1) + +def_gen.poisson(0.5) +def_gen.poisson(0.5, size=None) +def_gen.poisson(0.5, size=1) +def_gen.poisson(D_arr_0p5) +def_gen.poisson(D_arr_0p5, size=1) +def_gen.poisson(D_arr_like_0p5) +def_gen.poisson(D_arr_like_0p5, size=1) + +def_gen.power(0.5) +def_gen.power(0.5, size=None) +def_gen.power(0.5, size=1) +def_gen.power(D_arr_0p5) +def_gen.power(D_arr_0p5, size=1) +def_gen.power(D_arr_like_0p5) 
+def_gen.power(D_arr_like_0p5, size=1) + +def_gen.pareto(0.5) +def_gen.pareto(0.5, size=None) +def_gen.pareto(0.5, size=1) +def_gen.pareto(D_arr_0p5) +def_gen.pareto(D_arr_0p5, size=1) +def_gen.pareto(D_arr_like_0p5) +def_gen.pareto(D_arr_like_0p5, size=1) + +def_gen.chisquare(0.5) +def_gen.chisquare(0.5, size=None) +def_gen.chisquare(0.5, size=1) +def_gen.chisquare(D_arr_0p5) +def_gen.chisquare(D_arr_0p5, size=1) +def_gen.chisquare(D_arr_like_0p5) +def_gen.chisquare(D_arr_like_0p5, size=1) + +def_gen.exponential(0.5) +def_gen.exponential(0.5, size=None) +def_gen.exponential(0.5, size=1) +def_gen.exponential(D_arr_0p5) +def_gen.exponential(D_arr_0p5, size=1) +def_gen.exponential(D_arr_like_0p5) +def_gen.exponential(D_arr_like_0p5, size=1) + +def_gen.geometric(0.5) +def_gen.geometric(0.5, size=None) +def_gen.geometric(0.5, size=1) +def_gen.geometric(D_arr_0p5) +def_gen.geometric(D_arr_0p5, size=1) +def_gen.geometric(D_arr_like_0p5) +def_gen.geometric(D_arr_like_0p5, size=1) + +def_gen.logseries(0.5) +def_gen.logseries(0.5, size=None) +def_gen.logseries(0.5, size=1) +def_gen.logseries(D_arr_0p5) +def_gen.logseries(D_arr_0p5, size=1) +def_gen.logseries(D_arr_like_0p5) +def_gen.logseries(D_arr_like_0p5, size=1) + +def_gen.rayleigh(0.5) +def_gen.rayleigh(0.5, size=None) +def_gen.rayleigh(0.5, size=1) +def_gen.rayleigh(D_arr_0p5) +def_gen.rayleigh(D_arr_0p5, size=1) +def_gen.rayleigh(D_arr_like_0p5) +def_gen.rayleigh(D_arr_like_0p5, size=1) + +def_gen.standard_gamma(0.5) +def_gen.standard_gamma(0.5, size=None) +def_gen.standard_gamma(0.5, dtype="float32") +def_gen.standard_gamma(0.5, size=None, dtype="float32") +def_gen.standard_gamma(0.5, size=1) +def_gen.standard_gamma(D_arr_0p5) +def_gen.standard_gamma(D_arr_0p5, dtype="f4") +def_gen.standard_gamma(0.5, size=1, dtype="float32", out=S_out) +def_gen.standard_gamma(D_arr_0p5, dtype=np.float32, out=S_out) +def_gen.standard_gamma(D_arr_0p5, size=1) +def_gen.standard_gamma(D_arr_like_0p5) +def_gen.standard_gamma(D_arr_like_0p5, size=1) +def_gen.standard_gamma(0.5, out=D_out) +def_gen.standard_gamma(D_arr_like_0p5, out=D_out) +def_gen.standard_gamma(D_arr_like_0p5, size=1) +def_gen.standard_gamma(D_arr_like_0p5, size=1, out=D_out, dtype=np.float64) + +def_gen.vonmises(0.5, 0.5) +def_gen.vonmises(0.5, 0.5, size=None) +def_gen.vonmises(0.5, 0.5, size=1) +def_gen.vonmises(D_arr_0p5, 0.5) +def_gen.vonmises(0.5, D_arr_0p5) +def_gen.vonmises(D_arr_0p5, 0.5, size=1) +def_gen.vonmises(0.5, D_arr_0p5, size=1) +def_gen.vonmises(D_arr_like_0p5, 0.5) +def_gen.vonmises(0.5, D_arr_like_0p5) +def_gen.vonmises(D_arr_0p5, D_arr_0p5) +def_gen.vonmises(D_arr_like_0p5, D_arr_like_0p5) +def_gen.vonmises(D_arr_0p5, D_arr_0p5, size=1) +def_gen.vonmises(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.wald(0.5, 0.5) +def_gen.wald(0.5, 0.5, size=None) +def_gen.wald(0.5, 0.5, size=1) +def_gen.wald(D_arr_0p5, 0.5) +def_gen.wald(0.5, D_arr_0p5) +def_gen.wald(D_arr_0p5, 0.5, size=1) +def_gen.wald(0.5, D_arr_0p5, size=1) +def_gen.wald(D_arr_like_0p5, 0.5) +def_gen.wald(0.5, D_arr_like_0p5) +def_gen.wald(D_arr_0p5, D_arr_0p5) +def_gen.wald(D_arr_like_0p5, D_arr_like_0p5) +def_gen.wald(D_arr_0p5, D_arr_0p5, size=1) +def_gen.wald(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.uniform(0.5, 0.5) +def_gen.uniform(0.5, 0.5, size=None) +def_gen.uniform(0.5, 0.5, size=1) +def_gen.uniform(D_arr_0p5, 0.5) +def_gen.uniform(0.5, D_arr_0p5) +def_gen.uniform(D_arr_0p5, 0.5, size=1) +def_gen.uniform(0.5, D_arr_0p5, size=1) +def_gen.uniform(D_arr_like_0p5, 0.5) +def_gen.uniform(0.5, 
D_arr_like_0p5) +def_gen.uniform(D_arr_0p5, D_arr_0p5) +def_gen.uniform(D_arr_like_0p5, D_arr_like_0p5) +def_gen.uniform(D_arr_0p5, D_arr_0p5, size=1) +def_gen.uniform(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.beta(0.5, 0.5) +def_gen.beta(0.5, 0.5, size=None) +def_gen.beta(0.5, 0.5, size=1) +def_gen.beta(D_arr_0p5, 0.5) +def_gen.beta(0.5, D_arr_0p5) +def_gen.beta(D_arr_0p5, 0.5, size=1) +def_gen.beta(0.5, D_arr_0p5, size=1) +def_gen.beta(D_arr_like_0p5, 0.5) +def_gen.beta(0.5, D_arr_like_0p5) +def_gen.beta(D_arr_0p5, D_arr_0p5) +def_gen.beta(D_arr_like_0p5, D_arr_like_0p5) +def_gen.beta(D_arr_0p5, D_arr_0p5, size=1) +def_gen.beta(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.f(0.5, 0.5) +def_gen.f(0.5, 0.5, size=None) +def_gen.f(0.5, 0.5, size=1) +def_gen.f(D_arr_0p5, 0.5) +def_gen.f(0.5, D_arr_0p5) +def_gen.f(D_arr_0p5, 0.5, size=1) +def_gen.f(0.5, D_arr_0p5, size=1) +def_gen.f(D_arr_like_0p5, 0.5) +def_gen.f(0.5, D_arr_like_0p5) +def_gen.f(D_arr_0p5, D_arr_0p5) +def_gen.f(D_arr_like_0p5, D_arr_like_0p5) +def_gen.f(D_arr_0p5, D_arr_0p5, size=1) +def_gen.f(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.gamma(0.5, 0.5) +def_gen.gamma(0.5, 0.5, size=None) +def_gen.gamma(0.5, 0.5, size=1) +def_gen.gamma(D_arr_0p5, 0.5) +def_gen.gamma(0.5, D_arr_0p5) +def_gen.gamma(D_arr_0p5, 0.5, size=1) +def_gen.gamma(0.5, D_arr_0p5, size=1) +def_gen.gamma(D_arr_like_0p5, 0.5) +def_gen.gamma(0.5, D_arr_like_0p5) +def_gen.gamma(D_arr_0p5, D_arr_0p5) +def_gen.gamma(D_arr_like_0p5, D_arr_like_0p5) +def_gen.gamma(D_arr_0p5, D_arr_0p5, size=1) +def_gen.gamma(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.gumbel(0.5, 0.5) +def_gen.gumbel(0.5, 0.5, size=None) +def_gen.gumbel(0.5, 0.5, size=1) +def_gen.gumbel(D_arr_0p5, 0.5) +def_gen.gumbel(0.5, D_arr_0p5) +def_gen.gumbel(D_arr_0p5, 0.5, size=1) +def_gen.gumbel(0.5, D_arr_0p5, size=1) +def_gen.gumbel(D_arr_like_0p5, 0.5) +def_gen.gumbel(0.5, D_arr_like_0p5) +def_gen.gumbel(D_arr_0p5, D_arr_0p5) +def_gen.gumbel(D_arr_like_0p5, D_arr_like_0p5) +def_gen.gumbel(D_arr_0p5, D_arr_0p5, size=1) +def_gen.gumbel(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.laplace(0.5, 0.5) +def_gen.laplace(0.5, 0.5, size=None) +def_gen.laplace(0.5, 0.5, size=1) +def_gen.laplace(D_arr_0p5, 0.5) +def_gen.laplace(0.5, D_arr_0p5) +def_gen.laplace(D_arr_0p5, 0.5, size=1) +def_gen.laplace(0.5, D_arr_0p5, size=1) +def_gen.laplace(D_arr_like_0p5, 0.5) +def_gen.laplace(0.5, D_arr_like_0p5) +def_gen.laplace(D_arr_0p5, D_arr_0p5) +def_gen.laplace(D_arr_like_0p5, D_arr_like_0p5) +def_gen.laplace(D_arr_0p5, D_arr_0p5, size=1) +def_gen.laplace(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.logistic(0.5, 0.5) +def_gen.logistic(0.5, 0.5, size=None) +def_gen.logistic(0.5, 0.5, size=1) +def_gen.logistic(D_arr_0p5, 0.5) +def_gen.logistic(0.5, D_arr_0p5) +def_gen.logistic(D_arr_0p5, 0.5, size=1) +def_gen.logistic(0.5, D_arr_0p5, size=1) +def_gen.logistic(D_arr_like_0p5, 0.5) +def_gen.logistic(0.5, D_arr_like_0p5) +def_gen.logistic(D_arr_0p5, D_arr_0p5) +def_gen.logistic(D_arr_like_0p5, D_arr_like_0p5) +def_gen.logistic(D_arr_0p5, D_arr_0p5, size=1) +def_gen.logistic(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.lognormal(0.5, 0.5) +def_gen.lognormal(0.5, 0.5, size=None) +def_gen.lognormal(0.5, 0.5, size=1) +def_gen.lognormal(D_arr_0p5, 0.5) +def_gen.lognormal(0.5, D_arr_0p5) +def_gen.lognormal(D_arr_0p5, 0.5, size=1) +def_gen.lognormal(0.5, D_arr_0p5, size=1) +def_gen.lognormal(D_arr_like_0p5, 0.5) +def_gen.lognormal(0.5, D_arr_like_0p5) +def_gen.lognormal(D_arr_0p5, 
D_arr_0p5) +def_gen.lognormal(D_arr_like_0p5, D_arr_like_0p5) +def_gen.lognormal(D_arr_0p5, D_arr_0p5, size=1) +def_gen.lognormal(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.noncentral_chisquare(0.5, 0.5) +def_gen.noncentral_chisquare(0.5, 0.5, size=None) +def_gen.noncentral_chisquare(0.5, 0.5, size=1) +def_gen.noncentral_chisquare(D_arr_0p5, 0.5) +def_gen.noncentral_chisquare(0.5, D_arr_0p5) +def_gen.noncentral_chisquare(D_arr_0p5, 0.5, size=1) +def_gen.noncentral_chisquare(0.5, D_arr_0p5, size=1) +def_gen.noncentral_chisquare(D_arr_like_0p5, 0.5) +def_gen.noncentral_chisquare(0.5, D_arr_like_0p5) +def_gen.noncentral_chisquare(D_arr_0p5, D_arr_0p5) +def_gen.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5) +def_gen.noncentral_chisquare(D_arr_0p5, D_arr_0p5, size=1) +def_gen.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.normal(0.5, 0.5) +def_gen.normal(0.5, 0.5, size=None) +def_gen.normal(0.5, 0.5, size=1) +def_gen.normal(D_arr_0p5, 0.5) +def_gen.normal(0.5, D_arr_0p5) +def_gen.normal(D_arr_0p5, 0.5, size=1) +def_gen.normal(0.5, D_arr_0p5, size=1) +def_gen.normal(D_arr_like_0p5, 0.5) +def_gen.normal(0.5, D_arr_like_0p5) +def_gen.normal(D_arr_0p5, D_arr_0p5) +def_gen.normal(D_arr_like_0p5, D_arr_like_0p5) +def_gen.normal(D_arr_0p5, D_arr_0p5, size=1) +def_gen.normal(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.triangular(0.1, 0.5, 0.9) +def_gen.triangular(0.1, 0.5, 0.9, size=None) +def_gen.triangular(0.1, 0.5, 0.9, size=1) +def_gen.triangular(D_arr_0p1, 0.5, 0.9) +def_gen.triangular(0.1, D_arr_0p5, 0.9) +def_gen.triangular(D_arr_0p1, 0.5, D_arr_like_0p9, size=1) +def_gen.triangular(0.1, D_arr_0p5, 0.9, size=1) +def_gen.triangular(D_arr_like_0p1, 0.5, D_arr_0p9) +def_gen.triangular(0.5, D_arr_like_0p5, 0.9) +def_gen.triangular(D_arr_0p1, D_arr_0p5, 0.9) +def_gen.triangular(D_arr_like_0p1, D_arr_like_0p5, 0.9) +def_gen.triangular(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1) +def_gen.triangular(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1) + +def_gen.noncentral_f(0.1, 0.5, 0.9) +def_gen.noncentral_f(0.1, 0.5, 0.9, size=None) +def_gen.noncentral_f(0.1, 0.5, 0.9, size=1) +def_gen.noncentral_f(D_arr_0p1, 0.5, 0.9) +def_gen.noncentral_f(0.1, D_arr_0p5, 0.9) +def_gen.noncentral_f(D_arr_0p1, 0.5, D_arr_like_0p9, size=1) +def_gen.noncentral_f(0.1, D_arr_0p5, 0.9, size=1) +def_gen.noncentral_f(D_arr_like_0p1, 0.5, D_arr_0p9) +def_gen.noncentral_f(0.5, D_arr_like_0p5, 0.9) +def_gen.noncentral_f(D_arr_0p1, D_arr_0p5, 0.9) +def_gen.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 0.9) +def_gen.noncentral_f(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1) +def_gen.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1) + +def_gen.binomial(10, 0.5) +def_gen.binomial(10, 0.5, size=None) +def_gen.binomial(10, 0.5, size=1) +def_gen.binomial(I_arr_10, 0.5) +def_gen.binomial(10, D_arr_0p5) +def_gen.binomial(I_arr_10, 0.5, size=1) +def_gen.binomial(10, D_arr_0p5, size=1) +def_gen.binomial(I_arr_like_10, 0.5) +def_gen.binomial(10, D_arr_like_0p5) +def_gen.binomial(I_arr_10, D_arr_0p5) +def_gen.binomial(I_arr_like_10, D_arr_like_0p5) +def_gen.binomial(I_arr_10, D_arr_0p5, size=1) +def_gen.binomial(I_arr_like_10, D_arr_like_0p5, size=1) + +def_gen.negative_binomial(10, 0.5) +def_gen.negative_binomial(10, 0.5, size=None) +def_gen.negative_binomial(10, 0.5, size=1) +def_gen.negative_binomial(I_arr_10, 0.5) +def_gen.negative_binomial(10, D_arr_0p5) +def_gen.negative_binomial(I_arr_10, 0.5, size=1) +def_gen.negative_binomial(10, D_arr_0p5, size=1) 
+def_gen.negative_binomial(I_arr_like_10, 0.5) +def_gen.negative_binomial(10, D_arr_like_0p5) +def_gen.negative_binomial(I_arr_10, D_arr_0p5) +def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5) +def_gen.negative_binomial(I_arr_10, D_arr_0p5, size=1) +def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1) + +def_gen.hypergeometric(20, 20, 10) +def_gen.hypergeometric(20, 20, 10, size=None) +def_gen.hypergeometric(20, 20, 10, size=1) +def_gen.hypergeometric(I_arr_20, 20, 10) +def_gen.hypergeometric(20, I_arr_20, 10) +def_gen.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1) +def_gen.hypergeometric(20, I_arr_20, 10, size=1) +def_gen.hypergeometric(I_arr_like_20, 20, I_arr_10) +def_gen.hypergeometric(20, I_arr_like_20, 10) +def_gen.hypergeometric(I_arr_20, I_arr_20, 10) +def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, 10) +def_gen.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1) +def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1) + +I_int64_100: np.ndarray[Any, np.dtype[np.int64]] = np.array([100], dtype=np.int64) + +def_gen.integers(0, 100) +def_gen.integers(100) +def_gen.integers([100]) +def_gen.integers(0, [100]) + +I_bool_low: np.ndarray[Any, np.dtype[np.bool_]] = np.array([0], dtype=np.bool_) +I_bool_low_like: List[int] = [0] +I_bool_high_open: np.ndarray[Any, np.dtype[np.bool_]] = np.array([1], dtype=np.bool_) +I_bool_high_closed: np.ndarray[Any, np.dtype[np.bool_]] = np.array([1], dtype=np.bool_) + +def_gen.integers(2, dtype=bool) +def_gen.integers(0, 2, dtype=bool) +def_gen.integers(1, dtype=bool, endpoint=True) +def_gen.integers(0, 1, dtype=bool, endpoint=True) +def_gen.integers(I_bool_low_like, 1, dtype=bool, endpoint=True) +def_gen.integers(I_bool_high_open, dtype=bool) +def_gen.integers(I_bool_low, I_bool_high_open, dtype=bool) +def_gen.integers(0, I_bool_high_open, dtype=bool) +def_gen.integers(I_bool_high_closed, dtype=bool, endpoint=True) +def_gen.integers(I_bool_low, I_bool_high_closed, dtype=bool, endpoint=True) +def_gen.integers(0, I_bool_high_closed, dtype=bool, endpoint=True) + +def_gen.integers(2, dtype=np.bool_) +def_gen.integers(0, 2, dtype=np.bool_) +def_gen.integers(1, dtype=np.bool_, endpoint=True) +def_gen.integers(0, 1, dtype=np.bool_, endpoint=True) +def_gen.integers(I_bool_low_like, 1, dtype=np.bool_, endpoint=True) +def_gen.integers(I_bool_high_open, dtype=np.bool_) +def_gen.integers(I_bool_low, I_bool_high_open, dtype=np.bool_) +def_gen.integers(0, I_bool_high_open, dtype=np.bool_) +def_gen.integers(I_bool_high_closed, dtype=np.bool_, endpoint=True) +def_gen.integers(I_bool_low, I_bool_high_closed, dtype=np.bool_, endpoint=True) +def_gen.integers(0, I_bool_high_closed, dtype=np.bool_, endpoint=True) + +I_u1_low: np.ndarray[Any, np.dtype[np.uint8]] = np.array([0], dtype=np.uint8) +I_u1_low_like: List[int] = [0] +I_u1_high_open: np.ndarray[Any, np.dtype[np.uint8]] = np.array([255], dtype=np.uint8) +I_u1_high_closed: np.ndarray[Any, np.dtype[np.uint8]] = np.array([255], dtype=np.uint8) + +def_gen.integers(256, dtype="u1") +def_gen.integers(0, 256, dtype="u1") +def_gen.integers(255, dtype="u1", endpoint=True) +def_gen.integers(0, 255, dtype="u1", endpoint=True) +def_gen.integers(I_u1_low_like, 255, dtype="u1", endpoint=True) +def_gen.integers(I_u1_high_open, dtype="u1") +def_gen.integers(I_u1_low, I_u1_high_open, dtype="u1") +def_gen.integers(0, I_u1_high_open, dtype="u1") +def_gen.integers(I_u1_high_closed, dtype="u1", endpoint=True) +def_gen.integers(I_u1_low, I_u1_high_closed, dtype="u1", endpoint=True) 
+def_gen.integers(0, I_u1_high_closed, dtype="u1", endpoint=True) + +def_gen.integers(256, dtype="uint8") +def_gen.integers(0, 256, dtype="uint8") +def_gen.integers(255, dtype="uint8", endpoint=True) +def_gen.integers(0, 255, dtype="uint8", endpoint=True) +def_gen.integers(I_u1_low_like, 255, dtype="uint8", endpoint=True) +def_gen.integers(I_u1_high_open, dtype="uint8") +def_gen.integers(I_u1_low, I_u1_high_open, dtype="uint8") +def_gen.integers(0, I_u1_high_open, dtype="uint8") +def_gen.integers(I_u1_high_closed, dtype="uint8", endpoint=True) +def_gen.integers(I_u1_low, I_u1_high_closed, dtype="uint8", endpoint=True) +def_gen.integers(0, I_u1_high_closed, dtype="uint8", endpoint=True) + +def_gen.integers(256, dtype=np.uint8) +def_gen.integers(0, 256, dtype=np.uint8) +def_gen.integers(255, dtype=np.uint8, endpoint=True) +def_gen.integers(0, 255, dtype=np.uint8, endpoint=True) +def_gen.integers(I_u1_low_like, 255, dtype=np.uint8, endpoint=True) +def_gen.integers(I_u1_high_open, dtype=np.uint8) +def_gen.integers(I_u1_low, I_u1_high_open, dtype=np.uint8) +def_gen.integers(0, I_u1_high_open, dtype=np.uint8) +def_gen.integers(I_u1_high_closed, dtype=np.uint8, endpoint=True) +def_gen.integers(I_u1_low, I_u1_high_closed, dtype=np.uint8, endpoint=True) +def_gen.integers(0, I_u1_high_closed, dtype=np.uint8, endpoint=True) + +I_u2_low: np.ndarray[Any, np.dtype[np.uint16]] = np.array([0], dtype=np.uint16) +I_u2_low_like: List[int] = [0] +I_u2_high_open: np.ndarray[Any, np.dtype[np.uint16]] = np.array([65535], dtype=np.uint16) +I_u2_high_closed: np.ndarray[Any, np.dtype[np.uint16]] = np.array([65535], dtype=np.uint16) + +def_gen.integers(65536, dtype="u2") +def_gen.integers(0, 65536, dtype="u2") +def_gen.integers(65535, dtype="u2", endpoint=True) +def_gen.integers(0, 65535, dtype="u2", endpoint=True) +def_gen.integers(I_u2_low_like, 65535, dtype="u2", endpoint=True) +def_gen.integers(I_u2_high_open, dtype="u2") +def_gen.integers(I_u2_low, I_u2_high_open, dtype="u2") +def_gen.integers(0, I_u2_high_open, dtype="u2") +def_gen.integers(I_u2_high_closed, dtype="u2", endpoint=True) +def_gen.integers(I_u2_low, I_u2_high_closed, dtype="u2", endpoint=True) +def_gen.integers(0, I_u2_high_closed, dtype="u2", endpoint=True) + +def_gen.integers(65536, dtype="uint16") +def_gen.integers(0, 65536, dtype="uint16") +def_gen.integers(65535, dtype="uint16", endpoint=True) +def_gen.integers(0, 65535, dtype="uint16", endpoint=True) +def_gen.integers(I_u2_low_like, 65535, dtype="uint16", endpoint=True) +def_gen.integers(I_u2_high_open, dtype="uint16") +def_gen.integers(I_u2_low, I_u2_high_open, dtype="uint16") +def_gen.integers(0, I_u2_high_open, dtype="uint16") +def_gen.integers(I_u2_high_closed, dtype="uint16", endpoint=True) +def_gen.integers(I_u2_low, I_u2_high_closed, dtype="uint16", endpoint=True) +def_gen.integers(0, I_u2_high_closed, dtype="uint16", endpoint=True) + +def_gen.integers(65536, dtype=np.uint16) +def_gen.integers(0, 65536, dtype=np.uint16) +def_gen.integers(65535, dtype=np.uint16, endpoint=True) +def_gen.integers(0, 65535, dtype=np.uint16, endpoint=True) +def_gen.integers(I_u2_low_like, 65535, dtype=np.uint16, endpoint=True) +def_gen.integers(I_u2_high_open, dtype=np.uint16) +def_gen.integers(I_u2_low, I_u2_high_open, dtype=np.uint16) +def_gen.integers(0, I_u2_high_open, dtype=np.uint16) +def_gen.integers(I_u2_high_closed, dtype=np.uint16, endpoint=True) +def_gen.integers(I_u2_low, I_u2_high_closed, dtype=np.uint16, endpoint=True) +def_gen.integers(0, I_u2_high_closed, dtype=np.uint16, endpoint=True) + 
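+# A hedged aside (not part of the upstream test data): every dtype block in
+# this section repeats one pattern -- ``low``/``high`` passed as Python ints,
+# as typed ndarrays, or as plain array-likes, with the upper bound either
+# open (the default) or closed via ``endpoint=True``.  For ``uint16`` the two
+# calls below therefore sample from the same closed range [0, 65535]:
+#
+#     def_gen.integers(0, 65536, dtype=np.uint16)                 # high open
+#     def_gen.integers(0, 65535, dtype=np.uint16, endpoint=True)  # high closed
+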
+I_u4_low: np.ndarray[Any, np.dtype[np.uint32]] = np.array([0], dtype=np.uint32) +I_u4_low_like: List[int] = [0] +I_u4_high_open: np.ndarray[Any, np.dtype[np.uint32]] = np.array([4294967295], dtype=np.uint32) +I_u4_high_closed: np.ndarray[Any, np.dtype[np.uint32]] = np.array([4294967295], dtype=np.uint32) + +def_gen.integers(4294967296, dtype="u4") +def_gen.integers(0, 4294967296, dtype="u4") +def_gen.integers(4294967295, dtype="u4", endpoint=True) +def_gen.integers(0, 4294967295, dtype="u4", endpoint=True) +def_gen.integers(I_u4_low_like, 4294967295, dtype="u4", endpoint=True) +def_gen.integers(I_u4_high_open, dtype="u4") +def_gen.integers(I_u4_low, I_u4_high_open, dtype="u4") +def_gen.integers(0, I_u4_high_open, dtype="u4") +def_gen.integers(I_u4_high_closed, dtype="u4", endpoint=True) +def_gen.integers(I_u4_low, I_u4_high_closed, dtype="u4", endpoint=True) +def_gen.integers(0, I_u4_high_closed, dtype="u4", endpoint=True) + +def_gen.integers(4294967296, dtype="uint32") +def_gen.integers(0, 4294967296, dtype="uint32") +def_gen.integers(4294967295, dtype="uint32", endpoint=True) +def_gen.integers(0, 4294967295, dtype="uint32", endpoint=True) +def_gen.integers(I_u4_low_like, 4294967295, dtype="uint32", endpoint=True) +def_gen.integers(I_u4_high_open, dtype="uint32") +def_gen.integers(I_u4_low, I_u4_high_open, dtype="uint32") +def_gen.integers(0, I_u4_high_open, dtype="uint32") +def_gen.integers(I_u4_high_closed, dtype="uint32", endpoint=True) +def_gen.integers(I_u4_low, I_u4_high_closed, dtype="uint32", endpoint=True) +def_gen.integers(0, I_u4_high_closed, dtype="uint32", endpoint=True) + +def_gen.integers(4294967296, dtype=np.uint32) +def_gen.integers(0, 4294967296, dtype=np.uint32) +def_gen.integers(4294967295, dtype=np.uint32, endpoint=True) +def_gen.integers(0, 4294967295, dtype=np.uint32, endpoint=True) +def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint32, endpoint=True) +def_gen.integers(I_u4_high_open, dtype=np.uint32) +def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint32) +def_gen.integers(0, I_u4_high_open, dtype=np.uint32) +def_gen.integers(I_u4_high_closed, dtype=np.uint32, endpoint=True) +def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint32, endpoint=True) +def_gen.integers(0, I_u4_high_closed, dtype=np.uint32, endpoint=True) + +I_u8_low: np.ndarray[Any, np.dtype[np.uint64]] = np.array([0], dtype=np.uint64) +I_u8_low_like: List[int] = [0] +I_u8_high_open: np.ndarray[Any, np.dtype[np.uint64]] = np.array([18446744073709551615], dtype=np.uint64) +I_u8_high_closed: np.ndarray[Any, np.dtype[np.uint64]] = np.array([18446744073709551615], dtype=np.uint64) + +def_gen.integers(18446744073709551616, dtype="u8") +def_gen.integers(0, 18446744073709551616, dtype="u8") +def_gen.integers(18446744073709551615, dtype="u8", endpoint=True) +def_gen.integers(0, 18446744073709551615, dtype="u8", endpoint=True) +def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="u8", endpoint=True) +def_gen.integers(I_u8_high_open, dtype="u8") +def_gen.integers(I_u8_low, I_u8_high_open, dtype="u8") +def_gen.integers(0, I_u8_high_open, dtype="u8") +def_gen.integers(I_u8_high_closed, dtype="u8", endpoint=True) +def_gen.integers(I_u8_low, I_u8_high_closed, dtype="u8", endpoint=True) +def_gen.integers(0, I_u8_high_closed, dtype="u8", endpoint=True) + +def_gen.integers(18446744073709551616, dtype="uint64") +def_gen.integers(0, 18446744073709551616, dtype="uint64") +def_gen.integers(18446744073709551615, dtype="uint64", endpoint=True) +def_gen.integers(0, 18446744073709551615, 
dtype="uint64", endpoint=True)
+def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="uint64", endpoint=True)
+def_gen.integers(I_u8_high_open, dtype="uint64")
+def_gen.integers(I_u8_low, I_u8_high_open, dtype="uint64")
+def_gen.integers(0, I_u8_high_open, dtype="uint64")
+def_gen.integers(I_u8_high_closed, dtype="uint64", endpoint=True)
+def_gen.integers(I_u8_low, I_u8_high_closed, dtype="uint64", endpoint=True)
+def_gen.integers(0, I_u8_high_closed, dtype="uint64", endpoint=True)
+
+def_gen.integers(18446744073709551616, dtype=np.uint64)
+def_gen.integers(0, 18446744073709551616, dtype=np.uint64)
+def_gen.integers(18446744073709551615, dtype=np.uint64, endpoint=True)
+def_gen.integers(0, 18446744073709551615, dtype=np.uint64, endpoint=True)
+def_gen.integers(I_u8_low_like, 18446744073709551615, dtype=np.uint64, endpoint=True)
+def_gen.integers(I_u8_high_open, dtype=np.uint64)
+def_gen.integers(I_u8_low, I_u8_high_open, dtype=np.uint64)
+def_gen.integers(0, I_u8_high_open, dtype=np.uint64)
+def_gen.integers(I_u8_high_closed, dtype=np.uint64, endpoint=True)
+def_gen.integers(I_u8_low, I_u8_high_closed, dtype=np.uint64, endpoint=True)
+def_gen.integers(0, I_u8_high_closed, dtype=np.uint64, endpoint=True)
+
+I_i1_low: np.ndarray[Any, np.dtype[np.int8]] = np.array([-128], dtype=np.int8)
+I_i1_low_like: List[int] = [-128]
+I_i1_high_open: np.ndarray[Any, np.dtype[np.int8]] = np.array([127], dtype=np.int8)
+I_i1_high_closed: np.ndarray[Any, np.dtype[np.int8]] = np.array([127], dtype=np.int8)
+
+def_gen.integers(128, dtype="i1")
+def_gen.integers(-128, 128, dtype="i1")
+def_gen.integers(127, dtype="i1", endpoint=True)
+def_gen.integers(-128, 127, dtype="i1", endpoint=True)
+def_gen.integers(I_i1_low_like, 127, dtype="i1", endpoint=True)
+def_gen.integers(I_i1_high_open, dtype="i1")
+def_gen.integers(I_i1_low, I_i1_high_open, dtype="i1")
+def_gen.integers(-128, I_i1_high_open, dtype="i1")
+def_gen.integers(I_i1_high_closed, dtype="i1", endpoint=True)
+def_gen.integers(I_i1_low, I_i1_high_closed, dtype="i1", endpoint=True)
+def_gen.integers(-128, I_i1_high_closed, dtype="i1", endpoint=True)
+
+def_gen.integers(128, dtype="int8")
+def_gen.integers(-128, 128, dtype="int8")
+def_gen.integers(127, dtype="int8", endpoint=True)
+def_gen.integers(-128, 127, dtype="int8", endpoint=True)
+def_gen.integers(I_i1_low_like, 127, dtype="int8", endpoint=True)
+def_gen.integers(I_i1_high_open, dtype="int8")
+def_gen.integers(I_i1_low, I_i1_high_open, dtype="int8")
+def_gen.integers(-128, I_i1_high_open, dtype="int8")
+def_gen.integers(I_i1_high_closed, dtype="int8", endpoint=True)
+def_gen.integers(I_i1_low, I_i1_high_closed, dtype="int8", endpoint=True)
+def_gen.integers(-128, I_i1_high_closed, dtype="int8", endpoint=True)
+
+def_gen.integers(128, dtype=np.int8)
+def_gen.integers(-128, 128, dtype=np.int8)
+def_gen.integers(127, dtype=np.int8, endpoint=True)
+def_gen.integers(-128, 127, dtype=np.int8, endpoint=True)
+def_gen.integers(I_i1_low_like, 127, dtype=np.int8, endpoint=True)
+def_gen.integers(I_i1_high_open, dtype=np.int8)
+def_gen.integers(I_i1_low, I_i1_high_open, dtype=np.int8)
+def_gen.integers(-128, I_i1_high_open, dtype=np.int8)
+def_gen.integers(I_i1_high_closed, dtype=np.int8, endpoint=True)
+def_gen.integers(I_i1_low, I_i1_high_closed, dtype=np.int8, endpoint=True)
+def_gen.integers(-128, I_i1_high_closed, dtype=np.int8, endpoint=True)
+
+I_i2_low: np.ndarray[Any, np.dtype[np.int16]] = np.array([-32768], dtype=np.int16)
+I_i2_low_like: List[int] = [-32768]
+I_i2_high_open: np.ndarray[Any, np.dtype[np.int16]] = np.array([32767], dtype=np.int16)
+I_i2_high_closed: np.ndarray[Any, np.dtype[np.int16]] = np.array([32767], dtype=np.int16)
+
+def_gen.integers(32768, dtype="i2")
+def_gen.integers(-32768, 32768, dtype="i2")
+def_gen.integers(32767, dtype="i2", endpoint=True)
+def_gen.integers(-32768, 32767, dtype="i2", endpoint=True)
+def_gen.integers(I_i2_low_like, 32767, dtype="i2", endpoint=True)
+def_gen.integers(I_i2_high_open, dtype="i2")
+def_gen.integers(I_i2_low, I_i2_high_open, dtype="i2")
+def_gen.integers(-32768, I_i2_high_open, dtype="i2")
+def_gen.integers(I_i2_high_closed, dtype="i2", endpoint=True)
+def_gen.integers(I_i2_low, I_i2_high_closed, dtype="i2", endpoint=True)
+def_gen.integers(-32768, I_i2_high_closed, dtype="i2", endpoint=True)
+
+def_gen.integers(32768, dtype="int16")
+def_gen.integers(-32768, 32768, dtype="int16")
+def_gen.integers(32767, dtype="int16", endpoint=True)
+def_gen.integers(-32768, 32767, dtype="int16", endpoint=True)
+def_gen.integers(I_i2_low_like, 32767, dtype="int16", endpoint=True)
+def_gen.integers(I_i2_high_open, dtype="int16")
+def_gen.integers(I_i2_low, I_i2_high_open, dtype="int16")
+def_gen.integers(-32768, I_i2_high_open, dtype="int16")
+def_gen.integers(I_i2_high_closed, dtype="int16", endpoint=True)
+def_gen.integers(I_i2_low, I_i2_high_closed, dtype="int16", endpoint=True)
+def_gen.integers(-32768, I_i2_high_closed, dtype="int16", endpoint=True)
+
+def_gen.integers(32768, dtype=np.int16)
+def_gen.integers(-32768, 32768, dtype=np.int16)
+def_gen.integers(32767, dtype=np.int16, endpoint=True)
+def_gen.integers(-32768, 32767, dtype=np.int16, endpoint=True)
+def_gen.integers(I_i2_low_like, 32767, dtype=np.int16, endpoint=True)
+def_gen.integers(I_i2_high_open, dtype=np.int16)
+def_gen.integers(I_i2_low, I_i2_high_open, dtype=np.int16)
+def_gen.integers(-32768, I_i2_high_open, dtype=np.int16)
+def_gen.integers(I_i2_high_closed, dtype=np.int16, endpoint=True)
+def_gen.integers(I_i2_low, I_i2_high_closed, dtype=np.int16, endpoint=True)
+def_gen.integers(-32768, I_i2_high_closed, dtype=np.int16, endpoint=True)
+
+I_i4_low: np.ndarray[Any, np.dtype[np.int32]] = np.array([-2147483648], dtype=np.int32)
+I_i4_low_like: List[int] = [-2147483648]
+I_i4_high_open: np.ndarray[Any, np.dtype[np.int32]] = np.array([2147483647], dtype=np.int32)
+I_i4_high_closed: np.ndarray[Any, np.dtype[np.int32]] = np.array([2147483647], dtype=np.int32)
+
+def_gen.integers(2147483648, dtype="i4")
+def_gen.integers(-2147483648, 2147483648, dtype="i4")
+def_gen.integers(2147483647, dtype="i4", endpoint=True)
+def_gen.integers(-2147483648, 2147483647, dtype="i4", endpoint=True)
+def_gen.integers(I_i4_low_like, 2147483647, dtype="i4", endpoint=True)
+def_gen.integers(I_i4_high_open, dtype="i4")
+def_gen.integers(I_i4_low, I_i4_high_open, dtype="i4")
+def_gen.integers(-2147483648, I_i4_high_open, dtype="i4")
+def_gen.integers(I_i4_high_closed, dtype="i4", endpoint=True)
+def_gen.integers(I_i4_low, I_i4_high_closed, dtype="i4", endpoint=True)
+def_gen.integers(-2147483648, I_i4_high_closed, dtype="i4", endpoint=True)
+
+def_gen.integers(2147483648, dtype="int32")
+def_gen.integers(-2147483648, 2147483648, dtype="int32")
+def_gen.integers(2147483647, dtype="int32", endpoint=True)
+def_gen.integers(-2147483648, 2147483647, dtype="int32", endpoint=True)
+def_gen.integers(I_i4_low_like, 2147483647, dtype="int32", endpoint=True)
+def_gen.integers(I_i4_high_open, dtype="int32")
+def_gen.integers(I_i4_low, I_i4_high_open, dtype="int32")
+def_gen.integers(-2147483648, I_i4_high_open, dtype="int32")
+def_gen.integers(I_i4_high_closed, dtype="int32", endpoint=True)
+def_gen.integers(I_i4_low, I_i4_high_closed, dtype="int32", endpoint=True)
+def_gen.integers(-2147483648, I_i4_high_closed, dtype="int32", endpoint=True)
+
+def_gen.integers(2147483648, dtype=np.int32)
+def_gen.integers(-2147483648, 2147483648, dtype=np.int32)
+def_gen.integers(2147483647, dtype=np.int32, endpoint=True)
+def_gen.integers(-2147483648, 2147483647, dtype=np.int32, endpoint=True)
+def_gen.integers(I_i4_low_like, 2147483647, dtype=np.int32, endpoint=True)
+def_gen.integers(I_i4_high_open, dtype=np.int32)
+def_gen.integers(I_i4_low, I_i4_high_open, dtype=np.int32)
+def_gen.integers(-2147483648, I_i4_high_open, dtype=np.int32)
+def_gen.integers(I_i4_high_closed, dtype=np.int32, endpoint=True)
+def_gen.integers(I_i4_low, I_i4_high_closed, dtype=np.int32, endpoint=True)
+def_gen.integers(-2147483648, I_i4_high_closed, dtype=np.int32, endpoint=True)
+
+I_i8_low: np.ndarray[Any, np.dtype[np.int64]] = np.array([-9223372036854775808], dtype=np.int64)
+I_i8_low_like: List[int] = [-9223372036854775808]
+I_i8_high_open: np.ndarray[Any, np.dtype[np.int64]] = np.array([9223372036854775807], dtype=np.int64)
+I_i8_high_closed: np.ndarray[Any, np.dtype[np.int64]] = np.array([9223372036854775807], dtype=np.int64)
+
+def_gen.integers(9223372036854775808, dtype="i8")
+def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="i8")
+def_gen.integers(9223372036854775807, dtype="i8", endpoint=True)
+def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="i8", endpoint=True)
+def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="i8", endpoint=True)
+def_gen.integers(I_i8_high_open, dtype="i8")
+def_gen.integers(I_i8_low, I_i8_high_open, dtype="i8")
+def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="i8")
+def_gen.integers(I_i8_high_closed, dtype="i8", endpoint=True)
+def_gen.integers(I_i8_low, I_i8_high_closed, dtype="i8", endpoint=True)
+def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="i8", endpoint=True)
+
+def_gen.integers(9223372036854775808, dtype="int64")
+def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="int64")
+def_gen.integers(9223372036854775807, dtype="int64", endpoint=True)
+def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="int64", endpoint=True)
+def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="int64", endpoint=True)
+def_gen.integers(I_i8_high_open, dtype="int64")
+def_gen.integers(I_i8_low, I_i8_high_open, dtype="int64")
+def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="int64")
+def_gen.integers(I_i8_high_closed, dtype="int64", endpoint=True)
+def_gen.integers(I_i8_low, I_i8_high_closed, dtype="int64", endpoint=True)
+def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="int64", endpoint=True)
+
+def_gen.integers(9223372036854775808, dtype=np.int64)
+def_gen.integers(-9223372036854775808, 9223372036854775808, dtype=np.int64)
+def_gen.integers(9223372036854775807, dtype=np.int64, endpoint=True)
+def_gen.integers(-9223372036854775808, 9223372036854775807, dtype=np.int64, endpoint=True)
+def_gen.integers(I_i8_low_like, 9223372036854775807, dtype=np.int64, endpoint=True)
+def_gen.integers(I_i8_high_open, dtype=np.int64)
+def_gen.integers(I_i8_low, I_i8_high_open, dtype=np.int64)
+def_gen.integers(-9223372036854775808, I_i8_high_open, dtype=np.int64)
+def_gen.integers(I_i8_high_closed, dtype=np.int64, endpoint=True)
+def_gen.integers(I_i8_low, I_i8_high_closed, dtype=np.int64, endpoint=True)
+def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype=np.int64, endpoint=True)
+
+
+def_gen.bit_generator
+
+def_gen.bytes(2)
+
+def_gen.choice(5)
+def_gen.choice(5, 3)
+def_gen.choice(5, 3, replace=True)
+def_gen.choice(5, 3, p=[1 / 5] * 5)
+def_gen.choice(5, 3, p=[1 / 5] * 5, replace=False)
+
+def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"])
+def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3)
+def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, p=[1 / 4] * 4)
+def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=True)
+def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4]))
+
+def_gen.dirichlet([0.5, 0.5])
+def_gen.dirichlet(np.array([0.5, 0.5]))
+def_gen.dirichlet(np.array([0.5, 0.5]), size=3)
+
+def_gen.multinomial(20, [1 / 6.0] * 6)
+def_gen.multinomial(20, np.array([0.5, 0.5]))
+def_gen.multinomial(20, [1 / 6.0] * 6, size=2)
+def_gen.multinomial([[10], [20]], [1 / 6.0] * 6, size=(2, 2))
+def_gen.multinomial(np.array([[10], [20]]), np.array([0.5, 0.5]), size=(2, 2))
+
+def_gen.multivariate_hypergeometric([3, 5, 7], 2)
+def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2)
+def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, size=4)
+def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, size=(4, 7))
+def_gen.multivariate_hypergeometric([3, 5, 7], 2, method="count")
+def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, method="marginals")
+
+def_gen.multivariate_normal([0.0], [[1.0]])
+def_gen.multivariate_normal([0.0], np.array([[1.0]]))
+def_gen.multivariate_normal(np.array([0.0]), [[1.0]])
+def_gen.multivariate_normal([0.0], np.array([[1.0]]))
+
+def_gen.permutation(10)
+def_gen.permutation([1, 2, 3, 4])
+def_gen.permutation(np.array([1, 2, 3, 4]))
+def_gen.permutation(D_2D, axis=1)
+def_gen.permuted(D_2D)
+def_gen.permuted(D_2D_like)
+def_gen.permuted(D_2D, axis=1)
+def_gen.permuted(D_2D, out=D_2D)
+def_gen.permuted(D_2D_like, out=D_2D)
+def_gen.permuted(D_2D_like, out=D_2D)
+def_gen.permuted(D_2D, axis=1, out=D_2D)
+
+def_gen.shuffle(np.arange(10))
+def_gen.shuffle([1, 2, 3, 4, 5])
+def_gen.shuffle(D_2D, axis=1)
+
+def_gen.__str__()
+def_gen.__repr__()
+def_gen_state: Dict[str, Any]
+def_gen_state = def_gen.__getstate__()
+def_gen.__setstate__(def_gen_state)
+
+# RandomState
+random_st: np.random.RandomState = np.random.RandomState()
+
+random_st.standard_normal()
+random_st.standard_normal(size=None)
+random_st.standard_normal(size=1)
+
+random_st.random()
+random_st.random(size=None)
+random_st.random(size=1)
+
+random_st.standard_cauchy()
+random_st.standard_cauchy(size=None)
+random_st.standard_cauchy(size=1)
+
+random_st.standard_exponential()
+random_st.standard_exponential(size=None)
+random_st.standard_exponential(size=1)
+
+random_st.zipf(1.5)
+random_st.zipf(1.5, size=None)
+random_st.zipf(1.5, size=1)
+random_st.zipf(D_arr_1p5)
+random_st.zipf(D_arr_1p5, size=1)
+random_st.zipf(D_arr_like_1p5)
+random_st.zipf(D_arr_like_1p5, size=1)
+
+random_st.weibull(0.5)
+random_st.weibull(0.5, size=None)
+random_st.weibull(0.5, size=1)
+random_st.weibull(D_arr_0p5)
+random_st.weibull(D_arr_0p5, size=1)
+random_st.weibull(D_arr_like_0p5)
+random_st.weibull(D_arr_like_0p5, size=1)
+
+random_st.standard_t(0.5)
+random_st.standard_t(0.5, size=None)
+random_st.standard_t(0.5, size=1)
+random_st.standard_t(D_arr_0p5)
+random_st.standard_t(D_arr_0p5, size=1)
+random_st.standard_t(D_arr_like_0p5)
+random_st.standard_t(D_arr_like_0p5, size=1)
+
+random_st.poisson(0.5)
+random_st.poisson(0.5, size=None)
+random_st.poisson(0.5, size=1)
+random_st.poisson(D_arr_0p5)
+random_st.poisson(D_arr_0p5, size=1)
+random_st.poisson(D_arr_like_0p5)
+random_st.poisson(D_arr_like_0p5, size=1)
+
+random_st.power(0.5)
+random_st.power(0.5, size=None)
+random_st.power(0.5, size=1)
+random_st.power(D_arr_0p5)
+random_st.power(D_arr_0p5, size=1)
+random_st.power(D_arr_like_0p5)
+random_st.power(D_arr_like_0p5, size=1)
+
+random_st.pareto(0.5)
+random_st.pareto(0.5, size=None)
+random_st.pareto(0.5, size=1)
+random_st.pareto(D_arr_0p5)
+random_st.pareto(D_arr_0p5, size=1)
+random_st.pareto(D_arr_like_0p5)
+random_st.pareto(D_arr_like_0p5, size=1)
+
+random_st.chisquare(0.5)
+random_st.chisquare(0.5, size=None)
+random_st.chisquare(0.5, size=1)
+random_st.chisquare(D_arr_0p5)
+random_st.chisquare(D_arr_0p5, size=1)
+random_st.chisquare(D_arr_like_0p5)
+random_st.chisquare(D_arr_like_0p5, size=1)
+
+random_st.exponential(0.5)
+random_st.exponential(0.5, size=None)
+random_st.exponential(0.5, size=1)
+random_st.exponential(D_arr_0p5)
+random_st.exponential(D_arr_0p5, size=1)
+random_st.exponential(D_arr_like_0p5)
+random_st.exponential(D_arr_like_0p5, size=1)
+
+random_st.geometric(0.5)
+random_st.geometric(0.5, size=None)
+random_st.geometric(0.5, size=1)
+random_st.geometric(D_arr_0p5)
+random_st.geometric(D_arr_0p5, size=1)
+random_st.geometric(D_arr_like_0p5)
+random_st.geometric(D_arr_like_0p5, size=1)
+
+random_st.logseries(0.5)
+random_st.logseries(0.5, size=None)
+random_st.logseries(0.5, size=1)
+random_st.logseries(D_arr_0p5)
+random_st.logseries(D_arr_0p5, size=1)
+random_st.logseries(D_arr_like_0p5)
+random_st.logseries(D_arr_like_0p5, size=1)
+
+random_st.rayleigh(0.5)
+random_st.rayleigh(0.5, size=None)
+random_st.rayleigh(0.5, size=1)
+random_st.rayleigh(D_arr_0p5)
+random_st.rayleigh(D_arr_0p5, size=1)
+random_st.rayleigh(D_arr_like_0p5)
+random_st.rayleigh(D_arr_like_0p5, size=1)
+
+random_st.standard_gamma(0.5)
+random_st.standard_gamma(0.5, size=None)
+random_st.standard_gamma(0.5, size=1)
+random_st.standard_gamma(D_arr_0p5)
+random_st.standard_gamma(D_arr_0p5, size=1)
+random_st.standard_gamma(D_arr_like_0p5)
+random_st.standard_gamma(D_arr_like_0p5, size=1)
+random_st.standard_gamma(D_arr_like_0p5, size=1)
+
+random_st.vonmises(0.5, 0.5)
+random_st.vonmises(0.5, 0.5, size=None)
+random_st.vonmises(0.5, 0.5, size=1)
+random_st.vonmises(D_arr_0p5, 0.5)
+random_st.vonmises(0.5, D_arr_0p5)
+random_st.vonmises(D_arr_0p5, 0.5, size=1)
+random_st.vonmises(0.5, D_arr_0p5, size=1)
+random_st.vonmises(D_arr_like_0p5, 0.5)
+random_st.vonmises(0.5, D_arr_like_0p5)
+random_st.vonmises(D_arr_0p5, D_arr_0p5)
+random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5)
+random_st.vonmises(D_arr_0p5, D_arr_0p5, size=1)
+random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5, size=1)
+
+random_st.wald(0.5, 0.5)
+random_st.wald(0.5, 0.5, size=None)
+random_st.wald(0.5, 0.5, size=1)
+random_st.wald(D_arr_0p5, 0.5)
+random_st.wald(0.5, D_arr_0p5)
+random_st.wald(D_arr_0p5, 0.5, size=1)
+random_st.wald(0.5, D_arr_0p5, size=1)
+random_st.wald(D_arr_like_0p5, 0.5)
+random_st.wald(0.5, D_arr_like_0p5)
+random_st.wald(D_arr_0p5, D_arr_0p5)
+random_st.wald(D_arr_like_0p5, D_arr_like_0p5)
+random_st.wald(D_arr_0p5, D_arr_0p5, size=1)
+random_st.wald(D_arr_like_0p5, D_arr_like_0p5, size=1)
+
+random_st.uniform(0.5, 0.5)
+random_st.uniform(0.5, 0.5, size=None)
+random_st.uniform(0.5, 0.5, size=1)
+random_st.uniform(D_arr_0p5, 0.5)
+random_st.uniform(0.5, D_arr_0p5)
+random_st.uniform(D_arr_0p5, 0.5, size=1)
+random_st.uniform(0.5, D_arr_0p5, size=1)
+random_st.uniform(D_arr_like_0p5, 0.5)
+random_st.uniform(0.5, D_arr_like_0p5)
+random_st.uniform(D_arr_0p5, D_arr_0p5)
+random_st.uniform(D_arr_like_0p5, D_arr_like_0p5)
+random_st.uniform(D_arr_0p5, D_arr_0p5, size=1)
+random_st.uniform(D_arr_like_0p5, D_arr_like_0p5, size=1)
+
+random_st.beta(0.5, 0.5)
+random_st.beta(0.5, 0.5, size=None)
+random_st.beta(0.5, 0.5, size=1)
+random_st.beta(D_arr_0p5, 0.5)
+random_st.beta(0.5, D_arr_0p5)
+random_st.beta(D_arr_0p5, 0.5, size=1)
+random_st.beta(0.5, D_arr_0p5, size=1)
+random_st.beta(D_arr_like_0p5, 0.5)
+random_st.beta(0.5, D_arr_like_0p5)
+random_st.beta(D_arr_0p5, D_arr_0p5)
+random_st.beta(D_arr_like_0p5, D_arr_like_0p5)
+random_st.beta(D_arr_0p5, D_arr_0p5, size=1)
+random_st.beta(D_arr_like_0p5, D_arr_like_0p5, size=1)
+
+random_st.f(0.5, 0.5)
+random_st.f(0.5, 0.5, size=None)
+random_st.f(0.5, 0.5, size=1)
+random_st.f(D_arr_0p5, 0.5)
+random_st.f(0.5, D_arr_0p5)
+random_st.f(D_arr_0p5, 0.5, size=1)
+random_st.f(0.5, D_arr_0p5, size=1)
+random_st.f(D_arr_like_0p5, 0.5)
+random_st.f(0.5, D_arr_like_0p5)
+random_st.f(D_arr_0p5, D_arr_0p5)
+random_st.f(D_arr_like_0p5, D_arr_like_0p5)
+random_st.f(D_arr_0p5, D_arr_0p5, size=1)
+random_st.f(D_arr_like_0p5, D_arr_like_0p5, size=1)
+
+random_st.gamma(0.5, 0.5)
+random_st.gamma(0.5, 0.5, size=None)
+random_st.gamma(0.5, 0.5, size=1)
+random_st.gamma(D_arr_0p5, 0.5)
+random_st.gamma(0.5, D_arr_0p5)
+random_st.gamma(D_arr_0p5, 0.5, size=1)
+random_st.gamma(0.5, D_arr_0p5, size=1)
+random_st.gamma(D_arr_like_0p5, 0.5)
+random_st.gamma(0.5, D_arr_like_0p5)
+random_st.gamma(D_arr_0p5, D_arr_0p5)
+random_st.gamma(D_arr_like_0p5, D_arr_like_0p5)
+random_st.gamma(D_arr_0p5, D_arr_0p5, size=1)
+random_st.gamma(D_arr_like_0p5, D_arr_like_0p5, size=1)
+
+random_st.gumbel(0.5, 0.5)
+random_st.gumbel(0.5, 0.5, size=None)
+random_st.gumbel(0.5, 0.5, size=1)
+random_st.gumbel(D_arr_0p5, 0.5)
+random_st.gumbel(0.5, D_arr_0p5)
+random_st.gumbel(D_arr_0p5, 0.5, size=1)
+random_st.gumbel(0.5, D_arr_0p5, size=1)
+random_st.gumbel(D_arr_like_0p5, 0.5)
+random_st.gumbel(0.5, D_arr_like_0p5)
+random_st.gumbel(D_arr_0p5, D_arr_0p5)
+random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5)
+random_st.gumbel(D_arr_0p5, D_arr_0p5, size=1)
+random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5, size=1)
+
+random_st.laplace(0.5, 0.5)
+random_st.laplace(0.5, 0.5, size=None)
+random_st.laplace(0.5, 0.5, size=1)
+random_st.laplace(D_arr_0p5, 0.5)
+random_st.laplace(0.5, D_arr_0p5)
+random_st.laplace(D_arr_0p5, 0.5, size=1)
+random_st.laplace(0.5, D_arr_0p5, size=1)
+random_st.laplace(D_arr_like_0p5, 0.5)
+random_st.laplace(0.5, D_arr_like_0p5)
+random_st.laplace(D_arr_0p5, D_arr_0p5)
+random_st.laplace(D_arr_like_0p5, D_arr_like_0p5)
+random_st.laplace(D_arr_0p5, D_arr_0p5, size=1)
+random_st.laplace(D_arr_like_0p5, D_arr_like_0p5, size=1)
+
+random_st.logistic(0.5, 0.5)
+random_st.logistic(0.5, 0.5, size=None)
+random_st.logistic(0.5, 0.5, size=1)
+random_st.logistic(D_arr_0p5, 0.5)
+random_st.logistic(0.5, D_arr_0p5)
+random_st.logistic(D_arr_0p5, 0.5, size=1)
+random_st.logistic(0.5, D_arr_0p5, size=1)
+random_st.logistic(D_arr_like_0p5, 0.5)
+random_st.logistic(0.5, D_arr_like_0p5)
+random_st.logistic(D_arr_0p5, D_arr_0p5)
+random_st.logistic(D_arr_like_0p5, D_arr_like_0p5)
+random_st.logistic(D_arr_0p5, D_arr_0p5, size=1)
+random_st.logistic(D_arr_like_0p5, D_arr_like_0p5, size=1)
+
+random_st.lognormal(0.5, 0.5)
+random_st.lognormal(0.5, 0.5, size=None)
+random_st.lognormal(0.5, 0.5, size=1)
+random_st.lognormal(D_arr_0p5, 0.5)
+random_st.lognormal(0.5, D_arr_0p5)
+random_st.lognormal(D_arr_0p5, 0.5, size=1)
+random_st.lognormal(0.5, D_arr_0p5, size=1)
+random_st.lognormal(D_arr_like_0p5, 0.5)
+random_st.lognormal(0.5, D_arr_like_0p5)
+random_st.lognormal(D_arr_0p5, D_arr_0p5)
+random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5)
+random_st.lognormal(D_arr_0p5, D_arr_0p5, size=1)
+random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5, size=1)
+
+random_st.noncentral_chisquare(0.5, 0.5)
+random_st.noncentral_chisquare(0.5, 0.5, size=None)
+random_st.noncentral_chisquare(0.5, 0.5, size=1)
+random_st.noncentral_chisquare(D_arr_0p5, 0.5)
+random_st.noncentral_chisquare(0.5, D_arr_0p5)
+random_st.noncentral_chisquare(D_arr_0p5, 0.5, size=1)
+random_st.noncentral_chisquare(0.5, D_arr_0p5, size=1)
+random_st.noncentral_chisquare(D_arr_like_0p5, 0.5)
+random_st.noncentral_chisquare(0.5, D_arr_like_0p5)
+random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5)
+random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5)
+random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5, size=1)
+random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5, size=1)
+
+random_st.normal(0.5, 0.5)
+random_st.normal(0.5, 0.5, size=None)
+random_st.normal(0.5, 0.5, size=1)
+random_st.normal(D_arr_0p5, 0.5)
+random_st.normal(0.5, D_arr_0p5)
+random_st.normal(D_arr_0p5, 0.5, size=1)
+random_st.normal(0.5, D_arr_0p5, size=1)
+random_st.normal(D_arr_like_0p5, 0.5)
+random_st.normal(0.5, D_arr_like_0p5)
+random_st.normal(D_arr_0p5, D_arr_0p5)
+random_st.normal(D_arr_like_0p5, D_arr_like_0p5)
+random_st.normal(D_arr_0p5, D_arr_0p5, size=1)
+random_st.normal(D_arr_like_0p5, D_arr_like_0p5, size=1)
+
+random_st.triangular(0.1, 0.5, 0.9)
+random_st.triangular(0.1, 0.5, 0.9, size=None)
+random_st.triangular(0.1, 0.5, 0.9, size=1)
+random_st.triangular(D_arr_0p1, 0.5, 0.9)
+random_st.triangular(0.1, D_arr_0p5, 0.9)
+random_st.triangular(D_arr_0p1, 0.5, D_arr_like_0p9, size=1)
+random_st.triangular(0.1, D_arr_0p5, 0.9, size=1)
+random_st.triangular(D_arr_like_0p1, 0.5, D_arr_0p9)
+random_st.triangular(0.5, D_arr_like_0p5, 0.9)
+random_st.triangular(D_arr_0p1, D_arr_0p5, 0.9)
+random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, 0.9)
+random_st.triangular(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1)
+random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1)
+
+random_st.noncentral_f(0.1, 0.5, 0.9)
+random_st.noncentral_f(0.1, 0.5, 0.9, size=None)
+random_st.noncentral_f(0.1, 0.5, 0.9, size=1)
+random_st.noncentral_f(D_arr_0p1, 0.5, 0.9)
+random_st.noncentral_f(0.1, D_arr_0p5, 0.9)
+random_st.noncentral_f(D_arr_0p1, 0.5, D_arr_like_0p9, size=1)
+random_st.noncentral_f(0.1, D_arr_0p5, 0.9, size=1)
+random_st.noncentral_f(D_arr_like_0p1, 0.5, D_arr_0p9)
+random_st.noncentral_f(0.5, D_arr_like_0p5, 0.9)
+random_st.noncentral_f(D_arr_0p1, D_arr_0p5, 0.9)
+random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 0.9)
+random_st.noncentral_f(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1)
+random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1)
+
+random_st.binomial(10, 0.5)
+random_st.binomial(10, 0.5, size=None)
+random_st.binomial(10, 0.5, size=1)
+random_st.binomial(I_arr_10, 0.5)
+random_st.binomial(10, D_arr_0p5)
+random_st.binomial(I_arr_10, 0.5, size=1)
+random_st.binomial(10, D_arr_0p5, size=1)
+random_st.binomial(I_arr_like_10, 0.5)
+random_st.binomial(10, D_arr_like_0p5)
+random_st.binomial(I_arr_10, D_arr_0p5)
+random_st.binomial(I_arr_like_10, D_arr_like_0p5)
+random_st.binomial(I_arr_10, D_arr_0p5, size=1)
+random_st.binomial(I_arr_like_10, D_arr_like_0p5, size=1)
+
+random_st.negative_binomial(10, 0.5)
+random_st.negative_binomial(10, 0.5, size=None)
+random_st.negative_binomial(10, 0.5, size=1)
+random_st.negative_binomial(I_arr_10, 0.5)
+random_st.negative_binomial(10, D_arr_0p5)
+random_st.negative_binomial(I_arr_10, 0.5, size=1)
+random_st.negative_binomial(10, D_arr_0p5, size=1)
+random_st.negative_binomial(I_arr_like_10, 0.5)
+random_st.negative_binomial(10, D_arr_like_0p5)
+random_st.negative_binomial(I_arr_10, D_arr_0p5)
+random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5)
+random_st.negative_binomial(I_arr_10, D_arr_0p5, size=1)
+random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1)
+
+random_st.hypergeometric(20, 20, 10)
+random_st.hypergeometric(20, 20, 10, size=None)
+random_st.hypergeometric(20, 20, 10, size=1)
+random_st.hypergeometric(I_arr_20, 20, 10)
+random_st.hypergeometric(20, I_arr_20, 10)
+random_st.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1)
+random_st.hypergeometric(20, I_arr_20, 10, size=1)
+random_st.hypergeometric(I_arr_like_20, 20, I_arr_10)
+random_st.hypergeometric(20, I_arr_like_20, 10)
+random_st.hypergeometric(I_arr_20, I_arr_20, 10)
+random_st.hypergeometric(I_arr_like_20, I_arr_like_20, 10)
+random_st.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1)
+random_st.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1)
+
+random_st.randint(0, 100)
+random_st.randint(100)
+random_st.randint([100])
+random_st.randint(0, [100])
+
+random_st.randint(2, dtype=bool)
+random_st.randint(0, 2, dtype=bool)
+random_st.randint(I_bool_high_open, dtype=bool)
+random_st.randint(I_bool_low, I_bool_high_open, dtype=bool)
+random_st.randint(0, I_bool_high_open, dtype=bool)
+
+random_st.randint(2, dtype=np.bool_)
+random_st.randint(0, 2, dtype=np.bool_)
+random_st.randint(I_bool_high_open, dtype=np.bool_)
+random_st.randint(I_bool_low, I_bool_high_open, dtype=np.bool_)
+random_st.randint(0, I_bool_high_open, dtype=np.bool_)
+
+random_st.randint(256, dtype="u1")
+random_st.randint(0, 256, dtype="u1")
+random_st.randint(I_u1_high_open, dtype="u1")
+random_st.randint(I_u1_low, I_u1_high_open, dtype="u1")
+random_st.randint(0, I_u1_high_open, dtype="u1")
+
+random_st.randint(256, dtype="uint8")
+random_st.randint(0, 256, dtype="uint8")
+random_st.randint(I_u1_high_open, dtype="uint8")
+random_st.randint(I_u1_low, I_u1_high_open, dtype="uint8")
+random_st.randint(0, I_u1_high_open, dtype="uint8")
+
+random_st.randint(256, dtype=np.uint8)
+random_st.randint(0, 256, dtype=np.uint8)
+random_st.randint(I_u1_high_open, dtype=np.uint8)
+random_st.randint(I_u1_low, I_u1_high_open, dtype=np.uint8)
+random_st.randint(0, I_u1_high_open, dtype=np.uint8)
+
+random_st.randint(65536, dtype="u2")
+random_st.randint(0, 65536, dtype="u2")
+random_st.randint(I_u2_high_open, dtype="u2")
+random_st.randint(I_u2_low, I_u2_high_open, dtype="u2")
+random_st.randint(0, I_u2_high_open, dtype="u2")
+
+random_st.randint(65536, dtype="uint16")
+random_st.randint(0, 65536, dtype="uint16")
+random_st.randint(I_u2_high_open, dtype="uint16")
+random_st.randint(I_u2_low, I_u2_high_open, dtype="uint16")
+random_st.randint(0, I_u2_high_open, dtype="uint16")
+
+random_st.randint(65536, dtype=np.uint16)
+random_st.randint(0, 65536, dtype=np.uint16)
+random_st.randint(I_u2_high_open, dtype=np.uint16)
+random_st.randint(I_u2_low, I_u2_high_open, dtype=np.uint16)
+random_st.randint(0, I_u2_high_open, dtype=np.uint16)
+
+random_st.randint(4294967296, dtype="u4")
+random_st.randint(0, 4294967296, dtype="u4")
+random_st.randint(I_u4_high_open, dtype="u4")
+random_st.randint(I_u4_low, I_u4_high_open, dtype="u4")
+random_st.randint(0, I_u4_high_open, dtype="u4")
+
+random_st.randint(4294967296, dtype="uint32")
+random_st.randint(0, 4294967296, dtype="uint32")
+random_st.randint(I_u4_high_open, dtype="uint32")
+random_st.randint(I_u4_low, I_u4_high_open, dtype="uint32")
+random_st.randint(0, I_u4_high_open, dtype="uint32")
+
+random_st.randint(4294967296, dtype=np.uint32)
+random_st.randint(0, 4294967296, dtype=np.uint32)
+random_st.randint(I_u4_high_open, dtype=np.uint32)
+random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint32)
+random_st.randint(0, I_u4_high_open, dtype=np.uint32)
+
+
+random_st.randint(18446744073709551616, dtype="u8")
+random_st.randint(0, 18446744073709551616, dtype="u8")
+random_st.randint(I_u8_high_open, dtype="u8")
+random_st.randint(I_u8_low, I_u8_high_open, dtype="u8")
+random_st.randint(0, I_u8_high_open, dtype="u8")
+
+random_st.randint(18446744073709551616, dtype="uint64")
+random_st.randint(0, 18446744073709551616, dtype="uint64")
+random_st.randint(I_u8_high_open, dtype="uint64")
+random_st.randint(I_u8_low, I_u8_high_open, dtype="uint64")
+random_st.randint(0, I_u8_high_open, dtype="uint64")
+
+random_st.randint(18446744073709551616, dtype=np.uint64)
+random_st.randint(0, 18446744073709551616, dtype=np.uint64)
+random_st.randint(I_u8_high_open, dtype=np.uint64)
+random_st.randint(I_u8_low, I_u8_high_open, dtype=np.uint64)
+random_st.randint(0, I_u8_high_open, dtype=np.uint64)
+
+random_st.randint(128, dtype="i1")
+random_st.randint(-128, 128, dtype="i1")
+random_st.randint(I_i1_high_open, dtype="i1")
+random_st.randint(I_i1_low, I_i1_high_open, dtype="i1")
+random_st.randint(-128, I_i1_high_open, dtype="i1")
+
+random_st.randint(128, dtype="int8")
+random_st.randint(-128, 128, dtype="int8")
+random_st.randint(I_i1_high_open, dtype="int8")
+random_st.randint(I_i1_low, I_i1_high_open, dtype="int8")
+random_st.randint(-128, I_i1_high_open, dtype="int8")
+
+random_st.randint(128, dtype=np.int8)
+random_st.randint(-128, 128, dtype=np.int8)
+random_st.randint(I_i1_high_open, dtype=np.int8)
+random_st.randint(I_i1_low, I_i1_high_open, dtype=np.int8)
+random_st.randint(-128, I_i1_high_open, dtype=np.int8)
+
+random_st.randint(32768, dtype="i2")
+random_st.randint(-32768, 32768, dtype="i2")
+random_st.randint(I_i2_high_open, dtype="i2")
+random_st.randint(I_i2_low, I_i2_high_open, dtype="i2")
+random_st.randint(-32768, I_i2_high_open, dtype="i2")
+random_st.randint(32768, dtype="int16")
+random_st.randint(-32768, 32768, dtype="int16")
+random_st.randint(I_i2_high_open, dtype="int16")
+random_st.randint(I_i2_low, I_i2_high_open, dtype="int16")
+random_st.randint(-32768, I_i2_high_open, dtype="int16")
+random_st.randint(32768, dtype=np.int16)
+random_st.randint(-32768, 32768, dtype=np.int16)
+random_st.randint(I_i2_high_open, dtype=np.int16)
+random_st.randint(I_i2_low, I_i2_high_open, dtype=np.int16)
+random_st.randint(-32768, I_i2_high_open, dtype=np.int16)
+
+random_st.randint(2147483648, dtype="i4")
+random_st.randint(-2147483648, 2147483648, dtype="i4")
+random_st.randint(I_i4_high_open, dtype="i4")
+random_st.randint(I_i4_low, I_i4_high_open, dtype="i4")
+random_st.randint(-2147483648, I_i4_high_open, dtype="i4")
+
+random_st.randint(2147483648, dtype="int32")
+random_st.randint(-2147483648, 2147483648, dtype="int32")
+random_st.randint(I_i4_high_open, dtype="int32")
+random_st.randint(I_i4_low, I_i4_high_open, dtype="int32")
+random_st.randint(-2147483648, I_i4_high_open, dtype="int32")
+
+random_st.randint(2147483648, dtype=np.int32)
+random_st.randint(-2147483648, 2147483648, dtype=np.int32)
+random_st.randint(I_i4_high_open, dtype=np.int32)
+random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int32)
+random_st.randint(-2147483648, I_i4_high_open, dtype=np.int32)
+
+random_st.randint(9223372036854775808, dtype="i8")
+random_st.randint(-9223372036854775808, 9223372036854775808, dtype="i8")
+random_st.randint(I_i8_high_open, dtype="i8")
+random_st.randint(I_i8_low, I_i8_high_open, dtype="i8")
+random_st.randint(-9223372036854775808, I_i8_high_open, dtype="i8")
+
+random_st.randint(9223372036854775808, dtype="int64")
+random_st.randint(-9223372036854775808, 9223372036854775808, dtype="int64")
+random_st.randint(I_i8_high_open, dtype="int64")
+random_st.randint(I_i8_low, I_i8_high_open, dtype="int64")
+random_st.randint(-9223372036854775808, I_i8_high_open, dtype="int64")
+
+random_st.randint(9223372036854775808, dtype=np.int64)
+random_st.randint(-9223372036854775808, 9223372036854775808, dtype=np.int64)
+random_st.randint(I_i8_high_open, dtype=np.int64)
+random_st.randint(I_i8_low, I_i8_high_open, dtype=np.int64)
+random_st.randint(-9223372036854775808, I_i8_high_open, dtype=np.int64)
+
+bg: np.random.BitGenerator = random_st._bit_generator
+
+random_st.bytes(2)
+
+random_st.choice(5)
+random_st.choice(5, 3)
+random_st.choice(5, 3, replace=True)
+random_st.choice(5, 3, p=[1 / 5] * 5)
+random_st.choice(5, 3, p=[1 / 5] * 5, replace=False)
+
+random_st.choice(["pooh", "rabbit", "piglet", "Christopher"])
+random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3)
+random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, p=[1 / 4] * 4)
+random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=True)
+random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4]))
+
+random_st.dirichlet([0.5, 0.5])
+random_st.dirichlet(np.array([0.5, 0.5]))
+random_st.dirichlet(np.array([0.5, 0.5]), size=3)
+
+random_st.multinomial(20, [1 / 6.0] * 6)
+random_st.multinomial(20, np.array([0.5, 0.5]))
+random_st.multinomial(20, [1 / 6.0] * 6, size=2)
+
+random_st.multivariate_normal([0.0], [[1.0]])
+random_st.multivariate_normal([0.0], np.array([[1.0]]))
+random_st.multivariate_normal(np.array([0.0]), [[1.0]])
+random_st.multivariate_normal([0.0], np.array([[1.0]]))
+
+random_st.permutation(10)
+random_st.permutation([1, 2, 3, 4])
+random_st.permutation(np.array([1, 2, 3, 4]))
+random_st.permutation(D_2D)
+
+random_st.shuffle(np.arange(10))
+random_st.shuffle([1, 2, 3, 4, 5])
+random_st.shuffle(D_2D)
+
+np.random.RandomState(SEED_PCG64)
+np.random.RandomState(0)
+np.random.RandomState([0, 1, 2])
+random_st.__str__()
+random_st.__repr__()
+random_st_state = random_st.__getstate__()
+random_st.__setstate__(random_st_state)
+random_st.seed()
+random_st.seed(1)
+random_st.seed([0, 1])
+random_st_get_state = random_st.get_state()
+random_st_get_state_legacy = random_st.get_state(legacy=True)
+random_st.set_state(random_st_get_state)
+
+random_st.rand()
+random_st.rand(1)
+random_st.rand(1, 2)
+random_st.randn()
+random_st.randn(1)
+random_st.randn(1, 2)
+random_st.random_sample()
+random_st.random_sample(1)
+random_st.random_sample(size=(1, 2))
+
+random_st.tomaxint()
+random_st.tomaxint(1)
+random_st.tomaxint((1,))
diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/scalars.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/scalars.py
new file mode 100644
index 0000000000000000000000000000000000000000..a58667fefd553a17fcfc8f41609c2a5e4c8606a1
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/scalars.py
@@ -0,0 +1,254 @@
+import sys
+import datetime as dt
+
+import pytest
+import numpy as np
+
+b = np.bool_()
+u8 = np.uint64()
+i8 = np.int64()
+f8 = np.float64()
+c16 = np.complex128()
+U = np.str_()
+S = np.bytes_()
+
+
+# Construction
+class D:
+    def __index__(self) -> int:
+        return 0
+
+
+class C:
+    def __complex__(self) -> complex:
+        return 3j
+
+
+class B:
+    def __int__(self) -> int:
+        return 4
+
+
+class A:
+    def __float__(self) -> float:
+        return 4.0
+
+
+np.complex64(3j)
+np.complex64(A())
+np.complex64(C())
+np.complex128(3j)
+np.complex128(C())
+np.complex128(None)
+np.complex64("1.2")
+np.complex128(b"2j")
+
+np.int8(4)
+np.int16(3.4)
+np.int32(4)
+np.int64(-1)
+np.uint8(B())
+np.uint32()
+np.int32("1")
+np.int64(b"2")
+
+np.float16(A())
+np.float32(16)
+np.float64(3.0)
+np.float64(None)
+np.float32("1")
+np.float16(b"2.5")
+
+if sys.version_info >= (3, 8):
+    np.uint64(D())
+    np.float32(D())
+    np.complex64(D())
+
+np.bytes_(b"hello")
+np.bytes_("hello", 'utf-8')
+np.bytes_("hello", encoding='utf-8')
+np.str_("hello")
+np.str_(b"hello", 'utf-8')
+np.str_(b"hello", encoding='utf-8')
+
+# Array-ish semantics
+np.int8().real
+np.int16().imag
+np.int32().data
+np.int64().flags
+
+np.uint8().itemsize * 2
+np.uint16().ndim + 1
+np.uint32().strides
+np.uint64().shape
+
+# Time structures
+np.datetime64()
+np.datetime64(0, "D")
+np.datetime64(0, b"D")
+np.datetime64(0, ('ms', 3))
+np.datetime64("2019")
+np.datetime64(b"2019")
+np.datetime64("2019", "D")
+np.datetime64(np.datetime64())
+np.datetime64(dt.datetime(2000, 5, 3))
+np.datetime64(dt.date(2000, 5, 3))
+np.datetime64(None)
+np.datetime64(None, "D")
+
+np.timedelta64()
+np.timedelta64(0)
+np.timedelta64(0, "D")
+np.timedelta64(0, ('ms', 3))
+np.timedelta64(0, b"D")
+np.timedelta64("3")
+np.timedelta64(b"5")
+np.timedelta64(np.timedelta64(2))
+np.timedelta64(dt.timedelta(2))
+np.timedelta64(None)
+np.timedelta64(None, "D")
+
+np.void(1)
+np.void(np.int64(1))
+np.void(True)
+np.void(np.bool_(True))
+np.void(b"test")
+np.void(np.bytes_("test"))
+
+# Protocols
+i8 = np.int64()
+u8 = np.uint64()
+f8 = np.float64()
+c16 = np.complex128()
+b_ = np.bool_()
+td = np.timedelta64()
+U = np.str_("1")
+S = np.bytes_("1")
+AR = np.array(1, dtype=np.float64)
+
+int(i8)
+int(u8)
+int(f8)
+int(b_)
+int(td)
+int(U)
+int(S)
+int(AR)
+with pytest.warns(np.ComplexWarning):
+    int(c16)
+
+float(i8)
+float(u8)
+float(f8)
+float(b_)
+float(td)
+float(U)
+float(S)
+float(AR)
+with pytest.warns(np.ComplexWarning):
+    float(c16)
+
+complex(i8)
+complex(u8)
+complex(f8)
+complex(c16)
+complex(b_)
+complex(td)
+complex(U)
+complex(AR)
+
+
+# Misc
+c16.dtype
+c16.real
+c16.imag
+c16.real.real
+c16.real.imag
+c16.ndim
+c16.size
+c16.itemsize
+c16.shape
+c16.strides
+c16.squeeze()
+c16.byteswap()
+c16.transpose()
+
+# Aliases
+np.str0()
+np.bool8()
+np.bytes0()
+np.string_()
+np.object0()
+np.void0(0)
+
+np.byte()
+np.short()
+np.intc()
+np.intp()
+np.int0()
+np.int_()
+np.longlong()
+
+np.ubyte()
+np.ushort()
+np.uintc()
+np.uintp()
+np.uint0()
+np.uint()
+np.ulonglong()
+
+np.half()
+np.single()
+np.double()
+np.float_()
+np.longdouble()
+np.longfloat()
+
+np.csingle()
+np.singlecomplex()
+np.cdouble()
+np.complex_()
+np.cfloat()
+np.clongdouble()
+np.clongfloat()
+np.longcomplex()
+
+b.item()
+i8.item()
+u8.item()
+f8.item()
+c16.item()
+U.item()
+S.item()
+
+b.tolist()
+i8.tolist()
+u8.tolist()
+f8.tolist()
+c16.tolist()
+U.tolist()
+S.tolist()
+
+b.ravel()
+i8.ravel()
+u8.ravel()
+f8.ravel()
+c16.ravel()
+U.ravel()
+S.ravel()
+
+b.flatten()
+i8.flatten()
+u8.flatten()
+f8.flatten()
+c16.flatten()
+U.flatten()
+S.flatten()
+
+b.reshape(1)
+i8.reshape(1)
+u8.reshape(1)
+f8.reshape(1)
+c16.reshape(1)
+U.reshape(1)
+S.reshape(1)
diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/simple.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/simple.py
new file mode 100644
index 0000000000000000000000000000000000000000..fcdf1cbcff8e53df7c834ebbe7d0c6963ad90b2d
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/simple.py
@@ -0,0 +1,165 @@
+"""Simple expression that should pass with mypy."""
+import operator
+
+import numpy as np
+from typing import Iterable # noqa: F401
+
+# Basic checks
+array = np.array([1, 2])
+
+
+def ndarray_func(x):
+    # type: (np.ndarray) -> np.ndarray
+    return x
+
+
+ndarray_func(np.array([1, 2]))
+array == 1
+array.dtype == float
+
+# Dtype construction
+np.dtype(float)
+np.dtype(np.float64)
+np.dtype(None)
+np.dtype("float64")
+np.dtype(np.dtype(float))
+np.dtype(("U", 10))
+np.dtype((np.int32, (2, 2)))
+# Define the arguments on the previous line to prevent bidirectional
+# type inference in mypy from broadening the types.
+two_tuples_dtype = [("R", "u1"), ("G", "u1"), ("B", "u1")]
+np.dtype(two_tuples_dtype)
+
+three_tuples_dtype = [("R", "u1", 2)]
+np.dtype(three_tuples_dtype)
+
+mixed_tuples_dtype = [("R", "u1"), ("G", np.unicode_, 1)]
+np.dtype(mixed_tuples_dtype)
+
+shape_tuple_dtype = [("R", "u1", (2, 2))]
+np.dtype(shape_tuple_dtype)
+
+shape_like_dtype = [("R", "u1", (2, 2)), ("G", np.unicode_, 1)]
+np.dtype(shape_like_dtype)
+
+object_dtype = [("field1", object)]
+np.dtype(object_dtype)
+
+np.dtype((np.int32, (np.int8, 4)))
+
+# Dtype comparison
+np.dtype(float) == float
+np.dtype(float) != np.float64
+np.dtype(float) < None
+np.dtype(float) <= "float64"
+np.dtype(float) > np.dtype(float)
+np.dtype(float) >= np.dtype(("U", 10))
+
+# Iteration and indexing
+def iterable_func(x):
+    # type: (Iterable) -> Iterable
+    return x
+
+
+iterable_func(array)
+[element for element in array]
+iter(array)
+zip(array, array)
+array[1]
+array[:]
+array[...]
+array[:] = 0
+
+array_2d = np.ones((3, 3))
+array_2d[:2, :2]
+array_2d[..., 0]
+array_2d[:2, :2] = 0
+
+# Other special methods
+len(array)
+str(array)
+array_scalar = np.array(1)
+int(array_scalar)
+float(array_scalar)
+# currently does not work due to https://github.com/python/typeshed/issues/1904
+# complex(array_scalar)
+bytes(array_scalar)
+operator.index(array_scalar)
+bool(array_scalar)
+
+# comparisons
+array < 1
+array <= 1
+array == 1
+array != 1
+array > 1
+array >= 1
+1 < array
+1 <= array
+1 == array
+1 != array
+1 > array
+1 >= array
+
+# binary arithmetic
+array + 1
+1 + array
+array += 1
+
+array - 1
+1 - array
+array -= 1
+
+array * 1
+1 * array
+array *= 1
+
+nonzero_array = np.array([1, 2])
+array / 1
+1 / nonzero_array
+float_array = np.array([1.0, 2.0])
+float_array /= 1
+
+array // 1
+1 // nonzero_array
+array //= 1
+
+array % 1
+1 % nonzero_array
+array %= 1
+
+divmod(array, 1)
+divmod(1, nonzero_array)
+
+array ** 1
+1 ** array
+array **= 1
+
+array << 1
+1 << array
+array <<= 1
+
+array >> 1
+1 >> array
+array >>= 1
+
+array & 1
+1 & array
+array &= 1
+
+array ^ 1
+1 ^ array
+array ^= 1
+
+array | 1
+1 | array
+array |= 1
+
+# unary arithmetic
+-array
++array
+abs(array)
+~array
+
+# Other methods
+np.array([1, 2]).transpose()
diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/simple_py3.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/simple_py3.py
new file mode 100644
index 0000000000000000000000000000000000000000..e7a3a8f3dbc80823b966705e8802def2ed85da11
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/simple_py3.py
@@ -0,0 +1,6 @@
+import numpy as np
+
+array = np.array([1, 2])
+
+# The @ operator is not in python 2
+array @ array
diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/ufunc_config.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/ufunc_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..039ce4d9c723ab3b85605fdf46eb8848c5535176
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/ufunc_config.py
@@ -0,0 +1,50 @@
+"""Typing tests for `numpy.core._ufunc_config`."""
+
+import numpy as np
+
+def func1(a: str, b: int) -> None: ...
+def func2(a: str, b: int, c: float = ...) -> None: ...
+def func3(a: str, b: int) -> int: ...
+
+class Write1:
+    def write(self, a: str) -> None: ...
+
+class Write2:
+    def write(self, a: str, b: int = ...) -> None: ...
+
+class Write3:
+    def write(self, a: str) -> int: ...
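The Write1/Write2/Write3 helpers above exist because `np.seterrcall` accepts two call conventions: a callable taking an error-type string and an integer status flag, or any object exposing a `write` method, which is used when an error mode is set to "log". A minimal runnable sketch of both conventions follows; it is not part of the vendored test file, and the `handler` and `Log` names are invented for illustration:

    import numpy as np

    def handler(err: str, flag: int) -> None:
        # Invoked for each floating-point error whose mode is set to "call".
        print(f"floating-point error: {err} (flag={flag})")

    class Log:
        def write(self, msg: str) -> None:
            # Receives a formatted warning string when the mode is "log".
            print("LOG:", msg)

    old = np.seterr(divide="call")
    np.seterrcall(handler)
    np.array([1.0]) / np.array([0.0])  # handler(...) fires instead of a warning

    np.seterr(divide="log")
    np.seterrcall(Log())
    np.array([1.0]) / np.array([0.0])  # Log.write(...) receives the message
    np.seterr(**old)

The try/finally pattern in the file below restores the process-wide error state after the calls, since `np.seterr`, `np.setbufsize`, and `np.seterrcall` all mutate global configuration.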
+
+
+_err_default = np.geterr()
+_bufsize_default = np.getbufsize()
+_errcall_default = np.geterrcall()
+
+try:
+    np.seterr(all=None)
+    np.seterr(divide="ignore")
+    np.seterr(over="warn")
+    np.seterr(under="call")
+    np.seterr(invalid="raise")
+    np.geterr()
+
+    np.setbufsize(4096)
+    np.getbufsize()
+
+    np.seterrcall(func1)
+    np.seterrcall(func2)
+    np.seterrcall(func3)
+    np.seterrcall(Write1())
+    np.seterrcall(Write2())
+    np.seterrcall(Write3())
+    np.geterrcall()
+
+    with np.errstate(call=func1, all="call"):
+        pass
+    with np.errstate(call=Write1(), divide="log", over="log"):
+        pass
+
+finally:
+    np.seterr(**_err_default)
+    np.setbufsize(_bufsize_default)
+    np.seterrcall(_errcall_default)
diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/ufunclike.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/ufunclike.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac532271192492fa11316a10cff175f48f1d4d49
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/ufunclike.py
@@ -0,0 +1,46 @@
+from __future__ import annotations
+from typing import Any
+import numpy as np
+
+
+class Object:
+    def __ceil__(self) -> Object:
+        return self
+
+    def __floor__(self) -> Object:
+        return self
+
+    def __ge__(self, value: object) -> bool:
+        return True
+
+    def __array__(self) -> np.ndarray[Any, np.dtype[np.object_]]:
+        ret = np.empty((), dtype=object)
+        ret[()] = self
+        return ret
+
+
+AR_LIKE_b = [True, True, False]
+AR_LIKE_u = [np.uint32(1), np.uint32(2), np.uint32(3)]
+AR_LIKE_i = [1, 2, 3]
+AR_LIKE_f = [1.0, 2.0, 3.0]
+AR_LIKE_O = [Object(), Object(), Object()]
+AR_U: np.ndarray[Any, np.dtype[np.str_]] = np.zeros(3, dtype="U5")
+
+np.fix(AR_LIKE_b)
+np.fix(AR_LIKE_u)
+np.fix(AR_LIKE_i)
+np.fix(AR_LIKE_f)
+np.fix(AR_LIKE_O)
+np.fix(AR_LIKE_f, out=AR_U)
+
+np.isposinf(AR_LIKE_b)
+np.isposinf(AR_LIKE_u)
+np.isposinf(AR_LIKE_i)
+np.isposinf(AR_LIKE_f)
+np.isposinf(AR_LIKE_f, out=AR_U)
+
+np.isneginf(AR_LIKE_b)
+np.isneginf(AR_LIKE_u)
+np.isneginf(AR_LIKE_i)
+np.isneginf(AR_LIKE_f)
+np.isneginf(AR_LIKE_f, out=AR_U)
diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/ufuncs.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/ufuncs.py
new file mode 100644
index 0000000000000000000000000000000000000000..b0113dd418ddf351d16dcadf001361e0c2622e33
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/ufuncs.py
@@ -0,0 +1,17 @@
+import numpy as np
+
+np.sin(1)
+np.sin([1, 2, 3])
+np.sin(1, out=np.empty(1))
+np.matmul(np.ones((2, 2, 2)), np.ones((2, 2, 2)), axes=[(0, 1), (0, 1), (0, 1)])
+np.sin(1, signature="D->D")
+np.sin(1, extobj=[16, 1, lambda: None])
+# NOTE: `np.generic` subclasses are not guaranteed to support addition;
+# re-enable this once we can infer the exact return type of `np.sin(...)`.
+# +# np.sin(1) + np.sin(1) +np.sin.types[0] +np.sin.__name__ +np.sin.__doc__ + +np.abs(np.array([1])) diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/warnings_and_errors.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/warnings_and_errors.py new file mode 100644 index 0000000000000000000000000000000000000000..cc5ceb0c926d0342880ceb8b1650404ee7ce1c05 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/pass/warnings_and_errors.py @@ -0,0 +1,7 @@ +import numpy as np + +np.AxisError(1) +np.AxisError(1, ndim=2) +np.AxisError(1, ndim=None) +np.AxisError(1, ndim=2, msg_prefix="error") +np.AxisError(1, ndim=2, msg_prefix=None) diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/arithmetic.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/arithmetic.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..934fd9a25761c2ee01a7417814e6f896716c0f37 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/arithmetic.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/array_constructors.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/array_constructors.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f8ddfee19691a47917263b2708619b4321339f6f Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/array_constructors.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/arrayprint.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/arrayprint.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14d350beb2ae5a8dd79e974a15daf04b96b652bd Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/arrayprint.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/arrayterator.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/arrayterator.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed909b6a93643ddb79eaa218c2eba3f03a456e3a Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/arrayterator.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/bitwise_ops.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/bitwise_ops.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72ffacd176e863c3035ee4bb739101c5a0aae312 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/bitwise_ops.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/comparisons.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/comparisons.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67190af19714e364205c128c1d01eea7029ff36c Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/comparisons.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/constants.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/constants.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..82e47a233903f0e47b21692b457d65f7074418be Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/constants.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/datasource.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/datasource.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bd1789cbb215e9cce5e3dd05e0263458dccda76b Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/datasource.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/dtype.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/dtype.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92b4d38d103715948842241a7320e24595b43936 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/dtype.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/einsumfunc.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/einsumfunc.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b88ae195890f6837663d4a8cefd01283acfff5c Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/einsumfunc.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/flatiter.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/flatiter.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0a2d81bb8057d726b07e1dbb6085e5b0b128616 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/flatiter.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/fromnumeric.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/fromnumeric.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b7d65c81a9f784efe495fda4d0027e7d65969ba Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/fromnumeric.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/index_tricks.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/index_tricks.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..827dc08590f509193627c6f86b51c116c2cb79d9 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/index_tricks.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/lib_utils.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/lib_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a306e5e08085512cb80f7bf65c2e5e16edd5391d Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/lib_utils.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/lib_version.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/lib_version.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..8e042e9354e74f5a8fe2108d81b8b6225ca20b62 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/lib_version.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/mod.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/mod.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7893629952ddaaa221b0a525e5e7f61c495bfb39 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/mod.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/modules.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/modules.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a10a034d036667a057a124f5913681fea7cc23a Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/modules.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/multiarray.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/multiarray.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5d0ba1597c891138a2028fe5940f11201ef05a9 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/multiarray.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/nbit_base_example.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/nbit_base_example.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7fdf7f32e7c7d5aa452003e84e3d0e5519f9299b Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/nbit_base_example.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/ndarray_conversion.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/ndarray_conversion.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1f9ed21ee7718af26a1972bc016f6f6eef505d9 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/ndarray_conversion.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/ndarray_misc.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/ndarray_misc.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea63630d2ae85ce3ed1b1fcca3a2a33029f0bb65 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/ndarray_misc.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/ndarray_shape_manipulation.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/ndarray_shape_manipulation.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b8b85229938ce5575d0526d21f44c9e3fb08c0fb Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/ndarray_shape_manipulation.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/nditer.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/nditer.cpython-39.pyc new file mode 100644 
index 0000000000000000000000000000000000000000..0e44b1db9a910208890cc12638fe86b4cd2505a1 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/nditer.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/numeric.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/numeric.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1d8b52c94c7f721b7959ea611b3dd7b2c6e7b4e6 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/numeric.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/numerictypes.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/numerictypes.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10a1255a5957ef484425a755f3738ac4861ab783 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/numerictypes.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/random.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/random.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a84a029602ac6e56dab14a7a22106441bb30581 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/random.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/scalars.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/scalars.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f9d4983e6a3c4ce46a0cc3f087b83e23fb68879 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/scalars.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/ufunc_config.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/ufunc_config.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c28af3e868b4f4ee8e487db1bc921b844509bc8 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/ufunc_config.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/ufunclike.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/ufunclike.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..23d1c3a9952f098726ae473d47f2363991984767 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/ufunclike.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/ufuncs.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/ufuncs.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1df61da5f4f4b7284f937f80ff9dd22d46ddec7 Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/ufuncs.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/warnings_and_errors.cpython-39.pyc b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/warnings_and_errors.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..48ae4597e71613a6cb6aedb1839e2ef4f226a8ef Binary files /dev/null and b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/__pycache__/warnings_and_errors.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/arithmetic.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/arithmetic.py new file mode 100644 index 0000000000000000000000000000000000000000..227c33225af257e17cda3b188e135599cd772eac --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/arithmetic.py @@ -0,0 +1,546 @@ +from typing import Any, List +import numpy as np +import numpy.typing as npt + +# Can't directly import `np.float128` as it is not available on all platforms +f16: np.floating[npt._128Bit] + +c16 = np.complex128() +f8 = np.float64() +i8 = np.int64() +u8 = np.uint64() + +c8 = np.complex64() +f4 = np.float32() +i4 = np.int32() +u4 = np.uint32() + +dt = np.datetime64(0, "D") +td = np.timedelta64(0, "D") + +b_ = np.bool_() + +b = bool() +c = complex() +f = float() +i = int() + +AR_b: np.ndarray[Any, np.dtype[np.bool_]] +AR_u: np.ndarray[Any, np.dtype[np.uint32]] +AR_i: np.ndarray[Any, np.dtype[np.int64]] +AR_f: np.ndarray[Any, np.dtype[np.float64]] +AR_c: np.ndarray[Any, np.dtype[np.complex128]] +AR_m: np.ndarray[Any, np.dtype[np.timedelta64]] +AR_M: np.ndarray[Any, np.dtype[np.datetime64]] +AR_O: np.ndarray[Any, np.dtype[np.object_]] + +AR_LIKE_b: List[bool] +AR_LIKE_u: List[np.uint32] +AR_LIKE_i: List[int] +AR_LIKE_f: List[float] +AR_LIKE_c: List[complex] +AR_LIKE_m: List[np.timedelta64] +AR_LIKE_M: List[np.datetime64] +AR_LIKE_O: List[np.object_] + +# Array subtraction + +reveal_type(AR_b - AR_LIKE_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]] +reveal_type(AR_b - AR_LIKE_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_b - AR_LIKE_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_b - AR_LIKE_c) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_b - AR_LIKE_m) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(AR_b - AR_LIKE_O) # E: Any + +reveal_type(AR_LIKE_u - AR_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]] +reveal_type(AR_LIKE_i - AR_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_LIKE_f - AR_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_LIKE_c - AR_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_LIKE_m - AR_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(AR_LIKE_M - AR_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]] +reveal_type(AR_LIKE_O - AR_b) # E: Any + +reveal_type(AR_u - AR_LIKE_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]] +reveal_type(AR_u - AR_LIKE_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]] +reveal_type(AR_u - AR_LIKE_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_u - AR_LIKE_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_u - AR_LIKE_c) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_u - AR_LIKE_m) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(AR_u - AR_LIKE_O) # E: Any + +reveal_type(AR_LIKE_b - AR_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]] +reveal_type(AR_LIKE_u - AR_u) # E: numpy.ndarray[Any, 
numpy.dtype[numpy.unsignedinteger[Any]]] +reveal_type(AR_LIKE_i - AR_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_LIKE_f - AR_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_LIKE_c - AR_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_LIKE_m - AR_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(AR_LIKE_M - AR_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]] +reveal_type(AR_LIKE_O - AR_u) # E: Any + +reveal_type(AR_i - AR_LIKE_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_i - AR_LIKE_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_i - AR_LIKE_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_i - AR_LIKE_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_i - AR_LIKE_c) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_i - AR_LIKE_m) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(AR_i - AR_LIKE_O) # E: Any + +reveal_type(AR_LIKE_b - AR_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_LIKE_u - AR_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_LIKE_i - AR_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_LIKE_f - AR_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_LIKE_c - AR_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_LIKE_m - AR_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(AR_LIKE_M - AR_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]] +reveal_type(AR_LIKE_O - AR_i) # E: Any + +reveal_type(AR_f - AR_LIKE_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_f - AR_LIKE_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_f - AR_LIKE_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_f - AR_LIKE_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_f - AR_LIKE_c) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_f - AR_LIKE_O) # E: Any + +reveal_type(AR_LIKE_b - AR_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_LIKE_u - AR_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_LIKE_i - AR_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_LIKE_f - AR_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_LIKE_c - AR_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_LIKE_O - AR_f) # E: Any + +reveal_type(AR_c - AR_LIKE_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_c - AR_LIKE_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_c - AR_LIKE_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_c - AR_LIKE_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_c - AR_LIKE_c) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_c - AR_LIKE_O) # E: Any + +reveal_type(AR_LIKE_b - AR_c) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_LIKE_u - AR_c) # E: numpy.ndarray[Any, 
numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_LIKE_i - AR_c) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_LIKE_f - AR_c) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_LIKE_c - AR_c) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_LIKE_O - AR_c) # E: Any + +reveal_type(AR_m - AR_LIKE_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(AR_m - AR_LIKE_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(AR_m - AR_LIKE_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(AR_m - AR_LIKE_m) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(AR_m - AR_LIKE_O) # E: Any + +reveal_type(AR_LIKE_b - AR_m) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(AR_LIKE_u - AR_m) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(AR_LIKE_i - AR_m) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(AR_LIKE_m - AR_m) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(AR_LIKE_M - AR_m) # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]] +reveal_type(AR_LIKE_O - AR_m) # E: Any + +reveal_type(AR_M - AR_LIKE_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]] +reveal_type(AR_M - AR_LIKE_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]] +reveal_type(AR_M - AR_LIKE_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]] +reveal_type(AR_M - AR_LIKE_m) # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]] +reveal_type(AR_M - AR_LIKE_M) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(AR_M - AR_LIKE_O) # E: Any + +reveal_type(AR_LIKE_M - AR_M) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(AR_LIKE_O - AR_M) # E: Any + +reveal_type(AR_O - AR_LIKE_b) # E: Any +reveal_type(AR_O - AR_LIKE_u) # E: Any +reveal_type(AR_O - AR_LIKE_i) # E: Any +reveal_type(AR_O - AR_LIKE_f) # E: Any +reveal_type(AR_O - AR_LIKE_c) # E: Any +reveal_type(AR_O - AR_LIKE_m) # E: Any +reveal_type(AR_O - AR_LIKE_M) # E: Any +reveal_type(AR_O - AR_LIKE_O) # E: Any + +reveal_type(AR_LIKE_b - AR_O) # E: Any +reveal_type(AR_LIKE_u - AR_O) # E: Any +reveal_type(AR_LIKE_i - AR_O) # E: Any +reveal_type(AR_LIKE_f - AR_O) # E: Any +reveal_type(AR_LIKE_c - AR_O) # E: Any +reveal_type(AR_LIKE_m - AR_O) # E: Any +reveal_type(AR_LIKE_M - AR_O) # E: Any +reveal_type(AR_LIKE_O - AR_O) # E: Any + +# Array floor division + +reveal_type(AR_b // AR_LIKE_b) # E: numpy.ndarray[Any, numpy.dtype[{int8}]] +reveal_type(AR_b // AR_LIKE_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]] +reveal_type(AR_b // AR_LIKE_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_b // AR_LIKE_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_b // AR_LIKE_c) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_b // AR_LIKE_O) # E: Any + +reveal_type(AR_LIKE_b // AR_b) # E: numpy.ndarray[Any, numpy.dtype[{int8}]] +reveal_type(AR_LIKE_u // AR_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]] +reveal_type(AR_LIKE_i // AR_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_LIKE_f // AR_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_LIKE_c // AR_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_LIKE_O // AR_b) # E: Any + +reveal_type(AR_u 
// AR_LIKE_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]] +reveal_type(AR_u // AR_LIKE_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]] +reveal_type(AR_u // AR_LIKE_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_u // AR_LIKE_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_u // AR_LIKE_c) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_u // AR_LIKE_O) # E: Any + +reveal_type(AR_LIKE_b // AR_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]] +reveal_type(AR_LIKE_u // AR_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]] +reveal_type(AR_LIKE_i // AR_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_LIKE_f // AR_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_LIKE_c // AR_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_LIKE_m // AR_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(AR_LIKE_O // AR_u) # E: Any + +reveal_type(AR_i // AR_LIKE_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_i // AR_LIKE_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_i // AR_LIKE_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_i // AR_LIKE_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_i // AR_LIKE_c) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_i // AR_LIKE_O) # E: Any + +reveal_type(AR_LIKE_b // AR_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_LIKE_u // AR_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_LIKE_i // AR_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] +reveal_type(AR_LIKE_f // AR_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_LIKE_c // AR_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_LIKE_m // AR_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(AR_LIKE_O // AR_i) # E: Any + +reveal_type(AR_f // AR_LIKE_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_f // AR_LIKE_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_f // AR_LIKE_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_f // AR_LIKE_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_f // AR_LIKE_c) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_f // AR_LIKE_O) # E: Any + +reveal_type(AR_LIKE_b // AR_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_LIKE_u // AR_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_LIKE_i // AR_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_LIKE_f // AR_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(AR_LIKE_c // AR_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_LIKE_m // AR_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(AR_LIKE_O // AR_f) # E: Any + +reveal_type(AR_c // AR_LIKE_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_c // AR_LIKE_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] 
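A note on the pattern in these floor-division tests: the AR_LIKE_* operands are plain Python lists, so when the list appears on the left its own operator fails and Python falls back to the ndarray operand's reflected method (__rsub__, __rfloordiv__, ...), which is why mypy still reveals an ndarray result; with the array on the left, its normal operator handles the list directly. A minimal runnable sketch of the same promotion at runtime (the array values are illustrative, not taken from the test file):

    import numpy as np

    AR_u = np.array([1, 2], dtype=np.uint32)
    res = [True, False] // AR_u   # list // ndarray resolves via ndarray.__rfloordiv__
    print(res.dtype)              # uint32: bool promotes to the unsigned integer dtype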
+reveal_type(AR_c // AR_LIKE_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_c // AR_LIKE_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_c // AR_LIKE_c) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_c // AR_LIKE_O) # E: Any + +reveal_type(AR_LIKE_b // AR_c) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_LIKE_u // AR_c) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_LIKE_i // AR_c) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_LIKE_f // AR_c) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_LIKE_c // AR_c) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]] +reveal_type(AR_LIKE_O // AR_c) # E: Any + +reveal_type(AR_m // AR_LIKE_u) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(AR_m // AR_LIKE_i) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(AR_m // AR_LIKE_f) # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]] +reveal_type(AR_m // AR_LIKE_m) # E: numpy.ndarray[Any, numpy.dtype[{int64}]] +reveal_type(AR_m // AR_LIKE_O) # E: Any + +reveal_type(AR_LIKE_m // AR_m) # E: numpy.ndarray[Any, numpy.dtype[{int64}]] +reveal_type(AR_LIKE_O // AR_m) # E: Any + +reveal_type(AR_O // AR_LIKE_b) # E: Any +reveal_type(AR_O // AR_LIKE_u) # E: Any +reveal_type(AR_O // AR_LIKE_i) # E: Any +reveal_type(AR_O // AR_LIKE_f) # E: Any +reveal_type(AR_O // AR_LIKE_c) # E: Any +reveal_type(AR_O // AR_LIKE_m) # E: Any +reveal_type(AR_O // AR_LIKE_M) # E: Any +reveal_type(AR_O // AR_LIKE_O) # E: Any + +reveal_type(AR_LIKE_b // AR_O) # E: Any +reveal_type(AR_LIKE_u // AR_O) # E: Any +reveal_type(AR_LIKE_i // AR_O) # E: Any +reveal_type(AR_LIKE_f // AR_O) # E: Any +reveal_type(AR_LIKE_c // AR_O) # E: Any +reveal_type(AR_LIKE_m // AR_O) # E: Any +reveal_type(AR_LIKE_M // AR_O) # E: Any +reveal_type(AR_LIKE_O // AR_O) # E: Any + +# unary ops + +reveal_type(-f16) # E: {float128} +reveal_type(-c16) # E: {complex128} +reveal_type(-c8) # E: {complex64} +reveal_type(-f8) # E: {float64} +reveal_type(-f4) # E: {float32} +reveal_type(-i8) # E: {int64} +reveal_type(-i4) # E: {int32} +reveal_type(-u8) # E: {uint64} +reveal_type(-u4) # E: {uint32} +reveal_type(-td) # E: numpy.timedelta64 +reveal_type(-AR_f) # E: Any + +reveal_type(+f16) # E: {float128} +reveal_type(+c16) # E: {complex128} +reveal_type(+c8) # E: {complex64} +reveal_type(+f8) # E: {float64} +reveal_type(+f4) # E: {float32} +reveal_type(+i8) # E: {int64} +reveal_type(+i4) # E: {int32} +reveal_type(+u8) # E: {uint64} +reveal_type(+u4) # E: {uint32} +reveal_type(+td) # E: numpy.timedelta64 +reveal_type(+AR_f) # E: Any + +reveal_type(abs(f16)) # E: {float128} +reveal_type(abs(c16)) # E: {float64} +reveal_type(abs(c8)) # E: {float32} +reveal_type(abs(f8)) # E: {float64} +reveal_type(abs(f4)) # E: {float32} +reveal_type(abs(i8)) # E: {int64} +reveal_type(abs(i4)) # E: {int32} +reveal_type(abs(u8)) # E: {uint64} +reveal_type(abs(u4)) # E: {uint32} +reveal_type(abs(td)) # E: numpy.timedelta64 +reveal_type(abs(b_)) # E: numpy.bool_ +reveal_type(abs(AR_f)) # E: Any + +# Time structures + +reveal_type(dt + td) # E: numpy.datetime64 +reveal_type(dt + i) # E: numpy.datetime64 +reveal_type(dt + i4) # E: numpy.datetime64 +reveal_type(dt + i8) # E: numpy.datetime64 +reveal_type(dt - dt) # E: numpy.timedelta64 +reveal_type(dt - i) # E: 
numpy.datetime64 +reveal_type(dt - i4) # E: numpy.datetime64 +reveal_type(dt - i8) # E: numpy.datetime64 + +reveal_type(td + td) # E: numpy.timedelta64 +reveal_type(td + i) # E: numpy.timedelta64 +reveal_type(td + i4) # E: numpy.timedelta64 +reveal_type(td + i8) # E: numpy.timedelta64 +reveal_type(td - td) # E: numpy.timedelta64 +reveal_type(td - i) # E: numpy.timedelta64 +reveal_type(td - i4) # E: numpy.timedelta64 +reveal_type(td - i8) # E: numpy.timedelta64 +reveal_type(td / f) # E: numpy.timedelta64 +reveal_type(td / f4) # E: numpy.timedelta64 +reveal_type(td / f8) # E: numpy.timedelta64 +reveal_type(td / td) # E: {float64} +reveal_type(td // td) # E: {int64} + +# boolean + +reveal_type(b_ / b) # E: {float64} +reveal_type(b_ / b_) # E: {float64} +reveal_type(b_ / i) # E: {float64} +reveal_type(b_ / i8) # E: {float64} +reveal_type(b_ / i4) # E: {float64} +reveal_type(b_ / u8) # E: {float64} +reveal_type(b_ / u4) # E: {float64} +reveal_type(b_ / f) # E: {float64} +reveal_type(b_ / f16) # E: {float128} +reveal_type(b_ / f8) # E: {float64} +reveal_type(b_ / f4) # E: {float32} +reveal_type(b_ / c) # E: {complex128} +reveal_type(b_ / c16) # E: {complex128} +reveal_type(b_ / c8) # E: {complex64} + +reveal_type(b / b_) # E: {float64} +reveal_type(b_ / b_) # E: {float64} +reveal_type(i / b_) # E: {float64} +reveal_type(i8 / b_) # E: {float64} +reveal_type(i4 / b_) # E: {float64} +reveal_type(u8 / b_) # E: {float64} +reveal_type(u4 / b_) # E: {float64} +reveal_type(f / b_) # E: {float64} +reveal_type(f16 / b_) # E: {float128} +reveal_type(f8 / b_) # E: {float64} +reveal_type(f4 / b_) # E: {float32} +reveal_type(c / b_) # E: {complex128} +reveal_type(c16 / b_) # E: {complex128} +reveal_type(c8 / b_) # E: {complex64} + +# Complex + +reveal_type(c16 + f16) # E: {complex256} +reveal_type(c16 + c16) # E: {complex128} +reveal_type(c16 + f8) # E: {complex128} +reveal_type(c16 + i8) # E: {complex128} +reveal_type(c16 + c8) # E: {complex128} +reveal_type(c16 + f4) # E: {complex128} +reveal_type(c16 + i4) # E: {complex128} +reveal_type(c16 + b_) # E: {complex128} +reveal_type(c16 + b) # E: {complex128} +reveal_type(c16 + c) # E: {complex128} +reveal_type(c16 + f) # E: {complex128} +reveal_type(c16 + i) # E: {complex128} +reveal_type(c16 + AR_f) # E: Any + +reveal_type(f16 + c16) # E: {complex256} +reveal_type(c16 + c16) # E: {complex128} +reveal_type(f8 + c16) # E: {complex128} +reveal_type(i8 + c16) # E: {complex128} +reveal_type(c8 + c16) # E: {complex128} +reveal_type(f4 + c16) # E: {complex128} +reveal_type(i4 + c16) # E: {complex128} +reveal_type(b_ + c16) # E: {complex128} +reveal_type(b + c16) # E: {complex128} +reveal_type(c + c16) # E: {complex128} +reveal_type(f + c16) # E: {complex128} +reveal_type(i + c16) # E: {complex128} +reveal_type(AR_f + c16) # E: Any + +reveal_type(c8 + f16) # E: {complex256} +reveal_type(c8 + c16) # E: {complex128} +reveal_type(c8 + f8) # E: {complex128} +reveal_type(c8 + i8) # E: {complex128} +reveal_type(c8 + c8) # E: {complex64} +reveal_type(c8 + f4) # E: {complex64} +reveal_type(c8 + i4) # E: {complex64} +reveal_type(c8 + b_) # E: {complex64} +reveal_type(c8 + b) # E: {complex64} +reveal_type(c8 + c) # E: {complex128} +reveal_type(c8 + f) # E: {complex128} +reveal_type(c8 + i) # E: numpy.complexfloating[{_NBitInt}, {_NBitInt}] +reveal_type(c8 + AR_f) # E: Any + +reveal_type(f16 + c8) # E: {complex256} +reveal_type(c16 + c8) # E: {complex128} +reveal_type(f8 + c8) # E: {complex128} +reveal_type(i8 + c8) # E: {complex128} +reveal_type(c8 + c8) # E: {complex64} 
+reveal_type(f4 + c8) # E: {complex64} +reveal_type(i4 + c8) # E: {complex64} +reveal_type(b_ + c8) # E: {complex64} +reveal_type(b + c8) # E: {complex64} +reveal_type(c + c8) # E: {complex128} +reveal_type(f + c8) # E: {complex128} +reveal_type(i + c8) # E: numpy.complexfloating[{_NBitInt}, {_NBitInt}] +reveal_type(AR_f + c8) # E: Any + +# Float + +reveal_type(f8 + f16) # E: {float128} +reveal_type(f8 + f8) # E: {float64} +reveal_type(f8 + i8) # E: {float64} +reveal_type(f8 + f4) # E: {float64} +reveal_type(f8 + i4) # E: {float64} +reveal_type(f8 + b_) # E: {float64} +reveal_type(f8 + b) # E: {float64} +reveal_type(f8 + c) # E: {complex128} +reveal_type(f8 + f) # E: {float64} +reveal_type(f8 + i) # E: {float64} +reveal_type(f8 + AR_f) # E: Any + +reveal_type(f16 + f8) # E: {float128} +reveal_type(f8 + f8) # E: {float64} +reveal_type(i8 + f8) # E: {float64} +reveal_type(f4 + f8) # E: {float64} +reveal_type(i4 + f8) # E: {float64} +reveal_type(b_ + f8) # E: {float64} +reveal_type(b + f8) # E: {float64} +reveal_type(c + f8) # E: {complex128} +reveal_type(f + f8) # E: {float64} +reveal_type(i + f8) # E: {float64} +reveal_type(AR_f + f8) # E: Any + +reveal_type(f4 + f16) # E: {float128} +reveal_type(f4 + f8) # E: {float64} +reveal_type(f4 + i8) # E: {float64} +reveal_type(f4 + f4) # E: {float32} +reveal_type(f4 + i4) # E: {float32} +reveal_type(f4 + b_) # E: {float32} +reveal_type(f4 + b) # E: {float32} +reveal_type(f4 + c) # E: {complex128} +reveal_type(f4 + f) # E: {float64} +reveal_type(f4 + i) # E: numpy.floating[{_NBitInt}] +reveal_type(f4 + AR_f) # E: Any + +reveal_type(f16 + f4) # E: {float128} +reveal_type(f8 + f4) # E: {float64} +reveal_type(i8 + f4) # E: {float64} +reveal_type(f4 + f4) # E: {float32} +reveal_type(i4 + f4) # E: {float32} +reveal_type(b_ + f4) # E: {float32} +reveal_type(b + f4) # E: {float32} +reveal_type(c + f4) # E: {complex128} +reveal_type(f + f4) # E: {float64} +reveal_type(i + f4) # E: numpy.floating[{_NBitInt}] +reveal_type(AR_f + f4) # E: Any + +# Int + +reveal_type(i8 + i8) # E: {int64} +reveal_type(i8 + u8) # E: Any +reveal_type(i8 + i4) # E: {int64} +reveal_type(i8 + u4) # E: Any +reveal_type(i8 + b_) # E: {int64} +reveal_type(i8 + b) # E: {int64} +reveal_type(i8 + c) # E: {complex128} +reveal_type(i8 + f) # E: {float64} +reveal_type(i8 + i) # E: {int64} +reveal_type(i8 + AR_f) # E: Any + +reveal_type(u8 + u8) # E: {uint64} +reveal_type(u8 + i4) # E: Any +reveal_type(u8 + u4) # E: {uint64} +reveal_type(u8 + b_) # E: {uint64} +reveal_type(u8 + b) # E: {uint64} +reveal_type(u8 + c) # E: {complex128} +reveal_type(u8 + f) # E: {float64} +reveal_type(u8 + i) # E: Any +reveal_type(u8 + AR_f) # E: Any + +reveal_type(i8 + i8) # E: {int64} +reveal_type(u8 + i8) # E: Any +reveal_type(i4 + i8) # E: {int64} +reveal_type(u4 + i8) # E: Any +reveal_type(b_ + i8) # E: {int64} +reveal_type(b + i8) # E: {int64} +reveal_type(c + i8) # E: {complex128} +reveal_type(f + i8) # E: {float64} +reveal_type(i + i8) # E: {int64} +reveal_type(AR_f + i8) # E: Any + +reveal_type(u8 + u8) # E: {uint64} +reveal_type(i4 + u8) # E: Any +reveal_type(u4 + u8) # E: {uint64} +reveal_type(b_ + u8) # E: {uint64} +reveal_type(b + u8) # E: {uint64} +reveal_type(c + u8) # E: {complex128} +reveal_type(f + u8) # E: {float64} +reveal_type(i + u8) # E: Any +reveal_type(AR_f + u8) # E: Any + +reveal_type(i4 + i8) # E: {int64} +reveal_type(i4 + i4) # E: {int32} +reveal_type(i4 + i) # E: {int_} +reveal_type(i4 + b_) # E: {int32} +reveal_type(i4 + b) # E: {int32} +reveal_type(i4 + AR_f) # E: Any + 
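A note on the Any results for mixed signed/unsigned scalars in the Int section above: NumPy has no integer dtype that can hold both the int64 and uint64 ranges, so at runtime that pair promotes to float64, and the stubs simply fall back to Any for every signed/unsigned mix, even where runtime promotion is well defined. A quick runtime contrast (a sketch, not part of the vendored file):

    import numpy as np

    print((np.int64(1) + np.uint64(1)).dtype)  # float64: no common integer dtype exists
    print((np.int64(1) + np.uint32(1)).dtype)  # int64: uint32 still fits inside int64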
+reveal_type(u4 + i8) # E: Any +reveal_type(u4 + i4) # E: Any +reveal_type(u4 + u8) # E: {uint64} +reveal_type(u4 + u4) # E: {uint32} +reveal_type(u4 + i) # E: Any +reveal_type(u4 + b_) # E: {uint32} +reveal_type(u4 + b) # E: {uint32} +reveal_type(u4 + AR_f) # E: Any + +reveal_type(i8 + i4) # E: {int64} +reveal_type(i4 + i4) # E: {int32} +reveal_type(i + i4) # E: {int_} +reveal_type(b_ + i4) # E: {int32} +reveal_type(b + i4) # E: {int32} +reveal_type(AR_f + i4) # E: Any + +reveal_type(i8 + u4) # E: Any +reveal_type(i4 + u4) # E: Any +reveal_type(u8 + u4) # E: {uint64} +reveal_type(u4 + u4) # E: {uint32} +reveal_type(b_ + u4) # E: {uint32} +reveal_type(b + u4) # E: {uint32} +reveal_type(i + u4) # E: Any +reveal_type(AR_f + u4) # E: Any diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/array_constructors.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/array_constructors.py new file mode 100644 index 0000000000000000000000000000000000000000..ea8b19a5ddf35db4a9fb5e602b0fe39ee57b101d --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/array_constructors.py @@ -0,0 +1,102 @@ +from typing import List, Any +import numpy as np + +class SubClass(np.ndarray): ... + +i8: np.int64 + +A: np.ndarray +B: SubClass +C: List[int] + +def func(i: int, j: int, **kwargs: Any) -> SubClass: ... + +reveal_type(np.asarray(A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.asarray(B)) # E: numpy.ndarray[Any, Any] +reveal_type(np.asarray(C)) # E: numpy.ndarray[Any, Any] + +reveal_type(np.asanyarray(A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.asanyarray(B)) # E: SubClass +reveal_type(np.asanyarray(B, dtype=int)) # E: numpy.ndarray[Any, Any] +reveal_type(np.asanyarray(C)) # E: numpy.ndarray[Any, Any] + +reveal_type(np.ascontiguousarray(A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.ascontiguousarray(B)) # E: numpy.ndarray[Any, Any] +reveal_type(np.ascontiguousarray(C)) # E: numpy.ndarray[Any, Any] + +reveal_type(np.asfortranarray(A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.asfortranarray(B)) # E: numpy.ndarray[Any, Any] +reveal_type(np.asfortranarray(C)) # E: numpy.ndarray[Any, Any] + +reveal_type(np.require(A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.require(B)) # E: SubClass +reveal_type(np.require(B, requirements=None)) # E: SubClass +reveal_type(np.require(B, dtype=int)) # E: numpy.ndarray[Any, Any] +reveal_type(np.require(B, requirements="E")) # E: numpy.ndarray[Any, Any] +reveal_type(np.require(B, requirements=["ENSUREARRAY"])) # E: numpy.ndarray[Any, Any] +reveal_type(np.require(B, requirements={"F", "E"})) # E: numpy.ndarray[Any, Any] +reveal_type(np.require(B, requirements=["C", "OWNDATA"])) # E: SubClass +reveal_type(np.require(B, requirements="W")) # E: SubClass +reveal_type(np.require(B, requirements="A")) # E: SubClass +reveal_type(np.require(C)) # E: numpy.ndarray[Any, Any] + +reveal_type(np.linspace(0, 10)) # E: numpy.ndarray[Any, Any] +reveal_type(np.linspace(0, 10, retstep=True)) # E: Tuple[numpy.ndarray[Any, Any], Any] +reveal_type(np.logspace(0, 10)) # E: numpy.ndarray[Any, Any] +reveal_type(np.geomspace(1, 10)) # E: numpy.ndarray[Any, Any] + +reveal_type(np.zeros_like(A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.zeros_like(C)) # E: numpy.ndarray[Any, Any] +reveal_type(np.zeros_like(B)) # E: SubClass +reveal_type(np.zeros_like(B, dtype=np.int64)) # E: numpy.ndarray[Any, Any] + +reveal_type(np.ones_like(A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.ones_like(C)) # E: numpy.ndarray[Any, Any] +reveal_type(np.ones_like(B)) # E: 
SubClass +reveal_type(np.ones_like(B, dtype=np.int64)) # E: numpy.ndarray[Any, Any] + +reveal_type(np.empty_like(A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.empty_like(C)) # E: numpy.ndarray[Any, Any] +reveal_type(np.empty_like(B)) # E: SubClass +reveal_type(np.empty_like(B, dtype=np.int64)) # E: numpy.ndarray[Any, Any] + +reveal_type(np.full_like(A, i8)) # E: numpy.ndarray[Any, Any] +reveal_type(np.full_like(C, i8)) # E: numpy.ndarray[Any, Any] +reveal_type(np.full_like(B, i8)) # E: SubClass +reveal_type(np.full_like(B, i8, dtype=np.int64)) # E: numpy.ndarray[Any, Any] + +reveal_type(np.ones(1)) # E: numpy.ndarray[Any, Any] +reveal_type(np.ones([1, 1, 1])) # E: numpy.ndarray[Any, Any] + +reveal_type(np.full(1, i8)) # E: numpy.ndarray[Any, Any] +reveal_type(np.full([1, 1, 1], i8)) # E: numpy.ndarray[Any, Any] + +reveal_type(np.indices([1, 2, 3])) # E: numpy.ndarray[Any, Any] +reveal_type(np.indices([1, 2, 3], sparse=True)) # E: tuple[numpy.ndarray[Any, Any]] + +reveal_type(np.fromfunction(func, (3, 5))) # E: SubClass + +reveal_type(np.identity(10)) # E: numpy.ndarray[Any, Any] + +reveal_type(np.atleast_1d(A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.atleast_1d(C)) # E: numpy.ndarray[Any, Any] +reveal_type(np.atleast_1d(A, A)) # E: list[numpy.ndarray[Any, Any]] +reveal_type(np.atleast_1d(A, C)) # E: list[numpy.ndarray[Any, Any]] +reveal_type(np.atleast_1d(C, C)) # E: list[numpy.ndarray[Any, Any]] + +reveal_type(np.atleast_2d(A)) # E: numpy.ndarray[Any, Any] + +reveal_type(np.atleast_3d(A)) # E: numpy.ndarray[Any, Any] + +reveal_type(np.vstack([A, A])) # E: numpy.ndarray[Any, Any] +reveal_type(np.vstack([A, C])) # E: numpy.ndarray[Any, Any] +reveal_type(np.vstack([C, C])) # E: numpy.ndarray[Any, Any] + +reveal_type(np.hstack([A, A])) # E: numpy.ndarray[Any, Any] + +reveal_type(np.stack([A, A])) # E: numpy.ndarray[Any, Any] +reveal_type(np.stack([A, A], axis=0)) # E: numpy.ndarray[Any, Any] +reveal_type(np.stack([A, A], out=B)) # E: SubClass + +reveal_type(np.block([[A, A], [A, A]])) # E: numpy.ndarray[Any, Any] +reveal_type(np.block(C)) # E: numpy.ndarray[Any, Any] diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/arrayprint.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/arrayprint.py new file mode 100644 index 0000000000000000000000000000000000000000..9c09fde202d2e62a02922351f953e974b251892e --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/arrayprint.py @@ -0,0 +1,19 @@ +from typing import Any, Callable +import numpy as np + +AR: np.ndarray[Any, Any] +func_float: Callable[[np.floating[Any]], str] +func_int: Callable[[np.integer[Any]], str] + +reveal_type(np.get_printoptions()) # E: TypedDict +reveal_type(np.array2string( # E: str + AR, formatter={'float_kind': func_float, 'int_kind': func_int} +)) +reveal_type(np.format_float_scientific(1.0)) # E: str +reveal_type(np.format_float_positional(1)) # E: str +reveal_type(np.array_repr(AR)) # E: str +reveal_type(np.array_str(AR)) # E: str + +reveal_type(np.printoptions()) # E: contextlib._GeneratorContextManager +with np.printoptions() as dct: + reveal_type(dct) # E: TypedDict diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/arrayterator.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/arrayterator.py new file mode 100644 index 0000000000000000000000000000000000000000..ca44fac969f01be26487b77eab1084aeaf3e6678 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/arrayterator.py @@ -0,0 +1,24 @@ +from typing import Any +import numpy as np 
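A note on np.lib.Arrayterator, instantiated just below as ar_iter = np.lib.Arrayterator(AR_i8): it wraps an array and iterates over it in buffered blocks of at most buf_size elements, and indexing it returns another Arrayterator rather than an ndarray, which is what the reveal lines that follow check. A short runnable sketch (the shape and buffer size are illustrative):

    import numpy as np

    a = np.arange(24).reshape(2, 3, 4)
    it = np.lib.Arrayterator(a, buf_size=5)
    for block in it:                  # each block is an ndarray of at most 5 elements
        print(block.size)
    print(sum(1 for _ in it.flat))    # 24: .flat is a generator yielding the scalars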
+ +AR_i8: np.ndarray[Any, np.dtype[np.int64]] +ar_iter = np.lib.Arrayterator(AR_i8) + +reveal_type(ar_iter.var) # E: numpy.ndarray[Any, numpy.dtype[{int64}]] +reveal_type(ar_iter.buf_size) # E: Union[None, builtins.int] +reveal_type(ar_iter.start) # E: builtins.list[builtins.int] +reveal_type(ar_iter.stop) # E: builtins.list[builtins.int] +reveal_type(ar_iter.step) # E: builtins.list[builtins.int] +reveal_type(ar_iter.shape) # E: builtins.tuple[builtins.int] +reveal_type(ar_iter.flat) # E: typing.Generator[{int64}, None, None] + +reveal_type(ar_iter.__array__()) # E: numpy.ndarray[Any, numpy.dtype[{int64}]] + +for i in ar_iter: + reveal_type(i) # E: numpy.ndarray[Any, numpy.dtype[{int64}]] + +reveal_type(ar_iter[0]) # E: numpy.lib.arrayterator.Arrayterator[Any, numpy.dtype[{int64}]] +reveal_type(ar_iter[...]) # E: numpy.lib.arrayterator.Arrayterator[Any, numpy.dtype[{int64}]] +reveal_type(ar_iter[:]) # E: numpy.lib.arrayterator.Arrayterator[Any, numpy.dtype[{int64}]] +reveal_type(ar_iter[0, 0, 0]) # E: numpy.lib.arrayterator.Arrayterator[Any, numpy.dtype[{int64}]] +reveal_type(ar_iter[..., 0, :]) # E: numpy.lib.arrayterator.Arrayterator[Any, numpy.dtype[{int64}]] diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/bitwise_ops.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/bitwise_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..fa6dcd98211f6be602e362d3085114d2681bf382 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/bitwise_ops.py @@ -0,0 +1,131 @@ +import numpy as np + +i8 = np.int64(1) +u8 = np.uint64(1) + +i4 = np.int32(1) +u4 = np.uint32(1) + +b_ = np.bool_(1) + +b = bool(1) +i = int(1) + +AR = np.array([0, 1, 2], dtype=np.int32) +AR.setflags(write=False) + + +reveal_type(i8 << i8) # E: {int64} +reveal_type(i8 >> i8) # E: {int64} +reveal_type(i8 | i8) # E: {int64} +reveal_type(i8 ^ i8) # E: {int64} +reveal_type(i8 & i8) # E: {int64} + +reveal_type(i8 << AR) # E: Any +reveal_type(i8 >> AR) # E: Any +reveal_type(i8 | AR) # E: Any +reveal_type(i8 ^ AR) # E: Any +reveal_type(i8 & AR) # E: Any + +reveal_type(i4 << i4) # E: {int32} +reveal_type(i4 >> i4) # E: {int32} +reveal_type(i4 | i4) # E: {int32} +reveal_type(i4 ^ i4) # E: {int32} +reveal_type(i4 & i4) # E: {int32} + +reveal_type(i8 << i4) # E: {int64} +reveal_type(i8 >> i4) # E: {int64} +reveal_type(i8 | i4) # E: {int64} +reveal_type(i8 ^ i4) # E: {int64} +reveal_type(i8 & i4) # E: {int64} + +reveal_type(i8 << i) # E: {int64} +reveal_type(i8 >> i) # E: {int64} +reveal_type(i8 | i) # E: {int64} +reveal_type(i8 ^ i) # E: {int64} +reveal_type(i8 & i) # E: {int64} + +reveal_type(i8 << b_) # E: {int64} +reveal_type(i8 >> b_) # E: {int64} +reveal_type(i8 | b_) # E: {int64} +reveal_type(i8 ^ b_) # E: {int64} +reveal_type(i8 & b_) # E: {int64} + +reveal_type(i8 << b) # E: {int64} +reveal_type(i8 >> b) # E: {int64} +reveal_type(i8 | b) # E: {int64} +reveal_type(i8 ^ b) # E: {int64} +reveal_type(i8 & b) # E: {int64} + +reveal_type(u8 << u8) # E: {uint64} +reveal_type(u8 >> u8) # E: {uint64} +reveal_type(u8 | u8) # E: {uint64} +reveal_type(u8 ^ u8) # E: {uint64} +reveal_type(u8 & u8) # E: {uint64} + +reveal_type(u8 << AR) # E: Any +reveal_type(u8 >> AR) # E: Any +reveal_type(u8 | AR) # E: Any +reveal_type(u8 ^ AR) # E: Any +reveal_type(u8 & AR) # E: Any + +reveal_type(u4 << u4) # E: {uint32} +reveal_type(u4 >> u4) # E: {uint32} +reveal_type(u4 | u4) # E: {uint32} +reveal_type(u4 ^ u4) # E: {uint32} +reveal_type(u4 & u4) # E: {uint32} + +reveal_type(u4 << i4) # E:
numpy.signedinteger[Any] +reveal_type(u4 >> i4) # E: numpy.signedinteger[Any] +reveal_type(u4 | i4) # E: numpy.signedinteger[Any] +reveal_type(u4 ^ i4) # E: numpy.signedinteger[Any] +reveal_type(u4 & i4) # E: numpy.signedinteger[Any] + +reveal_type(u4 << i) # E: numpy.signedinteger[Any] +reveal_type(u4 >> i) # E: numpy.signedinteger[Any] +reveal_type(u4 | i) # E: numpy.signedinteger[Any] +reveal_type(u4 ^ i) # E: numpy.signedinteger[Any] +reveal_type(u4 & i) # E: numpy.signedinteger[Any] + +reveal_type(u8 << b_) # E: {uint64} +reveal_type(u8 >> b_) # E: {uint64} +reveal_type(u8 | b_) # E: {uint64} +reveal_type(u8 ^ b_) # E: {uint64} +reveal_type(u8 & b_) # E: {uint64} + +reveal_type(u8 << b) # E: {uint64} +reveal_type(u8 >> b) # E: {uint64} +reveal_type(u8 | b) # E: {uint64} +reveal_type(u8 ^ b) # E: {uint64} +reveal_type(u8 & b) # E: {uint64} + +reveal_type(b_ << b_) # E: {int8} +reveal_type(b_ >> b_) # E: {int8} +reveal_type(b_ | b_) # E: numpy.bool_ +reveal_type(b_ ^ b_) # E: numpy.bool_ +reveal_type(b_ & b_) # E: numpy.bool_ + +reveal_type(b_ << AR) # E: Any +reveal_type(b_ >> AR) # E: Any +reveal_type(b_ | AR) # E: Any +reveal_type(b_ ^ AR) # E: Any +reveal_type(b_ & AR) # E: Any + +reveal_type(b_ << b) # E: {int8} +reveal_type(b_ >> b) # E: {int8} +reveal_type(b_ | b) # E: numpy.bool_ +reveal_type(b_ ^ b) # E: numpy.bool_ +reveal_type(b_ & b) # E: numpy.bool_ + +reveal_type(b_ << i) # E: {int_} +reveal_type(b_ >> i) # E: {int_} +reveal_type(b_ | i) # E: {int_} +reveal_type(b_ ^ i) # E: {int_} +reveal_type(b_ & i) # E: {int_} + +reveal_type(~i8) # E: {int64} +reveal_type(~i4) # E: {int32} +reveal_type(~u8) # E: {uint64} +reveal_type(~u4) # E: {uint32} +reveal_type(~b_) # E: numpy.bool_ +reveal_type(~AR) # E: Any diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/comparisons.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/comparisons.py new file mode 100644 index 0000000000000000000000000000000000000000..dc56e74c2ab91b6674569940d3b187b406f17fb2 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/comparisons.py @@ -0,0 +1,252 @@ +import numpy as np + +c16 = np.complex128() +f8 = np.float64() +i8 = np.int64() +u8 = np.uint64() + +c8 = np.complex64() +f4 = np.float32() +i4 = np.int32() +u4 = np.uint32() + +dt = np.datetime64(0, "D") +td = np.timedelta64(0, "D") + +b_ = np.bool_() + +b = bool() +c = complex() +f = float() +i = int() + +AR = np.array([0], dtype=np.int64) +AR.setflags(write=False) + +SEQ = (0, 1, 2, 3, 4) + +# Time structures + +reveal_type(dt > dt) # E: numpy.bool_ + +reveal_type(td > td) # E: numpy.bool_ +reveal_type(td > i) # E: numpy.bool_ +reveal_type(td > i4) # E: numpy.bool_ +reveal_type(td > i8) # E: numpy.bool_ + +reveal_type(td > AR) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(td > SEQ) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(AR > SEQ) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(AR > td) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(SEQ > td) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(SEQ > AR) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +# boolean + +reveal_type(b_ > b) # E: numpy.bool_ +reveal_type(b_ > b_) # E: numpy.bool_ +reveal_type(b_ > i) # E: numpy.bool_ +reveal_type(b_ > i8) # E: numpy.bool_ +reveal_type(b_ > i4) # E: numpy.bool_ +reveal_type(b_ > u8) # E: numpy.bool_ +reveal_type(b_ > u4) # E: numpy.bool_ +reveal_type(b_ > f) # E: numpy.bool_ +reveal_type(b_ > f8) # E: numpy.bool_ +reveal_type(b_ > f4) 
# E: numpy.bool_ +reveal_type(b_ > c) # E: numpy.bool_ +reveal_type(b_ > c16) # E: numpy.bool_ +reveal_type(b_ > c8) # E: numpy.bool_ +reveal_type(b_ > AR) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(b_ > SEQ) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +# Complex + +reveal_type(c16 > c16) # E: numpy.bool_ +reveal_type(c16 > f8) # E: numpy.bool_ +reveal_type(c16 > i8) # E: numpy.bool_ +reveal_type(c16 > c8) # E: numpy.bool_ +reveal_type(c16 > f4) # E: numpy.bool_ +reveal_type(c16 > i4) # E: numpy.bool_ +reveal_type(c16 > b_) # E: numpy.bool_ +reveal_type(c16 > b) # E: numpy.bool_ +reveal_type(c16 > c) # E: numpy.bool_ +reveal_type(c16 > f) # E: numpy.bool_ +reveal_type(c16 > i) # E: numpy.bool_ +reveal_type(c16 > AR) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(c16 > SEQ) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +reveal_type(c16 > c16) # E: numpy.bool_ +reveal_type(f8 > c16) # E: numpy.bool_ +reveal_type(i8 > c16) # E: numpy.bool_ +reveal_type(c8 > c16) # E: numpy.bool_ +reveal_type(f4 > c16) # E: numpy.bool_ +reveal_type(i4 > c16) # E: numpy.bool_ +reveal_type(b_ > c16) # E: numpy.bool_ +reveal_type(b > c16) # E: numpy.bool_ +reveal_type(c > c16) # E: numpy.bool_ +reveal_type(f > c16) # E: numpy.bool_ +reveal_type(i > c16) # E: numpy.bool_ +reveal_type(AR > c16) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(SEQ > c16) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +reveal_type(c8 > c16) # E: numpy.bool_ +reveal_type(c8 > f8) # E: numpy.bool_ +reveal_type(c8 > i8) # E: numpy.bool_ +reveal_type(c8 > c8) # E: numpy.bool_ +reveal_type(c8 > f4) # E: numpy.bool_ +reveal_type(c8 > i4) # E: numpy.bool_ +reveal_type(c8 > b_) # E: numpy.bool_ +reveal_type(c8 > b) # E: numpy.bool_ +reveal_type(c8 > c) # E: numpy.bool_ +reveal_type(c8 > f) # E: numpy.bool_ +reveal_type(c8 > i) # E: numpy.bool_ +reveal_type(c8 > AR) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(c8 > SEQ) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +reveal_type(c16 > c8) # E: numpy.bool_ +reveal_type(f8 > c8) # E: numpy.bool_ +reveal_type(i8 > c8) # E: numpy.bool_ +reveal_type(c8 > c8) # E: numpy.bool_ +reveal_type(f4 > c8) # E: numpy.bool_ +reveal_type(i4 > c8) # E: numpy.bool_ +reveal_type(b_ > c8) # E: numpy.bool_ +reveal_type(b > c8) # E: numpy.bool_ +reveal_type(c > c8) # E: numpy.bool_ +reveal_type(f > c8) # E: numpy.bool_ +reveal_type(i > c8) # E: numpy.bool_ +reveal_type(AR > c8) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(SEQ > c8) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +# Float + +reveal_type(f8 > f8) # E: numpy.bool_ +reveal_type(f8 > i8) # E: numpy.bool_ +reveal_type(f8 > f4) # E: numpy.bool_ +reveal_type(f8 > i4) # E: numpy.bool_ +reveal_type(f8 > b_) # E: numpy.bool_ +reveal_type(f8 > b) # E: numpy.bool_ +reveal_type(f8 > c) # E: numpy.bool_ +reveal_type(f8 > f) # E: numpy.bool_ +reveal_type(f8 > i) # E: numpy.bool_ +reveal_type(f8 > AR) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(f8 > SEQ) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +reveal_type(f8 > f8) # E: numpy.bool_ +reveal_type(i8 > f8) # E: numpy.bool_ +reveal_type(f4 > f8) # E: numpy.bool_ +reveal_type(i4 > f8) # E: numpy.bool_ +reveal_type(b_ > f8) # E: numpy.bool_ +reveal_type(b > f8) # E: numpy.bool_ +reveal_type(c > f8) # E: numpy.bool_ +reveal_type(f > f8) # E: numpy.bool_ +reveal_type(i > f8) # E: numpy.bool_ +reveal_type(AR > f8) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(SEQ > 
f8) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +reveal_type(f4 > f8) # E: numpy.bool_ +reveal_type(f4 > i8) # E: numpy.bool_ +reveal_type(f4 > f4) # E: numpy.bool_ +reveal_type(f4 > i4) # E: numpy.bool_ +reveal_type(f4 > b_) # E: numpy.bool_ +reveal_type(f4 > b) # E: numpy.bool_ +reveal_type(f4 > c) # E: numpy.bool_ +reveal_type(f4 > f) # E: numpy.bool_ +reveal_type(f4 > i) # E: numpy.bool_ +reveal_type(f4 > AR) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(f4 > SEQ) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +reveal_type(f8 > f4) # E: numpy.bool_ +reveal_type(i8 > f4) # E: numpy.bool_ +reveal_type(f4 > f4) # E: numpy.bool_ +reveal_type(i4 > f4) # E: numpy.bool_ +reveal_type(b_ > f4) # E: numpy.bool_ +reveal_type(b > f4) # E: numpy.bool_ +reveal_type(c > f4) # E: numpy.bool_ +reveal_type(f > f4) # E: numpy.bool_ +reveal_type(i > f4) # E: numpy.bool_ +reveal_type(AR > f4) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(SEQ > f4) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +# Int + +reveal_type(i8 > i8) # E: numpy.bool_ +reveal_type(i8 > u8) # E: numpy.bool_ +reveal_type(i8 > i4) # E: numpy.bool_ +reveal_type(i8 > u4) # E: numpy.bool_ +reveal_type(i8 > b_) # E: numpy.bool_ +reveal_type(i8 > b) # E: numpy.bool_ +reveal_type(i8 > c) # E: numpy.bool_ +reveal_type(i8 > f) # E: numpy.bool_ +reveal_type(i8 > i) # E: numpy.bool_ +reveal_type(i8 > AR) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(i8 > SEQ) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +reveal_type(u8 > u8) # E: numpy.bool_ +reveal_type(u8 > i4) # E: numpy.bool_ +reveal_type(u8 > u4) # E: numpy.bool_ +reveal_type(u8 > b_) # E: numpy.bool_ +reveal_type(u8 > b) # E: numpy.bool_ +reveal_type(u8 > c) # E: numpy.bool_ +reveal_type(u8 > f) # E: numpy.bool_ +reveal_type(u8 > i) # E: numpy.bool_ +reveal_type(u8 > AR) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(u8 > SEQ) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +reveal_type(i8 > i8) # E: numpy.bool_ +reveal_type(u8 > i8) # E: numpy.bool_ +reveal_type(i4 > i8) # E: numpy.bool_ +reveal_type(u4 > i8) # E: numpy.bool_ +reveal_type(b_ > i8) # E: numpy.bool_ +reveal_type(b > i8) # E: numpy.bool_ +reveal_type(c > i8) # E: numpy.bool_ +reveal_type(f > i8) # E: numpy.bool_ +reveal_type(i > i8) # E: numpy.bool_ +reveal_type(AR > i8) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(SEQ > i8) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +reveal_type(u8 > u8) # E: numpy.bool_ +reveal_type(i4 > u8) # E: numpy.bool_ +reveal_type(u4 > u8) # E: numpy.bool_ +reveal_type(b_ > u8) # E: numpy.bool_ +reveal_type(b > u8) # E: numpy.bool_ +reveal_type(c > u8) # E: numpy.bool_ +reveal_type(f > u8) # E: numpy.bool_ +reveal_type(i > u8) # E: numpy.bool_ +reveal_type(AR > u8) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(SEQ > u8) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +reveal_type(i4 > i8) # E: numpy.bool_ +reveal_type(i4 > i4) # E: numpy.bool_ +reveal_type(i4 > i) # E: numpy.bool_ +reveal_type(i4 > b_) # E: numpy.bool_ +reveal_type(i4 > b) # E: numpy.bool_ +reveal_type(i4 > AR) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(i4 > SEQ) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +reveal_type(u4 > i8) # E: numpy.bool_ +reveal_type(u4 > i4) # E: numpy.bool_ +reveal_type(u4 > u8) # E: numpy.bool_ +reveal_type(u4 > u4) # E: numpy.bool_ +reveal_type(u4 > i) # E: numpy.bool_ +reveal_type(u4 > b_) # E: numpy.bool_ +reveal_type(u4 > b) # E: 
numpy.bool_ +reveal_type(u4 > AR) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(u4 > SEQ) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +reveal_type(i8 > i4) # E: numpy.bool_ +reveal_type(i4 > i4) # E: numpy.bool_ +reveal_type(i > i4) # E: numpy.bool_ +reveal_type(b_ > i4) # E: numpy.bool_ +reveal_type(b > i4) # E: numpy.bool_ +reveal_type(AR > i4) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(SEQ > i4) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] + +reveal_type(i8 > u4) # E: numpy.bool_ +reveal_type(i4 > u4) # E: numpy.bool_ +reveal_type(u8 > u4) # E: numpy.bool_ +reveal_type(u4 > u4) # E: numpy.bool_ +reveal_type(b_ > u4) # E: numpy.bool_ +reveal_type(b > u4) # E: numpy.bool_ +reveal_type(i > u4) # E: numpy.bool_ +reveal_type(AR > u4) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(SEQ > u4) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/constants.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..b9ddcb5f5b0a3e8c9da1b6a4fdbb3ce3c998e51c --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/constants.py @@ -0,0 +1,52 @@ +import numpy as np + +reveal_type(np.Inf) # E: float +reveal_type(np.Infinity) # E: float +reveal_type(np.NAN) # E: float +reveal_type(np.NINF) # E: float +reveal_type(np.NZERO) # E: float +reveal_type(np.NaN) # E: float +reveal_type(np.PINF) # E: float +reveal_type(np.PZERO) # E: float +reveal_type(np.e) # E: float +reveal_type(np.euler_gamma) # E: float +reveal_type(np.inf) # E: float +reveal_type(np.infty) # E: float +reveal_type(np.nan) # E: float +reveal_type(np.pi) # E: float + +reveal_type(np.ALLOW_THREADS) # E: int +reveal_type(np.BUFSIZE) # E: int +reveal_type(np.CLIP) # E: int +reveal_type(np.ERR_CALL) # E: int +reveal_type(np.ERR_DEFAULT) # E: int +reveal_type(np.ERR_IGNORE) # E: int +reveal_type(np.ERR_LOG) # E: int +reveal_type(np.ERR_PRINT) # E: int +reveal_type(np.ERR_RAISE) # E: int +reveal_type(np.ERR_WARN) # E: int +reveal_type(np.FLOATING_POINT_SUPPORT) # E: int +reveal_type(np.FPE_DIVIDEBYZERO) # E: int +reveal_type(np.FPE_INVALID) # E: int +reveal_type(np.FPE_OVERFLOW) # E: int +reveal_type(np.FPE_UNDERFLOW) # E: int +reveal_type(np.MAXDIMS) # E: int +reveal_type(np.MAY_SHARE_BOUNDS) # E: int +reveal_type(np.MAY_SHARE_EXACT) # E: int +reveal_type(np.RAISE) # E: int +reveal_type(np.SHIFT_DIVIDEBYZERO) # E: int +reveal_type(np.SHIFT_INVALID) # E: int +reveal_type(np.SHIFT_OVERFLOW) # E: int +reveal_type(np.SHIFT_UNDERFLOW) # E: int +reveal_type(np.UFUNC_BUFSIZE_DEFAULT) # E: int +reveal_type(np.WRAP) # E: int +reveal_type(np.tracemalloc_domain) # E: int + +reveal_type(np.little_endian) # E: bool +reveal_type(np.True_) # E: numpy.bool_ +reveal_type(np.False_) # E: numpy.bool_ + +reveal_type(np.UFUNC_PYVALS_NAME) # E: str + +reveal_type(np.sctypeDict) # E: dict +reveal_type(np.sctypes) # E: TypedDict diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/datasource.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/datasource.py new file mode 100644 index 0000000000000000000000000000000000000000..da7e6f2bb48c818d37b1e9b3432bede5dbc6c2cd --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/datasource.py @@ -0,0 +1,21 @@ +from pathlib import Path +import numpy as np + +path1: Path +path2: str + +d1 = np.DataSource(path1) +d2 = np.DataSource(path2) +d3 = np.DataSource(None) + 
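A note on np.DataSource, constructed just above as d1, d2, and d3: it abstracts "open a path" over local files and URLs, downloading remote targets into the destination directory first, and passing None (as for d3) stores any downloads in a temporary directory. A hedged sketch using only a local path (the file name is illustrative):

    import numpy as np

    ds = np.DataSource(None)          # None: cache any downloads in a temp directory
    if ds.exists("setup.py"):         # plain local paths pass straight through
        with ds.open("setup.py", "r") as f:
            print(f.readline())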
+reveal_type(d1.abspath("...")) # E: str +reveal_type(d2.abspath("...")) # E: str +reveal_type(d3.abspath("...")) # E: str + +reveal_type(d1.exists("...")) # E: bool +reveal_type(d2.exists("...")) # E: bool +reveal_type(d3.exists("...")) # E: bool + +reveal_type(d1.open("...", "r")) # E: IO[Any] +reveal_type(d2.open("...", encoding="utf8")) # E: IO[Any] +reveal_type(d3.open("...", newline="/n")) # E: IO[Any] diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/dtype.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/dtype.py new file mode 100644 index 0000000000000000000000000000000000000000..2e0f8a32ae7fab8686d44615e8d2b0f66a25494a --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/dtype.py @@ -0,0 +1,67 @@ +import ctypes as ct +import numpy as np + +dtype_obj: np.dtype[np.str_] +void_dtype_obj: np.dtype[np.void] + +reveal_type(np.dtype(np.float64)) # E: numpy.dtype[{float64}] +reveal_type(np.dtype(np.int64)) # E: numpy.dtype[{int64}] + +# String aliases +reveal_type(np.dtype("float64")) # E: numpy.dtype[{float64}] +reveal_type(np.dtype("float32")) # E: numpy.dtype[{float32}] +reveal_type(np.dtype("int64")) # E: numpy.dtype[{int64}] +reveal_type(np.dtype("int32")) # E: numpy.dtype[{int32}] +reveal_type(np.dtype("bool")) # E: numpy.dtype[numpy.bool_] +reveal_type(np.dtype("bytes")) # E: numpy.dtype[numpy.bytes_] +reveal_type(np.dtype("str")) # E: numpy.dtype[numpy.str_] + +# Python types +reveal_type(np.dtype(complex)) # E: numpy.dtype[{cdouble}] +reveal_type(np.dtype(float)) # E: numpy.dtype[{double}] +reveal_type(np.dtype(int)) # E: numpy.dtype[{int_}] +reveal_type(np.dtype(bool)) # E: numpy.dtype[numpy.bool_] +reveal_type(np.dtype(str)) # E: numpy.dtype[numpy.str_] +reveal_type(np.dtype(bytes)) # E: numpy.dtype[numpy.bytes_] +reveal_type(np.dtype(object)) # E: numpy.dtype[numpy.object_] + +# ctypes +reveal_type(np.dtype(ct.c_double)) # E: numpy.dtype[{double}] +reveal_type(np.dtype(ct.c_longlong)) # E: numpy.dtype[{longlong}] +reveal_type(np.dtype(ct.c_uint32)) # E: numpy.dtype[{uint32}] +reveal_type(np.dtype(ct.c_bool)) # E: numpy.dtype[numpy.bool_] +reveal_type(np.dtype(ct.c_char)) # E: numpy.dtype[numpy.bytes_] +reveal_type(np.dtype(ct.py_object)) # E: numpy.dtype[numpy.object_] + +# Special case for None +reveal_type(np.dtype(None)) # E: numpy.dtype[{double}] + +# Dtypes of dtypes +reveal_type(np.dtype(np.dtype(np.float64))) # E: numpy.dtype[{float64}] + +# Parameterized dtypes +reveal_type(np.dtype("S8")) # E: numpy.dtype + +# Void +reveal_type(np.dtype(("U", 10))) # E: numpy.dtype[numpy.void] + +# Methods and attributes +reveal_type(dtype_obj.base) # E: numpy.dtype[numpy.str_] +reveal_type(dtype_obj.subdtype) # E: Union[Tuple[numpy.dtype[numpy.str_], builtins.tuple[builtins.int]], None] +reveal_type(dtype_obj.newbyteorder()) # E: numpy.dtype[numpy.str_] +reveal_type(dtype_obj.type) # E: Type[numpy.str_] +reveal_type(dtype_obj.name) # E: str +reveal_type(dtype_obj.names) # E: Union[builtins.tuple[builtins.str], None] + +reveal_type(dtype_obj * 0) # E: None +reveal_type(dtype_obj * 1) # E: numpy.dtype[numpy.str_] +reveal_type(dtype_obj * 2) # E: numpy.dtype[numpy.void] + +reveal_type(0 * dtype_obj) # E: Any +reveal_type(1 * dtype_obj) # E: Any +reveal_type(2 * dtype_obj) # E: Any + +reveal_type(void_dtype_obj["f0"]) # E: numpy.dtype[Any] +reveal_type(void_dtype_obj[0]) # E: numpy.dtype[Any] +reveal_type(void_dtype_obj[["f0", "f1"]]) # E: numpy.dtype[numpy.void] +reveal_type(void_dtype_obj[["f0"]]) # E: numpy.dtype[numpy.void] diff --git 
a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/einsumfunc.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/einsumfunc.py new file mode 100644 index 0000000000000000000000000000000000000000..e02aacb259de138039540f705f002ebfe523c921 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/einsumfunc.py @@ -0,0 +1,32 @@ +from typing import List, Any +import numpy as np + +AR_LIKE_b: List[bool] +AR_LIKE_u: List[np.uint32] +AR_LIKE_i: List[int] +AR_LIKE_f: List[float] +AR_LIKE_c: List[complex] +AR_LIKE_U: List[str] + +OUT_f: np.ndarray[Any, np.dtype[np.float64]] + +reveal_type(np.einsum("i,i->i", AR_LIKE_b, AR_LIKE_b)) # E: Any +reveal_type(np.einsum("i,i->i", AR_LIKE_u, AR_LIKE_u)) # E: Any +reveal_type(np.einsum("i,i->i", AR_LIKE_i, AR_LIKE_i)) # E: Any +reveal_type(np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f)) # E: Any +reveal_type(np.einsum("i,i->i", AR_LIKE_c, AR_LIKE_c)) # E: Any +reveal_type(np.einsum("i,i->i", AR_LIKE_b, AR_LIKE_i)) # E: Any +reveal_type(np.einsum("i,i,i,i->i", AR_LIKE_b, AR_LIKE_u, AR_LIKE_i, AR_LIKE_c)) # E: Any + +reveal_type(np.einsum("i,i->i", AR_LIKE_c, AR_LIKE_c, out=OUT_f)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] +reveal_type(np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=bool, casting="unsafe", out=OUT_f)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] +reveal_type(np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f, dtype="c16")) # E: Any +reveal_type(np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=bool, casting="unsafe")) # E: Any + +reveal_type(np.einsum_path("i,i->i", AR_LIKE_b, AR_LIKE_b)) # E: Tuple[builtins.list[Any], builtins.str] +reveal_type(np.einsum_path("i,i->i", AR_LIKE_u, AR_LIKE_u)) # E: Tuple[builtins.list[Any], builtins.str] +reveal_type(np.einsum_path("i,i->i", AR_LIKE_i, AR_LIKE_i)) # E: Tuple[builtins.list[Any], builtins.str] +reveal_type(np.einsum_path("i,i->i", AR_LIKE_f, AR_LIKE_f)) # E: Tuple[builtins.list[Any], builtins.str] +reveal_type(np.einsum_path("i,i->i", AR_LIKE_c, AR_LIKE_c)) # E: Tuple[builtins.list[Any], builtins.str] +reveal_type(np.einsum_path("i,i->i", AR_LIKE_b, AR_LIKE_i)) # E: Tuple[builtins.list[Any], builtins.str] +reveal_type(np.einsum_path("i,i,i,i->i", AR_LIKE_b, AR_LIKE_u, AR_LIKE_i, AR_LIKE_c)) # E: Tuple[builtins.list[Any], builtins.str] diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/flatiter.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/flatiter.py new file mode 100644 index 0000000000000000000000000000000000000000..628362425a29c683da6179a69be79c4249dc1c9e --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/flatiter.py @@ -0,0 +1,17 @@ +from typing import Any +import numpy as np + +a: np.flatiter[np.ndarray[Any, np.dtype[np.str_]]] + +reveal_type(a.base) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] +reveal_type(a.copy()) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] +reveal_type(a.coords) # E: tuple[builtins.int] +reveal_type(a.index) # E: int +reveal_type(iter(a)) # E: Iterator[numpy.str_] +reveal_type(next(a)) # E: numpy.str_ +reveal_type(a[0]) # E: numpy.str_ +reveal_type(a[[0, 1, 2]]) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] +reveal_type(a[...]) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] +reveal_type(a[:]) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] +reveal_type(a.__array__()) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] +reveal_type(a.__array__(np.dtype(np.float64))) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] diff --git
a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/fromnumeric.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/fromnumeric.py new file mode 100644 index 0000000000000000000000000000000000000000..a43ed27cc1ccda706e4068570311ac85f6611a8f --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/fromnumeric.py @@ -0,0 +1,264 @@ +"""Tests for :mod:`numpy.core.fromnumeric`.""" + +import numpy as np + +A = np.array(True, ndmin=2, dtype=bool) +B = np.array(1.0, ndmin=2, dtype=np.float32) +A.setflags(write=False) +B.setflags(write=False) + +a = np.bool_(True) +b = np.float32(1.0) +c = 1.0 +d = np.array(1.0, dtype=np.float32) # writeable + +reveal_type(np.take(a, 0)) # E: Any +reveal_type(np.take(b, 0)) # E: Any +reveal_type(np.take(c, 0)) # E: Any +reveal_type(np.take(A, 0)) # E: Any +reveal_type(np.take(B, 0)) # E: Any +reveal_type(np.take(A, [0])) # E: Any +reveal_type(np.take(B, [0])) # E: Any + +reveal_type(np.reshape(a, 1)) # E: numpy.ndarray[Any, Any] +reveal_type(np.reshape(b, 1)) # E: numpy.ndarray[Any, Any] +reveal_type(np.reshape(c, 1)) # E: numpy.ndarray[Any, Any] +reveal_type(np.reshape(A, 1)) # E: numpy.ndarray[Any, Any] +reveal_type(np.reshape(B, 1)) # E: numpy.ndarray[Any, Any] + +reveal_type(np.choose(a, [True, True])) # E: Any +reveal_type(np.choose(A, [True, True])) # E: Any + +reveal_type(np.repeat(a, 1)) # E: numpy.ndarray[Any, Any] +reveal_type(np.repeat(b, 1)) # E: numpy.ndarray[Any, Any] +reveal_type(np.repeat(c, 1)) # E: numpy.ndarray[Any, Any] +reveal_type(np.repeat(A, 1)) # E: numpy.ndarray[Any, Any] +reveal_type(np.repeat(B, 1)) # E: numpy.ndarray[Any, Any] + +# TODO: Add tests for np.put() + +reveal_type(np.swapaxes(A, 0, 0)) # E: numpy.ndarray[Any, Any] +reveal_type(np.swapaxes(B, 0, 0)) # E: numpy.ndarray[Any, Any] + +reveal_type(np.transpose(a)) # E: numpy.ndarray[Any, Any] +reveal_type(np.transpose(b)) # E: numpy.ndarray[Any, Any] +reveal_type(np.transpose(c)) # E: numpy.ndarray[Any, Any] +reveal_type(np.transpose(A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.transpose(B)) # E: numpy.ndarray[Any, Any] + +reveal_type(np.partition(a, 0, axis=None)) # E: numpy.ndarray[Any, Any] +reveal_type(np.partition(b, 0, axis=None)) # E: numpy.ndarray[Any, Any] +reveal_type(np.partition(c, 0, axis=None)) # E: numpy.ndarray[Any, Any] +reveal_type(np.partition(A, 0)) # E: numpy.ndarray[Any, Any] +reveal_type(np.partition(B, 0)) # E: numpy.ndarray[Any, Any] + +reveal_type(np.argpartition(a, 0)) # E: Any +reveal_type(np.argpartition(b, 0)) # E: Any +reveal_type(np.argpartition(c, 0)) # E: Any +reveal_type(np.argpartition(A, 0)) # E: Any +reveal_type(np.argpartition(B, 0)) # E: Any + +reveal_type(np.sort(A, 0)) # E: numpy.ndarray[Any, Any] +reveal_type(np.sort(B, 0)) # E: numpy.ndarray[Any, Any] + +reveal_type(np.argsort(A, 0)) # E: numpy.ndarray[Any, Any] +reveal_type(np.argsort(B, 0)) # E: numpy.ndarray[Any, Any] + +reveal_type(np.argmax(A)) # E: {intp} +reveal_type(np.argmax(B)) # E: {intp} +reveal_type(np.argmax(A, axis=0)) # E: Any +reveal_type(np.argmax(B, axis=0)) # E: Any + +reveal_type(np.argmin(A)) # E: {intp} +reveal_type(np.argmin(B)) # E: {intp} +reveal_type(np.argmin(A, axis=0)) # E: Any +reveal_type(np.argmin(B, axis=0)) # E: Any + +reveal_type(np.searchsorted(A[0], 0)) # E: {intp} +reveal_type(np.searchsorted(B[0], 0)) # E: {intp} +reveal_type(np.searchsorted(A[0], [0])) # E: numpy.ndarray[Any, Any] +reveal_type(np.searchsorted(B[0], [0])) # E: numpy.ndarray[Any, Any] + +reveal_type(np.resize(a, (5, 5))) # E: numpy.ndarray[Any, Any] 
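A note on the many Any results in the fromnumeric tests above: several of these functions change their return kind with their arguments, e.g. np.argmax yields a scalar when axis is omitted but an array once axis is given, which these stubs could not yet express through overloads, hence {intp} for the bare call and Any otherwise. A runtime contrast (a sketch; the array is illustrative):

    import numpy as np

    A = np.ones((2, 3), dtype=bool)
    print(np.ndim(np.argmax(A)))        # 0: a single intp scalar index
    print(np.argmax(A, axis=0).shape)   # (3,): one index per column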
+reveal_type(np.resize(b, (5, 5))) # E: numpy.ndarray[Any, Any] +reveal_type(np.resize(c, (5, 5))) # E: numpy.ndarray[Any, Any] +reveal_type(np.resize(A, (5, 5))) # E: numpy.ndarray[Any, Any] +reveal_type(np.resize(B, (5, 5))) # E: numpy.ndarray[Any, Any] + +reveal_type(np.squeeze(a)) # E: numpy.bool_ +reveal_type(np.squeeze(b)) # E: {float32} +reveal_type(np.squeeze(c)) # E: numpy.ndarray[Any, Any] +reveal_type(np.squeeze(A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.squeeze(B)) # E: numpy.ndarray[Any, Any] + +reveal_type(np.diagonal(A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.diagonal(B)) # E: numpy.ndarray[Any, Any] + +reveal_type(np.trace(A)) # E: Any +reveal_type(np.trace(B)) # E: Any + +reveal_type(np.ravel(a)) # E: numpy.ndarray[Any, Any] +reveal_type(np.ravel(b)) # E: numpy.ndarray[Any, Any] +reveal_type(np.ravel(c)) # E: numpy.ndarray[Any, Any] +reveal_type(np.ravel(A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.ravel(B)) # E: numpy.ndarray[Any, Any] + +reveal_type(np.nonzero(a)) # E: tuple[numpy.ndarray[Any, Any]] +reveal_type(np.nonzero(b)) # E: tuple[numpy.ndarray[Any, Any]] +reveal_type(np.nonzero(c)) # E: tuple[numpy.ndarray[Any, Any]] +reveal_type(np.nonzero(A)) # E: tuple[numpy.ndarray[Any, Any]] +reveal_type(np.nonzero(B)) # E: tuple[numpy.ndarray[Any, Any]] + +reveal_type(np.shape(a)) # E: tuple[builtins.int] +reveal_type(np.shape(b)) # E: tuple[builtins.int] +reveal_type(np.shape(c)) # E: tuple[builtins.int] +reveal_type(np.shape(A)) # E: tuple[builtins.int] +reveal_type(np.shape(B)) # E: tuple[builtins.int] + +reveal_type(np.compress([True], a)) # E: numpy.ndarray[Any, Any] +reveal_type(np.compress([True], b)) # E: numpy.ndarray[Any, Any] +reveal_type(np.compress([True], c)) # E: numpy.ndarray[Any, Any] +reveal_type(np.compress([True], A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.compress([True], B)) # E: numpy.ndarray[Any, Any] + +reveal_type(np.clip(a, 0, 1.0)) # E: Any +reveal_type(np.clip(b, -1, 1)) # E: Any +reveal_type(np.clip(c, 0, 1)) # E: Any +reveal_type(np.clip(A, 0, 1)) # E: Any +reveal_type(np.clip(B, 0, 1)) # E: Any + +reveal_type(np.sum(a)) # E: Any +reveal_type(np.sum(b)) # E: Any +reveal_type(np.sum(c)) # E: Any +reveal_type(np.sum(A)) # E: Any +reveal_type(np.sum(B)) # E: Any +reveal_type(np.sum(A, axis=0)) # E: Any +reveal_type(np.sum(B, axis=0)) # E: Any + +reveal_type(np.all(a)) # E: numpy.bool_ +reveal_type(np.all(b)) # E: numpy.bool_ +reveal_type(np.all(c)) # E: numpy.bool_ +reveal_type(np.all(A)) # E: numpy.bool_ +reveal_type(np.all(B)) # E: numpy.bool_ +reveal_type(np.all(A, axis=0)) # E: Any +reveal_type(np.all(B, axis=0)) # E: Any +reveal_type(np.all(A, keepdims=True)) # E: Any +reveal_type(np.all(B, keepdims=True)) # E: Any + +reveal_type(np.any(a)) # E: numpy.bool_ +reveal_type(np.any(b)) # E: numpy.bool_ +reveal_type(np.any(c)) # E: numpy.bool_ +reveal_type(np.any(A)) # E: numpy.bool_ +reveal_type(np.any(B)) # E: numpy.bool_ +reveal_type(np.any(A, axis=0)) # E: Any +reveal_type(np.any(B, axis=0)) # E: Any +reveal_type(np.any(A, keepdims=True)) # E: Any +reveal_type(np.any(B, keepdims=True)) # E: Any + +reveal_type(np.cumsum(a)) # E: numpy.ndarray[Any, Any] +reveal_type(np.cumsum(b)) # E: numpy.ndarray[Any, Any] +reveal_type(np.cumsum(c)) # E: numpy.ndarray[Any, Any] +reveal_type(np.cumsum(A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.cumsum(B)) # E: numpy.ndarray[Any, Any] + +reveal_type(np.ptp(a)) # E: Any +reveal_type(np.ptp(b)) # E: Any +reveal_type(np.ptp(c)) # E: Any +reveal_type(np.ptp(A)) # E: Any 
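A runtime counterpart to a few of the reductions above may help read the expectations: full boolean reductions such as np.all/np.any are typed precisely as numpy.bool_, while np.ptp, np.sum and friends fall back to Any, and any axis=/keepdims= form returns an array. A sketch reusing the B fixture from this file (the prints are illustrative):

import numpy as np

B = np.array(1.0, ndmin=2, dtype=np.float32)
B.setflags(write=False)            # frozen, as in the fixture above
print(type(np.all(B)))             # numpy.bool_: typed precisely
print(np.ptp(B), np.sum(B))        # typed Any; float32 scalars at runtime
print(np.ptp(B, keepdims=True))    # keepdims/axis forms return arrays, hence Any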
+reveal_type(np.ptp(B)) # E: Any +reveal_type(np.ptp(A, axis=0)) # E: Any +reveal_type(np.ptp(B, axis=0)) # E: Any +reveal_type(np.ptp(A, keepdims=True)) # E: Any +reveal_type(np.ptp(B, keepdims=True)) # E: Any + +reveal_type(np.amax(a)) # E: Any +reveal_type(np.amax(b)) # E: Any +reveal_type(np.amax(c)) # E: Any +reveal_type(np.amax(A)) # E: Any +reveal_type(np.amax(B)) # E: Any +reveal_type(np.amax(A, axis=0)) # E: Any +reveal_type(np.amax(B, axis=0)) # E: Any +reveal_type(np.amax(A, keepdims=True)) # E: Any +reveal_type(np.amax(B, keepdims=True)) # E: Any + +reveal_type(np.amin(a)) # E: Any +reveal_type(np.amin(b)) # E: Any +reveal_type(np.amin(c)) # E: Any +reveal_type(np.amin(A)) # E: Any +reveal_type(np.amin(B)) # E: Any +reveal_type(np.amin(A, axis=0)) # E: Any +reveal_type(np.amin(B, axis=0)) # E: Any +reveal_type(np.amin(A, keepdims=True)) # E: Any +reveal_type(np.amin(B, keepdims=True)) # E: Any + +reveal_type(np.prod(a)) # E: Any +reveal_type(np.prod(b)) # E: Any +reveal_type(np.prod(c)) # E: Any +reveal_type(np.prod(A)) # E: Any +reveal_type(np.prod(B)) # E: Any +reveal_type(np.prod(A, axis=0)) # E: Any +reveal_type(np.prod(B, axis=0)) # E: Any +reveal_type(np.prod(A, keepdims=True)) # E: Any +reveal_type(np.prod(B, keepdims=True)) # E: Any +reveal_type(np.prod(b, out=d)) # E: Any +reveal_type(np.prod(B, out=d)) # E: Any + +reveal_type(np.cumprod(a)) # E: numpy.ndarray[Any, Any] +reveal_type(np.cumprod(b)) # E: numpy.ndarray[Any, Any] +reveal_type(np.cumprod(c)) # E: numpy.ndarray[Any, Any] +reveal_type(np.cumprod(A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.cumprod(B)) # E: numpy.ndarray[Any, Any] + +reveal_type(np.ndim(a)) # E: int +reveal_type(np.ndim(b)) # E: int +reveal_type(np.ndim(c)) # E: int +reveal_type(np.ndim(A)) # E: int +reveal_type(np.ndim(B)) # E: int + +reveal_type(np.size(a)) # E: int +reveal_type(np.size(b)) # E: int +reveal_type(np.size(c)) # E: int +reveal_type(np.size(A)) # E: int +reveal_type(np.size(B)) # E: int + +reveal_type(np.around(a)) # E: Any +reveal_type(np.around(b)) # E: Any +reveal_type(np.around(c)) # E: Any +reveal_type(np.around(A)) # E: Any +reveal_type(np.around(B)) # E: Any + +reveal_type(np.mean(a)) # E: Any +reveal_type(np.mean(b)) # E: Any +reveal_type(np.mean(c)) # E: Any +reveal_type(np.mean(A)) # E: Any +reveal_type(np.mean(B)) # E: Any +reveal_type(np.mean(A, axis=0)) # E: Any +reveal_type(np.mean(B, axis=0)) # E: Any +reveal_type(np.mean(A, keepdims=True)) # E: Any +reveal_type(np.mean(B, keepdims=True)) # E: Any +reveal_type(np.mean(b, out=d)) # E: Any +reveal_type(np.mean(B, out=d)) # E: Any + +reveal_type(np.std(a)) # E: Any +reveal_type(np.std(b)) # E: Any +reveal_type(np.std(c)) # E: Any +reveal_type(np.std(A)) # E: Any +reveal_type(np.std(B)) # E: Any +reveal_type(np.std(A, axis=0)) # E: Any +reveal_type(np.std(B, axis=0)) # E: Any +reveal_type(np.std(A, keepdims=True)) # E: Any +reveal_type(np.std(B, keepdims=True)) # E: Any +reveal_type(np.std(b, out=d)) # E: Any +reveal_type(np.std(B, out=d)) # E: Any + +reveal_type(np.var(a)) # E: Any +reveal_type(np.var(b)) # E: Any +reveal_type(np.var(c)) # E: Any +reveal_type(np.var(A)) # E: Any +reveal_type(np.var(B)) # E: Any +reveal_type(np.var(A, axis=0)) # E: Any +reveal_type(np.var(B, axis=0)) # E: Any +reveal_type(np.var(A, keepdims=True)) # E: Any +reveal_type(np.var(B, keepdims=True)) # E: Any +reveal_type(np.var(b, out=d)) # E: Any +reveal_type(np.var(B, out=d)) # E: Any diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/index_tricks.py 
b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/index_tricks.py new file mode 100644 index 0000000000000000000000000000000000000000..4ef5b39e72f401c6a2cf419f09507ce6ef7e1908 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/index_tricks.py @@ -0,0 +1,64 @@ +from typing import Any, List +import numpy as np + +AR_LIKE_b: List[bool] +AR_LIKE_i: List[int] +AR_LIKE_f: List[float] +AR_LIKE_U: List[str] + +AR_i8: np.ndarray[Any, np.dtype[np.int64]] + +reveal_type(np.ndenumerate(AR_i8)) # E: numpy.ndenumerate[{int64}] +reveal_type(np.ndenumerate(AR_LIKE_f)) # E: numpy.ndenumerate[{double}] +reveal_type(np.ndenumerate(AR_LIKE_U)) # E: numpy.ndenumerate[numpy.str_] + +reveal_type(np.ndenumerate(AR_i8).iter) # E: numpy.flatiter[numpy.ndarray[Any, numpy.dtype[{int64}]]] +reveal_type(np.ndenumerate(AR_LIKE_f).iter) # E: numpy.flatiter[numpy.ndarray[Any, numpy.dtype[{double}]]] +reveal_type(np.ndenumerate(AR_LIKE_U).iter) # E: numpy.flatiter[numpy.ndarray[Any, numpy.dtype[numpy.str_]]] + +reveal_type(next(np.ndenumerate(AR_i8))) # E: Tuple[builtins.tuple[builtins.int], {int64}] +reveal_type(next(np.ndenumerate(AR_LIKE_f))) # E: Tuple[builtins.tuple[builtins.int], {double}] +reveal_type(next(np.ndenumerate(AR_LIKE_U))) # E: Tuple[builtins.tuple[builtins.int], numpy.str_] + +reveal_type(iter(np.ndenumerate(AR_i8))) # E: Iterator[Tuple[builtins.tuple[builtins.int], {int64}]] +reveal_type(iter(np.ndenumerate(AR_LIKE_f))) # E: Iterator[Tuple[builtins.tuple[builtins.int], {double}]] +reveal_type(iter(np.ndenumerate(AR_LIKE_U))) # E: Iterator[Tuple[builtins.tuple[builtins.int], numpy.str_]] + +reveal_type(iter(np.ndindex(1, 2, 3))) # E: Iterator[builtins.tuple[builtins.int]] +reveal_type(next(np.ndindex(1, 2, 3))) # E: builtins.tuple[builtins.int] + +reveal_type(np.unravel_index([22, 41, 37], (7, 6))) # E: tuple[numpy.ndarray[Any, numpy.dtype[{intp}]]] +reveal_type(np.unravel_index([31, 41, 13], (7, 6), order="F")) # E: tuple[numpy.ndarray[Any, numpy.dtype[{intp}]]] +reveal_type(np.unravel_index(1621, (6, 7, 8, 9))) # E: tuple[{intp}] + +reveal_type(np.ravel_multi_index([[1]], (7, 6))) # E: numpy.ndarray[Any, numpy.dtype[{intp}]] +reveal_type(np.ravel_multi_index(AR_LIKE_i, (7, 6))) # E: {intp} +reveal_type(np.ravel_multi_index(AR_LIKE_i, (7, 6), order="F")) # E: {intp} +reveal_type(np.ravel_multi_index(AR_LIKE_i, (4, 6), mode="clip")) # E: {intp} +reveal_type(np.ravel_multi_index(AR_LIKE_i, (4, 4), mode=("clip", "wrap"))) # E: {intp} +reveal_type(np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9))) # E: {intp} + +reveal_type(np.mgrid[1:1:2]) # E: numpy.ndarray[Any, numpy.dtype[Any]] +reveal_type(np.mgrid[1:1:2, None:10]) # E: numpy.ndarray[Any, numpy.dtype[Any]] + +reveal_type(np.ogrid[1:1:2]) # E: list[numpy.ndarray[Any, numpy.dtype[Any]]] +reveal_type(np.ogrid[1:1:2, None:10]) # E: list[numpy.ndarray[Any, numpy.dtype[Any]]] + +reveal_type(np.index_exp[0:1]) # E: Tuple[builtins.slice] +reveal_type(np.index_exp[0:1, None:3]) # E: Tuple[builtins.slice, builtins.slice] +reveal_type(np.index_exp[0, 0:1, ..., [0, 1, 3]]) # E: Tuple[Literal[0]?, builtins.slice, builtins.ellipsis, builtins.list[builtins.int]] + +reveal_type(np.s_[0:1]) # E: builtins.slice +reveal_type(np.s_[0:1, None:3]) # E: Tuple[builtins.slice, builtins.slice] +reveal_type(np.s_[0, 0:1, ..., [0, 1, 3]]) # E: Tuple[Literal[0]?, builtins.slice, builtins.ellipsis, builtins.list[builtins.int]] + +reveal_type(np.ix_(AR_LIKE_b)) # E: tuple[numpy.ndarray[Any, numpy.dtype[numpy.bool_]]] +reveal_type(np.ix_(AR_LIKE_i, 
AR_LIKE_f)) # E: tuple[numpy.ndarray[Any, numpy.dtype[{double}]]] +reveal_type(np.ix_(AR_i8)) # E: tuple[numpy.ndarray[Any, numpy.dtype[{int64}]]] + +reveal_type(np.fill_diagonal(AR_i8, 5)) # E: None + +reveal_type(np.diag_indices(4)) # E: tuple[numpy.ndarray[Any, numpy.dtype[{int_}]]] +reveal_type(np.diag_indices(2, 3)) # E: tuple[numpy.ndarray[Any, numpy.dtype[{int_}]]] + +reveal_type(np.diag_indices_from(AR_i8)) # E: tuple[numpy.ndarray[Any, numpy.dtype[{int_}]]] diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/lib_utils.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/lib_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..eeff0e3ca78e643e2b500390d13c8c721cd7d604 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/lib_utils.py @@ -0,0 +1,30 @@ +from io import StringIO +from typing import Any, Dict + +import numpy as np + +AR: np.ndarray[Any, np.dtype[np.float64]] +AR_DICT: Dict[str, np.ndarray[Any, np.dtype[np.float64]]] +FILE: StringIO + +def func(a: int) -> bool: ... + +reveal_type(np.deprecate(func)) # E: def (a: builtins.int) -> builtins.bool +reveal_type(np.deprecate()) # E: _Deprecate + +reveal_type(np.deprecate_with_doc("test")) # E: _Deprecate +reveal_type(np.deprecate_with_doc(None)) # E: _Deprecate + +reveal_type(np.byte_bounds(AR)) # E: Tuple[builtins.int, builtins.int] +reveal_type(np.byte_bounds(np.float64())) # E: Tuple[builtins.int, builtins.int] + +reveal_type(np.who(None)) # E: None +reveal_type(np.who(AR_DICT)) # E: None + +reveal_type(np.info(1, output=FILE)) # E: None + +reveal_type(np.source(np.interp, output=FILE)) # E: None + +reveal_type(np.lookfor("binary representation", output=FILE)) # E: None + +reveal_type(np.safe_eval("1 + 1")) # E: Any diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/lib_version.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/lib_version.py new file mode 100644 index 0000000000000000000000000000000000000000..0c6450d970606f0dc18df8fe8dacb6bdf96e6341 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/lib_version.py @@ -0,0 +1,18 @@ +from numpy.lib import NumpyVersion + +version = NumpyVersion("1.8.0") + +reveal_type(version.vstring) # E: str +reveal_type(version.version) # E: str +reveal_type(version.major) # E: int +reveal_type(version.minor) # E: int +reveal_type(version.bugfix) # E: int +reveal_type(version.pre_release) # E: str +reveal_type(version.is_devversion) # E: bool + +reveal_type(version == version) # E: bool +reveal_type(version != version) # E: bool +reveal_type(version < "1.8.0") # E: bool +reveal_type(version <= version) # E: bool +reveal_type(version > version) # E: bool +reveal_type(version >= "1.8.0") # E: bool diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/mod.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/mod.py new file mode 100644 index 0000000000000000000000000000000000000000..c352b5f14006d4d55daa19cdcdb4ca330343d5ae --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/mod.py @@ -0,0 +1,147 @@ +from typing import Any +import numpy as np + +f8 = np.float64() +i8 = np.int64() +u8 = np.uint64() + +f4 = np.float32() +i4 = np.int32() +u4 = np.uint32() + +td = np.timedelta64(0, "D") +b_ = np.bool_() + +b = bool() +f = float() +i = int() + +AR_b: np.ndarray[Any, np.dtype[np.bool_]] +AR_m: np.ndarray[Any, np.dtype[np.timedelta64]] + +# Time structures + +reveal_type(td % td) # E: numpy.timedelta64 +reveal_type(AR_m % td) # E: Any +reveal_type(td % 
AR_m) # E: Any + +reveal_type(divmod(td, td)) # E: Tuple[{int64}, numpy.timedelta64] +reveal_type(divmod(AR_m, td)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]], numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] +reveal_type(divmod(td, AR_m)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]], numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]] + +# Bool + +reveal_type(b_ % b) # E: {int8} +reveal_type(b_ % i) # E: {int_} +reveal_type(b_ % f) # E: {float64} +reveal_type(b_ % b_) # E: {int8} +reveal_type(b_ % i8) # E: {int64} +reveal_type(b_ % u8) # E: {uint64} +reveal_type(b_ % f8) # E: {float64} +reveal_type(b_ % AR_b) # E: numpy.ndarray[Any, numpy.dtype[{int8}]] + +reveal_type(divmod(b_, b)) # E: Tuple[{int8}, {int8}] +reveal_type(divmod(b_, i)) # E: Tuple[{int_}, {int_}] +reveal_type(divmod(b_, f)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(b_, b_)) # E: Tuple[{int8}, {int8}] +reveal_type(divmod(b_, i8)) # E: Tuple[{int64}, {int64}] +reveal_type(divmod(b_, u8)) # E: Tuple[{uint64}, {uint64}] +reveal_type(divmod(b_, f8)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(b_, AR_b)) # E: numpy.ndarray[Any, numpy.dtype[{int8}]], numpy.ndarray[Any, numpy.dtype[{int8}]]] + +reveal_type(b % b_) # E: {int8} +reveal_type(i % b_) # E: {int_} +reveal_type(f % b_) # E: {float64} +reveal_type(b_ % b_) # E: {int8} +reveal_type(i8 % b_) # E: {int64} +reveal_type(u8 % b_) # E: {uint64} +reveal_type(f8 % b_) # E: {float64} +reveal_type(AR_b % b_) # E: numpy.ndarray[Any, numpy.dtype[{int8}]] + +reveal_type(divmod(b, b_)) # E: Tuple[{int8}, {int8}] +reveal_type(divmod(i, b_)) # E: Tuple[{int_}, {int_}] +reveal_type(divmod(f, b_)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(b_, b_)) # E: Tuple[{int8}, {int8}] +reveal_type(divmod(i8, b_)) # E: Tuple[{int64}, {int64}] +reveal_type(divmod(u8, b_)) # E: Tuple[{uint64}, {uint64}] +reveal_type(divmod(f8, b_)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(AR_b, b_)) # E: numpy.ndarray[Any, numpy.dtype[{int8}]], numpy.ndarray[Any, numpy.dtype[{int8}]]] + +# int + +reveal_type(i8 % b) # E: {int64} +reveal_type(i8 % i) # E: {int64} +reveal_type(i8 % f) # E: {float64} +reveal_type(i8 % i8) # E: {int64} +reveal_type(i8 % f8) # E: {float64} +reveal_type(i4 % i8) # E: {int64} +reveal_type(i4 % f8) # E: {float64} +reveal_type(i4 % i4) # E: {int32} +reveal_type(i4 % f4) # E: {float32} +reveal_type(i8 % AR_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] + +reveal_type(divmod(i8, b)) # E: Tuple[{int64}, {int64}] +reveal_type(divmod(i8, i)) # E: Tuple[{int64}, {int64}] +reveal_type(divmod(i8, f)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(i8, i8)) # E: Tuple[{int64}, {int64}] +reveal_type(divmod(i8, f8)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(i8, i4)) # E: Tuple[{int64}, {int64}] +reveal_type(divmod(i8, f4)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(i4, i4)) # E: Tuple[{int32}, {int32}] +reveal_type(divmod(i4, f4)) # E: Tuple[{float32}, {float32}] +reveal_type(divmod(i8, AR_b)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] + +reveal_type(b % i8) # E: {int64} +reveal_type(i % i8) # E: {int64} +reveal_type(f % i8) # E: {float64} +reveal_type(i8 % i8) # E: {int64} +reveal_type(f8 % i8) # E: {float64} +reveal_type(i8 % i4) # E: {int64} +reveal_type(f8 % i4) # E: {float64} +reveal_type(i4 % i4) # E: {int32} +reveal_type(f4 % i4) # E: {float32} 
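The %/divmod expectations above and below simply mirror NumPy's type-promotion rules for scalars; the stubs encode the same lattice that np.result_type computes. A quick runtime spot-check (a sketch):

import numpy as np

print(type(np.int32(7) % np.int32(3)))           # numpy.int32
print(type(np.int64(7) % np.float32(2.0)))       # numpy.float64: int64 does not fit in float32
print(divmod(np.float32(7.0), np.float32(2.0)))  # a pair of float32 scalars
print(np.result_type(np.bool_, np.int64))        # int64, matching "b_ % i8" above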
+reveal_type(AR_b % i8) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]] + +reveal_type(divmod(b, i8)) # E: Tuple[{int64}, {int64}] +reveal_type(divmod(i, i8)) # E: Tuple[{int64}, {int64}] +reveal_type(divmod(f, i8)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(i8, i8)) # E: Tuple[{int64}, {int64}] +reveal_type(divmod(f8, i8)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(i4, i8)) # E: Tuple[{int64}, {int64}] +reveal_type(divmod(f4, i8)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(i4, i4)) # E: Tuple[{int32}, {int32}] +reveal_type(divmod(f4, i4)) # E: Tuple[{float32}, {float32}] +reveal_type(divmod(AR_b, i8)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]] + +# float + +reveal_type(f8 % b) # E: {float64} +reveal_type(f8 % i) # E: {float64} +reveal_type(f8 % f) # E: {float64} +reveal_type(i8 % f4) # E: {float64} +reveal_type(f4 % f4) # E: {float32} +reveal_type(f8 % AR_b) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] + +reveal_type(divmod(f8, b)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(f8, i)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(f8, f)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(f8, f8)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(f8, f4)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(f4, f4)) # E: Tuple[{float32}, {float32}] +reveal_type(divmod(f8, AR_b)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] + +reveal_type(b % f8) # E: {float64} +reveal_type(i % f8) # E: {float64} +reveal_type(f % f8) # E: {float64} +reveal_type(f8 % f8) # E: {float64} +reveal_type(f8 % f8) # E: {float64} +reveal_type(f4 % f4) # E: {float32} +reveal_type(AR_b % f8) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] + +reveal_type(divmod(b, f8)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(i, f8)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(f, f8)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(f8, f8)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(f4, f8)) # E: Tuple[{float64}, {float64}] +reveal_type(divmod(f4, f4)) # E: Tuple[{float32}, {float32}] +reveal_type(divmod(AR_b, f8)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]] diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/modules.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/modules.py new file mode 100644 index 0000000000000000000000000000000000000000..45ae575f8b0ce06a8ba88a65f2ba0fa251fd891d --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/modules.py @@ -0,0 +1,48 @@ +import numpy as np +from numpy import f2py + +reveal_type(np) # E: ModuleType + +reveal_type(np.char) # E: ModuleType +reveal_type(np.ctypeslib) # E: ModuleType +reveal_type(np.emath) # E: ModuleType +reveal_type(np.fft) # E: ModuleType +reveal_type(np.lib) # E: ModuleType +reveal_type(np.linalg) # E: ModuleType +reveal_type(np.ma) # E: ModuleType +reveal_type(np.matrixlib) # E: ModuleType +reveal_type(np.polynomial) # E: ModuleType +reveal_type(np.random) # E: ModuleType +reveal_type(np.rec) # E: ModuleType +reveal_type(np.testing) # E: ModuleType +reveal_type(np.version) # E: ModuleType + +reveal_type(np.lib.format) # E: ModuleType +reveal_type(np.lib.mixins) # E: ModuleType +reveal_type(np.lib.scimath) # E: ModuleType +reveal_type(np.lib.stride_tricks) # E: ModuleType 
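One thing worth stressing about every file in this directory: reveal_type exists only inside the type checker, so importing or executing these modules would raise NameError. When a reveal is wanted in real code it has to be guarded; a sketch:

from typing import TYPE_CHECKING
import numpy as np

if TYPE_CHECKING:
    reveal_type(np.fft)   # seen by mypy only; reported as ModuleType
else:
    print(type(np.fft))   # <class 'module'> at runtime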
+reveal_type(np.ma.extras) # E: ModuleType +reveal_type(np.polynomial.chebyshev) # E: ModuleType +reveal_type(np.polynomial.hermite) # E: ModuleType +reveal_type(np.polynomial.hermite_e) # E: ModuleType +reveal_type(np.polynomial.laguerre) # E: ModuleType +reveal_type(np.polynomial.legendre) # E: ModuleType +reveal_type(np.polynomial.polynomial) # E: ModuleType + +# TODO: Remove when annotations have been added to `np.testing.assert_equal` +reveal_type(np.testing.assert_equal) # E: Any + +reveal_type(np.__path__) # E: list[builtins.str] +reveal_type(np.__version__) # E: str +reveal_type(np.__git_version__) # E: str + +reveal_type(np.__all__) # E: list[builtins.str] +reveal_type(np.char.__all__) # E: list[builtins.str] +reveal_type(np.ctypeslib.__all__) # E: list[builtins.str] +reveal_type(np.emath.__all__) # E: list[builtins.str] +reveal_type(np.lib.__all__) # E: list[builtins.str] +reveal_type(np.ma.__all__) # E: list[builtins.str] +reveal_type(np.random.__all__) # E: list[builtins.str] +reveal_type(np.rec.__all__) # E: list[builtins.str] +reveal_type(np.testing.__all__) # E: list[builtins.str] +reveal_type(f2py.__all__) # E: list[builtins.str] diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/multiarray.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/multiarray.py new file mode 100644 index 0000000000000000000000000000000000000000..4e4e07a16c6fdd66c7e3a0a529034005d92e48b3 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/multiarray.py @@ -0,0 +1,35 @@ +from typing import Any +import numpy as np + +AR_f8: np.ndarray[Any, np.dtype[np.float64]] +AR_i8: np.ndarray[Any, np.dtype[np.int64]] + +b_f8 = np.broadcast(AR_f8) +b_i8_f8_f8 = np.broadcast(AR_i8, AR_f8, AR_f8) + +reveal_type(next(b_f8)) # E: tuple[Any] +reveal_type(next(b_i8_f8_f8)) # E: tuple[Any] + +reveal_type(b_f8.reset()) # E: None +reveal_type(b_i8_f8_f8.reset()) # E: None + +reveal_type(b_f8.index) # E: int +reveal_type(b_i8_f8_f8.index) # E: int + +reveal_type(b_f8.iters) # E: tuple[numpy.flatiter[Any]] +reveal_type(b_i8_f8_f8.iters) # E: tuple[numpy.flatiter[Any]] + +reveal_type(b_f8.nd) # E: int +reveal_type(b_i8_f8_f8.nd) # E: int + +reveal_type(b_f8.ndim) # E: int +reveal_type(b_i8_f8_f8.ndim) # E: int + +reveal_type(b_f8.numiter) # E: int +reveal_type(b_i8_f8_f8.numiter) # E: int + +reveal_type(b_f8.shape) # E: tuple[builtins.int] +reveal_type(b_i8_f8_f8.shape) # E: tuple[builtins.int] + +reveal_type(b_f8.size) # E: int +reveal_type(b_i8_f8_f8.size) # E: int diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/nbit_base_example.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/nbit_base_example.py new file mode 100644 index 0000000000000000000000000000000000000000..3c0a726f5efe9d7d786973e54bb62d2f7ae7f2f4 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/nbit_base_example.py @@ -0,0 +1,19 @@ +from typing import TypeVar, Union +import numpy as np +import numpy.typing as npt + +T1 = TypeVar("T1", bound=npt.NBitBase) +T2 = TypeVar("T2", bound=npt.NBitBase) + +def add(a: np.floating[T1], b: np.integer[T2]) -> np.floating[Union[T1, T2]]: + return a + b + +i8: np.int64 +i4: np.int32 +f8: np.float64 +f4: np.float32 + +reveal_type(add(f8, i8)) # E: {float64} +reveal_type(add(f4, i8)) # E: {float64} +reveal_type(add(f8, i4)) # E: {float64} +reveal_type(add(f4, i4)) # E: {float32} diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/ndarray_conversion.py 
b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/ndarray_conversion.py new file mode 100644 index 0000000000000000000000000000000000000000..9a6da656f3ee19a39c8648d35672c0cfcd36d25d --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/ndarray_conversion.py @@ -0,0 +1,54 @@ +import numpy as np + +nd = np.array([[1, 2], [3, 4]]) + +# item +reveal_type(nd.item()) # E: Any +reveal_type(nd.item(1)) # E: Any +reveal_type(nd.item(0, 1)) # E: Any +reveal_type(nd.item((0, 1))) # E: Any + +# tolist +reveal_type(nd.tolist()) # E: Any + +# itemset does not return a value +# tostring is pretty simple +# tobytes is pretty simple +# tofile does not return a value +# dump does not return a value +# dumps is pretty simple + +# astype +reveal_type(nd.astype("float")) # E: numpy.ndarray +reveal_type(nd.astype(float)) # E: numpy.ndarray +reveal_type(nd.astype(float, "K")) # E: numpy.ndarray +reveal_type(nd.astype(float, "K", "unsafe")) # E: numpy.ndarray +reveal_type(nd.astype(float, "K", "unsafe", True)) # E: numpy.ndarray +reveal_type(nd.astype(float, "K", "unsafe", True, True)) # E: numpy.ndarray + +# byteswap +reveal_type(nd.byteswap()) # E: numpy.ndarray +reveal_type(nd.byteswap(True)) # E: numpy.ndarray + +# copy +reveal_type(nd.copy()) # E: numpy.ndarray +reveal_type(nd.copy("C")) # E: numpy.ndarray + +# view +class SubArray(np.ndarray): + pass + + +reveal_type(nd.view()) # E: numpy.ndarray +reveal_type(nd.view(np.int64)) # E: numpy.ndarray +# replace `Any` with `numpy.matrix` when `matrix` will be added to stubs +reveal_type(nd.view(np.int64, np.matrix)) # E: Any +reveal_type(nd.view(np.int64, SubArray)) # E: SubArray + +# getfield +reveal_type(nd.getfield("float")) # E: numpy.ndarray +reveal_type(nd.getfield(float)) # E: numpy.ndarray +reveal_type(nd.getfield(float, 8)) # E: numpy.ndarray + +# setflags does not return a value +# fill does not return a value diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/ndarray_misc.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/ndarray_misc.py new file mode 100644 index 0000000000000000000000000000000000000000..111523bd310d2c1e0f60d38496c831f1e4dcee8f --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/ndarray_misc.py @@ -0,0 +1,191 @@ +""" +Tests for miscellaneous (non-magic) ``np.ndarray``/``np.generic`` methods. + +More extensive tests are performed for the methods' +function-based counterpart in `../from_numeric.py`. + +""" + +import operator +import ctypes as ct +from typing import Any + +import numpy as np + +class SubClass(np.ndarray): ... 
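The SubClass fixture just declared drives a pattern used throughout this file: whenever a method or function takes out=, the stubs return the type of the out argument, because NumPy writes into that array and hands the very same object back. A runtime sketch with a throwaway subclass (buf and the shapes are illustrative):

import numpy as np

class SubClass(np.ndarray): ...

buf = np.empty(3, dtype=np.float64).view(SubClass)
res = np.add(np.ones(3), np.ones(3), out=buf)
print(res is buf, type(res).__name__)  # True SubClass: `out` is returned as-is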
+ +f8: np.float64 +B: SubClass +AR_f8: np.ndarray[Any, np.dtype[np.float64]] +AR_i8: np.ndarray[Any, np.dtype[np.int64]] +AR_U: np.ndarray[Any, np.dtype[np.str_]] + +ctypes_obj = AR_f8.ctypes + +reveal_type(ctypes_obj.data) # E: int +reveal_type(ctypes_obj.shape) # E: ctypes.Array[ctypes.c_int64] +reveal_type(ctypes_obj.strides) # E: ctypes.Array[ctypes.c_int64] +reveal_type(ctypes_obj._as_parameter_) # E: ctypes.c_void_p + +reveal_type(ctypes_obj.data_as(ct.c_void_p)) # E: ctypes.c_void_p +reveal_type(ctypes_obj.shape_as(ct.c_longlong)) # E: ctypes.Array[ctypes.c_longlong] +reveal_type(ctypes_obj.strides_as(ct.c_ubyte)) # E: ctypes.Array[ctypes.c_ubyte] + +reveal_type(f8.all()) # E: numpy.bool_ +reveal_type(AR_f8.all()) # E: numpy.bool_ +reveal_type(AR_f8.all(axis=0)) # E: Any +reveal_type(AR_f8.all(keepdims=True)) # E: Any +reveal_type(AR_f8.all(out=B)) # E: SubClass + +reveal_type(f8.any()) # E: numpy.bool_ +reveal_type(AR_f8.any()) # E: numpy.bool_ +reveal_type(AR_f8.any(axis=0)) # E: Any +reveal_type(AR_f8.any(keepdims=True)) # E: Any +reveal_type(AR_f8.any(out=B)) # E: SubClass + +reveal_type(f8.argmax()) # E: {intp} +reveal_type(AR_f8.argmax()) # E: {intp} +reveal_type(AR_f8.argmax(axis=0)) # E: Any +reveal_type(AR_f8.argmax(out=B)) # E: SubClass + +reveal_type(f8.argmin()) # E: {intp} +reveal_type(AR_f8.argmin()) # E: {intp} +reveal_type(AR_f8.argmin(axis=0)) # E: Any +reveal_type(AR_f8.argmin(out=B)) # E: SubClass + +reveal_type(f8.argsort()) # E: numpy.ndarray[Any, Any] +reveal_type(AR_f8.argsort()) # E: numpy.ndarray[Any, Any] + +reveal_type(f8.astype(np.int64).choose([()])) # E: numpy.ndarray[Any, Any] +reveal_type(AR_f8.choose([0])) # E: numpy.ndarray[Any, Any] +reveal_type(AR_f8.choose([0], out=B)) # E: SubClass + +reveal_type(f8.clip(1)) # E: Any +reveal_type(AR_f8.clip(1)) # E: Any +reveal_type(AR_f8.clip(None, 1)) # E: Any +reveal_type(AR_f8.clip(1, out=B)) # E: SubClass +reveal_type(AR_f8.clip(None, 1, out=B)) # E: SubClass + +reveal_type(f8.compress([0])) # E: numpy.ndarray[Any, Any] +reveal_type(AR_f8.compress([0])) # E: numpy.ndarray[Any, Any] +reveal_type(AR_f8.compress([0], out=B)) # E: SubClass + +reveal_type(f8.conj()) # E: {float64} +reveal_type(AR_f8.conj()) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] +reveal_type(B.conj()) # E: SubClass + +reveal_type(f8.conjugate()) # E: {float64} +reveal_type(AR_f8.conjugate()) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] +reveal_type(B.conjugate()) # E: SubClass + +reveal_type(f8.cumprod()) # E: numpy.ndarray[Any, Any] +reveal_type(AR_f8.cumprod()) # E: numpy.ndarray[Any, Any] +reveal_type(AR_f8.cumprod(out=B)) # E: SubClass + +reveal_type(f8.cumsum()) # E: numpy.ndarray[Any, Any] +reveal_type(AR_f8.cumsum()) # E: numpy.ndarray[Any, Any] +reveal_type(AR_f8.cumsum(out=B)) # E: SubClass + +reveal_type(f8.max()) # E: Any +reveal_type(AR_f8.max()) # E: Any +reveal_type(AR_f8.max(axis=0)) # E: Any +reveal_type(AR_f8.max(keepdims=True)) # E: Any +reveal_type(AR_f8.max(out=B)) # E: SubClass + +reveal_type(f8.mean()) # E: Any +reveal_type(AR_f8.mean()) # E: Any +reveal_type(AR_f8.mean(axis=0)) # E: Any +reveal_type(AR_f8.mean(keepdims=True)) # E: Any +reveal_type(AR_f8.mean(out=B)) # E: SubClass + +reveal_type(f8.min()) # E: Any +reveal_type(AR_f8.min()) # E: Any +reveal_type(AR_f8.min(axis=0)) # E: Any +reveal_type(AR_f8.min(keepdims=True)) # E: Any +reveal_type(AR_f8.min(out=B)) # E: SubClass + +reveal_type(f8.newbyteorder()) # E: {float64} +reveal_type(AR_f8.newbyteorder()) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] 
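Methods such as conj, round, repeat and newbyteorder above keep the element type in the stubs: a float64 array stays float64, only the container (or, for newbyteorder, the byte order) changes. A runtime spot-check (a sketch; newbyteorder is present in this NumPy version):

import numpy as np

ar = np.array([1.25, 2.5])                   # dtype float64
print(ar.round().dtype, ar.repeat(2).dtype)  # float64 float64
print(ar.newbyteorder().dtype.byteorder)     # byte order flips; width is unchanged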
+reveal_type(B.newbyteorder('|')) # E: SubClass + +reveal_type(f8.prod()) # E: Any +reveal_type(AR_f8.prod()) # E: Any +reveal_type(AR_f8.prod(axis=0)) # E: Any +reveal_type(AR_f8.prod(keepdims=True)) # E: Any +reveal_type(AR_f8.prod(out=B)) # E: SubClass + +reveal_type(f8.ptp()) # E: Any +reveal_type(AR_f8.ptp()) # E: Any +reveal_type(AR_f8.ptp(axis=0)) # E: Any +reveal_type(AR_f8.ptp(keepdims=True)) # E: Any +reveal_type(AR_f8.ptp(out=B)) # E: SubClass + +reveal_type(f8.round()) # E: {float64} +reveal_type(AR_f8.round()) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] +reveal_type(AR_f8.round(out=B)) # E: SubClass + +reveal_type(f8.repeat(1)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] +reveal_type(AR_f8.repeat(1)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] +reveal_type(B.repeat(1)) # E: numpy.ndarray[Any, Any] + +reveal_type(f8.std()) # E: Any +reveal_type(AR_f8.std()) # E: Any +reveal_type(AR_f8.std(axis=0)) # E: Any +reveal_type(AR_f8.std(keepdims=True)) # E: Any +reveal_type(AR_f8.std(out=B)) # E: SubClass + +reveal_type(f8.sum()) # E: Any +reveal_type(AR_f8.sum()) # E: Any +reveal_type(AR_f8.sum(axis=0)) # E: Any +reveal_type(AR_f8.sum(keepdims=True)) # E: Any +reveal_type(AR_f8.sum(out=B)) # E: SubClass + +reveal_type(f8.take(0)) # E: {float64} +reveal_type(AR_f8.take(0)) # E: {float64} +reveal_type(AR_f8.take([0])) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] +reveal_type(AR_f8.take(0, out=B)) # E: SubClass +reveal_type(AR_f8.take([0], out=B)) # E: SubClass + +reveal_type(f8.var()) # E: Any +reveal_type(AR_f8.var()) # E: Any +reveal_type(AR_f8.var(axis=0)) # E: Any +reveal_type(AR_f8.var(keepdims=True)) # E: Any +reveal_type(AR_f8.var(out=B)) # E: SubClass + +reveal_type(AR_f8.argpartition([0])) # E: numpy.ndarray[Any, numpy.dtype[{intp}]] + +reveal_type(AR_f8.diagonal()) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] + +reveal_type(AR_f8.dot(1)) # E: numpy.ndarray[Any, Any] +reveal_type(AR_f8.dot([1])) # E: Any +reveal_type(AR_f8.dot(1, out=B)) # E: SubClass + +reveal_type(AR_f8.nonzero()) # E: tuple[numpy.ndarray[Any, numpy.dtype[{intp}]]] + +reveal_type(AR_f8.searchsorted(1)) # E: {intp} +reveal_type(AR_f8.searchsorted([1])) # E: numpy.ndarray[Any, numpy.dtype[{intp}]] + +reveal_type(AR_f8.trace()) # E: Any +reveal_type(AR_f8.trace(out=B)) # E: SubClass + +reveal_type(AR_f8.item()) # E: float +reveal_type(AR_U.item()) # E: str + +reveal_type(AR_f8.ravel()) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] +reveal_type(AR_U.ravel()) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] + +reveal_type(AR_f8.flatten()) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] +reveal_type(AR_U.flatten()) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] + +reveal_type(AR_f8.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] +reveal_type(AR_U.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] + +reveal_type(int(AR_f8)) # E: int +reveal_type(int(AR_U)) # E: int + +reveal_type(float(AR_f8)) # E: float +reveal_type(float(AR_U)) # E: float + +reveal_type(complex(AR_f8)) # E: complex + +reveal_type(operator.index(AR_i8)) # E: int diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.py new file mode 100644 index 0000000000000000000000000000000000000000..506e57fbaad145f188d3d8126c716e432278d4b6 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.py @@ -0,0 +1,35 @@ +import numpy as np + +nd = 
np.array([[1, 2], [3, 4]]) + +# reshape +reveal_type(nd.reshape()) # E: numpy.ndarray +reveal_type(nd.reshape(4)) # E: numpy.ndarray +reveal_type(nd.reshape(2, 2)) # E: numpy.ndarray +reveal_type(nd.reshape((2, 2))) # E: numpy.ndarray + +reveal_type(nd.reshape((2, 2), order="C")) # E: numpy.ndarray +reveal_type(nd.reshape(4, order="C")) # E: numpy.ndarray + +# resize does not return a value + +# transpose +reveal_type(nd.transpose()) # E: numpy.ndarray +reveal_type(nd.transpose(1, 0)) # E: numpy.ndarray +reveal_type(nd.transpose((1, 0))) # E: numpy.ndarray + +# swapaxes +reveal_type(nd.swapaxes(0, 1)) # E: numpy.ndarray + +# flatten +reveal_type(nd.flatten()) # E: numpy.ndarray +reveal_type(nd.flatten("C")) # E: numpy.ndarray + +# ravel +reveal_type(nd.ravel()) # E: numpy.ndarray +reveal_type(nd.ravel("C")) # E: numpy.ndarray + +# squeeze +reveal_type(nd.squeeze()) # E: numpy.ndarray +reveal_type(nd.squeeze(0)) # E: numpy.ndarray +reveal_type(nd.squeeze((0, 2))) # E: numpy.ndarray diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/nditer.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/nditer.py new file mode 100644 index 0000000000000000000000000000000000000000..865805b998b84ffac896cc54a5afb21f3c323ef3 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/nditer.py @@ -0,0 +1,19 @@ +import copy +import numpy as np + +nditer_obj: np.nditer + +with nditer_obj as context: + reveal_type(context) # E: numpy.nditer + +reveal_type(len(nditer_obj)) # E: builtins.int +reveal_type(copy.copy(nditer_obj)) # E: numpy.nditer +reveal_type(next(nditer_obj)) # E: Any +reveal_type(iter(nditer_obj)) # E: typing.Iterator[Any] +reveal_type(nditer_obj[1]) # E: Any +reveal_type(nditer_obj[1:5]) # E: Any + +nditer_obj[1] = 1 +nditer_obj[1:5] = 1 +del nditer_obj[1] +del nditer_obj[1:5] diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/numeric.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/numeric.py new file mode 100644 index 0000000000000000000000000000000000000000..33c33c94a56744599b19d14f972bb5c51f49a6d5 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/numeric.py @@ -0,0 +1,89 @@ +""" +Tests for :mod:`numpy.core.numeric`. + +Does not include tests which fall under ``array_constructors``. + +""" + +from typing import List +import numpy as np + +class SubClass(np.ndarray): + ... 
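As in ndarray_misc.py, SubClass here exists to check that out= propagates the array subclass (see np.outer(A, A, out=C) below). The count_nonzero overloads that open the file distinguish the full reduction, which yields a plain int, from the axis=/keepdims= forms, which yield arrays and are therefore typed Any. Runtime sketch:

import numpy as np

A = np.eye(3)
print(np.count_nonzero(A))                 # 3, a Python int
print(np.count_nonzero(A, axis=0))         # array([1, 1, 1])
print(np.count_nonzero(A, keepdims=True))  # array([[3]])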
+ +i8: np.int64 + +A: np.ndarray +B: List[int] +C: SubClass + +reveal_type(np.count_nonzero(i8)) # E: int +reveal_type(np.count_nonzero(A)) # E: int +reveal_type(np.count_nonzero(B)) # E: int +reveal_type(np.count_nonzero(A, keepdims=True)) # E: Any +reveal_type(np.count_nonzero(A, axis=0)) # E: Any + +reveal_type(np.isfortran(i8)) # E: bool +reveal_type(np.isfortran(A)) # E: bool + +reveal_type(np.argwhere(i8)) # E: numpy.ndarray[Any, Any] +reveal_type(np.argwhere(A)) # E: numpy.ndarray[Any, Any] + +reveal_type(np.flatnonzero(i8)) # E: numpy.ndarray[Any, Any] +reveal_type(np.flatnonzero(A)) # E: numpy.ndarray[Any, Any] + +reveal_type(np.correlate(B, A, mode="valid")) # E: numpy.ndarray[Any, Any] +reveal_type(np.correlate(A, A, mode="same")) # E: numpy.ndarray[Any, Any] + +reveal_type(np.convolve(B, A, mode="valid")) # E: numpy.ndarray[Any, Any] +reveal_type(np.convolve(A, A, mode="same")) # E: numpy.ndarray[Any, Any] + +reveal_type(np.outer(i8, A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.outer(B, A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.outer(A, A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.outer(A, A, out=C)) # E: SubClass + +reveal_type(np.tensordot(B, A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.tensordot(A, A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.tensordot(A, A, axes=0)) # E: numpy.ndarray[Any, Any] +reveal_type(np.tensordot(A, A, axes=(0, 1))) # E: numpy.ndarray[Any, Any] + +reveal_type(np.isscalar(i8)) # E: bool +reveal_type(np.isscalar(A)) # E: bool +reveal_type(np.isscalar(B)) # E: bool + +reveal_type(np.roll(A, 1)) # E: numpy.ndarray[Any, Any] +reveal_type(np.roll(A, (1, 2))) # E: numpy.ndarray[Any, Any] +reveal_type(np.roll(B, 1)) # E: numpy.ndarray[Any, Any] + +reveal_type(np.rollaxis(A, 0, 1)) # E: numpy.ndarray[Any, Any] + +reveal_type(np.moveaxis(A, 0, 1)) # E: numpy.ndarray[Any, Any] +reveal_type(np.moveaxis(A, (0, 1), (1, 2))) # E: numpy.ndarray[Any, Any] + +reveal_type(np.cross(B, A)) # E: numpy.ndarray[Any, Any] +reveal_type(np.cross(A, A)) # E: numpy.ndarray[Any, Any] + +reveal_type(np.indices([0, 1, 2])) # E: numpy.ndarray[Any, Any] +reveal_type(np.indices([0, 1, 2], sparse=False)) # E: numpy.ndarray[Any, Any] +reveal_type(np.indices([0, 1, 2], sparse=True)) # E: tuple[numpy.ndarray[Any, Any]] + +reveal_type(np.binary_repr(1)) # E: str + +reveal_type(np.base_repr(1)) # E: str + +reveal_type(np.allclose(i8, A)) # E: bool +reveal_type(np.allclose(B, A)) # E: bool +reveal_type(np.allclose(A, A)) # E: bool + +reveal_type(np.isclose(i8, A)) # E: Any +reveal_type(np.isclose(B, A)) # E: Any +reveal_type(np.isclose(A, A)) # E: Any + +reveal_type(np.array_equal(i8, A)) # E: bool +reveal_type(np.array_equal(B, A)) # E: bool +reveal_type(np.array_equal(A, A)) # E: bool + +reveal_type(np.array_equiv(i8, A)) # E: bool +reveal_type(np.array_equiv(B, A)) # E: bool +reveal_type(np.array_equiv(A, A)) # E: bool diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/numerictypes.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/numerictypes.py new file mode 100644 index 0000000000000000000000000000000000000000..970c9b97e86c01809f04376efeafe622994b5660 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/numerictypes.py @@ -0,0 +1,36 @@ +import numpy as np + +reveal_type(np.issctype(np.generic)) # E: bool +reveal_type(np.issctype("foo")) # E: bool + +reveal_type(np.obj2sctype("S8")) # E: Union[numpy.generic, None] +reveal_type(np.obj2sctype("S8", default=None)) # E: Union[numpy.generic, None] +reveal_type( + 
np.obj2sctype("foo", default=int) # E: Union[numpy.generic, Type[builtins.int*]] +) + +reveal_type(np.issubclass_(np.float64, float)) # E: bool +reveal_type(np.issubclass_(np.float64, (int, float))) # E: bool + +reveal_type(np.sctype2char("S8")) # E: str +reveal_type(np.sctype2char(list)) # E: str + +reveal_type(np.find_common_type([np.int64], [np.int64])) # E: numpy.dtype + +reveal_type(np.cast[int]) # E: _CastFunc +reveal_type(np.cast["i8"]) # E: _CastFunc +reveal_type(np.cast[np.int64]) # E: _CastFunc + +reveal_type(np.nbytes[int]) # E: int +reveal_type(np.nbytes["i8"]) # E: int +reveal_type(np.nbytes[np.int64]) # E: int + +reveal_type(np.ScalarType) # E: Tuple +reveal_type(np.ScalarType[0]) # E: Type[builtins.int] +reveal_type(np.ScalarType[4]) # E: Type[builtins.bool] +reveal_type(np.ScalarType[9]) # E: Type[{csingle}] +reveal_type(np.ScalarType[11]) # E: Type[{clongdouble}] + +reveal_type(np.typecodes["Character"]) # E: Literal['c'] +reveal_type(np.typecodes["Complex"]) # E: Literal['FDG'] +reveal_type(np.typecodes["All"]) # E: Literal['?bhilqpBHILQPefdgFDGSUVOMm'] diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/random.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/random.py new file mode 100644 index 0000000000000000000000000000000000000000..3693af3dd8361c482510403eb11e0c1400cbb41b --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/random.py @@ -0,0 +1,1539 @@ +from __future__ import annotations + +from typing import Any, List + +import numpy as np + +def_rng = np.random.default_rng() +seed_seq = np.random.SeedSequence() +mt19937 = np.random.MT19937() +pcg64 = np.random.PCG64() +sfc64 = np.random.SFC64() +philox = np.random.Philox() +seedless_seq = np.random.bit_generator.SeedlessSeedSequence() + +reveal_type(def_rng) # E: numpy.random._generator.Generator +reveal_type(mt19937) # E: numpy.random._mt19937.MT19937 +reveal_type(pcg64) # E: numpy.random._pcg64.PCG64 +reveal_type(sfc64) # E: numpy.random._sfc64.SFC64 +reveal_type(philox) # E: numpy.random._philox.Philox +reveal_type(seed_seq) # E: numpy.random.bit_generator.SeedSequence +reveal_type(seedless_seq) # E: numpy.random.bit_generator.SeedlessSeedSequence + +mt19937_jumped = mt19937.jumped() +mt19937_jumped3 = mt19937.jumped(3) +mt19937_raw = mt19937.random_raw() +mt19937_raw_arr = mt19937.random_raw(5) + +reveal_type(mt19937_jumped) # E: numpy.random._mt19937.MT19937 +reveal_type(mt19937_jumped3) # E: numpy.random._mt19937.MT19937 +reveal_type(mt19937_raw) # E: int +reveal_type(mt19937_raw_arr) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(mt19937.lock) # E: threading.Lock + +pcg64_jumped = pcg64.jumped() +pcg64_jumped3 = pcg64.jumped(3) +pcg64_adv = pcg64.advance(3) +pcg64_raw = pcg64.random_raw() +pcg64_raw_arr = pcg64.random_raw(5) + +reveal_type(pcg64_jumped) # E: numpy.random._pcg64.PCG64 +reveal_type(pcg64_jumped3) # E: numpy.random._pcg64.PCG64 +reveal_type(pcg64_adv) # E: numpy.random._pcg64.PCG64 +reveal_type(pcg64_raw) # E: int +reveal_type(pcg64_raw_arr) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(pcg64.lock) # E: threading.Lock + +philox_jumped = philox.jumped() +philox_jumped3 = philox.jumped(3) +philox_adv = philox.advance(3) +philox_raw = philox.random_raw() +philox_raw_arr = philox.random_raw(5) + +reveal_type(philox_jumped) # E: numpy.random._philox.Philox +reveal_type(philox_jumped3) # E: numpy.random._philox.Philox +reveal_type(philox_adv) # E: 
numpy.random._philox.Philox +reveal_type(philox_raw) # E: int +reveal_type(philox_raw_arr) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(philox.lock) # E: threading.Lock + +sfc64_raw = sfc64.random_raw() +sfc64_raw_arr = sfc64.random_raw(5) + +reveal_type(sfc64_raw) # E: int +reveal_type(sfc64_raw_arr) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(sfc64.lock) # E: threading.Lock + +reveal_type(seed_seq.pool) # numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(seed_seq.entropy) # E:Union[None, int, Sequence[int]] +reveal_type(seed_seq.spawn(1)) # E: list[numpy.random.bit_generator.SeedSequence] +reveal_type(seed_seq.generate_state(8, "uint32")) # E: numpy.ndarray[Any, numpy.dtype[Union[numpy.unsignedinteger[numpy.typing._32Bit], numpy.unsignedinteger[numpy.typing._64Bit]]]] +reveal_type(seed_seq.generate_state(8, "uint64")) # E: numpy.ndarray[Any, numpy.dtype[Union[numpy.unsignedinteger[numpy.typing._32Bit], numpy.unsignedinteger[numpy.typing._64Bit]]]] + + +def_gen: np.random.Generator = np.random.default_rng() + +D_arr_0p1: np.ndarray[Any, np.dtype[np.float64]] = np.array([0.1]) +D_arr_0p5: np.ndarray[Any, np.dtype[np.float64]] = np.array([0.5]) +D_arr_0p9: np.ndarray[Any, np.dtype[np.float64]] = np.array([0.9]) +D_arr_1p5: np.ndarray[Any, np.dtype[np.float64]] = np.array([1.5]) +I_arr_10: np.ndarray[Any, np.dtype[np.int_]] = np.array([10], dtype=np.int_) +I_arr_20: np.ndarray[Any, np.dtype[np.int_]] = np.array([20], dtype=np.int_) +D_arr_like_0p1: List[float] = [0.1] +D_arr_like_0p5: List[float] = [0.5] +D_arr_like_0p9: List[float] = [0.9] +D_arr_like_1p5: List[float] = [1.5] +I_arr_like_10: List[int] = [10] +I_arr_like_20: List[int] = [20] +D_2D_like: List[List[float]] = [[1, 2], [2, 3], [3, 4], [4, 5.1]] +D_2D: np.ndarray[Any, np.dtype[np.float64]] = np.array(D_2D_like) +S_out: np.ndarray[Any, np.dtype[np.float32]] = np.empty(1, dtype=np.float32) +D_out: np.ndarray[Any, np.dtype[np.float64]] = np.empty(1) + +reveal_type(def_gen.standard_normal()) # E: float +reveal_type(def_gen.standard_normal(dtype=np.float32)) # E: float +reveal_type(def_gen.standard_normal(dtype="float32")) # E: float +reveal_type(def_gen.standard_normal(dtype="double")) # E: float +reveal_type(def_gen.standard_normal(dtype=np.float64)) # E: float +reveal_type(def_gen.standard_normal(size=None)) # E: float +reveal_type(def_gen.standard_normal(size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_normal(size=1, dtype=np.float32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._32Bit]]] +reveal_type(def_gen.standard_normal(size=1, dtype="f4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._32Bit]]] +reveal_type(def_gen.standard_normal(size=1, dtype="float32", out=S_out)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._32Bit]]] +reveal_type(def_gen.standard_normal(dtype=np.float32, out=S_out)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._32Bit]]] +reveal_type(def_gen.standard_normal(size=1, dtype=np.float64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_normal(size=1, dtype="float64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_normal(size=1, dtype="f8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] 
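The long runs of standard_normal/random expectations that follow all encode one dispatch rule: no size and no out gives a Python float, dtype= selects between 32- and 64-bit array results, and out= must carry the matching dtype (hence the separate S_out/D_out fixtures). A runtime sketch, with rng and buf as illustrative names:

import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal()                       # a Python float
a = rng.standard_normal(4, dtype=np.float32)    # float32 ndarray
buf = np.empty(4, dtype=np.float32)
rng.standard_normal(out=buf, dtype=np.float32)  # fills and returns buf; size inferred
print(type(x).__name__, a.dtype, buf.dtype)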
+reveal_type(def_gen.standard_normal(out=D_out)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_normal(size=1, dtype="float64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_normal(size=1, dtype="float64", out=D_out)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] + +reveal_type(def_gen.random()) # E: float +reveal_type(def_gen.random(dtype=np.float32)) # E: float +reveal_type(def_gen.random(dtype="float32")) # E: float +reveal_type(def_gen.random(dtype="double")) # E: float +reveal_type(def_gen.random(dtype=np.float64)) # E: float +reveal_type(def_gen.random(size=None)) # E: float +reveal_type(def_gen.random(size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.random(size=1, dtype=np.float32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._32Bit]]] +reveal_type(def_gen.random(size=1, dtype="f4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._32Bit]]] +reveal_type(def_gen.random(size=1, dtype="float32", out=S_out)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._32Bit]]] +reveal_type(def_gen.random(dtype=np.float32, out=S_out)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._32Bit]]] +reveal_type(def_gen.random(size=1, dtype=np.float64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.random(size=1, dtype="float64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.random(size=1, dtype="f8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.random(out=D_out)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.random(size=1, dtype="float64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.random(size=1, dtype="float64", out=D_out)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] + +reveal_type(def_gen.standard_cauchy()) # E: float +reveal_type(def_gen.standard_cauchy(size=None)) # E: float +reveal_type(def_gen.standard_cauchy(size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.standard_exponential()) # E: float +reveal_type(def_gen.standard_exponential(method="inv")) # E: float +reveal_type(def_gen.standard_exponential(dtype=np.float32)) # E: float +reveal_type(def_gen.standard_exponential(dtype="float32")) # E: float +reveal_type(def_gen.standard_exponential(dtype="double")) # E: float +reveal_type(def_gen.standard_exponential(dtype=np.float64)) # E: float +reveal_type(def_gen.standard_exponential(size=None)) # E: float +reveal_type(def_gen.standard_exponential(size=None, method="inv")) # E: float +reveal_type(def_gen.standard_exponential(size=1, method="inv")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_exponential(size=1, dtype=np.float32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._32Bit]]] +reveal_type(def_gen.standard_exponential(size=1, dtype="f4", method="inv")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._32Bit]]] +reveal_type(def_gen.standard_exponential(size=1, dtype="float32", out=S_out)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._32Bit]]] 
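standard_exponential is the one sampler here with an extra method switch ("zig", the default ziggurat path, or "inv" for inverse-CDF); the stubs accept it in every overload without changing the result type. A short sketch:

import numpy as np

rng = np.random.default_rng(0)
print(rng.standard_exponential(3, method="inv"))            # same shape/dtype as default
print(rng.standard_exponential(3, dtype=np.float32).dtype)  # float32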
+reveal_type(def_gen.standard_exponential(dtype=np.float32, out=S_out)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._32Bit]]] +reveal_type(def_gen.standard_exponential(size=1, dtype=np.float64, method="inv")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_exponential(size=1, dtype="float64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_exponential(size=1, dtype="f8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_exponential(out=D_out)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_exponential(size=1, dtype="float64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_exponential(size=1, dtype="float64", out=D_out)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] + +reveal_type(def_gen.zipf(1.5)) # E: int +reveal_type(def_gen.zipf(1.5, size=None)) # E: int +reveal_type(def_gen.zipf(1.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.zipf(D_arr_1p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.zipf(D_arr_1p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.zipf(D_arr_like_1p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.zipf(D_arr_like_1p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] + +reveal_type(def_gen.weibull(0.5)) # E: float +reveal_type(def_gen.weibull(0.5, size=None)) # E: float +reveal_type(def_gen.weibull(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.weibull(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.weibull(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.weibull(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.weibull(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.standard_t(0.5)) # E: float +reveal_type(def_gen.standard_t(0.5, size=None)) # E: float +reveal_type(def_gen.standard_t(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.standard_t(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.standard_t(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.standard_t(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.standard_t(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.poisson(0.5)) # E: int +reveal_type(def_gen.poisson(0.5, size=None)) # E: int +reveal_type(def_gen.poisson(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.poisson(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.poisson(D_arr_0p5, size=1)) # E: 
numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.poisson(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.poisson(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] + +reveal_type(def_gen.power(0.5)) # E: float +reveal_type(def_gen.power(0.5, size=None)) # E: float +reveal_type(def_gen.power(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.power(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.power(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.power(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.power(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.pareto(0.5)) # E: float +reveal_type(def_gen.pareto(0.5, size=None)) # E: float +reveal_type(def_gen.pareto(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.pareto(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.pareto(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.pareto(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.pareto(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.chisquare(0.5)) # E: float +reveal_type(def_gen.chisquare(0.5, size=None)) # E: float +reveal_type(def_gen.chisquare(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.chisquare(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.chisquare(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.chisquare(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.chisquare(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.exponential(0.5)) # E: float +reveal_type(def_gen.exponential(0.5, size=None)) # E: float +reveal_type(def_gen.exponential(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.exponential(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.exponential(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.exponential(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.exponential(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.geometric(0.5)) # E: int +reveal_type(def_gen.geometric(0.5, size=None)) # E: int +reveal_type(def_gen.geometric(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.geometric(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] 
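zipf, poisson, geometric and logseries above (and the float-valued samplers around them) all share one overload shape: scalar parameters with size=None produce a Python scalar, while an array-like parameter or an explicit size produces an int64 (or float64) array. Runtime sketch:

import numpy as np

rng = np.random.default_rng(0)
print(type(rng.geometric(0.5)))        # int
print(rng.geometric([0.5]).dtype)      # int64
print(rng.weibull(0.5, size=2).dtype)  # float64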
+reveal_type(def_gen.geometric(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.geometric(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.geometric(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] + +reveal_type(def_gen.logseries(0.5)) # E: int +reveal_type(def_gen.logseries(0.5, size=None)) # E: int +reveal_type(def_gen.logseries(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.logseries(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.logseries(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.logseries(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.logseries(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] + +reveal_type(def_gen.rayleigh(0.5)) # E: float +reveal_type(def_gen.rayleigh(0.5, size=None)) # E: float +reveal_type(def_gen.rayleigh(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.rayleigh(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.rayleigh(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.rayleigh(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.rayleigh(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.standard_gamma(0.5)) # E: float +reveal_type(def_gen.standard_gamma(0.5, size=None)) # E: float +reveal_type(def_gen.standard_gamma(0.5, dtype="float32")) # E: float +reveal_type(def_gen.standard_gamma(0.5, size=None, dtype="float32")) # E: float +reveal_type(def_gen.standard_gamma(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_gamma(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_gamma(D_arr_0p5, dtype="f4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._32Bit]]] +reveal_type(def_gen.standard_gamma(0.5, size=1, dtype="float32", out=S_out)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._32Bit]]] +reveal_type(def_gen.standard_gamma(D_arr_0p5, dtype=np.float32, out=S_out)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._32Bit]]] +reveal_type(def_gen.standard_gamma(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_gamma(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_gamma(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_gamma(0.5, out=D_out)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_gamma(D_arr_like_0p5, out=D_out)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_gamma(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, 
numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(def_gen.standard_gamma(D_arr_like_0p5, size=1, out=D_out, dtype=np.float64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] + +reveal_type(def_gen.vonmises(0.5, 0.5)) # E: float +reveal_type(def_gen.vonmises(0.5, 0.5, size=None)) # E: float +reveal_type(def_gen.vonmises(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.vonmises(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.vonmises(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.vonmises(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.vonmises(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.vonmises(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.vonmises(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.vonmises(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.vonmises(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.vonmises(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.vonmises(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.wald(0.5, 0.5)) # E: float +reveal_type(def_gen.wald(0.5, 0.5, size=None)) # E: float +reveal_type(def_gen.wald(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.wald(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.wald(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.wald(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.wald(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.wald(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.wald(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.wald(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.wald(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.wald(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.wald(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.uniform(0.5, 0.5)) # E: float +reveal_type(def_gen.uniform(0.5, 0.5, size=None)) # E: float +reveal_type(def_gen.uniform(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.uniform(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, 
numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.uniform(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.uniform(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.uniform(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.uniform(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.uniform(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.uniform(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.uniform(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.uniform(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.uniform(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.beta(0.5, 0.5)) # E: float +reveal_type(def_gen.beta(0.5, 0.5, size=None)) # E: float +reveal_type(def_gen.beta(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.beta(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.beta(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.beta(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.beta(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.beta(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.beta(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.beta(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.beta(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.beta(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.beta(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.f(0.5, 0.5)) # E: float +reveal_type(def_gen.f(0.5, 0.5, size=None)) # E: float +reveal_type(def_gen.f(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.f(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.f(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.f(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.f(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.f(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.f(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, 
numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.f(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.f(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.f(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.f(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.gamma(0.5, 0.5)) # E: float +reveal_type(def_gen.gamma(0.5, 0.5, size=None)) # E: float +reveal_type(def_gen.gamma(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gamma(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gamma(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gamma(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gamma(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gamma(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gamma(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gamma(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gamma(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gamma(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gamma(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.gumbel(0.5, 0.5)) # E: float +reveal_type(def_gen.gumbel(0.5, 0.5, size=None)) # E: float +reveal_type(def_gen.gumbel(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gumbel(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gumbel(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gumbel(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gumbel(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gumbel(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gumbel(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gumbel(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gumbel(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gumbel(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.gumbel(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + 
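+# The distribution methods above and below all exercise one overload pattern
+# in the Generator stubs: scalar parameters with size=None resolve to a
+# builtin float (or int for the discrete distributions), while array-like
+# parameters or an explicit size resolve to an ndarray of 64-bit floats (or
+# 64-bit signed integers). A minimal sketch of the pattern, reusing the
+# def_gen and D_arr_0p5 fixtures defined earlier in this file:
+#
+#     def_gen.gumbel(0.5, 0.5)           # float
+#     def_gen.gumbel(D_arr_0p5, 0.5)     # numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]]
+#     def_gen.gumbel(0.5, 0.5, size=1)   # numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]]
+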
+reveal_type(def_gen.laplace(0.5, 0.5)) # E: float +reveal_type(def_gen.laplace(0.5, 0.5, size=None)) # E: float +reveal_type(def_gen.laplace(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.laplace(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.laplace(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.laplace(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.laplace(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.laplace(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.laplace(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.laplace(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.laplace(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.laplace(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.laplace(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.logistic(0.5, 0.5)) # E: float +reveal_type(def_gen.logistic(0.5, 0.5, size=None)) # E: float +reveal_type(def_gen.logistic(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.logistic(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.logistic(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.logistic(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.logistic(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.logistic(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.logistic(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.logistic(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.logistic(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.logistic(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.logistic(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.lognormal(0.5, 0.5)) # E: float +reveal_type(def_gen.lognormal(0.5, 0.5, size=None)) # E: float +reveal_type(def_gen.lognormal(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.lognormal(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.lognormal(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] 
+reveal_type(def_gen.lognormal(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.lognormal(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.lognormal(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.lognormal(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.lognormal(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.lognormal(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.lognormal(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.lognormal(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.noncentral_chisquare(0.5, 0.5)) # E: float +reveal_type(def_gen.noncentral_chisquare(0.5, 0.5, size=None)) # E: float +reveal_type(def_gen.noncentral_chisquare(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_chisquare(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_chisquare(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_chisquare(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_chisquare(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_chisquare(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_chisquare(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_chisquare(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_chisquare(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.normal(0.5, 0.5)) # E: float +reveal_type(def_gen.normal(0.5, 0.5, size=None)) # E: float +reveal_type(def_gen.normal(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.normal(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.normal(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.normal(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.normal(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.normal(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, 
numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.normal(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.normal(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.normal(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.normal(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.normal(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.triangular(0.1, 0.5, 0.9)) # E: float +reveal_type(def_gen.triangular(0.1, 0.5, 0.9, size=None)) # E: float +reveal_type(def_gen.triangular(0.1, 0.5, 0.9, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.triangular(D_arr_0p1, 0.5, 0.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.triangular(0.1, D_arr_0p5, 0.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.triangular(D_arr_0p1, 0.5, D_arr_like_0p9, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.triangular(0.1, D_arr_0p5, 0.9, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.triangular(D_arr_like_0p1, 0.5, D_arr_0p9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.triangular(0.5, D_arr_like_0p5, 0.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.triangular(D_arr_0p1, D_arr_0p5, 0.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.triangular(D_arr_like_0p1, D_arr_like_0p5, 0.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.triangular(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.triangular(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.noncentral_f(0.1, 0.5, 0.9)) # E: float +reveal_type(def_gen.noncentral_f(0.1, 0.5, 0.9, size=None)) # E: float +reveal_type(def_gen.noncentral_f(0.1, 0.5, 0.9, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_f(D_arr_0p1, 0.5, 0.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_f(0.1, D_arr_0p5, 0.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_f(D_arr_0p1, 0.5, D_arr_like_0p9, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_f(0.1, D_arr_0p5, 0.9, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_f(D_arr_like_0p1, 0.5, D_arr_0p9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_f(0.5, D_arr_like_0p5, 0.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_f(D_arr_0p1, D_arr_0p5, 0.9)) # E: numpy.ndarray[Any, 
numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 0.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_f(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.binomial(10, 0.5)) # E: int +reveal_type(def_gen.binomial(10, 0.5, size=None)) # E: int +reveal_type(def_gen.binomial(10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.binomial(I_arr_10, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.binomial(10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.binomial(I_arr_10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.binomial(10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.binomial(I_arr_like_10, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.binomial(10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.binomial(I_arr_10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.binomial(I_arr_like_10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.binomial(I_arr_10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.binomial(I_arr_like_10, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] + +reveal_type(def_gen.negative_binomial(10, 0.5)) # E: int +reveal_type(def_gen.negative_binomial(10, 0.5, size=None)) # E: int +reveal_type(def_gen.negative_binomial(10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.negative_binomial(I_arr_10, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.negative_binomial(10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.negative_binomial(I_arr_10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.negative_binomial(10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.negative_binomial(I_arr_like_10, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.negative_binomial(10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.negative_binomial(I_arr_10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.negative_binomial(I_arr_10, D_arr_0p5, size=1)) # E: 
numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] + +reveal_type(def_gen.hypergeometric(20, 20, 10)) # E: int +reveal_type(def_gen.hypergeometric(20, 20, 10, size=None)) # E: int +reveal_type(def_gen.hypergeometric(20, 20, 10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.hypergeometric(I_arr_20, 20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.hypergeometric(20, I_arr_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.hypergeometric(20, I_arr_20, 10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.hypergeometric(I_arr_like_20, 20, I_arr_10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.hypergeometric(20, I_arr_like_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.hypergeometric(I_arr_20, I_arr_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] + +I_int64_100: np.ndarray[Any, np.dtype[np.int64]] = np.array([100], dtype=np.int64) + +reveal_type(def_gen.integers(0, 100)) # E: int +reveal_type(def_gen.integers(100)) # E: int +reveal_type(def_gen.integers([100])) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(0, [100])) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] + +I_bool_low: np.ndarray[Any, np.dtype[np.bool_]] = np.array([0], dtype=np.bool_) +I_bool_low_like: List[int] = [0] +I_bool_high_open: np.ndarray[Any, np.dtype[np.bool_]] = np.array([1], dtype=np.bool_) +I_bool_high_closed: np.ndarray[Any, np.dtype[np.bool_]] = np.array([1], dtype=np.bool_) + +reveal_type(def_gen.integers(2, dtype=bool)) # E: builtins.bool +reveal_type(def_gen.integers(0, 2, dtype=bool)) # E: builtins.bool +reveal_type(def_gen.integers(1, dtype=bool, endpoint=True)) # E: builtins.bool +reveal_type(def_gen.integers(0, 1, dtype=bool, endpoint=True)) # E: builtins.bool +reveal_type(def_gen.integers(I_bool_low_like, 1, dtype=bool, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] +reveal_type(def_gen.integers(I_bool_high_open, dtype=bool)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] +reveal_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=bool)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] +reveal_type(def_gen.integers(0, I_bool_high_open, dtype=bool)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] +reveal_type(def_gen.integers(I_bool_high_closed, dtype=bool, endpoint=True)) # E: numpy.ndarray[Any, 
numpy.dtype[numpy.bool_] +reveal_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=bool, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] +reveal_type(def_gen.integers(0, I_bool_high_closed, dtype=bool, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] + +reveal_type(def_gen.integers(2, dtype=np.bool_)) # E: builtins.bool +reveal_type(def_gen.integers(0, 2, dtype=np.bool_)) # E: builtins.bool +reveal_type(def_gen.integers(1, dtype=np.bool_, endpoint=True)) # E: builtins.bool +reveal_type(def_gen.integers(0, 1, dtype=np.bool_, endpoint=True)) # E: builtins.bool +reveal_type(def_gen.integers(I_bool_low_like, 1, dtype=np.bool_, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] +reveal_type(def_gen.integers(I_bool_high_open, dtype=np.bool_)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] +reveal_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=np.bool_)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] +reveal_type(def_gen.integers(0, I_bool_high_open, dtype=np.bool_)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] +reveal_type(def_gen.integers(I_bool_high_closed, dtype=np.bool_, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] +reveal_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=np.bool_, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] +reveal_type(def_gen.integers(0, I_bool_high_closed, dtype=np.bool_, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] + +I_u1_low: np.ndarray[Any, np.dtype[np.uint8]] = np.array([0], dtype=np.uint8) +I_u1_low_like: List[int] = [0] +I_u1_high_open: np.ndarray[Any, np.dtype[np.uint8]] = np.array([255], dtype=np.uint8) +I_u1_high_closed: np.ndarray[Any, np.dtype[np.uint8]] = np.array([255], dtype=np.uint8) + +reveal_type(def_gen.integers(256, dtype="u1")) # E: int +reveal_type(def_gen.integers(0, 256, dtype="u1")) # E: int +reveal_type(def_gen.integers(255, dtype="u1", endpoint=True)) # E: int +reveal_type(def_gen.integers(0, 255, dtype="u1", endpoint=True)) # E: int +reveal_type(def_gen.integers(I_u1_low_like, 255, dtype="u1", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_u1_high_open, dtype="u1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype="u1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(0, I_u1_high_open, dtype="u1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_u1_high_closed, dtype="u1", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype="u1", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(0, I_u1_high_closed, dtype="u1", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] + +reveal_type(def_gen.integers(256, dtype="uint8")) # E: int +reveal_type(def_gen.integers(0, 256, dtype="uint8")) # E: int +reveal_type(def_gen.integers(255, dtype="uint8", endpoint=True)) # E: int +reveal_type(def_gen.integers(0, 255, dtype="uint8", endpoint=True)) # E: int +reveal_type(def_gen.integers(I_u1_low_like, 255, dtype="uint8", endpoint=True)) # E: numpy.ndarray[Any, 
numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_u1_high_open, dtype="uint8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype="uint8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(0, I_u1_high_open, dtype="uint8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_u1_high_closed, dtype="uint8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype="uint8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(0, I_u1_high_closed, dtype="uint8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] + +reveal_type(def_gen.integers(256, dtype=np.uint8)) # E: int +reveal_type(def_gen.integers(0, 256, dtype=np.uint8)) # E: int +reveal_type(def_gen.integers(255, dtype=np.uint8, endpoint=True)) # E: int +reveal_type(def_gen.integers(0, 255, dtype=np.uint8, endpoint=True)) # E: int +reveal_type(def_gen.integers(I_u1_low_like, 255, dtype=np.uint8, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_u1_high_open, dtype=np.uint8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype=np.uint8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(0, I_u1_high_open, dtype=np.uint8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_u1_high_closed, dtype=np.uint8, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype=np.uint8, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(0, I_u1_high_closed, dtype=np.uint8, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] + +I_u2_low: np.ndarray[Any, np.dtype[np.uint16]] = np.array([0], dtype=np.uint16) +I_u2_low_like: List[int] = [0] +I_u2_high_open: np.ndarray[Any, np.dtype[np.uint16]] = np.array([65535], dtype=np.uint16) +I_u2_high_closed: np.ndarray[Any, np.dtype[np.uint16]] = np.array([65535], dtype=np.uint16) + +reveal_type(def_gen.integers(65536, dtype="u2")) # E: int +reveal_type(def_gen.integers(0, 65536, dtype="u2")) # E: int +reveal_type(def_gen.integers(65535, dtype="u2", endpoint=True)) # E: int +reveal_type(def_gen.integers(0, 65535, dtype="u2", endpoint=True)) # E: int +reveal_type(def_gen.integers(I_u2_low_like, 65535, dtype="u2", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_u2_high_open, dtype="u2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype="u2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(0, I_u2_high_open, dtype="u2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] 
+reveal_type(def_gen.integers(I_u2_high_closed, dtype="u2", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype="u2", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(0, I_u2_high_closed, dtype="u2", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] + +reveal_type(def_gen.integers(65536, dtype="uint16")) # E: int +reveal_type(def_gen.integers(0, 65536, dtype="uint16")) # E: int +reveal_type(def_gen.integers(65535, dtype="uint16", endpoint=True)) # E: int +reveal_type(def_gen.integers(0, 65535, dtype="uint16", endpoint=True)) # E: int +reveal_type(def_gen.integers(I_u2_low_like, 65535, dtype="uint16", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_u2_high_open, dtype="uint16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype="uint16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(0, I_u2_high_open, dtype="uint16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_u2_high_closed, dtype="uint16", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype="uint16", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(0, I_u2_high_closed, dtype="uint16", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] + +reveal_type(def_gen.integers(65536, dtype=np.uint16)) # E: int +reveal_type(def_gen.integers(0, 65536, dtype=np.uint16)) # E: int +reveal_type(def_gen.integers(65535, dtype=np.uint16, endpoint=True)) # E: int +reveal_type(def_gen.integers(0, 65535, dtype=np.uint16, endpoint=True)) # E: int +reveal_type(def_gen.integers(I_u2_low_like, 65535, dtype=np.uint16, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_u2_high_open, dtype=np.uint16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype=np.uint16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(0, I_u2_high_open, dtype=np.uint16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_u2_high_closed, dtype=np.uint16, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype=np.uint16, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(0, I_u2_high_closed, dtype=np.uint16, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] + +I_u4_low: np.ndarray[Any, np.dtype[np.uint32]] = np.array([0], dtype=np.uint32) +I_u4_low_like: List[int] = [0] +I_u4_high_open: np.ndarray[Any, np.dtype[np.uint32]] = np.array([4294967295], dtype=np.uint32) +I_u4_high_closed: np.ndarray[Any, np.dtype[np.uint32]] 
= np.array([4294967295], dtype=np.uint32) + +reveal_type(def_gen.integers(4294967296, dtype=np.int_)) # E: int +reveal_type(def_gen.integers(0, 4294967296, dtype=np.int_)) # E: int +reveal_type(def_gen.integers(4294967295, dtype=np.int_, endpoint=True)) # E: int +reveal_type(def_gen.integers(0, 4294967295, dtype=np.int_, endpoint=True)) # E: int +reveal_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.int_, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(def_gen.integers(I_u4_high_open, dtype=np.int_)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.int_)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(def_gen.integers(0, I_u4_high_open, dtype=np.int_)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(def_gen.integers(I_u4_high_closed, dtype=np.int_, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.int_, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(def_gen.integers(0, I_u4_high_closed, dtype=np.int_, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] + + +reveal_type(def_gen.integers(4294967296, dtype="u4")) # E: int +reveal_type(def_gen.integers(0, 4294967296, dtype="u4")) # E: int +reveal_type(def_gen.integers(4294967295, dtype="u4", endpoint=True)) # E: int +reveal_type(def_gen.integers(0, 4294967295, dtype="u4", endpoint=True)) # E: int +reveal_type(def_gen.integers(I_u4_low_like, 4294967295, dtype="u4", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_u4_high_open, dtype="u4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype="u4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(0, I_u4_high_open, dtype="u4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_u4_high_closed, dtype="u4", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype="u4", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(0, I_u4_high_closed, dtype="u4", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] + +reveal_type(def_gen.integers(4294967296, dtype="uint32")) # E: int +reveal_type(def_gen.integers(0, 4294967296, dtype="uint32")) # E: int +reveal_type(def_gen.integers(4294967295, dtype="uint32", endpoint=True)) # E: int +reveal_type(def_gen.integers(0, 4294967295, dtype="uint32", endpoint=True)) # E: int +reveal_type(def_gen.integers(I_u4_low_like, 4294967295, dtype="uint32", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_u4_high_open, dtype="uint32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype="uint32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(0, I_u4_high_open, dtype="uint32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_u4_high_closed, 
dtype="uint32", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype="uint32", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(0, I_u4_high_closed, dtype="uint32", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] + +reveal_type(def_gen.integers(4294967296, dtype=np.uint32)) # E: int +reveal_type(def_gen.integers(0, 4294967296, dtype=np.uint32)) # E: int +reveal_type(def_gen.integers(4294967295, dtype=np.uint32, endpoint=True)) # E: int +reveal_type(def_gen.integers(0, 4294967295, dtype=np.uint32, endpoint=True)) # E: int +reveal_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint32, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_u4_high_open, dtype=np.uint32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(0, I_u4_high_open, dtype=np.uint32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_u4_high_closed, dtype=np.uint32, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint32, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint32, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] + +reveal_type(def_gen.integers(4294967296, dtype=np.uint)) # E: int +reveal_type(def_gen.integers(0, 4294967296, dtype=np.uint)) # E: int +reveal_type(def_gen.integers(4294967295, dtype=np.uint, endpoint=True)) # E: int +reveal_type(def_gen.integers(0, 4294967295, dtype=np.uint, endpoint=True)) # E: int +reveal_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[{uint}]] +reveal_type(def_gen.integers(I_u4_high_open, dtype=np.uint)) # E: numpy.ndarray[Any, numpy.dtype[{uint}]] +reveal_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint)) # E: numpy.ndarray[Any, numpy.dtype[{uint}]] +reveal_type(def_gen.integers(0, I_u4_high_open, dtype=np.uint)) # E: numpy.ndarray[Any, numpy.dtype[{uint}]] +reveal_type(def_gen.integers(I_u4_high_closed, dtype=np.uint, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[{uint}]] +reveal_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[{uint}]] +reveal_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[{uint}]] + +I_u8_low: np.ndarray[Any, np.dtype[np.uint64]] = np.array([0], dtype=np.uint64) +I_u8_low_like: List[int] = [0] +I_u8_high_open: np.ndarray[Any, np.dtype[np.uint64]] = np.array([18446744073709551615], dtype=np.uint64) +I_u8_high_closed: np.ndarray[Any, np.dtype[np.uint64]] = np.array([18446744073709551615], dtype=np.uint64) + +reveal_type(def_gen.integers(18446744073709551616, dtype="u8")) # E: int +reveal_type(def_gen.integers(0, 18446744073709551616, dtype="u8")) # E: int 
+reveal_type(def_gen.integers(18446744073709551615, dtype="u8", endpoint=True)) # E: int +reveal_type(def_gen.integers(0, 18446744073709551615, dtype="u8", endpoint=True)) # E: int +reveal_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="u8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_u8_high_open, dtype="u8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype="u8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(0, I_u8_high_open, dtype="u8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_u8_high_closed, dtype="u8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype="u8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(0, I_u8_high_closed, dtype="u8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] + +reveal_type(def_gen.integers(18446744073709551616, dtype="uint64")) # E: int +reveal_type(def_gen.integers(0, 18446744073709551616, dtype="uint64")) # E: int +reveal_type(def_gen.integers(18446744073709551615, dtype="uint64", endpoint=True)) # E: int +reveal_type(def_gen.integers(0, 18446744073709551615, dtype="uint64", endpoint=True)) # E: int +reveal_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="uint64", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_u8_high_open, dtype="uint64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype="uint64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(0, I_u8_high_open, dtype="uint64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_u8_high_closed, dtype="uint64", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype="uint64", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(0, I_u8_high_closed, dtype="uint64", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] + +reveal_type(def_gen.integers(18446744073709551616, dtype=np.uint64)) # E: int +reveal_type(def_gen.integers(0, 18446744073709551616, dtype=np.uint64)) # E: int +reveal_type(def_gen.integers(18446744073709551615, dtype=np.uint64, endpoint=True)) # E: int +reveal_type(def_gen.integers(0, 18446744073709551615, dtype=np.uint64, endpoint=True)) # E: int +reveal_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype=np.uint64, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_u8_high_open, dtype=np.uint64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype=np.uint64)) # E: numpy.ndarray[Any, 
numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(0, I_u8_high_open, dtype=np.uint64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_u8_high_closed, dtype=np.uint64, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype=np.uint64, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(0, I_u8_high_closed, dtype=np.uint64, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] + +I_i1_low: np.ndarray[Any, np.dtype[np.int8]] = np.array([-128], dtype=np.int8) +I_i1_low_like: List[int] = [-128] +I_i1_high_open: np.ndarray[Any, np.dtype[np.int8]] = np.array([127], dtype=np.int8) +I_i1_high_closed: np.ndarray[Any, np.dtype[np.int8]] = np.array([127], dtype=np.int8) + +reveal_type(def_gen.integers(128, dtype="i1")) # E: int +reveal_type(def_gen.integers(-128, 128, dtype="i1")) # E: int +reveal_type(def_gen.integers(127, dtype="i1", endpoint=True)) # E: int +reveal_type(def_gen.integers(-128, 127, dtype="i1", endpoint=True)) # E: int +reveal_type(def_gen.integers(I_i1_low_like, 127, dtype="i1", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_i1_high_open, dtype="i1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype="i1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(-128, I_i1_high_open, dtype="i1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_i1_high_closed, dtype="i1", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype="i1", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(-128, I_i1_high_closed, dtype="i1", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] + +reveal_type(def_gen.integers(128, dtype="int8")) # E: int +reveal_type(def_gen.integers(-128, 128, dtype="int8")) # E: int +reveal_type(def_gen.integers(127, dtype="int8", endpoint=True)) # E: int +reveal_type(def_gen.integers(-128, 127, dtype="int8", endpoint=True)) # E: int +reveal_type(def_gen.integers(I_i1_low_like, 127, dtype="int8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_i1_high_open, dtype="int8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype="int8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(-128, I_i1_high_open, dtype="int8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_i1_high_closed, dtype="int8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype="int8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(-128, 
I_i1_high_closed, dtype="int8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] + +reveal_type(def_gen.integers(128, dtype=np.int8)) # E: int +reveal_type(def_gen.integers(-128, 128, dtype=np.int8)) # E: int +reveal_type(def_gen.integers(127, dtype=np.int8, endpoint=True)) # E: int +reveal_type(def_gen.integers(-128, 127, dtype=np.int8, endpoint=True)) # E: int +reveal_type(def_gen.integers(I_i1_low_like, 127, dtype=np.int8, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_i1_high_open, dtype=np.int8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype=np.int8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(-128, I_i1_high_open, dtype=np.int8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_i1_high_closed, dtype=np.int8, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype=np.int8, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(def_gen.integers(-128, I_i1_high_closed, dtype=np.int8, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] + +I_i2_low: np.ndarray[Any, np.dtype[np.int16]] = np.array([-32768], dtype=np.int16) +I_i2_low_like: List[int] = [-32768] +I_i2_high_open: np.ndarray[Any, np.dtype[np.int16]] = np.array([32767], dtype=np.int16) +I_i2_high_closed: np.ndarray[Any, np.dtype[np.int16]] = np.array([32767], dtype=np.int16) + +reveal_type(def_gen.integers(32768, dtype="i2")) # E: int +reveal_type(def_gen.integers(-32768, 32768, dtype="i2")) # E: int +reveal_type(def_gen.integers(32767, dtype="i2", endpoint=True)) # E: int +reveal_type(def_gen.integers(-32768, 32767, dtype="i2", endpoint=True)) # E: int +reveal_type(def_gen.integers(I_i2_low_like, 32767, dtype="i2", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_i2_high_open, dtype="i2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype="i2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(-32768, I_i2_high_open, dtype="i2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_i2_high_closed, dtype="i2", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype="i2", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(-32768, I_i2_high_closed, dtype="i2", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] + +reveal_type(def_gen.integers(32768, dtype="int16")) # E: int +reveal_type(def_gen.integers(-32768, 32768, dtype="int16")) # E: int +reveal_type(def_gen.integers(32767, dtype="int16", endpoint=True)) # E: int +reveal_type(def_gen.integers(-32768, 32767, dtype="int16", endpoint=True)) # E: int +reveal_type(def_gen.integers(I_i2_low_like, 32767, dtype="int16", endpoint=True)) # E: 
numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_i2_high_open, dtype="int16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype="int16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(-32768, I_i2_high_open, dtype="int16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_i2_high_closed, dtype="int16", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype="int16", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(-32768, I_i2_high_closed, dtype="int16", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] + +reveal_type(def_gen.integers(32768, dtype=np.int16)) # E: int +reveal_type(def_gen.integers(-32768, 32768, dtype=np.int16)) # E: int +reveal_type(def_gen.integers(32767, dtype=np.int16, endpoint=True)) # E: int +reveal_type(def_gen.integers(-32768, 32767, dtype=np.int16, endpoint=True)) # E: int +reveal_type(def_gen.integers(I_i2_low_like, 32767, dtype=np.int16, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_i2_high_open, dtype=np.int16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype=np.int16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(-32768, I_i2_high_open, dtype=np.int16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_i2_high_closed, dtype=np.int16, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype=np.int16, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(def_gen.integers(-32768, I_i2_high_closed, dtype=np.int16, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] + +I_i4_low: np.ndarray[Any, np.dtype[np.int32]] = np.array([-2147483648], dtype=np.int32) +I_i4_low_like: List[int] = [-2147483648] +I_i4_high_open: np.ndarray[Any, np.dtype[np.int32]] = np.array([2147483647], dtype=np.int32) +I_i4_high_closed: np.ndarray[Any, np.dtype[np.int32]] = np.array([2147483647], dtype=np.int32) + +reveal_type(def_gen.integers(2147483648, dtype="i4")) # E: int +reveal_type(def_gen.integers(-2147483648, 2147483648, dtype="i4")) # E: int +reveal_type(def_gen.integers(2147483647, dtype="i4", endpoint=True)) # E: int +reveal_type(def_gen.integers(-2147483648, 2147483647, dtype="i4", endpoint=True)) # E: int +reveal_type(def_gen.integers(I_i4_low_like, 2147483647, dtype="i4", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_i4_high_open, dtype="i4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype="i4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(-2147483648, I_i4_high_open, 
dtype="i4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_i4_high_closed, dtype="i4", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype="i4", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype="i4", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] + +reveal_type(def_gen.integers(2147483648, dtype="int32")) # E: int +reveal_type(def_gen.integers(-2147483648, 2147483648, dtype="int32")) # E: int +reveal_type(def_gen.integers(2147483647, dtype="int32", endpoint=True)) # E: int +reveal_type(def_gen.integers(-2147483648, 2147483647, dtype="int32", endpoint=True)) # E: int +reveal_type(def_gen.integers(I_i4_low_like, 2147483647, dtype="int32", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_i4_high_open, dtype="int32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype="int32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(-2147483648, I_i4_high_open, dtype="int32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_i4_high_closed, dtype="int32", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype="int32", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype="int32", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] + +reveal_type(def_gen.integers(2147483648, dtype=np.int32)) # E: int +reveal_type(def_gen.integers(-2147483648, 2147483648, dtype=np.int32)) # E: int +reveal_type(def_gen.integers(2147483647, dtype=np.int32, endpoint=True)) # E: int +reveal_type(def_gen.integers(-2147483648, 2147483647, dtype=np.int32, endpoint=True)) # E: int +reveal_type(def_gen.integers(I_i4_low_like, 2147483647, dtype=np.int32, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_i4_high_open, dtype=np.int32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype=np.int32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(-2147483648, I_i4_high_open, dtype=np.int32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_i4_high_closed, dtype=np.int32, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype=np.int32, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype=np.int32, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] + +I_i8_low: np.ndarray[Any, np.dtype[np.int64]] = np.array([-9223372036854775808], 
dtype=np.int64) +I_i8_low_like: List[int] = [-9223372036854775808] +I_i8_high_open: np.ndarray[Any, np.dtype[np.int64]] = np.array([9223372036854775807], dtype=np.int64) +I_i8_high_closed: np.ndarray[Any, np.dtype[np.int64]] = np.array([9223372036854775807], dtype=np.int64) + +reveal_type(def_gen.integers(9223372036854775808, dtype="i8")) # E: int +reveal_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="i8")) # E: int +reveal_type(def_gen.integers(9223372036854775807, dtype="i8", endpoint=True)) # E: int +reveal_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="i8", endpoint=True)) # E: int +reveal_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="i8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_i8_high_open, dtype="i8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype="i8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="i8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_i8_high_closed, dtype="i8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype="i8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="i8", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] + +reveal_type(def_gen.integers(9223372036854775808, dtype="int64")) # E: int +reveal_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="int64")) # E: int +reveal_type(def_gen.integers(9223372036854775807, dtype="int64", endpoint=True)) # E: int +reveal_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="int64", endpoint=True)) # E: int +reveal_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="int64", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_i8_high_open, dtype="int64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype="int64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="int64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_i8_high_closed, dtype="int64", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype="int64", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="int64", endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] + +reveal_type(def_gen.integers(9223372036854775808, dtype=np.int64)) # E: int +reveal_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype=np.int64)) # E: int +reveal_type(def_gen.integers(9223372036854775807, dtype=np.int64, endpoint=True)) # E: int 
+reveal_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype=np.int64, endpoint=True)) # E: int +reveal_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype=np.int64, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_i8_high_open, dtype=np.int64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype=np.int64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype=np.int64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_i8_high_closed, dtype=np.int64, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype=np.int64, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype=np.int64, endpoint=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] + + +reveal_type(def_gen.bit_generator) # E: BitGenerator + +reveal_type(def_gen.bytes(2)) # E: bytes + +reveal_type(def_gen.choice(5)) # E: int +reveal_type(def_gen.choice(5, 3)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.choice(5, 3, replace=True)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.choice(5, 3, p=[1 / 5] * 5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.choice(5, 3, p=[1 / 5] * 5, replace=False)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] + +reveal_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"])) # E: Any +reveal_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3)) # E: numpy.ndarray[Any, Any] +reveal_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, p=[1 / 4] * 4)) # E: numpy.ndarray[Any, Any] +reveal_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=True)) # E: numpy.ndarray[Any, Any] +reveal_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4]))) # E: numpy.ndarray[Any, Any] + +reveal_type(def_gen.dirichlet([0.5, 0.5])) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.dirichlet(np.array([0.5, 0.5]))) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.dirichlet(np.array([0.5, 0.5]), size=3)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.multinomial(20, [1 / 6.0] * 6)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.multinomial(20, np.array([0.5, 0.5]))) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.multinomial(20, [1 / 6.0] * 6, size=2)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.multinomial([[10], [20]], [1 / 6.0] * 6, size=(2, 2))) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.multinomial(np.array([[10], [20]]), np.array([0.5, 0.5]), size=(2, 2))) 
# E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] + +reveal_type(def_gen.multivariate_hypergeometric([3, 5, 7], 2)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, size=4)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, size=(4, 7))) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.multivariate_hypergeometric([3, 5, 7], 2, method="count")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, method="marginals")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] + +reveal_type(def_gen.multivariate_normal([0.0], [[1.0]])) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.multivariate_normal([0.0], np.array([[1.0]]))) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.multivariate_normal(np.array([0.0]), [[1.0]])) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(def_gen.multivariate_normal([0.0], np.array([[1.0]]))) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(def_gen.permutation(10)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(def_gen.permutation([1, 2, 3, 4])) # E: numpy.ndarray[Any, Any] +reveal_type(def_gen.permutation(np.array([1, 2, 3, 4]))) # E: numpy.ndarray[Any, Any] +reveal_type(def_gen.permutation(D_2D, axis=1)) # E: numpy.ndarray[Any, Any] +reveal_type(def_gen.permuted(D_2D)) # E: numpy.ndarray[Any, Any] +reveal_type(def_gen.permuted(D_2D_like)) # E: numpy.ndarray[Any, Any] +reveal_type(def_gen.permuted(D_2D, axis=1)) # E: numpy.ndarray[Any, Any] +reveal_type(def_gen.permuted(D_2D, out=D_2D)) # E: numpy.ndarray[Any, Any] +reveal_type(def_gen.permuted(D_2D_like, out=D_2D)) # E: numpy.ndarray[Any, Any] +reveal_type(def_gen.permuted(D_2D_like, out=D_2D)) # E: numpy.ndarray[Any, Any] +reveal_type(def_gen.permuted(D_2D, axis=1, out=D_2D)) # E: numpy.ndarray[Any, Any] + +reveal_type(def_gen.shuffle(np.arange(10))) # E: None +reveal_type(def_gen.shuffle([1, 2, 3, 4, 5])) # E: None +reveal_type(def_gen.shuffle(D_2D, axis=1)) # E: None + +reveal_type(np.random.Generator(pcg64)) # E: Generator +reveal_type(def_gen.__str__()) # E: str +reveal_type(def_gen.__repr__()) # E: str +def_gen_state = def_gen.__getstate__() +reveal_type(def_gen_state) # E: builtins.dict[builtins.str, Any] +reveal_type(def_gen.__setstate__(def_gen_state)) # E: None + +# RandomState +random_st: np.random.RandomState = np.random.RandomState() + +reveal_type(random_st.standard_normal()) # E: float +reveal_type(random_st.standard_normal(size=None)) # E: float +reveal_type(random_st.standard_normal(size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] + +reveal_type(random_st.random()) # E: float +reveal_type(random_st.random(size=None)) # E: float +reveal_type(random_st.random(size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] + +reveal_type(random_st.standard_cauchy()) # E: float 
+reveal_type(random_st.standard_cauchy(size=None)) # E: float +reveal_type(random_st.standard_cauchy(size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.standard_exponential()) # E: float +reveal_type(random_st.standard_exponential(size=None)) # E: float +reveal_type(random_st.standard_exponential(size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] + +reveal_type(random_st.zipf(1.5)) # E: int +reveal_type(random_st.zipf(1.5, size=None)) # E: int +reveal_type(random_st.zipf(1.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.zipf(D_arr_1p5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.zipf(D_arr_1p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.zipf(D_arr_like_1p5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.zipf(D_arr_like_1p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] + +reveal_type(random_st.weibull(0.5)) # E: float +reveal_type(random_st.weibull(0.5, size=None)) # E: float +reveal_type(random_st.weibull(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.weibull(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.weibull(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.weibull(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.weibull(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.standard_t(0.5)) # E: float +reveal_type(random_st.standard_t(0.5, size=None)) # E: float +reveal_type(random_st.standard_t(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.standard_t(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.standard_t(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.standard_t(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.standard_t(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.poisson(0.5)) # E: int +reveal_type(random_st.poisson(0.5, size=None)) # E: int +reveal_type(random_st.poisson(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.poisson(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.poisson(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.poisson(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.poisson(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] + +reveal_type(random_st.power(0.5)) # E: float +reveal_type(random_st.power(0.5, size=None)) # E: float +reveal_type(random_st.power(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.power(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.power(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.power(D_arr_like_0p5)) # E: numpy.ndarray[Any, 
numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.power(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.pareto(0.5)) # E: float +reveal_type(random_st.pareto(0.5, size=None)) # E: float +reveal_type(random_st.pareto(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.pareto(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.pareto(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.pareto(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.pareto(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.chisquare(0.5)) # E: float +reveal_type(random_st.chisquare(0.5, size=None)) # E: float +reveal_type(random_st.chisquare(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.chisquare(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.chisquare(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.chisquare(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.chisquare(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.exponential(0.5)) # E: float +reveal_type(random_st.exponential(0.5, size=None)) # E: float +reveal_type(random_st.exponential(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.exponential(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.exponential(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.exponential(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.exponential(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.geometric(0.5)) # E: int +reveal_type(random_st.geometric(0.5, size=None)) # E: int +reveal_type(random_st.geometric(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.geometric(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.geometric(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.geometric(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.geometric(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] + +reveal_type(random_st.logseries(0.5)) # E: int +reveal_type(random_st.logseries(0.5, size=None)) # E: int +reveal_type(random_st.logseries(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.logseries(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.logseries(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.logseries(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.logseries(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] + 
+reveal_type(random_st.rayleigh(0.5)) # E: float +reveal_type(random_st.rayleigh(0.5, size=None)) # E: float +reveal_type(random_st.rayleigh(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.rayleigh(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.rayleigh(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.rayleigh(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.rayleigh(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.standard_gamma(0.5)) # E: float +reveal_type(random_st.standard_gamma(0.5, size=None)) # E: float +reveal_type(random_st.standard_gamma(0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(random_st.standard_gamma(D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(random_st.standard_gamma(D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(random_st.standard_gamma(D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(random_st.standard_gamma(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] +reveal_type(random_st.standard_gamma(D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]] + +reveal_type(random_st.vonmises(0.5, 0.5)) # E: float +reveal_type(random_st.vonmises(0.5, 0.5, size=None)) # E: float +reveal_type(random_st.vonmises(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.vonmises(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.vonmises(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.vonmises(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.vonmises(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.vonmises(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.vonmises(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.vonmises(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.vonmises(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.wald(0.5, 0.5)) # E: float +reveal_type(random_st.wald(0.5, 0.5, size=None)) # E: float +reveal_type(random_st.wald(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.wald(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] 
+reveal_type(random_st.wald(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.wald(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.wald(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.wald(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.wald(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.wald(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.wald(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.wald(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.wald(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.uniform(0.5, 0.5)) # E: float +reveal_type(random_st.uniform(0.5, 0.5, size=None)) # E: float +reveal_type(random_st.uniform(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.uniform(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.uniform(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.uniform(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.uniform(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.uniform(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.uniform(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.uniform(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.uniform(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.uniform(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.uniform(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.beta(0.5, 0.5)) # E: float +reveal_type(random_st.beta(0.5, 0.5, size=None)) # E: float +reveal_type(random_st.beta(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.beta(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.beta(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.beta(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.beta(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.beta(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] 
+reveal_type(random_st.beta(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.beta(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.beta(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.beta(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.beta(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.f(0.5, 0.5)) # E: float +reveal_type(random_st.f(0.5, 0.5, size=None)) # E: float +reveal_type(random_st.f(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.f(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.f(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.f(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.f(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.f(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.f(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.f(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.f(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.f(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.f(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.gamma(0.5, 0.5)) # E: float +reveal_type(random_st.gamma(0.5, 0.5, size=None)) # E: float +reveal_type(random_st.gamma(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gamma(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gamma(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gamma(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gamma(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gamma(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gamma(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gamma(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gamma(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gamma(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gamma(D_arr_like_0p5, D_arr_like_0p5, size=1)) 
# E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.gumbel(0.5, 0.5)) # E: float +reveal_type(random_st.gumbel(0.5, 0.5, size=None)) # E: float +reveal_type(random_st.gumbel(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gumbel(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gumbel(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gumbel(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gumbel(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gumbel(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gumbel(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gumbel(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gumbel(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.laplace(0.5, 0.5)) # E: float +reveal_type(random_st.laplace(0.5, 0.5, size=None)) # E: float +reveal_type(random_st.laplace(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.laplace(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.laplace(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.laplace(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.laplace(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.laplace(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.laplace(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.laplace(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.laplace(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.laplace(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.laplace(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.logistic(0.5, 0.5)) # E: float +reveal_type(random_st.logistic(0.5, 0.5, size=None)) # E: float +reveal_type(random_st.logistic(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.logistic(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.logistic(0.5, D_arr_0p5)) # 
E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.logistic(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.logistic(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.logistic(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.logistic(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.logistic(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.logistic(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.logistic(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.logistic(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.lognormal(0.5, 0.5)) # E: float +reveal_type(random_st.lognormal(0.5, 0.5, size=None)) # E: float +reveal_type(random_st.lognormal(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.lognormal(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.lognormal(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.lognormal(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.lognormal(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.lognormal(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.lognormal(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.lognormal(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.lognormal(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.noncentral_chisquare(0.5, 0.5)) # E: float +reveal_type(random_st.noncentral_chisquare(0.5, 0.5, size=None)) # E: float +reveal_type(random_st.noncentral_chisquare(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_chisquare(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_chisquare(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_chisquare(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_chisquare(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] 
+reveal_type(random_st.noncentral_chisquare(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_chisquare(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.normal(0.5, 0.5)) # E: float +reveal_type(random_st.normal(0.5, 0.5, size=None)) # E: float +reveal_type(random_st.normal(0.5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.normal(D_arr_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.normal(0.5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.normal(D_arr_0p5, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.normal(0.5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.normal(D_arr_like_0p5, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.normal(0.5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.normal(D_arr_0p5, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.normal(D_arr_like_0p5, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.normal(D_arr_0p5, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.normal(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.triangular(0.1, 0.5, 0.9)) # E: float +reveal_type(random_st.triangular(0.1, 0.5, 0.9, size=None)) # E: float +reveal_type(random_st.triangular(0.1, 0.5, 0.9, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.triangular(D_arr_0p1, 0.5, 0.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.triangular(0.1, D_arr_0p5, 0.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.triangular(D_arr_0p1, 0.5, D_arr_like_0p9, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.triangular(0.1, D_arr_0p5, 0.9, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.triangular(D_arr_like_0p1, 0.5, D_arr_0p9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.triangular(0.5, D_arr_like_0p5, 0.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.triangular(D_arr_0p1, D_arr_0p5, 0.9)) # E: 
numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, 0.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.triangular(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.noncentral_f(0.1, 0.5, 0.9)) # E: float +reveal_type(random_st.noncentral_f(0.1, 0.5, 0.9, size=None)) # E: float +reveal_type(random_st.noncentral_f(0.1, 0.5, 0.9, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_f(D_arr_0p1, 0.5, 0.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_f(0.1, D_arr_0p5, 0.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_f(D_arr_0p1, 0.5, D_arr_like_0p9, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_f(0.1, D_arr_0p5, 0.9, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_f(D_arr_like_0p1, 0.5, D_arr_0p9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_f(0.5, D_arr_like_0p5, 0.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_f(D_arr_0p1, D_arr_0p5, 0.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 0.9)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_f(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.binomial(10, 0.5)) # E: int +reveal_type(random_st.binomial(10, 0.5, size=None)) # E: int +reveal_type(random_st.binomial(10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.binomial(I_arr_10, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.binomial(10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.binomial(I_arr_10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.binomial(10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.binomial(I_arr_like_10, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.binomial(10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.binomial(I_arr_10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.binomial(I_arr_like_10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.binomial(I_arr_10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.binomial(I_arr_like_10, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] + +reveal_type(random_st.negative_binomial(10, 0.5)) # E: int 
+reveal_type(random_st.negative_binomial(10, 0.5, size=None)) # E: int +reveal_type(random_st.negative_binomial(10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.negative_binomial(I_arr_10, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.negative_binomial(10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.negative_binomial(I_arr_10, 0.5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.negative_binomial(10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.negative_binomial(I_arr_like_10, 0.5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.negative_binomial(10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.negative_binomial(I_arr_10, D_arr_0p5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.negative_binomial(I_arr_10, D_arr_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] + +reveal_type(random_st.hypergeometric(20, 20, 10)) # E: int +reveal_type(random_st.hypergeometric(20, 20, 10, size=None)) # E: int +reveal_type(random_st.hypergeometric(20, 20, 10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.hypergeometric(I_arr_20, 20, 10)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.hypergeometric(20, I_arr_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.hypergeometric(20, I_arr_20, 10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.hypergeometric(I_arr_like_20, 20, I_arr_10)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.hypergeometric(20, I_arr_like_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.hypergeometric(I_arr_20, I_arr_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.hypergeometric(I_arr_like_20, I_arr_like_20, 10)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] + +reveal_type(random_st.randint(0, 100)) # E: int +reveal_type(random_st.randint(100)) # E: int +reveal_type(random_st.randint([100])) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.randint(0, [100])) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] + +reveal_type(random_st.randint(2, dtype=bool)) # E: builtins.bool +reveal_type(random_st.randint(0, 2, dtype=bool)) # E: builtins.bool +reveal_type(random_st.randint(I_bool_high_open, dtype=bool)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] +reveal_type(random_st.randint(I_bool_low, I_bool_high_open, dtype=bool)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] +reveal_type(random_st.randint(0, I_bool_high_open, dtype=bool)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] + +reveal_type(random_st.randint(2, dtype=np.bool_)) # E: builtins.bool +reveal_type(random_st.randint(0, 2, dtype=np.bool_)) # E: 
builtins.bool +reveal_type(random_st.randint(I_bool_high_open, dtype=np.bool_)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] +reveal_type(random_st.randint(I_bool_low, I_bool_high_open, dtype=np.bool_)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] +reveal_type(random_st.randint(0, I_bool_high_open, dtype=np.bool_)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_] + +reveal_type(random_st.randint(256, dtype="u1")) # E: int +reveal_type(random_st.randint(0, 256, dtype="u1")) # E: int +reveal_type(random_st.randint(I_u1_high_open, dtype="u1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(random_st.randint(I_u1_low, I_u1_high_open, dtype="u1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(random_st.randint(0, I_u1_high_open, dtype="u1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] + +reveal_type(random_st.randint(256, dtype="uint8")) # E: int +reveal_type(random_st.randint(0, 256, dtype="uint8")) # E: int +reveal_type(random_st.randint(I_u1_high_open, dtype="uint8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(random_st.randint(I_u1_low, I_u1_high_open, dtype="uint8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(random_st.randint(0, I_u1_high_open, dtype="uint8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] + +reveal_type(random_st.randint(256, dtype=np.uint8)) # E: int +reveal_type(random_st.randint(0, 256, dtype=np.uint8)) # E: int +reveal_type(random_st.randint(I_u1_high_open, dtype=np.uint8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(random_st.randint(I_u1_low, I_u1_high_open, dtype=np.uint8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] +reveal_type(random_st.randint(0, I_u1_high_open, dtype=np.uint8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._8Bit]]] + +reveal_type(random_st.randint(65536, dtype="u2")) # E: int +reveal_type(random_st.randint(0, 65536, dtype="u2")) # E: int +reveal_type(random_st.randint(I_u2_high_open, dtype="u2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(random_st.randint(I_u2_low, I_u2_high_open, dtype="u2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(random_st.randint(0, I_u2_high_open, dtype="u2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] + +reveal_type(random_st.randint(65536, dtype="uint16")) # E: int +reveal_type(random_st.randint(0, 65536, dtype="uint16")) # E: int +reveal_type(random_st.randint(I_u2_high_open, dtype="uint16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(random_st.randint(I_u2_low, I_u2_high_open, dtype="uint16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(random_st.randint(0, I_u2_high_open, dtype="uint16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] + +reveal_type(random_st.randint(65536, dtype=np.uint16)) # E: int +reveal_type(random_st.randint(0, 65536, dtype=np.uint16)) # E: int +reveal_type(random_st.randint(I_u2_high_open, dtype=np.uint16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(random_st.randint(I_u2_low, I_u2_high_open, 
dtype=np.uint16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] +reveal_type(random_st.randint(0, I_u2_high_open, dtype=np.uint16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._16Bit]]] + +reveal_type(random_st.randint(4294967296, dtype="u4")) # E: int +reveal_type(random_st.randint(0, 4294967296, dtype="u4")) # E: int +reveal_type(random_st.randint(I_u4_high_open, dtype="u4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(random_st.randint(I_u4_low, I_u4_high_open, dtype="u4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(random_st.randint(0, I_u4_high_open, dtype="u4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] + +reveal_type(random_st.randint(4294967296, dtype="uint32")) # E: int +reveal_type(random_st.randint(0, 4294967296, dtype="uint32")) # E: int +reveal_type(random_st.randint(I_u4_high_open, dtype="uint32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(random_st.randint(I_u4_low, I_u4_high_open, dtype="uint32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(random_st.randint(0, I_u4_high_open, dtype="uint32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] + +reveal_type(random_st.randint(4294967296, dtype=np.uint32)) # E: int +reveal_type(random_st.randint(0, 4294967296, dtype=np.uint32)) # E: int +reveal_type(random_st.randint(I_u4_high_open, dtype=np.uint32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] +reveal_type(random_st.randint(0, I_u4_high_open, dtype=np.uint32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]] + +reveal_type(random_st.randint(4294967296, dtype=np.uint)) # E: int +reveal_type(random_st.randint(0, 4294967296, dtype=np.uint)) # E: int +reveal_type(random_st.randint(I_u4_high_open, dtype=np.uint)) # E: numpy.ndarray[Any, numpy.dtype[{uint}]] +reveal_type(random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint)) # E: numpy.ndarray[Any, numpy.dtype[{uint}]] +reveal_type(random_st.randint(0, I_u4_high_open, dtype=np.uint)) # E: numpy.ndarray[Any, numpy.dtype[{uint}]] + +reveal_type(random_st.randint(18446744073709551616, dtype="u8")) # E: int +reveal_type(random_st.randint(0, 18446744073709551616, dtype="u8")) # E: int +reveal_type(random_st.randint(I_u8_high_open, dtype="u8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.randint(I_u8_low, I_u8_high_open, dtype="u8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.randint(0, I_u8_high_open, dtype="u8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] + +reveal_type(random_st.randint(18446744073709551616, dtype="uint64")) # E: int +reveal_type(random_st.randint(0, 18446744073709551616, dtype="uint64")) # E: int +reveal_type(random_st.randint(I_u8_high_open, dtype="uint64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.randint(I_u8_low, I_u8_high_open, dtype="uint64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] 
+reveal_type(random_st.randint(0, I_u8_high_open, dtype="uint64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] + +reveal_type(random_st.randint(18446744073709551616, dtype=np.uint64)) # E: int +reveal_type(random_st.randint(0, 18446744073709551616, dtype=np.uint64)) # E: int +reveal_type(random_st.randint(I_u8_high_open, dtype=np.uint64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.randint(I_u8_low, I_u8_high_open, dtype=np.uint64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.randint(0, I_u8_high_open, dtype=np.uint64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._64Bit]]] + +reveal_type(random_st.randint(128, dtype="i1")) # E: int +reveal_type(random_st.randint(-128, 128, dtype="i1")) # E: int +reveal_type(random_st.randint(I_i1_high_open, dtype="i1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(random_st.randint(I_i1_low, I_i1_high_open, dtype="i1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(random_st.randint(-128, I_i1_high_open, dtype="i1")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] + +reveal_type(random_st.randint(128, dtype="int8")) # E: int +reveal_type(random_st.randint(-128, 128, dtype="int8")) # E: int +reveal_type(random_st.randint(I_i1_high_open, dtype="int8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(random_st.randint(I_i1_low, I_i1_high_open, dtype="int8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(random_st.randint(-128, I_i1_high_open, dtype="int8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] + +reveal_type(random_st.randint(128, dtype=np.int8)) # E: int +reveal_type(random_st.randint(-128, 128, dtype=np.int8)) # E: int +reveal_type(random_st.randint(I_i1_high_open, dtype=np.int8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(random_st.randint(I_i1_low, I_i1_high_open, dtype=np.int8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] +reveal_type(random_st.randint(-128, I_i1_high_open, dtype=np.int8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._8Bit]]] + +reveal_type(random_st.randint(32768, dtype="i2")) # E: int +reveal_type(random_st.randint(-32768, 32768, dtype="i2")) # E: int +reveal_type(random_st.randint(I_i2_high_open, dtype="i2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(random_st.randint(I_i2_low, I_i2_high_open, dtype="i2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(random_st.randint(-32768, I_i2_high_open, dtype="i2")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(random_st.randint(32768, dtype="int16")) # E: int +reveal_type(random_st.randint(-32768, 32768, dtype="int16")) # E: int +reveal_type(random_st.randint(I_i2_high_open, dtype="int16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(random_st.randint(I_i2_low, I_i2_high_open, dtype="int16")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(random_st.randint(-32768, I_i2_high_open, dtype="int16")) # E: numpy.ndarray[Any, 
numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(random_st.randint(32768, dtype=np.int16)) # E: int +reveal_type(random_st.randint(-32768, 32768, dtype=np.int16)) # E: int +reveal_type(random_st.randint(I_i2_high_open, dtype=np.int16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(random_st.randint(I_i2_low, I_i2_high_open, dtype=np.int16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] +reveal_type(random_st.randint(-32768, I_i2_high_open, dtype=np.int16)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._16Bit]]] + +reveal_type(random_st.randint(2147483648, dtype="i4")) # E: int +reveal_type(random_st.randint(-2147483648, 2147483648, dtype="i4")) # E: int +reveal_type(random_st.randint(I_i4_high_open, dtype="i4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(random_st.randint(I_i4_low, I_i4_high_open, dtype="i4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(random_st.randint(-2147483648, I_i4_high_open, dtype="i4")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] + +reveal_type(random_st.randint(2147483648, dtype="int32")) # E: int +reveal_type(random_st.randint(-2147483648, 2147483648, dtype="int32")) # E: int +reveal_type(random_st.randint(I_i4_high_open, dtype="int32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(random_st.randint(I_i4_low, I_i4_high_open, dtype="int32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(random_st.randint(-2147483648, I_i4_high_open, dtype="int32")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] + +reveal_type(random_st.randint(2147483648, dtype=np.int32)) # E: int +reveal_type(random_st.randint(-2147483648, 2147483648, dtype=np.int32)) # E: int +reveal_type(random_st.randint(I_i4_high_open, dtype=np.int32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] +reveal_type(random_st.randint(-2147483648, I_i4_high_open, dtype=np.int32)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._32Bit]]] + +reveal_type(random_st.randint(2147483648, dtype=np.int_)) # E: int +reveal_type(random_st.randint(-2147483648, 2147483648, dtype=np.int_)) # E: int +reveal_type(random_st.randint(I_i4_high_open, dtype=np.int_)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int_)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.randint(-2147483648, I_i4_high_open, dtype=np.int_)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] + +reveal_type(random_st.randint(9223372036854775808, dtype="i8")) # E: int +reveal_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype="i8")) # E: int +reveal_type(random_st.randint(I_i8_high_open, dtype="i8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.randint(I_i8_low, I_i8_high_open, dtype="i8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype="i8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] + 
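+# Note for the checks below: scalar `low`/`high` arguments are expected to
+# resolve `randint` to a plain `int`, while array-like bounds (the `I_*`
+# variables) are expected to resolve to an `ndarray` overload parametrized
+# by the requested dtype.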
+reveal_type(random_st.randint(9223372036854775808, dtype="int64")) # E: int +reveal_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype="int64")) # E: int +reveal_type(random_st.randint(I_i8_high_open, dtype="int64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.randint(I_i8_low, I_i8_high_open, dtype="int64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype="int64")) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] + +reveal_type(random_st.randint(9223372036854775808, dtype=np.int64)) # E: int +reveal_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype=np.int64)) # E: int +reveal_type(random_st.randint(I_i8_high_open, dtype=np.int64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.randint(I_i8_low, I_i8_high_open, dtype=np.int64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] +reveal_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype=np.int64)) # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]] + +reveal_type(random_st._bit_generator) # E: BitGenerator + +reveal_type(random_st.bytes(2)) # E: bytes + +reveal_type(random_st.choice(5)) # E: int +reveal_type(random_st.choice(5, 3)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.choice(5, 3, replace=True)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.choice(5, 3, p=[1 / 5] * 5)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.choice(5, 3, p=[1 / 5] * 5, replace=False)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] + +reveal_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"])) # E: Any +reveal_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3)) # E: numpy.ndarray[Any, Any] +reveal_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, p=[1 / 4] * 4)) # E: numpy.ndarray[Any, Any] +reveal_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=True)) # E: numpy.ndarray[Any, Any] +reveal_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4]))) # E: numpy.ndarray[Any, Any] + +reveal_type(random_st.dirichlet([0.5, 0.5])) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.dirichlet(np.array([0.5, 0.5]))) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.dirichlet(np.array([0.5, 0.5]), size=3)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] + +reveal_type(random_st.multinomial(20, [1 / 6.0] * 6)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.multinomial(20, np.array([0.5, 0.5]))) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] +reveal_type(random_st.multinomial(20, [1 / 6.0] * 6, size=2)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]] + +reveal_type(random_st.multivariate_normal([0.0], [[1.0]])) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.multivariate_normal([0.0], np.array([[1.0]]))) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] +reveal_type(random_st.multivariate_normal(np.array([0.0]), [[1.0]])) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]] 
+reveal_type(random_st.multivariate_normal([0.0], np.array([[1.0]])))  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]
+
+reveal_type(random_st.permutation(10))  # E: numpy.ndarray[Any, numpy.dtype[{int_}]]
+reveal_type(random_st.permutation([1, 2, 3, 4]))  # E: numpy.ndarray[Any, Any]
+reveal_type(random_st.permutation(np.array([1, 2, 3, 4])))  # E: numpy.ndarray[Any, Any]
+reveal_type(random_st.permutation(D_2D))  # E: numpy.ndarray[Any, Any]
+
+reveal_type(random_st.shuffle(np.arange(10)))  # E: None
+reveal_type(random_st.shuffle([1, 2, 3, 4, 5]))  # E: None
+reveal_type(random_st.shuffle(D_2D))  # E: None
+
+reveal_type(np.random.RandomState(pcg64))  # E: RandomState
+reveal_type(np.random.RandomState(0))  # E: RandomState
+reveal_type(np.random.RandomState([0, 1, 2]))  # E: RandomState
+reveal_type(random_st.__str__())  # E: str
+reveal_type(random_st.__repr__())  # E: str
+random_st_state = random_st.__getstate__()
+reveal_type(random_st_state)  # E: builtins.dict[builtins.str, Any]
+reveal_type(random_st.__setstate__(random_st_state))  # E: None
+reveal_type(random_st.seed())  # E: None
+reveal_type(random_st.seed(1))  # E: None
+reveal_type(random_st.seed([0, 1]))  # E: None
+random_st_get_state = random_st.get_state()
+reveal_type(random_st_get_state)  # E: builtins.dict[builtins.str, Any]
+random_st_get_state_legacy = random_st.get_state(legacy=True)
+reveal_type(random_st_get_state_legacy)  # E: Union[builtins.dict[builtins.str, Any], Tuple[builtins.str, numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[numpy.typing._32Bit]]], builtins.int, builtins.int, builtins.float]]
+reveal_type(random_st.set_state(random_st_get_state))  # E: None
+
+reveal_type(random_st.rand())  # E: float
+reveal_type(random_st.rand(1))  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]
+reveal_type(random_st.rand(1, 2))  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]
+reveal_type(random_st.randn())  # E: float
+reveal_type(random_st.randn(1))  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]
+reveal_type(random_st.randn(1, 2))  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]
+reveal_type(random_st.random_sample())  # E: float
+reveal_type(random_st.random_sample(1))  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]
+reveal_type(random_st.random_sample(size=(1, 2)))  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[numpy.typing._64Bit]]
+
+reveal_type(random_st.tomaxint())  # E: int
+reveal_type(random_st.tomaxint(1))  # E: numpy.ndarray[Any, numpy.dtype[{int_}]]
+reveal_type(random_st.tomaxint((1,)))  # E: numpy.ndarray[Any, numpy.dtype[{int_}]]
diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/scalars.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/scalars.py
new file mode 100644
index 0000000000000000000000000000000000000000..03254dbad10b875154a7f9d2e67821dd6a02d9b1
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/scalars.py
@@ -0,0 +1,146 @@
+import sys
+import numpy as np
+
+b: np.bool_
+u8: np.uint64
+i8: np.int64
+f8: np.float64
+c8: np.complex64
+c16: np.complex128
+m: np.timedelta64
+U: np.str_
+S: np.bytes_
+
+reveal_type(c8.real)  # E: {float32}
+reveal_type(c8.imag)  # E: {float32}
+
+reveal_type(c8.real.real)  # E: {float32}
+reveal_type(c8.real.imag)  # E: {float32}
+
+reveal_type(c8.itemsize)  # E: int
+reveal_type(c8.shape)  # E: Tuple[]
+reveal_type(c8.strides)  # E: Tuple[]
+
+reveal_type(c8.ndim)  # E: Literal[0]
+reveal_type(c8.size) # E: Literal[1] + +reveal_type(c8.squeeze()) # E: {complex64} +reveal_type(c8.byteswap()) # E: {complex64} +reveal_type(c8.transpose()) # E: {complex64} + +reveal_type(c8.dtype) # E: numpy.dtype[{complex64}] + +reveal_type(c8.real) # E: {float32} +reveal_type(c16.imag) # E: {float64} + +reveal_type(np.unicode_('foo')) # E: numpy.str_ +reveal_type(np.str0('foo')) # E: numpy.str_ + +# Aliases +reveal_type(np.unicode_()) # E: numpy.str_ +reveal_type(np.str0()) # E: numpy.str_ +reveal_type(np.bool8()) # E: numpy.bool_ +reveal_type(np.bytes0()) # E: numpy.bytes_ +reveal_type(np.string_()) # E: numpy.bytes_ +reveal_type(np.object0()) # E: numpy.object_ +reveal_type(np.void0(0)) # E: numpy.void + +reveal_type(np.byte()) # E: {byte} +reveal_type(np.short()) # E: {short} +reveal_type(np.intc()) # E: {intc} +reveal_type(np.intp()) # E: {intp} +reveal_type(np.int0()) # E: {intp} +reveal_type(np.int_()) # E: {int_} +reveal_type(np.longlong()) # E: {longlong} + +reveal_type(np.ubyte()) # E: {ubyte} +reveal_type(np.ushort()) # E: {ushort} +reveal_type(np.uintc()) # E: {uintc} +reveal_type(np.uintp()) # E: {uintp} +reveal_type(np.uint0()) # E: {uintp} +reveal_type(np.uint()) # E: {uint} +reveal_type(np.ulonglong()) # E: {ulonglong} + +reveal_type(np.half()) # E: {half} +reveal_type(np.single()) # E: {single} +reveal_type(np.double()) # E: {double} +reveal_type(np.float_()) # E: {double} +reveal_type(np.longdouble()) # E: {longdouble} +reveal_type(np.longfloat()) # E: {longdouble} + +reveal_type(np.csingle()) # E: {csingle} +reveal_type(np.singlecomplex()) # E: {csingle} +reveal_type(np.cdouble()) # E: {cdouble} +reveal_type(np.complex_()) # E: {cdouble} +reveal_type(np.cfloat()) # E: {cdouble} +reveal_type(np.clongdouble()) # E: {clongdouble} +reveal_type(np.clongfloat()) # E: {clongdouble} +reveal_type(np.longcomplex()) # E: {clongdouble} + +reveal_type(b.item()) # E: bool +reveal_type(i8.item()) # E: int +reveal_type(u8.item()) # E: int +reveal_type(f8.item()) # E: float +reveal_type(c16.item()) # E: complex +reveal_type(U.item()) # E: str +reveal_type(S.item()) # E: bytes + +reveal_type(b.tolist()) # E: bool +reveal_type(i8.tolist()) # E: int +reveal_type(u8.tolist()) # E: int +reveal_type(f8.tolist()) # E: float +reveal_type(c16.tolist()) # E: complex +reveal_type(U.tolist()) # E: str +reveal_type(S.tolist()) # E: bytes + +reveal_type(b.ravel()) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(i8.ravel()) # E: numpy.ndarray[Any, numpy.dtype[{int64}]] +reveal_type(u8.ravel()) # E: numpy.ndarray[Any, numpy.dtype[{uint64}]] +reveal_type(f8.ravel()) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] +reveal_type(c16.ravel()) # E: numpy.ndarray[Any, numpy.dtype[{complex128}]] +reveal_type(U.ravel()) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] +reveal_type(S.ravel()) # E: numpy.ndarray[Any, numpy.dtype[numpy.bytes_]] + +reveal_type(b.flatten()) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(i8.flatten()) # E: numpy.ndarray[Any, numpy.dtype[{int64}]] +reveal_type(u8.flatten()) # E: numpy.ndarray[Any, numpy.dtype[{uint64}]] +reveal_type(f8.flatten()) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] +reveal_type(c16.flatten()) # E: numpy.ndarray[Any, numpy.dtype[{complex128}]] +reveal_type(U.flatten()) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] +reveal_type(S.flatten()) # E: numpy.ndarray[Any, numpy.dtype[numpy.bytes_]] + +reveal_type(b.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(i8.reshape(1)) # E: numpy.ndarray[Any, 
numpy.dtype[{int64}]] +reveal_type(u8.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[{uint64}]] +reveal_type(f8.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]] +reveal_type(c16.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[{complex128}]] +reveal_type(U.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] +reveal_type(S.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bytes_]] + +reveal_type(f8.as_integer_ratio()) # E: Tuple[builtins.int, builtins.int] +reveal_type(f8.is_integer()) # E: bool +reveal_type(f8.__trunc__()) # E: int +reveal_type(f8.__getformat__("float")) # E: str +reveal_type(f8.hex()) # E: str +reveal_type(np.float64.fromhex("0x0.0p+0")) # E: {float64} + +reveal_type(f8.__getnewargs__()) # E: Tuple[builtins.float] +reveal_type(c16.__getnewargs__()) # E: Tuple[builtins.float, builtins.float] + +reveal_type(i8.numerator) # E: {int64} +reveal_type(i8.denominator) # E: Literal[1] +reveal_type(u8.numerator) # E: {uint64} +reveal_type(u8.denominator) # E: Literal[1] +reveal_type(m.numerator) # E: numpy.timedelta64 +reveal_type(m.denominator) # E: Literal[1] + +reveal_type(round(i8)) # E: int +reveal_type(round(i8, 3)) # E: {int64} +reveal_type(round(u8)) # E: int +reveal_type(round(u8, 3)) # E: {uint64} +reveal_type(round(f8)) # E: int +reveal_type(round(f8, 3)) # E: {float64} + +if sys.version_info >= (3, 9): + reveal_type(f8.__ceil__()) # E: int + reveal_type(f8.__floor__()) # E: int diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/ufunc_config.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/ufunc_config.py new file mode 100644 index 0000000000000000000000000000000000000000..11a27b564bf56ed798c6d5ad956539a3973db460 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/ufunc_config.py @@ -0,0 +1,25 @@ +"""Typing tests for `numpy.core._ufunc_config`.""" + +import numpy as np + +def func(a: str, b: int) -> None: ... + +class Write: + def write(self, value: str) -> None: ... 
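+# `func` matches the callback signature accepted by `np.seterrcall`, and
+# `Write` supplies the `write` method required of a log-like target
+# (`_SupportsWrite` in the stubs); both variants are exercised below.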
+ +reveal_type(np.seterr(all=None)) # E: TypedDict('numpy.core._ufunc_config._ErrDict' +reveal_type(np.seterr(divide="ignore")) # E: TypedDict('numpy.core._ufunc_config._ErrDict' +reveal_type(np.seterr(over="warn")) # E: TypedDict('numpy.core._ufunc_config._ErrDict' +reveal_type(np.seterr(under="call")) # E: TypedDict('numpy.core._ufunc_config._ErrDict' +reveal_type(np.seterr(invalid="raise")) # E: TypedDict('numpy.core._ufunc_config._ErrDict' +reveal_type(np.geterr()) # E: TypedDict('numpy.core._ufunc_config._ErrDict' + +reveal_type(np.setbufsize(4096)) # E: int +reveal_type(np.getbufsize()) # E: int + +reveal_type(np.seterrcall(func)) # E: Union[None, def (builtins.str, builtins.int) -> Any, numpy.core._ufunc_config._SupportsWrite] +reveal_type(np.seterrcall(Write())) # E: Union[None, def (builtins.str, builtins.int) -> Any, numpy.core._ufunc_config._SupportsWrite] +reveal_type(np.geterrcall()) # E: Union[None, def (builtins.str, builtins.int) -> Any, numpy.core._ufunc_config._SupportsWrite] + +reveal_type(np.errstate(call=func, all="call")) # E: numpy.errstate[def (a: builtins.str, b: builtins.int)] +reveal_type(np.errstate(call=Write(), divide="log", over="log")) # E: numpy.errstate[ufunc_config.Write] diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/ufunclike.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/ufunclike.py new file mode 100644 index 0000000000000000000000000000000000000000..e874cdbc593ce0fd321150e6b7d149ebd886ef58 --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/ufunclike.py @@ -0,0 +1,29 @@ +from typing import List, Any +import numpy as np + +AR_LIKE_b: List[bool] +AR_LIKE_u: List[np.uint32] +AR_LIKE_i: List[int] +AR_LIKE_f: List[float] +AR_LIKE_O: List[np.object_] + +AR_U: np.ndarray[Any, np.dtype[np.str_]] + +reveal_type(np.fix(AR_LIKE_b)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(np.fix(AR_LIKE_u)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(np.fix(AR_LIKE_i)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(np.fix(AR_LIKE_f)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]] +reveal_type(np.fix(AR_LIKE_O)) # E: Any +reveal_type(np.fix(AR_LIKE_f, out=AR_U)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] + +reveal_type(np.isposinf(AR_LIKE_b)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.isposinf(AR_LIKE_u)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.isposinf(AR_LIKE_i)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.isposinf(AR_LIKE_f)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.isposinf(AR_LIKE_f, out=AR_U)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] + +reveal_type(np.isneginf(AR_LIKE_b)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.isneginf(AR_LIKE_u)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.isneginf(AR_LIKE_i)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.isneginf(AR_LIKE_f)) # E: numpy.ndarray[Any, numpy.dtype[numpy.bool_]] +reveal_type(np.isneginf(AR_LIKE_f, out=AR_U)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]] diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/ufuncs.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/ufuncs.py new file mode 100644 index 0000000000000000000000000000000000000000..04f34d56ebfb3c98e67efeefd44bb01eba10f1bb --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/ufuncs.py @@ -0,0 +1,68 @@ 
+import numpy as np +import numpy.typing as npt + +f8: np.float64 +AR_f8: npt.NDArray[np.float64] +AR_i8: npt.NDArray[np.int64] + +reveal_type(np.absolute.__doc__) # E: str +reveal_type(np.absolute.types) # E: builtins.list[builtins.str] + +reveal_type(np.absolute.__name__) # E: Literal['absolute'] +reveal_type(np.absolute.ntypes) # E: Literal[20] +reveal_type(np.absolute.identity) # E: None +reveal_type(np.absolute.nin) # E: Literal[1] +reveal_type(np.absolute.nin) # E: Literal[1] +reveal_type(np.absolute.nout) # E: Literal[1] +reveal_type(np.absolute.nargs) # E: Literal[2] +reveal_type(np.absolute.signature) # E: None +reveal_type(np.absolute(f8)) # E: Any +reveal_type(np.absolute(AR_f8)) # E: numpy.ndarray +reveal_type(np.absolute.at(AR_f8, AR_i8)) # E: None + +reveal_type(np.add.__name__) # E: Literal['add'] +reveal_type(np.add.ntypes) # E: Literal[22] +reveal_type(np.add.identity) # E: Literal[0] +reveal_type(np.add.nin) # E: Literal[2] +reveal_type(np.add.nout) # E: Literal[1] +reveal_type(np.add.nargs) # E: Literal[3] +reveal_type(np.add.signature) # E: None +reveal_type(np.add(f8, f8)) # E: Any +reveal_type(np.add(AR_f8, f8)) # E: numpy.ndarray +reveal_type(np.add.at(AR_f8, AR_i8, f8)) # E: None +reveal_type(np.add.reduce(AR_f8, axis=0)) # E: Any +reveal_type(np.add.accumulate(AR_f8)) # E: numpy.ndarray +reveal_type(np.add.reduceat(AR_f8, AR_i8)) # E: numpy.ndarray +reveal_type(np.add.outer(f8, f8)) # E: Any +reveal_type(np.add.outer(AR_f8, f8)) # E: numpy.ndarray + +reveal_type(np.frexp.__name__) # E: Literal['frexp'] +reveal_type(np.frexp.ntypes) # E: Literal[4] +reveal_type(np.frexp.identity) # E: None +reveal_type(np.frexp.nin) # E: Literal[1] +reveal_type(np.frexp.nout) # E: Literal[2] +reveal_type(np.frexp.nargs) # E: Literal[3] +reveal_type(np.frexp.signature) # E: None +reveal_type(np.frexp(f8)) # E: Tuple[Any, Any] +reveal_type(np.frexp(AR_f8)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[Any]], numpy.ndarray[Any, numpy.dtype[Any]]] + +reveal_type(np.divmod.__name__) # E: Literal['divmod'] +reveal_type(np.divmod.ntypes) # E: Literal[15] +reveal_type(np.divmod.identity) # E: None +reveal_type(np.divmod.nin) # E: Literal[2] +reveal_type(np.divmod.nout) # E: Literal[2] +reveal_type(np.divmod.nargs) # E: Literal[4] +reveal_type(np.divmod.signature) # E: None +reveal_type(np.divmod(f8, f8)) # E: Tuple[Any, Any] +reveal_type(np.divmod(AR_f8, f8)) # E: Tuple[numpy.ndarray[Any, numpy.dtype[Any]], numpy.ndarray[Any, numpy.dtype[Any]]] + +reveal_type(np.matmul.__name__) # E: Literal['matmul'] +reveal_type(np.matmul.ntypes) # E: Literal[19] +reveal_type(np.matmul.identity) # E: None +reveal_type(np.matmul.nin) # E: Literal[2] +reveal_type(np.matmul.nout) # E: Literal[1] +reveal_type(np.matmul.nargs) # E: Literal[3] +reveal_type(np.matmul.signature) # E: Literal['(n?,k),(k,m?)->(n?,m?)'] +reveal_type(np.matmul.identity) # E: None +reveal_type(np.matmul(AR_f8, AR_f8)) # E: Any +reveal_type(np.matmul(AR_f8, AR_f8, axes=[(0, 1), (0, 1), (0, 1)])) # E: Any diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/warnings_and_errors.py b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/warnings_and_errors.py new file mode 100644 index 0000000000000000000000000000000000000000..1a8bb3b91a1b564eeec43451ad3120715e0883aa --- /dev/null +++ b/MLPY/Lib/site-packages/numpy/typing/tests/data/reveal/warnings_and_errors.py @@ -0,0 +1,10 @@ +from typing import Type + +import numpy as np + +reveal_type(np.ModuleDeprecationWarning()) # E: numpy.ModuleDeprecationWarning 
+reveal_type(np.VisibleDeprecationWarning())  # E: numpy.VisibleDeprecationWarning
+reveal_type(np.ComplexWarning())  # E: numpy.ComplexWarning
+reveal_type(np.RankWarning())  # E: numpy.RankWarning
+reveal_type(np.TooHardError())  # E: numpy.TooHardError
+reveal_type(np.AxisError(1))  # E: numpy.AxisError
diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/test_generic_alias.py b/MLPY/Lib/site-packages/numpy/typing/tests/test_generic_alias.py
new file mode 100644
index 0000000000000000000000000000000000000000..26d9d047aa1a8896c248bf1e8e9dfa4bad1a65a8
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/typing/tests/test_generic_alias.py
@@ -0,0 +1,122 @@
+from __future__ import annotations
+
+import sys
+import types
+import pickle
+import weakref
+from typing import TypeVar, Any, Callable, Tuple, Type, Union
+
+import pytest
+import numpy as np
+from numpy.typing._generic_alias import _GenericAlias
+
+ScalarType = TypeVar("ScalarType", bound=np.generic, covariant=True)
+T1 = TypeVar("T1")
+T2 = TypeVar("T2")
+DType = _GenericAlias(np.dtype, (ScalarType,))
+NDArray = _GenericAlias(np.ndarray, (Any, DType))
+
+if sys.version_info >= (3, 9):
+    DType_ref = types.GenericAlias(np.dtype, (ScalarType,))
+    NDArray_ref = types.GenericAlias(np.ndarray, (Any, DType_ref))
+    FuncType = Callable[[Union[_GenericAlias, types.GenericAlias]], Any]
+else:
+    DType_ref = Any
+    NDArray_ref = Any
+    FuncType = Callable[[_GenericAlias], Any]
+
+GETATTR_NAMES = sorted(set(dir(np.ndarray)) - _GenericAlias._ATTR_EXCEPTIONS)
+
+BUFFER = np.array([1], dtype=np.int64)
+BUFFER.setflags(write=False)
+
+def _get_subclass_mro(base: type) -> Tuple[type, ...]:
+    class Subclass(base):  # type: ignore[misc,valid-type]
+        pass
+    return Subclass.__mro__[1:]
+
+
+class TestGenericAlias:
+    """Tests for `numpy.typing._generic_alias._GenericAlias`."""
+
+    @pytest.mark.parametrize("name,func", [
+        ("__init__", lambda n: n),
+        ("__origin__", lambda n: n.__origin__),
+        ("__args__", lambda n: n.__args__),
+        ("__parameters__", lambda n: n.__parameters__),
+        ("__reduce__", lambda n: n.__reduce__()[1:]),
+        ("__reduce_ex__", lambda n: n.__reduce_ex__(1)[1:]),
+        ("__mro_entries__", lambda n: n.__mro_entries__([object])),
+        ("__hash__", lambda n: hash(n)),
+        ("__repr__", lambda n: repr(n)),
+        ("__getitem__", lambda n: n[np.float64]),
+        ("__getitem__", lambda n: n[ScalarType][np.float64]),
+        ("__getitem__", lambda n: n[Union[np.int64, ScalarType]][np.float64]),
+        ("__getitem__", lambda n: n[Union[T1, T2]][np.float32, np.float64]),
+        ("__eq__", lambda n: n == n),
+        ("__ne__", lambda n: n != np.ndarray),
+        ("__dir__", lambda n: dir(n)),
+        ("__call__", lambda n: n((1,), np.int64, BUFFER)),
+        ("__call__", lambda n: n(shape=(1,), dtype=np.int64, buffer=BUFFER)),
+        ("subclassing", lambda n: _get_subclass_mro(n)),
+        ("pickle", lambda n: n == pickle.loads(pickle.dumps(n))),
+    ])
+    def test_pass(self, name: str, func: FuncType) -> None:
+        """Compare `types.GenericAlias` with its numpy-based backport.
+
+        Check whether ``func`` runs as intended and that both `GenericAlias`
+        and `_GenericAlias` return the same result.
+
+        """
+        value = func(NDArray)
+
+        if sys.version_info >= (3, 9):
+            value_ref = func(NDArray_ref)
+            assert value == value_ref
+
+    def test_weakref(self) -> None:
+        """Test ``__weakref__``."""
+        value = weakref.ref(NDArray)()
+
+        if sys.version_info >= (3, 9, 1):  # xref bpo-42332
+            value_ref = weakref.ref(NDArray_ref)()
+            assert value == value_ref
+
+    @pytest.mark.parametrize("name", GETATTR_NAMES)
+    def test_getattr(self, name: str) -> None:
+        """Test that `getattr` wraps around the underlying type,
+        aka ``__origin__``.
+
+        """
+        value = getattr(NDArray, name)
+        value_ref1 = getattr(np.ndarray, name)
+
+        if sys.version_info >= (3, 9):
+            value_ref2 = getattr(NDArray_ref, name)
+            assert value == value_ref1 == value_ref2
+        else:
+            assert value == value_ref1
+
+    @pytest.mark.parametrize("name,exc_type,func", [
+        ("__getitem__", TypeError, lambda n: n[()]),
+        ("__getitem__", TypeError, lambda n: n[Any, Any]),
+        ("__getitem__", TypeError, lambda n: n[Any][Any]),
+        ("isinstance", TypeError, lambda n: isinstance(np.array(1), n)),
+        ("issubclass", TypeError, lambda n: issubclass(np.ndarray, n)),
+        ("setattr", AttributeError, lambda n: setattr(n, "__origin__", int)),
+        ("setattr", AttributeError, lambda n: setattr(n, "test", int)),
+        ("getattr", AttributeError, lambda n: getattr(n, "test")),
+    ])
+    def test_raise(
+        self,
+        name: str,
+        exc_type: Type[BaseException],
+        func: FuncType,
+    ) -> None:
+        """Test operations that are supposed to raise."""
+        with pytest.raises(exc_type):
+            func(NDArray)
+
+        if sys.version_info >= (3, 9):
+            with pytest.raises(exc_type):
+                func(NDArray_ref)
diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/test_isfile.py b/MLPY/Lib/site-packages/numpy/typing/tests/test_isfile.py
new file mode 100644
index 0000000000000000000000000000000000000000..92b146c036135534b1f5cd3ac38eca1a04f1cb99
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/typing/tests/test_isfile.py
@@ -0,0 +1,32 @@
+import os
+from pathlib import Path
+
+import numpy as np
+from numpy.testing import assert_
+
+ROOT = Path(np.__file__).parents[0]
+FILES = [
+    ROOT / "py.typed",
+    ROOT / "__init__.pyi",
+    ROOT / "char.pyi",
+    ROOT / "ctypeslib.pyi",
+    ROOT / "rec.pyi",
+    ROOT / "core" / "__init__.pyi",
+    ROOT / "distutils" / "__init__.pyi",
+    ROOT / "f2py" / "__init__.pyi",
+    ROOT / "fft" / "__init__.pyi",
+    ROOT / "lib" / "__init__.pyi",
+    ROOT / "linalg" / "__init__.pyi",
+    ROOT / "ma" / "__init__.pyi",
+    ROOT / "matrixlib" / "__init__.pyi",
+    ROOT / "polynomial" / "__init__.pyi",
+    ROOT / "random" / "__init__.pyi",
+    ROOT / "testing" / "__init__.pyi",
+]
+
+
+class TestIsFile:
+    def test_isfile(self):
+        """Test if all ``.pyi`` files are properly installed."""
+        for file in FILES:
+            assert_(os.path.isfile(file))
diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/test_runtime.py b/MLPY/Lib/site-packages/numpy/typing/tests/test_runtime.py
new file mode 100644
index 0000000000000000000000000000000000000000..fc86b059607873ffbde3b51699baa3d314e696df
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/typing/tests/test_runtime.py
@@ -0,0 +1,90 @@
+"""Test the runtime usage of `numpy.typing`."""
+
+from __future__ import annotations
+
+import sys
+from typing import get_type_hints, Union, Tuple, NamedTuple
+
+import pytest
+import numpy as np
+import numpy.typing as npt
+
+try:
+    from typing_extensions import get_args, get_origin
+    SKIP = False
+except ImportError:
+    SKIP = True
+
+
+class TypeTup(NamedTuple):
+    typ: type
+    args: Tuple[type, ...]
+    origin: None | type
+
+
+if sys.version_info >= (3, 9):
+    NDArrayTup = TypeTup(npt.NDArray, npt.NDArray.__args__, np.ndarray)
+else:
+    NDArrayTup = TypeTup(npt.NDArray, (), None)
+
+TYPES = {
+    "ArrayLike": TypeTup(npt.ArrayLike, npt.ArrayLike.__args__, Union),
+    "DTypeLike": TypeTup(npt.DTypeLike, npt.DTypeLike.__args__, Union),
+    "NBitBase": TypeTup(npt.NBitBase, (), None),
+    "NDArray": NDArrayTup,
+}
+
+
+@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys())
+@pytest.mark.skipif(SKIP, reason="requires typing-extensions")
+def test_get_args(name: type, tup: TypeTup) -> None:
+    """Test `typing.get_args`."""
+    typ, ref = tup.typ, tup.args
+    out = get_args(typ)
+    assert out == ref
+
+
+@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys())
+@pytest.mark.skipif(SKIP, reason="requires typing-extensions")
+def test_get_origin(name: type, tup: TypeTup) -> None:
+    """Test `typing.get_origin`."""
+    typ, ref = tup.typ, tup.origin
+    out = get_origin(typ)
+    assert out == ref
+
+
+@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys())
+def test_get_type_hints(name: type, tup: TypeTup) -> None:
+    """Test `typing.get_type_hints`."""
+    typ = tup.typ
+
+    # Explicitly set `__annotations__` in order to circumvent the
+    # stringification performed by `from __future__ import annotations`
+    def func(a): pass
+    func.__annotations__ = {"a": typ, "return": None}
+
+    out = get_type_hints(func)
+    ref = {"a": typ, "return": type(None)}
+    assert out == ref
+
+
+@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys())
+def test_get_type_hints_str(name: type, tup: TypeTup) -> None:
+    """Test `typing.get_type_hints` with string-representation of types."""
+    typ_str, typ = f"npt.{name}", tup.typ
+
+    # Explicitly set `__annotations__` in order to circumvent the
+    # stringification performed by `from __future__ import annotations`
+    def func(a): pass
+    func.__annotations__ = {"a": typ_str, "return": None}
+
+    out = get_type_hints(func)
+    ref = {"a": typ, "return": type(None)}
+    assert out == ref
+
+
+def test_keys() -> None:
+    """Test that ``TYPES.keys()`` and ``numpy.typing.__all__`` are synced."""
+    keys = TYPES.keys()
+    ref = set(npt.__all__)
+    assert keys == ref
diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/test_typing.py b/MLPY/Lib/site-packages/numpy/typing/tests/test_typing.py
new file mode 100644
index 0000000000000000000000000000000000000000..b1aee56d7ee483b70e98de00ca626f4468ecc0c5
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/typing/tests/test_typing.py
@@ -0,0 +1,342 @@
+import importlib.util
+import itertools
+import os
+import re
+import shutil
+from collections import defaultdict
+from typing import Optional, IO, Dict, List
+
+import pytest
+import numpy as np
+from numpy.typing.mypy_plugin import _PRECISION_DICT, _EXTENDED_PRECISION_LIST
+
+try:
+    from mypy import api
+except ImportError:
+    NO_MYPY = True
+else:
+    NO_MYPY = False
+
+
+DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
+PASS_DIR = os.path.join(DATA_DIR, "pass")
+FAIL_DIR = os.path.join(DATA_DIR, "fail")
+REVEAL_DIR = os.path.join(DATA_DIR, "reveal")
+MISC_DIR = os.path.join(DATA_DIR, "misc")
+MYPY_INI = os.path.join(DATA_DIR, "mypy.ini")
+CACHE_DIR = os.path.join(DATA_DIR, ".mypy_cache")
+
+#: A dictionary with file names as keys and lists of the mypy stdout as values.
+#: To be populated by `run_mypy`.
+OUTPUT_MYPY: Dict[str, List[str]] = {}
+
+
+def _key_func(key: str) -> str:
+    """Split at the first occurrence of the ``:`` character.
+
+    Windows drive-letters (*e.g.* ``C:``) are ignored herein.
+    """
+    drive, tail = os.path.splitdrive(key)
+    return os.path.join(drive, tail.split(":", 1)[0])
+
+
+def _strip_filename(msg: str) -> str:
+    """Strip the filename from a mypy message."""
+    _, tail = os.path.splitdrive(msg)
+    return tail.split(":", 1)[-1]
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
+@pytest.fixture(scope="module", autouse=True)
+def run_mypy() -> None:
+    """Clear the cache and run mypy before running any of the typing tests.
+
+    The mypy results are cached in `OUTPUT_MYPY` for further use.
+
+    The cache refresh can be skipped using
+
+    NUMPY_TYPING_TEST_CLEAR_CACHE=0 pytest numpy/typing/tests
+    """
+    if os.path.isdir(CACHE_DIR) and bool(os.environ.get("NUMPY_TYPING_TEST_CLEAR_CACHE", True)):
+        shutil.rmtree(CACHE_DIR)
+
+    for directory in (PASS_DIR, REVEAL_DIR, FAIL_DIR, MISC_DIR):
+        # Run mypy
+        stdout, stderr, exit_code = api.run([
+            "--config-file",
+            MYPY_INI,
+            "--cache-dir",
+            CACHE_DIR,
+            directory,
+        ])
+        if stderr:
+            pytest.fail(f"Unexpected mypy standard error\n\n{stderr}")
+        elif exit_code not in {0, 1}:
+            pytest.fail(f"Unexpected mypy exit code: {exit_code}\n\n{stdout}")
+        stdout = stdout.replace('*', '')
+
+        # Parse the output
+        iterator = itertools.groupby(stdout.split("\n"), key=_key_func)
+        OUTPUT_MYPY.update((k, list(v)) for k, v in iterator if k)
+
+
+def get_test_cases(directory):
+    for root, _, files in os.walk(directory):
+        for fname in files:
+            if os.path.splitext(fname)[-1] == ".py":
+                fullpath = os.path.join(root, fname)
+                # Use relative path for nice py.test name
+                relpath = os.path.relpath(fullpath, start=directory)
+
+                yield pytest.param(
+                    fullpath,
+                    # Manually specify a name for the test
+                    id=relpath,
+                )
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
+@pytest.mark.parametrize("path", get_test_cases(PASS_DIR))
+def test_success(path):
+    # Alias `OUTPUT_MYPY` so that it appears in the local namespace
+    output_mypy = OUTPUT_MYPY
+    if path in output_mypy:
+        msg = "Unexpected mypy output\n\n"
+        msg += "\n".join(_strip_filename(v) for v in output_mypy[path])
+        raise AssertionError(msg)
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
+@pytest.mark.parametrize("path", get_test_cases(FAIL_DIR))
+def test_fail(path):
+    __tracebackhide__ = True
+
+    with open(path) as fin:
+        lines = fin.readlines()
+
+    errors = defaultdict(lambda: "")
+
+    output_mypy = OUTPUT_MYPY
+    assert path in output_mypy
+    for error_line in output_mypy[path]:
+        error_line = _strip_filename(error_line)
+        match = re.match(
+            r"(?P<lineno>\d+): (error|note): .+$",
+            error_line,
+        )
+        if match is None:
+            raise ValueError(f"Unexpected error line format: {error_line}")
+        lineno = int(match.group('lineno'))
+        errors[lineno] += f'{error_line}\n'
+
+    for i, line in enumerate(lines):
+        lineno = i + 1
+        if line.startswith('#') or (" E:" not in line and lineno not in errors):
+            continue
+
+        target_line = lines[lineno - 1]
+        if "# E:" in target_line:
+            marker = target_line.split("# E:")[-1].strip()
+            expected_error = errors.get(lineno)
+            _test_fail(path, marker, expected_error, lineno)
+        else:
+            pytest.fail(f"Unexpected mypy output\n\n{errors[lineno]}")
+
+
+_FAIL_MSG1 = """Extra error at line {}
+
+Extra error: {!r}
+"""
+
+_FAIL_MSG2 = """Error mismatch at line {}
+
+Expected error: {!r}
+Observed error: {!r}
+"""
+
+
+def _test_fail(path: str, error: str, expected_error: Optional[str], lineno: int) -> None:
+    if expected_error is None:
+        raise AssertionError(_FAIL_MSG1.format(lineno, error))
+    elif error not in expected_error:
+        raise AssertionError(_FAIL_MSG2.format(lineno, expected_error, error))
+
+
+def _construct_format_dict():
+    dct = {k.split(".")[-1]: v.replace("numpy", "numpy.typing") for
+           k, v in _PRECISION_DICT.items()}
+
+    return {
+        "uint8": "numpy.unsignedinteger[numpy.typing._8Bit]",
+        "uint16": "numpy.unsignedinteger[numpy.typing._16Bit]",
+        "uint32": "numpy.unsignedinteger[numpy.typing._32Bit]",
+        "uint64": "numpy.unsignedinteger[numpy.typing._64Bit]",
+        "uint128": "numpy.unsignedinteger[numpy.typing._128Bit]",
+        "uint256": "numpy.unsignedinteger[numpy.typing._256Bit]",
+        "int8": "numpy.signedinteger[numpy.typing._8Bit]",
+        "int16": "numpy.signedinteger[numpy.typing._16Bit]",
+        "int32": "numpy.signedinteger[numpy.typing._32Bit]",
+        "int64": "numpy.signedinteger[numpy.typing._64Bit]",
+        "int128": "numpy.signedinteger[numpy.typing._128Bit]",
+        "int256": "numpy.signedinteger[numpy.typing._256Bit]",
+        "float16": "numpy.floating[numpy.typing._16Bit]",
+        "float32": "numpy.floating[numpy.typing._32Bit]",
+        "float64": "numpy.floating[numpy.typing._64Bit]",
+        "float80": "numpy.floating[numpy.typing._80Bit]",
+        "float96": "numpy.floating[numpy.typing._96Bit]",
+        "float128": "numpy.floating[numpy.typing._128Bit]",
+        "float256": "numpy.floating[numpy.typing._256Bit]",
+        "complex64": "numpy.complexfloating[numpy.typing._32Bit, numpy.typing._32Bit]",
+        "complex128": "numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit]",
+        "complex160": "numpy.complexfloating[numpy.typing._80Bit, numpy.typing._80Bit]",
+        "complex192": "numpy.complexfloating[numpy.typing._96Bit, numpy.typing._96Bit]",
+        "complex256": "numpy.complexfloating[numpy.typing._128Bit, numpy.typing._128Bit]",
+        "complex512": "numpy.complexfloating[numpy.typing._256Bit, numpy.typing._256Bit]",
+
+        "ubyte": f"numpy.unsignedinteger[{dct['_NBitByte']}]",
+        "ushort": f"numpy.unsignedinteger[{dct['_NBitShort']}]",
+        "uintc": f"numpy.unsignedinteger[{dct['_NBitIntC']}]",
+        "uintp": f"numpy.unsignedinteger[{dct['_NBitIntP']}]",
+        "uint": f"numpy.unsignedinteger[{dct['_NBitInt']}]",
+        "ulonglong": f"numpy.unsignedinteger[{dct['_NBitLongLong']}]",
+        "byte": f"numpy.signedinteger[{dct['_NBitByte']}]",
+        "short": f"numpy.signedinteger[{dct['_NBitShort']}]",
+        "intc": f"numpy.signedinteger[{dct['_NBitIntC']}]",
+        "intp": f"numpy.signedinteger[{dct['_NBitIntP']}]",
+        "int_": f"numpy.signedinteger[{dct['_NBitInt']}]",
+        "longlong": f"numpy.signedinteger[{dct['_NBitLongLong']}]",
+
+        "half": f"numpy.floating[{dct['_NBitHalf']}]",
+        "single": f"numpy.floating[{dct['_NBitSingle']}]",
+        "double": f"numpy.floating[{dct['_NBitDouble']}]",
+        "longdouble": f"numpy.floating[{dct['_NBitLongDouble']}]",
+        "csingle": f"numpy.complexfloating[{dct['_NBitSingle']}, {dct['_NBitSingle']}]",
+        "cdouble": f"numpy.complexfloating[{dct['_NBitDouble']}, {dct['_NBitDouble']}]",
+        "clongdouble": f"numpy.complexfloating[{dct['_NBitLongDouble']}, {dct['_NBitLongDouble']}]",
+
+        # numpy.typing
+        "_NBitInt": dct['_NBitInt'],
+    }
+
+
+#: A dictionary with all supported format keys (as keys)
+#: and matching values
+FORMAT_DICT: Dict[str, str] = _construct_format_dict()
+
+
+def _parse_reveals(file: IO[str]) -> List[str]:
+    """Extract and parse all ``" # E: "`` comments from the passed file-like object.
+
+    All format keys will be substituted for their respective value from `FORMAT_DICT`,
+    *e.g.* ``"{float64}"`` becomes ``"numpy.floating[numpy.typing._64Bit]"``.
+    """
+    string = file.read().replace("*", "")
+
+    # Grab all `# E:`-based comments
+    comments_array = np.char.partition(string.split("\n"), sep=" # E: ")[:, 2]
+    comments = "/n".join(comments_array)
+
+    # Only search for the `{*}` pattern within comments,
+    # otherwise there is the risk of accidentally grabbing dictionaries and sets
+    key_set = set(re.findall(r"\{(.*?)\}", comments))
+    kwargs = {
+        k: FORMAT_DICT.get(k, f"<UNRECOGNIZED: {k}>") for k in key_set
+    }
+    fmt_str = comments.format(**kwargs)
+
+    return fmt_str.split("/n")
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
+@pytest.mark.parametrize("path", get_test_cases(REVEAL_DIR))
+def test_reveal(path):
+    __tracebackhide__ = True
+
+    with open(path) as fin:
+        lines = _parse_reveals(fin)
+
+    output_mypy = OUTPUT_MYPY
+    assert path in output_mypy
+    for error_line in output_mypy[path]:
+        error_line = _strip_filename(error_line)
+        match = re.match(
+            r"(?P<lineno>\d+): note: .+$",
+            error_line,
+        )
+        if match is None:
+            raise ValueError(f"Unexpected reveal line format: {error_line}")
+        lineno = int(match.group('lineno')) - 1
+        assert "Revealed type is" in error_line
+
+        marker = lines[lineno]
+        _test_reveal(path, marker, error_line, 1 + lineno)
+
+
+_REVEAL_MSG = """Reveal mismatch at line {}
+
+Expected reveal: {!r}
+Observed reveal: {!r}
+"""
+
+
+def _test_reveal(path: str, reveal: str, expected_reveal: str, lineno: int) -> None:
+    if reveal not in expected_reveal:
+        raise AssertionError(_REVEAL_MSG.format(lineno, expected_reveal, reveal))
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
+@pytest.mark.parametrize("path", get_test_cases(PASS_DIR))
+def test_code_runs(path):
+    path_without_extension, _ = os.path.splitext(path)
+    dirname, filename = path.split(os.sep)[-2:]
+    spec = importlib.util.spec_from_file_location(f"{dirname}.{filename}", path)
+    test_module = importlib.util.module_from_spec(spec)
+    spec.loader.exec_module(test_module)
+
+
+LINENO_MAPPING = {
+    3: "uint128",
+    4: "uint256",
+    6: "int128",
+    7: "int256",
+    9: "float80",
+    10: "float96",
+    11: "float128",
+    12: "float256",
+    14: "complex160",
+    15: "complex192",
+    16: "complex256",
+    17: "complex512",
+}
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
+def test_extended_precision() -> None:
+    path = os.path.join(MISC_DIR, "extended_precision.py")
+    output_mypy = OUTPUT_MYPY
+    assert path in output_mypy
+
+    for _msg in output_mypy[path]:
+        *_, _lineno, msg_typ, msg = _msg.split(":")
+
+        msg = _strip_filename(msg)
+        lineno = int(_lineno)
+        msg_typ = msg_typ.strip()
+        assert msg_typ in {"error", "note"}
+
+        if LINENO_MAPPING[lineno] in _EXTENDED_PRECISION_LIST:
+            if msg_typ == "error":
+                raise ValueError(f"Unexpected reveal line format: {lineno}")
+            else:
+                marker = FORMAT_DICT[LINENO_MAPPING[lineno]]
+                _test_reveal(path, marker, msg, lineno)
+        else:
+            if msg_typ == "error":
+                marker = "Module has no attribute"
+                _test_fail(path, marker, msg, lineno)
diff --git a/MLPY/Lib/site-packages/numpy/typing/tests/test_typing_extensions.py b/MLPY/Lib/site-packages/numpy/typing/tests/test_typing_extensions.py
new file mode 100644
index 0000000000000000000000000000000000000000..25f9fe17dcacd89d5af134f68a458aab01aab44b
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/typing/tests/test_typing_extensions.py
@@ -0,0 +1,35 @@
+"""Tests for the optional typing-extensions dependency."""
+
+import sys
+import textwrap
+import subprocess
+
+CODE = textwrap.dedent(r"""
+    import sys
+    import importlib
+
+    assert "typing_extensions" not in sys.modules
+    assert "numpy.typing" not in sys.modules
+
+    # Importing `typing_extensions` will now raise an `ImportError`
+    sys.modules["typing_extensions"] = None
+    assert importlib.import_module("numpy.typing")
+""")
+
+
+def test_no_typing_extensions() -> None:
+    """Import `numpy.typing` in the absence of typing-extensions.
+
+    Notes
+    -----
+    Ideally, we'd just run the normal typing tests in an environment where
+    typing-extensions is not installed, but unfortunately this is currently
+    impossible as it is an indirect hard dependency of pytest.
+
+    """
+    p = subprocess.run([sys.executable, '-c', CODE], capture_output=True)
+    if p.returncode:
+        raise AssertionError(
+            f"Non-zero return code: {p.returncode!r}\n\n{p.stderr.decode()}"
+        )
+
diff --git a/MLPY/Lib/site-packages/numpy/version.py b/MLPY/Lib/site-packages/numpy/version.py
new file mode 100644
index 0000000000000000000000000000000000000000..0cb231f1845573a468b6e474ac5dde6acb67f4c0
--- /dev/null
+++ b/MLPY/Lib/site-packages/numpy/version.py
@@ -0,0 +1,12 @@
+from ._version import get_versions
+
+__all__ = ['version', 'full_version', 'git_revision', 'release']
+
+vinfo = get_versions()
+version: str = vinfo["version"]
+full_version: str = vinfo['version']
+git_revision: str = vinfo['full-revisionid']
+release = 'dev0' not in version and '+' not in version
+short_version: str = vinfo['version'].split("+")[0]
+
+del get_versions, vinfo
diff --git a/MLPY/Lib/site-packages/onnx/__init__.py b/MLPY/Lib/site-packages/onnx/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..05e2609477c06731fcbf7b86080685912dbdb1b2
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/__init__.py
@@ -0,0 +1,354 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import annotations
+
+__all__ = [
+    # Constants
+    "ONNX_ML",
+    "IR_VERSION",
+    "IR_VERSION_2017_10_10",
+    "IR_VERSION_2017_10_30",
+    "IR_VERSION_2017_11_3",
+    "IR_VERSION_2019_1_22",
+    "IR_VERSION_2019_3_18",
+    "IR_VERSION_2019_9_19",
+    "IR_VERSION_2020_5_8",
+    "IR_VERSION_2021_7_30",
+    "IR_VERSION_2023_5_5",
+    "EXPERIMENTAL",
+    "STABLE",
+    # Modules
+    "checker",
+    "compose",
+    "defs",
+    "gen_proto",
+    "helper",
+    "hub",
+    "mapping",
+    "numpy_helper",
+    "parser",
+    "printer",
+    "shape_inference",
+    "utils",
+    "version_converter",
+    # Proto classes
+    "AttributeProto",
+    "FunctionProto",
+    "GraphProto",
+    "MapProto",
+    "ModelProto",
+    "NodeProto",
+    "OperatorProto",
+    "OperatorSetIdProto",
+    "OperatorSetProto",
+    "OperatorStatus",
+    "OptionalProto",
+    "SequenceProto",
+    "SparseTensorProto",
+    "StringStringEntryProto",
+    "TensorAnnotation",
+    "TensorProto",
+    "TensorShapeProto",
+    "TrainingInfoProto",
+    "TypeProto",
+    "ValueInfoProto",
+    "Version",
+    # Utility functions
+    "convert_model_to_external_data",
+    "load_external_data_for_model",
+    "load_model_from_string",
+    "load_model",
+    "load_tensor_from_string",
+    "load_tensor",
+    "save_model",
+    "save_tensor",
+    "write_external_data_tensors",
+]
+# isort:skip_file
+
+import os
+import typing
+from typing import IO, Literal, Union
+
+
+from onnx import serialization
+from onnx.onnx_cpp2py_export import ONNX_ML
+from onnx.external_data_helper import (
+    load_external_data_for_model,
+    write_external_data_tensors,
+    convert_model_to_external_data,
+)
+from onnx.onnx_pb import (
+    AttributeProto,
+    EXPERIMENTAL,
+    FunctionProto,
+    GraphProto,
+    IR_VERSION,
+    IR_VERSION_2017_10_10,
+    IR_VERSION_2017_10_30,
+    IR_VERSION_2017_11_3,
+    IR_VERSION_2019_1_22,
+    IR_VERSION_2019_3_18,
+    IR_VERSION_2019_9_19,
+    IR_VERSION_2020_5_8,
+    IR_VERSION_2021_7_30,
+    IR_VERSION_2023_5_5,
+    ModelProto,
+    NodeProto,
+    OperatorSetIdProto,
+    OperatorStatus,
+    STABLE,
+    SparseTensorProto,
+    StringStringEntryProto,
+    TensorAnnotation,
+    TensorProto,
+    TensorShapeProto,
+    TrainingInfoProto,
+    TypeProto,
+    ValueInfoProto,
+    Version,
+)
+from onnx.onnx_operators_pb import OperatorProto, OperatorSetProto
+from onnx.onnx_data_pb import MapProto, OptionalProto, SequenceProto
+from onnx.version import version as __version__
+
+# Import common subpackages so they're available when you 'import onnx'
+from onnx import (
+    checker,
+    compose,
+    defs,
+    gen_proto,
+    helper,
+    hub,
+    mapping,
+    numpy_helper,
+    parser,
+    printer,
+    shape_inference,
+    utils,
+    version_converter,
+)
+
+# Supported model formats that can be loaded from and saved to
+# The literals are formats with built-in support. But we also allow users to
+# register their own formats. So we allow str as well.
+_SupportedFormat = Union[Literal["protobuf", "textproto"], str]
+# Default serialization format
+_DEFAULT_FORMAT = "protobuf"
+
+
+def _load_bytes(f: IO[bytes] | str | os.PathLike) -> bytes:
+    if hasattr(f, "read") and callable(typing.cast(IO[bytes], f).read):
+        content = typing.cast(IO[bytes], f).read()
+    else:
+        f = typing.cast(Union[str, os.PathLike], f)
+        with open(f, "rb") as readable:
+            content = readable.read()
+    return content
+
+
+def _save_bytes(content: bytes, f: IO[bytes] | str | os.PathLike) -> None:
+    if hasattr(f, "write") and callable(typing.cast(IO[bytes], f).write):
+        typing.cast(IO[bytes], f).write(content)
+    else:
+        f = typing.cast(Union[str, os.PathLike], f)
+        with open(f, "wb") as writable:
+            writable.write(content)
+
+
+def _get_file_path(f: IO[bytes] | str | os.PathLike | None) -> str | None:
+    if isinstance(f, (str, os.PathLike)):
+        return os.path.abspath(f)
+    if hasattr(f, "name"):
+        assert f is not None
+        return os.path.abspath(f.name)
+    return None
+
+
+def _get_serializer(
+    fmt: _SupportedFormat | None, f: str | os.PathLike | IO[bytes] | None = None
+) -> serialization.ProtoSerializer:
+    """Get the serializer for the given path and format from the serialization registry."""
+    # Use fmt if it is specified
+    if fmt is not None:
+        return serialization.registry.get(fmt)
+
+    if (file_path := _get_file_path(f)) is not None:
+        _, ext = os.path.splitext(file_path)
+        fmt = serialization.registry.get_format_from_file_extension(ext)
+
+    # Failed to resolve format if fmt is None. Use protobuf as default
+    fmt = fmt or _DEFAULT_FORMAT
+    assert fmt is not None
+
+    return serialization.registry.get(fmt)
+
+
+def load_model(
+    f: IO[bytes] | str | os.PathLike,
+    format: _SupportedFormat | None = None,  # noqa: A002
+    load_external_data: bool = True,
+) -> ModelProto:
+    """Loads a serialized ModelProto into memory.
+
+    Args:
+        f: can be a file-like object (has "read" function) or a string/PathLike containing a file name
+        format: The serialization format. When it is not specified, it is inferred
+            from the file extension when ``f`` is a path. If not specified _and_
+            ``f`` is not a path, 'protobuf' is used. The encoding is assumed to
+            be "utf-8" when the format is a text format.
+        load_external_data: Whether to load the external data.
+            Set to True if the data is under the same directory of the model.
+            If not, users need to call :func:`load_external_data_for_model`
+            with directory to load external data from.
+
+    Returns:
+        Loaded in-memory ModelProto.
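+
+    Example (a minimal usage sketch; ``model.onnx`` is a hypothetical path)::
+
+        import onnx
+
+        model = onnx.load_model("model.onnx")
+        print(model.ir_version)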
+ """ + model = _get_serializer(format, f).deserialize_proto(_load_bytes(f), ModelProto()) + + if load_external_data: + model_filepath = _get_file_path(f) + if model_filepath: + base_dir = os.path.dirname(model_filepath) + load_external_data_for_model(model, base_dir) + + return model + + +def load_tensor( + f: IO[bytes] | str | os.PathLike, + format: _SupportedFormat | None = None, # noqa: A002 +) -> TensorProto: + """Loads a serialized TensorProto into memory. + + Args: + f: can be a file-like object (has "read" function) or a string/PathLike containing a file name + format: The serialization format. When it is not specified, it is inferred + from the file extension when ``f`` is a path. If not specified _and_ + ``f`` is not a path, 'protobuf' is used. The encoding is assumed to + be "utf-8" when the format is a text format. + + Returns: + Loaded in-memory TensorProto. + """ + return _get_serializer(format, f).deserialize_proto(_load_bytes(f), TensorProto()) + + +def load_model_from_string( + s: bytes | str, + format: _SupportedFormat = _DEFAULT_FORMAT, # noqa: A002 +) -> ModelProto: + """Loads a binary string (bytes) that contains serialized ModelProto. + + Args: + s: a string, which contains serialized ModelProto + format: The serialization format. When it is not specified, it is inferred + from the file extension when ``f`` is a path. If not specified _and_ + ``f`` is not a path, 'protobuf' is used. The encoding is assumed to + be "utf-8" when the format is a text format. + + Returns: + Loaded in-memory ModelProto. + """ + return _get_serializer(format).deserialize_proto(s, ModelProto()) + + +def load_tensor_from_string( + s: bytes, + format: _SupportedFormat = _DEFAULT_FORMAT, # noqa: A002 +) -> TensorProto: + """Loads a binary string (bytes) that contains serialized TensorProto. + + Args: + s: a string, which contains serialized TensorProto + format: The serialization format. When it is not specified, it is inferred + from the file extension when ``f`` is a path. If not specified _and_ + ``f`` is not a path, 'protobuf' is used. The encoding is assumed to + be "utf-8" when the format is a text format. + + Returns: + Loaded in-memory TensorProto. + """ + return _get_serializer(format).deserialize_proto(s, TensorProto()) + + +def save_model( + proto: ModelProto | bytes, + f: IO[bytes] | str | os.PathLike, + format: _SupportedFormat | None = None, # noqa: A002 + *, + save_as_external_data: bool = False, + all_tensors_to_one_file: bool = True, + location: str | None = None, + size_threshold: int = 1024, + convert_attribute: bool = False, +) -> None: + """Saves the ModelProto to the specified path and optionally, serialize tensors with raw data as external data before saving. + + Args: + proto: should be a in-memory ModelProto + f: can be a file-like object (has "write" function) or a string containing + a file name or a pathlike object + format: The serialization format. When it is not specified, it is inferred + from the file extension when ``f`` is a path. If not specified _and_ + ``f`` is not a path, 'protobuf' is used. The encoding is assumed to + be "utf-8" when the format is a text format. + save_as_external_data: If true, save tensors to external file(s). + all_tensors_to_one_file: Effective only if save_as_external_data is True. + If true, save all tensors to one external file specified by location. + If false, save each tensor to a file named with the tensor name. + location: Effective only if save_as_external_data is true. 
+ Specifies the external file that all tensors are saved to. + The path is relative to the model path. + If not specified, the model name is used. + size_threshold: Effective only if save_as_external_data is True. + Size threshold in bytes: only tensors whose data is >= size_threshold are + converted to external data. To convert every tensor with raw data, set size_threshold=0. + convert_attribute: Effective only if save_as_external_data is True. + If True, convert all tensors to external data. + If False, convert only non-attribute tensors to external data. + """ + if isinstance(proto, bytes): + proto = _get_serializer(_DEFAULT_FORMAT).deserialize_proto(proto, ModelProto()) + + if save_as_external_data: + convert_model_to_external_data( + proto, all_tensors_to_one_file, location, size_threshold, convert_attribute + ) + + model_filepath = _get_file_path(f) + if model_filepath is not None: + basepath = os.path.dirname(model_filepath) + proto = write_external_data_tensors(proto, basepath) + + serialized = _get_serializer(format, model_filepath).serialize_proto(proto) + _save_bytes(serialized, f) + + +def save_tensor( + proto: TensorProto, + f: IO[bytes] | str | os.PathLike, + format: _SupportedFormat | None = None, # noqa: A002 +) -> None: + """Saves the TensorProto to the specified path. + + Args: + proto: should be an in-memory TensorProto + f: can be a file-like object (has "write" function) or a string + containing a file name or a pathlike object. + format: The serialization format. When it is not specified, it is inferred + from the file extension when ``f`` is a path. If not specified _and_ + ``f`` is not a path, 'protobuf' is used. The encoding is assumed to + be "utf-8" when the format is a text format. + """ + serialized = _get_serializer(format, f).serialize_proto(proto) + _save_bytes(serialized, f) + + +# For backward compatibility +load = load_model +load_from_string = load_model_from_string +save = save_model diff --git a/MLPY/Lib/site-packages/onnx/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..242dcbe109d13b87e37f9d7a285a2b6f7d80f399 Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/__pycache__/checker.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/__pycache__/checker.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b213fd45f37de28011fc2396f52348899384613 Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/__pycache__/checker.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/__pycache__/compose.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/__pycache__/compose.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed18f8b2d990e205f392101263d95f0d023b873d Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/__pycache__/compose.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/__pycache__/external_data_helper.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/__pycache__/external_data_helper.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1eb596fb275d2bdd87a18011ab19ed269d7021ee Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/__pycache__/external_data_helper.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/__pycache__/gen_proto.cpython-39.pyc
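As a quick orientation on the onnx/__init__.py API added above: load_model, save_model and their aliases (load, save) are the package's primary file I/O entry points. A minimal usage sketch follows; the file names are placeholders, not files from this diff:

import onnx

# Load a model; external data stored next to the model is read automatically.
model = onnx.load_model("model.onnx")  # "model.onnx" is an illustrative path

# Round-trip through bytes using the default protobuf format.
same_model = onnx.load_model_from_string(model.SerializeToString())

# Save, spilling initializers >= 1024 bytes (the default size_threshold)
# into a single external file next to the model.
onnx.save_model(
    model,
    "model_ext.onnx",
    save_as_external_data=True,
    all_tensors_to_one_file=True,
    location="model_ext.data",
)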
b/MLPY/Lib/site-packages/onnx/__pycache__/gen_proto.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..658dfb84ede92107ef4868061fa5fefcc1a508bc Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/__pycache__/gen_proto.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/__pycache__/helper.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/__pycache__/helper.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b7ec359a1bfe9edc7544945d653e432289089fe Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/__pycache__/helper.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/__pycache__/hub.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/__pycache__/hub.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..af5412d753178086f9f045f5dda11f56db058a5c Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/__pycache__/hub.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/__pycache__/inliner.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/__pycache__/inliner.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a66d363821ca9ad9d37c75f70ca1f6bfe0c9d97 Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/__pycache__/inliner.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/__pycache__/mapping.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/__pycache__/mapping.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e2b9e7da43190e3435d59adda134cdeb20a9ccdd Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/__pycache__/mapping.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/__pycache__/model_container.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/__pycache__/model_container.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e90fe0c107e0ac20a775fe0d92846dcdb310eaa5 Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/__pycache__/model_container.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/__pycache__/numpy_helper.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/__pycache__/numpy_helper.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3710aadd8d4b2b4dfca2e79ad3f5f3a19749a055 Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/__pycache__/numpy_helper.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/__pycache__/onnx_data_pb.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/__pycache__/onnx_data_pb.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..43b5d61225fd521a4a77da58a3e34f60cd57c8b0 Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/__pycache__/onnx_data_pb.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/__pycache__/onnx_data_pb2.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/__pycache__/onnx_data_pb2.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52e1aa51450f6e8481000d1401a047531999827a Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/__pycache__/onnx_data_pb2.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/__pycache__/onnx_ml_pb2.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/__pycache__/onnx_ml_pb2.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..140a99e988e3c6f5bc1095a261757b120798e03a Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/__pycache__/onnx_ml_pb2.cpython-39.pyc differ diff --git 
a/MLPY/Lib/site-packages/onnx/__pycache__/onnx_operators_ml_pb2.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/__pycache__/onnx_operators_ml_pb2.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fff2a97c0a379ed5285831c613fb37018782b378 Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/__pycache__/onnx_operators_ml_pb2.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/__pycache__/onnx_operators_pb.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/__pycache__/onnx_operators_pb.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bee4fb4553850b7ea93cae2afa03c54fd7293cc2 Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/__pycache__/onnx_operators_pb.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/__pycache__/onnx_pb.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/__pycache__/onnx_pb.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7cb29fa9b5ae47bf583a845e88097a429c139cd4 Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/__pycache__/onnx_pb.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/__pycache__/parser.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/__pycache__/parser.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8bca378deb2f2a8baea4bb3593627c7f36630b5b Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/__pycache__/parser.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/__pycache__/printer.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/__pycache__/printer.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..480cdbbbfb8d41400e70e4997eaf0162f2b8b365 Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/__pycache__/printer.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/__pycache__/serialization.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/__pycache__/serialization.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..babc1deb60454279d495681c0d3490f26cefea44 Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/__pycache__/serialization.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/__pycache__/shape_inference.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/__pycache__/shape_inference.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0195fbeb1fed5586d5b216e239ad534a6ccb85b1 Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/__pycache__/shape_inference.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/__pycache__/subbyte.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/__pycache__/subbyte.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c20dde37dff1a7e3c032f9c20efeab5dc3883c5d Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/__pycache__/subbyte.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/__pycache__/utils.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/__pycache__/utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec8282c1fa55a4ae2a1b7d86cca42757416fa22b Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/__pycache__/utils.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/__pycache__/version.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/__pycache__/version.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b4599566a5a86716749792c27b120d938524e57 Binary files /dev/null and 
b/MLPY/Lib/site-packages/onnx/__pycache__/version.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/__pycache__/version_converter.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/__pycache__/version_converter.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2929217e7219eb9415feee8598cfff4267961c91 Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/__pycache__/version_converter.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/backend/__init__.py b/MLPY/Lib/site-packages/onnx/backend/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..de8e3a385434d6c541b8b544342983b60f8eb0c3 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/__init__.py @@ -0,0 +1,3 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 diff --git a/MLPY/Lib/site-packages/onnx/backend/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/backend/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d7b5addf1d30cac1d8dd344ae6b67e61db59b4f9 Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/backend/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/backend/__pycache__/base.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/backend/__pycache__/base.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e1f85c6a3814498038d861ee57577eacca683dc4 Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/backend/__pycache__/base.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/backend/base.py b/MLPY/Lib/site-packages/onnx/backend/base.py new file mode 100644 index 0000000000000000000000000000000000000000..0fed717404ddc7978d8ae16359c65c17ab7a5b92 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/base.py @@ -0,0 +1,137 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + + +from collections import namedtuple +from typing import Any, Dict, NewType, Optional, Sequence, Tuple, Type + +import numpy + +import onnx.checker +import onnx.onnx_cpp2py_export.checker as c_checker +from onnx import IR_VERSION, ModelProto, NodeProto + + +class DeviceType: + """Describes device type.""" + + _Type = NewType("_Type", int) + CPU: _Type = _Type(0) + CUDA: _Type = _Type(1) + + +class Device: + """Describes a device type and an optional device id. + + Syntax: ``device_type:device_id`` (the device id is optional). + Examples: 'CPU', 'CUDA', 'CUDA:1'. + """ + + def __init__(self, device: str) -> None: + options = device.split(":") + self.type = getattr(DeviceType, options[0]) + self.device_id = 0 + if len(options) > 1: + self.device_id = int(options[1]) + + +def namedtupledict( + typename: str, field_names: Sequence[str], *args: Any, **kwargs: Any +) -> Type[Tuple[Any, ...]]: + field_names_map = {n: i for i, n in enumerate(field_names)} + # Some output names are invalid python identifiers, e.g. "0" + kwargs.setdefault("rename", True) + data = namedtuple(typename, field_names, *args, **kwargs) # type: ignore + + def getitem(self: Any, key: Any) -> Any: + if isinstance(key, str): + key = field_names_map[key] + return super(type(self), self).__getitem__(key) # type: ignore + + data.__getitem__ = getitem # type: ignore[assignment] + return data + + +class BackendRep: + """BackendRep is the handle that a Backend returns after preparing to execute + a model repeatedly. Users will then pass inputs to the run function of + BackendRep to retrieve the corresponding results.
+ """ + + def run(self, inputs: Any, **kwargs: Any) -> Tuple[Any, ...]: + """Abstract function.""" + return (None,) + + +class Backend: + """Backend is the entity that will take an ONNX model with inputs, + perform a computation, and then return the output. + + For one-off execution, users can use run_node and run_model to obtain results quickly. + + For repeated execution, users should use prepare, in which the Backend + does all of the preparation work for executing the model repeatedly + (e.g., loading initializers), and returns a BackendRep handle. + """ + + @classmethod + def is_compatible( + cls, model: ModelProto, device: str = "CPU", **kwargs: Any + ) -> bool: + # Return whether the model is compatible with the backend. + return True + + @classmethod + def prepare( + cls, model: ModelProto, device: str = "CPU", **kwargs: Any + ) -> Optional[BackendRep]: + # TODO Remove Optional from return type + onnx.checker.check_model(model) + return None + + @classmethod + def run_model( + cls, model: ModelProto, inputs: Any, device: str = "CPU", **kwargs: Any + ) -> Tuple[Any, ...]: + backend = cls.prepare(model, device, **kwargs) + assert backend is not None + return backend.run(inputs) + + @classmethod + def run_node( + cls, + node: NodeProto, + inputs: Any, + device: str = "CPU", + outputs_info: Optional[Sequence[Tuple[numpy.dtype, Tuple[int, ...]]]] = None, + **kwargs: Dict[str, Any], + ) -> Optional[Tuple[Any, ...]]: + """Simple run one operator and return the results. + + Args: + node: The node proto. + inputs: Inputs to the node. + device: The device to run on. + outputs_info: a list of tuples, which contains the element type and + shape of each output. First element of the tuple is the dtype, and + the second element is the shape. More use case can be found in + https://github.com/onnx/onnx/blob/main/onnx/backend/test/runner/__init__.py + kwargs: Other keyword arguments. + """ + # TODO Remove Optional from return type + if "opset_version" in kwargs: + special_context = c_checker.CheckerContext() + special_context.ir_version = IR_VERSION + special_context.opset_imports = {"": kwargs["opset_version"]} # type: ignore + onnx.checker.check_node(node, special_context) + else: + onnx.checker.check_node(node) + + return None + + @classmethod + def supports_device(cls, device: str) -> bool: + """Checks whether the backend is compiled with particular device support. + In particular it's used in the testing suite. 
+ """ + return True diff --git a/MLPY/Lib/site-packages/onnx/backend/sample/__init__.py b/MLPY/Lib/site-packages/onnx/backend/sample/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1772a77dcab736dd48287b71f55345158068c13f --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/sample/__init__.py @@ -0,0 +1,3 @@ +# Copyright (c) ONNX Project Contributors + +# SPDX-License-Identifier: Apache-2.0 diff --git a/MLPY/Lib/site-packages/onnx/backend/sample/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/backend/sample/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36a4b275642cc180e7ad4c496dace51a9d0d3aac Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/backend/sample/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/backend/sample/ops/__init__.py b/MLPY/Lib/site-packages/onnx/backend/sample/ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c89d36b33941bcdd413e84243b5ccabaf78d1b7c --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/sample/ops/__init__.py @@ -0,0 +1,27 @@ +# Copyright (c) ONNX Project Contributors + +# SPDX-License-Identifier: Apache-2.0 + +import importlib +import inspect +import pkgutil +import sys +from types import ModuleType +from typing import Dict + + +def collect_sample_implementations() -> Dict[str, str]: + dict_: Dict[str, str] = {} + _recursive_scan(sys.modules[__name__], dict_) + return dict_ + + +def _recursive_scan(package: ModuleType, dict_: Dict[str, str]) -> None: + pkg_dir = package.__path__ # type: ignore + module_location = package.__name__ + for _module_loader, name, ispkg in pkgutil.iter_modules(pkg_dir): # type: ignore + module_name = f"{module_location}.{name}" # Module/package + module = importlib.import_module(module_name) + dict_[name] = inspect.getsource(module) + if ispkg: + _recursive_scan(module, dict_) diff --git a/MLPY/Lib/site-packages/onnx/backend/sample/ops/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/backend/sample/ops/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a239ec275744dacaa4c34e8eb1a558fb4570940 Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/backend/sample/ops/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/backend/sample/ops/__pycache__/abs.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/backend/sample/ops/__pycache__/abs.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e57dc8e431cc67ea899633fda5f345500b16557f Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/backend/sample/ops/__pycache__/abs.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/backend/sample/ops/abs.py b/MLPY/Lib/site-packages/onnx/backend/sample/ops/abs.py new file mode 100644 index 0000000000000000000000000000000000000000..1f03f6694ce3cd847911b6e805bba28a28a8c877 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/sample/ops/abs.py @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + + +def abs(input: np.ndarray) -> np.ndarray: # noqa: A001 + return np.abs(input) # type: ignore[no-any-return] diff --git a/MLPY/Lib/site-packages/onnx/backend/test/__init__.py b/MLPY/Lib/site-packages/onnx/backend/test/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f03ed7c0d43a08cb5f2ebd6472e525d63d383110 --- /dev/null +++ 
b/MLPY/Lib/site-packages/onnx/backend/test/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +__all__ = ["BackendTest"] +# for backward compatibility +from onnx.backend.test.runner import Runner as BackendTest diff --git a/MLPY/Lib/site-packages/onnx/backend/test/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/backend/test/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ea33d2d2fe595d5af4187609176ba806241174d Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/backend/test/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/backend/test/__pycache__/cmd_tools.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/backend/test/__pycache__/cmd_tools.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b069834f32f9da7b0c812a5417dab125f1eab01 Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/backend/test/__pycache__/cmd_tools.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/backend/test/__pycache__/stat_coverage.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/backend/test/__pycache__/stat_coverage.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..32546f5bc98ef886965edbe85a310c1e7ff67cb3 Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/backend/test/__pycache__/stat_coverage.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/__init__.py b/MLPY/Lib/site-packages/onnx/backend/test/case/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8df46538838e81bf61dcd847365c269879e775a7 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) ONNX Project Contributors + +# SPDX-License-Identifier: Apache-2.0 + +import sys +from typing import Dict, List, Tuple + +from onnx.backend.test.case.base import Snippets +from onnx.backend.test.case.utils import import_recursive + + +def collect_snippets() -> Dict[str, List[Tuple[str, str]]]: + import_recursive(sys.modules[__name__]) + return Snippets diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/backend/test/case/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bd3283e189c165f646cbf5689a76d5702283151d Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/backend/test/case/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/__pycache__/base.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/backend/test/case/__pycache__/base.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d20d8d473beec7935e12c5f992d6ec7adc96bf9c Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/backend/test/case/__pycache__/base.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/__pycache__/test_case.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/backend/test/case/__pycache__/test_case.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..48d448dec425fa4f9734e6fb4ca8bd6b32d2ccc4 Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/backend/test/case/__pycache__/test_case.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/__pycache__/utils.cpython-39.pyc 
b/MLPY/Lib/site-packages/onnx/backend/test/case/__pycache__/utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb63ef860d0beeb2dbebce7bc10e7df2e9d094d6 Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/backend/test/case/__pycache__/utils.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/base.py b/MLPY/Lib/site-packages/onnx/backend/test/case/base.py new file mode 100644 index 0000000000000000000000000000000000000000..e7659bd3de7bda1279f76ae6294afcca3f7d0439 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/base.py @@ -0,0 +1,46 @@ +# Copyright (c) ONNX Project Contributors + +# SPDX-License-Identifier: Apache-2.0 + +import inspect +from collections import defaultdict +from textwrap import dedent +from typing import Any, ClassVar, Dict, List, Tuple, Type + +import numpy as np + + +def process_snippet(op_name: str, name: str, export: Any) -> Tuple[str, str]: + snippet_name = name[len("export_") :] or op_name.lower() + source_code = dedent(inspect.getsource(export)) + # remove the function signature line + lines = source_code.splitlines() + assert lines[0] == "@staticmethod" + assert lines[1].startswith("def export") + return snippet_name, dedent("\n".join(lines[2:])) + + +Snippets: Dict[str, List[Tuple[str, str]]] = defaultdict(list) + + +class _Exporter(type): + exports: ClassVar[Dict[str, List[Tuple[str, str]]]] = defaultdict(list) + + def __init__( + cls, name: str, bases: Tuple[Type[Any], ...], dct: Dict[str, Any] + ) -> None: + for k, v in dct.items(): + if k.startswith("export"): + if not isinstance(v, staticmethod): + raise ValueError("Only staticmethods could be named as export.*") + export = getattr(cls, k) + Snippets[name].append(process_snippet(name, k, export)) + # export functions should call expect and so populate + # TestCases + np.random.seed(seed=0) + export() + super().__init__(name, bases, dct) + + +class Base(metaclass=_Exporter): + pass diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/model/__init__.py b/MLPY/Lib/site-packages/onnx/backend/test/case/model/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..87b420b33b690ddd960e478d90c22e53bca4d77d --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/model/__init__.py @@ -0,0 +1,77 @@ +# Copyright (c) ONNX Project Contributors + +# SPDX-License-Identifier: Apache-2.0 + +import sys +from typing import List, Optional, Sequence + +import numpy as np + +from onnx import ModelProto +from onnx.backend.test.case.test_case import TestCase +from onnx.backend.test.case.utils import import_recursive + +_SimpleModelTestCases = [] + + +def expect( + model: ModelProto, + inputs: Sequence[np.ndarray], + outputs: Sequence[np.ndarray], + name: Optional[str] = None, +) -> None: + name = name or model.graph.name + _SimpleModelTestCases.append( + TestCase( + name=name, + model_name=model.graph.name, + url=None, + model_dir=None, + model=model, + data_sets=[(inputs, outputs)], + kind="simple", + rtol=1e-3, + atol=1e-7, + ) + ) + + +# BASE_URL = "https://download.onnxruntime.ai/onnx/models" +BASE_URL = "onnx/backend/test/data/light/light_%s.onnx" + + +def collect_testcases() -> List[TestCase]: + """Collect model test cases defined in python/numpy code.""" + real_model_testcases = [] + + model_tests = [ + ("test_bvlc_alexnet", "bvlc_alexnet", 1e-3, 1e-7), + ("test_densenet121", "densenet121", 2e-3, 1e-7), + ("test_inception_v1", "inception_v1", 1e-3, 1e-7), + ("test_inception_v2", "inception_v2", 1e-3, 
1e-7), + ("test_resnet50", "resnet50", 1e-3, 1e-7), + ("test_shufflenet", "shufflenet", 1e-3, 1e-7), + ("test_squeezenet", "squeezenet", 1e-3, 1e-7), + ("test_vgg19", "vgg19", 1e-3, 1e-7), + ("test_zfnet512", "zfnet512", 1e-3, 1e-7), + ] + + for test_name, model_name, rtol, atol in model_tests: + url = BASE_URL % model_name + real_model_testcases.append( + TestCase( + name=test_name, + model_name=model_name, + url=url, + model_dir=None, + model=None, + data_sets=None, + kind="real", + rtol=rtol, + atol=atol, + ) + ) + + import_recursive(sys.modules[__name__]) + + return real_model_testcases + _SimpleModelTestCases diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/model/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/backend/test/case/model/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3579d87c982a0a607a3cb8be257200396e6f6dbb Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/backend/test/case/model/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/model/__pycache__/expand.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/backend/test/case/model/__pycache__/expand.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc36852477a45a734ac7a2639805bfc80e1f427c Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/backend/test/case/model/__pycache__/expand.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/model/__pycache__/gradient.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/backend/test/case/model/__pycache__/gradient.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..291b1b99e12deb9331af13c6621265bdac331a80 Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/backend/test/case/model/__pycache__/gradient.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/model/__pycache__/sequence.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/backend/test/case/model/__pycache__/sequence.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0779867622cb33313b9159eb524d34fe32e9939b Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/backend/test/case/model/__pycache__/sequence.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/model/__pycache__/shrink.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/backend/test/case/model/__pycache__/shrink.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d7262dcaf7b44944ef7f2929717fdb16aac8bffa Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/backend/test/case/model/__pycache__/shrink.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/model/__pycache__/sign.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/backend/test/case/model/__pycache__/sign.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f8c18590f04e9ea3c60e55b2ec7302ee7907cf6 Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/backend/test/case/model/__pycache__/sign.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/model/__pycache__/single-relu.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/backend/test/case/model/__pycache__/single-relu.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bec9f0abd4fbb26a465ab22397278c0abd1d26ae Binary files /dev/null and 
b/MLPY/Lib/site-packages/onnx/backend/test/case/model/__pycache__/single-relu.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/model/__pycache__/stringnormalizer.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/backend/test/case/model/__pycache__/stringnormalizer.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..80969beb8c7f9deb70a2baf14ec7a59b49584a31 Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/backend/test/case/model/__pycache__/stringnormalizer.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/model/expand.py b/MLPY/Lib/site-packages/onnx/backend/test/case/model/expand.py new file mode 100644 index 0000000000000000000000000000000000000000..3fe3ae7cb3a4bee3995dbde8e414e2655562d1b0 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/model/expand.py @@ -0,0 +1,88 @@ +# Copyright (c) ONNX Project Contributors + +# SPDX-License-Identifier: Apache-2.0 + +from typing import Sequence + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.model import expect + + +class ExpandDynamicShape(Base): + @staticmethod + def export() -> None: + def make_graph( + node: onnx.helper.NodeProto, + input_shape: Sequence[int], + shape_shape: Sequence[int], + output_shape: Sequence[int], + ) -> onnx.helper.GraphProto: + graph = onnx.helper.make_graph( + nodes=[node], + name="Expand", + inputs=[ + onnx.helper.make_tensor_value_info( + "X", onnx.TensorProto.FLOAT, input_shape + ), + onnx.helper.make_tensor_value_info( + "shape", onnx.TensorProto.INT64, shape_shape + ), + ], + outputs=[ + onnx.helper.make_tensor_value_info( + "Y", onnx.TensorProto.FLOAT, output_shape + ) + ], + ) + return graph + + node = onnx.helper.make_node("Expand", ["X", "shape"], ["Y"], name="test") + input_shape = [1, 3, 1] + x = np.ones(input_shape, dtype=np.float32) + + # 1st testcase + shape = np.array([3, 1], dtype=np.int64) + y = x * np.ones(shape, dtype=np.float32) + graph = make_graph(node, input_shape, shape.shape, y.shape) + model = onnx.helper.make_model_gen_version( + graph, + producer_name="backend-test", + opset_imports=[onnx.helper.make_opsetid("", 9)], + ) + expect(model, inputs=[x, shape], outputs=[y], name="test_expand_shape_model1") + + # 2nd testcase + shape = np.array([1, 3], dtype=np.int64) + y = x * np.ones(shape, dtype=np.float32) + graph = make_graph(node, input_shape, shape.shape, y.shape) + model = onnx.helper.make_model_gen_version( + graph, + producer_name="backend-test", + opset_imports=[onnx.helper.make_opsetid("", 9)], + ) + expect(model, inputs=[x, shape], outputs=[y], name="test_expand_shape_model2") + + # 3rd testcase + shape = np.array([3, 1, 3], dtype=np.int64) + y = x * np.ones(shape, dtype=np.float32) + graph = make_graph(node, input_shape, shape.shape, y.shape) + model = onnx.helper.make_model_gen_version( + graph, + producer_name="backend-test", + opset_imports=[onnx.helper.make_opsetid("", 9)], + ) + expect(model, inputs=[x, shape], outputs=[y], name="test_expand_shape_model3") + + # 4th testcase + shape = np.array([3, 3, 1, 3], dtype=np.int64) + y = x * np.ones(shape, dtype=np.float32) + graph = make_graph(node, input_shape, shape.shape, y.shape) + model = onnx.helper.make_model_gen_version( + graph, + producer_name="backend-test", + opset_imports=[onnx.helper.make_opsetid("", 9)], + ) + expect(model, inputs=[x, shape], outputs=[y], name="test_expand_shape_model4") diff --git 
a/MLPY/Lib/site-packages/onnx/backend/test/case/model/gradient.py b/MLPY/Lib/site-packages/onnx/backend/test/case/model/gradient.py new file mode 100644 index 0000000000000000000000000000000000000000..b104c917ddd1591c7365b9a079f144293cf8eca5 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/model/gradient.py @@ -0,0 +1,109 @@ +# Copyright (c) ONNX Project Contributors + +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.model import expect +from onnx.defs import AI_ONNX_PREVIEW_TRAINING_DOMAIN, ONNX_DOMAIN + + +class Gradient(Base): + @staticmethod + def export_gradient_scalar_add() -> None: + add_node = onnx.helper.make_node("Add", ["a", "b"], ["c"], name="my_add") + gradient_node = onnx.helper.make_node( + "Gradient", + ["a", "b"], + ["dc_da", "dc_db"], + name="my_gradient", + domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN, + xs=["a", "b"], + y="c", + ) + + a = np.array(1.0).astype(np.float32) + b = np.array(2.0).astype(np.float32) + c = a + b + # dc / da = d(a+b) / da = 1 + dc_da = np.array(1).astype(np.float32) + # dc / db = d(a+b) / db = 1 + dc_db = np.array(1).astype(np.float32) + + graph = onnx.helper.make_graph( + nodes=[add_node, gradient_node], + name="GradientOfAdd", + inputs=[ + onnx.helper.make_tensor_value_info("a", onnx.TensorProto.FLOAT, []), + onnx.helper.make_tensor_value_info("b", onnx.TensorProto.FLOAT, []), + ], + outputs=[ + onnx.helper.make_tensor_value_info("c", onnx.TensorProto.FLOAT, []), + onnx.helper.make_tensor_value_info("dc_da", onnx.TensorProto.FLOAT, []), + onnx.helper.make_tensor_value_info("dc_db", onnx.TensorProto.FLOAT, []), + ], + ) + opsets = [ + onnx.helper.make_operatorsetid(ONNX_DOMAIN, 12), + onnx.helper.make_operatorsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1), + ] + model = onnx.helper.make_model_gen_version( + graph, producer_name="backend-test", opset_imports=opsets + ) + expect( + model, inputs=[a, b], outputs=[c, dc_da, dc_db], name="test_gradient_of_add" + ) + + @staticmethod + def export_gradient_scalar_add_and_mul() -> None: + add_node = onnx.helper.make_node("Add", ["a", "b"], ["c"], name="my_add") + mul_node = onnx.helper.make_node("Mul", ["c", "a"], ["d"], name="my_mul") + gradient_node = onnx.helper.make_node( + "Gradient", + ["a", "b"], + ["dd_da", "dd_db"], + name="my_gradient", + domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN, + xs=["a", "b"], + y="d", + ) + + a = np.array(1.0).astype(np.float32) + b = np.array(2.0).astype(np.float32) + c = a + b + # d = a * c = a * (a + b) + d = a * c + # dd / da = d(a*a+a*b) / da = 2 * a + b + dd_da = (2 * a + b).astype(np.float32) + # dd / db = d(a*a+a*b) / db = a + dd_db = a + + graph = onnx.helper.make_graph( + nodes=[add_node, mul_node, gradient_node], + name="GradientOfTwoOperators", + inputs=[ + onnx.helper.make_tensor_value_info("a", onnx.TensorProto.FLOAT, []), + onnx.helper.make_tensor_value_info("b", onnx.TensorProto.FLOAT, []), + ], + outputs=[ + onnx.helper.make_tensor_value_info("d", onnx.TensorProto.FLOAT, []), + onnx.helper.make_tensor_value_info("dd_da", onnx.TensorProto.FLOAT, []), + onnx.helper.make_tensor_value_info("dd_db", onnx.TensorProto.FLOAT, []), + ], + ) + + opsets = [ + onnx.helper.make_operatorsetid(ONNX_DOMAIN, 12), + onnx.helper.make_operatorsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1), + ] + model = onnx.helper.make_model_gen_version( + graph, producer_name="backend-test", opset_imports=opsets + ) + expect( + model, + inputs=[a, b], + outputs=[d, dd_da, dd_db],
name="test_gradient_of_add_and_mul", + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/model/sequence.py b/MLPY/Lib/site-packages/onnx/backend/test/case/model/sequence.py new file mode 100644 index 0000000000000000000000000000000000000000..07ba9a958911bcea370953f537fdf87af9a3f717 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/model/sequence.py @@ -0,0 +1,455 @@ +# Copyright (c) ONNX Project Contributors + +# SPDX-License-Identifier: Apache-2.0 + +from __future__ import annotations + +import typing + +import numpy as np + +import onnx +from onnx import TensorProto +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.model import expect + + +def SequenceEmptyImpl() -> list[np.ndarray | None]: + return [] + + +def SequenceConstructImpl(*tensors: np.ndarray) -> list[np.ndarray]: + return list(tensors) + + +def SequenceInsertImpl( + sequence: list[np.ndarray], tensor: np.ndarray, position: int | None = None +) -> list[np.ndarray]: + if position is None: + position = len(sequence) + sequence.insert(position, tensor) + return sequence + + +def SequenceAtImpl(sequence: list[np.ndarray], position: int) -> np.ndarray: + return sequence[position] + + +def SequenceEraseImpl( + sequence: list[np.ndarray], position: int | None = None +) -> list[np.ndarray | None]: + if position is None: + position = -1 + del sequence[position] + return sequence + + +def SequenceLengthImpl(sequence: list[np.ndarray]) -> np.int64: + return np.int64(len(sequence)) + + +def SplitToSequenceImpl( + tensor: np.ndarray, + split: int | list[int] | None = None, + axis: int = 0, + keepdims: int = 1, +) -> list[np.ndarray]: + dim_size = tensor.shape[axis] + if split is None: + split = 1 + split_indices = [ + i * split + 1 for i in range(dim_size) if i * split + 1 < dim_size + ] + if not keepdims: + results = np.array_split(tensor, split_indices, axis) + return [np.squeeze(res, axis) for res in results] + if np.isscalar(split): + split_indices = [i * split + 1 for i in range(dim_size) if i * split + 1 < dim_size] # type: ignore + else: + split_indices = np.cumsum(split) + 1 + return np.array_split(tensor, split_indices, axis) # type: ignore + + +def ConcatFromSequenceImpl( + sequence: list[np.ndarray], axis: int, new_axis: int | None = 0 +) -> np.ndarray: + if not new_axis: + return np.concatenate(sequence, axis) + return np.stack(sequence, axis) + + +class Sequence(Base): + @staticmethod + def export() -> None: + def make_graph( + nodes: list[onnx.helper.NodeProto], + input_shapes: list[typing.Sequence[str | int] | None], + output_shapes: list[typing.Sequence[str | int] | None], + input_names: list[str], + output_names: list[str], + input_types: list[TensorProto.DataType], + output_types: list[TensorProto.DataType], + initializers: list[TensorProto] | None = None, + ) -> onnx.helper.GraphProto: + graph = onnx.helper.make_graph( + nodes=nodes, + name="Sequence", + inputs=[ + onnx.helper.make_tensor_value_info(name, input_type, input_shape) + for name, input_type, input_shape in zip( + input_names, input_types, input_shapes + ) + ], + outputs=[ + onnx.helper.make_tensor_value_info(name, output_type, output_shape) + for name, output_type, output_shape in zip( + output_names, output_types, output_shapes + ) + ], + initializer=initializers, + ) + return graph + + # 1st testcase - insert and at. + # 1. SequenceEmpty: -> [] + # 2. SequenceInsert(x): -> [x] + # 3. SequenceInsert(y): -> [x, y] + # 4. SequenceInsert(z, 1): -> [x, z, y] + # 5. 
SequenceAt(2): -> y + seq_empty_node = onnx.helper.make_node("SequenceEmpty", [], ["Seq_empty"]) + seq_insert_node = onnx.helper.make_node( + "SequenceInsert", ["Seq_empty", "X"], ["Seq_1"] + ) + seq_insert_node2 = onnx.helper.make_node( + "SequenceInsert", ["Seq_1", "Y"], ["Seq_2"] + ) + seq_insert_node3 = onnx.helper.make_node( + "SequenceInsert", ["Seq_2", "Z", "pos"], ["Seq_3"] + ) + seq_at_node = onnx.helper.make_node("SequenceAt", ["Seq_3", "pos_at"], ["out"]) + + x_shape = [2, 3, 4] + y_shape = [1, 3, 4] + z_shape = [3, 3, 4] + out_shape = [None, 3, 4] + + x = np.ones(x_shape, dtype=np.float32) + y = np.zeros(y_shape, dtype=np.float32) + z = np.ones(z_shape, dtype=np.float32) * 2 + pos_val = 1 + pos_at_val = 2 + + out = SequenceEmptyImpl() + out = SequenceInsertImpl(out, x) + out = SequenceInsertImpl(out, y) + out = SequenceInsertImpl(out, z, pos_val) + out = SequenceAtImpl(out, pos_at_val) + assert np.array_equal(out, y) + + pos = onnx.helper.make_tensor("pos", TensorProto.INT64, (), (pos_val,)) + pos_at = onnx.helper.make_tensor("pos_at", TensorProto.INT64, (), (pos_at_val,)) + + graph = make_graph( + [ + seq_empty_node, + seq_insert_node, + seq_insert_node2, + seq_insert_node3, + seq_at_node, + ], + [x_shape, y_shape, z_shape, [], []], # type: ignore + [out_shape], # type: ignore + ["X", "Y", "Z", "pos", "pos_at"], + ["out"], + [onnx.TensorProto.FLOAT] * 3 + [onnx.TensorProto.INT64] * 2, # type: ignore + [onnx.TensorProto.FLOAT], + [pos, pos_at], + ) + model = onnx.helper.make_model_gen_version( + graph, + producer_name="backend-test", + opset_imports=[onnx.helper.make_opsetid("", 12)], + ) + expect(model, inputs=[x, y, z], outputs=[out], name="test_sequence_model1") + + # 2nd testcase - erase and at. + # 1. SequenceConstruct(x, y, z): -> [x, y, z] + # 2. SequenceErase(1): -> [x, z] + # 3. SequenceAt(1): -> z + seq_construct_node = onnx.helper.make_node( + "SequenceConstruct", ["X", "Y", "Z"], ["seq_1"] + ) + seq_erase_node = onnx.helper.make_node( + "SequenceErase", ["seq_1", "pos_erase"], ["seq_2"] + ) + seq_at_node = onnx.helper.make_node("SequenceAt", ["seq_2", "pos_at"], ["out"]) + + tensor_shape = [2, 3, 4] + + x = np.ones(tensor_shape, dtype=np.float32) + y = np.zeros(tensor_shape, dtype=np.float32) + z = np.ones(tensor_shape, dtype=np.float32) * 2 + pos_erase_val = 1 + pos_at_val = 1 + + out = SequenceConstructImpl(x, y, z) + out = SequenceEraseImpl(out, pos_erase_val) + out = SequenceAtImpl(out, pos_at_val) + assert np.array_equal(out, z) + + pos_erase = onnx.helper.make_tensor( + "pos_erase", TensorProto.INT64, (), (pos_erase_val,) + ) + pos_at = onnx.helper.make_tensor("pos_at", TensorProto.INT64, (), (pos_at_val,)) + + graph = make_graph( + [seq_construct_node, seq_erase_node, seq_at_node], + [tensor_shape, tensor_shape, tensor_shape, [], []], # type: ignore + [tensor_shape], # type: ignore + ["X", "Y", "Z", "pos_erase", "pos_at"], + ["out"], + [onnx.TensorProto.FLOAT] * 3 + [onnx.TensorProto.INT64] * 2, # type: ignore + [onnx.TensorProto.FLOAT], + [pos_erase, pos_at], + ) + model = onnx.helper.make_model_gen_version( + graph, + producer_name="backend-test", + opset_imports=[onnx.helper.make_opsetid("", 12)], + ) + expect(model, inputs=[x, y, z], outputs=[out], name="test_sequence_model2") + + # 3rd testcase - erase, insert and at, with negative index value. + # 1. SequenceConstruct(x, y, z): -> [x, y, z] + # 2. SequenceErase(-3): -> [y, z] + # 3. SequenceInsert(x, -1): -> [y, x, z] + # 4. 
SequenceAt(-1): -> z + seq_construct_node = onnx.helper.make_node( + "SequenceConstruct", ["X", "Y", "Z"], ["seq_1"] + ) + seq_erase_node = onnx.helper.make_node( + "SequenceErase", ["seq_1", "pos_erase"], ["seq_2"] + ) + seq_insert_node = onnx.helper.make_node( + "SequenceInsert", ["seq_2", "X", "pos_insert"], ["seq_3"] + ) + seq_at_node = onnx.helper.make_node("SequenceAt", ["seq_3", "pos_at"], ["out"]) + + tensor_shape = [2, 3, 4] + + x = np.ones(tensor_shape, dtype=np.float32) + y = np.zeros(tensor_shape, dtype=np.float32) + z = np.ones(tensor_shape, dtype=np.float32) * 2 + pos_erase_val = -3 + pos_insert_val = -1 + pos_at_val = -1 + out = SequenceConstructImpl(x, y, z) + out = SequenceEraseImpl(out, pos_erase_val) + out = SequenceInsertImpl(out, x, pos_insert_val) + out = SequenceAtImpl(out, pos_at_val) + assert np.array_equal(out, z) + + pos_erase = onnx.helper.make_tensor( + "pos_erase", TensorProto.INT64, (), (pos_erase_val,) + ) + pos_insert = onnx.helper.make_tensor( + "pos_insert", TensorProto.INT64, (), (pos_insert_val,) + ) + pos_at = onnx.helper.make_tensor("pos_at", TensorProto.INT64, (), (pos_at_val,)) + + graph = make_graph( + [seq_construct_node, seq_erase_node, seq_insert_node, seq_at_node], + [tensor_shape, tensor_shape, tensor_shape, [], [], []], # type: ignore + [tensor_shape], # type: ignore + ["X", "Y", "Z", "pos_erase", "pos_insert", "pos_at"], + ["out"], + [onnx.TensorProto.FLOAT] * 3 + [onnx.TensorProto.INT64] * 3, # type: ignore + [onnx.TensorProto.FLOAT], + [pos_erase, pos_insert, pos_at], + ) + model = onnx.helper.make_model_gen_version( + graph, + producer_name="backend-test", + opset_imports=[onnx.helper.make_opsetid("", 12)], + ) + expect(model, inputs=[x, y, z], outputs=[out], name="test_sequence_model3") + + # 4th testcase - concat + seq_construct_node = onnx.helper.make_node( + "SequenceConstruct", ["X", "Y", "Z"], ["seq_1"] + ) + seq_concat_node = onnx.helper.make_node( + "ConcatFromSequence", ["seq_1"], ["out"], axis=1 + ) + + tensor_shape = [2, 3, 4] + concat_out_shape = [2, None, 4] + + x = np.ones(tensor_shape, dtype=np.float32) + y = np.zeros(tensor_shape, dtype=np.float32) + z = np.ones(tensor_shape, dtype=np.float32) * 2 + out = SequenceConstructImpl(x, y, z) + concat_out = ConcatFromSequenceImpl(out, 1) + + graph = make_graph( + [seq_construct_node, seq_concat_node], + [tensor_shape] * 3, # type: ignore + [concat_out_shape], # type: ignore + ["X", "Y", "Z"], + ["out"], + [onnx.TensorProto.FLOAT] * 3, # type: ignore + [onnx.TensorProto.FLOAT], + ) + model = onnx.helper.make_model_gen_version( + graph, + producer_name="backend-test", + opset_imports=[onnx.helper.make_opsetid("", 12)], + ) + expect( + model, inputs=[x, y, z], outputs=[concat_out], name="test_sequence_model4" + ) + + # 5th testcase - concat with new_axis = 1 + seq_construct_node = onnx.helper.make_node( + "SequenceConstruct", ["X", "Y", "Z"], ["seq_1"] + ) + seq_concat_node = onnx.helper.make_node( + "ConcatFromSequence", ["seq_1"], ["out"], axis=-1, new_axis=1 + ) + + tensor_shape = [2, 3, 4] + concat_out_shape = [2, 3, 4, 3] + + x = np.ones(tensor_shape, dtype=np.float32) + y = np.zeros(tensor_shape, dtype=np.float32) + z = np.ones(tensor_shape, dtype=np.float32) * 2 + out = SequenceConstructImpl(x, y, z) + concat_out = ConcatFromSequenceImpl(out, -1, 1) + + graph = make_graph( + [seq_construct_node, seq_concat_node], + [tensor_shape] * 3, # type: ignore + [concat_out_shape], # type: ignore + ["X", "Y", "Z"], + ["out"], + [onnx.TensorProto.FLOAT] * 3, # type: ignore + 
[onnx.TensorProto.FLOAT], + ) + model = onnx.helper.make_model_gen_version( + graph, + producer_name="backend-test", + opset_imports=[onnx.helper.make_opsetid("", 12)], + ) + expect( + model, inputs=[x, y, z], outputs=[concat_out], name="test_sequence_model5" + ) + + # 6th testcase - split and len + seq_split_node = onnx.helper.make_node( + "SplitToSequence", ["X"], ["seq_1"], axis=-1 + ) + seq_len_node = onnx.helper.make_node("SequenceLength", ["seq_1"], ["len"]) + + tensor_shape = [2, 3, 4] + len_shape = [] # type: ignore + + x = np.ones(tensor_shape, dtype=np.float32) + out = SplitToSequenceImpl(x, axis=-1) + out = SequenceLengthImpl(out) + assert np.array_equal(out, np.int64(4)) + + graph = onnx.helper.make_graph( + nodes=[seq_split_node, seq_len_node], + name="Sequence", + inputs=[ + onnx.helper.make_tensor_value_info( + "X", onnx.TensorProto.FLOAT, tensor_shape + ) + ], + outputs=[ + onnx.helper.make_tensor_value_info( + "len", onnx.TensorProto.INT64, len_shape + ) + ], + ) # type: ignore + + model = onnx.helper.make_model_gen_version( + graph, + producer_name="backend-test", + opset_imports=[onnx.helper.make_opsetid("", 12)], + ) + expect(model, inputs=[x], outputs=[out], name="test_sequence_model6") + + # 7th testcase - split with keepdims=0, and SequenceAt + seq_split_node = onnx.helper.make_node( + "SplitToSequence", ["X"], ["seq_1"], axis=0, keepdims=0 + ) + seq_at_node = onnx.helper.make_node("SequenceAt", ["seq_1", "pos_at"], ["out"]) + + tensor_shape = [2, 3, 4] + out_shape = [3, 4] + + x = np.random.rand(*tensor_shape) + pos_at_val = 1 + out = SplitToSequenceImpl(x, axis=0, keepdims=0) + out = SequenceAtImpl(out, pos_at_val) + assert np.array_equal(out, x[pos_at_val]) + + pos_at = onnx.helper.make_tensor("pos_at", TensorProto.INT64, (), (pos_at_val,)) + + graph = make_graph( + [seq_split_node, seq_at_node], + [tensor_shape, []], # type: ignore + [out_shape], # type: ignore + ["X", "pos_at"], + ["out"], + [onnx.TensorProto.DOUBLE, onnx.TensorProto.INT64], + [onnx.TensorProto.DOUBLE], + [pos_at], + ) + model = onnx.helper.make_model_gen_version( + graph, + producer_name="backend-test", + opset_imports=[onnx.helper.make_opsetid("", 12)], + ) + expect(model, inputs=[x], outputs=[out], name="test_sequence_model7") + + # 8th testcase - split zero length + seq_split_node = onnx.helper.make_node( + "SplitToSequence", ["X", "Splits"], ["seq_1"] + ) + seq_len_node = onnx.helper.make_node("SequenceLength", ["seq_1"], ["len"]) + + tensor_shape = ["n"] # type: ignore + splits_shape = [3] # type: ignore + + x = np.array([]).astype(np.float32) + splits = np.array([0, 0, 0]).astype(np.int64) + out_len = np.int64(3) + + graph = onnx.helper.make_graph( + nodes=[seq_split_node, seq_len_node], + name="Sequence", + inputs=[ + onnx.helper.make_tensor_value_info( + "X", onnx.TensorProto.FLOAT, tensor_shape + ), # type: ignore + onnx.helper.make_tensor_value_info( + "Splits", onnx.TensorProto.INT64, splits_shape + ), + ], # type: ignore + outputs=[ + onnx.helper.make_tensor_value_info( + "len", onnx.TensorProto.INT64, len_shape + ) + ], + ) # type: ignore + + model = onnx.helper.make_model_gen_version( + graph, + producer_name="backend-test", + opset_imports=[onnx.helper.make_opsetid("", 12)], + ) + expect( + model, inputs=[x, splits], outputs=[out_len], name="test_sequence_model8" + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/model/shrink.py b/MLPY/Lib/site-packages/onnx/backend/test/case/model/shrink.py new file mode 100644 index 
0000000000000000000000000000000000000000..2b8016c62e0ce568c921623c40de4e8c3621542f --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/model/shrink.py @@ -0,0 +1,41 @@ +# Copyright (c) ONNX Project Contributors + +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.model import expect + + +class ShrinkTest(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "Shrink", + ["x"], + ["y"], + lambd=1.5, + bias=1.5, + ) + graph = onnx.helper.make_graph( + nodes=[node], + name="Shrink", + inputs=[ + onnx.helper.make_tensor_value_info("x", onnx.TensorProto.FLOAT, [5]) + ], + outputs=[ + onnx.helper.make_tensor_value_info("y", onnx.TensorProto.FLOAT, [5]) + ], + ) + model = onnx.helper.make_model_gen_version( + graph, + producer_name="backend-test", + opset_imports=[onnx.helper.make_opsetid("", 10)], + ) + + x = np.array([-2.0, -1.0, 0.0, 1.0, 2.0], dtype=np.float32) + y = np.array([-0.5, 0.0, 0.0, 0.0, 0.5], dtype=np.float32) + + expect(model, inputs=[x], outputs=[y], name="test_shrink") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/model/sign.py b/MLPY/Lib/site-packages/onnx/backend/test/case/model/sign.py new file mode 100644 index 0000000000000000000000000000000000000000..fcdb25df23a733a41b48812dc574d1dac325c44e --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/model/sign.py @@ -0,0 +1,35 @@ +# Copyright (c) ONNX Project Contributors + +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.model import expect + + +class SingleSign(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node("Sign", ["x"], ["y"], name="test") + + x = np.array([-1.0, 4.5, -4.5, 3.1, 0.0, 2.4, -5.5]).astype(np.float32) + y = np.array([-1.0, 1.0, -1.0, 1.0, 0.0, 1.0, -1.0]).astype(np.float32) + + graph = onnx.helper.make_graph( + nodes=[node], + name="SingleSign", + inputs=[ + onnx.helper.make_tensor_value_info("x", onnx.TensorProto.FLOAT, [7]) + ], + outputs=[ + onnx.helper.make_tensor_value_info("y", onnx.TensorProto.FLOAT, [7]) + ], + ) + model = onnx.helper.make_model_gen_version( + graph, + producer_name="backend-test", + opset_imports=[onnx.helper.make_opsetid("", 9)], + ) + expect(model, inputs=[x], outputs=[y], name="test_sign_model") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/model/single-relu.py b/MLPY/Lib/site-packages/onnx/backend/test/case/model/single-relu.py new file mode 100644 index 0000000000000000000000000000000000000000..46dde75342d10cdfe9baa34780ddcaf9f0351d23 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/model/single-relu.py @@ -0,0 +1,35 @@ +# Copyright (c) ONNX Project Contributors + +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.model import expect + + +class SingleRelu(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node("Relu", ["x"], ["y"], name="test") + graph = onnx.helper.make_graph( + nodes=[node], + name="SingleRelu", + inputs=[ + onnx.helper.make_tensor_value_info("x", onnx.TensorProto.FLOAT, [1, 2]) + ], + outputs=[ + onnx.helper.make_tensor_value_info("y", onnx.TensorProto.FLOAT, [1, 2]) + ], + ) + model = onnx.helper.make_model_gen_version( + graph, + producer_name="backend-test", + opset_imports=[onnx.helper.make_opsetid("", 9)], + ) + + x 
= np.random.randn(1, 2).astype(np.float32) + y = np.maximum(x, 0) + + expect(model, inputs=[x], outputs=[y], name="test_single_relu_model") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/model/stringnormalizer.py b/MLPY/Lib/site-packages/onnx/backend/test/case/model/stringnormalizer.py new file mode 100644 index 0000000000000000000000000000000000000000..b58656e128e7f6f0062ee745f181f2a3a314eb7c --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/model/stringnormalizer.py @@ -0,0 +1,203 @@ +# Copyright (c) ONNX Project Contributors + +# SPDX-License-Identifier: Apache-2.0 + + +from typing import Sequence + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.model import expect + + +class NormalizeStrings(Base): + @staticmethod + def export() -> None: + def make_graph( + node: onnx.helper.NodeProto, + input_shape: Sequence[int], + output_shape: Sequence[int], + ) -> onnx.helper.GraphProto: + graph = onnx.helper.make_graph( + nodes=[node], + name="StringNormalizer", + inputs=[ + onnx.helper.make_tensor_value_info( + "x", onnx.TensorProto.STRING, input_shape + ) + ], + outputs=[ + onnx.helper.make_tensor_value_info( + "y", onnx.TensorProto.STRING, output_shape + ) + ], + ) + return graph + + # 1st model_monday_casesensintive_nochangecase + stopwords = ["monday"] + node = onnx.helper.make_node( + "StringNormalizer", + inputs=["x"], + outputs=["y"], + is_case_sensitive=1, + stopwords=stopwords, + ) + + x = np.array(["monday", "tuesday", "wednesday", "thursday"]).astype(object) + y = np.array(["tuesday", "wednesday", "thursday"]).astype(object) + + graph = make_graph(node, [4], [3]) + model = onnx.helper.make_model_gen_version( + graph, + producer_name="backend-test", + opset_imports=[onnx.helper.make_opsetid("", 10)], + ) + expect( + model, + inputs=[x], + outputs=[y], + name="test_strnorm_model_monday_casesensintive_nochangecase", + ) + + # 2nd model_nostopwords_nochangecase + node = onnx.helper.make_node( + "StringNormalizer", inputs=["x"], outputs=["y"], is_case_sensitive=1 + ) + + x = np.array(["monday", "tuesday"]).astype(object) + y = x + + graph = make_graph(node, [2], [2]) + model = onnx.helper.make_model_gen_version( + graph, + producer_name="backend-test", + opset_imports=[onnx.helper.make_opsetid("", 10)], + ) + expect( + model, + inputs=[x], + outputs=[y], + name="test_strnorm_model_nostopwords_nochangecase", + ) + + # 3rd model_monday_casesensintive_lower + stopwords = ["monday"] + node = onnx.helper.make_node( + "StringNormalizer", + inputs=["x"], + outputs=["y"], + case_change_action="LOWER", + is_case_sensitive=1, + stopwords=stopwords, + ) + + x = np.array(["monday", "tuesday", "wednesday", "thursday"]).astype(object) + y = np.array(["tuesday", "wednesday", "thursday"]).astype(object) + + graph = make_graph(node, [4], [3]) + model = onnx.helper.make_model_gen_version( + graph, + producer_name="backend-test", + opset_imports=[onnx.helper.make_opsetid("", 10)], + ) + expect( + model, + inputs=[x], + outputs=[y], + name="test_strnorm_model_monday_casesensintive_lower", + ) + + # 4 model_monday_casesensintive_upper + stopwords = ["monday"] + node = onnx.helper.make_node( + "StringNormalizer", + inputs=["x"], + outputs=["y"], + case_change_action="UPPER", + is_case_sensitive=1, + stopwords=stopwords, + ) + + x = np.array(["monday", "tuesday", "wednesday", "thursday"]).astype(object) + y = np.array(["TUESDAY", "WEDNESDAY", "THURSDAY"]).astype(object) + + graph = make_graph(node, [4], [3]) + model = 
onnx.helper.make_model_gen_version( + graph, + producer_name="backend-test", + opset_imports=[onnx.helper.make_opsetid("", 10)], + ) + expect( + model, + inputs=[x], + outputs=[y], + name="test_strnorm_model_monday_casesensintive_upper", + ) + + # 5 monday_insensintive_upper_twodim + stopwords = ["monday"] + node = onnx.helper.make_node( + "StringNormalizer", + inputs=["x"], + outputs=["y"], + case_change_action="UPPER", + stopwords=stopwords, + ) + + input_shape = [1, 6] + output_shape = [1, 4] + x = ( + np.array( + ["Monday", "tuesday", "wednesday", "Monday", "tuesday", "wednesday"] + ) + .astype(object) + .reshape(input_shape) + ) + y = ( + np.array(["TUESDAY", "WEDNESDAY", "TUESDAY", "WEDNESDAY"]) + .astype(object) + .reshape(output_shape) + ) + + graph = make_graph(node, input_shape, output_shape) + model = onnx.helper.make_model_gen_version( + graph, + producer_name="backend-test", + opset_imports=[onnx.helper.make_opsetid("", 10)], + ) + expect( + model, + inputs=[x], + outputs=[y], + name="test_strnorm_model_monday_insensintive_upper_twodim", + ) + + # 6 monday_empty_output + stopwords = ["monday"] + node = onnx.helper.make_node( + "StringNormalizer", + inputs=["x"], + outputs=["y"], + case_change_action="UPPER", + is_case_sensitive=0, + stopwords=stopwords, + ) + + x = np.array(["monday", "monday"]).astype(object) + y = np.array([""]).astype(object) + + graph = make_graph(node, [2], [1]) + model = onnx.helper.make_model_gen_version( + graph, + producer_name="backend-test", + opset_imports=[onnx.helper.make_opsetid("", 10)], + ) + expect( + model, + inputs=[x], + outputs=[y], + name="test_strnorm_model_monday_empty_output", + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/abs.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/abs.py new file mode 100644 index 0000000000000000000000000000000000000000..3b5f80877862ec34b7206adf017af721c27d0467 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/abs.py @@ -0,0 +1,24 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.sample.ops.abs import abs +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class Abs(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "Abs", + inputs=["x"], + outputs=["y"], + ) + x = np.random.randn(3, 4, 5).astype(np.float32) + y = abs(x) + + expect(node, inputs=[x], outputs=[y], name="test_abs") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/acos.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/acos.py new file mode 100644 index 0000000000000000000000000000000000000000..31232c592be6e1a4eb76a25f4da685ca967ce354 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/acos.py @@ -0,0 +1,27 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class Acos(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "Acos", + inputs=["x"], + outputs=["y"], + ) + + x = np.array([-0.5, 0, 0.5]).astype(np.float32) + y = np.arccos(x) + expect(node, inputs=[x], outputs=[y], name="test_acos_example") + + x = np.random.rand(3, 4, 5).astype(np.float32) + y = np.arccos(x) + expect(node, inputs=[x], outputs=[y], name="test_acos") diff --git 
a/MLPY/Lib/site-packages/onnx/backend/test/case/node/acosh.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/acosh.py new file mode 100644 index 0000000000000000000000000000000000000000..b0c0e2aa209d47e96541c93f71c1446b84fbb278 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/acosh.py @@ -0,0 +1,27 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class Acosh(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "Acosh", + inputs=["x"], + outputs=["y"], + ) + + x = np.array([10, np.e, 1]).astype(np.float32) + y = np.arccosh(x) # expected output [2.99322295, 1.65745449, 0.] + expect(node, inputs=[x], outputs=[y], name="test_acosh_example") + + x = np.random.uniform(1.0, 10.0, (3, 4, 5)).astype(np.float32) + y = np.arccosh(x) + expect(node, inputs=[x], outputs=[y], name="test_acosh") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/adagrad.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/adagrad.py new file mode 100644 index 0000000000000000000000000000000000000000..05a203e3e7c0c4447858f2b4330096c256b45097 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/adagrad.py @@ -0,0 +1,115 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect +from onnx.defs import AI_ONNX_PREVIEW_TRAINING_DOMAIN + + +def apply_adagrad(r, t, x, g, h, norm_coefficient, epsilon, decay_factor): # type: ignore + # Compute adjusted learning-rate. + r_ = r / (1 + t * decay_factor) + # Add gradient of regularization term. + g_regularized = norm_coefficient * x + g + # Update squared accumulated gradient. + h_new = h + g_regularized * g_regularized + # Compute ADAGRAD's gradient scaling factors + h_sqrt = np.sqrt(h_new) + epsilon + # Apply ADAGRAD update rule. + x_new = x - r_ * g_regularized / h_sqrt + return (x_new, h_new) + + +class Adagrad(Base): + @staticmethod + def export_adagrad() -> None: + # Define operator attributes. + norm_coefficient = 0.001 + epsilon = 1e-5 + decay_factor = 0.1 + + # Create operator. + node = onnx.helper.make_node( + "Adagrad", + inputs=["R", "T", "X", "G", "H"], + outputs=["X_new", "H_new"], + norm_coefficient=norm_coefficient, + epsilon=epsilon, + decay_factor=decay_factor, + domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN, + ) + + # Define operator inputs. + r = np.array(0.1, dtype=np.float32) # scalar + t = np.array(0, dtype=np.int64) # scalar + x = np.array([1.0], dtype=np.float32) + g = np.array([-1.0], dtype=np.float32) + h = np.array([2.0], dtype=np.float32) + + # Compute expected outputs of Adagrad. + x_new, h_new = apply_adagrad( + r, t, x, g, h, norm_coefficient, epsilon, decay_factor + ) + + # Check results. + expect( + node, + inputs=[r, t, x, g, h], + outputs=[x_new, h_new], + name="test_adagrad", + opset_imports=[ + onnx.helper.make_opsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1) + ], + ) + + @staticmethod + def export_adagrad_multiple() -> None: + # Define operator attributes. 
+ norm_coefficient = 0.001 + epsilon = 1e-5 + decay_factor = 0.1 + + node = onnx.helper.make_node( + "Adagrad", + inputs=["R", "T", "X1", "X2", "G1", "G2", "H1", "H2"], + outputs=["X1_new", "X2_new", "H1_new", "H2_new"], + norm_coefficient=norm_coefficient, + epsilon=epsilon, + decay_factor=decay_factor, + domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN, + ) + + # Define operator inputs. + r = np.array(0.1, dtype=np.float32) # scalar + t = np.array(0, dtype=np.int64) # scalar + + x1 = np.array([1.0], dtype=np.float32) + g1 = np.array([-1.0], dtype=np.float32) + h1 = np.array([2.0], dtype=np.float32) + + x2 = np.array([1.0, 2.0], dtype=np.float32) + g2 = np.array([-1.0, -3.0], dtype=np.float32) + h2 = np.array([4.0, 1.0], dtype=np.float32) + + # Compute expected outputs of Adagrad. + x1_new, h1_new = apply_adagrad( + r, t, x1, g1, h1, norm_coefficient, epsilon, decay_factor + ) + x2_new, h2_new = apply_adagrad( + r, t, x2, g2, h2, norm_coefficient, epsilon, decay_factor + ) + + # Check results. + expect( + node, + inputs=[r, t, x1, x2, g1, g2, h1, h2], + outputs=[x1_new, x2_new, h1_new, h2_new], + name="test_adagrad_multiple", + opset_imports=[ + onnx.helper.make_opsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1) + ], + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/adam.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/adam.py new file mode 100644 index 0000000000000000000000000000000000000000..cab3181219c57857e69cd71e9474a47e1eaede94 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/adam.py @@ -0,0 +1,131 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect +from onnx.defs import AI_ONNX_PREVIEW_TRAINING_DOMAIN + + +def apply_adam(r, t, x, g, v, h, norm_coefficient, norm_coefficient_post, alpha, beta, epsilon): # type: ignore + # Add gradient of regularization term. + g_regularized = norm_coefficient * x + g + # Update momentum. + v_new = alpha * v + (1 - alpha) * g_regularized + # Update second-order momentum. + h_new = beta * h + (1 - beta) * (g_regularized * g_regularized) + # Compute element-wise square root. + h_sqrt = np.sqrt(h_new) + epsilon + # Adjust learning rate. + r_adjusted = None + if t > 0: + # Consider bias correction on momentums. + r_adjusted = r * np.sqrt(1 - beta**t) / (1 - alpha**t) + else: + # No bias correction on momentums. + r_adjusted = r + # Apply Adam update rule. + x_new = x - r_adjusted * (v_new / h_sqrt) + # It's possible to apply regularization in the end. + x_final = (1 - norm_coefficient_post) * x_new + return x_final, v_new, h_new + + +class Adam(Base): + @staticmethod + def export_adam() -> None: + # Define operator attributes. + norm_coefficient = 0.001 + alpha = 0.95 + beta = 0.1 + epsilon = 1e-7 + + # Create operator. + node = onnx.helper.make_node( + "Adam", + inputs=["R", "T", "X", "G", "V", "H"], + outputs=["X_new", "V_new", "H_new"], + norm_coefficient=norm_coefficient, + alpha=alpha, + beta=beta, + epsilon=epsilon, + domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN, + ) + + # Define operator inputs. + r = np.array(0.1, dtype=np.float32) # scalar + t = np.array(0, dtype=np.int64) # scalar + x = np.array([1.2, 2.8], dtype=np.float32) + g = np.array([-0.94, -2.5], dtype=np.float32) + v = np.array([1.7, 3.6], dtype=np.float32) + h = np.array([0.1, 0.1], dtype=np.float32) + + # Compute expected outputs of Adam. 
+ x_new, v_new, h_new = apply_adam( + r, t, x, g, v, h, norm_coefficient, 0.0, alpha, beta, epsilon + ) + + # Check results. + expect( + node, + inputs=[r, t, x, g, v, h], + outputs=[x_new, v_new, h_new], + name="test_adam", + opset_imports=[ + onnx.helper.make_opsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1) + ], + ) + + @staticmethod + def export_adam_multiple() -> None: + # Define operator attributes. + norm_coefficient = 0.001 + alpha = 0.95 + beta = 0.85 + epsilon = 1e-2 + + node = onnx.helper.make_node( + "Adam", + inputs=["R", "T", "X1", "X2", "G1", "G2", "V1", "V2", "H1", "H2"], + outputs=["X1_new", "X2_new", "V1_new", "V2_new", "H1_new", "H2_new"], + norm_coefficient=norm_coefficient, + alpha=alpha, + beta=beta, + domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN, + ) + + # Define operator inputs. + r = np.array(0.1, dtype=np.float32) # scalar + t = np.array(0, dtype=np.int64) # scalar + + x1 = np.array([1.0], dtype=np.float32) + g1 = np.array([-1.0], dtype=np.float32) + v1 = np.array([2.0], dtype=np.float32) + h1 = np.array([0.5], dtype=np.float32) + + x2 = np.array([1.0, 2.0], dtype=np.float32) + g2 = np.array([-1.0, -3.0], dtype=np.float32) + v2 = np.array([4.0, 1.0], dtype=np.float32) + h2 = np.array([1.0, 10.0], dtype=np.float32) + + # Compute expected outputs of Adam. + x1_new, v1_new, h1_new = apply_adam( + r, t, x1, g1, v1, h1, norm_coefficient, 0.0, alpha, beta, epsilon + ) + x2_new, v2_new, h2_new = apply_adam( + r, t, x2, g2, v2, h2, norm_coefficient, 0.0, alpha, beta, epsilon + ) + + # Check results. + expect( + node, + inputs=[r, t, x1, x2, g1, g2, v1, v2, h1, h2], + outputs=[x1_new, x2_new, v1_new, v2_new, h1_new, h2_new], + name="test_adam_multiple", + opset_imports=[ + onnx.helper.make_opsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1) + ], + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/add.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/add.py new file mode 100644 index 0000000000000000000000000000000000000000..fce9f17b17d1f6743157678c037d1e3d191c5f86 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/add.py @@ -0,0 +1,47 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class Add(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "Add", + inputs=["x", "y"], + outputs=["sum"], + ) + + x = np.random.randn(3, 4, 5).astype(np.float32) + y = np.random.randn(3, 4, 5).astype(np.float32) + expect(node, inputs=[x, y], outputs=[x + y], name="test_add") + + @staticmethod + def export_add_uint8() -> None: + node = onnx.helper.make_node( + "Add", + inputs=["x", "y"], + outputs=["sum"], + ) + + x = np.random.randint(24, size=(3, 4, 5), dtype=np.uint8) + y = np.random.randint(24, size=(3, 4, 5), dtype=np.uint8) + expect(node, inputs=[x, y], outputs=[x + y], name="test_add_uint8") + + @staticmethod + def export_add_broadcast() -> None: + node = onnx.helper.make_node( + "Add", + inputs=["x", "y"], + outputs=["sum"], + ) + + x = np.random.randn(3, 4, 5).astype(np.float32) + y = np.random.randn(5).astype(np.float32) + expect(node, inputs=[x, y], outputs=[x + y], name="test_add_bcast") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/affinegrid.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/affinegrid.py new file mode 100644 index 0000000000000000000000000000000000000000..fb114d3e4e820a65bd3e84579a0e8a44dc436ccd --- /dev/null +++ 
b/MLPY/Lib/site-packages/onnx/backend/test/case/node/affinegrid.py @@ -0,0 +1,209 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect +from onnx.reference.ops.op_affine_grid import ( + apply_affine_transform, + construct_original_grid, +) + + +def create_affine_matrix_3d( + angle1, + angle2, + offset_x, + offset_y, + offset_z, + shear_x, + shear_y, + shear_z, + scale_x, + scale_y, + scale_z, +): + rot_x = np.stack( + [ + np.ones_like(angle1), + np.zeros_like(angle1), + np.zeros_like(angle1), + np.zeros_like(angle1), + np.cos(angle1), + -np.sin(angle1), + np.zeros_like(angle1), + np.sin(angle1), + np.cos(angle1), + ], + axis=-1, + ).reshape(-1, 3, 3) + rot_y = np.stack( + [ + np.cos(angle2), + np.zeros_like(angle2), + np.sin(angle2), + np.zeros_like(angle2), + np.ones_like(angle2), + np.zeros_like(angle2), + -np.sin(angle2), + np.zeros_like(angle2), + np.cos(angle2), + ], + axis=-1, + ).reshape(-1, 3, 3) + shear = np.stack( + [ + np.ones_like(shear_x), + shear_x, + shear_y, + shear_z, + np.ones_like(shear_x), + shear_x, + shear_y, + shear_x, + np.ones_like(shear_x), + ], + axis=-1, + ).reshape(-1, 3, 3) + scale = np.stack( + [ + scale_x, + np.zeros_like(scale_x), + np.zeros_like(scale_x), + np.zeros_like(scale_x), + scale_y, + np.zeros_like(scale_x), + np.zeros_like(scale_x), + np.zeros_like(scale_x), + scale_z, + ], + axis=-1, + ).reshape(-1, 3, 3) + translation = np.transpose(np.array([offset_x, offset_y, offset_z])).reshape( + -1, 1, 3 + ) + rotation_matrix = rot_y @ rot_x @ shear @ scale # (N, 3, 3) + rotation_matrix = np.transpose(rotation_matrix, (0, 2, 1)) + affine_matrix = np.hstack((rotation_matrix, translation)) + affine_matrix = np.transpose(affine_matrix, (0, 2, 1)) + return affine_matrix.astype(np.float32) + + +def create_affine_matrix_2d( + angle1, offset_x, offset_y, shear_x, shear_y, scale_x, scale_y +): + rot = np.stack( + [np.cos(angle1), -np.sin(angle1), np.sin(angle1), np.cos(angle1)], axis=-1 + ).reshape(-1, 2, 2) + shear = np.stack( + [np.ones_like(shear_x), shear_x, shear_y, np.ones_like(shear_x)], axis=-1 + ).reshape(-1, 2, 2) + scale = np.stack( + [scale_x, np.zeros_like(scale_x), np.zeros_like(scale_x), scale_y], axis=-1 + ).reshape(-1, 2, 2) + translation = np.transpose(np.array([offset_x, offset_y])).reshape(-1, 1, 2) + rotation_matrix = rot @ shear @ scale # (N, 2, 2) + rotation_matrix = np.transpose(rotation_matrix, (0, 2, 1)) + affine_matrix = np.hstack((rotation_matrix, translation)) + affine_matrix = np.transpose(affine_matrix, (0, 2, 1)) + return affine_matrix.astype(np.float32) + + +def create_theta_2d(): + angle = np.array([np.pi / 4, np.pi / 3]) + offset_x = np.array([5.0, 2.5]) + offset_y = np.array([-3.3, 1.1]) + shear_x = np.array([-0.5, 0.5]) + shear_y = np.array([0.3, -0.3]) + scale_x = np.array([2.2, 1.1]) + scale_y = np.array([3.1, 0.9]) + theta_2d = create_affine_matrix_2d( + angle, offset_x, offset_y, shear_x, shear_y, scale_x, scale_y + ) + return theta_2d + + +def create_theta_3d(): + angle1 = np.array([np.pi / 4, np.pi / 3]) + angle2 = np.array([np.pi / 6, np.pi / 2]) + offset_x = np.array([5.0, 2.5]) + offset_y = np.array([-3.3, 1.1]) + offset_z = np.array([-1.1, 2.2]) + shear_x = np.array([-0.5, 0.5]) + shear_y = np.array([0.3, -0.3]) + shear_z = np.array([0.7, -0.2]) + scale_x = np.array([2.2, 1.1]) + scale_y = np.array([3.1, 0.9]) + scale_z = np.array([0.5, 1.5]) + +
theta_3d = create_affine_matrix_3d( + angle1, + angle2, + offset_x, + offset_y, + offset_z, + shear_x, + shear_y, + shear_z, + scale_x, + scale_y, + scale_z, + ) + return theta_3d + + +class AffineGrid(Base): + @staticmethod + def export_2d_no_reference_evaluator() -> None: + theta_2d = create_theta_2d() + N, C, H, W = len(theta_2d), 3, 5, 6 + data_size = (H, W) + for align_corners in (0, 1): + node = onnx.helper.make_node( + "AffineGrid", + inputs=["theta", "size"], + outputs=["grid"], + align_corners=align_corners, + ) + + original_grid = construct_original_grid(data_size, align_corners) + grid = apply_affine_transform(theta_2d, original_grid) + + test_name = "test_affine_grid_2d" + if align_corners == 1: + test_name += "_align_corners" + expect( + node, + inputs=[theta_2d, np.array([N, C, H, W], dtype=np.int64)], + outputs=[grid], + name=test_name, + ) + + @staticmethod + def export_3d_no_reference_evaluator() -> None: + theta_3d = create_theta_3d() + N, C, D, H, W = len(theta_3d), 3, 4, 5, 6 + data_size = (D, H, W) + for align_corners in (0, 1): + node = onnx.helper.make_node( + "AffineGrid", + inputs=["theta", "size"], + outputs=["grid"], + align_corners=align_corners, + ) + + original_grid = construct_original_grid(data_size, align_corners) + grid = apply_affine_transform(theta_3d, original_grid) + + test_name = "test_affine_grid_3d" + if align_corners == 1: + test_name += "_align_corners" + expect( + node, + inputs=[theta_3d, np.array([N, C, D, H, W], dtype=np.int64)], + outputs=[grid], + name=test_name, + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/__init__.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1dc1c3daca8192f53daf339d4f04bb4ba10f8328 Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/__pycache__/array_feature_extractor.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/__pycache__/array_feature_extractor.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0bda2cd173d6fdcebad116484b26b1dd1d26ee77 Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/__pycache__/array_feature_extractor.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/__pycache__/binarizer.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/__pycache__/binarizer.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..77204d1f1327a261c1c0ee102531b39be023b850 Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/__pycache__/binarizer.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/__pycache__/label_encoder.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/__pycache__/label_encoder.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..c2c1fbd007e7b830df92ddb9af4661b9ba72a0c0 Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/__pycache__/label_encoder.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/__pycache__/tree_ensemble.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/__pycache__/tree_ensemble.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9ed741eb55cb73c9c03ac784aac4c3e0e7d74be Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/__pycache__/tree_ensemble.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/array_feature_extractor.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/array_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..ce08e074581804ed65fd22a7033fa6d5cbc14218 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/array_feature_extractor.py @@ -0,0 +1,30 @@ +# Copyright (c) ONNX Project Contributors + +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class ArrayFeatureExtractor(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "ArrayFeatureExtractor", + inputs=["x", "y"], + outputs=["z"], + domain="ai.onnx.ml", + ) + + x = np.arange(12).reshape((3, 4)).astype(np.float32) + y = np.array([0, 1], dtype=np.int64) + z = np.array([[0, 4, 8], [1, 5, 9]], dtype=np.float32).T + expect( + node, + inputs=[x, y], + outputs=[z], + name="test_ai_onnx_ml_array_feature_extractor", + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/binarizer.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/binarizer.py new file mode 100644 index 0000000000000000000000000000000000000000..2c8ae4f1f1b6357e6e3f3b618cb49d6ac2a9cb26 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/binarizer.py @@ -0,0 +1,27 @@ +# Copyright (c) ONNX Project Contributors + +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect +from onnx.reference.ops.aionnxml.op_binarizer import compute_binarizer + + +class Binarizer(Base): + @staticmethod + def export() -> None: + threshold = 1.0 + node = onnx.helper.make_node( + "Binarizer", + inputs=["X"], + outputs=["Y"], + threshold=threshold, + domain="ai.onnx.ml", + ) + x = np.random.randn(3, 4, 5).astype(np.float32) + y = compute_binarizer(x, threshold)[0] + + expect(node, inputs=[x], outputs=[y], name="test_ai_onnx_ml_binarizer") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/label_encoder.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/label_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..83eb15c298988dec6d64c3ff5d622523a1ffd330 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/label_encoder.py @@ -0,0 +1,100 @@ +# Copyright (c) ONNX Project Contributors + +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect +from onnx.helper import make_tensor + + +class LabelEncoder(Base): + @staticmethod 
+ def export_string_int_label_encoder() -> None: + node = onnx.helper.make_node( + "LabelEncoder", + inputs=["X"], + outputs=["Y"], + domain="ai.onnx.ml", + keys_strings=["a", "b", "c"], + values_int64s=[0, 1, 2], + default_int64=42, + ) + x = np.array(["a", "b", "d", "c", "g"]).astype(object) + y = np.array([0, 1, 42, 2, 42]).astype(np.int64) + expect( + node, + inputs=[x], + outputs=[y], + name="test_ai_onnx_ml_label_encoder_string_int", + ) + + node = onnx.helper.make_node( + "LabelEncoder", + inputs=["X"], + outputs=["Y"], + domain="ai.onnx.ml", + keys_strings=["a", "b", "c"], + values_int64s=[0, 1, 2], + ) + x = np.array(["a", "b", "d", "c", "g"]).astype(object) + y = np.array([0, 1, -1, 2, -1]).astype(np.int64) + expect( + node, + inputs=[x], + outputs=[y], + name="test_ai_onnx_ml_label_encoder_string_int_no_default", + ) + + @staticmethod + def export_tensor_based_label_encoder() -> None: + tensor_keys = make_tensor( + "keys_tensor", onnx.TensorProto.STRING, (3,), ["a", "b", "c"] + ) + repeated_string_keys = ["a", "b", "c"] + x = np.array(["a", "b", "d", "c", "g"]).astype(object) + y = np.array([0, 1, 42, 2, 42]).astype(np.int16) + + node = onnx.helper.make_node( + "LabelEncoder", + inputs=["X"], + outputs=["Y"], + domain="ai.onnx.ml", + keys_tensor=tensor_keys, + values_tensor=make_tensor( + "values_tensor", onnx.TensorProto.INT16, (3,), [0, 1, 2] + ), + default_tensor=make_tensor( + "default_tensor", onnx.TensorProto.INT16, (1,), [42] + ), + ) + + expect( + node, + inputs=[x], + outputs=[y], + name="test_ai_onnx_ml_label_encoder_tensor_mapping", + ) + + node = onnx.helper.make_node( + "LabelEncoder", + inputs=["X"], + outputs=["Y"], + domain="ai.onnx.ml", + keys_strings=repeated_string_keys, + values_tensor=make_tensor( + "values_tensor", onnx.TensorProto.INT16, (3,), [0, 1, 2] + ), + default_tensor=make_tensor( + "default_tensor", onnx.TensorProto.INT16, (1,), [42] + ), + ) + + expect( + node, + inputs=[x], + outputs=[y], + name="test_ai_onnx_ml_label_encoder_tensor_value_only_mapping", + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/tree_ensemble.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/tree_ensemble.py new file mode 100644 index 0000000000000000000000000000000000000000..ff91b2bb4fdad9aa2fb7eb34f841cdc98f8278b4 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/tree_ensemble.py @@ -0,0 +1,122 @@ +# Copyright (c) ONNX Project Contributors + +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect +from onnx.helper import make_tensor + + +class TreeEnsemble(Base): + @staticmethod + def export_tree_ensemble_single_tree() -> None: + node = onnx.helper.make_node( + "TreeEnsemble", + ["X"], + ["Y"], + domain="ai.onnx.ml", + n_targets=2, + membership_values=None, + nodes_missing_value_tracks_true=None, + nodes_hitrates=None, + aggregate_function=1, + post_transform=0, + tree_roots=[0], + nodes_modes=make_tensor( + "nodes_modes", + onnx.TensorProto.UINT8, + (3,), + np.array([0, 0, 0], dtype=np.uint8), + ), + nodes_featureids=[0, 0, 0], + nodes_splits=make_tensor( + "nodes_splits", + onnx.TensorProto.DOUBLE, + (3,), + np.array([3.14, 1.2, 4.2], dtype=np.float64), + ), + nodes_truenodeids=[1, 0, 1], + nodes_trueleafs=[0, 1, 1], + nodes_falsenodeids=[2, 2, 3], + nodes_falseleafs=[0, 1, 1], + leaf_targetids=[0, 1, 0, 1], + leaf_weights=make_tensor( + "leaf_weights", + onnx.TensorProto.DOUBLE, 
+ (4,), + np.array([5.23, 12.12, -12.23, 7.21], dtype=np.float64), + ), + ) + + x = np.array([1.2, 3.4, -0.12, 1.66, 4.14, 1.77], np.float64).reshape(3, 2) + y = np.array([[5.23, 0], [5.23, 0], [0, 12.12]], dtype=np.float64) + expect( + node, + inputs=[x], + outputs=[y], + name="test_ai_onnx_ml_tree_ensemble_single_tree", + ) + + @staticmethod + def export_tree_ensemble_set_membership() -> None: + node = onnx.helper.make_node( + "TreeEnsemble", + ["X"], + ["Y"], + domain="ai.onnx.ml", + n_targets=4, + aggregate_function=1, + membership_values=make_tensor( + "membership_values", + onnx.TensorProto.FLOAT, + (8,), + [1.2, 3.7, 8, 9, np.nan, 12, 7, np.nan], + ), + nodes_missing_value_tracks_true=None, + nodes_hitrates=None, + post_transform=0, + tree_roots=[0], + nodes_modes=make_tensor( + "nodes_modes", + onnx.TensorProto.UINT8, + (3,), + np.array([0, 6, 6], dtype=np.uint8), + ), + nodes_featureids=[0, 0, 0], + nodes_splits=make_tensor( + "nodes_splits", + onnx.TensorProto.FLOAT, + (3,), + np.array([11, 232344.0, np.nan], dtype=np.float32), + ), + nodes_trueleafs=[0, 1, 1], + nodes_truenodeids=[1, 0, 1], + nodes_falseleafs=[1, 0, 1], + nodes_falsenodeids=[2, 2, 3], + leaf_targetids=[0, 1, 2, 3], + leaf_weights=make_tensor( + "leaf_weights", onnx.TensorProto.FLOAT, (4,), [1, 10, 1000, 100] + ), + ) + + x = np.array([1.2, 3.4, -0.12, np.nan, 12, 7], np.float32).reshape(-1, 1) + expected = np.array( + [ + [1, 0, 0, 0], + [0, 0, 0, 100], + [0, 0, 0, 100], + [0, 0, 1000, 0], + [0, 0, 1000, 0], + [0, 10, 0, 0], + ], + dtype=np.float32, + ) + expect( + node, + inputs=[x], + outputs=[expected], + name="test_ai_onnx_ml_tree_ensemble_set_membership", + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/and.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/and.py new file mode 100644 index 0000000000000000000000000000000000000000..54527953457e9f2950dd551debf2a5a7edb4ffe4 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/and.py @@ -0,0 +1,75 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class And(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "And", + inputs=["x", "y"], + outputs=["and"], + ) + + # 2d + x = (np.random.randn(3, 4) > 0).astype(bool) + y = (np.random.randn(3, 4) > 0).astype(bool) + z = np.logical_and(x, y) + expect(node, inputs=[x, y], outputs=[z], name="test_and2d") + + # 3d + x = (np.random.randn(3, 4, 5) > 0).astype(bool) + y = (np.random.randn(3, 4, 5) > 0).astype(bool) + z = np.logical_and(x, y) + expect(node, inputs=[x, y], outputs=[z], name="test_and3d") + + # 4d + x = (np.random.randn(3, 4, 5, 6) > 0).astype(bool) + y = (np.random.randn(3, 4, 5, 6) > 0).astype(bool) + z = np.logical_and(x, y) + expect(node, inputs=[x, y], outputs=[z], name="test_and4d") + + @staticmethod + def export_and_broadcast() -> None: + node = onnx.helper.make_node( + "And", + inputs=["x", "y"], + outputs=["and"], + ) + + # 3d vs 1d + x = (np.random.randn(3, 4, 5) > 0).astype(bool) + y = (np.random.randn(5) > 0).astype(bool) + z = np.logical_and(x, y) + expect(node, inputs=[x, y], outputs=[z], name="test_and_bcast3v1d") + + # 3d vs 2d + x = (np.random.randn(3, 4, 5) > 0).astype(bool) + y = (np.random.randn(4, 5) > 0).astype(bool) + z = np.logical_and(x, y) + expect(node, inputs=[x, y], outputs=[z], name="test_and_bcast3v2d") + + # 4d vs 2d + x = 
(np.random.randn(3, 4, 5, 6) > 0).astype(bool) + y = (np.random.randn(5, 6) > 0).astype(bool) + z = np.logical_and(x, y) + expect(node, inputs=[x, y], outputs=[z], name="test_and_bcast4v2d") + + # 4d vs 3d + x = (np.random.randn(3, 4, 5, 6) > 0).astype(bool) + y = (np.random.randn(4, 5, 6) > 0).astype(bool) + z = np.logical_and(x, y) + expect(node, inputs=[x, y], outputs=[z], name="test_and_bcast4v3d") + + # 4d vs 4d + x = (np.random.randn(1, 4, 1, 6) > 0).astype(bool) + y = (np.random.randn(3, 1, 5, 6) > 0).astype(bool) + z = np.logical_and(x, y) + expect(node, inputs=[x, y], outputs=[z], name="test_and_bcast4v4d") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/argmax.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/argmax.py new file mode 100644 index 0000000000000000000000000000000000000000..dd9c69da2548e56240e19df35087b5f4f0e6087f --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/argmax.py @@ -0,0 +1,255 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +def argmax_use_numpy(data: np.ndarray, axis: int = 0, keepdims: int = 1) -> np.ndarray: + result = np.argmax(data, axis=axis) + if keepdims == 1: + result = np.expand_dims(result, axis) + return result.astype(np.int64) + + +def argmax_use_numpy_select_last_index( + data: np.ndarray, axis: int = 0, keepdims: int = True +) -> np.ndarray: + data = np.flip(data, axis) + result = np.argmax(data, axis=axis) + result = data.shape[axis] - result - 1 + if keepdims: + result = np.expand_dims(result, axis) + return result.astype(np.int64) + + +class ArgMax(Base): + @staticmethod + def export_no_keepdims() -> None: + data = np.array([[2, 1], [3, 10]], dtype=np.float32) + axis = 1 + keepdims = 0 + node = onnx.helper.make_node( + "ArgMax", inputs=["data"], outputs=["result"], axis=axis, keepdims=keepdims + ) + # result: [0, 1] + result = argmax_use_numpy(data, axis=axis, keepdims=keepdims) + expect( + node, + inputs=[data], + outputs=[result], + name="test_argmax_no_keepdims_example", + ) + + data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32) + # result's shape: [2, 4] + result = argmax_use_numpy(data, axis=axis, keepdims=keepdims) + expect( + node, inputs=[data], outputs=[result], name="test_argmax_no_keepdims_random" + ) + + @staticmethod + def export_keepdims() -> None: + data = np.array([[2, 1], [3, 10]], dtype=np.float32) + axis = 1 + keepdims = 1 + node = onnx.helper.make_node( + "ArgMax", inputs=["data"], outputs=["result"], axis=axis, keepdims=keepdims + ) + # result: [[0], [1]] + result = argmax_use_numpy(data, axis=axis, keepdims=keepdims) + expect( + node, inputs=[data], outputs=[result], name="test_argmax_keepdims_example" + ) + + data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32) + # result's shape: [2, 1, 4] + result = argmax_use_numpy(data, axis=axis, keepdims=keepdims) + expect( + node, inputs=[data], outputs=[result], name="test_argmax_keepdims_random" + ) + + @staticmethod + def export_default_axes_keepdims() -> None: + data = np.array([[2, 1], [3, 10]], dtype=np.float32) + keepdims = 1 + node = onnx.helper.make_node( + "ArgMax", inputs=["data"], outputs=["result"], keepdims=keepdims + ) + + # result: [[1, 1]] + result = argmax_use_numpy(data, keepdims=keepdims) + expect( + node, + inputs=[data], + outputs=[result], + name="test_argmax_default_axis_example", + ) + + data = 
np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32) + # result's shape: [1, 3, 4] + result = argmax_use_numpy(data, keepdims=keepdims) + expect( + node, + inputs=[data], + outputs=[result], + name="test_argmax_default_axis_random", + ) + + @staticmethod + def export_negative_axis_keepdims() -> None: + data = np.array([[2, 1], [3, 10]], dtype=np.float32) + axis = -1 + keepdims = 1 + node = onnx.helper.make_node( + "ArgMax", inputs=["data"], outputs=["result"], axis=axis, keepdims=keepdims + ) + # result: [[0], [1]] + result = argmax_use_numpy(data, axis=axis, keepdims=keepdims) + expect( + node, + inputs=[data], + outputs=[result], + name="test_argmax_negative_axis_keepdims_example", + ) + + data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32) + # result's shape: [2, 3, 1] + result = argmax_use_numpy(data, axis=axis, keepdims=keepdims) + expect( + node, + inputs=[data], + outputs=[result], + name="test_argmax_negative_axis_keepdims_random", + ) + + @staticmethod + def export_no_keepdims_select_last_index() -> None: + data = np.array([[2, 2], [3, 10]], dtype=np.float32) + axis = 1 + keepdims = 0 + node = onnx.helper.make_node( + "ArgMax", + inputs=["data"], + outputs=["result"], + axis=axis, + keepdims=keepdims, + select_last_index=True, + ) + # result: [1, 1] + result = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims) + expect( + node, + inputs=[data], + outputs=[result], + name="test_argmax_no_keepdims_example_select_last_index", + ) + + data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32) + # result's shape: [2, 4] + result = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims) + expect( + node, + inputs=[data], + outputs=[result], + name="test_argmax_no_keepdims_random_select_last_index", + ) + + @staticmethod + def export_keepdims_select_last_index() -> None: + data = np.array([[2, 2], [3, 10]], dtype=np.float32) + axis = 1 + keepdims = 1 + node = onnx.helper.make_node( + "ArgMax", + inputs=["data"], + outputs=["result"], + axis=axis, + keepdims=keepdims, + select_last_index=True, + ) + # result: [[1], [1]] + result = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims) + expect( + node, + inputs=[data], + outputs=[result], + name="test_argmax_keepdims_example_select_last_index", + ) + + data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32) + # result's shape: [2, 1, 4] + result = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims) + expect( + node, + inputs=[data], + outputs=[result], + name="test_argmax_keepdims_random_select_last_index", + ) + + @staticmethod + def export_default_axes_keepdims_select_last_index() -> None: + data = np.array([[2, 2], [3, 10]], dtype=np.float32) + keepdims = 1 + node = onnx.helper.make_node( + "ArgMax", + inputs=["data"], + outputs=["result"], + keepdims=keepdims, + select_last_index=True, + ) + + # result: [[1, 1]] + result = argmax_use_numpy_select_last_index(data, keepdims=keepdims) + expect( + node, + inputs=[data], + outputs=[result], + name="test_argmax_default_axis_example_select_last_index", + ) + + data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32) + # result's shape: [1, 3, 4] + result = argmax_use_numpy_select_last_index(data, keepdims=keepdims) + expect( + node, + inputs=[data], + outputs=[result], + name="test_argmax_default_axis_random_select_last_index", + ) + + @staticmethod + def export_negative_axis_keepdims_select_last_index() -> None: + data = np.array([[2, 2], [3, 10]], dtype=np.float32) + axis = -1 + 
keepdims = 1 + node = onnx.helper.make_node( + "ArgMax", + inputs=["data"], + outputs=["result"], + axis=axis, + keepdims=keepdims, + select_last_index=True, + ) + # result: [[1], [1]] + result = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims) + expect( + node, + inputs=[data], + outputs=[result], + name="test_argmax_negative_axis_keepdims_example_select_last_index", + ) + + data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32) + # result's shape: [2, 3, 1] + result = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims) + expect( + node, + inputs=[data], + outputs=[result], + name="test_argmax_negative_axis_keepdims_random_select_last_index", + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/argmin.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/argmin.py new file mode 100644 index 0000000000000000000000000000000000000000..385f19d36ba9aa7f98a1c5a0aa24fb55c24d1d12 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/argmin.py @@ -0,0 +1,255 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +def argmin_use_numpy(data: np.ndarray, axis: int = 0, keepdims: int = 1) -> np.ndarray: + result = np.argmin(data, axis=axis) + if keepdims == 1: + result = np.expand_dims(result, axis) + return result.astype(np.int64) + + +def argmin_use_numpy_select_last_index( + data: np.ndarray, axis: int = 0, keepdims: int = True +) -> np.ndarray: + data = np.flip(data, axis) + result = np.argmin(data, axis=axis) + result = data.shape[axis] - result - 1 + if keepdims: + result = np.expand_dims(result, axis) + return result.astype(np.int64) + + +class ArgMin(Base): + @staticmethod + def export_no_keepdims() -> None: + data = np.array([[2, 1], [3, 10]], dtype=np.float32) + axis = 1 + keepdims = 0 + node = onnx.helper.make_node( + "ArgMin", inputs=["data"], outputs=["result"], axis=axis, keepdims=keepdims + ) + # The content of result is: [1, 0] + result = argmin_use_numpy(data, axis=axis, keepdims=keepdims) + expect( + node, + inputs=[data], + outputs=[result], + name="test_argmin_no_keepdims_example", + ) + + data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32) + # result's shape: [2, 4] + result = argmin_use_numpy(data, axis=axis, keepdims=keepdims) + expect( + node, inputs=[data], outputs=[result], name="test_argmin_no_keepdims_random" + ) + + @staticmethod + def export_keepdims() -> None: + data = np.array([[2, 1], [3, 10]], dtype=np.float32) + axis = 1 + keepdims = 1 + node = onnx.helper.make_node( + "ArgMin", inputs=["data"], outputs=["result"], axis=axis, keepdims=keepdims + ) + # The content of result is: [[1], [0]] + result = argmin_use_numpy(data, axis=axis, keepdims=keepdims) + expect( + node, inputs=[data], outputs=[result], name="test_argmin_keepdims_example" + ) + + data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32) + # result's shape: [2, 1, 4] + result = argmin_use_numpy(data, axis=axis, keepdims=keepdims) + expect( + node, inputs=[data], outputs=[result], name="test_argmin_keepdims_random" + ) + + @staticmethod + def export_default_axes_keepdims() -> None: + data = np.array([[2, 1], [3, 10]], dtype=np.float32) + keepdims = 1 + node = onnx.helper.make_node( + "ArgMin", inputs=["data"], outputs=["result"], keepdims=keepdims + ) + + # The content of result is: [[0, 0]] + result = argmin_use_numpy(data,
keepdims=keepdims) + expect( + node, + inputs=[data], + outputs=[result], + name="test_argmin_default_axis_example", + ) + + data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32) + # result's shape: [1, 3, 4] + result = argmin_use_numpy(data, keepdims=keepdims) + expect( + node, + inputs=[data], + outputs=[result], + name="test_argmin_default_axis_random", + ) + + @staticmethod + def export_negative_axis_keepdims() -> None: + data = np.array([[2, 1], [3, 10]], dtype=np.float32) + axis = -1 + keepdims = 1 + node = onnx.helper.make_node( + "ArgMin", inputs=["data"], outputs=["result"], axis=axis, keepdims=keepdims + ) + # The content of result is: [[1], [0]] + result = argmin_use_numpy(data, axis=axis, keepdims=keepdims) + expect( + node, + inputs=[data], + outputs=[result], + name="test_argmin_negative_axis_keepdims_example", + ) + + data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32) + # result's shape: [2, 3, 1] + result = argmin_use_numpy(data, axis=axis, keepdims=keepdims) + expect( + node, + inputs=[data], + outputs=[result], + name="test_argmin_negative_axis_keepdims_random", + ) + + @staticmethod + def export_no_keepdims_select_last_index() -> None: + data = np.array([[2, 2], [3, 10]], dtype=np.float32) + axis = 1 + keepdims = 0 + node = onnx.helper.make_node( + "ArgMin", + inputs=["data"], + outputs=["result"], + axis=axis, + keepdims=keepdims, + select_last_index=True, + ) + # result: [1, 0] + result = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims) + expect( + node, + inputs=[data], + outputs=[result], + name="test_argmin_no_keepdims_example_select_last_index", + ) + + data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32) + # result's shape: [2, 4] + result = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims) + expect( + node, + inputs=[data], + outputs=[result], + name="test_argmin_no_keepdims_random_select_last_index", + ) + + @staticmethod + def export_keepdims_select_last_index() -> None: + data = np.array([[2, 2], [3, 10]], dtype=np.float32) + axis = 1 + keepdims = 1 + node = onnx.helper.make_node( + "ArgMin", + inputs=["data"], + outputs=["result"], + axis=axis, + keepdims=keepdims, + select_last_index=True, + ) + # result: [[1], [0]] + result = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims) + expect( + node, + inputs=[data], + outputs=[result], + name="test_argmin_keepdims_example_select_last_index", + ) + + data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32) + # result's shape: [2, 1, 4] + result = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims) + expect( + node, + inputs=[data], + outputs=[result], + name="test_argmin_keepdims_random_select_last_index", + ) + + @staticmethod + def export_default_axes_keepdims_select_last_index() -> None: + data = np.array([[2, 2], [3, 10]], dtype=np.float32) + keepdims = 1 + node = onnx.helper.make_node( + "ArgMin", + inputs=["data"], + outputs=["result"], + keepdims=keepdims, + select_last_index=True, + ) + + # result: [[0, 0]] + result = argmin_use_numpy_select_last_index(data, keepdims=keepdims) + expect( + node, + inputs=[data], + outputs=[result], + name="test_argmin_default_axis_example_select_last_index", + ) + + data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32) + # result's shape: [1, 3, 4] + result = argmin_use_numpy_select_last_index(data, keepdims=keepdims) + expect( + node, + inputs=[data], + outputs=[result], + name="test_argmin_default_axis_random_select_last_index", + ) + +
@staticmethod + def export_negative_axis_keepdims_select_last_index() -> None: + data = np.array([[2, 2], [3, 10]], dtype=np.float32) + axis = -1 + keepdims = 1 + node = onnx.helper.make_node( + "ArgMin", + inputs=["data"], + outputs=["result"], + axis=axis, + keepdims=keepdims, + select_last_index=True, + ) + # result: [[1], [0]] + result = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims) + expect( + node, + inputs=[data], + outputs=[result], + name="test_argmin_negative_axis_keepdims_example_select_last_index", + ) + + data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32) + # result's shape: [2, 3, 1] + result = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims) + expect( + node, + inputs=[data], + outputs=[result], + name="test_argmin_negative_axis_keepdims_random_select_last_index", + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/asin.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/asin.py new file mode 100644 index 0000000000000000000000000000000000000000..e648b483efa56b8f8ae5b21052c64ed95256e6c9 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/asin.py @@ -0,0 +1,27 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class Asin(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "Asin", + inputs=["x"], + outputs=["y"], + ) + + x = np.array([-0.5, 0, 0.5]).astype(np.float32) + y = np.arcsin(x) + expect(node, inputs=[x], outputs=[y], name="test_asin_example") + + x = np.random.rand(3, 4, 5).astype(np.float32) + y = np.arcsin(x) + expect(node, inputs=[x], outputs=[y], name="test_asin") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/asinh.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/asinh.py new file mode 100644 index 0000000000000000000000000000000000000000..8ef9f9020fda74c22789293f909f87c545908779 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/asinh.py @@ -0,0 +1,27 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class Asinh(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "Asinh", + inputs=["x"], + outputs=["y"], + ) + + x = np.array([-1, 0, 1]).astype(np.float32) + y = np.arcsinh(x) # expected output [-0.88137358, 0., 0.88137358] + expect(node, inputs=[x], outputs=[y], name="test_asinh_example") + + x = np.random.randn(3, 4, 5).astype(np.float32) + y = np.arcsinh(x) + expect(node, inputs=[x], outputs=[y], name="test_asinh") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/atan.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/atan.py new file mode 100644 index 0000000000000000000000000000000000000000..c66822369d2c9b8fd3519d6dd2c64998f28676e7 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/atan.py @@ -0,0 +1,27 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class Atan(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "Atan", + inputs=["x"], + outputs=["y"], + ) + + x = np.array([-1, 0, 
1]).astype(np.float32) + y = np.arctan(x) + expect(node, inputs=[x], outputs=[y], name="test_atan_example") + + x = np.random.randn(3, 4, 5).astype(np.float32) + y = np.arctan(x) + expect(node, inputs=[x], outputs=[y], name="test_atan") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/atanh.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/atanh.py new file mode 100644 index 0000000000000000000000000000000000000000..3e2ca827b11b9a6ad54a4b1ce698bb8814ef5a0c --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/atanh.py @@ -0,0 +1,27 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class Atanh(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "Atanh", + inputs=["x"], + outputs=["y"], + ) + + x = np.array([-0.5, 0, 0.5]).astype(np.float32) + y = np.arctanh(x) # expected output [-0.54930615, 0., 0.54930615] + expect(node, inputs=[x], outputs=[y], name="test_atanh_example") + + x = np.random.uniform(0.0, 1.0, (3, 4, 5)).astype(np.float32) + y = np.arctanh(x) + expect(node, inputs=[x], outputs=[y], name="test_atanh") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/averagepool.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/averagepool.py new file mode 100644 index 0000000000000000000000000000000000000000..256caef950b9a14058ec22a027e2407bcd353ae7 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/averagepool.py @@ -0,0 +1,613 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect +from onnx.reference.ops.op_pool_common import ( + get_output_shape_auto_pad, + get_output_shape_explicit_padding, + get_pad_shape, + pool, +) + + +class AveragePool(Base): + @staticmethod + def export_averagepool_2d_precomputed_pads() -> None: + """input_shape: [1, 1, 5, 5] + output_shape: [1, 1, 5, 5] + pad_shape: [4, 4] -> [2, 2, 2, 2] by axis + """ + node = onnx.helper.make_node( + "AveragePool", + inputs=["x"], + outputs=["y"], + kernel_shape=[5, 5], + pads=[2, 2, 2, 2], + ) + x = np.array( + [ + [ + [ + [1, 2, 3, 4, 5], + [6, 7, 8, 9, 10], + [11, 12, 13, 14, 15], + [16, 17, 18, 19, 20], + [21, 22, 23, 24, 25], + ] + ] + ] + ).astype(np.float32) + y = np.array( + [ + [ + [ + [7, 7.5, 8, 8.5, 9], + [9.5, 10, 10.5, 11, 11.5], + [12, 12.5, 13, 13.5, 14], + [14.5, 15, 15.5, 16, 16.5], + [17, 17.5, 18, 18.5, 19], + ] + ] + ] + ).astype(np.float32) + + expect( + node, inputs=[x], outputs=[y], name="test_averagepool_2d_precomputed_pads" + ) + + @staticmethod + def export_averagepool_2d_precomputed_pads_count_include_pad() -> None: + """input_shape: [1, 1, 5, 5] + output_shape: [1, 1, 5, 5] + pad_shape: [4, 4] -> [2, 2, 2, 2] by axis + """ + node = onnx.helper.make_node( + "AveragePool", + inputs=["x"], + outputs=["y"], + kernel_shape=[5, 5], + pads=[2, 2, 2, 2], + count_include_pad=1, + ) + x = np.array( + [ + [ + [ + [1, 2, 3, 4, 5], + [6, 7, 8, 9, 10], + [11, 12, 13, 14, 15], + [16, 17, 18, 19, 20], + [21, 22, 23, 24, 25], + ] + ] + ] + ).astype(np.float32) + y = np.array( + [ + [ + [ + [2.5200, 3.6000, 4.8000, 4.0800, 3.2400], + [4.5600, 6.4000, 8.4000, 7.0400, 5.5200], + [7.2000, 10.0000, 13.0000, 10.8000, 8.4000], + [6.9600, 9.6000, 12.4000, 10.2400, 7.9200], + [6.1200, 
8.4000, 10.8000, 8.8800, 6.8400], + ] + ] + ] + ).astype(np.float32) + + expect( + node, + inputs=[x], + outputs=[y], + name="test_averagepool_2d_precomputed_pads_count_include_pad", + ) + + @staticmethod + def export_averagepool_2d_precomputed_strides() -> None: + """input_shape: [1, 1, 5, 5] + output_shape: [1, 1, 2, 2] + """ + node = onnx.helper.make_node( + "AveragePool", + inputs=["x"], + outputs=["y"], + kernel_shape=[2, 2], + strides=[2, 2], + ) + x = np.array( + [ + [ + [ + [1, 2, 3, 4, 5], + [6, 7, 8, 9, 10], + [11, 12, 13, 14, 15], + [16, 17, 18, 19, 20], + [21, 22, 23, 24, 25], + ] + ] + ] + ).astype(np.float32) + y = np.array([[[[4, 6], [14, 16]]]]).astype(np.float32) + + expect( + node, + inputs=[x], + outputs=[y], + name="test_averagepool_2d_precomputed_strides", + ) + + @staticmethod + def export_averagepool_2d_precomputed_same_upper() -> None: + """input_shape: [1, 1, 5, 5] + output_shape: [1, 1, 3, 3] + pad_shape: [2, 2] -> [1, 1, 1, 1] by axis + """ + node = onnx.helper.make_node( + "AveragePool", + inputs=["x"], + outputs=["y"], + kernel_shape=[3, 3], + strides=[2, 2], + auto_pad="SAME_UPPER", + ) + x = np.array( + [ + [ + [ + [1, 2, 3, 4, 5], + [6, 7, 8, 9, 10], + [11, 12, 13, 14, 15], + [16, 17, 18, 19, 20], + [21, 22, 23, 24, 25], + ] + ] + ] + ).astype(np.float32) + y = np.array([[[[4, 5.5, 7], [11.5, 13, 14.5], [19, 20.5, 22]]]]).astype( + np.float32 + ) + + expect( + node, + inputs=[x], + outputs=[y], + name="test_averagepool_2d_precomputed_same_upper", + ) + + @staticmethod + def export_averagepool_1d_default() -> None: + """input_shape: [1, 3, 32] + output_shape: [1, 3, 31] + """ + node = onnx.helper.make_node( + "AveragePool", + inputs=["x"], + outputs=["y"], + kernel_shape=[2], + ) + x = np.random.randn(1, 3, 32).astype(np.float32) + x_shape = np.shape(x) + pads = None + kernel_shape = [2] + strides = [1] + out_shape, _ = get_output_shape_explicit_padding( + pads, x_shape[2:], kernel_shape, strides + ) + padded = x + y = pool(padded, x_shape, kernel_shape, strides, out_shape, "AVG") + + expect(node, inputs=[x], outputs=[y], name="test_averagepool_1d_default") + + @staticmethod + def export_averagepool_2d_default() -> None: + """input_shape: [1, 3, 32, 32] + output_shape: [1, 3, 31, 31] + """ + node = onnx.helper.make_node( + "AveragePool", + inputs=["x"], + outputs=["y"], + kernel_shape=[2, 2], + ) + x = np.random.randn(1, 3, 32, 32).astype(np.float32) + x_shape = np.shape(x) + pads = None + kernel_shape = (2, 2) + strides = (1, 1) + out_shape, _ = get_output_shape_explicit_padding( + pads, x_shape[2:], kernel_shape, strides + ) + padded = x + y = pool(padded, x_shape, kernel_shape, strides, out_shape, "AVG") + + expect(node, inputs=[x], outputs=[y], name="test_averagepool_2d_default") + + @staticmethod + def export_averagepool_3d_default() -> None: + """input_shape: [1, 3, 32, 32, 32] + output_shape: [1, 3, 31, 31, 31] + """ + node = onnx.helper.make_node( + "AveragePool", + inputs=["x"], + outputs=["y"], + kernel_shape=[2, 2, 2], + ) + x = np.random.randn(1, 3, 32, 32, 32).astype(np.float32) + x_shape = np.shape(x) + pads = None + kernel_shape = [2, 2, 2] + strides = [1, 1, 1] + out_shape, _ = get_output_shape_explicit_padding( + pads, x_shape[2:], kernel_shape, strides + ) + padded = x + y = pool(padded, x_shape, kernel_shape, strides, out_shape, "AVG") + + expect(node, inputs=[x], outputs=[y], name="test_averagepool_3d_default") + + @staticmethod + def export_averagepool_2d_same_upper() -> None: + """input_shape: [1, 3, 32, 32] + output_shape: [1, 3, 32, 
32] + pad_shape: [1, 1] -> [0, 1, 0, 1] by axis + """ + node = onnx.helper.make_node( + "AveragePool", + inputs=["x"], + outputs=["y"], + kernel_shape=[2, 2], + auto_pad="SAME_UPPER", + ) + x = np.random.randn(1, 3, 32, 32).astype(np.float32) + x_shape = np.shape(x) + kernel_shape = (2, 2) + strides = (1, 1) + out_shape = get_output_shape_auto_pad( + "SAME_UPPER", x_shape[2:], kernel_shape, strides + ) + pad_shape = get_pad_shape( + "SAME_UPPER", x_shape[2:], kernel_shape, strides, out_shape + ) + pad_top = pad_shape[0] // 2 + pad_bottom = pad_shape[0] - pad_top + pad_left = pad_shape[1] // 2 + pad_right = pad_shape[1] - pad_left + padded = np.pad( + x, + ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), + mode="constant", + constant_values=np.nan, + ) + pads = (pad_top, pad_left, pad_bottom, pad_right) + y = pool(padded, x_shape, kernel_shape, strides, out_shape, "AVG", pads) + + expect(node, inputs=[x], outputs=[y], name="test_averagepool_2d_same_upper") + + @staticmethod + def export_averagepool_2d_same_lower() -> None: + """input_shape: [1, 3, 32, 32] + output_shape: [1, 3, 32, 32] + pad_shape: [1, 1] -> [1, 0, 1, 0] by axis + """ + node = onnx.helper.make_node( + "AveragePool", + inputs=["x"], + outputs=["y"], + kernel_shape=[2, 2], + auto_pad="SAME_LOWER", + ) + x = np.random.randn(1, 3, 32, 32).astype(np.float32) + x_shape = np.shape(x) + kernel_shape = (2, 2) + strides = (1, 1) + out_shape = get_output_shape_auto_pad( + "SAME_LOWER", x_shape[2:], kernel_shape, strides + ) + pad_shape = get_pad_shape( + "SAME_LOWER", x_shape[2:], kernel_shape, strides, out_shape + ) + pad_bottom = pad_shape[0] // 2 + pad_top = pad_shape[0] - pad_bottom + pad_right = pad_shape[1] // 2 + pad_left = pad_shape[1] - pad_right + padded = np.pad( + x, + ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), + mode="constant", + constant_values=np.nan, + ) + pads = (pad_top, pad_left, pad_bottom, pad_right) + y = pool(padded, x_shape, kernel_shape, strides, out_shape, "AVG", pads) + + expect(node, inputs=[x], outputs=[y], name="test_averagepool_2d_same_lower") + + @staticmethod + def export_averagepool_2d_pads() -> None: + """input_shape: [1, 3, 28, 28] + output_shape: [1, 3, 30, 30] + pad_shape: [4, 4] -> [2, 2, 2, 2] by axis + """ + node = onnx.helper.make_node( + "AveragePool", + inputs=["x"], + outputs=["y"], + kernel_shape=[3, 3], + pads=[2, 2, 2, 2], + ) + x = np.random.randn(1, 3, 28, 28).astype(np.float32) + x_shape = np.shape(x) + kernel_shape = (3, 3) + strides = (1, 1) + pad_bottom = 2 + pad_top = 2 + pad_right = 2 + pad_left = 2 + pads = [pad_top, pad_left, pad_bottom, pad_right] + out_shape, pads = get_output_shape_explicit_padding( + pads, x_shape[2:], kernel_shape, strides, ceil_mode=False + ) + padded = np.pad( + x, + ((0, 0), (0, 0), (pads[0], pads[2]), (pads[1], pads[3])), + mode="constant", + constant_values=np.nan, + ) + y = pool(padded, x_shape, kernel_shape, strides, out_shape, "AVG", pads) + + expect(node, inputs=[x], outputs=[y], name="test_averagepool_2d_pads") + + @staticmethod + def export_averagepool_2d_pads_count_include_pad() -> None: + """input_shape: [1, 3, 28, 28] + output_shape: [1, 3, 30, 30] + pad_shape: [4, 4] -> [2, 2, 2, 2] by axis + """ + node = onnx.helper.make_node( + "AveragePool", + inputs=["x"], + outputs=["y"], + kernel_shape=[3, 3], + pads=[2, 2, 2, 2], + count_include_pad=1, + ) + x = np.random.randn(1, 3, 28, 28).astype(np.float32) + x_shape = np.shape(x) + dilations = (1, 1) + kernel_shape = (3, 3) + strides = (1, 1) + pad_bottom = 2 + 
pad_top = 2 + pad_right = 2 + pad_left = 2 + pads = [pad_top, pad_left, pad_bottom, pad_right] + out_shape, pads = get_output_shape_explicit_padding( + pads, x_shape[2:], kernel_shape, strides, dilations, ceil_mode=False + ) + padded = np.pad( + x, + ((0, 0), (0, 0), (pads[0], pads[2]), (pads[1], pads[3])), + mode="constant", + constant_values=0, + ) + y = pool( + padded, + x_shape, + kernel_shape, + strides, + out_shape, + "AVG", + pads, + count_include_pad=1, + ) + + expect( + node, + inputs=[x], + outputs=[y], + name="test_averagepool_2d_pads_count_include_pad", + ) + + @staticmethod + def export_averagepool_2d_strides() -> None: + """input_shape: [1, 3, 32, 32] + output_shape: [1, 3, 10, 10] + """ + node = onnx.helper.make_node( + "AveragePool", + inputs=["x"], + outputs=["y"], + kernel_shape=[5, 5], + strides=[3, 3], + ) + x = np.random.randn(1, 3, 32, 32).astype(np.float32) + x_shape = np.shape(x) + kernel_shape = (5, 5) + strides = (3, 3) + out_shape, pads = get_output_shape_explicit_padding( + None, x_shape[2:], kernel_shape, strides, ceil_mode=False + ) + padded = x + y = pool(padded, x_shape, kernel_shape, strides, out_shape, "AVG", pads) + + expect(node, inputs=[x], outputs=[y], name="test_averagepool_2d_strides") + + @staticmethod + def export_averagepool_2d_ceil() -> None: + """input_shape: [1, 1, 4, 4] + output_shape: [1, 1, 2, 2] + """ + node = onnx.helper.make_node( + "AveragePool", + inputs=["x"], + outputs=["y"], + kernel_shape=[3, 3], + strides=[2, 2], + ceil_mode=True, + ) + x = np.array( + [ + [ + [ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16], + ] + ] + ] + ).astype(np.float32) + y = np.array([[[[6, 7.5], [12, 13.5]]]]).astype(np.float32) + + expect(node, inputs=[x], outputs=[y], name="test_averagepool_2d_ceil") + + @staticmethod + def export_averagepool_2d_dilations() -> None: + """input_shape: [1, 1, 4, 4] + output_shape: [1, 1, 2, 2] + """ + node = onnx.helper.make_node( + "AveragePool", + inputs=["x"], + outputs=["y"], + kernel_shape=[2, 2], + strides=[1, 1], + dilations=[2, 2], + ceil_mode=True, + ) + + # input shape: [1, 1, 4, 4] + x = np.array( + [ + [ + [ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16], + ] + ] + ] + ).astype(np.float32) + + y = np.array([[[[6, 7], [10, 11]]]]).astype(np.float32) + + expect(node, inputs=[x], outputs=[y], name="test_averagepool_2d_dilations") + + @staticmethod + def export_averagepool_3d_dilations() -> None: + """input_shape: [1, 1, 4, 4, 4] + output_shape: [1, 1, 2, 2, 2] + """ + node = onnx.helper.make_node( + "AveragePool", + inputs=["x"], + outputs=["y"], + kernel_shape=[2, 2, 2], + strides=[1, 1, 1], + dilations=[2, 2, 2], + ceil_mode=True, + ) + + # input shape: [1, 1, 4, 4, 4] + x = np.array( + [ + [ + [ + [ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16], + ], + [ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16], + ], + [ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16], + ], + [ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16], + ], + ] + ] + ] + ).astype(np.float32) + + y = np.array([[[[[6, 7], [10, 11]], [[6, 7], [10, 11]]]]]).astype(np.float32) + + expect( + node, inputs=[x], outputs=[y], name="test_averagepool_3d_dilations_small" + ) + + @staticmethod + def export_averagepool_3d_dilations_large() -> None: + x_shape = (32, 32, 32) + dilations = (2, 2, 2) + kernel_shape = (5, 5, 5) + strides = (3, 3, 3) + + for count_include_pad in (0, 1): + for ceil_mode in (True, 
False): + node = onnx.helper.make_node( + "AveragePool", + inputs=["x"], + outputs=["y"], + kernel_shape=kernel_shape, + strides=strides, + dilations=dilations, + count_include_pad=count_include_pad, + ceil_mode=ceil_mode, + ) + + x = np.random.randn(1, 1, *x_shape).astype(np.float32) + out_shape, pads = get_output_shape_explicit_padding( + None, + x_shape, + kernel_shape, + strides, + dilations=dilations, + ceil_mode=ceil_mode, + ) + padded = np.pad( + x, + ( + (0, 0), + (0, 0), + (pads[0], pads[3]), + (pads[1], pads[4]), + (pads[2], pads[5]), + ), + mode="constant", + constant_values=0 if count_include_pad == 1 else np.nan, + ) + y = pool( + padded, + (1, 1, *x_shape), + kernel_shape, + strides, + out_shape, + "AVG", + pads=pads, + dilations=dilations, + count_include_pad=count_include_pad, + ) + + test_name = f"test_averagepool_3d_dilations_large_count_include_pad_is_{count_include_pad}_ceil_mode_is_{ceil_mode}" + expect(node, inputs=[x], outputs=[y], name=test_name) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/batchnorm.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/batchnorm.py new file mode 100644 index 0000000000000000000000000000000000000000..439ece136fd526b3b2484cacc9024c53211da013 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/batchnorm.py @@ -0,0 +1,136 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +def _batchnorm_test_mode(x, s, bias, mean, var, epsilon=1e-5): # type: ignore + dims_x = len(x.shape) + dim_ones = (1,) * (dims_x - 2) + s = s.reshape(-1, *dim_ones) + bias = bias.reshape(-1, *dim_ones) + mean = mean.reshape(-1, *dim_ones) + var = var.reshape(-1, *dim_ones) + return s * (x - mean) / np.sqrt(var + epsilon) + bias + + +def _batchnorm_training_mode(x, s, bias, mean, var, momentum=0.9, epsilon=1e-5): # type: ignore + axis = tuple(np.delete(np.arange(len(x.shape)), 1)) + saved_mean = x.mean(axis=axis) + saved_var = x.var(axis=axis) + output_mean = mean * momentum + saved_mean * (1 - momentum) + output_var = var * momentum + saved_var * (1 - momentum) + y = _batchnorm_test_mode(x, s, bias, saved_mean, saved_var, epsilon=epsilon) + return y.astype(np.float32), output_mean, output_var + + +class BatchNormalization(Base): + @staticmethod + def export() -> None: + # input size: (2, 3, 4, 5) + x = np.random.randn(2, 3, 4, 5).astype(np.float32) + s = np.random.randn(3).astype(np.float32) + bias = np.random.randn(3).astype(np.float32) + mean = np.random.randn(3).astype(np.float32) + var = np.random.rand(3).astype(np.float32) + y = _batchnorm_test_mode(x, s, bias, mean, var).astype(np.float32) + + node = onnx.helper.make_node( + "BatchNormalization", + inputs=["x", "s", "bias", "mean", "var"], + outputs=["y"], + ) + + # output size: (2, 3, 4, 5) + expect( + node, + inputs=[x, s, bias, mean, var], + outputs=[y], + name="test_batchnorm_example", + ) + + # input size: (2, 3, 4, 5) + x = np.random.randn(2, 3, 4, 5).astype(np.float32) + s = np.random.randn(3).astype(np.float32) + bias = np.random.randn(3).astype(np.float32) + mean = np.random.randn(3).astype(np.float32) + var = np.random.rand(3).astype(np.float32) + epsilon = 1e-2 + y = _batchnorm_test_mode(x, s, bias, mean, var, epsilon).astype(np.float32) + + node = onnx.helper.make_node( + "BatchNormalization", + inputs=["x", "s", "bias", "mean", "var"], + outputs=["y"], + epsilon=epsilon, + ) + + # output 
size: (2, 3, 4, 5) + expect( + node, + inputs=[x, s, bias, mean, var], + outputs=[y], + name="test_batchnorm_epsilon", + ) + + @staticmethod + def export_train() -> None: + # input size: (2, 3, 4, 5) + x = np.random.randn(2, 3, 4, 5).astype(np.float32) + s = np.random.randn(3).astype(np.float32) + bias = np.random.randn(3).astype(np.float32) + mean = np.random.randn(3).astype(np.float32) + var = np.random.rand(3).astype(np.float32) + # np.bool(1) fails while generating test data ("'bool' object has no attribute 'dtype'"), + # so a plain integer flag is used as a workaround + training_mode = 1 + y, output_mean, output_var = _batchnorm_training_mode(x, s, bias, mean, var) + + node = onnx.helper.make_node( + "BatchNormalization", + inputs=["x", "s", "bias", "mean", "var"], + outputs=["y", "output_mean", "output_var"], + training_mode=training_mode, + ) + + # output size: (2, 3, 4, 5) + expect( + node, + inputs=[x, s, bias, mean, var], + outputs=[y, output_mean, output_var], + name="test_batchnorm_example_training_mode", + ) + + # input size: (2, 3, 4, 5) + x = np.random.randn(2, 3, 4, 5).astype(np.float32) + s = np.random.randn(3).astype(np.float32) + bias = np.random.randn(3).astype(np.float32) + mean = np.random.randn(3).astype(np.float32) + var = np.random.rand(3).astype(np.float32) + training_mode = 1 + momentum = 0.9 + epsilon = 1e-2 + y, output_mean, output_var = _batchnorm_training_mode( + x, s, bias, mean, var, momentum, epsilon + ) + + node = onnx.helper.make_node( + "BatchNormalization", + inputs=["x", "s", "bias", "mean", "var"], + outputs=["y", "output_mean", "output_var"], + epsilon=epsilon, + training_mode=training_mode, + ) + + # output size: (2, 3, 4, 5) + expect( + node, + inputs=[x, s, bias, mean, var], + outputs=[y, output_mean, output_var], + name="test_batchnorm_epsilon_training_mode", + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/bernoulli.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/bernoulli.py new file mode 100644 index 0000000000000000000000000000000000000000..e54094c2d76689ed70ab4c749148fe5d9faed80d --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/bernoulli.py @@ -0,0 +1,58 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +def bernoulli_reference_implementation(x, dtype): # type: ignore + # a binomial distribution with n = 1 is equivalent to a Bernoulli distribution + # This example and test case are for informational purposes. The generator operator is + # non-deterministic and may not produce the same values in different implementations + # even if a seed is specified. 
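+ # For example, bernoulli_reference_implementation(np.array([0.0, 0.5, 1.0]), np.float32) + # always yields 0.0 at index 0 and 1.0 at index 2, while index 1 is 0.0 or 1.0 with + # equal probability.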
+ return np.random.binomial(1, p=x).astype(dtype) + + +class Bernoulli(Base): + @staticmethod + def export_bernoulli_without_dtype() -> None: + node = onnx.helper.make_node( + "Bernoulli", + inputs=["x"], + outputs=["y"], + ) + + x = np.random.uniform(0.0, 1.0, 10).astype(float) + y = bernoulli_reference_implementation(x, float) + expect(node, inputs=[x], outputs=[y], name="test_bernoulli") + + @staticmethod + def export_bernoulli_with_dtype() -> None: + node = onnx.helper.make_node( + "Bernoulli", + inputs=["x"], + outputs=["y"], + dtype=onnx.TensorProto.DOUBLE, + ) + + x = np.random.uniform(0.0, 1.0, 10).astype(np.float32) + y = bernoulli_reference_implementation(x, float) + expect(node, inputs=[x], outputs=[y], name="test_bernoulli_double") + + @staticmethod + def export_bernoulli_with_seed() -> None: + seed = float(0) + node = onnx.helper.make_node( + "Bernoulli", + inputs=["x"], + outputs=["y"], + seed=seed, + ) + + x = np.random.uniform(0.0, 1.0, 10).astype(np.float32) + y = bernoulli_reference_implementation(x, np.float32) + expect(node, inputs=[x], outputs=[y], name="test_bernoulli_seed") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/bitshift.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/bitshift.py new file mode 100644 index 0000000000000000000000000000000000000000..f2b4c976c510ab40150af322db2ba960bb79dbc9 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/bitshift.py @@ -0,0 +1,99 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class BitShift(Base): + @staticmethod + def export_right_uint8() -> None: + node = onnx.helper.make_node( + "BitShift", inputs=["x", "y"], outputs=["z"], direction="RIGHT" + ) + + x = np.array([16, 4, 1]).astype(np.uint8) + y = np.array([1, 2, 3]).astype(np.uint8) + z = x >> y # expected output [8, 1, 0] + expect(node, inputs=[x, y], outputs=[z], name="test_bitshift_right_uint8") + + @staticmethod + def export_right_uint16() -> None: + node = onnx.helper.make_node( + "BitShift", inputs=["x", "y"], outputs=["z"], direction="RIGHT" + ) + + x = np.array([16, 4, 1]).astype(np.uint16) + y = np.array([1, 2, 3]).astype(np.uint16) + z = x >> y # expected output [8, 1, 0] + expect(node, inputs=[x, y], outputs=[z], name="test_bitshift_right_uint16") + + @staticmethod + def export_right_uint32() -> None: + node = onnx.helper.make_node( + "BitShift", inputs=["x", "y"], outputs=["z"], direction="RIGHT" + ) + + x = np.array([16, 4, 1]).astype(np.uint32) + y = np.array([1, 2, 3]).astype(np.uint32) + z = x >> y # expected output [8, 1, 0] + expect(node, inputs=[x, y], outputs=[z], name="test_bitshift_right_uint32") + + @staticmethod + def export_right_uint64() -> None: + node = onnx.helper.make_node( + "BitShift", inputs=["x", "y"], outputs=["z"], direction="RIGHT" + ) + + x = np.array([16, 4, 1]).astype(np.uint64) + y = np.array([1, 2, 3]).astype(np.uint64) + z = x >> y # expected output [8, 1, 0] + expect(node, inputs=[x, y], outputs=[z], name="test_bitshift_right_uint64") + + @staticmethod + def export_left_uint8() -> None: + node = onnx.helper.make_node( + "BitShift", inputs=["x", "y"], outputs=["z"], direction="LEFT" + ) + + x = np.array([16, 4, 1]).astype(np.uint8) + y = np.array([1, 2, 3]).astype(np.uint8) + z = x << y # expected output [32, 16, 8] + expect(node, inputs=[x, y], outputs=[z], name="test_bitshift_left_uint8") + + @staticmethod + 
def export_left_uint16() -> None: + node = onnx.helper.make_node( + "BitShift", inputs=["x", "y"], outputs=["z"], direction="LEFT" + ) + + x = np.array([16, 4, 1]).astype(np.uint16) + y = np.array([1, 2, 3]).astype(np.uint16) + z = x << y # expected output [32, 16, 8] + expect(node, inputs=[x, y], outputs=[z], name="test_bitshift_left_uint16") + + @staticmethod + def export_left_uint32() -> None: + node = onnx.helper.make_node( + "BitShift", inputs=["x", "y"], outputs=["z"], direction="LEFT" + ) + + x = np.array([16, 4, 1]).astype(np.uint32) + y = np.array([1, 2, 3]).astype(np.uint32) + z = x << y # expected output [32, 16, 8] + expect(node, inputs=[x, y], outputs=[z], name="test_bitshift_left_uint32") + + @staticmethod + def export_left_uint64() -> None: + node = onnx.helper.make_node( + "BitShift", inputs=["x", "y"], outputs=["z"], direction="LEFT" + ) + + x = np.array([16, 4, 1]).astype(np.uint64) + y = np.array([1, 2, 3]).astype(np.uint64) + z = x << y # expected output [32, 16, 8] + expect(node, inputs=[x, y], outputs=[z], name="test_bitshift_left_uint64") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/bitwiseand.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/bitwiseand.py new file mode 100644 index 0000000000000000000000000000000000000000..ef6a1bb283f4572cc455489b0d29ba2df4571940 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/bitwiseand.py @@ -0,0 +1,54 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np # type: ignore + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect +from onnx.numpy_helper import create_random_int + + +class BitwiseAnd(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "BitwiseAnd", + inputs=["x", "y"], + outputs=["bitwiseand"], + ) + + # 2d + x = create_random_int((3, 4), np.int32) + y = create_random_int((3, 4), np.int32) + z = np.bitwise_and(x, y) + expect(node, inputs=[x, y], outputs=[z], name="test_bitwise_and_i32_2d") + + # 3d + x = create_random_int((3, 4, 5), np.int16) + y = create_random_int((3, 4, 5), np.int16) + z = np.bitwise_and(x, y) + expect(node, inputs=[x, y], outputs=[z], name="test_bitwise_and_i16_3d") + + @staticmethod + def export_bitwiseand_broadcast() -> None: + node = onnx.helper.make_node( + "BitwiseAnd", + inputs=["x", "y"], + outputs=["bitwiseand"], + ) + + # 3d vs 1d + x = create_random_int((3, 4, 5), np.uint64) + y = create_random_int((5,), np.uint64) + z = np.bitwise_and(x, y) + expect( + node, inputs=[x, y], outputs=[z], name="test_bitwise_and_ui64_bcast_3v1d" + ) + + # 4d vs 3d + x = create_random_int((3, 4, 5, 6), np.uint8) + y = create_random_int((4, 5, 6), np.uint8) + z = np.bitwise_and(x, y) + expect(node, inputs=[x, y], outputs=[z], name="test_bitwise_and_ui8_bcast_4v3d") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/bitwisenot.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/bitwisenot.py new file mode 100644 index 0000000000000000000000000000000000000000..e20bfc14f72e76284c2d9175ca6a0d9da884d69c --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/bitwisenot.py @@ -0,0 +1,35 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np # type: ignore + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect +from onnx.numpy_helper import create_random_int + + +class BitwiseNot(Base): + 
@staticmethod + def export() -> None: + node = onnx.helper.make_node( + "BitwiseNot", + inputs=["x"], + outputs=["bitwise_not"], + ) + + # 2d + x = create_random_int((3, 4), np.int32) + y = np.bitwise_not(x) + expect(node, inputs=[x], outputs=[y], name="test_bitwise_not_2d") + + # 3d + x = create_random_int((3, 4, 5), np.uint16) + y = np.bitwise_not(x) + expect(node, inputs=[x], outputs=[y], name="test_bitwise_not_3d") + + # 4d + x = create_random_int((3, 4, 5, 6), np.uint8) + y = np.bitwise_not(x) + expect(node, inputs=[x], outputs=[y], name="test_bitwise_not_4d") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/bitwiseor.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/bitwiseor.py new file mode 100644 index 0000000000000000000000000000000000000000..55afc5420ecd44940853eb7f54b3079eddb6f3c4 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/bitwiseor.py @@ -0,0 +1,51 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np # type: ignore + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect +from onnx.numpy_helper import create_random_int + + +class BitwiseOr(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "BitwiseOr", + inputs=["x", "y"], + outputs=["bitwiseor"], + ) + # 2d + x = create_random_int((3, 4), np.int32) + y = create_random_int((3, 4), np.int32) + z = np.bitwise_or(x, y) + expect(node, inputs=[x, y], outputs=[z], name="test_bitwise_or_i32_2d") + + # 4d + x = create_random_int((3, 4, 5, 6), np.int8) + y = create_random_int((3, 4, 5, 6), np.int8) + z = np.bitwise_or(x, y) + expect(node, inputs=[x, y], outputs=[z], name="test_bitwise_or_i16_4d") + + @staticmethod + def export_bitwiseor_broadcast() -> None: + node = onnx.helper.make_node( + "BitwiseOr", + inputs=["x", "y"], + outputs=["bitwiseor"], + ) + + # 3d vs 1d + x = create_random_int((3, 4, 5), np.uint64) + y = create_random_int((5,), np.uint64) + z = np.bitwise_or(x, y) + expect(node, inputs=[x, y], outputs=[z], name="test_bitwise_or_ui64_bcast_3v1d") + + # 4d vs 3d + x = create_random_int((3, 4, 5, 6), np.uint8) + y = create_random_int((4, 5, 6), np.uint8) + z = np.bitwise_or(x, y) + expect(node, inputs=[x, y], outputs=[z], name="test_bitwise_or_ui8_bcast_4v3d") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/bitwisexor.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/bitwisexor.py new file mode 100644 index 0000000000000000000000000000000000000000..84f9b239b3500d986fe27a58d7be382d99239d38 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/bitwisexor.py @@ -0,0 +1,54 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np # type: ignore + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect +from onnx.numpy_helper import create_random_int + + +class BitwiseXor(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "BitwiseXor", + inputs=["x", "y"], + outputs=["bitwisexor"], + ) + + # 2d + x = create_random_int((3, 4), np.int32) + y = create_random_int((3, 4), np.int32) + z = np.bitwise_xor(x, y) + expect(node, inputs=[x, y], outputs=[z], name="test_bitwise_xor_i32_2d") + + # 3d + x = create_random_int((3, 4, 5), np.int16) + y = create_random_int((3, 4, 5), np.int16) + z = np.bitwise_xor(x, y) + expect(node, inputs=[x, y], outputs=[z], name="test_bitwise_xor_i16_3d") + 
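+ # The broadcast cases below rely on NumPy-style (multidirectional) broadcasting, + # which the ONNX bitwise operators follow: e.g. a (5,) operand is combined + # elementwise with every row of a (3, 4, 5) operand.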
+ @staticmethod + def export_bitwisexor_broadcast() -> None: + node = onnx.helper.make_node( + "BitwiseXor", + inputs=["x", "y"], + outputs=["bitwisexor"], + ) + + # 3d vs 1d + x = create_random_int((3, 4, 5), np.uint64) + y = create_random_int((5,), np.uint64) + z = np.bitwise_xor(x, y) + expect( + node, inputs=[x, y], outputs=[z], name="test_bitwise_xor_ui64_bcast_3v1d" + ) + + # 4d vs 3d + x = create_random_int((3, 4, 5, 6), np.uint8) + y = create_random_int((4, 5, 6), np.uint8) + z = np.bitwise_xor(x, y) + expect(node, inputs=[x, y], outputs=[z], name="test_bitwise_xor_ui8_bcast_4v3d") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/blackmanwindow.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/blackmanwindow.py new file mode 100644 index 0000000000000000000000000000000000000000..2b5cae682660a222354aff804ab9ff9778ddb2d3 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/blackmanwindow.py @@ -0,0 +1,46 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class BlackmanWindow(Base): + @staticmethod + def export() -> None: + # Test periodic window + node = onnx.helper.make_node( + "BlackmanWindow", + inputs=["x"], + outputs=["y"], + ) + size = np.int32(10) + a0 = 0.42 + a1 = -0.5 + a2 = 0.08 + y = a0 + y += a1 * np.cos(2 * np.pi * np.arange(0, size, 1, dtype=np.float32) / size) + y += a2 * np.cos(4 * np.pi * np.arange(0, size, 1, dtype=np.float32) / size) + expect(node, inputs=[size], outputs=[y], name="test_blackmanwindow") + + # Test symmetric window + node = onnx.helper.make_node( + "BlackmanWindow", inputs=["x"], outputs=["y"], periodic=0 + ) + size = np.int32(10) + a0 = 0.42 + a1 = -0.5 + a2 = 0.08 + y = a0 + y += a1 * np.cos( + 2 * np.pi * np.arange(0, size, 1, dtype=np.float32) / (size - 1) + ) + y += a2 * np.cos( + 4 * np.pi * np.arange(0, size, 1, dtype=np.float32) / (size - 1) + ) + expect(node, inputs=[size], outputs=[y], name="test_blackmanwindow_symmetric") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/cast.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/cast.py new file mode 100644 index 0000000000000000000000000000000000000000..6c10910e438fe8a39c6da0c4b6673f4f38abcbe7 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/cast.py @@ -0,0 +1,431 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import sys + +import numpy as np + +import onnx +import onnx.reference.custom_element_types as custom +from onnx import TensorProto, helper, subbyte +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect +from onnx.helper import ( + float32_to_float8e4m3, + float32_to_float8e5m2, + make_tensor, + tensor_dtype_to_field, +) +from onnx.numpy_helper import float8e4m3_to_float32, float8e5m2_to_float32 + + +class Cast(Base): + @staticmethod + def export() -> None: + shape = (3, 4) + test_cases = [ + ("FLOAT", "FLOAT16"), + ("FLOAT", "DOUBLE"), + ("FLOAT16", "FLOAT"), + ("FLOAT16", "DOUBLE"), + ("DOUBLE", "FLOAT"), + ("DOUBLE", "FLOAT16"), + ("FLOAT", "STRING"), + ("STRING", "FLOAT"), + ("FLOAT", "BFLOAT16"), + ("BFLOAT16", "FLOAT"), + ("FLOAT", "FLOAT8E4M3FN"), + ("FLOAT16", "FLOAT8E4M3FN"), + ("FLOAT", "FLOAT8E4M3FNUZ"), + ("FLOAT16", "FLOAT8E4M3FNUZ"), + ("FLOAT8E4M3FN", "FLOAT"), + ("FLOAT8E4M3FN", "FLOAT16"), + ("FLOAT8E4M3FNUZ", "FLOAT"), + 
("FLOAT8E4M3FNUZ", "FLOAT16"), + ("FLOAT", "FLOAT8E5M2"), + ("FLOAT16", "FLOAT8E5M2"), + ("FLOAT", "FLOAT8E5M2FNUZ"), + ("FLOAT16", "FLOAT8E5M2FNUZ"), + ("FLOAT8E5M2", "FLOAT"), + ("FLOAT8E5M2", "FLOAT16"), + ("FLOAT8E5M2FNUZ", "FLOAT"), + ("FLOAT8E5M2FNUZ", "FLOAT16"), + ("FLOAT", "UINT4"), + ("FLOAT16", "UINT4"), + ("FLOAT", "INT4"), + ("FLOAT16", "INT4"), + ("UINT4", "FLOAT"), + ("UINT4", "FLOAT16"), + ("UINT4", "UINT8"), + ("INT4", "FLOAT"), + ("INT4", "FLOAT16"), + ("INT4", "INT8"), + ] + + vect_float32_to_float8e4m3 = np.vectorize(float32_to_float8e4m3) + vect_float32_to_float8e5m2 = np.vectorize(float32_to_float8e5m2) + vect_float32_to_uint4 = np.vectorize( + lambda x: subbyte.float32_to_4bit_unpacked(x, signed=False) + ) + vect_float32_to_int4 = np.vectorize( + lambda x: subbyte.float32_to_4bit_unpacked(x, signed=True) + ) + + f8_types = ("FLOAT8E4M3FN", "FLOAT8E4M3FNUZ", "FLOAT8E5M2", "FLOAT8E5M2FNUZ") + + for from_type, to_type in test_cases: + input_type_proto = None + output_type_proto = None + if from_type == "BFLOAT16" or to_type == "BFLOAT16": + np_fp32 = np.array( + [ + "0.47892547", + "0.48033667", + "0.49968487", + "0.81910545", + "0.47031248", + "0.816468", + "0.21087195", + "0.7229038", + "NaN", + "INF", + "+INF", + "-INF", + ], + dtype=np.float32, + ) + little_endisan = sys.byteorder == "little" + np_uint16_view = np_fp32.view(dtype=np.uint16) + np_bfp16 = ( + np_uint16_view[1::2] if little_endisan else np_uint16_view[0::2] + ) + if to_type == "BFLOAT16": + assert from_type == "FLOAT" + input = np_fp32.reshape([3, 4]) + output = np_bfp16.reshape([3, 4]) + input_type_proto = onnx.helper.make_tensor_type_proto( + int(TensorProto.FLOAT), input.shape + ) + output_type_proto = onnx.helper.make_tensor_type_proto( + int(TensorProto.BFLOAT16), output.shape + ) + else: + assert to_type == "FLOAT" + input = np_bfp16.reshape([3, 4]) + # convert bfloat to FLOAT + np_fp32_zeros = np.zeros((len(np_bfp16) * 2,), dtype=np.uint16) + if little_endisan: + np_fp32_zeros[1::2] = np_bfp16 + else: + np_fp32_zeros[0::2] = np_bfp16 + np_fp32_from_bfloat = np_fp32_zeros.view(dtype=np.float32) + output = np_fp32_from_bfloat.reshape([3, 4]) + input_type_proto = onnx.helper.make_tensor_type_proto( + int(TensorProto.BFLOAT16), input.shape + ) + output_type_proto = onnx.helper.make_tensor_type_proto( + int(TensorProto.FLOAT), output.shape + ) + elif from_type in f8_types or to_type in f8_types: + np_fp32 = np.array( + [ + "0.47892547", + "0.48033667", + "0.49968487", + "0.81910545", + "0.47031248", + "0.7229038", + "1000000", + "1e-7", + "NaN", + "INF", + "+INF", + "-INF", + "-0.0000001", + "0.0000001", + "-1000000", + ], + dtype=np.float32, + ) + + if from_type == "FLOAT": + input_values = np_fp32 + input = make_tensor( + "x", TensorProto.FLOAT, [3, 5], np_fp32.tolist() + ) + elif from_type == "FLOAT16": + input_values = np_fp32.astype(np.float16).astype(np.float32) + input = make_tensor( + "x", TensorProto.FLOAT16, [3, 5], input_values.tolist() + ) + elif from_type == "FLOAT8E4M3FN": + input_values = float8e4m3_to_float32( + vect_float32_to_float8e4m3(np_fp32) + ) + input = make_tensor( + "x", TensorProto.FLOAT8E4M3FN, [3, 5], input_values.tolist() + ) + elif from_type == "FLOAT8E4M3FNUZ": + input_values = float8e4m3_to_float32( + vect_float32_to_float8e4m3(np_fp32, uz=True), uz=True + ) + input = make_tensor( + "x", TensorProto.FLOAT8E4M3FNUZ, [3, 5], input_values.tolist() + ) + elif from_type == "FLOAT8E5M2": + input_values = float8e5m2_to_float32( + vect_float32_to_float8e5m2(np_fp32) + ) + 
input = make_tensor( + "x", TensorProto.FLOAT8E5M2, [3, 5], input_values.tolist() + ) + elif from_type == "FLOAT8E5M2FNUZ": + input_values = float8e5m2_to_float32( + vect_float32_to_float8e5m2(np_fp32, fn=True, uz=True), + fn=True, + uz=True, + ) + input = make_tensor( + "x", TensorProto.FLOAT8E5M2FNUZ, [3, 5], input_values.tolist() + ) + else: + raise ValueError( + f"Conversion from {from_type} to {to_type} is not tested." + ) + + if to_type == "FLOAT8E4M3FN": + expected = float8e4m3_to_float32( + vect_float32_to_float8e4m3(input_values) + ) + elif to_type == "FLOAT8E4M3FNUZ": + expected = float8e4m3_to_float32( + vect_float32_to_float8e4m3(input_values, uz=True), uz=True + ) + elif to_type == "FLOAT8E5M2": + expected = float8e5m2_to_float32( + vect_float32_to_float8e5m2(input_values) + ) + elif to_type == "FLOAT8E5M2FNUZ": + expected = float8e5m2_to_float32( + vect_float32_to_float8e5m2(input_values, fn=True, uz=True), + fn=True, + uz=True, + ) + elif to_type == "FLOAT16": + expected = input_values.astype(np.float16).astype(np.float32) + elif to_type == "FLOAT": + expected = input_values + else: + raise ValueError( + f"Conversion from {from_type} to {to_type} is not tested." + ) + expected_tensor = make_tensor( + "x", getattr(TensorProto, to_type), [3, 5], expected.tolist() + ) + output = expected_tensor + elif from_type in ("UINT4", "INT4") or to_type in ("UINT4", "INT4"): + np_fp32 = np.arange(-9, 16).astype(np.float32) + input_shape = (5, 5) + if from_type == "FLOAT": + input_values = np_fp32 + input = make_tensor( + "x", TensorProto.FLOAT, input_shape, input_values.tolist() + ) + elif from_type == "FLOAT16": + input_values = np_fp32.astype(np.float16) + input = make_tensor( + "x", TensorProto.FLOAT16, input_shape, input_values.tolist() + ) + elif from_type == "UINT4": + input_values = vect_float32_to_uint4(np_fp32) + input = make_tensor( + "x", TensorProto.UINT4, input_shape, input_values.tolist() + ) + elif from_type == "INT4": + input_values = vect_float32_to_int4(np_fp32) + input = make_tensor( + "x", TensorProto.INT4, input_shape, input_values.tolist() + ) + else: + raise ValueError( + f"Conversion from {from_type} to {to_type} is not tested." + ) + if to_type == "UINT4": + expected = vect_float32_to_uint4(input_values).astype(custom.uint4) + elif to_type == "INT4": + expected = vect_float32_to_int4(input_values).astype(custom.int4) + elif to_type == "FLOAT16": + expected = input_values.astype(np.float16) + elif to_type == "FLOAT": + expected = input_values + elif to_type == "UINT8": + expected = input_values.astype(np.uint8) + elif to_type == "INT8": + expected = input_values.astype(np.int8) + else: + raise ValueError( + f"Conversion from {from_type} to {to_type} is not tested." 
+ ) + expected_tensor = make_tensor( + "y", getattr(TensorProto, to_type), input_shape, expected.tolist() + ) + output = expected_tensor + input_type_proto = onnx.helper.make_tensor_type_proto( + getattr(TensorProto, from_type), input_shape + ) + output_type_proto = onnx.helper.make_tensor_type_proto( + getattr(TensorProto, to_type), input_shape + ) + + elif from_type != "STRING": + input = np.random.random_sample(shape).astype( + helper.tensor_dtype_to_np_dtype(getattr(TensorProto, from_type)) + ) + if to_type == "STRING": + # Converting input to str, then give it object dtype for generating script + ss = [] + for i in input.flatten(): + s = str(i).encode("utf-8") + su = s.decode("utf-8") + ss.append(su) + + output = np.array(ss).astype(object).reshape([3, 4]) + else: + output = input.astype( + helper.tensor_dtype_to_np_dtype(getattr(TensorProto, to_type)) + ) + else: + input = np.array( + [ + "0.47892547", + "0.48033667", + "0.49968487", + "0.81910545", + "0.47031248", + "0.816468", + "0.21087195", + "0.7229038", + "NaN", + "INF", + "+INF", + "-INF", + ], + dtype=np.dtype(object), + ).reshape([3, 4]) + output = input.astype( + helper.tensor_dtype_to_np_dtype(getattr(TensorProto, to_type)) + ) + node = onnx.helper.make_node( + "Cast", + inputs=["input"], + outputs=["output"], + to=getattr(TensorProto, to_type), + ) + if input_type_proto and output_type_proto: + expect( + node, + inputs=[input], + outputs=[output], + name="test_cast_" + from_type + "_to_" + to_type, + input_type_protos=[input_type_proto], + output_type_protos=[output_type_proto], + ) + else: + expect( + node, + inputs=[input], + outputs=[output], + name="test_cast_" + from_type + "_to_" + to_type, + ) + + @staticmethod + def export_saturate_false() -> None: + test_cases = [ + ("FLOAT", "FLOAT8E4M3FN"), + ("FLOAT16", "FLOAT8E4M3FN"), + ("FLOAT", "FLOAT8E4M3FNUZ"), + ("FLOAT16", "FLOAT8E4M3FNUZ"), + ("FLOAT", "FLOAT8E5M2"), + ("FLOAT16", "FLOAT8E5M2"), + ("FLOAT", "FLOAT8E5M2FNUZ"), + ("FLOAT16", "FLOAT8E5M2FNUZ"), + ] + vect_float32_to_float8e4m3 = np.vectorize(float32_to_float8e4m3) + vect_float32_to_float8e5m2 = np.vectorize(float32_to_float8e5m2) + + for from_type, to_type in test_cases: + np_fp32 = np.array( + [ + "0.47892547", + "0.48033667", + "0.49968487", + "0.81910545", + "0.47031248", + "0.7229038", + "1000000", + "1e-7", + "NaN", + "INF", + "+INF", + "-INF", + "-0.0000001", + "0.0000001", + "-1000000", + ], + dtype=np.float32, + ) + + if from_type == "FLOAT": + input_values = np_fp32 + input = make_tensor("x", TensorProto.FLOAT, [3, 5], np_fp32.tolist()) + elif from_type == "FLOAT16": + input_values = np_fp32.astype(np.float16).astype(np.float32) + input = make_tensor( + "x", TensorProto.FLOAT16, [3, 5], input_values.tolist() + ) + else: + raise ValueError( + f"Conversion from {from_type} to {to_type} is not tested." + ) + + if to_type == "FLOAT8E4M3FN": + expected = vect_float32_to_float8e4m3(input_values, saturate=False) + elif to_type == "FLOAT8E4M3FNUZ": + expected = vect_float32_to_float8e4m3( + input_values, uz=True, saturate=False + ) + elif to_type == "FLOAT8E5M2": + expected = vect_float32_to_float8e5m2(input_values, saturate=False) + elif to_type == "FLOAT8E5M2FNUZ": + expected = vect_float32_to_float8e5m2( + input_values, fn=True, uz=True, saturate=False + ) + else: + raise ValueError( + f"Conversion from {from_type} to {to_type} is not tested." 
+ ) + + ivals = bytes([int(i) for i in expected]) + tensor = TensorProto() + tensor.data_type = getattr(TensorProto, to_type) + tensor.name = "x" + tensor.dims.extend([3, 5]) + field = tensor_dtype_to_field(tensor.data_type) + getattr(tensor, field).extend(ivals) + + output = tensor + + node = onnx.helper.make_node( + "Cast", + inputs=["input"], + outputs=["output"], + to=getattr(TensorProto, to_type), + saturate=0, + ) + expect( + node, + inputs=[input], + outputs=[output], + name="test_cast_no_saturate_" + from_type + "_to_" + to_type, + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/castlike.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/castlike.py new file mode 100644 index 0000000000000000000000000000000000000000..e85c45b78a941956eac2d410d2438572d4ac321d --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/castlike.py @@ -0,0 +1,242 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import sys + +import numpy as np + +import onnx +from onnx import TensorProto, helper +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect +from onnx.helper import float32_to_float8e4m3, float32_to_float8e5m2, make_tensor +from onnx.numpy_helper import float8e4m3_to_float32, float8e5m2_to_float32 + + +class CastLike(Base): + @staticmethod + def export() -> None: + shape = (3, 4) + test_cases = [ + ("FLOAT", "FLOAT16"), + ("FLOAT", "DOUBLE"), + ("FLOAT16", "FLOAT"), + ("FLOAT16", "DOUBLE"), + ("DOUBLE", "FLOAT"), + ("DOUBLE", "FLOAT16"), + ("FLOAT", "STRING"), + ("STRING", "FLOAT"), + ("FLOAT", "BFLOAT16"), + ("BFLOAT16", "FLOAT"), + ("FLOAT", "FLOAT8E4M3FN"), + ("FLOAT", "FLOAT8E4M3FNUZ"), + ("FLOAT8E4M3FN", "FLOAT"), + ("FLOAT8E4M3FNUZ", "FLOAT"), + ("FLOAT", "FLOAT8E5M2"), + ("FLOAT", "FLOAT8E5M2FNUZ"), + ("FLOAT8E5M2", "FLOAT"), + ("FLOAT8E5M2FNUZ", "FLOAT"), + ] + + vect_float32_to_float8e4m3 = np.vectorize(float32_to_float8e4m3) + vect_float32_to_float8e5m2 = np.vectorize(float32_to_float8e5m2) + + for from_type, to_type in test_cases: + input_type_proto = None + output_type_proto = None + if from_type == "BFLOAT16" or to_type == "BFLOAT16": + np_fp32 = np.array( + [ + "0.47892547", + "0.48033667", + "0.49968487", + "0.81910545", + "0.47031248", + "0.816468", + "0.21087195", + "0.7229038", + "NaN", + "INF", + "+INF", + "-INF", + ], + dtype=np.float32, + ) + little_endian = sys.byteorder == "little" + np_uint16_view = np_fp32.view(dtype=np.uint16) + np_bfp16 = ( + np_uint16_view[1::2] if little_endian else np_uint16_view[0::2] + ) + if to_type == "BFLOAT16": + assert from_type == "FLOAT" + input = np_fp32.reshape([3, 4]) + output = np_bfp16.reshape([3, 4]) + input_type_proto = onnx.helper.make_tensor_type_proto( + int(TensorProto.FLOAT), input.shape + ) + output_type_proto = onnx.helper.make_tensor_type_proto( + int(TensorProto.BFLOAT16), output.shape + ) + else: + assert to_type == "FLOAT" + input = np_bfp16.reshape([3, 4]) + # convert bfloat to FLOAT + np_fp32_zeros = np.zeros((len(np_bfp16) * 2,), dtype=np.uint16) + if little_endian: + np_fp32_zeros[1::2] = np_bfp16 + else: + np_fp32_zeros[0::2] = np_bfp16 + np_fp32_from_bfloat = np_fp32_zeros.view(dtype=np.float32) + output = np_fp32_from_bfloat.reshape([3, 4]) + input_type_proto = onnx.helper.make_tensor_type_proto( + int(TensorProto.BFLOAT16), input.shape + ) + output_type_proto = onnx.helper.make_tensor_type_proto( + int(TensorProto.FLOAT), output.shape + ) + like = output.flatten()[0:1] + elif from_type in ( 
"FLOAT8E4M3FN", + "FLOAT8E4M3FNUZ", + "FLOAT8E5M2", + "FLOAT8E5M2FNUZ", + ) or to_type in ( + "FLOAT8E4M3FN", + "FLOAT8E4M3FNUZ", + "FLOAT8E5M2", + "FLOAT8E5M2FNUZ", + ): + np_fp32 = np.array( + [ + "0.47892547", + "0.48033667", + "0.49968487", + "0.81910545", + "0.47031248", + "0.816468", + "0.21087195", + "0.7229038", + "NaN", + "INF", + "+INF", + "-INF", + ], + dtype=np.float32, + ) + if to_type == "FLOAT8E4M3FN": + expected = float8e4m3_to_float32( + vect_float32_to_float8e4m3(np_fp32) + ) + expected_tensor = make_tensor( + "x", TensorProto.FLOAT8E4M3FN, [3, 4], expected.tolist() + ) + like_tensor = make_tensor( + "x", TensorProto.FLOAT8E4M3FN, [1], expected[:1] + ) + elif to_type == "FLOAT8E4M3FNUZ": + expected = float8e4m3_to_float32( + vect_float32_to_float8e4m3(np_fp32, uz=True), uz=True + ) + expected_tensor = make_tensor( + "x", TensorProto.FLOAT8E4M3FNUZ, [3, 4], expected.tolist() + ) + like_tensor = make_tensor( + "x", TensorProto.FLOAT8E4M3FNUZ, [1], expected[:1] + ) + elif to_type == "FLOAT8E5M2": + expected = float8e5m2_to_float32( + vect_float32_to_float8e5m2(np_fp32) + ) + expected_tensor = make_tensor( + "x", TensorProto.FLOAT8E5M2, [3, 4], expected.tolist() + ) + like_tensor = make_tensor( + "x", TensorProto.FLOAT8E5M2, [1], expected[:1] + ) + elif to_type == "FLOAT8E5M2FNUZ": + expected = float8e5m2_to_float32( + vect_float32_to_float8e5m2(np_fp32, fn=True, uz=True), + fn=True, + uz=True, + ) + expected_tensor = make_tensor( + "x", TensorProto.FLOAT8E5M2FNUZ, [3, 4], expected.tolist() + ) + like_tensor = make_tensor( + "x", TensorProto.FLOAT8E5M2FNUZ, [1], expected[:1] + ) + if from_type == "FLOAT": + input = np_fp32.reshape((3, 4)) + output = expected_tensor + like = like_tensor + else: + assert to_type == "FLOAT" + input = expected_tensor + output = expected.reshape((3, 4)) + like = output.flatten()[:1] + elif from_type != "STRING": + input = np.random.random_sample(shape).astype( + helper.tensor_dtype_to_np_dtype(getattr(TensorProto, from_type)) + ) + if to_type == "STRING": + # Converting input to str, then give it object dtype for generating script + ss = [] + for i in input.flatten(): + s = str(i).encode("utf-8") + su = s.decode("utf-8") + ss.append(su) + + output = np.array(ss).astype(object).reshape([3, 4]) + else: + output = input.astype( + helper.tensor_dtype_to_np_dtype(getattr(TensorProto, to_type)) + ) + like = output.flatten()[0:1] + else: + input = np.array( + [ + "0.47892547", + "0.48033667", + "0.49968487", + "0.81910545", + "0.47031248", + "0.816468", + "0.21087195", + "0.7229038", + "NaN", + "INF", + "+INF", + "-INF", + ], + dtype=np.dtype(object), + ).reshape([3, 4]) + output = input.astype( + helper.tensor_dtype_to_np_dtype(getattr(TensorProto, to_type)) + ) + like = output.flatten()[0:1] + node = onnx.helper.make_node( + "CastLike", + inputs=["input", "like"], + outputs=["output"], + ) + if input_type_proto and output_type_proto: + like_type_proto = onnx.helper.make_tensor_type_proto( + output_type_proto.tensor_type.elem_type, like.shape + ) + + expect( + node, + inputs=[input, like], + outputs=[output], + name="test_castlike_" + from_type + "_to_" + to_type, + input_type_protos=[input_type_proto, like_type_proto], + output_type_protos=[output_type_proto], + ) + else: + expect( + node, + inputs=[input, like], + outputs=[output], + name="test_castlike_" + from_type + "_to_" + to_type, + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/ceil.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/ceil.py new file mode 100644 index 
0000000000000000000000000000000000000000..d87644ed644ec17db9fb87f2f54ac05961ce0c35 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/ceil.py @@ -0,0 +1,27 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class Ceil(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "Ceil", + inputs=["x"], + outputs=["y"], + ) + + x = np.array([-1.5, 1.2]).astype(np.float32) + y = np.ceil(x) # expected output [-1., 2.] + expect(node, inputs=[x], outputs=[y], name="test_ceil_example") + + x = np.random.randn(3, 4, 5).astype(np.float32) + y = np.ceil(x) + expect(node, inputs=[x], outputs=[y], name="test_ceil") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/celu.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/celu.py new file mode 100644 index 0000000000000000000000000000000000000000..1c2879df4d2b24b54ca647ef664f83372cce802f --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/celu.py @@ -0,0 +1,49 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class Celu(Base): + @staticmethod + def export() -> None: + alpha = 2.0 + node = onnx.helper.make_node( + "Celu", + inputs=["X"], + outputs=["Y"], + alpha=alpha, + ) + + input_data = np.array( + [ + [ + [[0.8439683], [0.5665144], [0.05836735]], + [[0.02916367], [0.12964272], [0.5060197]], + [[0.79538304], [0.9411346], [0.9546573]], + ], + [ + [[0.17730942], [0.46192095], [0.26480448]], + [[0.6746842], [0.01665257], [0.62473077]], + [[0.9240844], [0.9722341], [0.11965699]], + ], + [ + [[0.41356155], [0.9129373], [0.59330076]], + [[0.81929934], [0.7862604], [0.11799799]], + [[0.69248444], [0.54119414], [0.07513223]], + ], + ], + dtype=np.float32, + ) + + # Calculate expected output data + positive_input = np.maximum(0, input_data) + negative_input = np.minimum(0, alpha * (np.exp(input_data / alpha) - 1)) + expected_output = positive_input + negative_input + + expect(node, inputs=[input_data], outputs=[expected_output], name="test_celu") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/center_crop_pad.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/center_crop_pad.py new file mode 100644 index 0000000000000000000000000000000000000000..579232f5cebfd928ede810dd8d3794814bdf8b7d --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/center_crop_pad.py @@ -0,0 +1,129 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class CenterCropPad(Base): + @staticmethod + def export_center_crop_pad_crop() -> None: + node = onnx.helper.make_node( + "CenterCropPad", + inputs=["x", "shape"], + outputs=["y"], + ) + + # First dim is even diff, second is uneven + x = np.random.randn(20, 10, 3).astype(np.float32) + shape = np.array([10, 7, 3], dtype=np.int64) + y = x[5:15, 1:8, :] + + expect(node, inputs=[x, shape], outputs=[y], name="test_center_crop_pad_crop") + + @staticmethod + def export_center_crop_pad_pad() -> None: + node = onnx.helper.make_node( + "CenterCropPad", + inputs=["x", "shape"], + outputs=["y"], + ) + + # First dim is even diff, 
second is uneven + x = np.random.randn(10, 7, 3).astype(np.float32) + shape = np.array([20, 10, 3], dtype=np.int64) + y = np.zeros([20, 10, 3], dtype=np.float32) + y[5:15, 1:8, :] = x + + expect(node, inputs=[x, shape], outputs=[y], name="test_center_crop_pad_pad") + + @staticmethod + def export_center_crop_pad_crop_and_pad() -> None: + node = onnx.helper.make_node( + "CenterCropPad", + inputs=["x", "shape"], + outputs=["y"], + ) + + # Cropping on first dim, padding on second, third stays the same + x = np.random.randn(20, 8, 3).astype(np.float32) + shape = np.array([10, 10, 3], dtype=np.int64) + y = np.zeros([10, 10, 3], dtype=np.float32) + y[:, 1:9, :] = x[5:15, :, :] + + expect( + node, + inputs=[x, shape], + outputs=[y], + name="test_center_crop_pad_crop_and_pad", + ) + + @staticmethod + def export_center_crop_pad_crop_axes_hwc() -> None: + node = onnx.helper.make_node( + "CenterCropPad", + inputs=["x", "shape"], + outputs=["y"], + axes=[0, 1], + ) + + # Cropping on first dim, padding on second, third stays the same + x = np.random.randn(20, 8, 3).astype(np.float32) + shape = np.array([10, 9], dtype=np.int64) + y = np.zeros([10, 9, 3], dtype=np.float32) + y[:, :8, :] = x[5:15, :, :] + + expect( + node, + inputs=[x, shape], + outputs=[y], + name="test_center_crop_pad_crop_axes_hwc", + ) + + @staticmethod + def export_center_crop_pad_crop_negative_axes_hwc() -> None: + node = onnx.helper.make_node( + "CenterCropPad", + inputs=["x", "shape"], + outputs=["y"], + axes=[-3, -2], + ) + + # Cropping on first dim, padding on second, third stays the same + x = np.random.randn(20, 8, 3).astype(np.float32) + shape = np.array([10, 9], dtype=np.int64) + y = np.zeros([10, 9, 3], dtype=np.float32) + y[:, :8, :] = x[5:15, :, :] + + expect( + node, + inputs=[x, shape], + outputs=[y], + name="test_center_crop_pad_crop_negative_axes_hwc", + ) + + @staticmethod + def export_center_crop_pad_crop_axes_chw() -> None: + node = onnx.helper.make_node( + "CenterCropPad", + inputs=["x", "shape"], + outputs=["y"], + axes=[1, 2], + ) + + # Cropping on second dim, padding on third, first stays the same + x = np.random.randn(3, 20, 8).astype(np.float32) + shape = np.array([10, 9], dtype=np.int64) + y = np.zeros([3, 10, 9], dtype=np.float32) + y[:, :, :8] = x[:, 5:15, :] + + expect( + node, + inputs=[x, shape], + outputs=[y], + name="test_center_crop_pad_crop_axes_chw", + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/clip.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/clip.py new file mode 100644 index 0000000000000000000000000000000000000000..e7046ff02d2bd3d7d1e2346efc788c790bd276b0 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/clip.py @@ -0,0 +1,132 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class Clip(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "Clip", + inputs=["x", "min", "max"], + outputs=["y"], + ) + + x = np.array([-2, 0, 2]).astype(np.float32) + min_val = np.float32(-1) + max_val = np.float32(1) + y = np.clip(x, min_val, max_val) # expected output [-1., 0., 1.] 
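+ # Note: since opset 11, Clip takes "min" and "max" as optional inputs rather than + # attributes, which is why they are passed as tensors alongside "x" here.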
+ expect( + node, inputs=[x, min_val, max_val], outputs=[y], name="test_clip_example" + ) + + x = np.random.randn(3, 4, 5).astype(np.float32) + y = np.clip(x, min_val, max_val) + expect(node, inputs=[x, min_val, max_val], outputs=[y], name="test_clip") + node = onnx.helper.make_node( + "Clip", + inputs=["x", "min", "max"], + outputs=["y"], + ) + + min_val = np.float32(-5) + max_val = np.float32(5) + + x = np.array([-1, 0, 1]).astype(np.float32) + y = np.array([-1, 0, 1]).astype(np.float32) + expect( + node, inputs=[x, min_val, max_val], outputs=[y], name="test_clip_inbounds" + ) + + x = np.array([-6, 0, 6]).astype(np.float32) + y = np.array([-5, 0, 5]).astype(np.float32) + expect( + node, inputs=[x, min_val, max_val], outputs=[y], name="test_clip_outbounds" + ) + + x = np.array([-1, 0, 6]).astype(np.float32) + y = np.array([-1, 0, 5]).astype(np.float32) + expect( + node, + inputs=[x, min_val, max_val], + outputs=[y], + name="test_clip_splitbounds", + ) + + @staticmethod + def export_clip_default() -> None: + node = onnx.helper.make_node( + "Clip", + inputs=["x", "min"], + outputs=["y"], + ) + min_val = np.float32(0) + x = np.random.randn(3, 4, 5).astype(np.float32) + y = np.clip(x, min_val, np.inf) + expect(node, inputs=[x, min_val], outputs=[y], name="test_clip_default_min") + + no_min = "" # optional input, not supplied + node = onnx.helper.make_node( + "Clip", + inputs=["x", no_min, "max"], + outputs=["y"], + ) + max_val = np.float32(0) + x = np.random.randn(3, 4, 5).astype(np.float32) + y = np.clip(x, -np.inf, max_val) + expect(node, inputs=[x, max_val], outputs=[y], name="test_clip_default_max") + + no_max = "" # optional input, not supplied + node = onnx.helper.make_node( + "Clip", + inputs=["x", no_min, no_max], + outputs=["y"], + ) + + x = np.array([-1, 0, 1]).astype(np.float32) + y = np.array([-1, 0, 1]).astype(np.float32) + expect(node, inputs=[x], outputs=[y], name="test_clip_default_inbounds") + + @staticmethod + def export_clip_default_int8() -> None: + node = onnx.helper.make_node( + "Clip", + inputs=["x", "min"], + outputs=["y"], + ) + min_val = np.int8(0) + x = np.random.randn(3, 4, 5).astype(np.int8) + y = np.clip(x, min_val, np.iinfo(np.int8).max) + expect( + node, inputs=[x, min_val], outputs=[y], name="test_clip_default_int8_min" + ) + + no_min = "" # optional input, not supplied + node = onnx.helper.make_node( + "Clip", + inputs=["x", no_min, "max"], + outputs=["y"], + ) + max_val = np.int8(0) + x = np.random.randn(3, 4, 5).astype(np.int8) + y = np.clip(x, np.iinfo(np.int8).min, max_val) + expect( + node, inputs=[x, max_val], outputs=[y], name="test_clip_default_int8_max" + ) + + no_max = "" # optional input, not supplied + node = onnx.helper.make_node( + "Clip", + inputs=["x", no_min, no_max], + outputs=["y"], + ) + + x = np.array([-1, 0, 1]).astype(np.int8) + y = np.array([-1, 0, 1]).astype(np.int8) + expect(node, inputs=[x], outputs=[y], name="test_clip_default_int8_inbounds") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/col2im.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/col2im.py new file mode 100644 index 0000000000000000000000000000000000000000..e9e5564fd6113e880cd2b3b1e2bb1382b031fddd --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/col2im.py @@ -0,0 +1,349 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class Col2Im(Base): + """Col2Im 
operator with N-dimension support + + The tests below can be reproduced in Python using https://github.com/f-dangel/unfoldNd/ + """ + + @staticmethod + def export() -> None: + input = np.array( + [ + [ + [1.0, 6.0, 11.0, 16.0, 21.0], # (1, 5, 5) + [2.0, 7.0, 12.0, 17.0, 22.0], + [3.0, 8.0, 13.0, 18.0, 23.0], + [4.0, 9.0, 14.0, 19.0, 24.0], + [5.0, 0.0, 15.0, 20.0, 25.0], + ] + ] + ).astype(np.float32) + + image_shape = np.array([5, 5]).astype(np.int64) + block_shape = np.array([1, 5]).astype(np.int64) + node = onnx.helper.make_node( + "Col2Im", ["input", "image_shape", "block_shape"], ["output"] + ) + + output = np.array( + [ + [ + [ + [1.0, 2.0, 3.0, 4.0, 5.0], # (1, 1, 5, 5) + [6.0, 7.0, 8.0, 9.0, 0.0], + [11.0, 12.0, 13.0, 14.0, 15.0], + [16.0, 17.0, 18.0, 19.0, 20.0], + [21.0, 22.0, 23.0, 24.0, 25.0], + ] + ] + ] + ).astype(np.float32) + + expect( + node, + inputs=[input, image_shape, block_shape], + outputs=[output], + name="test_col2im", + ) + + @staticmethod + def export_col2im_strides() -> None: + input = np.array( + [ + [ + [0.0, 0.0, 0.0, 0.0], # (1, 9, 4) + [1.0, 1.0, 1.0, 1.0], + [1.0, 1.0, 1.0, 1.0], + [1.0, 1.0, 1.0, 1.0], + [0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0], + [1.0, 1.0, 1.0, 1.0], + [0.0, 0.0, 0.0, 0.0], + ] + ] + ).astype(np.float32) + image_shape = np.array([5, 5]).astype(np.int64) + block_shape = np.array([3, 3]).astype(np.int64) + + output = np.array( + [ + [ + [ + [0.0, 1.0, 1.0, 1.0, 1.0], # (1, 1, 5, 5) + [1.0, 0.0, 1.0, 0.0, 0.0], + [0.0, 2.0, 1.0, 2.0, 1.0], + [1.0, 0.0, 1.0, 0.0, 0.0], + [0.0, 1.0, 0.0, 1.0, 0.0], + ] + ] + ] + ).astype(np.float32) + + node = onnx.helper.make_node( + "Col2Im", + ["input", "image_shape", "block_shape"], + ["output"], + strides=[2, 2], + ) + expect( + node, + inputs=[input, image_shape, block_shape], + outputs=[output], + name="test_col2im_strides", + ) + + @staticmethod + def export_col2im_pads() -> None: + input = np.array( + [ + [ + [ + 1.0, + 6.0, + 11.0, + 16.0, + 21.0, + 26, + 31, + 36, + 41, + 46, + 51, + 56, + 61, + 66, + 71, + ], # (1, 5, 15) + [ + 2.0, + 7.0, + 12.0, + 17.0, + 22.0, + 27, + 32, + 37, + 42, + 47, + 52, + 57, + 62, + 67, + 72, + ], + [ + 3.0, + 8.0, + 13.0, + 18.0, + 23.0, + 28, + 33, + 38, + 43, + 48, + 53, + 58, + 63, + 68, + 73, + ], + [ + 4.0, + 9.0, + 14.0, + 19.0, + 24.0, + 29, + 34, + 39, + 44, + 49, + 54, + 59, + 64, + 69, + 74, + ], + [ + 5.0, + 10.0, + 15.0, + 20.0, + 25.0, + 30, + 35, + 40, + 45, + 50, + 55, + 60, + 65, + 70, + 75, + ], + ] + ] + ).astype(np.float32) + image_shape = np.array([5, 5]).astype(np.int64) + block_shape = np.array([1, 5]).astype(np.int64) + + output = np.array( + [ + [ + [ + [8.0, 21.0, 24.0, 27.0, 24.0], # (1, 1, 5, 5) + [38.0, 66.0, 69.0, 72.0, 54.0], + [68.0, 111.0, 114.0, 117.0, 84.0], + [98.0, 156.0, 159.0, 162.0, 114.0], + [128.0, 201.0, 204.0, 207.0, 144.0], + ] + ] + ] + ).astype(np.float32) + + node = onnx.helper.make_node( + "Col2Im", + ["input", "image_shape", "block_shape"], + ["output"], + pads=[0, 1, 0, 1], + ) + expect( + node, + inputs=[input, image_shape, block_shape], + outputs=[output], + name="test_col2im_pads", + ) + + @staticmethod + def export_col2im_dilations() -> None: + input = np.array( + [ + [ + [1.0, 5.0, 9.0, 13.0, 17], # (1, 4, 5) + [2.0, 6.0, 10.0, 14.0, 18], + [3.0, 7.0, 11.0, 15.0, 19], + [4.0, 8.0, 12.0, 16.0, 20], + ] + ] + ).astype(np.float32) + image_shape = np.array([6, 6]).astype(np.int64) + block_shape = np.array([2, 2]).astype(np.int64) + + output = np.array( + [ + [ + [ + [1.0, 0.0, 0.0, 0.0, 
0.0, 2.0], # (1, 1, 6, 6) + [8.0, 0.0, 0.0, 0.0, 0.0, 10.0], + [16.0, 0.0, 0.0, 0.0, 0.0, 18.0], + [24.0, 0.0, 0.0, 0.0, 0.0, 26.0], + [32.0, 0.0, 0.0, 0.0, 0.0, 34.0], + [19.0, 0.0, 0.0, 0.0, 0.0, 20.0], + ] + ] + ] + ).astype(np.float32) + + node = onnx.helper.make_node( + "Col2Im", + ["input", "image_shape", "block_shape"], + ["output"], + dilations=[1, 5], + ) + expect( + node, + inputs=[input, image_shape, block_shape], + outputs=[output], + name="test_col2im_dilations", + ) + + @staticmethod + def export_col2im_5d() -> None: + input = np.array( + [ + [ + [1, 6, 11, 16, 21, 26, 31, 36, 41, 46, 51, 56], # (1, 10, 12) + [2, 7, 12, 17, 22, 27, 32, 37, 42, 47, 52, 57], + [3, 8, 13, 18, 23, 28, 33, 38, 43, 48, 53, 58], + [4, 9, 14, 19, 24, 29, 34, 39, 44, 49, 54, 59], + [5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60], + [61, 66, 71, 76, 81, 86, 91, 96, 101, 106, 111, 116], + [62, 67, 72, 77, 82, 87, 92, 97, 102, 107, 112, 117], + [63, 68, 73, 78, 83, 88, 93, 98, 103, 108, 113, 118], + [64, 69, 74, 79, 84, 89, 94, 99, 104, 109, 114, 119], + [65, 70, 75, 80, 85, 90, 95, 100, 105, 110, 115, 120], + ] + ] + ).astype(np.float32) + image_shape = np.array([3, 4, 5]).astype(np.int64) + block_shape = np.array([1, 1, 5]).astype(np.int64) + + output = np.array( + [ + [ + [ + [ + [1, 2, 3, 4, 5], # (1, 2, 3, 4, 5) + [6, 7, 8, 9, 10], + [11, 12, 13, 14, 15], + [16, 17, 18, 19, 20], + ], + [ + [21, 22, 23, 24, 25], + [26, 27, 28, 29, 30], + [31, 32, 33, 34, 35], + [36, 37, 38, 39, 40], + ], + [ + [41, 42, 43, 44, 45], + [46, 47, 48, 49, 50], + [51, 52, 53, 54, 55], + [56, 57, 58, 59, 60], + ], + ], + [ + [ + [61, 62, 63, 64, 65], + [66, 67, 68, 69, 70], + [71, 72, 73, 74, 75], + [76, 77, 78, 79, 80], + ], + [ + [81, 82, 83, 84, 85], + [86, 87, 88, 89, 90], + [91, 92, 93, 94, 95], + [96, 97, 98, 99, 100], + ], + [ + [101, 102, 103, 104, 105], + [106, 107, 108, 109, 110], + [111, 112, 113, 114, 115], + [116, 117, 118, 119, 120], + ], + ], + ] + ] + ).astype(np.float32) + + node = onnx.helper.make_node( + "Col2Im", ["input", "image_shape", "block_shape"], ["output"] + ) + expect( + node, + inputs=[input, image_shape, block_shape], + outputs=[output], + name="test_col2im_5d", + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/compress.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/compress.py new file mode 100644 index 0000000000000000000000000000000000000000..65551575d8a1f4f2007d07451cff813a22dc119e --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/compress.py @@ -0,0 +1,98 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class Compress(Base): + @staticmethod + def export_compress_0() -> None: + node = onnx.helper.make_node( + "Compress", + inputs=["input", "condition"], + outputs=["output"], + axis=0, + ) + input = np.array([[1, 2], [3, 4], [5, 6]]).astype(np.float32) + condition = np.array([0, 1, 1]) + output = np.compress(condition, input, axis=0) + # print(output) + # [[ 3. 4.] + # [ 5. 
6.]] + + expect( + node, + inputs=[input, condition.astype(bool)], + outputs=[output], + name="test_compress_0", + ) + + @staticmethod + def export_compress_1() -> None: + node = onnx.helper.make_node( + "Compress", + inputs=["input", "condition"], + outputs=["output"], + axis=1, + ) + input = np.array([[1, 2], [3, 4], [5, 6]]).astype(np.float32) + condition = np.array([0, 1]) + output = np.compress(condition, input, axis=1) + # print(output) + # [[ 2.] + # [ 4.] + # [ 6.]] + + expect( + node, + inputs=[input, condition.astype(bool)], + outputs=[output], + name="test_compress_1", + ) + + @staticmethod + def export_compress_default_axis() -> None: + node = onnx.helper.make_node( + "Compress", + inputs=["input", "condition"], + outputs=["output"], + ) + input = np.array([[1, 2], [3, 4], [5, 6]]).astype(np.float32) + condition = np.array([0, 1, 0, 0, 1]) + output = np.compress(condition, input) + # print(output) + # [ 2., 5.] + + expect( + node, + inputs=[input, condition.astype(bool)], + outputs=[output], + name="test_compress_default_axis", + ) + + @staticmethod + def export_compress_negative_axis() -> None: + node = onnx.helper.make_node( + "Compress", + inputs=["input", "condition"], + outputs=["output"], + axis=-1, + ) + input = np.array([[1, 2], [3, 4], [5, 6]]).astype(np.float32) + condition = np.array([0, 1]) + output = np.compress(condition, input, axis=-1) + # print(output) + # [[ 2.] + # [ 4.] + # [ 6.]] + expect( + node, + inputs=[input, condition.astype(bool)], + outputs=[output], + name="test_compress_negative_axis", + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/concat.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/concat.py new file mode 100644 index 0000000000000000000000000000000000000000..1cc1702e0cff042ff0135cf92bd75aa2689d112f --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/concat.py @@ -0,0 +1,52 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +from typing import Any, Dict, Sequence + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class Concat(Base): + @staticmethod + def export() -> None: + test_cases: Dict[str, Sequence[Any]] = { + "1d": ([1, 2], [3, 4]), + "2d": ([[1, 2], [3, 4]], [[5, 6], [7, 8]]), + "3d": ( + [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], + [[[9, 10], [11, 12]], [[13, 14], [15, 16]]], + ), + } + + for test_case, values_ in test_cases.items(): + values = [np.asarray(v, dtype=np.float32) for v in values_] + for i in range(len(values[0].shape)): + in_args = ["value" + str(k) for k in range(len(values))] + node = onnx.helper.make_node( + "Concat", inputs=list(in_args), outputs=["output"], axis=i + ) + output = np.concatenate(values, i) + expect( + node, + inputs=list(values), + outputs=[output], + name="test_concat_" + test_case + "_axis_" + str(i), + ) + + for i in range(-len(values[0].shape), 0): + in_args = ["value" + str(k) for k in range(len(values))] + node = onnx.helper.make_node( + "Concat", inputs=list(in_args), outputs=["output"], axis=i + ) + output = np.concatenate(values, i) + expect( + node, + inputs=list(values), + outputs=[output], + name="test_concat_" + test_case + "_axis_negative_" + str(abs(i)), + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/constant.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/constant.py new file mode 100644 index 0000000000000000000000000000000000000000..30f81e12ceb96421fdf17814ef6d53208f0c9b5e --- /dev/null +++ 
b/MLPY/Lib/site-packages/onnx/backend/test/case/node/constant.py @@ -0,0 +1,28 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class Constant(Base): + @staticmethod + def export() -> None: + values = np.random.randn(5, 5).astype(np.float32) + node = onnx.helper.make_node( + "Constant", + inputs=[], + outputs=["values"], + value=onnx.helper.make_tensor( + name="const_tensor", + data_type=onnx.TensorProto.FLOAT, + dims=values.shape, + vals=values.flatten().astype(float), + ), + ) + + expect(node, inputs=[], outputs=[values], name="test_constant") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/constantofshape.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/constantofshape.py new file mode 100644 index 0000000000000000000000000000000000000000..dbb768880c9db6c5953f917d26c47f2cc34d5722 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/constantofshape.py @@ -0,0 +1,63 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class ConstantOfShape(Base): + @staticmethod + def export_float_ones() -> None: + x = np.array([4, 3, 2]).astype(np.int64) + tensor_value = onnx.helper.make_tensor( + "value", onnx.TensorProto.FLOAT, [1], [1] + ) + node = onnx.helper.make_node( + "ConstantOfShape", + inputs=["x"], + outputs=["y"], + value=tensor_value, + ) + + y = np.ones(x, dtype=np.float32) + expect(node, inputs=[x], outputs=[y], name="test_constantofshape_float_ones") + + @staticmethod + def export_int32_zeros() -> None: + x = np.array([10, 6]).astype(np.int64) + tensor_value = onnx.helper.make_tensor( + "value", onnx.TensorProto.INT32, [1], [0] + ) + node = onnx.helper.make_node( + "ConstantOfShape", + inputs=["x"], + outputs=["y"], + value=tensor_value, + ) + y = np.zeros(x, dtype=np.int32) + expect(node, inputs=[x], outputs=[y], name="test_constantofshape_int_zeros") + + @staticmethod + def export_int32_shape_zero() -> None: + x = np.array( + [ + 0, + ] + ).astype(np.int64) + tensor_value = onnx.helper.make_tensor( + "value", onnx.TensorProto.INT32, [1], [0] + ) + node = onnx.helper.make_node( + "ConstantOfShape", + inputs=["x"], + outputs=["y"], + value=tensor_value, + ) + y = np.zeros(x, dtype=np.int32) + expect( + node, inputs=[x], outputs=[y], name="test_constantofshape_int_shape_zero" + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/conv.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/conv.py new file mode 100644 index 0000000000000000000000000000000000000000..a219a9ae4ac9cf48215c28b2be352efaa807564e --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/conv.py @@ -0,0 +1,256 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class Conv(Base): + @staticmethod + def export() -> None: + x = np.array( + [ + [ + [ + [0.0, 1.0, 2.0, 3.0, 4.0], # (1, 1, 5, 5) input tensor + [5.0, 6.0, 7.0, 8.0, 9.0], + [10.0, 11.0, 12.0, 13.0, 14.0], + [15.0, 16.0, 17.0, 18.0, 19.0], + [20.0, 21.0, 22.0, 23.0, 24.0], + ] + ] + ] + ).astype(np.float32) + W = np.array( + [ + [ + [ + [1.0, 1.0, 1.0], # (1, 1, 3, 3) tensor for 
convolution weights + [1.0, 1.0, 1.0], + [1.0, 1.0, 1.0], + ] + ] + ] + ).astype(np.float32) + + # Convolution with padding + node_with_padding = onnx.helper.make_node( + "Conv", + inputs=["x", "W"], + outputs=["y"], + kernel_shape=[3, 3], + # Default values for other attributes: strides=[1, 1], dilations=[1, 1], groups=1 + pads=[1, 1, 1, 1], + ) + y_with_padding = np.array( + [ + [ + [ + [12.0, 21.0, 27.0, 33.0, 24.0], # (1, 1, 5, 5) output tensor + [33.0, 54.0, 63.0, 72.0, 51.0], + [63.0, 99.0, 108.0, 117.0, 81.0], + [93.0, 144.0, 153.0, 162.0, 111.0], + [72.0, 111.0, 117.0, 123.0, 84.0], + ] + ] + ] + ).astype(np.float32) + expect( + node_with_padding, + inputs=[x, W], + outputs=[y_with_padding], + name="test_basic_conv_with_padding", + ) + + # Convolution without padding + node_without_padding = onnx.helper.make_node( + "Conv", + inputs=["x", "W"], + outputs=["y"], + kernel_shape=[3, 3], + # Default values for other attributes: strides=[1, 1], dilations=[1, 1], groups=1 + pads=[0, 0, 0, 0], + ) + y_without_padding = np.array( + [ + [ + [ + [54.0, 63.0, 72.0], # (1, 1, 3, 3) output tensor + [99.0, 108.0, 117.0], + [144.0, 153.0, 162.0], + ] + ] + ] + ).astype(np.float32) + expect( + node_without_padding, + inputs=[x, W], + outputs=[y_without_padding], + name="test_basic_conv_without_padding", + ) + + @staticmethod + def export_conv_with_strides() -> None: + x = np.array( + [ + [ + [ + [0.0, 1.0, 2.0, 3.0, 4.0], # (1, 1, 7, 5) input tensor + [5.0, 6.0, 7.0, 8.0, 9.0], + [10.0, 11.0, 12.0, 13.0, 14.0], + [15.0, 16.0, 17.0, 18.0, 19.0], + [20.0, 21.0, 22.0, 23.0, 24.0], + [25.0, 26.0, 27.0, 28.0, 29.0], + [30.0, 31.0, 32.0, 33.0, 34.0], + ] + ] + ] + ).astype(np.float32) + W = np.array( + [ + [ + [ + [1.0, 1.0, 1.0], # (1, 1, 3, 3) tensor for convolution weights + [1.0, 1.0, 1.0], + [1.0, 1.0, 1.0], + ] + ] + ] + ).astype(np.float32) + + # Convolution with strides=2 and padding + node_with_padding = onnx.helper.make_node( + "Conv", + inputs=["x", "W"], + outputs=["y"], + kernel_shape=[3, 3], + pads=[1, 1, 1, 1], + strides=[ + 2, + 2, + ], # Default values for other attributes: dilations=[1, 1], groups=1 + ) + y_with_padding = np.array( + [ + [ + [ + [12.0, 27.0, 24.0], # (1, 1, 4, 3) output tensor + [63.0, 108.0, 81.0], + [123.0, 198.0, 141.0], + [112.0, 177.0, 124.0], + ] + ] + ] + ).astype(np.float32) + expect( + node_with_padding, + inputs=[x, W], + outputs=[y_with_padding], + name="test_conv_with_strides_padding", + ) + + # Convolution with strides=2 and no padding + node_without_padding = onnx.helper.make_node( + "Conv", + inputs=["x", "W"], + outputs=["y"], + kernel_shape=[3, 3], + pads=[0, 0, 0, 0], + strides=[ + 2, + 2, + ], # Default values for other attributes: dilations=[1, 1], groups=1 + ) + y_without_padding = np.array( + [ + [ + [ + [54.0, 72.0], # (1, 1, 3, 2) output tensor + [144.0, 162.0], + [234.0, 252.0], + ] + ] + ] + ).astype(np.float32) + expect( + node_without_padding, + inputs=[x, W], + outputs=[y_without_padding], + name="test_conv_with_strides_no_padding", + ) + + # Convolution with strides=2 and padding only along one dimension (the H dimension in NxCxHxW tensor) + node_with_asymmetric_padding = onnx.helper.make_node( + "Conv", + inputs=["x", "W"], + outputs=["y"], + kernel_shape=[3, 3], + pads=[1, 0, 1, 0], + strides=[ + 2, + 2, + ], # Default values for other attributes: dilations=[1, 1], groups=1 + ) + y_with_asymmetric_padding = np.array( + [ + [ + [ + [21.0, 33.0], # (1, 1, 4, 2) output tensor + [99.0, 117.0], + [189.0, 207.0], + [171.0, 183.0], + ] + ] + ] 
+ ).astype(np.float32) + expect( + node_with_asymmetric_padding, + inputs=[x, W], + outputs=[y_with_asymmetric_padding], + name="test_conv_with_strides_and_asymmetric_padding", + ) + + @staticmethod + def export_conv_with_autopad_same() -> None: + x = np.array( + [ + [ + [ + [0.0, 1.0, 2.0, 3.0, 4.0], # (1, 1, 5, 5) input tensor + [5.0, 6.0, 7.0, 8.0, 9.0], + [10.0, 11.0, 12.0, 13.0, 14.0], + [15.0, 16.0, 17.0, 18.0, 19.0], + [20.0, 21.0, 22.0, 23.0, 24.0], + ] + ] + ] + ).astype(np.float32) + W = np.array( + [ + [ + [ + [1.0, 1.0, 1.0], # (1, 1, 3, 3) tensor for convolution weights + [1.0, 1.0, 1.0], + [1.0, 1.0, 1.0], + ] + ] + ] + ).astype(np.float32) + + # Convolution with auto_pad='SAME_LOWER' and strides=2 + node = onnx.helper.make_node( + "Conv", + inputs=["x", "W"], + outputs=["y"], + auto_pad="SAME_LOWER", + kernel_shape=[3, 3], + strides=[2, 2], + ) + y = np.array( + [[[[12.0, 27.0, 24.0], [63.0, 108.0, 81.0], [72.0, 117.0, 84.0]]]] + ).astype(np.float32) + expect(node, inputs=[x, W], outputs=[y], name="test_conv_with_autopad_same") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/convinteger.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/convinteger.py new file mode 100644 index 0000000000000000000000000000000000000000..90faca43eba1e776344fcd22db4e5cba7220b3a8 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/convinteger.py @@ -0,0 +1,66 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class ConvInteger(Base): + @staticmethod + def export_without_padding() -> None: + x = ( + np.array([2, 3, 4, 5, 6, 7, 8, 9, 10]) + .astype(np.uint8) + .reshape((1, 1, 3, 3)) + ) + x_zero_point = np.uint8(1) + w = np.array([1, 1, 1, 1]).astype(np.uint8).reshape((1, 1, 2, 2)) + + y = np.array([12, 16, 24, 28]).astype(np.int32).reshape(1, 1, 2, 2) + + # ConvInteger without padding + convinteger_node = onnx.helper.make_node( + "ConvInteger", inputs=["x", "w", "x_zero_point"], outputs=["y"] + ) + + expect( + convinteger_node, + inputs=[x, w, x_zero_point], + outputs=[y], + name="test_convinteger_without_padding", + ) + + @staticmethod + def export_with_padding() -> None: + x = ( + np.array([2, 3, 4, 5, 6, 7, 8, 9, 10]) + .astype(np.uint8) + .reshape((1, 1, 3, 3)) + ) + x_zero_point = np.uint8(1) + w = np.array([1, 1, 1, 1]).astype(np.uint8).reshape((1, 1, 2, 2)) + + y = ( + np.array([1, 3, 5, 3, 5, 12, 16, 9, 11, 24, 28, 15, 7, 15, 17, 9]) + .astype(np.int32) + .reshape((1, 1, 4, 4)) + ) + + # ConvInteger with padding + convinteger_node_with_padding = onnx.helper.make_node( + "ConvInteger", + inputs=["x", "w", "x_zero_point"], + outputs=["y"], + pads=[1, 1, 1, 1], + ) + + expect( + convinteger_node_with_padding, + inputs=[x, w, x_zero_point], + outputs=[y], + name="test_convinteger_with_padding", + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/convtranspose.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/convtranspose.py new file mode 100644 index 0000000000000000000000000000000000000000..33f9bba2fa6f46608195979b49ab43221f8c42ce --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/convtranspose.py @@ -0,0 +1,397 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class 
ConvTranspose(Base): + @staticmethod + def export() -> None: + x = np.array( + [[[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]]] # (1, 1, 3, 3) + ).astype(np.float32) + + W = np.array( + [ + [ + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], # (1, 2, 3, 3) + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], + ] + ] + ).astype(np.float32) + + node = onnx.helper.make_node("ConvTranspose", ["X", "W"], ["Y"]) + + y = np.array( + [ + [ + [ + [0.0, 1.0, 3.0, 3.0, 2.0], # (1, 2, 5, 5) + [3.0, 8.0, 15.0, 12.0, 7.0], + [9.0, 21.0, 36.0, 27.0, 15.0], + [9.0, 20.0, 33.0, 24.0, 13.0], + [6.0, 13.0, 21.0, 15.0, 8.0], + ], + [ + [0.0, 1.0, 3.0, 3.0, 2.0], + [3.0, 8.0, 15.0, 12.0, 7.0], + [9.0, 21.0, 36.0, 27.0, 15.0], + [9.0, 20.0, 33.0, 24.0, 13.0], + [6.0, 13.0, 21.0, 15.0, 8.0], + ], + ] + ] + ).astype(np.float32) + + expect(node, inputs=[x, W], outputs=[y], name="test_convtranspose") + + @staticmethod + def export_convtranspose_1d() -> None: + x = np.array([[[0.0, 1.0, 2.0]]]).astype(np.float32) # (1, 1, 3) + + W = np.array([[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]]).astype( # (1, 2, 3) + np.float32 + ) + + node = onnx.helper.make_node("ConvTranspose", ["X", "W"], ["Y"]) + + y = np.array( + [[[0.0, 1.0, 3.0, 3.0, 2.0], [0.0, 1.0, 3.0, 3.0, 2.0]]] # (1, 2, 5) + ).astype(np.float32) + + expect(node, inputs=[x, W], outputs=[y], name="test_convtranspose_1d") + + @staticmethod + def export_convtranspose_3d() -> None: + x = np.array( + [ + [ + [ + [ + [0.0, 1.0, 2.0, 3.0, 4.0], # (1, 1, 3, 4, 5) + [5.0, 6.0, 7.0, 8.0, 9.0], + [10.0, 11.0, 12.0, 13.0, 14.0], + [15.0, 16.0, 17.0, 18.0, 19.0], + ], + [ + [20.0, 21.0, 22.0, 23.0, 24.0], + [25.0, 26.0, 27.0, 28.0, 29.0], + [30.0, 31.0, 32.0, 33.0, 34.0], + [35.0, 36.0, 37.0, 38.0, 39.0], + ], + [ + [40.0, 41.0, 42.0, 43.0, 44.0], + [45.0, 46.0, 47.0, 48.0, 49.0], + [50.0, 51.0, 52.0, 53.0, 54.0], + [55.0, 56.0, 57.0, 58.0, 59.0], + ], + ] + ] + ] + ).astype(np.float32) + + W = np.array( + [ + [ + [ + [ + [1.0, 1.0, 1.0], # (1, 2, 3, 3, 3) + [1.0, 1.0, 1.0], + [1.0, 1.0, 1.0], + ], + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], + ], + [ + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], + ], + ] + ] + ).astype(np.float32) + + node = onnx.helper.make_node("ConvTranspose", ["X", "W"], ["Y"]) + + y = np.array( + [ + [ + [ + [ + [0.0, 1.0, 3.0, 6.0, 9.0, 7.0, 4.0], # (1, 2, 5, 6, 7) + [5.0, 12.0, 21.0, 27.0, 33.0, 24.0, 13.0], + [15.0, 33.0, 54.0, 63.0, 72.0, 51.0, 27.0], + [30.0, 63.0, 99.0, 108.0, 117.0, 81.0, 42.0], + [25.0, 52.0, 81.0, 87.0, 93.0, 64.0, 33.0], + [15.0, 31.0, 48.0, 51.0, 54.0, 37.0, 19.0], + ], + [ + [20.0, 42.0, 66.0, 72.0, 78.0, 54.0, 28.0], + [50.0, 104.0, 162.0, 174.0, 186.0, 128.0, 66.0], + [90.0, 186.0, 288.0, 306.0, 324.0, 222.0, 114.0], + [120.0, 246.0, 378.0, 396.0, 414.0, 282.0, 144.0], + [90.0, 184.0, 282.0, 294.0, 306.0, 208.0, 106.0], + [50.0, 102.0, 156.0, 162.0, 168.0, 114.0, 58.0], + ], + [ + [60.0, 123.0, 189.0, 198.0, 207.0, 141.0, 72.0], + [135.0, 276.0, 423.0, 441.0, 459.0, 312.0, 159.0], + [225.0, 459.0, 702.0, 729.0, 756.0, 513.0, 261.0], + [270.0, 549.0, 837.0, 864.0, 891.0, 603.0, 306.0], + [195.0, 396.0, 603.0, 621.0, 639.0, 432.0, 219.0], + [105.0, 213.0, 324.0, 333.0, 342.0, 231.0, 117.0], + ], + [ + [60.0, 122.0, 186.0, 192.0, 198.0, 134.0, 68.0], + [130.0, 264.0, 402.0, 414.0, 426.0, 288.0, 146.0], + [210.0, 426.0, 648.0, 666.0, 
684.0, 462.0, 234.0], + [240.0, 486.0, 738.0, 756.0, 774.0, 522.0, 264.0], + [170.0, 344.0, 522.0, 534.0, 546.0, 368.0, 186.0], + [90.0, 182.0, 276.0, 282.0, 288.0, 194.0, 98.0], + ], + [ + [40.0, 81.0, 123.0, 126.0, 129.0, 87.0, 44.0], + [85.0, 172.0, 261.0, 267.0, 273.0, 184.0, 93.0], + [135.0, 273.0, 414.0, 423.0, 432.0, 291.0, 147.0], + [150.0, 303.0, 459.0, 468.0, 477.0, 321.0, 162.0], + [105.0, 212.0, 321.0, 327.0, 333.0, 224.0, 113.0], + [55.0, 111.0, 168.0, 171.0, 174.0, 117.0, 59.0], + ], + ], + [ + [ + [0.0, 1.0, 3.0, 6.0, 9.0, 7.0, 4.0], + [5.0, 12.0, 21.0, 27.0, 33.0, 24.0, 13.0], + [15.0, 33.0, 54.0, 63.0, 72.0, 51.0, 27.0], + [30.0, 63.0, 99.0, 108.0, 117.0, 81.0, 42.0], + [25.0, 52.0, 81.0, 87.0, 93.0, 64.0, 33.0], + [15.0, 31.0, 48.0, 51.0, 54.0, 37.0, 19.0], + ], + [ + [20.0, 42.0, 66.0, 72.0, 78.0, 54.0, 28.0], + [50.0, 104.0, 162.0, 174.0, 186.0, 128.0, 66.0], + [90.0, 186.0, 288.0, 306.0, 324.0, 222.0, 114.0], + [120.0, 246.0, 378.0, 396.0, 414.0, 282.0, 144.0], + [90.0, 184.0, 282.0, 294.0, 306.0, 208.0, 106.0], + [50.0, 102.0, 156.0, 162.0, 168.0, 114.0, 58.0], + ], + [ + [60.0, 123.0, 189.0, 198.0, 207.0, 141.0, 72.0], + [135.0, 276.0, 423.0, 441.0, 459.0, 312.0, 159.0], + [225.0, 459.0, 702.0, 729.0, 756.0, 513.0, 261.0], + [270.0, 549.0, 837.0, 864.0, 891.0, 603.0, 306.0], + [195.0, 396.0, 603.0, 621.0, 639.0, 432.0, 219.0], + [105.0, 213.0, 324.0, 333.0, 342.0, 231.0, 117.0], + ], + [ + [60.0, 122.0, 186.0, 192.0, 198.0, 134.0, 68.0], + [130.0, 264.0, 402.0, 414.0, 426.0, 288.0, 146.0], + [210.0, 426.0, 648.0, 666.0, 684.0, 462.0, 234.0], + [240.0, 486.0, 738.0, 756.0, 774.0, 522.0, 264.0], + [170.0, 344.0, 522.0, 534.0, 546.0, 368.0, 186.0], + [90.0, 182.0, 276.0, 282.0, 288.0, 194.0, 98.0], + ], + [ + [40.0, 81.0, 123.0, 126.0, 129.0, 87.0, 44.0], + [85.0, 172.0, 261.0, 267.0, 273.0, 184.0, 93.0], + [135.0, 273.0, 414.0, 423.0, 432.0, 291.0, 147.0], + [150.0, 303.0, 459.0, 468.0, 477.0, 321.0, 162.0], + [105.0, 212.0, 321.0, 327.0, 333.0, 224.0, 113.0], + [55.0, 111.0, 168.0, 171.0, 174.0, 117.0, 59.0], + ], + ], + ] + ] + ).astype(np.float32) + + expect(node, inputs=[x, W], outputs=[y], name="test_convtranspose_3d") + + @staticmethod + def export_convtranspose_attributes() -> None: + x = np.array( + [[[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]]] # (1, 1, 3, 3) + ).astype(np.float32) + + W = np.array( + [ + [ + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], # (1, 2, 3, 3) + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], + ] + ] + ).astype(np.float32) + + y = np.array( + [ + [ + [ + [0.0, 0.0, 1.0, 1.0, 3.0, 2.0, 2.0, 0.0], # (1, 2, 10, 8) + [0.0, 0.0, 1.0, 1.0, 3.0, 2.0, 2.0, 0.0], + [0.0, 0.0, 1.0, 1.0, 3.0, 2.0, 2.0, 0.0], + [3.0, 3.0, 7.0, 4.0, 9.0, 5.0, 5.0, 0.0], + [3.0, 3.0, 7.0, 4.0, 9.0, 5.0, 5.0, 0.0], + [3.0, 3.0, 7.0, 4.0, 9.0, 5.0, 5.0, 0.0], + [6.0, 6.0, 13.0, 7.0, 15.0, 8.0, 8.0, 0.0], + [6.0, 6.0, 13.0, 7.0, 15.0, 8.0, 8.0, 0.0], + [6.0, 6.0, 13.0, 7.0, 15.0, 8.0, 8.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + ], + [ + [0.0, 0.0, 1.0, 1.0, 3.0, 2.0, 2.0, 0.0], + [0.0, 0.0, 1.0, 1.0, 3.0, 2.0, 2.0, 0.0], + [0.0, 0.0, 1.0, 1.0, 3.0, 2.0, 2.0, 0.0], + [3.0, 3.0, 7.0, 4.0, 9.0, 5.0, 5.0, 0.0], + [3.0, 3.0, 7.0, 4.0, 9.0, 5.0, 5.0, 0.0], + [3.0, 3.0, 7.0, 4.0, 9.0, 5.0, 5.0, 0.0], + [6.0, 6.0, 13.0, 7.0, 15.0, 8.0, 8.0, 0.0], + [6.0, 6.0, 13.0, 7.0, 15.0, 8.0, 8.0, 0.0], + [6.0, 6.0, 13.0, 7.0, 15.0, 8.0, 8.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + ], + ] + ] + ).astype(np.float32) + + node = 
onnx.helper.make_node( + "ConvTranspose", ["X", "W"], ["Y"], strides=[3, 2], output_shape=[10, 8] + ) + expect(node, inputs=[x, W], outputs=[y], name="test_convtranspose_output_shape") + + node = onnx.helper.make_node( + "ConvTranspose", ["X", "W"], ["Y"], strides=[3, 2], output_padding=[1, 1] + ) + expect(node, inputs=[x, W], outputs=[y], name="test_convtranspose_pad") + + node = onnx.helper.make_node( + "ConvTranspose", + ["X", "W"], + ["Y"], + name="test", + strides=[3, 2], + output_shape=[10, 8], + kernel_shape=[3, 3], + output_padding=[1, 1], + ) + expect(node, inputs=[x, W], outputs=[y], name="test_convtranspose_kernel_shape") + + @staticmethod + def export_convtranspose_pads() -> None: + x = np.array( + [[[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]]] # (1, 1, 3, 3) + ).astype(np.float32) + + W = np.array( + [ + [ + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], # (1, 2, 3, 3) + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], + ] + ] + ).astype(np.float32) + + node = onnx.helper.make_node( + "ConvTranspose", ["X", "W"], ["Y"], strides=[3, 2], pads=[1, 2, 1, 2] + ) + + y = np.array( + [ + [ + [ + [1.0, 1.0, 3.0], # (1, 2, 7, 3) + [1.0, 1.0, 3.0], + [7.0, 4.0, 9.0], + [7.0, 4.0, 9.0], + [7.0, 4.0, 9.0], + [13.0, 7.0, 15.0], + [13.0, 7.0, 15.0], + ], + [ + [1.0, 1.0, 3.0], + [1.0, 1.0, 3.0], + [7.0, 4.0, 9.0], + [7.0, 4.0, 9.0], + [7.0, 4.0, 9.0], + [13.0, 7.0, 15.0], + [13.0, 7.0, 15.0], + ], + ] + ] + ).astype(np.float32) + + expect(node, inputs=[x, W], outputs=[y], name="test_convtranspose_pads") + + @staticmethod + def export_convtranspose_dilations() -> None: + x = np.array( + [[[[3.0, 8.0, 1.0], [9.0, 5.0, 7.0], [3.0, 2.0, 6.0]]]] # (1, 1, 3, 3) + ).astype(np.float32) + W = np.array([[[[7.0, 2.0], [1.0, 9.0]]]]).astype(np.float32) # (1, 1, 2, 2) + + node = onnx.helper.make_node( + "ConvTranspose", ["X", "W"], ["Y"], dilations=[2, 2] + ) + + y = np.array( + [ + [ + [ + [21.0, 56.0, 13.0, 16.0, 2.0], # [1, 1, 5, 5] + [63.0, 35.0, 67.0, 10.0, 14.0], + [24.0, 22.0, 76.0, 76.0, 21.0], + [9.0, 5.0, 88.0, 45.0, 63.0], + [3.0, 2.0, 33.0, 18.0, 54.0], + ] + ] + ] + ).astype(np.float32) + + expect(node, inputs=[x, W], outputs=[y], name="test_convtranspose_dilations") + + @staticmethod + def export_convtranspose_autopad_same() -> None: + x = np.array( + [[[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]]] # (1, 1, 3, 3) + ).astype(np.float32) + + W = np.array( + [ + [ + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], # (1, 2, 3, 3) + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], + ] + ] + ).astype(np.float32) + + node = onnx.helper.make_node( + "ConvTranspose", ["X", "W"], ["Y"], auto_pad="SAME_UPPER", strides=[2, 2] + ) + + y = np.array( + [ + [ + [ + [0.0, 0.0, 1.0, 1.0, 3.0, 2.0], + [0.0, 0.0, 1.0, 1.0, 3.0, 2.0], + [3.0, 3.0, 8.0, 5.0, 12.0, 7.0], + [3.0, 3.0, 7.0, 4.0, 9.0, 5.0], + [9.0, 9.0, 20.0, 11.0, 24.0, 13.0], + [6.0, 6.0, 13.0, 7.0, 15.0, 8.0], + ], + [ + [0.0, 0.0, 1.0, 1.0, 3.0, 2.0], + [0.0, 0.0, 1.0, 1.0, 3.0, 2.0], + [3.0, 3.0, 8.0, 5.0, 12.0, 7.0], + [3.0, 3.0, 7.0, 4.0, 9.0, 5.0], + [9.0, 9.0, 20.0, 11.0, 24.0, 13.0], + [6.0, 6.0, 13.0, 7.0, 15.0, 8.0], + ], + ] + ] + ).astype(np.float32) + + expect(node, inputs=[x, W], outputs=[y], name="test_convtranspose_autopad_same") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/cos.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/cos.py new file mode 100644 index 0000000000000000000000000000000000000000..6c8fa3faaac62a6123440656fe76b86b2884f907 --- /dev/null +++ 
b/MLPY/Lib/site-packages/onnx/backend/test/case/node/cos.py @@ -0,0 +1,27 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class Cos(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "Cos", + inputs=["x"], + outputs=["y"], + ) + + x = np.array([-1, 0, 1]).astype(np.float32) + y = np.cos(x) + expect(node, inputs=[x], outputs=[y], name="test_cos_example") + + x = np.random.randn(3, 4, 5).astype(np.float32) + y = np.cos(x) + expect(node, inputs=[x], outputs=[y], name="test_cos") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/cosh.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/cosh.py new file mode 100644 index 0000000000000000000000000000000000000000..d6832a89afc36c79695545d3b24c74c8ced7bd6e --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/cosh.py @@ -0,0 +1,27 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class Cosh(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "Cosh", + inputs=["x"], + outputs=["y"], + ) + + x = np.array([-1, 0, 1]).astype(np.float32) + y = np.cosh(x) # expected output [1.54308069, 1., 1.54308069] + expect(node, inputs=[x], outputs=[y], name="test_cosh_example") + + x = np.random.randn(3, 4, 5).astype(np.float32) + y = np.cosh(x) + expect(node, inputs=[x], outputs=[y], name="test_cosh") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/cumsum.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/cumsum.py new file mode 100644 index 0000000000000000000000000000000000000000..febbb8034eb44fa7f237348a509ea5c6ed4d1113 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/cumsum.py @@ -0,0 +1,87 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class CumSum(Base): + @staticmethod + def export_cumsum_1d() -> None: + node = onnx.helper.make_node("CumSum", inputs=["x", "axis"], outputs=["y"]) + x = np.array([1.0, 2.0, 3.0, 4.0, 5.0]).astype(np.float64) + axis = np.int32(0) + y = np.array([1.0, 3.0, 6.0, 10.0, 15.0]).astype(np.float64) + expect(node, inputs=[x, axis], outputs=[y], name="test_cumsum_1d") + + @staticmethod + def export_cumsum_1d_exclusive() -> None: + node = onnx.helper.make_node( + "CumSum", inputs=["x", "axis"], outputs=["y"], exclusive=1 + ) + x = np.array([1.0, 2.0, 3.0, 4.0, 5.0]).astype(np.float64) + axis = np.int32(0) + y = np.array([0.0, 1.0, 3.0, 6.0, 10.0]).astype(np.float64) + expect(node, inputs=[x, axis], outputs=[y], name="test_cumsum_1d_exclusive") + + @staticmethod + def export_cumsum_1d_reverse() -> None: + node = onnx.helper.make_node( + "CumSum", inputs=["x", "axis"], outputs=["y"], reverse=1 + ) + x = np.array([1.0, 2.0, 3.0, 4.0, 5.0]).astype(np.float64) + axis = np.int32(0) + y = np.array([15.0, 14.0, 12.0, 9.0, 5.0]).astype(np.float64) + expect(node, inputs=[x, axis], outputs=[y], name="test_cumsum_1d_reverse") + + @staticmethod + def export_cumsum_1d_reverse_exclusive() -> None: + node = onnx.helper.make_node( + "CumSum", inputs=["x", "axis"], outputs=["y"], reverse=1, exclusive=1 + ) 
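+        # Hedged reference sketch (illustrative only, not part of the ONNX
+        # API): with reverse=1 and exclusive=1 the sum runs right-to-left and
+        # each element is excluded from its own total, i.e. y[i] = x[i+1:].sum().
+        # The same semantics in plain NumPy for a 1-D input x:
+        #   r = np.flip(x)
+        #   y = np.flip(np.cumsum(r) - r)  # -> [14., 12., 9., 5., 0.] for x below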
+ x = np.array([1.0, 2.0, 3.0, 4.0, 5.0]).astype(np.float64) + axis = np.int32(0) + y = np.array([14.0, 12.0, 9.0, 5.0, 0.0]).astype(np.float64) + expect( + node, inputs=[x, axis], outputs=[y], name="test_cumsum_1d_reverse_exclusive" + ) + + @staticmethod + def export_cumsum_2d_axis_0() -> None: + node = onnx.helper.make_node( + "CumSum", + inputs=["x", "axis"], + outputs=["y"], + ) + x = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float64).reshape((2, 3)) + axis = np.int32(0) + y = np.array([1.0, 2.0, 3.0, 5.0, 7.0, 9.0]).astype(np.float64).reshape((2, 3)) + expect(node, inputs=[x, axis], outputs=[y], name="test_cumsum_2d_axis_0") + + @staticmethod + def export_cumsum_2d_axis_1() -> None: + node = onnx.helper.make_node( + "CumSum", + inputs=["x", "axis"], + outputs=["y"], + ) + x = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float64).reshape((2, 3)) + axis = np.int32(1) + y = np.array([1.0, 3.0, 6.0, 4.0, 9.0, 15.0]).astype(np.float64).reshape((2, 3)) + expect(node, inputs=[x, axis], outputs=[y], name="test_cumsum_2d_axis_1") + + @staticmethod + def export_cumsum_2d_negative_axis() -> None: + node = onnx.helper.make_node( + "CumSum", + inputs=["x", "axis"], + outputs=["y"], + ) + x = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float64).reshape((2, 3)) + axis = np.int32(-1) + y = np.array([1.0, 3.0, 6.0, 4.0, 9.0, 15.0]).astype(np.float64).reshape((2, 3)) + expect(node, inputs=[x, axis], outputs=[y], name="test_cumsum_2d_negative_axis") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/deformconv.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/deformconv.py new file mode 100644 index 0000000000000000000000000000000000000000..cd40174291a5fd0c1d690de9e3e7a99886906424 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/deformconv.py @@ -0,0 +1,170 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class DeformConv(Base): + @staticmethod + def export() -> None: + X = np.arange(9).astype(np.float32) + X.shape = (1, 1, 3, 3) + W = np.ones((1, 1, 2, 2), dtype=np.float32) + + # Convolution with padding + offset_with_padding = np.zeros((1, 8, 4, 4), dtype=np.float32) + offset_with_padding[ + 0, 0, 0, 0 + ] = 0.5 # h-coord of [0, 0] element of kernel, at output position [0, 0] + offset_with_padding[ + 0, 5, 1, 2 + ] = -0.1 # w-coord of [1, 0] element of kernel, at output position [1, 2] + + node_with_padding = onnx.helper.make_node( + "DeformConv", + inputs=["X", "W", "offset_with_padding"], + outputs=["Y_with_padding"], + kernel_shape=[2, 2], + pads=[1, 1, 1, 1], + ) + Y_with_padding = np.array( + [ + [ + [ + [0.0, 1.0, 3.0, 2.0], # (1, 1, 4, 4) output tensor + [3.0, 8.0, 11.9, 7.0], + [9.0, 20.0, 24.0, 13.0], + [6.0, 13.0, 15.0, 8.0], + ] + ] + ] + ).astype(np.float32) + expect( + node_with_padding, + inputs=[X, W, offset_with_padding], + outputs=[Y_with_padding], + name="test_basic_deform_conv_with_padding", + ) + + # Convolution without padding + offset_without_padding = np.zeros((1, 8, 2, 2), dtype=np.float32) + offset_without_padding[ + 0, 0, 0, 0 + ] = 0.5 # h-coord of [0, 0] element of kernel, at output position [0, 0] + offset_without_padding[ + 0, 5, 0, 1 + ] = -0.1 # w-coord of [1, 0] element of kernel, at output position [0, 1] + + node_without_padding = onnx.helper.make_node( + "DeformConv", + inputs=["X", "W", "offset_without_padding"], + 
outputs=["Y_without_padding"], + kernel_shape=[2, 2], + pads=[0, 0, 0, 0], + ) + Y_without_padding = np.array( + [ + [ + [ + [9.5, 11.9], # (1, 1, 2, 2) output tensor + [20.0, 24.0], + ] + ] + ] + ).astype(np.float32) + expect( + node_without_padding, + inputs=[X, W, offset_without_padding], + outputs=[Y_without_padding], + name="test_basic_deform_conv_without_padding", + ) + + @staticmethod + def export_deformconv_with_mask_bias() -> None: + X = np.arange(9).astype(np.float32) + X.shape = (1, 1, 3, 3) + W = np.ones((1, 1, 2, 2), dtype=np.float32) + B = np.ones((1,), dtype=np.float32) + + offset = np.zeros((1, 8, 2, 2), dtype=np.float32) + offset[ + 0, 0, 0, 0 + ] = 0.5 # h-coord of [0, 0] element of kernel, at output position [0, 0] + offset[ + 0, 5, 0, 1 + ] = -0.1 # w-coord of [1, 0] element of kernel, at output position [0, 1] + + mask = np.ones((1, 4, 2, 2), dtype=np.float32) + mask[0, 2, 1, 1] = 0.2 # [1, 0] element of kernel at output position [1, 1] + + node = onnx.helper.make_node( + "DeformConv", + inputs=["X", "W", "offset", "B", "mask"], + outputs=["Y"], + kernel_shape=[2, 2], + pads=[0, 0, 0, 0], + ) + Y = np.array( + [ + [ + [ + [10.5, 12.9], # (1, 1, 2, 2) output tensor + [21.0, 19.4], + ] + ] + ] + ).astype(np.float32) + expect( + node, + inputs=[X, W, offset, B, mask], + outputs=[Y], + name="test_deform_conv_with_mask_bias", + ) + + @staticmethod + def export_deformconv_with_multiple_offset_groups() -> None: + X = np.zeros((1, 2, 3, 3), dtype=np.float32) + X[0, 0] = np.reshape(np.arange(9).astype(np.float32), (3, 3)) + X[0, 1] = np.reshape(np.arange(8, -1, -1).astype(np.float32), (3, 3)) + X.shape = (1, 2, 3, 3) + W = np.ones((1, 2, 2, 2), dtype=np.float32) + + offset = np.zeros((1, 16, 2, 2), dtype=np.float32) + offset[ + 0, 0, 0, 0 + ] = 0.5 # h-coord of [0, 0] element of kernel in channel 0, at output position [0, 0] + offset[ + 0, 13, 0, 1 + ] = ( + -0.1 + ) # w-coord of [1, 0] element of kernel in channel 1, at output position [0, 1] + + node = onnx.helper.make_node( + "DeformConv", + inputs=["X", "W", "offset"], + outputs=["Y"], + kernel_shape=[2, 2], + pads=[0, 0, 0, 0], + offset_group=2, + ) + Y = np.array( + [ + [ + [ + [33.5, 32.1], # (1, 1, 2, 2) output tensor + [32.0, 32.0], + ] + ] + ] + ).astype(np.float32) + expect( + node, + inputs=[X, W, offset], + outputs=[Y], + name="test_deform_conv_with_multiple_offset_groups", + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/depthtospace.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/depthtospace.py new file mode 100644 index 0000000000000000000000000000000000000000..47aa65d48bffd55319bd8334fc94f646d899b406 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/depthtospace.py @@ -0,0 +1,97 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class DepthToSpace(Base): + @staticmethod + def export_default_mode_example() -> None: + node = onnx.helper.make_node( + "DepthToSpace", inputs=["x"], outputs=["y"], blocksize=2, mode="DCR" + ) + + # (1, 8, 2, 3) input tensor + x = np.array( + [ + [ + [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]], + [[9.0, 10.0, 11.0], [12.0, 13.0, 14.0]], + [[18.0, 19.0, 20.0], [21.0, 22.0, 23.0]], + [[27.0, 28.0, 29.0], [30.0, 31.0, 32.0]], + [[36.0, 37.0, 38.0], [39.0, 40.0, 41.0]], + [[45.0, 46.0, 47.0], [48.0, 49.0, 50.0]], + [[54.0, 55.0, 56.0], [57.0, 58.0, 59.0]], + [[63.0, 64.0, 65.0], 
[66.0, 67.0, 68.0]], + ] + ] + ).astype(np.float32) + + # (1, 2, 4, 6) output tensor + y = np.array( + [ + [ + [ + [0.0, 18.0, 1.0, 19.0, 2.0, 20.0], + [36.0, 54.0, 37.0, 55.0, 38.0, 56.0], + [3.0, 21.0, 4.0, 22.0, 5.0, 23.0], + [39.0, 57.0, 40.0, 58.0, 41.0, 59.0], + ], + [ + [9.0, 27.0, 10.0, 28.0, 11.0, 29.0], + [45.0, 63.0, 46.0, 64.0, 47.0, 65.0], + [12.0, 30.0, 13.0, 31.0, 14.0, 32.0], + [48.0, 66.0, 49.0, 67.0, 50.0, 68.0], + ], + ] + ] + ).astype(np.float32) + expect(node, inputs=[x], outputs=[y], name="test_depthtospace_example") + + @staticmethod + def export_crd_mode_example() -> None: + node = onnx.helper.make_node( + "DepthToSpace", inputs=["x"], outputs=["y"], blocksize=2, mode="CRD" + ) + + # (1, 8, 2, 3) input tensor + x = np.array( + [ + [ + [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]], + [[9.0, 10.0, 11.0], [12.0, 13.0, 14.0]], + [[18.0, 19.0, 20.0], [21.0, 22.0, 23.0]], + [[27.0, 28.0, 29.0], [30.0, 31.0, 32.0]], + [[36.0, 37.0, 38.0], [39.0, 40.0, 41.0]], + [[45.0, 46.0, 47.0], [48.0, 49.0, 50.0]], + [[54.0, 55.0, 56.0], [57.0, 58.0, 59.0]], + [[63.0, 64.0, 65.0], [66.0, 67.0, 68.0]], + ] + ] + ).astype(np.float32) + + # (1, 2, 4, 6) output tensor + y = np.array( + [ + [ + [ + [0.0, 9.0, 1.0, 10.0, 2.0, 11.0], + [18.0, 27.0, 19.0, 28.0, 20.0, 29.0], + [3.0, 12.0, 4.0, 13.0, 5.0, 14.0], + [21.0, 30.0, 22.0, 31.0, 23.0, 32.0], + ], + [ + [36.0, 45.0, 37.0, 46.0, 38.0, 47.0], + [54.0, 63.0, 55.0, 64.0, 56.0, 65.0], + [39.0, 48.0, 40.0, 49.0, 41.0, 50.0], + [57.0, 66.0, 58.0, 67.0, 59.0, 68.0], + ], + ] + ] + ).astype(np.float32) + expect(node, inputs=[x], outputs=[y], name="test_depthtospace_crd_mode_example") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/dequantizelinear.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/dequantizelinear.py new file mode 100644 index 0000000000000000000000000000000000000000..ec51a959ca3d94869bf697b583979b0881375944 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/dequantizelinear.py @@ -0,0 +1,306 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx import TensorProto +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect +from onnx.helper import make_tensor + + +class DequantizeLinear(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "DequantizeLinear", + inputs=["x", "x_scale", "x_zero_point"], + outputs=["y"], + ) + + # scalar zero point and scale + x = np.array([0, 3, 128, 255]).astype(np.uint8) + x_scale = np.float32(2) + x_zero_point = np.uint8(128) + y = np.array([-256, -250, 0, 254], dtype=np.float32) + + expect( + node, + inputs=[x, x_scale, x_zero_point], + outputs=[y], + name="test_dequantizelinear", + ) + + @staticmethod + def export_axis() -> None: + node = onnx.helper.make_node( + "DequantizeLinear", + inputs=["x", "x_scale", "x_zero_point"], + outputs=["y"], + ) + + # 1-D tensor zero point and scale of size equal to axis 1 of the input tensor + x = np.array( + [ + [ + [[3, 89], [34, 200], [74, 59]], + [[5, 24], [24, 87], [32, 13]], + [[245, 99], [4, 142], [121, 102]], + ], + ], + dtype=np.uint8, + ) + x_scale = np.array([2, 4, 5], dtype=np.float32) + x_zero_point = np.array([84, 24, 196], dtype=np.uint8) + y = ( + x.astype(np.float32) - x_zero_point.reshape(1, 3, 1, 1).astype(np.float32) + ) * x_scale.reshape(1, 3, 1, 1) + + expect( + node, + inputs=[x, x_scale, x_zero_point], + outputs=[y], + name="test_dequantizelinear_axis", + ) + + 
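+    # A minimal NumPy sketch of the per-axis dequantization exercised above
+    # (reference only; assumes a 1-D scale/zero-point broadcast along `axis`):
+    #   def dequantize(x, scale, zero_point, axis=1):
+    #       shape = [1] * x.ndim
+    #       shape[axis] = -1
+    #       return (x.astype(np.float32)
+    #               - zero_point.reshape(shape).astype(np.float32)
+    #              ) * scale.reshape(shape)
+    # which reproduces the expected output y computed in export_axis.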
@staticmethod + def export_e4m3fn() -> None: + node = onnx.helper.make_node( + "DequantizeLinear", + inputs=["x", "x_scale"], + outputs=["y"], + axis=0, + ) + + # scalar zero point and scale + x = make_tensor("x", TensorProto.FLOAT8E4M3FN, [5], [0, 0.5, 1, 448, -104]) + x_scale = np.float32(2) + y = np.array([0.0, 1.0, 2.0, 896.0, -208.0], dtype=np.float32) + + expect( + node, + inputs=[x, x_scale], + outputs=[y], + name="test_dequantizelinear_e4m3fn", + ) + + @staticmethod + def export_e4m3fn_float16() -> None: + node = onnx.helper.make_node( + "DequantizeLinear", + inputs=["x", "x_scale"], + outputs=["y"], + axis=0, + ) + + # scalar zero point and scale + x = make_tensor("x", TensorProto.FLOAT8E4M3FN, [5], [0, 0.5, 1, 448, -104]) + x_scale = np.float16(2) + y = np.array([0.0, 1.0, 2.0, 896.0, -208.0], dtype=np.float16) + + expect( + node, + inputs=[x, x_scale], + outputs=[y], + name="test_dequantizelinear_e4m3fn_float16", + ) + + @staticmethod + def export_e4m3fn_zero_point() -> None: + node = onnx.helper.make_node( + "DequantizeLinear", + inputs=["x", "x_scale", "zero_point"], + outputs=["y"], + axis=0, + ) + + # scalar zero point and scale + x = make_tensor("x", TensorProto.FLOAT8E4M3FN, [5], [0, 0.5, 1, 448, -104]) + zero_point = make_tensor("zero_point", TensorProto.FLOAT8E4M3FN, [1], [0]) + x_scale = np.float32(2) + y = np.array([0.0, 1.0, 2.0, 896.0, -208.0], dtype=np.float32) + + expect( + node, + inputs=[x, x_scale, zero_point], + outputs=[y], + name="test_dequantizelinear_e4m3fn_zero_point", + ) + + @staticmethod + def export_e5m2() -> None: + node = onnx.helper.make_node( + "DequantizeLinear", + inputs=["x", "x_scale"], + outputs=["y"], + axis=0, + ) + + # scalar zero point and scale + x = make_tensor("x", TensorProto.FLOAT8E5M2, [5], [0, 0.5, 1, 49152, -96]) + x_scale = np.float32(2) + y = np.array([0.0, 1.0, 2.0, 98304.0, -192.0], dtype=np.float32) + + expect( + node, + inputs=[x, x_scale], + outputs=[y], + name="test_dequantizelinear_e5m2", + ) + + @staticmethod + def export_uint16() -> None: + node = onnx.helper.make_node( + "DequantizeLinear", + inputs=["x", "x_scale", "x_zero_point"], + outputs=["y"], + ) + + x = np.array([30000, 31000, 32768, 33000]).astype(np.uint16) + x_scale = np.float32(2) + x_zero_point = np.uint16(32767) + y = np.array([-5534.0, -3534.0, 2.0, 466.0], dtype=np.float32) + + expect( + node, + inputs=[x, x_scale, x_zero_point], + outputs=[y], + name="test_dequantizelinear_uint16", + ) + + @staticmethod + def export_int16() -> None: + node = onnx.helper.make_node( + "DequantizeLinear", + inputs=["x", "x_scale", "x_zero_point"], + outputs=["y"], + ) + + x = np.array([-300, -30, -1025, 1270]).astype(np.int16) + x_scale = np.float32(2) + x_zero_point = np.int16(-1024) + y = np.array([1448.0, 1988.0, -2.0, 4588.0], dtype=np.float32) + + expect( + node, + inputs=[x, x_scale, x_zero_point], + outputs=[y], + name="test_dequantizelinear_int16", + ) + + @staticmethod + def export_uint4() -> None: + node = onnx.helper.make_node( + "DequantizeLinear", + inputs=["x", "x_scale", "x_zero_point"], + outputs=["y"], + axis=0, + ) + + # scalar zero point and scale + x = make_tensor("x", TensorProto.UINT4, [5], [0, 1, 7, 10, 15]) + x_scale = np.float32(2) + x_zero_point = make_tensor("x_zero_point", TensorProto.UINT4, (1,), [1]) + y = np.array([-2, 0, 12, 18, 28], dtype=np.float32) + + expect( + node, + inputs=[x, x_scale, x_zero_point], + outputs=[y], + name="test_dequantizelinear_uint4", + ) + + @staticmethod + def export_int4() -> None: + node = onnx.helper.make_node( 
+ "DequantizeLinear", + inputs=["x", "x_scale", "x_zero_point"], + outputs=["y"], + axis=0, + ) + + # scalar zero point and scale + x = make_tensor("x", TensorProto.INT4, [5], [0, 1, 7, -4, -8]) + x_scale = np.float32(2) + x_zero_point = make_tensor("x_zero_point", TensorProto.INT4, (1,), [1]) + y = np.array([-2, 0, 12, -10, -18], dtype=np.float32) + + expect( + node, + inputs=[x, x_scale, x_zero_point], + outputs=[y], + name="test_dequantizelinear_int4", + ) + + @staticmethod + def export_blocked() -> None: + node = onnx.helper.make_node( + "DequantizeLinear", + inputs=["x", "x_scale", "x_zero_point"], + outputs=["y"], + axis=1, + block_size=2, + ) + + x = np.array( + [ + [ + [[3, 89], [34, 200], [74, 59]], + [[5, 24], [24, 87], [32, 13]], + [[5, 12], [12, 33], [65, 42]], + [[245, 99], [4, 142], [121, 102]], + ], + ], + dtype=np.uint8, + ) + + x_scale = np.array( + [ + [ + [[3.0, 2.0], [4.0, 1.0], [2.0, 2.0]], + [[5.0, 2.0], [4.0, 3.0], [5.0, 2.0]], + ], + ], + dtype=np.float32, + ) + x_zero_point = np.array( + [ + [ + [[1, 0], [0, 1], [2, 20]], + [[3, 2], [4, 3], [15, 2]], + ], + ], + dtype=np.uint8, + ) + + # x.shape = (1, 4, 3, 2) + # x_scale.shape = (1, 2, 3, 2) + assert x_scale.shape == x_zero_point.shape + block_axis = 1 + # The block shape is [x.shape[i] // x_scale.shape[i] for i in range(len(x.shape))] = (1, 2, 1, 1) + assert all( + x.shape[i] == x_scale.shape[i] + for i in range(len(x.shape)) + if i != block_axis + ) + assert x.shape[block_axis] % x_scale.shape[block_axis] == 0 + repeats = x.shape[block_axis] // x_scale.shape[block_axis] + + # Create element-wise scale and zero point + x_scale_elementwise = np.repeat(x_scale, repeats=repeats, axis=block_axis) + x_zero_point_elementwise = np.repeat( + x_zero_point, repeats=repeats, axis=block_axis + ) + + y = ( + x.astype(np.float32) - x_zero_point_elementwise.astype(np.float32) + ) * x_scale_elementwise + + expect( + node, + inputs=[x, x_scale, x_zero_point], + outputs=[y], + name="test_dequantizelinear_blocked", + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/det.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/det.py new file mode 100644 index 0000000000000000000000000000000000000000..5ce46ce4bf4e71e4d0fc6500d28a1d34643fadac --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/det.py @@ -0,0 +1,37 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class Det(Base): + @staticmethod + def export_2d() -> None: + node = onnx.helper.make_node( + "Det", + inputs=["x"], + outputs=["y"], + ) + + x = np.arange(4).reshape(2, 2).astype(np.float32) + y = np.linalg.det(x) # expect -2 + expect(node, inputs=[x], outputs=[y], name="test_det_2d") + + @staticmethod + def export_nd() -> None: + node = onnx.helper.make_node( + "Det", + inputs=["x"], + outputs=["y"], + ) + + x = np.array([[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]).astype( + np.float32 + ) + y = np.linalg.det(x) # expect array([-2., -3., -8.]) + expect(node, inputs=[x], outputs=[y], name="test_det_nd") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/dft.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/dft.py new file mode 100644 index 0000000000000000000000000000000000000000..f5a1e8323b382b4694d299462fea6fc2810a685b --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/dft.py @@ -0,0 +1,91 @@ +# Copyright (c) ONNX 
Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class DFT(Base): + @staticmethod + def export_opset19() -> None: + node = onnx.helper.make_node("DFT", inputs=["x"], outputs=["y"], axis=1) + x = np.arange(0, 100).reshape(10, 10).astype(np.float32) + y = np.fft.fft(x, axis=0) + + x = x.reshape(1, 10, 10, 1) + y = np.stack((y.real, y.imag), axis=2).astype(np.float32).reshape(1, 10, 10, 2) + expect( + node, + inputs=[x], + outputs=[y], + name="test_dft_opset19", + opset_imports=[onnx.helper.make_opsetid("", 19)], + ) + + node = onnx.helper.make_node("DFT", inputs=["x"], outputs=["y"], axis=2) + x = np.arange(0, 100).reshape(10, 10).astype(np.float32) + y = np.fft.fft(x, axis=1) + + x = x.reshape(1, 10, 10, 1) + y = np.stack((y.real, y.imag), axis=2).astype(np.float32).reshape(1, 10, 10, 2) + expect( + node, + inputs=[x], + outputs=[y], + name="test_dft_axis_opset19", + opset_imports=[onnx.helper.make_opsetid("", 19)], + ) + + node = onnx.helper.make_node( + "DFT", inputs=["x"], outputs=["y"], inverse=1, axis=1 + ) + x = np.arange(0, 100, dtype=np.complex64).reshape( + 10, + 10, + ) + y = np.fft.ifft(x, axis=0) + + x = np.stack((x.real, x.imag), axis=2).astype(np.float32).reshape(1, 10, 10, 2) + y = np.stack((y.real, y.imag), axis=2).astype(np.float32).reshape(1, 10, 10, 2) + expect( + node, + inputs=[x], + outputs=[y], + name="test_dft_inverse_opset19", + opset_imports=[onnx.helper.make_opsetid("", 19)], + ) + + @staticmethod + def export() -> None: + node = onnx.helper.make_node("DFT", inputs=["x", "", "axis"], outputs=["y"]) + x = np.arange(0, 100).reshape(10, 10).astype(np.float32) + axis = np.array(1, dtype=np.int64) + y = np.fft.fft(x, axis=0) + + x = x.reshape(1, 10, 10, 1) + y = np.stack((y.real, y.imag), axis=2).astype(np.float32).reshape(1, 10, 10, 2) + expect(node, inputs=[x, axis], outputs=[y], name="test_dft") + + node = onnx.helper.make_node("DFT", inputs=["x", "", "axis"], outputs=["y"]) + x = np.arange(0, 100).reshape(10, 10).astype(np.float32) + axis = np.array(2, dtype=np.int64) + y = np.fft.fft(x, axis=1) + + x = x.reshape(1, 10, 10, 1) + y = np.stack((y.real, y.imag), axis=2).astype(np.float32).reshape(1, 10, 10, 2) + expect(node, inputs=[x, axis], outputs=[y], name="test_dft_axis") + + node = onnx.helper.make_node( + "DFT", inputs=["x", "", "axis"], outputs=["y"], inverse=1 + ) + x = np.arange(0, 100, dtype=np.complex64).reshape(10, 10) + axis = np.array(1, dtype=np.int64) + y = np.fft.ifft(x, axis=0) + + x = np.stack((x.real, x.imag), axis=2).astype(np.float32).reshape(1, 10, 10, 2) + y = np.stack((y.real, y.imag), axis=2).astype(np.float32).reshape(1, 10, 10, 2) + expect(node, inputs=[x, axis], outputs=[y], name="test_dft_inverse") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/div.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/div.py new file mode 100644 index 0000000000000000000000000000000000000000..7ca86c30ee1343816d9b410346034512fa237ccf --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/div.py @@ -0,0 +1,47 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class Div(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "Div", + inputs=["x", "y"], + outputs=["z"], + ) + + x = 
np.array([3, 4]).astype(np.float32) + y = np.array([1, 2]).astype(np.float32) + z = x / y # expected output [3., 2.] + expect(node, inputs=[x, y], outputs=[z], name="test_div_example") + + x = np.random.randn(3, 4, 5).astype(np.float32) + y = np.random.rand(3, 4, 5).astype(np.float32) + 1.0 + z = x / y + expect(node, inputs=[x, y], outputs=[z], name="test_div") + + x = np.random.randint(24, size=(3, 4, 5), dtype=np.uint8) + y = np.random.randint(24, size=(3, 4, 5), dtype=np.uint8) + 1 + z = x // y + expect(node, inputs=[x, y], outputs=[z], name="test_div_uint8") + + @staticmethod + def export_div_broadcast() -> None: + node = onnx.helper.make_node( + "Div", + inputs=["x", "y"], + outputs=["z"], + ) + + x = np.random.randn(3, 4, 5).astype(np.float32) + y = np.random.rand(5).astype(np.float32) + 1.0 + z = x / y + expect(node, inputs=[x, y], outputs=[z], name="test_div_bcast") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/dropout.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/dropout.py new file mode 100644 index 0000000000000000000000000000000000000000..0a543eee1fc861c582709e642433ef2fb3598617 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/dropout.py @@ -0,0 +1,209 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx import helper +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +def dropout(X, drop_probability=0.5, seed=0, training_mode=False, return_mask=False): # type: ignore + if drop_probability == 0 or training_mode is False: + if return_mask is True: + return X, np.ones(X.shape, dtype=bool) + else: + return X + + np.random.seed(seed) + mask = np.random.uniform(0, 1.0, X.shape) >= drop_probability + scale = 1 / (1 - drop_probability) + if return_mask: + return mask * X * scale, mask.astype(bool) + return mask * X * scale + + +class Dropout(Base): + # Inferencing tests. + @staticmethod + def export_default() -> None: + seed = np.int64(0) + node = onnx.helper.make_node("Dropout", inputs=["x"], outputs=["y"], seed=seed) + + x = np.random.randn(3, 4, 5).astype(np.float32) + y = dropout(x) + expect(node, inputs=[x], outputs=[y], name="test_dropout_default") + + @staticmethod + def export_default_ratio() -> None: + seed = np.int64(0) + node = onnx.helper.make_node( + "Dropout", inputs=["x", "r"], outputs=["y"], seed=seed + ) + + r = np.float32(0.1) + x = np.random.randn(3, 4, 5).astype(np.float32) + y = dropout(x, r) + expect(node, inputs=[x, r], outputs=[y], name="test_dropout_default_ratio") + + @staticmethod + def export_default_mask() -> None: + seed = np.int64(0) + node = onnx.helper.make_node( + "Dropout", inputs=["x"], outputs=["y", "z"], seed=seed + ) + + x = np.random.randn(3, 4, 5).astype(np.float32) + y, z = dropout(x, return_mask=True) + expect(node, inputs=[x], outputs=[y, z], name="test_dropout_default_mask") + + @staticmethod + def export_default_mask_ratio() -> None: + seed = np.int64(0) + node = onnx.helper.make_node( + "Dropout", inputs=["x", "r"], outputs=["y", "z"], seed=seed + ) + + r = np.float32(0.1) + x = np.random.randn(3, 4, 5).astype(np.float32) + y, z = dropout(x, r, return_mask=True) + expect( + node, inputs=[x, r], outputs=[y, z], name="test_dropout_default_mask_ratio" + ) + + # Training tests. 
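+    # In training mode the reference dropout() helper above samples a Boolean
+    # mask with keep probability (1 - ratio) and rescales surviving entries by
+    # 1 / (1 - ratio), so the expected activation is unchanged. A small usage
+    # sketch (illustrative only, not one of the exported tests):
+    #   y, mask = dropout(np.ones((2, 3), np.float32), 0.5,
+    #                     training_mode=True, return_mask=True)
+    #   # kept entries equal 2.0, dropped entries equal 0.0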
+ + @staticmethod + def export_training_default() -> None: + seed = np.int64(0) + node = onnx.helper.make_node( + "Dropout", inputs=["x", "r", "t"], outputs=["y"], seed=seed + ) + + x = np.random.randn(3, 4, 5).astype(np.float32) + r = np.float32(0.5) + t = np.bool_(True) + y = dropout(x, r, training_mode=t) + expect( + node, inputs=[x, r, t], outputs=[y], name="test_training_dropout_default" + ) + + @staticmethod + def export_training_default_ratio_mask() -> None: + seed = np.int64(0) + node = onnx.helper.make_node( + "Dropout", inputs=["x", "r", "t"], outputs=["y", "z"], seed=seed + ) + + x = np.random.randn(3, 4, 5).astype(np.float32) + r = np.float32(0.5) + t = np.bool_(True) + y, z = dropout(x, r, training_mode=t, return_mask=True) + expect( + node, + inputs=[x, r, t], + outputs=[y, z], + name="test_training_dropout_default_mask", + ) + + @staticmethod + def export_training() -> None: + seed = np.int64(0) + node = onnx.helper.make_node( + "Dropout", inputs=["x", "r", "t"], outputs=["y"], seed=seed + ) + + x = np.random.randn(3, 4, 5).astype(np.float32) + r = np.float32(0.75) + t = np.bool_(True) + y = dropout(x, r, training_mode=t) + expect(node, inputs=[x, r, t], outputs=[y], name="test_training_dropout") + + @staticmethod + def export_training_ratio_mask() -> None: + seed = np.int64(0) + node = onnx.helper.make_node( + "Dropout", inputs=["x", "r", "t"], outputs=["y", "z"], seed=seed + ) + + x = np.random.randn(3, 4, 5).astype(np.float32) + r = np.float32(0.75) + t = np.bool_(True) + y, z = dropout(x, r, training_mode=t, return_mask=True) + expect( + node, inputs=[x, r, t], outputs=[y, z], name="test_training_dropout_mask" + ) + + @staticmethod + def export_training_default_zero_ratio() -> None: + seed = np.int64(0) + node = onnx.helper.make_node( + "Dropout", inputs=["x", "r", "t"], outputs=["y"], seed=seed + ) + + x = np.random.randn(3, 4, 5).astype(np.float32) + r = np.float32(0.0) + t = np.bool_(True) + y = dropout(x, r, training_mode=t) + expect( + node, inputs=[x, r, t], outputs=[y], name="test_training_dropout_zero_ratio" + ) + + @staticmethod + def export_training_default_zero_ratio_mask() -> None: + seed = np.int64(0) + node = onnx.helper.make_node( + "Dropout", inputs=["x", "r", "t"], outputs=["y", "z"], seed=seed + ) + + x = np.random.randn(3, 4, 5).astype(np.float32) + r = np.float32(0.0) + t = np.bool_(True) + y, z = dropout(x, r, training_mode=t, return_mask=True) + expect( + node, + inputs=[x, r, t], + outputs=[y, z], + name="test_training_dropout_zero_ratio_mask", + ) + + # Old dropout tests + + @staticmethod + def export_default_old() -> None: + node = onnx.helper.make_node( + "Dropout", + inputs=["x"], + outputs=["y"], + ) + + x = np.array([-1, 0, 1]).astype(np.float32) + y = x + expect( + node, + inputs=[x], + outputs=[y], + name="test_dropout_default_old", + opset_imports=[helper.make_opsetid("", 11)], + ) + + @staticmethod + def export_random_old() -> None: + node = onnx.helper.make_node( + "Dropout", + inputs=["x"], + outputs=["y"], + ratio=0.2, + ) + + x = np.random.randn(3, 4, 5).astype(np.float32) + y = x + expect( + node, + inputs=[x], + outputs=[y], + name="test_dropout_random_old", + opset_imports=[helper.make_opsetid("", 11)], + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/dynamicquantizelinear.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/dynamicquantizelinear.py new file mode 100644 index 0000000000000000000000000000000000000000..e9992c950fdb94ba3ee19d9b99a11e67c28841f2 --- /dev/null +++ 
b/MLPY/Lib/site-packages/onnx/backend/test/case/node/dynamicquantizelinear.py @@ -0,0 +1,69 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class DynamicQuantizeLinear(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "DynamicQuantizeLinear", + inputs=["x"], + outputs=["y", "y_scale", "y_zero_point"], + ) + + # expected scale 0.0196078438 and zero point 153 + X = np.array([0, 2, -3, -2.5, 1.34, 0.5]).astype(np.float32) + x_min = np.minimum(0, np.min(X)) + x_max = np.maximum(0, np.max(X)) + Y_Scale = np.float32((x_max - x_min) / (255 - 0)) # uint8 -> [0, 255] + Y_ZeroPoint = np.clip(round((0 - x_min) / Y_Scale), 0, 255).astype(np.uint8) + Y = np.clip(np.round(X / Y_Scale) + Y_ZeroPoint, 0, 255).astype(np.uint8) + + expect( + node, + inputs=[X], + outputs=[Y, Y_Scale, Y_ZeroPoint], + name="test_dynamicquantizelinear", + ) + + # expected scale 0.0156862754 and zero point 255 + X = np.array([-1.0, -2.1, -1.3, -2.5, -3.34, -4.0]).astype(np.float32) + x_min = np.minimum(0, np.min(X)) + x_max = np.maximum(0, np.max(X)) + Y_Scale = np.float32((x_max - x_min) / (255 - 0)) # uint8 -> [0, 255] + Y_ZeroPoint = np.clip(round((0 - x_min) / Y_Scale), 0, 255).astype(np.uint8) + Y = np.clip(np.round(X / Y_Scale) + Y_ZeroPoint, 0, 255).astype(np.uint8) + + expect( + node, + inputs=[X], + outputs=[Y, Y_Scale, Y_ZeroPoint], + name="test_dynamicquantizelinear_max_adjusted", + ) + + X = ( + np.array([1, 2.1, 1.3, 2.5, 3.34, 4.0, 1.5, 2.6, 3.9, 4.0, 3.0, 2.345]) + .astype(np.float32) + .reshape((3, 4)) + ) + + # expected scale 0.0156862754 and zero point 0 + x_min = np.minimum(0, np.min(X)) + x_max = np.maximum(0, np.max(X)) + Y_Scale = np.float32((x_max - x_min) / (255 - 0)) # uint8 -> [0, 255] + Y_ZeroPoint = np.clip(round((0 - x_min) / Y_Scale), 0, 255).astype(np.uint8) + Y = np.clip(np.round(X / Y_Scale) + Y_ZeroPoint, 0, 255).astype(np.uint8) + + expect( + node, + inputs=[X], + outputs=[Y, Y_Scale, Y_ZeroPoint], + name="test_dynamicquantizelinear_min_adjusted", + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/einsum.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/einsum.py new file mode 100644 index 0000000000000000000000000000000000000000..88ebb5b53477fdc6b68ddee102ecf587fbe95b58 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/einsum.py @@ -0,0 +1,82 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +from typing import Tuple + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +def einsum_reference_implementation( + Eqn: str, Operands: Tuple[np.ndarray, ...] 
+) -> np.ndarray: + Z = np.einsum(Eqn, *Operands) + return Z + + +class Einsum(Base): + @staticmethod + def export_einsum_transpose() -> None: + Eqn = "ij->ji" + node = onnx.helper.make_node( + "Einsum", inputs=["x"], outputs=["y"], equation=Eqn + ) + + X = np.random.randn(3, 4) + Y = einsum_reference_implementation(Eqn, (X,)) + + expect(node, inputs=[X], outputs=[Y], name="test_einsum_transpose") + + @staticmethod + def export_einsum_sum() -> None: + Eqn = "ij->i" + node = onnx.helper.make_node( + "Einsum", inputs=["x"], outputs=["y"], equation=Eqn + ) + + X = np.random.randn(3, 4) + Z = einsum_reference_implementation(Eqn, (X,)) + + expect(node, inputs=[X], outputs=[Z], name="test_einsum_sum") + + @staticmethod + def export_einsum_batch_diagonal() -> None: + Eqn = "...ii ->...i" + node = onnx.helper.make_node( + "Einsum", inputs=["x"], outputs=["y"], equation=Eqn + ) + + X = np.random.randn(3, 5, 5) + Z = einsum_reference_implementation(Eqn, (X,)) + + expect(node, inputs=[X], outputs=[Z], name="test_einsum_batch_diagonal") + + @staticmethod + def export_einsum_inner_prod() -> None: + Eqn = "i,i" + node = onnx.helper.make_node( + "Einsum", inputs=["x", "y"], outputs=["z"], equation=Eqn + ) + + X = np.random.randn(5) + Y = np.random.randn(5) + Z = einsum_reference_implementation(Eqn, (X, Y)) + + expect(node, inputs=[X, Y], outputs=[Z], name="test_einsum_inner_prod") + + @staticmethod + def export_einsum_batch_matmul() -> None: + Eqn = "bij, bjk -> bik" + node = onnx.helper.make_node( + "Einsum", inputs=["x", "y"], outputs=["z"], equation=Eqn + ) + + X = np.random.randn(5, 2, 3) + Y = np.random.randn(5, 3, 4) + Z = einsum_reference_implementation(Eqn, (X, Y)) + + expect(node, inputs=[X, Y], outputs=[Z], name="test_einsum_batch_matmul") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/elu.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/elu.py new file mode 100644 index 0000000000000000000000000000000000000000..1a237dfc50073015029e84864a62fce2ea4ce493 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/elu.py @@ -0,0 +1,36 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class Elu(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node("Elu", inputs=["x"], outputs=["y"], alpha=2.0) + + x = np.array([-1, 0, 1]).astype(np.float32) + # expected output [-1.2642411, 0., 1.] 
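+        # ELU with alpha = 2.0: y = x for x > 0 and alpha * (exp(x) - 1) for
+        # x <= 0; the clip-based expression below is an equivalent,
+        # branch-free formulation of that piecewise definition.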
+ y = np.clip(x, 0, np.inf) + (np.exp(np.clip(x, -np.inf, 0)) - 1) * 2.0 + expect(node, inputs=[x], outputs=[y], name="test_elu_example") + + x = np.random.randn(3, 4, 5).astype(np.float32) + y = np.clip(x, 0, np.inf) + (np.exp(np.clip(x, -np.inf, 0)) - 1) * 2.0 + expect(node, inputs=[x], outputs=[y], name="test_elu") + + @staticmethod + def export_elu_default() -> None: + default_alpha = 1.0 + node = onnx.helper.make_node( + "Elu", + inputs=["x"], + outputs=["y"], + ) + x = np.random.randn(3, 4, 5).astype(np.float32) + y = np.clip(x, 0, np.inf) + (np.exp(np.clip(x, -np.inf, 0)) - 1) * default_alpha + expect(node, inputs=[x], outputs=[y], name="test_elu_default") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/equal.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/equal.py new file mode 100644 index 0000000000000000000000000000000000000000..ebb6abef27c2a334a4843b20c712b88b0d33a900 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/equal.py @@ -0,0 +1,61 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class Equal(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "Equal", + inputs=["x", "y"], + outputs=["z"], + ) + + x = (np.random.randn(3, 4, 5) * 10).astype(np.int32) + y = (np.random.randn(3, 4, 5) * 10).astype(np.int32) + z = np.equal(x, y) + expect(node, inputs=[x, y], outputs=[z], name="test_equal") + + @staticmethod + def export_equal_broadcast() -> None: + node = onnx.helper.make_node( + "Equal", + inputs=["x", "y"], + outputs=["z"], + ) + + x = (np.random.randn(3, 4, 5) * 10).astype(np.int32) + y = (np.random.randn(5) * 10).astype(np.int32) + z = np.equal(x, y) + expect(node, inputs=[x, y], outputs=[z], name="test_equal_bcast") + + @staticmethod + def export_equal_string() -> None: + node = onnx.helper.make_node( + "Equal", + inputs=["x", "y"], + outputs=["z"], + ) + x = np.array(["string1", "string2"], dtype=np.dtype(object)) + y = np.array(["string1", "string3"], dtype=np.dtype(object)) + z = np.equal(x, y) + expect(node, inputs=[x, y], outputs=[z], name="test_equal_string") + + @staticmethod + def export_equal_string_broadcast() -> None: + node = onnx.helper.make_node( + "Equal", + inputs=["x", "y"], + outputs=["z"], + ) + x = np.array(["string1", "string2"], dtype=np.dtype(object)) + y = np.array(["string1"], dtype=np.dtype(object)) + z = np.equal(x, y) + expect(node, inputs=[x, y], outputs=[z], name="test_equal_string_broadcast") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/erf.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/erf.py new file mode 100644 index 0000000000000000000000000000000000000000..f2c4766aa3e904c82dc04900b9964622a098d4ff --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/erf.py @@ -0,0 +1,25 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import math + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class Erf(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "Erf", + inputs=["x"], + outputs=["y"], + ) + + x = np.random.randn(1, 3, 32, 32).astype(np.float32) + y = np.vectorize(math.erf)(x).astype(np.float32) + expect(node, inputs=[x], outputs=[y], name="test_erf") diff --git 
a/MLPY/Lib/site-packages/onnx/backend/test/case/node/exp.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/exp.py new file mode 100644 index 0000000000000000000000000000000000000000..3e2031dc3290b8c1e5e852177330e901e10445a9 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/exp.py @@ -0,0 +1,27 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class Exp(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "Exp", + inputs=["x"], + outputs=["y"], + ) + + x = np.array([-1, 0, 1]).astype(np.float32) + y = np.exp(x) # expected output [0.36787945, 1., 2.71828175] + expect(node, inputs=[x], outputs=[y], name="test_exp_example") + + x = np.random.randn(3, 4, 5).astype(np.float32) + y = np.exp(x) + expect(node, inputs=[x], outputs=[y], name="test_exp") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/expand.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/expand.py new file mode 100644 index 0000000000000000000000000000000000000000..c86f94f6c692772339c36cb421c046efde311ca7 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/expand.py @@ -0,0 +1,65 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class Expand(Base): + @staticmethod + def export_dim_changed() -> None: + node = onnx.helper.make_node( + "Expand", + inputs=["data", "new_shape"], + outputs=["expanded"], + ) + shape = [3, 1] + data = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape) + # print(data) + # [[1.], [2.], [3.]] + new_shape = [2, 1, 6] + expanded = data * np.ones(new_shape, dtype=np.float32) + # print(expanded) + # [[[1., 1., 1., 1., 1., 1.], + # [2., 2., 2., 2., 2., 2.], + # [3., 3., 3., 3., 3., 3.]], + # + # [[1., 1., 1., 1., 1., 1.], + # [2., 2., 2., 2., 2., 2.], + # [3., 3., 3., 3., 3., 3.]]] + new_shape = np.array(new_shape, dtype=np.int64) + expect( + node, + inputs=[data, new_shape], + outputs=[expanded], + name="test_expand_dim_changed", + ) + + @staticmethod + def export_dim_unchanged() -> None: + node = onnx.helper.make_node( + "Expand", + inputs=["data", "new_shape"], + outputs=["expanded"], + ) + shape = [3, 1] + new_shape = [3, 4] + data = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape) + # print(data) + # [[1.], [2.], [3.]] + expanded = np.tile(data, 4) + # print(expanded) + # [[1., 1., 1., 1.], + # [2., 2., 2., 2.], + # [3., 3., 3., 3.]] + new_shape = np.array(new_shape, dtype=np.int64) + expect( + node, + inputs=[data, new_shape], + outputs=[expanded], + name="test_expand_dim_unchanged", + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/eyelike.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/eyelike.py new file mode 100644 index 0000000000000000000000000000000000000000..355f808ae2f6caf8d65d7e363f6dfd6e6a8f7135 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/eyelike.py @@ -0,0 +1,59 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class EyeLike(Base): + @staticmethod + def export_without_dtype() -> None: + shape = (4, 
4) + node = onnx.helper.make_node( + "EyeLike", + inputs=["x"], + outputs=["y"], + ) + + x = np.random.randint(0, 100, size=shape, dtype=np.int32) + y = np.eye(shape[0], shape[1], dtype=np.int32) + expect(node, inputs=[x], outputs=[y], name="test_eyelike_without_dtype") + + @staticmethod + def export_with_dtype() -> None: + shape = (3, 4) + node = onnx.helper.make_node( + "EyeLike", + inputs=["x"], + outputs=["y"], + dtype=onnx.TensorProto.DOUBLE, + ) + + x = np.random.randint(0, 100, size=shape, dtype=np.int32) + y = np.eye(shape[0], shape[1], dtype=np.float64) + expect(node, inputs=[x], outputs=[y], name="test_eyelike_with_dtype") + + @staticmethod + def export_populate_off_main_diagonal() -> None: + shape = (4, 5) + off_diagonal_offset = 1 + node = onnx.helper.make_node( + "EyeLike", + inputs=["x"], + outputs=["y"], + k=off_diagonal_offset, + dtype=onnx.TensorProto.FLOAT, + ) + + x = np.random.randint(0, 100, size=shape, dtype=np.int32) + y = np.eye(shape[0], shape[1], k=off_diagonal_offset, dtype=np.float32) + expect( + node, + inputs=[x], + outputs=[y], + name="test_eyelike_populate_off_main_diagonal", + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/flatten.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/flatten.py new file mode 100644 index 0000000000000000000000000000000000000000..7fe585a3fc9d369b6f3b1fa49cec2aa1cd6fd062 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/flatten.py @@ -0,0 +1,64 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class Flatten(Base): + @staticmethod + def export() -> None: + shape = (2, 3, 4, 5) + a = np.random.random_sample(shape).astype(np.float32) + + for i in range(len(shape)): + node = onnx.helper.make_node( + "Flatten", + inputs=["a"], + outputs=["b"], + axis=i, + ) + + new_shape = (1, -1) if i == 0 else (np.prod(shape[0:i]).astype(int), -1) + b = np.reshape(a, new_shape) + expect(node, inputs=[a], outputs=[b], name="test_flatten_axis" + str(i)) + + @staticmethod + def export_flatten_with_default_axis() -> None: + node = onnx.helper.make_node( + "Flatten", + inputs=["a"], + outputs=["b"], # Default value for axis: axis=1 + ) + + shape = (5, 4, 3, 2) + a = np.random.random_sample(shape).astype(np.float32) + new_shape = (5, 24) + b = np.reshape(a, new_shape) + expect(node, inputs=[a], outputs=[b], name="test_flatten_default_axis") + + @staticmethod + def export_flatten_negative_axis() -> None: + shape = (2, 3, 4, 5) + a = np.random.random_sample(shape).astype(np.float32) + + for i in range(-len(shape), 0): + node = onnx.helper.make_node( + "Flatten", + inputs=["a"], + outputs=["b"], + axis=i, + ) + + new_shape = (np.prod(shape[0:i]).astype(int), -1) + b = np.reshape(a, new_shape) + expect( + node, + inputs=[a], + outputs=[b], + name="test_flatten_negative_axis" + str(abs(i)), + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/floor.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/floor.py new file mode 100644 index 0000000000000000000000000000000000000000..440b6ea8c446a3409550afac53739d3b72d5a255 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/floor.py @@ -0,0 +1,27 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + 
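+# Floor rounds each element toward negative infinity (e.g. floor(-1.5) == -2.0),
+# which differs from truncation toward zero.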
+ +class Floor(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "Floor", + inputs=["x"], + outputs=["y"], + ) + + x = np.array([-1.5, 1.2, 2]).astype(np.float32) + y = np.floor(x) # expected output [-2., 1., 2.] + expect(node, inputs=[x], outputs=[y], name="test_floor_example") + + x = np.random.randn(3, 4, 5).astype(np.float32) + y = np.floor(x) + expect(node, inputs=[x], outputs=[y], name="test_floor") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/gather.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/gather.py new file mode 100644 index 0000000000000000000000000000000000000000..cad3edbc3273d75299bf34fb0932871a0d08ed55 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/gather.py @@ -0,0 +1,90 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class Gather(Base): + @staticmethod + def export_gather_0() -> None: + node = onnx.helper.make_node( + "Gather", + inputs=["data", "indices"], + outputs=["y"], + axis=0, + ) + data = np.random.randn(5, 4, 3, 2).astype(np.float32) + indices = np.array([0, 1, 3]) + y = np.take(data, indices, axis=0) + + expect( + node, + inputs=[data, indices.astype(np.int64)], + outputs=[y], + name="test_gather_0", + ) + + @staticmethod + def export_gather_1() -> None: + node = onnx.helper.make_node( + "Gather", + inputs=["data", "indices"], + outputs=["y"], + axis=1, + ) + data = np.random.randn(5, 4, 3, 2).astype(np.float32) + indices = np.array([0, 1, 3]) + y = np.take(data, indices, axis=1) + + expect( + node, + inputs=[data, indices.astype(np.int64)], + outputs=[y], + name="test_gather_1", + ) + + @staticmethod + def export_gather_2d_indices() -> None: + node = onnx.helper.make_node( + "Gather", + inputs=["data", "indices"], + outputs=["y"], + axis=1, + ) + data = np.random.randn(3, 3).astype(np.float32) + indices = np.array([[0, 2]]) + y = np.take(data, indices, axis=1) + + expect( + node, + inputs=[data, indices.astype(np.int64)], + outputs=[y], + name="test_gather_2d_indices", + ) + + @staticmethod + def export_gather_negative_indices() -> None: + node = onnx.helper.make_node( + "Gather", + inputs=["data", "indices"], + outputs=["y"], + axis=0, + ) + data = np.arange(10).astype(np.float32) + indices = np.array([0, -9, -10]) + y = np.take(data, indices, axis=0) + + # print(y) + # [0. 1. 0.] 
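+        # Negative indices count back from the end of the axis: for this
+        # length-10 axis, -9 resolves to index 1 and -10 to index 0, matching
+        # the np.take result printed above.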
+
+        expect(
+            node,
+            inputs=[data, indices.astype(np.int64)],
+            outputs=[y],
+            name="test_gather_negative_indices",
+        )
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/gatherelements.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/gatherelements.py
new file mode 100644
index 0000000000000000000000000000000000000000..57982d6943b02f483e7c2a0f5bab7840cdbf1b75
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/gatherelements.py
@@ -0,0 +1,92 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+# This NumPy implementation of GatherElements is adapted from https://stackoverflow.com/a/46204790/11767360
+def gather_elements(data, indices, axis=0):  # type: ignore
+    data_swapped = np.swapaxes(data, 0, axis)
+    index_swapped = np.swapaxes(indices, 0, axis)
+    gathered = np.choose(index_swapped, data_swapped, mode="wrap")
+    y = np.swapaxes(gathered, 0, axis)
+    return y
+
+
+class GatherElements(Base):
+    @staticmethod
+    def export_gather_elements_0() -> None:
+        axis = 1
+        node = onnx.helper.make_node(
+            "GatherElements",
+            inputs=["data", "indices"],
+            outputs=["y"],
+            axis=axis,
+        )
+        data = np.array([[1, 2], [3, 4]], dtype=np.float32)
+        indices = np.array([[0, 0], [1, 0]], dtype=np.int32)
+
+        y = gather_elements(data, indices, axis)
+        # print(y) produces
+        # [[1, 1],
+        #  [4, 3]]
+
+        expect(
+            node,
+            inputs=[data, indices.astype(np.int64)],
+            outputs=[y],
+            name="test_gather_elements_0",
+        )
+
+    @staticmethod
+    def export_gather_elements_1() -> None:
+        axis = 0
+        node = onnx.helper.make_node(
+            "GatherElements",
+            inputs=["data", "indices"],
+            outputs=["y"],
+            axis=axis,
+        )
+        data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
+        indices = np.array([[1, 2, 0], [2, 0, 0]], dtype=np.int32)
+
+        y = gather_elements(data, indices, axis)
+        # print(y) produces
+        # [[4, 8, 3],
+        #  [7, 2, 3]]
+
+        expect(
+            node,
+            inputs=[data, indices.astype(np.int64)],
+            outputs=[y],
+            name="test_gather_elements_1",
+        )
+
+    @staticmethod
+    def export_gather_elements_negative_indices() -> None:
+        axis = 0
+        node = onnx.helper.make_node(
+            "GatherElements",
+            inputs=["data", "indices"],
+            outputs=["y"],
+            axis=axis,
+        )
+        data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
+        indices = np.array([[-1, -2, 0], [-2, 0, 0]], dtype=np.int32)
+
+        y = gather_elements(data, indices, axis)
+        # print(y) produces
+        # [[7, 5, 3],
+        #  [4, 2, 3]]
+
+        expect(
+            node,
+            inputs=[data, indices.astype(np.int64)],
+            outputs=[y],
+            name="test_gather_elements_negative_indices",
+        )
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/gathernd.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/gathernd.py
new file mode 100644
index 0000000000000000000000000000000000000000..0570f339809dddbf21ced7b09bd26887d0d67f66
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/gathernd.py
@@ -0,0 +1,120 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+def gather_nd_impl(
+    data: np.ndarray, indices: np.ndarray, batch_dims: int
+) -> np.ndarray:
+    # Note the data rank; it is reused multiple times below
+    data_rank = len(data.shape)
+
+    # Check the input tensors' shape/rank condition
+    assert indices.shape[-1] <= data_rank
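+
+    # GatherND interprets the last axis of 'indices' as coordinate tuples into
+    # 'data' (after skipping the leading 'batch_dims' axes). For example, with
+    # batch_dims=0, data of shape (2, 2) and indices [[0, 0], [1, 1]], the
+    # output is [data[0, 0], data[1, 1]].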
+
+    # The list of data/indices shapes over the batch_dims axes
+    batch_dims_shape = []
+
+    # The number of elements in the batch_dims for the data/indices arrays
+    batch_dims_size = 1
+
+    # Check that the shapes of indices and data are identical over the batch dims.
+    for i in range(batch_dims):
+        batch_dims_shape.append(indices.shape[i])
+        batch_dims_size *= indices.shape[i]
+
+    # Compute the output of the op as below
+
+    # Compute the shape of the output array
+    output_shape = (
+        batch_dims_shape + list(indices.shape)[batch_dims:-1]
+        if (indices.shape[-1] == data_rank - batch_dims)
+        else batch_dims_shape
+        + list(indices.shape)[batch_dims:-1]
+        + list(data.shape)[batch_dims + indices.shape[-1] :]
+    )
+
+    # Placeholder for the output data
+    output_data_buffer = []
+
+    # Flatten 'indices' to a 2D array
+    reshaped_indices = indices.reshape(batch_dims_size, -1, indices.shape[-1])
+
+    # Flatten 'data' to an array of shape (batch_dims_size, data.shape[batch_dims:])
+    reshaped_data = data.reshape((batch_dims_size,) + data.shape[batch_dims:])
+
+    # Gather each scalar value from 'data'
+    for batch_dim in range(reshaped_indices.shape[0]):
+        for outer_dim in range(reshaped_indices.shape[1]):
+            gather_index = tuple(reshaped_indices[batch_dim][outer_dim])
+            output_data_buffer.append(reshaped_data[(batch_dim, *gather_index)])
+    return np.asarray(output_data_buffer, dtype=data.dtype).reshape(output_shape)
+
+
+class GatherND(Base):
+    @staticmethod
+    def export_int32() -> None:
+        node = onnx.helper.make_node(
+            "GatherND",
+            inputs=["data", "indices"],
+            outputs=["output"],
+        )
+
+        data = np.array([[0, 1], [2, 3]], dtype=np.int32)
+        indices = np.array([[0, 0], [1, 1]], dtype=np.int64)
+        output = gather_nd_impl(data, indices, 0)
+        expected_output = np.array([0, 3], dtype=np.int32)
+        assert np.array_equal(output, expected_output)
+        expect(
+            node,
+            inputs=[data, indices],
+            outputs=[output],
+            name="test_gathernd_example_int32",
+        )
+
+    @staticmethod
+    def export_float32() -> None:
+        node = onnx.helper.make_node(
+            "GatherND",
+            inputs=["data", "indices"],
+            outputs=["output"],
+        )
+
+        data = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]], dtype=np.float32)
+        indices = np.array([[[0, 1]], [[1, 0]]], dtype=np.int64)
+        output = gather_nd_impl(data, indices, 0)
+        expected_output = np.array([[[2, 3]], [[4, 5]]], dtype=np.float32)
+        assert np.array_equal(output, expected_output)
+        expect(
+            node,
+            inputs=[data, indices],
+            outputs=[output],
+            name="test_gathernd_example_float32",
+        )
+
+    @staticmethod
+    def export_int32_batchdim_1() -> None:
+        node = onnx.helper.make_node(
+            "GatherND",
+            inputs=["data", "indices"],
+            outputs=["output"],
+            batch_dims=1,
+        )
+
+        data = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]], dtype=np.int32)
+        indices = np.array([[1], [0]], dtype=np.int64)
+        output = gather_nd_impl(data, indices, 1)
+        expected_output = np.array([[2, 3], [4, 5]], dtype=np.int32)
+        assert np.array_equal(output, expected_output)
+        expect(
+            node,
+            inputs=[data, indices],
+            outputs=[output],
+            name="test_gathernd_example_int32_batch_dim1",
+        )
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/gelu.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/gelu.py
new file mode 100644
index 0000000000000000000000000000000000000000..6066dd204ceec05721ef7309882738319f01dea4
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/gelu.py
@@ -0,0 +1,51 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import math
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class Gelu(Base):
+    @staticmethod
+    def export_gelu_tanh() -> None:
+        node = onnx.helper.make_node(
+            "Gelu", inputs=["x"], outputs=["y"], approximate="tanh"
+        )
+
+        x = np.array([-1, 0, 1]).astype(np.float32)
+        # expected output [-0.158808, 0., 0.841192]
+        y = (
+            0.5
+            * x
+            * (1 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))
+        ).astype(np.float32)
+        expect(node, inputs=[x], outputs=[y], name="test_gelu_tanh_1")
+
+        x = np.random.randn(3, 4, 5).astype(np.float32)
+        y = (
+            0.5
+            * x
+            * (1 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))
+        ).astype(np.float32)
+        expect(node, inputs=[x], outputs=[y], name="test_gelu_tanh_2")
+
+    @staticmethod
+    def export_gelu_default() -> None:
+        node = onnx.helper.make_node("Gelu", inputs=["x"], outputs=["y"])
+
+        x = np.array([-1, 0, 1]).astype(np.float32)
+        # expected output [-0.15865526, 0., 0.84134474]
+        y = (0.5 * x * (1 + np.vectorize(math.erf)(x / np.sqrt(2)))).astype(np.float32)
+        expect(node, inputs=[x], outputs=[y], name="test_gelu_default_1")
+
+        x = np.random.randn(3, 4, 5).astype(np.float32)
+        y = (0.5 * x * (1 + np.vectorize(math.erf)(x / np.sqrt(2)))).astype(np.float32)
+        expect(node, inputs=[x], outputs=[y], name="test_gelu_default_2")
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/gemm.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/gemm.py
new file mode 100644
index 0000000000000000000000000000000000000000..8bc739e406a3f71510536bb860c9023ddab9e34f
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/gemm.py
@@ -0,0 +1,158 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from typing import Optional
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+def gemm_reference_implementation(
+    A: np.ndarray,
+    B: np.ndarray,
+    C: Optional[np.ndarray] = None,
+    alpha: float = 1.0,
+    beta: float = 1.0,
+    transA: int = 0,
+    transB: int = 0,
+) -> np.ndarray:
+    A = A if transA == 0 else A.T
+    B = B if transB == 0 else B.T
+    C = C if C is not None else np.array(0)
+
+    Y = alpha * np.dot(A, B) + beta * C
+
+    return Y
+
+
+class Gemm(Base):
+    @staticmethod
+    def export_default_zero_bias() -> None:
+        node = onnx.helper.make_node("Gemm", inputs=["a", "b", "c"], outputs=["y"])
+        a = np.random.ranf([3, 5]).astype(np.float32)
+        b = np.random.ranf([5, 4]).astype(np.float32)
+        c = np.zeros([1, 4]).astype(np.float32)
+        y = gemm_reference_implementation(a, b, c)
+        expect(node, inputs=[a, b, c], outputs=[y], name="test_gemm_default_zero_bias")
+
+    @staticmethod
+    def export_default_no_bias() -> None:
+        node = onnx.helper.make_node("Gemm", inputs=["a", "b"], outputs=["y"])
+        a = np.random.ranf([2, 10]).astype(np.float32)
+        b = np.random.ranf([10, 3]).astype(np.float32)
+        y = gemm_reference_implementation(a, b)
+        expect(node, inputs=[a, b], outputs=[y], name="test_gemm_default_no_bias")
+
+    @staticmethod
+    def export_default_scalar_bias() -> None:
+        node = onnx.helper.make_node("Gemm", inputs=["a", "b", "c"], outputs=["y"])
+        a = np.random.ranf([2, 3]).astype(np.float32)
+        b = np.random.ranf([3, 4]).astype(np.float32)
+        c = np.array(3.14).astype(np.float32)
+        y = gemm_reference_implementation(a, b, c)
+        expect(
+            node, inputs=[a, b, c], outputs=[y], name="test_gemm_default_scalar_bias"
+        )
+
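+    # Note: ONNX Gemm computes Y = alpha * A' * B' + beta * C, where C is
+    # unidirectionally broadcast to the (M, N) shape of A' * B'; the bias
+    # variants below (single-element vector, row vector, full matrix) all
+    # exercise that broadcast.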
@staticmethod + def export_default_single_elem_vector_bias() -> None: + node = onnx.helper.make_node("Gemm", inputs=["a", "b", "c"], outputs=["y"]) + a = np.random.ranf([3, 7]).astype(np.float32) + b = np.random.ranf([7, 3]).astype(np.float32) + c = np.random.ranf([1]).astype(np.float32) + y = gemm_reference_implementation(a, b, c) + expect( + node, + inputs=[a, b, c], + outputs=[y], + name="test_gemm_default_single_elem_vector_bias", + ) + + @staticmethod + def export_default_vector_bias() -> None: + node = onnx.helper.make_node("Gemm", inputs=["a", "b", "c"], outputs=["y"]) + a = np.random.ranf([2, 7]).astype(np.float32) + b = np.random.ranf([7, 4]).astype(np.float32) + c = np.random.ranf([1, 4]).astype(np.float32) + y = gemm_reference_implementation(a, b, c) + expect( + node, inputs=[a, b, c], outputs=[y], name="test_gemm_default_vector_bias" + ) + + @staticmethod + def export_default_matrix_bias() -> None: + node = onnx.helper.make_node("Gemm", inputs=["a", "b", "c"], outputs=["y"]) + a = np.random.ranf([3, 6]).astype(np.float32) + b = np.random.ranf([6, 4]).astype(np.float32) + c = np.random.ranf([3, 4]).astype(np.float32) + y = gemm_reference_implementation(a, b, c) + expect( + node, inputs=[a, b, c], outputs=[y], name="test_gemm_default_matrix_bias" + ) + + @staticmethod + def export_transposeA() -> None: + node = onnx.helper.make_node( + "Gemm", inputs=["a", "b", "c"], outputs=["y"], transA=1 + ) + a = np.random.ranf([6, 3]).astype(np.float32) + b = np.random.ranf([6, 4]).astype(np.float32) + c = np.zeros([1, 4]).astype(np.float32) + y = gemm_reference_implementation(a, b, c, transA=1) + expect(node, inputs=[a, b, c], outputs=[y], name="test_gemm_transposeA") + + @staticmethod + def export_transposeB() -> None: + node = onnx.helper.make_node( + "Gemm", inputs=["a", "b", "c"], outputs=["y"], transB=1 + ) + a = np.random.ranf([3, 6]).astype(np.float32) + b = np.random.ranf([4, 6]).astype(np.float32) + c = np.zeros([1, 4]).astype(np.float32) + y = gemm_reference_implementation(a, b, c, transB=1) + expect(node, inputs=[a, b, c], outputs=[y], name="test_gemm_transposeB") + + @staticmethod + def export_alpha() -> None: + node = onnx.helper.make_node( + "Gemm", inputs=["a", "b", "c"], outputs=["y"], alpha=0.5 + ) + a = np.random.ranf([3, 5]).astype(np.float32) + b = np.random.ranf([5, 4]).astype(np.float32) + c = np.zeros([1, 4]).astype(np.float32) + y = gemm_reference_implementation(a, b, c, alpha=0.5) + expect(node, inputs=[a, b, c], outputs=[y], name="test_gemm_alpha") + + @staticmethod + def export_beta() -> None: + node = onnx.helper.make_node( + "Gemm", inputs=["a", "b", "c"], outputs=["y"], beta=0.5 + ) + a = np.random.ranf([2, 7]).astype(np.float32) + b = np.random.ranf([7, 4]).astype(np.float32) + c = np.random.ranf([1, 4]).astype(np.float32) + y = gemm_reference_implementation(a, b, c, beta=0.5) + expect(node, inputs=[a, b, c], outputs=[y], name="test_gemm_beta") + + @staticmethod + def export_all_attributes() -> None: + node = onnx.helper.make_node( + "Gemm", + inputs=["a", "b", "c"], + outputs=["y"], + alpha=0.25, + beta=0.35, + transA=1, + transB=1, + ) + a = np.random.ranf([4, 3]).astype(np.float32) + b = np.random.ranf([5, 4]).astype(np.float32) + c = np.random.ranf([1, 5]).astype(np.float32) + y = gemm_reference_implementation( + a, b, c, transA=1, transB=1, alpha=0.25, beta=0.35 + ) + expect(node, inputs=[a, b, c], outputs=[y], name="test_gemm_all_attributes") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/globalaveragepool.py 
b/MLPY/Lib/site-packages/onnx/backend/test/case/node/globalaveragepool.py new file mode 100644 index 0000000000000000000000000000000000000000..3a1730f88b283460b23ca483d9e0bbc944a44146 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/globalaveragepool.py @@ -0,0 +1,43 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class GlobalAveragePool(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "GlobalAveragePool", + inputs=["x"], + outputs=["y"], + ) + x = np.random.randn(1, 3, 5, 5).astype(np.float32) + y = np.mean(x, axis=tuple(range(2, np.ndim(x))), keepdims=True) + expect(node, inputs=[x], outputs=[y], name="test_globalaveragepool") + + @staticmethod + def export_globalaveragepool_precomputed() -> None: + node = onnx.helper.make_node( + "GlobalAveragePool", + inputs=["x"], + outputs=["y"], + ) + x = np.array( + [ + [ + [ + [1, 2, 3], + [4, 5, 6], + [7, 8, 9], + ] + ] + ] + ).astype(np.float32) + y = np.array([[[[5]]]]).astype(np.float32) + expect(node, inputs=[x], outputs=[y], name="test_globalaveragepool_precomputed") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/globalmaxpool.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/globalmaxpool.py new file mode 100644 index 0000000000000000000000000000000000000000..bff8ef65dd9fc0461fe31566d9832ebd83eb5bc2 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/globalmaxpool.py @@ -0,0 +1,43 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class GlobalMaxPool(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "GlobalMaxPool", + inputs=["x"], + outputs=["y"], + ) + x = np.random.randn(1, 3, 5, 5).astype(np.float32) + y = np.max(x, axis=tuple(range(2, np.ndim(x))), keepdims=True) + expect(node, inputs=[x], outputs=[y], name="test_globalmaxpool") + + @staticmethod + def export_globalmaxpool_precomputed() -> None: + node = onnx.helper.make_node( + "GlobalMaxPool", + inputs=["x"], + outputs=["y"], + ) + x = np.array( + [ + [ + [ + [1, 2, 3], + [4, 5, 6], + [7, 8, 9], + ] + ] + ] + ).astype(np.float32) + y = np.array([[[[9]]]]).astype(np.float32) + expect(node, inputs=[x], outputs=[y], name="test_globalmaxpool_precomputed") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/greater.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/greater.py new file mode 100644 index 0000000000000000000000000000000000000000..7241dde8e2bfc14fe8689f7ed77520084e7b2810 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/greater.py @@ -0,0 +1,37 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class Greater(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "Greater", + inputs=["x", "y"], + outputs=["greater"], + ) + + x = np.random.randn(3, 4, 5).astype(np.float32) + y = np.random.randn(3, 4, 5).astype(np.float32) + z = np.greater(x, y) + expect(node, inputs=[x, y], outputs=[z], name="test_greater") + + @staticmethod + def export_greater_broadcast() -> None: + node = 
onnx.helper.make_node( + "Greater", + inputs=["x", "y"], + outputs=["greater"], + ) + + x = np.random.randn(3, 4, 5).astype(np.float32) + y = np.random.randn(5).astype(np.float32) + z = np.greater(x, y) + expect(node, inputs=[x, y], outputs=[z], name="test_greater_bcast") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/greater_equal.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/greater_equal.py new file mode 100644 index 0000000000000000000000000000000000000000..d1e174f1a6fcc33cccc575d11d7440590e69aed0 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/greater_equal.py @@ -0,0 +1,37 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class Greater(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "GreaterOrEqual", + inputs=["x", "y"], + outputs=["greater_equal"], + ) + + x = np.random.randn(3, 4, 5).astype(np.float32) + y = np.random.randn(3, 4, 5).astype(np.float32) + z = np.greater_equal(x, y) + expect(node, inputs=[x, y], outputs=[z], name="test_greater_equal") + + @staticmethod + def export_greater_broadcast() -> None: + node = onnx.helper.make_node( + "GreaterOrEqual", + inputs=["x", "y"], + outputs=["greater_equal"], + ) + + x = np.random.randn(3, 4, 5).astype(np.float32) + y = np.random.randn(5).astype(np.float32) + z = np.greater_equal(x, y) + expect(node, inputs=[x, y], outputs=[z], name="test_greater_equal_bcast") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/gridsample.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/gridsample.py new file mode 100644 index 0000000000000000000000000000000000000000..f83e20e013542015be6564064746ec71f7a2ac82 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/gridsample.py @@ -0,0 +1,641 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class GridSample(Base): + @staticmethod + def export_gridsample() -> None: + node = onnx.helper.make_node( + "GridSample", + inputs=["X", "Grid"], + outputs=["Y"], + mode="linear", + padding_mode="zeros", + align_corners=0, + ) + # X shape, [N, C, H, W] - [1, 1, 4, 4] + X = np.array( + [ + [ + [ + [0.0, 1.0, 2.0, 3.0], + [4.0, 5.0, 6.0, 7.0], + [8.0, 9.0, 10.0, 11.0], + [12.0, 13.0, 14.0, 15.0], + ] + ] + ], + dtype=np.float32, + ) + # Grid shape, [N, H_out, W_out, 2] - [1, 6, 6, 2] + Grid = np.array( + [ + [ + [ + [-1.0000, -1.0000], + [-0.6000, -1.0000], + [-0.2000, -1.0000], + [0.2000, -1.0000], + [0.6000, -1.0000], + [1.0000, -1.0000], + ], + [ + [-1.0000, -0.6000], + [-0.6000, -0.6000], + [-0.2000, -0.6000], + [0.2000, -0.6000], + [0.6000, -0.6000], + [1.0000, -0.6000], + ], + [ + [-1.0000, -0.2000], + [-0.6000, -0.2000], + [-0.2000, -0.2000], + [0.2000, -0.2000], + [0.6000, -0.2000], + [1.0000, -0.2000], + ], + [ + [-1.0000, 0.2000], + [-0.6000, 0.2000], + [-0.2000, 0.2000], + [0.2000, 0.2000], + [0.6000, 0.2000], + [1.0000, 0.2000], + ], + [ + [-1.0000, 0.6000], + [-0.6000, 0.6000], + [-0.2000, 0.6000], + [0.2000, 0.6000], + [0.6000, 0.6000], + [1.0000, 0.6000], + ], + [ + [-1.0000, 1.0000], + [-0.6000, 1.0000], + [-0.2000, 1.0000], + [0.2000, 1.0000], + [0.6000, 1.0000], + [1.0000, 1.0000], + ], + ] + ], + dtype=np.float32, + ) + # Y shape, [N, C, 
H_out, W_out] - [1, 1, 6, 6] + Y = np.array( + [ + [ + [ + [0.0000, 0.1500, 0.5500, 0.9500, 1.3500, 0.7500], + [0.6000, 1.5000, 2.3000, 3.1000, 3.9000, 2.1000], + [2.2000, 4.7000, 5.5000, 6.3000, 7.1000, 3.7000], + [3.8000, 7.9000, 8.7000, 9.5000, 10.3000, 5.3000], + [5.4000, 11.1000, 11.9000, 12.7000, 13.5000, 6.9000], + [3.0000, 6.1500, 6.5500, 6.9500, 7.3500, 3.7500], + ] + ] + ], + dtype=np.float32, + ) + expect(node, inputs=[X, Grid], outputs=[Y], name="test_gridsample") + + @staticmethod + def export_gridsample_paddingmode() -> None: + # X shape, [N, C, H, W] - [1, 1, 3, 2] + X = np.array( + [[[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]]]], + dtype=np.float32, + ) + # Grid shape, [N, H_out, W_out, 2] - [1, 2, 4, 2] + Grid = np.array( + [ + [ + [ + [-10.0000, -10.0000], + [-5.0000, -5.0000], + [-0.2000, -0.2000], + [10.0000, 10.0000], + ], + [ + [10.0000, 10.0000], + [-0.2000, -0.2000], + [5.0000, 5.0000], + [10.0000, 10.0000], + ], + ] + ], + dtype=np.float32, + ) + + # setting padding_mode = 'zeros' + node = onnx.helper.make_node( + "GridSample", + inputs=["X", "Grid"], + outputs=["Y"], + padding_mode="zeros", + ) + # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4] + Y_zeros = np.array( + [[[[0.0000, 0.0000, 1.7000, 0.0000], [0.0000, 1.7000, 0.0000, 0.0000]]]], + dtype=np.float32, + ) + + expect( + node, + inputs=[X, Grid], + outputs=[Y_zeros], + name="test_gridsample_zeros_padding", + ) + + # setting padding_mode = 'border' + node = onnx.helper.make_node( + "GridSample", + inputs=["X", "Grid"], + outputs=["Y"], + padding_mode="border", + ) + # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4] + Y_border = np.array( + [[[[0.0000, 0.0000, 1.7000, 5.0000], [5.0000, 1.7000, 5.0000, 5.0000]]]], + dtype=np.float32, + ) + + expect( + node, + inputs=[X, Grid], + outputs=[Y_border], + name="test_gridsample_border_padding", + ) + + # setting padding_mode = 'reflection' + node = onnx.helper.make_node( + "GridSample", + inputs=["X", "Grid"], + outputs=["Y"], + padding_mode="reflection", + ) + # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4] + Y_reflection = np.array( + [[[[2.5000, 0.0000, 1.7000, 2.5000], [2.5000, 1.7000, 5.0000, 2.5000]]]], + dtype=np.float32, + ) + + expect( + node, + inputs=[X, Grid], + outputs=[Y_reflection], + name="test_gridsample_reflection_padding", + ) + + @staticmethod + def export_gridsample_mode_aligncorners() -> None: + # X shape, [N, C, H, W] - [1, 1, 3, 2] + X = np.array( + [[[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]]]], + dtype=np.float32, + ) + # Grid shape, [N, H_out, W_out, 2] - [1, 2, 4, 2] + Grid = np.array( + [ + [ + [ + [-1.0000, -1.0000], + [-0.5000, -0.5000], + [-0.2000, -0.2000], + [0.0000, 0.0000], + ], + [ + [0.0000, 0.0000], + [-0.2000, -0.2000], + [0.5000, 0.5000], + [1.0000, 1.0000], + ], + ] + ], + dtype=np.float32, + ) + + # setting mode = 'bilinear', default align_corners = 0 + node = onnx.helper.make_node( + "GridSample", + inputs=["X", "Grid"], + outputs=["Y"], + mode="linear", + ) + # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4] + Y_bilinear = np.array( + [[[[0.0000, 0.5000, 1.7000, 2.5000], [2.5000, 1.7000, 4.5000, 1.2500]]]], + dtype=np.float32, + ) + + expect( + node, + inputs=[X, Grid], + outputs=[Y_bilinear], + name="test_gridsample_bilinear", + ) + + # setting mode = 'bilinear', align_corners = 1 + node = onnx.helper.make_node( + "GridSample", + inputs=["X", "Grid"], + outputs=["Y"], + mode="linear", + align_corners=1, + ) + # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4] + Y_align_corners = np.array( + [[[[0.0000, 1.2500, 2.0000, 2.5000], [2.5000, 2.0000, 
3.7500, 5.0000]]]], + dtype=np.float32, + ) + + expect( + node, + inputs=[X, Grid], + outputs=[Y_align_corners], + name="test_gridsample_aligncorners_true", + ) + + # setting mode = 'nearest' + node = onnx.helper.make_node( + "GridSample", + inputs=["X", "Grid"], + outputs=["Y"], + mode="nearest", + ) + # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4] + Y_nearest = np.array( + [[[[0.0, 0.0, 2.0, 2.0], [2.0, 2.0, 5.0, 0.0]]]], + dtype=np.float32, + ) + + expect( + node, inputs=[X, Grid], outputs=[Y_nearest], name="test_gridsample_nearest" + ) + + # setting mode = 'bicubic' + node = onnx.helper.make_node( + "GridSample", + inputs=["X", "Grid"], + outputs=["Y"], + mode="cubic", + ) + # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4] + Y_bicubic = np.array( + [[[[-0.1406, 0.3828, 1.7556, 2.9688], [2.9688, 1.7556, 5.1445, 1.3906]]]], + dtype=np.float32, + ) + + expect( + node, inputs=[X, Grid], outputs=[Y_bicubic], name="test_gridsample_bicubic" + ) + + # ============================================================================ + # Additional tests + # The reference output tensors were generated using PyTorch 2.0. + Grid = np.array( + [ + [ + [[-1.0, -0.8], [-0.6, -0.5], [-0.1, -0.2], [0.7, 0.0]], + [[0.0, 0.4], [0.2, -0.2], [-0.3, 0.5], [-1.0, 1.0]], + ] + ], + dtype=np.float32, + ) + + node = onnx.helper.make_node( + "GridSample", + inputs=["X", "Grid"], + outputs=["Y"], + mode="nearest", + align_corners=0, + ) + # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4] + Y_nearest = np.array( + [[[[0.0, 0.0, 2.0, 3.0], [4.0, 3.0, 4.0, 4.0]]]], + dtype=np.float32, + ) + + expect( + node, + inputs=[X, Grid], + outputs=[Y_nearest], + name="test_gridsample_nearest_align_corners_0_additional_1", + ) + + # setting mode = 'nearest' + node = onnx.helper.make_node( + "GridSample", + inputs=["X", "Grid"], + outputs=["Y"], + mode="nearest", + align_corners=1, + ) + # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4] + Y_nearest = np.array( + [[[[0.0, 0.0, 2.0, 3.0], [2.0, 3.0, 4.0, 4.0]]]], + dtype=np.float32, + ) + + expect( + node, + inputs=[X, Grid], + outputs=[Y_nearest], + name="test_gridsample_nearest_align_corners_1_additional_1", + ) + + node = onnx.helper.make_node( + "GridSample", + inputs=["X", "Grid"], + outputs=["Y"], + mode="linear", + align_corners=0, + ) + # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4] + Y_bilinear = np.array( + [[[[0.0000, 0.4500, 1.8000, 2.4000], [3.7000, 2.1000, 3.7000, 1.0000]]]], + dtype=np.float32, + ) + + expect( + node, + inputs=[X, Grid], + outputs=[Y_bilinear], + name="test_gridsample_bilinear_align_corners_0_additional_1", + ) + + node = onnx.helper.make_node( + "GridSample", + inputs=["X", "Grid"], + outputs=["Y"], + mode="linear", + align_corners=1, + ) + # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4] + Y_bilinear = np.array( + [[[[0.4000, 1.2000, 2.0500, 2.8500], [3.3000, 2.2000, 3.3500, 4.0000]]]], + dtype=np.float32, + ) + + expect( + node, + inputs=[X, Grid], + outputs=[Y_bilinear], + name="test_gridsample_bilinear_align_corners_1_additional_1", + ) + + # These two new bicubic tests produces slightly higher error ~5e-5 + node = onnx.helper.make_node( + "GridSample", + inputs=["X", "Grid"], + outputs=["Y"], + mode="cubic", + align_corners=0, + ) + # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4] + Y_bicubic = np.array( + [ + [ + [ + [-0.173250, 0.284265, 1.923106, 2.568000], + [5.170375, 2.284414, 4.744844, 1.046875], + ] + ] + ], + dtype=np.float32, + ) + + expect( + node, + inputs=[X, Grid], + outputs=[Y_bicubic], + 
name="test_gridsample_bicubic_align_corners_0_additional_1", + ) + + node = onnx.helper.make_node( + "GridSample", + inputs=["X", "Grid"], + outputs=["Y"], + mode="cubic", + align_corners=1, + ) + # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4] + Y_bicubic = np.array( + [ + [ + [ + [0.304001, 1.128750, 2.266270, 3.144844], + [4.531500, 2.455360, 4.599819, 4.000000], + ] + ] + ], + dtype=np.float32, + ) + + expect( + node, + inputs=[X, Grid], + outputs=[Y_bicubic], + name="test_gridsample_bicubic_align_corners_1_additional_1", + ) + + @staticmethod + def export_volumeetric_gridsample_mode_aligncorners() -> None: + X = np.array( + [ + [ + [ + [[1.0, 2.0], [3.0, 4.0]], + [[5.0, 6.0], [7.0, 8.0]], + [[9.0, 10.0], [11.0, 12.0]], + ] + ] + ], + dtype=np.float32, + ) + + Grid = np.array( + [ + [ + [ + [[-1.0, -1.0, -1.0], [-1.0, -0.5, 0.3]], + [[-0.5, -0.5, -0.5], [1.0, -0.6, -1.0]], + [[-0.2, -0.2, -0.2], [0.4, 0.2, 0.6]], + [[0.0, 0.0, 0.0], [-1.0, 0.0, 0.0]], + ], + [ + [[0.0, 0.0, 0.0], [-1.0, 1.0, 0.0]], + [[-0.2, -0.2, -0.2], [1.0, 0.4, -0.2]], + [[0.5, 0.5, 0.5], [-1.0, -0.8, 0.8]], + [[1.0, 1.0, 1.0], [0.4, 0.6, -0.3]], + ], + ] + ], + dtype=np.float32, + ) + + node = onnx.helper.make_node( + "GridSample", + inputs=["X", "Grid"], + outputs=["Y"], + mode="nearest", + align_corners=0, + ) + # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4] + Y_nearest = np.array( + [ + [ + [ + [[1.0, 5.0], [1.0, 0.0], [5.0, 12.0], [5.0, 5.0]], + [[5.0, 0.0], [5.0, 0.0], [12.0, 9.0], [0.0, 8.0]], + ] + ] + ], + dtype=np.float32, + ) + + expect( + node, + inputs=[X, Grid], + outputs=[Y_nearest], + name="test_gridsample_volumetric_nearest_align_corners_0", + ) + + node = onnx.helper.make_node( + "GridSample", + inputs=["X", "Grid"], + outputs=["Y"], + mode="nearest", + align_corners=1, + ) + # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4] + Y_nearest = np.array( + [ + [ + [ + [[1.0, 5.0], [1.0, 2.0], [5.0, 12.0], [5.0, 5.0]], + [[5.0, 7.0], [5.0, 8.0], [12.0, 9.0], [12.0, 8.0]], + ] + ] + ], + dtype=np.float32, + ) + + expect( + node, + inputs=[X, Grid], + outputs=[Y_nearest], + name="test_gridsample_volumetric_nearest_align_corners_1", + ) + + node = onnx.helper.make_node( + "GridSample", + inputs=["X", "Grid"], + outputs=["Y"], + mode="linear", + align_corners=0, + ) + # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4] + Y_bilinear = np.array( + [ + [ + [ + [ + [0.1250, 3.4000], + [2.0000, 0.4500], + [4.7000, 10.9000], + [6.5000, 3.0000], + ], + [ + [6.5000, 1.7500], + [4.7000, 3.3000], + [11.0000, 2.5200], + [1.5000, 5.4900], + ], + ] + ] + ], + dtype=np.float32, + ) + + expect( + node, + inputs=[X, Grid], + outputs=[Y_bilinear], + name="test_gridsample_volumetric_bilinear_align_corners_0", + ) + + node = onnx.helper.make_node( + "GridSample", + inputs=["X", "Grid"], + outputs=["Y"], + mode="linear", + align_corners=1, + ) + # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4] + Y_bilinear = np.array( + [ + [ + [ + [ + [1.0000, 6.7000], + [3.7500, 2.4000], + [5.4000, 9.3000], + [6.5000, 6.0000], + ], + [ + [6.5000, 7.0000], + [5.4000, 6.6000], + [9.2500, 8.4000], + [12.0000, 6.1000], + ], + ] + ] + ], + dtype=np.float32, + ) + + expect( + node, + inputs=[X, Grid], + outputs=[Y_bilinear], + name="test_gridsample_volumetric_bilinear_align_corners_1", + ) + + """ + For someone who want to test by script. Comment it cause github ONNX CI + do not have the torch python package. 
+    @staticmethod
+    def export_gridsample_torch():  # type: () -> None
+        node = onnx.helper.make_node(
+            'GridSample',
+            inputs=['X', 'Grid'],
+            outputs=['Y'],
+            mode='bilinear',
+            padding_mode='zeros',
+            align_corners=0,
+        )
+
+        # X shape, [N, C, H, W] - [1, 1, 3, 3]
+        # Grid shape, [N, H_out, W_out, 2] - [1, 6, 6, 2]
+        # Y shape, [N, C, H_out, W_out] - [1, 1, 6, 6]
+        import torch
+        X = torch.arange(3 * 3).view(1, 1, 3, 3).float()
+        d = torch.linspace(-1, 1, 6)
+        meshx, meshy = torch.meshgrid((d, d))
+        grid = torch.stack((meshy, meshx), 2)
+        Grid = grid.unsqueeze(0)
+        Y = torch.nn.functional.grid_sample(X, Grid, mode='bilinear',
+                                            padding_mode='zeros', align_corners=False)
+        expect(node, inputs=[X.numpy(), Grid.numpy()], outputs=[Y.numpy()],
+               name='test_gridsample_torch')
+    """
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/groupnormalization.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/groupnormalization.py
new file mode 100644
index 0000000000000000000000000000000000000000..df8e46aa0dbefcf0b6b57bbb18c82e81d73c57e0
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/groupnormalization.py
@@ -0,0 +1,77 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+# Group normalization's reference implementation
+def _group_normalization(x, num_groups, scale, bias, epsilon=1e-5):
+    # Channels are assumed to be the first dim after the batch dim
+    assert x.shape[1] % num_groups == 0
+    group_size = x.shape[1] // num_groups
+    # Reshape to [N, num_groups, C/num_groups, H, W, ...]
+    new_shape = [x.shape[0], num_groups, group_size, *list(x.shape[2:])]
+    x_reshaped = x.reshape(new_shape)
+    axes = tuple(range(2, len(new_shape)))
+    mean = np.mean(x_reshaped, axis=axes, keepdims=True)
+    var = np.var(x_reshaped, axis=axes, keepdims=True)
+    x_normalized = ((x_reshaped - mean) / np.sqrt(var + epsilon)).reshape(x.shape)
+    dim_ones = (1,) * (len(x.shape) - 2)
+    scale = scale.reshape(-1, *dim_ones)
+    bias = bias.reshape(-1, *dim_ones)
+    return scale * x_normalized + bias
+
+
+class GroupNormalization(Base):
+    @staticmethod
+    def export() -> None:
+        c = 4
+        num_groups = 2
+        x = np.random.randn(3, c, 2, 2).astype(np.float32)
+        scale = np.random.randn(c).astype(np.float32)
+        bias = np.random.randn(c).astype(np.float32)
+        y = _group_normalization(x, num_groups, scale, bias).astype(np.float32)
+
+        node = onnx.helper.make_node(
+            "GroupNormalization",
+            inputs=["x", "scale", "bias"],
+            outputs=["y"],
+            num_groups=num_groups,
+        )
+
+        expect(
+            node,
+            inputs=[x, scale, bias],
+            outputs=[y],
+            name="test_group_normalization_example",
+        )
+
+    @staticmethod
+    def export_epsilon() -> None:
+        c = 4
+        num_groups = 2
+        x = np.random.randn(3, c, 2, 2).astype(np.float32)
+        scale = np.random.randn(c).astype(np.float32)
+        bias = np.random.randn(c).astype(np.float32)
+        epsilon = 1e-2
+        y = _group_normalization(x, num_groups, scale, bias, epsilon).astype(np.float32)
+
+        node = onnx.helper.make_node(
+            "GroupNormalization",
+            inputs=["x", "scale", "bias"],
+            outputs=["y"],
+            epsilon=epsilon,
+            num_groups=num_groups,
+        )
+
+        expect(
+            node,
+            inputs=[x, scale, bias],
+            outputs=[y],
+            name="test_group_normalization_epsilon",
+        )
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/gru.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/gru.py
new file mode 100644
index 
0000000000000000000000000000000000000000..5c1d2c6db4b1d415e5f54b0af68fcf67caaa4c96 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/gru.py @@ -0,0 +1,263 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +from typing import Any, Tuple + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class GRUHelper: + def __init__(self, **params: Any) -> None: + # GRU Input Names + X = "X" + W = "W" + R = "R" + B = "B" + H_0 = "initial_h" + LBR = "linear_before_reset" + LAYOUT = "layout" + number_of_gates = 3 + + required_inputs = [X, W, R] + for i in required_inputs: + assert i in params, f"Missing Required Input: {i}" + + self.num_directions = params[W].shape[0] + + if self.num_directions == 1: + for k in params: + if k != X: + params[k] = np.squeeze(params[k], axis=0) + + hidden_size = params[R].shape[-1] + batch_size = params[X].shape[1] + + layout = params.get(LAYOUT, 0) + x = params[X] + x = x if layout == 0 else np.swapaxes(x, 0, 1) + b = ( + params[B] + if B in params + else np.zeros(2 * number_of_gates * hidden_size) + ) + h_0 = params[H_0] if H_0 in params else np.zeros((batch_size, hidden_size)) + lbr = params.get(LBR, 0) + + self.X = x + self.W = params[W] + self.R = params[R] + self.B = b + self.H_0 = h_0 + self.LBR = lbr + self.LAYOUT = layout + + else: + raise NotImplementedError() + + def f(self, x: np.ndarray) -> np.ndarray: + return 1 / (1 + np.exp(-x)) + + def g(self, x: np.ndarray) -> np.ndarray: + return np.tanh(x) + + def step(self) -> Tuple[np.ndarray, np.ndarray]: + seq_length = self.X.shape[0] + hidden_size = self.H_0.shape[-1] + batch_size = self.X.shape[1] + + Y = np.empty([seq_length, self.num_directions, batch_size, hidden_size]) + h_list = [] + + [w_z, w_r, w_h] = np.split(self.W, 3) + [r_z, r_r, r_h] = np.split(self.R, 3) + [w_bz, w_br, w_bh, r_bz, r_br, r_bh] = np.split(self.B, 6) + gates_w = np.transpose(np.concatenate((w_z, w_r))) + gates_r = np.transpose(np.concatenate((r_z, r_r))) + gates_b = np.add(np.concatenate((w_bz, w_br)), np.concatenate((r_bz, r_br))) + + H_t = self.H_0 + for x in np.split(self.X, self.X.shape[0], axis=0): + gates = np.dot(x, gates_w) + np.dot(H_t, gates_r) + gates_b + z, r = np.split(gates, 2, -1) + z = self.f(z) + r = self.f(r) + h_default = self.g( + np.dot(x, np.transpose(w_h)) + + np.dot(r * H_t, np.transpose(r_h)) + + w_bh + + r_bh + ) + h_linear = self.g( + np.dot(x, np.transpose(w_h)) + + r * (np.dot(H_t, np.transpose(r_h)) + r_bh) + + w_bh + ) + h = h_linear if self.LBR else h_default + H = (1 - z) * h + z * H_t + h_list.append(H) + H_t = H + + concatenated = np.concatenate(h_list) + if self.num_directions == 1: + Y[:, 0, :, :] = concatenated + + if self.LAYOUT == 0: + Y_h = Y[-1] + else: + Y = np.transpose(Y, [2, 0, 1, 3]) + Y_h = Y[:, :, -1, :] + + return Y, Y_h + + +class GRU(Base): + @staticmethod + def export_defaults() -> None: + input = np.array([[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]]).astype(np.float32) + + input_size = 2 + hidden_size = 5 + weight_scale = 0.1 + number_of_gates = 3 + + node = onnx.helper.make_node( + "GRU", inputs=["X", "W", "R"], outputs=["", "Y_h"], hidden_size=hidden_size + ) + + W = weight_scale * np.ones( + (1, number_of_gates * hidden_size, input_size) + ).astype(np.float32) + R = weight_scale * np.ones( + (1, number_of_gates * hidden_size, hidden_size) + ).astype(np.float32) + + gru = GRUHelper(X=input, W=W, R=R) + _, Y_h = gru.step() + expect( + node, + 
inputs=[input, W, R], + outputs=[Y_h.astype(np.float32)], + name="test_gru_defaults", + ) + + @staticmethod + def export_initial_bias() -> None: + input = np.array([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]).astype( + np.float32 + ) + + input_size = 3 + hidden_size = 3 + weight_scale = 0.1 + custom_bias = 0.1 + number_of_gates = 3 + + node = onnx.helper.make_node( + "GRU", + inputs=["X", "W", "R", "B"], + outputs=["", "Y_h"], + hidden_size=hidden_size, + ) + + W = weight_scale * np.ones( + (1, number_of_gates * hidden_size, input_size) + ).astype(np.float32) + R = weight_scale * np.ones( + (1, number_of_gates * hidden_size, hidden_size) + ).astype(np.float32) + + # Adding custom bias + W_B = custom_bias * np.ones((1, number_of_gates * hidden_size)).astype( + np.float32 + ) + R_B = np.zeros((1, number_of_gates * hidden_size)).astype(np.float32) + B = np.concatenate((W_B, R_B), axis=1) + + gru = GRUHelper(X=input, W=W, R=R, B=B) + _, Y_h = gru.step() + expect( + node, + inputs=[input, W, R, B], + outputs=[Y_h.astype(np.float32)], + name="test_gru_with_initial_bias", + ) + + @staticmethod + def export_seq_length() -> None: + input = np.array( + [ + [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]], + [[10.0, 11.0, 12.0], [13.0, 14.0, 15.0], [16.0, 17.0, 18.0]], + ] + ).astype(np.float32) + + input_size = 3 + hidden_size = 5 + number_of_gates = 3 + + node = onnx.helper.make_node( + "GRU", + inputs=["X", "W", "R", "B"], + outputs=["", "Y_h"], + hidden_size=hidden_size, + ) + + W = np.random.randn(1, number_of_gates * hidden_size, input_size).astype( + np.float32 + ) + R = np.random.randn(1, number_of_gates * hidden_size, hidden_size).astype( + np.float32 + ) + + # Adding custom bias + W_B = np.random.randn(1, number_of_gates * hidden_size).astype(np.float32) + R_B = np.random.randn(1, number_of_gates * hidden_size).astype(np.float32) + B = np.concatenate((W_B, R_B), axis=1) + + gru = GRUHelper(X=input, W=W, R=R, B=B) + _, Y_h = gru.step() + expect( + node, + inputs=[input, W, R, B], + outputs=[Y_h.astype(np.float32)], + name="test_gru_seq_length", + ) + + @staticmethod + def export_batchwise() -> None: + input = np.array([[[1.0, 2.0]], [[3.0, 4.0]], [[5.0, 6.0]]]).astype(np.float32) + + input_size = 2 + hidden_size = 6 + number_of_gates = 3 + weight_scale = 0.2 + layout = 1 + + node = onnx.helper.make_node( + "GRU", + inputs=["X", "W", "R"], + outputs=["Y", "Y_h"], + hidden_size=hidden_size, + layout=layout, + ) + + W = weight_scale * np.ones( + (1, number_of_gates * hidden_size, input_size) + ).astype(np.float32) + R = weight_scale * np.ones( + (1, number_of_gates * hidden_size, hidden_size) + ).astype(np.float32) + + gru = GRUHelper(X=input, W=W, R=R, layout=layout) + Y, Y_h = gru.step() + expect( + node, + inputs=[input, W, R], + outputs=[Y.astype(np.float32), Y_h.astype(np.float32)], + name="test_gru_batchwise", + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/hammingwindow.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/hammingwindow.py new file mode 100644 index 0000000000000000000000000000000000000000..fc90c0cb813853909a8dd00ed6715cc4703683c8 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/hammingwindow.py @@ -0,0 +1,37 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class HammingWindow(Base): + @staticmethod + def export() -> None: + # Test periodic 
window + node = onnx.helper.make_node( + "HammingWindow", + inputs=["x"], + outputs=["y"], + ) + size = np.int32(10) + a0 = 25 / 46 + a1 = 1 - a0 + y = a0 - a1 * np.cos(2 * np.pi * np.arange(0, size, 1, dtype=np.float32) / size) + expect(node, inputs=[size], outputs=[y], name="test_hammingwindow") + + # Test symmetric window + node = onnx.helper.make_node( + "HammingWindow", inputs=["x"], outputs=["y"], periodic=0 + ) + size = np.int32(10) + a0 = 25 / 46 + a1 = 1 - a0 + y = a0 - a1 * np.cos( + 2 * np.pi * np.arange(0, size, 1, dtype=np.float32) / (size - 1) + ) + expect(node, inputs=[size], outputs=[y], name="test_hammingwindow_symmetric") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/hannwindow.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/hannwindow.py new file mode 100644 index 0000000000000000000000000000000000000000..466da0eef2ff8a74603a266f4f3e3bf6fcf3b18b --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/hannwindow.py @@ -0,0 +1,37 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class HannWindow(Base): + @staticmethod + def export() -> None: + # Test periodic window + node = onnx.helper.make_node( + "HannWindow", + inputs=["x"], + outputs=["y"], + ) + size = np.int32(10) + a0 = 0.5 + a1 = 0.5 + y = a0 - a1 * np.cos(2 * np.pi * np.arange(0, size, 1, dtype=np.float32) / size) + expect(node, inputs=[size], outputs=[y], name="test_hannwindow") + + # Test symmetric window + node = onnx.helper.make_node( + "HannWindow", inputs=["x"], outputs=["y"], periodic=0 + ) + size = np.int32(10) + a0 = 0.5 + a1 = 0.5 + y = a0 - a1 * np.cos( + 2 * np.pi * np.arange(0, size, 1, dtype=np.float32) / (size - 1) + ) + expect(node, inputs=[size], outputs=[y], name="test_hannwindow_symmetric") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/hardmax.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/hardmax.py new file mode 100644 index 0000000000000000000000000000000000000000..4751ef956578b98e614113676907cff21a1b9ac0 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/hardmax.py @@ -0,0 +1,91 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +def hardmax(x: np.ndarray, axis: int = -1) -> np.ndarray: + x_argmax = np.argmax(x, axis=axis) + y = np.zeros_like(x) + np.put_along_axis(y, np.expand_dims(x_argmax, axis=axis), 1, axis=axis) + return y + + +class Hardmax(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "Hardmax", + inputs=["x"], + outputs=["y"], + ) + + x = np.array([[3, 0, 1, 2], [2, 5, 1, 0], [0, 1, 3, 2], [0, 1, 2, 3]]).astype( + np.float32 + ) + # expect result: + # [[1. 0. 0. 0.] + # [0. 1. 0. 0.] + # [0. 0. 1. 0.] + # [0. 0. 0. 
1.]] + y = hardmax(x) + expect(node, inputs=[x], outputs=[y], name="test_hardmax_example") + + # For multiple occurrences of the maximal values, the first occurrence is selected for one-hot output + x = np.array([[3, 3, 3, 1]]).astype(np.float32) + # expect result: + # [[1, 0, 0, 0]] + y = hardmax(x) + expect(node, inputs=[x], outputs=[y], name="test_hardmax_one_hot") + + @staticmethod + def export_hardmax_axis() -> None: + x = np.random.randn(3, 4, 5).astype(np.float32) + node = onnx.helper.make_node( + "Hardmax", + inputs=["x"], + outputs=["y"], + axis=0, + ) + y = hardmax(x, axis=0) + expect(node, inputs=[x], outputs=[y], name="test_hardmax_axis_0") + + node = onnx.helper.make_node( + "Hardmax", + inputs=["x"], + outputs=["y"], + axis=1, + ) + y = hardmax(x, axis=1) + expect(node, inputs=[x], outputs=[y], name="test_hardmax_axis_1") + + node = onnx.helper.make_node( + "Hardmax", + inputs=["x"], + outputs=["y"], + axis=2, + ) + y = hardmax(x, axis=2) + expect(node, inputs=[x], outputs=[y], name="test_hardmax_axis_2") + + node = onnx.helper.make_node( + "Hardmax", + inputs=["x"], + outputs=["y"], + axis=-1, + ) + y = hardmax(x, axis=-1) + expect(node, inputs=[x], outputs=[y], name="test_hardmax_negative_axis") + + # default axis is -1 + node = onnx.helper.make_node( + "Hardmax", + inputs=["x"], + outputs=["y"], + ) + expect(node, inputs=[x], outputs=[y], name="test_hardmax_default_axis") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/hardsigmoid.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/hardsigmoid.py new file mode 100644 index 0000000000000000000000000000000000000000..20bc543907ce8ece8678f5b1e7fbd32540528eb6 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/hardsigmoid.py @@ -0,0 +1,38 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class HardSigmoid(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "HardSigmoid", inputs=["x"], outputs=["y"], alpha=0.5, beta=0.6 + ) + + x = np.array([-1, 0, 1]).astype(np.float32) + y = np.clip(x * 0.5 + 0.6, 0, 1) # expected output [0.1, 0.6, 1.] 
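+        # HardSigmoid computes max(0, min(1, alpha * x + beta)); with alpha=0.5 + # and beta=0.6, the inputs [-1, 0, 1] map to [0.1, 0.6, 1.0], matching the + # np.clip expression above.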
+ expect(node, inputs=[x], outputs=[y], name="test_hardsigmoid_example") + + x = np.random.randn(3, 4, 5).astype(np.float32) + y = np.clip(x * 0.5 + 0.6, 0, 1) + expect(node, inputs=[x], outputs=[y], name="test_hardsigmoid") + + @staticmethod + def export_hardsigmoid_default() -> None: + default_alpha = 0.2 + default_beta = 0.5 + node = onnx.helper.make_node( + "HardSigmoid", + inputs=["x"], + outputs=["y"], + ) + x = np.random.randn(3, 4, 5).astype(np.float32) + y = np.clip(x * default_alpha + default_beta, 0, 1) + expect(node, inputs=[x], outputs=[y], name="test_hardsigmoid_default") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/hardswish.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/hardswish.py new file mode 100644 index 0000000000000000000000000000000000000000..61678d777a716168ccae3463a3b7fc7dbbb11023 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/hardswish.py @@ -0,0 +1,29 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +def hardswish(x: np.ndarray) -> np.ndarray: + alfa = float(1 / 6) + beta = 0.5 + return x * np.maximum(0, np.minimum(1, alfa * x + beta)) + + +class HardSwish(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "HardSwish", + inputs=["x"], + outputs=["y"], + ) + x = np.random.randn(3, 4, 5).astype(np.float32) + y = hardswish(x) + + expect(node, inputs=[x], outputs=[y], name="test_hardswish") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/identity.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/identity.py new file mode 100644 index 0000000000000000000000000000000000000000..984bdfd301c072169aa35a4403fbefba3a846b4e --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/identity.py @@ -0,0 +1,92 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class Identity(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "Identity", + inputs=["x"], + outputs=["y"], + ) + + data = np.array( + [ + [ + [ + [1, 2], + [3, 4], + ] + ] + ], + dtype=np.float32, + ) + + expect(node, inputs=[data], outputs=[data], name="test_identity") + + @staticmethod + def export_sequence() -> None: + node = onnx.helper.make_node( + "Identity", + inputs=["x"], + outputs=["y"], + ) + + data = [ + np.array( + [ + [ + [ + [1, 2], + [3, 4], + ] + ] + ], + dtype=np.float32, + ), + np.array( + [ + [ + [ + [2, 3], + [1, 5], + ] + ] + ], + dtype=np.float32, + ), + ] + + expect(node, inputs=[data], outputs=[data], name="test_identity_sequence") + + @staticmethod + def export_identity_opt() -> None: + ten_in_tp = onnx.helper.make_tensor_type_proto( + onnx.TensorProto.FLOAT, shape=[5] + ) + seq_in_tp = onnx.helper.make_sequence_type_proto(ten_in_tp) + opt_in_tp = onnx.helper.make_optional_type_proto(seq_in_tp) + + identity_node = onnx.helper.make_node( + "Identity", inputs=["opt_in"], outputs=["opt_out"] + ) + + x = [np.array([1, 2, 3, 4, 5]).astype(np.float32)] + + expect( + identity_node, + inputs=[x], + outputs=[x], + name="test_identity_opt", + opset_imports=[onnx.helper.make_opsetid("", 16)], + input_type_protos=[opt_in_tp], + output_type_protos=[opt_in_tp], + ) diff --git 
a/MLPY/Lib/site-packages/onnx/backend/test/case/node/if.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/if.py new file mode 100644 index 0000000000000000000000000000000000000000..684842983030c6db43477b32a2985313927d7407 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/if.py @@ -0,0 +1,210 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +def compute_if_outputs(x, cond): # type: ignore + if cond: + return [] + else: + return x + + +class If(Base): + @staticmethod + def export_if() -> None: + # Given a bool scalar input cond. + # return constant tensor x if cond is True, otherwise return constant tensor y. + + then_out = onnx.helper.make_tensor_value_info( + "then_out", onnx.TensorProto.FLOAT, [5] + ) + else_out = onnx.helper.make_tensor_value_info( + "else_out", onnx.TensorProto.FLOAT, [5] + ) + + x = np.array([1, 2, 3, 4, 5]).astype(np.float32) + y = np.array([5, 4, 3, 2, 1]).astype(np.float32) + + then_const_node = onnx.helper.make_node( + "Constant", + inputs=[], + outputs=["then_out"], + value=onnx.numpy_helper.from_array(x), + ) + + else_const_node = onnx.helper.make_node( + "Constant", + inputs=[], + outputs=["else_out"], + value=onnx.numpy_helper.from_array(y), + ) + + then_body = onnx.helper.make_graph( + [then_const_node], "then_body", [], [then_out] + ) + + else_body = onnx.helper.make_graph( + [else_const_node], "else_body", [], [else_out] + ) + + if_node = onnx.helper.make_node( + "If", + inputs=["cond"], + outputs=["res"], + then_branch=then_body, + else_branch=else_body, + ) + + cond = np.array(1).astype(bool) + res = x if cond else y + expect( + if_node, + inputs=[cond], + outputs=[res], + name="test_if", + opset_imports=[onnx.helper.make_opsetid("", 11)], + ) + + @staticmethod + def export_if_seq() -> None: + # Given a bool scalar input cond. + # return constant sequence x if cond is True, otherwise return constant sequence y. 
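+        # Each branch is a self-contained subgraph with no graph inputs: a Constant + # node materializes the tensor and SequenceConstruct wraps it into the + # single-element sequence returned by the branch.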
+ + then_out = onnx.helper.make_tensor_sequence_value_info( + "then_out", onnx.TensorProto.FLOAT, shape=[5] + ) + else_out = onnx.helper.make_tensor_sequence_value_info( + "else_out", onnx.TensorProto.FLOAT, shape=[5] + ) + + x = [np.array([1, 2, 3, 4, 5]).astype(np.float32)] + y = [np.array([5, 4, 3, 2, 1]).astype(np.float32)] + + then_const_node = onnx.helper.make_node( + "Constant", + inputs=[], + outputs=["x"], + value=onnx.numpy_helper.from_array(x[0]), + ) + + then_seq_node = onnx.helper.make_node( + "SequenceConstruct", inputs=["x"], outputs=["then_out"] + ) + + else_const_node = onnx.helper.make_node( + "Constant", + inputs=[], + outputs=["y"], + value=onnx.numpy_helper.from_array(y[0]), + ) + + else_seq_node = onnx.helper.make_node( + "SequenceConstruct", inputs=["y"], outputs=["else_out"] + ) + + then_body = onnx.helper.make_graph( + [then_const_node, then_seq_node], "then_body", [], [then_out] + ) + + else_body = onnx.helper.make_graph( + [else_const_node, else_seq_node], "else_body", [], [else_out] + ) + + if_node = onnx.helper.make_node( + "If", + inputs=["cond"], + outputs=["res"], + then_branch=then_body, + else_branch=else_body, + ) + + cond = np.array(1).astype(bool) + res = x if cond else y + expect( + if_node, + inputs=[cond], + outputs=[res], + name="test_if_seq", + opset_imports=[onnx.helper.make_opsetid("", 13)], + ) + + @staticmethod + def export_if_optional() -> None: + # Given a bool scalar input cond, return an empty optional sequence of + # tensor if True, return an optional sequence with value x + # (the input optional sequence) otherwise. + + ten_in_tp = onnx.helper.make_tensor_type_proto( + onnx.TensorProto.FLOAT, shape=[5] + ) + seq_in_tp = onnx.helper.make_sequence_type_proto(ten_in_tp) + + then_out_tensor_tp = onnx.helper.make_tensor_type_proto( + onnx.TensorProto.FLOAT, shape=[5] + ) + then_out_seq_tp = onnx.helper.make_sequence_type_proto(then_out_tensor_tp) + then_out_opt_tp = onnx.helper.make_optional_type_proto(then_out_seq_tp) + then_out = onnx.helper.make_value_info("optional_empty", then_out_opt_tp) + + else_out_tensor_tp = onnx.helper.make_tensor_type_proto( + onnx.TensorProto.FLOAT, shape=[5] + ) + else_out_seq_tp = onnx.helper.make_sequence_type_proto(else_out_tensor_tp) + else_out_opt_tp = onnx.helper.make_optional_type_proto(else_out_seq_tp) + else_out = onnx.helper.make_value_info("else_opt", else_out_opt_tp) + + x = [np.array([1, 2, 3, 4, 5]).astype(np.float32)] + cond = np.array(0).astype(bool) + res = compute_if_outputs(x, cond) + + opt_empty_in = onnx.helper.make_node( + "Optional", inputs=[], outputs=["optional_empty"], type=seq_in_tp + ) + + then_body = onnx.helper.make_graph([opt_empty_in], "then_body", [], [then_out]) + + else_const_node = onnx.helper.make_node( + "Constant", + inputs=[], + outputs=["x"], + value=onnx.numpy_helper.from_array(x[0]), + ) + + else_seq_node = onnx.helper.make_node( + "SequenceConstruct", inputs=["x"], outputs=["else_seq"] + ) + + else_optional_seq_node = onnx.helper.make_node( + "Optional", inputs=["else_seq"], outputs=["else_opt"] + ) + + else_body = onnx.helper.make_graph( + [else_const_node, else_seq_node, else_optional_seq_node], + "else_body", + [], + [else_out], + ) + + if_node = onnx.helper.make_node( + "If", + inputs=["cond"], + outputs=["sequence"], + then_branch=then_body, + else_branch=else_body, + ) + + expect( + if_node, + inputs=[cond], + outputs=[res], + name="test_if_opt", + output_type_protos=[else_out_opt_tp], + opset_imports=[onnx.helper.make_opsetid("", 16)], + ) diff --git 
a/MLPY/Lib/site-packages/onnx/backend/test/case/node/image_decoder.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/image_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..56ecfdb0b7f0139f7aaa291eff9cb81deeefbcf0 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/image_decoder.py @@ -0,0 +1,249 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import io + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import _image_decoder_data, expect + + +def generate_checkerboard(width: int, height: int, square_size: int) -> np.ndarray: + # Create an empty RGB image + image = np.zeros((height, width, 3), dtype=np.uint8) + + # Calculate the number of squares in each dimension + num_squares_x = width // square_size + num_squares_y = height // square_size + + # Generate a random color for each square + colors = np.random.randint( + 0, 256, size=(num_squares_y, num_squares_x, 3), dtype=np.uint8 + ) + + # Iterate over each square + for i in range(num_squares_y): + for j in range(num_squares_x): + # Calculate the position of the current square + x = j * square_size + y = i * square_size + + # Get the color for the current square + color = colors[i, j] + + # Fill the square with the corresponding color + image[y : y + square_size, x : x + square_size, :] = color + + return image + + +def _generate_test_data( + format_: str, + frozen_data: _image_decoder_data.ImageDecoderData, + pixel_format: str = "RGB", + height: int = 32, + width: int = 32, + tile_sz: int = 5, +) -> tuple[np.ndarray, np.ndarray]: + try: + import PIL.Image + except ImportError: + # Since pillow is not installed to generate test data for the ImageDecoder operator + # directly use the frozen data from _image_decoder_data.py. 
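+        # The frozen arrays keep this test usable and deterministic without a + # pillow dependency; they are expected to match what the generation code + # below would produce.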
+ return frozen_data.data, frozen_data.output + np.random.seed(12345) + image = generate_checkerboard(height, width, tile_sz) + image_pil = PIL.Image.fromarray(image) + with io.BytesIO() as f: + image_pil.save(f, format=format_) + data = f.getvalue() + data_array = np.frombuffer(data, dtype=np.uint8) + if pixel_format == "BGR": + output_pil = PIL.Image.open(io.BytesIO(data)) + output = np.array(output_pil)[:, :, ::-1] + elif pixel_format == "RGB": + output_pil = PIL.Image.open(io.BytesIO(data)) + output = np.array(output_pil) + elif pixel_format == "Grayscale": + output_pil = PIL.Image.open(io.BytesIO(data)).convert("L") + output = np.array(output_pil)[:, :, np.newaxis] + else: + raise ValueError(f"Unsupported pixel format: {pixel_format}") + return data_array, output + + +class ImageDecoder(Base): + @staticmethod + def export_image_decoder_decode_jpeg_rgb() -> None: + node = onnx.helper.make_node( + "ImageDecoder", + inputs=["data"], + outputs=["output"], + pixel_format="RGB", + ) + + data, output = _generate_test_data( + "jpeg", _image_decoder_data.image_decoder_decode_jpeg_rgb, "RGB" + ) + expect( + node, + inputs=[data], + outputs=[output], + name="test_image_decoder_decode_jpeg_rgb", + ) + + @staticmethod + def export_image_decoder_decode_jpeg_grayscale() -> None: + node = onnx.helper.make_node( + "ImageDecoder", + inputs=["data"], + outputs=["output"], + pixel_format="Grayscale", + ) + + data, output = _generate_test_data( + "jpeg", _image_decoder_data.image_decoder_decode_jpeg_grayscale, "Grayscale" + ) + expect( + node, + inputs=[data], + outputs=[output], + name="test_image_decoder_decode_jpeg_grayscale", + ) + + @staticmethod + def export_image_decoder_decode_jpeg_bgr() -> None: + node = onnx.helper.make_node( + "ImageDecoder", + inputs=["data"], + outputs=["output"], + pixel_format="BGR", + ) + + data, output = _generate_test_data( + "jpeg", _image_decoder_data.image_decoder_decode_jpeg_bgr, "BGR" + ) + expect( + node, + inputs=[data], + outputs=[output], + name="test_image_decoder_decode_jpeg_bgr", + ) + + @staticmethod + def export_image_decoder_decode_jpeg2k_rgb() -> None: + node = onnx.helper.make_node( + "ImageDecoder", + inputs=["data"], + outputs=["output"], + pixel_format="RGB", + ) + + data, output = _generate_test_data( + "jpeg2000", _image_decoder_data.image_decoder_decode_jpeg2k_rgb, "RGB" + ) + expect( + node, + inputs=[data], + outputs=[output], + name="test_image_decoder_decode_jpeg2k_rgb", + ) + + @staticmethod + def export_image_decoder_decode_bmp_rgb() -> None: + node = onnx.helper.make_node( + "ImageDecoder", + inputs=["data"], + outputs=["output"], + pixel_format="RGB", + ) + + data, output = _generate_test_data( + "bmp", _image_decoder_data.image_decoder_decode_bmp_rgb, "RGB" + ) + expect( + node, + inputs=[data], + outputs=[output], + name="test_image_decoder_decode_bmp_rgb", + ) + + @staticmethod + def export_image_decoder_decode_png_rgb() -> None: + node = onnx.helper.make_node( + "ImageDecoder", + inputs=["data"], + outputs=["output"], + pixel_format="RGB", + ) + + data, output = _generate_test_data( + "png", _image_decoder_data.image_decoder_decode_png_rgb, "RGB" + ) + expect( + node, + inputs=[data], + outputs=[output], + name="test_image_decoder_decode_png_rgb", + ) + + @staticmethod + def export_image_decoder_decode_tiff_rgb() -> None: + node = onnx.helper.make_node( + "ImageDecoder", + inputs=["data"], + outputs=["output"], + pixel_format="RGB", + ) + + data, output = _generate_test_data( + "tiff", _image_decoder_data.image_decoder_decode_tiff_rgb, 
"RGB" + ) + expect( + node, + inputs=[data], + outputs=[output], + name="test_image_decoder_decode_tiff_rgb", + ) + + @staticmethod + def export_image_decoder_decode_webp_rgb() -> None: + node = onnx.helper.make_node( + "ImageDecoder", + inputs=["data"], + outputs=["output"], + pixel_format="RGB", + ) + + data, output = _generate_test_data( + "webp", _image_decoder_data.image_decoder_decode_webp_rgb, "RGB" + ) + expect( + node, + inputs=[data], + outputs=[output], + name="test_image_decoder_decode_webp_rgb", + ) + + @staticmethod + def export_image_decoder_decode_pnm_rgb() -> None: + node = onnx.helper.make_node( + "ImageDecoder", + inputs=["data"], + outputs=["output"], + pixel_format="RGB", + ) + + data, output = _generate_test_data( + "ppm", _image_decoder_data.image_decoder_decode_pnm_rgb, "RGB" + ) + expect( + node, + inputs=[data], + outputs=[output], + name="test_image_decoder_decode_pnm_rgb", + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/instancenorm.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/instancenorm.py new file mode 100644 index 0000000000000000000000000000000000000000..f335dbebb094ec216a101c677a63eb5f4774e0c7 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/instancenorm.py @@ -0,0 +1,55 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class InstanceNormalization(Base): + @staticmethod + def export() -> None: + def _instancenorm_test_mode(x, s, bias, epsilon=1e-5): # type: ignore + dims_x = len(x.shape) + axis = tuple(range(2, dims_x)) + mean = np.mean(x, axis=axis, keepdims=True) + var = np.var(x, axis=axis, keepdims=True) + dim_ones = (1,) * (dims_x - 2) + s = s.reshape(-1, *dim_ones) + bias = bias.reshape(-1, *dim_ones) + return s * (x - mean) / np.sqrt(var + epsilon) + bias + + # input size: (1, 2, 1, 3) + x = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32) + s = np.array([1.0, 1.5]).astype(np.float32) + bias = np.array([0, 1]).astype(np.float32) + y = _instancenorm_test_mode(x, s, bias).astype(np.float32) + + node = onnx.helper.make_node( + "InstanceNormalization", + inputs=["x", "s", "bias"], + outputs=["y"], + ) + + # output size: (1, 2, 1, 3) + expect(node, inputs=[x, s, bias], outputs=[y], name="test_instancenorm_example") + + # input size: (2, 3, 4, 5) + x = np.random.randn(2, 3, 4, 5).astype(np.float32) + s = np.random.randn(3).astype(np.float32) + bias = np.random.randn(3).astype(np.float32) + epsilon = 1e-2 + y = _instancenorm_test_mode(x, s, bias, epsilon).astype(np.float32) + + node = onnx.helper.make_node( + "InstanceNormalization", + inputs=["x", "s", "bias"], + outputs=["y"], + epsilon=epsilon, + ) + + # output size: (2, 3, 4, 5) + expect(node, inputs=[x, s, bias], outputs=[y], name="test_instancenorm_epsilon") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/isinf.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/isinf.py new file mode 100644 index 0000000000000000000000000000000000000000..addbc656752856f98e72549ea192f40e2c37d72c --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/isinf.py @@ -0,0 +1,55 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class IsInf(Base): + @staticmethod + def 
export_infinity() -> None: + node = onnx.helper.make_node( + "IsInf", + inputs=["x"], + outputs=["y"], + ) + + x = np.array([-1.2, np.nan, np.inf, 2.8, -np.inf, np.inf], dtype=np.float32) + y = np.isinf(x) + expect(node, inputs=[x], outputs=[y], name="test_isinf") + + @staticmethod + def export_positive_infinity_only() -> None: + node = onnx.helper.make_node( + "IsInf", inputs=["x"], outputs=["y"], detect_negative=0 + ) + + x = np.array([-1.7, np.nan, np.inf, 3.6, -np.inf, np.inf], dtype=np.float32) + y = np.isposinf(x) + expect(node, inputs=[x], outputs=[y], name="test_isinf_positive") + + @staticmethod + def export_negative_infinity_only() -> None: + node = onnx.helper.make_node( + "IsInf", inputs=["x"], outputs=["y"], detect_positive=0 + ) + + x = np.array([-1.7, np.nan, np.inf, -3.6, -np.inf, np.inf], dtype=np.float32) + y = np.isneginf(x) + expect(node, inputs=[x], outputs=[y], name="test_isinf_negative") + + @staticmethod + def export_infinity_float16() -> None: + node = onnx.helper.make_node( + "IsInf", + inputs=["x"], + outputs=["y"], + ) + + x = np.array([-1.2, np.nan, np.inf, 2.8, -np.inf, np.inf], dtype=np.float16) + y = np.isinf(x) + expect(node, inputs=[x], outputs=[y], name="test_isinf_float16") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/isnan.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/isnan.py new file mode 100644 index 0000000000000000000000000000000000000000..af8ba6e1a19fa48f4a607a13adbbcbc1dc6ebcd0 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/isnan.py @@ -0,0 +1,35 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class IsNaN(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "IsNaN", + inputs=["x"], + outputs=["y"], + ) + + x = np.array([-1.2, np.nan, np.inf, 2.8, -np.inf, np.inf], dtype=np.float32) + y = np.isnan(x) + expect(node, inputs=[x], outputs=[y], name="test_isnan") + + @staticmethod + def export_float16() -> None: + node = onnx.helper.make_node( + "IsNaN", + inputs=["x"], + outputs=["y"], + ) + + x = np.array([-1.2, np.nan, np.inf, 2.8, -np.inf, np.inf], dtype=np.float16) + y = np.isnan(x) + expect(node, inputs=[x], outputs=[y], name="test_isnan_float16") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/layernormalization.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/layernormalization.py new file mode 100644 index 0000000000000000000000000000000000000000..52b250a2ee67f7c9040c1c2c479e22020fe8c7e9 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/layernormalization.py @@ -0,0 +1,177 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +# Layer normalization's reference implementation
def _layer_normalization(X, W, B, axis=-1, epsilon=1e-5): # type: ignore + X_shape = X.shape + X_rank = len(X_shape) + if axis < 0: + # If axis = -1 and rank of X is 4, + # the axis is changed to -1 + 4 = 3, + # which means the last axis. + axis = axis + X_rank + unsqueezed_rank = X_rank - axis + reduction_shape = X_shape[0:axis] + (1,) * unsqueezed_rank + + # Parameter used to convert N-D tensor layer + # normalization to equivalent 2-D matrix operations.
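+    # For example, with X_shape == (2, 3, 4, 5) and axis == 2, row_number is + # 2 * 3 = 6 and col_number is 4 * 5 = 20, so x_mat below has shape (6, 20) + # and every row is normalized independently.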
+ row_number = 1 + col_number = 1 + for i in range(X_rank): + if i < axis: + row_number *= X_shape[i] + else: + col_number *= X_shape[i] + + # After reshaping input tensor X into a matrix, + # layer normalization is equivalent to conducting + # standardization on each row vector (s.t. each + # row has zero mean and unit variance). + x_mat = np.reshape(X, (row_number, col_number)) + # This computes the mean of every row of x_mat. + x_mean = np.sum(x_mat, axis=1, keepdims=True) / col_number + x_diff = x_mat - x_mean + x_squared_diff = x_diff * x_diff + # This computes the variance of every row of x_mat. + variance = np.sum(x_squared_diff, axis=1, keepdims=True) / col_number + variance_eps = variance + epsilon + std_dev = np.sqrt(variance_eps) + inv_std_dev = np.reciprocal(std_dev) + # Standardization step. y_mat is zero-mean and unit-variance. + y_mat = x_diff * inv_std_dev + # Apply the affine transform on the normalization outcome. + # W is the linear coefficient and B is the bias. + Y = np.reshape(y_mat, X_shape) * W + B + # Matrix-level operations' outputs should be reshaped + # to compensate for the initial tensor-to-matrix reshape. + X_mean = np.reshape(x_mean, reduction_shape) + X_inv_std_dev = np.reshape(inv_std_dev, reduction_shape) + + return Y, X_mean, X_inv_std_dev + + +def calculate_normalized_shape(X_shape, axis): # type: ignore + X_rank = len(X_shape) + if axis < 0: + axis = axis + X_rank + return X_shape[axis:] + + +class LayerNormalization(Base): + @staticmethod + def export() -> None: + X = np.random.randn(2, 3, 4, 5).astype(np.float32) + + def case(axis: int) -> None: + normalized_shape = calculate_normalized_shape(X.shape, axis) + W = np.random.randn(*normalized_shape).astype(np.float32) + B = np.random.randn(*normalized_shape).astype(np.float32) + Y, mean, inv_std_dev = _layer_normalization(X, W, B, axis) + + node = onnx.helper.make_node( + "LayerNormalization", + inputs=["X", "W", "B"], + outputs=["Y", "Mean", "InvStdDev"], + axis=axis, + ) + + if axis < 0: + name = f"test_layer_normalization_4d_axis_negative_{-axis}" + else: + name = f"test_layer_normalization_4d_axis{axis}" + + expect(node, inputs=[X, W, B], outputs=[Y, mean, inv_std_dev], name=name) + + for i in range(len(X.shape)): + case(i) + case(i - len(X.shape)) + + @staticmethod + def export_default_axis() -> None: + X = np.random.randn(2, 3, 4, 5).astype(np.float32) + + # Default axis in LayerNormalization is -1. + normalized_shape = calculate_normalized_shape(X.shape, -1) + W = np.random.randn(*normalized_shape).astype(np.float32) + B = np.random.randn(*normalized_shape).astype(np.float32) + # Axis defaults to -1 in the reference implementation. + Y, mean, inv_std_dev = _layer_normalization(X, W, B) + + # Not specifying the axis attribute means axis = -1.
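+        # The Mean and InvStdDev outputs keep the leading dimensions of X and + # collapse the normalized axes to size 1, i.e. their shape is + # X.shape[:axis] + (1,) * (rank - axis) (reduction_shape in the helper).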
+ node = onnx.helper.make_node( + "LayerNormalization", + inputs=["X", "W", "B"], + outputs=["Y", "Mean", "InvStdDev"], + ) + + expect( + node, + inputs=[X, W, B], + outputs=[Y, mean, inv_std_dev], + name="test_layer_normalization_default_axis", + ) + + @staticmethod + def export2d() -> None: + X = np.random.randn(3, 4).astype(np.float32) + + def case(axis: int) -> None: + normalized_shape = calculate_normalized_shape(X.shape, axis) + W = np.random.randn(*normalized_shape).astype(np.float32) + B = np.random.randn(*normalized_shape).astype(np.float32) + Y, mean, inv_std_dev = _layer_normalization(X, W, B, axis=axis) + + node = onnx.helper.make_node( + "LayerNormalization", + inputs=["X", "W", "B"], + outputs=["Y", "Mean", "InvStdDev"], + axis=axis, + ) + + if axis < 0: + name = f"test_layer_normalization_2d_axis_negative_{-axis}" + else: + name = f"test_layer_normalization_2d_axis{axis}" + + expect(node, inputs=[X, W, B], outputs=[Y, mean, inv_std_dev], name=name) + + for i in range(len(X.shape)): + case(i) + case(i - len(X.shape)) + + @staticmethod + def export3d_epsilon() -> None: + epsilon = 1e-1 + X = np.random.randn(2, 3, 5).astype(np.float32) + + def case(axis: int) -> None: + normalized_shape = calculate_normalized_shape(X.shape, axis) + W = np.random.randn(*normalized_shape).astype(np.float32) + B = np.random.randn(*normalized_shape).astype(np.float32) + Y, mean, inv_std_dev = _layer_normalization(X, W, B, axis, epsilon) + node = onnx.helper.make_node( + "LayerNormalization", + inputs=["X", "W", "B"], + outputs=["Y", "Mean", "InvStdDev"], + axis=axis, + epsilon=epsilon, + ) + + if axis < 0: + name = f"test_layer_normalization_3d_axis_negative_{-axis}_epsilon" + else: + name = f"test_layer_normalization_3d_axis{axis}_epsilon" + + expect(node, inputs=[X, W, B], outputs=[Y, mean, inv_std_dev], name=name) + + for i in range(len(X.shape)): + case(i) + case(i - len(X.shape)) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/leakyrelu.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/leakyrelu.py new file mode 100644 index 0000000000000000000000000000000000000000..ca13351e6eb2a4182b600d56a388d435ba2f5ac3 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/leakyrelu.py @@ -0,0 +1,38 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class LeakyRelu(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "LeakyRelu", inputs=["x"], outputs=["y"], alpha=0.1 + ) + + x = np.array([-1, 0, 1]).astype(np.float32) + # expected output [-0.1, 0., 1.] 
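+        # LeakyRelu is f(x) = x for x >= 0 and alpha * x otherwise; the two clips + # below split x into its positive and negative parts so only the negative + # part is scaled by alpha=0.1.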
+ y = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * 0.1 + expect(node, inputs=[x], outputs=[y], name="test_leakyrelu_example") + + x = np.random.randn(3, 4, 5).astype(np.float32) + y = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * 0.1 + expect(node, inputs=[x], outputs=[y], name="test_leakyrelu") + + @staticmethod + def export_leakyrelu_default() -> None: + default_alpha = 0.01 + node = onnx.helper.make_node( + "LeakyRelu", + inputs=["x"], + outputs=["y"], + ) + x = np.random.randn(3, 4, 5).astype(np.float32) + y = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * default_alpha + expect(node, inputs=[x], outputs=[y], name="test_leakyrelu_default") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/less.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/less.py new file mode 100644 index 0000000000000000000000000000000000000000..fe57ec139544f1b21c17f5a65787aba3781c5860 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/less.py @@ -0,0 +1,37 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class Less(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "Less", + inputs=["x", "y"], + outputs=["less"], + ) + + x = np.random.randn(3, 4, 5).astype(np.float32) + y = np.random.randn(3, 4, 5).astype(np.float32) + z = np.less(x, y) + expect(node, inputs=[x, y], outputs=[z], name="test_less") + + @staticmethod + def export_less_broadcast() -> None: + node = onnx.helper.make_node( + "Less", + inputs=["x", "y"], + outputs=["less"], + ) + + x = np.random.randn(3, 4, 5).astype(np.float32) + y = np.random.randn(5).astype(np.float32) + z = np.less(x, y) + expect(node, inputs=[x, y], outputs=[z], name="test_less_bcast") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/less_equal.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/less_equal.py new file mode 100644 index 0000000000000000000000000000000000000000..3852fec02c090ea3b7432511dd1c10c3614d2206 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/less_equal.py @@ -0,0 +1,37 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class Less(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "LessOrEqual", + inputs=["x", "y"], + outputs=["less_equal"], + ) + + x = np.random.randn(3, 4, 5).astype(np.float32) + y = np.random.randn(3, 4, 5).astype(np.float32) + z = np.less_equal(x, y) + expect(node, inputs=[x, y], outputs=[z], name="test_less_equal") + + @staticmethod + def export_less_broadcast() -> None: + node = onnx.helper.make_node( + "LessOrEqual", + inputs=["x", "y"], + outputs=["less_equal"], + ) + + x = np.random.randn(3, 4, 5).astype(np.float32) + y = np.random.randn(5).astype(np.float32) + z = np.less_equal(x, y) + expect(node, inputs=[x, y], outputs=[z], name="test_less_equal_bcast") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/log.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/log.py new file mode 100644 index 0000000000000000000000000000000000000000..3570517a83e1d79ae9053ee2ead2f3e812d8f651 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/log.py @@ -0,0 +1,27 @@ +# Copyright (c) ONNX Project Contributors +# +# 
SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class Log(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "Log", + inputs=["x"], + outputs=["y"], + ) + + x = np.array([1, 10]).astype(np.float32) + y = np.log(x) # expected output [0., 2.30258512] + expect(node, inputs=[x], outputs=[y], name="test_log_example") + + x = np.exp(np.random.randn(3, 4, 5).astype(np.float32)) + y = np.log(x) + expect(node, inputs=[x], outputs=[y], name="test_log") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/logsoftmax.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/logsoftmax.py new file mode 100644 index 0000000000000000000000000000000000000000..29e8d0a7e3a42afc5d490e99c5a138687154af42 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/logsoftmax.py @@ -0,0 +1,91 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +def logsoftmax(x: np.ndarray, axis: int = -1) -> np.ndarray: + x_max = np.max(x, axis=axis, keepdims=True) + tmp = np.exp(x - x_max) + s = np.sum(tmp, axis=axis, keepdims=True) + return (x - x_max) - np.log(s) + + +class LogSoftmax(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "LogSoftmax", + inputs=["x"], + outputs=["y"], + ) + x = np.array([[-1, 0, 1]]).astype(np.float32) + # expected output + # [[-2.4076061 -1.407606 -0.407606 ]] + y = logsoftmax(x) + expect(node, inputs=[x], outputs=[y], name="test_logsoftmax_example_1") + + @staticmethod + def export_logsoftmax_axis() -> None: + x = np.array([[0, 1, 2, 3], [10000, 10001, 10002, 10003]]).astype(np.float32) + # expected output + # [[-3.4401896 -2.4401896 -1.4401896 -0.44018966] + # [-3.4401896 -2.4401896 -1.4401896 -0.44018966]] + y = logsoftmax(x) + + node = onnx.helper.make_node( + "LogSoftmax", + inputs=["x"], + outputs=["y"], + ) + expect(node, inputs=[x], outputs=[y], name="test_logsoftmax_large_number") + + x = np.abs(np.random.randn(3, 4, 5).astype(np.float32)) + node = onnx.helper.make_node( + "LogSoftmax", + inputs=["x"], + outputs=["y"], + axis=0, + ) + y = logsoftmax(x, axis=0) + expect(node, inputs=[x], outputs=[y], name="test_logsoftmax_axis_0") + + node = onnx.helper.make_node( + "LogSoftmax", + inputs=["x"], + outputs=["y"], + axis=1, + ) + y = logsoftmax(x, axis=1) + expect(node, inputs=[x], outputs=[y], name="test_logsoftmax_axis_1") + + node = onnx.helper.make_node( + "LogSoftmax", + inputs=["x"], + outputs=["y"], + axis=2, + ) + y = logsoftmax(x, axis=2) + expect(node, inputs=[x], outputs=[y], name="test_logsoftmax_axis_2") + + node = onnx.helper.make_node( + "LogSoftmax", + inputs=["x"], + outputs=["y"], + axis=-1, + ) + y = logsoftmax(x, axis=-1) + expect(node, inputs=[x], outputs=[y], name="test_logsoftmax_negative_axis") + + # default axis is -1 + node = onnx.helper.make_node( + "LogSoftmax", + inputs=["x"], + outputs=["y"], + ) + expect(node, inputs=[x], outputs=[y], name="test_logsoftmax_default_axis") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/loop.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/loop.py new file mode 100644 index 0000000000000000000000000000000000000000..a4486a5cee269eb4bea013b294c66fe97130c274 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/loop.py @@ 
-0,0 +1,458 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +from typing import Any, List + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +def compute_loop_outputs(x, seq, trip_count): # type: ignore + for i in range(trip_count): + if seq is None: + seq = [] + seq += [x[: int(i + 1)]] + return seq + + +class Loop(Base): + @staticmethod + def export_loop_11() -> None: + # Given a tensor x of values [x1, ..., xN], and initial tensor y + # sum up its elements using a scan + # returning the final state (y+x1+x2+...+xN) as well the scan_output + # [y+x1, y+x1+x2, ..., y+x1+x2+...+xN] + + y_in = onnx.helper.make_tensor_value_info("y_in", onnx.TensorProto.FLOAT, [1]) + y_out = onnx.helper.make_tensor_value_info("y_out", onnx.TensorProto.FLOAT, [1]) + scan_out = onnx.helper.make_tensor_value_info( + "scan_out", onnx.TensorProto.FLOAT, [1] + ) + cond_in = onnx.helper.make_tensor_value_info( + "cond_in", onnx.TensorProto.BOOL, [] + ) + cond_out = onnx.helper.make_tensor_value_info( + "cond_out", onnx.TensorProto.BOOL, [] + ) + iter_count = onnx.helper.make_tensor_value_info( + "iter_count", onnx.TensorProto.INT64, [] + ) + + x = np.array([1, 2, 3, 4, 5]).astype(np.float32) + y = np.array([-2]).astype(np.float32) + + x_const_node = onnx.helper.make_node( + "Constant", + inputs=[], + outputs=["x"], + value=onnx.helper.make_tensor( + name="const_tensor_x", + data_type=onnx.TensorProto.FLOAT, + dims=x.shape, + vals=x.flatten().astype(float), + ), + ) + + one_const_node = onnx.helper.make_node( + "Constant", + inputs=[], + outputs=["one"], + value=onnx.helper.make_tensor( + name="const_tensor_one", + data_type=onnx.TensorProto.INT64, + dims=(), + vals=[1], + ), + ) + + i_add_node = onnx.helper.make_node( + "Add", inputs=["iter_count", "one"], outputs=["end"] + ) + + start_unsqueeze_node = onnx.helper.make_node( + "Unsqueeze", inputs=["iter_count"], outputs=["slice_start"], axes=[0] + ) + + end_unsqueeze_node = onnx.helper.make_node( + "Unsqueeze", inputs=["end"], outputs=["slice_end"], axes=[0] + ) + + slice_node = onnx.helper.make_node( + "Slice", inputs=["x", "slice_start", "slice_end"], outputs=["slice_out"] + ) + + y_add_node = onnx.helper.make_node( + "Add", inputs=["y_in", "slice_out"], outputs=["y_out"] + ) + + identity_node = onnx.helper.make_node( + "Identity", inputs=["cond_in"], outputs=["cond_out"] + ) + + scan_identity_node = onnx.helper.make_node( + "Identity", inputs=["y_out"], outputs=["scan_out"] + ) + + loop_body = onnx.helper.make_graph( + [ + identity_node, + x_const_node, + one_const_node, + i_add_node, + start_unsqueeze_node, + end_unsqueeze_node, + slice_node, + y_add_node, + scan_identity_node, + ], + "loop_body", + [iter_count, cond_in, y_in], + [cond_out, y_out, scan_out], + ) + + node = onnx.helper.make_node( + "Loop", + inputs=["trip_count", "cond", "y"], + outputs=["res_y", "res_scan"], + body=loop_body, + ) + + trip_count = np.array(5).astype(np.int64) + res_y = np.array([13]).astype(np.float32) + cond = np.array(1).astype(bool) + res_scan = np.array([-1, 1, 4, 8, 13]).astype(np.float32).reshape((5, 1)) + expect( + node, + inputs=[trip_count, cond, y], + outputs=[res_y, res_scan], + name="test_loop11", + opset_imports=[onnx.helper.make_opsetid("", 11)], + ) + + @staticmethod + def export_loop_13() -> None: + # Given a tensor x of values [x1, ..., xN], + # Return a sequence of tensors of + # [[x1], [x1, x2], ..., [x1, ..., xN]] + + seq_in = 
onnx.helper.make_tensor_sequence_value_info( + "seq_in", onnx.TensorProto.FLOAT, None + ) + seq_out = onnx.helper.make_tensor_sequence_value_info( + "seq_out", onnx.TensorProto.FLOAT, None + ) + cond_in = onnx.helper.make_tensor_value_info( + "cond_in", onnx.TensorProto.BOOL, [] + ) + cond_out = onnx.helper.make_tensor_value_info( + "cond_out", onnx.TensorProto.BOOL, [] + ) + iter_count = onnx.helper.make_tensor_value_info( + "iter_count", onnx.TensorProto.INT64, [] + ) + + x = np.array([1, 2, 3, 4, 5]).astype(np.float32) + + x_const_node = onnx.helper.make_node( + "Constant", + inputs=[], + outputs=["x"], + value=onnx.helper.make_tensor( + name="const_tensor_x", + data_type=onnx.TensorProto.FLOAT, + dims=x.shape, + vals=x.flatten().astype(float), + ), + ) + + one_const_node = onnx.helper.make_node( + "Constant", + inputs=[], + outputs=["one"], + value=onnx.helper.make_tensor( + name="const_tensor_one", + data_type=onnx.TensorProto.INT64, + dims=(), + vals=[1], + ), + ) + + zero_const_node = onnx.helper.make_node( + "Constant", + inputs=[], + outputs=["slice_start"], + value=onnx.helper.make_tensor( + name="const_tensor_zero", + data_type=onnx.TensorProto.INT64, + dims=(1,), + vals=[0], + ), + ) + + axes_node = onnx.helper.make_node( + "Constant", + inputs=[], + outputs=["axes"], + value=onnx.helper.make_tensor( + name="const_tensor_axes", + data_type=onnx.TensorProto.INT64, + dims=(), + vals=[0], + ), + ) + + add_node = onnx.helper.make_node( + "Add", inputs=["iter_count", "one"], outputs=["end"] + ) + + end_unsqueeze_node = onnx.helper.make_node( + "Unsqueeze", inputs=["end", "axes"], outputs=["slice_end"] + ) + + slice_node = onnx.helper.make_node( + "Slice", inputs=["x", "slice_start", "slice_end"], outputs=["slice_out"] + ) + + insert_node = onnx.helper.make_node( + "SequenceInsert", inputs=["seq_in", "slice_out"], outputs=["seq_out"] + ) + + identity_node = onnx.helper.make_node( + "Identity", inputs=["cond_in"], outputs=["cond_out"] + ) + + loop_body = onnx.helper.make_graph( + [ + identity_node, + x_const_node, + one_const_node, + zero_const_node, + add_node, + axes_node, + end_unsqueeze_node, + slice_node, + insert_node, + ], + "loop_body", + [iter_count, cond_in, seq_in], + [cond_out, seq_out], + ) + + node = onnx.helper.make_node( + "Loop", + inputs=["trip_count", "cond", "seq_empty"], + outputs=["seq_res"], + body=loop_body, + ) + + trip_count = np.array(5).astype(np.int64) + seq_empty: List[Any] = [] + seq_res = [x[: int(i)] for i in x] + cond = np.array(1).astype(bool) + expect( + node, + inputs=[trip_count, cond, seq_empty], + outputs=[seq_res], + name="test_loop13_seq", + opset_imports=[onnx.helper.make_opsetid("", 13)], + input_type_protos=[ + onnx.helper.make_tensor_type_proto( + onnx.TensorProto.INT64, trip_count.shape + ), + onnx.helper.make_tensor_type_proto(onnx.TensorProto.BOOL, cond.shape), + onnx.helper.make_sequence_type_proto( + onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, []) + ), + ], + ) + + @staticmethod + def export_loop_16_none() -> None: + # Given a tensor sequence of values [x1, ..., xN], and an initial optional sequence of tensors [x0], + # Return a concatenated sequence of tensors of + # [x0, [x1], [x1, x2], ..., [x1, ..., xN]] + + ten_in_tp = onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, []) + seq_in_tp = onnx.helper.make_sequence_type_proto(ten_in_tp) + opt_in_tp = onnx.helper.make_optional_type_proto(seq_in_tp) + opt_in = onnx.helper.make_value_info("opt_seq_in", opt_in_tp) + seq_out = 
onnx.helper.make_tensor_sequence_value_info( + "seq_out", onnx.TensorProto.FLOAT, [] + ) + cond_in = onnx.helper.make_tensor_value_info( + "cond_in", onnx.TensorProto.BOOL, [] + ) + cond_out = onnx.helper.make_tensor_value_info( + "cond_out", onnx.TensorProto.BOOL, [] + ) + iter_count = onnx.helper.make_tensor_value_info( + "iter_count", onnx.TensorProto.INT64, [] + ) + + x0 = np.array(0).astype(np.float32) + x = np.array([1, 2, 3, 4, 5]).astype(np.float32) + + optional_has_elem_node = onnx.helper.make_node( + "OptionalHasElement", inputs=["opt_seq_in"], outputs=["optional_has_elem"] + ) + + optional_is_none = onnx.helper.make_node( + "Not", inputs=["optional_has_elem"], outputs=["optional_is_none"] + ) + + optional_get_elem = onnx.helper.make_node( + "OptionalGetElement", inputs=["opt_seq_in"], outputs=["seq_in"] + ) + + constant_in = onnx.helper.make_node( + "Constant", + inputs=[], + outputs=["constant_in"], + value=onnx.helper.make_tensor( + name="const_tensor", data_type=onnx.TensorProto.FLOAT, dims=(), vals=[0] + ), + ) + + seq_const_in = onnx.helper.make_node( + "SequenceConstruct", inputs=["constant_in"], outputs=["init_seq_in"] + ) + + then_seq_out = onnx.helper.make_tensor_sequence_value_info( + "init_seq_in", onnx.TensorProto.FLOAT, [] + ) + then_body = onnx.helper.make_graph( + [constant_in, seq_const_in], "then_body", [], [then_seq_out] + ) + + else_seq_out = onnx.helper.make_tensor_sequence_value_info( + "seq_in", onnx.TensorProto.FLOAT, [] + ) + else_body = onnx.helper.make_graph( + [optional_get_elem], "else_body", [], [else_seq_out] + ) + + if_node = onnx.helper.make_node( + "If", + inputs=["optional_is_none"], + outputs=["sequence"], + then_branch=then_body, + else_branch=else_body, + ) + + x_const_node = onnx.helper.make_node( + "Constant", + inputs=[], + outputs=["x"], + value=onnx.helper.make_tensor( + name="const_tensor_x", + data_type=onnx.TensorProto.FLOAT, + dims=x.shape, + vals=x.flatten().astype(float), + ), + ) + + one_const_node = onnx.helper.make_node( + "Constant", + inputs=[], + outputs=["one"], + value=onnx.helper.make_tensor( + name="const_tensor_one", + data_type=onnx.TensorProto.INT64, + dims=(), + vals=[1], + ), + ) + + zero_const_node = onnx.helper.make_node( + "Constant", + inputs=[], + outputs=["slice_start"], + value=onnx.helper.make_tensor( + name="const_tensor_zero", + data_type=onnx.TensorProto.INT64, + dims=(1,), + vals=[0], + ), + ) + + axes_node = onnx.helper.make_node( + "Constant", + inputs=[], + outputs=["axes"], + value=onnx.helper.make_tensor( + name="const_tensor_axes", + data_type=onnx.TensorProto.INT64, + dims=(), + vals=[0], + ), + ) + + add_node = onnx.helper.make_node( + "Add", inputs=["iter_count", "one"], outputs=["end"] + ) + + end_unsqueeze_node = onnx.helper.make_node( + "Unsqueeze", inputs=["end", "axes"], outputs=["slice_end"] + ) + + slice_node = onnx.helper.make_node( + "Slice", inputs=["x", "slice_start", "slice_end"], outputs=["slice_out"] + ) + + insert_node = onnx.helper.make_node( + "SequenceInsert", inputs=["sequence", "slice_out"], outputs=["seq_out"] + ) + + identity_node = onnx.helper.make_node( + "Identity", inputs=["cond_in"], outputs=["cond_out"] + ) + + loop_body = onnx.helper.make_graph( + [ + identity_node, + optional_has_elem_node, + optional_is_none, + if_node, + x_const_node, + one_const_node, + zero_const_node, + add_node, + axes_node, + end_unsqueeze_node, + slice_node, + insert_node, + ], + "loop_body", + [iter_count, cond_in, opt_in], + [cond_out, seq_out], + ) + + node = onnx.helper.make_node( + 
"Loop", + inputs=["trip_count", "cond", "opt_seq"], + outputs=["seq_res"], + body=loop_body, + ) + + trip_count = np.array(5).astype(np.int64) + cond = np.array(1).astype(bool) + seq_res = compute_loop_outputs(x, [x0], trip_count) + opt_seq_in: List[Any] = [x0] + expect( + node, + inputs=[trip_count, cond, opt_seq_in], + outputs=[seq_res], + name="test_loop16_seq_none", + opset_imports=[onnx.helper.make_opsetid("", 16)], + input_type_protos=[ + onnx.helper.make_tensor_type_proto( + onnx.TensorProto.INT64, trip_count.shape + ), + onnx.helper.make_tensor_type_proto(onnx.TensorProto.BOOL, cond.shape), + opt_in_tp, + ], + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/lppool.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/lppool.py new file mode 100644 index 0000000000000000000000000000000000000000..08da57cbe2fdf4747902909ea4e817f5e5919b5d --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/lppool.py @@ -0,0 +1,278 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect +from onnx.reference.ops.op_pool_common import ( + get_output_shape_auto_pad, + get_output_shape_explicit_padding, + get_pad_shape, + pool, +) + + +class LpPool(Base): + @staticmethod + def export_lppool_1d_default() -> None: + """input_shape: [1, 3, 32] + output_shape: [1, 3, 31] + """ + p = 3 + kernel_shape = [2] + strides = [1] + node = onnx.helper.make_node( + "LpPool", + inputs=["x"], + outputs=["y"], + kernel_shape=kernel_shape, + strides=strides, + p=p, + ) + x = np.random.randn(1, 3, 32).astype(np.float32) + x_shape = np.shape(x) + pads = None + out_shape, _ = get_output_shape_explicit_padding( + pads, x_shape[2:], kernel_shape, strides + ) + padded = x + y = pool(padded, x_shape, kernel_shape, strides, out_shape, "LPPOOL", p=p) + + expect(node, inputs=[x], outputs=[y], name="test_lppool_1d_default") + + @staticmethod + def export_lppool_2d_default() -> None: + """input_shape: [1, 3, 32, 32] + output_shape: [1, 3, 31, 31] + """ + p = 4 + node = onnx.helper.make_node( + "LpPool", + inputs=["x"], + outputs=["y"], + kernel_shape=[2, 2], + p=p, + ) + x = np.random.randn(1, 3, 32, 32).astype(np.float32) + x_shape = np.shape(x) + pads = None + kernel_shape = (2, 2) + strides = (1, 1) + out_shape, _ = get_output_shape_explicit_padding( + pads, x_shape[2:], kernel_shape, strides + ) + padded = x + y = pool(padded, x_shape, kernel_shape, strides, out_shape, "LPPOOL", p=p) + + expect(node, inputs=[x], outputs=[y], name="test_lppool_2d_default") + + @staticmethod + def export_lppool_3d_default() -> None: + """input_shape: [1, 3, 32, 32, 32] + output_shape: [1, 3, 31, 31, 31] + """ + p = 3 + node = onnx.helper.make_node( + "LpPool", + inputs=["x"], + outputs=["y"], + kernel_shape=[2, 2, 2], + p=p, + ) + x = np.random.randn(1, 3, 32, 32, 32).astype(np.float32) + x_shape = np.shape(x) + pads = None + kernel_shape = [2, 2, 2] + strides = [1, 1, 1] + out_shape, _ = get_output_shape_explicit_padding( + pads, x_shape[2:], kernel_shape, strides + ) + padded = x + y = pool(padded, x_shape, kernel_shape, strides, out_shape, "LPPOOL", p=p) + + expect(node, inputs=[x], outputs=[y], name="test_lppool_3d_default") + + @staticmethod + def export_lppool_2d_same_upper() -> None: + """input_shape: [1, 3, 32, 32] + output_shape: [1, 3, 32, 32] + pad_shape: [1, 1] -> [0, 1, 0, 1] by axis + """ + p = 2 + node = onnx.helper.make_node( + "LpPool", 
+ inputs=["x"], + outputs=["y"], + kernel_shape=[2, 2], + auto_pad="SAME_UPPER", + p=p, + ) + x = np.random.randn(1, 3, 32, 32).astype(np.float32) + x_shape = np.shape(x) + kernel_shape = (2, 2) + strides = (1, 1) + out_shape = get_output_shape_auto_pad( + "SAME_UPPER", x_shape[2:], kernel_shape, strides + ) + pad_shape = get_pad_shape( + "SAME_UPPER", x_shape[2:], kernel_shape, strides, out_shape + ) + pad_top = pad_shape[0] // 2 + pad_bottom = pad_shape[0] - pad_top + pad_left = pad_shape[1] // 2 + pad_right = pad_shape[1] - pad_left + padded = np.pad( + x, + ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), + mode="constant", + constant_values=0, + ) + pads = [pad_top, pad_left, pad_bottom, pad_right] + y = pool(padded, x_shape, kernel_shape, strides, out_shape, "LPPOOL", pads, p=p) + + expect(node, inputs=[x], outputs=[y], name="test_lppool_2d_same_upper") + + @staticmethod + def export_lppool_2d_same_lower() -> None: + """input_shape: [1, 3, 32, 32] + output_shape: [1, 3, 32, 32] + pad_shape: [1, 1] -> [1, 0, 1, 0] by axis + """ + p = 4 + node = onnx.helper.make_node( + "LpPool", + inputs=["x"], + outputs=["y"], + kernel_shape=[2, 2], + auto_pad="SAME_LOWER", + p=p, + ) + x = np.random.randn(1, 3, 32, 32).astype(np.float32) + x_shape = np.shape(x) + kernel_shape = (2, 2) + strides = (1, 1) + out_shape = get_output_shape_auto_pad( + "SAME_LOWER", x_shape[2:], kernel_shape, strides + ) + pad_shape = get_pad_shape( + "SAME_LOWER", x_shape[2:], kernel_shape, strides, out_shape + ) + pad_bottom = pad_shape[0] // 2 + pad_top = pad_shape[0] - pad_bottom + pad_right = pad_shape[1] // 2 + pad_left = pad_shape[1] - pad_right + padded = np.pad( + x, + ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), + mode="constant", + constant_values=0, + ) + pads = [pad_top, pad_left, pad_bottom, pad_right] + y = pool(padded, x_shape, kernel_shape, strides, out_shape, "LPPOOL", pads, p=p) + + expect(node, inputs=[x], outputs=[y], name="test_lppool_2d_same_lower") + + @staticmethod + def export_lppool_2d_pads() -> None: + """input_shape: [1, 3, 28, 28] + output_shape: [1, 3, 30, 30] + pad_shape: [4, 4] -> [2, 2, 2, 2] by axis + """ + p = 3 + node = onnx.helper.make_node( + "LpPool", + inputs=["x"], + outputs=["y"], + kernel_shape=[3, 3], + pads=[2, 2, 2, 2], + p=p, + ) + x = np.random.randn(1, 3, 28, 28).astype(np.float32) + x_shape = np.shape(x) + kernel_shape = (3, 3) + strides = (1, 1) + pad_bottom = pad_top = pad_right = pad_left = 2 + pads = [pad_top, pad_left, pad_bottom, pad_right] + out_shape, pads = get_output_shape_explicit_padding( + pads, x_shape[2:], kernel_shape, strides + ) + padded = np.pad( + x, + ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), + mode="constant", + constant_values=0, + ) + y = pool(padded, x_shape, kernel_shape, strides, out_shape, "LPPOOL", pads, p=p) + + expect(node, inputs=[x], outputs=[y], name="test_lppool_2d_pads") + + @staticmethod + def export_lppool_2d_strides() -> None: + """input_shape: [1, 3, 32, 32] + output_shape: [1, 3, 10, 10] + """ + p = 2 + node = onnx.helper.make_node( + "LpPool", + inputs=["x"], + outputs=["y"], + kernel_shape=[5, 5], + strides=[3, 3], + p=p, + ) + x = np.random.randn(1, 3, 32, 32).astype(np.float32) + x_shape = np.shape(x) + pads = None + kernel_shape = (5, 5) + strides = (3, 3) + out_shape, _ = get_output_shape_explicit_padding( + pads, x_shape[2:], kernel_shape, strides + ) + padded = x + y = pool(padded, x_shape, kernel_shape, strides, out_shape, "LPPOOL", p=p) + + expect(node, inputs=[x], 
outputs=[y], name="test_lppool_2d_strides") + + @staticmethod + def export_lppool_2d_dilations() -> None: + """input_shape: [1, 1, 4, 4] + output_shape: [1, 1, 2, 2] + """ + p = 2 + node = onnx.helper.make_node( + "LpPool", + inputs=["x"], + outputs=["y"], + kernel_shape=[2, 2], + strides=[1, 1], + dilations=[2, 2], + p=p, + ) + x = np.array( + [ + [ + [ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16], + ] + ] + ] + ).astype(np.float32) + + y = np.array( + [ + [ + [ + [14.560219778561036, 16.24807680927192], + [21.633307652783937, 23.49468024894146], + ] + ] + ] + ).astype(np.float32) + + expect(node, inputs=[x], outputs=[y], name="test_lppool_2d_dilations") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/lrn.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/lrn.py new file mode 100644 index 0000000000000000000000000000000000000000..43b53c708f8e0f306cfedd4e82a53241f088a38a --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/lrn.py @@ -0,0 +1,69 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import math + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class LRN(Base): + @staticmethod + def export() -> None: + alpha = 0.0002 + beta = 0.5 + bias = 2.0 + nsize = 3 + node = onnx.helper.make_node( + "LRN", + inputs=["x"], + outputs=["y"], + alpha=alpha, + beta=beta, + bias=bias, + size=nsize, + ) + x = np.random.randn(5, 5, 5, 5).astype(np.float32) + square_sum = np.zeros((5, 5, 5, 5)).astype(np.float32) + for n, c, h, w in np.ndindex(x.shape): + square_sum[n, c, h, w] = sum( + x[ + n, + max(0, c - int(math.floor((nsize - 1) / 2))) : min( + 5, c + int(math.ceil((nsize - 1) / 2)) + 1 + ), + h, + w, + ] + ** 2 + ) + y = x / ((bias + (alpha / nsize) * square_sum) ** beta) + expect(node, inputs=[x], outputs=[y], name="test_lrn") + + @staticmethod + def export_default() -> None: + alpha = 0.0001 + beta = 0.75 + bias = 1.0 + nsize = 3 + node = onnx.helper.make_node("LRN", inputs=["x"], outputs=["y"], size=3) + x = np.random.randn(5, 5, 5, 5).astype(np.float32) + square_sum = np.zeros((5, 5, 5, 5)).astype(np.float32) + for n, c, h, w in np.ndindex(x.shape): + square_sum[n, c, h, w] = sum( + x[ + n, + max(0, c - int(math.floor((nsize - 1) / 2))) : min( + 5, c + int(math.ceil((nsize - 1) / 2)) + 1 + ), + h, + w, + ] + ** 2 + ) + y = x / ((bias + (alpha / nsize) * square_sum) ** beta) + expect(node, inputs=[x], outputs=[y], name="test_lrn_default") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/lstm.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/lstm.py new file mode 100644 index 0000000000000000000000000000000000000000..1fa6603e7ad0536789f66e4620fdc980ab4d5c67 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/lstm.py @@ -0,0 +1,277 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +from typing import Any, Tuple + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class LSTMHelper: + def __init__(self, **params: Any) -> None: + # LSTM Input Names + X = "X" + W = "W" + R = "R" + B = "B" + H_0 = "initial_h" + C_0 = "initial_c" + P = "P" + LAYOUT = "layout" + number_of_gates = 4 + number_of_peepholes = 3 + + required_inputs = [X, W, R] + for i in required_inputs: + assert i in params, f"Missing Required Input: {i}" + + self.num_directions = 
params[W].shape[0] + + if self.num_directions == 1: + for k in params: + if k != X: + params[k] = np.squeeze(params[k], axis=0) + + hidden_size = params[R].shape[-1] + batch_size = params[X].shape[1] + + layout = params.get(LAYOUT, 0) + x = params[X] + x = x if layout == 0 else np.swapaxes(x, 0, 1) + b = ( + params[B] + if B in params + else np.zeros(2 * number_of_gates * hidden_size, dtype=np.float32) + ) + p = ( + params[P] + if P in params + else np.zeros(number_of_peepholes * hidden_size, dtype=np.float32) + ) + h_0 = ( + params[H_0] + if H_0 in params + else np.zeros((batch_size, hidden_size), dtype=np.float32) + ) + c_0 = ( + params[C_0] + if C_0 in params + else np.zeros((batch_size, hidden_size), dtype=np.float32) + ) + + self.X = x + self.W = params[W] + self.R = params[R] + self.B = b + self.P = p + self.H_0 = h_0 + self.C_0 = c_0 + self.LAYOUT = layout + + else: + raise NotImplementedError() + + def f(self, x: np.ndarray) -> np.ndarray: + return 1 / (1 + np.exp(-x)) + + def g(self, x: np.ndarray) -> np.ndarray: + return np.tanh(x) + + def h(self, x: np.ndarray) -> np.ndarray: + return np.tanh(x) + + def step(self) -> Tuple[np.ndarray, np.ndarray]: + seq_length = self.X.shape[0] + hidden_size = self.H_0.shape[-1] + batch_size = self.X.shape[1] + + Y = np.empty([seq_length, self.num_directions, batch_size, hidden_size]) + h_list = [] + + [p_i, p_o, p_f] = np.split(self.P, 3) + H_t = self.H_0 + C_t = self.C_0 + for x in np.split(self.X, self.X.shape[0], axis=0): + gates = ( + np.dot(x, np.transpose(self.W)) + + np.dot(H_t, np.transpose(self.R)) + + np.add(*np.split(self.B, 2)) + ) + i, o, f, c = np.split(gates, 4, -1) + i = self.f(i + p_i * C_t) + f = self.f(f + p_f * C_t) + c = self.g(c) + C = f * C_t + i * c + o = self.f(o + p_o * C) + H = o * self.h(C) + h_list.append(H) + H_t = H + C_t = C + + concatenated = np.concatenate(h_list) + if self.num_directions == 1: + Y[:, 0, :, :] = concatenated + + if self.LAYOUT == 0: + Y_h = Y[-1] + else: + Y = np.transpose(Y, [2, 0, 1, 3]) + Y_h = Y[:, :, -1, :] + + return Y, Y_h + + +class LSTM(Base): + @staticmethod + def export_defaults() -> None: + input = np.array([[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]]).astype(np.float32) + + input_size = 2 + hidden_size = 3 + weight_scale = 0.1 + number_of_gates = 4 + + node = onnx.helper.make_node( + "LSTM", inputs=["X", "W", "R"], outputs=["", "Y_h"], hidden_size=hidden_size + ) + + W = weight_scale * np.ones( + (1, number_of_gates * hidden_size, input_size) + ).astype(np.float32) + R = weight_scale * np.ones( + (1, number_of_gates * hidden_size, hidden_size) + ).astype(np.float32) + + lstm = LSTMHelper(X=input, W=W, R=R) + _, Y_h = lstm.step() + expect( + node, + inputs=[input, W, R], + outputs=[Y_h.astype(np.float32)], + name="test_lstm_defaults", + ) + + @staticmethod + def export_initial_bias() -> None: + input = np.array([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]).astype( + np.float32 + ) + + input_size = 3 + hidden_size = 4 + weight_scale = 0.1 + custom_bias = 0.1 + number_of_gates = 4 + + node = onnx.helper.make_node( + "LSTM", + inputs=["X", "W", "R", "B"], + outputs=["", "Y_h"], + hidden_size=hidden_size, + ) + + W = weight_scale * np.ones( + (1, number_of_gates * hidden_size, input_size) + ).astype(np.float32) + R = weight_scale * np.ones( + (1, number_of_gates * hidden_size, hidden_size) + ).astype(np.float32) + + # Adding custom bias + W_B = custom_bias * np.ones((1, number_of_gates * hidden_size)).astype( + np.float32 + ) + R_B = np.zeros((1, number_of_gates * 
hidden_size)).astype(np.float32) + B = np.concatenate((W_B, R_B), 1) + + lstm = LSTMHelper(X=input, W=W, R=R, B=B) + _, Y_h = lstm.step() + expect( + node, + inputs=[input, W, R, B], + outputs=[Y_h.astype(np.float32)], + name="test_lstm_with_initial_bias", + ) + + @staticmethod + def export_peepholes() -> None: + input = np.array([[[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]]]).astype( + np.float32 + ) + + input_size = 4 + hidden_size = 3 + weight_scale = 0.1 + number_of_gates = 4 + number_of_peepholes = 3 + + node = onnx.helper.make_node( + "LSTM", + inputs=["X", "W", "R", "B", "sequence_lens", "initial_h", "initial_c", "P"], + outputs=["", "Y_h"], + hidden_size=hidden_size, + ) + + # Initializing Inputs + W = weight_scale * np.ones( + (1, number_of_gates * hidden_size, input_size) + ).astype(np.float32) + R = weight_scale * np.ones( + (1, number_of_gates * hidden_size, hidden_size) + ).astype(np.float32) + B = np.zeros((1, 2 * number_of_gates * hidden_size)).astype(np.float32) + seq_lens = np.repeat(input.shape[0], input.shape[1]).astype(np.int32) + init_h = np.zeros((1, input.shape[1], hidden_size)).astype(np.float32) + init_c = np.zeros((1, input.shape[1], hidden_size)).astype(np.float32) + P = weight_scale * np.ones((1, number_of_peepholes * hidden_size)).astype( + np.float32 + ) + + lstm = LSTMHelper( + X=input, W=W, R=R, B=B, P=P, initial_c=init_c, initial_h=init_h + ) + _, Y_h = lstm.step() + expect( + node, + inputs=[input, W, R, B, seq_lens, init_h, init_c, P], + outputs=[Y_h.astype(np.float32)], + name="test_lstm_with_peepholes", + ) + + @staticmethod + def export_batchwise() -> None: + input = np.array([[[1.0, 2.0]], [[3.0, 4.0]], [[5.0, 6.0]]]).astype(np.float32) + + input_size = 2 + hidden_size = 7 + weight_scale = 0.3 + number_of_gates = 4 + layout = 1 + + node = onnx.helper.make_node( + "LSTM", + inputs=["X", "W", "R"], + outputs=["Y", "Y_h"], + hidden_size=hidden_size, + layout=layout, + ) + + W = weight_scale * np.ones( + (1, number_of_gates * hidden_size, input_size) + ).astype(np.float32) + R = weight_scale * np.ones( + (1, number_of_gates * hidden_size, hidden_size) + ).astype(np.float32) + + lstm = LSTMHelper(X=input, W=W, R=R, layout=layout) + Y, Y_h = lstm.step() + expect( + node, + inputs=[input, W, R], + outputs=[Y.astype(np.float32), Y_h.astype(np.float32)], + name="test_lstm_batchwise", + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/matmul.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/matmul.py new file mode 100644 index 0000000000000000000000000000000000000000..60f2406decc898167c808afd3a38884b75e98696 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/matmul.py @@ -0,0 +1,37 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class MatMul(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "MatMul", + inputs=["a", "b"], + outputs=["c"], + ) + + # 2d + a = np.random.randn(3, 4).astype(np.float32) + b = np.random.randn(4, 3).astype(np.float32) + c = np.matmul(a, b) + expect(node, inputs=[a, b], outputs=[c], name="test_matmul_2d") + + # 3d + a = np.random.randn(2, 3, 4).astype(np.float32) + b = np.random.randn(2, 4, 3).astype(np.float32) + c = np.matmul(a, b) + expect(node, inputs=[a, b], outputs=[c], name="test_matmul_3d") + + # 4d + a = np.random.randn(1, 2, 3, 4).astype(np.float32) + b = np.random.randn(1, 2, 4, 
3).astype(np.float32) + c = np.matmul(a, b) + expect(node, inputs=[a, b], outputs=[c], name="test_matmul_4d") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/matmulinteger.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/matmulinteger.py new file mode 100644 index 0000000000000000000000000000000000000000..fee17590ec4ac7e6495d1a180cda862fac388a53 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/matmulinteger.py @@ -0,0 +1,59 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class MatMulInteger(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "MatMulInteger", + inputs=["A", "B", "a_zero_point", "b_zero_point"], + outputs=["Y"], + ) + + A = np.array( + [ + [11, 7, 3], + [10, 6, 2], + [9, 5, 1], + [8, 4, 0], + ], + dtype=np.uint8, + ) + + a_zero_point = np.array([12], dtype=np.uint8) + + B = np.array( + [ + [1, 4], + [2, 5], + [3, 6], + ], + dtype=np.uint8, + ) + + b_zero_point = np.array([0], dtype=np.uint8) + + output = np.array( + [ + [-38, -83], + [-44, -98], + [-50, -113], + [-56, -128], + ], + dtype=np.int32, + ) + + expect( + node, + inputs=[A, B, a_zero_point, b_zero_point], + outputs=[output], + name="test_matmulinteger", + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/max.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/max.py new file mode 100644 index 0000000000000000000000000000000000000000..77569716b1c24b16198db341e333fc07f89a0865 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/max.py @@ -0,0 +1,65 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect +from onnx.backend.test.case.utils import all_numeric_dtypes + + +class Max(Base): + @staticmethod + def export() -> None: + data_0 = np.array([3, 2, 1]).astype(np.float32) + data_1 = np.array([1, 4, 4]).astype(np.float32) + data_2 = np.array([2, 5, 3]).astype(np.float32) + result = np.array([3, 5, 4]).astype(np.float32) + node = onnx.helper.make_node( + "Max", + inputs=["data_0", "data_1", "data_2"], + outputs=["result"], + ) + expect( + node, + inputs=[data_0, data_1, data_2], + outputs=[result], + name="test_max_example", + ) + + node = onnx.helper.make_node( + "Max", + inputs=["data_0"], + outputs=["result"], + ) + expect(node, inputs=[data_0], outputs=[data_0], name="test_max_one_input") + + result = np.maximum(data_0, data_1) + node = onnx.helper.make_node( + "Max", + inputs=["data_0", "data_1"], + outputs=["result"], + ) + expect( + node, inputs=[data_0, data_1], outputs=[result], name="test_max_two_inputs" + ) + + @staticmethod + def export_max_all_numeric_types() -> None: + for op_dtype in all_numeric_dtypes: + data_0 = np.array([3, 2, 1]).astype(op_dtype) + data_1 = np.array([1, 4, 4]).astype(op_dtype) + result = np.array([3, 4, 4]).astype(op_dtype) + node = onnx.helper.make_node( + "Max", + inputs=["data_0", "data_1"], + outputs=["result"], + ) + expect( + node, + inputs=[data_0, data_1], + outputs=[result], + name=f"test_max_{np.dtype(op_dtype).name}", + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/maxpool.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/maxpool.py new file mode 100644 index 
0000000000000000000000000000000000000000..61c36bf96eeab005b4418d175f9947521fea46b6 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/maxpool.py @@ -0,0 +1,713 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect +from onnx.reference.ops.op_pool_common import ( + get_output_shape_auto_pad, + get_output_shape_explicit_padding, + get_pad_shape, + pool, +) + + +class MaxPool(Base): + @staticmethod + def export_maxpool_2d_uint8() -> None: + """input_shape: [1, 1, 5, 5] + output_shape: [1, 1, 5, 5] + pad_shape: [4, 4] -> [2, 2, 2, 2] by axis + """ + node = onnx.helper.make_node( + "MaxPool", + inputs=["x"], + outputs=["y"], + kernel_shape=[5, 5], + pads=[2, 2, 2, 2], + ) + x = np.array( + [ + [ + [ + [1, 2, 3, 4, 5], + [6, 7, 8, 9, 10], + [11, 12, 13, 14, 15], + [16, 17, 18, 19, 20], + [21, 22, 23, 24, 25], + ] + ] + ] + ).astype(np.uint8) + y = np.array( + [ + [ + [ + [13, 14, 15, 15, 15], + [18, 19, 20, 20, 20], + [23, 24, 25, 25, 25], + [23, 24, 25, 25, 25], + [23, 24, 25, 25, 25], + ] + ] + ] + ).astype(np.uint8) + + expect(node, inputs=[x], outputs=[y], name="test_maxpool_2d_uint8") + + @staticmethod + def export_maxpool_2d_precomputed_pads() -> None: + """input_shape: [1, 1, 5, 5] + output_shape: [1, 1, 5, 5] + pad_shape: [4, 4] -> [2, 2, 2, 2] by axis + """ + node = onnx.helper.make_node( + "MaxPool", + inputs=["x"], + outputs=["y"], + kernel_shape=[5, 5], + pads=[2, 2, 2, 2], + ) + x = np.array( + [ + [ + [ + [1, 2, 3, 4, 5], + [6, 7, 8, 9, 10], + [11, 12, 13, 14, 15], + [16, 17, 18, 19, 20], + [21, 22, 23, 24, 25], + ] + ] + ] + ).astype(np.float32) + y = np.array( + [ + [ + [ + [13, 14, 15, 15, 15], + [18, 19, 20, 20, 20], + [23, 24, 25, 25, 25], + [23, 24, 25, 25, 25], + [23, 24, 25, 25, 25], + ] + ] + ] + ).astype(np.float32) + + expect(node, inputs=[x], outputs=[y], name="test_maxpool_2d_precomputed_pads") + + @staticmethod + def export_maxpool_with_argmax_2d_precomputed_pads() -> None: + """input_shape: [1, 1, 5, 5] + output_shape: [1, 1, 5, 5] + pad_shape: [4, 4] -> [2, 2, 2, 2] by axis + """ + node = onnx.helper.make_node( + "MaxPool", + inputs=["x"], + outputs=["y", "z"], + kernel_shape=[5, 5], + pads=[2, 2, 2, 2], + ) + x = np.array( + [ + [ + [ + [1, 2, 3, 4, 5], + [6, 7, 8, 9, 10], + [11, 12, 13, 14, 15], + [16, 17, 18, 19, 20], + [21, 22, 23, 24, 25], + ] + ] + ] + ).astype(np.float32) + y = np.array( + [ + [ + [ + [13, 14, 15, 15, 15], + [18, 19, 20, 20, 20], + [23, 24, 25, 25, 25], + [23, 24, 25, 25, 25], + [23, 24, 25, 25, 25], + ] + ] + ] + ).astype(np.float32) + z = np.array( + [ + [ + [ + [12, 13, 14, 14, 14], + [17, 18, 19, 19, 19], + [22, 23, 24, 24, 24], + [22, 23, 24, 24, 24], + [22, 23, 24, 24, 24], + ] + ] + ] + ).astype(np.int64) + + expect( + node, + inputs=[x], + outputs=[y, z], + name="test_maxpool_with_argmax_2d_precomputed_pads", + ) + + @staticmethod + def export_maxpool_2d_precomputed_strides() -> None: + """input_shape: [1, 1, 5, 5] + output_shape: [1, 1, 2, 2] + """ + node = onnx.helper.make_node( + "MaxPool", inputs=["x"], outputs=["y"], kernel_shape=[2, 2], strides=[2, 2] + ) + x = np.array( + [ + [ + [ + [1, 2, 3, 4, 5], + [6, 7, 8, 9, 10], + [11, 12, 13, 14, 15], + [16, 17, 18, 19, 20], + [21, 22, 23, 24, 25], + ] + ] + ] + ).astype(np.float32) + y = np.array([[[[7, 9], [17, 19]]]]).astype(np.float32) + + expect( + node, inputs=[x], outputs=[y], 
name="test_maxpool_2d_precomputed_strides" + ) + + @staticmethod + def export_maxpool_with_argmax_2d_precomputed_strides() -> None: + """input_shape: [1, 1, 5, 5] + output_shape: [1, 1, 2, 2] + """ + node = onnx.helper.make_node( + "MaxPool", + inputs=["x"], + outputs=["y", "z"], + kernel_shape=[2, 2], + strides=[2, 2], + storage_order=1, + ) + x = np.array( + [ + [ + [ + [1, 2, 3, 4, 5], + [6, 7, 8, 9, 10], + [11, 12, 13, 14, 15], + [16, 17, 18, 19, 20], + [21, 22, 23, 24, 25], + ] + ] + ] + ).astype(np.float32) + y = np.array([[[[7, 9], [17, 19]]]]).astype(np.float32) + z = np.array([[[[6, 16], [8, 18]]]]).astype(np.int64) + + expect( + node, + inputs=[x], + outputs=[y, z], + name="test_maxpool_with_argmax_2d_precomputed_strides", + ) + + @staticmethod + def export_maxpool_2d_precomputed_same_upper() -> None: + """input_shape: [1, 1, 5, 5] + output_shape: [1, 1, 3, 3] + pad_shape: [2, 2] -> [1, 1, 1, 1] by axis + """ + node = onnx.helper.make_node( + "MaxPool", + inputs=["x"], + outputs=["y"], + kernel_shape=[3, 3], + strides=[2, 2], + auto_pad="SAME_UPPER", + ) + x = np.array( + [ + [ + [ + [1, 2, 3, 4, 5], + [6, 7, 8, 9, 10], + [11, 12, 13, 14, 15], + [16, 17, 18, 19, 20], + [21, 22, 23, 24, 25], + ] + ] + ] + ).astype(np.float32) + y = np.array([[[[7, 9, 10], [17, 19, 20], [22, 24, 25]]]]).astype(np.float32) + + expect( + node, inputs=[x], outputs=[y], name="test_maxpool_2d_precomputed_same_upper" + ) + + @staticmethod + def export_maxpool_1d_default() -> None: + """input_shape: [1, 3, 32] + output_shape: [1, 3, 31] + """ + node = onnx.helper.make_node( + "MaxPool", + inputs=["x"], + outputs=["y"], + kernel_shape=[2], + ) + x = np.random.randn(1, 3, 32).astype(np.float32) + x_shape = np.shape(x) + pads = None + kernel_shape = [2] + strides = [1] + out_shape, _ = get_output_shape_explicit_padding( + pads, x_shape[2:], kernel_shape, strides + ) + padded = x + y = pool(padded, x_shape, kernel_shape, strides, out_shape, "MAX") + + expect(node, inputs=[x], outputs=[y], name="test_maxpool_1d_default") + + @staticmethod + def export_maxpool_2d_default() -> None: + """input_shape: [1, 3, 32, 32] + output_shape: [1, 3, 31, 31] + """ + node = onnx.helper.make_node( + "MaxPool", + inputs=["x"], + outputs=["y"], + kernel_shape=[2, 2], + ) + x = np.random.randn(1, 3, 32, 32).astype(np.float32) + x_shape = np.shape(x) + pads = None + kernel_shape = (2, 2) + strides = (1, 1) + out_shape, _ = get_output_shape_explicit_padding( + pads, x_shape[2:], kernel_shape, strides + ) + padded = x + y = pool(padded, x_shape, kernel_shape, strides, out_shape, "MAX") + + expect(node, inputs=[x], outputs=[y], name="test_maxpool_2d_default") + + @staticmethod + def export_maxpool_3d_default() -> None: + """input_shape: [1, 3, 32, 32, 32] + output_shape: [1, 3, 31, 31, 31] + """ + node = onnx.helper.make_node( + "MaxPool", + inputs=["x"], + outputs=["y"], + kernel_shape=[2, 2, 2], + ) + x = np.random.randn(1, 3, 32, 32, 32).astype(np.float32) + x_shape = np.shape(x) + pads = None + kernel_shape = [2, 2, 2] + strides = [1, 1, 1] + out_shape, _ = get_output_shape_explicit_padding( + pads, x_shape[2:], kernel_shape, strides + ) + padded = x + y = pool(padded, x_shape, kernel_shape, strides, out_shape, "MAX") + + expect(node, inputs=[x], outputs=[y], name="test_maxpool_3d_default") + + @staticmethod + def export_maxpool_2d_same_upper() -> None: + """input_shape: [1, 3, 32, 32] + output_shape: [1, 3, 32, 32] + pad_shape: [1, 1] -> [0, 1, 0, 1] by axis + """ + node = onnx.helper.make_node( + "MaxPool", + inputs=["x"], + 
outputs=["y"], + kernel_shape=[2, 2], + auto_pad="SAME_UPPER", + ) + x = np.random.randn(1, 3, 32, 32).astype(np.float32) + x_shape = np.shape(x) + kernel_shape = (2, 2) + strides = (1, 1) + out_shape = get_output_shape_auto_pad( + "SAME_UPPER", x_shape[2:], kernel_shape, strides + ) + pad_shape = get_pad_shape( + "SAME_UPPER", x_shape[2:], kernel_shape, strides, out_shape + ) + pad_top = pad_shape[0] // 2 + pad_bottom = pad_shape[0] - pad_top + pad_left = pad_shape[1] // 2 + pad_right = pad_shape[1] - pad_left + padded = np.pad( + x, + ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), + mode="constant", + constant_values=np.nan, + ) + pads = [pad_top, pad_left, pad_bottom, pad_right] + y = pool(padded, x_shape, kernel_shape, strides, out_shape, "MAX", pads) + + expect(node, inputs=[x], outputs=[y], name="test_maxpool_2d_same_upper") + + @staticmethod + def export_maxpool_2d_same_lower() -> None: + """input_shape: [1, 3, 32, 32] + output_shape: [1, 3, 32, 32] + pad_shape: [1, 1] -> [1, 0, 1, 0] by axis + """ + node = onnx.helper.make_node( + "MaxPool", + inputs=["x"], + outputs=["y"], + kernel_shape=[2, 2], + auto_pad="SAME_LOWER", + ) + x = np.random.randn(1, 3, 32, 32).astype(np.float32) + x_shape = np.shape(x) + kernel_shape = (2, 2) + strides = (1, 1) + out_shape = get_output_shape_auto_pad( + "SAME_LOWER", x_shape[2:], kernel_shape, strides + ) + pad_shape = get_pad_shape( + "SAME_LOWER", x_shape[2:], kernel_shape, strides, out_shape + ) + pad_bottom = pad_shape[0] // 2 + pad_top = pad_shape[0] - pad_bottom + pad_right = pad_shape[1] // 2 + pad_left = pad_shape[1] - pad_right + padded = np.pad( + x, + ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), + mode="constant", + constant_values=np.nan, + ) + pads = [pad_top, pad_left, pad_bottom, pad_right] + y = pool(padded, x_shape, kernel_shape, strides, out_shape, "MAX", pads) + + expect(node, inputs=[x], outputs=[y], name="test_maxpool_2d_same_lower") + + @staticmethod + def export_maxpool_2d_pads() -> None: + """input_shape: [1, 3, 28, 28] + output_shape: [1, 3, 30, 30] + pad_shape: [4, 4] -> [2, 2, 2, 2] by axis + """ + node = onnx.helper.make_node( + "MaxPool", + inputs=["x"], + outputs=["y"], + kernel_shape=[3, 3], + pads=[2, 2, 2, 2], + ) + x = np.random.randn(1, 3, 28, 28).astype(np.float32) + x_shape = np.shape(x) + kernel_shape = (3, 3) + strides = (1, 1) + pad_bottom = pad_top = pad_right = pad_left = 2 + pads = [pad_top, pad_left, pad_bottom, pad_right] + out_shape, pads = get_output_shape_explicit_padding( + pads, x_shape[2:], kernel_shape, strides + ) + padded = np.pad( + x, + ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), + mode="constant", + constant_values=np.nan, + ) + + y = pool(padded, x_shape, kernel_shape, strides, out_shape, "MAX", pads) + + expect(node, inputs=[x], outputs=[y], name="test_maxpool_2d_pads") + + @staticmethod + def export_maxpool_2d_strides() -> None: + """input_shape: [1, 3, 32, 32] + output_shape: [1, 3, 10, 10] + """ + node = onnx.helper.make_node( + "MaxPool", inputs=["x"], outputs=["y"], kernel_shape=[5, 5], strides=[3, 3] + ) + x = np.random.randn(1, 3, 32, 32).astype(np.float32) + x_shape = np.shape(x) + pads = None + kernel_shape = (5, 5) + strides = (3, 3) + out_shape, pads = get_output_shape_explicit_padding( + pads, x_shape[2:], kernel_shape, strides + ) + padded = x + y = pool(padded, x_shape, kernel_shape, strides, out_shape, "MAX") + + expect(node, inputs=[x], outputs=[y], name="test_maxpool_2d_strides") + + @staticmethod + def 
export_maxpool_2d_ceil() -> None: + """input_shape: [1, 1, 4, 4] + output_shape: [1, 1, 2, 2] + """ + node = onnx.helper.make_node( + "MaxPool", + inputs=["x"], + outputs=["y"], + kernel_shape=[3, 3], + strides=[2, 2], + ceil_mode=True, + ) + x = np.array( + [ + [ + [ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16], + ] + ] + ] + ).astype(np.float32) + y = np.array([[[[11, 12], [15, 16]]]]).astype(np.float32) + + expect(node, inputs=[x], outputs=[y], name="test_maxpool_2d_ceil") + + @staticmethod + def export_maxpool_2d_ceil_output_size_reduce_by_one() -> None: + """input_shape: [1, 1, 2, 2] + output_shape: [1, 1, 1, 1] + """ + node = onnx.helper.make_node( + "MaxPool", + inputs=["x"], + outputs=["y"], + kernel_shape=[1, 1], + strides=[2, 2], + ceil_mode=True, + ) + x = np.array([[[[1, 2], [3, 4]]]]).astype(np.float32) + y = np.array([[[[1]]]]).astype(np.float32) + + expect( + node, + inputs=[x], + outputs=[y], + name="test_maxpool_2d_ceil_output_size_reduce_by_one", + ) + + @staticmethod + def export_maxpool_2d_dilations() -> None: + """input_shape: [1, 1, 4, 4] + output_shape: [1, 1, 2, 2] + """ + node = onnx.helper.make_node( + "MaxPool", + inputs=["x"], + outputs=["y"], + kernel_shape=[2, 2], + strides=[1, 1], + dilations=[2, 2], + ) + x = np.array( + [ + [ + [ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16], + ] + ] + ] + ).astype(np.float32) + y = np.array([[[[11, 12], [15, 16]]]]).astype(np.float32) + + expect(node, inputs=[x], outputs=[y], name="test_maxpool_2d_dilations") + + @staticmethod + def export_maxpool_3d_dilations() -> None: + """input_shape: [1, 1, 4, 4, 4] + output_shape: [1, 1, 2, 2, 2] + """ + node = onnx.helper.make_node( + "MaxPool", + inputs=["x"], + outputs=["y"], + kernel_shape=[2, 2, 2], + strides=[1, 1, 1], + dilations=[2, 2, 2], + ) + x = np.array( + [ + [ + [ + [ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16], + ], + [ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16], + ], + [ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16], + ], + [ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16], + ], + ] + ] + ] + ).astype(np.float32) + y = np.array([[[[[11, 12], [15, 16]], [[11, 12], [15, 16]]]]]).astype( + np.float32 + ) + + expect(node, inputs=[x], outputs=[y], name="test_maxpool_3d_dilations") + + @staticmethod + def export_maxpool_3d_dilations_use_ref_impl() -> None: + """input_shape: [1, 1, 4, 4, 4] + output_shape: [1, 1, 2, 2, 2] + """ + dilations = [2, 2, 2] + kernel_shape = [2, 2, 2] + strides = [1, 1, 1] + ceil_mode = False + node = onnx.helper.make_node( + "MaxPool", + inputs=["x"], + outputs=["y"], + kernel_shape=[2, 2, 2], + strides=[1, 1, 1], + dilations=dilations, + ) + x = np.array( + [ + [ + [ + [ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16], + ], + [ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16], + ], + [ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16], + ], + [ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16], + ], + ] + ] + ] + ).astype(np.float32) + + x_shape = x.shape[2:] + out_shape, pads = get_output_shape_explicit_padding( + None, x_shape, kernel_shape, strides, dilations, ceil_mode=ceil_mode + ) + padded = x + y = pool( + padded, + (1, 1, *x_shape), + kernel_shape, + strides, + out_shape, + "MAX", + pads, + dilations=dilations, + ) + + expect( + node, inputs=[x], outputs=[y], name="test_maxpool_3d_dilations_use_ref_impl" + ) + 
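+ # Dilation arithmetic behind the expected shapes in the dilation tests above: with kernel + # size k and dilation d, the effective kernel extent per axis is (k - 1) * d + 1. + # For the 4x4 (and 4x4x4) inputs, k=2 and d=2 give extent 3, so each spatial axis + # yields floor((4 - 3) / 1) + 1 = 2 outputs, hence the 2x2 and 2x2x2 results.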
+ @staticmethod + def export_maxpool_3d_dilations_use_ref_impl_large() -> None: + x_shape = (32, 32, 32) + dilations = (2, 2, 2) + kernel_shape = (5, 5, 5) + strides = (3, 3, 3) + ceil_mode = True + + node = onnx.helper.make_node( + "MaxPool", + inputs=["x"], + outputs=["y"], + kernel_shape=kernel_shape, + strides=strides, + dilations=dilations, + ceil_mode=ceil_mode, + ) + + x = np.random.randn(1, 1, *x_shape).astype(np.float32) + out_shape, pads = get_output_shape_explicit_padding( + None, x_shape, kernel_shape, strides, dilations, ceil_mode=ceil_mode + ) + padded = np.pad( + x, + ( + (0, 0), + (0, 0), + (pads[0], pads[3]), + (pads[1], pads[4]), + (pads[2], pads[5]), + ), + mode="constant", + constant_values=0, + ) + y = pool( + padded, + (1, 1, *x_shape), + kernel_shape, + strides, + out_shape, + "MAX", + pads, + dilations=dilations, + ) + + expect( + node, + inputs=[x], + outputs=[y], + name="test_maxpool_3d_dilations_use_ref_impl_large", + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/maxunpool.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/maxunpool.py new file mode 100644 index 0000000000000000000000000000000000000000..3f92dea0711928e03f09d35af99a28800ccfc5ac --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/maxunpool.py @@ -0,0 +1,66 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class MaxUnpool(Base): + @staticmethod + def export_without_output_shape() -> None: + node = onnx.helper.make_node( + "MaxUnpool", + inputs=["xT", "xI"], + outputs=["y"], + kernel_shape=[2, 2], + strides=[2, 2], + ) + xT = np.array([[[[1, 2], [3, 4]]]], dtype=np.float32) + xI = np.array([[[[5, 7], [13, 15]]]], dtype=np.int64) + y = np.array( + [[[[0, 0, 0, 0], [0, 1, 0, 2], [0, 0, 0, 0], [0, 3, 0, 4]]]], + dtype=np.float32, + ) + expect( + node, + inputs=[xT, xI], + outputs=[y], + name="test_maxunpool_export_without_output_shape", + ) + + @staticmethod + def export_with_output_shape() -> None: + node = onnx.helper.make_node( + "MaxUnpool", + inputs=["xT", "xI", "output_shape"], + outputs=["y"], + kernel_shape=[2, 2], + strides=[2, 2], + ) + xT = np.array([[[[5, 6], [7, 8]]]], dtype=np.float32) + xI = np.array([[[[5, 7], [13, 15]]]], dtype=np.int64) + output_shape = np.array((1, 1, 5, 5), dtype=np.int64) + y = np.array( + [ + [ + [ + [0, 0, 0, 0, 0], + [0, 5, 0, 6, 0], + [0, 0, 0, 0, 0], + [0, 7, 0, 8, 0], + [0, 0, 0, 0, 0], + ] + ] + ], + dtype=np.float32, + ) + expect( + node, + inputs=[xT, xI, output_shape], + outputs=[y], + name="test_maxunpool_export_with_output_shape", + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/mean.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/mean.py new file mode 100644 index 0000000000000000000000000000000000000000..1ec54a34cff469b31365e1ffc4e488484b8c72d6 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/mean.py @@ -0,0 +1,46 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class Mean(Base): + @staticmethod + def export() -> None: + data_0 = np.array([3, 0, 2]).astype(np.float32) + data_1 = np.array([1, 3, 4]).astype(np.float32) + data_2 = np.array([2, 6, 6]).astype(np.float32) + result = np.array([2, 3, 4]).astype(np.float32) + 
node = onnx.helper.make_node( + "Mean", + inputs=["data_0", "data_1", "data_2"], + outputs=["result"], + ) + expect( + node, + inputs=[data_0, data_1, data_2], + outputs=[result], + name="test_mean_example", + ) + + node = onnx.helper.make_node( + "Mean", + inputs=["data_0"], + outputs=["result"], + ) + expect(node, inputs=[data_0], outputs=[data_0], name="test_mean_one_input") + + result = np.divide(np.add(data_0, data_1), 2.0) + node = onnx.helper.make_node( + "Mean", + inputs=["data_0", "data_1"], + outputs=["result"], + ) + expect( + node, inputs=[data_0, data_1], outputs=[result], name="test_mean_two_inputs" + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/meanvariancenormalization.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/meanvariancenormalization.py new file mode 100644 index 0000000000000000000000000000000000000000..d94dbcdd26e6295d0edcaf498cfe123a3e28b012 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/meanvariancenormalization.py @@ -0,0 +1,48 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class MeanVarianceNormalization(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "MeanVarianceNormalization", inputs=["X"], outputs=["Y"] + ) + + input_data = np.array( + [ + [ + [[0.8439683], [0.5665144], [0.05836735]], + [[0.02916367], [0.12964272], [0.5060197]], + [[0.79538304], [0.9411346], [0.9546573]], + ], + [ + [[0.17730942], [0.46192095], [0.26480448]], + [[0.6746842], [0.01665257], [0.62473077]], + [[0.9240844], [0.9722341], [0.11965699]], + ], + [ + [[0.41356155], [0.9129373], [0.59330076]], + [[0.81929934], [0.7862604], [0.11799799]], + [[0.69248444], [0.54119414], [0.07513223]], + ], + ], + dtype=np.float32, + ) + + # Calculate expected output data + data_mean = np.mean(input_data, axis=(0, 2, 3), keepdims=1) + data_mean_squared = np.power(data_mean, 2) + data_squared = np.power(input_data, 2) + data_squared_mean = np.mean(data_squared, axis=(0, 2, 3), keepdims=1) + std = np.sqrt(data_squared_mean - data_mean_squared) + expected_output = (input_data - data_mean) / (std + 1e-9) + + expect(node, inputs=[input_data], outputs=[expected_output], name="test_mvn") diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/melweightmatrix.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/melweightmatrix.py new file mode 100644 index 0000000000000000000000000000000000000000..ad491058bb7727fbabc16c40bb8399ce85b31ec6 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/melweightmatrix.py @@ -0,0 +1,89 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect + + +class MelWeightMatrix(Base): + @staticmethod + def export() -> None: + node = onnx.helper.make_node( + "MelWeightMatrix", + inputs=[ + "num_mel_bins", + "dft_length", + "sample_rate", + "lower_edge_hertz", + "upper_edge_hertz", + ], + outputs=["output"], + ) + + num_mel_bins = np.int32(8) + dft_length = np.int32(16) + sample_rate = np.int32(8192) + lower_edge_hertz = np.float32(0) + upper_edge_hertz = np.float32(8192 / 2) + + num_spectrogram_bins = dft_length // 2 + 1 + frequency_bins = np.arange(0, num_mel_bins + 2) + + low_frequency_mel = 2595 * np.log10(1 + lower_edge_hertz / 700) + 
high_frequency_mel = 2595 * np.log10(1 + upper_edge_hertz / 700) + mel_step = (high_frequency_mel - low_frequency_mel) / frequency_bins.shape[0] + + frequency_bins = frequency_bins * mel_step + low_frequency_mel + frequency_bins = 700 * (np.power(10, (frequency_bins / 2595)) - 1) + frequency_bins = ((dft_length + 1) * frequency_bins) // sample_rate + frequency_bins = frequency_bins.astype(int) + + output = np.zeros((num_spectrogram_bins, num_mel_bins)) + output.flags.writeable = True + + for i in range(num_mel_bins): + lower_frequency_value = frequency_bins[i] # left + center_frequency_point = frequency_bins[i + 1] # center + higher_frequency_point = frequency_bins[i + 2] # right + low_to_center = center_frequency_point - lower_frequency_value + if low_to_center == 0: + output[center_frequency_point, i] = 1 + else: + for j in range(lower_frequency_value, center_frequency_point + 1): + output[j, i] = float(j - lower_frequency_value) / float( + low_to_center + ) + center_to_high = higher_frequency_point - center_frequency_point + if center_to_high > 0: + for j in range(center_frequency_point, higher_frequency_point): + output[j, i] = float(higher_frequency_point - j) / float( + center_to_high + ) + + # Expected output + # 1.000000, 1.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + # 0.000000, 0.000000, 1.000000, 1.000000, 0.000000, 0.000000, 0.000000, 0.000000, + # 0.000000, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000, 0.000000, 0.000000, + # 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000, 0.000000, + # 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000, + # 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 1.000000, + # 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + # 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + # 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, + output = output.astype(np.float32) + expect( + node, + inputs=[ + num_mel_bins, + dft_length, + sample_rate, + lower_edge_hertz, + upper_edge_hertz, + ], + outputs=[output], + name="test_melweightmatrix", + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/node/min.py b/MLPY/Lib/site-packages/onnx/backend/test/case/node/min.py new file mode 100644 index 0000000000000000000000000000000000000000..70a0437dd5750778bf61b2fe1df4b16619954b63 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/node/min.py @@ -0,0 +1,65 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +import onnx +from onnx.backend.test.case.base import Base +from onnx.backend.test.case.node import expect +from onnx.backend.test.case.utils import all_numeric_dtypes + + +class Min(Base): + @staticmethod + def export() -> None: + data_0 = np.array([3, 2, 1]).astype(np.float32) + data_1 = np.array([1, 4, 4]).astype(np.float32) + data_2 = np.array([2, 5, 0]).astype(np.float32) + result = np.array([1, 2, 0]).astype(np.float32) + node = onnx.helper.make_node( + "Min", + inputs=["data_0", "data_1", "data_2"], + outputs=["result"], + ) + expect( + node, + inputs=[data_0, data_1, data_2], + outputs=[result], + name="test_min_example", + ) + + node = onnx.helper.make_node( + "Min", + inputs=["data_0"], + outputs=["result"], + ) + expect(node, inputs=[data_0], outputs=[data_0], name="test_min_one_input") + + result = np.minimum(data_0, data_1) + node = onnx.helper.make_node( + "Min", + 
inputs=["data_0", "data_1"], + outputs=["result"], + ) + expect( + node, inputs=[data_0, data_1], outputs=[result], name="test_min_two_inputs" + ) + + @staticmethod + def export_min_all_numeric_types() -> None: + for op_dtype in all_numeric_dtypes: + data_0 = np.array([3, 2, 1]).astype(op_dtype) + data_1 = np.array([1, 4, 4]).astype(op_dtype) + result = np.array([1, 2, 1]).astype(op_dtype) + node = onnx.helper.make_node( + "Min", + inputs=["data_0", "data_1"], + outputs=["result"], + ) + expect( + node, + inputs=[data_0, data_1], + outputs=[result], + name=f"test_min_{np.dtype(op_dtype).name}", + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/test_case.py b/MLPY/Lib/site-packages/onnx/backend/test/case/test_case.py new file mode 100644 index 0000000000000000000000000000000000000000..6ac006f05e5ddf807654e5bea69fd452ae044ed3 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/test_case.py @@ -0,0 +1,25 @@ +# Copyright (c) ONNX Project Contributors + +# SPDX-License-Identifier: Apache-2.0 + +from dataclasses import dataclass +from typing import Optional, Sequence, Tuple + +import numpy as np + +import onnx + + +@dataclass +class TestCase: + name: str + model_name: str + url: Optional[str] + model_dir: Optional[str] + model: Optional[onnx.ModelProto] + data_sets: Optional[Sequence[Tuple[Sequence[np.ndarray], Sequence[np.ndarray]]]] + kind: str + rtol: float + atol: float + # Tell PyTest this isn't a real test. + __test__: bool = False diff --git a/MLPY/Lib/site-packages/onnx/backend/test/case/utils.py b/MLPY/Lib/site-packages/onnx/backend/test/case/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..bc9c7dcd0d522b864425808c5d504b656d1d7ab5 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/case/utils.py @@ -0,0 +1,43 @@ +# Copyright (c) ONNX Project Contributors + +# SPDX-License-Identifier: Apache-2.0 + +import importlib +import pkgutil +from types import ModuleType +from typing import List, Optional + +import numpy as np + +from onnx import ONNX_ML + +all_numeric_dtypes = [ + np.int8, + np.int16, + np.int32, + np.int64, + np.uint8, + np.uint16, + np.uint32, + np.uint64, + np.float16, + np.float32, + np.float64, +] + + +def import_recursive(package: ModuleType) -> None: + """Takes a package and imports all modules underneath it.""" + pkg_dir: Optional[List[str]] = None + pkg_dir = package.__path__ # type: ignore + module_location = package.__name__ + for _module_loader, name, ispkg in pkgutil.iter_modules(pkg_dir): + module_name = f"{module_location}.{name}" # Module/package + if not ONNX_ML and module_name.startswith( + "onnx.backend.test.case.node.ai_onnx_ml" + ): + continue + + module = importlib.import_module(module_name) + if ispkg: + import_recursive(module) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/cmd_tools.py b/MLPY/Lib/site-packages/onnx/backend/test/cmd_tools.py new file mode 100644 index 0000000000000000000000000000000000000000..83031fd7926cd135ac8249569d9d757fff1c053e --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/cmd_tools.py @@ -0,0 +1,189 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import argparse +import json +import os +import shutil +import warnings + +import onnx.backend.test.case.model as model_test +import onnx.backend.test.case.node as node_test +from onnx import ONNX_ML, TensorProto, numpy_helper + +TOP_DIR = os.path.realpath(os.path.dirname(__file__)) +DATA_DIR = os.path.join(TOP_DIR, "data") + + +def generate_data(args: 
argparse.Namespace) -> None: + def prepare_dir(path: str) -> None: + if os.path.exists(path): + shutil.rmtree(path) + os.makedirs(path) + + # Clean the output directory before generating data for node testcases + # It is used to check new generated data is correct in CIs + node_root = os.path.join(args.output, "node") + original_dir_number = len( + [name for name in os.listdir(node_root) if os.path.isfile(name)] + ) + if args.clean and os.path.exists(node_root): + for sub_dir in os.listdir(node_root): + if ONNX_ML or not sub_dir.startswith("test_ai_onnx_ml_"): + shutil.rmtree(os.path.join(node_root, sub_dir)) + + cases = model_test.collect_testcases() + # If op_type is specified, only include those testcases including the given operator + # Otherwise, include all of the testcases + if args.diff: + cases += node_test.collect_diff_testcases() + else: + cases += node_test.collect_testcases(args.op_type) + node_number = 0 + + for case in cases: + output_dir = os.path.join(args.output, case.kind, case.name) + prepare_dir(output_dir) + if case.kind == "node": + node_number += 1 + if case.kind == "real": + with open(os.path.join(output_dir, "data.json"), "w") as fi: + json.dump( + { + "url": case.url, + "model_name": case.model_name, + "rtol": case.rtol, + "atol": case.atol, + }, + fi, + sort_keys=True, + ) + else: + assert case.model + with open(os.path.join(output_dir, "model.onnx"), "wb") as f: + f.write(case.model.SerializeToString()) + assert case.data_sets + for i, (inputs, outputs) in enumerate(case.data_sets): + data_set_dir = os.path.join(output_dir, f"test_data_set_{i}") + prepare_dir(data_set_dir) + for j, input in enumerate(inputs): + with open(os.path.join(data_set_dir, f"input_{j}.pb"), "wb") as f: + if case.model.graph.input[j].type.HasField("map_type"): + f.write( + numpy_helper.from_dict( + input, case.model.graph.input[j].name + ).SerializeToString() + ) + elif case.model.graph.input[j].type.HasField("sequence_type"): + f.write( + numpy_helper.from_list( + input, case.model.graph.input[j].name + ).SerializeToString() + ) + elif case.model.graph.input[j].type.HasField("optional_type"): + f.write( + numpy_helper.from_optional( + input, case.model.graph.input[j].name + ).SerializeToString() + ) + else: + assert case.model.graph.input[j].type.HasField( + "tensor_type" + ) + if isinstance(input, TensorProto): + f.write(input.SerializeToString()) + else: + f.write( + numpy_helper.from_array( + input, case.model.graph.input[j].name + ).SerializeToString() + ) + for j, output in enumerate(outputs): + with open(os.path.join(data_set_dir, f"output_{j}.pb"), "wb") as f: + if case.model.graph.output[j].type.HasField("map_type"): + f.write( + numpy_helper.from_dict( + output, case.model.graph.output[j].name + ).SerializeToString() + ) + elif case.model.graph.output[j].type.HasField("sequence_type"): + f.write( + numpy_helper.from_list( + output, case.model.graph.output[j].name + ).SerializeToString() + ) + elif case.model.graph.output[j].type.HasField("optional_type"): + f.write( + numpy_helper.from_optional( + output, case.model.graph.output[j].name + ).SerializeToString() + ) + else: + assert case.model.graph.output[j].type.HasField( + "tensor_type" + ) + if isinstance(output, TensorProto): + f.write(output.SerializeToString()) + else: + f.write( + numpy_helper.from_array( + output, case.model.graph.output[j].name + ).SerializeToString() + ) + if not args.clean and node_number != original_dir_number: + warnings.warn( + "There are some models under 'onnx/backend/test/data/node' which cannot 
not" + " be generated by the script from 'onnx/backend/test/case/node'. Please add" + " '--clean' option for 'python onnx/backend/test/cmd_tools.py generate-data'" + " to cleanup the existing directories and regenerate them.", + Warning, + stacklevel=2, + ) + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser("backend-test-tools") + subparsers = parser.add_subparsers() + + subparser = subparsers.add_parser( + "generate-data", help="convert testcases to test data." + ) + subparser.add_argument( + "-c", + "--clean", + default=False, + action="store_true", + help="Clean the output directory before generating data for node testcases.", + ) + subparser.add_argument( + "-o", + "--output", + default=DATA_DIR, + help="output directory (default: %(default)s)", + ) + subparser.add_argument( + "-t", + "--op_type", + default=None, + help="op_type for test case generation. (generates test data for the specified op_type only.)", + ) + subparser.add_argument( + "-d", + "--diff", + default=False, + action="store_true", + help="only generates test data for those changed files (compared to the main branch).", + ) + subparser.set_defaults(func=generate_data) + + return parser.parse_args() + + +def main() -> None: + args = parse_args() + args.func(args) + + +if __name__ == "__main__": + main() diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/light/README.md b/MLPY/Lib/site-packages/onnx/backend/test/data/light/README.md new file mode 100644 index 0000000000000000000000000000000000000000..787c0a3dcc213a92da75b5109335ec0dd3338a66 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/light/README.md @@ -0,0 +1,16 @@ + + +# Light models + +The models in this folder were created by replacing +all float initializers by nodes `ConstantOfShape` +with function `replace_initializer_by_constant_of_shape`. +The models are lighter and can be added to the repository +for unit testing. + +Expected outputs were obtained by using CReferenceEvaluator +implemented in [PR 4952](https://github.com/onnx/onnx/pull/4952). 
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_bvlc_alexnet.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_bvlc_alexnet.onnx new file mode 100644 index 0000000000000000000000000000000000000000..ad972bae5ee76a02d4a651e201b74d7b8612069d --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_bvlc_alexnet.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2afa78cef5a88aed9d6e3d63fb92bd330c9177ac150d19189c6b3e7204ba0212 +size 3968 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_bvlc_alexnet_output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_bvlc_alexnet_output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..3fcf1467da0a52cc7751c6f7299ac6c3cfa37e1b --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_bvlc_alexnet_output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97d6bcc28b6ad731bc3281a8b03068d15fa9d538769b5b24ca5448ea143db100 +size 4010 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_densenet121.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_densenet121.onnx new file mode 100644 index 0000000000000000000000000000000000000000..7e34c4b147d682266c4f36f759bdb33a94c40b4a --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_densenet121.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:49ddb5712797d6164f1d864bedaad927de4f3909ad1b4ba390a92c2f8150e9f6 +size 214344 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_densenet121_output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_densenet121_output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..6c54f8eb6394cad398b9f14ca205e2a529a02dbf --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_densenet121_output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6cc6130fcbf90314522678603bb3db7ed8f3a79754b37d3ac95c9a2e3f4de0c +size 4014 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_inception_v1.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_inception_v1.onnx new file mode 100644 index 0000000000000000000000000000000000000000..d1e88acb7d747aea26a59c83273315a5768b7e7d --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_inception_v1.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bb7a0e6c370c709f5615eeef961b43628de13d0009ae4d6f4bfb0d5aea5d8270 +size 36869 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_inception_v1_output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_inception_v1_output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..3fcf1467da0a52cc7751c6f7299ac6c3cfa37e1b --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_inception_v1_output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97d6bcc28b6ad731bc3281a8b03068d15fa9d538769b5b24ca5448ea143db100 +size 4010 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_inception_v2.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_inception_v2.onnx new file mode 100644 index 0000000000000000000000000000000000000000..93622ca410b41ab13c8c314c4a98627e48bef270 --- /dev/null +++ 
b/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_inception_v2.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:224d77d55b26559a959db627c3f417a623fbf3b3000d25f0939327aa935d933f +size 159024 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_inception_v2_output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_inception_v2_output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..3fcf1467da0a52cc7751c6f7299ac6c3cfa37e1b --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_inception_v2_output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97d6bcc28b6ad731bc3281a8b03068d15fa9d538769b5b24ca5448ea143db100 +size 4010 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_resnet50.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_resnet50.onnx new file mode 100644 index 0000000000000000000000000000000000000000..9ee7e864375e4cbed0c902c972013c2d98762c8d --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_resnet50.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:05e77a5c9c9ce0913f549a50d6ebaced5e0ff6817b61e09bae26e4c5bd9055e4 +size 79770 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_resnet50_output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_resnet50_output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..3fcf1467da0a52cc7751c6f7299ac6c3cfa37e1b --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_resnet50_output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97d6bcc28b6ad731bc3281a8b03068d15fa9d538769b5b24ca5448ea143db100 +size 4010 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_shufflenet.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_shufflenet.onnx new file mode 100644 index 0000000000000000000000000000000000000000..8f30725f663d75936a44895ee3a109fe2f1afedd --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_shufflenet.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c6f406d62be36d6b4572542c0950a2abd59f56237068793290680bba89fbafe5 +size 67666 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_shufflenet_output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_shufflenet_output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..3fcf1467da0a52cc7751c6f7299ac6c3cfa37e1b --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_shufflenet_output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97d6bcc28b6ad731bc3281a8b03068d15fa9d538769b5b24ca5448ea143db100 +size 4010 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_squeezenet.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_squeezenet.onnx new file mode 100644 index 0000000000000000000000000000000000000000..af4c9d1982dc8f6127a8238aabfd6b08ca638a20 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_squeezenet.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:770b0f3c8623e18bf58b53754d710051b4c268248422142980a132bbe6dfe908 +size 15618 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_squeezenet_output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_squeezenet_output_0.pb 
new file mode 100644 index 0000000000000000000000000000000000000000..a878d320b4f4400fd246238734107013525b1b7e --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_squeezenet_output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:32eee74b7e589729a8069267de65ba6aba2881d0f24041aae8e50f685303c136 +size 4014 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_vgg19.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_vgg19.onnx new file mode 100644 index 0000000000000000000000000000000000000000..f7e0eb39e38cd7116e369b65eb70c06f8176aafd --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_vgg19.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e547d732b3a3d66eeb8fa64a026adb994d3db552f0bbd52e436d06300d89afe +size 9311 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_vgg19_output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_vgg19_output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..3fcf1467da0a52cc7751c6f7299ac6c3cfa37e1b --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_vgg19_output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97d6bcc28b6ad731bc3281a8b03068d15fa9d538769b5b24ca5448ea143db100 +size 4010 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_zfnet512.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_zfnet512.onnx new file mode 100644 index 0000000000000000000000000000000000000000..329e925929eabdad78d9a7dced4e27a4087f6d36 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_zfnet512.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6444bb58b98c3d14f551a3bdb83eea9e5db7e147790db3115c447e9c9a8338b0 +size 4506 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_zfnet512_output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_zfnet512_output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..3fcf1467da0a52cc7751c6f7299ac6c3cfa37e1b --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/light/light_zfnet512_output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97d6bcc28b6ad731bc3281a8b03068d15fa9d538769b5b24ca5448ea143db100 +size 4010 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_abs/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_abs/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..17cf800c1d362a6f799e3688fc709d82790798fc --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_abs/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:87e8c85a3c5ac0401781fd1327a04a8eec7b8fac8df7e813b2bed5134403905a +size 97 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_abs/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_abs/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..58f5af5f3237bae3e4133f0888f56b1ed0292c51 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_abs/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0180f5665325e343077736e4903fa43513ae714ceb131deee45b842370df289a +size 254 diff --git 
a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_abs/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_abs/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..7822c08274018d353acfc4ff8233d7f984d80524 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_abs/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8135951529d61fddab48dd748bf0e231443a818c47c5055ba1c141a4a749dd0 +size 254 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_acos/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_acos/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..24be2c40fb380a6cf5a4b01c868aa7912aa8e99c --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_acos/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b85831618cf33187c7f035bcc8889bb1fd56f24238cc86d8a0a0d8217512840 +size 99 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_acos/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_acos/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..691aa143f8af9be7b79e4cf0a81853feb1fa8d0c --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_acos/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:05c023929a1b32b4bdbd7882ff5e53b9b53762ee2929c66be76d0f0ba7ca393d +size 254 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_acos/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_acos/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..93d9f0f07230cca6fa958435f391583314a346bc --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_acos/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:95160d2f094737585fb9d86413581f1a776b9d6aaf6fa6ea677600bba4dff364 +size 254 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_acos_example/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_acos_example/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..b5dd89c5dc3c8dd3474f341183c125f483139e11 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_acos_example/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0576861b8660a0f70bb75f5da782694460ab06bf727877ebd2c0c7724365cc7e +size 91 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_acos_example/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_acos_example/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..88128780e03bf763149dd948c9cf0d40a0bb2071 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_acos_example/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:703de0c3be505a78596235789682bc4e6f0e8523f34fb731207b4c076701abdd +size 21 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_acos_example/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_acos_example/test_data_set_0/output_0.pb new file mode 100644 index 
0000000000000000000000000000000000000000..c59bdec79fdc95952197e2dcd7b35f420419238d --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_acos_example/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b6b3deb8a89769e130c6c6d3237b29d5d1d01c84425f70d9f15af6920fc8ff9 +size 21 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_acosh/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_acosh/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..643fe43f9307eac9cc0d4c377c6eb3c70e3d4a29 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_acosh/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:57b983aab54e9e5744a1e4ac8a4bdb8557a4de6fd7e185d2e6f09058fb0121d5 +size 101 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_acosh/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_acosh/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..843cfd0470938ceca1fe0082b5fc285329ded0f9 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_acosh/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:30ab226b20d339612014c98f4f0a3997297293bf8cb50ae15bce3d15fd42e3ee +size 254 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_acosh/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_acosh/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..4d152cc5f9a14aafeeed93fbab5aea68ae34c465 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_acosh/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:62bc7dd17437773831e345ce9bdbf7bdbe4b42b0fdac484b7f29422416453e95 +size 254 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_acosh_example/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_acosh_example/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..d4d8b26b18c0c08147887415caa9c7d2b0001718 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_acosh_example/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:11d62640ee799a6de2a0c8acec86fafb591058b78231d3e5d8ddcad9fdb7d336 +size 93 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_acosh_example/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_acosh_example/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..cfe6ba55a5763de90227723498535d80627f484e --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_acosh_example/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:357200162d60efa020d67af9c220c8884ea296ca27effb74902fd07f92e7eaf8 +size 21 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_acosh_example/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_acosh_example/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..331c57656769664831c1d43b37e65acfd5f34f50 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_acosh_example/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ 
+version https://git-lfs.github.com/spec/v1 +oid sha256:1b3de92cda4a410cf189bfbdde29176f95b3919a2042ea3c80e32800e3cb3c7f +size 21 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..c142651f24437eda02f83341d454134c05f7848f --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5bfd337a65cfc3213420fa06841eb88a6d3b8cfbef6235099fc4950f2c94bfe8 +size 320 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..d61b5b6ad90be73b7598b1872073503a017c0633 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b13087d18cf644a39bb101a66dd06a37ff95bac00e83bfbd5ff621b5168aa22 +size 11 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad/test_data_set_0/input_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..815b7a1a21c44a7787c0caa8b319d07fdf87b418 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad/test_data_set_0/input_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c44a418da7d4f82602975579ba1cdda8fdab51f0606a3ee096c9314ea0c68be9 +size 15 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad/test_data_set_0/input_2.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad/test_data_set_0/input_2.pb new file mode 100644 index 0000000000000000000000000000000000000000..93a2c173ea96f3fceb18b4d75c750fd5989d4cf2 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad/test_data_set_0/input_2.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b6eaee0ae6356b407afdb8729f08d7d8aabe3ab262dc79f045208eb7905904bc +size 13 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad/test_data_set_0/input_3.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad/test_data_set_0/input_3.pb new file mode 100644 index 0000000000000000000000000000000000000000..33d8167b3310c12c5fa7683668c23e8d8aaa64bb --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad/test_data_set_0/input_3.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:86b82874edcb556fcbc536d488af6d06f2c0d90ed666d5d45bd8ad3fda2a7d01 +size 13 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad/test_data_set_0/input_4.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad/test_data_set_0/input_4.pb new file mode 100644 index 0000000000000000000000000000000000000000..526e914cef8d7c18760b019441b60231cce23b52 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad/test_data_set_0/input_4.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91fa92ce704fea3376209b22a90d487862aa2f1f26eb36039dfaa48d7fa15269 +size 13 diff --git 
a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..6e1ee650338ef0dfdd99a6c3aaab9852cf96b461 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a022a2b019848e93f75c1dd58489ab669fa5ac27c4b92173749124ec320e030f +size 17 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad/test_data_set_0/output_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad/test_data_set_0/output_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..b5cf06076d1b02eb6e21c7267863b641f48a2960 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad/test_data_set_0/output_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fbdda18e2257329a378bc02490c04a7e853fe88f3e6107adb9eee5cec9509de8 +size 17 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..f0d25ad0a01a03143e9eebf35801ebf701365210 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d067ca0926d16bdddc302e45d5232bd8a28757e208562001cf06823517c0f999 +size 465 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..d61b5b6ad90be73b7598b1872073503a017c0633 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b13087d18cf644a39bb101a66dd06a37ff95bac00e83bfbd5ff621b5168aa22 +size 11 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/input_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..815b7a1a21c44a7787c0caa8b319d07fdf87b418 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/input_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c44a418da7d4f82602975579ba1cdda8fdab51f0606a3ee096c9314ea0c68be9 +size 15 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/input_2.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/input_2.pb new file mode 100644 index 0000000000000000000000000000000000000000..f6a713c716572c10cc6d3e57576acba905bbb76d --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/input_2.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd209068326e49f8d5f3d94e2a5fdef0502f69874383a52b1f69f87b2902f5b2 +size 14 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/input_3.pb 
b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/input_3.pb new file mode 100644 index 0000000000000000000000000000000000000000..3646541999dfac3811ce5c1e8f259daf8a269bf3 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/input_3.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:03ee3416024db9c3cd15bfd692c6cccb467e13f10857b445d033fdd24a9b6c2e +size 18 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/input_4.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/input_4.pb new file mode 100644 index 0000000000000000000000000000000000000000..831514ad9ab87825c9a6415cd84764819fc16f13 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/input_4.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b179919a8dec2bc0f85af8c06809d3a2de75c963317d0e06c684e23022ac770 +size 14 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/input_5.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/input_5.pb new file mode 100644 index 0000000000000000000000000000000000000000..4830c09b2e19ed128f9b46d183fb4f0cac7617e1 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/input_5.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b2d086bc46c15bc8ad8096e4457d699ca662af4f34932b5222e4e6640921e356 +size 18 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/input_6.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/input_6.pb new file mode 100644 index 0000000000000000000000000000000000000000..1622189c35caa4c5e435e4ee6aef7dab2d615e18 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/input_6.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7e7e556bccf98aa250f419287e95dd70619c52108531d135298bfd32c1e76a0 +size 14 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/input_7.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/input_7.pb new file mode 100644 index 0000000000000000000000000000000000000000..8f424da4709212346b72174b01b03e4d447afe96 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/input_7.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a552299b569b3bb2ba14115833b2bee6ece125575605dbdcd5933600d184beb +size 18 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..85899ba850eb5e7a7dbeebef654daad1ea73f72a --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50a4c8438339f3fe9ba7e2c16a039efdde15470ae2b3b2056779e2d1b5a2565f +size 18 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/output_1.pb 
b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/output_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..d84927a014148a95c49fc043a6a01a88559f258f --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/output_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8a95550f2372bf749abcc0d7785d5d55f5833308e582a6d6df79d1c9d3811522 +size 22 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/output_2.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/output_2.pb new file mode 100644 index 0000000000000000000000000000000000000000..363aefd64bf7828339f6ae43d65a7de06c3996d4 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/output_2.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1e89c732917f033254b7c2009c99a93529dfdfc011c39e154a146d235e434cb4 +size 18 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/output_3.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/output_3.pb new file mode 100644 index 0000000000000000000000000000000000000000..7715d3216f1e5dc83ba096539a7ebbec29cad13c --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/output_3.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d2db9fc09b516918d14c7090643afbfdaaeb946be33e1393aaf099253f57db23 +size 22 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..441d3b9d6cd3849fcce472d3711accacb58deb3f --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c3d2aef1d34b6f2f22e100d36da8c93945c179131dae6bc55bd197d4c1a56b70 +size 371 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..d61b5b6ad90be73b7598b1872073503a017c0633 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b13087d18cf644a39bb101a66dd06a37ff95bac00e83bfbd5ff621b5168aa22 +size 11 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam/test_data_set_0/input_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..815b7a1a21c44a7787c0caa8b319d07fdf87b418 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam/test_data_set_0/input_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c44a418da7d4f82602975579ba1cdda8fdab51f0606a3ee096c9314ea0c68be9 +size 15 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam/test_data_set_0/input_2.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam/test_data_set_0/input_2.pb new file mode 100644 index 
0000000000000000000000000000000000000000..64f0d05b50f671719de7bf339ed3ba6538e56c09 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam/test_data_set_0/input_2.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aba5acc9dbec722e5924d8aa0de8f52aad7b4334b7c4e3d612bbae2c8828a2b4 +size 17 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam/test_data_set_0/input_3.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam/test_data_set_0/input_3.pb new file mode 100644 index 0000000000000000000000000000000000000000..94361f573049ac9fea523cf7b03ed3f96f2e2fbf --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam/test_data_set_0/input_3.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:35090aaf7ac2959a77d6019fd4eca1fc40abaf6ff900c5589ff7cd7436b34fc5 +size 17 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam/test_data_set_0/input_4.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam/test_data_set_0/input_4.pb new file mode 100644 index 0000000000000000000000000000000000000000..53d40fb210433036d9a450baf4b1499e82564195 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam/test_data_set_0/input_4.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eaea46455539f8efb9cca5e80eee9c46c736174dfa93a374db0fc234f1c11aa0 +size 17 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam/test_data_set_0/input_5.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam/test_data_set_0/input_5.pb new file mode 100644 index 0000000000000000000000000000000000000000..71ab2abf169cdba8dd021e3ac98ec4f05a2ef213 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam/test_data_set_0/input_5.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fec400734c87eee422a5b7497bf37d115fde31524d3536f46d84ab96f7931d20 +size 17 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..c871d73e639ae687a195892acc0ed6a650d8b0a0 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4d545578a79ab69e3b2badb2bb9ccd7bb7a641cca8a91195a764c44fe44499dc +size 21 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam/test_data_set_0/output_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam/test_data_set_0/output_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..f2fc82d4e3d7c2b99a7fcf10ae365da95eb9291b --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam/test_data_set_0/output_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:acae9993d79ac77b549ecdd2ece6b9e4b378180feff14773bfaba24addf984cb +size 21 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam/test_data_set_0/output_2.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam/test_data_set_0/output_2.pb new file mode 100644 index 0000000000000000000000000000000000000000..cd3725ed4bd0ba8a155a172ad81a4f961e99b099 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam/test_data_set_0/output_2.pb @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:8e7e6a4555b57797c308a5ba35377ace194e2f32568e1e8f0f017f1a8b43bd4d +size 21 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..205c2cc2dd5abea3e9557f0998dd1b0c65bdb0eb --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c4e5ae63e47e1f1034b74ad5fd92fe1f908dcdea805ad58a2a67993b7990270e +size 553 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..d61b5b6ad90be73b7598b1872073503a017c0633 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b13087d18cf644a39bb101a66dd06a37ff95bac00e83bfbd5ff621b5168aa22 +size 11 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..815b7a1a21c44a7787c0caa8b319d07fdf87b418 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c44a418da7d4f82602975579ba1cdda8fdab51f0606a3ee096c9314ea0c68be9 +size 15 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_2.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_2.pb new file mode 100644 index 0000000000000000000000000000000000000000..f6a713c716572c10cc6d3e57576acba905bbb76d --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_2.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd209068326e49f8d5f3d94e2a5fdef0502f69874383a52b1f69f87b2902f5b2 +size 14 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_3.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_3.pb new file mode 100644 index 0000000000000000000000000000000000000000..3646541999dfac3811ce5c1e8f259daf8a269bf3 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_3.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:03ee3416024db9c3cd15bfd692c6cccb467e13f10857b445d033fdd24a9b6c2e +size 18 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_4.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_4.pb new file mode 100644 index 0000000000000000000000000000000000000000..831514ad9ab87825c9a6415cd84764819fc16f13 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_4.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:0b179919a8dec2bc0f85af8c06809d3a2de75c963317d0e06c684e23022ac770 +size 14 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_5.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_5.pb new file mode 100644 index 0000000000000000000000000000000000000000..4830c09b2e19ed128f9b46d183fb4f0cac7617e1 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_5.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b2d086bc46c15bc8ad8096e4457d699ca662af4f34932b5222e4e6640921e356 +size 18 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_6.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_6.pb new file mode 100644 index 0000000000000000000000000000000000000000..c2017bdf137ed3ed9e9a094710231c415bc8be49 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_6.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:45cf7416b401454942f93ccef63f1d32ba125404d2747d36ec5690cc4b417940 +size 14 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_7.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_7.pb new file mode 100644 index 0000000000000000000000000000000000000000..58779051bf0ef4ab101e13c8bad2f19163a9a94f --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_7.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:34f7208fff45c8c209dcb9790a15e81f16d434f13b42ab88fa09df32ae2fd34d +size 18 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_8.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_8.pb new file mode 100644 index 0000000000000000000000000000000000000000..13df967bb825f4c395e259fae6b203bb1b14d42d --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_8.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:34fd4c71f3043750d42a04ed4eb2c5a97f90ea7a254bca3421d2f38bcaff62b0 +size 14 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_9.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_9.pb new file mode 100644 index 0000000000000000000000000000000000000000..0b11496e9b8a89470aa2440ca137641833edb8c2 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_9.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2356b1a991645e8c7d7946ef5404905a9686b677de2fb50db7890e41220d598f +size 18 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..0456ba126e71dae4884d39d26a723eac37256e63 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:827fa5cd7808d610c2619dbff5940c730b3e734225ae20a04f8b4f7dd65e327f +size 18 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/output_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/output_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..bd119159e630fd34d2b3abdaf0375a67996841b0 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/output_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de9a02a1f0fb37283d2ac2184c6fa92547c4b44cfe1bb24d28cbd5ab87c021d7 +size 22 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/output_2.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/output_2.pb new file mode 100644 index 0000000000000000000000000000000000000000..add41a9a5a4eb01f1233d93fc02b563e200e5d2f --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/output_2.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e112fa29eb8522bf59fc8a43988eca01fd66b6fb9a970a9089b5e2722ad82c3 +size 18 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/output_3.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/output_3.pb new file mode 100644 index 0000000000000000000000000000000000000000..e86faba276f063cc018a20636b3133ccfd1ab114 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/output_3.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:385a22647e251cfa95e18338b0ef490144884e60d32dc14f1ac0e8ffc8379d69 +size 22 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/output_4.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/output_4.pb new file mode 100644 index 0000000000000000000000000000000000000000..c40859e0c0a170fe01df2d99f086184a918c0625 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/output_4.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f77a721995fa3af985d5ac5e1f7789d7c8c21bd96dd57fcd7bc6843a9dad70cb +size 18 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/output_5.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/output_5.pb new file mode 100644 index 0000000000000000000000000000000000000000..70d808fe027b3fbd3ace0dada31167b3d7e20c70 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/output_5.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b1938ac765912c174cc73ca62378b00e6f66e073c4f2e69a78a30ebc7a6bef8 +size 22 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_add/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_add/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..9b91d90e41e695db3c567c47d291f5dfa0554385 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_add/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:93cf0438706cddabf683adc8b13c8a17c4b8b12d8bccb1b041268e1f4dff0a2d +size 129 diff --git 
a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_add/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_add/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..58f5af5f3237bae3e4133f0888f56b1ed0292c51 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_add/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0180f5665325e343077736e4903fa43513ae714ceb131deee45b842370df289a +size 254 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_add/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_add/test_data_set_0/input_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..971c920888435745edb3e93065f33e50909e3483 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_add/test_data_set_0/input_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa59aeb90365dde2edc5a6ca7cd13a71b6478d8723f83ccd06bd4ce2bcfb192a +size 254 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_add/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_add/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..7545b134ce9ced7eb808a6bd2faa5f499b51498f --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_add/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ff4c027c9f60fb0ffb5d125c5eb78090e9eb4ac9b1c026691833bfcc487bb2f +size 256 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_add_bcast/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_add_bcast/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..7992652872365ac91305cfe70cae4f943ad668ef --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_add_bcast/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d562ef62b7e9541e2b9bf2b5ae6297bfeaa7d14a19c689388318b2dcda7a1550 +size 127 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_add_bcast/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_add_bcast/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..58f5af5f3237bae3e4133f0888f56b1ed0292c51 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_add_bcast/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0180f5665325e343077736e4903fa43513ae714ceb131deee45b842370df289a +size 254 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_add_bcast/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_add_bcast/test_data_set_0/input_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..3af3fa0a61a4b0e8f02553740690e4abb5091689 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_add_bcast/test_data_set_0/input_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d89fd14b53313d11325cc455b8f17a927a1465c9e3cf8d4a08fa0b4dca8cc1b +size 29 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_add_bcast/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_add_bcast/test_data_set_0/output_0.pb new file mode 100644 index 
0000000000000000000000000000000000000000..74e88be5b6d32437e99b91aed555ae7622cf6fcc --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_add_bcast/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac6dfc048f596756c8c3e1bebd035a55e6b6fa21058339671ab4a4e09450ebfb +size 256 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_add_uint8/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_add_uint8/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..8e82b085d56f2312b90ef85c53641d33877d794f --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_add_uint8/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a5a2fee709cfd7ad23b6e26559bbf6185a6a0d524b481c3d13cf4740cafc1e33 +size 135 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_add_uint8/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_add_uint8/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..8513ffda60c66991bfff225f61993c1c08b041b3 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_add_uint8/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c18f496f3d870f414cec933acf06c16d18461ecb3770380ee9e153829e4bed6f +size 73 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_add_uint8/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_add_uint8/test_data_set_0/input_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..baebffc6a1c46d9dd817b38c34ae106262dd9c93 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_add_uint8/test_data_set_0/input_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0611a0a39375dcc74113788d26037a81608fe06485cc96547280084f98c8ba89 +size 73 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_add_uint8/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_add_uint8/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..6764edcfe5922da5a5634cb7573e035db2c2f8c7 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_add_uint8/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce905d1cfbb6a815c7694720a3c6bbc5dc65ad2dea38b240e968a9da6ffb3ac3 +size 75 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..08813218b178967bb42674c48468236f9b34679b --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3753181a124e932f355ca1790490e6cefec90eae56089bcedf8c4087459eecdc +size 182 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..301c9ceed2255173ddf8e120069692cc16e6e112 --- /dev/null +++ 
b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c4d865924e9378c3f4176c6398ba20ea50490a2dafa95d132e60d88303b16286 +size 65 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d/test_data_set_0/input_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..6f72d80ee0c6de4d7a4d06d6af2b29b578739b9c --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d/test_data_set_0/input_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f487b16c83919ec686af1ebb4fe2fdf3e61803e2a6b4f0df1b3643e9a7ab7cb2 +size 44 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..34cf41e9bba1582cc832e9458e2663750905fd59 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b91650d6b5d278f4584dbfd956be012e3d3602acf6b7fa3dfd8407031ad09845 +size 499 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_align_corners/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_align_corners/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..34b522091acd21cf9490cb2b82c83bc35d09ebbb --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_align_corners/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80540a5b6c6375bd5e84d2d8070d7eaaa6774738de7990f19935350d65d92c1a +size 196 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_align_corners/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_align_corners/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..301c9ceed2255173ddf8e120069692cc16e6e112 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_align_corners/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c4d865924e9378c3f4176c6398ba20ea50490a2dafa95d132e60d88303b16286 +size 65 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_align_corners/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_align_corners/test_data_set_0/input_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..6f72d80ee0c6de4d7a4d06d6af2b29b578739b9c --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_align_corners/test_data_set_0/input_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f487b16c83919ec686af1ebb4fe2fdf3e61803e2a6b4f0df1b3643e9a7ab7cb2 +size 44 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_align_corners/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_align_corners/test_data_set_0/output_0.pb new file mode 100644 index 
0000000000000000000000000000000000000000..3aaa5cd620809e968557bbafb81f917920817afc --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_align_corners/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0937efda93d24a6fee7e7e413f88074c4d10c73743dde508be08c62f515f5310 +size 499 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_align_corners_expanded/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_align_corners_expanded/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..18627150e20649d3362463da76de9e3c75f17e70 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_align_corners_expanded/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:43e6a21f26fcaa48f11bc067b55aa0afe47c51515fdce36bbc4fadbc9ca7b517 +size 27753 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_align_corners_expanded/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_align_corners_expanded/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..301c9ceed2255173ddf8e120069692cc16e6e112 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_align_corners_expanded/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c4d865924e9378c3f4176c6398ba20ea50490a2dafa95d132e60d88303b16286 +size 65 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_align_corners_expanded/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_align_corners_expanded/test_data_set_0/input_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..6f72d80ee0c6de4d7a4d06d6af2b29b578739b9c --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_align_corners_expanded/test_data_set_0/input_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f487b16c83919ec686af1ebb4fe2fdf3e61803e2a6b4f0df1b3643e9a7ab7cb2 +size 44 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_align_corners_expanded/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_align_corners_expanded/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..3aaa5cd620809e968557bbafb81f917920817afc --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_align_corners_expanded/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0937efda93d24a6fee7e7e413f88074c4d10c73743dde508be08c62f515f5310 +size 499 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_expanded/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_expanded/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..2de65dad71210b61a7af6f293cadb7c5a2ae08f3 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_expanded/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b952d7b8fe0645c2a46ea151303abe77ade3906d7a7b2c15b6884904cb20a313 +size 22975 diff --git 
a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_expanded/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_expanded/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..301c9ceed2255173ddf8e120069692cc16e6e112 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_expanded/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c4d865924e9378c3f4176c6398ba20ea50490a2dafa95d132e60d88303b16286 +size 65 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_expanded/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_expanded/test_data_set_0/input_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..6f72d80ee0c6de4d7a4d06d6af2b29b578739b9c --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_expanded/test_data_set_0/input_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f487b16c83919ec686af1ebb4fe2fdf3e61803e2a6b4f0df1b3643e9a7ab7cb2 +size 44 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_expanded/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_expanded/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..34cf41e9bba1582cc832e9458e2663750905fd59 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_expanded/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b91650d6b5d278f4584dbfd956be012e3d3602acf6b7fa3dfd8407031ad09845 +size 499 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..0cf76c56270ada6bc9e56eb72b8e420bf7025652 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bef4c6d7ee705cefa17c64523089b525b3bae6ed9296033f84e7e0a5c8816c2e +size 186 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..bb09e13fa8c27d4271697941a1c9c0bbad4f2d3d --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:18a2a1254591393360a8c17404137abb58d6c67813524bdf9aaf75c52c3cba31 +size 113 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d/test_data_set_0/input_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..6000e249c1c27664a68748ccbf026c32c89233c6 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d/test_data_set_0/input_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f287f38024fd6edefa31c26a2d16c6ebe4552c39ff16ede52cce5fbc842f9579 +size 52 diff --git 
a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..a67c7a5c7ec585084cb0fc8b5a1fceeba9607b2f
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a5454147c5fbc725c4cacee5ced9780869b3acdf30f2aa9c36b6a27e3019ca3b
+size 2901
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_align_corners/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_align_corners/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..d1f0f64edea03b27f102dd94a877dac6c214379d
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_align_corners/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eb33d44b2ed76d85f7ae21728ca7d5ff784afcf5a2b34bcdb4f14278f3d2c907
+size 200
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_align_corners/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_align_corners/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..bb09e13fa8c27d4271697941a1c9c0bbad4f2d3d
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_align_corners/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:18a2a1254591393360a8c17404137abb58d6c67813524bdf9aaf75c52c3cba31
+size 113
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_align_corners/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_align_corners/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..6000e249c1c27664a68748ccbf026c32c89233c6
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_align_corners/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f287f38024fd6edefa31c26a2d16c6ebe4552c39ff16ede52cce5fbc842f9579
+size 52
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_align_corners/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_align_corners/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..bd45857eb7b0fe652cea745f1c65424022e2aa0e
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_align_corners/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cacd57330a8a4014fcc20fa73508932e7d1ba90f1c3522157086fe448219d5bd
+size 2901
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_align_corners_expanded/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_align_corners_expanded/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..2465a9454ce62f6a68250807386a63a8e502dd5b
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_align_corners_expanded/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:79f59cf0e7c18003b39989977275417c505a9bacbd4b6dd23039031294d1efd6
+size 27757
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_align_corners_expanded/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_align_corners_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..bb09e13fa8c27d4271697941a1c9c0bbad4f2d3d
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_align_corners_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:18a2a1254591393360a8c17404137abb58d6c67813524bdf9aaf75c52c3cba31
+size 113
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_align_corners_expanded/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_align_corners_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..6000e249c1c27664a68748ccbf026c32c89233c6
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_align_corners_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f287f38024fd6edefa31c26a2d16c6ebe4552c39ff16ede52cce5fbc842f9579
+size 52
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_align_corners_expanded/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_align_corners_expanded/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..bd45857eb7b0fe652cea745f1c65424022e2aa0e
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_align_corners_expanded/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cacd57330a8a4014fcc20fa73508932e7d1ba90f1c3522157086fe448219d5bd
+size 2901
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_expanded/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_expanded/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..a7fa82295c78f9d1191a689aa89cffab8eaf3d71
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_expanded/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9a04208848424b3e01e6237b00211aed75285156a7afb25778f3e831fb799ea2
+size 22979
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_expanded/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..bb09e13fa8c27d4271697941a1c9c0bbad4f2d3d
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:18a2a1254591393360a8c17404137abb58d6c67813524bdf9aaf75c52c3cba31
+size 113
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_expanded/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..6000e249c1c27664a68748ccbf026c32c89233c6
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f287f38024fd6edefa31c26a2d16c6ebe4552c39ff16ede52cce5fbc842f9579
+size 52
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_expanded/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_expanded/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..a67c7a5c7ec585084cb0fc8b5a1fceeba9607b2f
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_expanded/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a5454147c5fbc725c4cacee5ced9780869b3acdf30f2aa9c36b6a27e3019ca3b
+size 2901
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_array_feature_extractor/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_array_feature_extractor/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..257844fb6e4d2c326b296e67e574ba42179111d4
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_array_feature_extractor/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f8aa80cb66010d1c32a9f8a799f8fcee435a8d974e2d0e7e87f1d9aa7adc6b57
+size 181
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_array_feature_extractor/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_array_feature_extractor/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..5be9e71c259c9879477655e81e5f6caf0382d6e5
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_array_feature_extractor/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5c8d34aa16af6242a5fda1ca7a2f1adc81ff76c2f2848fd8e7b277f30161914d
+size 59
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_array_feature_extractor/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_array_feature_extractor/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..ee2417f95769245e33af26a8f65ae024e2b0d656
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_array_feature_extractor/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e9b57e2cfa142a6c4c66a61587bb6bb086279579daa585588aba93c765a5494b
+size 25
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_array_feature_extractor/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_array_feature_extractor/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..be8dd3c069697316956e06ba78565da6f9e2a4aa
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_array_feature_extractor/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:713ed58c32c02de0b2e9a2a0f5583b2bd6f16390a7bc51abab77b641189f00d9
+size 35
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_binarizer/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_binarizer/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..bc6d1527aa4765d4ccc07781230587a782ef89a1
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_binarizer/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:306233d2bb7af438846aa85214ace58b5e65e15845cfdf50ff2b069f29c682ff
+size 164
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_binarizer/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_binarizer/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..c0d75ec3ce0d9e8cb40e8a38407cf70e350a5cc9
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_binarizer/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:20788723269b9582f339e281df6fa80f0da06611b90685eba3a86ba68b1d7a0b
+size 254
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_binarizer/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_binarizer/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..82ff8eb3ce46ac7926f45174289ff40831c7a2fd
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_binarizer/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5823f10bac5af68b20e0bc4dd32f743d8712bae769833c63731f601caab02314
+size 254
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_string_int/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_string_int/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..ab4694c7a6f371becebde1a61055714456af0e0c
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_string_int/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:068a53fc858ccd5f31186f843a8c4a3beb024071b12285af0f0c5956cafcbb09
+size 221
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_string_int/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_string_int/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..e99f0a5224e76c5e89617f8750157765f76b35b3
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_string_int/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10845137c90b1da0d41afc32971d464dc95f367431665a8b6df89f8cc5eada72
+size 22
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_string_int/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_string_int/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..7973f908b8bb5524c308f46034b5ceea4f71242b
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_string_int/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a21fd0d2930fd10bf689f21332d15203e122c31165ece20a6252fbe6ccaed522
+size 49
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_string_int_no_default/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_string_int_no_default/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..da5e4e1ba8c3e421ef492c2d13428f8da9aabbe1
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_string_int_no_default/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b443af123faaf82e5b0cfa7e4d14dccc679f58b38f312688f4eb2d408a157ae8
+size 210
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_string_int_no_default/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_string_int_no_default/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..e99f0a5224e76c5e89617f8750157765f76b35b3
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_string_int_no_default/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10845137c90b1da0d41afc32971d464dc95f367431665a8b6df89f8cc5eada72
+size 22
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_string_int_no_default/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_string_int_no_default/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..7603cc1cd32cfe9d1ea9c0f73e3f0890cbf6a9a7
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_string_int_no_default/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:87d63ddcaec19922f25a5680f7b650abbc2fe1bc0bf6c197609c1538c726ee00
+size 49
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_tensor_mapping/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_tensor_mapping/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..7191f897265ac9a0e8a3f4b8e9efcf28643841c9
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_tensor_mapping/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fd7882fd3f02e3fb582557eeebf718609df2323ae640cb44c1c2896db7b22175
+size 288
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_tensor_mapping/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_tensor_mapping/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..e99f0a5224e76c5e89617f8750157765f76b35b3
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_tensor_mapping/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10845137c90b1da0d41afc32971d464dc95f367431665a8b6df89f8cc5eada72
+size 22
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_tensor_mapping/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_tensor_mapping/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..a676ffd27ad524607aa3412aaa0628a5a2d4d7f0
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_tensor_mapping/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c726942a470f21886be3834bacddbf861ddb18a9fc317ac95b2a51b2763bc25f
+size 19
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_tensor_value_only_mapping/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_tensor_value_only_mapping/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..111cd171cc24c16ab0d4153715fefc9a455c4970
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_tensor_value_only_mapping/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d83d408218e582ccac358298368fb3f7fd97fc06acabb3b3cae9aa63e9c51b4a
+size 281
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_tensor_value_only_mapping/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_tensor_value_only_mapping/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..e99f0a5224e76c5e89617f8750157765f76b35b3
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_tensor_value_only_mapping/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10845137c90b1da0d41afc32971d464dc95f367431665a8b6df89f8cc5eada72
+size 22
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_tensor_value_only_mapping/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_tensor_value_only_mapping/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..a676ffd27ad524607aa3412aaa0628a5a2d4d7f0
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_tensor_value_only_mapping/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c726942a470f21886be3834bacddbf861ddb18a9fc317ac95b2a51b2763bc25f
+size 19
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_set_membership/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_set_membership/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..0cc8b30d6e8f1f26a7dc07ca713b99c6dd67f01e
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_set_membership/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f6e35d89c68474042317f73a30fa4377a5605018edb8c033f9ec672d52dbc5d2
+size 656
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_set_membership/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_set_membership/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..a7cc9ea97936194351bb71f1dac9cf126b2d2b4a
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_set_membership/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:140e698c04713db8a7fe3bc98c8769824fa134b886ab6e2469b3e1fab1ecfd18
+size 35
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_set_membership/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_set_membership/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..e2ff03488283c7445e69da1224b09e1d24aac5de
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_set_membership/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2c6d5beab531d61da4a4e04bc670c5fcfda84a7d5f32032533a4577a5238d4fe
+size 107
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_single_tree/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_single_tree/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..e526bf5d4bb866933d3f8bbcccd466a2edb29013
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_single_tree/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:97b850b10419c64551d9db79fdea98ca8e2949a0fb7f8996dc2aa1ada76f8534
+size 598
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_single_tree/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_single_tree/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..ef936b6fa246b5d819805e95e52606f8fcb77d89
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_single_tree/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6ed014706c8700b8b0b29cd0ad317c18879bd697e58f7d4f6730f6764c585370
+size 59
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_single_tree/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_single_tree/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..9693128e2d60ea06dd8cfcc62637a02ffb946dd8
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_single_tree/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7dc8998cc56877d2f743778b93a8b84ed97e0a070d67146d2d4d234ca9508c46
+size 59
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and2d/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and2d/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..7b7de9ced0b458d799699b53031855d36abd33f7
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and2d/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8ef8594efa26783bed9d94632010790bcc464d673e378c286950f5a6393fc9f5
+size 119
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and2d/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and2d/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d44c54d97134e1f8a6e35734dacce96c7f393b91
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and2d/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2e3369fe07f91df4e85118fe7ce57e4cbff5d1bedc4eaa0e8fc45f3d334f60b4
+size 23
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and2d/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and2d/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..6d26947c1a76452030b54f44a2557a4e13a6c283
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and2d/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:828666c5e8221193e34535d271489053d7e9c4f9dd17812a019cbca1e4e86bc9
+size 23
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and2d/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and2d/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..4ff4772731fcd64f790af6d7d82b991c8d9f4537
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and2d/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:19c8e670cd77c1d5c046f80512e6413d6f7696ab28dbee6c40c989888668bd62
+size 25
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and3d/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and3d/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..2081e68050f7c4497919a37cfaa4d1aa7ff6231b
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and3d/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8ecb22da8fa94ed2ba4a2ecd2840446b19fd3f95389667549f45c5f98009be76
+size 131
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and3d/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and3d/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..706737f3a71c33491b50789dfce99fccf1165e6e
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and3d/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0ebe24e5da6936a11a5a616c3145f0fcf6e9ed0a04a6a917bf806876ab7bccc4
+size 73
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and3d/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and3d/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..545449e7c1d57c00886ac42c923b83e2bc182f51
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and3d/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bf94a94f31064228e0c69b7719076a15da43f8b0aa94382b3da71eef910be694
+size 73
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and3d/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and3d/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f5bc0251f6d1e62133c6f9a9a8a9c1c168fcca5b
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and3d/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b8290c29ebb7039165f7df70dce27416edc98d9a4d156270096fb2ba958163bf
+size 75
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and4d/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and4d/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..796b256da73a7ea57b96ba38db5ff2ffa68109fc
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and4d/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7c9d9ae60379d225562bd427491fe4083d7a344e60a9e8a682c45e2df7401201
+size 143
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and4d/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and4d/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..dcfbf19893fccf53099ed6f9934bba5d898cef6a
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and4d/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b0c2b153980d431ea6bfcf593a4995d1009d12ef5ff04844f8c66ea1635b71e9
+size 376
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and4d/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and4d/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..93222ffb4f73c7c802ba73fdbeda35f38c2d9b9a
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and4d/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f49694bd75517ff3584d1c185ad3d8b1b4a82890f03973d9b91060158ee960f7
+size 376
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and4d/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and4d/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..496d93cb79632812d51b6efb710e0364c03f3a2f
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and4d/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef3e8372a5383d80f83411b48418c1a10b67f40014d475e8190a7f3fa1d7e596
+size 378
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast3v1d/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast3v1d/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..a3efc5ffedbad4f30071d91c9bebef022298bce0
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast3v1d/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:57e2fa30b3395022d8792906ea6f1e30ecba3b10d83f2dd7bdc1a07ac6edefa1
+size 131
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast3v1d/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast3v1d/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..34406f2a343751203a7e067fd1e56f4ff5cf9e36
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast3v1d/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c05e7686f2c45c27ebc9337416f8163ca5d2c773cf96989a0bf68067921708c7
+size 73
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast3v1d/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast3v1d/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..1e911c08718180026da4e1647824cadc5e6703af
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast3v1d/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d1c3f490727de556e61acc895b7ae5a333cedfc610a1c49cb5ce5bb8b6acd192
+size 14
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast3v1d/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast3v1d/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..7eb42d8bfbf0fef3253d1a5887eedcdc5e631ae5
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast3v1d/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:db36f9707b9d18748e58e15c8e030142d4d125ee3c85b95dd76f1a43685b0b2f
+size 75
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast3v2d/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast3v2d/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..4bc907bdebe2e7f685c9182095156d842c284711
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast3v2d/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae43bc81927a51228562951680a6433952d52ab19a5c8b8753ef217955f0c07a
+size 135
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast3v2d/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast3v2d/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..0a939e70df3e648db2a294df86ce1925bfe99b0e
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast3v2d/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d878b9819cc2ca4dd25ad07c761b52e6d20c8a92129cf2462354888f1d3c15ed
+size 73
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast3v2d/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast3v2d/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..e97ad7697b2d768650e2de7a143deebda13bae88
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast3v2d/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2759de2ec3aa6474ef19d6a0045f3fdceeac424d5bcae508300051d302710120
+size 31
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast3v2d/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast3v2d/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..eb1abc57fbcb4039254d38026046abd96e2821d6
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast3v2d/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:99d1bedf600d8d1891aee6c6584301ba06809092ad842c3be21a9c9457948347
+size 75
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v2d/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v2d/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..e3d32e63dc9b43e33be8b7fc19da14dd1a23f751
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v2d/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d0ef51216db411bc6bdd52b70258cb40f200ef3f2dc2037c320a6444b50bbee
+size 143
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v2d/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v2d/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..a115114b3764248d5b2c721d3fa55c455ed7c873
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v2d/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4c6f4fa4d2415beae149dc180e99b008ca2d59cf1239ed9ed9c2c0a1768d43ca
+size 376
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v2d/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v2d/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..a3b6489e463002b0ca7e78fd930b2c2c65c17b07
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v2d/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0f824895f554ce28c2f2b935e76a643ec0d66b7dd8251966bad8ccbe44fed452
+size 41
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v2d/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v2d/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..c7a5732105e82901ca959137f3a562a67199955b
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v2d/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0570d9133131df4299191957f90440bbf4dfadfb428581a2e231c6bfae12f4d2
+size 378
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v3d/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v3d/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..8fa5d65e6a22766fb000008978c49d3ab004237e
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v3d/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9695b8bcb560c7d17b1482aba90d8ab8701f812de1c83acbec73638673fb1b8d
+size 147
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v3d/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v3d/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..9180a012ee5a56d8a9c0f0e350b489bafe1ad428
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v3d/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d606ae9c50b0fc73b46407ffb00a7d6df730551256fda4c923092ffb659858cc
+size 376
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v3d/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v3d/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..ceb57d97d131d52c4a239c92fc974b48156e1507
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v3d/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dba80b60d23d790112a8acd6cf1d73ac4f180c308d22f7c013fd26fc5a1e4f71
+size 133
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v3d/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v3d/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..8f9331042c399009948b63ee25637adf55c86a0a
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v3d/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4b89dcaf3ca790713e70faa595e6aaf14bad80e4a652fe2ba04084b1f23cbbba
+size 378
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v4d/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v4d/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..c6a5ae0b902ca8933fd8a4120587ff16b0e586fb
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v4d/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4df12fd01f50fb5b082a5297b8a580c1556b7498d65242ba7eb153bce1f37e93
+size 151
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v4d/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v4d/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..506465e1a5780c8931965183fe21613a51c5977e
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v4d/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7faddf1a13dc879793900bcc5c2ac94c2d053887612a76927c9f0ec7d867a25d
+size 39
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v4d/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v4d/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..bd1745c9aa2793f3df7be05dc2c2b2266f536d33
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v4d/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:882183014e87d7e587bea28a4420f8e1459e96e2ac9c50963297af6ec2790125
+size 105
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v4d/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v4d/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..7955b2dbe531eac4f3d5fc5980cdaf83812f67d2
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v4d/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5804b5cbcf140bb9b446a106cf14b3c383415c452df3fd8997290d540cf25553
+size 378
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_example/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_example/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..0786fc4f162832369966453adbebc233afeedc89
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_example/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e603ac37e5b8196ae08b1ef9637d4afe08687921eb55b90b679536ee04dbcaf5
+size 149
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_example/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_example/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..3660190fdb924678baf00602cfc5f2eda8d8df68
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_example/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3fb2e68eb1b9128cd443cf8306d0e36a2efc14507305b70759830f5b1296ea62
+size 30
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_example/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_example/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..e2b8dc44d769a59eede2ed29b098b0eb7cc642d2
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_example/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:368a335c14dfb860461126aa4c0560bc1698ff6ff52a711768204aa308596207
+size 32
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_example_select_last_index/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_example_select_last_index/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..e4d56916d045f632575334ec8dc66d40a6559de6
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_example_select_last_index/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bb494d94fd548140d1603387916b81d44fcf75f4fc2b644b2a2f6ad6f7c9660a
+size 194
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_example_select_last_index/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_example_select_last_index/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..aaad4419111e51ef4bb367937a9637a6598cc848
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_example_select_last_index/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a222bc115eab44665d54922b089d2ff4857259b5adaa0669c3e31b598836a1f0
+size 30
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_example_select_last_index/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_example_select_last_index/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..e2b8dc44d769a59eede2ed29b098b0eb7cc642d2
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_example_select_last_index/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:368a335c14dfb860461126aa4c0560bc1698ff6ff52a711768204aa308596207
+size 32
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_random/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_random/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..2c90d3ccde168e6cbe9303be6aea56eb968084ef
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_random/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fa2a106adb032b3d9c74314015fe24565b7fad3561104493d88a28cff668fcb7
+size 157
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_random/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_random/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..c4c01d80dbae2686e0971795fcf142530bdba388
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_random/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bff015de9bf3d8c0c608e9f38ef7e3f85bdb6ab2ee7187b724013747f6b35e35
+size 112
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_random/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_random/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..52a6484ec995b58c020e2be22fc1996e8fd68f85
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_random/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0e119a1e3b3c495f0eb9f18ccb523bb34f913a84b0bca2a6112db08f8e116955
+size 114
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_random_select_last_index/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_random_select_last_index/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..fdfb5c7880f859c004e99b16c2461a4a8f747169
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_random_select_last_index/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f03266416470e0d9a16e064d15e99f6434b627582518daef6941407a38f532d9
+size 201
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_random_select_last_index/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_random_select_last_index/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..c4c01d80dbae2686e0971795fcf142530bdba388
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_random_select_last_index/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bff015de9bf3d8c0c608e9f38ef7e3f85bdb6ab2ee7187b724013747f6b35e35
+size 112
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_random_select_last_index/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_random_select_last_index/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..52a6484ec995b58c020e2be22fc1996e8fd68f85
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_random_select_last_index/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0e119a1e3b3c495f0eb9f18ccb523bb34f913a84b0bca2a6112db08f8e116955
+size 114
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_example/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_example/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..6a6a8cd90487a877b933619490eb087c4c114078
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_example/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f6bf4cc19bb568543b4eebbe00b54aa8758998f2c93c0944aabf7c6bf3e47058
+size 159
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_example/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_example/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..3660190fdb924678baf00602cfc5f2eda8d8df68
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_example/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3fb2e68eb1b9128cd443cf8306d0e36a2efc14507305b70759830f5b1296ea62
+size 30
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_example/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_example/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..c79ff510c4a7f3e3b3b736e9fc9eb297f63a39d7
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_example/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e05d3e3d1557470cc4296d74740066f50ce42cdcd5691a0c9182e2ec2bd2a670
+size 32
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_example_select_last_index/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_example_select_last_index/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..18607d72b989e820bfa4b8a7e1874f5af48e4a69
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_example_select_last_index/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:37987711b1000dd60a939702d5794c1ecc69a163d70d809bb084db01dc349bcc
+size 203
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_example_select_last_index/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_example_select_last_index/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..aaad4419111e51ef4bb367937a9637a6598cc848
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_example_select_last_index/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a222bc115eab44665d54922b089d2ff4857259b5adaa0669c3e31b598836a1f0
+size 30
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_example_select_last_index/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_example_select_last_index/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..52936052aab7fdc8016e952ceb98893e430f949d
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_example_select_last_index/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ca30782ce442025b7510b41f61ea6d7593525a8cc49d790713f630d05385e0b9
+size 32
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_random/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_random/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..afd4c1c8caa46b3bfc0ce9aa7048cac491a38ff0
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_random/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ad2d309385710d7fa583dcb5f674e699d8501d75c2818088204f8400d7bf3c3
+size 166
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_random/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_random/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..c4c01d80dbae2686e0971795fcf142530bdba388
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_random/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bff015de9bf3d8c0c608e9f38ef7e3f85bdb6ab2ee7187b724013747f6b35e35
+size 112
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_random/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_random/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..6198d9df26a052dc7272c6155dbc7f0685229f94
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_random/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c712948f59cf972ea8677d436f56654fbee7d9630e73ddcf84696cfebe3a3722
+size 82
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_random_select_last_index/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_random_select_last_index/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..257bf7a169f022a2f754c7b93e053eb127dba3cf
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_random_select_last_index/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c64f168361054c05707d7cbf0c02ca85d63e3021eb7505bef5f1231c3353a02d
+size 210
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_random_select_last_index/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_random_select_last_index/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..c4c01d80dbae2686e0971795fcf142530bdba388
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_random_select_last_index/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bff015de9bf3d8c0c608e9f38ef7e3f85bdb6ab2ee7187b724013747f6b35e35
+size 112
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_random_select_last_index/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_random_select_last_index/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..6198d9df26a052dc7272c6155dbc7f0685229f94
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_random_select_last_index/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c712948f59cf972ea8677d436f56654fbee7d9630e73ddcf84696cfebe3a3722
+size 82
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_example/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_example/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..3d7fde82008ce337e30a4e88e021524b8f8fc719
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_example/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e01bd3b80e010db5ab252fa67d51edd9b16a9e12e059f9fc1d126f724d3ae1ca
+size 182
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_example/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_example/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..3660190fdb924678baf00602cfc5f2eda8d8df68
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_example/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3fb2e68eb1b9128cd443cf8306d0e36a2efc14507305b70759830f5b1296ea62
+size 30
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_example/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_example/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..c79ff510c4a7f3e3b3b736e9fc9eb297f63a39d7
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_example/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e05d3e3d1557470cc4296d74740066f50ce42cdcd5691a0c9182e2ec2bd2a670
+size 32
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_example_select_last_index/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_example_select_last_index/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..519ba8bea35da0277adbeb597930e7dcff4ddec3
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_example_select_last_index/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a1a93e6012ff1e341598a72ac247a57793fb9923051d5563935907c6ddb9964c
+size 226
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_example_select_last_index/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_example_select_last_index/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..aaad4419111e51ef4bb367937a9637a6598cc848
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_example_select_last_index/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a222bc115eab44665d54922b089d2ff4857259b5adaa0669c3e31b598836a1f0
+size 30
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_example_select_last_index/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_example_select_last_index/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..52936052aab7fdc8016e952ceb98893e430f949d
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_example_select_last_index/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ca30782ce442025b7510b41f61ea6d7593525a8cc49d790713f630d05385e0b9
+size 32
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_random/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_random/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..92253784937bed3196ca2bd492e86dbf8fa53665
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_random/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8cdafa85dff4e70275d1089630c2dc28e443d8793ebd4a89817dc9d156afb7f4
+size 189
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_random/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_random/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..c4c01d80dbae2686e0971795fcf142530bdba388
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_random/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bff015de9bf3d8c0c608e9f38ef7e3f85bdb6ab2ee7187b724013747f6b35e35
+size 112
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_random/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_random/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..1039b1fbdf2cad0285ba2946646897c925909de0
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_random/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aebd74b532e085c2e4322df494c197fb2c5493f6018baaca2fc9b75678edb222
+size 66
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_random_select_last_index/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_random_select_last_index/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..209a003b22a5711d439fdce2c959e6adb87fb2d9
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_random_select_last_index/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0932cc042f96116ced932b956f334b2464f6409fa907ea5ebd7d92b07a332c46
+size 233
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_random_select_last_index/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_random_select_last_index/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..c4c01d80dbae2686e0971795fcf142530bdba388
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_random_select_last_index/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bff015de9bf3d8c0c608e9f38ef7e3f85bdb6ab2ee7187b724013747f6b35e35
+size 112
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_random_select_last_index/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_random_select_last_index/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..1039b1fbdf2cad0285ba2946646897c925909de0
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_random_select_last_index/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aebd74b532e085c2e4322df494c197fb2c5493f6018baaca2fc9b75678edb222
+size 66
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_example/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_example/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..79ed3b1be2b5742b6692c213a641029b71e3d6cb
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_example/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac7b8edce4c3e5e453c86a624eb16c186cb30c2f220f0558b8b0d16cc01f4695
+size 158
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_example/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_example/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..3660190fdb924678baf00602cfc5f2eda8d8df68
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_example/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3fb2e68eb1b9128cd443cf8306d0e36a2efc14507305b70759830f5b1296ea62
+size 30
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_example/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_example/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2db76e3c5577ed696fae8150a0ada9f67845974d
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_example/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:539ef4826803f8528dad7f8be46e8738858e4ad4c4f3e169722cbe07ae3a9145
+size 30
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_example_select_last_index/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_example_select_last_index/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..e87ac658cc8f52479dd5c5c2d096bbf26674c25e
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_example_select_last_index/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3b48e1466d7d1f2cd5359296280a4ca604744c9e9a26ec7977b60717d1ed7f0e
+size 202
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_example_select_last_index/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_example_select_last_index/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..aaad4419111e51ef4bb367937a9637a6598cc848
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_example_select_last_index/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a222bc115eab44665d54922b089d2ff4857259b5adaa0669c3e31b598836a1f0
+size 30
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_example_select_last_index/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_example_select_last_index/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..6bd3e0b54b777e675793297bb8e1f9da1b01ddfb
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_example_select_last_index/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:abfa73be6ec1f45d3a0066830a6fb2a5227b88e3b8934f12a8a667216c67e585
+size 30
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_random/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_random/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..a654e26812e09d5f9bf212769efd62907a590106
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_random/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:68cf1d54c3c91350c8dd4a7e25590726f54f463ffb516eee353c65cbd8fe6951
+size 165
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_random/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_random/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..c4c01d80dbae2686e0971795fcf142530bdba388
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_random/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bff015de9bf3d8c0c608e9f38ef7e3f85bdb6ab2ee7187b724013747f6b35e35
+size 112
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_random/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_random/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..ca481bde5dc347715999d1e8118886c9557dfdb4
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_random/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:63883e259eefb73b78f38fd879ac45d2c1838f42140b73b0f61efedc03043b03
+size 80
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_random_select_last_index/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_random_select_last_index/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..797edfc74634bdff04f1e4ac677528df91505faf
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_random_select_last_index/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ea687052cf2cecfcd3325e199b6d94342c00423461305f776bbbbe061a945685
+size 209
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_random_select_last_index/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_random_select_last_index/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..c4c01d80dbae2686e0971795fcf142530bdba388
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_random_select_last_index/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bff015de9bf3d8c0c608e9f38ef7e3f85bdb6ab2ee7187b724013747f6b35e35
+size 112
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_random_select_last_index/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_random_select_last_index/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..ca481bde5dc347715999d1e8118886c9557dfdb4
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_random_select_last_index/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:63883e259eefb73b78f38fd879ac45d2c1838f42140b73b0f61efedc03043b03
+size 80
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_example/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_example/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..b766a034b0b40b4774e46601abf326047968596f
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_example/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:89e6cb3552f57f1c61fa3dc31f96e804918536df2bd98008ffbfeb3f72c00eef
+size 149
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_example/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_example/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..3660190fdb924678baf00602cfc5f2eda8d8df68
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_example/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3fb2e68eb1b9128cd443cf8306d0e36a2efc14507305b70759830f5b1296ea62
+size 30
diff --git
a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_example/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_example/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..0ad83808862368012c131551d40dd4806109323b --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_example/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:063fe4376f747f74549640017d6521e68e4c42ec6733e8d7b4173ea273ae1139 +size 32 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_example_select_last_index/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_example_select_last_index/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..a0e6fc1ede9325b59b6b673171fee1f49926e2f8 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_example_select_last_index/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a87608561590ffc09bb0c61ec98516ca0e7044f2b6e4741fb87b33fcd8d2afd1 +size 194 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_example_select_last_index/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_example_select_last_index/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..aaad4419111e51ef4bb367937a9637a6598cc848 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_example_select_last_index/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a222bc115eab44665d54922b089d2ff4857259b5adaa0669c3e31b598836a1f0 +size 30 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_example_select_last_index/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_example_select_last_index/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..0ad83808862368012c131551d40dd4806109323b --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_example_select_last_index/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:063fe4376f747f74549640017d6521e68e4c42ec6733e8d7b4173ea273ae1139 +size 32 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_random/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_random/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..6b6aa2c993c3c5aecb2167f8512ac6eb6143e75d --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_random/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:feca5281a2ef54821a24538d34c18a1a2e654780c710071df990346d4768d8f8 +size 157 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_random/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_random/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..c4c01d80dbae2686e0971795fcf142530bdba388 --- /dev/null +++ 
b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_random/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bff015de9bf3d8c0c608e9f38ef7e3f85bdb6ab2ee7187b724013747f6b35e35 +size 112 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_random/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_random/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..e015eb9e4e35a3bcb672e3a280bde1be6cf84d72 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_random/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7dc7e8386079a9f7359b51d0b7ce6467ac2fd81998825ce1fa6586adf8e2a8b +size 114 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_random_select_last_index/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_random_select_last_index/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..0f26e2ac0df03b4700e5350217e5cf36df94f59b --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_random_select_last_index/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:38d5ccc166279aceb0f5b5e6619420b67c9812d9cc6c86bc660e6641bdeb5a5d +size 201 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_random_select_last_index/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_random_select_last_index/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..c4c01d80dbae2686e0971795fcf142530bdba388 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_random_select_last_index/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bff015de9bf3d8c0c608e9f38ef7e3f85bdb6ab2ee7187b724013747f6b35e35 +size 112 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_random_select_last_index/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_random_select_last_index/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..e015eb9e4e35a3bcb672e3a280bde1be6cf84d72 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_random_select_last_index/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7dc7e8386079a9f7359b51d0b7ce6467ac2fd81998825ce1fa6586adf8e2a8b +size 114 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_example/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_example/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..6d2f5accbf3a997dbb7ca86881493d76e21d81cb --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_example/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c6df22e67ce2eb967fa0d1402ad53e780919731047081de752cb6b2ed4dbef3 +size 159 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_example/test_data_set_0/input_0.pb 
b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_example/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..3660190fdb924678baf00602cfc5f2eda8d8df68 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_example/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3fb2e68eb1b9128cd443cf8306d0e36a2efc14507305b70759830f5b1296ea62 +size 30 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_example/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_example/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..ac5bab847db10f055a3ecc4ceba5d56f14065d82 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_example/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:434c81deb13de3351c760a1989eba7c344d9c876082afecf1affbfcc83460f03 +size 32 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_example_select_last_index/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_example_select_last_index/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..ecd1496991b0928a947b8f7650380eee7cc7a77d --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_example_select_last_index/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dd5a56963ae97bf334cf172ba4c6204c8c42353e3f5b2fdc5c7b37c4fe448970 +size 203 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_example_select_last_index/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_example_select_last_index/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..aaad4419111e51ef4bb367937a9637a6598cc848 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_example_select_last_index/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a222bc115eab44665d54922b089d2ff4857259b5adaa0669c3e31b598836a1f0 +size 30 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_example_select_last_index/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_example_select_last_index/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..ac5bab847db10f055a3ecc4ceba5d56f14065d82 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_example_select_last_index/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:434c81deb13de3351c760a1989eba7c344d9c876082afecf1affbfcc83460f03 +size 32 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_random/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_random/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..e80492c4f773cb620b56692dcfbac0eb3422cc49 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_random/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:6bc4e358192055174e4dd815578ee7b862a5aa393294ce07ffb49555adb51b18 +size 166 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_random/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_random/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..c4c01d80dbae2686e0971795fcf142530bdba388 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_random/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bff015de9bf3d8c0c608e9f38ef7e3f85bdb6ab2ee7187b724013747f6b35e35 +size 112 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_random/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_random/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..ae9b6488306b34357e6f20ef7c4c6218ee0de96d --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_random/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9fa0f41df5f2517eb50f46ba99aec8bb8c930c847f022c40e0dc48d9e434c73a +size 82 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_random_select_last_index/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_random_select_last_index/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..d0a7e7733e72edb3a214754e3369f7746ad4a1d3 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_random_select_last_index/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c92b46191aeb77c20b7d65a749549087e9c1f63bf08f46e56474491dba50755a +size 210 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_random_select_last_index/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_random_select_last_index/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..c4c01d80dbae2686e0971795fcf142530bdba388 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_random_select_last_index/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bff015de9bf3d8c0c608e9f38ef7e3f85bdb6ab2ee7187b724013747f6b35e35 +size 112 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_random_select_last_index/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_random_select_last_index/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..ae9b6488306b34357e6f20ef7c4c6218ee0de96d --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_random_select_last_index/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9fa0f41df5f2517eb50f46ba99aec8bb8c930c847f022c40e0dc48d9e434c73a +size 82 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_example/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_example/model.onnx new file mode 100644 index 
0000000000000000000000000000000000000000..8ef96737a01ffc4fc424a8019d691ffb6ffe61e1 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_example/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41d730e8fe1285c8ddbb2c966b679d086061b82dbb3cdfd40210d00a60e4a9c9 +size 182 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_example/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_example/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..3660190fdb924678baf00602cfc5f2eda8d8df68 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_example/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3fb2e68eb1b9128cd443cf8306d0e36a2efc14507305b70759830f5b1296ea62 +size 30 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_example/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_example/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..ac5bab847db10f055a3ecc4ceba5d56f14065d82 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_example/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:434c81deb13de3351c760a1989eba7c344d9c876082afecf1affbfcc83460f03 +size 32 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_example_select_last_index/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_example_select_last_index/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..4d855e3601a3ae885f0961e15ea9ff5c23df416e --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_example_select_last_index/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9eef17eef1618fdef2120b9187f980e4188c6f289432e4b0446474ed3925a9e4 +size 226 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_example_select_last_index/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_example_select_last_index/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..aaad4419111e51ef4bb367937a9637a6598cc848 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_example_select_last_index/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a222bc115eab44665d54922b089d2ff4857259b5adaa0669c3e31b598836a1f0 +size 30 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_example_select_last_index/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_example_select_last_index/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..ac5bab847db10f055a3ecc4ceba5d56f14065d82 --- /dev/null +++ 
b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_example_select_last_index/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:434c81deb13de3351c760a1989eba7c344d9c876082afecf1affbfcc83460f03 +size 32 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_random/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_random/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..fb8d56134568e056dae6477b2975883b278fe34f --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_random/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e89ecf803fe4dc58b6c147733fb768045387c44778c38127edbf49219eeb36e4 +size 189 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_random/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_random/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..c4c01d80dbae2686e0971795fcf142530bdba388 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_random/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bff015de9bf3d8c0c608e9f38ef7e3f85bdb6ab2ee7187b724013747f6b35e35 +size 112 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_random/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_random/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..15f20185b7dc0c23d7a67504477654ab61544baa --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_random/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:422694831a089895731aab0a5e9791a23e58221b384d299936e8c4da34693581 +size 66 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_random_select_last_index/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_random_select_last_index/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..ac2fced1744d9d807daa275d262b4d6461023804 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_random_select_last_index/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2eaa3358b9738554a34a6924472e6a1386df0c7af6ce3784873d9cd1e2d0992b +size 233 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_random_select_last_index/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_random_select_last_index/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..c4c01d80dbae2686e0971795fcf142530bdba388 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_random_select_last_index/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bff015de9bf3d8c0c608e9f38ef7e3f85bdb6ab2ee7187b724013747f6b35e35 
+size 112 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_random_select_last_index/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_random_select_last_index/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..15f20185b7dc0c23d7a67504477654ab61544baa --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_random_select_last_index/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:422694831a089895731aab0a5e9791a23e58221b384d299936e8c4da34693581 +size 66 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_example/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_example/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..ff40cde2e5ee7c70714e9203bdc5e11bf2caa8ab --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_example/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c64d6d4278717a72eaaa23566ac4fda8fbd67f4decc481e65915a92152fccf24 +size 158 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_example/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_example/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..3660190fdb924678baf00602cfc5f2eda8d8df68 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_example/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3fb2e68eb1b9128cd443cf8306d0e36a2efc14507305b70759830f5b1296ea62 +size 30 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_example/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_example/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..aeba3dd7342bc9abb733e0d14e1cd172b989e453 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_example/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a2b4e8b6d9535c99bbd9502eb88e19e169b931db334676b77ef5771c07e573ae +size 30 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_example_select_last_index/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_example_select_last_index/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..5d22e792b7d9929eb6ddb21f2619d9af13efd5bb --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_example_select_last_index/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33d9aafcd92cf22fb489cfec521bfa9c9b78c132f485d7ca2d841df023ad889b +size 202 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_example_select_last_index/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_example_select_last_index/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..aaad4419111e51ef4bb367937a9637a6598cc848 --- 
/dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_example_select_last_index/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a222bc115eab44665d54922b089d2ff4857259b5adaa0669c3e31b598836a1f0 +size 30 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_example_select_last_index/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_example_select_last_index/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..aeba3dd7342bc9abb733e0d14e1cd172b989e453 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_example_select_last_index/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a2b4e8b6d9535c99bbd9502eb88e19e169b931db334676b77ef5771c07e573ae +size 30 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_random/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_random/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..0761a1f4845e1550155eb6723d9d91c9ecffe1fc --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_random/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:04fa3bb740fe0dd14235f8320d8a7a0842e3143d58ec6382d7f6f98e7a6efc64 +size 165 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_random/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_random/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..c4c01d80dbae2686e0971795fcf142530bdba388 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_random/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bff015de9bf3d8c0c608e9f38ef7e3f85bdb6ab2ee7187b724013747f6b35e35 +size 112 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_random/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_random/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..436d73a79959451fdcc5e6fb9c7dc561c303b178 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_random/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d64e6d5f85f7e8ed85a3dfb8f2a16c463dde007daa2d252d4b66a5321df684a3 +size 80 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_random_select_last_index/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_random_select_last_index/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..57015e67d90c60c446a5f104da3b677ae36d11ed --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_random_select_last_index/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b6e265a652e51d4688475ec937d849c1fdf904ddb20389279eb65ac7839cac6 +size 209 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_random_select_last_index/test_data_set_0/input_0.pb 
b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_random_select_last_index/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..c4c01d80dbae2686e0971795fcf142530bdba388 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_random_select_last_index/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bff015de9bf3d8c0c608e9f38ef7e3f85bdb6ab2ee7187b724013747f6b35e35 +size 112 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_random_select_last_index/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_random_select_last_index/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..436d73a79959451fdcc5e6fb9c7dc561c303b178 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_random_select_last_index/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d64e6d5f85f7e8ed85a3dfb8f2a16c463dde007daa2d252d4b66a5321df684a3 +size 80 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_asin/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_asin/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..f7233d9e646291fda4ac84eebebb2ae279ed2728 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_asin/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73e8897f49120b013356075d525cd12aaea01402e590f0c3d940449c2756dfeb +size 99 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_asin/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_asin/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..691aa143f8af9be7b79e4cf0a81853feb1fa8d0c --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_asin/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:05c023929a1b32b4bdbd7882ff5e53b9b53762ee2929c66be76d0f0ba7ca393d +size 254 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_asin/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_asin/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..72d28f737bc43289a4ee5c507549b428638b951f --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_asin/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bbdb3f11aa4e2dd7e90e6eff87b8fa90c21cd5773bdc1ca2f92910a58c24e62b +size 254 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_asin_example/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_asin_example/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..d76fa9fcb14ceea698885044650183c5370cf2f5 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_asin_example/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e2c0a55ed1914907af27a107c7094d276d9f9858afbd7ce7f4bdfdaed2a9d168 +size 91 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_asin_example/test_data_set_0/input_0.pb 
b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_asin_example/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..88128780e03bf763149dd948c9cf0d40a0bb2071 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_asin_example/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:703de0c3be505a78596235789682bc4e6f0e8523f34fb731207b4c076701abdd +size 21 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_asin_example/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_asin_example/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..721ef45b24499df9725ea659c338543e41d889b4 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_asin_example/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:165943d2cf4746f85aacd3c1a17504b3368893dd4c47bf53cf4b353b0cd7a22f +size 21 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_asinh/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_asinh/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..281a7858a90f9075ea2e2d582e3cf7246f57d3a2 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_asinh/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:385cc84c2ecd6105d8b7adb5d46308f83cce493743ab4625bfae8cda97807da0 +size 101 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_asinh/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_asinh/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..58f5af5f3237bae3e4133f0888f56b1ed0292c51 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_asinh/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0180f5665325e343077736e4903fa43513ae714ceb131deee45b842370df289a +size 254 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_asinh/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_asinh/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..ffc66bca72cf4ddb4bc7003e9b67f5facea42acd --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_asinh/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:392c9e40561b5ea9d7bbd147c76c2a106a4eba069e095392a014fe10b1964b85 +size 254 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_asinh_example/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_asinh_example/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..9bc74dc6c6383528106160c7fd7c95176d88474d --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_asinh_example/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15c564a6cc87d4d826d73c270e4d21630975c78b4c1ccd1cb414fff40c4c7b66 +size 93 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_asinh_example/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_asinh_example/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..65a6ccb2bdcb2e99bd08bd9a43a18901e7cd468f --- /dev/null +++ 
b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_asinh_example/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:458edc0b73c6d8d5359ea58ee82eeb6c58933075a0d612be37c2159e3a7d5085 +size 21 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_asinh_example/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_asinh_example/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..e54897184bb7b348bb72d44576fa6b73ef20e6a9 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_asinh_example/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f05cfad2b4770c8c438309b0ec431a934f03f72b7433a05bf69b176c7d69f24c +size 21 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_atan/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_atan/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..92ae4402517fed80c7c607d3408eef375b6b7ae2 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_atan/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a3fe977596c46bf9c085e52815f9fb6524edb2f9128e9a82362fdef5e35dfae1 +size 99 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_atan/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_atan/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..58f5af5f3237bae3e4133f0888f56b1ed0292c51 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_atan/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0180f5665325e343077736e4903fa43513ae714ceb131deee45b842370df289a +size 254 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_atan/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_atan/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..678db6dad7226ded9a3c83a088f58f9f688c8cd7 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_atan/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9070fe5f050813557f90c3e65b6bc7906d63d60856c31d24d41d80c50da9f56f +size 254 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_atan_example/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_atan_example/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..9bab2c2f623d42d2989a0e9e1b4af994a66f2f1c --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_atan_example/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:acf03351183366186c51883e20e2ec293808015fd481a414afb480bc64bd1c38 +size 91 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_atan_example/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_atan_example/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..65a6ccb2bdcb2e99bd08bd9a43a18901e7cd468f --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_atan_example/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:458edc0b73c6d8d5359ea58ee82eeb6c58933075a0d612be37c2159e3a7d5085 
+size 21 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_atan_example/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_atan_example/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..bdb4a2acaf13bc17b9e68f190f3e68269f534614 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_atan_example/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9165a5f600de0b1936974f44d4b13e7f0f553112186313b65c380fb99cf515ff +size 21 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_atanh/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_atanh/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..71ce566d7a67185a4ddf8a70cc6daff8f3f83dc5 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_atanh/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97ed9b92ef9ea519c739c028e4a58ccbcc9d03db46947c44cfcfc176cd4b2163 +size 101 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_atanh/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_atanh/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..691aa143f8af9be7b79e4cf0a81853feb1fa8d0c --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_atanh/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:05c023929a1b32b4bdbd7882ff5e53b9b53762ee2929c66be76d0f0ba7ca393d +size 254 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_atanh/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_atanh/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..96f20c7c04f348feba0e5db6e363c9933ece7e07 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_atanh/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:afe7d7d7f4719b5ee6f6109ca9822fb8b740b3d98ac40b7bdec891c4376759b0 +size 254 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_atanh_example/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_atanh_example/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..ed2f6e5c5e05e5b316efa4d5653077259b1ddcc0 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_atanh_example/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c3aca74dbfaf05165f3b7ed7be081c48826a0d8fd0d38753c1009205ef66afd +size 93 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_atanh_example/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_atanh_example/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..88128780e03bf763149dd948c9cf0d40a0bb2071 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_atanh_example/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:703de0c3be505a78596235789682bc4e6f0e8523f34fb731207b4c076701abdd +size 21 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_atanh_example/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_atanh_example/test_data_set_0/output_0.pb new 
file mode 100644 index 0000000000000000000000000000000000000000..16a634c716008b9b941d19fa4d2a783d4e54438c --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_atanh_example/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9678e93f893f8102285a3fa9f4f3823cb42c35eee64f753d02b246ef789906ba +size 21 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_1d_default/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_1d_default/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..bb342a5c19683bb8eb8e7b0b8996e99558279f4c --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_1d_default/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c6c555ee51925a0d86899411c9ded36361800a09ab3f17bcc1022a5655ec682 +size 145 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_1d_default/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_1d_default/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..b4b92befc6efecc2126134e2c5e78ffc284acd84 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_1d_default/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:65add2b5a8412fb05b8501a6e6d0486e61cbe3498f54a31ea099acd350c26e45 +size 398 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_1d_default/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_1d_default/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..a2ecb302bd6d074b5b33ade6f5474a05420f5ccb --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_1d_default/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4d98876dde2d0105f8604a0a7e9bf81407c353c59f8f5f0bbaee60bd1e6cc90b +size 386 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_ceil/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_ceil/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..b05c7f7f58763b35294b2d482a19c7964922260a --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_ceil/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:170ff4973849e399dff5c51d02ca17c4704e7be0b6f45ece4d03b07c90309a95 +size 189 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_ceil/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_ceil/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..0a2419bf3623ae99f66a6d7f9db69a8adade76f5 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_ceil/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c169d1e670a61db28d5ae729736ab019603f6f2be267100de045a99815b15a30 +size 79 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_ceil/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_ceil/test_data_set_0/output_0.pb new file mode 100644 
index 0000000000000000000000000000000000000000..0bf2649b5a7511aba0f6c2f6b6d7bd04c5c2d992 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_ceil/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:27f36c1e7f6212c989007bafd87ce5416cfb57deade7412c1a28dd7d6aea99e0 +size 31 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_default/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_default/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..de14a55ba3515c33128d80b20495c4ba2e52f13a --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_default/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19344327687a40bb767971e752ff4f2a98052749a535dec27891d2a393113c65 +size 156 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_default/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_default/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..2e75ff00a2c0ddcabc9899d4d6fb9ea3fb017c01 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_default/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:527cdefe87f628045a0c16e8f59dd256f94c51f68fd405127a47dd2b454d5a86 +size 12304 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_default/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_default/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..71f9e769b109f4e491b8b935a0d6224a5640b17b --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_default/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5aa0effc557770c2444f6618ad3c03d5035a152d47597de5ff080d5aee5fbfc6 +size 11548 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_dilations/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_dilations/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..cc58786dc5f95e339d1643999916ae3a026a2765 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_dilations/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dd1c3e047e6f297ca7d0cd46d75e050b0eb2c045a2bacd5ae2b76882bb7a49e0 +size 214 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_dilations/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_dilations/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..0a2419bf3623ae99f66a6d7f9db69a8adade76f5 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_dilations/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c169d1e670a61db28d5ae729736ab019603f6f2be267100de045a99815b15a30 +size 79 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_dilations/test_data_set_0/output_0.pb 
b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_dilations/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..b8d8d16a2cf3e455b6d2c820495d6836ac5b6474 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_dilations/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b4c6fef36a0981f80d2558a3b33e9eb2e30e6ff2204daded24f15bc06f6ddeb +size 31 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_pads/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_pads/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..745a430d8bf5c57addb9a0a133b7bb37fef1cff7 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_pads/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f2b5c3a6e4968e91973f847ff8ff43b2d61c9ae4df6bf4e8d105db92606c64fd +size 172 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_pads/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_pads/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..a78ecf0897d81e6101e921a7478eabec08181b41 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_pads/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c51a939bf24fbf792a31df0723f123a31fb59c6ea0d8ac06aa89c799fd9cdd2 +size 9424 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_pads/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_pads/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..6766f83bc122091851c9fde6d59d49c398a35065 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_pads/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc5be289731859656069bbac3041af2cc1326d94aaec2dbdeada9a3502fa91ff +size 10816 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_pads_count_include_pad/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_pads_count_include_pad/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..6b60911228cce4bd5c2e605ba48f58bce21cc16e --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_pads_count_include_pad/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4dc6abf8fc6b3778103b68215e651351de474f58eaef51b7c1dbaf90763ba4c2 +size 216 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_pads_count_include_pad/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_pads_count_include_pad/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..a78ecf0897d81e6101e921a7478eabec08181b41 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_pads_count_include_pad/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c51a939bf24fbf792a31df0723f123a31fb59c6ea0d8ac06aa89c799fd9cdd2 +size 9424 diff --git 
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_pads_count_include_pad/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_pads_count_include_pad/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..9f027bbc8e4c6aa69c9b82f064477ecfd1fc11c3
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_pads_count_include_pad/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e54e3a456e5704a515b37518b1bb8f3ceab1c9597e06ce5ca9d966837cc1563b
+size 10816
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_precomputed_pads/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_precomputed_pads/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..303d5e7ab5e063612d9489090690ef3ec3805a07
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_precomputed_pads/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:faf39e37a2c7300a9ca330b2fe28d80a9513aa0f460b8d93291c558cbd41017b
+size 184
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_precomputed_pads/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_precomputed_pads/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..3322e271fe73c0429207e3d702c54c8dfd0e87c8
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_precomputed_pads/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39394d9a9aed24e52e2d88838371c74b00d2319f48d3b97633915cc5d40bcacc
+size 115
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_precomputed_pads/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_precomputed_pads/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..c4de204ae053a5d61e3fddba261dadeefe785d80
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_precomputed_pads/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:11cc68d90c4a424bb53184ebd7a84af297abbec5393f22ff7c636444e9279cff
+size 115
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_precomputed_pads_count_include_pad/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_precomputed_pads_count_include_pad/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..cf7a22bebac1c7be36827a52ec5db0e08e8aa28b
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_precomputed_pads_count_include_pad/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a1c253d7131a304b5ee832486438be60d9fc1f782fbd4268a91eb2b681f6adda
+size 228
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_precomputed_pads_count_include_pad/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_precomputed_pads_count_include_pad/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..3322e271fe73c0429207e3d702c54c8dfd0e87c8
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_precomputed_pads_count_include_pad/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39394d9a9aed24e52e2d88838371c74b00d2319f48d3b97633915cc5d40bcacc
+size 115
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_precomputed_pads_count_include_pad/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_precomputed_pads_count_include_pad/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..e7e57092062221894ffe862c179e0d5a5dd06e45
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_precomputed_pads_count_include_pad/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:db136d0ad0abb30777ef618345378036c4b0ae312f084043e7633823bee41bc5
+size 115
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_precomputed_same_upper/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_precomputed_same_upper/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..ed4c9685fbfa82ced4168e9e93267796ff34472c
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_precomputed_same_upper/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:37341957bb94869e20c76b34b263035881ccb7bb3b0617e418925969aa2c57dc
+size 216
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_precomputed_same_upper/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_precomputed_same_upper/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..3322e271fe73c0429207e3d702c54c8dfd0e87c8
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_precomputed_same_upper/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39394d9a9aed24e52e2d88838371c74b00d2319f48d3b97633915cc5d40bcacc
+size 115
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_precomputed_same_upper/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_precomputed_same_upper/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..959383e62b46327f9fdd2ab1232d8917444a8b82
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_precomputed_same_upper/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3d73e143036a1dd2f52d9c79283de81851b38cb9cda4f4914541332df0de7e53
+size 51
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_precomputed_strides/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_precomputed_strides/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..bc4b944f42434ec6db0bede1cf68a8b61c8c0158
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_precomputed_strides/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:657c937a9218d2ab51eec2468c5e4a46ea50f96b311b50a26ed0ec197040e10e
+size 186
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_precomputed_strides/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_precomputed_strides/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..3322e271fe73c0429207e3d702c54c8dfd0e87c8
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_precomputed_strides/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39394d9a9aed24e52e2d88838371c74b00d2319f48d3b97633915cc5d40bcacc
+size 115
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_precomputed_strides/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_precomputed_strides/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2f4b34044d25b97357a6f4e45c019dcf7bbf8b70
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_precomputed_strides/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ea4bab04bda0ad002df59287ae72647d64b764ff93a1e14251fe14e19715709f
+size 31
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_same_lower/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_same_lower/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..300020a6c4035f23f9f9ef1d3498f8155a381732
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_same_lower/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:148390ac213145ceff46276feca4190674523aa06ee6e20b47cc5187419bbc7b
+size 186
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_same_lower/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_same_lower/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2e75ff00a2c0ddcabc9899d4d6fb9ea3fb017c01
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_same_lower/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:527cdefe87f628045a0c16e8f59dd256f94c51f68fd405127a47dd2b454d5a86
+size 12304
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_same_lower/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_same_lower/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..0294344d3ce981e267745f94288f1161571baa22
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_same_lower/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fb96598b4b104da048d13e824354afd627f23c07072b3a582c97d56c6ca4feac
+size 12304
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_same_upper/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_same_upper/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..5032ed93c24e0fddb8718a7995cf614894a58caa
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_same_upper/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ce51b7b9fc7f7f79ed1d2a5883701b8f0c336d2140f27aa4e495db3291df53bb
+size 186
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_same_upper/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_same_upper/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2e75ff00a2c0ddcabc9899d4d6fb9ea3fb017c01
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_same_upper/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:527cdefe87f628045a0c16e8f59dd256f94c51f68fd405127a47dd2b454d5a86
+size 12304
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_same_upper/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_same_upper/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..46b256f069a52008d1054c98a4ee892aca676be6
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_same_upper/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d1dc83df0dfbe5f328794af82f98b7f7a313c402859c611cd04e053e114e8703
+size 12304
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_strides/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_strides/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..48f038a4e0b397a3edfe11649505bf230ba40d9d
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_strides/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2ad184dbe585a8d7a8909b56f9c282430936a833dc601cb341619a99ff8b9408
+size 174
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_strides/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_strides/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2e75ff00a2c0ddcabc9899d4d6fb9ea3fb017c01
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_strides/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:527cdefe87f628045a0c16e8f59dd256f94c51f68fd405127a47dd2b454d5a86
+size 12304
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_strides/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_strides/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..63f061f5551d8092ebc5f89e14b35d0eacff6a3f
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_2d_strides/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:211fd0ee562408c494aafed1be3b61f750ba7763b46a57d74318e83dd0ee3210
+size 1216
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_default/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_default/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..7e395d1fd817dab3c97345381a34ac8e194c763f
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_default/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bd79c5b1ef52b7ce964bc2a58d4e3e2de19f4aaafd1fdabff99fb618f6a27460
+size 166
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_default/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_default/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..ee2aa39211d7e6f572c99efbd1891315f0f611ba
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_default/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2ff73d3f9e82980ed6ccf8f2d539645ab2a0626d1891e5c463188f67a555d706
+size 393235
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_default/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_default/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..213e431c2b81a8302a9f8c0d5705a9a52018dc70
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_default/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d69fcd541ace04bbbb90fac02c80d2e8cb9338968e4b67ca8b6280bcef20ba8
+size 357511
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_0_ceil_mode_is_False/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_0_ceil_mode_is_False/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..3c6720b958d7368cb9d7586884c9f95e0ba3a05b
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_0_ceil_mode_is_False/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0aa9f93b73840f6edd81cdb774d734736b0441152b9546b0b80e05ebe3419e10
+size 303
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_0_ceil_mode_is_False/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_0_ceil_mode_is_False/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..449dcde9bb326b920726719e77c65a0b894a35bb
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_0_ceil_mode_is_False/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b4666691b40279cb0095bfc459762410b5d4ac0c332404eab5dbd50297976e3
+size 131091
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_0_ceil_mode_is_False/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_0_ceil_mode_is_False/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..5b0063d03d25bb3117fb740e64279914a686ad27
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_0_ceil_mode_is_False/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:62e9e0b770c1e13217df9cc1a40bebdf8f982ab717a44f6940d34ee8db55d366
+size 2066
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_0_ceil_mode_is_True/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_0_ceil_mode_is_True/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..b79125e1f6bf402e2e580a2e11026cace9285b49
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_0_ceil_mode_is_True/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ea6ddfeb16e05e5a4644f0989932a8fbd7503acce76c0ad84791ee609b6065af
+size 302
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_0_ceil_mode_is_True/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_0_ceil_mode_is_True/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..abafa1c4e99616c9ce77c208c28776de39d49ae6
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_0_ceil_mode_is_True/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:54a47a4fdf645ecca96425d639c18f6286e73623dd03de6e2b30edbabebe5a9b
+size 131091
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_0_ceil_mode_is_True/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_0_ceil_mode_is_True/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..a647bbf7e57f596beaff349cd8c304b1f8d062a3
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_0_ceil_mode_is_True/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:877d08f40700631515da4e5f4236e1dca5cd101ea9cafa73776cf194c945ae5e
+size 2934
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_1_ceil_mode_is_False/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_1_ceil_mode_is_False/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..440185a6f0620997c5d37e6463c260ce55f0a8e3
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_1_ceil_mode_is_False/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:43afa2d641f3e262816f32aaea3e7d5c4a5aa00d41e07884d892473a6c64c634
+size 303
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_1_ceil_mode_is_False/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_1_ceil_mode_is_False/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f8c46f32fc8ad0e87bae2d1f7da68b4f6269ddae
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_1_ceil_mode_is_False/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:371665cd89dc3379fba19e7908f191312aec6587cabe164bd2fb415b4b220fc3
+size 131091
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_1_ceil_mode_is_False/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_1_ceil_mode_is_False/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..88c7f5394c80ea38c242d42c6a274daee762212e
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_1_ceil_mode_is_False/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5fc3566bcd03af6e36fb3dc4f3160e6d4ea0797f0a67284fc5fa270bf3f12751
+size 2066
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_1_ceil_mode_is_True/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_1_ceil_mode_is_True/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..54cca04112be3ec4c67c3fd54ef735757804df6f
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_1_ceil_mode_is_True/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:363ca7ed5907829912c15355e2bcd59566e1c0ef7b371641e890db85da549894
+size 302
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_1_ceil_mode_is_True/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_1_ceil_mode_is_True/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..0bf9d53679b58e082255376ac09810b4d6e84443
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_1_ceil_mode_is_True/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:08e56fc96e05857e0fa9ab92b664f7524ac9a5ef0a528417a6a1e30fc37f32a1
+size 131091
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_1_ceil_mode_is_True/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_1_ceil_mode_is_True/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..89e38046376af9bf9214ed28c2ae8dee8594f5da
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_large_count_include_pad_is_1_ceil_mode_is_True/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:95e652b36f4fb061febab6f44d65d5d3afa5a4472cb4a54002106f3f58b2e6e4
+size 2934
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_small/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_small/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..57726ef5cd4f5f0268745b5142881ba9bdd18554
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_small/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c3971e779dfc45e10f04225d76a929411c4d6170c2c582b122b7e422204bf701
+size 234
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_small/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_small/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..81032a4d92a8a14fed8a0a965c7b32865e233cc3
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_small/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7940594525e89613e63ea7f31571be2097a065fcc6390a5f743a1e2b638492b0
+size 274
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_small/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_small/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..12a288cdd9b804f25f12b1b4c30f360976448969
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_averagepool_3d_dilations_small/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:46daf7663330427d15b9958b20586116546dadd4739cbba679b215ba6b963c39
+size 49
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_conv_with_padding/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_conv_with_padding/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..8c18ccaed7d3dc10a811c5fe97bef8799c7b2e18
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_conv_with_padding/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:703ff5b428e86648c0729222d10373aa699ba55fc01b0ff6af4de581cc501596
+size 201
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_conv_with_padding/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_conv_with_padding/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..15b11eab7223cfe29b6393639e07214911842280
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_conv_with_padding/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4672555872aa97ee1ce75037262093bc57beaf2f28e5b6af3a7228b4ecd914af
+size 115
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_conv_with_padding/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_conv_with_padding/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..86afca9241f05bd295f9e7bd4bd08756b562e0f6
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_conv_with_padding/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8bcb9893116c8e68788cd0f09a00ad57b8d0b45d759f00b02122d329fd423a08
+size 51
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_conv_with_padding/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_conv_with_padding/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..687b129892c6676d595550c89fb132f631375a7a
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_conv_with_padding/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fcb43a19c6efbfe4f072def03ee08ee47937bc8cd527acc0263aac0944e2112b
+size 115
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_conv_without_padding/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_conv_without_padding/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..20ce01273053ecb740b683a54182f30738dd66be
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_conv_without_padding/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fbd45d82fe2295740b6abf36afc4c191d6b92ccde7a2e6dbd1019905527dfa60
+size 204
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_conv_without_padding/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_conv_without_padding/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..15b11eab7223cfe29b6393639e07214911842280
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_conv_without_padding/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4672555872aa97ee1ce75037262093bc57beaf2f28e5b6af3a7228b4ecd914af
+size 115
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_conv_without_padding/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_conv_without_padding/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..86afca9241f05bd295f9e7bd4bd08756b562e0f6
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_conv_without_padding/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8bcb9893116c8e68788cd0f09a00ad57b8d0b45d759f00b02122d329fd423a08
+size 51
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_conv_without_padding/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_conv_without_padding/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..6da55a586cf7be1a7a3148ad3af78d6e730073eb
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_conv_without_padding/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2b8bfa3adad3f8ddd02a7901554aba873abe730b167b45054a5765f947b6801a
+size 51
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_deform_conv_with_padding/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_deform_conv_with_padding/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..3d2625e3d4ee00950237730ce51b8c220a9fe3ef
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_deform_conv_with_padding/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ecfb66d932aec9c723809cb9a4d44ec46311a4803979c858d0bcaf878501ceb
+size 308
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_deform_conv_with_padding/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_deform_conv_with_padding/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..df01b0904cb2d02fe9aed94837436e14597b55e7
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_deform_conv_with_padding/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7418da057ccc7c5228bc5e395454d229ad52cefc620d4d9c9eb033337b4e63b3
+size 51
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_deform_conv_with_padding/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_deform_conv_with_padding/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d66d0d6bcf45111fee68ebaaa3175b3d3a7d1f6c
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_deform_conv_with_padding/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e0a8bfbb79fff49cde76b5550a53283820d563b68fe81252a3f620a17fe7dc43
+size 31
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_deform_conv_with_padding/test_data_set_0/input_2.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_deform_conv_with_padding/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..8472b6e91be16bd97e0800881bac9045e2317765
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_deform_conv_with_padding/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b9bc65a69f0e93ce299445de7e370a04d2e6b636161f25bc953072486a578956
+size 546
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_deform_conv_with_padding/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_deform_conv_with_padding/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f7b776f71aba2c2dfa9ed1a117dcca3403b7a7c9
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_deform_conv_with_padding/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:22bbf5afe91a4889a7c2d219931c95931b04b793ecaad863ccac6a6ab8a6c139
+size 92
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_deform_conv_without_padding/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_deform_conv_without_padding/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..775311fa7227b87ddfd5536d7775ab3d6f0fb170
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_deform_conv_without_padding/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fb6c2918b5ccf3b188183b0a81fda288a805f04d85477ec1040d58b053240b7c
+size 323
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_deform_conv_without_padding/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_deform_conv_without_padding/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..df01b0904cb2d02fe9aed94837436e14597b55e7
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_deform_conv_without_padding/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7418da057ccc7c5228bc5e395454d229ad52cefc620d4d9c9eb033337b4e63b3
+size 51
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_deform_conv_without_padding/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_deform_conv_without_padding/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d66d0d6bcf45111fee68ebaaa3175b3d3a7d1f6c
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_deform_conv_without_padding/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e0a8bfbb79fff49cde76b5550a53283820d563b68fe81252a3f620a17fe7dc43
+size 31
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_deform_conv_without_padding/test_data_set_0/input_2.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_deform_conv_without_padding/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..3dc89f7d86077d34b33d641fe74d5568ca18fbcf
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_deform_conv_without_padding/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5fa8fd1cf0355e45a67eafe32c3d8bf310a1bce49869e1c2f3ae9779161c8d82
+size 165
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_deform_conv_without_padding/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_deform_conv_without_padding/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..5934f697101d808de7b5b9a71e7ecafae89f1cb4
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_basic_deform_conv_without_padding/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a5e249cdad13f832fbf9011a671495df13fe0f0ed951017b0c0a21e78857a33a
+size 47
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_batchnorm_epsilon/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_batchnorm_epsilon/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..03127096c01884b64698cec50786d998a2c3a617
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_batchnorm_epsilon/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:357a376dd7a29287830989a43b8e909556386c118465f2ac165444ecfc3946d2
+size 250
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_batchnorm_epsilon/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_batchnorm_epsilon/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..99e2b144d24c2b761cd2e82531b87d8fb08c012c
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_batchnorm_epsilon/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f2e11ebbb9f03995247c1b80e5bc3e78f058d31ec84b77b8e2835718cc866e44
+size 496
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_batchnorm_epsilon/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_batchnorm_epsilon/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..71d8979b7f6303fad39b48f74e99d513220c4fc4
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_batchnorm_epsilon/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3811afbc35b6bd7958ae3c00ee6c0ed763acf834978aba9d6368c3d0ecba0a96
+size 21
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_batchnorm_epsilon/test_data_set_0/input_2.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_batchnorm_epsilon/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..37e4340f3fcbb4c8028985badeaef900927c4ca6
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_batchnorm_epsilon/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df0135bb529c4d8a6831317ec01934d370ec5e751cf681064c5c9c214e230973
+size 24
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_batchnorm_epsilon/test_data_set_0/input_3.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_batchnorm_epsilon/test_data_set_0/input_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..3b47eb281243ec24605c4c71354d7a63f67f9f92
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_batchnorm_epsilon/test_data_set_0/input_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4fcfcbfbe5e064e75b0c65121c858636a6132d3bf5c85ab6a07e3cedf008d2d9
+size 24
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_batchnorm_epsilon/test_data_set_0/input_4.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_batchnorm_epsilon/test_data_set_0/input_4.pb
new file mode 100644
index 0000000000000000000000000000000000000000..20ea60dc51d6218fe4222f9c33010d679d73a2b0
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_batchnorm_epsilon/test_data_set_0/input_4.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4e2e82749a2d3e3fb7faff241ea93e6169cd2c5440e56dc681e6ebd651bf4222
+size 23
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_batchnorm_epsilon/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_batchnorm_epsilon/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..01054c15240cfdd3ca99cfef996266d63cb3659c
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/node/test_batchnorm_epsilon/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9fa5c52085723496dc42de4c8f648a1f28fcfef8e40128e1e4a225cd5c056e2e
+size 496
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool1d/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool1d/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..6c61d4e18d650284790f59b3278c1c5c1fff9ab3
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool1d/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f260150e14bcab6f7cdd40f8d939f652d61c8faa3f18ec77d417faace4279a27
+size 234
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool1d/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool1d/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..1c9ec57b55ed1a5c0238628509f037d8f306ea83
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool1d/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cd5d55b7c7b8aedec104cc02593be96787b034deaeaf1ac59c4d3ea1301e6d8a
+size 155
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool1d/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool1d/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..8340555f931557cfed527afc0e8574b60c06522b
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool1d/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aa7f737bddca29e9015075f5cdc3c53d0b338676f4c421944367148b8bfe3c17
+size 82
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool1d_stride/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool1d_stride/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..6c61d4e18d650284790f59b3278c1c5c1fff9ab3
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool1d_stride/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f260150e14bcab6f7cdd40f8d939f652d61c8faa3f18ec77d417faace4279a27
+size 234
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool1d_stride/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool1d_stride/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..3d57bdc0b85c7182b3cb5e71cea44c67865a5ba5
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool1d_stride/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f777e17d09325b1b1511b8b09e6871e51b4ddcdb868b1af21477181ab06a8809
+size 155
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool1d_stride/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool1d_stride/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..c3d7a11d85b3b8050e64a25684207c9b3d4b50b2
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool1d_stride/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:57df2244555569b992c5ae9140694e03b0cbb93b027ccc142c1a29a2ae4e8044
+size 82
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool2d/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool2d/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..0131b9fdb14f82fe01e74e562ccd3299ce553fa6
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool2d/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4c1f2c2519762213043bd154a1ac89d8c33ce45c3746153d5b57b9062a630792
+size 180
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool2d/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool2d/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..659f151e6775feb1b45e5949f5c1681ab0761346
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool2d/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6cd2dd80b3c4827795d9d629c78e19fdc35cbbe3fed53b4b7b70711c1cef4927
+size 877
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool2d/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool2d/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..406eacf7d7a728e35d74f71f434bc6ee7d111019
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool2d/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:178d1f0e35a575027169b33f5d890bce910c7b3ee32fa3b37899606e8b9746b7
+size 229
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool2d_stride/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool2d_stride/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..0131b9fdb14f82fe01e74e562ccd3299ce553fa6
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool2d_stride/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4c1f2c2519762213043bd154a1ac89d8c33ce45c3746153d5b57b9062a630792
+size 180
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool2d_stride/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool2d_stride/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f6343181cdb47a86f73b145cf4eabcaccbec8d16
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool2d_stride/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4e90cf8acc656069f9f20e760a02781c0b059be06c7e323012ccd341c4d5f32d
+size 877
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool2d_stride/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool2d_stride/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..5de694fe0afa13afd6946c1ca8518f2bbb74dc4a
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool2d_stride/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:63f72c871380f41ff5c5508c6384819f40a6b7f36e15fde02f9b6687ebcdbcf6
+size 229
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool3d/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool3d/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..89c1620fc7d69c863096a1c685422e8ef79bedd3
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool3d/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a4946b543220c959ec5e1794c3497c0c4b022371fe03d135172b6def6d5212f4
+size 196
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool3d/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool3d/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..74f60d066b26a1c4e047c3cf6e788147709b407f
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool3d/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9fed8606684b0f4c5f7ca3e8279167141116d6407468cc997444ff19689fc3a4
+size 1551
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool3d/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool3d/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..87167a9a9ef33ee0d294fadaaa043279e6d11e5e
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool3d/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:15285cebd3b7765076834990f3e8c8da41a93324f259d300f13005a2cacfc90a
+size 207
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool3d_stride/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool3d_stride/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..dd963a11aeac7e4c281e688a63473ef84ddffcd6
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool3d_stride/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b80276bbf2f4e3e031bfeef5a8f5342aa7cad1d52b0c250ccd3ed119b6771d6
+size 196
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool3d_stride/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool3d_stride/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..b7e2f9690775bd4d5ca41c903937a3cab72b500d
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool3d_stride/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cbf4e5fe5af9ba815faff983395a75d28d51ab0b68ddd517eeecaa6d9a61b656
+size 3015
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool3d_stride/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool3d_stride/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..85a266942a0a64f5635dec27876dc24427a78e74
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool3d_stride/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e7422e8702d212f9724b043389744aa6506872334446f2ff520fd056f02d67b1
+size 207
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool3d_stride1_pad0_gpu_input/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool3d_stride1_pad0_gpu_input/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..8778364e2e0a49d48c1c664f61482e58789a7b1a
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool3d_stride1_pad0_gpu_input/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:edbb2cf2c959d75ff13e1e9e90b9db3eac12c7ed96ee7cc195f48dad682e1ffb
+size 196
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool3d_stride1_pad0_gpu_input/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool3d_stride1_pad0_gpu_input/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..788f0fb56c3fc7b7270bd3f0d96fc8628491f302
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool3d_stride1_pad0_gpu_input/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8497a5c52894960fe8f122fb1a72926f0ccad714c9985c0e19b627ca650b73ad
+size 1551
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool3d_stride1_pad0_gpu_input/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool3d_stride1_pad0_gpu_input/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..5779b3b4f25cecf3c6bca3c8a3128b38d579ac2b
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_AvgPool3d_stride1_pad0_gpu_input/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3d19ae234298ff644f42db56b47f2620103bd406f554b47ae41a8adaf9aa3d74
+size 207
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm1d_3d_input_eval/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm1d_3d_input_eval/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..206146475382fe455c94e088b82e1174c28378fe
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm1d_3d_input_eval/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:29abcf99a4feed8997af5caff39b9e52d7a6bab8deb320799e80f4bf731ced17
+size 378
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm1d_3d_input_eval/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm1d_3d_input_eval/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..09a27e2380aac9572e23ccb1f13e5010b336bafe
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm1d_3d_input_eval/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:59499822f69cbd4c4ebfc678e15b79a161cfdd1ab49518d7574b9346b541e0ef
+size 251
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm1d_3d_input_eval/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm1d_3d_input_eval/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..c0dfc7778c9ecda036fe037d2fff564883bc5962
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm1d_3d_input_eval/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:17c8577f8ba36b0f7090a5c9387db6b32f5df827c10cba75df2d7a84ff3f420e
+size 251
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm2d_eval/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm2d_eval/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..4cb457e59973f98ba171343e66d91e76bbae82cf
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm2d_eval/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f187749e55da7ed28169e9638a6098257316f7d314eaba3a0c33243454336752
+size 354
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm2d_eval/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm2d_eval/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..7e2f2e5d321f7e6bb544db64248f011ce528ea68
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm2d_eval/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:59922c97c81d4d2a64c9b55d336782f940dd9f051c949a6fe828b3d5d25ad95a
+size 877
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm2d_eval/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm2d_eval/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..abfe299cadb6f74747e1cf8f13450ef2a70511b0
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm2d_eval/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a6b326b09b3ce9ef6a01f22c8a4f5ae042196c223f62075c79b3bb987e7bd4a2
+size 877
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm2d_momentum_eval/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm2d_momentum_eval/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..808fdea75200f3ddf2ef216b409af0ccbeb194db
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm2d_momentum_eval/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a4e27a43a45f8dacc42910c71b7b5f925de23e07feed7cc387535d5bacf5cc80
+size 354
diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm2d_momentum_eval/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm2d_momentum_eval/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..33adb55c6c3991ba05ff4d1f0d7b43ffc012a0cc
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm2d_momentum_eval/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5e5fdbbb9c90eec03a85e5c11f4002d7273038a2b3ef1f32c839d7a862ac34a3
+size 877
a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm2d_momentum_eval/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm2d_momentum_eval/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..cb6f9fa6852429f4b792db55d2a9122ce1895603 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm2d_momentum_eval/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d144d278eca9e0080cd414aa7d7f05d73b78031c60e5e5a9c69ad04656febf44 +size 877 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm3d_eval/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm3d_eval/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..27a42d1801c78ed30b9d75b575b3b4a3dfc803cb --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm3d_eval/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a1008501548ffcbc33f180ffc3742183df6caad9c9f943c4485561c9f9bf67cd +size 362 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm3d_eval/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm3d_eval/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..d5c1eac0547d755199a12890299a97bec476bfcb --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm3d_eval/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c5670e94b4b6d716ef4ad5cae55cae40eaa13e39581c95725e5eec6fa0e13b0a +size 1551 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm3d_eval/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm3d_eval/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..f7f833868b174902d5b0ad5766eb93e77f047073 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm3d_eval/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:40bbd328716a26c20868a37224fc62dd0079feb42d43b549fdcbd0d6c74f38aa +size 1551 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm3d_momentum_eval/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm3d_momentum_eval/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..abe1da5bc520686d00863e9029e24f298e46192f --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm3d_momentum_eval/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca30f9f606c114c8eece702964fb093b10e8c8a93571ef267a84f8cb568fba7e +size 362 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm3d_momentum_eval/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm3d_momentum_eval/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..81027da6c646bc270321c362a2c40b14be2c9117 --- /dev/null +++ 
b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm3d_momentum_eval/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:288c9a6c44c98f63e44b40a4ef2e483c08445eb3d0dd02ec425b2dc19bc20a76 +size 1551 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm3d_momentum_eval/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm3d_momentum_eval/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..620ba243b05fdf7078d557d11be051955d7c0b55 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_BatchNorm3d_momentum_eval/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ade124ac423eb248e713f6387c4e7cf04c689c9b29e055969fa899d6434abee +size 1551 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ConstantPad2d/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ConstantPad2d/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..21350a15222a21617e37356d6ef98b3fdf569d71 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ConstantPad2d/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d12b040bf6740eb10b4151eb6971ba07344f7eb76b23bf1e0fdde7778d205d73 +size 177 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ConstantPad2d/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ConstantPad2d/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..890da5390fecb1d0559c1efda1e2b4cc9b1defd3 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ConstantPad2d/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:428394333ec2226efd562efe16a16b347fc5a66da9c238894c9e4a1c6b01faec +size 397 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ConstantPad2d/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ConstantPad2d/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..ec178b68ea6a3ae569861d7941859562bb2ae5e3 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ConstantPad2d/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:72b1e95e8e05fd33cf927883b95629805dd37ee6d52d915e4842afaa14e4be9a +size 1861 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..ca18a4d28d4b2719dabe502d8559d5107f20e374 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6784029f6f72d5d8c9dc9667b858f838882c02aa92663e3b8c9f85a50a9cf10e +size 525 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d/test_data_set_0/input_0.pb new file mode 100644 
index 0000000000000000000000000000000000000000..da528f7cae1f056d172c76abbc0cd93b6def9c97 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:335a771df5f7b7208dc4c3945f2b5dc7972ae00505af7f0c19df02e62e8f4c71 +size 331 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..50a337900304ca56585d2279842817205589709e --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c6f15ce3daa142241c12d73939925d7a9ad0d95ecdc68e44ea5fb9181c836a3 +size 331 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_dilated/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_dilated/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..cf1eb0da8bfb26abed2458c694c3e48b679600bf --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_dilated/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:096963d90634e312f4577f517ff43f132cbdd245545cb2ba956202229113d8a1 +size 525 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_dilated/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_dilated/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..f2d96ec31ba44d07c7847a06caa37dea1410598a --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_dilated/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc4d996072ea3c469507b1d143936f2655f00b696e131c69fddd56d976148712 +size 331 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_dilated/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_dilated/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..562c23a5dfbe38ecb2d18017b05fbb7f4ec69e18 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_dilated/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:46ceb2b4c11099b5047ce2c35fbe98010ea10d2f89c54dea478bff962b20c9de +size 251 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_groups/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_groups/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..1127c73fa84c66b7cf0c8de1beca2605b5cc9284 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_groups/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:442f258806824a6df94ccc4146736b24a20c6a6f364245b52647343bfa918b9c +size 433 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_groups/test_data_set_0/input_0.pb 
b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_groups/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..c4f52c605019ec7fbc63a27dc85290f16ae853b0 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_groups/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8b65c22420c44ac106a9c649881a41b6efa344f57387b2b587d3de72302d5cad +size 203 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_groups/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_groups/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..ec0318e4d72d079c119e965dd534aed95f3dacce --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_groups/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e8d308d9ab17ca6c50ac662ae7d4834ab574d848dd3aebd567287c95fbbc2fc +size 203 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_pad1/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_pad1/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..edb7c82174c729d4c7b5f7bb8e25279b27d6feb6 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_pad1/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c04576b8b1a0d55ffea26134abfcddd50161a3021eb96a55309898b6edd3818 +size 525 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_pad1/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_pad1/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..eefc9ffd762a57fe82cecc90358f332042575e04 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_pad1/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bc2b37f24957817bcc538c5a408036e2b8748ebae583267e2649e50faba4ff4c +size 331 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_pad1/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_pad1/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..92082c77f2013b57d744364d8aad3312cd6e71b1 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_pad1/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e65ee1ba0ecea1aea5ff5a58efe04f24040f7b4d5811a9e5dfb50a11a95cb6f5 +size 411 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_pad1size1/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_pad1size1/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..0db293637047d1032003e44b7a78f4ba7f639921 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_pad1size1/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44f2689b09450bed99316db0c25ea8e987d802dba9ad8b9582f9cd13e34cb26a +size 473 diff --git 
a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_pad1size1/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_pad1size1/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..ccc68470e033faf2d79ce3e039f4e82aaee560b6 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_pad1size1/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:055f4bf74f8c8f0fe1285268d4a0ec7244ecf4a35f0c46183733ad1031006c46 +size 26 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_pad1size1/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_pad1size1/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..30cb9478a4ddda8d089d960fd63886e282939066 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_pad1size1/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:42b84f52453522269a406f5ca3a69e07b3cf9fa23cce0aa3446e03cadd71734c +size 26 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_pad2/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_pad2/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..a57220edbbe79fecfe37a7816951fa2265e7a53c --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_pad2/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e81b5f92462708e1ff39d75006bd9f8ba35b6f08e8f585823caa271710815bb0 +size 685 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_pad2/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_pad2/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..a88ce3dfbbf2047fef2819f97f16784bde8ad4b6 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_pad2/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b02153a44147d942cc958b858e926c5043a8ebb6a71b542f479eeeb76686e54 +size 331 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_pad2/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_pad2/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..4db1a456872fa23215e550c6f2e432e13392b06b --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_pad2/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3fc2370839a5bd4562879eb3e02047be8fe1c2afc53f802d4da6f89d889e3f86 +size 411 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_pad2size1/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_pad2size1/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..11dbf841114f55944fcb9b8fbb38a571b3bb85c4 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_pad2size1/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 
+oid sha256:65645f48212b3a34cfd6f1d96109421ccbf7a680aa1cd757948650d908556a46 +size 601 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_pad2size1/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_pad2size1/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..7df21a5ff73c17751a71f7664f2e9a559eb2e4ba --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_pad2size1/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b92fdfbfff19258ca9a1ee5df8404b38b85a3d0e004c4fca3c73884371aa7ada +size 26 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_pad2size1/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_pad2size1/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..939d4d2b8bfac72d72d49811cd84ad0f71d21d08 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_pad2size1/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a0af238013056ee244f3bb9259d6bb98c2830e3141984d15b6cd7bc76f60dc37 +size 26 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_stride/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_stride/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..76b8e58da4d1c0ea3793425a7d09263e5698df29 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_stride/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a6d8e22558acd9e17f71883bc09b90cb4d6449d6772d98eea082e5da149d9d1e +size 525 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_stride/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_stride/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..f963d176847f4b0c07003be12f4abdd88506c850 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_stride/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f421cb144abf8a4fd5e4b6aa4fb99ccfa48d78738886e22ebff8efc9335246b9 +size 331 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_stride/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_stride/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..69b707a88eac6ae6b84b2ffb98e94580dadd9c37 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv1d_stride/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d29361ed8d237ae03f4460b5deb6828a9675aa2a99bbe633edc2f58c084e9448 +size 171 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..b7126ab14f7a09fcb1debad56ab5a4aa1cea5772 --- /dev/null +++ 
b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb8df62b22401aa644e46e13b55b7ac5f3c3814e002ff939a4bbe112720fc066 +size 593 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..d67a0a2316c2f6ad5b2215ee81a60ee0502fbc51 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d4e066b8dac7d27dadf0d18760045c8db6ad2488da2b9b4d437af9449f6d7795 +size 853 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..5d3ae14acba23c7395142c60bf936636ac0f954c --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:26593b3a6d313f14df4c3dc44649b5540a34c7f9563f5334d6fb4343e30e1870 +size 653 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_depthwise/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_depthwise/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..a1d438e564271a5d9318119e98e405685f86eb3a --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_depthwise/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:89f453d1aac82099bdeb72eef7656f499190a12d8e427421df91b00d9545fc15 +size 449 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_depthwise/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_depthwise/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..d12d879486e0b1f9e3cd9437564812d414b6205f --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_depthwise/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a083c77fd517fd532dfeb36baf8cbe5b06c27f73dad6b8bd773633c8fb430809 +size 1165 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_depthwise/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_depthwise/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..4dbdd5ab694bdd605ed72690a402e5c9022ffad8 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_depthwise/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fd13e6427aa4a23e1a55cf5b40884789a4938204c4deddc0b5411f2f50e175d3 +size 525 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_depthwise_padded/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_depthwise_padded/model.onnx new file mode 100644 index 
0000000000000000000000000000000000000000..d96078087bcf015503fa06e1ba662fe88ff756ff --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_depthwise_padded/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e91a334e84ec5e532732b3c8a4323d3ae6294cef7fcb5b6c906f3da1f59aa84 +size 449 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_depthwise_padded/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_depthwise_padded/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..6bfa3f6fb1c9ad08f00fbd380b9a4666995f3f26 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_depthwise_padded/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f118af2993a3d8f5a3fffe7b9856ecf6b9c65d1cb2752f6e31708aeff0f5fa1b +size 1165 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_depthwise_padded/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_depthwise_padded/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..a9b54e444dabcf5070de83fc0aedc75d596a834e --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_depthwise_padded/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0c73bbba981d2a7548cffec8d1a37ea07d69ac56c40e858a1e1514b83e6e9cd0 +size 1165 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_depthwise_strided/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_depthwise_strided/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..966168acdb3835b0fb91ac7fc1f08e4cf222d0d4 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_depthwise_strided/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f4164370dfc5438879f86bb22a7b2b162a8596e0c1855cf1cbb663418018bfd6 +size 449 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_depthwise_strided/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_depthwise_strided/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..858083efde005594562359ba6dfec9a8ef1886a1 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_depthwise_strided/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf8c52c69446d56c0225d787d5dbd447707ef29ce1b4283b22d003413c24a0db +size 1165 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_depthwise_strided/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_depthwise_strided/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..6e4e119b6507302e4f59196a0e381ad582969fa5 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_depthwise_strided/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:84a30a3f316a3332505bf519ad6114e2eb63c87ccf6af494ae333f40b71c081f +size 141 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_depthwise_with_multiplier/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_depthwise_with_multiplier/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..cde815b92a11f9c867054209e558d57276196b6d --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_depthwise_with_multiplier/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a74ce65ac65a324b29d2a52fe5d4ec35ae1bb7e2cb3c85a469439e077f10fd81 +size 609 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_depthwise_with_multiplier/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_depthwise_with_multiplier/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..d70e50ce2e12ccafdb644e28215833e9368314c2 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_depthwise_with_multiplier/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e103ad686b0bd49ced278ea064dc53a0d2adfa1cef0bf6fe81abdb53e253aef7 +size 1165 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_depthwise_with_multiplier/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_depthwise_with_multiplier/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..7cfee0cbbbef70900c408031a4cbe99aaa3c0dab --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_depthwise_with_multiplier/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77f1a0f0fc22f9410897264f208675688a4516fd70862e947e397afa3eb8cb1c +size 1037 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_dilated/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_dilated/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..bb7a8f8b430890de42efeff7a5e80132ec9b4734 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_dilated/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3384e8834947a897ab76ef63702b8a3630ae115396d21f44ab8bb7e93ee18f04 +size 513 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_dilated/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_dilated/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..0d24712220fd5444fbda473de7161ffdcd0a228c --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_dilated/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:47a18a7b21299a3077fab44727b9a78c6ad345f2821497e0b8b8c1127662187f +size 1549 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_dilated/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_dilated/test_data_set_0/output_0.pb new file mode 100644 index 
0000000000000000000000000000000000000000..cc600bd6fb7652dcc5cf29b8b0744f1111a34d8e --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_dilated/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c34da36bd4318d8183c3ad3a69693bfe84abe65502a12b66cad6655667b57afc +size 157 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_groups/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_groups/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..16ed9d1b02514da006e059fa3c261936ed28b973 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_groups/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ba1fa246bcd26c5eabf7a26ae0d4c69ca57bdba1c10e7bfee69863a778a21092 +size 601 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_groups/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_groups/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..ee226f55c028d687bc7b40baa3e2c2d0b3c32e24 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_groups/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:013cc774e28717b58b8c45922664ae5bfeafda7132848da2c51076afc95b7efb +size 973 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_groups/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_groups/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..d87d9b8423232cc82638a45e5846be8709cabd2c --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_groups/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c4092923a2c39422200270d42b6e667dc12b9ad74c355e2bee6d327d1312f91 +size 781 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_groups_thnn/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_groups_thnn/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..b1c754ee80b0c0cff778a38f1a1ac75a58671d0e --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_groups_thnn/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e31fc9569a5c9b5c41d84b3464a752bb9e084f75f551f37aa45d8b4ddb115590 +size 601 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_groups_thnn/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_groups_thnn/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..950d1a241a1efef5e622f038ab235bd7b569e354 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_groups_thnn/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b6ad13ef4861ac3f32a9515a45070d9e4756dfb1d32ee2fa3aea6c9a7006f43 +size 973 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_groups_thnn/test_data_set_0/output_0.pb 
b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_groups_thnn/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..cd98a781b7faf0d6b68757537b32680ec5103b01 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_groups_thnn/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f7d1a23c2ff27ae1ea300e25f9f5873b736b7e9b6a9f587aca2c87e558728f8d +size 781 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_no_bias/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_no_bias/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..aa021fc0a496bbc0c931ab1fa79f012d545838e3 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_no_bias/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:66c2ee5a70dae7f8ba9acd68e7c44e7fa35e05ff36247abc27283a64ad233714 +size 546 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_no_bias/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_no_bias/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..3c5f771274f244008f1a3f624d7ebbcf2901f4a1 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_no_bias/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e408d720feb472f84076f2a552fd4c7f476767b60e1128b21a1607e00df3ccc7 +size 733 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_no_bias/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_no_bias/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..d850e81209bbbc5f5421d94f6e8ac6bfbccca690 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_no_bias/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c7f991938036d018e9d27dc36afa8ad5e92d82f31d75c733c35549275022ef8 +size 525 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_padding/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_padding/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..dd4239fa090ed66fa808fccd7a9f048ade8b988b --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_padding/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ed1ddb4594fbaf1242ea597fa5aa47f4bab10bac4b3172df8e331b392caef0d5 +size 737 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_padding/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_padding/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..a602ab7de57f370188dad4663add07ef4b65744c --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_padding/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b2f97b0f64f9072a269f649f58ee247bce3eec5552f78b84402236ff1cdd34a +size 877 diff --git 
a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_padding/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_padding/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..635c6a05db14dbe83d93f9bc89184e865c82a53c --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_padding/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:61907fedef6ea80f04475e6ad507a71dfac8517b767d09cf80e4db2d7f0217e5 +size 301 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_strided/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_strided/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..c0bacff326bfd5b8796c7cb1aee64a73b79bb5b7 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_strided/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a067ef80bf9ad828224c1841869ecfa96ce56f1f9b6ac884b1c7a050a9978e2 +size 737 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_strided/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_strided/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..972e1e9cd4d474072a33795a2fad6b3f91bc88a2 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_strided/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c895e7ec7d22fd3d56c69f8299491e989da8c9ef5bddf1d5b5348deb957450d +size 877 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_strided/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_strided/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..df0f4d80a6e2d485255b679cd5b5cb40c5914b74 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv2d_strided/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fce395275b74ee506c0f73a6c8c8e10850ddf6241e330d7d8255440b894e21a5 +size 141 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..01f4ef26ec61759f2854d98b03d60bc421880a4f --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7a2643307ad773209a837bd20de6bb03e82d9f08d65459ab78abb90e5bdbd80 +size 1481 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..f7a78f0642914732645c9cde51468935799d462e --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:e087f4b71a027f4d92c726fe4cab154a69dfc8f782c89525d2f94372385bff76 +size 1455 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..ce761baf740391242dc26d6e3813ed20f7bca52f --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8bba44bc357e6527b24b6ad4f7a61a14afca2ca18f32dba514b409c7b1113703 +size 271 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_dilated/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_dilated/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..fbeb60b38543b2f14d1cb4e9746f1ea5bad982b3 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_dilated/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:809a11e78f1a126417ee4130f837a02f5f25e961bd3d98911b89b20066492cb0 +size 713 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_dilated/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_dilated/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..b9de1156666e1ac5180f295bc2bbf690b70d957e --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_dilated/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b2053ec05ecba39974770157fc581757607aa4ee8f064f02e90f15b98977ec9a +size 3015 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_dilated/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_dilated/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..04b7970deb14245f22e10659fbefe1737e9996d0 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_dilated/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6555e6ecf6ab107be30f415170bc83c34b024ad8f0360f4bcf27c1d9814b6992 +size 879 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_dilated_strided/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_dilated_strided/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..63877f57c886504536b340f7f116aeddf3f9501b --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_dilated_strided/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:305efe8553722b1a5239808f3ad32a8f83cf2ee999bc4c8d2e928a747d1567c5 +size 713 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_dilated_strided/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_dilated_strided/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..bf646caae345b0a32987efe2047a31bfc5b2d408 --- /dev/null +++ 
b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_dilated_strided/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:12416934f234b95f8f12d66aaf99c9e032f1864f782a2a7c28da731c42f452ae +size 3015 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_dilated_strided/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_dilated_strided/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..e706b351621fb0a2b7c363d4195ae7e11baa4061 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_dilated_strided/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:528c5c02a23d1c73844452a46167f6ec6f41dbcf8c15a90102d200a18850881f +size 271 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_groups/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_groups/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..1b26be46e416edae998f9582b521050c21d0967f --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_groups/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bfd202874c3886a639b27313c83f768131e15b583b550e96567610a0b49ad36a +size 1633 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_groups/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_groups/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..6486373447a5cb4f1b7019d39f65c8f550b764a9 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_groups/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33f3d14bdb3aa33e44e51e211a34891bb3ad25d7c62d6196e39167fd4656618f +size 2575 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_groups/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_groups/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..edd584c97ea0ee62d01abb7ed2c4fe55148e9678 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_groups/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e93da49609ce30ff9c41138bb25c1d984711966bf8639b14dd382ae4de46fe1 +size 591 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_no_bias/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_no_bias/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..abcea49d7ef49d4e6371098359162e838ba19a55 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_no_bias/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33e21992067ff81af56288b042fbadd941455dfb54145e54baa70d9e2aa7fd39 +size 1434 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_no_bias/test_data_set_0/input_0.pb 
b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_no_bias/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..0a25a10681603c014111edaac5d27966754dd566 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_no_bias/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fcc1d2a91d6b17f5d46e513fe076e322735f10ce35ee29de8b4e6ecb570f21e0 +size 1455 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_no_bias/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_no_bias/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..4e1c467c9daf996034dd74c440e1d9472f2236a7 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_no_bias/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:10b9d466ee93b9bdcdb7e49ba809ee67f6c5d7aeeda56aaac43f13136aec6b79 +size 271 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_stride/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_stride/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..220ba83b98c37d8241bbc295583ea12180ab2ed5 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_stride/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1196f59fa6d825c88795c64f033574f168bbab51dcc6f2570b2e5a3ce7f29d0e +size 713 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_stride/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_stride/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..2009353dce4b3f2a9a835089d63afdf837d5d3d1 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_stride/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d29385285f62a97069578354a79e7588d6250a8783e43d7c5d18b971e9f5d07 +size 3015 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_stride/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_stride/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..0a3da6fee730efe984d70d2688468b147cd28dfd --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_stride/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:401a6bb16f11ce656b0282d5334550f43d4338166a87d62696ea0db830253ca5 +size 271 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_stride_padding/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_stride_padding/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..4045279ae635e47a5e74872dc2e8e648d3b582f2 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_stride_padding/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c33d6d095c325e667858235d912affc8c0876b4b5537187afcb98118fc8be2f +size 713 diff 
--git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_stride_padding/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_stride_padding/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..db58b80fbc5c262243af030abe37486c43ad2511 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_stride_padding/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4acc121a8c18908fdc7cfb24d60f61115b2eadeacdfc9da014927f68f8862b08 +size 3015 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_stride_padding/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_stride_padding/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..b7fa44164cacc95a1220e71b2299f139f1de338a --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Conv3d_stride_padding/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6e68b433ba94ebbbd9ca0e7485d44ee3a414df92d3a764a6ca4c849ebde32b56 +size 879 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ConvTranspose2d/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ConvTranspose2d/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..ada985df8e0540806bb484446da9fc74aed90652 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ConvTranspose2d/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ba26b02adad99f5aa585a2958df0fdecc783e70a563f8f8181fb205aa00702a1 +size 772 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ConvTranspose2d/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ConvTranspose2d/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..2c0bebf450ec8ea29bfda77fdf969ae3e148d182 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ConvTranspose2d/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d38576c232a6479be9297b240e0c97fbd10f6ec3da87f8faa673e0767e66e70c +size 517 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ConvTranspose2d/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ConvTranspose2d/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..b3dd5629ab98d77fce642e9b040b3877713ea366 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ConvTranspose2d/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dacc56b95f1562eb365f9d52856de44465d707f62049a3f8a456cf915c56beda +size 3853 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ConvTranspose2d_no_bias/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ConvTranspose2d_no_bias/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..94811f4d66105f36a2c90d0e79992e3d309d0997 --- /dev/null +++ 
b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ConvTranspose2d_no_bias/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e29cc361f4ea0ce6e934a2d00e73683b849fe150b300b9219e3ea0fca31b6574 +size 725 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ConvTranspose2d_no_bias/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ConvTranspose2d_no_bias/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..24738049c379d46304d052aa5989878927703bd5 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ConvTranspose2d_no_bias/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:703ada80743b852fb1b4dd5f308196c8c5c9d8d7ab6709ff2520fc08d6521802 +size 517 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ConvTranspose2d_no_bias/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ConvTranspose2d_no_bias/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..b0ef72510778abad814385cc99e21848f70d82f1 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ConvTranspose2d_no_bias/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:200d0bdabdb5e4eefac4cefad56fe29d6241215d9d2b84af29bea0ba3efa92dd +size 3853 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ELU/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ELU/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..190b08ccd45e9a51c6f4226ea380230adb678f6c --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ELU/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d927976b1d0c2339216bc1dadfd71cd9b40b5cc4cb214c008289ab1a1f716da2 +size 120 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ELU/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ELU/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..4e7568bcd30b05c9f3d790a166f91dcda59687e7 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ELU/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:96ebd365539a8102806c18c1f7e1fb4c195810523652abb7153411313c9b9a99 +size 130 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ELU/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ELU/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..d5bf261381f11b2a2f1ef2d69f81eabbc920901e --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ELU/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de9dd7efe99168b2fdb2cb1cb1dafd987718dfe1e7f9bd1b6c027ae2a2a15b2a +size 130 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Embedding/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Embedding/model.onnx new file mode 100644 index 
0000000000000000000000000000000000000000..9715208d1501bb8fa00c32620a023f2c9e72c7c9 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Embedding/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff4a3e2cffc38cfc1b056d03c5aa79069ae3ee37651e35f851f6e62a7e1d67d4 +size 188 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Embedding/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Embedding/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..86b10f76760aee04a3f4a85e8515e22dbf26fe1d --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Embedding/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff4adcebe31cb6232f5b2f0ce8d49656c9b86d5c4b0032c58ce86c4eff3120cc +size 40 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Embedding/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Embedding/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..763f31de2406a7e1060e0beee2a371675f637c41 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Embedding/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f583e01e392d647095d12fbd8ff141d9b2e685b765220a35175283432a76512 +size 58 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Embedding_sparse/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Embedding_sparse/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..09cb81a3dee9b33b47744b47743adcfe470f2294 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Embedding_sparse/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:179bd4f6f4d062aaee42795d277f1048c30b3ae2d02440df9b0cb65e50cede16 +size 188 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Embedding_sparse/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Embedding_sparse/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..86b10f76760aee04a3f4a85e8515e22dbf26fe1d --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Embedding_sparse/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff4adcebe31cb6232f5b2f0ce8d49656c9b86d5c4b0032c58ce86c4eff3120cc +size 40 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Embedding_sparse/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Embedding_sparse/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..67b867488cdac777f595008525596e564b7b56c6 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Embedding_sparse/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a685f01e2bcac20824b531da7b7e953394261bc76bc975f648eb4d01552cb2e3 +size 58 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_GLU/model.onnx 
b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_GLU/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..470a07af9c98c241e3854afad97df29da260562d --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_GLU/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d09a105a897b5ebbde7a0473fda31780c22a8c941a83c125fab44b2b0476aef +size 156 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_GLU/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_GLU/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..ea9cc928359ebf76cb581823feb6dbdbe9fc5d38 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_GLU/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b7c06408c186f018dd09a2943e49808caa7224050ec46eac04e4e411501279d +size 128 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_GLU/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_GLU/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..5c7dceaa1666c38b6f3b866204ca043cacc8a2b2 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_GLU/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:285cdbdf161e15e03dc4798a94d848e805ed7d80b3f7d38df035731b5a228661 +size 68 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_GLU_dim/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_GLU_dim/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..1c6484f396095c593a249c2a465d8e1711f8378a --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_GLU_dim/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ebcb7132d4e866c31d0ebb82659dc0e86cd6c9552a24909d82c86e6a8fa0ec2 +size 155 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_GLU_dim/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_GLU_dim/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..207936e80833e9771e64c312a04306869767feab --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_GLU_dim/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa06819703dc94eac1a11a4359a4502a8e3b310cd4ad7707128268f68a7dbe73 +size 851 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_GLU_dim/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_GLU_dim/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..c83b67fb5178cb3cab803106bec4e153ade8ebae --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_GLU_dim/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1041a6ce9d670bdd5038ca4edc15cfbac48ac18481a5aebae0b48e90ecc2247f +size 431 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_LeakyReLU/model.onnx 
b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_LeakyReLU/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..040602a984e4d887c34803d8d7a9c50fab634eef --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_LeakyReLU/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:890a6d8a3ea0779d838a02bfc15664d4f3594a4805c20ccec74c3a89e3cb79e1 +size 126 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_LeakyReLU/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_LeakyReLU/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..584bf629b24a6c00d9300d91bf7a140abde9044b --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_LeakyReLU/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:48aef0448f20fa8d66bd23630d2f6eb57b441bc01359733676bd5518e5b707b1 +size 130 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_LeakyReLU/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_LeakyReLU/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..c589832d33ed84dade8f7e4134ca1f04d536bd2e --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_LeakyReLU/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:abed185b01eda5dcfff4359a9fd21ba24855d69f6d8bbdcc3289e4149a49ef5d +size 130 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_LeakyReLU_with_negval/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_LeakyReLU_with_negval/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..ae0dc5cbac8515f62bac38827552ba8b3f80109b --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_LeakyReLU_with_negval/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:34ce09526f79bd9bdd333013d85e43dbfab55f27e6b5c0aca1c445e887290be3 +size 126 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_LeakyReLU_with_negval/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_LeakyReLU_with_negval/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..adf526be2e89c58397ca002ceb1bbcf27fdabaab --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_LeakyReLU_with_negval/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:763617c7449e8d37bd8b9c0715c284a3669bb38cdfa4c6dc44bb54c629024891 +size 130 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_LeakyReLU_with_negval/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_LeakyReLU_with_negval/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..12f2efe6266439600421fd1c63b800708a55e958 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_LeakyReLU_with_negval/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:042bc3bf628343a8fe2ed916cf965c4acffa66037255806c4c4a58e6cfd2c584 +size 130 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Linear/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Linear/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..cc341c7efbdf93746b7dc935b97aa4c7587bea26 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Linear/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:60a3546925d832686c788d3ea22732feaef5bc527e04691c32ec9f677c81f612 +size 585 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Linear/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Linear/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..fc060dc83ff19ad2bf1c52f88055d48a231f9d40 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Linear/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:11504362c0fa099cec3e80034c27aa1c99966ab493be7a92524b9c06a37c61e6 +size 169 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Linear/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Linear/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..f5d536a7e957140caabf6d5272349f4f88378fa0 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Linear/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63631630f831239f084c258dbfd475a647d1f88e352402aa220773c6f4e3a314 +size 137 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Linear_no_bias/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Linear_no_bias/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..e485330dc41211a1b74ec9f8566a08cceb6dfb75 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Linear_no_bias/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:071ce49b698635b090487697ece6d1822cd025d0902f4a39f9711bb9bd433e8e +size 492 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Linear_no_bias/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Linear_no_bias/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..4b1be3c4c09968451464bd87109fa31fc1596786 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Linear_no_bias/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a5d4d4fd48dc94b5da890bfcd5f72dda17f46b3322c3e90452bbd1e18ede7806 +size 169 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Linear_no_bias/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Linear_no_bias/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..eb63f24dfdad5e47125c5680eca678befdf76763 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Linear_no_bias/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:18ba13745d2637359625293b121d5d5037748c9b4661166ff75e45bbee30ad8b +size 137 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_LogSoftmax/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_LogSoftmax/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..1d3bb1c738a2d8bccecd50ce724fddc01b96fc83 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_LogSoftmax/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ece1bf87cacddd80dff54ddf083671484d6c9dbc2b919138a83cad94e9cb9b47 +size 115 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_LogSoftmax/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_LogSoftmax/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..db0c65b2872e2779770d8dc407cd733f4ce5ff5f --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_LogSoftmax/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:688bc46f385aeb3556a0e3320d09345f4f85ef271598e4d040f0cfcc22bf7dd3 +size 809 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_LogSoftmax/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_LogSoftmax/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..f319d1dd5ab58b06ea34d090d3ee857a9dedad4b --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_LogSoftmax/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91c2df10525d3a4e3a627247d8029f0fd6bd51ea0cdf94d928e2d9dc7d564dd5 +size 809 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool1d/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool1d/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..1ee22256c16bffab6ce70d4eb25a330a88dccb0c --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool1d/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:204b5c894d67495d151596eb21fe7927a95fa59ebfdd55cebe0c8f6dd4ad9ebe +size 160 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool1d/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool1d/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..0aa585241ddde87572a360963ffdab52701d8578 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool1d/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:346b8f9cab3a8087256e4b782112f5cfb5af6cf9bc0341aed9f5fe49a35578f1 +size 331 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool1d/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool1d/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..f538042c19e08377e62544aa97ef731b224a84ab --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool1d/test_data_set_0/output_0.pb 
@@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:efdb220cd29ef3ae83d1f0a1f715a009ab7f225b71be6ee7f82fbc87a1cadaa6 +size 90 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool1d_stride/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool1d_stride/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..1ee22256c16bffab6ce70d4eb25a330a88dccb0c --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool1d_stride/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:204b5c894d67495d151596eb21fe7927a95fa59ebfdd55cebe0c8f6dd4ad9ebe +size 160 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool1d_stride/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool1d_stride/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..6b3979c7297edbf27f05267c9b01ee401dbe759d --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool1d_stride/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:53a7e3d0a090e2c820183814b6ab0fafcd5bded86ec520df21802e20adead1a7 +size 331 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool1d_stride/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool1d_stride/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..636b0b893d999f588775e85adaacdf8a7cf4ea08 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool1d_stride/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f65eaff0f88c328d9422c03d998a1972e4bb7b0c5db2ba9f6103f0a775c3900a +size 90 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool1d_stride_padding_dilation/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool1d_stride_padding_dilation/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..528a2d3d8b23a27a82963f19ccc96266658dfb93 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool1d_stride_padding_dilation/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e55328f92cc0ad554cc9bda789196d71901e7bee980457860871ccdd18f3556 +size 165 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool1d_stride_padding_dilation/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool1d_stride_padding_dilation/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..590979a501f7943c3d1cc63e7ff6191caa65e3c3 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool1d_stride_padding_dilation/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e2a26f50e594a377fdd68f0975d39311133a3137288662ab212c849de7c87a23 +size 880014 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool1d_stride_padding_dilation/test_data_set_0/output_0.pb 
b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool1d_stride_padding_dilation/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..0a76007056d497edeaffd7cc34cf6bc80fb0d330 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool1d_stride_padding_dilation/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a4e831f4939253f7beda74b90d9a5a6ae245f4da5405c0e337d3936325658002 +size 87298 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool2d/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool2d/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..92086ee58d142b17e6115d5d423a0b64ccc572fe --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool2d/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eaac34265d1d2597205177609276ec2973c4b5bc483c1c414a6b9653b4d60660 +size 176 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool2d/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool2d/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..8777cab21460764145515981e46d147677611010 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool2d/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b3e2276af48d653ea007a533aeb0843f85f4de66f704c34a57c1d9c589bba68 +size 601 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool2d/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool2d/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..09c1d300854363e22982b0245ad69a60c6b525e3 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool2d/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e4aeab61341803bd821a566fdbbfa836768f2fbecfb400c6ec2822f17fb0c98 +size 205 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool2d_stride_padding_dilation/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool2d_stride_padding_dilation/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..79aec42e088a6c1a4adca001b40726856ada5ef8 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool2d_stride_padding_dilation/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:07d74ed26c95a5990f39335113f29258d199d1adb300ba8fa112292048c15a0f +size 180 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool2d_stride_padding_dilation/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool2d_stride_padding_dilation/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..a9e215b4219a1b0ca108a9d9aebe56f1a948a7fe --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool2d_stride_padding_dilation/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:28444dcd7d1d3a1d9d6564dfd1d561ddfb6e6abd0dd5a4d81fd917252b888df5 +size 4000017 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool2d_stride_padding_dilation/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool2d_stride_padding_dilation/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..ca8ea6f4ecce464ae14fe84cd41e1396f929b145 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool2d_stride_padding_dilation/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa681dc60c527639be817a75ebf586fdb1909fa536f5745998bccd37cb258e2b +size 4313 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool3d/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool3d/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..95175072891413c0c9d7d811089d3102d17a07ef --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool3d/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:124548c54d86bddfb4dce2ef87343074ff60338f0895ad78a4d9acc921e54c9f +size 192 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool3d/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool3d/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..a61138a3193311c4594f6f45f2720e3306446322 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool3d/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:749e4d5145052c80652a003c088c6cd94d427537a515033a19a39572375f5c7f +size 3015 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool3d/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool3d/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..3b3c6165a8812a7be55b6cf8cb3381292a3bf463 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool3d/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3314986cae66c39f04c2e733ea89a691549cc4b05b6921f18672286986c8a2f1 +size 207 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool3d_stride/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool3d_stride/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..95175072891413c0c9d7d811089d3102d17a07ef --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool3d_stride/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:124548c54d86bddfb4dce2ef87343074ff60338f0895ad78a4d9acc921e54c9f +size 192 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool3d_stride/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool3d_stride/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..2fbe7614fecd78d93369305ec8cde8cfc51e04ba --- /dev/null +++ 
b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool3d_stride/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e3167f333970b64ada34b7c10cc0e394d1d1f3b7117822b868dea32824af7e27 +size 3015 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool3d_stride/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool3d_stride/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..a613046a791b4727539b0e14ad2aa3e02ed94ea1 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool3d_stride/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e47af8693e16b69e55ce86963353a6cf86288d862c566c3749cf9621e964f8e +size 207 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool3d_stride_padding/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool3d_stride_padding/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..9cc48230a8a3759bd9a82f8806ea1adfffc3a8a9 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool3d_stride_padding/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63ed68a3e9628f4ea8a587ca130bbae437bf13fc27d7f335d2cc91c990fe3ab5 +size 192 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool3d_stride_padding/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool3d_stride_padding/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..7939339253985cd3c55fb9a1ce0b361fbf522905 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool3d_stride_padding/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bc5662f7cf66ba3e37036da9f3ae0dd32d937ae2c88ab1754045a97a8d20d1c4 +size 3015 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool3d_stride_padding/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool3d_stride_padding/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..8d91969e52d3b272a846f1e93f07f37d356ea6ca --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_MaxPool3d_stride_padding/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41633821dca76774de04458996b59c9a22ee47317cf86503b088d4f1145a7913 +size 663 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_1d/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_1d/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..2f954dca227e754438864cfbfea354abe7b6ebd6 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_1d/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e268ef13688982483bcd14d2d18d0cc268b7adbc43ccf3405c5ea5c36916d010 +size 140 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_1d/test_data_set_0/input_0.pb 
b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_1d/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..33f33ddf95ef9c5fa6d24b47b213bc25511e00ad --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_1d/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7700a3e5d3a8d176f42d6a0511ad363178cd08129e93531bee9f65a2a9fea816 +size 106 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_1d/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_1d/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..282a523078136b47238e164455bd400339583e38 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_1d/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:59555fc3b94de1b50ef7596326e382ac5d1a02c01578eed5c4c58b2423cf7f6e +size 106 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_1d_multiparam/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_1d_multiparam/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..ad19f8c1b1847e042882c1cd0eb423fd83c2d798 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_1d_multiparam/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:22f63cb6948493484995b46db7927f233ad0b080eb6d8bc67519878ff9008328 +size 148 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_1d_multiparam/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_1d_multiparam/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..c8e3bd0525cb50ad21a70c48b75a8a08325d755a --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_1d_multiparam/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a94825136b7d4e5186d8f38308daf297de886423094679f6b3690528fb6276e8 +size 106 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_1d_multiparam/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_1d_multiparam/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..e2065cd67cadb0d66117aa1d7c9f402fc1450c84 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_1d_multiparam/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:913c1d0ee63425ac255c3b50c8a6daf1a95ff3b3cebcb5be1f4b2f722259211b +size 106 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_2d/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_2d/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..874a3181a257a09d58ad0c792f12dc2ac8890329 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_2d/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c2df9a380dd056908b0e54e9acbbfee504d7282f71a85a0017dd06ea6fc3d4bb +size 148 diff --git 
a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_2d/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_2d/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..c2188da5d33a18d8f57f5803465ec79d9a7b6f33 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_2d/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0f59244e7311babd88052926097b2138c5c1337d53ae9de072c0671cba31c0f6 +size 493 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_2d/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_2d/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..013c555122d8cef0b94d30de2d2a089ced3658a3 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_2d/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1bf0e9e8fba2136db930362524a9da8de89894c0b0593c26ff712f1633b37eda +size 493 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_2d_multiparam/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_2d_multiparam/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..1bd17587ef855109f8436bdb8a37c845cf6aa225 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_2d_multiparam/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:92e920bed8f828c5116321ba6717996fbb8f901972a257cf0920c8d8ef7d264f +size 157 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_2d_multiparam/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_2d_multiparam/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..c9fae5c4c0ad181ddc071216db87c85986b26a27 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_2d_multiparam/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3552b9cf7041fbb7cccd2a440157bd289f3fb235bafe8b5e43e945b34ed83260 +size 493 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_2d_multiparam/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_2d_multiparam/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..a42f418000e77925370a649a60d1d9e74ce4e68a --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_2d_multiparam/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e523e4454fe79323d2a966b01cffece48e6716719771e6bb8baae2841fef5876 +size 493 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_3d/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_3d/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..bcf37ddc75a59636851e2f80bac1204f3184924a --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_3d/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 
+oid sha256:56f66df2d06fae1d6fb9a5d871adc3c5e54e0cec69b33d88692456b4f821fe34 +size 157 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_3d/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_3d/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..fc3a2381a0dc0e6fedb894e5cf02f0675f79ce75 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_3d/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9233487531057dbd7aa0c47fdb58271f4fb1d72b9f436d2bd09270b44b7ee27a +size 2895 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_3d/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_3d/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..dd029aba7a3f59f9835a7e08db7d1ab2547e0e1e --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_3d/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:93a57c84d6941172a76425132a5c827000db3ddc99c1a6eef7b0f8ab3c94eb71 +size 2895 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_3d_multiparam/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_3d_multiparam/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..7a4397ca6310de0b557884151ee886f1361f93c8 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_3d_multiparam/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e88e381630d135d3a09aec1d79ec2dccd6506db4d8b8fb39d02c724b6d510c85 +size 165 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_3d_multiparam/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_3d_multiparam/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..552cbd9a43aa199afbfbbf5388ba32df52eb6574 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_3d_multiparam/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec22db847811f6385febe7d2c7da155433e4c81dea549c91a90555b2333c4e70 +size 2895 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_3d_multiparam/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_3d_multiparam/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..44737e4623ea335086d708e1a5828d7271b4aa01 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PReLU_3d_multiparam/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6faa503f34b7f5bae1833fc7919b10688535aef4593fe38d825dfa9f6bc688a0 +size 2895 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PixelShuffle/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PixelShuffle/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..54783b740bedebc98bfb0ad916f85c05290f47d4 --- /dev/null +++ 
b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PixelShuffle/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e0912a957ccd84ac2a9e268b08e592152b9e59fb8e9fb55b6852abe362881ca0 +size 331 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PixelShuffle/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PixelShuffle/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..d49e23035d390c2623098c8448cf4dbbd3f423e7 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PixelShuffle/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:039efa92fb46bb159c808f3bd96e7e73cf463015758904edafa673b2285f25fe +size 589 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PixelShuffle/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PixelShuffle/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..9e080deb7361d56344cdaf918753c7a28cd21e42 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PixelShuffle/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e9b075273812c8a180fb7992b6576430aba85a0cd550235fe98388c9679666f +size 589 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PoissonNLLLLoss_no_reduce/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PoissonNLLLLoss_no_reduce/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..08bbc88d168f87c3e6de44c20162c4c9447b255e --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PoissonNLLLLoss_no_reduce/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b425a3f9ae177a598eef2d9228a86604acad45af60cd61f4e0beb8071a180463 +size 569 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PoissonNLLLLoss_no_reduce/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PoissonNLLLLoss_no_reduce/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..b9f34887906fc2a439b5a92c4acfdd1bd418f3d6 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PoissonNLLLLoss_no_reduce/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50a343bc36cc0031cceb7205f2dc06f50203ea94c9dc276c77701acf309eb2f9 +size 409 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PoissonNLLLLoss_no_reduce/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PoissonNLLLLoss_no_reduce/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..8f98f2b2d7eb247d212f4893e627331b5fbd8b72 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_PoissonNLLLLoss_no_reduce/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b6832c53d1883d5318679b2b0513d973ac64ffe85e19cb0224f4de6cc64880b +size 409 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ReLU/model.onnx 
b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ReLU/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..37c026d529f212fac16aab8bbc0333c0afd106f3 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ReLU/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e786d816f737440d4b8ec7faf2b156a686b3c88d1ba3fe15f273b316791114a6 +size 112 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ReLU/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ReLU/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..f444d87b5d260a95e8b973e48f1ae7669953973e --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ReLU/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:132da94694a056f661029fdf5dd24b215022764a6287fed74cc6acd9a53fc776 +size 493 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ReLU/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ReLU/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..1dce3e12363d55254cfef4b853e39a4791301b96 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ReLU/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a752e31026e3dc21edcdcddd22bd396c1ac3fc678bcd639dd31355b2d020fa92 +size 493 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ReflectionPad2d/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ReflectionPad2d/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..06b152a68146e68d625418693e0835d0445a6f44 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ReflectionPad2d/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad18f5806ab0b52956775e04e63a53b7c8b881747e82c1e4b4e147d9579c9696 +size 159 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ReflectionPad2d/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ReflectionPad2d/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..49f2282aeb5adfe4c674631d971414c109c2144f --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ReflectionPad2d/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19dac24e73ebf6545563f5cf44378c0e0efa6d60cac5cce66cba6b6e1173f350 +size 1549 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ReflectionPad2d/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ReflectionPad2d/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..848dcff82a63f83fb17097d07ecc4aa2d8d458f4 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ReflectionPad2d/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:308b4a7a7af1b1f7a7d10a9b025dcf231f4c4e1d97f14b15b3355a6266d318d6 +size 3973 diff --git 
a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ReplicationPad2d/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ReplicationPad2d/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..bfccabf8f0b319823230cf1d2d842d0916029fe9 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ReplicationPad2d/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:896ae776de6beb2f711fdb4959547fb2bbd6777a87448f98cdf30b80721eff1e +size 156 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ReplicationPad2d/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ReplicationPad2d/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..d733ab197a0182c9dd36a9c5a3e184e873b0d492 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ReplicationPad2d/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aefa6b08007cee626acb97362b8edd5cacfa95788083a564d0f09234821ff1f8 +size 397 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ReplicationPad2d/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ReplicationPad2d/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..2b743aa970fb676e1fd2315213c58e532177cf2f --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ReplicationPad2d/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0a91b296da1b4a9f23e4f89327510b0de91dd01bcfdd81bd54b681df889b29db +size 1861 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_SELU/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_SELU/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..e7de91ac29cf5d9d7c5d3a0b235dc550d5be5b35 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_SELU/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91b617d81ad8bd304d82a67f14d371e667a2397c82bbdfbbb45950bf3bf651f4 +size 104 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_SELU/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_SELU/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..6dba94f03864e9b8d6acc14900874201e0c54fe4 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_SELU/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e7b32d5dd4969a4f2b577efe825fa679abcc005acd9352355358d1b519df42fe +size 130 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_SELU/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_SELU/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..637b29a8cc4a88e4057d7d5f9e1d55a1ed04e3ce --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_SELU/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:bd4be99621f7969b8b771c507132e6e3b38ec6e167912944946ce971b6dc4e59 +size 130 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Sigmoid/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Sigmoid/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..e0c35c6eb0da9b01597f0b22c9475a3e08b800e4 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Sigmoid/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:afe8d7bd3bb6381d6ea07484737c1e5017e2a56fa3f9ba261c44e888b4677598 +size 115 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Sigmoid/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Sigmoid/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..79355fa0a59f521e876460dde05af74ceb602882 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Sigmoid/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c46aa18a89508f501717f27d0c8e91b217251dd913b6f51f8c0f5435ea07df88 +size 493 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Sigmoid/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Sigmoid/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..f95e10fd7195e68e0dd1edbf2a9418b8362d53cd --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Sigmoid/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fdab9df3faa0cbc9b22a8b72e36a2e0c6bdcf0ba81f4fd30ce2335593f5476fd +size 493 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Softmax/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Softmax/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..3b4882343cd681c919c53f351d227706adf9756f --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Softmax/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33c614f8f96bf15a31fb1be0db2b48f5dbceb554667eb140d5616daabcf546b4 +size 112 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Softmax/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Softmax/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..63be0faf515b0777b4edb5d032e276932594b23e --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Softmax/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31cd93d802919466cc9fd2612900aabdd36130e778cafe9ea22cd1238ad0dc4b +size 809 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Softmax/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Softmax/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..cc69d1ac8ea72ec312ff8c6a1e1a95647632b588 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Softmax/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:70405c37203e50eecfbc93f4e413453e0df83b46fdc6e236a081f41ee8db2420 +size 809 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Softmin/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Softmin/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..f26b1dc6f95640c4ae070dedc1deb615ee650bd3 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Softmin/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2e4cdc2d57f499598a955d50ecb691cf6ae8a9955458e4a1a8b03417b092deac +size 125 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Softmin/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Softmin/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..0a3f726d3991179e30a142873071114718e01fb9 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Softmin/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e84be45e0b0cd6a7aacb8c57beb3668f31604087d48352c00ef2ae8eb921bad1 +size 809 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Softmin/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Softmin/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..fadff17f453c108449b31e9bab8ad4f71617d2a2 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Softmin/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:74cd19ceebb00658c2602979830b73e9dca5ef16f03ead3a3922696d9cf7ae5d +size 809 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Softplus/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Softplus/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..06770688b9f7d7ac74c2b4aff58d9c7961023bc2 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Softplus/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d6edeab57c386acd351ca6b78533c316d4ff925cc53a47ffba4eb2346d279e6 +size 100 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Softplus/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Softplus/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..bf4e5e3ce77301eee148ef3ea2a92bd19b3f5c81 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Softplus/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:447096bdb649da9b8972c92b932a7e50eed52578096dbb5f9e0bf9d26f9706b4 +size 809 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Softplus/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Softplus/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..1037d1cdb15bcefb61693c47ecd66a25cb0d0e8b --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Softplus/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:4fcdc154264dce020c25e3bf2a39cb1429777ef3baa48dacc0c9fb01c1efdb3e +size 809 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Softsign/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Softsign/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..bf7b06f7f6098f49909e56b34509003f6d5f28cd --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Softsign/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:efb02fe36f0865ac12df5e609eb1709d0a319b34b173ccb2be31af6e9720f08e +size 191 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Softsign/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Softsign/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..6bb2a4506d1b6bdc8bc8167e6addd9f269266aff --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Softsign/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a047ba2fd2937900154cafa04270dc203b92c8336903dda642f1c6a4a4af895 +size 130 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Softsign/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Softsign/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..e628874cf7598f5affe8c888d7bc643ac93d4bf2 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Softsign/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:760b176c53929f4298b5b55efc6e1d27ef98c2d286639d26e4bda1732f3c9602 +size 130 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Tanh/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Tanh/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..2362279e85daa7f86266b94110ac03a45393ccea --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Tanh/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:83b4446d8a440d02a4f87144513aff79dad3a842733fba7a538d6a03245ef698 +size 112 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Tanh/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Tanh/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..661216331e2853a2df3695ad3630d514d6a5d6d2 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Tanh/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:87ec984ba2ef678c9d00cfbdf73202a3a55e413eced7cee59070dacf12289fbf +size 493 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Tanh/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Tanh/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..a73d641c5b2886c911a31c445da15067ae54eef6 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_Tanh/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:185a818f8306ecf022ba3c4c47abb3713a90183f18ba6c05ef94b3aafbac49da +size 493 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ZeroPad2d/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ZeroPad2d/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..1ddf94f6832a10e2847a970dcd15b8ed2e49f3ac --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ZeroPad2d/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a28c21f42f5ddf77e5dfa53a30faf3cb8eec7362ba6aa5ec029a271afe6e112 +size 177 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ZeroPad2d/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ZeroPad2d/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..30c5b6f2178f19cdfb8e82adfe6080e78a67eab6 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ZeroPad2d/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b6c64a4b1818021e42b7e82879cfd98c40cc1a7a8e3a8184adffde50ed5894b +size 397 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ZeroPad2d/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ZeroPad2d/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..c91b069d7344989429ef90c84f3b40f4baa6f623 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_ZeroPad2d/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9a1aad510e25c9a08f34e47005b46d2bc9391b399e8abf38dea0d517f1956e1 +size 1861 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_log_softmax_dim3/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_log_softmax_dim3/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..188e77b60133af62b8a876fde6ced8fa54060b7a --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_log_softmax_dim3/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8956b4c1ef7789f5d2a7525aa55463f079baf262e95971764103a68dc059f6bd +size 131 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_log_softmax_dim3/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_log_softmax_dim3/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..352bf93be84565198861ff81def2456714953d4a --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_log_softmax_dim3/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cfda7a4dce021837dc31ebcc6f7588e91f672952d2ae676c3e7c02cc1fd8473d +size 493 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_log_softmax_dim3/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_log_softmax_dim3/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..98ebebd52d3c9e4c7f6a3f9cda2a015235d71e19 --- /dev/null +++ 
b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_log_softmax_dim3/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e4d5d36fdf64fde6c3659f1076f36dd996d58484441fd738e78837ea16571dc6 +size 493 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_log_softmax_lastdim/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_log_softmax_lastdim/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..ba6c46d047357c8a8163d4e40a8c079e563b5b6c --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_log_softmax_lastdim/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9bd6ce0cee451394b4e1a85261ae9b3f1c4103186311c260dcbe0dcb70c93bc +size 126 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_log_softmax_lastdim/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_log_softmax_lastdim/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..29aee34b1fa9b4b1f27520a734530b81402ec25f --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_log_softmax_lastdim/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bcce89e5c9b6619441e2d099c2e3d6b40e08b45f28e07825862efc8b248382c6 +size 1034 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_log_softmax_lastdim/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_log_softmax_lastdim/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..abe9ffb9079ce1d128cebe78a63e29f3086045b4 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_log_softmax_lastdim/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c2a84582dd41fa5b70157866a88543cd53c049d10868fc2db9f61e9695437d5 +size 1034 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_softmax_functional_dim3/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_softmax_functional_dim3/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..457825ede64559043d374d98b786f7246c4f5213 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_softmax_functional_dim3/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:716f35416a512cbe2059b6262b3a3872b35f48fde8e64bfb479f579f53533ad6 +size 128 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_softmax_functional_dim3/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_softmax_functional_dim3/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..540e8a8d2da555f7133acecc11d78bd237c2d538 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_softmax_functional_dim3/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d53ed2da67c0dd5c93c3f52eff14752993a493a7d1f201f595186a7bbee408a7 +size 493 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_softmax_functional_dim3/test_data_set_0/output_0.pb 
b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_softmax_functional_dim3/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..9237f982ea91f2e15b861d46d09b31aa7aad519b --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_softmax_functional_dim3/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8b7ad7133004927055c8f23bb99f5bff7aae2537d47d046586b1c93c3ceb23a5 +size 493 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_softmax_lastdim/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_softmax_lastdim/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..2f92ab8cd63ec100d177c368c95f154bfbe122fd --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_softmax_lastdim/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:016597f084a51e47e37c126bfbc70e3c2548569943ad943cd66061d5883fdc44 +size 114 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_softmax_lastdim/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_softmax_lastdim/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..12455e3818e3319c9ef75b794e189ee97b8bc485 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_softmax_lastdim/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:208fdce8d77c1d6e90d92f306fdf6337afffa138e43aad0b56b7ed03dd8ad097 +size 1034 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_softmax_lastdim/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_softmax_lastdim/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..a370794472f683ce8c0edfbe80595d20211ff8bd --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-converted/test_softmax_lastdim/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2fc64e1eda6470d3fe97ea75ec17265932ba6540c0180e8eb60a52c23170c5f6 +size 1034 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_broadcast/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_broadcast/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..91e1448600c0d053192a7b70c165e61a8df6c096 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_broadcast/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e32a28e969e4ba81388720c78b9caade5df1b3bdcf3dd1890439fcb6c0ad6e68 +size 146 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_broadcast/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_broadcast/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..bad6fe24b10445b62f29e13f42466ac0d74cad77 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_broadcast/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:59e2931766b480b4a015213b44fbbbb2e0bc05a90b239270c512207180513291 +size 56 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_broadcast/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_broadcast/test_data_set_0/input_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..5dddd2281d603b9ab5de5e0151fbb69e65559537 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_broadcast/test_data_set_0/input_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1a527a60ef96fa797a5bc467ecd880dd5401ca79c3730d021d4d0aeb48403f14 +size 30 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_broadcast/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_broadcast/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..6778b2a3379c3ee588ed47ed856edb4904e70916 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_broadcast/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bc058fb1e2a988b860e716cbbf86eb0fae8f1ae8d50a8c0058fcb9c477fb30db +size 56 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_size1_broadcast/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_size1_broadcast/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..2e887cdd7c8a78489614e6b496fa7daf4b0323b3 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_size1_broadcast/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:43fdd40bdc58814f99d45083aa8c1782d4181394c533776ffe7bf0e67016dc63 +size 151 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_size1_broadcast/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_size1_broadcast/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..5df9e2264b43c460d8cbb8d2df3dbd219367b423 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_size1_broadcast/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7416634ba7f4619abe3c09aeb5fd8a57273921cf2591b6917aa02374b288bfd8 +size 56 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_size1_broadcast/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_size1_broadcast/test_data_set_0/input_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..d30fb08bb24f16de956843d677850823e179357b --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_size1_broadcast/test_data_set_0/input_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:266de418b18c2df08b763b6589dcedad152d55ccf81da1ca6296688edec56c7a +size 24 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_size1_broadcast/test_data_set_0/output_0.pb 
b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_size1_broadcast/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..9122ac0202473e7ff807e08151c7ad73596739fe --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_size1_broadcast/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ee170f791f30828f693312cc85c4ee956ced35fa57bbb93aa48b76229de021f +size 56 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_size1_right_broadcast/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_size1_right_broadcast/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..91e1448600c0d053192a7b70c165e61a8df6c096 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_size1_right_broadcast/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e32a28e969e4ba81388720c78b9caade5df1b3bdcf3dd1890439fcb6c0ad6e68 +size 146 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_size1_right_broadcast/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_size1_right_broadcast/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..464a64b66f3e99e333dd99e10053f7fb44a2b37a --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_size1_right_broadcast/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b5c895101ac1b6b962895fd3d44559481d523518b6cac2af282d69b7bfd9440 +size 56 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_size1_right_broadcast/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_size1_right_broadcast/test_data_set_0/input_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..8b13b4c6ef8af8e844fe51175684f97ef3a0b6a2 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_size1_right_broadcast/test_data_set_0/input_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:987fdd6549bd2b74748e0be3a9db16bdedaa20e9a1e46bbca9542f780d224322 +size 30 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_size1_right_broadcast/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_size1_right_broadcast/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..c90c6ba3957b5b0d265127c490b3dc146bd90e75 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_size1_right_broadcast/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:29a0c88ed17c7494ea566c5b9bb6e5f9e36219933edf3e0da9d857f0c157ea68 +size 56 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_size1_singleton_broadcast/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_size1_singleton_broadcast/model.onnx new file mode 100644 index 
0000000000000000000000000000000000000000..716152d09462b770353a92370abb629dcd8dffef --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_size1_singleton_broadcast/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b4bba28c4211265077b668c2ad11dd4b3fc4617583c4e36657d21e14d735374 +size 151 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_size1_singleton_broadcast/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_size1_singleton_broadcast/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..9e290b343d66e409a1c96dc97c3a72d315cbc61c --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_size1_singleton_broadcast/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1e0c7d043be3cc02aff6e7a5e5ad53a8f813fd48270e67f4582edff1ddd00db2 +size 56 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_size1_singleton_broadcast/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_size1_singleton_broadcast/test_data_set_0/input_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..910d35966bc40f5d3ff41033e90478b4193abfe8 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_size1_singleton_broadcast/test_data_set_0/input_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0ef595df5829db9e0a9c19389bde46c1d72f2c8f1eec62343c81d5ec56f86a44 +size 32 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_size1_singleton_broadcast/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_size1_singleton_broadcast/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..652e0a823071d9209577ab4de83c1adaaeb59ea6 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_add_size1_singleton_broadcast/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e84618a16650ab9be91650b9ba3d9b8cd42cd3ce14f9fc8025d6f9ccb8a98597 +size 56 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_addconstant/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_addconstant/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..6e2b494bdde5ac3975d3280543aa22899d2545e9 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_addconstant/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3c6b202f12d5c44e81f989d3e5d4e281f80efb455406e0e437168b1d0229cc83 +size 158 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_addconstant/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_addconstant/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..52b9af9ab38845da3696d36e6758638514f49fa2 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_addconstant/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ 
+version https://git-lfs.github.com/spec/v1 +oid sha256:69e02108c80938d21289814b113306c289669c4594be207ed2bd39c780e3fd01 +size 56 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_addconstant/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_addconstant/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..cb4685fbf32128ce4041ae8e32d2c59834126034 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_addconstant/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dea4b3411238113bccb144d75350bdd5a52aa885afcf6d7588f711105d6334be +size 56 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_addmm/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_addmm/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..084afde18d6a1659db8bb0c21da2fe05db495610 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_addmm/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a645ec71f16a780ebdb6ec3eb3ee47d79628f0ce7f6bbfafc1fa14175136923c +size 245 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_addmm/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_addmm/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..5b97a8c02c998130588bd88c7f8341f32f2df552 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_addmm/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17c1c8c6e32f2f3758a8d054d563a14aef4135bf5ff2c136361e0b49c8e40afe +size 32 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_addmm/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_addmm/test_data_set_0/input_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..025406d6e8df1eed661503a07240a91a2ea81891 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_addmm/test_data_set_0/input_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c117964fb86ed70be9cd56ddcb8390dd12c79881a5796b9d8d7b4dcd0c4ddf3f +size 56 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_addmm/test_data_set_0/input_2.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_addmm/test_data_set_0/input_2.pb new file mode 100644 index 0000000000000000000000000000000000000000..6f56fee09b1cd5d7d3ab42d913e35f2fb3a9e167 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_addmm/test_data_set_0/input_2.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8923ded003eacab08169bdc97d04efb9758ac585b58eadf030cd2775a5477a94 +size 22 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_addmm/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_addmm/test_data_set_0/output_0.pb new file mode 100644 index 
0000000000000000000000000000000000000000..6ec50c952ea3c123d6eb1a71770fd7692924abc5 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_addmm/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d90d0df5bfc55fafc4962d34ec902bd330eedc558228b9a24eda89a89249e806 +size 40 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_basic/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_basic/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..5eeed2e677b85add38d8969ced41b26149cb115e --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_basic/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8fc4a80060eb7faf3ed13cb6cc1f42b826d5a513f4580de0d5e60fcaac30b619 +size 168 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_basic/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_basic/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..8686b32153fb0dcd47696f899f0c818b27493b7e --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_basic/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:66cb29e392752d93ef4ba8c0ce514ec8b139f91eabe226fabb4946c1d82bae5b +size 10 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_basic/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_basic/test_data_set_0/input_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..a163309cd16447e997e6df11389a759282dbbb93 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_basic/test_data_set_0/input_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a21f96395a4dd2d6754cdb726ce7bd0ff82191e3c7d0ac84369d43434f2d54e +size 10 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_basic/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_basic/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..9e244860a4a990f355861478ac5cc194cf93cfd9 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_basic/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:64541217698e5e14a5a4b5774a25e9953071e298418d177addb84872f4b4b1c8 +size 10 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_chunk/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_chunk/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..8ff12a6230ea03a5f8be090a4d4e07e451ab6ea7 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_chunk/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3efe57ac9ab9c8322733b62c35b61217175b640c564fcb386b5c8ae6e5fa956c +size 138 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_chunk/test_data_set_0/input_0.pb 
b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_chunk/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..5603f1524c18cfed0bae4f148f8b7c31e8d4920b --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_chunk/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e144240bd9b2d4c651ef24be8315312e11e80cf71947138a56f3cb3a617a2366 +size 18 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_chunk/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_chunk/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..3fb5c7fed7954534ded7401b770606eafeec5f1c --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_chunk/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bdc4fa56de648e23d09ffb95337d3ebd1aac299bdbd041ed487400f997def1d5 +size 14 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_chunk/test_data_set_0/output_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_chunk/test_data_set_0/output_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..76d076f955c240176257636b2fb8d87c19244a6c --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_chunk/test_data_set_0/output_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:452735224a917452cc0660c443b273982eb681306689a9df27effd974e5ebf71 +size 10 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_clip/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_clip/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..adba92e2c561a96a139cc3747611bc97ae219bc4 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_clip/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:778fc5195b9d5db7f9e9885194a2a8efbe3d5edb55ac8a874ed1e78a19adc06b +size 126 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_clip/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_clip/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..d5319a675cc614adf964cc1ff2c2dc7c51349998 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_clip/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:944e1a296dddb9b6c0941e2b70351c96d5972084abe077803752d69e3706970f +size 56 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_clip/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_clip/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..4ce7f0573dcda36ae8f2f1f867b51d833dca0fdd --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_clip/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e1a92a67c157f8728da953f66a7a384018a3b59b60d572ab5c15f61cb9036182 +size 
56 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_concat2/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_concat2/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..7f3cf1aaf21912bc69462c4600ea24c661c743c3 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_concat2/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fd04dc7208cbcbd10f5fd505f895674ef906676431ccef960900957ac823e46c +size 135 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_concat2/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_concat2/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..5b97a8c02c998130588bd88c7f8341f32f2df552 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_concat2/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17c1c8c6e32f2f3758a8d054d563a14aef4135bf5ff2c136361e0b49c8e40afe +size 32 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_concat2/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_concat2/test_data_set_0/input_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..209062c26aec5da257ae2442fe1396f06a69472e --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_concat2/test_data_set_0/input_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c76b841a3f25cbc02eeaf38d07975bdc1d991ec7f395eea57235af701b2b9bad +size 32 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_concat2/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_concat2/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..edb693129301c6dd4c2c01401091758a14145232 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_concat2/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a600c30de787f3909fd9789081bdfe75c70aca9e81ece22d1f334891d7aca96 +size 56 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_conv/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_conv/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..1196a966d7bd5cecd414e1dae10b3b27afc82df0 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_conv/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8686672d9ed2b539b5c9a670d93ce007c569314f6d74e33b4b10118a3f33d656 +size 7746 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_conv/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_conv/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..d60009db6f493f99ae5f9ffd9625e9e26a2b7fbd --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_conv/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:52f2215b35016c85c17a5ad4d471c71ebedbf49140a881af0f7f80f023ebeb70 +size 2560015 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_conv/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_conv/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..4649f17a8f1d08f24c9bd78dad8d31fc808a1bef --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_conv/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb558f63a7fb8e9193c98103f279861c389d171edeab5e0d4d82a5926824d5ea +size 1896974 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_convtranspose/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_convtranspose/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..c481bedc29c3cff138a2575c372be2ad6587547d --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_convtranspose/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:57fd5707733f1e7473322d19963781d318f42c8cef22041c30b93d04e4ace86e +size 617 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_convtranspose/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_convtranspose/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..b895c0fd1d091a7216a4cbe7ad82e76d1bd6e8ba --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_convtranspose/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4457fa4f9c95d9c71d11e0c4a4a397d5091ae99c34253e44dc0e3972dd332517 +size 493 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_convtranspose/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_convtranspose/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..54ea5ef434898396a7179e29984c3ff7c71569f6 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_convtranspose/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ddeaf41fe4dbafbe9821c4d3ef49b081b738f1bf50a0cc5324a4c98207b9bae3 +size 4333 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_exp/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_exp/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..b856d52e50112cf4665a56aa57e3780e93b1ccdf --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_exp/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:60b5c2af9b0856877d830ecb3b440325bb96fbac1958e2e0ae2a5b0940a770a5 +size 95 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_exp/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_exp/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..d5319a675cc614adf964cc1ff2c2dc7c51349998 
--- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_exp/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:944e1a296dddb9b6c0941e2b70351c96d5972084abe077803752d69e3706970f +size 56 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_exp/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_exp/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..f1980b88d464cdf17babf9f068943e728f8a72a3 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_exp/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:10dd6c7aeb8cff55012458d7a33eb1957d4505ea5ff0dd01e66ec14e458cf155 +size 56 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_flatten/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_flatten/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..0ec7a4895d9b2a3d2782d8a59f8f370ecd575720 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_flatten/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ebaea8a9d56651b8ba0454f29dad1e1e4725ae8a54549b6abfa13cf93aa39ad0 +size 120 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_flatten/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_flatten/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..62563c46e546efed55746645afafc2a0bd75d0f7 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_flatten/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3bc3e62692125d1db6c9003ffc87c131af57d753699bc19ff0f0366b2662045f +size 108 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_flatten/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_flatten/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..6e19184370981214007c6fa14604ed47496d674f --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_flatten/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c14b75588dbe28a94c3a0802f704b9f6f645f05fd138009d237b3b826a2dc3f5 +size 104 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_index/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_index/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..456f5b9dfdc5a6f83dbafdea12aa484e15de3276 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_index/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:275b72eadf19cf0c94848cfc478ce673ad2b4aa51ad3f264b7ed1358b96e2a6f +size 165 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_index/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_index/test_data_set_0/input_0.pb new file mode 
100644 index 0000000000000000000000000000000000000000..45a94b35c9ec6bc5579fcd35a87bbd6d8b3ab638 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_index/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:872e582cf3d9df93eedcfa10415a86b3116912f38e7137b5dbbb540a239ffb24 +size 12 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_index/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_index/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..60cd32bc685a60421e8e6f0efa54f2bba1edfdff --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_index/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dba4178c03e74ec137429a898dc3fe3e4485551fbc8a66133e4430cbda9d1293 +size 10 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_max/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_max/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..daa1cd9bc0aeb5c1e4801f69d959e9d0493c8552 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_max/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4499da62783819a04e944de2a37b071ce0b13901f0448fc526406f5db6715687 +size 119 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_max/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_max/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..d5319a675cc614adf964cc1ff2c2dc7c51349998 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_max/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:944e1a296dddb9b6c0941e2b70351c96d5972084abe077803752d69e3706970f +size 56 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_max/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_max/test_data_set_0/input_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..1b86459025188256d506e4c40844b55ef9c2aa22 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_max/test_data_set_0/input_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a1f8008ba09ff2b9fabdfb05edbf198a5a9488bb6c9fe82b8844765338641a98 +size 56 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_max/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_max/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..36f5537e1131869de37b4e2000ef0c872ad25a9c --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_max/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a62cba1b93cf0b7aa55ebfb457acc71b4cef800ae5b51df4b4f5464bd955ada3 +size 56 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_maxpool/model.onnx 
b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_maxpool/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..dee4d4d8e9f84683015b41c0cbf12f487b90d709 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_maxpool/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a7baffdd5ed1be0431dce6456181e4936d28719177288b5dd4ff741f187e1628 +size 160 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_maxpool/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_maxpool/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..cd868192c2889a0855d865a6152af2675562c63e --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_maxpool/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:69afe403394b677ae6dc29d19bd3fb06164a3391c550900a65672926a8d32a16 +size 64012 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_maxpool/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_maxpool/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..86f7f041b473a54db0c5756feec3d52e12219a7c --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_maxpool/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a0bb0864762a1813781eee67b5346088f3540f875a588e6c792dda89d3575ae +size 30732 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_min/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_min/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..1b2e1402d5daafb2a3d306acc91691a15046fb3b --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_min/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ebc29a395593453c5a039e44a64435f33067ad3e1fd6070138b40e98c93132f +size 119 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_min/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_min/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..d5319a675cc614adf964cc1ff2c2dc7c51349998 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_min/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:944e1a296dddb9b6c0941e2b70351c96d5972084abe077803752d69e3706970f +size 56 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_min/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_min/test_data_set_0/input_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..1b86459025188256d506e4c40844b55ef9c2aa22 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_min/test_data_set_0/input_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a1f8008ba09ff2b9fabdfb05edbf198a5a9488bb6c9fe82b8844765338641a98 +size 56 diff --git 
a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_min/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_min/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..c595a8b588c6fb1a109f9944f3b7a4742dc851b6 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_min/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9273b65cbda18dee810af9e30bb28c0060b834d993b0a1ddb7ef768d2f48e5cc +size 56 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_mm/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_mm/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..e20d6714100844f4ab6a9c6bef66fa5faf57444c --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_mm/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bd53be4a44cdf737123b37620cfec0ed4c47e8d5042cf727bdeb10d83706fdb8 +size 214 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_mm/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_mm/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..5b97a8c02c998130588bd88c7f8341f32f2df552 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_mm/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17c1c8c6e32f2f3758a8d054d563a14aef4135bf5ff2c136361e0b49c8e40afe +size 32 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_mm/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_mm/test_data_set_0/input_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..025406d6e8df1eed661503a07240a91a2ea81891 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_mm/test_data_set_0/input_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c117964fb86ed70be9cd56ddcb8390dd12c79881a5796b9d8d7b4dcd0c4ddf3f +size 56 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_mm/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_mm/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..d5af018c3c78b82bbc7939e2f5eabe04c6250619 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_mm/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ea9959b921bc5c15c8671f3af66a38088b7fbe4d7f61a99fb0a35ea820b187bb +size 40 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_non_float_params/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_non_float_params/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..dd19453546fd86e01b8498c47239c8049acdbbc2 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_non_float_params/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:485c66a0c1b5abdf83d03a351f0761e86b9aa44e821787d4bc5145ccbfda5465 +size 181 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_non_float_params/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_non_float_params/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..44ac8cd01b5629289b452fb192118c047fe8d976 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_non_float_params/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2bbd2dc2044046f51d689985796e81fb9a7e5cf78e9d223b5078a4302a86a39c +size 40 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_non_float_params/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_non_float_params/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..f200c714beafaf9f23f5cf5fc1fa8cc58e3d5972 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_non_float_params/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:83fab6fd56acbb4a476a49755e4a9e1aa6779b15e2ca483eb435aac7cfd5433d +size 40 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_pad/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_pad/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..94059172d5cd70da22b43923eeefc7c181febfa4 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_pad/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8176991317b9ac1f5e9ee1a18a89231102086b8f74a44a051602b1a0e1294aa0 +size 159 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_pad/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_pad/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..f208ca6d59f979646b2310f92c404636d89f53d3 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_pad/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0f9f2fab2c3f55cc6d50af5534ddf730ab2bd41cc81326fd4308cc36b88384a +size 44 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_pad/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_pad/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..cc245536a07c50d3d78a2e5d48f50b33c1b17f2e --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_pad/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e49eb16e63c84f9dbb5422e9ed58042fa28f20c10a2a4ad2bedc17387d965a6a +size 120 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_params/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_params/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..5f2c44c7587aa717916504e0b920329d47d1db29 --- /dev/null +++ 
b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_params/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4618f50b075490691d11a75161905d979de1b3e12f20f892255039993cb4e0eb +size 209 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_params/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_params/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..68eaacef9ba89b958006899bba09af456f6281fc --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_params/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e839fc56e801da9b602fddfe6b9c65f1fffa28da6ab3721d38d9fa580d2b952 +size 24 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_params/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_params/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..418789b6eceb218badd7cc411875bfce183b0b38 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_params/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b56a8748b6ca45a8a4550531a8e494218b1ca8dca85dd8a288c769c98a30947c +size 24 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_permute2/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_permute2/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..90f060074568233a028c48f26ed7a7393d9aa662 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_permute2/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d0aac91f4e4b02c2fefcbefbc18ebbad361c2a5ecee02dd80bba13bcf107ad35 +size 157 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_permute2/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_permute2/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..76b2d1906a6d0fa34ca0213cb264a1bcc3f92083 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_permute2/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ff34379c6ece81e777fd0c7de45516f9260ef7de66f3cf5a8b262e9ac905123 +size 20 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_permute2/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_permute2/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..76b2d1906a6d0fa34ca0213cb264a1bcc3f92083 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_permute2/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ff34379c6ece81e777fd0c7de45516f9260ef7de66f3cf5a8b262e9ac905123 +size 20 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_pow/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_pow/model.onnx new file mode 
100644 index 0000000000000000000000000000000000000000..ff046e07c320afcfe1e4cd48c46de668c71713c8 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_pow/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33b5cd9141807d9e44da79058d65bfed25ef2bd5076f725270e6828b1f10bb21 +size 143 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_pow/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_pow/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..62563c46e546efed55746645afafc2a0bd75d0f7 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_pow/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3bc3e62692125d1db6c9003ffc87c131af57d753699bc19ff0f0366b2662045f +size 108 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_pow/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_pow/test_data_set_0/input_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..4e1aa81748ce990f96ca93d89e2f230a37bb2c32 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_pow/test_data_set_0/input_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d0b7e8ef88dce493e1bdbe5a310e53860b2f0a4825bb53adcd4c1c209ecfc3f +size 108 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_pow/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_pow/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..c5ec3cfdfcdffa731e5dbb26a247fa6ae0cac7d7 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_pow/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c437b01b140337678ffc22d8dc9261750cebe621a4333c72d15ae9e40444843 +size 108 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_reduced_mean/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_reduced_mean/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..9e460748aea0701925bd6f02d160d866d1d5fd6c --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_reduced_mean/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3b520425abc7a0320ff2475963531e99f2dfbc469b3d2db5a19e70a07a945a40 +size 144 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_reduced_mean/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_reduced_mean/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..62563c46e546efed55746645afafc2a0bd75d0f7 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_reduced_mean/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3bc3e62692125d1db6c9003ffc87c131af57d753699bc19ff0f0366b2662045f +size 108 diff --git 
a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_reduced_mean/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_reduced_mean/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..d2786167e8e7f7f98a7d1443fb45bb1a8b6dc9d6 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_reduced_mean/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d0646eb204fa965a7d2f8c516abdd6b31d77e6591185e1dd93fc8b8ea3f3e99e +size 42 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_reduced_mean_keepdim/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_reduced_mean_keepdim/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..8f28725fffab4624a1ca8a73cbefcbd24f28db53 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_reduced_mean_keepdim/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f1a714899ff20760af3a6844d95f8b30fbca0b7e577d2fbf07e8ca3ededddefa +size 148 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_reduced_mean_keepdim/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_reduced_mean_keepdim/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..62563c46e546efed55746645afafc2a0bd75d0f7 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_reduced_mean_keepdim/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3bc3e62692125d1db6c9003ffc87c131af57d753699bc19ff0f0366b2662045f +size 108 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_reduced_mean_keepdim/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_reduced_mean_keepdim/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..ab12b106e4cda2092f9a9ed9d29bf685a1300889 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_reduced_mean_keepdim/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:253e3914a2fb7d0a7dde749c38b347e57573a5ee7f2d2774244468124f996e14 +size 44 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_reduced_sum/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_reduced_sum/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..4018e011c59c49c06c4172463e6a5517c7cd5922 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_reduced_sum/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d494ea7d31091b05d409799923d5a82df8db56e9c290b536ad283f0e53fa7df +size 143 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_reduced_sum/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_reduced_sum/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..62563c46e546efed55746645afafc2a0bd75d0f7 --- /dev/null 
+++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_reduced_sum/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3bc3e62692125d1db6c9003ffc87c131af57d753699bc19ff0f0366b2662045f +size 108 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_reduced_sum/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_reduced_sum/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..a6bf999053103559e6027566ac765ed93fb1be16 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_reduced_sum/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:52d9678b6f1f5f705759d5570dcb8bb7472efee9601e3a7fae0babdff950cb36 +size 42 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_reduced_sum_keepdim/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_reduced_sum_keepdim/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..e40da729e542a416e414df4532acd8fa25fac4e3 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_reduced_sum_keepdim/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2fadbf7176846be0b2930f0b54ced7bc668a45b21dac12b889a9935b187c3818 +size 147 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_reduced_sum_keepdim/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_reduced_sum_keepdim/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..62563c46e546efed55746645afafc2a0bd75d0f7 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_reduced_sum_keepdim/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3bc3e62692125d1db6c9003ffc87c131af57d753699bc19ff0f0366b2662045f +size 108 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_reduced_sum_keepdim/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_reduced_sum_keepdim/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..6ad3314834c407bbf8256f89d1ab16c4ec4ee8b1 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_reduced_sum_keepdim/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e4cbd3a0c1b05baa454e208bb8524fc7191f61b14083dfa4c0579ab98c383217 +size 44 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_repeat/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_repeat/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..b7f71e4dc0d93326e2b205af367a4313eca20f06 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_repeat/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:45024e66e34011dc16e8f942925c4a94686d676da617c902a68bbc1402255b25 +size 183 diff --git 
a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_repeat/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_repeat/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..62563c46e546efed55746645afafc2a0bd75d0f7 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_repeat/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3bc3e62692125d1db6c9003ffc87c131af57d753699bc19ff0f0366b2662045f +size 108 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_repeat/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_repeat/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..d92347cc0c5b3c9aacb407ac8326044254caf753 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_repeat/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f9894e282d63106e64582f3a3b0ce39d628595ace004650f6ebcf8c2e3863597 +size 2317 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_repeat_dim_overflow/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_repeat_dim_overflow/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..29f04edaa11ab47c993ccd9d237a3f2bea1f6c7e --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_repeat_dim_overflow/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:988af83b725e7dfdc78cd70b3ed46074da3cb2acb78a3fc4991c2d934fbf2e29 +size 262 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_repeat_dim_overflow/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_repeat_dim_overflow/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..ea35ea3a9b3dbf33f46552dba9ab490ce1a781f3 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_repeat_dim_overflow/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3de7968d997ab3ad7841ac292a54f369ad8cfea0ca92e3bc0416b3faa7e4f2cb +size 16 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_repeat_dim_overflow/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_repeat_dim_overflow/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..357658d364f4468d78fdb24f9f1622e0d161cc52 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_repeat_dim_overflow/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7dca7d969fdad14155aa62508cebabb3ae64dc8111c7e6eb367c9044423100df +size 205 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_selu/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_selu/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..cb0b674b354f88fa42923691dd0f22baba511e65 --- /dev/null +++ 
b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_selu/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de6a8a5c3b792b00480f9a35d470ed332a082ed591d78ffda0f1ad38dc83b9ac +size 112 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_selu/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_selu/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..62563c46e546efed55746645afafc2a0bd75d0f7 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_selu/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3bc3e62692125d1db6c9003ffc87c131af57d753699bc19ff0f0366b2662045f +size 108 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_selu/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_selu/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..827deada870fb735f206ef0ec9ed5f2634c14ba7 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_selu/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:81dffb4efb1f2bac33245d5cb56728500afbfc40228bc780af5030d3ca80ad4f +size 108 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_sqrt/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_sqrt/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..0a4a32c3846b33aa674de0c473965e152d3d0cfb --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_sqrt/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73e9a3f0a7f0bd21baca809908bd515953c2e1812c16af66f47ed4439fd879cf +size 96 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_sqrt/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_sqrt/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..d5319a675cc614adf964cc1ff2c2dc7c51349998 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_sqrt/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:944e1a296dddb9b6c0941e2b70351c96d5972084abe077803752d69e3706970f +size 56 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_sqrt/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_sqrt/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..9ff7678877f03fc96b9b1a413ea8a2fc51990704 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_sqrt/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a7761216d6701e85081eb26a93b78e31c03bcdae4d695f7bba8af7db9f7187a2 +size 56 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_symbolic_override/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_symbolic_override/model.onnx new file mode 100644 index 
0000000000000000000000000000000000000000..463328928325eca38a4bd8b4c9f55dd60fb7b603 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_symbolic_override/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4dcba4239490d9808912d8dbe82fbd4b54e983ddc145656199425c2ef406c819 +size 291 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_symbolic_override/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_symbolic_override/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..c9697345162bc101f56d4c61ebb6c1266fde0f0d --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_symbolic_override/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b56f70c52f4016af37eb818083d99fed9aa506ceacf55c47ef2c6ca75e293c4 +size 81934 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_symbolic_override/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_symbolic_override/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..b169a94d55178adfd75d331f857e2f30cbe7ccb8 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_symbolic_override/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6eac75cf668adad2964c7c94dcedc4167fbed2a78282abe4c1f6649b5f42d564 +size 81934 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_symbolic_override_nested/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_symbolic_override_nested/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..066b587d6d3b3859ae434b6c630f8a7d256897fd --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_symbolic_override_nested/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6391e658fd3db2a43c0fceca282b8dcda7030054d7bd3f5ddaaad3d5442a085b +size 188 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_symbolic_override_nested/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_symbolic_override_nested/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..ccfa1510698e1263f2dcdcf2a3ccf80ff4fdacb4 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_symbolic_override_nested/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:83817f048d5d2fd670eca44d6bcda1eeaadcda286b68fce894432b6922373eae +size 10 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_symbolic_override_nested/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_symbolic_override_nested/test_data_set_0/input_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..76d076f955c240176257636b2fb8d87c19244a6c --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_symbolic_override_nested/test_data_set_0/input_1.pb @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:452735224a917452cc0660c443b273982eb681306689a9df27effd974e5ebf71 +size 10 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_symbolic_override_nested/test_data_set_0/input_2.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_symbolic_override_nested/test_data_set_0/input_2.pb new file mode 100644 index 0000000000000000000000000000000000000000..9bf70d1f497256a66ad77c3d89a1d9d800a9ff17 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_symbolic_override_nested/test_data_set_0/input_2.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:166431a361ddfddda5930282813b9b1d1ef87c81aa4837abc94adef19cf65db0 +size 10 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_symbolic_override_nested/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_symbolic_override_nested/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..4fd17120be750faa2789183904138d927d1ac392 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_symbolic_override_nested/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ddddac1752917511df0a6473fe013a69e5b7537157763e9ae62259d7a19ba8f1 +size 10 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_symbolic_override_nested/test_data_set_0/output_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_symbolic_override_nested/test_data_set_0/output_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..0beada33227dfa5621342eac834ce9f2041b6f3f --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_symbolic_override_nested/test_data_set_0/output_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dbb0d646caa7bd9abd9bfddd072f9a3b0fd6175e97074c10466413837605b2a7 +size 10 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_symbolic_override_nested/test_data_set_0/output_2.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_symbolic_override_nested/test_data_set_0/output_2.pb new file mode 100644 index 0000000000000000000000000000000000000000..0d557463d4196e4804f6fb86921df141c945c28a --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_symbolic_override_nested/test_data_set_0/output_2.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:62099674f5d96ff6a75cd79ab1b036019e6fa5de6d281003c6c3aec12064e963 +size 10 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_view/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_view/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..d87d8e2d7d9f27de1d1ffee4ce784f3a9975b67d --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_view/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d3ed32ba38fbfaabb3b76f7f3df0c174976fabeff635a851bc0bc4c21ac339b +size 108 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_view/test_data_set_0/input_0.pb 
b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_view/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..60cd32bc685a60421e8e6f0efa54f2bba1edfdff --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_view/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dba4178c03e74ec137429a898dc3fe3e4485551fbc8a66133e4430cbda9d1293 +size 10 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_view/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_view/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..45a94b35c9ec6bc5579fcd35a87bbd6d8b3ab638 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/pytorch-operator/test_operator_view/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:872e582cf3d9df93eedcfa10415a86b3116912f38e7137b5dbbb540a239ffb24 +size 12 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/real/test_bvlc_alexnet/data.json b/MLPY/Lib/site-packages/onnx/backend/test/data/real/test_bvlc_alexnet/data.json new file mode 100644 index 0000000000000000000000000000000000000000..8172a83e232426f99dfdb8c82ef4d31f724999e0 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/real/test_bvlc_alexnet/data.json @@ -0,0 +1 @@ +{"atol": 1e-07, "model_name": "bvlc_alexnet", "rtol": 0.001, "url": "onnx/backend/test/data/light/light_bvlc_alexnet.onnx"} \ No newline at end of file diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/real/test_densenet121/data.json b/MLPY/Lib/site-packages/onnx/backend/test/data/real/test_densenet121/data.json new file mode 100644 index 0000000000000000000000000000000000000000..69bac6cd284a5e8cf3279587be659f49cecfc252 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/real/test_densenet121/data.json @@ -0,0 +1 @@ +{"atol": 1e-07, "model_name": "densenet121", "rtol": 0.002, "url": "onnx/backend/test/data/light/light_densenet121.onnx"} \ No newline at end of file diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/real/test_inception_v1/data.json b/MLPY/Lib/site-packages/onnx/backend/test/data/real/test_inception_v1/data.json new file mode 100644 index 0000000000000000000000000000000000000000..50272a3b542a351e6dc57ddca865c68d7a6c6ba1 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/real/test_inception_v1/data.json @@ -0,0 +1 @@ +{"atol": 1e-07, "model_name": "inception_v1", "rtol": 0.001, "url": "onnx/backend/test/data/light/light_inception_v1.onnx"} \ No newline at end of file diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/real/test_inception_v2/data.json b/MLPY/Lib/site-packages/onnx/backend/test/data/real/test_inception_v2/data.json new file mode 100644 index 0000000000000000000000000000000000000000..7c26928046f8b082445bf489cc9f68d1e0ac8b41 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/real/test_inception_v2/data.json @@ -0,0 +1 @@ +{"atol": 1e-07, "model_name": "inception_v2", "rtol": 0.001, "url": "onnx/backend/test/data/light/light_inception_v2.onnx"} \ No newline at end of file diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/real/test_resnet50/data.json b/MLPY/Lib/site-packages/onnx/backend/test/data/real/test_resnet50/data.json new file mode 100644 index 
0000000000000000000000000000000000000000..f5a100e528f0233e080012a7887bcdff648050ec --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/real/test_resnet50/data.json @@ -0,0 +1 @@ +{"atol": 1e-07, "model_name": "resnet50", "rtol": 0.001, "url": "onnx/backend/test/data/light/light_resnet50.onnx"} \ No newline at end of file diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/real/test_shufflenet/data.json b/MLPY/Lib/site-packages/onnx/backend/test/data/real/test_shufflenet/data.json new file mode 100644 index 0000000000000000000000000000000000000000..6bb5a0b9d45eb520b93e23bfba869d49068e60bb --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/real/test_shufflenet/data.json @@ -0,0 +1 @@ +{"atol": 1e-07, "model_name": "shufflenet", "rtol": 0.001, "url": "onnx/backend/test/data/light/light_shufflenet.onnx"} \ No newline at end of file diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/real/test_squeezenet/data.json b/MLPY/Lib/site-packages/onnx/backend/test/data/real/test_squeezenet/data.json new file mode 100644 index 0000000000000000000000000000000000000000..fb0e7f252a18ed9c836c916f5ae076642fbac9bc --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/real/test_squeezenet/data.json @@ -0,0 +1 @@ +{"atol": 1e-07, "model_name": "squeezenet", "rtol": 0.001, "url": "onnx/backend/test/data/light/light_squeezenet.onnx"} \ No newline at end of file diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/real/test_vgg19/data.json b/MLPY/Lib/site-packages/onnx/backend/test/data/real/test_vgg19/data.json new file mode 100644 index 0000000000000000000000000000000000000000..2ad3c4f647dc6df6353b097e2a76e81de8f00565 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/real/test_vgg19/data.json @@ -0,0 +1 @@ +{"atol": 1e-07, "model_name": "vgg19", "rtol": 0.001, "url": "onnx/backend/test/data/light/light_vgg19.onnx"} \ No newline at end of file diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/real/test_zfnet512/data.json b/MLPY/Lib/site-packages/onnx/backend/test/data/real/test_zfnet512/data.json new file mode 100644 index 0000000000000000000000000000000000000000..4fabcd5aa0d169f4e576925471ba08ade1c83e47 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/real/test_zfnet512/data.json @@ -0,0 +1 @@ +{"atol": 1e-07, "model_name": "zfnet512", "rtol": 0.001, "url": "onnx/backend/test/data/light/light_zfnet512.onnx"} \ No newline at end of file diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model1/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model1/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..07c1ff9c466ff1be9fe7db4cb8cb6f6e18669e13 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model1/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4635688307af248c7e17f34246b07bf2edf0cfc278de58b08bfe155d9e734f73 +size 132 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model1/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model1/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..419a378c8accc12e94e38a000aab441990512a14 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model1/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:15e69c4f4256a5137c278fb44972fabb9205abcaeaa733624e0e63e7e537f685 +size 25 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model1/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model1/test_data_set_0/input_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..0a318017282ed9c0a76d5724572e92a1866df3ce --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model1/test_data_set_0/input_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa41683544eb3ce75c5000445edcfc63426f4d8c9a40a0d636822586575175ed +size 29 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model1/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model1/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..8a8e2612363da0351b10689aa011f44256cfb521 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model1/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3abcb456e21b5a8c5042514dbaf7c72b1b12b6c5d4c7df031ed5752fed25ffc1 +size 25 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model2/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model2/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..5897cf28d086d968719fff2d270414d9787064b1 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model2/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f2f3017c74d3b1a92f1186001d18b1d11c395252cb5f660b1ff2c721ab90fdd2 +size 132 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model2/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model2/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..419a378c8accc12e94e38a000aab441990512a14 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model2/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15e69c4f4256a5137c278fb44972fabb9205abcaeaa733624e0e63e7e537f685 +size 25 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model2/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model2/test_data_set_0/input_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..a529a91413e0f904c1a4abd6994b5c7fcf935c6a --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model2/test_data_set_0/input_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ed123191690318d9a5da9f1e6dcca495d5a419a82313cff7807be3404ae5ef2 +size 29 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model2/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model2/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..58c27a912dedcb4c167e9c40a4a45bcaabbc8b14 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model2/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:35fafe638847302c7b45977acee1ad86f58e9d3aaae81d3601e28e4dc35489b0 +size 49 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model3/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model3/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..e1f851d1d2c121155880813910cdf2858bcb5a86 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model3/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8b65bb3af8af5658e446e632afbaa04c986878c27e0d96984ce61c13912b8683 +size 132 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model3/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model3/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..419a378c8accc12e94e38a000aab441990512a14 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model3/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15e69c4f4256a5137c278fb44972fabb9205abcaeaa733624e0e63e7e537f685 +size 25 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model3/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model3/test_data_set_0/input_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..88e84a67b3ea272b9a74eea248698e85358005d1 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model3/test_data_set_0/input_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f74264f2ebbc5be664f882a860363533dca70e275aa5d3a01a50ec9fe361cecd +size 37 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model3/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model3/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..6c035da6a7ce297f3b5b09ebf9f38435bcb944cb --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model3/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f7875fc93279927da4b9bdedcd67af21c58b3dab9eaccb3408ad9c011b86327 +size 121 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model4/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model4/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..9834583a415a15b4a996931da67f3efe2c693780 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model4/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f8b0adea1c6c1d7eb38fb06160870556279abdbdf1244395860cac35ced0fc83 +size 136 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model4/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model4/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..419a378c8accc12e94e38a000aab441990512a14 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model4/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:15e69c4f4256a5137c278fb44972fabb9205abcaeaa733624e0e63e7e537f685 +size 25 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model4/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model4/test_data_set_0/input_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..74f87f06fc2f88c687e3e5c8512872277e3ee474 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model4/test_data_set_0/input_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f3b935cf7f69287d48ac5f1c6c3c88ecf6419db8ec7b9142a8d062ba3347359 +size 45 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model4/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model4/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..e7099dc2759aeee8ec4d7e7585a9185dce29fe0d --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_expand_shape_model4/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb9e33fae9dc3761385e9e860a8d0915d27436f8e0ce586bf35daebe4d18ae78 +size 340 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_gradient_of_add/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_gradient_of_add/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..1bf776268e9ee56aef8c2d8a5b8a7f5e386674c4 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_gradient_of_add/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:86ac4a77e14aca10b18ef794257624ec05c45ef647cb468a1b481fe80cd8cfbf +size 264 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_gradient_of_add/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_gradient_of_add/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..eba0fb76defda58baaaa904529711510a14ab135 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_gradient_of_add/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d3592a1813d96f7ac3a4f03d4f9e82bd95ecdb2698b210141b837d0e7976e4cc +size 11 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_gradient_of_add/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_gradient_of_add/test_data_set_0/input_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..e96db45deb00d3a7a7bfe0184dd26b9759554caf --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_gradient_of_add/test_data_set_0/input_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7c0fef2d265fc51c3413cba28cc50fd008e4b138687d6e877d6666655d9ae7d6 +size 11 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_gradient_of_add/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_gradient_of_add/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..f24677ec5d944b1cc40f374db1ff8a3b932ced6e --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_gradient_of_add/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:328a2912aed8be6948896a7e50b9d943b72060bc9e4d3c6a3b57aa54d9fd2908 +size 11 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_gradient_of_add/test_data_set_0/output_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_gradient_of_add/test_data_set_0/output_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..338b1530965d0e1180afc76c8b0bc22e2658bd04 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_gradient_of_add/test_data_set_0/output_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:628e2a70ca237069362593d246970a498fc15a46ca719a11b82fa74f93a50993 +size 15 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_gradient_of_add/test_data_set_0/output_2.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_gradient_of_add/test_data_set_0/output_2.pb new file mode 100644 index 0000000000000000000000000000000000000000..d0afd187fc01e877b572a0a455b950fa04bab640 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_gradient_of_add/test_data_set_0/output_2.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d71c756a6abd3f5f9d3cadf4b4da6711d367f9276e3ab7a671e3b0f2e5bc0b79 +size 15 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_gradient_of_add_and_mul/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_gradient_of_add_and_mul/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..0da4b034dc8491b4189a7fa59d2039ddeffbdb10 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_gradient_of_add_and_mul/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:58dc697eea329aedd0d5292bb2c550001ab9e5e6b71a809c196c62eda48c1c97 +size 297 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_gradient_of_add_and_mul/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_gradient_of_add_and_mul/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..eba0fb76defda58baaaa904529711510a14ab135 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_gradient_of_add_and_mul/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d3592a1813d96f7ac3a4f03d4f9e82bd95ecdb2698b210141b837d0e7976e4cc +size 11 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_gradient_of_add_and_mul/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_gradient_of_add_and_mul/test_data_set_0/input_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..e96db45deb00d3a7a7bfe0184dd26b9759554caf --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_gradient_of_add_and_mul/test_data_set_0/input_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7c0fef2d265fc51c3413cba28cc50fd008e4b138687d6e877d6666655d9ae7d6 +size 11 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_gradient_of_add_and_mul/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_gradient_of_add_and_mul/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..fe9f66d4da8ff423ec6f72a15188c7361f1493b3 --- /dev/null +++ 
b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_gradient_of_add_and_mul/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:93f4a57187b16403ade1f3fa5f682177eedc86250c5cea32bc226f788dcc4d67 +size 11 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_gradient_of_add_and_mul/test_data_set_0/output_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_gradient_of_add_and_mul/test_data_set_0/output_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..91dd84403aab9263c81580fbf9bc8f634fb8481b --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_gradient_of_add_and_mul/test_data_set_0/output_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:64852bd25350c683afc88bdd16390a364ff1d74274fc497604a842d553b90aab +size 15 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_gradient_of_add_and_mul/test_data_set_0/output_2.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_gradient_of_add_and_mul/test_data_set_0/output_2.pb new file mode 100644 index 0000000000000000000000000000000000000000..6c28614043c0df2b56e9d8c1d8d3c419812debb6 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_gradient_of_add_and_mul/test_data_set_0/output_2.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9d7c99be696eb028cfb6b893020721c179e5c071788403856cfa80f30cf3d4f9 +size 15 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model1/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model1/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..d3952f49eb6123aaae789e666a8880d23e108e51 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model1/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e71145a28844efac2df0598f0b96a86cc0a7cacf5901b36d657c09d72075ce19 +size 371 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model1/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model1/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..7cc8de9e5ba4d3b9b6fc8858da202a83fa6db5d7 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model1/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c11d889aedb20c2047b85397d60ac5ce6f94dfee4ae23a3a0d3a26f4b64529a +size 109 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model1/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model1/test_data_set_0/input_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..e3cbb6a4570afdbf3bdc1e5477e74b56b8ebb40d --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model1/test_data_set_0/input_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:69c5942492648da7a01f25995e4d53ce652fecf8f603a163e0a6aef8b0faf304 +size 61 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model1/test_data_set_0/input_2.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model1/test_data_set_0/input_2.pb new file mode 100644 index 
0000000000000000000000000000000000000000..b960c25bedf2e62e0677ae1831156a6299ecf350 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model1/test_data_set_0/input_2.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f2373eca6563c96e303e79c81e1679fc129a5b2f163acaab01afe66c9ad19da2 +size 158 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model1/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model1/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..388e37c90f7075111a76315ff06bccec9caeb15a --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model1/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee606df9b33b12e901190cd4beaa32c4bf77e1bd8bac2903c7f237f8f01a5996 +size 63 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model2/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model2/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..3e410ec763a201061ce1f4f4469355753c65cc29 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model2/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:299cd904657eb43bff6e45a8d8a3a38b1afc60a6c75d2fe8ae7c2cb1d3cfa636 +size 322 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model2/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model2/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..7cc8de9e5ba4d3b9b6fc8858da202a83fa6db5d7 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model2/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c11d889aedb20c2047b85397d60ac5ce6f94dfee4ae23a3a0d3a26f4b64529a +size 109 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model2/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model2/test_data_set_0/input_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..ddec2d5685dfb557875128e4b1c4dab132ae5406 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model2/test_data_set_0/input_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:418b83405698264fef64e949bceac58a118d470621408bf15b9f62fdd0c56af0 +size 109 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model2/test_data_set_0/input_2.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model2/test_data_set_0/input_2.pb new file mode 100644 index 0000000000000000000000000000000000000000..593a352db68a5c0ea3da20783ae8ccc39934f7e6 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model2/test_data_set_0/input_2.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb2213e065346a2e637ebacc5b10d8967442cddc9da901ef8fd026cd79f46aa8 +size 109 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model2/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model2/test_data_set_0/output_0.pb new file mode 100644 index 
0000000000000000000000000000000000000000..85a56e2652c3572e5408cc5857ab6c5a24af21c1 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model2/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d157e1a990c2c64330c37905f021ccb1d3d0d9a7baff71c7ad619f514521731b +size 111 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model3/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model3/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..c97c61b45b5619d928682e7de6422e1ade76e8b7 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model3/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:27daaf8fb59a6174df24959bc2568e66aeee0926357a69d6bfc3ade1b5735668 +size 437 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model3/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model3/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..7cc8de9e5ba4d3b9b6fc8858da202a83fa6db5d7 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model3/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c11d889aedb20c2047b85397d60ac5ce6f94dfee4ae23a3a0d3a26f4b64529a +size 109 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model3/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model3/test_data_set_0/input_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..ddec2d5685dfb557875128e4b1c4dab132ae5406 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model3/test_data_set_0/input_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:418b83405698264fef64e949bceac58a118d470621408bf15b9f62fdd0c56af0 +size 109 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model3/test_data_set_0/input_2.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model3/test_data_set_0/input_2.pb new file mode 100644 index 0000000000000000000000000000000000000000..593a352db68a5c0ea3da20783ae8ccc39934f7e6 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model3/test_data_set_0/input_2.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb2213e065346a2e637ebacc5b10d8967442cddc9da901ef8fd026cd79f46aa8 +size 109 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model3/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model3/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..85a56e2652c3572e5408cc5857ab6c5a24af21c1 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model3/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d157e1a990c2c64330c37905f021ccb1d3d0d9a7baff71c7ad619f514521731b +size 111 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model4/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model4/model.onnx new file mode 100644 index 
0000000000000000000000000000000000000000..8e612227b5f7798162eabab23431f7d24d8765d1 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model4/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d5d67c440a8bd82e0ce6ab766a70052f45e45b8abbdff92b1ab381faf47421e0 +size 219 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model4/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model4/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..7cc8de9e5ba4d3b9b6fc8858da202a83fa6db5d7 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model4/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c11d889aedb20c2047b85397d60ac5ce6f94dfee4ae23a3a0d3a26f4b64529a +size 109 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model4/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model4/test_data_set_0/input_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..ddec2d5685dfb557875128e4b1c4dab132ae5406 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model4/test_data_set_0/input_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:418b83405698264fef64e949bceac58a118d470621408bf15b9f62fdd0c56af0 +size 109 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model4/test_data_set_0/input_2.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model4/test_data_set_0/input_2.pb new file mode 100644 index 0000000000000000000000000000000000000000..593a352db68a5c0ea3da20783ae8ccc39934f7e6 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model4/test_data_set_0/input_2.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb2213e065346a2e637ebacc5b10d8967442cddc9da901ef8fd026cd79f46aa8 +size 109 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model4/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model4/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..e3a4cf7990b14b2664df75acf573f8c65645d152 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model4/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c67e4018fcec362c14ef3bce18cc30dc62b42ba9a36457b272db880890c3fe39 +size 304 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model5/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model5/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..8e25a4f4403e93be0f451e9f96932bff16d255b5 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model5/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d58a967fe27c96880158631097dc181ec97225ea33035f68dd42d5de016b21e5 +size 251 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model5/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model5/test_data_set_0/input_0.pb new file mode 100644 index 
0000000000000000000000000000000000000000..7cc8de9e5ba4d3b9b6fc8858da202a83fa6db5d7 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model5/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c11d889aedb20c2047b85397d60ac5ce6f94dfee4ae23a3a0d3a26f4b64529a +size 109 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model5/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model5/test_data_set_0/input_1.pb new file mode 100644 index 0000000000000000000000000000000000000000..ddec2d5685dfb557875128e4b1c4dab132ae5406 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model5/test_data_set_0/input_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:418b83405698264fef64e949bceac58a118d470621408bf15b9f62fdd0c56af0 +size 109 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model5/test_data_set_0/input_2.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model5/test_data_set_0/input_2.pb new file mode 100644 index 0000000000000000000000000000000000000000..593a352db68a5c0ea3da20783ae8ccc39934f7e6 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model5/test_data_set_0/input_2.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb2213e065346a2e637ebacc5b10d8967442cddc9da901ef8fd026cd79f46aa8 +size 109 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model5/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model5/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..bf48af7b66576c8e22c2b237a123c5bf1451ccb3 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model5/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3346d3d018be3262bbeaaee0fcd1876411ad06a0884643841db67043138c4b83 +size 306 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model6/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model6/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..70ae381c3ce43b44ee502b7be09cdaa0103979b6 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model6/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:240368624bf2dca1f0bb4390a7bfc939e6e4daecb2c179c5d643a1021f2ff80f +size 156 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model6/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model6/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..7cc8de9e5ba4d3b9b6fc8858da202a83fa6db5d7 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model6/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c11d889aedb20c2047b85397d60ac5ce6f94dfee4ae23a3a0d3a26f4b64529a +size 109 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model6/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model6/test_data_set_0/output_0.pb new file mode 100644 index 
0000000000000000000000000000000000000000..97590181c4b64a310998bee3561299e22616083a --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model6/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:59085bb1db26b263d11dba6a7d9d8c892d0300a7dd7fee5ab51889dcf4126bc2 +size 17 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model7/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model7/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..efc319043d21a64086ef3af02c08154be3afa3c3 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model7/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f16d080a63c7e08f916f23e0d6c500a92f20f345632376f5c3a2b05fc2d2b612 +size 209 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model7/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model7/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..26056515251a89d4afb0a3e4c13dc12dfa41a118 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model7/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8836f604417aaf5b3f22f9b695a9a2d827799ae9ab007fa20ae7dda93599b558 +size 206 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model7/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model7/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..d31fb4f30886784b3a64b61d24602c2836a5db0e --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model7/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8510e4e3dcd428afb3c7a915842c8f6d45bc1806c7da81e77b8501119f9a6cf9 +size 109 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model8/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model8/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..e7707b79585bced3296c16c2f23f895f659aa567 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model8/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b911bee0f943336e3e485d3110c8a09959d17bf793a294e1e623f00264499e00 +size 157 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model8/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model8/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..9af372f7a2a74a4914574109df73668cbe714c0c --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model8/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:32772570c50137ce977517422210db8b0f225c135563d9b6ce6c8552794c2ccf +size 9 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model8/test_data_set_0/input_1.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model8/test_data_set_0/input_1.pb new file mode 100644 index 
0000000000000000000000000000000000000000..6bbcc2bb5739af00da55f04aaf26ffe01ac17add --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model8/test_data_set_0/input_1.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec426005ebfc242ed5d11c3f83062fea9fff5a7ea7b47a22b0f80e5e62e5ebcd +size 38 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model8/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model8/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..c9df237917a188a7c1593cd3904d0ec23514aa0b --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sequence_model8/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d0c4b325adcb2005a5f258b9b99b9416d4f8757b5c41f15a0bb139e9f86f532 +size 17 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_shrink/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_shrink/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..1e8bc6679384a2df59d3e2bc6ef148916f830fb0 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_shrink/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c36d1e7550cbb85611ca0fe64c54de5f6f1f0c8bdfe44bb60e47e65f49d51b30 +size 115 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_shrink/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_shrink/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..2c56143cafdeb64a2e902c0316121075e14e0f96 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_shrink/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6fa786d09a1268d7c567db8de831ee56579b1b88729f9ae6f68db1cd4db0d3e6 +size 29 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_shrink/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_shrink/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..8c67b073d174cf4ef85ba3086ca857664f0bc79a --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_shrink/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:01b429957279155bb62da373182d2ddc6f446616540ac77efdf9f25d10fd4f62 +size 29 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sign_model/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sign_model/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..0f29690e8db6cfb0460fee374c39ba2fbbc9d586 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sign_model/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c7e0ee53737e98863dd01e2e471ba1a20285559abc9c75f126b16ce62f9f0ca4 +size 90 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sign_model/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sign_model/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..521daff72f69508a53313904c255da98d22ad67f --- /dev/null +++ 
b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sign_model/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:398a0fc7a1ede855dd7e9b335339df387707637e2bb1c53cdef82494994657f0 +size 37 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sign_model/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sign_model/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..939daf2ad891e6f1ab9713e367159a0303e94073 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_sign_model/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09e2b0525408f6d1f2cb310d3f2ab8deff83827690873a911709c84e2a40ed26 +size 37 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_single_relu_model/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_single_relu_model/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..67182b4193ae137b3e0b453a3eac058f25b74863 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_single_relu_model/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f35b768e076a0cdda9c7dcf3a0f3ecbb849396b2f715fd442c7705c7d1fb473b +size 98 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_single_relu_model/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_single_relu_model/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..022f78462ca025fd7d58c8318bada6d20eaaef19 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_single_relu_model/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf73c8c03bf97a56ec4f29558c4226ea4cea6400f0ca7ef05c538a6b39063c3a +size 19 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_single_relu_model/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_single_relu_model/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..53435adb47df7ab764abe1ffcf16b72319dd3e24 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_single_relu_model/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a3f28d5710fb67ae20bbb33ff19076483edc6e305083b65c4e2774a1993f95c0 +size 19 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_casesensintive_lower/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_casesensintive_lower/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..0ccce556c294646c9dab3277675894268adfa1a0 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_casesensintive_lower/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:604278554856080e7c393b85eeb7390986b34b0c83be6fe174d6614894523ac1 +size 185 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_casesensintive_lower/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_casesensintive_lower/test_data_set_0/input_0.pb new file mode 100644 index 
0000000000000000000000000000000000000000..af9e40c3e33302ee98103cc12c663c3f4f3b0e85 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_casesensintive_lower/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ac5cb8ad56d2b68d7675fcbafe590a34273cb3511a6dbcbe4d935f6f4174abf +size 45 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_casesensintive_lower/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_casesensintive_lower/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..c6fde3a67d4a1e598645d767d3c4d061e0e94375 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_casesensintive_lower/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ffe986459ec6f48f62031d8ff65324dda1d46b03f65ca8cef619f67e947ab696 +size 37 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_casesensintive_nochangecase/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_casesensintive_nochangecase/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..a42c308e097542d2049032c2870ece974994f28a --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_casesensintive_nochangecase/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:721af59c24e1a7a638579db30b65ab4d6509d0664920928238941307aaabba20 +size 153 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_casesensintive_nochangecase/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_casesensintive_nochangecase/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..af9e40c3e33302ee98103cc12c663c3f4f3b0e85 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_casesensintive_nochangecase/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ac5cb8ad56d2b68d7675fcbafe590a34273cb3511a6dbcbe4d935f6f4174abf +size 45 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_casesensintive_nochangecase/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_casesensintive_nochangecase/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..c6fde3a67d4a1e598645d767d3c4d061e0e94375 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_casesensintive_nochangecase/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ffe986459ec6f48f62031d8ff65324dda1d46b03f65ca8cef619f67e947ab696 +size 37 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_casesensintive_upper/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_casesensintive_upper/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..d8716587f8960deb1ddeb6c6c60a4d70f4d1f4bc --- /dev/null +++ 
b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_casesensintive_upper/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:99390c7502fd90ac8583ec17ad119c1998e9fc9904c0b641f96bf86d3b5e5766 +size 185 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_casesensintive_upper/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_casesensintive_upper/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..af9e40c3e33302ee98103cc12c663c3f4f3b0e85 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_casesensintive_upper/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ac5cb8ad56d2b68d7675fcbafe590a34273cb3511a6dbcbe4d935f6f4174abf +size 45 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_casesensintive_upper/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_casesensintive_upper/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..1f43cbe8143bcfd309d8c35f25484046a8d620f8 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_casesensintive_upper/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ed993e21e09aa8709fb39675cea1e9e4444a30006ef1e31155560d4cc2dd15ce +size 37 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_empty_output/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_empty_output/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..444a6aa57658a9b114b2c3c873a9511595d359bb --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_empty_output/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eeb4233d261f80ae3d8bd35748fd7065b663b0a29a1d072ec9a65795fc5b9490 +size 185 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_empty_output/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_empty_output/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..f01070f8b9cc90e8280bb6d82d55022577a9c5d5 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_empty_output/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c6a24ea9cee99e598996eb561c13680ee3d1593f81e13b92fb55089aa1fa284 +size 23 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_empty_output/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_empty_output/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..b4c577a8755d9daebf3e6a20c5c53f8218fc98d7 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_empty_output/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ef7b8d66a93235d48ee45287fb05ecba46d93b4afdb78fcc55f716892cfcd0d +size 9 diff --git 
a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_insensintive_upper_twodim/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_insensintive_upper_twodim/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..4bec97c9ca7539d64015e24218bf017e44bec2c3 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_insensintive_upper_twodim/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a00fc455335e727394d5ea799fd3981115398aa85a11763b261992404eb2703b +size 167 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_insensintive_upper_twodim/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_insensintive_upper_twodim/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..2fc15b6db50d99cd2a430fc339c219572ea348e8 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_insensintive_upper_twodim/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e7f72b47a746d25e8f13765fd26d02d5678fbf676c2ae51449e4811c3e3e7f1f +size 65 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_insensintive_upper_twodim/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_insensintive_upper_twodim/test_data_set_0/output_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..da2f0599714b06c12c594a1b0d6d9d0abb0c60bd --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_monday_insensintive_upper_twodim/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ad3fe4796c271e991b7e46bcd02bb30f8d7390c2a6c405e64271899ad496fb3 +size 49 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_nostopwords_nochangecase/model.onnx b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_nostopwords_nochangecase/model.onnx new file mode 100644 index 0000000000000000000000000000000000000000..3d09dbc982cd44e7a9fdf032d665f9849784c512 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_nostopwords_nochangecase/model.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d59a71ed61f4d50d3d83bf1f382b2aed11611ab5353749f9e53c65c3486846c +size 128 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_nostopwords_nochangecase/test_data_set_0/input_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_nostopwords_nochangecase/test_data_set_0/input_0.pb new file mode 100644 index 0000000000000000000000000000000000000000..03f4e1303622f8cd1734bab2bdcf608d0b8738f3 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_nostopwords_nochangecase/test_data_set_0/input_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b811eae0cf9a95e015b3f667c90b72f98cfce726bbc0ae83a7b4635e4d1b519c +size 24 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_nostopwords_nochangecase/test_data_set_0/output_0.pb b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_nostopwords_nochangecase/test_data_set_0/output_0.pb new 
file mode 100644 index 0000000000000000000000000000000000000000..6a1714d914d33a0235851f03330a1e73642d4c0c --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/data/simple/test_strnorm_model_nostopwords_nochangecase/test_data_set_0/output_0.pb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3618c65a33d5c66911d1fdf98217f1e98426dc8b3612e7bd2d6082f8d690a906 +size 24 diff --git a/MLPY/Lib/site-packages/onnx/backend/test/loader/__init__.py b/MLPY/Lib/site-packages/onnx/backend/test/loader/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..eb7c5b0bf99dc1ac367e965ee43907cc5e22b06d --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/loader/__init__.py @@ -0,0 +1,61 @@ +# Copyright (c) ONNX Project Contributors + +# SPDX-License-Identifier: Apache-2.0 + +import json +import os +from typing import List, Optional + +from onnx.backend.test.case.test_case import TestCase + +DATA_DIR = os.path.join( + os.path.dirname(os.path.realpath(os.path.dirname(__file__))), "data" +) + + +def load_model_tests( + data_dir: str = DATA_DIR, + kind: Optional[str] = None, +) -> List[TestCase]: + """Load model test cases from on-disk data files.""" + supported_kinds = os.listdir(data_dir) + if kind not in supported_kinds: + raise ValueError(f"kind must be one of {supported_kinds}") + + testcases = [] + + kind_dir = os.path.join(data_dir, kind) + for test_name in os.listdir(kind_dir): + case_dir = os.path.join(kind_dir, test_name) + # skip the non-dir files, such as generated __init__.py. + rtol = 1e-3 + atol = 1e-7 + if not os.path.isdir(case_dir): + continue + if os.path.exists(os.path.join(case_dir, "model.onnx")): + url = None + model_name = test_name[len("test_") :] + model_dir: Optional[str] = case_dir + else: + with open(os.path.join(case_dir, "data.json")) as f: + data = json.load(f) + url = data["url"] + model_name = data["model_name"] + rtol = data.get("rtol", 1e-3) + atol = data.get("atol", 1e-7) + model_dir = None + testcases.append( + TestCase( + name=test_name, + url=url, + model_name=model_name, + model_dir=model_dir, + model=None, + data_sets=None, + kind=kind, + rtol=rtol, + atol=atol, + ) + ) + + return testcases diff --git a/MLPY/Lib/site-packages/onnx/backend/test/loader/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/backend/test/loader/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..972b1203394abae8eaacc45f5b91f501b0c442d4 Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/backend/test/loader/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/backend/test/report/__init__.py b/MLPY/Lib/site-packages/onnx/backend/test/report/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1843a96338e034e373618a3db3b70a6aff8285be --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/report/__init__.py @@ -0,0 +1,44 @@ +# Copyright (c) ONNX Project Contributors + +# SPDX-License-Identifier: Apache-2.0 + +from typing import Any, Dict, Sequence + +import _pytest +import pytest + +from onnx.backend.test.report.coverage import Coverage + +_coverage = Coverage() +_marks: Dict[str, Sequence[Any]] = {} + + +def _add_mark(mark: Any, bucket: str) -> None: + proto = mark.args[0] + if isinstance(proto, list): + assert len(proto) == 1 + proto = proto[0] + if proto is not None: + _coverage.add_proto(proto, bucket, mark.args[1] == "RealModel") + + +def pytest_runtest_call(item: _pytest.nodes.Item) -> None: + mark = 
item.get_closest_marker("onnx_coverage") + if mark: + assert item.nodeid not in _marks + _marks[item.nodeid] = mark + + +def pytest_runtest_logreport(report: Any) -> None: + if report.when == "call" and report.outcome == "passed" and report.nodeid in _marks: + mark = _marks[report.nodeid] + _add_mark(mark, "passed") + + +@pytest.hookimpl(trylast=True) # type: ignore +def pytest_terminal_summary( + terminalreporter: _pytest.terminal.TerminalReporter, exitstatus: int +) -> None: + for mark in _marks.values(): + _add_mark(mark, "loaded") + _coverage.report_text(terminalreporter) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/report/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/backend/test/report/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..839b32e874aed2db9115cd9628e452806291793f Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/backend/test/report/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/backend/test/report/__pycache__/base.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/backend/test/report/__pycache__/base.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9867f8e19e769de93c70a46ebaf3f262c93389c Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/backend/test/report/__pycache__/base.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/backend/test/report/__pycache__/coverage.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/backend/test/report/__pycache__/coverage.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6529defe202d255acad6c37213ce9de7f35dc96f Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/backend/test/report/__pycache__/coverage.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/backend/test/report/base.py b/MLPY/Lib/site-packages/onnx/backend/test/report/base.py new file mode 100644 index 0000000000000000000000000000000000000000..b59683fb9bd5a6ef82bb98d47aa550c5728ebd37 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/report/base.py @@ -0,0 +1,7 @@ +# Copyright (c) ONNX Project Contributors + +# SPDX-License-Identifier: Apache-2.0 + + +class ReporterBase: + pass diff --git a/MLPY/Lib/site-packages/onnx/backend/test/report/coverage.py b/MLPY/Lib/site-packages/onnx/backend/test/report/coverage.py new file mode 100644 index 0000000000000000000000000000000000000000..ca584cc63d8981d464c8a317b877c08d45fc816e --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/report/coverage.py @@ -0,0 +1,264 @@ +# Copyright (c) ONNX Project Contributors + +# SPDX-License-Identifier: Apache-2.0 + +import csv +import datetime +import os +from collections import OrderedDict, defaultdict +from typing import IO, Any, Dict, List, Optional, Set + +from tabulate import tabulate + +import onnx +from onnx import GraphProto, defs, helper + +_all_schemas = defs.get_all_schemas() + + +class AttrCoverage: + def __init__(self) -> None: + self.name: Optional[str] = None + self.values: Set[str] = set() + + def add(self, attr: onnx.AttributeProto) -> None: + assert self.name in {None, attr.name} + self.name = attr.name + value = helper.get_attribute_value(attr) + # Turn list into tuple so we can put it into set + # As value can be string, don't blindly turn `collections.Iterable` + # into tuple. 
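+ # For example, an ints attribute [1, 2, 3] is recorded as str((1, 2, 3)), so each distinct attribute value contributes exactly one entry to the set.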
+ if isinstance(value, list): + value = tuple(value) + self.values.add(str(value)) + + +class NodeCoverage: + def __init__(self) -> None: + self.op_type: Optional[str] = None + self.attr_coverages: Dict[str, AttrCoverage] = defaultdict(AttrCoverage) + + def add(self, node: onnx.NodeProto) -> None: + assert self.op_type in [None, node.op_type] + + if self.op_type is None: + self.op_type = node.op_type + assert self.op_type is not None + self.schema = defs.get_schema(self.op_type, domain=node.domain) + + for attr in node.attribute: + self.attr_coverages[attr.name].add(attr) + + +class ModelCoverage: + def __init__(self) -> None: + self.name: Optional[str] = None + self.graph: Optional[GraphProto] = None + self.node_coverages: Dict[str, NodeCoverage] = defaultdict(NodeCoverage) + + def add(self, model: onnx.ModelProto) -> None: + assert self.name in [None, model.graph.name] + + if self.name is None: + self.name = model.graph.name + assert self.name is not None + self.graph = model.graph + + for node in model.graph.node: + self.node_coverages[node.op_type].add(node) + + +class Coverage: + def __init__(self) -> None: + self.buckets: Dict[str, Dict[str, NodeCoverage]] = { + "loaded": defaultdict(NodeCoverage), + "passed": defaultdict(NodeCoverage), + } + self.models: Dict[str, Dict[str, ModelCoverage]] = { + "loaded": defaultdict(ModelCoverage), + "passed": defaultdict(ModelCoverage), + } + + def add_node(self, node: onnx.NodeProto, bucket: str) -> None: + self.buckets[bucket][node.op_type].add(node) + + def add_graph(self, graph: onnx.GraphProto, bucket: str) -> None: + for node in graph.node: + self.add_node(node, bucket) + + def add_model(self, model: onnx.ModelProto, bucket: str, is_model: bool) -> None: + self.add_graph(model.graph, bucket) + # Only add model if name does not start with test + if is_model: + self.models[bucket][model.graph.name].add(model) + + def add_proto(self, proto: onnx.ModelProto, bucket: str, is_model: bool) -> None: + assert isinstance(proto, onnx.ModelProto) + self.add_model(proto, bucket, is_model) + + def report_text(self, writer: IO[str]) -> None: + writer.write("---------- onnx coverage: ----------\n") + writer.write( + f"Operators (passed/loaded/total): {len(self.buckets['passed'])}/{len(self.buckets['loaded'])}/{len(_all_schemas)}\n" + ) + writer.write("------------------------------------\n") + + rows = [] + passed = [] + all_ops: List[str] = [] + experimental: List[str] = [] + for op_cov in self.buckets["passed"].values(): + covered_attrs = [ + f"{attr_cov.name}: {len(attr_cov.values)}" + for attr_cov in op_cov.attr_coverages.values() + ] + uncovered_attrs = [ + f"{attr}: 0" + for attr in op_cov.schema.attributes + if attr not in op_cov.attr_coverages + ] + attrs = sorted(covered_attrs) + sorted(uncovered_attrs) + if attrs: + attrs_column = os.linesep.join(attrs) + else: + attrs_column = "No attributes" + rows.append([op_cov.op_type, attrs_column]) + passed.append(op_cov.op_type) + writer.write( + tabulate( + rows, + headers=["Operator", "Attributes\n(name: #values)"], + tablefmt="plain", + ) + ) + writer.write("\n") + if os.environ.get("CSVDIR") is not None: + self.report_csv(all_ops, passed, experimental) + + # This function writes the coverage report to a set of CSV files for + # the Backend Scoreboard (onnx.ai/backend-scoreboard). To enable this + # feature, set a CSVDIR environment variable locally with the directory + # where you would like the files to be written, relative to the + # directory from which you're running pytest. 
The format of the CSV + # files is a column naming each op or model and columns for each + # backend with indications of whether the tests passed or failed for + # each row. + def report_csv( + self, all_ops: List[str], passed: List[Optional[str]], experimental: List[str] + ) -> None: + for schema in _all_schemas: + if schema.domain in {"", "ai.onnx"}: + all_ops.append(schema.name) + if schema.support_level == defs.OpSchema.SupportType.EXPERIMENTAL: + experimental.append(schema.name) + all_ops.sort() + nodes_path = os.path.join( + str(os.environ.get("CSVDIR")), "nodes.csv" # type: ignore + ) # type: ignore + models_path = os.path.join( + str(os.environ.get("CSVDIR")), "models.csv" # type: ignore + ) # type: ignore + existing_nodes: OrderedDict[str, Dict[str, str]] = OrderedDict() + existing_models: OrderedDict[str, Dict[str, str]] = OrderedDict() + frameworks: List[str] = [] + if os.path.isfile(nodes_path): + with open(nodes_path) as nodes_file: + reader = csv.DictReader(nodes_file) + assert reader.fieldnames + frameworks = list(reader.fieldnames) + for row in reader: + op = row["Op"] + del row["Op"] + existing_nodes[str(op)] = row + if os.path.isfile(models_path): + with open(models_path) as models_file: + reader = csv.DictReader(models_file) + for row in reader: + model = row["Model"] + del row["Model"] + existing_models[str(model)] = row + backend = os.environ.get("BACKEND") + other_frameworks = frameworks[1:] + with open(nodes_path, "w") as nodes_file: + if "Op" not in frameworks: + frameworks.append("Op") + if backend not in frameworks: + frameworks.append(str(backend)) + else: + other_frameworks.remove(str(backend)) + node_writer = csv.DictWriter(nodes_file, fieldnames=frameworks) + node_writer.writeheader() + for node in all_ops: + node_name = node + if node in experimental: + node_name = node + " (Experimental)" + if node_name not in existing_nodes: + # Also add Skipped for other nodes + existing_nodes[node_name] = OrderedDict() + for other_framework in other_frameworks: + existing_nodes[node_name][other_framework] = "Skipped!" + if node in passed: + existing_nodes[node_name][str(backend)] = "Passed!" + else: + existing_nodes[node_name][str(backend)] = "Failed!" + summaries: Dict[Any, Any] = {} + if "Summary" in existing_nodes: + summaries = existing_nodes["Summary"] + del existing_nodes["Summary"] + summaries[str(backend)] = f"{len(passed)}/{len(all_ops)} node tests passed" + summaries["Op"] = "Summary" + for node in existing_nodes: + existing_nodes[node]["Op"] = str(node) + node_writer.writerow(existing_nodes[node]) + node_writer.writerow(summaries) + with open(models_path, "w") as models_file: + frameworks[0] = "Model" + model_writer = csv.DictWriter(models_file, fieldnames=frameworks) + model_writer.writeheader() + # Consider both buckets + num_models = 0 + for bucket in self.models: + for model in self.models[bucket]: # type: ignore + # Both analyze and run the model on the backend + num_covered = 0 + for node in self.models[bucket][model].node_coverages: + if node in passed: + num_covered += 1 + # TODO: Identify if there are models that are being + # skipped/not loaded, but that are in other frameworks + msg = "Passed!" + if bucket == "loaded": + if model in self.models["passed"]: + continue + msg = "Failed!" + num_models += 1 + if model not in existing_models: + # Also add Skipped for other models + existing_models[model] = OrderedDict() + for other_framework in other_frameworks: + existing_models[model][other_framework] = "Skipped!" 
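+ # The cell for this backend is the human-readable summary built just below, e.g. "12/14 nodes covered: Passed!".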
+ existing_models[model][str(backend)] = str( + f"{num_covered}/{len(self.models[bucket][model].node_coverages)} nodes covered: {msg}" + ) + summaries.clear() + if "Summary" in existing_models: + summaries = existing_models["Summary"] + del existing_models["Summary"] + if str(backend) in summaries: + del summaries[str(backend)] + summaries[ + str(backend) + ] = f"{len(self.models['passed'])}/{num_models} model tests passed" + summaries["Model"] = "Summary" + for model in existing_models: # type: ignore + existing_models[model]["Model"] = model + model_writer.writerow(existing_models[model]) + model_writer.writerow(summaries) + with open( + os.path.join(str(os.environ.get("CSVDIR")), "metadata.csv"), # type: ignore + "w", + ) as metadata_file: # type: ignore + metadata_writer = csv.writer(metadata_file) + metadata_writer.writerow( + ["Latest Update", datetime.datetime.now().isoformat().replace("T", " ")] + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/runner/__init__.py b/MLPY/Lib/site-packages/onnx/backend/test/runner/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9931da44f747db36cf7b2435ec9fc97f70583584 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/runner/__init__.py @@ -0,0 +1,527 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +from __future__ import annotations + +import functools +import glob +import os +import re +import shutil +import sys +import tarfile +import tempfile +import time +import unittest +from collections import defaultdict +from typing import Any, Callable, Iterable, Pattern, Sequence +from urllib.request import urlretrieve + +import numpy as np + +import onnx +import onnx.reference +from onnx import ONNX_ML, ModelProto, NodeProto, TypeProto, ValueInfoProto, numpy_helper +from onnx.backend.base import Backend +from onnx.backend.test.case.test_case import TestCase +from onnx.backend.test.loader import load_model_tests +from onnx.backend.test.runner.item import TestItem + + +class BackendIsNotSupposedToImplementIt(unittest.SkipTest): + pass + + +def retry_execute(times: int) -> Callable[[Callable[..., Any]], Callable[..., Any]]: + assert times >= 1 + + def wrapper(func: Callable[..., Any]) -> Callable[..., Any]: + @functools.wraps(func) + def wrapped(*args: Any, **kwargs: Any) -> Any: + for i in range(1, times + 1): + try: + return func(*args, **kwargs) + except Exception: + print(f"{i} times tried") + if i == times: + raise + time.sleep(5 * i) + + return wrapped + + return wrapper + + +class Runner: + def __init__( + self, + backend: type[Backend], + parent_module: str | None = None, + test_kwargs: dict | None = None, + ) -> None: + self.backend = backend + self._parent_module = parent_module + self._include_patterns: set[Pattern[str]] = set() + self._exclude_patterns: set[Pattern[str]] = set() + self._xfail_patterns: set[Pattern[str]] = set() + self._test_kwargs: dict = test_kwargs or {} + + # This is the source of the truth of all test functions. + # Properties `test_cases`, `test_suite` and `tests` will be + # derived from it. 
+ # {category: {name: func}} + self._test_items: dict[str, dict[str, TestItem]] = defaultdict(dict) + + for rt in load_model_tests(kind="node"): + self._add_model_test(rt, "Node") + + for rt in load_model_tests(kind="real"): + self._add_model_test(rt, "Real") + + for rt in load_model_tests(kind="simple"): + self._add_model_test(rt, "Simple") + + for ct in load_model_tests(kind="pytorch-converted"): + self._add_model_test(ct, "PyTorchConverted") + + for ot in load_model_tests(kind="pytorch-operator"): + self._add_model_test(ot, "PyTorchOperator") + + def _get_test_case(self, name: str) -> type[unittest.TestCase]: + test_case = type(str(name), (unittest.TestCase,), {}) + if self._parent_module: + test_case.__module__ = self._parent_module + return test_case + + def include(self, pattern: str) -> Runner: + self._include_patterns.add(re.compile(pattern)) + return self + + def exclude(self, pattern: str) -> Runner: + self._exclude_patterns.add(re.compile(pattern)) + return self + + def xfail(self, pattern: str) -> Runner: + self._xfail_patterns.add(re.compile(pattern)) + return self + + def enable_report(self) -> Runner: + import pytest + + for category, items_map in self._test_items.items(): + for item in items_map.values(): + item.func = pytest.mark.onnx_coverage(item.proto, category)(item.func) + return self + + @property + def _filtered_test_items(self) -> dict[str, dict[str, TestItem]]: + filtered: dict[str, dict[str, TestItem]] = {} + for category, items_map in self._test_items.items(): + filtered[category] = {} + for name, item in items_map.items(): + if self._include_patterns and ( + not any(include.search(name) for include in self._include_patterns) + ): + item.func = unittest.skip("no matched include pattern")(item.func) + for exclude in self._exclude_patterns: + if exclude.search(name): + item.func = unittest.skip( + f'matched exclude pattern "{exclude.pattern}"' + )(item.func) + for xfail in self._xfail_patterns: + if xfail.search(name): + item.func = unittest.expectedFailure(item.func) + filtered[category][name] = item + return filtered + + @property + def test_cases(self) -> dict[str, type[unittest.TestCase]]: + """List of test cases to be applied on the parent scope + Example usage: + globals().update(BackendTest(backend).test_cases) + """ + test_cases = {} + for category, items_map in self._filtered_test_items.items(): + test_case_name = f"OnnxBackend{category}Test" + test_case = self._get_test_case(test_case_name) + for name, item in sorted(items_map.items()): + setattr(test_case, name, item.func) + test_cases[test_case_name] = test_case + return test_cases + + @property + def test_suite(self) -> unittest.TestSuite: + """TestSuite that can be run by TestRunner + Example usage: + unittest.TextTestRunner().run(BackendTest(backend).test_suite) + """ + suite = unittest.TestSuite() + for case in sorted( + self.test_cases.values(), key=lambda cl: cl.__class__.__name__ + ): + suite.addTests(unittest.defaultTestLoader.loadTestsFromTestCase(case)) + return suite + + # For backward compatibility (we used to expose `.tests`) + @property + def tests(self) -> type[unittest.TestCase]: + """One single unittest.TestCase that hosts all the test functions + Example usage: + onnx_backend_tests = BackendTest(backend).tests + """ + tests = self._get_test_case("OnnxBackendTest") + for items_map in sorted( + self._filtered_test_items.values(), key=lambda cl: cl.__class__.__name__ + ): + for name, item in sorted(items_map.items()): + setattr(tests, name, item.func) + return tests + + @classmethod + 
def assert_similar_outputs( + cls, + ref_outputs: Sequence[Any], + outputs: Sequence[Any], + rtol: float, + atol: float, + model_dir: str | None = None, + ) -> None: + try: + np.testing.assert_equal(len(outputs), len(ref_outputs)) + except TypeError as e: + raise TypeError( + f"Unable to compare expected type {type(ref_outputs)} " + f"and runtime type {type(outputs)} (known test={model_dir or '?'!r})" + ) from e + for i in range(len(outputs)): + if isinstance(outputs[i], (list, tuple)): + if not isinstance(ref_outputs[i], (list, tuple)): + raise AssertionError( + f"Unexpected type {type(outputs[i])} for outputs[{i}]. Expected " + f"type is {type(ref_outputs[i])} (known test={model_dir or '?'!r})." + ) + for j in range(len(outputs[i])): + cls.assert_similar_outputs( + ref_outputs[i][j], + outputs[i][j], + rtol, + atol, + model_dir=model_dir, + ) + else: + if not np.issubdtype(ref_outputs[i].dtype, np.number): + if ref_outputs[i].tolist() != outputs[i].tolist(): + raise AssertionError(f"{ref_outputs[i]} != {outputs[i]}") + continue + np.testing.assert_equal(outputs[i].dtype, ref_outputs[i].dtype) + if ref_outputs[i].dtype == object: # type: ignore[attr-defined] + np.testing.assert_array_equal(outputs[i], ref_outputs[i]) + else: + np.testing.assert_allclose( + outputs[i], ref_outputs[i], rtol=rtol, atol=atol + ) + + @classmethod + @retry_execute(3) + def download_model( + cls, model_test: TestCase, model_dir: str, models_dir: str + ) -> None: + # On Windows, NamedTemporaryFile can not be opened for a + # second time + download_file = tempfile.NamedTemporaryFile(delete=False) + try: + download_file.close() + assert model_test.url + print( + f"Start downloading model {model_test.model_name} from {model_test.url}" + ) + urlretrieve(model_test.url, download_file.name) + print("Done") + with tarfile.open(download_file.name) as t: + t.extractall(models_dir) + except Exception as e: + print(f"Failed to prepare data for model {model_test.model_name}: {e}") + raise + finally: + os.remove(download_file.name) + + @classmethod + def prepare_model_data(cls, model_test: TestCase) -> str: + onnx_home = os.path.expanduser( + os.getenv("ONNX_HOME", os.path.join("~", ".onnx")) + ) + models_dir = os.getenv("ONNX_MODELS", os.path.join(onnx_home, "models")) + model_dir: str = os.path.join(models_dir, model_test.model_name) + if not os.path.exists(os.path.join(model_dir, "model.onnx")): + if os.path.exists(model_dir): + bi = 0 + while True: + dest = f"{model_dir}.old.{bi}" + if os.path.exists(dest): + bi += 1 + continue + shutil.move(model_dir, dest) + break + os.makedirs(model_dir) + + cls.download_model( + model_test=model_test, model_dir=model_dir, models_dir=models_dir + ) + return model_dir + + def _add_test( + self, + category: str, + test_name: str, + test_func: Callable[..., Any], + report_item: list[ModelProto | NodeProto | None], + devices: Iterable[str] = ("CPU", "CUDA"), + **kwargs: Any, + ) -> None: + # We don't prepend the 'test_' prefix to improve greppability + if not test_name.startswith("test_"): + raise ValueError(f"Test name must start with test_: {test_name}") + + def add_device_test(device: str) -> None: + device_test_name = f"{test_name}_{device.lower()}" + if device_test_name in self._test_items[category]: + raise ValueError( + f'Duplicated test name "{device_test_name}" in category "{category}"' + ) + + @unittest.skipIf( # type: ignore + not self.backend.supports_device(device), + f"Backend doesn't support device {device}", + ) + @functools.wraps(test_func) + def device_test_func(*args: 
Any, **device_test_kwarg: Any) -> Any: + try: + merged_kwargs = {**kwargs, **device_test_kwarg} + return test_func(*args, device, **merged_kwargs) + except BackendIsNotSupposedToImplementIt as e: + # hacky verbose reporting + if "-v" in sys.argv or "--verbose" in sys.argv: + print(f"Test {device_test_name} is effectively skipped: {e}") + + self._test_items[category][device_test_name] = TestItem( + device_test_func, report_item + ) + + for device in devices: + add_device_test(device) + + @staticmethod + def generate_dummy_data( + x: ValueInfoProto, seed: int = 0, name: str = "", random: bool = False + ) -> np.ndarray: + """Generates a random tensor based on the input definition.""" + if not x.type.tensor_type: + raise NotImplementedError( + f"Input expected to have tensor type. " + f"Unable to generate random data for model {name!r} and input {x}." + ) + if x.type.tensor_type.elem_type != 1: + raise NotImplementedError( + f"Currently limited to float tensors. " + f"Unable to generate random data for model {name!r} and input {x}." + ) + shape = tuple( + d.dim_value if d.HasField("dim_value") else 1 + for d in x.type.tensor_type.shape.dim + ) + if random: + gen = np.random.default_rng(seed=seed) + return gen.random(shape, np.float32) + n = np.prod(shape) + return (np.arange(n).reshape(shape) / n).astype(np.float32) + + def _add_model_test(self, model_test: TestCase, kind: str) -> None: + # the model is loaded at runtime; note that it may never be + # loaded at all if the test is skipped + model_marker: list[ModelProto | NodeProto | None] = [None] + + def run(test_self: Any, device: str, **kwargs) -> None: + if model_test.url is not None and model_test.url.startswith( + "onnx/backend/test/data/light/" + ): + # testing local files + model_pb_path = os.path.normpath( + os.path.join( + os.path.dirname(__file__), + "..", + "..", + "..", + "..", + model_test.url, + ) + ) + if not os.path.exists(model_pb_path): + raise FileNotFoundError(f"Unable to find model {model_pb_path!r}.") + onnx_home = os.path.expanduser( + os.getenv("ONNX_HOME", os.path.join("~", ".onnx")) + ) + models_dir = os.getenv( + "ONNX_MODELS", os.path.join(onnx_home, "models", "light") + ) + model_dir: str = os.path.join(models_dir, model_test.model_name) + if not os.path.exists(model_dir): + os.makedirs(model_dir) + use_dummy = True + else: + if model_test.model_dir is None: + model_dir = self.prepare_model_data(model_test) + else: + model_dir = model_test.model_dir + model_pb_path = os.path.join(model_dir, "model.onnx") + use_dummy = False + + if not ONNX_ML and "ai_onnx_ml" in model_dir: + return + + model = onnx.load(model_pb_path) + model_marker[0] = model + if ( + hasattr(self.backend, "is_compatible") + and callable(self.backend.is_compatible) + and not self.backend.is_compatible(model) + ): + raise unittest.SkipTest("Not compatible with backend") + + prepared_model = self.backend.prepare(model, device, **kwargs) + assert prepared_model is not None + + if use_dummy: + # When the backend test goes through a test involving a + # model stored in onnx/backend/test/data/light, + # this function generates the expected output from a + # ReferenceEvaluator run with random inputs. + # A couple of models include many Conv operators and the + # Python implementation is slow (such as test_bvlc_alexnet). 
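+ # Concretely, the block below writes deterministic dummy inputs to test_data_set_0/input_*.pb, reuses precomputed <prefix>_output_<i>.pb files stored next to model.onnx when they all exist, and otherwise runs onnx.reference.ReferenceEvaluator once and caches its outputs as output_*.pb.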
+ with open(model_pb_path, "rb") as f: + onx = onnx.load(f) + + test_data_set = os.path.join(model_dir, "test_data_set_0") + if not os.path.exists(test_data_set): + os.mkdir(test_data_set) + feeds = {} + inits = {i.name for i in onx.graph.initializer} + n_input = 0 + inputs = [] + for i in range(len(onx.graph.input)): + if onx.graph.input[i].name in inits: + continue + name = os.path.join(test_data_set, f"input_{n_input}.pb") + inputs.append(name) + n_input += 1 + x = onx.graph.input[i] + value = self.generate_dummy_data( + x, seed=0, name=model_test.model_name, random=False + ) + feeds[x.name] = value + with open(name, "wb") as f: + f.write(onnx.numpy_helper.from_array(value).SerializeToString()) + + # load expected outputs if any are available + prefix = os.path.splitext(model_pb_path)[0] + expected_outputs = [] + for i in range(len(onx.graph.output)): + name = f"{prefix}_output_{i}.pb" + if os.path.exists(name): + expected_outputs.append(name) + continue + expected_outputs = None + break + + if expected_outputs is None: + ref = onnx.reference.ReferenceEvaluator(onx) + outputs = ref.run(None, feeds) + for i, o in enumerate(outputs): + name = os.path.join(test_data_set, f"output_{i}.pb") + with open(name, "wb") as f: + f.write(onnx.numpy_helper.from_array(o).SerializeToString()) + else: + for i, o in enumerate(expected_outputs): + name = os.path.join(test_data_set, f"output_{i}.pb") + shutil.copy(o, name) + else: + # TODO after converting all npz files to protobuf, we can delete this. + for test_data_npz in glob.glob( + os.path.join(model_dir, "test_data_*.npz") + ): + test_data = np.load(test_data_npz, encoding="bytes") + inputs = list(test_data["inputs"]) + outputs = list(prepared_model.run(inputs)) + ref_outputs = tuple( + np.array(x) if not isinstance(x, (list, dict)) else x + for x in test_data["outputs"] + ) + self.assert_similar_outputs( + ref_outputs, + outputs, + rtol=model_test.rtol, + atol=model_test.atol, + model_dir=model_dir, + ) + + for test_data_dir in glob.glob(os.path.join(model_dir, "test_data_set*")): + inputs = [] + inputs_num = len(glob.glob(os.path.join(test_data_dir, "input_*.pb"))) + for i in range(inputs_num): + input_file = os.path.join(test_data_dir, f"input_{i}.pb") + self._load_proto(input_file, inputs, model.graph.input[i].type) + ref_outputs = [] + ref_outputs_num = len( + glob.glob(os.path.join(test_data_dir, "output_*.pb")) + ) + for i in range(ref_outputs_num): + output_file = os.path.join(test_data_dir, f"output_{i}.pb") + self._load_proto( + output_file, ref_outputs, model.graph.output[i].type + ) + outputs = list(prepared_model.run(inputs)) + self.assert_similar_outputs( + ref_outputs, + outputs, + rtol=model_test.rtol, + atol=model_test.atol, + model_dir=model_dir, + ) + + if model_test.name in self._test_kwargs: + self._add_test( + kind + "Model", + model_test.name, + run, + model_marker, + **self._test_kwargs[model_test.name], + ) + else: + self._add_test(kind + "Model", model_test.name, run, model_marker) + + def _load_proto( + self, + proto_filename: str, + target_list: list[np.ndarray | list[Any]], + model_type_proto: TypeProto, + ) -> None: + with open(proto_filename, "rb") as f: + protobuf_content = f.read() + if model_type_proto.HasField("sequence_type"): + sequence = onnx.SequenceProto() + sequence.ParseFromString(protobuf_content) + target_list.append(numpy_helper.to_list(sequence)) + elif model_type_proto.HasField("tensor_type"): + tensor = onnx.TensorProto() + tensor.ParseFromString(protobuf_content) + t = numpy_helper.to_array(tensor) + 
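# numpy_helper.to_array returns a dense numpy.ndarray for a TensorProto; the assert below guards the conversion result before it is appended. +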
assert isinstance(t, np.ndarray) + target_list.append(t) + elif model_type_proto.HasField("optional_type"): + optional = onnx.OptionalProto() + optional.ParseFromString(protobuf_content) + target_list.append(numpy_helper.to_optional(optional)) # type: ignore[arg-type] + else: + print( + "Loading proto of that specific type (Map/Sparse Tensor) is currently not supported" + ) diff --git a/MLPY/Lib/site-packages/onnx/backend/test/runner/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/backend/test/runner/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3fdc481efe37cec954a1f10744af06344dea3ae3 Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/backend/test/runner/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/backend/test/runner/__pycache__/item.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/backend/test/runner/__pycache__/item.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a0b84068a880f6e4c7694f4151ea4aab5d89f45 Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/backend/test/runner/__pycache__/item.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/backend/test/runner/item.py b/MLPY/Lib/site-packages/onnx/backend/test/runner/item.py new file mode 100644 index 0000000000000000000000000000000000000000..1ec362b2e01176fa29704e17818c0bbb5b8e9b12 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/runner/item.py @@ -0,0 +1,17 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import dataclasses +from typing import Any, Callable, List, Optional, Union + +from onnx import ModelProto, NodeProto + +# A container that hosts the test function and the associated +# test item (ModelProto) + + +@dataclasses.dataclass +class TestItem: + func: Callable[..., Any] + proto: List[Optional[Union[ModelProto, NodeProto]]] diff --git a/MLPY/Lib/site-packages/onnx/backend/test/stat_coverage.py b/MLPY/Lib/site-packages/onnx/backend/test/stat_coverage.py new file mode 100644 index 0000000000000000000000000000000000000000..aeb07b8a01fc4cb16bd6de5fa2ebf3c747979a89 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/backend/test/stat_coverage.py @@ -0,0 +1,291 @@ +#!/usr/bin/env python + +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import os +from typing import IO, Any, Dict, List, Sequence + +from onnx import AttributeProto, defs, load +from onnx.backend.test.case import collect_snippets +from onnx.backend.test.loader import load_model_tests +from onnx.backend.test.runner import Runner + + +def is_ml(schemas: Sequence[defs.OpSchema]) -> bool: + return any(s.domain == "ai.onnx.ml" for s in schemas) + + +def gen_outlines(f: IO[Any], ml: bool) -> None: + f.write("# Test Coverage Report") + if ml: + f.write(" (ONNX-ML Operators)\n") + else: + f.write(" (ONNX Core Operators)\n") + f.write("## Outlines\n") + f.write("* [Node Test Coverage](#node-test-coverage)\n") + f.write("* [Model Test Coverage](#model-test-coverage)\n") + f.write("* [Overall Test Coverage](#overall-test-coverage)\n") + + +common_covered: Sequence[str] = [] +experimental_covered: Sequence[str] = [] + + +def gen_node_test_coverage( + schemas: Sequence[defs.OpSchema], f: IO[Any], ml: bool +) -> None: + global common_covered # noqa: PLW0603 + global experimental_covered # noqa: PLW0603 + generators = set( + { + "Multinomial", + "RandomNormal", + "RandomNormalLike", + "RandomUniform", + "RandomUniformLike", + 
} + ) + node_tests = collect_snippets() + common_covered = sorted( + s.name + for s in schemas + if s.name in node_tests + and s.support_level == defs.OpSchema.SupportType.COMMON + and (s.domain == "ai.onnx.ml") == ml + ) + common_no_cover = sorted( + s.name + for s in schemas + if s.name not in node_tests + and s.support_level == defs.OpSchema.SupportType.COMMON + and (s.domain == "ai.onnx.ml") == ml + ) + common_generator = sorted(name for name in common_no_cover if name in generators) + experimental_covered = sorted( + s.name + for s in schemas + if s.name in node_tests + and s.support_level == defs.OpSchema.SupportType.EXPERIMENTAL + and (s.domain == "ai.onnx.ml") == ml + ) + experimental_no_cover = sorted( + s.name + for s in schemas + if s.name not in node_tests + and s.support_level == defs.OpSchema.SupportType.EXPERIMENTAL + and (s.domain == "ai.onnx.ml") == ml + ) + experimental_generator = sorted( + name for name in experimental_no_cover if name in generators + ) + num_common = len(common_covered) + len(common_no_cover) - len(common_generator) + num_experimental = ( + len(experimental_covered) + + len(experimental_no_cover) + - len(experimental_generator) + ) + f.write("# Node Test Coverage\n") + f.write("## Summary\n") + if num_common: + f.write( + f"Node tests have covered {len(common_covered)}/{num_common} " + f"({len(common_covered) / float(num_common) * 100:.2f}%, {len(common_generator)} " + f"generators excluded) common operators.\n\n" + ) + else: + f.write("Node tests have covered 0/0 (N/A) common operators. \n\n") + if num_experimental: + f.write( + "Node tests have covered {}/{} ({:.2f}%, {} generators excluded) " + "experimental operators.\n\n".format( + len(experimental_covered), + num_experimental, + (len(experimental_covered) / float(num_experimental) * 100), + len(experimental_generator), + ) + ) + else: + f.write("Node tests have covered 0/0 (N/A) experimental operators.\n\n") + titles = [ + "💚Covered Common Operators", + "💔No Cover Common Operators", + "💚Covered Experimental Operators", + "💔No Cover Experimental Operators", + ] + all_lists = [ + common_covered, + common_no_cover, + experimental_covered, + experimental_no_cover, + ] + for t in titles: + f.write(f"* [{t[9:]}](#{t[9:].lower().replace(' ', '-')})\n") + f.write("\n") + for t, l in zip(titles, all_lists): # noqa: E741 + f.write(f"## {t}\n") + for s in l: + f.write(f"### {s}") + if s in node_tests: + f.write( + f"\nThere are {len(node_tests[s])} test cases, listed as following:\n" + ) + for summary, code in sorted(node_tests[s]): + f.write("
\n") + f.write(f"{summary}\n\n") + f.write(f"```python\n{code}\n```\n\n") + f.write("
\n") + else: # noqa: PLR5501 + if s in generators: + f.write(" (random generator operator)\n") + else: + f.write(" (call for test cases)\n") + f.write("\n\n") + f.write("
\n\n") + + +def gen_model_test_coverage( + schemas: Sequence[defs.OpSchema], f: IO[Any], ml: bool +) -> None: + f.write("# Model Test Coverage\n") + # Process schemas + schema_dict = {} + for schema in schemas: + schema_dict[schema.name] = schema + # Load models from each model test using Runner.prepare_model_data + # Need to grab associated nodes + attrs: Dict[str, Dict[str, List[Any]]] = {} + model_paths: List[Any] = [] + for rt in load_model_tests(kind="real"): + if rt.url.startswith("onnx/backend/test/data/light/"): + # testing local files + model_name = os.path.normpath( + os.path.join(os.path.dirname(__file__), "..", "..", "..", rt.url) + ) + if not os.path.exists(model_name): + raise FileNotFoundError(f"Unable to find model {model_name!r}.") + model_paths.append(model_name) + else: + model_dir = Runner.prepare_model_data(rt) + model_paths.append(os.path.join(model_dir, "model.onnx")) + model_paths.sort() + model_written = False + for model_pb_path in model_paths: + model = load(model_pb_path) + if ml: + ml_present = False + for opset in model.opset_import: + if opset.domain == "ai.onnx.ml": + ml_present = True + if not ml_present: + continue + else: + model_written = True + f.write(f"## {model.graph.name}\n") + # Deconstruct model + num_covered = 0 + for node in model.graph.node: + if node.op_type in common_covered or node.op_type in experimental_covered: + num_covered += 1 + # Add details of which nodes are/aren't covered + # Iterate through and store each node's attributes + for attr in node.attribute: + if node.op_type not in attrs: + attrs[node.op_type] = {} + if attr.name not in attrs[node.op_type]: + attrs[node.op_type][attr.name] = [] + if attr.type == AttributeProto.FLOAT: + if attr.f not in attrs[node.op_type][attr.name]: + attrs[node.op_type][attr.name].append(attr.f) + elif attr.type == AttributeProto.INT: + if attr.i not in attrs[node.op_type][attr.name]: + attrs[node.op_type][attr.name].append(attr.i) + elif attr.type == AttributeProto.STRING: + if attr.s not in attrs[node.op_type][attr.name]: + attrs[node.op_type][attr.name].append(attr.s) + elif attr.type == AttributeProto.TENSOR: + if attr.t not in attrs[node.op_type][attr.name]: + attrs[node.op_type][attr.name].append(attr.t) + elif attr.type == AttributeProto.GRAPH: + if attr.g not in attrs[node.op_type][attr.name]: + attrs[node.op_type][attr.name].append(attr.g) + elif attr.type == AttributeProto.FLOATS: + if attr.floats not in attrs[node.op_type][attr.name]: + attrs[node.op_type][attr.name].append(attr.floats) + elif attr.type == AttributeProto.INTS: + if attr.ints not in attrs[node.op_type][attr.name]: + attrs[node.op_type][attr.name].append(attr.ints) + elif attr.type == AttributeProto.STRINGS: + if attr.strings not in attrs[node.op_type][attr.name]: + attrs[node.op_type][attr.name].append(attr.strings) + elif attr.type == AttributeProto.TENSORS: + if attr.tensors not in attrs[node.op_type][attr.name]: + attrs[node.op_type][attr.name].append(attr.tensors) + elif attr.type == AttributeProto.GRAPHS: + if attr.graphs not in attrs[node.op_type][attr.name]: + attrs[node.op_type][attr.name].append(attr.graphs) + f.write( + f"\n{model.graph.name} has {num_covered} nodes. " + f"Of these, {len(model.graph.node)} are covered by node tests " + f"({100.0 * float(num_covered) / float(len(model.graph.node))}%)\n\n\n" + ) + # Iterate through attrs, print + f.write("
\n") + f.write("nodes\n\n") + for op in sorted(attrs): + f.write("
\n") + # Get total number of attributes for node schema + f.write( + f"{op}: {len(attrs[op])} out of {len(schema_dict[op].attributes)} attributes covered\n\n" + ) + for attribute in sorted(schema_dict[op].attributes): + if attribute in attrs[op]: + f.write(f"{attribute}: {len(attrs[op][attribute])}\n") + else: + f.write(f"{attribute}: 0\n") + f.write("
\n") + f.write("
\n\n\n") + if not model_written and ml: + f.write("No model tests present for selected domain\n") + + +def gen_overall_test_coverage( + schemas: Sequence[defs.OpSchema], f: IO[Any], ml: bool +) -> None: + f.write("# Overall Test Coverage\n") + f.write("## To be filled.\n") + + +def gen_spdx(f: IO[Any]) -> None: + f.write("\n") + + +def main() -> None: + base_dir = os.path.dirname( + os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) + ) + docs_dir = os.path.join(base_dir, "docs") + schemas = defs.get_all_schemas() + + has_ml = is_ml(schemas) + fname = os.path.join(docs_dir, "TestCoverage.md") + with open(fname, "w+", newline="", encoding="utf-8") as f: # type: ignore + gen_spdx(f) + gen_outlines(f, False) + gen_node_test_coverage(schemas, f, False) + gen_model_test_coverage(schemas, f, False) + gen_overall_test_coverage(schemas, f, False) + + if has_ml: + fname = os.path.join(docs_dir, "TestCoverage-ml.md") + with open(fname, "w+", newline="", encoding="utf-8") as f: # type: ignore + gen_spdx(f) + gen_outlines(f, True) + gen_node_test_coverage(schemas, f, True) + gen_model_test_coverage(schemas, f, True) + gen_overall_test_coverage(schemas, f, True) + + +if __name__ == "__main__": + main() diff --git a/MLPY/Lib/site-packages/onnx/bin/__init__.py b/MLPY/Lib/site-packages/onnx/bin/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..de8e3a385434d6c541b8b544342983b60f8eb0c3 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/bin/__init__.py @@ -0,0 +1,3 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 diff --git a/MLPY/Lib/site-packages/onnx/bin/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/bin/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..982c6ec08f4da72d5315c73c9c5abc62f1d5a569 Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/bin/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/bin/__pycache__/checker.cpython-39.pyc b/MLPY/Lib/site-packages/onnx/bin/__pycache__/checker.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..69c2714cdd8e0f7da7f9e6827f688c2caf458f8f Binary files /dev/null and b/MLPY/Lib/site-packages/onnx/bin/__pycache__/checker.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/onnx/bin/checker.py b/MLPY/Lib/site-packages/onnx/bin/checker.py new file mode 100644 index 0000000000000000000000000000000000000000..b11f283d7c0add199079fbc84bcf4cab127afb4c --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/bin/checker.py @@ -0,0 +1,26 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +import argparse + +from onnx import NodeProto, checker, load + + +def check_model() -> None: + parser = argparse.ArgumentParser("check-model") + parser.add_argument("model_pb", type=argparse.FileType("rb")) + args = parser.parse_args() + + model = load(args.model_pb) + checker.check_model(model) + + +def check_node() -> None: + parser = argparse.ArgumentParser("check-node") + parser.add_argument("node_pb", type=argparse.FileType("rb")) + args = parser.parse_args() + + node = NodeProto() + node.ParseFromString(args.node_pb.read()) + checker.check_node(node) diff --git a/MLPY/Lib/site-packages/onnx/checker.cc b/MLPY/Lib/site-packages/onnx/checker.cc new file mode 100644 index 0000000000000000000000000000000000000000..64f56873f8a48b322967582cbce19de6b7a49a78 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/checker.cc @@ -0,0 
+1,1085 @@ +// Copyright (c) ONNX Project Contributors +// +// SPDX-License-Identifier: Apache-2.0 + +#include "onnx/checker.h" + +#include +#include +#include +#include +#include +#include +#include + +#include "onnx/common/file_utils.h" +#include "onnx/defs/schema.h" +#include "onnx/defs/tensor_proto_util.h" +#include "onnx/proto_utils.h" +#include "onnx/shape_inference/implementation.h" +#include "onnx/string_utils.h" + +#ifdef _WIN32 +#include + +#include + +#else // POSIX +#include +#endif + +namespace ONNX_NAMESPACE { +namespace checker { + +#define enforce_has_field(proto, field) \ + do { \ + if (!proto.has_##field()) { \ + fail_check("Field '", #field, "' of '", #proto, "' is required but missing."); \ + } \ + } while (0) + +#define enforce_non_empty_field(proto, field) \ + do { \ + if (proto.field().empty()) { \ + fail_check("Field '", #field, "' of '", #proto, "' is required to be non-empty."); \ + } \ + } while (0) + +void check_value_info(const ValueInfoProto& value_info, const CheckerContext& ctx) { + enforce_non_empty_field(value_info, name); + // Relax constraint for subgraph input/output. + if (!ctx.is_main_graph()) + return; + enforce_has_field(value_info, type); + const auto value_case = value_info.type().value_case(); + switch (value_case) { + case TypeProto::kTensorType: { + const auto& type = value_info.type().tensor_type(); + enforce_has_field(type, elem_type); + enforce_has_field(type, shape); + } break; + case TypeProto::kOptionalType: { + const auto& type = value_info.type().optional_type(); + enforce_has_field(type, elem_type); + } break; + case TypeProto::kSequenceType: { + const auto& type = value_info.type().sequence_type(); + enforce_has_field(type, elem_type); + } break; + case TypeProto::kMapType: { + const auto& type = value_info.type().map_type(); + enforce_has_field(type, key_type); + enforce_has_field(type, value_type); + } break; +#ifdef ONNX_ML + case TypeProto::kOpaqueType: + break; +#endif + case TypeProto::kSparseTensorType: { + const auto& type = value_info.type().sparse_tensor_type(); + enforce_has_field(type, elem_type); + enforce_has_field(type, shape); + } break; + + default: + fail_check("Unrecognized type value case (value_info name: ", value_info.name(), "): ", value_case); + } +} + +void check_tensor(const TensorProto& tensor, const CheckerContext& ctx) { + enforce_has_field(tensor, data_type); + if (tensor.data_type() == TensorProto::UNDEFINED) { + fail_check("setting data_type field (tensor name: ", tensor.name(), ") to UNDEFINED is not allowed"); + } + + int num_value_fields = 0; + + const char* value_field = nullptr; + +#define check_data_field(field) \ + bool has_##field = tensor.field().size(); \ + if (has_##field) { \ + ++num_value_fields; \ + value_field = #field; \ + } + + check_data_field(float_data); + check_data_field(int32_data); + check_data_field(string_data); + check_data_field(int64_data); + check_data_field(raw_data); + check_data_field(double_data); + check_data_field(uint64_data); + +#undef check_data_field + + bool stored_externally = tensor.has_data_location() && tensor.data_location() == TensorProto::EXTERNAL; + if (stored_externally) { + if (num_value_fields != 0) { + fail_check( + "Data of TensorProto ( tensor name: ", + tensor.name(), + ") is stored externally and should not have data field.", + value_field); + } + + bool has_location = false; + for (const StringStringEntryProto& entry : tensor.external_data()) { + if (entry.has_key() && entry.has_value() && entry.key() == "location") { + has_location = true; + 
resolve_external_data_location(ctx.get_model_dir(), entry.value(), tensor.name()); + } + } + if (!has_location) { + fail_check("TensorProto ( tensor name: ", tensor.name(), ") is stored externally but doesn't have a location."); + } + return; + } + int64_t nelem = 1; + for (auto x : tensor.dims()) { + nelem *= x; + } + if (nelem == 0 && num_value_fields != 0) { + fail_check("TensorProto (tensor name: ", tensor.name(), ") is 0-element but contains data!"); + } + if (nelem != 0 && num_value_fields != 1) { + fail_check("TensorProto (tensor name: ", tensor.name(), ") should contain one and only one value field."); + } + if (has_raw_data) { + if (tensor.data_type() == TensorProto::STRING) { + fail_check("STRING data (tensor name: ", tensor.name(), ") should not be stored in raw_data field"); + } + return; + } else { +#define check_field(field) \ + if (nelem != 0 && !has_##field) { \ + fail_check( \ + "values of data_type '", \ + tensor.data_type(), \ + "' should be stored in field '", \ + #field, \ + "' instead of '", \ + value_field, \ + "'"); \ + } + + switch (tensor.data_type()) { + case TensorProto::FLOAT: + case TensorProto::COMPLEX64: + check_field(float_data); + break; + + case TensorProto::DOUBLE: + case TensorProto::COMPLEX128: + check_field(double_data); + break; + + case TensorProto::INT32: + case TensorProto::UINT8: + case TensorProto::INT8: + case TensorProto::UINT16: + case TensorProto::INT16: + case TensorProto::BOOL: + case TensorProto::FLOAT16: + case TensorProto::BFLOAT16: + case TensorProto::FLOAT8E4M3FN: + case TensorProto::FLOAT8E4M3FNUZ: + case TensorProto::FLOAT8E5M2: + case TensorProto::FLOAT8E5M2FNUZ: + case TensorProto::UINT4: + case TensorProto::INT4: + check_field(int32_data); + break; + + case TensorProto::INT64: + check_field(int64_data); + break; + + case TensorProto::UINT32: + case TensorProto::UINT64: + check_field(uint64_data); + break; + + case TensorProto::STRING: + check_field(string_data); + break; + + default: + fail_check("Unrecognized data_type (tensor name: ", tensor.name(), "): ", tensor.data_type()); + } + } + +#undef check_field +} + +void check_sequence(const SequenceProto& sequence, const CheckerContext& ctx) { + enforce_has_field(sequence, elem_type); + if (sequence.elem_type() == SequenceProto::TENSOR) { + for (const TensorProto& tensor : sequence.tensor_values()) { + check_tensor(tensor, ctx); + } + } else if (sequence.elem_type() == SequenceProto::SPARSE_TENSOR) { + for (const SparseTensorProto& sparse_tensor : sequence.sparse_tensor_values()) { + check_sparse_tensor(sparse_tensor, ctx); + } + } else if (sequence.elem_type() == SequenceProto::SEQUENCE) { + for (const SequenceProto& seq : sequence.sequence_values()) { + check_sequence(seq, ctx); + } + } else if (sequence.elem_type() == SequenceProto::MAP) { + for (const MapProto& map : sequence.map_values()) { + check_map(map, ctx); + } + } else { + fail_check( + "Sequence ( Structure name: ", + sequence.name(), + ", elem_type: ", + sequence.elem_type(), + ") is not have a valid element type."); + } +} + +void check_optional(const OptionalProto& optional, const CheckerContext& ctx) { + enforce_has_field(optional, elem_type); + if (optional.elem_type() == OptionalProto::UNDEFINED) { + return; + } else if (optional.elem_type() == OptionalProto::TENSOR) { + if (optional.has_tensor_value()) + check_tensor(optional.tensor_value(), ctx); + } else if (optional.elem_type() == OptionalProto::SPARSE_TENSOR) { + if (optional.has_sparse_tensor_value()) + check_sparse_tensor(optional.sparse_tensor_value(), 
ctx); + } else if (optional.elem_type() == OptionalProto::SEQUENCE) { + if (optional.has_sequence_value()) + check_sequence(optional.sequence_value(), ctx); + } else if (optional.elem_type() == OptionalProto::MAP) { + if (optional.has_map_value()) + check_map(optional.map_value(), ctx); + } else { + fail_check( + "Optional ( Structure name: ", + optional.name(), + ", elem_type: ", + optional.elem_type(), + ") does not have a valid element type."); + } +} + +void check_map(const MapProto& map, const CheckerContext& ctx) { + enforce_has_field(map, key_type); + if (map.key_type() == TensorProto::UNDEFINED) { + fail_check("setting key_type field (map name: ", map.name(), ") to UNDEFINED is not allowed"); + } + // Check if key is a valid type, specifically INT8, INT16, INT32, INT64, + // UINT8, UINT16, UINT32, UINT64, or STRING. + if ((map.key_type() == TensorProto::FLOAT) || (map.key_type() == TensorProto::BOOL) || + (map.key_type() == TensorProto::FLOAT16) || (map.key_type() == TensorProto::COMPLEX64) || + (map.key_type() == TensorProto::COMPLEX128)) { + fail_check( + "setting key_type field (map name: ", + map.name(), + ") to invalid TensorProto key_type ", + map.key_type(), + " is not allowed"); + } + + // MapProto will use either keys or string_keys, so only one should be > 0. + if ((map.keys_size() > 0) && (map.string_keys_size() > 0)) { + fail_check("Map (name: ", map.name(), ") should not contain more than one keys field."); + } + + int num_keys = map.keys_size() + map.string_keys_size(); + int num_values = 0; + + enforce_has_field(map, values); + check_sequence(map.values(), ctx); + + if (map.values().elem_type() == SequenceProto::TENSOR) { + num_values = map.values().tensor_values_size(); + } else if (map.values().elem_type() == SequenceProto::SPARSE_TENSOR) { + num_values = map.values().sparse_tensor_values_size(); + } else if (map.values().elem_type() == SequenceProto::SEQUENCE) { + num_values = map.values().sequence_values_size(); + } else if (map.values().elem_type() == SequenceProto::MAP) { + num_values = map.values().map_values_size(); + } + + if (num_keys != num_values) { + fail_check("Length of map keys and map values are not the same (map name: ", map.name(), ")"); + } +} + +// Check that the index data stored in a SparseTensorProto is valid. +// indices: a 1-dimensional tensor; indices[i] represents the +// linearized index value for the i-th nonzero value. +void check_sparse_tensor_indices_1( + const TensorProto& indices, + const SparseTensorProto& sparse_tensor_proto, + size_t nnz) { + int dense_rank = sparse_tensor_proto.dims_size(); + int64_t dense_size = 1; + for (int i = 0; i < dense_rank; ++i) + dense_size *= sparse_tensor_proto.dims(i); + if (static_cast<size_t>(indices.dims(0)) != nnz) { + fail_check("Sparse tensor indices (", indices.name(), ") has ", indices.dims(0), " values, but NNZ is ", nnz); + } + + // Check if indices appear in ascending order, and if they have valid + // values. The i-th value in index_data is the linear index of the i-th + // non-zero value.
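+ // Illustrative example: for dense dims [2, 3], the element at (1, 2) has + // linearized index 1 * 3 + 2 = 5; valid index values are strictly + // increasing integers in [0, dense_size - 1].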
+ const std::vector<int64_t> index_data = ParseData<int64_t>(&indices); + + int64_t prev_index = -1; + for (size_t i = 0; i < nnz; ++i) { + int64_t curr_index = index_data[i]; // linearized index of i-th value + if (curr_index < 0 || curr_index >= dense_size) { + fail_check( + "Sparse tensor (", + indices.name(), + ") index value at position [", + i, + "] out of range [0, ", + dense_size - 1, + "]"); + } + if (curr_index <= prev_index) { + fail_check("Sparse tensor (", indices.name(), ") index value at position [", i, "] not in sorted order."); + } + prev_index = curr_index; + } +} + +// Check that the index data stored in a SparseTensorProto is valid. +// indices: a 2-dimensional tensor; indices[i,j] represents the j-th +// index value for the i-th nonzero value. +void check_sparse_tensor_indices_2( + const TensorProto& indices, + const SparseTensorProto& sparse_tensor_proto, + size_t nnz) { + int dense_rank = sparse_tensor_proto.dims_size(); + if (static_cast<size_t>(indices.dims(0)) != nnz) { + fail_check("Sparse tensor indices (", indices.name(), ") first dimension size does not equal NNZ."); + } + if (indices.dims(1) != dense_rank) { + fail_check("Sparse tensor indices (", indices.name(), ") second dimension size does not match rank of tensor."); + } + + // Check if indices appear in ascending order, and if they have valid + // values. + const std::vector<int64_t> index_data = ParseData<int64_t>(&indices); + int64_t prev_index = -1; + for (size_t i = 0; i < nnz; ++i) { + int64_t curr_index = 0; // linearized index of i-th value + for (int j = 0; j < dense_rank; ++j) { + auto index_ij = index_data[i * dense_rank + j]; + if ((index_ij < 0) || (index_ij >= sparse_tensor_proto.dims(j))) { + fail_check("Sparse tensor (", indices.name(), ") index value at position [", i, ",", j, "] out of range."); + } + curr_index = curr_index * sparse_tensor_proto.dims(j) + index_ij; + } + if (curr_index <= prev_index) { + fail_check( + "Sparse tensor (", indices.name(), ") index value at position [", i, "] not in lexicographic sorted order."); + } + prev_index = curr_index; + } +} + +void check_sparse_tensor(const SparseTensorProto& sparse_tensor_proto, const CheckerContext& ctx) { + enforce_has_field(sparse_tensor_proto, values); + + const TensorProto& values = sparse_tensor_proto.values(); + check_tensor(values, ctx); + + // values must be a tensor of shape [NNZ] + // Currently we restrict the value associated with a particular index-tuple + // to be a single value. In the future, if there is a requirement, + // we may extend this to permit the value to be a "sub-tensor", in which + // case values will have dimension > 1.
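+ // Illustrative example: a sparse [3, 4] tensor with nonzeros at (0, 1) and + // (2, 3) has values of shape [2], with indices stored either as a rank-1 + // tensor {1, 11} (linearized) or as a rank-2 COO tensor {{0, 1}, {2, 3}}.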
+ if (values.dims_size() != 1) { + fail_check("Sparse tensor values (", values.name(), ") must have rank 1."); + } + size_t nnz = static_cast(values.dims(0)); + int dense_rank = sparse_tensor_proto.dims_size(); + if (dense_rank == 0) { + fail_check("Sparse tensor (", values.name(), ") must have a dense-rank > 0"); + } + for (int i = 0; i < dense_rank; ++i) { + if (sparse_tensor_proto.dims(i) <= 0) { + fail_check("Sparse tensor (", values.name(), ") dimensions are not positive."); + } + } + + if (sparse_tensor_proto.has_indices()) { + const TensorProto& indices = sparse_tensor_proto.indices(); + check_tensor(indices, ctx); + if (indices.data_type() != TensorProto::INT64) { + fail_check("Sparse tensor indices (", indices.name(), ") must have INT64 type."); + } + switch (indices.dims().size()) { + case 1: + // Indices in linearized format + check_sparse_tensor_indices_1(indices, sparse_tensor_proto, nnz); + return; + case 2: + // Check COO-style index. E.g., an index for a 3D tensor is a 3-tuple. + check_sparse_tensor_indices_2(indices, sparse_tensor_proto, nnz); + return; + default: + fail_check("Sparse tensor indices (", indices.name(), ") must have rank 1 or 2."); + } + } else if (nnz != 0) { + fail_check("Sparse tensor (", values.name(), ") has no index values."); + } +} + +// NB: This is a generic "attribute well-formedness" check, it doesn't +// actually test if an attribute is valid per a schema +void check_attribute(const AttributeProto& attr, const CheckerContext& ctx, const LexicalScopeContext& lex_ctx) { + enforce_non_empty_field(attr, name); + + if (ctx.get_ir_version() >= 0x00000002) { + enforce_has_field(attr, type); + } + + int used_fields = 0; + +#define check_type(expected_type) \ + if (attr.has_type() && attr.type() != expected_type) { \ + fail_check("type field and data field mismatch in attribute ", attr.name(), "."); \ + } + +#define check_singular_field(field, type) \ + if (attr.has_##field()) { \ + ++used_fields; \ + check_type(type); \ + } + +#define check_repeated_field(field, type) \ + if (attr.field##_size() > 0) { \ + ++used_fields; \ + check_type(type); \ + } + + check_singular_field(f, AttributeProto::FLOAT); + check_singular_field(i, AttributeProto::INT); + check_singular_field(s, AttributeProto::STRING); + check_singular_field(t, AttributeProto::TENSOR); + check_singular_field(g, AttributeProto::GRAPH); + check_singular_field(tp, AttributeProto::TYPE_PROTO); + check_singular_field(sparse_tensor, AttributeProto::SPARSE_TENSOR); + check_repeated_field(floats, AttributeProto::FLOATS); + check_repeated_field(ints, AttributeProto::INTS); + check_repeated_field(strings, AttributeProto::STRINGS); + check_repeated_field(tensors, AttributeProto::TENSORS); + check_repeated_field(graphs, AttributeProto::GRAPHS); + check_repeated_field(sparse_tensors, AttributeProto::SPARSE_TENSORS); + check_repeated_field(type_protos, AttributeProto::TYPE_PROTOS); + +#undef check_type +#undef check_singular_field +#undef check_repeated_field + + // Normally, used_fields is expected to be 1. + // In proto3, when the value to be set is type default value (say 0 for + // int), used_fields may be 0. + if (used_fields > 1) { + fail_check("Attribute (name: ", attr.name(), ") should not contain more than one value field."); + } + + if (!ctx.is_main_graph()) { + // It's an attribute of a node in function body. + if (attr.has_ref_attr_name() && used_fields != 0) { + // The attribute proto is supposed to refer to data outside and does not + // have its own value field set. 
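+ // (A ref_attr_name attribute is bound to an attribute of the calling + // node, so carrying its own concrete value as well would be ambiguous.)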
+ fail_check("Attribute (name: ", attr.name(), ") should refer to attribute in parent node."); + } + } + + if (attr.has_t()) { + check_tensor(attr.t(), ctx); + } + + if (attr.has_sparse_tensor()) { + check_sparse_tensor(attr.sparse_tensor(), ctx); + } + + if (attr.has_g()) { + CheckerContext subgraph_ctx(ctx); + subgraph_ctx.set_is_main_graph(false); + check_graph(attr.g(), subgraph_ctx, lex_ctx); + } + + for (const auto& tensor : attr.tensors()) { + check_tensor(tensor, ctx); + } + for (const auto& sparse_tensor : attr.sparse_tensors()) { + check_sparse_tensor(sparse_tensor, ctx); + } + if (attr.graphs().size() > 0) { + CheckerContext subgraph_ctx(ctx); + subgraph_ctx.set_is_main_graph(false); + for (const auto& graph : attr.graphs()) { + check_graph(graph, subgraph_ctx, lex_ctx); + } + } +} + +void print_warning_if_has_experimental(const std::unordered_set& used_experimental_ops) { + if (!used_experimental_ops.empty()) { + std::string all_experimental_ops; + for (const auto& op : used_experimental_ops) { + all_experimental_ops += " " + op + ","; + } + // Remove the last comma which is unnecessary + all_experimental_ops.pop_back(); + std::cout << "Warning: Model contains experimental ops:" + all_experimental_ops << std::endl; + } +} + +void check_node(const NodeProto& node, const CheckerContext& ctx, const LexicalScopeContext& lex_ctx) { + enforce_non_empty_field(node, op_type); + + if (node.input().empty() && node.output().empty()) { + fail_check("NodeProto (name: ", node.name(), ", type: ", node.op_type(), ") has zero input and zero output."); + } + + // Resolve domain for node + const auto& opset_imports = ctx.get_opset_imports(); + auto dit = opset_imports.find(node.domain()); + if (dit == opset_imports.end()) { + fail_check("No opset import for domain '" + node.domain() + "'"); + } + auto domain_version = dit->second; + + // for ops referencing local functions, there is no schema to verify it. + // will add a check to verify consistency between these ops and local functions. 
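+ // Attribute names must be unique within a node; duplicates are rejected + // below before any schema-based verification takes place.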
+ std::unordered_set seen_attr_names{}; + for (const auto& attr : node.attribute()) { + if (!seen_attr_names.insert(attr.name()).second) { + fail_check("Attribute '", attr.name(), "' appeared multiple times."); + }; + + check_attribute(attr, ctx, lex_ctx); + } + + // This issue will be caught by check_graph instead + if (check_is_experimental_op(node)) { + return; + } + + const auto* schema = ctx.get_schema_registry()->GetSchema(node.op_type(), domain_version, node.domain()); + if (!schema) { + if (node.domain() == ONNX_DOMAIN || node.domain() == AI_ONNX_ML_DOMAIN || node.domain() == "ai.onnx" || + node.domain() == AI_ONNX_TRAINING_DOMAIN || ctx.check_custom_domain()) { + // fail the checker if op is in built-in domains or if it has no schema when `check_custom_domain` is true + fail_check( + "No Op registered for " + node.op_type() + " with domain_version of " + + ONNX_NAMESPACE::to_string(domain_version)); + } + } else if (schema->Deprecated()) { + fail_check( + "Op registered for " + node.op_type() + " is deprecated in domain_version of " + + ONNX_NAMESPACE::to_string(domain_version)); + } else { + schema->Verify(node); + } +} + +void check_graph(const GraphProto& graph, const CheckerContext& ctx, const LexicalScopeContext& parent_lex) { + enforce_non_empty_field(graph, name); + + for (const auto& value_info : graph.input()) { + check_value_info(value_info, ctx); + } + for (const auto& value_info : graph.output()) { + check_value_info(value_info, ctx); + } + + // Inherit values available in outer scope + // Note that we do not allow shadowing, so the presence of an already-defined + // name is always an error. + LexicalScopeContext lex_ctx{parent_lex}; + + for (const auto& value_info : graph.input()) { + // TODO: If shadowing isn't allowed, this should maybe use + // this_or_ancestor_graph_has + if (lex_ctx.this_graph_has(value_info.name())) { + fail_check( + "Graph must be in single static assignment (SSA) form, however '", + value_info.name(), + "' has been used as graph input names multiple times."); + } + lex_ctx.add(value_info.name()); + } + + std::unordered_set, std::hash, std::equal_to> + initializer_name_checker; + + for (const auto& init : graph.initializer()) { + enforce_has_field(init, name); + const auto& name = init.name(); + if (name.empty()) { + fail_check("Tensor initializers must have a non-empty name"); + } + + if (!initializer_name_checker.insert(std::cref(name)).second) { + fail_check(name + " initializer name is not unique"); + } + + check_tensor(init, ctx); + + if (ctx.get_ir_version() <= 0x00000003) { + // Initializers are a subset of graph inputs for IR_VERSION <= 3 + if (!lex_ctx.this_graph_has(name)) { + fail_check(name + " in initializer but not in graph input"); + } + } else { + // An initializer is allowed to have the same name as an input, + // but is not required to (for IR_VERSION >= 4) + lex_ctx.add(name); + } + } + + for (const auto& sparse_init : graph.sparse_initializer()) { + const auto& values = sparse_init.values(); + enforce_has_field(values, name); + const auto& name = values.name(); + if (name.empty()) { + fail_check("Sparse tensor initializers must have a non-empty name"); + } + if (!initializer_name_checker.insert(std::cref(name)).second) { + fail_check(name + " sparse initializer name is not unique across initializers and sparse_initializers"); + } + check_sparse_tensor(sparse_init, ctx); + lex_ctx.add(name); + } + std::unordered_set used_experimental_ops; + for (const auto& node : graph.node()) { + // nodes must be in topologically sorted 
order + for (const auto& input : node.input()) { + // explicit optional input + if (input.empty()) { + continue; + } + if (!lex_ctx.this_or_ancestor_graph_has(input)) { + fail_check( + "Nodes in a graph must be topologically sorted, however input '", + input, + "' of node: \n", + "name: ", + node.name(), + " OpType: ", + node.op_type(), + "\n is not output of any previous nodes."); + } + } + + if (check_is_experimental_op(node)) { + used_experimental_ops.insert(node.op_type()); + } + + // This needs to happen before SSA check since we don't want to recurse and + // find that outputs from control flow ops are colliding with names in the + // inner block + + ONNX_TRY { + check_node(node, ctx, lex_ctx); + } + ONNX_CATCH(ValidationError & ex) { + ONNX_HANDLE_EXCEPTION([&]() { + ex.AppendContext("Bad node spec for node. Name: " + node.name() + " OpType: " + node.op_type()); + ONNX_THROW_EX(ex); + }); + } + // check for SSA form + for (const auto& output : node.output()) { + // optional output + if (output.empty()) { + continue; + } + + if (lex_ctx.this_or_ancestor_graph_has(output)) { + fail_check( + "Graph must be in single static assignment (SSA) form, however '", + output, + "' has been used as output names multiple times."); + } + lex_ctx.add(output); + } + } + print_warning_if_has_experimental(used_experimental_ops); +} + +// Utilify function to get the imported version of domain from opset imports +// Returns -1 if requested domain is not found in the opset_imports +int get_version_for_domain(const std::string& domain, const std::unordered_map& opset_imports) { + auto it = opset_imports.find(domain); + if (it == opset_imports.end()) { + return -1; + } + + return it->second; +} + +void check_opset_compatibility( + const NodeProto& node, + const CheckerContext& ctx, + const std::unordered_map& func_opset_imports, + const std::unordered_map& model_opset_imports) { + auto func_opset_version = get_version_for_domain(node.domain(), func_opset_imports); + auto model_opset_version = get_version_for_domain(node.domain(), model_opset_imports); + + if (func_opset_version == -1) { + fail_check("No Opset registered for domain " + node.domain()); + } + + if (model_opset_version == -1) { + // model does not include opset import for a node present in function body. + // This is ok as along as the opset import is present in function level opset imports. + return; + } + + if (func_opset_version == model_opset_version) { + // both versions are same, no need to verify schema. + return; + } + + const auto* schema_for_model_import = + ctx.get_schema_registry()->GetSchema(node.op_type(), model_opset_version, node.domain()); + + const auto* schema_for_function_import = + ctx.get_schema_registry()->GetSchema(node.op_type(), func_opset_version, node.domain()); + + if (!schema_for_model_import && !schema_for_function_import) { + // the op belongs to a custom domain so we cannot verify schema + return; + } + + // if schema is present for 1 but not other or the schema since versions do not match then raise an error + if (!schema_for_model_import || !schema_for_function_import || + schema_for_function_import->since_version() != schema_for_model_import->since_version()) { + fail_check( + "Opset import for domain " + node.domain() + " in function op " + node.op_type() + + "is not compatible with the version imported by model. 
FunctionOp imports version " + + ONNX_NAMESPACE::to_string(func_opset_version) + " whereas model imports version " + + ONNX_NAMESPACE::to_string(model_opset_version)); + } +} + +void check_model_local_functions( + const ModelProto& model, + const CheckerContext& ctx, + const LexicalScopeContext& parent_lex) { + // make a copy of model opset imports to maintain a main copy of opset imports across the model and + // all model local functions to verify opset compatibility + std::unordered_map model_opset_imports(ctx.get_opset_imports()); + + // merge the opset imports from every function in model_opset_imports + // only add the opset import if an entry for it does not exist in model_opset_imports + // if there is an entry then the compatibility will be checked later on in check_opset_compatibility + // called by check_function. + for (const auto& function_proto : model.functions()) { + for (const auto& opset_import : function_proto.opset_import()) { + if (get_version_for_domain(opset_import.domain(), model_opset_imports) == -1) { + model_opset_imports[opset_import.domain()] = opset_import.version(); + } + } + } + + CheckerContext ctx_copy = ctx; + ctx_copy.set_opset_imports(model_opset_imports); + + for (const auto& function_proto : model.functions()) { + check_function(function_proto, ctx_copy, parent_lex); + } +} + +void check_function(const FunctionProto& function, const CheckerContext& ctx, const LexicalScopeContext& parent_lex) { + enforce_non_empty_field(function, name); + + if (ctx.get_ir_version() >= 0x00000008) { + enforce_has_field(function, domain); + } + + const auto& model_opset_imports = ctx.get_opset_imports(); + CheckerContext ctx_copy = ctx; + + std::unordered_map func_opset_imports; + for (auto& relied_opset : function.opset_import()) { + func_opset_imports[relied_opset.domain()] = static_cast(relied_opset.version()); + } + + ctx_copy.set_opset_imports(func_opset_imports); + + LexicalScopeContext lex_ctx{parent_lex}; + + for (const auto& input : function.input()) { + // TODO: If shadowing isn't allowed, this should maybe use + // this_or_ancestor_graph_has + if (lex_ctx.this_graph_has(input)) { + fail_check( + "Graph must be in single static assignment (SSA) form, however '", input, "' has been used multiple times."); + } + lex_ctx.add(input); + } + + std::unordered_set outputs; + for (const auto& output : function.output()) { + auto result = outputs.insert(output); + if (!result.second) { + fail_check("function (", function.name(), ") should not have duplicate outputs specified."); + } + } + + std::unordered_set attrs; + for (const auto& attr : function.attribute()) { + auto result = attrs.insert(attr); + if (!result.second) { + fail_check("function (", function.name(), ") should not have duplicate attributes specified."); + } + } + std::unordered_set used_experimental_ops; + for (const auto& node : function.node()) { + // nodes must be in topologically sorted order + for (const auto& input : node.input()) { + // explicit optional input + if (input.empty()) { + continue; + } + if (!lex_ctx.this_graph_has(input)) { + fail_check( + "Nodes in a function must be topologically sorted, however input '", + input, + "' of node: \n", + "Name: ", + node.name(), + " OpType: ", + node.op_type(), + "\n is neither output of any previous nodes nor input of the function."); + } + } + + // check whether the opset version imported for a domain by function and model are + // compatible + if (!ctx_copy.skip_opset_compatibility_check()) + check_opset_compatibility(node, ctx_copy, 
func_opset_imports, model_opset_imports); + if (check_is_experimental_op(node)) { + used_experimental_ops.insert(node.op_type()); + } + check_node(node, ctx_copy, lex_ctx); + + // check for SSA form + for (const auto& output : node.output()) { + // optional output + if (output.empty()) { + continue; + } + if (lex_ctx.this_or_ancestor_graph_has(output)) { + fail_check( + "Function must be in single static assignment (SSA) form, however '", + output, + "' has been used as output names multiple times."); + } + lex_ctx.add(output); + } + } + print_warning_if_has_experimental(used_experimental_ops); +} + +void check_model(const ModelProto& model, CheckerContext& ctx) { + if (!model.ir_version()) { + fail_check("The model does not have an ir_version set properly."); + } + if (model.ir_version() > IR_VERSION) { + fail_check("Your model ir_version ", model.ir_version(), " is higher than the checker's (", IR_VERSION, ")."); + } + if (model.metadata_props_size() > 1) { + std::unordered_set keys; + for (const StringStringEntryProto& entry : model.metadata_props()) { + auto i = keys.insert(entry.key()); + if (!i.second) { + fail_check("Your model has duplicate keys in metadata_props."); + } + } + } + std::unordered_map versions; + ctx.set_ir_version(static_cast(model.ir_version())); + std::unordered_map opset_imports; + for (const auto& opset_import : model.opset_import()) { + opset_imports[opset_import.domain()] = static_cast(opset_import.version()); + } + if (model.ir_version() >= 3) { + if (opset_imports.empty()) { + fail_check("model with IR version >= 3 must specify opset_import for ONNX"); + } + } else { + if (opset_imports.empty()) + opset_imports[ONNX_DOMAIN] = 1; + else { + fail_check("model with IR version < 3 cannot have opset_import specified"); + } + } + ctx.set_opset_imports(opset_imports); + LexicalScopeContext lex_ctx; + check_graph(model.graph(), ctx, lex_ctx); + + if (ctx.get_ir_version() >= 0x00000008) { + check_model_local_functions(model, ctx, lex_ctx); + // TODO: check consistency between local functions and ops referencing it. 
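+ // (Model-local functions only exist from IR version 8 onward, when the + // functions field was added to ModelProto.)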
+ } +} + +void check_model( + const std::string& model_path, + bool full_check, + bool skip_opset_compatibility_check, + bool check_custom_domain) { + ModelProto model; + LoadProtoFromPath(model_path, model); + + CheckerContext ctx; + std::string model_dir; + size_t pos = model_path.find_last_of("\\/"); + if (pos != std::string::npos) { + model_dir = model_path.substr(0, pos + 1); + } + ctx.set_model_dir(model_dir); + ctx.set_skip_opset_compatibility_check(skip_opset_compatibility_check); + ctx.set_check_custom_domain(check_custom_domain); + check_model(model, ctx); + + if (full_check) { + ShapeInferenceOptions options{true, 1, false}; + ONNX_NAMESPACE::shape_inference::InferShapes(model, ctx.get_schema_registry(), options); + } +} + +void check_model( + const ModelProto& model, + bool full_check, + bool skip_opset_compatibility_check, + bool check_custom_domain) { + CheckerContext ctx; + ctx.set_skip_opset_compatibility_check(skip_opset_compatibility_check); + ctx.set_check_custom_domain(check_custom_domain); + check_model(model, ctx); + if (full_check) { + ShapeInferenceOptions options{true, 1, false}; + // Do not update the model in place by the check from shape inference + // because checker should not modify the original model + ModelProto copy = model; + ONNX_NAMESPACE::shape_inference::InferShapes(copy, ctx.get_schema_registry(), options); + } +} + +std::string resolve_external_data_location( + const std::string& base_dir, + const std::string& location, + const std::string& tensor_name) { +#ifdef _WIN32 + auto file_path = std::filesystem::path(utf8str_to_wstring(location)); + if (file_path.is_absolute()) { + fail_check( + "Location of external TensorProto ( tensor name: ", + tensor_name, + ") should be a relative path, but it is an absolute path: ", + location); + } + auto relative_path = file_path.lexically_normal().make_preferred().wstring(); + // Check that normalized relative path contains ".." on Windows. + if (relative_path.find(L"..", 0) != std::string::npos) { + fail_check( + "Data of TensorProto ( tensor name: ", + tensor_name, + ") should be file inside the ", + base_dir, + ", but the '", + location, + "' points outside the directory"); + } + std::wstring data_path = path_join(utf8str_to_wstring(base_dir), relative_path); + struct _stat64 buff; + if (data_path.empty() || (data_path[0] != '#' && _wstat64(data_path.c_str(), &buff) != 0)) { + fail_check( + "Data of TensorProto ( tensor name: ", + tensor_name, + ") should be stored in ", + location, + ", but it doesn't exist or is not accessible."); + } + return wstring_to_utf8str(data_path); +#else // POSIX + if (location.empty()) { + fail_check("Location of external TensorProto ( tensor name: ", tensor_name, ") should not be empty."); + } else if (location[0] == '/') { + fail_check( + "Location of external TensorProto ( tensor name: ", + tensor_name, + ") should be a relative path, but it is an absolute path: ", + location); + } + std::string relative_path = clean_relative_path(location); + // Check that normalized relative path contains ".." 
on POSIX + if (relative_path.find("..", 0) != std::string::npos) { + fail_check( + "Data of TensorProto ( tensor name: ", + tensor_name, + ") should be file inside the ", + base_dir, + ", but the '", + location, + "' points outside the directory"); + } + std::string data_path = path_join(base_dir, relative_path); + // use stat64 to check whether the file exists +#if defined(__APPLE__) || defined(__wasm__) || !defined(__GLIBC__) + struct stat buffer; // APPLE, wasm and non-glic stdlibs do not have stat64 + if (data_path.empty() || (data_path[0] != '#' && stat((data_path).c_str(), &buffer) != 0)) { +#else + struct stat64 buffer; // All POSIX under glibc except APPLE and wasm have stat64 + if (data_path.empty() || (data_path[0] != '#' && stat64((data_path).c_str(), &buffer) != 0)) { +#endif + fail_check( + "Data of TensorProto ( tensor name: ", + tensor_name, + ") should be stored in ", + data_path, + ", but it doesn't exist or is not accessible."); + } + // Do not allow symlinks or directories. + if (data_path.empty() || (data_path[0] != '#' && !S_ISREG(buffer.st_mode))) { + fail_check( + "Data of TensorProto ( tensor name: ", + tensor_name, + ") should be stored in ", + data_path, + ", but it is not regular file."); + } + return data_path; +#endif +} + +std::set experimental_ops = { + "ATen", + "Affine", + "ConstantFill", + "Crop", + "DynamicSlice", + "GRUUnit", + "GivenTensorFill", + "ImageScaler", + "ParametricSoftplus", + "Scale", + "ScaledTanh"}; + +bool check_is_experimental_op(const NodeProto& node) { + return (node.domain() == ONNX_DOMAIN || node.domain() == "ai.onnx") && experimental_ops.count(node.op_type()); +} + +#undef enforce_has_field +#undef enforce_non_empty_field + +} // namespace checker +} // namespace ONNX_NAMESPACE diff --git a/MLPY/Lib/site-packages/onnx/checker.h b/MLPY/Lib/site-packages/onnx/checker.h new file mode 100644 index 0000000000000000000000000000000000000000..1024e79c181acf3a53d4d8341faff999edd3ecb1 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/checker.h @@ -0,0 +1,187 @@ +// Copyright (c) ONNX Project Contributors +// +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include +#include +#include +#include +#include + +#include "onnx/defs/function.h" +#include "onnx/defs/schema.h" +#include "onnx/onnx-data_pb.h" +#include "onnx/onnx-operators_pb.h" +#include "onnx/onnx_pb.h" +#include "onnx/string_utils.h" + +namespace ONNX_NAMESPACE { +namespace checker { +class ValidationError final : public std::runtime_error { + public: + using std::runtime_error::runtime_error; + const char* what() const noexcept override { + if (!expanded_message_.empty()) { + return expanded_message_.c_str(); + } + return std::runtime_error::what(); + } + void AppendContext(const std::string& context) { + expanded_message_ = ONNX_NAMESPACE::MakeString(std::runtime_error::what(), "\n\n==> Context: ", context); + } + + private: + std::string expanded_message_; +}; + +#define fail_check(...) 
\ + ONNX_THROW_EX(ONNX_NAMESPACE::checker::ValidationError(ONNX_NAMESPACE::MakeString(__VA_ARGS__))); + +class CheckerContext final { + public: + int get_ir_version() const { + return ir_version_; + } + void set_ir_version(int v) { + ir_version_ = v; + } + const std::unordered_map& get_opset_imports() const { + return opset_imports_; + } + void set_opset_imports(std::unordered_map imps) { + opset_imports_ = std::move(imps); + } + bool is_main_graph() const { + return is_main_graph_; + } + void set_is_main_graph(bool is_main_graph) { + is_main_graph_ = is_main_graph; + } + + void set_schema_registry(const ISchemaRegistry* schema_registry) { + schema_registry_ = schema_registry; + } + + const ISchemaRegistry* get_schema_registry() const { + return schema_registry_; + } + + void set_model_dir(const std::string& model_dir) { + model_dir_ = model_dir; + } + + std::string get_model_dir() const { + return model_dir_; + } + + bool skip_opset_compatibility_check() const { + return skip_opset_compatibility_check_; + } + + void set_skip_opset_compatibility_check(bool value) { + skip_opset_compatibility_check_ = value; + } + + bool check_custom_domain() const { + return check_custom_domain_; + } + + void set_check_custom_domain(bool value) { + check_custom_domain_ = value; + } + + explicit CheckerContext() : ir_version_(-1) {} + + private: + int ir_version_; + std::unordered_map opset_imports_; + bool is_main_graph_ = true; + const ISchemaRegistry* schema_registry_ = OpSchemaRegistry::Instance(); + std::string model_dir_; + bool skip_opset_compatibility_check_ = false; + bool check_custom_domain_ = false; +}; + +class LexicalScopeContext final { + public: + LexicalScopeContext() = default; + + // Construct an instance with the lexical scope from the parent graph to allow + // lookup of names from that scope via this_or_ancestor_graph_has. + // The caller must ensure parent_context remains valid for the entire lifetime + // of the new instance. Alternatively, if that cannot be guaranteed, create an + // instance with the default constructor and populate output_names with the + // values from the parent scope so the values are copied instead. + LexicalScopeContext(const LexicalScopeContext& parent_context) : parent_context_{&parent_context} {} + LexicalScopeContext& operator=(const LexicalScopeContext& parent_context) { + parent_context_ = &parent_context; + return *this; + } + + void add(const std::string& name) { + output_names.insert(name); + } + + bool this_graph_has(const std::string& name) const { + return output_names.find(name) != output_names.cend(); + } + + bool this_or_ancestor_graph_has(const std::string& name) const { + return this_graph_has(name) || (parent_context_ && parent_context_->this_or_ancestor_graph_has(name)); + } + + // public for backwards compatibility. 
please prefer the public interface of + // this class over directly changing output_names + std::unordered_set output_names; + + private: + const LexicalScopeContext* parent_context_{nullptr}; +}; + +using IR_VERSION_TYPE = decltype(Version::IR_VERSION); +void check_value_info(const ValueInfoProto& value_info, const CheckerContext&); +void check_tensor(const TensorProto& tensor, const CheckerContext&); +void check_sparse_tensor(const SparseTensorProto& sparse_tensor, const CheckerContext&); +void check_sequence(const SequenceProto& sequence, const CheckerContext&); +void check_map(const MapProto& map, const CheckerContext&); +void check_optional(const OptionalProto& opt, const CheckerContext&); +void check_attribute(const AttributeProto& attr, const CheckerContext&, const LexicalScopeContext&); +void check_node(const NodeProto& node, const CheckerContext&, const LexicalScopeContext&); +void check_graph(const GraphProto& graph, const CheckerContext&, const LexicalScopeContext&); +void check_function(const FunctionProto& function, const CheckerContext&, const LexicalScopeContext&); + +// Check schema compatibility for 2 opset versions for a given node. +// Checks whether the schema for 2 versions is same, this is true when the opschema +// does not change between versions. +void check_opset_compatibility( + const NodeProto& node, + const CheckerContext& ctx, + const std::unordered_map& func_opset_imports, + const std::unordered_map& model_opset_imports); + +// Checks all model local functions present in ModelProto +void check_model_local_functions( + const ModelProto& model, + const CheckerContext& ctx, + const LexicalScopeContext& parent_lex); + +void check_model( + const ModelProto& model, + bool full_check = false, + bool skip_opset_compatibility_check = false, + bool check_custom_domain = false); +void check_model( + const std::string& model_path, + bool full_check = false, + bool skip_opset_compatibility_check = false, + bool check_custom_domain = false); +std::string resolve_external_data_location( + const std::string& base_dir, + const std::string& location, + const std::string& tensor_name); +bool check_is_experimental_op(const NodeProto& node); + +} // namespace checker +} // namespace ONNX_NAMESPACE diff --git a/MLPY/Lib/site-packages/onnx/checker.py b/MLPY/Lib/site-packages/onnx/checker.py new file mode 100644 index 0000000000000000000000000000000000000000..754396be60c070befb12a79ccb3e2d3940c81599 --- /dev/null +++ b/MLPY/Lib/site-packages/onnx/checker.py @@ -0,0 +1,187 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 +"""Graph utilities for checking whether an ONNX proto message is legal.""" + +from __future__ import annotations + +__all__ = [ + "check_attribute", + "check_function", + "check_graph", + "check_model", + "check_node", + "check_sparse_tensor", + "check_tensor", + "check_value_info", + "DEFAULT_CONTEXT", + "LEXICAL_SCOPE_CONTEXT", + "ValidationError", + "C", + "MAXIMUM_PROTOBUF", +] + +import os +import sys +from typing import Any, Callable, TypeVar + +from google.protobuf.message import Message + +import onnx.defs +import onnx.onnx_cpp2py_export.checker as C # noqa: N812 +import onnx.shape_inference +from onnx import ( + IR_VERSION, + AttributeProto, + FunctionProto, + GraphProto, + ModelProto, + NodeProto, + SparseTensorProto, + TensorProto, + ValueInfoProto, +) + +# Limitation of single protobuf file is 2GB +MAXIMUM_PROTOBUF = 2000000000 + +# TODO: This thing where we reserialize the protobuf back into the +# string, only to 
diff --git a/MLPY/Lib/site-packages/onnx/checker.py b/MLPY/Lib/site-packages/onnx/checker.py
new file mode 100644
index 0000000000000000000000000000000000000000..754396be60c070befb12a79ccb3e2d3940c81599
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/checker.py
@@ -0,0 +1,187 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+"""Graph utilities for checking whether an ONNX proto message is legal."""
+
+from __future__ import annotations
+
+__all__ = [
+    "check_attribute",
+    "check_function",
+    "check_graph",
+    "check_model",
+    "check_node",
+    "check_sparse_tensor",
+    "check_tensor",
+    "check_value_info",
+    "DEFAULT_CONTEXT",
+    "LEXICAL_SCOPE_CONTEXT",
+    "ValidationError",
+    "C",
+    "MAXIMUM_PROTOBUF",
+]
+
+import os
+import sys
+from typing import Any, Callable, TypeVar
+
+from google.protobuf.message import Message
+
+import onnx.defs
+import onnx.helper
+import onnx.onnx_cpp2py_export.checker as C  # noqa: N812
+import onnx.shape_inference
+from onnx import (
+    IR_VERSION,
+    AttributeProto,
+    FunctionProto,
+    GraphProto,
+    ModelProto,
+    NodeProto,
+    SparseTensorProto,
+    TensorProto,
+    ValueInfoProto,
+)
+
+# The size limit of a single protobuf file is 2GB
+MAXIMUM_PROTOBUF = 2000000000
+
+# TODO: This thing where we reserialize the protobuf back into the
+# string, only to deserialize it at the call site, is really goofy.
+# Stop doing that.
+
+
+# NB: Please don't edit this context!
+DEFAULT_CONTEXT = C.CheckerContext()
+DEFAULT_CONTEXT.ir_version = IR_VERSION
+# TODO: Maybe ONNX-ML should also be defaulted?
+DEFAULT_CONTEXT.opset_imports = {"": onnx.defs.onnx_opset_version()}
+
+LEXICAL_SCOPE_CONTEXT = C.LexicalScopeContext()
+
+
+FuncType = TypeVar("FuncType", bound=Callable[..., Any])
+
+
+def _ensure_proto_type(proto: Message, proto_type: type[Message]) -> None:
+    if not isinstance(proto, proto_type):
+        raise TypeError(
+            f"The proto message needs to be of type '{proto_type.__name__}'"
+        )
+
+
+def check_value_info(
+    value_info: ValueInfoProto, ctx: C.CheckerContext = DEFAULT_CONTEXT
+) -> None:
+    _ensure_proto_type(value_info, ValueInfoProto)
+    return C.check_value_info(value_info.SerializeToString(), ctx)
+
+
+def check_tensor(tensor: TensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:
+    _ensure_proto_type(tensor, TensorProto)
+    return C.check_tensor(tensor.SerializeToString(), ctx)
+
+
+def check_attribute(
+    attr: AttributeProto,
+    ctx: C.CheckerContext = DEFAULT_CONTEXT,
+    lexical_scope_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT,
+) -> None:
+    _ensure_proto_type(attr, AttributeProto)
+    return C.check_attribute(attr.SerializeToString(), ctx, lexical_scope_ctx)
+
+
+def check_node(
+    node: NodeProto,
+    ctx: C.CheckerContext = DEFAULT_CONTEXT,
+    lexical_scope_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT,
+) -> None:
+    _ensure_proto_type(node, NodeProto)
+    return C.check_node(node.SerializeToString(), ctx, lexical_scope_ctx)
+
+
+def check_function(
+    function: FunctionProto,
+    ctx: C.CheckerContext | None = None,
+    lexical_scope_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT,
+) -> None:
+    _ensure_proto_type(function, FunctionProto)
+    if ctx is None:
+        ctx = C.CheckerContext()
+        ctx.ir_version = onnx.helper.find_min_ir_version_for(
+            function.opset_import, ignore_unknown=True
+        )
+        ctx.opset_imports = {
+            domain_version.domain: domain_version.version
+            for domain_version in function.opset_import
+        }
+    C.check_function(function.SerializeToString(), ctx, lexical_scope_ctx)
+
+
+def check_graph(
+    graph: GraphProto,
+    ctx: C.CheckerContext = DEFAULT_CONTEXT,
+    lexical_scope_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT,
+) -> None:
+    _ensure_proto_type(graph, GraphProto)
+    return C.check_graph(graph.SerializeToString(), ctx, lexical_scope_ctx)
+
+
+def check_sparse_tensor(
+    sparse: SparseTensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT
+) -> None:
+    _ensure_proto_type(sparse, SparseTensorProto)
+    C.check_sparse_tensor(sparse.SerializeToString(), ctx)
+
+
+def check_model(
+    model: ModelProto | str | bytes | os.PathLike,
+    full_check: bool = False,
+    skip_opset_compatibility_check: bool = False,
+    check_custom_domain: bool = False,
+) -> None:
+    """Check the consistency of a model.
+
+    An exception will be raised if the model's ir_version is not set
+    properly or is higher than the checker's ir_version, or if the model
+    has duplicate keys in metadata_props.
+
+    If IR version >= 3, the model must specify opset_import.
+    If IR version < 3, the model cannot have any opset_import specified.
+
+    Args:
+        model: Model to check. If model is a path, the function checks the
+            model stored at that path. A model whose serialized size exceeds
+            2GB can only be checked by path.
+        full_check: If True, the function also runs a shape inference check.
+        skip_opset_compatibility_check: If True, the function skips the check
+            for opset compatibility.
+        check_custom_domain: If True, the function checks all domains.
+            Otherwise it only checks built-in domains.
+    """
+    # If model is a path instead of ModelProto
+    if isinstance(model, (str, os.PathLike)):
+        C.check_model_path(
+            os.fspath(model),
+            full_check,
+            skip_opset_compatibility_check,
+            check_custom_domain,
+        )
+    else:
+        protobuf_string = (
+            model if isinstance(model, bytes) else model.SerializeToString()
+        )
+        # If the protobuf is larger than 2GB,
+        # remind users to use the model path to check
+        if sys.getsizeof(protobuf_string) > MAXIMUM_PROTOBUF:
+            raise ValueError(
+                "This protobuf of onnx model is too large (>2GB). Call check_model with model path instead."
+            )
+        C.check_model(
+            protobuf_string,
+            full_check,
+            skip_opset_compatibility_check,
+            check_custom_domain,
+        )
+
+
+ValidationError = C.ValidationError
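check_model above accepts an in-memory ModelProto (or its serialized bytes) or a file path, and validation failures surface as ValidationError. A minimal usage sketch ("model.onnx" is a hypothetical file name):

    import onnx
    from onnx.checker import ValidationError, check_model

    try:
        model = onnx.load("model.onnx")  # hypothetical model file
        check_model(model, full_check=True)  # full_check also runs shape inference
        print("model is valid")
    except ValidationError as exc:
        print(f"model is invalid: {exc}")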
diff --git a/MLPY/Lib/site-packages/onnx/common/array_ref.h b/MLPY/Lib/site-packages/onnx/common/array_ref.h
new file mode 100644
index 0000000000000000000000000000000000000000..0335aca4a6639e2b2ff32c42ceef8b6dd7d61bdc
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/common/array_ref.h
@@ -0,0 +1,199 @@
+// Copyright (c) ONNX Project Contributors
+
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+// ATTENTION: The code in this file is highly EXPERIMENTAL.
+// Adventurous users should note that the APIs will probably change.
+
+//===--- ArrayRef.h - Array Reference Wrapper -------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+// ONNX: modified from llvm::ArrayRef.
+// removed llvm-specific functionality
+// removed some implicit const -> non-const conversions that rely on
+// complicated std::enable_if meta-programming
+// removed a bunch of slice variants for simplicity...
+
+#pragma once
+#include <assert.h>
+
+#include <algorithm>
+#include <array>
+#include <initializer_list>
+#include <iterator>
+#include <type_traits>
+#include <vector>
+
+namespace ONNX_NAMESPACE {
+/// ArrayRef - Represent a constant reference to an array (0 or more elements
+/// consecutively in memory), i.e. a start pointer and a length. It allows
+/// various APIs to take consecutive elements easily and conveniently.
+///
+/// This class does not own the underlying data, it is expected to be used in
+/// situations where the data resides in some other buffer, whose lifetime
+/// extends past that of the ArrayRef. For this reason, it is not in general
+/// safe to store an ArrayRef.
+///
+/// This is intended to be trivially copyable, so it should be passed by
+/// value.
+template <typename T>
+class ArrayRef {
+ public:
+  typedef const T* iterator;
+  typedef const T* const_iterator;
+  typedef size_t size_type;
+
+  typedef std::reverse_iterator<iterator> reverse_iterator;
+
+ private:
+  /// The start of the array, in an external buffer.
+  const T* Data;
+
+  /// The number of elements.
+  size_type Length;
+
+ public:
+  /// @name Constructors
+  /// @{
+
+  /// Construct an empty ArrayRef.
+  /*implicit*/ ArrayRef() : Data(nullptr), Length(0) {}
+
+  /// Construct an ArrayRef from a single element.
+  /*implicit*/ ArrayRef(const T& OneElt) : Data(&OneElt), Length(1) {}
+
+  /// Construct an ArrayRef from a pointer and length.
+  /*implicit*/ ArrayRef(const T* data, size_t length) : Data(data), Length(length) {}
+
+  /// Construct an ArrayRef from a range.
+  ArrayRef(const T* begin, const T* end) : Data(begin), Length(end - begin) {}
+
+  /// Construct an ArrayRef from a std::vector.
+  template <typename A>
+  /*implicit*/ ArrayRef(const std::vector<T, A>& Vec) : Data(Vec.data()), Length(Vec.size()) {}
+
+  /// Construct an ArrayRef from a std::array
+  template <size_t N>
+  /*implicit*/ constexpr ArrayRef(const std::array<T, N>& Arr) : Data(Arr.data()), Length(N) {}
+
+  /// Construct an ArrayRef from a C array.
+  template <size_t N>
+  /*implicit*/ constexpr ArrayRef(const T (&Arr)[N]) : Data(Arr), Length(N) {}
+
+  /// Construct an ArrayRef from a std::initializer_list.
+  /*implicit*/ ArrayRef(const std::initializer_list<T>& Vec)
+      : Data(Vec.begin() == Vec.end() ? (T*)nullptr : Vec.begin()), Length(Vec.size()) {}
+
+  /// @}
+  /// @name Simple Operations
+  /// @{
+
+  iterator begin() const {
+    return Data;
+  }
+  iterator end() const {
+    return Data + Length;
+  }
+
+  reverse_iterator rbegin() const {
+    return reverse_iterator(end());
+  }
+  reverse_iterator rend() const {
+    return reverse_iterator(begin());
+  }
+
+  /// empty - Check if the array is empty.
+  bool empty() const {
+    return Length == 0;
+  }
+
+  const T* data() const {
+    return Data;
+  }
+
+  /// size - Get the array size.
+  size_t size() const {
+    return Length;
+  }
+
+  /// front - Get the first element.
+  const T& front() const {
+    assert(!empty());
+    return Data[0];
+  }
+
+  /// back - Get the last element.
+  const T& back() const {
+    assert(!empty());
+    return Data[Length - 1];
+  }
+
+  /// equals - Check for element-wise equality.
+  bool equals(ArrayRef RHS) const {
+    if (Length != RHS.Length)
+      return false;
+    return std::equal(begin(), end(), RHS.begin());
+  }
+
+  /// slice(n, m) - Chop off the first N elements of the array, and keep M
+  /// elements in the array.
+  ArrayRef<T> slice(size_t N, size_t M) const {
+    assert(N + M <= size() && "Invalid specifier");
+    return ArrayRef<T>(data() + N, M);
+  }
+
+  /// slice(n) - Chop off the first N elements of the array.
+  ArrayRef<T> slice(size_t N) const {
+    return slice(N, size() - N);
+  }
+
+  /// @}
+  /// @name Operator Overloads
+  /// @{
+  const T& operator[](size_t Index) const {
+    assert(Index < Length && "Invalid index!");
+    return Data[Index];
+  }
+
+  /// Vector compatibility
+  const T& at(size_t Index) const {
+    assert(Index < Length && "Invalid index!");
+    return Data[Index];
+  }
+
+  /// Disallow accidental assignment from a temporary.
+  ///
+  /// The declaration here is extra complicated so that "arrayRef = {}"
+  /// continues to select the move assignment operator.
+  template <typename U>
+  typename std::enable_if<std::is_same<U, T*>::value, ArrayRef<T>>::type& operator=(U&& Temporary) = delete;
+
+  /// Disallow accidental assignment from a temporary.
+  ///
+  /// The declaration here is extra complicated so that "arrayRef = {}"
+  /// continues to select the move assignment operator.
+  template <typename U>
+  typename std::enable_if<std::is_same<U, T*>::value, ArrayRef<T>>::type& operator=(std::initializer_list<U>) = delete;
+
+  /// @}
+  /// @name Expensive Operations
+  /// @{
+  std::vector<T> vec() const {
+    return std::vector<T>(Data, Data + Length);
+  }
+
+  /// @}
+  /// @name Conversion operators
+  /// @{
+  operator std::vector<T>() const {
+    return std::vector<T>(Data, Data + Length);
+  }
+
+  /// @}
+};
+
+} // namespace ONNX_NAMESPACE
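ArrayRef is non-owning, so (per the class comment) it is meant to be passed by value while the caller's buffer outlives the call. A small sketch under that assumption:

    #include <vector>
    #include "onnx/common/array_ref.h"

    // Sums any contiguous int64_t buffer without copying it.
    int64_t sum(ONNX_NAMESPACE::ArrayRef<int64_t> xs) {
      int64_t total = 0;
      for (int64_t x : xs)
        total += x;
      return total;
    }

    // A std::vector, C array, or initializer list all convert implicitly:
    //   std::vector<int64_t> v{1, 2, 3};
    //   sum(v);
    //   sum({4, 5});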
diff --git a/MLPY/Lib/site-packages/onnx/common/assertions.cc b/MLPY/Lib/site-packages/onnx/common/assertions.cc
new file mode 100644
index 0000000000000000000000000000000000000000..337f1a14ba380612620e67a97ab6124c13b5c362
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/common/assertions.cc
@@ -0,0 +1,45 @@
+// Copyright (c) ONNX Project Contributors
+
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+// ATTENTION: The code in this file is highly EXPERIMENTAL.
+// Adventurous users should note that the APIs will probably change.
+
+#include "onnx/common/assertions.h"
+
+#include <array>
+#include <cstdarg>
+#include <cstdio>
+
+#include "onnx/common/common.h"
+
+namespace ONNX_NAMESPACE {
+
+std::string barf(const char* fmt, ...) {
+  constexpr size_t buffer_size = 2048;
+  std::array<char, buffer_size> msg{};
+  va_list args;
+
+  va_start(args, fmt);
+
+  // use fixed length for buffer "msg" to avoid buffer overflow
+  vsnprintf(static_cast<char*>(msg.data()), msg.size() - 1, fmt, args);
+
+  // ensure null-terminated string to avoid out of bounds read
+  msg.back() = '\0';
+  va_end(args);
+
+  return std::string(msg.data());
+}
+
+void throw_assert_error(std::string& msg) {
+  ONNX_THROW_EX(assert_error(msg));
+}
+
+void throw_tensor_error(std::string& msg) {
+  ONNX_THROW_EX(tensor_error(msg));
+}
+
+} // namespace ONNX_NAMESPACE
diff --git a/MLPY/Lib/site-packages/onnx/common/assertions.h b/MLPY/Lib/site-packages/onnx/common/assertions.h
new file mode 100644
index 0000000000000000000000000000000000000000..73984562cf8b788a8d3fa1af277dfb472fcdfe97
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/common/assertions.h
@@ -0,0 +1,72 @@
+// Copyright (c) ONNX Project Contributors
+
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+// ATTENTION: The code in this file is highly EXPERIMENTAL.
+// Adventurous users should note that the APIs will probably change.
+
+#pragma once
+
+#include <stdexcept>
+#include <string>
+
+namespace ONNX_NAMESPACE {
+
+struct assert_error : public std::runtime_error {
+ public:
+  explicit assert_error(const std::string& msg) : runtime_error(msg) {}
+};
+
+struct tensor_error : public assert_error {
+ public:
+  explicit tensor_error(const std::string& msg) : assert_error(msg) {}
+};
+
+std::string barf(const char* fmt, ...);
+
+[[noreturn]] void throw_assert_error(std::string&);
+
+[[noreturn]] void throw_tensor_error(std::string&);
+
+} // namespace ONNX_NAMESPACE
+
+#if defined(__GNUC__) || defined(__ICL) || defined(__clang__)
+#define _ONNX_EXPECT(x, y) (__builtin_expect((x), (y)))
+#else
+#define _ONNX_EXPECT(x, y) (x)
+#endif
+
+#define ONNX_ASSERT(cond) \
+  if (_ONNX_EXPECT(!(cond), 0)) { \
+    std::string error_msg = \
+        ::ONNX_NAMESPACE::barf("%s:%u: %s: Assertion `%s` failed.", __FILE__, __LINE__, __func__, #cond); \
+    throw_assert_error(error_msg); \
+  }
+
+// The following is used to prevent MSVC from passing the whole __VA_ARGS__ list
+// as the first parameter value to a macro call.
+#define ONNX_EXPAND(x) x
+
+// Note: msg must be a string literal
+#define _ONNX_ASSERTM(cond, msg, ...) \
+  if (_ONNX_EXPECT(!(cond), 0)) { \
+    std::string error_msg = ::ONNX_NAMESPACE::barf( \
+        "%s:%u: %s: Assertion `%s` failed: " msg, __FILE__, __LINE__, __func__, #cond, __VA_ARGS__); \
+    throw_assert_error(error_msg); \
+  }
+
+// The trailing ' ' argument is a hack to deal with the extra comma when ... is empty.
+// Another way to solve this is ##__VA_ARGS__ in _ONNX_ASSERTM, but this is a non-portable
+// extension we shouldn't use.
+#define ONNX_ASSERTM(...) ONNX_EXPAND(_ONNX_ASSERTM(__VA_ARGS__, " "))
+
+#define _TENSOR_ASSERTM(cond, msg, ...) \
+  if (_ONNX_EXPECT(!(cond), 0)) { \
+    std::string error_msg = ::ONNX_NAMESPACE::barf( \
+        "%s:%u: %s: Assertion `%s` failed: " msg, __FILE__, __LINE__, __func__, #cond, __VA_ARGS__); \
+    throw_tensor_error(error_msg); \
+  }
+
+#define TENSOR_ASSERTM(...) ONNX_EXPAND(_TENSOR_ASSERTM(__VA_ARGS__, " "))
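The macros take a condition plus a printf-style message formatted through barf() above. A sketch of intended use (the function and the invariant it checks are invented for illustration):

    #include "onnx/common/assertions.h"

    namespace ONNX_NAMESPACE {
    // Illustrative invariant check; throws assert_error with file/line context.
    inline void checkRank(size_t rank) {
      ONNX_ASSERTM(rank <= 8, "expected rank <= 8, got %zu", rank);
    }
    } // namespace ONNX_NAMESPACE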
diff --git a/MLPY/Lib/site-packages/onnx/common/common.h b/MLPY/Lib/site-packages/onnx/common/common.h
new file mode 100644
index 0000000000000000000000000000000000000000..a1c6c08596eaccad49face1ce2c68c7a5fc39c05
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/common/common.h
@@ -0,0 +1,55 @@
+// Copyright (c) ONNX Project Contributors
+
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#pragma once
+
+#define ONNX_UNUSED_PARAMETER(x) (void)(x)
+
+#ifdef ONNX_NO_EXCEPTIONS
+#include <iostream>
+#define ONNX_THROW(...) \
+  do { \
+    std::cerr << ONNX_NAMESPACE::MakeString(__VA_ARGS__); \
+    abort(); \
+  } while (false)
+
+#define ONNX_THROW_EX(ex) \
+  do { \
+    std::cerr << ex.what() << std::endl; \
+    abort(); \
+  } while (false)
+
+#define ONNX_TRY if (true)
+#define ONNX_CATCH(x) else if (false)
+#define ONNX_HANDLE_EXCEPTION(func)
+
+#else
+#define ONNX_THROW(...) throw std::runtime_error(ONNX_NAMESPACE::MakeString(__VA_ARGS__))
+#define ONNX_THROW_EX(ex) throw ex
+
+#define ONNX_TRY try
+#define ONNX_CATCH(x) catch (x)
+#define ONNX_HANDLE_EXCEPTION(func) func()
+#endif
+
+// Macros to disable the copy and/or assignment methods
+// These are usually placed in the private: declarations for a class.
+
+#define ONNX_DISALLOW_COPY(TypeName) TypeName(const TypeName&) = delete
+
+#define ONNX_DISALLOW_ASSIGNMENT(TypeName) TypeName& operator=(const TypeName&) = delete
+
+#define ONNX_DISALLOW_COPY_AND_ASSIGNMENT(TypeName) \
+  ONNX_DISALLOW_COPY(TypeName); \
+  ONNX_DISALLOW_ASSIGNMENT(TypeName)
+
+#define ONNX_DISALLOW_MOVE(TypeName) \
+  TypeName(TypeName&&) = delete; \
+  TypeName& operator=(TypeName&&) = delete
+
+#define ONNX_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(TypeName) \
+  ONNX_DISALLOW_COPY_AND_ASSIGNMENT(TypeName); \
+  ONNX_DISALLOW_MOVE(TypeName)
diff --git a/MLPY/Lib/site-packages/onnx/common/constants.h b/MLPY/Lib/site-packages/onnx/common/constants.h
new file mode 100644
index 0000000000000000000000000000000000000000..9280bf213f57c1e0b9e4529a8796afa8e399fcc6
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/common/constants.h
@@ -0,0 +1,45 @@
+// Copyright (c) ONNX Project Contributors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+#pragma once
+
+#include <string>
+
+namespace ONNX_NAMESPACE {
+// For ONNX op/function registration.
+
+// ONNX domains.
+constexpr const char* AI_ONNX_ML_DOMAIN = "ai.onnx.ml";
+constexpr const char* AI_ONNX_TRAINING_DOMAIN = "ai.onnx.training";
+constexpr const char* AI_ONNX_PREVIEW_TRAINING_DOMAIN = "ai.onnx.preview.training";
+// The following two are equivalent in an onnx proto representation.
+constexpr const char* ONNX_DOMAIN = "";
+constexpr const char* AI_ONNX_DOMAIN = "ai.onnx";
+
+inline std::string NormalizeDomain(const std::string& domain) {
+  return (domain == AI_ONNX_DOMAIN) ? ONNX_DOMAIN : domain;
+}
+
+inline bool IsOnnxDomain(const std::string& domain) {
+  return (domain == AI_ONNX_DOMAIN) || (domain == ONNX_DOMAIN);
+}
+
+constexpr bool OPTIONAL_VALUE = false;
+
+// For dimension denotation.
+constexpr const char* DATA_BATCH = "DATA_BATCH";
+constexpr const char* DATA_CHANNEL = "DATA_CHANNEL";
+constexpr const char* DATA_TIME = "DATA_TIME";
+constexpr const char* DATA_FEATURE = "DATA_FEATURE";
+constexpr const char* FILTER_IN_CHANNEL = "FILTER_IN_CHANNEL";
+constexpr const char* FILTER_OUT_CHANNEL = "FILTER_OUT_CHANNEL";
+constexpr const char* FILTER_SPATIAL = "FILTER_SPATIAL";
+
+// For type denotation.
+constexpr const char* TENSOR = "TENSOR";
+constexpr const char* IMAGE = "IMAGE";
+constexpr const char* AUDIO = "AUDIO";
+constexpr const char* TEXT = "TEXT";
+
+} // namespace ONNX_NAMESPACE
diff --git a/MLPY/Lib/site-packages/onnx/common/file_utils.h b/MLPY/Lib/site-packages/onnx/common/file_utils.h
new file mode 100644
index 0000000000000000000000000000000000000000..a58edcd2ccd693659f041eb062092bece900a1ca
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/common/file_utils.h
@@ -0,0 +1,31 @@
+// Copyright (c) ONNX Project Contributors
+
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#pragma once
+
+#include <filesystem>
+#include <fstream>
+#include <iterator>
+#include <string>
+
+#include "onnx/checker.h"
+#include "onnx/common/path.h"
+
+namespace ONNX_NAMESPACE {
+
+template <typename T>
+void LoadProtoFromPath(const std::string proto_path, T& proto) {
+  std::filesystem::path proto_u8_path = std::filesystem::u8path(proto_path);
+  std::fstream proto_stream(proto_u8_path, std::ios::in | std::ios::binary);
+  if (!proto_stream.good()) {
+    fail_check("Unable to open proto file: ", proto_path, ". Please check if it is a valid proto. ");
+  }
+  std::string data{std::istreambuf_iterator<char>{proto_stream}, std::istreambuf_iterator<char>{}};
+  if (!ParseProtoFromBytes(&proto, data.c_str(), data.size())) {
+    fail_check(
+        "Unable to parse proto from file: ", proto_path, ". Please check if it is a valid protobuf file of proto. ");
+  }
+}
+} // namespace ONNX_NAMESPACE
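LoadProtoFromPath is templated on the message type, so the same helper reads any ONNX proto from disk, reporting failures through fail_check() from onnx/checker.h. A sketch (the wrapper and path are hypothetical):

    #include "onnx/common/file_utils.h"
    #include "onnx/onnx_pb.h"

    void load(const std::string& path) {
      ONNX_NAMESPACE::ModelProto model;
      // fail_check() throws if the file is missing or does not
      // parse as a ModelProto.
      ONNX_NAMESPACE::LoadProtoFromPath(path, model);
    }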
diff --git a/MLPY/Lib/site-packages/onnx/common/graph_node_list.h b/MLPY/Lib/site-packages/onnx/common/graph_node_list.h
new file mode 100644
index 0000000000000000000000000000000000000000..31ada8a80282311d1cab2d7ff2b3c43f96d95e4f
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/common/graph_node_list.h
@@ -0,0 +1,167 @@
+// Copyright (c) ONNX Project Contributors
+
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+// ATTENTION: The code in this file is highly EXPERIMENTAL.
+// Adventurous users should note that the APIs will probably change.
+
+#include "onnx/common/assertions.h"
+
+namespace ONNX_NAMESPACE {
+
+// Intrusive doubly linked lists with sane reverse iterators.
+// The header file is named graph_node_list.h because it is ONLY
+// used for Graph's Node lists, and if you want to use it for other
+// things, you will have to do some refactoring.
+//
+// At the moment, the templated type T must support a few operations:
+//
+// - It must have a field: T* next_in_graph[2] = { nullptr, nullptr };
+//   which is used for the intrusive linked list pointers.
+//
+// - It must have a method 'destroy()', which removes T from the
+//   list and frees a T.
+//
+// In practice, we are only using it with Node and const Node;
+// 'destroy()' needs to be renegotiated if you want to use this
+// somewhere else.
+//
+// Besides the benefits of being intrusive, unlike std::list, these lists
+// handle forward and backward iteration uniformly because we require a
+// "before-first-element" sentinel. This means that reverse iterators
+// physically point to the element they logically point to, rather than
+// the off-by-one behavior for all standard library reverse iterators.
+
+static constexpr size_t kNextDirection = 0;
+static constexpr size_t kPrevDirection = 1;
+
+template <typename T>
+struct generic_graph_node_list;
+
+template <typename T>
+struct generic_graph_node_list_iterator;
+
+struct Node;
+using graph_node_list = generic_graph_node_list<Node>;
+using const_graph_node_list = generic_graph_node_list<const Node>;
+using graph_node_list_iterator = generic_graph_node_list_iterator<Node>;
+using const_graph_node_list_iterator = generic_graph_node_list_iterator<const Node>;
+
+template <typename T>
+struct generic_graph_node_list_iterator final {
+  generic_graph_node_list_iterator() : cur(nullptr), d(kNextDirection) {}
+  generic_graph_node_list_iterator(T* cur, size_t d) : cur(cur), d(d) {}
+  T* operator*() const {
+    return cur;
+  }
+  T* operator->() const {
+    return cur;
+  }
+  generic_graph_node_list_iterator& operator++() {
+    ONNX_ASSERT(cur);
+    cur = cur->next_in_graph[d];
+    return *this;
+  }
+  generic_graph_node_list_iterator operator++(int) {
+    generic_graph_node_list_iterator old = *this;
+    ++(*this);
+    return old;
+  }
+  generic_graph_node_list_iterator& operator--() {
+    ONNX_ASSERT(cur);
+    cur = cur->next_in_graph[reverseDir()];
+    return *this;
+  }
+  generic_graph_node_list_iterator operator--(int) {
+    generic_graph_node_list_iterator old = *this;
+    --(*this);
+    return old;
+  }
+
+  // erase cur without invalidating this iterator
+  // named differently from destroy so that ->/. bugs do not
+  // silently cause the wrong one to be called.
+  // iterator will point to the previous entry after call
+  void destroyCurrent() {
+    T* n = cur;
+    cur = cur->next_in_graph[reverseDir()];
+    n->destroy();
+  }
+  generic_graph_node_list_iterator reverse() {
+    return generic_graph_node_list_iterator(cur, reverseDir());
+  }
+
+ private:
+  size_t reverseDir() {
+    return d == kNextDirection ? kPrevDirection : kNextDirection;
+  }
+  T* cur;
+  size_t d; // direction 0 is forward 1 is reverse, see next_in_graph
+};
+template <typename T>
+struct generic_graph_node_list final {
+  using iterator = generic_graph_node_list_iterator<T>;
+  using const_iterator = generic_graph_node_list_iterator<const T>;
+  generic_graph_node_list_iterator<T> begin() {
+    return generic_graph_node_list_iterator<T>(head->next_in_graph[d], d);
+  }
+  generic_graph_node_list_iterator<const T> begin() const {
+    return generic_graph_node_list_iterator<const T>(head->next_in_graph[d], d);
+  }
+  generic_graph_node_list_iterator<T> end() {
+    return generic_graph_node_list_iterator<T>(head, d);
+  }
+  generic_graph_node_list_iterator<const T> end() const {
+    return generic_graph_node_list_iterator<const T>(head, d);
+  }
+  generic_graph_node_list_iterator<T> rbegin() {
+    return reverse().begin();
+  }
+  generic_graph_node_list_iterator<const T> rbegin() const {
+    return reverse().begin();
+  }
+  generic_graph_node_list_iterator<T> rend() {
+    return reverse().end();
+  }
+  generic_graph_node_list_iterator<const T> rend() const {
+    return reverse().end();
+  }
+  generic_graph_node_list reverse() {
+    return generic_graph_node_list(head, d == kNextDirection ? kPrevDirection : kNextDirection);
+  }
+  const generic_graph_node_list reverse() const {
+    return generic_graph_node_list(head, d == kNextDirection ? kPrevDirection : kNextDirection);
+  }
+  generic_graph_node_list(T* head, size_t d) : head(head), d(d) {}
+
+ private:
+  T* head;
+  size_t d;
+};
+
+template <typename T>
+static inline bool operator==(generic_graph_node_list_iterator<T> a, generic_graph_node_list_iterator<T> b) {
+  return *a == *b;
+}
+
+template <typename T>
+static inline bool operator!=(generic_graph_node_list_iterator<T> a, generic_graph_node_list_iterator<T> b) {
+  return *a != *b;
+}
+
+} // namespace ONNX_NAMESPACE
+
+namespace std {
+
+template <typename T>
+struct iterator_traits<ONNX_NAMESPACE::generic_graph_node_list_iterator<T>> {
+  using difference_type = int64_t;
+  using value_type = T*;
+  using pointer = T**;
+  using reference = T*&;
+  using iterator_category = bidirectional_iterator_tag;
+};
+
+} // namespace std
diff --git a/MLPY/Lib/site-packages/onnx/common/interned_strings.cc b/MLPY/Lib/site-packages/onnx/common/interned_strings.cc
new file mode 100644
index 0000000000000000000000000000000000000000..4c00ef26d6479734b2b04df2000321e7120b2ff1
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/common/interned_strings.cc
@@ -0,0 +1,81 @@
+// Copyright (c) ONNX Project Contributors
+
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+// ATTENTION: The code in this file is highly EXPERIMENTAL.
+// Adventurous users should note that the APIs will probably change.
+
+#include "onnx/common/interned_strings.h"
+
+#include <stdint.h>
+
+#include <mutex>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include "onnx/common/assertions.h"
+
+namespace ONNX_NAMESPACE {
+
+struct InternedStrings {
+  InternedStrings() : next_sym(kLastSymbol) {
+#define REGISTER_SYMBOL(s) \
+  string_to_sym_[#s] = k##s; \
+  sym_to_string_[k##s] = #s;
+    FORALL_BUILTIN_SYMBOLS(REGISTER_SYMBOL)
+#undef REGISTER_SYMBOL
+  }
+  uint32_t symbol(const std::string& s) {
+    std::lock_guard<std::mutex> guard(mutex_);
+    auto it = string_to_sym_.find(s);
+    if (it != string_to_sym_.end())
+      return it->second;
+    uint32_t k = next_sym++;
+    string_to_sym_[s] = k;
+    sym_to_string_[k] = s;
+    return k;
+  }
+  const char* string(Symbol sym) {
+    // Builtin Symbols are also in the maps, but
+    // we can bypass the need to acquire a lock
+    // to read the map for Builtins because we already
+    // know their string value
+    switch (sym) {
+#define DEFINE_CASE(s) \
+  case k##s: \
+    return #s;
+      FORALL_BUILTIN_SYMBOLS(DEFINE_CASE)
+#undef DEFINE_CASE
+      default:
+        return customString(sym);
+    }
+  }
+
+ private:
+  const char* customString(Symbol sym) {
+    std::lock_guard<std::mutex> guard(mutex_);
+    auto it = sym_to_string_.find(sym);
+    ONNX_ASSERT(it != sym_to_string_.end());
+    return it->second.c_str();
+  }
+  std::unordered_map<std::string, uint32_t> string_to_sym_;
+  std::unordered_map<uint32_t, std::string> sym_to_string_;
+  uint32_t next_sym;
+  std::mutex mutex_;
+};
+
+static InternedStrings& globalStrings() {
+  static InternedStrings s;
+  return s;
+}
+
+const char* Symbol::toString() const {
+  return globalStrings().string(*this);
+}
+
+Symbol::Symbol(const std::string& s) : value(globalStrings().symbol(s)) {}
+
+} // namespace ONNX_NAMESPACE
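Interning maps each distinct string to a stable uint32_t: builtin symbols resolve without locking, and unknown strings are assigned a fresh id under the mutex. A usage sketch (the custom op name is invented):

    #include "onnx/common/interned_strings.h"

    using ONNX_NAMESPACE::Symbol;

    void demo() {
      Symbol conv = ONNX_NAMESPACE::kConv;   // builtin: compile-time id, no lock
      Symbol custom("my.custom.op");         // interned on first use
      bool same = (conv == Symbol("Conv"));  // true: "Conv" resolves to kConv
      const char* name = custom.toString();  // "my.custom.op"
      (void)same;
      (void)name;
    }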
diff --git a/MLPY/Lib/site-packages/onnx/common/interned_strings.h b/MLPY/Lib/site-packages/onnx/common/interned_strings.h
new file mode 100644
index 0000000000000000000000000000000000000000..b9f95e3f7c50f6b47bcb6f04291eca0c7f3c4727
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/common/interned_strings.h
@@ -0,0 +1,244 @@
+// Copyright (c) ONNX Project Contributors
+
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+// ATTENTION: The code in this file is highly EXPERIMENTAL.
+// Adventurous users should note that the APIs will probably change.
+
+#pragma once
+#include <stdint.h>
+
+#include <cstddef>
+#include <functional>
+#include <string>
+
+namespace ONNX_NAMESPACE {
+
+#define FORALL_BUILTIN_SYMBOLS(_) \
+  _(spatial) \
+  _(select_last_index) \
+  _(coordinate_transformation_mode) \
+  _(PythonOp) \
+  _(CppOp) \
+  _(Param) \
+  _(Select) \
+  _(Return) \
+  _(Eval) \
+  _(add) \
+  _(Add) \
+  _(Div) \
+  _(Mul) \
+  _(Neg) \
+  _(Sub) \
+  _(Pow) \
+  _(Sigmoid) \
+  _(ArgMax) \
+  _(Concat) \
+  _(Softmax) \
+  _(LogSoftmax) \
+  _(Dropout) \
+  _(Tanh) \
+  _(mul) \
+  _(neg) \
+  _(sigmoid) \
+  _(tanh) \
+  _(Constant) \
+  _(cat) \
+  _(Slice) \
+  _(Squeeze) \
+  _(Undefined) \
+  _(FusionGroup) \
+  _(MatMul) \
+  _(Gemm) \
+  _(Tile) \
+  _(SubConstant) \
+  _(Scale) \
+  _(Transpose) \
+  _(Pad) \
+  _(Reshape) \
+  _(split) \
+  _(chunk) \
+  _(Offset) \
+  _(value) \
+  _(Subgraph) \
+  _(BatchNormalization) \
+  _(Conv) \
+  _(ConvTranspose) \
+  _(is_test) \
+  _(epsilon) \
+  _(expand) \
+  _(Expand) \
+  _(order) \
+  _(momentum) \
+  _(consumed_inputs) \
+  _(kernels) \
+  _(kernel_shape) \
+  _(kernel) \
+  _(scale) \
+  _(strides) \
+  _(stride) \
+  _(pads) \
+  _(pad) \
+  _(beta) \
+  _(alpha) \
+  _(dilations) \
+  _(dilation) \
+  _(broadcast) \
+  _(axis) \
+  _(ratio) \
+  _(size) \
+  _(dim) \
+  _(keepdims) \
+  _(perm) \
+  _(shape) \
+  _(axes) \
+  _(group) \
+  _(inplace) \
+  _(transA) \
+  _(transB) \
+  _(other) \
+  _(__and__) \
+  _(__lshift__) \
+  _(__or__) \
+  _(__rshift__) \
+  _(__xor__) \
+  _(abs) \
+  _(acos) \
+  _(asin) \
+  _(atan) \
+  _(atan2) \
+  _(ceil) \
+  _(clamp) \
+  _(cos) \
+  _(cosh) \
+  _(div) \
+  _(eq) \
+  _(equal) \
+  _(Exp) \
+  _(ends) \
+  _(expm1) \
+  _(floor) \
+  _(fmod) \
+  _(frac) \
+  _(ge) \
+  _(gt) \
+  _(le) \
+  _(lerp) \
+  _(lgamma) \
+  _(Log) \
+  _(log1p) \
+  _(lt) \
+  _(max) \
+  _(min) \
+  _(ne) \
+  _(ones) \
+  _(pow) \
+  _(reciprocal) \
+  _(remainder) \
+  _(round) \
+  _(rsqrt) \
+  _(sin) \
+  _(sinh) \
+  _(Sqrt) \
+  _(sub) \
+  _(starts) \
+  _(tan) \
+  _(trunc) \
+  _(zeros) \
+  _(exponent) \
+  _(device) \
+  _(mode) \
+  _(Identity) \
+  _(Loop) \
+  _(If) \
+  _(body) \
+  _(then_branch) \
+  _(else_branch) \
+  _(Captured) \
+  _(__control_inputs) \
+  _(count_include_pad) \
+  _(storage_order) \
+  _(Unsqueeze) \
+  _(ReduceL1) \
+  _(ReduceL2) \
+  _(ReduceLogSum) \
+  _(ReduceLogSumExp) \
+  _(ReduceMax) \
+  _(ReduceMean) \
+  _(ReduceMin) \
+  _(ReduceProd) \
+  _(ReduceSum) \
+  _(ReduceSumSquare) \
+  _(Cast) \
+  _(to) \
+  _(PRelu) \
+  _(Greater) \
+  _(Less) \
+  _(scales) \
+  _(Upsample) \
+  _(RNN) \
+  _(layout) \
+  _(k) \
+  _(Flatten) \
+  _(ScatterElements) \
+  _(Resize) \
+  _(ceil_mode) \
+  _(num_outputs) \
+  _(start) \
+  _(end) \
+  _(num_groups) \
+  _(stash_type) \
+  _(block_size) \
+  _(output_dtype)
+
+enum BuiltinSymbol {
+#define DEFINE_SYMBOL(s) k##s,
+  FORALL_BUILTIN_SYMBOLS(DEFINE_SYMBOL)
+#undef DEFINE_SYMBOL
+  kLastSymbol, // where we start counting for new symbols
+};
+
+struct Symbol {
+  Symbol() {}
+  /*implicit*/ Symbol(BuiltinSymbol value) : value(value) {}
+  explicit Symbol(const std::string& s);
+  explicit Symbol(uint32_t value) : value(value) {}
+
+  operator uint32_t() const {
+    return value;
+  }
+  const char* toString() const;
+
+ private:
+  uint32_t value;
+};
+
+static inline bool operator==(Symbol lhs, Symbol rhs) {
+  return static_cast<uint32_t>(lhs) == static_cast<uint32_t>(rhs);
+}
+// necessary to prevent ambiguous overload resolutions
+static inline bool operator==(BuiltinSymbol lhs, Symbol rhs) {
+  return static_cast<uint32_t>(lhs) == static_cast<uint32_t>(rhs);
+}
+static inline bool operator==(Symbol lhs, BuiltinSymbol rhs) {
+  return static_cast<uint32_t>(lhs) == static_cast<uint32_t>(rhs);
+}
+
+inline Symbol operator"" _sym(const char* s, size_t) {
+  return Symbol(s);
+}
+
+} // namespace ONNX_NAMESPACE
+
+// make symbol behave like an integer in hash tables
+namespace std {
+template <>
+struct hash<ONNX_NAMESPACE::Symbol> {
+  std::size_t operator()(ONNX_NAMESPACE::Symbol s) const {
+    return std::hash<uint32_t>()(static_cast<uint32_t>(s));
+  }
+};
+
+} // namespace std
diff --git a/MLPY/Lib/site-packages/onnx/common/ir.h b/MLPY/Lib/site-packages/onnx/common/ir.h
new file mode 100644
index 0000000000000000000000000000000000000000..7b1a2ebe8f1eee9e5ec4cc269fb9b3aa7216bf08
--- /dev/null
+++ b/MLPY/Lib/site-packages/onnx/common/ir.h
@@ -0,0 +1,1449 @@
+// Copyright (c) ONNX Project Contributors
+
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+// ATTENTION: The code in this file is highly EXPERIMENTAL.
+// Adventurous users should note that the APIs will probably change.
+
+#pragma once
+
+#include <stdint.h>
+
+#include <algorithm>
+#include <atomic>
+#include <cstddef>
+#include <cstdint>
+#include <functional>
+#include <iosfwd>
+#include <memory>
+#include <sstream>
+#include <string>
+#include <unordered_map>
+#include <unordered_set>
+#include <utility>
+#include <vector>
+
+#include "onnx/common/array_ref.h"
+#include "onnx/common/assertions.h"
+#include "onnx/common/common.h"
+#include "onnx/common/graph_node_list.h"
+#include "onnx/common/interned_strings.h"
+#include "onnx/common/tensor.h"
+#include "onnx/string_utils.h"
+
+#define ONNX_DISALLOW_COPY_AND_ASSIGN(TypeName) \
+  TypeName(const TypeName&) = delete; \
+  TypeName& operator=(const TypeName&) = delete
+
+namespace ONNX_NAMESPACE {
+
+namespace { // internal/private API
+
+std::string toVarName(size_t i) {
+  std::ostringstream oss;
+  oss << "_v_" << i;
+  return oss.str();
+}
+
+} // namespace
+
+// Graph represents one "function" of computation.
+// It uses a simple ownership model where the graph owns all the nodes inside it.
+// All references inside the graph are raw pointers.
+// Destroying the Graph will invalidate any pointers to nodes in the graph.
+struct Graph;
+
+// Node is the base class of the IR graph. It represents one computation
+// and dependencies on a list of Values. The "prim-ops", so to speak.
+struct Node;
+
+// A Value represents an input or output to a node that is either a
+// Tensor or an opaque Handle object, as determined by type().
+struct Value;
+
+class ResourceGuard final {
+  std::function<void()> destructor_;
+  bool released_;
+
+ public:
+  ONNX_DISALLOW_COPY_AND_ASSIGN(ResourceGuard);
+  explicit ResourceGuard(std::function<void()> destructor) : destructor_(std::move(destructor)), released_(false) {}
+  ResourceGuard(ResourceGuard&& other) = default;
+  ResourceGuard& operator=(ResourceGuard&& other) = default;
+
+  ~ResourceGuard() {
+    if (!released_)
+      destructor_();
+  }
+
+  void release() {
+    released_ = true;
+  }
+};
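ResourceGuard is a small scope guard: the stored callback runs on destruction unless release() is called first. A sketch with a hypothetical rollback step:

    #include <functional>
    #include <utility>
    #include "onnx/common/ir.h"

    void build(std::function<void()> rollback) {
      ONNX_NAMESPACE::ResourceGuard guard(std::move(rollback));
      // ... steps that may throw or return early trigger the rollback ...
      guard.release();  // success: suppress the callback
    }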
+
+struct Dimension final {
+  Dimension() : is_unknown(true), is_int(false), dim(-1) {}
+  Dimension(std::string param) : is_unknown(false), is_int(false), dim(-1), param(std::move(param)) {} // NOLINT
+  Dimension(int64_t dim) : is_unknown(false), is_int(true), dim(dim) {} // NOLINT
+
+  bool is_unknown;
+  bool is_int;
+  int64_t dim;
+  std::string param;
+};
+
+enum class AttributeKind : uint8_t {
+  // float, float list, int, int list, string, string list,
+  // tensor, tensor list, subgraph, subgraph list,
+  // type proto, type proto list
+  f,
+  fs,
+  i,
+  is,
+  s,
+  ss,
+  t,
+  ts,
+  g,
+  gs,
+  tp,
+  tps
+};
+
+static inline const char* toString(AttributeKind kind) {
+  static constexpr const char* names[] = {"f", "fs", "i", "is", "s", "ss", "t", "ts", "g", "gs", "tp", "tps"};
+  ONNX_ASSERT(size_t(kind) < sizeof(names) / sizeof(const char*));
+  return names[int(kind)];
+}
+
+struct AttributeValue {
+  explicit AttributeValue(Symbol name) : name(name) {}
+  using Ptr = std::unique_ptr<AttributeValue>;
+  Symbol name;
+  virtual AttributeKind kind() const = 0;
+  virtual Ptr clone() const = 0;
+  virtual ~AttributeValue() = default;
+};
+
+template <typename T, AttributeKind Kind>
+struct ScalarAttributeValue final : public AttributeValue {
+  using ConstructorType = const T&;
+  using ValueType = T;
+  ScalarAttributeValue(Symbol name, ConstructorType value_) : AttributeValue(name), value_(value_) {}
+  ValueType& value() {
+    return value_;
+  }
+  virtual Ptr clone() const override {
+    return Ptr(new ScalarAttributeValue(name, value_));
+  }
+  virtual AttributeKind kind() const override {
+    return Kind;
+  }
+
+ private:
+  ValueType value_;
+};
+
+template <typename T, AttributeKind Kind>
+struct VectorAttributeValue final : public AttributeValue {
+  using ConstructorType = const std::vector<T>&&;
+  using ValueType = std::vector<T>;
+  VectorAttributeValue(Symbol name, ConstructorType value_) : AttributeValue(name), value_(std::move(value_)) {}
+  ValueType& value() {
+    return value_;
+  }
+  virtual AttributeKind kind() const override {
+    return Kind;
+  }
+  virtual std::unique_ptr<AttributeValue> clone() const override {
+    auto copy = value_;
+    return Ptr(new VectorAttributeValue(name, std::move(copy)));
+  }
+
+ private:
+  ValueType value_;
+};
+
+using FloatAttr = ScalarAttributeValue<double, AttributeKind::f>;
+using FloatsAttr = VectorAttributeValue<double, AttributeKind::fs>;
+using IntAttr = ScalarAttributeValue<int64_t, AttributeKind::i>;
+using IntsAttr = VectorAttributeValue<int64_t, AttributeKind::is>;
+using StringAttr = ScalarAttributeValue<std::string, AttributeKind::s>;
+using StringsAttr = VectorAttributeValue<std::string, AttributeKind::ss>;
+using TensorAttr = ScalarAttributeValue<Tensor, AttributeKind::t>;
+using TensorsAttr = VectorAttributeValue<Tensor, AttributeKind::ts>;
+using GraphAttr = ScalarAttributeValue<std::shared_ptr<Graph>, AttributeKind::g>;
+using GraphsAttr = VectorAttributeValue<std::shared_ptr<Graph>, AttributeKind::gs>;
+using TypeProtoAttr = ScalarAttributeValue<TypeProto, AttributeKind::tp>;
+using TypeProtosAttr = VectorAttributeValue<TypeProto, AttributeKind::tps>;
+
+// CRTP so that Node, which inherits Attributes, can be returned for
+// method chaining e.g:
+// Node * n = g->create(kSelect)->set_i(kOffset,3)->set_f(kValue,3.5);
+// we return Derived* pointers because Nodes are normally held as pointers.
+template <typename Derived>
+struct Attributes {
+  Attributes() {}
+  void copyAttributes(const Attributes& rhs) {
+    values_.clear();
+    values_.reserve(rhs.values_.size());
+    for (auto& i : rhs.values_) {
+      values_.push_back(i->clone());
+    }
+  }
+  bool hasAttribute(Symbol name) const {
+    return find(name, false) != values_.end();
+  }
+  AttributeKind kindOf(Symbol name) const {
+    return (*find(name, true))->kind();
+  }
+  Derived* removeAttribute(Symbol name) {
+    values_.erase(find(name, true));
+    return This();
+  }
+  bool hasAttributes() const {
+    return !values_.empty();
+  }
+  // The names are returned in order, since name actually is the index.
+  std::vector<Symbol> attributeNames() const {
+    std::vector<Symbol> names;
+    names.reserve(values_.size());
+    for (auto& a : values_)
+      names.push_back(a->name);
+    return names;
+  }
+
+#define CREATE_ACCESSOR(Kind, method) \
+  Derived* method##_(Symbol name, Kind##Attr::ConstructorType v) { \
+    return set<Kind##Attr>(name, std::forward<Kind##Attr::ConstructorType>(v)); \
+  } \
+  const Kind##Attr::ValueType& method(Symbol name) const { \
+    return get<Kind##Attr>(name); \
+  }
+  CREATE_ACCESSOR(Float, f)
+  CREATE_ACCESSOR(Floats, fs)
+  CREATE_ACCESSOR(String, s)
+  CREATE_ACCESSOR(Strings, ss)
+  CREATE_ACCESSOR(Int, i)
+  CREATE_ACCESSOR(Ints, is)
+  CREATE_ACCESSOR(Tensor, t)
+  CREATE_ACCESSOR(Tensors, ts)
+  CREATE_ACCESSOR(Graph, g)
+  CREATE_ACCESSOR(Graphs, gs)
+  CREATE_ACCESSOR(TypeProto, tp)
+  CREATE_ACCESSOR(TypeProtos, tps)
+
+#undef CREATE_ACCESSOR
+
+ private:
+  Derived* This() {
+    return static_cast<Derived*>(this);
+  }
+  template <typename T>
+  Derived* set(Symbol name, typename T::ConstructorType v) {
+    auto it = find(name, false);
+    auto nv = AVPtr(new T(name, std::forward<typename T::ConstructorType>(v)));
+    if (it == values_.end()) {
+      values_.push_back(std::move(nv));
+    } else {
+      *it = std::move(nv);
+    }
+    return This();
+  }
+  template <typename T>
+  typename T::ValueType& get(Symbol name) const {
+    auto it = find(name, true);
+    T* child = static_cast<T*>(it->get());
+    return child->value();
+  }
+  using AVPtr = AttributeValue::Ptr;
+  // NB: For determinism, we use a vector rather than a hash map. This does
+  // mean that lookups are O(n), so you shouldn't use Attributes to store
+  // a big pile of messages.
+  std::vector<AVPtr> values_;
+  using iterator = std::vector<AVPtr>::iterator;
+  iterator find(Symbol name, bool required) {
+    auto it = std::find_if(values_.begin(), values_.end(), [&](const AVPtr& v) { return v->name == name; });
+    ONNX_ASSERT(!required || it != values_.end());
+    return it;
+  }
+  using const_iterator = std::vector<AVPtr>::const_iterator;
+  const_iterator find(Symbol name, bool required) const {
+    auto it = std::find_if(values_.begin(), values_.end(), [&](const AVPtr& v) { return v->name == name; });
+    ONNX_ASSERTM(
+        !required || it != values_.end(),
+        "%s:%u: %s: required undefined attribute '%s'",
+        __FILE__,
+        __LINE__,
+        __func__,
+        name.toString());
+    return it;
+  }
+};
+
+// Each use is represented by this type; see Node::uses().
+// 'user' is the consumer of the value; 'offset' is the index in
+// 'user's inputs where the produced value appears.
+struct Use final {
+  Use(Node* user, size_t offset) : user(user), offset(offset) {}
+  Node* user;
+  size_t offset;
+};
+
+static inline bool operator==(const Use& a, const Use& b) {
+  return a.user == b.user && a.offset == b.offset;
+}
+
+// the list types are intentionally simple, but we type-def
+// them here so if we need to change them, refactoring will be easier
+using node_list = std::vector<Node*>;
+using value_list = std::vector<Value*>;
+using use_list = std::vector<Use>;
+using NodeKind = Symbol;
+
+struct Value final {
+  ONNX_DISALLOW_COPY_AND_ASSIGN(Value);
+  Value(Node* node_, size_t offset_);
+  Value(Value&&) = default;
+  Value& operator=(Value&&) = default;
+  ~Value() = default;
+
+ private:
+  friend struct Node;
+  friend struct Graph;
+  Node* node_;
+  size_t offset_;
+  size_t unique_ = 0; // unique id
+  size_t stage_ = 0; // 0-forward, 1-backward, 2-double-backward,...
+  use_list uses_in_current_graph_;
+  bool has_unique_name_;
+  std::string unique_name_;
+  int32_t elem_type_;
+  bool has_sizes_;
+  std::vector<Dimension> sizes_;
+
+ public:
+  Value* setElemType(int32_t elem_type) {
+    elem_type_ = elem_type;
+    return this;
+  }
+  int32_t elemType() const {
+    return elem_type_;
+  }
+  bool has_sizes() const {
+    return has_sizes_;
+  }
+  Value* setSizes(std::vector<Dimension> sizes) {
+    has_sizes_ = true;
+    sizes_ = std::move(sizes);
+    return this;
+  }
+  Value* wipeSizes() {
+    has_sizes_ = false;
+    sizes_ = std::vector<Dimension>();
+    return this;
+  }
+  const std::vector<Dimension>& sizes() const {
+    return sizes_;
+  }
+  size_t unique() const {
+    return unique_;
+  }
+  bool has_unique_name() const {
+    return has_unique_name_;
+  }
+  std::string uniqueName() const {
+    if (has_unique_name())
+      return unique_name_;
+    return toVarName(unique());
+  }
+  Value* setUniqueName(const std::string& name, bool rename_subgraph_captured_nodes = true);
+  Value* setStage(size_t s) {
+    stage_ = s;
+    return this;
+  }
+  size_t stage() const {
+    return stage_;
+  }
+  Node* node() {
+    return node_;
+  }
+  size_t offset() const {
+    return offset_;
+  }
+  const Node* node() const {
+    return node_;
+  }
+  Graph* owningGraph();
+  const Graph* owningGraph() const;
+  // TODO: make this more const correct
+  const use_list uses() const;
+
+  // Replaces all uses of this value with 'newValue'.
+  //
+  // Given:   %3 = f(%1, %2)
+  //          %4 = g(%3)
+  //          %5 = h(%3, %3)
+  // Execute: %3.replaceAllUsesWith(%6)
+  // Result:  %3 = f(%1, %2)
+  //          %4 = g(%6)
+  //          %5 = h(%6, %6)
+  void replaceAllUsesWith(Value* newValue);
+
+  Value* copyMetadata(Value* from) {
+    setElemType(from->elemType());
+    setSizes(from->sizes());
+    if (from->has_unique_name()) {
+      setUniqueName(from->uniqueName());
+    }
+    return this;
+  }
+};
+
+struct Node : public Attributes<Node> {
+  ONNX_DISALLOW_COPY_AND_ASSIGN(Node);
+  friend struct Graph;
+  friend struct Value;
+  friend graph_node_list;
+  friend const_graph_node_list;
+  friend graph_node_list_iterator;
+  friend const_graph_node_list_iterator;
+
+ private:
+  // Each node except Return/Param is associated with exactly one place in
+  // the node list of the graph_. The list is circular and doubly linked; the
+  // Return node serves as the sentinel for the beginning and end of the list,
+  // so the list never contains null pointers.
+  //   next_in_graph[0] is the next pointer
+  //   next_in_graph[1] is the prev pointer
+  // Using an array allows the same iterator class to be used for forward and
+  // reverse node lists.
+  // This list represents a topological sort.
+
+  Node* next_in_graph[2] = {nullptr, nullptr};
+  Node*& next() {
+    return next_in_graph[kNextDirection];
+  }
+  Node*& prev() {
+    return next_in_graph[kPrevDirection];
+  }
+  Node* const& next() const {
+    return next_in_graph[kNextDirection];
+  }
+  Node* const& prev() const {
+    return next_in_graph[kPrevDirection];
+  }
+
+  const NodeKind kind_;
+  std::vector<Value*> inputs_;
+  std::vector<Value*> outputs_;
+  Graph* graph_;
+  size_t stage_;
+  bool has_name_;
+  std::string name_;
+  bool has_domain_;
+  std::string domain_;
+  bool has_doc_string_;
+  std::string doc_string_;
+  bool has_overload_;
+  std::string overload_;
+
+ protected:
+  Node(Graph* graph_, NodeKind kind_); // defined after graph
+
+ public:
+  bool has_name() const {
+    return has_name_;
+  }
+  const std::string& name() const {
+    return name_;
+  }
+  void setName(std::string name) {
+    has_name_ = true;
+    name_ = std::move(name);
+  }
+  bool has_domain() const {
+    return has_domain_;
+  }
+  const std::string& domain() const {
+    return domain_;
+  }
+  void setDomain(std::string domain) {
+    has_domain_ = true;
+    domain_ = std::move(domain);
+  }
+  bool has_overload() const {
+    return has_overload_;
+  }
+  const std::string& overload() const {
+    return overload_;
+  }
+  void setOverload(std::string overload) {
+    has_overload_ = true;
+    overload_ = std::move(overload);
+  }
+  bool has_doc_string() const {
+    return has_doc_string_;
+  }
+  const std::string& docString() const {
+    return doc_string_;
+  }
+  void setDocString(std::string doc_string) {
+    has_doc_string_ = true;
+    doc_string_ = std::move(doc_string);
+  }
+  NodeKind kind() const {
+    return kind_;
+  }
+  Graph* owningGraph() {
+    return graph_;
+  }
+  const Graph* owningGraph() const {
+    return graph_;
+  }
+  size_t stage() const {
+    return stage_;
+  }
+  Node* setStage(size_t s) {
+    stage_ = s;
+    return this;
+  }
+  // NB: This returns an ArrayRef; that means that it will
+  // get invalidated if you resize inputs (e.g., using addInput).
+  // We can't return a std::vector<Value*>& because there's no
+  // way to soundly cast to std::vector<const Value*> (an insane
+  // implementation of std::vector could make this representationally
+  // different.)
+  ArrayRef<Value*> inputs() {
+    return inputs_;
+  }
+  ArrayRef<const Value*> inputs() const {
+    // Vectors are not convertible in const-ness of elements, but
+    // raw pointers are.
+    return {inputs_.data(), inputs_.size()};
+  }
+  // NB: This returns an ArrayRef; that means that it will
+  // get invalidated if you resize outputs (e.g., using addOutput).
+  // We can't return a std::vector<Value*>& because there's no
+  // way to soundly cast to std::vector<const Value*> (an insane
+  // implementation of std::vector could make this representationally
+  // different.)
+  ArrayRef<Value*> outputs() {
+    return outputs_;
+  }
+  ArrayRef<const Value*> outputs() const {
+    // Vectors are not convertible in const-ness of elements, but
+    // raw pointers are.
+    return {outputs_.data(), outputs_.size()};
+  }
+  bool hasUses() const {
+    for (auto o : outputs()) {
+      if (!o->uses().empty())
+        return true;
+    }
+    return false;
+  }
+  void replaceAllUsesWith(Node* n) {
+    ONNX_ASSERT(outputs().size() == n->outputs().size());
+    size_t nOutputs = outputs().size();
+    for (size_t i = 0; i < nOutputs; i++) {
+      outputs()[i]->replaceAllUsesWith(n->outputs()[i]);
+    }
+  }
+  // Lots of things like chunk have a single input or single output, so we
+  // have a helper to make accessing it easier.
+  Value* input() {
+    ONNX_ASSERT(inputs_.size() == 1);
+    return inputs_.at(0);
+  }
+  Value* output() {
+    ONNX_ASSERT(outputs_.size() == 1);
+    return outputs_.at(0);
+  }
+  const Value* input() const {
+    ONNX_ASSERT(inputs_.size() == 1);
+    return inputs_.at(0);
+  }
+  Value* output() const {
+    ONNX_ASSERT(outputs_.size() == 1);
+    return outputs_.at(0);
+  }
+  // Access a particular input. This is a checked index.
+  Value* input(size_t i) {
+    return inputs_.at(i);
+  }
+  const Value* input(size_t i) const {
+    return inputs_.at(i);
+  }
+
+  // Graphs
+
+  // Note [Topological invariant]
+  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  // We always maintain an up-to-date topological ordering of all nodes via
+  // the next()/prev() links. All transformations to graphs must preserve
+  // this topological ordering: for example, it is only valid to 'addInput'
+  // with an input which is topologically before the current node.
+  //
+  // Usually, it is obvious whether or not topological order is maintained;
+  // for example, if you are adding nodes to the end of the topsort, it's
+  // impossible for them to refer to inputs that are not in the topsort.
+  // If it is not obvious, please comment accordingly.
+
+  // Add 'node' as an input to 'this' at the end of existing
+  // arguments. Returns the added node for ease of chaining.
+  //
+  // Given:   %3 = f(%1, %2)
+  // Execute: %3.addInput(%4)
+  // Result:  %3 = f(%1, %2, %4)
+  Value* addInput(Value* node) {
+    ONNX_ASSERT(graph_ == node->owningGraph());
+    node->uses_in_current_graph_.emplace_back(this, inputs_.size());
+    inputs_.push_back(node);
+    return node;
+  }
+
+  // Replace the input of 'this' at position 'i' with
+  // 'newValue', returning the old node.
+  //
+  // Given:   %3 = f(%1, %2)
+  // Execute: %3.replaceInput(1, %4)
+  // Result:  %3 = f(%1, %4)
+  Value* replaceInput(size_t i, Value* newValue) {
+    ONNX_ASSERT(newValue->owningGraph() == graph_);
+    Value* old = dropInput(i);
+    inputs_[i] = newValue;
+    newValue->uses_in_current_graph_.emplace_back(this, i);
+    return old;
+  }
+
+  // Replace all occurrences of 'from' in the inputs of this
+  // node with 'to'. Corresponds to llvm's replaceUsesOfWith.
+  //
+  // Given:   %3 = f(%1, %2, %1)
+  // Execute: %3.replaceInputWith(%1, %4)
+  // Result:  %3 = f(%4, %2, %4)
+  void replaceInputWith(Value* from, Value* to) {
+    ONNX_ASSERT(from->owningGraph() == graph_);
+    ONNX_ASSERT(to->owningGraph() == graph_);
+    size_t i = 0;
+    for (auto input : inputs()) {
+      if (input == from)
+        replaceInput(i, to);
+      i++;
+    }
+  }
+
+  Value* addOutput() {
+    outputs_.push_back(new Value(this, outputs_.size()));
+    return outputs_.back();
+  }
+
+  void eraseOutput(size_t i);
+
+  // Insert unattached 'this' node before 'n' in the topological order.
+  // Returns this (for chaining).
+  //
+  // Given:   %3 = f(%1, %2)
+  //          %4 = g(%3)
+  // and unattached: %5 = h(%1)
+  // Execute: %5.insertBefore(%4)
+  // Result:  %3 = f(%1, %2)
+  //          %5 = h(%1)
+  //          %4 = g(%3)
+  Node* insertBefore(Node* n) {
+    ONNX_ASSERT(n->inGraphList());
+    insertAfter(n->prev());
+    return this;
+  }
+
+  // Insert unattached 'this' node after 'n' in the topological order.
+  // Returns this (for chaining).
+  //
+  // Given:   %3 = f(%1, %2)
+  //          %4 = g(%3)
+  // and unattached: %5 = h(%1)
+  // Execute: %5.insertAfter(%4)
+  // Result:  %3 = f(%1, %2)
+  //          %4 = g(%3)
+  //          %5 = h(%1)
+  Node* insertAfter(Node* n) {
+    ONNX_ASSERT(!inGraphList() && n->inGraphList());
+    Node* next = n->next();
+    n->next() = this;
+    this->prev() = n;
+    this->next() = next;
+    next->prev() = this;
+    return this;
+  }
+
+  // Move 'this' (already in the graph) after 'n' in the topological order.
+  //
+  // Given:   %2 = f(%1)
+  //          %3 = g(%1)
+  // Execute: %2.moveAfter(%3)
+  // Result:  %3 = g(%1)
+  //          %2 = f(%1)
+  //
+  void moveAfter(Node* n) {
+    removeFromList();
+    insertAfter(n);
+  }
+
+  // Move 'this' (already in the graph) before 'n' in the topological order.
+  //
+  // Given:   %2 = f(%1)
+  //          %3 = g(%1)
+  // Execute: %3.moveBefore(%2)
+  // Result:  %3 = g(%1)
+  //          %2 = f(%1)
+  void moveBefore(Node* n) {
+    removeFromList();
+    insertBefore(n);
+  }
+
+  // Remove the input at 'i' from this node.
+  //
+  // WARNING: This is O(n) in the number of inputs, so avoid repeatedly calling
+  // removeInput.
+  //
+  // Given:   %3 = f(%1, %2)
+  // Execute: %3.removeInput(1)
+  // Result:  %3 = f(%1)
+  void removeInput(size_t i) {
+    dropInput(i);
+    // everything after this input shifts left,
+    // so we need to update their use offsets to match
+    for (size_t j = i + 1; j < inputs_.size(); j++) {
+      auto it = findUseForInput(j);
+      it->offset--;
+    }
+    inputs_.erase(inputs_.begin() + i);
+  }
+
+  // Remove all inputs from a node.
+  //
+  // Given:   %3 = f(%1, %2)
+  // Execute: %3.removeAllInputs()
+  // Result:  %3 = f()
+  void removeAllInputs() {
+    for (size_t i = 0; i < inputs().size(); ++i)
+      dropInput(i);
+    inputs_.clear();
+  }
+
+  // Check whether this node is before node n in the graph.
+  bool isBefore(Node* n);
+
+  // iterators of the node list starting at this node
+  // useful for resuming a search starting at this node
+  graph_node_list_iterator iterator();
+  graph_node_list_iterator reverseIterator();
+  const_graph_node_list_iterator iterator() const;
+  const_graph_node_list_iterator reverseIterator() const;
+
+  // Remove 'this' from the instruction list and deallocate it.
+  //
+  // Invariant: no outputs of 'this' may have any uses.
+  //
+  // Given:   %2 = f(%1)
+  //          %3 = g(%1)
+  // Execute: %2.destroy()
+  // Result:  %3 = g(%1)
+  void destroy();
+
+  // Dynamically cast this node to the subclass indicated by the
+  // template variable, returning nullptr if the cast is invalid.
+  //
+  // Example usage: if(auto s = n.cast